diff --git a/CMakeLists.txt b/CMakeLists.txt
index c1ed1247de..d707f6b92f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -233,9 +233,6 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/dwarf/OP.zig"
"${CMAKE_SOURCE_DIR}/lib/std/dwarf/TAG.zig"
"${CMAKE_SOURCE_DIR}/lib/std/elf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/event.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/event/batch.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/event/loop.zig"
"${CMAKE_SOURCE_DIR}/lib/std/fifo.zig"
"${CMAKE_SOURCE_DIR}/lib/std/fmt.zig"
"${CMAKE_SOURCE_DIR}/lib/std/fmt/errol.zig"
diff --git a/doc/langref.html.in b/doc/langref.html.in
index f6dd3c9c61..885dbe41f6 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -10139,7 +10139,7 @@ pub fn main() void {
{#header_open|Invalid Error Set Cast#}
At compile-time:
- {#code_begin|test_err|test_comptime_invalid_error_set_cast|'error.B' not a member of error set 'error{C,A}'#}
+ {#code_begin|test_err|test_comptime_invalid_error_set_cast|'error.B' not a member of error set 'error{A,C}'#}
const Set1 = error{
A,
B,
diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig
index 0c438069f8..be0fbf7faa 100644
--- a/lib/std/Build/Step/Compile.zig
+++ b/lib/std/Build/Step/Compile.zig
@@ -55,7 +55,6 @@ global_base: ?u64 = null,
zig_lib_dir: ?LazyPath,
exec_cmd_args: ?[]const ?[]const u8,
filter: ?[]const u8,
-test_evented_io: bool = false,
test_runner: ?[]const u8,
test_server_mode: bool,
wasi_exec_model: ?std.builtin.WasiExecModel = null,
@@ -1307,10 +1306,6 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
try zig_args.append(filter);
}
- if (self.test_evented_io) {
- try zig_args.append("--test-evented-io");
- }
-
if (self.test_runner) |test_runner| {
try zig_args.append("--test-runner");
try zig_args.append(b.pathFromRoot(test_runner));
diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig
index 3df3d9ee53..6364ec0ecf 100644
--- a/lib/std/Build/Step/Run.zig
+++ b/lib/std/Build/Step/Run.zig
@@ -1147,19 +1147,14 @@ fn evalZigTest(
test_count = tm_hdr.tests_len;
const names_bytes = body[@sizeOf(TmHdr)..][0 .. test_count * @sizeOf(u32)];
- const async_frame_lens_bytes = body[@sizeOf(TmHdr) + names_bytes.len ..][0 .. test_count * @sizeOf(u32)];
- const expected_panic_msgs_bytes = body[@sizeOf(TmHdr) + names_bytes.len + async_frame_lens_bytes.len ..][0 .. test_count * @sizeOf(u32)];
- const string_bytes = body[@sizeOf(TmHdr) + names_bytes.len + async_frame_lens_bytes.len + expected_panic_msgs_bytes.len ..][0..tm_hdr.string_bytes_len];
+ const expected_panic_msgs_bytes = body[@sizeOf(TmHdr) + names_bytes.len ..][0 .. test_count * @sizeOf(u32)];
+ const string_bytes = body[@sizeOf(TmHdr) + names_bytes.len + expected_panic_msgs_bytes.len ..][0..tm_hdr.string_bytes_len];
const names = std.mem.bytesAsSlice(u32, names_bytes);
- const async_frame_lens = std.mem.bytesAsSlice(u32, async_frame_lens_bytes);
const expected_panic_msgs = std.mem.bytesAsSlice(u32, expected_panic_msgs_bytes);
const names_aligned = try arena.alloc(u32, names.len);
for (names_aligned, names) |*dest, src| dest.* = src;
- const async_frame_lens_aligned = try arena.alloc(u32, async_frame_lens.len);
- for (async_frame_lens_aligned, async_frame_lens) |*dest, src| dest.* = src;
-
const expected_panic_msgs_aligned = try arena.alloc(u32, expected_panic_msgs.len);
for (expected_panic_msgs_aligned, expected_panic_msgs) |*dest, src| dest.* = src;
@@ -1167,7 +1162,6 @@ fn evalZigTest(
metadata = .{
.string_bytes = try arena.dupe(u8, string_bytes),
.names = names_aligned,
- .async_frame_lens = async_frame_lens_aligned,
.expected_panic_msgs = expected_panic_msgs_aligned,
.next_index = 0,
.prog_node = prog_node,
@@ -1237,7 +1231,6 @@ fn evalZigTest(
const TestMetadata = struct {
names: []const u32,
- async_frame_lens: []const u32,
expected_panic_msgs: []const u32,
string_bytes: []const u8,
next_index: u32,
@@ -1253,7 +1246,6 @@ fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Pr
const i = metadata.next_index;
metadata.next_index += 1;
- if (metadata.async_frame_lens[i] != 0) continue;
if (metadata.expected_panic_msgs[i] != 0) continue;
const name = metadata.testName(i);
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index a0fbaea7de..70e51799cb 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -738,7 +738,6 @@ pub const CompilerBackend = enum(u64) {
pub const TestFn = struct {
name: []const u8,
func: *const fn () anyerror!void,
- async_frame_size: ?usize,
};
/// This function type is used by the Zig language code generation and
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 8b15b7d63b..e006fc8c45 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -495,7 +495,7 @@ pub const ChildProcess = struct {
}
fn spawnPosix(self: *ChildProcess) SpawnError!void {
- const pipe_flags = if (io.is_async) os.O.NONBLOCK else 0;
+ const pipe_flags = 0;
const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined;
errdefer if (self.stdin_behavior == StdIo.Pipe) {
destroyPipe(stdin_pipe);
@@ -667,7 +667,6 @@ pub const ChildProcess = struct {
.share_access = windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE,
.sa = &saAttr,
.creation = windows.OPEN_EXISTING,
- .io_mode = .blocking,
}) catch |err| switch (err) {
error.PathAlreadyExists => unreachable, // not possible for "NUL"
error.PipeBusy => unreachable, // not possible for "NUL"
@@ -1493,20 +1492,12 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);
fn writeIntFd(fd: i32, value: ErrInt) !void {
- const file = File{
- .handle = fd,
- .capable_io_mode = .blocking,
- .intended_io_mode = .blocking,
- };
+ const file = File{ .handle = fd };
file.writer().writeInt(u64, @intCast(value), .little) catch return error.SystemResources;
}
fn readIntFd(fd: i32) !ErrInt {
- const file = File{
- .handle = fd,
- .capable_io_mode = .blocking,
- .intended_io_mode = .blocking,
- };
+ const file = File{ .handle = fd };
return @as(ErrInt, @intCast(file.reader().readInt(u64, .little) catch return error.SystemResources));
}
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 7f44cfa770..601a949ecf 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -1141,8 +1141,8 @@ pub fn readElfDebugInfo(
) !ModuleDebugInfo {
nosuspend {
const elf_file = (if (elf_filename) |filename| blk: {
- break :blk fs.cwd().openFile(filename, .{ .intended_io_mode = .blocking });
- } else fs.openSelfExe(.{ .intended_io_mode = .blocking })) catch |err| switch (err) {
+ break :blk fs.cwd().openFile(filename, .{});
+ } else fs.openSelfExe(.{})) catch |err| switch (err) {
error.FileNotFound => return error.MissingDebugInfo,
else => return err,
};
@@ -1452,7 +1452,7 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn
fn printLineFromFileAnyOs(out_stream: anytype, line_info: LineInfo) !void {
// Need this to always block even in async I/O mode, because this could potentially
// be called from e.g. the event loop code crashing.
- var f = try fs.cwd().openFile(line_info.file_name, .{ .intended_io_mode = .blocking });
+ var f = try fs.cwd().openFile(line_info.file_name, .{});
defer f.close();
// TODO fstat and make sure that the file has the correct size
@@ -1640,7 +1640,6 @@ const MachoSymbol = struct {
}
};
-/// `file` is expected to have been opened with .intended_io_mode == .blocking.
/// Takes ownership of file, even on error.
/// TODO it's weird to take ownership even on error, rework this code.
fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
@@ -1824,9 +1823,7 @@ pub const DebugInfo = struct {
errdefer self.allocator.destroy(obj_di);
const macho_path = mem.sliceTo(std.c._dyld_get_image_name(i), 0);
- const macho_file = fs.cwd().openFile(macho_path, .{
- .intended_io_mode = .blocking,
- }) catch |err| switch (err) {
+ const macho_file = fs.cwd().openFile(macho_path, .{}) catch |err| switch (err) {
error.FileNotFound => return error.MissingDebugInfo,
else => return err,
};
@@ -2162,7 +2159,7 @@ pub const ModuleDebugInfo = switch (native_os) {
}
fn loadOFile(self: *@This(), allocator: mem.Allocator, o_file_path: []const u8) !*OFileInfo {
- const o_file = try fs.cwd().openFile(o_file_path, .{ .intended_io_mode = .blocking });
+ const o_file = try fs.cwd().openFile(o_file_path, .{});
const mapped_mem = try mapWholeFile(o_file);
const hdr: *const macho.mach_header_64 = @ptrCast(@alignCast(mapped_mem.ptr));
diff --git a/lib/std/event.zig b/lib/std/event.zig
deleted file mode 100644
index b0d61afbd9..0000000000
--- a/lib/std/event.zig
+++ /dev/null
@@ -1,23 +0,0 @@
-pub const Channel = @import("event/channel.zig").Channel;
-pub const Future = @import("event/future.zig").Future;
-pub const Group = @import("event/group.zig").Group;
-pub const Batch = @import("event/batch.zig").Batch;
-pub const Lock = @import("event/lock.zig").Lock;
-pub const Locked = @import("event/locked.zig").Locked;
-pub const RwLock = @import("event/rwlock.zig").RwLock;
-pub const RwLocked = @import("event/rwlocked.zig").RwLocked;
-pub const Loop = @import("event/loop.zig").Loop;
-pub const WaitGroup = @import("event/wait_group.zig").WaitGroup;
-
-test {
- _ = @import("event/channel.zig");
- _ = @import("event/future.zig");
- _ = @import("event/group.zig");
- _ = @import("event/batch.zig");
- _ = @import("event/lock.zig");
- _ = @import("event/locked.zig");
- _ = @import("event/rwlock.zig");
- _ = @import("event/rwlocked.zig");
- _ = @import("event/loop.zig");
- _ = @import("event/wait_group.zig");
-}
diff --git a/lib/std/event/batch.zig b/lib/std/event/batch.zig
deleted file mode 100644
index 9703a2512e..0000000000
--- a/lib/std/event/batch.zig
+++ /dev/null
@@ -1,141 +0,0 @@
-const std = @import("../std.zig");
-const testing = std.testing;
-
-/// Performs multiple async functions in parallel, without heap allocation.
-/// Async function frames are managed externally to this abstraction, and
-/// passed in via the `add` function. Once all the jobs are added, call `wait`.
-/// This API is *not* thread-safe. The object must be accessed from one thread at
-/// a time, however, it need not be the same thread.
-pub fn Batch(
- /// The return value for each job.
- /// If a job slot was re-used due to maxed out concurrency, then its result
- /// value will be overwritten. The values can be accessed with the `results` field.
- comptime Result: type,
- /// How many jobs to run in parallel.
- comptime max_jobs: comptime_int,
- /// Controls whether the `add` and `wait` functions will be async functions.
- comptime async_behavior: enum {
- /// Observe the value of `std.io.is_async` to decide whether `add`
- /// and `wait` will be async functions. Asserts that the jobs do not suspend when
- /// `std.options.io_mode == .blocking`. This is a generally safe assumption, and the
- /// usual recommended option for this parameter.
- auto_async,
-
- /// Always uses the `nosuspend` keyword when using `await` on the jobs,
- /// making `add` and `wait` non-async functions. Asserts that the jobs do not suspend.
- never_async,
-
- /// `add` and `wait` use regular `await` keyword, making them async functions.
- always_async,
- },
-) type {
- return struct {
- jobs: [max_jobs]Job,
- next_job_index: usize,
- collected_result: CollectedResult,
-
- const Job = struct {
- frame: ?anyframe->Result,
- result: Result,
- };
-
- const Self = @This();
-
- const CollectedResult = switch (@typeInfo(Result)) {
- .ErrorUnion => Result,
- else => void,
- };
-
- const async_ok = switch (async_behavior) {
- .auto_async => std.io.is_async,
- .never_async => false,
- .always_async => true,
- };
-
- pub fn init() Self {
- return Self{
- .jobs = [1]Job{
- .{
- .frame = null,
- .result = undefined,
- },
- } ** max_jobs,
- .next_job_index = 0,
- .collected_result = {},
- };
- }
-
- /// Add a frame to the Batch. If all jobs are in-flight, then this function
- /// waits until one completes.
- /// This function is *not* thread-safe. It must be called from one thread at
- /// a time, however, it need not be the same thread.
- /// TODO: "select" language feature to use the next available slot, rather than
- /// awaiting the next index.
- pub fn add(self: *Self, frame: anyframe->Result) void {
- const job = &self.jobs[self.next_job_index];
- self.next_job_index = (self.next_job_index + 1) % max_jobs;
- if (job.frame) |existing| {
- job.result = if (async_ok) await existing else nosuspend await existing;
- if (CollectedResult != void) {
- job.result catch |err| {
- self.collected_result = err;
- };
- }
- }
- job.frame = frame;
- }
-
- /// Wait for all the jobs to complete.
- /// Safe to call any number of times.
- /// If `Result` is an error union, this function returns the last error that occurred, if any.
- /// Unlike the `results` field, the return value of `wait` will report any error that occurred;
- /// hitting max parallelism will not compromise the result.
- /// This function is *not* thread-safe. It must be called from one thread at
- /// a time, however, it need not be the same thread.
- pub fn wait(self: *Self) CollectedResult {
- for (self.jobs) |*job|
- if (job.frame) |f| {
- job.result = if (async_ok) await f else nosuspend await f;
- if (CollectedResult != void) {
- job.result catch |err| {
- self.collected_result = err;
- };
- }
- job.frame = null;
- };
- return self.collected_result;
- }
- };
-}
-
-test "std.event.Batch" {
- if (true) return error.SkipZigTest;
- var count: usize = 0;
- var batch = Batch(void, 2, .auto_async).init();
- batch.add(&async sleepALittle(&count));
- batch.add(&async increaseByTen(&count));
- batch.wait();
- try testing.expect(count == 11);
-
- var another = Batch(anyerror!void, 2, .auto_async).init();
- another.add(&async somethingElse());
- another.add(&async doSomethingThatFails());
- try testing.expectError(error.ItBroke, another.wait());
-}
-
-fn sleepALittle(count: *usize) void {
- std.time.sleep(1 * std.time.ns_per_ms);
- _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
-}
-
-fn increaseByTen(count: *usize) void {
- var i: usize = 0;
- while (i < 10) : (i += 1) {
- _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
- }
-}
-
-fn doSomethingThatFails() anyerror!void {}
-fn somethingElse() anyerror!void {
- return error.ItBroke;
-}
diff --git a/lib/std/event/channel.zig b/lib/std/event/channel.zig
deleted file mode 100644
index 3329694da7..0000000000
--- a/lib/std/event/channel.zig
+++ /dev/null
@@ -1,334 +0,0 @@
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const assert = std.debug.assert;
-const testing = std.testing;
-const Loop = std.event.Loop;
-
-/// Many producer, many consumer, thread-safe, runtime configurable buffer size.
-/// When buffer is empty, consumers suspend and are resumed by producers.
-/// When buffer is full, producers suspend and are resumed by consumers.
-pub fn Channel(comptime T: type) type {
- return struct {
- getters: std.atomic.Queue(GetNode),
- or_null_queue: std.atomic.Queue(*std.atomic.Queue(GetNode).Node),
- putters: std.atomic.Queue(PutNode),
- get_count: usize,
- put_count: usize,
- dispatch_lock: bool,
- need_dispatch: bool,
-
- // simple fixed size ring buffer
- buffer_nodes: []T,
- buffer_index: usize,
- buffer_len: usize,
-
- const SelfChannel = @This();
- const GetNode = struct {
- tick_node: *Loop.NextTickNode,
- data: Data,
-
- const Data = union(enum) {
- Normal: Normal,
- OrNull: OrNull,
- };
-
- const Normal = struct {
- ptr: *T,
- };
-
- const OrNull = struct {
- ptr: *?T,
- or_null: *std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node,
- };
- };
- const PutNode = struct {
- data: T,
- tick_node: *Loop.NextTickNode,
- };
-
- const global_event_loop = Loop.instance orelse
- @compileError("std.event.Channel currently only works with event-based I/O");
-
- /// Call `deinit` to free resources when done.
- /// `buffer` must live until `deinit` is called.
- /// For a zero length buffer, use `[0]T{}`.
- /// TODO https://github.com/ziglang/zig/issues/2765
- pub fn init(self: *SelfChannel, buffer: []T) void {
- // The ring buffer implementation only works with power of 2 buffer sizes
- // because of relying on subtracting across zero. For example (0 -% 1) % 10 == 5
- assert(buffer.len == 0 or @popCount(buffer.len) == 1);
-
- self.* = SelfChannel{
- .buffer_len = 0,
- .buffer_nodes = buffer,
- .buffer_index = 0,
- .dispatch_lock = false,
- .need_dispatch = false,
- .getters = std.atomic.Queue(GetNode).init(),
- .putters = std.atomic.Queue(PutNode).init(),
- .or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(),
- .get_count = 0,
- .put_count = 0,
- };
- }
-
- /// Must be called when all calls to put and get have suspended and no more calls occur.
- /// This can be omitted if caller can guarantee that the suspended putters and getters
- /// do not need to be run to completion. Note that this may leave awaiters hanging.
- pub fn deinit(self: *SelfChannel) void {
- while (self.getters.get()) |get_node| {
- resume get_node.data.tick_node.data;
- }
- while (self.putters.get()) |put_node| {
- resume put_node.data.tick_node.data;
- }
- self.* = undefined;
- }
-
- /// puts a data item in the channel. The function returns when the value has been added to the
- /// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
- /// Or when the channel is destroyed.
- pub fn put(self: *SelfChannel, data: T) void {
- var my_tick_node = Loop.NextTickNode{ .data = @frame() };
- var queue_node = std.atomic.Queue(PutNode).Node{
- .data = PutNode{
- .tick_node = &my_tick_node,
- .data = data,
- },
- };
-
- suspend {
- self.putters.put(&queue_node);
- _ = @atomicRmw(usize, &self.put_count, .Add, 1, .SeqCst);
-
- self.dispatch();
- }
- }
-
- /// await this function to get an item from the channel. If the buffer is empty, the frame will
- /// complete when the next item is put in the channel.
- pub fn get(self: *SelfChannel) callconv(.Async) T {
- // TODO https://github.com/ziglang/zig/issues/2765
- var result: T = undefined;
- var my_tick_node = Loop.NextTickNode{ .data = @frame() };
- var queue_node = std.atomic.Queue(GetNode).Node{
- .data = GetNode{
- .tick_node = &my_tick_node,
- .data = GetNode.Data{
- .Normal = GetNode.Normal{ .ptr = &result },
- },
- },
- };
-
- suspend {
- self.getters.put(&queue_node);
- _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst);
-
- self.dispatch();
- }
- return result;
- }
-
- //pub async fn select(comptime EnumUnion: type, channels: ...) EnumUnion {
- // assert(@memberCount(EnumUnion) == channels.len); // enum union and channels mismatch
- // assert(channels.len != 0); // enum unions cannot have 0 fields
- // if (channels.len == 1) {
- // const result = await (async channels[0].get() catch unreachable);
- // return @unionInit(EnumUnion, @memberName(EnumUnion, 0), result);
- // }
- //}
-
- /// Get an item from the channel. If the buffer is empty and there are no
- /// puts waiting, this returns `null`.
- pub fn getOrNull(self: *SelfChannel) ?T {
- // TODO integrate this function with named return values
- // so we can get rid of this extra result copy
- var result: ?T = null;
- var my_tick_node = Loop.NextTickNode{ .data = @frame() };
- var or_null_node = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node{ .data = undefined };
- var queue_node = std.atomic.Queue(GetNode).Node{
- .data = GetNode{
- .tick_node = &my_tick_node,
- .data = GetNode.Data{
- .OrNull = GetNode.OrNull{
- .ptr = &result,
- .or_null = &or_null_node,
- },
- },
- },
- };
- or_null_node.data = &queue_node;
-
- suspend {
- self.getters.put(&queue_node);
- _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst);
- self.or_null_queue.put(&or_null_node);
-
- self.dispatch();
- }
- return result;
- }
-
- fn dispatch(self: *SelfChannel) void {
- // set the "need dispatch" flag
- @atomicStore(bool, &self.need_dispatch, true, .SeqCst);
-
- lock: while (true) {
- // set the lock flag
- if (@atomicRmw(bool, &self.dispatch_lock, .Xchg, true, .SeqCst)) return;
-
- // clear the need_dispatch flag since we're about to do it
- @atomicStore(bool, &self.need_dispatch, false, .SeqCst);
-
- while (true) {
- one_dispatch: {
- // later we correct these extra subtractions
- var get_count = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst);
- var put_count = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst);
-
- // transfer self.buffer to self.getters
- while (self.buffer_len != 0) {
- if (get_count == 0) break :one_dispatch;
-
- const get_node = &self.getters.get().?.data;
- switch (get_node.data) {
- GetNode.Data.Normal => |info| {
- info.ptr.* = self.buffer_nodes[(self.buffer_index -% self.buffer_len) % self.buffer_nodes.len];
- },
- GetNode.Data.OrNull => |info| {
- _ = self.or_null_queue.remove(info.or_null);
- info.ptr.* = self.buffer_nodes[(self.buffer_index -% self.buffer_len) % self.buffer_nodes.len];
- },
- }
- global_event_loop.onNextTick(get_node.tick_node);
- self.buffer_len -= 1;
-
- get_count = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst);
- }
-
- // direct transfer self.putters to self.getters
- while (get_count != 0 and put_count != 0) {
- const get_node = &self.getters.get().?.data;
- const put_node = &self.putters.get().?.data;
-
- switch (get_node.data) {
- GetNode.Data.Normal => |info| {
- info.ptr.* = put_node.data;
- },
- GetNode.Data.OrNull => |info| {
- _ = self.or_null_queue.remove(info.or_null);
- info.ptr.* = put_node.data;
- },
- }
- global_event_loop.onNextTick(get_node.tick_node);
- global_event_loop.onNextTick(put_node.tick_node);
-
- get_count = @atomicRmw(usize, &self.get_count, .Sub, 1, .SeqCst);
- put_count = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst);
- }
-
- // transfer self.putters to self.buffer
- while (self.buffer_len != self.buffer_nodes.len and put_count != 0) {
- const put_node = &self.putters.get().?.data;
-
- self.buffer_nodes[self.buffer_index % self.buffer_nodes.len] = put_node.data;
- global_event_loop.onNextTick(put_node.tick_node);
- self.buffer_index +%= 1;
- self.buffer_len += 1;
-
- put_count = @atomicRmw(usize, &self.put_count, .Sub, 1, .SeqCst);
- }
- }
-
- // undo the extra subtractions
- _ = @atomicRmw(usize, &self.get_count, .Add, 1, .SeqCst);
- _ = @atomicRmw(usize, &self.put_count, .Add, 1, .SeqCst);
-
- // All the "get or null" functions should resume now.
- var remove_count: usize = 0;
- while (self.or_null_queue.get()) |or_null_node| {
- remove_count += @intFromBool(self.getters.remove(or_null_node.data));
- global_event_loop.onNextTick(or_null_node.data.data.tick_node);
- }
- if (remove_count != 0) {
- _ = @atomicRmw(usize, &self.get_count, .Sub, remove_count, .SeqCst);
- }
-
- // clear need-dispatch flag
- if (@atomicRmw(bool, &self.need_dispatch, .Xchg, false, .SeqCst)) continue;
-
- assert(@atomicRmw(bool, &self.dispatch_lock, .Xchg, false, .SeqCst));
-
- // we have to check again now that we unlocked
- if (@atomicLoad(bool, &self.need_dispatch, .SeqCst)) continue :lock;
-
- return;
- }
- }
- }
- };
-}
-
-test "std.event.Channel" {
- if (!std.io.is_async) return error.SkipZigTest;
-
- // https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded) return error.SkipZigTest;
-
- // https://github.com/ziglang/zig/issues/3251
- if (builtin.os.tag == .freebsd) return error.SkipZigTest;
-
- var channel: Channel(i32) = undefined;
- channel.init(&[0]i32{});
- defer channel.deinit();
-
- var handle = async testChannelGetter(&channel);
- var putter = async testChannelPutter(&channel);
-
- await handle;
- await putter;
-}
-
-test "std.event.Channel wraparound" {
-
- // TODO provide a way to run tests in evented I/O mode
- if (!std.io.is_async) return error.SkipZigTest;
-
- const channel_size = 2;
-
- var buf: [channel_size]i32 = undefined;
- var channel: Channel(i32) = undefined;
- channel.init(&buf);
- defer channel.deinit();
-
- // add items to channel and pull them out until
- // the buffer wraps around, make sure it doesn't crash.
- channel.put(5);
- try testing.expectEqual(@as(i32, 5), channel.get());
- channel.put(6);
- try testing.expectEqual(@as(i32, 6), channel.get());
- channel.put(7);
- try testing.expectEqual(@as(i32, 7), channel.get());
-}
-fn testChannelGetter(channel: *Channel(i32)) callconv(.Async) void {
- const value1 = channel.get();
- try testing.expect(value1 == 1234);
-
- const value2 = channel.get();
- try testing.expect(value2 == 4567);
-
- const value3 = channel.getOrNull();
- try testing.expect(value3 == null);
-
- var last_put = async testPut(channel, 4444);
- const value4 = channel.getOrNull();
- try testing.expect(value4.? == 4444);
- await last_put;
-}
-fn testChannelPutter(channel: *Channel(i32)) callconv(.Async) void {
- channel.put(1234);
- channel.put(4567);
-}
-fn testPut(channel: *Channel(i32), value: i32) callconv(.Async) void {
- channel.put(value);
-}
diff --git a/lib/std/event/future.zig b/lib/std/event/future.zig
deleted file mode 100644
index e38d54537d..0000000000
--- a/lib/std/event/future.zig
+++ /dev/null
@@ -1,115 +0,0 @@
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const assert = std.debug.assert;
-const testing = std.testing;
-const Lock = std.event.Lock;
-
-/// This is a value that starts out unavailable, until resolve() is called.
-/// While it is unavailable, functions suspend when they try to get() it,
-/// and then are resumed when resolve() is called.
-/// At this point the value remains forever available, and another resolve() is not allowed.
-pub fn Future(comptime T: type) type {
- return struct {
- lock: Lock,
- data: T,
- available: Available,
-
- const Available = enum(u8) {
- NotStarted,
- Started,
- Finished,
- };
-
- const Self = @This();
- const Queue = std.atomic.Queue(anyframe);
-
- pub fn init() Self {
- return Self{
- .lock = Lock.initLocked(),
- .available = .NotStarted,
- .data = undefined,
- };
- }
-
- /// Obtain the value. If it's not available, wait until it becomes
- /// available.
- /// Thread-safe.
- pub fn get(self: *Self) callconv(.Async) *T {
- if (@atomicLoad(Available, &self.available, .SeqCst) == .Finished) {
- return &self.data;
- }
- const held = self.lock.acquire();
- held.release();
-
- return &self.data;
- }
-
- /// Gets the data without waiting for it. If it's available, a pointer is
- /// returned. Otherwise, null is returned.
- pub fn getOrNull(self: *Self) ?*T {
- if (@atomicLoad(Available, &self.available, .SeqCst) == .Finished) {
- return &self.data;
- } else {
- return null;
- }
- }
-
- /// If someone else has started working on the data, wait for them to complete
- /// and return a pointer to the data. Otherwise, return null, and the caller
- /// should start working on the data.
- /// It's not required to call start() before resolve() but it can be useful since
- /// this method is thread-safe.
- pub fn start(self: *Self) callconv(.Async) ?*T {
- const state = @cmpxchgStrong(Available, &self.available, .NotStarted, .Started, .SeqCst, .SeqCst) orelse return null;
- switch (state) {
- .Started => {
- const held = self.lock.acquire();
- held.release();
- return &self.data;
- },
- .Finished => return &self.data,
- else => unreachable,
- }
- }
-
- /// Make the data become available. May be called only once.
- /// Before calling this, modify the `data` property.
- pub fn resolve(self: *Self) void {
- const prev = @atomicRmw(Available, &self.available, .Xchg, .Finished, .SeqCst);
- assert(prev != .Finished); // resolve() called twice
- Lock.Held.release(Lock.Held{ .lock = &self.lock });
- }
- };
-}
-
-test "std.event.Future" {
- // https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded) return error.SkipZigTest;
- // https://github.com/ziglang/zig/issues/3251
- if (builtin.os.tag == .freebsd) return error.SkipZigTest;
- // TODO provide a way to run tests in evented I/O mode
- if (!std.io.is_async) return error.SkipZigTest;
-
- testFuture();
-}
-
-fn testFuture() void {
- var future = Future(i32).init();
-
- var a = async waitOnFuture(&future);
- var b = async waitOnFuture(&future);
- resolveFuture(&future);
-
- const result = (await a) + (await b);
-
- try testing.expect(result == 12);
-}
-
-fn waitOnFuture(future: *Future(i32)) i32 {
- return future.get().*;
-}
-
-fn resolveFuture(future: *Future(i32)) void {
- future.data = 6;
- future.resolve();
-}
diff --git a/lib/std/event/group.zig b/lib/std/event/group.zig
deleted file mode 100644
index 6d513000f4..0000000000
--- a/lib/std/event/group.zig
+++ /dev/null
@@ -1,160 +0,0 @@
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const Lock = std.event.Lock;
-const testing = std.testing;
-const Allocator = std.mem.Allocator;
-
-/// ReturnType must be `void` or `E!void`
-/// TODO This API was created back with the old design of async/await, when calling any
-/// async function required an allocator. There is an ongoing experiment to transition
-/// all uses of this API to the simpler and more resource-aware `std.event.Batch` API.
-/// If the transition goes well, all usages of `Group` will be gone, and this API
-/// will be deleted.
-pub fn Group(comptime ReturnType: type) type {
- return struct {
- frame_stack: Stack,
- alloc_stack: AllocStack,
- lock: Lock,
- allocator: Allocator,
-
- const Self = @This();
-
- const Error = switch (@typeInfo(ReturnType)) {
- .ErrorUnion => |payload| payload.error_set,
- else => void,
- };
- const Stack = std.atomic.Stack(anyframe->ReturnType);
- const AllocStack = std.atomic.Stack(Node);
-
- pub const Node = struct {
- bytes: []const u8 = &[0]u8{},
- handle: anyframe->ReturnType,
- };
-
- pub fn init(allocator: Allocator) Self {
- return Self{
- .frame_stack = Stack.init(),
- .alloc_stack = AllocStack.init(),
- .lock = .{},
- .allocator = allocator,
- };
- }
-
- /// Add a frame to the group. Thread-safe.
- pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) {
- const node = try self.allocator.create(AllocStack.Node);
- node.* = AllocStack.Node{
- .next = undefined,
- .data = Node{
- .handle = handle,
- },
- };
- self.alloc_stack.push(node);
- }
-
- /// Add a node to the group. Thread-safe. Cannot fail.
- /// `node.data` should be the frame handle to add to the group.
- /// The node's memory should be in the function frame of
- /// the handle that is in the node, or somewhere guaranteed to live
- /// at least as long.
- pub fn addNode(self: *Self, node: *Stack.Node) void {
- self.frame_stack.push(node);
- }
-
- /// This is equivalent to adding a frame to the group but the memory of its frame is
- /// allocated by the group and freed by `wait`.
- /// `func` must be async and have return type `ReturnType`.
- /// Thread-safe.
- pub fn call(self: *Self, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
- const frame = try self.allocator.create(@TypeOf(@call(.{ .modifier = .async_kw }, func, args)));
- errdefer self.allocator.destroy(frame);
- const node = try self.allocator.create(AllocStack.Node);
- errdefer self.allocator.destroy(node);
- node.* = AllocStack.Node{
- .next = undefined,
- .data = Node{
- .handle = frame,
- .bytes = std.mem.asBytes(frame),
- },
- };
- frame.* = @call(.{ .modifier = .async_kw }, func, args);
- self.alloc_stack.push(node);
- }
-
- /// Wait for all the calls and promises of the group to complete.
- /// Thread-safe.
- /// Safe to call any number of times.
- pub fn wait(self: *Self) callconv(.Async) ReturnType {
- const held = self.lock.acquire();
- defer held.release();
-
- var result: ReturnType = {};
-
- while (self.frame_stack.pop()) |node| {
- if (Error == void) {
- await node.data;
- } else {
- (await node.data) catch |err| {
- result = err;
- };
- }
- }
- while (self.alloc_stack.pop()) |node| {
- const handle = node.data.handle;
- if (Error == void) {
- await handle;
- } else {
- (await handle) catch |err| {
- result = err;
- };
- }
- self.allocator.free(node.data.bytes);
- self.allocator.destroy(node);
- }
- return result;
- }
- };
-}
-
-test "std.event.Group" {
- // https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded) return error.SkipZigTest;
-
- if (!std.io.is_async) return error.SkipZigTest;
-
- // TODO this file has bit-rotted. repair it
- if (true) return error.SkipZigTest;
-
- _ = async testGroup(std.heap.page_allocator);
-}
-fn testGroup(allocator: Allocator) callconv(.Async) void {
- var count: usize = 0;
- var group = Group(void).init(allocator);
- var sleep_a_little_frame = async sleepALittle(&count);
- group.add(&sleep_a_little_frame) catch @panic("memory");
- var increase_by_ten_frame = async increaseByTen(&count);
- group.add(&increase_by_ten_frame) catch @panic("memory");
- group.wait();
- try testing.expect(count == 11);
-
- var another = Group(anyerror!void).init(allocator);
- var something_else_frame = async somethingElse();
- another.add(&something_else_frame) catch @panic("memory");
- var something_that_fails_frame = async doSomethingThatFails();
- another.add(&something_that_fails_frame) catch @panic("memory");
- try testing.expectError(error.ItBroke, another.wait());
-}
-fn sleepALittle(count: *usize) callconv(.Async) void {
- std.time.sleep(1 * std.time.ns_per_ms);
- _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
-}
-fn increaseByTen(count: *usize) callconv(.Async) void {
- var i: usize = 0;
- while (i < 10) : (i += 1) {
- _ = @atomicRmw(usize, count, .Add, 1, .SeqCst);
- }
-}
-fn doSomethingThatFails() callconv(.Async) anyerror!void {}
-fn somethingElse() callconv(.Async) anyerror!void {
- return error.ItBroke;
-}
diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig
deleted file mode 100644
index 8608298c29..0000000000
--- a/lib/std/event/lock.zig
+++ /dev/null
@@ -1,162 +0,0 @@
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const assert = std.debug.assert;
-const testing = std.testing;
-const mem = std.mem;
-const Loop = std.event.Loop;
-
-/// Thread-safe async/await lock.
-/// Functions which are waiting for the lock are suspended, and
-/// are resumed when the lock is released, in order.
-/// Allows only one actor to hold the lock.
-/// TODO: make this API also work in blocking I/O mode.
-pub const Lock = struct {
- mutex: std.Thread.Mutex = std.Thread.Mutex{},
- head: usize = UNLOCKED,
-
- const UNLOCKED = 0;
- const LOCKED = 1;
-
- const global_event_loop = Loop.instance orelse
- @compileError("std.event.Lock currently only works with event-based I/O");
-
- const Waiter = struct {
- // forced Waiter alignment to ensure it doesn't clash with LOCKED
- next: ?*Waiter align(2),
- tail: *Waiter,
- node: Loop.NextTickNode,
- };
-
- pub fn initLocked() Lock {
- return Lock{ .head = LOCKED };
- }
-
- pub fn acquire(self: *Lock) Held {
- self.mutex.lock();
-
- // self.head transitions from multiple stages depending on the value:
- // UNLOCKED -> LOCKED:
- // acquire Lock ownership when there are no waiters
- // LOCKED -> :
- // Lock is already owned, enqueue first Waiter
- // -> :
- // Lock is owned with pending waiters. Push our waiter to the queue.
-
- if (self.head == UNLOCKED) {
- self.head = LOCKED;
- self.mutex.unlock();
- return Held{ .lock = self };
- }
-
- var waiter: Waiter = undefined;
- waiter.next = null;
- waiter.tail = &waiter;
-
- const head = switch (self.head) {
- UNLOCKED => unreachable,
- LOCKED => null,
- else => @as(*Waiter, @ptrFromInt(self.head)),
- };
-
- if (head) |h| {
- h.tail.next = &waiter;
- h.tail = &waiter;
- } else {
- self.head = @intFromPtr(&waiter);
- }
-
- suspend {
- waiter.node = Loop.NextTickNode{
- .prev = undefined,
- .next = undefined,
- .data = @frame(),
- };
- self.mutex.unlock();
- }
-
- return Held{ .lock = self };
- }
-
- pub const Held = struct {
- lock: *Lock,
-
- pub fn release(self: Held) void {
- const waiter = blk: {
- self.lock.mutex.lock();
- defer self.lock.mutex.unlock();
-
- // self.head goes through the reverse transition from acquire():
- // -> :
- // pop a waiter from the queue to give Lock ownership when there are still others pending
- // -> LOCKED:
- // pop the laster waiter from the queue, while also giving it lock ownership when awaken
- // LOCKED -> UNLOCKED:
- // last lock owner releases lock while no one else is waiting for it
-
- switch (self.lock.head) {
- UNLOCKED => {
- unreachable; // Lock unlocked while unlocking
- },
- LOCKED => {
- self.lock.head = UNLOCKED;
- break :blk null;
- },
- else => {
- const waiter = @as(*Waiter, @ptrFromInt(self.lock.head));
- self.lock.head = if (waiter.next == null) LOCKED else @intFromPtr(waiter.next);
- if (waiter.next) |next|
- next.tail = waiter.tail;
- break :blk waiter;
- },
- }
- };
-
- if (waiter) |w| {
- global_event_loop.onNextTick(&w.node);
- }
- }
- };
-};
-
-test "std.event.Lock" {
- if (!std.io.is_async) return error.SkipZigTest;
-
- // TODO https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded) return error.SkipZigTest;
-
- // TODO https://github.com/ziglang/zig/issues/3251
- if (builtin.os.tag == .freebsd) return error.SkipZigTest;
-
- var lock = Lock{};
- testLock(&lock);
-
- const expected_result = [1]i32{3 * @as(i32, @intCast(shared_test_data.len))} ** shared_test_data.len;
- try testing.expectEqualSlices(i32, &expected_result, &shared_test_data);
-}
-fn testLock(lock: *Lock) void {
- var handle1 = async lockRunner(lock);
- var handle2 = async lockRunner(lock);
- var handle3 = async lockRunner(lock);
-
- await handle1;
- await handle2;
- await handle3;
-}
-
-var shared_test_data = [1]i32{0} ** 10;
-var shared_test_index: usize = 0;
-
-fn lockRunner(lock: *Lock) void {
- Lock.global_event_loop.yield();
-
- var i: usize = 0;
- while (i < shared_test_data.len) : (i += 1) {
- const handle = lock.acquire();
- defer handle.release();
-
- shared_test_index = 0;
- while (shared_test_index < shared_test_data.len) : (shared_test_index += 1) {
- shared_test_data[shared_test_index] = shared_test_data[shared_test_index] + 1;
- }
- }
-}
diff --git a/lib/std/event/locked.zig b/lib/std/event/locked.zig
deleted file mode 100644
index 66495c3772..0000000000
--- a/lib/std/event/locked.zig
+++ /dev/null
@@ -1,42 +0,0 @@
-const std = @import("../std.zig");
-const Lock = std.event.Lock;
-
-/// Thread-safe async/await lock that protects one piece of data.
-/// Functions which are waiting for the lock are suspended, and
-/// are resumed when the lock is released, in order.
-pub fn Locked(comptime T: type) type {
- return struct {
- lock: Lock,
- private_data: T,
-
- const Self = @This();
-
- pub const HeldLock = struct {
- value: *T,
- held: Lock.Held,
-
- pub fn release(self: HeldLock) void {
- self.held.release();
- }
- };
-
- pub fn init(data: T) Self {
- return Self{
- .lock = .{},
- .private_data = data,
- };
- }
-
- pub fn deinit(self: *Self) void {
- self.lock.deinit();
- }
-
- pub fn acquire(self: *Self) callconv(.Async) HeldLock {
- return HeldLock{
- // TODO guaranteed allocation elision
- .held = self.lock.acquire(),
- .value = &self.private_data,
- };
- }
- };
-}
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
deleted file mode 100644
index d7b75a6672..0000000000
--- a/lib/std/event/loop.zig
+++ /dev/null
@@ -1,1791 +0,0 @@
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const assert = std.debug.assert;
-const testing = std.testing;
-const mem = std.mem;
-const os = std.os;
-const windows = os.windows;
-const maxInt = std.math.maxInt;
-const Thread = std.Thread;
-
-const is_windows = builtin.os.tag == .windows;
-
-pub const Loop = struct {
- next_tick_queue: std.atomic.Queue(anyframe),
- os_data: OsData,
- final_resume_node: ResumeNode,
- pending_event_count: usize,
- extra_threads: []Thread,
- /// TODO change this to a pool of configurable number of threads
- /// and rename it to be not file-system-specific. it will become
- /// a thread pool for turning non-CPU-bound blocking things into
- /// async things. A fallback for any missing OS-specific API.
- fs_thread: Thread,
- fs_queue: std.atomic.Queue(Request),
- fs_end_request: Request.Node,
- fs_thread_wakeup: std.Thread.ResetEvent,
-
- /// For resources that have the same lifetime as the `Loop`.
- /// This is only used by `Loop` for the thread pool and associated resources.
- arena: std.heap.ArenaAllocator,
-
- /// State which manages frames that are sleeping on timers
- delay_queue: DelayQueue,
-
- /// Pre-allocated eventfds. All permanently active.
- /// This is how `Loop` sends promises to be resumed on other threads.
- available_eventfd_resume_nodes: std.atomic.Stack(ResumeNode.EventFd),
- eventfd_resume_nodes: []std.atomic.Stack(ResumeNode.EventFd).Node,
-
- pub const NextTickNode = std.atomic.Queue(anyframe).Node;
-
- pub const ResumeNode = struct {
- id: Id,
- handle: anyframe,
- overlapped: Overlapped,
-
- pub const overlapped_init = switch (builtin.os.tag) {
- .windows => windows.OVERLAPPED{
- .Internal = 0,
- .InternalHigh = 0,
- .DUMMYUNIONNAME = .{
- .DUMMYSTRUCTNAME = .{
- .Offset = 0,
- .OffsetHigh = 0,
- },
- },
- .hEvent = null,
- },
- else => {},
- };
- pub const Overlapped = @TypeOf(overlapped_init);
-
- pub const Id = enum {
- basic,
- stop,
- event_fd,
- };
-
- pub const EventFd = switch (builtin.os.tag) {
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => KEventFd,
- .linux => struct {
- base: ResumeNode,
- epoll_op: u32,
- eventfd: i32,
- },
- .windows => struct {
- base: ResumeNode,
- completion_key: usize,
- },
- else => struct {},
- };
-
- const KEventFd = struct {
- base: ResumeNode,
- kevent: os.Kevent,
- };
-
- pub const Basic = switch (builtin.os.tag) {
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => KEventBasic,
- .linux => struct {
- base: ResumeNode,
- },
- .windows => struct {
- base: ResumeNode,
- },
- else => @compileError("unsupported OS"),
- };
-
- const KEventBasic = struct {
- base: ResumeNode,
- kev: os.Kevent,
- };
- };
-
- pub const Instance = switch (std.options.io_mode) {
- .blocking => @TypeOf(null),
- .evented => ?*Loop,
- };
- pub const instance = std.options.event_loop;
-
- var global_instance_state: Loop = undefined;
- pub const default_instance = switch (std.options.io_mode) {
- .blocking => null,
- .evented => &global_instance_state,
- };
-
- pub const Mode = enum {
- single_threaded,
- multi_threaded,
- };
- pub const default_mode = .multi_threaded;
-
- /// TODO copy elision / named return values so that the threads referencing *Loop
- /// have the correct pointer value.
- /// https://github.com/ziglang/zig/issues/2761 and https://github.com/ziglang/zig/issues/2765
- pub fn init(self: *Loop) !void {
- if (builtin.single_threaded or std.options.event_loop_mode == .single_threaded) {
- return self.initSingleThreaded();
- } else {
- return self.initMultiThreaded();
- }
- }
-
- /// After initialization, call run().
- /// TODO copy elision / named return values so that the threads referencing *Loop
- /// have the correct pointer value.
- /// https://github.com/ziglang/zig/issues/2761 and https://github.com/ziglang/zig/issues/2765
- pub fn initSingleThreaded(self: *Loop) !void {
- return self.initThreadPool(1);
- }
-
- /// After initialization, call run().
- /// This is the same as `initThreadPool` using `Thread.getCpuCount` to determine the thread
- /// pool size.
- /// TODO copy elision / named return values so that the threads referencing *Loop
- /// have the correct pointer value.
- /// https://github.com/ziglang/zig/issues/2761 and https://github.com/ziglang/zig/issues/2765
- pub fn initMultiThreaded(self: *Loop) !void {
- if (builtin.single_threaded)
- @compileError("initMultiThreaded unavailable when building in single-threaded mode");
- const core_count = try Thread.getCpuCount();
- return self.initThreadPool(core_count);
- }
-
- /// Thread count is the total thread count. The thread pool size will be
- /// max(thread_count - 1, 0)
- pub fn initThreadPool(self: *Loop, thread_count: usize) !void {
- self.* = Loop{
- .arena = std.heap.ArenaAllocator.init(std.heap.page_allocator),
- .pending_event_count = 1,
- .os_data = undefined,
- .next_tick_queue = std.atomic.Queue(anyframe).init(),
- .extra_threads = undefined,
- .available_eventfd_resume_nodes = std.atomic.Stack(ResumeNode.EventFd).init(),
- .eventfd_resume_nodes = undefined,
- .final_resume_node = ResumeNode{
- .id = .stop,
- .handle = undefined,
- .overlapped = ResumeNode.overlapped_init,
- },
- .fs_end_request = .{ .data = .{ .msg = .end, .finish = .no_action } },
- .fs_queue = std.atomic.Queue(Request).init(),
- .fs_thread = undefined,
- .fs_thread_wakeup = .{},
- .delay_queue = undefined,
- };
- errdefer self.arena.deinit();
-
- // We need at least one of these in case the fs thread wants to use onNextTick
- const extra_thread_count = thread_count - 1;
- const resume_node_count = @max(extra_thread_count, 1);
- self.eventfd_resume_nodes = try self.arena.allocator().alloc(
- std.atomic.Stack(ResumeNode.EventFd).Node,
- resume_node_count,
- );
-
- self.extra_threads = try self.arena.allocator().alloc(Thread, extra_thread_count);
-
- try self.initOsData(extra_thread_count);
- errdefer self.deinitOsData();
-
- if (!builtin.single_threaded) {
- self.fs_thread = try Thread.spawn(.{}, posixFsRun, .{self});
- }
- errdefer if (!builtin.single_threaded) {
- self.posixFsRequest(&self.fs_end_request);
- self.fs_thread.join();
- };
-
- if (!builtin.single_threaded)
- try self.delay_queue.init();
- }
-
- pub fn deinit(self: *Loop) void {
- self.deinitOsData();
- self.arena.deinit();
- self.* = undefined;
- }
-
- const InitOsDataError = os.EpollCreateError || mem.Allocator.Error || os.EventFdError ||
- Thread.SpawnError || os.EpollCtlError || os.KEventError ||
- windows.CreateIoCompletionPortError;
-
- const wakeup_bytes = [_]u8{0x1} ** 8;
-
- fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void {
- nosuspend switch (builtin.os.tag) {
- .linux => {
- errdefer {
- while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
- }
- for (self.eventfd_resume_nodes) |*eventfd_node| {
- eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
- .data = ResumeNode.EventFd{
- .base = ResumeNode{
- .id = .event_fd,
- .handle = undefined,
- .overlapped = ResumeNode.overlapped_init,
- },
- .eventfd = try os.eventfd(1, os.linux.EFD.CLOEXEC | os.linux.EFD.NONBLOCK),
- .epoll_op = os.linux.EPOLL.CTL_ADD,
- },
- .next = undefined,
- };
- self.available_eventfd_resume_nodes.push(eventfd_node);
- }
-
- self.os_data.epollfd = try os.epoll_create1(os.linux.EPOLL.CLOEXEC);
- errdefer os.close(self.os_data.epollfd);
-
- self.os_data.final_eventfd = try os.eventfd(0, os.linux.EFD.CLOEXEC | os.linux.EFD.NONBLOCK);
- errdefer os.close(self.os_data.final_eventfd);
-
- self.os_data.final_eventfd_event = os.linux.epoll_event{
- .events = os.linux.EPOLL.IN,
- .data = os.linux.epoll_data{ .ptr = @intFromPtr(&self.final_resume_node) },
- };
- try os.epoll_ctl(
- self.os_data.epollfd,
- os.linux.EPOLL.CTL_ADD,
- self.os_data.final_eventfd,
- &self.os_data.final_eventfd_event,
- );
-
- if (builtin.single_threaded) {
- assert(extra_thread_count == 0);
- return;
- }
-
- var extra_thread_index: usize = 0;
- errdefer {
- // writing 8 bytes to an eventfd cannot fail
- const amt = os.write(self.os_data.final_eventfd, &wakeup_bytes) catch unreachable;
- assert(amt == wakeup_bytes.len);
- while (extra_thread_index != 0) {
- extra_thread_index -= 1;
- self.extra_threads[extra_thread_index].join();
- }
- }
- while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
- self.extra_threads[extra_thread_index] = try Thread.spawn(.{}, workerRun, .{self});
- }
- },
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly => {
- self.os_data.kqfd = try os.kqueue();
- errdefer os.close(self.os_data.kqfd);
-
- const empty_kevs = &[0]os.Kevent{};
-
- for (self.eventfd_resume_nodes, 0..) |*eventfd_node, i| {
- eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
- .data = ResumeNode.EventFd{
- .base = ResumeNode{
- .id = .event_fd,
- .handle = undefined,
- .overlapped = ResumeNode.overlapped_init,
- },
- // this one is for sending events
- .kevent = os.Kevent{
- .ident = i,
- .filter = os.system.EVFILT_USER,
- .flags = os.system.EV_CLEAR | os.system.EV_ADD | os.system.EV_DISABLE,
- .fflags = 0,
- .data = 0,
- .udata = @intFromPtr(&eventfd_node.data.base),
- },
- },
- .next = undefined,
- };
- self.available_eventfd_resume_nodes.push(eventfd_node);
- const kevent_array = @as(*const [1]os.Kevent, &eventfd_node.data.kevent);
- _ = try os.kevent(self.os_data.kqfd, kevent_array, empty_kevs, null);
- eventfd_node.data.kevent.flags = os.system.EV_CLEAR | os.system.EV_ENABLE;
- eventfd_node.data.kevent.fflags = os.system.NOTE_TRIGGER;
- }
-
- // Pre-add so that we cannot get error.SystemResources
- // later when we try to activate it.
- self.os_data.final_kevent = os.Kevent{
- .ident = extra_thread_count,
- .filter = os.system.EVFILT_USER,
- .flags = os.system.EV_ADD | os.system.EV_DISABLE,
- .fflags = 0,
- .data = 0,
- .udata = @intFromPtr(&self.final_resume_node),
- };
- const final_kev_arr = @as(*const [1]os.Kevent, &self.os_data.final_kevent);
- _ = try os.kevent(self.os_data.kqfd, final_kev_arr, empty_kevs, null);
- self.os_data.final_kevent.flags = os.system.EV_ENABLE;
- self.os_data.final_kevent.fflags = os.system.NOTE_TRIGGER;
-
- if (builtin.single_threaded) {
- assert(extra_thread_count == 0);
- return;
- }
-
- var extra_thread_index: usize = 0;
- errdefer {
- _ = os.kevent(self.os_data.kqfd, final_kev_arr, empty_kevs, null) catch unreachable;
- while (extra_thread_index != 0) {
- extra_thread_index -= 1;
- self.extra_threads[extra_thread_index].join();
- }
- }
- while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
- self.extra_threads[extra_thread_index] = try Thread.spawn(.{}, workerRun, .{self});
- }
- },
- .openbsd => {
- self.os_data.kqfd = try os.kqueue();
- errdefer os.close(self.os_data.kqfd);
-
- const empty_kevs = &[0]os.Kevent{};
-
- for (self.eventfd_resume_nodes, 0..) |*eventfd_node, i| {
- eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
- .data = ResumeNode.EventFd{
- .base = ResumeNode{
- .id = .event_fd,
- .handle = undefined,
- .overlapped = ResumeNode.overlapped_init,
- },
- // this one is for sending events
- .kevent = os.Kevent{
- .ident = i,
- .filter = os.system.EVFILT_TIMER,
- .flags = os.system.EV_CLEAR | os.system.EV_ADD | os.system.EV_DISABLE | os.system.EV_ONESHOT,
- .fflags = 0,
- .data = 0,
- .udata = @intFromPtr(&eventfd_node.data.base),
- },
- },
- .next = undefined,
- };
- self.available_eventfd_resume_nodes.push(eventfd_node);
- const kevent_array = @as(*const [1]os.Kevent, &eventfd_node.data.kevent);
- _ = try os.kevent(self.os_data.kqfd, kevent_array, empty_kevs, null);
- eventfd_node.data.kevent.flags = os.system.EV_CLEAR | os.system.EV_ENABLE;
- }
-
- // Pre-add so that we cannot get error.SystemResources
- // later when we try to activate it.
- self.os_data.final_kevent = os.Kevent{
- .ident = extra_thread_count,
- .filter = os.system.EVFILT_TIMER,
- .flags = os.system.EV_ADD | os.system.EV_ONESHOT | os.system.EV_DISABLE,
- .fflags = 0,
- .data = 0,
- .udata = @intFromPtr(&self.final_resume_node),
- };
- const final_kev_arr = @as(*const [1]os.Kevent, &self.os_data.final_kevent);
- _ = try os.kevent(self.os_data.kqfd, final_kev_arr, empty_kevs, null);
- self.os_data.final_kevent.flags = os.system.EV_ENABLE;
-
- if (builtin.single_threaded) {
- assert(extra_thread_count == 0);
- return;
- }
-
- var extra_thread_index: usize = 0;
- errdefer {
- _ = os.kevent(self.os_data.kqfd, final_kev_arr, empty_kevs, null) catch unreachable;
- while (extra_thread_index != 0) {
- extra_thread_index -= 1;
- self.extra_threads[extra_thread_index].join();
- }
- }
- while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
- self.extra_threads[extra_thread_index] = try Thread.spawn(.{}, workerRun, .{self});
- }
- },
- .windows => {
- self.os_data.io_port = try windows.CreateIoCompletionPort(
- windows.INVALID_HANDLE_VALUE,
- null,
- undefined,
- maxInt(windows.DWORD),
- );
- errdefer windows.CloseHandle(self.os_data.io_port);
-
- for (self.eventfd_resume_nodes) |*eventfd_node| {
- eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
- .data = ResumeNode.EventFd{
- .base = ResumeNode{
- .id = .event_fd,
- .handle = undefined,
- .overlapped = ResumeNode.overlapped_init,
- },
- // this one is for sending events
- .completion_key = @intFromPtr(&eventfd_node.data.base),
- },
- .next = undefined,
- };
- self.available_eventfd_resume_nodes.push(eventfd_node);
- }
-
- if (builtin.single_threaded) {
- assert(extra_thread_count == 0);
- return;
- }
-
- var extra_thread_index: usize = 0;
- errdefer {
- var i: usize = 0;
- while (i < extra_thread_index) : (i += 1) {
- while (true) {
- const overlapped = &self.final_resume_node.overlapped;
- windows.PostQueuedCompletionStatus(self.os_data.io_port, undefined, undefined, overlapped) catch continue;
- break;
- }
- }
- while (extra_thread_index != 0) {
- extra_thread_index -= 1;
- self.extra_threads[extra_thread_index].join();
- }
- }
- while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
- self.extra_threads[extra_thread_index] = try Thread.spawn(.{}, workerRun, .{self});
- }
- },
- else => {},
- };
- }
-
- fn deinitOsData(self: *Loop) void {
- nosuspend switch (builtin.os.tag) {
- .linux => {
- os.close(self.os_data.final_eventfd);
- while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
- os.close(self.os_data.epollfd);
- },
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- os.close(self.os_data.kqfd);
- },
- .windows => {
- windows.CloseHandle(self.os_data.io_port);
- },
- else => {},
- };
- }
-
- /// resume_node must live longer than the anyframe that it holds a reference to.
- /// flags must contain EPOLLET
- pub fn linuxAddFd(self: *Loop, fd: i32, resume_node: *ResumeNode, flags: u32) !void {
- assert(flags & os.linux.EPOLL.ET == os.linux.EPOLL.ET);
- self.beginOneEvent();
- errdefer self.finishOneEvent();
- try self.linuxModFd(
- fd,
- os.linux.EPOLL.CTL_ADD,
- flags,
- resume_node,
- );
- }
-
- pub fn linuxModFd(self: *Loop, fd: i32, op: u32, flags: u32, resume_node: *ResumeNode) !void {
- assert(flags & os.linux.EPOLL.ET == os.linux.EPOLL.ET);
- var ev = os.linux.epoll_event{
- .events = flags,
- .data = os.linux.epoll_data{ .ptr = @intFromPtr(resume_node) },
- };
- try os.epoll_ctl(self.os_data.epollfd, op, fd, &ev);
- }
-
- pub fn linuxRemoveFd(self: *Loop, fd: i32) void {
- os.epoll_ctl(self.os_data.epollfd, os.linux.EPOLL.CTL_DEL, fd, null) catch {};
- self.finishOneEvent();
- }
-
- pub fn linuxWaitFd(self: *Loop, fd: i32, flags: u32) void {
- assert(flags & os.linux.EPOLL.ET == os.linux.EPOLL.ET);
- assert(flags & os.linux.EPOLL.ONESHOT == os.linux.EPOLL.ONESHOT);
- var resume_node = ResumeNode.Basic{
- .base = ResumeNode{
- .id = .basic,
- .handle = @frame(),
- .overlapped = ResumeNode.overlapped_init,
- },
- };
- var need_to_delete = true;
- defer if (need_to_delete) self.linuxRemoveFd(fd);
-
- suspend {
- self.linuxAddFd(fd, &resume_node.base, flags) catch |err| switch (err) {
- error.FileDescriptorNotRegistered => unreachable,
- error.OperationCausesCircularLoop => unreachable,
- error.FileDescriptorIncompatibleWithEpoll => unreachable,
- error.FileDescriptorAlreadyPresentInSet => unreachable, // evented writes to the same fd is not thread-safe
-
- error.SystemResources,
- error.UserResourceLimitReached,
- error.Unexpected,
- => {
- need_to_delete = false;
- // Fall back to a blocking poll(). Ideally this codepath is never hit, since
- // epoll should be just fine. But this is better than incorrect behavior.
- var poll_flags: i16 = 0;
- if ((flags & os.linux.EPOLL.IN) != 0) poll_flags |= os.POLL.IN;
- if ((flags & os.linux.EPOLL.OUT) != 0) poll_flags |= os.POLL.OUT;
- var pfd = [1]os.pollfd{os.pollfd{
- .fd = fd,
- .events = poll_flags,
- .revents = undefined,
- }};
- _ = os.poll(&pfd, -1) catch |poll_err| switch (poll_err) {
- error.NetworkSubsystemFailed => unreachable, // only possible on windows
-
- error.SystemResources,
- error.Unexpected,
- => {
- // Even poll() didn't work. The best we can do now is sleep for a
- // small duration and then hope that something changed.
- std.time.sleep(1 * std.time.ns_per_ms);
- },
- };
- resume @frame();
- },
- };
- }
- }
-
- pub fn waitUntilFdReadable(self: *Loop, fd: os.fd_t) void {
- switch (builtin.os.tag) {
- .linux => {
- self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.IN);
- },
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_READ, os.system.EV_ONESHOT);
- },
- else => @compileError("Unsupported OS"),
- }
- }
-
- pub fn waitUntilFdWritable(self: *Loop, fd: os.fd_t) void {
- switch (builtin.os.tag) {
- .linux => {
- self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT);
- },
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
- },
- else => @compileError("Unsupported OS"),
- }
- }
-
- pub fn waitUntilFdWritableOrReadable(self: *Loop, fd: os.fd_t) void {
- switch (builtin.os.tag) {
- .linux => {
- self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT | os.linux.EPOLL.IN);
- },
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_READ, os.system.EV_ONESHOT);
- self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
- },
- else => @compileError("Unsupported OS"),
- }
- }
-
- pub fn bsdWaitKev(self: *Loop, ident: usize, filter: i16, flags: u16) void {
- var resume_node = ResumeNode.Basic{
- .base = ResumeNode{
- .id = .basic,
- .handle = @frame(),
- .overlapped = ResumeNode.overlapped_init,
- },
- .kev = undefined,
- };
-
- defer {
- // If the kevent was set to be ONESHOT, it doesn't need to be deleted manually.
- if (flags & os.system.EV_ONESHOT != 0) {
- self.bsdRemoveKev(ident, filter);
- }
- }
-
- suspend {
- self.bsdAddKev(&resume_node, ident, filter, flags) catch unreachable;
- }
- }
-
- /// resume_node must live longer than the anyframe that it holds a reference to.
- pub fn bsdAddKev(self: *Loop, resume_node: *ResumeNode.Basic, ident: usize, filter: i16, flags: u16) !void {
- self.beginOneEvent();
- errdefer self.finishOneEvent();
- var kev = [1]os.Kevent{os.Kevent{
- .ident = ident,
- .filter = filter,
- .flags = os.system.EV_ADD | os.system.EV_ENABLE | os.system.EV_CLEAR | flags,
- .fflags = 0,
- .data = 0,
- .udata = @intFromPtr(&resume_node.base),
- }};
- const empty_kevs = &[0]os.Kevent{};
- _ = try os.kevent(self.os_data.kqfd, &kev, empty_kevs, null);
- }
-
- pub fn bsdRemoveKev(self: *Loop, ident: usize, filter: i16) void {
- var kev = [1]os.Kevent{os.Kevent{
- .ident = ident,
- .filter = filter,
- .flags = os.system.EV_DELETE,
- .fflags = 0,
- .data = 0,
- .udata = 0,
- }};
- const empty_kevs = &[0]os.Kevent{};
- _ = os.kevent(self.os_data.kqfd, &kev, empty_kevs, null) catch undefined;
- self.finishOneEvent();
- }
-
- fn dispatch(self: *Loop) void {
- while (self.available_eventfd_resume_nodes.pop()) |resume_stack_node| {
- const next_tick_node = self.next_tick_queue.get() orelse {
- self.available_eventfd_resume_nodes.push(resume_stack_node);
- return;
- };
- const eventfd_node = &resume_stack_node.data;
- eventfd_node.base.handle = next_tick_node.data;
- switch (builtin.os.tag) {
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- const kevent_array = @as(*const [1]os.Kevent, &eventfd_node.kevent);
- const empty_kevs = &[0]os.Kevent{};
- _ = os.kevent(self.os_data.kqfd, kevent_array, empty_kevs, null) catch {
- self.next_tick_queue.unget(next_tick_node);
- self.available_eventfd_resume_nodes.push(resume_stack_node);
- return;
- };
- },
- .linux => {
- // the pending count is already accounted for
- const epoll_events = os.linux.EPOLL.ONESHOT | os.linux.EPOLL.IN | os.linux.EPOLL.OUT |
- os.linux.EPOLL.ET;
- self.linuxModFd(
- eventfd_node.eventfd,
- eventfd_node.epoll_op,
- epoll_events,
- &eventfd_node.base,
- ) catch {
- self.next_tick_queue.unget(next_tick_node);
- self.available_eventfd_resume_nodes.push(resume_stack_node);
- return;
- };
- },
- .windows => {
- windows.PostQueuedCompletionStatus(
- self.os_data.io_port,
- undefined,
- undefined,
- &eventfd_node.base.overlapped,
- ) catch {
- self.next_tick_queue.unget(next_tick_node);
- self.available_eventfd_resume_nodes.push(resume_stack_node);
- return;
- };
- },
- else => @compileError("unsupported OS"),
- }
- }
- }
-
- /// Bring your own linked list node. This means it can't fail.
- pub fn onNextTick(self: *Loop, node: *NextTickNode) void {
- self.beginOneEvent(); // finished in dispatch()
- self.next_tick_queue.put(node);
- self.dispatch();
- }
-
- pub fn cancelOnNextTick(self: *Loop, node: *NextTickNode) void {
- if (self.next_tick_queue.remove(node)) {
- self.finishOneEvent();
- }
- }
-
- pub fn run(self: *Loop) void {
- self.finishOneEvent(); // the reference we start with
-
- self.workerRun();
-
- if (!builtin.single_threaded) {
- switch (builtin.os.tag) {
- .linux,
- .macos,
- .ios,
- .tvos,
- .watchos,
- .freebsd,
- .netbsd,
- .dragonfly,
- .openbsd,
- => self.fs_thread.join(),
- else => {},
- }
- }
-
- for (self.extra_threads) |extra_thread| {
- extra_thread.join();
- }
-
- self.delay_queue.deinit();
- }
-
- /// Runs the provided function asynchronously. The function's frame is allocated
- /// with `allocator` and freed when the function returns.
- /// `func` must return void and it can be an async function.
- /// Yields to the event loop, running the function on the next tick.
- pub fn runDetached(self: *Loop, alloc: mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
- if (!std.io.is_async) @compileError("Can't use runDetached in non-async mode!");
- if (@TypeOf(@call(.{}, func, args)) != void) {
- @compileError("`func` must not have a return value");
- }
-
- const Wrapper = struct {
- const Args = @TypeOf(args);
- fn run(func_args: Args, loop: *Loop, allocator: mem.Allocator) void {
- loop.beginOneEvent();
- loop.yield();
- @call(.{}, func, func_args); // compile error when called with non-void ret type
- suspend {
- loop.finishOneEvent();
- allocator.destroy(@frame());
- }
- }
- };
-
- const run_frame = try alloc.create(@Frame(Wrapper.run));
- run_frame.* = async Wrapper.run(args, self, alloc);
- }
-
- /// Yielding lets the event loop run, starting any unstarted async operations.
- /// Note that async operations automatically start when a function yields for any other reason,
- /// for example, when async I/O is performed. This function is intended to be used only when
- /// CPU bound tasks would be waiting in the event loop but never get started because no async I/O
- /// is performed.
- pub fn yield(self: *Loop) void {
- suspend {
- var my_tick_node = NextTickNode{
- .prev = undefined,
- .next = undefined,
- .data = @frame(),
- };
- self.onNextTick(&my_tick_node);
- }
- }
-
- /// If the build is multi-threaded and there is an event loop, then it calls `yield`. Otherwise,
- /// does nothing.
- pub fn startCpuBoundOperation() void {
- if (builtin.single_threaded) {
- return;
- } else if (instance) |event_loop| {
- event_loop.yield();
- }
- }
-
- /// call finishOneEvent when done
- pub fn beginOneEvent(self: *Loop) void {
- _ = @atomicRmw(usize, &self.pending_event_count, .Add, 1, .SeqCst);
- }
-
- pub fn finishOneEvent(self: *Loop) void {
- nosuspend {
- const prev = @atomicRmw(usize, &self.pending_event_count, .Sub, 1, .SeqCst);
- if (prev != 1) return;
-
- // cause all the threads to stop
- self.posixFsRequest(&self.fs_end_request);
-
- switch (builtin.os.tag) {
- .linux => {
- // writing to the eventfd will only wake up one thread, thus multiple writes
- // are needed to wakeup all the threads
- var i: usize = 0;
- while (i < self.extra_threads.len + 1) : (i += 1) {
- // writing 8 bytes to an eventfd cannot fail
- const amt = os.write(self.os_data.final_eventfd, &wakeup_bytes) catch unreachable;
- assert(amt == wakeup_bytes.len);
- }
- return;
- },
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- const final_kevent = @as(*const [1]os.Kevent, &self.os_data.final_kevent);
- const empty_kevs = &[0]os.Kevent{};
- // cannot fail because we already added it and this just enables it
- _ = os.kevent(self.os_data.kqfd, final_kevent, empty_kevs, null) catch unreachable;
- return;
- },
- .windows => {
- var i: usize = 0;
- while (i < self.extra_threads.len + 1) : (i += 1) {
- while (true) {
- const overlapped = &self.final_resume_node.overlapped;
- windows.PostQueuedCompletionStatus(self.os_data.io_port, undefined, undefined, overlapped) catch continue;
- break;
- }
- }
- return;
- },
- else => @compileError("unsupported OS"),
- }
- }
- }
-
- pub fn sleep(self: *Loop, nanoseconds: u64) void {
- if (builtin.single_threaded)
- @compileError("TODO: integrate timers with epoll/kevent/iocp for single-threaded");
-
- suspend {
- const now = self.delay_queue.timer.read();
-
- var entry: DelayQueue.Waiters.Entry = undefined;
- entry.init(@frame(), now + nanoseconds);
- self.delay_queue.waiters.insert(&entry);
-
- // Speculatively wake up the timer thread when we add a new entry.
- // If the timer thread is sleeping on a longer entry, we need to
- // interrupt it so that our entry can be expired in time.
- self.delay_queue.event.set();
- }
- }
-
- const DelayQueue = struct {
- timer: std.time.Timer,
- waiters: Waiters,
- thread: std.Thread,
- event: std.Thread.ResetEvent,
- is_running: std.atomic.Value(bool),
-
- /// Initialize the delay queue by spawning the timer thread
- /// and starting any timer resources.
- fn init(self: *DelayQueue) !void {
- self.* = DelayQueue{
- .timer = try std.time.Timer.start(),
- .waiters = DelayQueue.Waiters{
- .entries = std.atomic.Queue(anyframe).init(),
- },
- .thread = undefined,
- .event = .{},
- .is_running = std.atomic.Value(bool).init(true),
- };
-
- // Must be after init so that it can read the other state, such as `is_running`.
- self.thread = try std.Thread.spawn(.{}, DelayQueue.run, .{self});
- }
-
- fn deinit(self: *DelayQueue) void {
- self.is_running.store(false, .SeqCst);
- self.event.set();
- self.thread.join();
- }
-
- /// Entry point for the timer thread
- /// which waits for timer entries to expire and reschedules them.
- fn run(self: *DelayQueue) void {
- const loop = @fieldParentPtr(Loop, "delay_queue", self);
-
- while (self.is_running.load(.SeqCst)) {
- self.event.reset();
- const now = self.timer.read();
-
- if (self.waiters.popExpired(now)) |entry| {
- loop.onNextTick(&entry.node);
- continue;
- }
-
- if (self.waiters.nextExpire()) |expires| {
- if (now >= expires)
- continue;
- self.event.timedWait(expires - now) catch {};
- } else {
- self.event.wait();
- }
- }
- }
-
- // TODO: use a tickless hierarchical timer wheel:
- // https://github.com/wahern/timeout/
- const Waiters = struct {
- entries: std.atomic.Queue(anyframe),
-
- const Entry = struct {
- node: NextTickNode,
- expires: u64,
-
- fn init(self: *Entry, frame: anyframe, expires: u64) void {
- self.node.data = frame;
- self.expires = expires;
- }
- };
-
- /// Registers the entry into the queue of waiting frames
- fn insert(self: *Waiters, entry: *Entry) void {
- self.entries.put(&entry.node);
- }
-
- /// Dequeues one expired event relative to `now`
- fn popExpired(self: *Waiters, now: u64) ?*Entry {
- const entry = self.peekExpiringEntry() orelse return null;
- if (entry.expires > now)
- return null;
-
- assert(self.entries.remove(&entry.node));
- return entry;
- }
-
- /// Returns an estimate for the amount of time
- /// to wait until the next waiting entry expires.
- fn nextExpire(self: *Waiters) ?u64 {
- const entry = self.peekExpiringEntry() orelse return null;
- return entry.expires;
- }
-
- fn peekExpiringEntry(self: *Waiters) ?*Entry {
- self.entries.mutex.lock();
- defer self.entries.mutex.unlock();
-
- // starting from the head
- var head = self.entries.head orelse return null;
-
- // traverse the list of waiting entries to
- // find the Node with the smallest `expires` field
- var min = head;
- while (head.next) |node| {
- const minEntry = @fieldParentPtr(Entry, "node", min);
- const nodeEntry = @fieldParentPtr(Entry, "node", node);
- if (nodeEntry.expires < minEntry.expires)
- min = node;
- head = node;
- }
-
- return @fieldParentPtr(Entry, "node", min);
- }
- };
- };
-
- /// ------- I/0 APIs -------
- pub fn accept(
- self: *Loop,
- /// This argument is a socket that has been created with `socket`, bound to a local address
- /// with `bind`, and is listening for connections after a `listen`.
- sockfd: os.socket_t,
- /// This argument is a pointer to a sockaddr structure. This structure is filled in with the
- /// address of the peer socket, as known to the communications layer. The exact format of the
- /// address returned addr is determined by the socket's address family (see `socket` and the
- /// respective protocol man pages).
- addr: *os.sockaddr,
- /// This argument is a value-result argument: the caller must initialize it to contain the
- /// size (in bytes) of the structure pointed to by addr; on return it will contain the actual size
- /// of the peer address.
- ///
- /// The returned address is truncated if the buffer provided is too small; in this case, `addr_size`
- /// will return a value greater than was supplied to the call.
- addr_size: *os.socklen_t,
- /// The following values can be bitwise ORed in flags to obtain different behavior:
- /// * `SOCK.CLOEXEC` - Set the close-on-exec (`FD_CLOEXEC`) flag on the new file descriptor. See the
- /// description of the `O.CLOEXEC` flag in `open` for reasons why this may be useful.
- flags: u32,
- ) os.AcceptError!os.socket_t {
- while (true) {
- return os.accept(sockfd, addr, addr_size, flags | os.SOCK.NONBLOCK) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdReadable(sockfd);
- continue;
- },
- else => return err,
- };
- }
- }
-
- pub fn connect(self: *Loop, sockfd: os.socket_t, sock_addr: *const os.sockaddr, len: os.socklen_t) os.ConnectError!void {
- os.connect(sockfd, sock_addr, len) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdWritable(sockfd);
- return os.getsockoptError(sockfd);
- },
- else => return err,
- };
- }
-
- /// Performs an async `os.open` using a separate thread.
- pub fn openZ(self: *Loop, file_path: [*:0]const u8, flags: u32, mode: os.mode_t) os.OpenError!os.fd_t {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .open = .{
- .path = file_path,
- .flags = flags,
- .mode = mode,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.open.result;
- }
-
- /// Performs an async `os.opent` using a separate thread.
- pub fn openatZ(self: *Loop, fd: os.fd_t, file_path: [*:0]const u8, flags: u32, mode: os.mode_t) os.OpenError!os.fd_t {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .openat = .{
- .fd = fd,
- .path = file_path,
- .flags = flags,
- .mode = mode,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.openat.result;
- }
-
- /// Performs an async `os.close` using a separate thread.
- pub fn close(self: *Loop, fd: os.fd_t) void {
- var req_node = Request.Node{
- .data = .{
- .msg = .{ .close = .{ .fd = fd } },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- }
-
- /// Performs an async `os.read` using a separate thread.
- /// `fd` must block and not return EAGAIN.
- pub fn read(self: *Loop, fd: os.fd_t, buf: []u8, simulate_evented: bool) os.ReadError!usize {
- if (simulate_evented) {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .read = .{
- .fd = fd,
- .buf = buf,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.read.result;
- } else {
- while (true) {
- return os.read(fd, buf) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdReadable(fd);
- continue;
- },
- else => return err,
- };
- }
- }
- }
-
- /// Performs an async `os.readv` using a separate thread.
- /// `fd` must block and not return EAGAIN.
- pub fn readv(self: *Loop, fd: os.fd_t, iov: []const os.iovec, simulate_evented: bool) os.ReadError!usize {
- if (simulate_evented) {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .readv = .{
- .fd = fd,
- .iov = iov,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.readv.result;
- } else {
- while (true) {
- return os.readv(fd, iov) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdReadable(fd);
- continue;
- },
- else => return err,
- };
- }
- }
- }
-
- /// Performs an async `os.pread` using a separate thread.
- /// `fd` must block and not return EAGAIN.
- pub fn pread(self: *Loop, fd: os.fd_t, buf: []u8, offset: u64, simulate_evented: bool) os.PReadError!usize {
- if (simulate_evented) {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .pread = .{
- .fd = fd,
- .buf = buf,
- .offset = offset,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.pread.result;
- } else {
- while (true) {
- return os.pread(fd, buf, offset) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdReadable(fd);
- continue;
- },
- else => return err,
- };
- }
- }
- }
-
- /// Performs an async `os.preadv` using a separate thread.
- /// `fd` must block and not return EAGAIN.
- pub fn preadv(self: *Loop, fd: os.fd_t, iov: []const os.iovec, offset: u64, simulate_evented: bool) os.ReadError!usize {
- if (simulate_evented) {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .preadv = .{
- .fd = fd,
- .iov = iov,
- .offset = offset,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.preadv.result;
- } else {
- while (true) {
- return os.preadv(fd, iov, offset) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdReadable(fd);
- continue;
- },
- else => return err,
- };
- }
- }
- }
-
- /// Performs an async `os.write` using a separate thread.
- /// `fd` must block and not return EAGAIN.
- pub fn write(self: *Loop, fd: os.fd_t, bytes: []const u8, simulate_evented: bool) os.WriteError!usize {
- if (simulate_evented) {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .write = .{
- .fd = fd,
- .bytes = bytes,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.write.result;
- } else {
- while (true) {
- return os.write(fd, bytes) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdWritable(fd);
- continue;
- },
- else => return err,
- };
- }
- }
- }
-
- /// Performs an async `os.writev` using a separate thread.
- /// `fd` must block and not return EAGAIN.
- pub fn writev(self: *Loop, fd: os.fd_t, iov: []const os.iovec_const, simulate_evented: bool) os.WriteError!usize {
- if (simulate_evented) {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .writev = .{
- .fd = fd,
- .iov = iov,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.writev.result;
- } else {
- while (true) {
- return os.writev(fd, iov) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdWritable(fd);
- continue;
- },
- else => return err,
- };
- }
- }
- }
-
- /// Performs an async `os.pwrite` using a separate thread.
- /// `fd` must block and not return EAGAIN.
- pub fn pwrite(self: *Loop, fd: os.fd_t, bytes: []const u8, offset: u64, simulate_evented: bool) os.PerformsWriteError!usize {
- if (simulate_evented) {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .pwrite = .{
- .fd = fd,
- .bytes = bytes,
- .offset = offset,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.pwrite.result;
- } else {
- while (true) {
- return os.pwrite(fd, bytes, offset) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdWritable(fd);
- continue;
- },
- else => return err,
- };
- }
- }
- }
-
- /// Performs an async `os.pwritev` using a separate thread.
- /// `fd` must block and not return EAGAIN.
- pub fn pwritev(self: *Loop, fd: os.fd_t, iov: []const os.iovec_const, offset: u64, simulate_evented: bool) os.PWriteError!usize {
- if (simulate_evented) {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .pwritev = .{
- .fd = fd,
- .iov = iov,
- .offset = offset,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.pwritev.result;
- } else {
- while (true) {
- return os.pwritev(fd, iov, offset) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdWritable(fd);
- continue;
- },
- else => return err,
- };
- }
- }
- }
-
- pub fn sendto(
- self: *Loop,
- /// The file descriptor of the sending socket.
- sockfd: os.fd_t,
- /// Message to send.
- buf: []const u8,
- flags: u32,
- dest_addr: ?*const os.sockaddr,
- addrlen: os.socklen_t,
- ) os.SendToError!usize {
- while (true) {
- return os.sendto(sockfd, buf, flags, dest_addr, addrlen) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdWritable(sockfd);
- continue;
- },
- else => return err,
- };
- }
- }
-
- pub fn recvfrom(
- self: *Loop,
- sockfd: os.fd_t,
- buf: []u8,
- flags: u32,
- src_addr: ?*os.sockaddr,
- addrlen: ?*os.socklen_t,
- ) os.RecvFromError!usize {
- while (true) {
- return os.recvfrom(sockfd, buf, flags, src_addr, addrlen) catch |err| switch (err) {
- error.WouldBlock => {
- self.waitUntilFdReadable(sockfd);
- continue;
- },
- else => return err,
- };
- }
- }
-
- /// Performs an async `os.faccessatZ` using a separate thread.
- /// `fd` must block and not return EAGAIN.
- pub fn faccessatZ(
- self: *Loop,
- dirfd: os.fd_t,
- path_z: [*:0]const u8,
- mode: u32,
- flags: u32,
- ) os.AccessError!void {
- var req_node = Request.Node{
- .data = .{
- .msg = .{
- .faccessat = .{
- .dirfd = dirfd,
- .path = path_z,
- .mode = mode,
- .flags = flags,
- .result = undefined,
- },
- },
- .finish = .{ .tick_node = .{ .data = @frame() } },
- },
- };
- suspend {
- self.posixFsRequest(&req_node);
- }
- return req_node.data.msg.faccessat.result;
- }
-
- fn workerRun(self: *Loop) void {
- while (true) {
- while (true) {
- const next_tick_node = self.next_tick_queue.get() orelse break;
- self.dispatch();
- resume next_tick_node.data;
- self.finishOneEvent();
- }
-
- switch (builtin.os.tag) {
- .linux => {
- // only process 1 event so we don't steal from other threads
- var events: [1]os.linux.epoll_event = undefined;
- const count = os.epoll_wait(self.os_data.epollfd, events[0..], -1);
- for (events[0..count]) |ev| {
- const resume_node = @as(*ResumeNode, @ptrFromInt(ev.data.ptr));
- const handle = resume_node.handle;
- const resume_node_id = resume_node.id;
- switch (resume_node_id) {
- .basic => {},
- .stop => return,
- .event_fd => {
- const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
- event_fd_node.epoll_op = os.linux.EPOLL.CTL_MOD;
- const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
- self.available_eventfd_resume_nodes.push(stack_node);
- },
- }
- resume handle;
- if (resume_node_id == .event_fd) {
- self.finishOneEvent();
- }
- }
- },
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- var eventlist: [1]os.Kevent = undefined;
- const empty_kevs = &[0]os.Kevent{};
- const count = os.kevent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable;
- for (eventlist[0..count]) |ev| {
- const resume_node = @as(*ResumeNode, @ptrFromInt(ev.udata));
- const handle = resume_node.handle;
- const resume_node_id = resume_node.id;
- switch (resume_node_id) {
- .basic => {
- const basic_node = @fieldParentPtr(ResumeNode.Basic, "base", resume_node);
- basic_node.kev = ev;
- },
- .stop => return,
- .event_fd => {
- const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
- const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
- self.available_eventfd_resume_nodes.push(stack_node);
- },
- }
- resume handle;
- if (resume_node_id == .event_fd) {
- self.finishOneEvent();
- }
- }
- },
- .windows => {
- var completion_key: usize = undefined;
- const overlapped = while (true) {
- var nbytes: windows.DWORD = undefined;
- var overlapped: ?*windows.OVERLAPPED = undefined;
- switch (windows.GetQueuedCompletionStatus(self.os_data.io_port, &nbytes, &completion_key, &overlapped, windows.INFINITE)) {
- .Aborted => return,
- .Normal => {},
- .EOF => {},
- .Cancelled => continue,
- }
- if (overlapped) |o| break o;
- };
- const resume_node = @fieldParentPtr(ResumeNode, "overlapped", overlapped);
- const handle = resume_node.handle;
- const resume_node_id = resume_node.id;
- switch (resume_node_id) {
- .basic => {},
- .stop => return,
- .event_fd => {
- const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
- const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
- self.available_eventfd_resume_nodes.push(stack_node);
- },
- }
- resume handle;
- self.finishOneEvent();
- },
- else => @compileError("unsupported OS"),
- }
- }
- }
-
- fn posixFsRequest(self: *Loop, request_node: *Request.Node) void {
- self.beginOneEvent(); // finished in posixFsRun after processing the msg
- self.fs_queue.put(request_node);
- self.fs_thread_wakeup.set();
- }
-
- fn posixFsCancel(self: *Loop, request_node: *Request.Node) void {
- if (self.fs_queue.remove(request_node)) {
- self.finishOneEvent();
- }
- }
-
- fn posixFsRun(self: *Loop) void {
- nosuspend while (true) {
- self.fs_thread_wakeup.reset();
- while (self.fs_queue.get()) |node| {
- switch (node.data.msg) {
- .end => return,
- .read => |*msg| {
- msg.result = os.read(msg.fd, msg.buf);
- },
- .readv => |*msg| {
- msg.result = os.readv(msg.fd, msg.iov);
- },
- .write => |*msg| {
- msg.result = os.write(msg.fd, msg.bytes);
- },
- .writev => |*msg| {
- msg.result = os.writev(msg.fd, msg.iov);
- },
- .pwrite => |*msg| {
- msg.result = os.pwrite(msg.fd, msg.bytes, msg.offset);
- },
- .pwritev => |*msg| {
- msg.result = os.pwritev(msg.fd, msg.iov, msg.offset);
- },
- .pread => |*msg| {
- msg.result = os.pread(msg.fd, msg.buf, msg.offset);
- },
- .preadv => |*msg| {
- msg.result = os.preadv(msg.fd, msg.iov, msg.offset);
- },
- .open => |*msg| {
- if (is_windows) unreachable; // TODO
- msg.result = os.openZ(msg.path, msg.flags, msg.mode);
- },
- .openat => |*msg| {
- if (is_windows) unreachable; // TODO
- msg.result = os.openatZ(msg.fd, msg.path, msg.flags, msg.mode);
- },
- .faccessat => |*msg| {
- msg.result = os.faccessatZ(msg.dirfd, msg.path, msg.mode, msg.flags);
- },
- .close => |*msg| os.close(msg.fd),
- }
- switch (node.data.finish) {
- .tick_node => |*tick_node| self.onNextTick(tick_node),
- .no_action => {},
- }
- self.finishOneEvent();
- }
- self.fs_thread_wakeup.wait();
- };
- }
-
- const OsData = switch (builtin.os.tag) {
- .linux => LinuxOsData,
- .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => KEventData,
- .windows => struct {
- io_port: windows.HANDLE,
- extra_thread_count: usize,
- },
- else => struct {},
- };
-
- const KEventData = struct {
- kqfd: i32,
- final_kevent: os.Kevent,
- };
-
- const LinuxOsData = struct {
- epollfd: i32,
- final_eventfd: i32,
- final_eventfd_event: os.linux.epoll_event,
- };
-
- pub const Request = struct {
- msg: Msg,
- finish: Finish,
-
- pub const Node = std.atomic.Queue(Request).Node;
-
- pub const Finish = union(enum) {
- tick_node: Loop.NextTickNode,
- no_action,
- };
-
- pub const Msg = union(enum) {
- read: Read,
- readv: ReadV,
- write: Write,
- writev: WriteV,
- pwrite: PWrite,
- pwritev: PWriteV,
- pread: PRead,
- preadv: PReadV,
- open: Open,
- openat: OpenAt,
- close: Close,
- faccessat: FAccessAt,
-
- /// special - means the fs thread should exit
- end,
-
- pub const Read = struct {
- fd: os.fd_t,
- buf: []u8,
- result: Error!usize,
-
- pub const Error = os.ReadError;
- };
-
- pub const ReadV = struct {
- fd: os.fd_t,
- iov: []const os.iovec,
- result: Error!usize,
-
- pub const Error = os.ReadError;
- };
-
- pub const Write = struct {
- fd: os.fd_t,
- bytes: []const u8,
- result: Error!usize,
-
- pub const Error = os.WriteError;
- };
-
- pub const WriteV = struct {
- fd: os.fd_t,
- iov: []const os.iovec_const,
- result: Error!usize,
-
- pub const Error = os.WriteError;
- };
-
- pub const PWrite = struct {
- fd: os.fd_t,
- bytes: []const u8,
- offset: usize,
- result: Error!usize,
-
- pub const Error = os.PWriteError;
- };
-
- pub const PWriteV = struct {
- fd: os.fd_t,
- iov: []const os.iovec_const,
- offset: usize,
- result: Error!usize,
-
- pub const Error = os.PWriteError;
- };
-
- pub const PRead = struct {
- fd: os.fd_t,
- buf: []u8,
- offset: usize,
- result: Error!usize,
-
- pub const Error = os.PReadError;
- };
-
- pub const PReadV = struct {
- fd: os.fd_t,
- iov: []const os.iovec,
- offset: usize,
- result: Error!usize,
-
- pub const Error = os.PReadError;
- };
-
- pub const Open = struct {
- path: [*:0]const u8,
- flags: u32,
- mode: os.mode_t,
- result: Error!os.fd_t,
-
- pub const Error = os.OpenError;
- };
-
- pub const OpenAt = struct {
- fd: os.fd_t,
- path: [*:0]const u8,
- flags: u32,
- mode: os.mode_t,
- result: Error!os.fd_t,
-
- pub const Error = os.OpenError;
- };
-
- pub const Close = struct {
- fd: os.fd_t,
- };
-
- pub const FAccessAt = struct {
- dirfd: os.fd_t,
- path: [*:0]const u8,
- mode: u32,
- flags: u32,
- result: Error!void,
-
- pub const Error = os.AccessError;
- };
- };
- };
-};
-
-test "std.event.Loop - basic" {
- // https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded) return error.SkipZigTest;
-
- if (true) {
- // https://github.com/ziglang/zig/issues/4922
- return error.SkipZigTest;
- }
-
- var loop: Loop = undefined;
- try loop.initMultiThreaded();
- defer loop.deinit();
-
- loop.run();
-}
-
-fn testEventLoop() i32 {
- return 1234;
-}
-
-fn testEventLoop2(h: anyframe->i32, did_it: *bool) void {
- const value = await h;
- try testing.expect(value == 1234);
- did_it.* = true;
-}
-
-var testRunDetachedData: usize = 0;
-test "std.event.Loop - runDetached" {
- // https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded) return error.SkipZigTest;
- if (!std.io.is_async) return error.SkipZigTest;
- if (true) {
- // https://github.com/ziglang/zig/issues/4922
- return error.SkipZigTest;
- }
-
- var loop: Loop = undefined;
- try loop.initMultiThreaded();
- defer loop.deinit();
-
- // Schedule the execution, won't actually start until we start the
- // event loop.
- try loop.runDetached(std.testing.allocator, testRunDetached, .{});
-
- // Now we can start the event loop. The function will return only
- // after all tasks have been completed, allowing us to synchronize
- // with the previous runDetached.
- loop.run();
-
- try testing.expect(testRunDetachedData == 1);
-}
-
-fn testRunDetached() void {
- testRunDetachedData += 1;
-}
-
-test "std.event.Loop - sleep" {
- // https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded) return error.SkipZigTest;
- if (!std.io.is_async) return error.SkipZigTest;
-
- const frames = try testing.allocator.alloc(@Frame(testSleep), 10);
- defer testing.allocator.free(frames);
-
- const wait_time = 100 * std.time.ns_per_ms;
- var sleep_count: usize = 0;
-
- for (frames) |*frame|
- frame.* = async testSleep(wait_time, &sleep_count);
- for (frames) |*frame|
- await frame;
-
- try testing.expect(sleep_count == frames.len);
-}
-
-fn testSleep(wait_ns: u64, sleep_count: *usize) void {
- Loop.instance.?.sleep(wait_ns);
- _ = @atomicRmw(usize, sleep_count, .Add, 1, .SeqCst);
-}
diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig
deleted file mode 100644
index 0f017a0ca0..0000000000
--- a/lib/std/event/rwlock.zig
+++ /dev/null
@@ -1,292 +0,0 @@
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const assert = std.debug.assert;
-const testing = std.testing;
-const mem = std.mem;
-const Loop = std.event.Loop;
-const Allocator = std.mem.Allocator;
-
-/// Thread-safe async/await lock.
-/// Functions which are waiting for the lock are suspended, and
-/// are resumed when the lock is released, in order.
-/// Many readers can hold the lock at the same time; however locking for writing is exclusive.
-/// When a read lock is held, it will not be released until the reader queue is empty.
-/// When a write lock is held, it will not be released until the writer queue is empty.
-/// TODO: make this API also work in blocking I/O mode
-pub const RwLock = struct {
- shared_state: State,
- writer_queue: Queue,
- reader_queue: Queue,
- writer_queue_empty: bool,
- reader_queue_empty: bool,
- reader_lock_count: usize,
-
- const State = enum(u8) {
- Unlocked,
- WriteLock,
- ReadLock,
- };
-
- const Queue = std.atomic.Queue(anyframe);
-
- const global_event_loop = Loop.instance orelse
- @compileError("std.event.RwLock currently only works with event-based I/O");
-
- pub const HeldRead = struct {
- lock: *RwLock,
-
- pub fn release(self: HeldRead) void {
- // If other readers still hold the lock, we're done.
- if (@atomicRmw(usize, &self.lock.reader_lock_count, .Sub, 1, .SeqCst) != 1) {
- return;
- }
-
- @atomicStore(bool, &self.lock.reader_queue_empty, true, .SeqCst);
- if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
- // Didn't unlock. Someone else's problem.
- return;
- }
-
- self.lock.commonPostUnlock();
- }
- };
-
- pub const HeldWrite = struct {
- lock: *RwLock,
-
- pub fn release(self: HeldWrite) void {
- // See if we can leave it locked for writing, and pass the lock to the next writer
- // in the queue to grab the lock.
- if (self.lock.writer_queue.get()) |node| {
- global_event_loop.onNextTick(node);
- return;
- }
-
- // We need to release the write lock. Check if any readers are waiting to grab the lock.
- if (!@atomicLoad(bool, &self.lock.reader_queue_empty, .SeqCst)) {
- // Switch to a read lock.
- @atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst);
- while (self.lock.reader_queue.get()) |node| {
- global_event_loop.onNextTick(node);
- }
- return;
- }
-
- @atomicStore(bool, &self.lock.writer_queue_empty, true, .SeqCst);
- @atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst);
-
- self.lock.commonPostUnlock();
- }
- };
-
- pub fn init() RwLock {
- return .{
- .shared_state = .Unlocked,
- .writer_queue = Queue.init(),
- .writer_queue_empty = true,
- .reader_queue = Queue.init(),
- .reader_queue_empty = true,
- .reader_lock_count = 0,
- };
- }
-
- /// Must be called when not locked. Not thread safe.
- /// All calls to acquire() and release() must complete before calling deinit().
- pub fn deinit(self: *RwLock) void {
- assert(self.shared_state == .Unlocked);
- while (self.writer_queue.get()) |node| resume node.data;
- while (self.reader_queue.get()) |node| resume node.data;
- }
-
- pub fn acquireRead(self: *RwLock) callconv(.Async) HeldRead {
- _ = @atomicRmw(usize, &self.reader_lock_count, .Add, 1, .SeqCst);
-
- suspend {
- var my_tick_node = Loop.NextTickNode{
- .data = @frame(),
- .prev = undefined,
- .next = undefined,
- };
-
- self.reader_queue.put(&my_tick_node);
-
- // At this point, we are in the reader_queue, so we might have already been resumed.
-
- // We set this bit so that later we can rely on the fact, that if reader_queue_empty == true,
- // some actor will attempt to grab the lock.
- @atomicStore(bool, &self.reader_queue_empty, false, .SeqCst);
-
- // Here we don't care if we are the one to do the locking or if it was already locked for reading.
- const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true;
- if (have_read_lock) {
- // Give out all the read locks.
- if (self.reader_queue.get()) |first_node| {
- while (self.reader_queue.get()) |node| {
- global_event_loop.onNextTick(node);
- }
- resume first_node.data;
- }
- }
- }
- return HeldRead{ .lock = self };
- }
-
- pub fn acquireWrite(self: *RwLock) callconv(.Async) HeldWrite {
- suspend {
- var my_tick_node = Loop.NextTickNode{
- .data = @frame(),
- .prev = undefined,
- .next = undefined,
- };
-
- self.writer_queue.put(&my_tick_node);
-
- // At this point, we are in the writer_queue, so we might have already been resumed.
-
- // We set this bit so that later we can rely on the fact, that if writer_queue_empty == true,
- // some actor will attempt to grab the lock.
- @atomicStore(bool, &self.writer_queue_empty, false, .SeqCst);
-
- // Here we must be the one to acquire the write lock. It cannot already be locked.
- if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) {
- // We now have a write lock.
- if (self.writer_queue.get()) |node| {
- // Whether this node is us or someone else, we tail resume it.
- resume node.data;
- }
- }
- }
- return HeldWrite{ .lock = self };
- }
-
- fn commonPostUnlock(self: *RwLock) void {
- while (true) {
- // There might be a writer_queue item or a reader_queue item
- // If we check and both are empty, we can be done, because the other actors will try to
- // obtain the lock.
- // But if there's a writer_queue item or a reader_queue item,
- // we are the actor which must loop and attempt to grab the lock again.
- if (!@atomicLoad(bool, &self.writer_queue_empty, .SeqCst)) {
- if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) != null) {
- // We did not obtain the lock. Great, the queues are someone else's problem.
- return;
- }
- // If there's an item in the writer queue, give them the lock, and we're done.
- if (self.writer_queue.get()) |node| {
- global_event_loop.onNextTick(node);
- return;
- }
- // Release the lock again.
- @atomicStore(bool, &self.writer_queue_empty, true, .SeqCst);
- @atomicStore(State, &self.shared_state, .Unlocked, .SeqCst);
- continue;
- }
-
- if (!@atomicLoad(bool, &self.reader_queue_empty, .SeqCst)) {
- if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst) != null) {
- // We did not obtain the lock. Great, the queues are someone else's problem.
- return;
- }
- // If there are any items in the reader queue, give out all the reader locks, and we're done.
- if (self.reader_queue.get()) |first_node| {
- global_event_loop.onNextTick(first_node);
- while (self.reader_queue.get()) |node| {
- global_event_loop.onNextTick(node);
- }
- return;
- }
- // Release the lock again.
- @atomicStore(bool, &self.reader_queue_empty, true, .SeqCst);
- if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
- // Didn't unlock. Someone else's problem.
- return;
- }
- continue;
- }
- return;
- }
- }
-};
-
-test "std.event.RwLock" {
- // https://github.com/ziglang/zig/issues/2377
- if (true) return error.SkipZigTest;
-
- // https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded) return error.SkipZigTest;
-
- // TODO provide a way to run tests in evented I/O mode
- if (!std.io.is_async) return error.SkipZigTest;
-
- var lock = RwLock.init();
- defer lock.deinit();
-
- _ = testLock(std.heap.page_allocator, &lock);
-
- const expected_result = [1]i32{shared_it_count * @as(i32, @intCast(shared_test_data.len))} ** shared_test_data.len;
- try testing.expectEqualSlices(i32, expected_result, shared_test_data);
-}
-fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void {
- var read_nodes: [100]Loop.NextTickNode = undefined;
- for (&read_nodes) |*read_node| {
- const frame = allocator.create(@Frame(readRunner)) catch @panic("memory");
- read_node.data = frame;
- frame.* = async readRunner(lock);
- Loop.instance.?.onNextTick(read_node);
- }
-
- var write_nodes: [shared_it_count]Loop.NextTickNode = undefined;
- for (&write_nodes) |*write_node| {
- const frame = allocator.create(@Frame(writeRunner)) catch @panic("memory");
- write_node.data = frame;
- frame.* = async writeRunner(lock);
- Loop.instance.?.onNextTick(write_node);
- }
-
- for (&write_nodes) |*write_node| {
- const casted = @as(*const @Frame(writeRunner), @ptrCast(write_node.data));
- await casted;
- allocator.destroy(casted);
- }
- for (&read_nodes) |*read_node| {
- const casted = @as(*const @Frame(readRunner), @ptrCast(read_node.data));
- await casted;
- allocator.destroy(casted);
- }
-}
-
-const shared_it_count = 10;
-var shared_test_data = [1]i32{0} ** 10;
-var shared_test_index: usize = 0;
-var shared_count: usize = 0;
-fn writeRunner(lock: *RwLock) callconv(.Async) void {
- suspend {} // resumed by onNextTick
-
- var i: usize = 0;
- while (i < shared_test_data.len) : (i += 1) {
- std.time.sleep(100 * std.time.microsecond);
- const lock_promise = async lock.acquireWrite();
- const handle = await lock_promise;
- defer handle.release();
-
- shared_count += 1;
- while (shared_test_index < shared_test_data.len) : (shared_test_index += 1) {
- shared_test_data[shared_test_index] = shared_test_data[shared_test_index] + 1;
- }
- shared_test_index = 0;
- }
-}
-fn readRunner(lock: *RwLock) callconv(.Async) void {
- suspend {} // resumed by onNextTick
- std.time.sleep(1);
-
- var i: usize = 0;
- while (i < shared_test_data.len) : (i += 1) {
- const lock_promise = async lock.acquireRead();
- const handle = await lock_promise;
- defer handle.release();
-
- try testing.expect(shared_test_index == 0);
- try testing.expect(shared_test_data[i] == @as(i32, @intCast(shared_count)));
- }
-}
diff --git a/lib/std/event/rwlocked.zig b/lib/std/event/rwlocked.zig
deleted file mode 100644
index 9a569e8f1f..0000000000
--- a/lib/std/event/rwlocked.zig
+++ /dev/null
@@ -1,57 +0,0 @@
-const std = @import("../std.zig");
-const RwLock = std.event.RwLock;
-
-/// Thread-safe async/await RW lock that protects one piece of data.
-/// Functions which are waiting for the lock are suspended, and
-/// are resumed when the lock is released, in order.
-pub fn RwLocked(comptime T: type) type {
- return struct {
- lock: RwLock,
- locked_data: T,
-
- const Self = @This();
-
- pub const HeldReadLock = struct {
- value: *const T,
- held: RwLock.HeldRead,
-
- pub fn release(self: HeldReadLock) void {
- self.held.release();
- }
- };
-
- pub const HeldWriteLock = struct {
- value: *T,
- held: RwLock.HeldWrite,
-
- pub fn release(self: HeldWriteLock) void {
- self.held.release();
- }
- };
-
- pub fn init(data: T) Self {
- return Self{
- .lock = RwLock.init(),
- .locked_data = data,
- };
- }
-
- pub fn deinit(self: *Self) void {
- self.lock.deinit();
- }
-
- pub fn acquireRead(self: *Self) callconv(.Async) HeldReadLock {
- return HeldReadLock{
- .held = self.lock.acquireRead(),
- .value = &self.locked_data,
- };
- }
-
- pub fn acquireWrite(self: *Self) callconv(.Async) HeldWriteLock {
- return HeldWriteLock{
- .held = self.lock.acquireWrite(),
- .value = &self.locked_data,
- };
- }
- };
-}
diff --git a/lib/std/event/wait_group.zig b/lib/std/event/wait_group.zig
deleted file mode 100644
index c88b01c812..0000000000
--- a/lib/std/event/wait_group.zig
+++ /dev/null
@@ -1,115 +0,0 @@
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const Loop = std.event.Loop;
-
-/// A WaitGroup keeps track and waits for a group of async tasks to finish.
-/// Call `begin` when creating new tasks, and have tasks call `finish` when done.
-/// You can provide a count for both operations to perform them in bulk.
-/// Call `wait` to suspend until all tasks are completed.
-/// Multiple waiters are supported.
-///
-/// WaitGroup is an instance of WaitGroupGeneric, which takes in a bitsize
-/// for the internal counter. WaitGroup defaults to a `usize` counter.
-/// It's also possible to define a max value for the counter so that
-/// `begin` will return error.Overflow when the limit is reached, even
-/// if the integer type has not has not overflowed.
-/// By default `max_value` is set to std.math.maxInt(CounterType).
-pub const WaitGroup = WaitGroupGeneric(@bitSizeOf(usize));
-
-pub fn WaitGroupGeneric(comptime counter_size: u16) type {
- const CounterType = std.meta.Int(.unsigned, counter_size);
-
- const global_event_loop = Loop.instance orelse
- @compileError("std.event.WaitGroup currently only works with event-based I/O");
-
- return struct {
- counter: CounterType = 0,
- max_counter: CounterType = std.math.maxInt(CounterType),
- mutex: std.Thread.Mutex = .{},
- waiters: ?*Waiter = null,
- const Waiter = struct {
- next: ?*Waiter,
- tail: *Waiter,
- node: Loop.NextTickNode,
- };
-
- const Self = @This();
- pub fn begin(self: *Self, count: CounterType) error{Overflow}!void {
- self.mutex.lock();
- defer self.mutex.unlock();
-
- const new_counter = try std.math.add(CounterType, self.counter, count);
- if (new_counter > self.max_counter) return error.Overflow;
- self.counter = new_counter;
- }
-
- pub fn finish(self: *Self, count: CounterType) void {
- var waiters = blk: {
- self.mutex.lock();
- defer self.mutex.unlock();
- self.counter = std.math.sub(CounterType, self.counter, count) catch unreachable;
- if (self.counter == 0) {
- const temp = self.waiters;
- self.waiters = null;
- break :blk temp;
- }
- break :blk null;
- };
-
- // We don't need to hold the lock to reschedule any potential waiter.
- while (waiters) |w| {
- const temp_w = w;
- waiters = w.next;
- global_event_loop.onNextTick(&temp_w.node);
- }
- }
-
- pub fn wait(self: *Self) void {
- self.mutex.lock();
-
- if (self.counter == 0) {
- self.mutex.unlock();
- return;
- }
-
- var self_waiter: Waiter = undefined;
- self_waiter.node.data = @frame();
- if (self.waiters) |head| {
- head.tail.next = &self_waiter;
- head.tail = &self_waiter;
- } else {
- self.waiters = &self_waiter;
- self_waiter.tail = &self_waiter;
- self_waiter.next = null;
- }
- suspend {
- self.mutex.unlock();
- }
- }
- };
-}
-
-test "basic WaitGroup usage" {
- if (!std.io.is_async) return error.SkipZigTest;
-
- // TODO https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded) return error.SkipZigTest;
-
- // TODO https://github.com/ziglang/zig/issues/3251
- if (builtin.os.tag == .freebsd) return error.SkipZigTest;
-
- var initial_wg = WaitGroup{};
- var final_wg = WaitGroup{};
-
- try initial_wg.begin(1);
- try final_wg.begin(1);
- var task_frame = async task(&initial_wg, &final_wg);
- initial_wg.finish(1);
- final_wg.wait();
- await task_frame;
-}
-
-fn task(wg_i: *WaitGroup, wg_f: *WaitGroup) void {
- wg_i.wait();
- wg_f.finish(1);
-}
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 93125c2530..5222e5e5ca 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -31,8 +31,6 @@ pub const realpathW = os.realpathW;
pub const getAppDataDir = @import("fs/get_app_data_dir.zig").getAppDataDir;
pub const GetAppDataDirError = @import("fs/get_app_data_dir.zig").GetAppDataDirError;
-pub const Watch = @import("fs/watch.zig").Watch;
-
/// This represents the maximum size of a UTF-8 encoded file path that the
/// operating system will accept. Paths, including those returned from file
/// system operations, may be longer than this length, but such paths cannot
@@ -86,13 +84,6 @@ pub const base64_encoder = base64.Base64Encoder.init(base64_alphabet, null);
/// Base64 decoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem.
pub const base64_decoder = base64.Base64Decoder.init(base64_alphabet, null);
-/// Whether or not async file system syscalls need a dedicated thread because the operating
-/// system does not support non-blocking I/O on the file system.
-pub const need_async_thread = std.io.is_async and switch (builtin.os.tag) {
- .windows, .other => false,
- else => true,
-};
-
/// TODO remove the allocator requirement from this API
/// TODO move to Dir
pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
@@ -231,17 +222,17 @@ pub fn renameW(old_dir: Dir, old_sub_path_w: []const u16, new_dir: Dir, new_sub_
/// On POSIX targets, this function is comptime-callable.
pub fn cwd() Dir {
if (builtin.os.tag == .windows) {
- return Dir{ .fd = os.windows.peb().ProcessParameters.CurrentDirectory.Handle };
+ return .{ .fd = os.windows.peb().ProcessParameters.CurrentDirectory.Handle };
} else if (builtin.os.tag == .wasi) {
- return std.options.wasiCwd();
+ return .{ .fd = std.options.wasiCwd() };
} else {
- return Dir{ .fd = os.AT.FDCWD };
+ return .{ .fd = os.AT.FDCWD };
}
}
-pub fn defaultWasiCwd() Dir {
+pub fn defaultWasiCwd() std.os.wasi.fd_t {
// Expect the first preopen to be current working directory.
- return .{ .fd = 3 };
+ return 3;
}
/// Opens a directory at the given path. The directory is a system resource that remains
@@ -641,5 +632,4 @@ test {
_ = &path;
_ = @import("fs/test.zig");
_ = @import("fs/get_app_data_dir.zig");
- _ = @import("fs/watch.zig");
}
diff --git a/lib/std/fs/Dir.zig b/lib/std/fs/Dir.zig
index 0f996affcc..8449d88d75 100644
--- a/lib/std/fs/Dir.zig
+++ b/lib/std/fs/Dir.zig
@@ -751,11 +751,7 @@ pub const OpenError = error{
} || posix.UnexpectedError;
pub fn close(self: *Dir) void {
- if (fs.need_async_thread) {
- std.event.Loop.instance.?.close(self.fd);
- } else {
- posix.close(self.fd);
- }
+ posix.close(self.fd);
self.* = undefined;
}
@@ -837,10 +833,7 @@ pub fn openFileZ(self: Dir, sub_path: [*:0]const u8, flags: File.OpenFlags) File
.write_only => @as(u32, posix.O.WRONLY),
.read_write => @as(u32, posix.O.RDWR),
};
- const fd = if (flags.intended_io_mode != .blocking)
- try std.event.Loop.instance.?.openatZ(self.fd, sub_path, os_flags, 0)
- else
- try posix.openatZ(self.fd, sub_path, os_flags, 0);
+ const fd = try posix.openatZ(self.fd, sub_path, os_flags, 0);
errdefer posix.close(fd);
// WASI doesn't have posix.flock so we intetinally check OS prior to the inner if block
@@ -877,11 +870,7 @@ pub fn openFileZ(self: Dir, sub_path: [*:0]const u8, flags: File.OpenFlags) File
};
}
- return File{
- .handle = fd,
- .capable_io_mode = .blocking,
- .intended_io_mode = flags.intended_io_mode,
- };
+ return File{ .handle = fd };
}
/// Same as `openFile` but Windows-only and the path parameter is
@@ -895,10 +884,7 @@ pub fn openFileW(self: Dir, sub_path_w: []const u16, flags: File.OpenFlags) File
(if (flags.isRead()) @as(u32, w.GENERIC_READ) else 0) |
(if (flags.isWrite()) @as(u32, w.GENERIC_WRITE) else 0),
.creation = w.FILE_OPEN,
- .io_mode = flags.intended_io_mode,
}),
- .capable_io_mode = std.io.default_mode,
- .intended_io_mode = flags.intended_io_mode,
};
errdefer file.close();
var io: w.IO_STATUS_BLOCK = undefined;
@@ -994,10 +980,7 @@ pub fn createFileZ(self: Dir, sub_path_c: [*:0]const u8, flags: File.CreateFlags
(if (flags.truncate) @as(u32, posix.O.TRUNC) else 0) |
(if (flags.read) @as(u32, posix.O.RDWR) else posix.O.WRONLY) |
(if (flags.exclusive) @as(u32, posix.O.EXCL) else 0);
- const fd = if (flags.intended_io_mode != .blocking)
- try std.event.Loop.instance.?.openatZ(self.fd, sub_path_c, os_flags, flags.mode)
- else
- try posix.openatZ(self.fd, sub_path_c, os_flags, flags.mode);
+ const fd = try posix.openatZ(self.fd, sub_path_c, os_flags, flags.mode);
errdefer posix.close(fd);
// WASI doesn't have posix.flock so we intetinally check OS prior to the inner if block
@@ -1034,11 +1017,7 @@ pub fn createFileZ(self: Dir, sub_path_c: [*:0]const u8, flags: File.CreateFlags
};
}
- return File{
- .handle = fd,
- .capable_io_mode = .blocking,
- .intended_io_mode = flags.intended_io_mode,
- };
+ return File{ .handle = fd };
}
/// Same as `createFile` but Windows-only and the path parameter is
@@ -1056,10 +1035,7 @@ pub fn createFileW(self: Dir, sub_path_w: []const u16, flags: File.CreateFlags)
@as(u32, w.FILE_OVERWRITE_IF)
else
@as(u32, w.FILE_OPEN_IF),
- .io_mode = flags.intended_io_mode,
}),
- .capable_io_mode = std.io.default_mode,
- .intended_io_mode = flags.intended_io_mode,
};
errdefer file.close();
var io: w.IO_STATUS_BLOCK = undefined;
@@ -1276,7 +1252,6 @@ pub fn realpathW(self: Dir, pathname: []const u16, out_buffer: []u8) ![]u8 {
.access_mask = access_mask,
.share_access = share_access,
.creation = creation,
- .io_mode = .blocking,
.filter = .any,
}) catch |err| switch (err) {
error.WouldBlock => unreachable,
@@ -1449,11 +1424,7 @@ pub fn openDirW(self: Dir, sub_path_w: [*:0]const u16, args: OpenDirOptions) Ope
/// `flags` must contain `posix.O.DIRECTORY`.
fn openDirFlagsZ(self: Dir, sub_path_c: [*:0]const u8, flags: u32) OpenError!Dir {
- const result = if (fs.need_async_thread)
- std.event.Loop.instance.?.openatZ(self.fd, sub_path_c, flags, 0)
- else
- posix.openatZ(self.fd, sub_path_c, flags, 0);
- const fd = result catch |err| switch (err) {
+ const fd = posix.openatZ(self.fd, sub_path_c, flags, 0) catch |err| switch (err) {
error.FileTooBig => unreachable, // can't happen for directories
error.IsDir => unreachable, // we're providing O.DIRECTORY
error.NoSpaceLeft => unreachable, // not providing O.CREAT
@@ -2270,10 +2241,7 @@ pub fn accessZ(self: Dir, sub_path: [*:0]const u8, flags: File.OpenFlags) Access
.write_only => @as(u32, posix.W_OK),
.read_write => @as(u32, posix.R_OK | posix.W_OK),
};
- const result = if (fs.need_async_thread and flags.intended_io_mode != .blocking)
- std.event.Loop.instance.?.faccessatZ(self.fd, sub_path, os_mode, 0)
- else
- posix.faccessatZ(self.fd, sub_path, os_mode, 0);
+ const result = posix.faccessatZ(self.fd, sub_path, os_mode, 0);
return result;
}
@@ -2457,10 +2425,7 @@ pub const Stat = File.Stat;
pub const StatError = File.StatError;
pub fn stat(self: Dir) StatError!Stat {
- const file: File = .{
- .handle = self.fd,
- .capable_io_mode = .blocking,
- };
+ const file: File = .{ .handle = self.fd };
return file.stat();
}
@@ -2496,10 +2461,7 @@ pub const ChmodError = File.ChmodError;
/// of the directory. Additionally, the directory must have been opened
/// with `OpenDirOptions{ .iterate = true }`.
pub fn chmod(self: Dir, new_mode: File.Mode) ChmodError!void {
- const file: File = .{
- .handle = self.fd,
- .capable_io_mode = .blocking,
- };
+ const file: File = .{ .handle = self.fd };
try file.chmod(new_mode);
}
@@ -2510,10 +2472,7 @@ pub fn chmod(self: Dir, new_mode: File.Mode) ChmodError!void {
/// must have been opened with `OpenDirOptions{ .iterate = true }`. If the
/// owner or group is specified as `null`, the ID is not changed.
pub fn chown(self: Dir, owner: ?File.Uid, group: ?File.Gid) ChownError!void {
- const file: File = .{
- .handle = self.fd,
- .capable_io_mode = .blocking,
- };
+ const file: File = .{ .handle = self.fd };
try file.chown(owner, group);
}
@@ -2525,10 +2484,7 @@ pub const SetPermissionsError = File.SetPermissionsError;
/// Sets permissions according to the provided `Permissions` struct.
/// This method is *NOT* available on WASI
pub fn setPermissions(self: Dir, permissions: Permissions) SetPermissionsError!void {
- const file: File = .{
- .handle = self.fd,
- .capable_io_mode = .blocking,
- };
+ const file: File = .{ .handle = self.fd };
try file.setPermissions(permissions);
}
@@ -2537,10 +2493,7 @@ pub const MetadataError = File.MetadataError;
/// Returns a `Metadata` struct, representing the permissions on the directory
pub fn metadata(self: Dir) MetadataError!Metadata {
- const file: File = .{
- .handle = self.fd,
- .capable_io_mode = .blocking,
- };
+ const file: File = .{ .handle = self.fd };
return try file.metadata();
}
diff --git a/lib/std/fs/File.zig b/lib/std/fs/File.zig
index b286380c85..576222ce73 100644
--- a/lib/std/fs/File.zig
+++ b/lib/std/fs/File.zig
@@ -1,20 +1,6 @@
/// The OS-specific file descriptor or file handle.
handle: Handle,
-/// On some systems, such as Linux, file system file descriptors are incapable
-/// of non-blocking I/O. This forces us to perform asynchronous I/O on a dedicated thread,
-/// to achieve non-blocking file-system I/O. To do this, `File` must be aware of whether
-/// it is a file system file descriptor, or, more specifically, whether the I/O is always
-/// blocking.
-capable_io_mode: io.ModeOverride = io.default_mode,
-
-/// Furthermore, even when `std.options.io_mode` is async, it is still sometimes desirable
-/// to perform blocking I/O, although not by default. For example, when printing a
-/// stack trace to stderr. This field tracks both by acting as an overriding I/O mode.
-/// When not building in async I/O mode, the type only has the `.blocking` tag, making
-/// it a zero-bit type.
-intended_io_mode: io.ModeOverride = io.default_mode,
-
pub const Handle = posix.fd_t;
pub const Mode = posix.mode_t;
pub const INode = posix.ino_t;
@@ -108,16 +94,8 @@ pub const OpenFlags = struct {
/// Sets whether or not to wait until the file is locked to return. If set to true,
/// `error.WouldBlock` will be returned. Otherwise, the file will wait until the file
/// is available to proceed.
- /// In async I/O mode, non-blocking at the OS level is
- /// determined by `intended_io_mode`, and `true` means `error.WouldBlock` is returned,
- /// and `false` means `error.WouldBlock` is handled by the event loop.
lock_nonblocking: bool = false,
- /// Setting this to `.blocking` prevents `O.NONBLOCK` from being passed even
- /// if `std.io.is_async`. It allows the use of `nosuspend` when calling functions
- /// related to opening the file, reading, writing, and locking.
- intended_io_mode: io.ModeOverride = io.default_mode,
-
/// Set this to allow the opened file to automatically become the
/// controlling TTY for the current process.
allow_ctty: bool = false,
@@ -172,19 +150,11 @@ pub const CreateFlags = struct {
/// Sets whether or not to wait until the file is locked to return. If set to true,
/// `error.WouldBlock` will be returned. Otherwise, the file will wait until the file
/// is available to proceed.
- /// In async I/O mode, non-blocking at the OS level is
- /// determined by `intended_io_mode`, and `true` means `error.WouldBlock` is returned,
- /// and `false` means `error.WouldBlock` is handled by the event loop.
lock_nonblocking: bool = false,
/// For POSIX systems this is the file system mode the file will
/// be created with. On other systems this is always 0.
mode: Mode = default_mode,
-
- /// Setting this to `.blocking` prevents `O.NONBLOCK` from being passed even
- /// if `std.io.is_async`. It allows the use of `nosuspend` when calling functions
- /// related to opening the file, reading, writing, and locking.
- intended_io_mode: io.ModeOverride = io.default_mode,
};
/// Upon success, the stream is in an uninitialized state. To continue using it,
@@ -192,8 +162,6 @@ pub const CreateFlags = struct {
pub fn close(self: File) void {
if (is_windows) {
windows.CloseHandle(self.handle);
- } else if (self.capable_io_mode != self.intended_io_mode) {
- std.event.Loop.instance.?.close(self.handle);
} else {
posix.close(self.handle);
}
@@ -1013,14 +981,10 @@ pub const PReadError = posix.PReadError;
pub fn read(self: File, buffer: []u8) ReadError!usize {
if (is_windows) {
- return windows.ReadFile(self.handle, buffer, null, self.intended_io_mode);
+ return windows.ReadFile(self.handle, buffer, null);
}
- if (self.intended_io_mode == .blocking) {
- return posix.read(self.handle, buffer);
- } else {
- return std.event.Loop.instance.?.read(self.handle, buffer, self.capable_io_mode != self.intended_io_mode);
- }
+ return posix.read(self.handle, buffer);
}
/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
@@ -1039,14 +1003,10 @@ pub fn readAll(self: File, buffer: []u8) ReadError!usize {
/// https://github.com/ziglang/zig/issues/12783
pub fn pread(self: File, buffer: []u8, offset: u64) PReadError!usize {
if (is_windows) {
- return windows.ReadFile(self.handle, buffer, offset, self.intended_io_mode);
+ return windows.ReadFile(self.handle, buffer, offset);
}
- if (self.intended_io_mode == .blocking) {
- return posix.pread(self.handle, buffer, offset);
- } else {
- return std.event.Loop.instance.?.pread(self.handle, buffer, offset, self.capable_io_mode != self.intended_io_mode);
- }
+ return posix.pread(self.handle, buffer, offset);
}
/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
@@ -1069,14 +1029,10 @@ pub fn readv(self: File, iovecs: []const posix.iovec) ReadError!usize {
// TODO improve this to use ReadFileScatter
if (iovecs.len == 0) return @as(usize, 0);
const first = iovecs[0];
- return windows.ReadFile(self.handle, first.iov_base[0..first.iov_len], null, self.intended_io_mode);
+ return windows.ReadFile(self.handle, first.iov_base[0..first.iov_len], null);
}
- if (self.intended_io_mode == .blocking) {
- return posix.readv(self.handle, iovecs);
- } else {
- return std.event.Loop.instance.?.readv(self.handle, iovecs, self.capable_io_mode != self.intended_io_mode);
- }
+ return posix.readv(self.handle, iovecs);
}
/// Returns the number of bytes read. If the number read is smaller than the total bytes
@@ -1129,14 +1085,10 @@ pub fn preadv(self: File, iovecs: []const posix.iovec, offset: u64) PReadError!u
// TODO improve this to use ReadFileScatter
if (iovecs.len == 0) return @as(usize, 0);
const first = iovecs[0];
- return windows.ReadFile(self.handle, first.iov_base[0..first.iov_len], offset, self.intended_io_mode);
+ return windows.ReadFile(self.handle, first.iov_base[0..first.iov_len], offset);
}
- if (self.intended_io_mode == .blocking) {
- return posix.preadv(self.handle, iovecs, offset);
- } else {
- return std.event.Loop.instance.?.preadv(self.handle, iovecs, offset, self.capable_io_mode != self.intended_io_mode);
- }
+ return posix.preadv(self.handle, iovecs, offset);
}
/// Returns the number of bytes read. If the number read is smaller than the total bytes
@@ -1173,14 +1125,10 @@ pub const PWriteError = posix.PWriteError;
pub fn write(self: File, bytes: []const u8) WriteError!usize {
if (is_windows) {
- return windows.WriteFile(self.handle, bytes, null, self.intended_io_mode);
+ return windows.WriteFile(self.handle, bytes, null);
}
- if (self.intended_io_mode == .blocking) {
- return posix.write(self.handle, bytes);
- } else {
- return std.event.Loop.instance.?.write(self.handle, bytes, self.capable_io_mode != self.intended_io_mode);
- }
+ return posix.write(self.handle, bytes);
}
pub fn writeAll(self: File, bytes: []const u8) WriteError!void {
@@ -1194,14 +1142,10 @@ pub fn writeAll(self: File, bytes: []const u8) WriteError!void {
/// https://github.com/ziglang/zig/issues/12783
pub fn pwrite(self: File, bytes: []const u8, offset: u64) PWriteError!usize {
if (is_windows) {
- return windows.WriteFile(self.handle, bytes, offset, self.intended_io_mode);
+ return windows.WriteFile(self.handle, bytes, offset);
}
- if (self.intended_io_mode == .blocking) {
- return posix.pwrite(self.handle, bytes, offset);
- } else {
- return std.event.Loop.instance.?.pwrite(self.handle, bytes, offset, self.capable_io_mode != self.intended_io_mode);
- }
+ return posix.pwrite(self.handle, bytes, offset);
}
/// On Windows, this function currently does alter the file pointer.
@@ -1220,14 +1164,10 @@ pub fn writev(self: File, iovecs: []const posix.iovec_const) WriteError!usize {
// TODO improve this to use WriteFileScatter
if (iovecs.len == 0) return @as(usize, 0);
const first = iovecs[0];
- return windows.WriteFile(self.handle, first.iov_base[0..first.iov_len], null, self.intended_io_mode);
+ return windows.WriteFile(self.handle, first.iov_base[0..first.iov_len], null);
}
- if (self.intended_io_mode == .blocking) {
- return posix.writev(self.handle, iovecs);
- } else {
- return std.event.Loop.instance.?.writev(self.handle, iovecs, self.capable_io_mode != self.intended_io_mode);
- }
+ return posix.writev(self.handle, iovecs);
}
/// The `iovecs` parameter is mutable because:
@@ -1271,14 +1211,10 @@ pub fn pwritev(self: File, iovecs: []posix.iovec_const, offset: u64) PWriteError
// TODO improve this to use WriteFileScatter
if (iovecs.len == 0) return @as(usize, 0);
const first = iovecs[0];
- return windows.WriteFile(self.handle, first.iov_base[0..first.iov_len], offset, self.intended_io_mode);
+ return windows.WriteFile(self.handle, first.iov_base[0..first.iov_len], offset);
}
- if (self.intended_io_mode == .blocking) {
- return posix.pwritev(self.handle, iovecs, offset);
- } else {
- return std.event.Loop.instance.?.pwritev(self.handle, iovecs, offset, self.capable_io_mode != self.intended_io_mode);
- }
+ return posix.pwritev(self.handle, iovecs, offset);
}
/// The `iovecs` parameter is mutable because this function needs to mutate the fields in
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 63102e49d2..b84d0ce0f4 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -1508,11 +1508,6 @@ test "open file with exclusive and shared nonblocking lock" {
test "open file with exclusive lock twice, make sure second lock waits" {
if (builtin.single_threaded) return error.SkipZigTest;
- if (std.io.is_async) {
- // This test starts its own threads and is not compatible with async I/O.
- return error.SkipZigTest;
- }
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const filename = try ctx.transformPath("file_lock_test.txt");
diff --git a/lib/std/fs/watch.zig b/lib/std/fs/watch.zig
deleted file mode 100644
index e6485093ca..0000000000
--- a/lib/std/fs/watch.zig
+++ /dev/null
@@ -1,719 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const event = std.event;
-const assert = std.debug.assert;
-const testing = std.testing;
-const os = std.os;
-const mem = std.mem;
-const windows = os.windows;
-const Loop = event.Loop;
-const fd_t = os.fd_t;
-const File = std.fs.File;
-const Allocator = mem.Allocator;
-
-const global_event_loop = Loop.instance orelse
- @compileError("std.fs.Watch currently only works with event-based I/O");
-
-const WatchEventId = enum {
- CloseWrite,
- Delete,
-};
-
-const WatchEventError = error{
- UserResourceLimitReached,
- SystemResources,
- AccessDenied,
- Unexpected, // TODO remove this possibility
-};
-
-pub fn Watch(comptime V: type) type {
- return struct {
- channel: event.Channel(Event.Error!Event),
- os_data: OsData,
- allocator: Allocator,
-
- const OsData = switch (builtin.os.tag) {
- // TODO https://github.com/ziglang/zig/issues/3778
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => KqOsData,
- .linux => LinuxOsData,
- .windows => WindowsOsData,
-
- else => @compileError("Unsupported OS"),
- };
-
- const KqOsData = struct {
- table_lock: event.Lock,
- file_table: FileTable,
-
- const FileTable = std.StringHashMapUnmanaged(*Put);
- const Put = struct {
- putter_frame: @Frame(kqPutEvents),
- cancelled: bool = false,
- value: V,
- };
- };
-
- const WindowsOsData = struct {
- table_lock: event.Lock,
- dir_table: DirTable,
- cancelled: bool = false,
-
- const DirTable = std.StringHashMapUnmanaged(*Dir);
- const FileTable = std.StringHashMapUnmanaged(V);
-
- const Dir = struct {
- putter_frame: @Frame(windowsDirReader),
- file_table: FileTable,
- dir_handle: os.windows.HANDLE,
- };
- };
-
- const LinuxOsData = struct {
- putter_frame: @Frame(linuxEventPutter),
- inotify_fd: i32,
- wd_table: WdTable,
- table_lock: event.Lock,
- cancelled: bool = false,
-
- const WdTable = std.AutoHashMapUnmanaged(i32, Dir);
- const FileTable = std.StringHashMapUnmanaged(V);
-
- const Dir = struct {
- dirname: []const u8,
- file_table: FileTable,
- };
- };
-
- const Self = @This();
-
- pub const Event = struct {
- id: Id,
- data: V,
- dirname: []const u8,
- basename: []const u8,
-
- pub const Id = WatchEventId;
- pub const Error = WatchEventError;
- };
-
- pub fn init(allocator: Allocator, event_buf_count: usize) !*Self {
- const self = try allocator.create(Self);
- errdefer allocator.destroy(self);
-
- switch (builtin.os.tag) {
- .linux => {
- const inotify_fd = try os.inotify_init1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC);
- errdefer os.close(inotify_fd);
-
- self.* = Self{
- .allocator = allocator,
- .channel = undefined,
- .os_data = OsData{
- .putter_frame = undefined,
- .inotify_fd = inotify_fd,
- .wd_table = OsData.WdTable.init(allocator),
- .table_lock = event.Lock{},
- },
- };
-
- const buf = try allocator.alloc(Event.Error!Event, event_buf_count);
- self.channel.init(buf);
- self.os_data.putter_frame = async self.linuxEventPutter();
- return self;
- },
-
- .windows => {
- self.* = Self{
- .allocator = allocator,
- .channel = undefined,
- .os_data = OsData{
- .table_lock = event.Lock{},
- .dir_table = OsData.DirTable.init(allocator),
- },
- };
-
- const buf = try allocator.alloc(Event.Error!Event, event_buf_count);
- self.channel.init(buf);
- return self;
- },
-
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- self.* = Self{
- .allocator = allocator,
- .channel = undefined,
- .os_data = OsData{
- .table_lock = event.Lock{},
- .file_table = OsData.FileTable.init(allocator),
- },
- };
-
- const buf = try allocator.alloc(Event.Error!Event, event_buf_count);
- self.channel.init(buf);
- return self;
- },
- else => @compileError("Unsupported OS"),
- }
- }
-
- pub fn deinit(self: *Self) void {
- switch (builtin.os.tag) {
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- var it = self.os_data.file_table.iterator();
- while (it.next()) |entry| {
- const key = entry.key_ptr.*;
- const value = entry.value_ptr.*;
- value.cancelled = true;
- // @TODO Close the fd here?
- await value.putter_frame;
- self.allocator.free(key);
- self.allocator.destroy(value);
- }
- },
- .linux => {
- self.os_data.cancelled = true;
- {
- // Remove all directory watches linuxEventPutter will take care of
- // cleaning up the memory and closing the inotify fd.
- var dir_it = self.os_data.wd_table.keyIterator();
- while (dir_it.next()) |wd_key| {
- const rc = os.linux.inotify_rm_watch(self.os_data.inotify_fd, wd_key.*);
- // Errno can only be EBADF, EINVAL if either the inotify fs or the wd are invalid
- std.debug.assert(rc == 0);
- }
- }
- await self.os_data.putter_frame;
- },
- .windows => {
- self.os_data.cancelled = true;
- var dir_it = self.os_data.dir_table.iterator();
- while (dir_it.next()) |dir_entry| {
- if (windows.kernel32.CancelIoEx(dir_entry.value.dir_handle, null) != 0) {
- // We canceled the pending ReadDirectoryChangesW operation, but our
- // frame is still suspending, now waiting indefinitely.
- // Thus, it is safe to resume it ourslves
- resume dir_entry.value.putter_frame;
- } else {
- std.debug.assert(windows.kernel32.GetLastError() == .NOT_FOUND);
- // We are at another suspend point, we can await safely for the
- // function to exit the loop
- await dir_entry.value.putter_frame;
- }
-
- self.allocator.free(dir_entry.key_ptr.*);
- var file_it = dir_entry.value.file_table.keyIterator();
- while (file_it.next()) |file_entry| {
- self.allocator.free(file_entry.*);
- }
- dir_entry.value.file_table.deinit(self.allocator);
- self.allocator.destroy(dir_entry.value_ptr.*);
- }
- self.os_data.dir_table.deinit(self.allocator);
- },
- else => @compileError("Unsupported OS"),
- }
- self.allocator.free(self.channel.buffer_nodes);
- self.channel.deinit();
- self.allocator.destroy(self);
- }
-
- pub fn addFile(self: *Self, file_path: []const u8, value: V) !?V {
- switch (builtin.os.tag) {
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => return addFileKEvent(self, file_path, value),
- .linux => return addFileLinux(self, file_path, value),
- .windows => return addFileWindows(self, file_path, value),
- else => @compileError("Unsupported OS"),
- }
- }
-
- fn addFileKEvent(self: *Self, file_path: []const u8, value: V) !?V {
- var realpath_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- const realpath = try os.realpath(file_path, &realpath_buf);
-
- const held = self.os_data.table_lock.acquire();
- defer held.release();
-
- const gop = try self.os_data.file_table.getOrPut(self.allocator, realpath);
- errdefer assert(self.os_data.file_table.remove(realpath));
- if (gop.found_existing) {
- const prev_value = gop.value_ptr.value;
- gop.value_ptr.value = value;
- return prev_value;
- }
-
- gop.key_ptr.* = try self.allocator.dupe(u8, realpath);
- errdefer self.allocator.free(gop.key_ptr.*);
- gop.value_ptr.* = try self.allocator.create(OsData.Put);
- errdefer self.allocator.destroy(gop.value_ptr.*);
- gop.value_ptr.* = .{
- .putter_frame = undefined,
- .value = value,
- };
-
- // @TODO Can I close this fd and get an error from bsdWaitKev?
- const flags = if (comptime builtin.target.isDarwin()) os.O.SYMLINK | os.O.EVTONLY else 0;
- const fd = try os.open(realpath, flags, 0);
- gop.value_ptr.putter_frame = async self.kqPutEvents(fd, gop.key_ptr.*, gop.value_ptr.*);
- return null;
- }
-
- fn kqPutEvents(self: *Self, fd: os.fd_t, file_path: []const u8, put: *OsData.Put) void {
- global_event_loop.beginOneEvent();
- defer {
- global_event_loop.finishOneEvent();
- // @TODO: Remove this if we force close otherwise
- os.close(fd);
- }
-
- // We need to manually do a bsdWaitKev to access the fflags.
- var resume_node = event.Loop.ResumeNode.Basic{
- .base = .{
- .id = .Basic,
- .handle = @frame(),
- .overlapped = event.Loop.ResumeNode.overlapped_init,
- },
- .kev = undefined,
- };
-
- var kevs = [1]os.Kevent{undefined};
- const kev = &kevs[0];
-
- while (!put.cancelled) {
- kev.* = os.Kevent{
- .ident = @as(usize, @intCast(fd)),
- .filter = os.EVFILT_VNODE,
- .flags = os.EV_ADD | os.EV_ENABLE | os.EV_CLEAR | os.EV_ONESHOT |
- os.NOTE_WRITE | os.NOTE_DELETE | os.NOTE_REVOKE,
- .fflags = 0,
- .data = 0,
- .udata = @intFromPtr(&resume_node.base),
- };
- suspend {
- global_event_loop.beginOneEvent();
- errdefer global_event_loop.finishOneEvent();
-
- const empty_kevs = &[0]os.Kevent{};
- _ = os.kevent(global_event_loop.os_data.kqfd, &kevs, empty_kevs, null) catch |err| switch (err) {
- error.EventNotFound,
- error.ProcessNotFound,
- error.Overflow,
- => unreachable,
- error.AccessDenied, error.SystemResources => |e| {
- self.channel.put(e);
- continue;
- },
- };
- }
-
- if (kev.flags & os.EV_ERROR != 0) {
- self.channel.put(os.unexpectedErrno(os.errno(kev.data)));
- continue;
- }
-
- if (kev.fflags & os.NOTE_DELETE != 0 or kev.fflags & os.NOTE_REVOKE != 0) {
- self.channel.put(Self.Event{
- .id = .Delete,
- .data = put.value,
- .dirname = std.fs.path.dirname(file_path) orelse "/",
- .basename = std.fs.path.basename(file_path),
- });
- } else if (kev.fflags & os.NOTE_WRITE != 0) {
- self.channel.put(Self.Event{
- .id = .CloseWrite,
- .data = put.value,
- .dirname = std.fs.path.dirname(file_path) orelse "/",
- .basename = std.fs.path.basename(file_path),
- });
- }
- }
- }
-
- fn addFileLinux(self: *Self, file_path: []const u8, value: V) !?V {
- const dirname = std.fs.path.dirname(file_path) orelse if (file_path[0] == '/') "/" else ".";
- const basename = std.fs.path.basename(file_path);
-
- const wd = try os.inotify_add_watch(
- self.os_data.inotify_fd,
- dirname,
- os.linux.IN_CLOSE_WRITE | os.linux.IN_ONLYDIR | os.linux.IN_DELETE | os.linux.IN_EXCL_UNLINK,
- );
- // wd is either a newly created watch or an existing one.
-
- const held = self.os_data.table_lock.acquire();
- defer held.release();
-
- const gop = try self.os_data.wd_table.getOrPut(self.allocator, wd);
- errdefer assert(self.os_data.wd_table.remove(wd));
- if (!gop.found_existing) {
- gop.value_ptr.* = OsData.Dir{
- .dirname = try self.allocator.dupe(u8, dirname),
- .file_table = OsData.FileTable.init(self.allocator),
- };
- }
-
- const dir = gop.value_ptr;
- const file_table_gop = try dir.file_table.getOrPut(self.allocator, basename);
- errdefer assert(dir.file_table.remove(basename));
- if (file_table_gop.found_existing) {
- const prev_value = file_table_gop.value_ptr.*;
- file_table_gop.value_ptr.* = value;
- return prev_value;
- } else {
- file_table_gop.key_ptr.* = try self.allocator.dupe(u8, basename);
- file_table_gop.value_ptr.* = value;
- return null;
- }
- }
-
- fn addFileWindows(self: *Self, file_path: []const u8, value: V) !?V {
- // TODO we might need to convert dirname and basename to canonical file paths ("short"?)
- const dirname = std.fs.path.dirname(file_path) orelse if (file_path[0] == '/') "/" else ".";
- var dirname_path_space: windows.PathSpace = undefined;
- dirname_path_space.len = try std.unicode.utf8ToUtf16Le(&dirname_path_space.data, dirname);
- dirname_path_space.data[dirname_path_space.len] = 0;
-
- const basename = std.fs.path.basename(file_path);
- var basename_path_space: windows.PathSpace = undefined;
- basename_path_space.len = try std.unicode.utf8ToUtf16Le(&basename_path_space.data, basename);
- basename_path_space.data[basename_path_space.len] = 0;
-
- const held = self.os_data.table_lock.acquire();
- defer held.release();
-
- const gop = try self.os_data.dir_table.getOrPut(self.allocator, dirname);
- errdefer assert(self.os_data.dir_table.remove(dirname));
- if (gop.found_existing) {
- const dir = gop.value_ptr.*;
-
- const file_gop = try dir.file_table.getOrPut(self.allocator, basename);
- errdefer assert(dir.file_table.remove(basename));
- if (file_gop.found_existing) {
- const prev_value = file_gop.value_ptr.*;
- file_gop.value_ptr.* = value;
- return prev_value;
- } else {
- file_gop.value_ptr.* = value;
- file_gop.key_ptr.* = try self.allocator.dupe(u8, basename);
- return null;
- }
- } else {
- const dir_handle = try windows.OpenFile(dirname_path_space.span(), .{
- .dir = std.fs.cwd().fd,
- .access_mask = windows.FILE_LIST_DIRECTORY,
- .creation = windows.FILE_OPEN,
- .io_mode = .evented,
- .filter = .dir_only,
- });
- errdefer windows.CloseHandle(dir_handle);
-
- const dir = try self.allocator.create(OsData.Dir);
- errdefer self.allocator.destroy(dir);
-
- gop.key_ptr.* = try self.allocator.dupe(u8, dirname);
- errdefer self.allocator.free(gop.key_ptr.*);
-
- dir.* = OsData.Dir{
- .file_table = OsData.FileTable.init(self.allocator),
- .putter_frame = undefined,
- .dir_handle = dir_handle,
- };
- gop.value_ptr.* = dir;
- try dir.file_table.put(self.allocator, try self.allocator.dupe(u8, basename), value);
- dir.putter_frame = async self.windowsDirReader(dir, gop.key_ptr.*);
- return null;
- }
- }
-
- fn windowsDirReader(self: *Self, dir: *OsData.Dir, dirname: []const u8) void {
- defer os.close(dir.dir_handle);
- var resume_node = Loop.ResumeNode.Basic{
- .base = Loop.ResumeNode{
- .id = .Basic,
- .handle = @frame(),
- .overlapped = windows.OVERLAPPED{
- .Internal = 0,
- .InternalHigh = 0,
- .DUMMYUNIONNAME = .{
- .DUMMYSTRUCTNAME = .{
- .Offset = 0,
- .OffsetHigh = 0,
- },
- },
- .hEvent = null,
- },
- },
- };
-
- var event_buf: [4096]u8 align(@alignOf(windows.FILE_NOTIFY_INFORMATION)) = undefined;
-
- global_event_loop.beginOneEvent();
- defer global_event_loop.finishOneEvent();
-
- while (!self.os_data.cancelled) main_loop: {
- suspend {
- _ = windows.kernel32.ReadDirectoryChangesW(
- dir.dir_handle,
- &event_buf,
- event_buf.len,
- windows.FALSE, // watch subtree
- windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME |
- windows.FILE_NOTIFY_CHANGE_ATTRIBUTES | windows.FILE_NOTIFY_CHANGE_SIZE |
- windows.FILE_NOTIFY_CHANGE_LAST_WRITE | windows.FILE_NOTIFY_CHANGE_LAST_ACCESS |
- windows.FILE_NOTIFY_CHANGE_CREATION | windows.FILE_NOTIFY_CHANGE_SECURITY,
- null, // number of bytes transferred (unused for async)
- &resume_node.base.overlapped,
- null, // completion routine - unused because we use IOCP
- );
- }
-
- var bytes_transferred: windows.DWORD = undefined;
- if (windows.kernel32.GetOverlappedResult(
- dir.dir_handle,
- &resume_node.base.overlapped,
- &bytes_transferred,
- windows.FALSE,
- ) == 0) {
- const potential_error = windows.kernel32.GetLastError();
- const err = switch (potential_error) {
- .OPERATION_ABORTED, .IO_INCOMPLETE => err_blk: {
- if (self.os_data.cancelled)
- break :main_loop
- else
- break :err_blk windows.unexpectedError(potential_error);
- },
- else => |err| windows.unexpectedError(err),
- };
- self.channel.put(err);
- } else {
- var ptr: [*]u8 = &event_buf;
- const end_ptr = ptr + bytes_transferred;
- while (@intFromPtr(ptr) < @intFromPtr(end_ptr)) {
- const ev = @as(*const windows.FILE_NOTIFY_INFORMATION, @ptrCast(ptr));
- const emit = switch (ev.Action) {
- windows.FILE_ACTION_REMOVED => WatchEventId.Delete,
- windows.FILE_ACTION_MODIFIED => .CloseWrite,
- else => null,
- };
- if (emit) |id| {
- const basename_ptr = @as([*]u16, @ptrCast(ptr + @sizeOf(windows.FILE_NOTIFY_INFORMATION)));
- const basename_utf16le = basename_ptr[0 .. ev.FileNameLength / 2];
- var basename_data: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- const basename = basename_data[0 .. std.unicode.utf16leToUtf8(&basename_data, basename_utf16le) catch unreachable];
-
- if (dir.file_table.getEntry(basename)) |entry| {
- self.channel.put(Event{
- .id = id,
- .data = entry.value_ptr.*,
- .dirname = dirname,
- .basename = entry.key_ptr.*,
- });
- }
- }
-
- if (ev.NextEntryOffset == 0) break;
- ptr = @alignCast(ptr + ev.NextEntryOffset);
- }
- }
- }
- }
-
- pub fn removeFile(self: *Self, file_path: []const u8) !?V {
- switch (builtin.os.tag) {
- .linux => {
- const dirname = std.fs.path.dirname(file_path) orelse if (file_path[0] == '/') "/" else ".";
- const basename = std.fs.path.basename(file_path);
-
- const held = self.os_data.table_lock.acquire();
- defer held.release();
-
- const dir = self.os_data.wd_table.get(dirname) orelse return null;
- if (dir.file_table.fetchRemove(basename)) |file_entry| {
- self.allocator.free(file_entry.key);
- return file_entry.value;
- }
- return null;
- },
- .windows => {
- const dirname = std.fs.path.dirname(file_path) orelse if (file_path[0] == '/') "/" else ".";
- const basename = std.fs.path.basename(file_path);
-
- const held = self.os_data.table_lock.acquire();
- defer held.release();
-
- const dir = self.os_data.dir_table.get(dirname) orelse return null;
- if (dir.file_table.fetchRemove(basename)) |file_entry| {
- self.allocator.free(file_entry.key);
- return file_entry.value;
- }
- return null;
- },
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- var realpath_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- const realpath = try os.realpath(file_path, &realpath_buf);
-
- const held = self.os_data.table_lock.acquire();
- defer held.release();
-
- const entry = self.os_data.file_table.getEntry(realpath) orelse return null;
- entry.value_ptr.cancelled = true;
- // @TODO Close the fd here?
- await entry.value_ptr.putter_frame;
- self.allocator.free(entry.key_ptr.*);
- self.allocator.destroy(entry.value_ptr.*);
-
- assert(self.os_data.file_table.remove(realpath));
- },
- else => @compileError("Unsupported OS"),
- }
- }
-
- fn linuxEventPutter(self: *Self) void {
- global_event_loop.beginOneEvent();
-
- defer {
- std.debug.assert(self.os_data.wd_table.count() == 0);
- self.os_data.wd_table.deinit(self.allocator);
- os.close(self.os_data.inotify_fd);
- self.allocator.free(self.channel.buffer_nodes);
- self.channel.deinit();
- global_event_loop.finishOneEvent();
- }
-
- var event_buf: [4096]u8 align(@alignOf(os.linux.inotify_event)) = undefined;
-
- while (!self.os_data.cancelled) {
- const bytes_read = global_event_loop.read(self.os_data.inotify_fd, &event_buf, false) catch unreachable;
-
- var ptr: [*]u8 = &event_buf;
- const end_ptr = ptr + bytes_read;
- while (@intFromPtr(ptr) < @intFromPtr(end_ptr)) {
- const ev = @as(*const os.linux.inotify_event, @ptrCast(ptr));
- if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) {
- const basename_ptr = ptr + @sizeOf(os.linux.inotify_event);
- const basename = std.mem.span(@as([*:0]u8, @ptrCast(basename_ptr)));
-
- const dir = &self.os_data.wd_table.get(ev.wd).?;
- if (dir.file_table.getEntry(basename)) |file_value| {
- self.channel.put(Event{
- .id = .CloseWrite,
- .data = file_value.value_ptr.*,
- .dirname = dir.dirname,
- .basename = file_value.key_ptr.*,
- });
- }
- } else if (ev.mask & os.linux.IN_IGNORED == os.linux.IN_IGNORED) {
- // Directory watch was removed
- const held = self.os_data.table_lock.acquire();
- defer held.release();
- if (self.os_data.wd_table.fetchRemove(ev.wd)) |wd_entry| {
- var file_it = wd_entry.value.file_table.keyIterator();
- while (file_it.next()) |file_entry| {
- self.allocator.free(file_entry.*);
- }
- self.allocator.free(wd_entry.value.dirname);
- wd_entry.value.file_table.deinit(self.allocator);
- }
- } else if (ev.mask & os.linux.IN_DELETE == os.linux.IN_DELETE) {
- // File or directory was removed or deleted
- const basename_ptr = ptr + @sizeOf(os.linux.inotify_event);
- const basename = std.mem.span(@as([*:0]u8, @ptrCast(basename_ptr)));
-
- const dir = &self.os_data.wd_table.get(ev.wd).?;
- if (dir.file_table.getEntry(basename)) |file_value| {
- self.channel.put(Event{
- .id = .Delete,
- .data = file_value.value_ptr.*,
- .dirname = dir.dirname,
- .basename = file_value.key_ptr.*,
- });
- }
- }
-
- ptr = @alignCast(ptr + @sizeOf(os.linux.inotify_event) + ev.len);
- }
- }
- }
- };
-}
-
-const test_tmp_dir = "std_event_fs_test";
-
-test "write a file, watch it, write it again, delete it" {
- if (!std.io.is_async) return error.SkipZigTest;
- // TODO https://github.com/ziglang/zig/issues/1908
- if (builtin.single_threaded) return error.SkipZigTest;
-
- try std.fs.cwd().makePath(test_tmp_dir);
- defer std.fs.cwd().deleteTree(test_tmp_dir) catch {};
-
- return testWriteWatchWriteDelete(std.testing.allocator);
-}
-
-fn testWriteWatchWriteDelete(allocator: Allocator) !void {
- const file_path = try std.fs.path.join(allocator, &[_][]const u8{ test_tmp_dir, "file.txt" });
- defer allocator.free(file_path);
-
- const contents =
- \\line 1
- \\line 2
- ;
- const line2_offset = 7;
-
- // first just write then read the file
- try std.fs.cwd().writeFile(file_path, contents);
-
- const read_contents = try std.fs.cwd().readFileAlloc(allocator, file_path, 1024 * 1024);
- defer allocator.free(read_contents);
- try testing.expectEqualSlices(u8, contents, read_contents);
-
- // now watch the file
- var watch = try Watch(void).init(allocator, 0);
- defer watch.deinit();
-
- try testing.expect((try watch.addFile(file_path, {})) == null);
-
- var ev = async watch.channel.get();
- var ev_consumed = false;
- defer if (!ev_consumed) {
- _ = await ev;
- };
-
- // overwrite line 2
- const file = try std.fs.cwd().openFile(file_path, .{ .mode = .read_write });
- {
- defer file.close();
- const write_contents = "lorem ipsum";
- var iovec = [_]os.iovec_const{.{
- .iov_base = write_contents,
- .iov_len = write_contents.len,
- }};
- _ = try file.pwritevAll(&iovec, line2_offset);
- }
-
- switch ((try await ev).id) {
- .CloseWrite => {
- ev_consumed = true;
- },
- .Delete => @panic("wrong event"),
- }
-
- const contents_updated = try std.fs.cwd().readFileAlloc(allocator, file_path, 1024 * 1024);
- defer allocator.free(contents_updated);
-
- try testing.expectEqualSlices(u8,
- \\line 1
- \\lorem ipsum
- , contents_updated);
-
- ev = async watch.channel.get();
- ev_consumed = false;
-
- try std.fs.cwd().deleteFile(file_path);
- switch ((try await ev).id) {
- .Delete => {
- ev_consumed = true;
- },
- .CloseWrite => @panic("wrong event"),
- }
-}
-
-// TODO Test: Add another file watch, remove the old file watch, get an event in the new
diff --git a/lib/std/io.zig b/lib/std/io.zig
index 985297e051..3eb3c4e391 100644
--- a/lib/std/io.zig
+++ b/lib/std/io.zig
@@ -12,22 +12,6 @@ const meta = std.meta;
const File = std.fs.File;
const Allocator = std.mem.Allocator;
-pub const Mode = enum {
- /// I/O operates normally, waiting for the operating system syscalls to complete.
- blocking,
-
- /// I/O functions are generated async and rely on a global event loop. Event-based I/O.
- evented,
-};
-
-const mode = std.options.io_mode;
-pub const is_async = mode != .blocking;
-
-/// This is an enum value to use for I/O mode at runtime, since it takes up zero bytes at runtime,
-/// and makes expressions comptime-known when `is_async` is `false`.
-pub const ModeOverride = if (is_async) Mode else enum { blocking };
-pub const default_mode: ModeOverride = if (is_async) Mode.evented else .blocking;
-
fn getStdOutHandle() os.fd_t {
if (builtin.os.tag == .windows) {
if (builtin.zig_backend == .stage2_aarch64) {
@@ -44,14 +28,8 @@ fn getStdOutHandle() os.fd_t {
return os.STDOUT_FILENO;
}
-/// TODO: async stdout on windows without a dedicated thread.
-/// https://github.com/ziglang/zig/pull/4816#issuecomment-604521023
pub fn getStdOut() File {
- return File{
- .handle = getStdOutHandle(),
- .capable_io_mode = .blocking,
- .intended_io_mode = default_mode,
- };
+ return File{ .handle = getStdOutHandle() };
}
fn getStdErrHandle() os.fd_t {
@@ -70,14 +48,8 @@ fn getStdErrHandle() os.fd_t {
return os.STDERR_FILENO;
}
-/// This returns a `File` that is configured to block with every write, in order
-/// to facilitate better debugging. This can be changed by modifying the `intended_io_mode` field.
pub fn getStdErr() File {
- return File{
- .handle = getStdErrHandle(),
- .capable_io_mode = .blocking,
- .intended_io_mode = .blocking,
- };
+ return File{ .handle = getStdErrHandle() };
}
fn getStdInHandle() os.fd_t {
@@ -96,14 +68,8 @@ fn getStdInHandle() os.fd_t {
return os.STDIN_FILENO;
}
-/// TODO: async stdin on windows without a dedicated thread.
-/// https://github.com/ziglang/zig/pull/4816#issuecomment-604521023
pub fn getStdIn() File {
- return File{
- .handle = getStdInHandle(),
- .capable_io_mode = .blocking,
- .intended_io_mode = default_mode,
- };
+ return File{ .handle = getStdInHandle() };
}
pub fn GenericReader(
diff --git a/lib/std/log.zig b/lib/std/log.zig
index 1c5b60ff1a..5c4650cc1c 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -18,12 +18,12 @@
//! ```
//! const std = @import("std");
//!
-//! pub const std_options = struct {
+//! pub const std_options = .{
//! // Set the log level to info
-//! pub const log_level = .info;
+//! .log_level = .info,
//!
//! // Define logFn to override the std implementation
-//! pub const logFn = myLogFn;
+//! .logFn = myLogFn,
//! };
//!
//! pub fn myLogFn(
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 28967cba55..154e2f7375 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -651,7 +651,7 @@ pub const Ip6Address = extern struct {
};
pub fn connectUnixSocket(path: []const u8) !Stream {
- const opt_non_block = if (std.io.is_async) os.SOCK.NONBLOCK else 0;
+ const opt_non_block = 0;
const sockfd = try os.socket(
os.AF.UNIX,
os.SOCK.STREAM | os.SOCK.CLOEXEC | opt_non_block,
@@ -660,17 +660,9 @@ pub fn connectUnixSocket(path: []const u8) !Stream {
errdefer os.closeSocket(sockfd);
var addr = try std.net.Address.initUnix(path);
+ try os.connect(sockfd, &addr.any, addr.getOsSockLen());
- if (std.io.is_async) {
- const loop = std.event.Loop.instance orelse return error.WouldBlock;
- try loop.connect(sockfd, &addr.any, addr.getOsSockLen());
- } else {
- try os.connect(sockfd, &addr.any, addr.getOsSockLen());
- }
-
- return Stream{
- .handle = sockfd,
- };
+ return Stream{ .handle = sockfd };
}
fn if_nametoindex(name: []const u8) IPv6InterfaceError!u32 {
@@ -742,18 +734,13 @@ pub fn tcpConnectToHost(allocator: mem.Allocator, name: []const u8, port: u16) T
pub const TcpConnectToAddressError = std.os.SocketError || std.os.ConnectError;
pub fn tcpConnectToAddress(address: Address) TcpConnectToAddressError!Stream {
- const nonblock = if (std.io.is_async) os.SOCK.NONBLOCK else 0;
+ const nonblock = 0;
const sock_flags = os.SOCK.STREAM | nonblock |
(if (builtin.target.os.tag == .windows) 0 else os.SOCK.CLOEXEC);
const sockfd = try os.socket(address.any.family, sock_flags, os.IPPROTO.TCP);
errdefer os.closeSocket(sockfd);
- if (std.io.is_async) {
- const loop = std.event.Loop.instance orelse return error.WouldBlock;
- try loop.connect(sockfd, &address.any, address.getOsSockLen());
- } else {
- try os.connect(sockfd, &address.any, address.getOsSockLen());
- }
+ try os.connect(sockfd, &address.any, address.getOsSockLen());
return Stream{ .handle = sockfd };
}
@@ -1618,11 +1605,7 @@ fn resMSendRc(
if (answers[i].len == 0) {
var j: usize = 0;
while (j < ns.len) : (j += 1) {
- if (std.io.is_async) {
- _ = std.event.Loop.instance.?.sendto(fd, queries[i], os.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined;
- } else {
- _ = os.sendto(fd, queries[i], os.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined;
- }
+ _ = os.sendto(fd, queries[i], os.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined;
}
}
}
@@ -1637,10 +1620,7 @@ fn resMSendRc(
while (true) {
var sl_copy = sl;
- const rlen = if (std.io.is_async)
- std.event.Loop.instance.?.recvfrom(fd, answer_bufs[next], 0, &sa.any, &sl_copy) catch break
- else
- os.recvfrom(fd, answer_bufs[next], 0, &sa.any, &sl_copy) catch break;
+ const rlen = os.recvfrom(fd, answer_bufs[next], 0, &sa.any, &sl_copy) catch break;
// Ignore non-identifiable packets
if (rlen < 4) continue;
@@ -1666,11 +1646,7 @@ fn resMSendRc(
0, 3 => {},
2 => if (servfail_retry != 0) {
servfail_retry -= 1;
- if (std.io.is_async) {
- _ = std.event.Loop.instance.?.sendto(fd, queries[i], os.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined;
- } else {
- _ = os.sendto(fd, queries[i], os.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined;
- }
+ _ = os.sendto(fd, queries[i], os.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined;
},
else => continue,
}
@@ -1778,14 +1754,10 @@ pub const Stream = struct {
pub fn read(self: Stream, buffer: []u8) ReadError!usize {
if (builtin.os.tag == .windows) {
- return os.windows.ReadFile(self.handle, buffer, null, io.default_mode);
+ return os.windows.ReadFile(self.handle, buffer, null);
}
- if (std.io.is_async) {
- return std.event.Loop.instance.?.read(self.handle, buffer, false);
- } else {
- return os.read(self.handle, buffer);
- }
+ return os.read(self.handle, buffer);
}
pub fn readv(s: Stream, iovecs: []const os.iovec) ReadError!usize {
@@ -1793,7 +1765,7 @@ pub const Stream = struct {
// TODO improve this to use ReadFileScatter
if (iovecs.len == 0) return @as(usize, 0);
const first = iovecs[0];
- return os.windows.ReadFile(s.handle, first.iov_base[0..first.iov_len], null, io.default_mode);
+ return os.windows.ReadFile(s.handle, first.iov_base[0..first.iov_len], null);
}
return os.readv(s.handle, iovecs);
@@ -1827,14 +1799,10 @@ pub const Stream = struct {
/// use non-blocking I/O.
pub fn write(self: Stream, buffer: []const u8) WriteError!usize {
if (builtin.os.tag == .windows) {
- return os.windows.WriteFile(self.handle, buffer, null, io.default_mode);
+ return os.windows.WriteFile(self.handle, buffer, null);
}
- if (std.io.is_async) {
- return std.event.Loop.instance.?.write(self.handle, buffer, false);
- } else {
- return os.write(self.handle, buffer);
- }
+ return os.write(self.handle, buffer);
}
pub fn writeAll(self: Stream, bytes: []const u8) WriteError!void {
@@ -1847,15 +1815,7 @@ pub const Stream = struct {
/// See https://github.com/ziglang/zig/issues/7699
/// See equivalent function: `std.fs.File.writev`.
pub fn writev(self: Stream, iovecs: []const os.iovec_const) WriteError!usize {
- if (std.io.is_async) {
- // TODO improve to actually take advantage of writev syscall, if available.
- if (iovecs.len == 0) return 0;
- const first_buffer = iovecs[0].iov_base[0..iovecs[0].iov_len];
- try self.write(first_buffer);
- return first_buffer.len;
- } else {
- return os.writev(self.handle, iovecs);
- }
+ return os.writev(self.handle, iovecs);
}
/// The `iovecs` parameter is mutable because this function needs to mutate the fields in
@@ -1927,7 +1887,7 @@ pub const StreamServer = struct {
}
pub fn listen(self: *StreamServer, address: Address) !void {
- const nonblock = if (std.io.is_async) os.SOCK.NONBLOCK else 0;
+ const nonblock = 0;
const sock_flags = os.SOCK.STREAM | os.SOCK.CLOEXEC | nonblock;
var use_sock_flags: u32 = sock_flags;
if (self.force_nonblocking) use_sock_flags |= os.SOCK.NONBLOCK;
@@ -2016,14 +1976,7 @@ pub const StreamServer = struct {
pub fn accept(self: *StreamServer) AcceptError!Connection {
var accepted_addr: Address = undefined;
var adr_len: os.socklen_t = @sizeOf(Address);
- const accept_result = blk: {
- if (std.io.is_async) {
- const loop = std.event.Loop.instance orelse return error.UnexpectedError;
- break :blk loop.accept(self.sockfd.?, &accepted_addr.any, &adr_len, os.SOCK.CLOEXEC);
- } else {
- break :blk os.accept(self.sockfd.?, &accepted_addr.any, &adr_len, os.SOCK.CLOEXEC);
- }
- };
+ const accept_result = os.accept(self.sockfd.?, &accepted_addr.any, &adr_len, os.SOCK.CLOEXEC);
if (accept_result) |fd| {
return Connection{
diff --git a/lib/std/net/test.zig b/lib/std/net/test.zig
index 8cc2a09d10..e359abb6d5 100644
--- a/lib/std/net/test.zig
+++ b/lib/std/net/test.zig
@@ -207,54 +207,6 @@ test "listen on a port, send bytes, receive bytes" {
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
}
-test "listen on a port, send bytes, receive bytes, async-only" {
- if (!std.io.is_async) return error.SkipZigTest;
-
- if (builtin.os.tag != .linux and !builtin.os.tag.isDarwin()) {
- // TODO build abstractions for other operating systems
- return error.SkipZigTest;
- }
-
- // TODO doing this at comptime crashed the compiler
- const localhost = try net.Address.parseIp("127.0.0.1", 0);
-
- var server = net.StreamServer.init(net.StreamServer.Options{});
- defer server.deinit();
- try server.listen(localhost);
-
- var server_frame = async testServer(&server);
- var client_frame = async testClient(server.listen_address);
-
- try await server_frame;
- try await client_frame;
-}
-
-test "listen on ipv4 try connect on ipv6 then ipv4" {
- if (!std.io.is_async) return error.SkipZigTest;
-
- if (builtin.os.tag != .linux and !builtin.os.tag.isDarwin()) {
- // TODO build abstractions for other operating systems
- return error.SkipZigTest;
- }
-
- // TODO doing this at comptime crashed the compiler
- const localhost = try net.Address.parseIp("127.0.0.1", 0);
-
- var server = net.StreamServer.init(net.StreamServer.Options{});
- defer server.deinit();
- try server.listen(localhost);
-
- var server_frame = async testServer(&server);
- var client_frame = async testClientToHost(
- testing.allocator,
- "localhost",
- server.listen_address.getPort(),
- );
-
- try await server_frame;
- try await client_frame;
-}
-
test "listen on an in use port" {
if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin()) {
// TODO build abstractions for other operating systems
diff --git a/lib/std/os.zig b/lib/std/os.zig
index de414338f4..c963ca7f3b 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -683,11 +683,7 @@ fn getRandomBytesDevURandom(buf: []u8) !void {
return error.NoDevice;
}
- const file = std.fs.File{
- .handle = fd,
- .capable_io_mode = .blocking,
- .intended_io_mode = .blocking,
- };
+ const file = std.fs.File{ .handle = fd };
const stream = file.reader();
stream.readNoEof(buf) catch return error.Unexpected;
}
@@ -856,7 +852,7 @@ pub const ReadError = error{
pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
if (buf.len == 0) return 0;
if (builtin.os.tag == .windows) {
- return windows.ReadFile(fd, buf, null, std.io.default_mode);
+ return windows.ReadFile(fd, buf, null);
}
if (builtin.os.tag == .wasi and !builtin.link_libc) {
const iovs = [1]iovec{iovec{
@@ -995,7 +991,7 @@ pub const PReadError = ReadError || error{Unseekable};
pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
if (buf.len == 0) return 0;
if (builtin.os.tag == .windows) {
- return windows.ReadFile(fd, buf, offset, std.io.default_mode);
+ return windows.ReadFile(fd, buf, offset);
}
if (builtin.os.tag == .wasi and !builtin.link_libc) {
const iovs = [1]iovec{iovec{
@@ -1257,7 +1253,7 @@ pub const WriteError = error{
pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
if (bytes.len == 0) return 0;
if (builtin.os.tag == .windows) {
- return windows.WriteFile(fd, bytes, null, std.io.default_mode);
+ return windows.WriteFile(fd, bytes, null);
}
if (builtin.os.tag == .wasi and !builtin.link_libc) {
@@ -1415,7 +1411,7 @@ pub const PWriteError = WriteError || error{Unseekable};
pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
if (bytes.len == 0) return 0;
if (builtin.os.tag == .windows) {
- return windows.WriteFile(fd, bytes, offset, std.io.default_mode);
+ return windows.WriteFile(fd, bytes, offset);
}
if (builtin.os.tag == .wasi and !builtin.link_libc) {
const ciovs = [1]iovec_const{iovec_const{
@@ -1711,7 +1707,6 @@ fn openOptionsFromFlagsWindows(flags: u32) windows.OpenFileOptions {
return .{
.access_mask = access_mask,
- .io_mode = .blocking,
.creation = creation,
.filter = filter,
.follow_symlinks = follow_symlinks,
@@ -2797,7 +2792,6 @@ pub fn renameatW(
.dir = old_dir_fd,
.access_mask = windows.SYNCHRONIZE | windows.GENERIC_WRITE | windows.DELETE,
.creation = windows.FILE_OPEN,
- .io_mode = .blocking,
.filter = .any, // This function is supposed to rename both files and directories.
.follow_symlinks = false,
}) catch |err| switch (err) {
@@ -2962,7 +2956,6 @@ pub fn mkdiratW(dir_fd: fd_t, sub_path_w: []const u16, mode: u32) MakeDirError!v
.dir = dir_fd,
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
.creation = windows.FILE_CREATE,
- .io_mode = .blocking,
.filter = .dir_only,
}) catch |err| switch (err) {
error.IsDir => unreachable,
@@ -3042,7 +3035,6 @@ pub fn mkdirW(dir_path_w: []const u16, mode: u32) MakeDirError!void {
.dir = std.fs.cwd().fd,
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
.creation = windows.FILE_CREATE,
- .io_mode = .blocking,
.filter = .dir_only,
}) catch |err| switch (err) {
error.IsDir => unreachable,
@@ -5440,7 +5432,6 @@ pub fn realpathW(pathname: []const u16, out_buffer: *[MAX_PATH_BYTES]u8) RealPat
.access_mask = access_mask,
.share_access = share_access,
.creation = creation,
- .io_mode = .blocking,
.filter = .any,
}) catch |err| switch (err) {
error.WouldBlock => unreachable,
@@ -6404,12 +6395,7 @@ pub fn sendfile(
// manually, the same as ENOSYS.
break :sf;
},
- .AGAIN => if (std.event.Loop.instance) |loop| {
- loop.waitUntilFdWritable(out_fd);
- continue;
- } else {
- return error.WouldBlock;
- },
+ .AGAIN => return error.WouldBlock,
.IO => return error.InputOutput,
.PIPE => return error.BrokenPipe,
.NOMEM => return error.SystemResources,
@@ -6476,18 +6462,12 @@ pub fn sendfile(
.AGAIN => if (amt != 0) {
return amt;
- } else if (std.event.Loop.instance) |loop| {
- loop.waitUntilFdWritable(out_fd);
- continue;
} else {
return error.WouldBlock;
},
.BUSY => if (amt != 0) {
return amt;
- } else if (std.event.Loop.instance) |loop| {
- loop.waitUntilFdReadable(in_fd);
- continue;
} else {
return error.WouldBlock;
},
@@ -6550,9 +6530,6 @@ pub fn sendfile(
.AGAIN => if (amt != 0) {
return amt;
- } else if (std.event.Loop.instance) |loop| {
- loop.waitUntilFdWritable(out_fd);
- continue;
} else {
return error.WouldBlock;
},
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index d48261a97a..2a19462a7c 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -49,7 +49,6 @@ pub const OpenFileOptions = struct {
sa: ?*SECURITY_ATTRIBUTES = null,
share_access: ULONG = FILE_SHARE_WRITE | FILE_SHARE_READ | FILE_SHARE_DELETE,
creation: ULONG,
- io_mode: std.io.ModeOverride,
/// If true, tries to open path as a directory.
/// Defaults to false.
filter: Filter = .file_only,
@@ -95,7 +94,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
.SecurityQualityOfService = null,
};
var io: IO_STATUS_BLOCK = undefined;
- const blocking_flag: ULONG = if (options.io_mode == .blocking) FILE_SYNCHRONOUS_IO_NONALERT else 0;
+ const blocking_flag: ULONG = FILE_SYNCHRONOUS_IO_NONALERT;
const file_or_dir_flag: ULONG = switch (options.filter) {
.file_only => FILE_NON_DIRECTORY_FILE,
.dir_only => FILE_DIRECTORY_FILE,
@@ -119,12 +118,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
0,
);
switch (rc) {
- .SUCCESS => {
- if (std.io.is_async and options.io_mode == .evented) {
- _ = CreateIoCompletionPort(result, std.event.Loop.instance.?.os_data.io_port, undefined, undefined) catch undefined;
- }
- return result;
- },
+ .SUCCESS => return result,
.OBJECT_NAME_INVALID => unreachable,
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
@@ -457,81 +451,36 @@ pub const ReadFileError = error{
/// If buffer's length exceeds what a Windows DWORD integer can hold, it will be broken into
/// multiple non-atomic reads.
-pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.ModeOverride) ReadFileError!usize {
- if (io_mode != .blocking) {
- const loop = std.event.Loop.instance.?;
- // TODO make getting the file position non-blocking
- const off = if (offset) |o| o else try SetFilePointerEx_CURRENT_get(in_hFile);
- var resume_node = std.event.Loop.ResumeNode.Basic{
- .base = .{
- .id = .Basic,
- .handle = @frame(),
- .overlapped = OVERLAPPED{
- .Internal = 0,
- .InternalHigh = 0,
- .DUMMYUNIONNAME = .{
- .DUMMYSTRUCTNAME = .{
- .Offset = @as(u32, @truncate(off)),
- .OffsetHigh = @as(u32, @truncate(off >> 32)),
- },
+pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64) ReadFileError!usize {
+ while (true) {
+ const want_read_count: DWORD = @min(@as(DWORD, maxInt(DWORD)), buffer.len);
+ var amt_read: DWORD = undefined;
+ var overlapped_data: OVERLAPPED = undefined;
+ const overlapped: ?*OVERLAPPED = if (offset) |off| blk: {
+ overlapped_data = .{
+ .Internal = 0,
+ .InternalHigh = 0,
+ .DUMMYUNIONNAME = .{
+ .DUMMYSTRUCTNAME = .{
+ .Offset = @as(u32, @truncate(off)),
+ .OffsetHigh = @as(u32, @truncate(off >> 32)),
},
- .hEvent = null,
},
- },
- };
- loop.beginOneEvent();
- suspend {
- // TODO handle buffer bigger than DWORD can hold
- _ = kernel32.ReadFile(in_hFile, buffer.ptr, @as(DWORD, @intCast(buffer.len)), null, &resume_node.base.overlapped);
- }
- var bytes_transferred: DWORD = undefined;
- if (kernel32.GetOverlappedResult(in_hFile, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) {
+ .hEvent = null,
+ };
+ break :blk &overlapped_data;
+ } else null;
+ if (kernel32.ReadFile(in_hFile, buffer.ptr, want_read_count, &amt_read, overlapped) == 0) {
switch (kernel32.GetLastError()) {
.IO_PENDING => unreachable,
- .OPERATION_ABORTED => return error.OperationAborted,
- .BROKEN_PIPE => return error.BrokenPipe,
+ .OPERATION_ABORTED => continue,
+ .BROKEN_PIPE => return 0,
+ .HANDLE_EOF => return 0,
.NETNAME_DELETED => return error.NetNameDeleted,
- .HANDLE_EOF => return @as(usize, bytes_transferred),
else => |err| return unexpectedError(err),
}
}
- if (offset == null) {
- // TODO make setting the file position non-blocking
- const new_off = off + bytes_transferred;
- try SetFilePointerEx_CURRENT(in_hFile, @as(i64, @bitCast(new_off)));
- }
- return @as(usize, bytes_transferred);
- } else {
- while (true) {
- const want_read_count: DWORD = @min(@as(DWORD, maxInt(DWORD)), buffer.len);
- var amt_read: DWORD = undefined;
- var overlapped_data: OVERLAPPED = undefined;
- const overlapped: ?*OVERLAPPED = if (offset) |off| blk: {
- overlapped_data = .{
- .Internal = 0,
- .InternalHigh = 0,
- .DUMMYUNIONNAME = .{
- .DUMMYSTRUCTNAME = .{
- .Offset = @as(u32, @truncate(off)),
- .OffsetHigh = @as(u32, @truncate(off >> 32)),
- },
- },
- .hEvent = null,
- };
- break :blk &overlapped_data;
- } else null;
- if (kernel32.ReadFile(in_hFile, buffer.ptr, want_read_count, &amt_read, overlapped) == 0) {
- switch (kernel32.GetLastError()) {
- .IO_PENDING => unreachable,
- .OPERATION_ABORTED => continue,
- .BROKEN_PIPE => return 0,
- .HANDLE_EOF => return 0,
- .NETNAME_DELETED => return error.NetNameDeleted,
- else => |err| return unexpectedError(err),
- }
- }
- return amt_read;
- }
+ return amt_read;
}
}
@@ -550,85 +499,38 @@ pub fn WriteFile(
handle: HANDLE,
bytes: []const u8,
offset: ?u64,
- io_mode: std.io.ModeOverride,
) WriteFileError!usize {
- if (std.event.Loop.instance != null and io_mode != .blocking) {
- const loop = std.event.Loop.instance.?;
- // TODO make getting the file position non-blocking
- const off = if (offset) |o| o else try SetFilePointerEx_CURRENT_get(handle);
- var resume_node = std.event.Loop.ResumeNode.Basic{
- .base = .{
- .id = .Basic,
- .handle = @frame(),
- .overlapped = OVERLAPPED{
- .Internal = 0,
- .InternalHigh = 0,
- .DUMMYUNIONNAME = .{
- .DUMMYSTRUCTNAME = .{
- .Offset = @as(u32, @truncate(off)),
- .OffsetHigh = @as(u32, @truncate(off >> 32)),
- },
- },
- .hEvent = null,
+ var bytes_written: DWORD = undefined;
+ var overlapped_data: OVERLAPPED = undefined;
+ const overlapped: ?*OVERLAPPED = if (offset) |off| blk: {
+ overlapped_data = .{
+ .Internal = 0,
+ .InternalHigh = 0,
+ .DUMMYUNIONNAME = .{
+ .DUMMYSTRUCTNAME = .{
+ .Offset = @as(u32, @truncate(off)),
+ .OffsetHigh = @as(u32, @truncate(off >> 32)),
},
},
+ .hEvent = null,
};
- loop.beginOneEvent();
- suspend {
- const adjusted_len = math.cast(DWORD, bytes.len) orelse maxInt(DWORD);
- _ = kernel32.WriteFile(handle, bytes.ptr, adjusted_len, null, &resume_node.base.overlapped);
+ break :blk &overlapped_data;
+ } else null;
+ const adjusted_len = math.cast(u32, bytes.len) orelse maxInt(u32);
+ if (kernel32.WriteFile(handle, bytes.ptr, adjusted_len, &bytes_written, overlapped) == 0) {
+ switch (kernel32.GetLastError()) {
+ .INVALID_USER_BUFFER => return error.SystemResources,
+ .NOT_ENOUGH_MEMORY => return error.SystemResources,
+ .OPERATION_ABORTED => return error.OperationAborted,
+ .NOT_ENOUGH_QUOTA => return error.SystemResources,
+ .IO_PENDING => unreachable,
+ .BROKEN_PIPE => return error.BrokenPipe,
+ .INVALID_HANDLE => return error.NotOpenForWriting,
+ .LOCK_VIOLATION => return error.LockViolation,
+ else => |err| return unexpectedError(err),
}
- var bytes_transferred: DWORD = undefined;
- if (kernel32.GetOverlappedResult(handle, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) {
- switch (kernel32.GetLastError()) {
- .IO_PENDING => unreachable,
- .INVALID_USER_BUFFER => return error.SystemResources,
- .NOT_ENOUGH_MEMORY => return error.SystemResources,
- .OPERATION_ABORTED => return error.OperationAborted,
- .NOT_ENOUGH_QUOTA => return error.SystemResources,
- .BROKEN_PIPE => return error.BrokenPipe,
- else => |err| return unexpectedError(err),
- }
- }
- if (offset == null) {
- // TODO make setting the file position non-blocking
- const new_off = off + bytes_transferred;
- try SetFilePointerEx_CURRENT(handle, @as(i64, @bitCast(new_off)));
- }
- return bytes_transferred;
- } else {
- var bytes_written: DWORD = undefined;
- var overlapped_data: OVERLAPPED = undefined;
- const overlapped: ?*OVERLAPPED = if (offset) |off| blk: {
- overlapped_data = .{
- .Internal = 0,
- .InternalHigh = 0,
- .DUMMYUNIONNAME = .{
- .DUMMYSTRUCTNAME = .{
- .Offset = @as(u32, @truncate(off)),
- .OffsetHigh = @as(u32, @truncate(off >> 32)),
- },
- },
- .hEvent = null,
- };
- break :blk &overlapped_data;
- } else null;
- const adjusted_len = math.cast(u32, bytes.len) orelse maxInt(u32);
- if (kernel32.WriteFile(handle, bytes.ptr, adjusted_len, &bytes_written, overlapped) == 0) {
- switch (kernel32.GetLastError()) {
- .INVALID_USER_BUFFER => return error.SystemResources,
- .NOT_ENOUGH_MEMORY => return error.SystemResources,
- .OPERATION_ABORTED => return error.OperationAborted,
- .NOT_ENOUGH_QUOTA => return error.SystemResources,
- .IO_PENDING => unreachable,
- .BROKEN_PIPE => return error.BrokenPipe,
- .INVALID_HANDLE => return error.NotOpenForWriting,
- .LOCK_VIOLATION => return error.LockViolation,
- else => |err| return unexpectedError(err),
- }
- }
- return bytes_written;
}
+ return bytes_written;
}
pub const SetCurrentDirectoryError = error{
@@ -732,7 +634,6 @@ pub fn CreateSymbolicLink(
.access_mask = SYNCHRONIZE | GENERIC_READ | GENERIC_WRITE,
.dir = dir,
.creation = FILE_CREATE,
- .io_mode = .blocking,
.filter = if (is_directory) .dir_only else .file_only,
}) catch |err| switch (err) {
error.IsDir => return error.PathAlreadyExists,
@@ -1256,7 +1157,6 @@ pub fn GetFinalPathNameByHandle(
.access_mask = SYNCHRONIZE,
.share_access = FILE_SHARE_READ | FILE_SHARE_WRITE,
.creation = FILE_OPEN,
- .io_mode = .blocking,
}) catch |err| switch (err) {
error.IsDir => unreachable,
error.NotDir => unreachable,
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index d0623145a0..9640ec3569 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -513,7 +513,7 @@ pub const Pdb = struct {
};
pub fn init(allocator: mem.Allocator, path: []const u8) !Pdb {
- const file = try fs.cwd().openFile(path, .{ .intended_io_mode = .blocking });
+ const file = try fs.cwd().openFile(path, .{});
errdefer file.close();
return Pdb{
diff --git a/lib/std/start.zig b/lib/std/start.zig
index f04e812271..9e9872fe93 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -347,7 +347,7 @@ fn WinStartup() callconv(std.os.windows.WINAPI) noreturn {
std.debug.maybeEnableSegfaultHandler();
- std.os.windows.ntdll.RtlExitUserProcess(initEventLoopAndCallMain());
+ std.os.windows.ntdll.RtlExitUserProcess(callMain());
}
fn wWinMainCRTStartup() callconv(std.os.windows.WINAPI) noreturn {
@@ -358,7 +358,7 @@ fn wWinMainCRTStartup() callconv(std.os.windows.WINAPI) noreturn {
std.debug.maybeEnableSegfaultHandler();
- const result: std.os.windows.INT = initEventLoopAndCallWinMain();
+ const result: std.os.windows.INT = call_wWinMain();
std.os.windows.ntdll.RtlExitUserProcess(@as(std.os.windows.UINT, @bitCast(result)));
}
@@ -422,7 +422,7 @@ fn posixCallMainAndExit() callconv(.C) noreturn {
expandStackSize(phdrs);
}
- std.os.exit(@call(.always_inline, callMainWithArgs, .{ argc, argv, envp }));
+ std.os.exit(callMainWithArgs(argc, argv, envp));
}
fn expandStackSize(phdrs: []elf.Phdr) void {
@@ -459,14 +459,14 @@ fn expandStackSize(phdrs: []elf.Phdr) void {
}
}
-fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {
+inline fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {
std.os.argv = argv[0..argc];
std.os.environ = envp;
std.debug.maybeEnableSegfaultHandler();
std.os.maybeIgnoreSigpipe();
- return initEventLoopAndCallMain();
+ return callMain();
}
fn main(c_argc: c_int, c_argv: [*][*:0]c_char, c_envp: [*:null]?[*:0]c_char) callconv(.C) c_int {
@@ -481,92 +481,18 @@ fn main(c_argc: c_int, c_argv: [*][*:0]c_char, c_envp: [*:null]?[*:0]c_char) cal
expandStackSize(phdrs);
}
- return @call(.always_inline, callMainWithArgs, .{ @as(usize, @intCast(c_argc)), @as([*][*:0]u8, @ptrCast(c_argv)), envp });
+ return callMainWithArgs(@as(usize, @intCast(c_argc)), @as([*][*:0]u8, @ptrCast(c_argv)), envp);
}
fn mainWithoutEnv(c_argc: c_int, c_argv: [*][*:0]c_char) callconv(.C) c_int {
std.os.argv = @as([*][*:0]u8, @ptrCast(c_argv))[0..@as(usize, @intCast(c_argc))];
- return @call(.always_inline, callMain, .{});
+ return callMain();
}
// General error message for a malformed return type
const bad_main_ret = "expected return type of main to be 'void', '!void', 'noreturn', 'u8', or '!u8'";
-// This is marked inline because for some reason LLVM in release mode fails to inline it,
-// and we want fewer call frames in stack traces.
-inline fn initEventLoopAndCallMain() u8 {
- if (std.event.Loop.instance) |loop| {
- if (loop == std.event.Loop.default_instance) {
- loop.init() catch |err| {
- std.log.err("{s}", .{@errorName(err)});
- if (@errorReturnTrace()) |trace| {
- std.debug.dumpStackTrace(trace.*);
- }
- return 1;
- };
- defer loop.deinit();
-
- var result: u8 = undefined;
- var frame: @Frame(callMainAsync) = undefined;
- _ = @asyncCall(&frame, &result, callMainAsync, .{loop});
- loop.run();
- return result;
- }
- }
-
- // This is marked inline because for some reason LLVM in release mode fails to inline it,
- // and we want fewer call frames in stack traces.
- return @call(.always_inline, callMain, .{});
-}
-
-// This is marked inline because for some reason LLVM in release mode fails to inline it,
-// and we want fewer call frames in stack traces.
-// TODO This function is duplicated from initEventLoopAndCallMain instead of using generics
-// because it is working around stage1 compiler bugs.
-inline fn initEventLoopAndCallWinMain() std.os.windows.INT {
- if (std.event.Loop.instance) |loop| {
- if (loop == std.event.Loop.default_instance) {
- loop.init() catch |err| {
- std.log.err("{s}", .{@errorName(err)});
- if (@errorReturnTrace()) |trace| {
- std.debug.dumpStackTrace(trace.*);
- }
- return 1;
- };
- defer loop.deinit();
-
- var result: std.os.windows.INT = undefined;
- var frame: @Frame(callWinMainAsync) = undefined;
- _ = @asyncCall(&frame, &result, callWinMainAsync, .{loop});
- loop.run();
- return result;
- }
- }
-
- // This is marked inline because for some reason LLVM in release mode fails to inline it,
- // and we want fewer call frames in stack traces.
- return @call(.always_inline, call_wWinMain, .{});
-}
-
-fn callMainAsync(loop: *std.event.Loop) callconv(.Async) u8 {
- // This prevents the event loop from terminating at least until main() has returned.
- // TODO This shouldn't be needed here; it should be in the event loop code.
- loop.beginOneEvent();
- defer loop.finishOneEvent();
- return callMain();
-}
-
-fn callWinMainAsync(loop: *std.event.Loop) callconv(.Async) std.os.windows.INT {
- // This prevents the event loop from terminating at least until main() has returned.
- // TODO This shouldn't be needed here; it should be in the event loop code.
- loop.beginOneEvent();
- defer loop.finishOneEvent();
- return call_wWinMain();
-}
-
-// This is not marked inline because it is called with @asyncCall when
-// there is an event loop.
-pub fn callMain() u8 {
+pub inline fn callMain() u8 {
switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) {
.NoReturn => {
root.main();
diff --git a/lib/std/std.zig b/lib/std/std.zig
index 047da005c3..6db92d7f8a 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -92,9 +92,6 @@ pub const elf = @import("elf.zig");
/// Enum-related metaprogramming helpers.
pub const enums = @import("enums.zig");
-/// Evented I/O data structures.
-pub const event = @import("event.zig");
-
/// First in, first out data structures.
pub const fifo = @import("fifo.zig");
@@ -198,79 +195,35 @@ pub const zig = @import("zig.zig");
pub const start = @import("start.zig");
const root = @import("root");
-const options_override = if (@hasDecl(root, "std_options")) root.std_options else struct {};
/// Stdlib-wide options that can be overridden by the root file.
-pub const options = struct {
- pub const enable_segfault_handler: bool = if (@hasDecl(options_override, "enable_segfault_handler"))
- options_override.enable_segfault_handler
- else
- debug.default_enable_segfault_handler;
+pub const options: Options = if (@hasDecl(root, "std_options")) root.std_options else .{};
+
+pub const Options = struct {
+ enable_segfault_handler: bool = debug.default_enable_segfault_handler,
/// Function used to implement `std.fs.cwd` for WASI.
- pub const wasiCwd: fn () fs.Dir = if (@hasDecl(options_override, "wasiCwd"))
- options_override.wasiCwd
- else
- fs.defaultWasiCwd;
-
- /// The application's chosen I/O mode.
- pub const io_mode: io.Mode = if (@hasDecl(options_override, "io_mode"))
- options_override.io_mode
- else if (@hasDecl(options_override, "event_loop"))
- .evented
- else
- .blocking;
-
- pub const event_loop: event.Loop.Instance = if (@hasDecl(options_override, "event_loop"))
- options_override.event_loop
- else
- event.Loop.default_instance;
-
- pub const event_loop_mode: event.Loop.Mode = if (@hasDecl(options_override, "event_loop_mode"))
- options_override.event_loop_mode
- else
- event.Loop.default_mode;
+ wasiCwd: fn () os.wasi.fd_t = fs.defaultWasiCwd,
/// The current log level.
- pub const log_level: log.Level = if (@hasDecl(options_override, "log_level"))
- options_override.log_level
- else
- log.default_level;
+ log_level: log.Level = log.default_level,
- pub const log_scope_levels: []const log.ScopeLevel = if (@hasDecl(options_override, "log_scope_levels"))
- options_override.log_scope_levels
- else
- &.{};
+ log_scope_levels: []const log.ScopeLevel = &.{},
- pub const logFn: fn (
+ logFn: fn (
comptime message_level: log.Level,
comptime scope: @TypeOf(.enum_literal),
comptime format: []const u8,
args: anytype,
- ) void = if (@hasDecl(options_override, "logFn"))
- options_override.logFn
- else
- log.defaultLog;
+ ) void = log.defaultLog,
- pub const fmt_max_depth = if (@hasDecl(options_override, "fmt_max_depth"))
- options_override.fmt_max_depth
- else
- fmt.default_max_depth;
+ fmt_max_depth: usize = fmt.default_max_depth,
- pub const cryptoRandomSeed: fn (buffer: []u8) void = if (@hasDecl(options_override, "cryptoRandomSeed"))
- options_override.cryptoRandomSeed
- else
- @import("crypto/tlcsprng.zig").defaultRandomSeed;
+ cryptoRandomSeed: fn (buffer: []u8) void = @import("crypto/tlcsprng.zig").defaultRandomSeed,
- pub const crypto_always_getrandom: bool = if (@hasDecl(options_override, "crypto_always_getrandom"))
- options_override.crypto_always_getrandom
- else
- false;
+ crypto_always_getrandom: bool = false,
- pub const crypto_fork_safety: bool = if (@hasDecl(options_override, "crypto_fork_safety"))
- options_override.crypto_fork_safety
- else
- true;
+ crypto_fork_safety: bool = true,
/// By default Zig disables SIGPIPE by setting a "no-op" handler for it. Set this option
/// to `true` to prevent that.
@@ -283,35 +236,22 @@ pub const options = struct {
/// cases it's unclear why the process was terminated. By capturing SIGPIPE instead, functions that
/// write to broken pipes will return the EPIPE error (error.BrokenPipe) and the program can handle
/// it like any other error.
- pub const keep_sigpipe: bool = if (@hasDecl(options_override, "keep_sigpipe"))
- options_override.keep_sigpipe
- else
- false;
+ keep_sigpipe: bool = false,
/// By default, std.http.Client will support HTTPS connections. Set this option to `true` to
/// disable TLS support.
///
/// This will likely reduce the size of the binary, but it will also make it impossible to
/// make a HTTPS connection.
- pub const http_disable_tls = if (@hasDecl(options_override, "http_disable_tls"))
- options_override.http_disable_tls
- else
- false;
+ http_disable_tls: bool = false,
- pub const side_channels_mitigations: crypto.SideChannelsMitigations = if (@hasDecl(options_override, "side_channels_mitigations"))
- options_override.side_channels_mitigations
- else
- crypto.default_side_channels_mitigations;
+ side_channels_mitigations: crypto.SideChannelsMitigations = crypto.default_side_channels_mitigations,
};
// This forces the start.zig file to be imported, and the comptime logic inside that
// file decides whether to export any appropriate start symbols, and call main.
comptime {
_ = start;
-
- for (@typeInfo(options_override).Struct.decls) |decl| {
- if (!@hasDecl(options, decl.name)) @compileError("no option named " ++ decl.name);
- }
}
test {
diff --git a/lib/std/time.zig b/lib/std/time.zig
index dad81385e9..012dc838f8 100644
--- a/lib/std/time.zig
+++ b/lib/std/time.zig
@@ -9,11 +9,6 @@ pub const epoch = @import("time/epoch.zig");
/// Spurious wakeups are possible and no precision of timing is guaranteed.
pub fn sleep(nanoseconds: u64) void {
- // TODO: opting out of async sleeping?
- if (std.io.is_async) {
- return std.event.Loop.instance.?.sleep(nanoseconds);
- }
-
if (builtin.os.tag == .windows) {
const big_ms_from_ns = nanoseconds / ns_per_ms;
const ms = math.cast(os.windows.DWORD, big_ms_from_ns) orelse math.maxInt(os.windows.DWORD);
diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig
index 7d5abaf6ea..915450c50a 100644
--- a/lib/std/zig/Server.zig
+++ b/lib/std/zig/Server.zig
@@ -38,8 +38,6 @@ pub const Message = struct {
/// Trailing:
/// * name: [tests_len]u32
/// - null-terminated string_bytes index
- /// * async_frame_len: [tests_len]u32,
- /// - 0 means not async
/// * expected_panic_msg: [tests_len]u32,
/// - null-terminated string_bytes index
 /// - 0 means does not expect panic
@@ -210,7 +208,6 @@ pub fn serveErrorBundle(s: *Server, error_bundle: std.zig.ErrorBundle) !void {
pub const TestMetadata = struct {
names: []u32,
- async_frame_sizes: []u32,
expected_panic_msgs: []u32,
string_bytes: []const u8,
};
@@ -220,17 +217,16 @@ pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void {
.tests_len = bswap(@as(u32, @intCast(test_metadata.names.len))),
.string_bytes_len = bswap(@as(u32, @intCast(test_metadata.string_bytes.len))),
};
+ const trailing = 2;
const bytes_len = @sizeOf(OutMessage.TestMetadata) +
- 3 * 4 * test_metadata.names.len + test_metadata.string_bytes.len;
+ trailing * @sizeOf(u32) * test_metadata.names.len + test_metadata.string_bytes.len;
if (need_bswap) {
bswap_u32_array(test_metadata.names);
- bswap_u32_array(test_metadata.async_frame_sizes);
bswap_u32_array(test_metadata.expected_panic_msgs);
}
defer if (need_bswap) {
bswap_u32_array(test_metadata.names);
- bswap_u32_array(test_metadata.async_frame_sizes);
bswap_u32_array(test_metadata.expected_panic_msgs);
};
@@ -241,7 +237,6 @@ pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void {
std.mem.asBytes(&header),
// TODO: implement @ptrCast between slices changing the length
std.mem.sliceAsBytes(test_metadata.names),
- std.mem.sliceAsBytes(test_metadata.async_frame_sizes),
std.mem.sliceAsBytes(test_metadata.expected_panic_msgs),
test_metadata.string_bytes,
});
diff --git a/lib/std/zig/system/linux.zig b/lib/std/zig/system/linux.zig
index d2d31b4079..ad49f4d626 100644
--- a/lib/std/zig/system/linux.zig
+++ b/lib/std/zig/system/linux.zig
@@ -328,7 +328,7 @@ fn CpuinfoParser(comptime impl: anytype) type {
}
pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
- var f = fs.openFileAbsolute("/proc/cpuinfo", .{ .intended_io_mode = .blocking }) catch |err| switch (err) {
+ var f = fs.openFileAbsolute("/proc/cpuinfo", .{}) catch |err| switch (err) {
else => return null,
};
defer f.close();
diff --git a/lib/test_runner.zig b/lib/test_runner.zig
index 18608d298a..e11d187d2a 100644
--- a/lib/test_runner.zig
+++ b/lib/test_runner.zig
@@ -3,9 +3,8 @@ const std = @import("std");
const io = std.io;
const builtin = @import("builtin");
-pub const std_options = struct {
- pub const io_mode: io.Mode = builtin.test_io_mode;
- pub const logFn = log;
+pub const std_options = .{
+ .logFn = log,
};
var log_err_count: usize = 0;
@@ -65,24 +64,19 @@ fn mainServer() !void {
const test_fns = builtin.test_functions;
const names = try std.testing.allocator.alloc(u32, test_fns.len);
defer std.testing.allocator.free(names);
- const async_frame_sizes = try std.testing.allocator.alloc(u32, test_fns.len);
- defer std.testing.allocator.free(async_frame_sizes);
const expected_panic_msgs = try std.testing.allocator.alloc(u32, test_fns.len);
defer std.testing.allocator.free(expected_panic_msgs);
- for (test_fns, names, async_frame_sizes, expected_panic_msgs) |test_fn, *name, *async_frame_size, *expected_panic_msg| {
+ for (test_fns, names, expected_panic_msgs) |test_fn, *name, *expected_panic_msg| {
name.* = @as(u32, @intCast(string_bytes.items.len));
try string_bytes.ensureUnusedCapacity(std.testing.allocator, test_fn.name.len + 1);
string_bytes.appendSliceAssumeCapacity(test_fn.name);
string_bytes.appendAssumeCapacity(0);
-
- async_frame_size.* = @as(u32, @intCast(test_fn.async_frame_size orelse 0));
expected_panic_msg.* = 0;
}
try server.serveTestMetadata(.{
.names = names,
- .async_frame_sizes = async_frame_sizes,
.expected_panic_msgs = expected_panic_msgs,
.string_bytes = string_bytes.items,
});
@@ -93,8 +87,6 @@ fn mainServer() !void {
log_err_count = 0;
const index = try server.receiveBody_u32();
const test_fn = builtin.test_functions[index];
- if (test_fn.async_frame_size != null)
- @panic("TODO test runner implement async tests");
var fail = false;
var skip = false;
var leak = false;
@@ -163,23 +155,7 @@ fn mainTerminal() void {
if (!have_tty) {
std.debug.print("{d}/{d} {s}... ", .{ i + 1, test_fn_list.len, test_fn.name });
}
- const result = if (test_fn.async_frame_size) |size| switch (std.options.io_mode) {
- .evented => blk: {
- if (async_frame_buffer.len < size) {
- std.heap.page_allocator.free(async_frame_buffer);
- async_frame_buffer = std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size) catch @panic("out of memory");
- }
- const casted_fn = @as(fn () callconv(.Async) anyerror!void, @ptrCast(test_fn.func));
- break :blk await @asyncCall(async_frame_buffer, {}, casted_fn, .{});
- },
- .blocking => {
- skip_count += 1;
- test_node.end();
- progress.log("SKIP (async test)\n", .{});
- continue;
- },
- } else test_fn.func();
- if (result) |_| {
+ if (test_fn.func()) |_| {
ok_count += 1;
test_node.end();
if (!have_tty) std.debug.print("OK\n", .{});
diff --git a/src/Builtin.zig b/src/Builtin.zig
index 3211dd3a69..fb0c1e9490 100644
--- a/src/Builtin.zig
+++ b/src/Builtin.zig
@@ -3,7 +3,6 @@ zig_backend: std.builtin.CompilerBackend,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
is_test: bool,
-test_evented_io: bool,
single_threaded: bool,
link_libc: bool,
link_libcpp: bool,
@@ -222,17 +221,6 @@ pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
\\pub var test_functions: []const std.builtin.TestFn = undefined; // overwritten later
\\
);
- if (opts.test_evented_io) {
- try buffer.appendSlice(
- \\pub const test_io_mode = .evented;
- \\
- );
- } else {
- try buffer.appendSlice(
- \\pub const test_io_mode = .blocking;
- \\
- );
- }
}
}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index d2ed241960..8d7d3ea8b1 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -1613,7 +1613,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
hash.add(options.config.use_lib_llvm);
hash.add(options.config.dll_export_fns);
hash.add(options.config.is_test);
- hash.add(options.config.test_evented_io);
hash.addOptionalBytes(options.test_filter);
hash.addOptionalBytes(options.test_name_prefix);
hash.add(options.skip_linker_dependencies);
@@ -2476,7 +2475,6 @@ fn addNonIncrementalStuffToCacheManifest(
try addModuleTableToCacheHash(gpa, arena, &man.hash, mod.root_mod, mod.main_mod, .{ .files = man });
// Synchronize with other matching comments: ZigOnlyHashStuff
- man.hash.add(comp.config.test_evented_io);
man.hash.addOptionalBytes(comp.test_filter);
man.hash.addOptionalBytes(comp.test_name_prefix);
man.hash.add(comp.skip_linker_dependencies);
diff --git a/src/Compilation/Config.zig b/src/Compilation/Config.zig
index 78273f66e4..2f6422b28a 100644
--- a/src/Compilation/Config.zig
+++ b/src/Compilation/Config.zig
@@ -54,7 +54,6 @@ import_memory: bool,
export_memory: bool,
shared_memory: bool,
is_test: bool,
-test_evented_io: bool,
debug_format: DebugFormat,
root_strip: bool,
root_error_tracing: bool,
@@ -104,7 +103,6 @@ pub const Options = struct {
import_memory: ?bool = null,
export_memory: ?bool = null,
shared_memory: ?bool = null,
- test_evented_io: bool = false,
debug_format: ?DebugFormat = null,
dll_export_fns: ?bool = null,
rdynamic: ?bool = null,
@@ -477,7 +475,6 @@ pub fn resolve(options: Options) ResolveError!Config {
.output_mode = options.output_mode,
.have_zcu = options.have_zcu,
.is_test = options.is_test,
- .test_evented_io = options.test_evented_io,
.link_mode = link_mode,
.link_libc = link_libc,
.link_libcpp = link_libcpp,
diff --git a/src/Module.zig b/src/Module.zig
index b51ceda1a9..d332c4db53 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -5620,10 +5620,6 @@ pub fn populateTestFunctions(
}
const decl = mod.declPtr(decl_index);
const test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod);
- const null_usize = try mod.intern(.{ .opt = .{
- .ty = try mod.intern(.{ .opt_type = .usize_type }),
- .val = .none,
- } });
const array_decl_index = d: {
// Add mod.test_functions to an array decl then make the test_functions
@@ -5631,11 +5627,6 @@ pub fn populateTestFunctions(
const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count());
defer gpa.free(test_fn_vals);
- // Add a dependency on each test name and function pointer.
- var array_decl_dependencies = std.ArrayListUnmanaged(Decl.Index){};
- defer array_decl_dependencies.deinit(gpa);
- try array_decl_dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2);
-
for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| {
const test_decl = mod.declPtr(test_decl_index);
// TODO: write something like getCoercedInts to avoid needing to dupe
@@ -5655,8 +5646,6 @@ pub fn populateTestFunctions(
});
break :n test_name_decl_index;
};
- array_decl_dependencies.appendAssumeCapacity(test_decl_index);
- array_decl_dependencies.appendAssumeCapacity(test_name_decl_index);
try mod.linkerUpdateDecl(test_name_decl_index);
const test_fn_fields = .{
@@ -5682,8 +5671,6 @@ pub fn populateTestFunctions(
} }),
.addr = .{ .decl = test_decl_index },
} }),
- // async_frame_size
- null_usize,
};
test_fn_val.* = try mod.intern(.{ .aggregate = .{
.ty = test_fn_ty.toIntern(),
diff --git a/src/Package/Module.zig b/src/Package/Module.zig
index 66d5a21eab..6fadbf1dd5 100644
--- a/src/Package/Module.zig
+++ b/src/Package/Module.zig
@@ -349,7 +349,6 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
.output_mode = options.global.output_mode,
.link_mode = options.global.link_mode,
.is_test = options.global.is_test,
- .test_evented_io = options.global.test_evented_io,
.single_threaded = single_threaded,
.link_libc = options.global.link_libc,
.link_libcpp = options.global.link_libcpp,
diff --git a/src/main.zig b/src/main.zig
index b6463489bc..1a9c264b7d 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -29,27 +29,27 @@ const AstGen = @import("AstGen.zig");
const mingw = @import("mingw.zig");
const Server = std.zig.Server;
-pub const std_options = struct {
- pub const wasiCwd = wasi_cwd;
- pub const logFn = log;
- pub const enable_segfault_handler = false;
+pub const std_options = .{
+ .wasiCwd = wasi_cwd,
+ .logFn = log,
+ .enable_segfault_handler = false,
- pub const log_level: std.log.Level = switch (builtin.mode) {
+ .log_level = switch (builtin.mode) {
.Debug => .debug,
.ReleaseSafe, .ReleaseFast => .info,
.ReleaseSmall => .err,
- };
+ },
};
// Crash report needs to override the panic handler
pub const panic = crash_report.panic;
var wasi_preopens: fs.wasi.Preopens = undefined;
-pub fn wasi_cwd() fs.Dir {
+pub fn wasi_cwd() std.os.wasi.fd_t {
// Expect the first preopen to be current working directory.
const cwd_fd: std.os.fd_t = 3;
assert(mem.eql(u8, wasi_preopens.names[cwd_fd], "."));
- return .{ .fd = cwd_fd };
+ return cwd_fd;
}
fn getWasiPreopen(name: []const u8) Compilation.Directory {
@@ -1335,8 +1335,6 @@ fn buildOutputType(
create_module.each_lib_rpath = false;
} else if (mem.eql(u8, arg, "--test-cmd-bin")) {
try test_exec_args.append(null);
- } else if (mem.eql(u8, arg, "--test-evented-io")) {
- create_module.opts.test_evented_io = true;
} else if (mem.eql(u8, arg, "--test-no-exec")) {
test_no_exec = true;
} else if (mem.eql(u8, arg, "-ftime-report")) {
diff --git a/test/cases/compile_errors/missing_main_fn_in_executable.zig b/test/cases/compile_errors/missing_main_fn_in_executable.zig
index 12756234cb..c1af588b99 100644
--- a/test/cases/compile_errors/missing_main_fn_in_executable.zig
+++ b/test/cases/compile_errors/missing_main_fn_in_executable.zig
@@ -7,4 +7,3 @@
// : note: struct declared here
// : note: called from here
// : note: called from here
-// : note: called from here
diff --git a/test/cases/compile_errors/private_main_fn.zig b/test/cases/compile_errors/private_main_fn.zig
index 6e53fbdce2..a867e14ef9 100644
--- a/test/cases/compile_errors/private_main_fn.zig
+++ b/test/cases/compile_errors/private_main_fn.zig
@@ -9,4 +9,3 @@ fn main() void {}
// :1:1: note: declared here
// : note: called from here
// : note: called from here
-// : note: called from here
diff --git a/test/compare_output.zig b/test/compare_output.zig
index d30b6714c9..d07864360f 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -440,14 +440,14 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.add("std.log per scope log level override",
\\const std = @import("std");
\\
- \\pub const std_options = struct {
- \\ pub const log_level: std.log.Level = .debug;
+ \\pub const std_options = .{
+ \\ .log_level = .debug,
\\
- \\ pub const log_scope_levels = &[_]std.log.ScopeLevel{
+ \\ .log_scope_levels = &.{
\\ .{ .scope = .a, .level = .warn },
\\ .{ .scope = .c, .level = .err },
- \\ };
- \\ pub const logFn = log;
+ \\ },
+ \\ .logFn = log,
\\};
\\
\\const loga = std.log.scoped(.a);
@@ -497,9 +497,9 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.add("std.heap.LoggingAllocator logs to std.log",
\\const std = @import("std");
\\
- \\pub const std_options = struct {
- \\ pub const log_level: std.log.Level = .debug;
- \\ pub const logFn = log;
+ \\pub const std_options = .{
+ \\ .log_level = .debug,
+ \\ .logFn = log,
\\};
\\
\\pub fn main() !void {
diff --git a/test/src/Cases.zig b/test/src/Cases.zig
index 614cf690a9..3e3470dfe5 100644
--- a/test/src/Cases.zig
+++ b/test/src/Cases.zig
@@ -1207,8 +1207,8 @@ const WaitGroup = std.Thread.WaitGroup;
const build_options = @import("build_options");
const Package = @import("../../src/Package.zig");
-pub const std_options = struct {
- pub const log_level: std.log.Level = .err;
+pub const std_options = .{
+ .log_level = .err,
};
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{
diff --git a/test/standalone/http.zig b/test/standalone/http.zig
index 3cad556482..5002d8910d 100644
--- a/test/standalone/http.zig
+++ b/test/standalone/http.zig
@@ -7,8 +7,8 @@ const Client = http.Client;
const mem = std.mem;
const testing = std.testing;
-pub const std_options = struct {
- pub const http_disable_tls = true;
+pub const std_options = .{
+ .http_disable_tls = true,
};
const max_header_size = 8192;
diff --git a/test/standalone/issue_7030.zig b/test/standalone/issue_7030.zig
index 8a4146e84a..4d4a7279f6 100644
--- a/test/standalone/issue_7030.zig
+++ b/test/standalone/issue_7030.zig
@@ -1,7 +1,7 @@
const std = @import("std");
-pub const std_options = struct {
- pub const logFn = log;
+pub const std_options = .{
+ .logFn = log,
};
pub fn log(
diff --git a/test/standalone/issue_9693/main.zig b/test/standalone/issue_9693/main.zig
deleted file mode 100644
index 19e9c5b96c..0000000000
--- a/test/standalone/issue_9693/main.zig
+++ /dev/null
@@ -1,4 +0,0 @@
-pub const std_options = struct {
- pub const io_mode = .evented;
-};
-pub fn main() void {}
diff --git a/test/standalone/sigpipe/breakpipe.zig b/test/standalone/sigpipe/breakpipe.zig
index 3623451db5..d4d123c317 100644
--- a/test/standalone/sigpipe/breakpipe.zig
+++ b/test/standalone/sigpipe/breakpipe.zig
@@ -1,11 +1,11 @@
const std = @import("std");
const build_options = @import("build_options");
-pub const std_options = if (build_options.keep_sigpipe) struct {
- pub const keep_sigpipe = true;
-} else struct {
- // intentionally not setting keep_sigpipe to ensure the default behavior is equivalent to false
-};
+pub usingnamespace if (build_options.keep_sigpipe) struct {
+ pub const std_options = .{
+ .keep_sigpipe = true,
+ };
+} else struct {};
pub fn main() !void {
const pipe = try std.os.pipe();