Merge branch 'master' of https://github.com/ziglang/zig into 5002-fix-entrypoint-with-winmain

.github/FUNDING.yml (vendored, 2 changes)
@@ -1 +1 @@
-github: [andrewrk]
+github: [ziglang]
@@ -53,6 +53,8 @@ set(ZIG_STATIC off CACHE BOOL "Attempt to build a static zig executable (not com
 set(ZIG_STATIC_LLVM off CACHE BOOL "Prefer linking against static LLVM libraries")
 set(ZIG_ENABLE_MEM_PROFILE off CACHE BOOL "Activate memory usage instrumentation")
 set(ZIG_PREFER_CLANG_CPP_DYLIB off CACHE BOOL "Try to link against -lclang-cpp")
+set(ZIG_WORKAROUND_4799 off CACHE BOOL "workaround for https://github.com/ziglang/zig/issues/4799")
+set(ZIG_WORKAROUND_POLLY_SO off CACHE STRING "workaround for https://github.com/ziglang/zig/issues/4799")
 set(ZIG_USE_CCACHE off CACHE BOOL "Use ccache if available")

 if(CCACHE_PROGRAM AND ZIG_USE_CCACHE)

@@ -88,6 +90,11 @@ if(APPLE AND ZIG_STATIC)
     list(APPEND LLVM_LIBRARIES "${ZLIB}")
 endif()

+if(APPLE AND ZIG_WORKAROUND_4799)
+    # eg: ${CMAKE_PREFIX_PATH} could be /usr/local/opt/llvm/
+    list(APPEND LLVM_LIBRARIES "-Wl,${CMAKE_PREFIX_PATH}/lib/libPolly.a" "-Wl,${CMAKE_PREFIX_PATH}/lib/libPollyPPCG.a" "-Wl,${CMAKE_PREFIX_PATH}/lib/libPollyISL.a")
+endif()
+
 set(ZIG_CPP_LIB_DIR "${CMAKE_BINARY_DIR}/zig_cpp")

 # Handle multi-config builds and place each into a common lib. The VS generator

@@ -288,6 +295,7 @@ set(ZIG_SOURCES
     "${CMAKE_SOURCE_DIR}/src/target.cpp"
     "${CMAKE_SOURCE_DIR}/src/tokenizer.cpp"
     "${CMAKE_SOURCE_DIR}/src/util.cpp"
+    "${CMAKE_SOURCE_DIR}/src/softfloat_ext.cpp"
     "${ZIG_SOURCES_MEM_PROFILE}"
 )
 set(OPTIMIZED_C_SOURCES

@@ -396,11 +404,15 @@ add_library(zig_cpp STATIC ${ZIG_CPP_SOURCES})
 set_target_properties(zig_cpp PROPERTIES
     COMPILE_FLAGS ${EXE_CFLAGS}
 )

 target_link_libraries(zig_cpp LINK_PUBLIC
     ${CLANG_LIBRARIES}
     ${LLD_LIBRARIES}
     ${LLVM_LIBRARIES}
 )
+if(ZIG_WORKAROUND_POLLY_SO)
+    target_link_libraries(zig_cpp LINK_PUBLIC "-Wl,${ZIG_WORKAROUND_POLLY_SO}")
+endif()
+
 add_library(opt_c_util STATIC ${OPTIMIZED_C_SOURCES})
 set_target_properties(opt_c_util PROPERTIES
@@ -152,6 +152,11 @@ The relevant tests for this feature are:
    same, and that the program exits cleanly. This kind of test coverage is preferred, when
    possible, because it makes sure that the resulting Zig code is actually viable.

+ * test/stage1/behavior/translate_c_macros.zig - each test case consists of a Zig test
+   which checks that the relevant macros in test/stage1/behavior/translate_c_macros.h
+   have the correct values. Macros have to be tested separately since they are expanded by
+   Clang in run_translated_c tests.
+
  * test/translate_c.zig - each test case is C code, with a list of expected strings which
    must be found in the resulting Zig code. This kind of test is more precise in what it
    measures, but does not provide test coverage of whether the resulting Zig code is valid.
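
To make the distinction concrete, here is a minimal sketch of what a case in test/translate_c.zig looks like: C source paired with strings that must appear in the translated Zig. The exact `cases.add` helper and its signature are an assumption based on the surrounding test harness, not part of this diff:

```
// Hypothetical sketch of a test/translate_c.zig case.
cases.add("simple function",
    \\int add(int a, int b) {
    \\    return a + b;
    \\}
, &[_][]const u8{
    \\pub export fn add(a: c_int, b: c_int) c_int {
    \\    return a + b;
    \\}
});
```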
@@ -51,6 +51,8 @@ cmake ..
 make install
 ```

+Need help? [Troubleshooting Build Issues](https://github.com/ziglang/zig/wiki/Troubleshooting-Build-Issues)
+
 ##### MacOS

 ```

@@ -64,9 +66,11 @@ make install

 You will now run into this issue:
 [homebrew and llvm 10 packages in apt.llvm.org are broken with undefined reference to getPollyPluginInfo](https://github.com/ziglang/zig/issues/4799)
-Please help upstream LLVM and Homebrew solve this issue, there is nothing Zig
-can do about it. See that issue for a workaround you can do in the meantime.
+or
+[error: unable to create target: 'Unable to find target for this triple (no targets are registered)'](https://github.com/ziglang/zig/issues/5055),
+in which case try `-DZIG_WORKAROUND_4799=ON`
+
 Hopefully this will be fixed upstream with LLVM 10.0.1.

 ##### Windows
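
Putting the new options together, a macOS build that hits issue 4799 would look roughly like this. This is a sketch, not part of the diff; the Homebrew LLVM prefix /usr/local/opt/llvm is an assumption taken from the CMake comment above and should be adjusted to your installation:

```
cd build
cmake .. -DCMAKE_PREFIX_PATH=/usr/local/opt/llvm -DZIG_WORKAROUND_4799=ON
make install
```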
build.zig (70 changes)
@@ -34,26 +34,12 @@ pub fn build(b: *Builder) !void {

     const test_step = b.step("test", "Run all the tests");

-    const config_h_text = if (b.option(
-        []const u8,
-        "config_h",
-        "Path to the generated config.h",
-    )) |config_h_path|
-        try std.fs.cwd().readFileAlloc(b.allocator, toNativePathSep(b, config_h_path), max_config_h_bytes)
-    else
-        try findAndReadConfigH(b);
-
     var test_stage2 = b.addTest("src-self-hosted/test.zig");
     test_stage2.setBuildMode(.Debug); // note this is only the mode of the test harness
     test_stage2.addPackagePath("stage2_tests", "test/stage2/test.zig");

     const fmt_build_zig = b.addFmt(&[_][]const u8{"build.zig"});

-    var exe = b.addExecutable("zig", "src-self-hosted/main.zig");
-    exe.setBuildMode(mode);
-    test_step.dependOn(&exe.step);
-    b.default_step.dependOn(&exe.step);
-
     const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
     const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release;
     const skip_release_fast = b.option(bool, "skip-release-fast", "Main test suite skips release-fast builds") orelse skip_release;

@@ -63,17 +49,44 @@ pub fn build(b: *Builder) !void {

     const only_install_lib_files = b.option(bool, "lib-files-only", "Only install library files") orelse false;
     const enable_llvm = b.option(bool, "enable-llvm", "Build self-hosted compiler with LLVM backend enabled") orelse false;
-    if (enable_llvm) {
-        var ctx = parseConfigH(b, config_h_text);
-        ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
+    const config_h_path_option = b.option([]const u8, "config_h", "Path to the generated config.h");

-        try configureStage2(b, exe, ctx);
-    }
-    if (!only_install_lib_files) {
-        exe.install();
+    var exe = b.addExecutable("zig", "src-self-hosted/main.zig");
+    exe.setBuildMode(mode);
+    test_step.dependOn(&exe.step);
+    b.default_step.dependOn(&exe.step);
+
+    if (enable_llvm) {
+        const config_h_text = if (config_h_path_option) |config_h_path|
+            try std.fs.cwd().readFileAlloc(b.allocator, toNativePathSep(b, config_h_path), max_config_h_bytes)
+        else
+            try findAndReadConfigH(b);
+
+        var ctx = parseConfigH(b, config_h_text);
+        ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
+
+        try configureStage2(b, exe, ctx);
+    }
+    if (!only_install_lib_files) {
+        exe.install();
     }
     const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source");
-    const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse false;
-    if (link_libc) exe.linkLibC();
-
     exe.addBuildOption(bool, "enable_tracy", tracy != null);
     if (tracy) |tracy_path| {
         const client_cpp = fs.path.join(
             b.allocator,
             &[_][]const u8{ tracy_path, "TracyClient.cpp" },
         ) catch unreachable;
         exe.addIncludeDir(tracy_path);
         exe.addCSourceFile(client_cpp, &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" });
         exe.linkSystemLibraryName("c++");
         exe.linkLibC();
     }
+    const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse false;
+    if (link_libc) exe.linkLibC();

     b.installDirectory(InstallDirectoryOptions{
         .source_dir = "lib",

@@ -126,7 +139,10 @@ pub fn build(b: *Builder) !void {
     test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
     test_step.dependOn(tests.addStandaloneTests(b, test_filter, modes));
     test_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
-    test_step.dependOn(tests.addCliTests(b, test_filter, modes));
+    const test_cli = tests.addCliTests(b, test_filter, modes);
+    const test_cli_step = b.step("test-cli", "Run zig cli tests");
+    test_cli_step.dependOn(test_cli);
+    test_step.dependOn(test_cli);
     test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
     test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes));
     test_step.dependOn(tests.addTranslateCTests(b, test_filter));

@@ -137,7 +153,7 @@ pub fn build(b: *Builder) !void {
     test_step.dependOn(docs_step);
 }

-fn dependOnLib(b: *Builder, lib_exe_obj: var, dep: LibraryDep) void {
+fn dependOnLib(b: *Builder, lib_exe_obj: anytype, dep: LibraryDep) void {
     for (dep.libdirs.items) |lib_dir| {
         lib_exe_obj.addLibPath(lib_dir);
     }

@@ -177,7 +193,7 @@ fn fileExists(filename: []const u8) !bool {
     return true;
 }

-fn addCppLib(b: *Builder, lib_exe_obj: var, cmake_binary_dir: []const u8, lib_name: []const u8) void {
+fn addCppLib(b: *Builder, lib_exe_obj: anytype, cmake_binary_dir: []const u8, lib_name: []const u8) void {
     lib_exe_obj.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{
         cmake_binary_dir,
         "zig_cpp",

@@ -259,7 +275,7 @@ fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep {
     return result;
 }

-fn configureStage2(b: *Builder, exe: var, ctx: Context) !void {
+fn configureStage2(b: *Builder, exe: anytype, ctx: Context) !void {
     exe.addIncludeDir("src");
     exe.addIncludeDir(ctx.cmake_binary_dir);
     addCppLib(b, exe, ctx.cmake_binary_dir, "zig_cpp");

@@ -324,7 +340,7 @@ fn configureStage2(b: *Builder, exe: var, ctx: Context) !void {
 fn addCxxKnownPath(
     b: *Builder,
     ctx: Context,
-    exe: var,
+    exe: anytype,
     objname: []const u8,
     errtxt: ?[]const u8,
 ) !void {
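
Most of the mechanical churn in this merge is the `var` parameter syntax being renamed to `anytype`. A minimal sketch of the semantics, which the rename does not change: the parameter's type is inferred at each call site.

```
const std = @import("std");

// `var` used to spell this; `anytype` is the new keyword for the same behavior.
fn addOne(x: anytype) @TypeOf(x) {
    return x + 1;
}

test "anytype parameter" {
    std.testing.expectEqual(@as(i32, 2), addOne(@as(i32, 1)));
    std.testing.expectEqual(@as(f64, 2.5), addOne(@as(f64, 1.5)));
}
```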
@@ -12,7 +12,7 @@ sudo apt-get update -q

 sudo apt-get remove -y llvm-*
 sudo rm -rf /usr/local/*
-sudo apt-get install -y libxml2-dev libclang-10-dev llvm-10 llvm-10-dev liblld-10-dev cmake s3cmd gcc-7 g++-7 ninja-build
+sudo apt-get install -y libxml2-dev libclang-10-dev llvm-10 llvm-10-dev liblld-10-dev cmake s3cmd gcc-7 g++-7 ninja-build tidy

 QEMUBASE="qemu-linux-x86_64-5.0.0-49ee115552"
 wget https://ziglang.org/deps/$QEMUBASE.tar.xz

@@ -51,6 +51,10 @@ cd build
 cmake .. -DCMAKE_BUILD_TYPE=Release -GNinja
 ninja install
 ./zig build test -Denable-qemu -Denable-wasmtime
+
+# look for HTML errors
+tidy -qe ../zig-cache/langref.html
+
 VERSION="$(./zig version)"

 if [ "${BUILD_REASON}" != "PullRequest" ]; then
@@ -40,12 +40,20 @@ jobs:
   timeoutInMinutes: 360

   steps:
+  - powershell: |
+      (New-Object Net.WebClient).DownloadFile("https://github.com/msys2/msys2-installer/releases/download/2020-06-02/msys2-base-x86_64-20200602.sfx.exe", "sfx.exe")
+      .\sfx.exe -y -o\
+      del sfx.exe
+    displayName: Download/Extract/Install MSYS2
   - script: |
-      git clone https://github.com/msys2/msys2-ci-base.git %CD:~0,2%\msys64
-      %CD:~0,2%\msys64\usr\bin\rm -rf %CD:~0,2%\msys64\.git
       set PATH=%CD:~0,2%\msys64\usr\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem
-      %CD:~0,2%\msys64\usr\bin\pacman --noconfirm -Syyuu
-    displayName: Install and Update MSYS2
+      @REM install updated filesystem package first without dependency checking
+      @REM because of: https://github.com/msys2/MSYS2-packages/issues/2021
+      %CD:~0,2%\msys64\usr\bin\bash -lc "pacman --noconfirm -Sydd filesystem"
+    displayName: Workaround filesystem dash MSYS2 dependency issue
+  - script: |
+      %CD:~0,2%\msys64\usr\bin\bash -lc "pacman --noconfirm -Syuu"
+      %CD:~0,2%\msys64\usr\bin\bash -lc "pacman --noconfirm -Syuu"
+    displayName: Update MSYS2
   - task: DownloadSecureFile@1
     inputs:
       secureFile: s3cfg
@@ -4,7 +4,7 @@ set -x
 set -e

 pacman -Su --needed --noconfirm
-pacman -S --needed --noconfirm wget p7zip python3-pip
+pacman -S --needed --noconfirm wget p7zip python3-pip tar xz
 pip install s3cmd
 wget -nv "https://ziglang.org/deps/llvm%2bclang%2blld-10.0.0-x86_64-windows-msvc-release-mt.tar.xz"
 tar xf llvm+clang+lld-10.0.0-x86_64-windows-msvc-release-mt.tar.xz
@@ -212,7 +212,7 @@ const Tokenizer = struct {
     }
 };

-fn parseError(tokenizer: *Tokenizer, token: Token, comptime fmt: []const u8, args: var) anyerror {
+fn parseError(tokenizer: *Tokenizer, token: Token, comptime fmt: []const u8, args: anytype) anyerror {
     const loc = tokenizer.getTokenLocation(token);
     const args_prefix = .{ tokenizer.source_file_name, loc.line + 1, loc.column + 1 };
     warn("{}:{}:{}: error: " ++ fmt ++ "\n", args_prefix ++ args);

@@ -392,7 +392,7 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
                 .n = header_stack_size,
             },
         });
-        if (try urls.put(urlized, tag_token)) |entry| {
+        if (try urls.fetchPut(urlized, tag_token)) |entry| {
             parseError(tokenizer, tag_token, "duplicate header url: #{}", .{urlized}) catch {};
             parseError(tokenizer, entry.value, "other tag here", .{}) catch {};
             return error.ParseError;

@@ -634,7 +634,7 @@ fn escapeHtml(allocator: *mem.Allocator, input: []const u8) ![]u8 {
     return buf.toOwnedSlice();
 }

-fn writeEscaped(out: var, input: []const u8) !void {
+fn writeEscaped(out: anytype, input: []const u8) !void {
     for (input) |c| {
         try switch (c) {
             '&' => out.writeAll("&amp;"),

@@ -765,7 +765,7 @@ fn isType(name: []const u8) bool {
     return false;
 }

-fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Token, raw_src: []const u8) !void {
+fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: anytype, source_token: Token, raw_src: []const u8) !void {
     const src = mem.trim(u8, raw_src, " \n");
     try out.writeAll("<code class=\"zig\">");
     var tokenizer = std.zig.Tokenizer.init(src);

@@ -825,6 +825,7 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok
             .Keyword_volatile,
             .Keyword_allowzero,
             .Keyword_while,
+            .Keyword_anytype,
             => {
                 try out.writeAll("<span class=\"tok-kw\">");
                 try writeEscaped(out, src[token.loc.start..token.loc.end]);

@@ -977,12 +978,12 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok
     try out.writeAll("</code>");
 }

-fn tokenizeAndPrint(docgen_tokenizer: *Tokenizer, out: var, source_token: Token) !void {
+fn tokenizeAndPrint(docgen_tokenizer: *Tokenizer, out: anytype, source_token: Token) !void {
     const raw_src = docgen_tokenizer.buffer[source_token.start..source_token.end];
     return tokenizeAndPrintRaw(docgen_tokenizer, out, source_token, raw_src);
 }

-fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var, zig_exe: []const u8) !void {
+fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: anytype, zig_exe: []const u8) !void {
     var code_progress_index: usize = 0;

     var env_map = try process.getEnvMap(allocator);
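
The `put` to `fetchPut` rename above comes from the std hash map rework in this merge: `fetchPut` is the variant that returns the clobbered entry, which docgen uses to report duplicate header URLs. A sketch under that assumption:

```
const std = @import("std");

test "fetchPut returns the previous entry" {
    var map = std.StringHashMap(u32).init(std.testing.allocator);
    defer map.deinit();

    // First insert: nothing was clobbered.
    std.testing.expect((try map.fetchPut("a", 1)) == null);
    // Second insert on the same key: the old entry comes back.
    const prev = (try map.fetchPut("a", 2)).?;
    std.testing.expectEqual(@as(u32, 1), prev.value);
}
```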
(File diff suppressed because it is too large.)
@@ -53,7 +53,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
         /// Deprecated: use `items` field directly.
         /// Return contents as a slice. Only valid while the list
         /// doesn't change size.
-        pub fn span(self: var) @TypeOf(self.items) {
+        pub fn span(self: anytype) @TypeOf(self.items) {
             return self.items;
         }

@@ -162,19 +162,24 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             mem.copy(T, self.items[oldlen..], items);
         }

-        /// Same as `append` except it returns the number of bytes written, which is always the same
-        /// as `m.len`. The purpose of this function existing is to match `std.io.OutStream` API.
-        /// This function may be called only when `T` is `u8`.
-        fn appendWrite(self: *Self, m: []const u8) !usize {
-            try self.appendSlice(m);
-            return m.len;
-        }
+        pub usingnamespace if (T != u8) struct {} else struct {
+            pub const Writer = std.io.Writer(*Self, error{OutOfMemory}, appendWrite);

-        /// Initializes an OutStream which will append to the list.
-        /// This function may be called only when `T` is `u8`.
-        pub fn outStream(self: *Self) std.io.OutStream(*Self, error{OutOfMemory}, appendWrite) {
-            return .{ .context = self };
-        }
+            /// Initializes a Writer which will append to the list.
+            pub fn writer(self: *Self) Writer {
+                return .{ .context = self };
+            }
+
+            /// Deprecated: use `writer`
+            pub const outStream = writer;
+
+            /// Same as `append` except it returns the number of bytes written, which is always the same
+            /// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
+            fn appendWrite(self: *Self, m: []const u8) !usize {
+                try self.appendSlice(m);
+                return m.len;
+            }
+        };

         /// Append a value to the list `n` times.
         /// Allocates more memory as necessary.

@@ -205,6 +210,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             self.capacity = new_len;
         }

+        /// Reduce length to `new_len`.
+        /// Invalidates element pointers.
+        /// Keeps capacity the same.
+        pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
+            assert(new_len <= self.items.len);
+            self.items.len = new_len;
+        }
+
         pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
             var better_capacity = self.capacity;
             if (better_capacity >= new_capacity) return;

@@ -214,7 +227,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
                 if (better_capacity >= new_capacity) break;
             }

-            const new_memory = try self.allocator.realloc(self.allocatedSlice(), better_capacity);
+            const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
             self.items.ptr = new_memory.ptr;
             self.capacity = new_memory.len;
         }

@@ -244,6 +257,24 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             return &self.items[self.items.len - 1];
         }

+        /// Resize the array, adding `n` new elements, which have `undefined` values.
+        /// The return value is an array pointing to the newly allocated elements.
+        pub fn addManyAsArray(self: *Self, comptime n: usize) !*[n]T {
+            const prev_len = self.items.len;
+            try self.resize(self.items.len + n);
+            return self.items[prev_len..][0..n];
+        }
+
+        /// Resize the array, adding `n` new elements, which have `undefined` values.
+        /// The return value is an array pointing to the newly allocated elements.
+        /// Asserts that there is already space for the new item without allocating more.
+        pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
+            assert(self.items.len + n <= self.capacity);
+            const prev_len = self.items.len;
+            self.items.len += n;
+            return self.items[prev_len..][0..n];
+        }
+
         /// Remove and return the last element from the list.
         /// Asserts the list has at least one item.
         pub fn pop(self: *Self) T {

@@ -427,6 +458,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
             self.capacity = new_len;
         }

+        /// Reduce length to `new_len`.
+        /// Invalidates element pointers.
+        /// Keeps capacity the same.
+        pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
+            assert(new_len <= self.items.len);
+            self.items.len = new_len;
+        }
+
         pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
             var better_capacity = self.capacity;
             if (better_capacity >= new_capacity) return;

@@ -436,7 +475,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
                 if (better_capacity >= new_capacity) break;
             }

-            const new_memory = try allocator.realloc(self.allocatedSlice(), better_capacity);
+            const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
             self.items.ptr = new_memory.ptr;
             self.capacity = new_memory.len;
         }

@@ -467,6 +506,24 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
             return &self.items[self.items.len - 1];
         }

+        /// Resize the array, adding `n` new elements, which have `undefined` values.
+        /// The return value is an array pointing to the newly allocated elements.
+        pub fn addManyAsArray(self: *Self, allocator: *Allocator, comptime n: usize) !*[n]T {
+            const prev_len = self.items.len;
+            try self.resize(allocator, self.items.len + n);
+            return self.items[prev_len..][0..n];
+        }
+
+        /// Resize the array, adding `n` new elements, which have `undefined` values.
+        /// The return value is an array pointing to the newly allocated elements.
+        /// Asserts that there is already space for the new item without allocating more.
+        pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
+            assert(self.items.len + n <= self.capacity);
+            const prev_len = self.items.len;
+            self.items.len += n;
+            return self.items[prev_len..][0..n];
+        }
+
         /// Remove and return the last element from the list.
         /// Asserts the list has at least one item.
         /// This operation does not invalidate any element pointers.

@@ -694,3 +751,39 @@ test "std.ArrayList.shrink still sets length on error.OutOfMemory" {
     list.shrink(1);
     testing.expect(list.items.len == 1);
 }
+
+test "std.ArrayList.writer" {
+    var list = ArrayList(u8).init(std.testing.allocator);
+    defer list.deinit();
+
+    const writer = list.writer();
+    try writer.writeAll("a");
+    try writer.writeAll("bc");
+    try writer.writeAll("d");
+    try writer.writeAll("efg");
+    testing.expectEqualSlices(u8, list.items, "abcdefg");
+}
+
+test "addManyAsArray" {
+    const a = std.testing.allocator;
+    {
+        var list = ArrayList(u8).init(a);
+        defer list.deinit();
+
+        (try list.addManyAsArray(4)).* = "aoeu".*;
+        try list.ensureCapacity(8);
+        list.addManyAsArrayAssumeCapacity(4).* = "asdf".*;
+
+        testing.expectEqualSlices(u8, list.items, "aoeuasdf");
+    }
+    {
+        var list = ArrayListUnmanaged(u8){};
+        defer list.deinit(a);
+
+        (try list.addManyAsArray(a, 4)).* = "aoeu".*;
+        try list.ensureCapacity(a, 8);
+        list.addManyAsArrayAssumeCapacity(4).* = "asdf".*;
+
+        testing.expectEqualSlices(u8, list.items, "aoeuasdf");
+    }
+}
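
Since `span()` is now deprecated in favor of the `items` field, callers migrate like this (a sketch, not part of the diff):

```
const std = @import("std");

test "read items directly instead of span()" {
    var list = std.ArrayList(u8).init(std.testing.allocator);
    defer list.deinit();

    try list.appendSlice("abc");
    // Only valid while the list doesn't change size, same as span() was.
    std.testing.expectEqualSlices(u8, "abc", list.items);
}
```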
@@ -69,7 +69,7 @@ pub fn ArrayListSentineled(comptime T: type, comptime sentinel: T) type {
         }

         /// Only works when `T` is `u8`.
-        pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: var) !Self {
+        pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: anytype) !Self {
             const size = std.math.cast(usize, std.fmt.count(format, args)) catch |err| switch (err) {
                 error.Overflow => return error.OutOfMemory,
             };

@@ -82,7 +82,7 @@ pub fn ArrayListSentineled(comptime T: type, comptime sentinel: T) type {
             self.list.deinit();
         }

-        pub fn span(self: var) @TypeOf(self.list.items[0..:sentinel]) {
+        pub fn span(self: anytype) @TypeOf(self.list.items[0..:sentinel]) {
             return self.list.items[0..self.len() :sentinel];
         }
@@ -123,10 +123,10 @@ pub fn Queue(comptime T: type) type {
         /// Dumps the contents of the queue to `stream`.
         /// Up to 4 elements from the head are dumped and the tail of the queue is
         /// dumped as well.
-        pub fn dumpToStream(self: *Self, stream: var) !void {
+        pub fn dumpToStream(self: *Self, stream: anytype) !void {
             const S = struct {
                 fn dumpRecursive(
-                    s: var,
+                    s: anytype,
                     optional_node: ?*Node,
                     indent: usize,
                     comptime depth: comptime_int,
@@ -33,10 +33,10 @@ pub const BufMap = struct {
     pub fn setMove(self: *BufMap, key: []u8, value: []u8) !void {
         const get_or_put = try self.hash_map.getOrPut(key);
         if (get_or_put.found_existing) {
-            self.free(get_or_put.kv.key);
-            get_or_put.kv.key = key;
+            self.free(get_or_put.entry.key);
+            get_or_put.entry.key = key;
         }
-        get_or_put.kv.value = value;
+        get_or_put.entry.value = value;
     }

     /// `key` and `value` are copied into the BufMap.

@@ -45,19 +45,18 @@ pub const BufMap = struct {
         errdefer self.free(value_copy);
         const get_or_put = try self.hash_map.getOrPut(key);
         if (get_or_put.found_existing) {
-            self.free(get_or_put.kv.value);
+            self.free(get_or_put.entry.value);
         } else {
-            get_or_put.kv.key = self.copy(key) catch |err| {
+            get_or_put.entry.key = self.copy(key) catch |err| {
                 _ = self.hash_map.remove(key);
                 return err;
             };
         }
-        get_or_put.kv.value = value_copy;
+        get_or_put.entry.value = value_copy;
     }

     pub fn get(self: BufMap, key: []const u8) ?[]const u8 {
-        const entry = self.hash_map.get(key) orelse return null;
-        return entry.value;
+        return self.hash_map.get(key);
     }

     pub fn delete(self: *BufMap, key: []const u8) void {

@@ -79,7 +78,7 @@ pub const BufMap = struct {
     }

     fn copy(self: BufMap, value: []const u8) ![]u8 {
-        return mem.dupe(self.hash_map.allocator, u8, value);
+        return self.hash_map.allocator.dupe(u8, value);
     }
 };
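
The kv-to-entry rename and the new `get` body are internal to BufMap; its public surface is unchanged. Roughly, under the API shown in this diff:

```
const std = @import("std");

test "BufMap copies keys and values" {
    var map = std.BufMap.init(std.testing.allocator);
    defer map.deinit();

    // set() copies both strings into the map.
    try map.set("name", "zig");
    std.testing.expectEqualSlices(u8, "zig", map.get("name").?);
    std.testing.expect(map.get("missing") == null);
}
```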
@@ -14,14 +14,12 @@ pub const BufSet = struct {
         return self;
     }

-    pub fn deinit(self: *const BufSet) void {
-        var it = self.hash_map.iterator();
-        while (true) {
-            const entry = it.next() orelse break;
+    pub fn deinit(self: *BufSet) void {
+        for (self.hash_map.items()) |entry| {
             self.free(entry.key);
         }
-
         self.hash_map.deinit();
+        self.* = undefined;
     }

     pub fn put(self: *BufSet, key: []const u8) !void {
@@ -286,7 +286,7 @@ pub const Builder = struct {
     }

     pub fn dupe(self: *Builder, bytes: []const u8) []u8 {
-        return mem.dupe(self.allocator, u8, bytes) catch unreachable;
+        return self.allocator.dupe(u8, bytes) catch unreachable;
     }

     pub fn dupePath(self: *Builder, bytes: []const u8) []u8 {

@@ -312,7 +312,7 @@ pub const Builder = struct {
         return write_file_step;
     }

-    pub fn addLog(self: *Builder, comptime format: []const u8, args: var) *LogStep {
+    pub fn addLog(self: *Builder, comptime format: []const u8, args: anytype) *LogStep {
         const data = self.fmt(format, args);
         const log_step = self.allocator.create(LogStep) catch unreachable;
         log_step.* = LogStep.init(self, data);

@@ -422,12 +422,12 @@ pub const Builder = struct {
             .type_id = type_id,
             .description = description,
         };
-        if ((self.available_options_map.put(name, available_option) catch unreachable) != null) {
+        if ((self.available_options_map.fetchPut(name, available_option) catch unreachable) != null) {
             panic("Option '{}' declared twice", .{name});
         }
         self.available_options_list.append(available_option) catch unreachable;

-        const entry = self.user_input_options.get(name) orelse return null;
+        const entry = self.user_input_options.getEntry(name) orelse return null;
         entry.value.used = true;
         switch (type_id) {
             TypeId.Bool => switch (entry.value.value) {

@@ -512,7 +512,7 @@ pub const Builder = struct {
         if (self.release_mode != null) {
             @panic("setPreferredReleaseMode must be called before standardReleaseOptions and may not be called twice");
         }
-        const description = self.fmt("create a release build ({})", .{@tagName(mode)});
+        const description = self.fmt("Create a release build ({})", .{@tagName(mode)});
         self.is_release = self.option(bool, "release", description) orelse false;
         self.release_mode = if (self.is_release) mode else builtin.Mode.Debug;
     }

@@ -522,9 +522,9 @@ pub const Builder = struct {
     pub fn standardReleaseOptions(self: *Builder) builtin.Mode {
         if (self.release_mode) |mode| return mode;

-        const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") orelse false;
-        const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") orelse false;
-        const release_small = self.option(bool, "release-small", "size optimizations on and safety off") orelse false;
+        const release_safe = self.option(bool, "release-safe", "Optimizations on and safety on") orelse false;
+        const release_fast = self.option(bool, "release-fast", "Optimizations on and safety off") orelse false;
+        const release_small = self.option(bool, "release-small", "Size optimizations on and safety off") orelse false;

         const mode = if (release_safe and !release_fast and !release_small)
             builtin.Mode.ReleaseSafe

@@ -555,7 +555,7 @@ pub const Builder = struct {
         const triple = self.option(
             []const u8,
             "target",
-            "The CPU architecture, OS, and ABI to build for.",
+            "The CPU architecture, OS, and ABI to build for",
         ) orelse return args.default_target;

         // TODO add cpu and features as part of the target triple

@@ -634,7 +634,7 @@ pub const Builder = struct {
     pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) !bool {
         const gop = try self.user_input_options.getOrPut(name);
         if (!gop.found_existing) {
-            gop.kv.value = UserInputOption{
+            gop.entry.value = UserInputOption{
                 .name = name,
                 .value = UserValue{ .Scalar = value },
                 .used = false,

@@ -643,7 +643,7 @@ pub const Builder = struct {
         }

         // option already exists
-        switch (gop.kv.value.value) {
+        switch (gop.entry.value.value) {
            UserValue.Scalar => |s| {
                 // turn it into a list
                 var list = ArrayList([]const u8).init(self.allocator);

@@ -675,7 +675,7 @@ pub const Builder = struct {
     pub fn addUserInputFlag(self: *Builder, name: []const u8) !bool {
         const gop = try self.user_input_options.getOrPut(name);
         if (!gop.found_existing) {
-            gop.kv.value = UserInputOption{
+            gop.entry.value = UserInputOption{
                 .name = name,
                 .value = UserValue{ .Flag = {} },
                 .used = false,

@@ -684,7 +684,7 @@ pub const Builder = struct {
         }

         // option already exists
-        switch (gop.kv.value.value) {
+        switch (gop.entry.value.value) {
             UserValue.Scalar => |s| {
                 warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", .{ name, name, s });
                 return true;

@@ -883,7 +883,7 @@ pub const Builder = struct {
         return fs.path.resolve(self.allocator, &[_][]const u8{ self.build_root, rel_path }) catch unreachable;
     }

-    pub fn fmt(self: *Builder, comptime format: []const u8, args: var) []u8 {
+    pub fn fmt(self: *Builder, comptime format: []const u8, args: anytype) []u8 {
         return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable;
     }

@@ -1905,10 +1905,11 @@ pub const LibExeObjStep = struct {
                 builder.allocator,
                 &[_][]const u8{ builder.cache_root, builder.fmt("{}_build_options.zig", .{self.name}) },
             );
-            try fs.cwd().writeFile(build_options_file, self.build_options_contents.span());
+            const path_from_root = builder.pathFromRoot(build_options_file);
+            try fs.cwd().writeFile(path_from_root, self.build_options_contents.span());
             try zig_args.append("--pkg-begin");
             try zig_args.append("build_options");
-            try zig_args.append(builder.pathFromRoot(build_options_file));
+            try zig_args.append(path_from_root);
             try zig_args.append("--pkg-end");
         }

@@ -2558,3 +2559,10 @@ pub const InstalledFile = struct {
     dir: InstallDir,
     path: []const u8,
 };
+
+test "" {
+    // The only purpose of this test is to get all these untested functions
+    // to be referenced to avoid regression so it is okay to skip some targets.
+    if (comptime std.Target.current.cpu.arch.ptrBitWidth() == 64)
+        std.meta.refAllDecls(@This());
+}
@@ -126,7 +126,7 @@ const BinaryElfOutput = struct {
         return segment.p_offset <= section.elfOffset and (segment.p_offset + segment.p_filesz) >= (section.elfOffset + section.fileSize);
     }

-    fn sectionValidForOutput(shdr: var) bool {
+    fn sectionValidForOutput(shdr: anytype) bool {
         return shdr.sh_size > 0 and shdr.sh_type != elf.SHT_NOBITS and
             ((shdr.sh_flags & elf.SHF_ALLOC) == elf.SHF_ALLOC);
     }

@@ -215,3 +215,7 @@ pub const InstallRawStep = struct {
         try emitRaw(builder.allocator, full_src_path, full_dest_path);
     }
 };
+
+test "" {
+    std.meta.refAllDecls(InstallRawStep);
+}
@@ -131,6 +131,15 @@ pub const CallingConvention = enum {
     AAPCSVFP,
 };

+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const SourceLocation = struct {
+    file: [:0]const u8,
+    fn_name: [:0]const u8,
+    line: u32,
+    column: u32,
+};
+
 pub const TypeId = @TagType(TypeInfo);

 /// This data structure is used by the Zig language code generation and

@@ -157,7 +166,7 @@ pub const TypeInfo = union(enum) {
     Fn: Fn,
     BoundFn: Fn,
     Opaque: void,
-    Frame: void,
+    Frame: Frame,
     AnyFrame: AnyFrame,
     Vector: Vector,
     EnumLiteral: void,

@@ -189,7 +198,7 @@ pub const TypeInfo = union(enum) {
         /// The type of the sentinel is the element type of the pointer, which is
         /// the value of the `child` field in this struct. However there is no way
         /// to refer to that type here, so we use `var`.
-        sentinel: var,
+        sentinel: anytype,

     /// This data structure is used by the Zig language code generation and
     /// therefore must be kept in sync with the compiler implementation.

@@ -211,7 +220,7 @@ pub const TypeInfo = union(enum) {
         /// The type of the sentinel is the element type of the array, which is
         /// the value of the `child` field in this struct. However there is no way
         /// to refer to that type here, so we use `var`.
-        sentinel: var,
+        sentinel: anytype,
     };

     /// This data structure is used by the Zig language code generation and

@@ -228,15 +237,16 @@ pub const TypeInfo = union(enum) {
         name: []const u8,
         offset: ?comptime_int,
         field_type: type,
-        default_value: var,
+        default_value: anytype,
     };

     /// This data structure is used by the Zig language code generation and
     /// therefore must be kept in sync with the compiler implementation.
     pub const Struct = struct {
         layout: ContainerLayout,
-        fields: []StructField,
-        decls: []Declaration,
+        fields: []const StructField,
+        decls: []const Declaration,
+        is_tuple: bool,
     };

     /// This data structure is used by the Zig language code generation and

@@ -256,12 +266,13 @@ pub const TypeInfo = union(enum) {
     /// therefore must be kept in sync with the compiler implementation.
     pub const Error = struct {
         name: []const u8,
+        /// This field is ignored when using @Type().
         value: comptime_int,
     };

     /// This data structure is used by the Zig language code generation and
     /// therefore must be kept in sync with the compiler implementation.
-    pub const ErrorSet = ?[]Error;
+    pub const ErrorSet = ?[]const Error;

     /// This data structure is used by the Zig language code generation and
     /// therefore must be kept in sync with the compiler implementation.

@@ -275,8 +286,8 @@ pub const TypeInfo = union(enum) {
     pub const Enum = struct {
         layout: ContainerLayout,
         tag_type: type,
-        fields: []EnumField,
-        decls: []Declaration,
+        fields: []const EnumField,
+        decls: []const Declaration,
         is_exhaustive: bool,
     };

@@ -293,8 +304,8 @@ pub const TypeInfo = union(enum) {
     pub const Union = struct {
         layout: ContainerLayout,
         tag_type: ?type,
-        fields: []UnionField,
-        decls: []Declaration,
+        fields: []const UnionField,
+        decls: []const Declaration,
     };

     /// This data structure is used by the Zig language code generation and

@@ -312,7 +323,13 @@ pub const TypeInfo = union(enum) {
         is_generic: bool,
         is_var_args: bool,
         return_type: ?type,
-        args: []FnArg,
+        args: []const FnArg,
+    };
+
+    /// This data structure is used by the Zig language code generation and
+    /// therefore must be kept in sync with the compiler implementation.
+    pub const Frame = struct {
+        function: anytype,
     };

     /// This data structure is used by the Zig language code generation and

@@ -352,7 +369,7 @@ pub const TypeInfo = union(enum) {
         is_export: bool,
         lib_name: ?[]const u8,
         return_type: type,
-        arg_names: [][]const u8,
+        arg_names: []const []const u8,

     /// This data structure is used by the Zig language code generation and
     /// therefore must be kept in sync with the compiler implementation.

@@ -436,7 +453,7 @@ pub const Version = struct {
         self: Version,
         comptime fmt: []const u8,
         options: std.fmt.FormatOptions,
-        out_stream: var,
+        out_stream: anytype,
     ) !void {
         if (fmt.len == 0) {
             if (self.patch == 0) {
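
The `[]T` to `[]const T` changes in `TypeInfo` only tighten the types; reading reflection data is unchanged. A sketch:

```
const std = @import("std");

test "TypeInfo slices are now const" {
    const Point = struct { x: i32, y: i32 };
    const info = @typeInfo(Point).Struct;
    // `fields` has type []const StructField after this change.
    std.testing.expectEqual(@as(usize, 2), info.fields.len);
    std.testing.expectEqualSlices(u8, "x", info.fields[0].name);
}
```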
@@ -27,7 +27,7 @@ pub usingnamespace switch (std.Target.current.os.tag) {
     else => struct {},
 };

-pub fn getErrno(rc: var) u16 {
+pub fn getErrno(rc: anytype) u16 {
     if (rc == -1) {
         return @intCast(u16, _errno().*);
     } else {

@@ -73,7 +73,6 @@ pub extern "c" fn abort() noreturn;
 pub extern "c" fn exit(code: c_int) noreturn;
 pub extern "c" fn isatty(fd: fd_t) c_int;
 pub extern "c" fn close(fd: fd_t) c_int;
-pub extern "c" fn fstatat(dirfd: fd_t, path: [*:0]const u8, stat_buf: *Stat, flags: u32) c_int;
 pub extern "c" fn lseek(fd: fd_t, offset: off_t, whence: c_int) off_t;
 pub extern "c" fn open(path: [*:0]const u8, oflag: c_uint, ...) c_int;
 pub extern "c" fn openat(fd: c_int, path: [*:0]const u8, oflag: c_uint, ...) c_int;

@@ -102,6 +101,7 @@ pub extern "c" fn pipe2(fds: *[2]fd_t, flags: u32) c_int;
 pub extern "c" fn mkdir(path: [*:0]const u8, mode: c_uint) c_int;
 pub extern "c" fn mkdirat(dirfd: fd_t, path: [*:0]const u8, mode: u32) c_int;
 pub extern "c" fn symlink(existing: [*:0]const u8, new: [*:0]const u8) c_int;
+pub extern "c" fn symlinkat(oldpath: [*:0]const u8, newdirfd: fd_t, newpath: [*:0]const u8) c_int;
 pub extern "c" fn rename(old: [*:0]const u8, new: [*:0]const u8) c_int;
 pub extern "c" fn renameat(olddirfd: fd_t, old: [*:0]const u8, newdirfd: fd_t, new: [*:0]const u8) c_int;
 pub extern "c" fn chdir(path: [*:0]const u8) c_int;

@@ -115,9 +115,11 @@ pub extern "c" fn readlinkat(dirfd: fd_t, noalias path: [*:0]const u8, noalias b
 pub usingnamespace switch (builtin.os.tag) {
     .macosx, .ios, .watchos, .tvos => struct {
         pub const realpath = @"realpath$DARWIN_EXTSN";
+        pub const fstatat = @"fstatat$INODE64";
     },
     else => struct {
         pub extern "c" fn realpath(noalias file_name: [*:0]const u8, noalias resolved_name: [*]u8) ?[*:0]u8;
+        pub extern "c" fn fstatat(dirfd: fd_t, path: [*:0]const u8, stat_buf: *Stat, flags: u32) c_int;
     },
 };

@@ -231,6 +233,17 @@ pub extern "c" fn setuid(uid: c_uint) c_int;

 pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void;
 pub extern "c" fn malloc(usize) ?*c_void;
+
+pub usingnamespace switch (builtin.os.tag) {
+    .linux, .freebsd, .kfreebsd, .netbsd, .openbsd => struct {
+        pub extern "c" fn malloc_usable_size(?*const c_void) usize;
+    },
+    .macosx, .ios, .watchos, .tvos => struct {
+        pub extern "c" fn malloc_size(?*const c_void) usize;
+    },
+    else => struct {},
+};
+
 pub extern "c" fn realloc(?*c_void, usize) ?*c_void;
 pub extern "c" fn free(*c_void) void;
 pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int;
@@ -64,7 +64,7 @@ pub const Error = union(enum) {
     NothingDeclared: SimpleError("declaration doesn't declare anything"),
     QualifierIgnored: SingleTokenError("qualifier '{}' ignored"),

-    pub fn render(self: *const Error, tree: *Tree, stream: var) !void {
+    pub fn render(self: *const Error, tree: *Tree, stream: anytype) !void {
         switch (self.*) {
             .InvalidToken => |*x| return x.render(tree, stream),
             .ExpectedToken => |*x| return x.render(tree, stream),

@@ -114,7 +114,7 @@ pub const Error = union(enum) {
         token: TokenIndex,
         expected_id: @TagType(Token.Id),

-        pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void {
+        pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void {
             const found_token = tree.tokens.at(self.token);
             if (found_token.id == .Invalid) {
                 return stream.print("expected '{}', found invalid bytes", .{self.expected_id.symbol()});

@@ -129,7 +129,7 @@ pub const Error = union(enum) {
         token: TokenIndex,
         type_spec: *Node.TypeSpec,

-        pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void {
+        pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void {
             try stream.write("invalid type specifier '");
             try type_spec.spec.print(tree, stream);
             const token_name = tree.tokens.at(self.token).id.symbol();

@@ -141,7 +141,7 @@ pub const Error = union(enum) {
         kw: TokenIndex,
         name: TokenIndex,

-        pub fn render(self: *const ExpectedToken, tree: *Tree, stream: var) !void {
+        pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void {
             return stream.print("must use '{}' tag to refer to type '{}'", .{ tree.slice(kw), tree.slice(name) });
         }
     };

@@ -150,7 +150,7 @@ pub const Error = union(enum) {
         return struct {
             token: TokenIndex,

-            pub fn render(self: *const @This(), tree: *Tree, stream: var) !void {
+            pub fn render(self: *const @This(), tree: *Tree, stream: anytype) !void {
                 const actual_token = tree.tokens.at(self.token);
                 return stream.print(msg, .{actual_token.id.symbol()});
             }

@@ -163,7 +163,7 @@ pub const Error = union(enum) {

         token: TokenIndex,

-        pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void {
+        pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: anytype) !void {
             return stream.write(msg);
         }
     };

@@ -317,7 +317,7 @@ pub const Node = struct {
             sym_type: *Type,
         },

-        pub fn print(self: *@This(), self: *const @This(), tree: *Tree, stream: var) !void {
+        pub fn print(self: *@This(), self: *const @This(), tree: *Tree, stream: anytype) !void {
             switch (self.spec) {
                 .None => unreachable,
                 .Void => |index| try stream.write(tree.slice(index)),
@@ -16,6 +16,7 @@ pub extern "c" fn @"realpath$DARWIN_EXTSN"(noalias file_name: [*:0]const u8, noa

 pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: [*]u8, buf_len: usize, basep: *i64) isize;
 pub extern "c" fn @"fstat$INODE64"(fd: fd_t, buf: *Stat) c_int;
+pub extern "c" fn @"fstatat$INODE64"(dirfd: fd_t, path_name: [*:0]const u8, buf: *Stat, flags: u32) c_int;

 pub extern "c" fn mach_absolute_time() u64;
 pub extern "c" fn mach_timebase_info(tinfo: ?*mach_timebase_info_data) void;
@@ -278,62 +278,62 @@ pub const Token = struct {

     // TODO extensions
     pub const keywords = std.ComptimeStringMap(Id, .{
-        .{"auto", .Keyword_auto},
-        .{"break", .Keyword_break},
-        .{"case", .Keyword_case},
-        .{"char", .Keyword_char},
-        .{"const", .Keyword_const},
-        .{"continue", .Keyword_continue},
-        .{"default", .Keyword_default},
-        .{"do", .Keyword_do},
-        .{"double", .Keyword_double},
-        .{"else", .Keyword_else},
-        .{"enum", .Keyword_enum},
-        .{"extern", .Keyword_extern},
-        .{"float", .Keyword_float},
-        .{"for", .Keyword_for},
-        .{"goto", .Keyword_goto},
-        .{"if", .Keyword_if},
-        .{"int", .Keyword_int},
-        .{"long", .Keyword_long},
-        .{"register", .Keyword_register},
-        .{"return", .Keyword_return},
-        .{"short", .Keyword_short},
-        .{"signed", .Keyword_signed},
-        .{"sizeof", .Keyword_sizeof},
-        .{"static", .Keyword_static},
-        .{"struct", .Keyword_struct},
-        .{"switch", .Keyword_switch},
-        .{"typedef", .Keyword_typedef},
-        .{"union", .Keyword_union},
-        .{"unsigned", .Keyword_unsigned},
-        .{"void", .Keyword_void},
-        .{"volatile", .Keyword_volatile},
-        .{"while", .Keyword_while},
+        .{ "auto", .Keyword_auto },
+        .{ "break", .Keyword_break },
+        .{ "case", .Keyword_case },
+        .{ "char", .Keyword_char },
+        .{ "const", .Keyword_const },
+        .{ "continue", .Keyword_continue },
+        .{ "default", .Keyword_default },
+        .{ "do", .Keyword_do },
+        .{ "double", .Keyword_double },
+        .{ "else", .Keyword_else },
+        .{ "enum", .Keyword_enum },
+        .{ "extern", .Keyword_extern },
+        .{ "float", .Keyword_float },
+        .{ "for", .Keyword_for },
+        .{ "goto", .Keyword_goto },
+        .{ "if", .Keyword_if },
+        .{ "int", .Keyword_int },
+        .{ "long", .Keyword_long },
+        .{ "register", .Keyword_register },
+        .{ "return", .Keyword_return },
+        .{ "short", .Keyword_short },
+        .{ "signed", .Keyword_signed },
+        .{ "sizeof", .Keyword_sizeof },
+        .{ "static", .Keyword_static },
+        .{ "struct", .Keyword_struct },
+        .{ "switch", .Keyword_switch },
+        .{ "typedef", .Keyword_typedef },
+        .{ "union", .Keyword_union },
+        .{ "unsigned", .Keyword_unsigned },
+        .{ "void", .Keyword_void },
+        .{ "volatile", .Keyword_volatile },
+        .{ "while", .Keyword_while },

         // ISO C99
-        .{"_Bool", .Keyword_bool},
-        .{"_Complex", .Keyword_complex},
-        .{"_Imaginary", .Keyword_imaginary},
-        .{"inline", .Keyword_inline},
-        .{"restrict", .Keyword_restrict},
+        .{ "_Bool", .Keyword_bool },
+        .{ "_Complex", .Keyword_complex },
+        .{ "_Imaginary", .Keyword_imaginary },
+        .{ "inline", .Keyword_inline },
+        .{ "restrict", .Keyword_restrict },

         // ISO C11
-        .{"_Alignas", .Keyword_alignas},
-        .{"_Alignof", .Keyword_alignof},
-        .{"_Atomic", .Keyword_atomic},
-        .{"_Generic", .Keyword_generic},
-        .{"_Noreturn", .Keyword_noreturn},
-        .{"_Static_assert", .Keyword_static_assert},
-        .{"_Thread_local", .Keyword_thread_local},
+        .{ "_Alignas", .Keyword_alignas },
+        .{ "_Alignof", .Keyword_alignof },
+        .{ "_Atomic", .Keyword_atomic },
+        .{ "_Generic", .Keyword_generic },
+        .{ "_Noreturn", .Keyword_noreturn },
+        .{ "_Static_assert", .Keyword_static_assert },
+        .{ "_Thread_local", .Keyword_thread_local },

         // Preprocessor directives
-        .{"include", .Keyword_include},
-        .{"define", .Keyword_define},
-        .{"ifdef", .Keyword_ifdef},
-        .{"ifndef", .Keyword_ifndef},
-        .{"error", .Keyword_error},
-        .{"pragma", .Keyword_pragma},
+        .{ "include", .Keyword_include },
+        .{ "define", .Keyword_define },
+        .{ "ifdef", .Keyword_ifdef },
+        .{ "ifndef", .Keyword_ifndef },
+        .{ "error", .Keyword_error },
+        .{ "pragma", .Keyword_pragma },
     });

     // TODO do this in the preprocessor
@@ -70,7 +70,7 @@ pub const CacheHash = struct {

     /// Convert the input value into bytes and record it as a dependency of the
     /// process being cached
-    pub fn add(self: *CacheHash, val: var) void {
+    pub fn add(self: *CacheHash, val: anytype) void {
         assert(self.manifest_file == null);

         const valPtr = switch (@typeInfo(@TypeOf(val))) {

@@ -207,7 +207,7 @@ pub const CacheHash = struct {
         }

         if (cache_hash_file.path == null) {
-            cache_hash_file.path = try mem.dupe(self.allocator, u8, file_path);
+            cache_hash_file.path = try self.allocator.dupe(u8, file_path);
         }

         const this_file = fs.cwd().openFile(cache_hash_file.path.?, .{ .read = true }) catch {
@@ -8,7 +8,7 @@ const mem = std.mem;
 /// `kvs` expects a list literal containing list literals or an array/slice of structs
 /// where `.@"0"` is the `[]const u8` key and `.@"1"` is the associated value of type `V`.
 /// TODO: https://github.com/ziglang/zig/issues/4335
-pub fn ComptimeStringMap(comptime V: type, comptime kvs: var) type {
+pub fn ComptimeStringMap(comptime V: type, comptime kvs: anytype) type {
     const precomputed = comptime blk: {
         @setEvalBranchQuota(2000);
         const KV = struct {

@@ -126,7 +126,7 @@ test "ComptimeStringMap slice of structs" {
     testMap(map);
 }

-fn testMap(comptime map: var) void {
+fn testMap(comptime map: anytype) void {
     std.testing.expectEqual(TestEnum.A, map.get("have").?);
     std.testing.expectEqual(TestEnum.B, map.get("nothing").?);
     std.testing.expect(null == map.get("missing"));

@@ -165,7 +165,7 @@ test "ComptimeStringMap void value type, list literal of list literals" {
     testSet(map);
 }

-fn testSet(comptime map: var) void {
+fn testSet(comptime map: anytype) void {
     std.testing.expectEqual({}, map.get("have").?);
     std.testing.expectEqual({}, map.get("nothing").?);
     std.testing.expect(null == map.get("missing"));
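
The kvs list-literal format described in the doc comment is the same one the C tokenizer's keyword table uses above. A minimal usage sketch:

```
const std = @import("std");

const Color = enum { red, green, blue };

// Keys are resolved at compile time into length buckets plus a few
// string compares per lookup.
const color_map = std.ComptimeStringMap(Color, .{
    .{ "red", .red },
    .{ "green", .green },
    .{ "blue", .blue },
});

test "ComptimeStringMap lookup" {
    std.testing.expectEqual(Color.green, color_map.get("green").?);
    std.testing.expect(color_map.get("magenta") == null);
}
```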
@@ -29,7 +29,7 @@ const hashes = [_]Crypto{
     Crypto{ .ty = crypto.Blake3, .name = "blake3" },
 };

-pub fn benchmarkHash(comptime Hash: var, comptime bytes: comptime_int) !u64 {
+pub fn benchmarkHash(comptime Hash: anytype, comptime bytes: comptime_int) !u64 {
     var h = Hash.init();

     var block: [Hash.digest_length]u8 = undefined;

@@ -56,7 +56,7 @@ const macs = [_]Crypto{
     Crypto{ .ty = crypto.HmacSha256, .name = "hmac-sha256" },
 };

-pub fn benchmarkMac(comptime Mac: var, comptime bytes: comptime_int) !u64 {
+pub fn benchmarkMac(comptime Mac: anytype, comptime bytes: comptime_int) !u64 {
     std.debug.assert(32 >= Mac.mac_length and 32 >= Mac.minimum_key_length);

     var in: [1 * MiB]u8 = undefined;

@@ -81,7 +81,7 @@ pub fn benchmarkMac(comptime Mac: var, comptime bytes: comptime_int) !u64 {

 const exchanges = [_]Crypto{Crypto{ .ty = crypto.X25519, .name = "x25519" }};

-pub fn benchmarkKeyExchange(comptime DhKeyExchange: var, comptime exchange_count: comptime_int) !u64 {
+pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_count: comptime_int) !u64 {
     std.debug.assert(DhKeyExchange.minimum_key_length >= DhKeyExchange.secret_length);

     var in: [DhKeyExchange.minimum_key_length]u8 = undefined;

@@ -123,15 +123,6 @@ fn mode(comptime x: comptime_int) comptime_int {
     return if (builtin.mode == .Debug) x / 64 else x;
 }

-// TODO(#1358): Replace with builtin formatted padding when available.
-fn printPad(stdout: var, s: []const u8) !void {
-    var i: usize = 0;
-    while (i < 12 - s.len) : (i += 1) {
-        try stdout.print(" ", .{});
-    }
-    try stdout.print("{}", .{s});
-}
-
 pub fn main() !void {
     const stdout = std.io.getStdOut().outStream();

@@ -175,24 +166,21 @@ pub fn main() !void {
     inline for (hashes) |H| {
         if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
             const throughput = try benchmarkHash(H.ty, mode(32 * MiB));
-            try printPad(stdout, H.name);
-            try stdout.print(": {} MiB/s\n", .{throughput / (1 * MiB)});
+            try stdout.print("{:>11}: {:5} MiB/s\n", .{ H.name, throughput / (1 * MiB) });
         }
     }

     inline for (macs) |M| {
         if (filter == null or std.mem.indexOf(u8, M.name, filter.?) != null) {
             const throughput = try benchmarkMac(M.ty, mode(128 * MiB));
-            try printPad(stdout, M.name);
-            try stdout.print(": {} MiB/s\n", .{throughput / (1 * MiB)});
+            try stdout.print("{:>11}: {:5} MiB/s\n", .{ M.name, throughput / (1 * MiB) });
         }
     }

     inline for (exchanges) |E| {
         if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
             const throughput = try benchmarkKeyExchange(E.ty, mode(1000));
-            try printPad(stdout, E.name);
-            try stdout.print(": {} exchanges/s\n", .{throughput});
+            try stdout.print("{:>11}: {:5} exchanges/s\n", .{ E.name, throughput });
         }
     }
 }
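
The hand-rolled `printPad` could be deleted because `std.fmt` now understands width and alignment in the format string: `{:>11}` right-aligns the name in an 11-column field and `{:5}` pads the number to 5 columns. A sketch using the same format string the benchmark switched to:

```
const std = @import("std");

pub fn main() !void {
    const stdout = std.io.getStdOut().outStream();
    // Prints the name right-aligned and the number padded, no manual loop.
    try stdout.print("{:>11}: {:5} MiB/s\n", .{ "sha256", 1234 });
}
```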
@@ -4,7 +4,7 @@ const mem = std.mem;
 const fmt = std.fmt;

 // Hash using the specified hasher `H` asserting `expected == H(input)`.
-pub fn assertEqualHash(comptime Hasher: var, comptime expected: []const u8, input: []const u8) void {
+pub fn assertEqualHash(comptime Hasher: anytype, comptime expected: []const u8, input: []const u8) void {
     var h: [expected.len / 2]u8 = undefined;
     Hasher.hash(input, h[0..]);
@@ -50,33 +50,21 @@ pub const LineInfo = struct {
}
};

/// Tries to write to stderr, unbuffered, and ignores any error returned.
/// Does not append a newline.
var stderr_file: File = undefined;
var stderr_file_writer: File.Writer = undefined;

var stderr_stream: ?*File.OutStream = null;
var stderr_mutex = std.Mutex.init();

pub fn warn(comptime fmt: []const u8, args: var) void {
/// Deprecated. Use `std.log` functions for logging or `std.debug.print` for
/// "printf debugging".
pub const warn = print;

/// Print to stderr, unbuffered, and silently returning on failure. Intended
/// for use in "printf debugging." Use `std.log` functions for proper logging.
pub fn print(comptime fmt: []const u8, args: anytype) void {
const held = stderr_mutex.acquire();
defer held.release();
const stderr = getStderrStream();
const stderr = io.getStdErr().writer();
nosuspend stderr.print(fmt, args) catch return;
}

pub fn getStderrStream() *File.OutStream {
if (stderr_stream) |st| {
return st;
} else {
stderr_file = io.getStdErr();
stderr_file_writer = stderr_file.outStream();
const st = &stderr_file_writer;
stderr_stream = st;
return st;
}
}

pub fn getStderrMutex() *std.Mutex {
return &stderr_mutex;
}
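Call sites keep compiling because `warn` is now just an alias for `print`. A hedged migration sketch; the `std.log.info` call assumes the logging API introduced around this time:

```
const std = @import("std");

pub fn main() void {
    // Before: std.debug.warn("count = {}\n", .{7});
    // After, for "printf debugging" (warn still works as an alias):
    std.debug.print("count = {}\n", .{7});
    // For actual logging, std.log is the preferred interface (assumed API):
    std.log.info("count = {}\n", .{7});
}
```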
@@ -99,6 +87,7 @@ pub fn detectTTYConfig() TTY.Config {
if (process.getEnvVarOwned(allocator, "ZIG_DEBUG_COLOR")) |_| {
return .escape_codes;
} else |_| {
const stderr_file = io.getStdErr();
if (stderr_file.supportsAnsiEscapeCodes()) {
return .escape_codes;
} else if (builtin.os.tag == .windows and stderr_file.isTty()) {
@@ -113,7 +102,7 @@ pub fn detectTTYConfig() TTY.Config {
/// TODO multithreaded awareness
pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
nosuspend {
const stderr = getStderrStream();
const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
@@ -134,7 +123,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
/// TODO multithreaded awareness
pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
nosuspend {
const stderr = getStderrStream();
const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
@@ -204,7 +193,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *builtin.StackTrace
/// TODO multithreaded awareness
pub fn dumpStackTrace(stack_trace: builtin.StackTrace) void {
nosuspend {
const stderr = getStderrStream();
const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
@@ -234,7 +223,7 @@ pub fn assert(ok: bool) void {
if (!ok) unreachable; // assertion failure
}

pub fn panic(comptime format: []const u8, args: var) noreturn {
pub fn panic(comptime format: []const u8, args: anytype) noreturn {
@setCold(true);
// TODO: remove conditional once wasi / LLVM defines __builtin_return_address
const first_trace_addr = if (builtin.os.tag == .wasi) null else @returnAddress();
@@ -252,7 +241,7 @@ var panic_mutex = std.Mutex.init();
/// This is used to catch and handle panics triggered by the panic handler.
threadlocal var panic_stage: usize = 0;

pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: var) noreturn {
pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: anytype) noreturn {
@setCold(true);

if (enable_segfault_handler) {
@@ -272,7 +261,7 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c
const held = panic_mutex.acquire();
defer held.release();

const stderr = getStderrStream();
const stderr = io.getStdErr().writer();
stderr.print(format ++ "\n", args) catch os.abort();
if (trace) |t| {
dumpStackTrace(t.*);
@@ -297,7 +286,7 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c
// A panic happened while trying to print a previous panic message,
// we're still holding the mutex but that's fine as we're going to
// call abort()
const stderr = getStderrStream();
const stderr = io.getStdErr().writer();
stderr.print("Panicked during a panic. Aborting.\n", .{}) catch os.abort();
},
else => {
@@ -317,7 +306,7 @@ const RESET = "\x1b[0m";

pub fn writeStackTrace(
stack_trace: builtin.StackTrace,
out_stream: var,
out_stream: anytype,
allocator: *mem.Allocator,
debug_info: *DebugInfo,
tty_config: TTY.Config,
@@ -395,7 +384,7 @@ pub const StackIterator = struct {
};

pub fn writeCurrentStackTrace(
out_stream: var,
out_stream: anytype,
debug_info: *DebugInfo,
tty_config: TTY.Config,
start_addr: ?usize,
@@ -410,7 +399,7 @@ pub fn writeCurrentStackTrace(
}

pub fn writeCurrentStackTraceWindows(
out_stream: var,
out_stream: anytype,
debug_info: *DebugInfo,
tty_config: TTY.Config,
start_addr: ?usize,
@@ -446,7 +435,7 @@ pub const TTY = struct {
// TODO give this a payload of file handle
windows_api,

fn setColor(conf: Config, out_stream: var, color: Color) void {
fn setColor(conf: Config, out_stream: anytype, color: Color) void {
nosuspend switch (conf) {
.no_color => return,
.escape_codes => switch (color) {
@@ -458,6 +447,7 @@ pub const TTY = struct {
.Reset => out_stream.writeAll(RESET) catch return,
},
.windows_api => if (builtin.os.tag == .windows) {
const stderr_file = io.getStdErr();
const S = struct {
var attrs: windows.WORD = undefined;
var init_attrs = false;
@@ -565,7 +555,7 @@ fn machoSearchSymbols(symbols: []const MachoSymbol, address: usize) ?*const Mach
}

/// TODO resources https://github.com/ziglang/zig/issues/4353
pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: usize, tty_config: TTY.Config) !void {
pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void {
const module = debug_info.getModuleForAddress(address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
return printLineInfo(
@@ -596,13 +586,13 @@ pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: us
}

fn printLineInfo(
out_stream: var,
out_stream: anytype,
line_info: ?LineInfo,
address: usize,
symbol_name: []const u8,
compile_unit_name: []const u8,
tty_config: TTY.Config,
comptime printLineFromFile: var,
comptime printLineFromFile: anytype,
) !void {
nosuspend {
tty_config.setColor(out_stream, .White);
@@ -830,7 +820,7 @@ fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInf
}
}

fn readSparseBitVector(stream: var, allocator: *mem.Allocator) ![]usize {
fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]usize {
const num_words = try stream.readIntLittle(u32);
var word_i: usize = 0;
var list = ArrayList(usize).init(allocator);
@@ -1014,7 +1004,7 @@ fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugI
};
}

fn printLineFromFileAnyOs(out_stream: var, line_info: LineInfo) !void {
fn printLineFromFileAnyOs(out_stream: anytype, line_info: LineInfo) !void {
// Need this to always block even in async I/O mode, because this could potentially
// be called from e.g. the event loop code crashing.
var f = try fs.cwd().openFile(line_info.file_name, .{ .intended_io_mode = .blocking });
@@ -1142,7 +1132,7 @@ pub const DebugInfo = struct {
const seg_end = seg_start + segment_cmd.vmsize;

if (rebased_address >= seg_start and rebased_address < seg_end) {
if (self.address_map.getValue(base_address)) |obj_di| {
if (self.address_map.get(base_address)) |obj_di| {
return obj_di;
}

@@ -1214,7 +1204,7 @@ pub const DebugInfo = struct {
const seg_end = seg_start + info.SizeOfImage;

if (address >= seg_start and address < seg_end) {
if (self.address_map.getValue(seg_start)) |obj_di| {
if (self.address_map.get(seg_start)) |obj_di| {
return obj_di;
}

@@ -1288,7 +1278,7 @@ pub const DebugInfo = struct {
else => return error.MissingDebugInfo,
}

if (self.address_map.getValue(ctx.base_address)) |obj_di| {
if (self.address_map.get(ctx.base_address)) |obj_di| {
return obj_di;
}

@@ -1451,7 +1441,7 @@ pub const ModuleDebugInfo = switch (builtin.os.tag) {
const o_file_path = mem.spanZ(self.strings[symbol.ofile.?.n_strx..]);

// Check if its debug infos are already in the cache
var o_file_di = self.ofiles.getValue(o_file_path) orelse
var o_file_di = self.ofiles.get(o_file_path) orelse
(self.loadOFile(o_file_path) catch |err| switch (err) {
error.FileNotFound,
error.MissingDebugInfo,

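The `getValue` → `get` renames above track the recent `std.HashMap` rework. A small sketch of the renamed lookup, assuming `get` still returns an optional copy of the value:

```
const std = @import("std");

test "hash map lookup after the rename (sketch)" {
    var map = std.AutoHashMap(usize, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(0x1000, 42);
    // Formerly `map.getValue(...)`; returns null when the key is absent.
    if (map.get(0x1000)) |val| {
        std.testing.expect(val == 42);
    } else {
        unreachable;
    }
}
```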
@@ -1,171 +1,211 @@
const std = @import("std");
const testing = std.testing;

pub fn readULEB128(comptime T: type, in_stream: var) !T {
const ShiftT = std.meta.Int(false, std.math.log2(T.bit_count));
/// Read a single unsigned LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readULEB128(comptime T: type, reader: anytype) !T {
const U = if (T.bit_count < 8) u8 else T;
const ShiftT = std.math.Log2Int(U);

var result: T = 0;
var shift: usize = 0;
const max_group = (U.bit_count + 6) / 7;

var value = @as(U, 0);
var group = @as(ShiftT, 0);

while (group < max_group) : (group += 1) {
const byte = try reader.readByte();
var temp = @as(U, byte & 0x7f);

if (@shlWithOverflow(U, temp, group * 7, &temp)) return error.Overflow;

value |= temp;
if (byte & 0x80 == 0) break;
} else {
return error.Overflow;
}

// only applies in the case that we extended to u8
if (U != T) {
if (value > std.math.maxInt(T)) return error.Overflow;
}

return @truncate(T, value);
}

/// Write a single unsigned integer as unsigned LEB128 to the given writer.
pub fn writeULEB128(writer: anytype, uint_value: anytype) !void {
const T = @TypeOf(uint_value);
const U = if (T.bit_count < 8) u8 else T;
var value = @intCast(U, uint_value);

while (true) {
const byte = try in_stream.readByte();

if (shift > T.bit_count)
return error.Overflow;

var operand: T = undefined;
if (@shlWithOverflow(T, byte & 0x7f, @intCast(ShiftT, shift), &operand))
return error.Overflow;

result |= operand;

if ((byte & 0x80) == 0)
return result;

shift += 7;
const byte = @truncate(u8, value & 0x7f);
value >>= 7;
if (value == 0) {
try writer.writeByte(byte);
break;
} else {
try writer.writeByte(byte | 0x80);
}
}
}

pub fn readULEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
const ShiftT = std.meta.Int(false, std.math.log2(T.bit_count));
/// Read a single unsigned integer from the given memory as type T.
/// The provided slice reference will be updated to point to the byte after the last byte read.
pub fn readULEB128Mem(comptime T: type, ptr: *[]const u8) !T {
var buf = std.io.fixedBufferStream(ptr.*);
const value = try readULEB128(T, buf.reader());
ptr.*.ptr += buf.pos;
return value;
}

var result: T = 0;
var shift: usize = 0;
var i: usize = 0;
/// Write a single unsigned integer to the given memory as unsigned LEB128,
/// returning the number of bytes written.
pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize {
const T = @TypeOf(uint_value);
const max_group = (T.bit_count + 6) / 7;
var buf = std.io.fixedBufferStream(ptr);
try writeULEB128(buf.writer(), uint_value);
return buf.pos;
}

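A short round-trip sketch of the new reader/writer API, placed as if it lived next to the functions above (the byte values follow the standard ULEB128 encoding):

```
const std = @import("std");

test "ULEB128 round trip (sketch)" {
    var buf: [4]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    // 624485 has 20 significant bits, so it needs ceil(20 / 7) = 3 bytes:
    // 0xe5 0x8e 0x26 (least-significant 7-bit group first).
    try writeULEB128(fbs.writer(), @as(u32, 624485));
    std.testing.expect(fbs.pos == 3 and buf[0] == 0xe5);
    fbs.pos = 0;
    std.testing.expect((try readULEB128(u32, fbs.reader())) == 624485);
}
```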
while (true) : (i += 1) {
const byte = ptr.*[i];
/// Read a single signed LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readILEB128(comptime T: type, reader: anytype) !T {
const S = if (T.bit_count < 8) i8 else T;
const U = std.meta.Int(false, S.bit_count);
const ShiftU = std.math.Log2Int(U);

if (shift > T.bit_count)
return error.Overflow;
const max_group = (U.bit_count + 6) / 7;

var operand: T = undefined;
if (@shlWithOverflow(T, byte & 0x7f, @intCast(ShiftT, shift), &operand))
return error.Overflow;
var value = @as(U, 0);
var group = @as(ShiftU, 0);

result |= operand;
while (group < max_group) : (group += 1) {
const byte = try reader.readByte();
var temp = @as(U, byte & 0x7f);

if ((byte & 0x80) == 0) {
ptr.* += i + 1;
return result;
const shift = group * 7;
if (@shlWithOverflow(U, temp, shift, &temp)) {
// Overflow is ok so long as the sign bit is set and this is the last byte
if (byte & 0x80 != 0) return error.Overflow;
if (@bitCast(S, temp) >= 0) return error.Overflow;

// and all the overflowed bits are 1
const remaining_shift = @intCast(u3, U.bit_count - @as(u16, shift));
const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
}

shift += 7;
value |= temp;
if (byte & 0x80 == 0) {
const needs_sign_ext = group + 1 < max_group;
if (byte & 0x40 != 0 and needs_sign_ext) {
const ones = @as(S, -1);
value |= @bitCast(U, ones) << (shift + 7);
}
break;
}
} else {
return error.Overflow;
}

const result = @bitCast(S, value);
// Only applies if we extended to i8
if (S != T) {
if (result > std.math.maxInt(T) or result < std.math.minInt(T)) return error.Overflow;
}

return @truncate(T, result);
}

pub fn readILEB128(comptime T: type, in_stream: var) !T {
const UT = std.meta.Int(false, T.bit_count);
const ShiftT = std.meta.Int(false, std.math.log2(T.bit_count));
/// Write a single signed integer as signed LEB128 to the given writer.
pub fn writeILEB128(writer: anytype, int_value: anytype) !void {
const T = @TypeOf(int_value);
const S = if (T.bit_count < 8) i8 else T;
const U = std.meta.Int(false, S.bit_count);

var result: UT = 0;
var shift: usize = 0;
var value = @intCast(S, int_value);

while (true) {
const byte: u8 = try in_stream.readByte();

if (shift > T.bit_count)
return error.Overflow;

var operand: UT = undefined;
if (@shlWithOverflow(UT, @as(UT, byte & 0x7f), @intCast(ShiftT, shift), &operand)) {
if (byte != 0x7f)
return error.Overflow;
}

result |= operand;

shift += 7;

if ((byte & 0x80) == 0) {
if (shift < T.bit_count and (byte & 0x40) != 0) {
result |= @bitCast(UT, @intCast(T, -1)) << @intCast(ShiftT, shift);
}
return @bitCast(T, result);
const uvalue = @bitCast(U, value);
const byte = @truncate(u8, uvalue);
value >>= 6;
if (value == -1 or value == 0) {
try writer.writeByte(byte & 0x7F);
break;
} else {
value >>= 1;
try writer.writeByte(byte | 0x80);
}
}
}

pub fn readILEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
const UT = std.meta.Int(false, T.bit_count);
const ShiftT = std.meta.Int(false, std.math.log2(T.bit_count));

var result: UT = 0;
var shift: usize = 0;
var i: usize = 0;

while (true) : (i += 1) {
const byte = ptr.*[i];

if (shift > T.bit_count)
return error.Overflow;

var operand: UT = undefined;
if (@shlWithOverflow(UT, @as(UT, byte & 0x7f), @intCast(ShiftT, shift), &operand)) {
if (byte != 0x7f)
return error.Overflow;
}

result |= operand;

shift += 7;

if ((byte & 0x80) == 0) {
if (shift < T.bit_count and (byte & 0x40) != 0) {
result |= @bitCast(UT, @intCast(T, -1)) << @intCast(ShiftT, shift);
}
ptr.* += i + 1;
return @bitCast(T, result);
}
}
/// Read a single signed LEB128 integer from the given memory as type T.
/// The provided slice reference will be updated to point to the byte after the last byte read.
pub fn readILEB128Mem(comptime T: type, ptr: *[]const u8) !T {
var buf = std.io.fixedBufferStream(ptr.*);
const value = try readILEB128(T, buf.reader());
ptr.*.ptr += buf.pos;
return value;
}

/// Write a single signed integer to the given memory as signed LEB128,
/// returning the number of bytes written.
pub fn writeILEB128Mem(ptr: []u8, int_value: anytype) !usize {
const T = @TypeOf(int_value);
var buf = std.io.fixedBufferStream(ptr);
try writeILEB128(buf.writer(), int_value);
return buf.pos;
}

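The signed variants differ only in sign handling: bit 0x40 of the final byte carries the sign, which is why the reader sign-extends when that bit is set. A companion sketch:

```
const std = @import("std");

test "SLEB128 round trip (sketch)" {
    var buf: [4]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    // -2 fits in one group: the single byte 0x7e, with the 0x40 sign bit set.
    try writeILEB128(fbs.writer(), @as(i32, -2));
    std.testing.expect(fbs.pos == 1 and buf[0] == 0x7e);
    fbs.pos = 0;
    std.testing.expect((try readILEB128(i32, fbs.reader())) == -2);
}
```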
// tests
fn test_read_stream_ileb128(comptime T: type, encoded: []const u8) !T {
var in_stream = std.io.fixedBufferStream(encoded);
return try readILEB128(T, in_stream.inStream());
var reader = std.io.fixedBufferStream(encoded);
return try readILEB128(T, reader.reader());
}

fn test_read_stream_uleb128(comptime T: type, encoded: []const u8) !T {
var in_stream = std.io.fixedBufferStream(encoded);
return try readULEB128(T, in_stream.inStream());
var reader = std.io.fixedBufferStream(encoded);
return try readULEB128(T, reader.reader());
}

fn test_read_ileb128(comptime T: type, encoded: []const u8) !T {
var in_stream = std.io.fixedBufferStream(encoded);
const v1 = readILEB128(T, in_stream.inStream());
var in_ptr = encoded.ptr;
const v2 = readILEB128Mem(T, &in_ptr);
var reader = std.io.fixedBufferStream(encoded);
const v1 = try readILEB128(T, reader.reader());
var in_ptr = encoded;
const v2 = try readILEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
return v1;
}

fn test_read_uleb128(comptime T: type, encoded: []const u8) !T {
var in_stream = std.io.fixedBufferStream(encoded);
const v1 = readULEB128(T, in_stream.inStream());
var in_ptr = encoded.ptr;
const v2 = readULEB128Mem(T, &in_ptr);
var reader = std.io.fixedBufferStream(encoded);
const v1 = try readULEB128(T, reader.reader());
var in_ptr = encoded;
const v2 = try readULEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
return v1;
}

fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) void {
var in_stream = std.io.fixedBufferStream(encoded);
var in_ptr = encoded.ptr;
fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void {
var reader = std.io.fixedBufferStream(encoded);
var in_ptr = encoded;
var i: usize = 0;
while (i < N) : (i += 1) {
const v1 = readILEB128(T, in_stream.inStream());
const v2 = readILEB128Mem(T, &in_ptr);
const v1 = try readILEB128(T, reader.reader());
const v2 = try readILEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
}
}

fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) void {
var in_stream = std.io.fixedBufferStream(encoded);
var in_ptr = encoded.ptr;
fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void {
var reader = std.io.fixedBufferStream(encoded);
var in_ptr = encoded;
var i: usize = 0;
while (i < N) : (i += 1) {
const v1 = readULEB128(T, in_stream.inStream());
const v2 = readULEB128Mem(T, &in_ptr);
const v1 = try readULEB128(T, reader.reader());
const v2 = try readULEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
}
}
@@ -212,7 +252,7 @@ test "deserialize signed LEB128" {
testing.expect((try test_read_ileb128(i64, "\x80\x81\x80\x00")) == 0x80);

// Decode sequence of SLEB128 values
test_read_ileb128_seq(i64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
try test_read_ileb128_seq(i64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
}

test "deserialize unsigned LEB128" {
@@ -252,5 +292,99 @@ test "deserialize unsigned LEB128" {
testing.expect((try test_read_uleb128(u64, "\x80\x81\x80\x00")) == 0x80);

// Decode sequence of ULEB128 values
test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
try test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
}

fn test_write_leb128(value: anytype) !void {
const T = @TypeOf(value);

const writeStream = if (T.is_signed) writeILEB128 else writeULEB128;
const writeMem = if (T.is_signed) writeILEB128Mem else writeULEB128Mem;
const readStream = if (T.is_signed) readILEB128 else readULEB128;
const readMem = if (T.is_signed) readILEB128Mem else readULEB128Mem;

// decode to a larger bit size too, to ensure sign extension
// is working as expected
const larger_type_bits = ((T.bit_count + 8) / 8) * 8;
const B = std.meta.Int(T.is_signed, larger_type_bits);

const bytes_needed = bn: {
const S = std.meta.Int(T.is_signed, @sizeOf(T) * 8);
if (T.bit_count <= 7) break :bn @as(u16, 1);

const unused_bits = if (value < 0) @clz(T, ~value) else @clz(T, value);
const used_bits: u16 = (T.bit_count - unused_bits) + @boolToInt(T.is_signed);
if (used_bits <= 7) break :bn @as(u16, 1);
break :bn ((used_bits + 6) / 7);
};

const max_groups = if (T.bit_count == 0) 1 else (T.bit_count + 6) / 7;

var buf: [max_groups]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);

// stream write
try writeStream(fbs.writer(), value);
const w1_pos = fbs.pos;
testing.expect(w1_pos == bytes_needed);

// stream read
fbs.pos = 0;
const sr = try readStream(T, fbs.reader());
testing.expect(fbs.pos == w1_pos);
testing.expect(sr == value);

// bigger type stream read
fbs.pos = 0;
const bsr = try readStream(B, fbs.reader());
testing.expect(fbs.pos == w1_pos);
testing.expect(bsr == value);

// mem write
const w2_pos = try writeMem(&buf, value);
testing.expect(w2_pos == w1_pos);

// mem read
var buf_ref: []u8 = buf[0..];
const mr = try readMem(T, &buf_ref);
testing.expect(@ptrToInt(buf_ref.ptr) - @ptrToInt(&buf) == w2_pos);
testing.expect(mr == value);

// bigger type mem read
buf_ref = buf[0..];
const bmr = try readMem(B, &buf_ref);
testing.expect(@ptrToInt(buf_ref.ptr) - @ptrToInt(&buf) == w2_pos);
testing.expect(bmr == value);
}

test "serialize unsigned LEB128" {
const max_bits = 18;

comptime var t = 0;
inline while (t <= max_bits) : (t += 1) {
const T = std.meta.Int(false, t);
const min = std.math.minInt(T);
const max = std.math.maxInt(T);
var i = @as(std.meta.Int(false, T.bit_count + 1), min);

while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
}
}

test "serialize signed LEB128" {
// explicitly test i0 because starting `t` at 0
// will break the while loop
try test_write_leb128(@as(i0, 0));

const max_bits = 18;

comptime var t = 1;
inline while (t <= max_bits) : (t += 1) {
const T = std.meta.Int(true, t);
const min = std.math.minInt(T);
const max = std.math.maxInt(T);
var i = @as(std.meta.Int(true, T.bit_count + 1), min);

while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
}
}

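The `bytes_needed` block above encodes the rule that LEB128 spends 7 payload bits per byte: for example, `@as(u32, 300)` has 9 significant bits, so it needs ceil(9 / 7) = 2 bytes (0xac 0x02), while the extra `@boolToInt(T.is_signed)` term reserves room for the sign bit that signed encodings must keep.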
@@ -236,7 +236,7 @@ const LineNumberProgram = struct {
}
};

fn readUnitLength(in_stream: var, endian: builtin.Endian, is_64: *bool) !u64 {
fn readUnitLength(in_stream: anytype, endian: builtin.Endian, is_64: *bool) !u64 {
const first_32_bits = try in_stream.readInt(u32, endian);
is_64.* = (first_32_bits == 0xffffffff);
if (is_64.*) {
@@ -249,7 +249,7 @@ fn readUnitLength(in_stream: var, endian: builtin.Endian, is_64: *bool) !u64 {
}

// TODO the nosuspends here are workarounds
fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 {
fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![]u8 {
const buf = try allocator.alloc(u8, size);
errdefer allocator.free(buf);
if ((try nosuspend in_stream.read(buf)) < size) return error.EndOfFile;
@@ -257,25 +257,25 @@ fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8
}

// TODO the nosuspends here are workarounds
fn readAddress(in_stream: var, endian: builtin.Endian, is_64: bool) !u64 {
fn readAddress(in_stream: anytype, endian: builtin.Endian, is_64: bool) !u64 {
return nosuspend if (is_64)
try in_stream.readInt(u64, endian)
else
@as(u64, try in_stream.readInt(u32, endian));
}

fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Block = buf };
}

// TODO the nosuspends here are workarounds
fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, endian: builtin.Endian, size: usize) !FormValue {
fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: usize) !FormValue {
const block_len = try nosuspend in_stream.readVarInt(usize, endian, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}

fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue {
fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue {
// TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
// `nosuspend` should be removed from all the function calls once it is fixed.
return FormValue{
@@ -302,7 +302,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: boo
}

// TODO the nosuspends here are workarounds
fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, endian: builtin.Endian, size: i32) !FormValue {
fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: i32) !FormValue {
return FormValue{
.Ref = switch (size) {
1 => try nosuspend in_stream.readInt(u8, endian),
@@ -316,7 +316,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, endian: builtin.
}

// TODO the nosuspends here are workarounds
fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, endian: builtin.Endian, is_64: bool) anyerror!FormValue {
fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: builtin.Endian, is_64: bool) anyerror!FormValue {
return switch (form_id) {
FORM_addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) },
FORM_block1 => parseFormValueBlock(allocator, in_stream, endian, 1),
@@ -359,7 +359,7 @@ fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, endia
const F = @TypeOf(async parseFormValue(allocator, in_stream, child_form_id, endian, is_64));
var frame = try allocator.create(F);
defer allocator.destroy(frame);
return await @asyncCall(frame, {}, parseFormValue, allocator, in_stream, child_form_id, endian, is_64);
return await @asyncCall(frame, {}, parseFormValue, .{ allocator, in_stream, child_form_id, endian, is_64 });
},
else => error.InvalidDebugInfo,
};
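`@asyncCall` now takes its argument list as a single tuple, matching the `@call` convention already used elsewhere in this diff. For reference, a sketch of that tuple convention with `@call`:

```
const std = @import("std");

fn add(a: i32, b: i32) i32 {
    return a + b;
}

test "@call-style argument tuples (sketch)" {
    // @asyncCall now follows the same shape: one tuple for all arguments.
    std.testing.expect(@call(.{}, add, .{ 1, 2 }) == 3);
}
```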
@@ -670,7 +670,7 @@ pub const DwarfInfo = struct {
}
}

fn parseDie(di: *DwarfInfo, in_stream: var, abbrev_table: *const AbbrevTable, is_64: bool) !?Die {
fn parseDie(di: *DwarfInfo, in_stream: anytype, abbrev_table: *const AbbrevTable, is_64: bool) !?Die {
const abbrev_code = try leb.readULEB128(u64, in_stream);
if (abbrev_code == 0) return null;
const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo;

@@ -517,7 +517,7 @@ pub fn readAllHeaders(allocator: *mem.Allocator, file: File) !AllHeaders {
return hdrs;
}

pub fn int(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_64) {
pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
if (is_64) {
if (need_bswap) {
return @byteSwap(@TypeOf(int_64), int_64);
@@ -529,7 +529,7 @@ pub fn int(is_64: bool, need_bswap: bool, int_32: var, int_64: var) @TypeOf(int_
}
}

pub fn int32(need_bswap: bool, int_32: var, comptime Int64: var) Int64 {
pub fn int32(need_bswap: bool, int_32: anytype, comptime Int64: anytype) Int64 {
if (need_bswap) {
return @byteSwap(@TypeOf(int_32), int_32);
} else {
@@ -551,6 +551,7 @@ fn preadNoEof(file: std.fs.File, buf: []u8, offset: u64) !void {
error.InputOutput => return error.FileSystem,
error.Unexpected => return error.Unexpected,
error.WouldBlock => return error.Unexpected,
error.AccessDenied => return error.Unexpected,
};
if (len == 0) return error.UnexpectedEndOfFile;
i += len;

@@ -65,7 +65,7 @@ pub fn Group(comptime ReturnType: type) type {
/// allocated by the group and freed by `wait`.
/// `func` must be async and have return type `ReturnType`.
/// Thread-safe.
pub fn call(self: *Self, comptime func: var, args: var) error{OutOfMemory}!void {
pub fn call(self: *Self, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
var frame = try self.allocator.create(@TypeOf(@call(.{ .modifier = .async_kw }, func, args)));
errdefer self.allocator.destroy(frame);
const node = try self.allocator.create(AllocStack.Node);

lib/std/fmt.zig: 485 changes (diff suppressed because it is too large)
@@ -261,17 +261,7 @@ pub const Dir = struct {
name: []const u8,
kind: Kind,

pub const Kind = enum {
BlockDevice,
CharacterDevice,
Directory,
NamedPipe,
SymLink,
File,
UnixDomainSocket,
Whiteout,
Unknown,
};
pub const Kind = File.Kind;
};

const IteratorError = error{AccessDenied} || os.UnexpectedError;
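`Dir.Entry.Kind` is now an alias for `File.Kind` (declared in `lib/std/fs/file.zig` below), so directory entries and `File.Stat` report the same enum. A one-line sketch of what the alias guarantees:

```
const std = @import("std");

test "Dir.Entry.Kind aliases File.Kind (sketch)" {
    std.testing.expect(std.fs.Dir.Entry.Kind == std.fs.File.Kind);
}
```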
@@ -463,6 +453,8 @@ pub const Dir = struct {

pub const Error = IteratorError;

/// Memory such as file names referenced in this returned entry becomes invalid
/// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
pub fn next(self: *Self) Error!?Entry {
start_over: while (true) {
const w = os.windows;
@@ -545,14 +537,15 @@ pub const Dir = struct {
w.EFAULT => unreachable,
w.ENOTDIR => unreachable,
w.EINVAL => unreachable,
w.ENOTCAPABLE => return error.AccessDenied,
else => |err| return os.unexpectedErrno(err),
}
if (bufused == 0) return null;
self.index = 0;
self.end_index = bufused;
}
const entry = @ptrCast(*align(1) os.wasi.dirent_t, &self.buf[self.index]);
const entry_size = @sizeOf(os.wasi.dirent_t);
const entry = @ptrCast(*align(1) w.dirent_t, &self.buf[self.index]);
const entry_size = @sizeOf(w.dirent_t);
const name_index = self.index + entry_size;
const name = mem.span(self.buf[name_index .. name_index + entry.d_namlen]);

@@ -566,12 +559,12 @@ pub const Dir = struct {
}

const entry_kind = switch (entry.d_type) {
wasi.FILETYPE_BLOCK_DEVICE => Entry.Kind.BlockDevice,
wasi.FILETYPE_CHARACTER_DEVICE => Entry.Kind.CharacterDevice,
wasi.FILETYPE_DIRECTORY => Entry.Kind.Directory,
wasi.FILETYPE_SYMBOLIC_LINK => Entry.Kind.SymLink,
wasi.FILETYPE_REGULAR_FILE => Entry.Kind.File,
wasi.FILETYPE_SOCKET_STREAM, wasi.FILETYPE_SOCKET_DGRAM => Entry.Kind.UnixDomainSocket,
w.FILETYPE_BLOCK_DEVICE => Entry.Kind.BlockDevice,
w.FILETYPE_CHARACTER_DEVICE => Entry.Kind.CharacterDevice,
w.FILETYPE_DIRECTORY => Entry.Kind.Directory,
w.FILETYPE_SYMBOLIC_LINK => Entry.Kind.SymLink,
w.FILETYPE_REGULAR_FILE => Entry.Kind.File,
w.FILETYPE_SOCKET_STREAM, wasi.FILETYPE_SOCKET_DGRAM => Entry.Kind.UnixDomainSocket,
else => Entry.Kind.Unknown,
};
return Entry{
@@ -1109,6 +1102,7 @@ pub const Dir = struct {
.OBJECT_NAME_INVALID => unreachable,
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
.NOT_A_DIRECTORY => return error.NotDir,
.INVALID_PARAMETER => unreachable,
else => return w.unexpectedStatus(rc),
}
@@ -1119,10 +1113,18 @@ pub const Dir = struct {
/// Delete a file name and possibly the file it refers to, based on an open directory handle.
/// Asserts that the path parameter has no null bytes.
pub fn deleteFile(self: Dir, sub_path: []const u8) DeleteFileError!void {
os.unlinkat(self.fd, sub_path, 0) catch |err| switch (err) {
error.DirNotEmpty => unreachable, // not passing AT_REMOVEDIR
else => |e| return e,
};
if (builtin.os.tag == .windows) {
const sub_path_w = try os.windows.sliceToPrefixedFileW(sub_path);
return self.deleteFileW(sub_path_w.span().ptr);
} else if (builtin.os.tag == .wasi) {
os.unlinkatWasi(self.fd, sub_path, 0) catch |err| switch (err) {
error.DirNotEmpty => unreachable, // not passing AT_REMOVEDIR
else => |e| return e,
};
} else {
const sub_path_c = try os.toPosixPath(sub_path);
return self.deleteFileZ(&sub_path_c);
}
}

pub const deleteFileC = @compileError("deprecated: renamed to deleteFileZ");
@@ -1131,6 +1133,17 @@ pub const Dir = struct {
pub fn deleteFileZ(self: Dir, sub_path_c: [*:0]const u8) DeleteFileError!void {
os.unlinkatZ(self.fd, sub_path_c, 0) catch |err| switch (err) {
error.DirNotEmpty => unreachable, // not passing AT_REMOVEDIR
error.AccessDenied => |e| switch (builtin.os.tag) {
// non-Linux POSIX systems return EPERM when trying to delete a directory, so
// we need to handle that case specifically and translate the error
.macosx, .ios, .freebsd, .netbsd, .dragonfly => {
// Don't follow symlinks to match unlinkat (which acts on symlinks rather than follows them)
const fstat = os.fstatatZ(self.fd, sub_path_c, os.AT_SYMLINK_NOFOLLOW) catch return e;
const is_dir = fstat.mode & os.S_IFMT == os.S_IFDIR;
return if (is_dir) error.IsDir else e;
},
else => return e,
},
else => |e| return e,
};
}
@@ -1229,14 +1242,9 @@ pub const Dir = struct {
var file = try self.openFile(file_path, .{});
defer file.close();

const size = math.cast(usize, try file.getEndPos()) catch math.maxInt(usize);
if (size > max_bytes) return error.FileTooBig;
const stat_size = try file.getEndPos();

const buf = try allocator.allocWithOptions(u8, size, alignment, optional_sentinel);
errdefer allocator.free(buf);

try file.inStream().readNoEof(buf);
return buf;
return file.readAllAllocOptions(allocator, stat_size, max_bytes, alignment, optional_sentinel);
}

pub const DeleteTreeError = error{
@@ -1532,9 +1540,9 @@ pub const Dir = struct {

var size: ?u64 = null;
const mode = options.override_mode orelse blk: {
const stat = try in_file.stat();
size = stat.size;
break :blk stat.mode;
const st = try in_file.stat();
size = st.size;
break :blk st.mode;
};

var atomic_file = try dest_dir.atomicFile(dest_path, .{ .mode = mode });
@@ -1560,6 +1568,17 @@ pub const Dir = struct {
return AtomicFile.init(dest_path, options.mode, self, false);
}
}

pub const Stat = File.Stat;
pub const StatError = File.StatError;

pub fn stat(self: Dir) StatError!Stat {
const file: File = .{
.handle = self.fd,
.capable_io_mode = .blocking,
};
return file.stat();
}
};

/// Returns a handle to the current working directory. It is not opened with iteration capability.
@@ -1808,7 +1827,7 @@ pub fn selfExePathAlloc(allocator: *Allocator) ![]u8 {
// TODO(#4812): Investigate other systems and whether it is possible to get
// this path by trying larger and larger buffers until one succeeds.
var buf: [MAX_PATH_BYTES]u8 = undefined;
return mem.dupe(allocator, u8, try selfExePath(&buf));
return allocator.dupe(u8, try selfExePath(&buf));
}

/// Get the path to the current executable.
@@ -1871,7 +1890,7 @@ pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 {
// TODO(#4812): Investigate other systems and whether it is possible to get
// this path by trying larger and larger buffers until one succeeds.
var buf: [MAX_PATH_BYTES]u8 = undefined;
return mem.dupe(allocator, u8, try selfExeDirPath(&buf));
return allocator.dupe(u8, try selfExeDirPath(&buf));
}

/// Get the directory path that contains the current executable.
@@ -1893,7 +1912,7 @@ pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
// paths. musl supports passing NULL but restricts the output to PATH_MAX
// anyway.
var buf: [MAX_PATH_BYTES]u8 = undefined;
return mem.dupe(allocator, u8, try os.realpath(pathname, &buf));
return allocator.dupe(u8, try os.realpath(pathname, &buf));
}

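`mem.dupe(allocator, u8, s)` is giving way to the `Allocator` method form used above; a minimal sketch of the replacement call:

```
const std = @import("std");

test "allocator.dupe replaces mem.dupe (sketch)" {
    const copy = try std.testing.allocator.dupe(u8, "/usr/bin/zig");
    defer std.testing.allocator.free(copy);
    std.testing.expect(std.mem.eql(u8, copy, "/usr/bin/zig"));
}
```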
test "" {
|
||||
|
||||
@@ -29,6 +29,18 @@ pub const File = struct {
pub const Mode = os.mode_t;
pub const INode = os.ino_t;

pub const Kind = enum {
BlockDevice,
CharacterDevice,
Directory,
NamedPipe,
SymLink,
File,
UnixDomainSocket,
Whiteout,
Unknown,
};

pub const default_mode = switch (builtin.os.tag) {
.windows => 0,
.wasi => 0,
@@ -209,7 +221,7 @@ pub const File = struct {
/// TODO: integrate with async I/O
pub fn mode(self: File) ModeError!Mode {
if (builtin.os.tag == .windows) {
return {};
return 0;
}
return (try self.stat()).mode;
}
@@ -219,13 +231,14 @@ pub const File = struct {
/// unique across time, as some file systems may reuse an inode after its file has been deleted.
/// Some systems may change the inode of a file over time.
///
/// On Linux, the inode _is_ structure that stores the metadata, and the inode _number_ is what
/// On Linux, the inode is a structure that stores the metadata, and the inode _number_ is what
/// you see here: the index number of the inode.
///
/// The FileIndex on Windows is similar. It is a number for a file that is unique to each filesystem.
inode: INode,
size: u64,
mode: Mode,
kind: Kind,

/// Access time in nanoseconds, relative to UTC 1970-01-01.
atime: i128,
@@ -254,6 +267,7 @@ pub const File = struct {
.inode = info.InternalInformation.IndexNumber,
.size = @bitCast(u64, info.StandardInformation.EndOfFile),
.mode = 0,
.kind = if (info.StandardInformation.Directory == 0) .File else .Directory,
.atime = windows.fromSysTime(info.BasicInformation.LastAccessTime),
.mtime = windows.fromSysTime(info.BasicInformation.LastWriteTime),
.ctime = windows.fromSysTime(info.BasicInformation.CreationTime),
@@ -268,6 +282,27 @@ pub const File = struct {
.inode = st.ino,
.size = @bitCast(u64, st.size),
.mode = st.mode,
.kind = switch (builtin.os.tag) {
.wasi => switch (st.filetype) {
os.FILETYPE_BLOCK_DEVICE => Kind.BlockDevice,
os.FILETYPE_CHARACTER_DEVICE => Kind.CharacterDevice,
os.FILETYPE_DIRECTORY => Kind.Directory,
os.FILETYPE_SYMBOLIC_LINK => Kind.SymLink,
os.FILETYPE_REGULAR_FILE => Kind.File,
os.FILETYPE_SOCKET_STREAM, os.FILETYPE_SOCKET_DGRAM => Kind.UnixDomainSocket,
else => Kind.Unknown,
},
else => switch (st.mode & os.S_IFMT) {
os.S_IFBLK => Kind.BlockDevice,
os.S_IFCHR => Kind.CharacterDevice,
os.S_IFDIR => Kind.Directory,
os.S_IFIFO => Kind.NamedPipe,
os.S_IFLNK => Kind.SymLink,
os.S_IFREG => Kind.File,
os.S_IFSOCK => Kind.UnixDomainSocket,
else => Kind.Unknown,
},
},
.atime = @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec,
.mtime = @as(i128, mtime.tv_sec) * std.time.ns_per_s + mtime.tv_nsec,
.ctime = @as(i128, ctime.tv_sec) * std.time.ns_per_s + ctime.tv_nsec,
@@ -306,6 +341,33 @@ pub const File = struct {
try os.futimens(self.handle, &times);
}

/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
pub fn readAllAlloc(self: File, allocator: *mem.Allocator, stat_size: u64, max_bytes: usize) ![]u8 {
return self.readAllAllocOptions(allocator, stat_size, max_bytes, @alignOf(u8), null);
}

/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
/// Allows specifying alignment and a sentinel value.
pub fn readAllAllocOptions(
self: File,
allocator: *mem.Allocator,
stat_size: u64,
max_bytes: usize,
comptime alignment: u29,
comptime optional_sentinel: ?u8,
) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
const size = math.cast(usize, stat_size) catch math.maxInt(usize);
if (size > max_bytes) return error.FileTooBig;

const buf = try allocator.allocWithOptions(u8, size, alignment, optional_sentinel);
errdefer allocator.free(buf);

try self.reader().readNoEof(buf);
return buf;
}

pub const ReadError = os.ReadError;
pub const PReadError = os.PReadError;

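With `readAllAlloc` now taking the expected size up front, the usual pattern is to pass the `stat`/`getEndPos` size and let `max_bytes` cap it. A hedged usage sketch; `notes.txt` is a hypothetical file:

```
const std = @import("std");

fn readNotes(allocator: *std.mem.Allocator) ![]u8 {
    var file = try std.fs.cwd().openFile("notes.txt", .{});
    defer file.close();
    const stat_size = try file.getEndPos();
    // Returns error.FileTooBig if the file exceeds 1 MiB.
    return file.readAllAlloc(allocator, stat_size, 1024 * 1024);
}
```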
@@ -1034,7 +1034,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8)
var from_it = mem.tokenize(resolved_from, "/\\");
var to_it = mem.tokenize(resolved_to, "/\\");
while (true) {
const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
const from_component = from_it.next() orelse return allocator.dupe(u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
@@ -1085,7 +1085,7 @@ pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![
var from_it = mem.tokenize(resolved_from, "/");
var to_it = mem.tokenize(resolved_to, "/");
while (true) {
const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
const from_component = from_it.next() orelse return allocator.dupe(u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
if (mem.eql(u8, from_component, to_component))

@@ -1,7 +1,157 @@
const std = @import("../std.zig");
const testing = std.testing;
const builtin = std.builtin;
const fs = std.fs;
const mem = std.mem;
const wasi = std.os.wasi;

const ArenaAllocator = std.heap.ArenaAllocator;
const Dir = std.fs.Dir;
const File = std.fs.File;
const tmpDir = testing.tmpDir;

test "Dir.Iterator" {
var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();

// First, create a couple of entries to iterate over.
const file = try tmp_dir.dir.createFile("some_file", .{});
file.close();

try tmp_dir.dir.makeDir("some_dir");

var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();

var entries = std.ArrayList(Dir.Entry).init(&arena.allocator);

// Create iterator.
var iter = tmp_dir.dir.iterate();
while (try iter.next()) |entry| {
// We cannot just store `entry`, because on Windows the iterator re-uses the
// name buffer, which means the `name` pointer would be shared between entries!
const name = try arena.allocator.dupe(u8, entry.name);
try entries.append(Dir.Entry{ .name = name, .kind = entry.kind });
}

testing.expect(entries.items.len == 2); // note that the Iterator skips '.' and '..'
testing.expect(contains(&entries, Dir.Entry{ .name = "some_file", .kind = Dir.Entry.Kind.File }));
testing.expect(contains(&entries, Dir.Entry{ .name = "some_dir", .kind = Dir.Entry.Kind.Directory }));
}

fn entry_eql(lhs: Dir.Entry, rhs: Dir.Entry) bool {
return mem.eql(u8, lhs.name, rhs.name) and lhs.kind == rhs.kind;
}

fn contains(entries: *const std.ArrayList(Dir.Entry), el: Dir.Entry) bool {
for (entries.items) |entry| {
if (entry_eql(entry, el)) return true;
}
return false;
}

test "readAllAlloc" {
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();

var file = try tmp_dir.dir.createFile("test_file", .{ .read = true });
defer file.close();

const buf1 = try file.readAllAlloc(testing.allocator, 0, 1024);
defer testing.allocator.free(buf1);
testing.expect(buf1.len == 0);

const write_buf: []const u8 = "this is a test.\nthis is a test.\nthis is a test.\nthis is a test.\n";
try file.writeAll(write_buf);
try file.seekTo(0);
const file_size = try file.getEndPos();

// max_bytes > file_size
const buf2 = try file.readAllAlloc(testing.allocator, file_size, 1024);
defer testing.allocator.free(buf2);
testing.expectEqual(write_buf.len, buf2.len);
testing.expect(std.mem.eql(u8, write_buf, buf2));
try file.seekTo(0);

// max_bytes == file_size
const buf3 = try file.readAllAlloc(testing.allocator, file_size, write_buf.len);
defer testing.allocator.free(buf3);
testing.expectEqual(write_buf.len, buf3.len);
testing.expect(std.mem.eql(u8, write_buf, buf3));

// max_bytes < file_size
testing.expectError(error.FileTooBig, file.readAllAlloc(testing.allocator, file_size, write_buf.len - 1));
}

test "directory operations on files" {
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();

const test_file_name = "test_file";

var file = try tmp_dir.dir.createFile(test_file_name, .{ .read = true });
file.close();

testing.expectError(error.PathAlreadyExists, tmp_dir.dir.makeDir(test_file_name));
testing.expectError(error.NotDir, tmp_dir.dir.openDir(test_file_name, .{}));
testing.expectError(error.NotDir, tmp_dir.dir.deleteDir(test_file_name));

if (builtin.os.tag != .wasi) {
// TODO: use Dir's realpath function once that exists
const absolute_path = blk: {
const relative_path = try fs.path.join(testing.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..], test_file_name });
defer testing.allocator.free(relative_path);
break :blk try fs.realpathAlloc(testing.allocator, relative_path);
};
defer testing.allocator.free(absolute_path);

testing.expectError(error.PathAlreadyExists, fs.makeDirAbsolute(absolute_path));
testing.expectError(error.NotDir, fs.deleteDirAbsolute(absolute_path));
}

// ensure the file still exists and is a file as a sanity check
file = try tmp_dir.dir.openFile(test_file_name, .{});
const stat = try file.stat();
testing.expect(stat.kind == .File);
file.close();
}

test "file operations on directories" {
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();

const test_dir_name = "test_dir";

try tmp_dir.dir.makeDir(test_dir_name);

testing.expectError(error.IsDir, tmp_dir.dir.createFile(test_dir_name, .{}));
testing.expectError(error.IsDir, tmp_dir.dir.deleteFile(test_dir_name));
// Currently, WASI will return error.Unexpected (via ENOTCAPABLE) when attempting fd_read on a directory handle.
// TODO: Re-enable on WASI once https://github.com/bytecodealliance/wasmtime/issues/1935 is resolved.
if (builtin.os.tag != .wasi) {
testing.expectError(error.IsDir, tmp_dir.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize)));
}
// Note: The `.write = true` is necessary to ensure the error occurs on all platforms.
// TODO: Add a read-only test as well, see https://github.com/ziglang/zig/issues/5732
testing.expectError(error.IsDir, tmp_dir.dir.openFile(test_dir_name, .{ .write = true }));

if (builtin.os.tag != .wasi) {
// TODO: use Dir's realpath function once that exists
const absolute_path = blk: {
const relative_path = try fs.path.join(testing.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..], test_dir_name });
defer testing.allocator.free(relative_path);
break :blk try fs.realpathAlloc(testing.allocator, relative_path);
};
defer testing.allocator.free(absolute_path);

testing.expectError(error.IsDir, fs.createFileAbsolute(absolute_path, .{}));
testing.expectError(error.IsDir, fs.deleteFileAbsolute(absolute_path));
}

// ensure the directory still exists as a sanity check
var dir = try tmp_dir.dir.openDir(test_dir_name, .{});
dir.close();
}

test "openSelfExe" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
@@ -10,6 +160,163 @@ test "openSelfExe" {
self_exe_file.close();
}

test "makePath, put some files in it, deleteTree" {
|
||||
var tmp = tmpDir(.{});
|
||||
defer tmp.cleanup();
|
||||
|
||||
try tmp.dir.makePath("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "c");
|
||||
try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "c" ++ fs.path.sep_str ++ "file.txt", "nonsense");
|
||||
try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "file2.txt", "blah");
|
||||
try tmp.dir.deleteTree("os_test_tmp");
|
||||
if (tmp.dir.openDir("os_test_tmp", .{})) |dir| {
|
||||
@panic("expected error");
|
||||
} else |err| {
|
||||
testing.expect(err == error.FileNotFound);
|
||||
}
|
||||
}
|
||||
|
||||
test "access file" {
|
||||
if (builtin.os.tag == .wasi) return error.SkipZigTest;
|
||||
|
||||
var tmp = tmpDir(.{});
|
||||
defer tmp.cleanup();
|
||||
|
||||
try tmp.dir.makePath("os_test_tmp");
|
||||
if (tmp.dir.access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", .{})) |ok| {
|
||||
@panic("expected error");
|
||||
} else |err| {
|
||||
testing.expect(err == error.FileNotFound);
|
||||
}
|
||||
|
||||
try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", "");
|
||||
try tmp.dir.access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", .{});
|
||||
try tmp.dir.deleteTree("os_test_tmp");
|
||||
}
|
||||
|
||||
test "sendfile" {
|
||||
var tmp = tmpDir(.{});
|
||||
defer tmp.cleanup();
|
||||
|
||||
try tmp.dir.makePath("os_test_tmp");
|
||||
defer tmp.dir.deleteTree("os_test_tmp") catch {};
|
||||
|
||||
var dir = try tmp.dir.openDir("os_test_tmp", .{});
|
||||
defer dir.close();
|
||||
|
||||
const line1 = "line1\n";
|
||||
const line2 = "second line\n";
|
||||
var vecs = [_]std.os.iovec_const{
|
||||
.{
|
||||
.iov_base = line1,
|
||||
.iov_len = line1.len,
|
||||
},
|
||||
.{
|
||||
.iov_base = line2,
|
||||
.iov_len = line2.len,
|
||||
},
|
||||
};
|
||||
|
||||
var src_file = try dir.createFile("sendfile1.txt", .{ .read = true });
|
||||
defer src_file.close();
|
||||
|
||||
try src_file.writevAll(&vecs);
|
||||
|
||||
var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true });
|
||||
defer dest_file.close();
|
||||
|
||||
const header1 = "header1\n";
|
||||
const header2 = "second header\n";
|
||||
const trailer1 = "trailer1\n";
|
||||
const trailer2 = "second trailer\n";
|
||||
var hdtr = [_]std.os.iovec_const{
|
||||
.{
|
||||
.iov_base = header1,
|
||||
.iov_len = header1.len,
|
||||
},
|
||||
.{
|
||||
.iov_base = header2,
|
||||
.iov_len = header2.len,
|
||||
},
|
||||
.{
|
||||
.iov_base = trailer1,
|
||||
.iov_len = trailer1.len,
|
||||
},
|
||||
.{
|
||||
.iov_base = trailer2,
|
||||
.iov_len = trailer2.len,
|
||||
},
|
||||
};
|
||||
|
||||
var written_buf: [100]u8 = undefined;
|
||||
try dest_file.writeFileAll(src_file, .{
|
||||
.in_offset = 1,
|
||||
.in_len = 10,
|
||||
.headers_and_trailers = &hdtr,
|
||||
.header_count = 2,
|
||||
});
|
||||
const amt = try dest_file.preadAll(&written_buf, 0);
|
||||
testing.expect(mem.eql(u8, written_buf[0..amt], "header1\nsecond header\nine1\nsecontrailer1\nsecond trailer\n"));
|
||||
}
|
||||
|
||||
test "fs.copyFile" {
|
||||
const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP";
|
||||
const src_file = "tmp_test_copy_file.txt";
|
||||
const dest_file = "tmp_test_copy_file2.txt";
|
||||
const dest_file2 = "tmp_test_copy_file3.txt";
|
||||
|
||||
var tmp = tmpDir(.{});
|
||||
defer tmp.cleanup();
|
||||
|
||||
try tmp.dir.writeFile(src_file, data);
|
||||
defer tmp.dir.deleteFile(src_file) catch {};
|
||||
|
||||
try tmp.dir.copyFile(src_file, tmp.dir, dest_file, .{});
|
||||
defer tmp.dir.deleteFile(dest_file) catch {};
|
||||
|
||||
try tmp.dir.copyFile(src_file, tmp.dir, dest_file2, .{ .override_mode = File.default_mode });
|
||||
defer tmp.dir.deleteFile(dest_file2) catch {};
|
||||
|
||||
try expectFileContents(tmp.dir, dest_file, data);
|
||||
try expectFileContents(tmp.dir, dest_file2, data);
|
||||
}
|
||||
|
||||
fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void {
|
||||
const contents = try dir.readFileAlloc(testing.allocator, file_path, 1000);
|
||||
defer testing.allocator.free(contents);
|
||||
|
||||
testing.expectEqualSlices(u8, data, contents);
|
||||
}
|
||||
|
||||
test "AtomicFile" {
|
||||
const test_out_file = "tmp_atomic_file_test_dest.txt";
|
||||
const test_content =
|
||||
\\ hello!
|
||||
\\ this is a test file
|
||||
;
|
||||
|
||||
var tmp = tmpDir(.{});
|
||||
defer tmp.cleanup();
|
||||
|
||||
{
|
||||
var af = try tmp.dir.atomicFile(test_out_file, .{});
|
||||
defer af.deinit();
|
||||
try af.file.writeAll(test_content);
|
||||
try af.finish();
|
||||
}
|
||||
const content = try tmp.dir.readFileAlloc(testing.allocator, test_out_file, 9999);
|
||||
defer testing.allocator.free(content);
|
||||
testing.expect(mem.eql(u8, content, test_content));
|
||||
|
||||
try tmp.dir.deleteFile(test_out_file);
|
||||
}
|
||||
|
||||
test "realpath" {
|
||||
if (builtin.os.tag == .wasi) return error.SkipZigTest;
|
||||
|
||||
var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
|
||||
testing.expectError(error.FileNotFound, fs.realpath("definitely_bogus_does_not_exist1234", &buf));
|
||||
}
|
||||
|
||||
const FILE_LOCK_TEST_SLEEP_TIME = 5 * std.time.ns_per_ms;
|
||||
|
||||
test "open file with exclusive nonblocking lock twice" {
|
||||
@@ -116,7 +423,7 @@ test "create file, lock and read from multiple process at once" {
|
||||
test "open file with exclusive nonblocking lock twice (absolute paths)" {
|
||||
if (builtin.os.tag == .wasi) return error.SkipZigTest;
|
||||
|
||||
const allocator = std.testing.allocator;
|
||||
const allocator = testing.allocator;
|
||||
|
||||
const file_paths: [1][]const u8 = .{"zig-test-absolute-paths.txt"};
|
||||
const filename = try fs.path.resolve(allocator, &file_paths);
|
||||
@@ -126,7 +433,7 @@ test "open file with exclusive nonblocking lock twice (absolute paths)" {
|
||||
|
||||
const file2 = fs.createFileAbsolute(filename, .{ .lock = .Exclusive, .lock_nonblocking = true });
|
||||
file1.close();
|
||||
std.testing.expectError(error.WouldBlock, file2);
|
||||
testing.expectError(error.WouldBlock, file2);
|
||||
|
||||
try fs.deleteFileAbsolute(filename);
|
||||
}
|
||||
@@ -187,7 +494,7 @@ const FileLockTestContext = struct {
|
||||
};
|
||||
|
||||
fn run_lock_file_test(contexts: []FileLockTestContext) !void {
|
||||
var threads = std.ArrayList(*std.Thread).init(std.testing.allocator);
|
||||
var threads = std.ArrayList(*std.Thread).init(testing.allocator);
|
||||
defer {
|
||||
for (threads.items) |thread| {
|
||||
thread.wait();
|
||||
|
||||
@@ -1,15 +1,42 @@
|
||||
const std = @import("std");
|
||||
const os = std.os;
|
||||
const mem = std.mem;
|
||||
const math = std.math;
|
||||
const Allocator = mem.Allocator;
|
||||
|
||||
usingnamespace std.os.wasi;
|
||||
|
||||
/// Type-tag of WASI preopen.
|
||||
///
|
||||
/// WASI currently offers only `Dir` as a valid preopen resource.
|
||||
pub const PreopenTypeTag = enum {
|
||||
Dir,
|
||||
};
|
||||
|
||||
/// Type of WASI preopen.
|
||||
///
|
||||
/// WASI currently offers only `Dir` as a valid preopen resource.
|
||||
pub const PreopenType = enum {
|
||||
Dir,
|
||||
pub const PreopenType = union(PreopenTypeTag) {
|
||||
/// Preopened directory type.
|
||||
Dir: []const u8,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn eql(self: Self, other: PreopenType) bool {
|
||||
if (!mem.eql(u8, @tagName(self), @tagName(other))) return false;
|
||||
|
||||
switch (self) {
|
||||
PreopenTypeTag.Dir => |this_path| return mem.eql(u8, this_path, other.Dir),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype) !void {
|
||||
try out_stream.print("PreopenType{{ ", .{});
|
||||
switch (self) {
|
||||
PreopenType.Dir => |path| try out_stream.print(".Dir = '{}'", .{path}),
|
||||
}
|
||||
return out_stream.print(" }}", .{});
|
||||
}
|
||||
};
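
A usage sketch for the new tagged union (the paths here are hypothetical, and it assumes this file is reachable as std.fs.wasi):

const std = @import("std");
const PreopenType = std.fs.wasi.PreopenType;

test "PreopenType.eql compares tag and payload" {
    const a = PreopenType{ .Dir = "/tmp" };
    const b = PreopenType{ .Dir = "/tmp" };
    std.testing.expect(a.eql(b));
    std.testing.expect(!a.eql(PreopenType{ .Dir = "/home" }));
}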

/// WASI preopen struct. This struct consists of a WASI file descriptor
@@ -20,29 +47,15 @@ pub const Preopen = struct {
    fd: fd_t,

    /// Type of the preopen.
    @"type": union(PreopenType) {
        /// Path to a preopened directory.
        Dir: []const u8,
    },
    @"type": PreopenType,

    const Self = @This();

    /// Construct new `Preopen` instance of type `PreopenType.Dir` from
    /// WASI file descriptor and WASI path.
    pub fn newDir(fd: fd_t, path: []const u8) Self {
        return Self{
    /// Construct new `Preopen` instance.
    pub fn new(fd: fd_t, preopen_type: PreopenType) Preopen {
        return Preopen{
            .fd = fd,
            .@"type" = .{ .Dir = path },
            .@"type" = preopen_type,
        };
    }

    pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: var) !void {
        try out_stream.print("{{ .fd = {}, ", .{self.fd});
        switch (self.@"type") {
            PreopenType.Dir => |path| try out_stream.print(".Dir = '{}'", .{path}),
        }
        return out_stream.print(" }}", .{});
    }
};
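
With the union in place, constructing a preopen goes through the single new constructor; a minimal sketch (the fd value 3 is illustrative only):

const preopen = Preopen.new(3, PreopenType{ .Dir = "." });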

/// Dynamically-sized array list of WASI preopens. This struct is a
@@ -60,7 +73,7 @@ pub const PreopenList = struct {

    const Self = @This();

    pub const Error = os.UnexpectedError || Allocator.Error;
    pub const Error = error{ OutOfMemory, Overflow } || os.UnexpectedError;

    /// Deinitialize with `deinit`.
    pub fn init(allocator: *Allocator) Self {
@@ -82,6 +95,12 @@ pub const PreopenList = struct {
    ///
    /// If called more than once, it will clear its contents every time before
    /// issuing the syscalls.
    ///
    /// In the unlikely event of overflowing the number of available file descriptors,
    /// returns `error.Overflow`. In this case, even though an error condition was reached,
    /// the preopen list still contains all preopened file descriptors that are valid
    /// for use. Therefore, it is fine to call `find`, `asSlice`, or `toOwnedSlice`. Finally,
    /// `deinit` still must be called!
    pub fn populate(self: *Self) Error!void {
        // Clear contents if we're being called again
        for (self.toOwnedSlice()) |preopen| {
@@ -98,6 +117,7 @@ pub const PreopenList = struct {
                ESUCCESS => {},
                ENOTSUP => {
                    // not a preopen, so keep going
                    fd = try math.add(fd_t, fd, 1);
                    continue;
                },
                EBADF => {
@@ -113,24 +133,18 @@ pub const PreopenList = struct {
                ESUCCESS => {},
                else => |err| return os.unexpectedErrno(err),
            }
            const preopen = Preopen.newDir(fd, path_buf);
            const preopen = Preopen.new(fd, PreopenType{ .Dir = path_buf });
            try self.buffer.append(preopen);
            fd += 1;
            fd = try math.add(fd_t, fd, 1);
        }
    }

    /// Find preopen by path. If the preopen exists, return it.
    /// Find preopen by type. If the preopen exists, return it.
    /// Otherwise, return `null`.
    ///
    /// TODO make the function more generic by searching by `PreopenType` union. This will
    /// be needed in the future when WASI extends its capabilities to resources
    /// other than preopened directories.
    pub fn find(self: Self, path: []const u8) ?*const Preopen {
        for (self.buffer.items) |preopen| {
            switch (preopen.@"type") {
                PreopenType.Dir => |preopen_path| {
                    if (mem.eql(u8, path, preopen_path)) return &preopen;
                },
    pub fn find(self: Self, preopen_type: PreopenType) ?*const Preopen {
        for (self.buffer.items) |*preopen| {
            if (preopen.@"type".eql(preopen_type)) {
                return preopen;
            }
        }
        return null;
@@ -156,7 +170,7 @@ test "extracting WASI preopens" {
    try preopens.populate();

    std.testing.expectEqual(@as(usize, 1), preopens.asSlice().len);
    const preopen = preopens.find(".") orelse unreachable;
    std.testing.expect(std.mem.eql(u8, ".", preopen.@"type".Dir));
    const preopen = preopens.find(PreopenType{ .Dir = "." }) orelse unreachable;
    std.testing.expect(preopen.@"type".eql(PreopenType{ .Dir = "." }));
    std.testing.expectEqual(@as(usize, 3), preopen.fd);
}
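
Given the populate() docs above, error.Overflow can be tolerated while keeping the valid preopens; a sketch, assuming allocator is whatever *Allocator the caller already has:

var preopens = PreopenList.init(allocator);
defer preopens.deinit();
preopens.populate() catch |err| switch (err) {
    error.Overflow => {}, // the list still holds every preopen gathered so far
    else => |e| return e,
};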

@@ -360,7 +360,7 @@ pub fn Watch(comptime V: type) type {

        fn addFileWindows(self: *Self, file_path: []const u8, value: V) !?V {
            // TODO we might need to convert dirname and basename to canonical file paths ("short"?)
            const dirname = try std.mem.dupe(self.allocator, u8, std.fs.path.dirname(file_path) orelse ".");
            const dirname = try self.allocator.dupe(u8, std.fs.path.dirname(file_path) orelse ".");
            var dirname_consumed = false;
            defer if (!dirname_consumed) self.allocator.free(dirname);

@@ -21,7 +21,7 @@ pub const HashStrategy = enum {
};

/// Helper function to hash a pointer and mutate the strategy if needed.
pub fn hashPointer(hasher: var, key: var, comptime strat: HashStrategy) void {
pub fn hashPointer(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
    const info = @typeInfo(@TypeOf(key));

    switch (info.Pointer.size) {
@@ -53,7 +53,7 @@ pub fn hashPointer(hasher: var, key: var, comptime strat: HashStrategy) void {
}

/// Helper function to hash a set of contiguous objects, from an array or slice.
pub fn hashArray(hasher: var, key: var, comptime strat: HashStrategy) void {
pub fn hashArray(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
    switch (strat) {
        .Shallow => {
            // TODO detect via a trait when Key has no padding bits to
@@ -73,7 +73,7 @@ pub fn hashArray(hasher: var, key: var, comptime strat: HashStrategy) void {

/// Provides generic hashing for any eligible type.
/// Strategy is provided to determine if pointers should be followed or not.
pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
    const Key = @TypeOf(key);
    switch (@typeInfo(Key)) {
        .NoReturn,
@@ -161,7 +161,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
/// Provides generic hashing for any eligible type.
/// Only hashes `key` itself, pointers are not followed.
/// Slices are rejected to avoid ambiguity on the user's intention.
pub fn autoHash(hasher: var, key: var) void {
pub fn autoHash(hasher: anytype, key: anytype) void {
    const Key = @TypeOf(key);
    if (comptime meta.trait.isSlice(Key)) {
        comptime assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs updated
@@ -181,28 +181,28 @@ pub fn autoHash(hasher: var, key: var) void {
const testing = std.testing;
const Wyhash = std.hash.Wyhash;

fn testHash(key: var) u64 {
fn testHash(key: anytype) u64 {
    // Any hash could be used here, for testing autoHash.
    var hasher = Wyhash.init(0);
    hash(&hasher, key, .Shallow);
    return hasher.final();
}

fn testHashShallow(key: var) u64 {
fn testHashShallow(key: anytype) u64 {
    // Any hash could be used here, for testing autoHash.
    var hasher = Wyhash.init(0);
    hash(&hasher, key, .Shallow);
    return hasher.final();
}
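
Call sites are unchanged by the var -> anytype rename; a small self-contained sketch of hashing a struct key through std.hash.autoHash:

const std = @import("std");

fn examplePointHash() u64 {
    const Point = struct { x: i32, y: i32 };
    var hasher = std.hash.Wyhash.init(0);
    std.hash.autoHash(&hasher, Point{ .x = 1, .y = 2 });
    return hasher.final();
}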

fn testHashDeep(key: var) u64 {
fn testHashDeep(key: anytype) u64 {
    // Any hash could be used here, for testing autoHash.
    var hasher = Wyhash.init(0);
    hash(&hasher, key, .Deep);
    return hasher.final();
}

fn testHashDeepRecursive(key: var) u64 {
fn testHashDeepRecursive(key: anytype) u64 {
    // Any hash could be used here, for testing autoHash.
    var hasher = Wyhash.init(0);
    hash(&hasher, key, .DeepRecursive);

@@ -88,7 +88,7 @@ const Result = struct {

const block_size: usize = 8 * 8192;

pub fn benchmarkHash(comptime H: var, bytes: usize) !Result {
pub fn benchmarkHash(comptime H: anytype, bytes: usize) !Result {
    var h = blk: {
        if (H.init_u8s) |init| {
            break :blk H.ty.init(init);
@@ -119,7 +119,7 @@ pub fn benchmarkHash(comptime H: var, bytes: usize) !Result {
    };
}

pub fn benchmarkHashSmallKeys(comptime H: var, key_size: usize, bytes: usize) !Result {
pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize) !Result {
    const key_count = bytes / key_size;
    var block: [block_size]u8 = undefined;
    prng.random.bytes(block[0..]);
@@ -172,7 +172,7 @@ fn mode(comptime x: comptime_int) comptime_int {
}

pub fn main() !void {
    const stdout = std.io.getStdOut().outStream();
    const stdout = std.io.getStdOut().writer();

    var buffer: [1024]u8 = undefined;
    var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
@@ -248,13 +248,13 @@ pub fn main() !void {
        if (H.has_iterative_api) {
            prng.seed(seed);
            const result = try benchmarkHash(H, count);
            try stdout.print(" iterative: {:4} MiB/s [{x:0<16}]\n", .{ result.throughput / (1 * MiB), result.hash });
            try stdout.print(" iterative: {:5} MiB/s [{x:0<16}]\n", .{ result.throughput / (1 * MiB), result.hash });
        }

        if (!test_iterative_only) {
            prng.seed(seed);
            const result_small = try benchmarkHashSmallKeys(H, key_size, count);
            try stdout.print(" small keys: {:4} MiB/s [{x:0<16}]\n", .{ result_small.throughput / (1 * MiB), result_small.hash });
            try stdout.print(" small keys: {:5} MiB/s [{x:0<16}]\n", .{ result_small.throughput / (1 * MiB), result_small.hash });
        }
    }
}

@@ -354,7 +354,7 @@ pub const CityHash64 = struct {
    }
};

fn SMHasherTest(comptime hash_fn: var, comptime hashbits: u32) u32 {
fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
    const hashbytes = hashbits / 8;
    var key: [256]u8 = undefined;
    var hashes: [hashbytes * 256]u8 = undefined;

@@ -279,7 +279,7 @@ pub const Murmur3_32 = struct {
    }
};

fn SMHasherTest(comptime hash_fn: var, comptime hashbits: u32) u32 {
fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
    const hashbytes = hashbits / 8;
    var key: [256]u8 = undefined;
    var hashes: [hashbytes * 256]u8 = undefined;
1153 lib/std/hash_map.zig
File diff suppressed because it is too large
618 lib/std/heap.zig
@@ -15,23 +15,59 @@ pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;

const Allocator = mem.Allocator;

usingnamespace if (comptime @hasDecl(c, "malloc_size"))
    struct {
        pub const supports_malloc_size = true;
        pub const malloc_size = c.malloc_size;
    }
else if (comptime @hasDecl(c, "malloc_usable_size"))
    struct {
        pub const supports_malloc_size = true;
        pub const malloc_size = c.malloc_usable_size;
    }
else
    struct {
        pub const supports_malloc_size = false;
    };

pub const c_allocator = &c_allocator_state;
var c_allocator_state = Allocator{
    .reallocFn = cRealloc,
    .shrinkFn = cShrink,
    .allocFn = cAlloc,
    .resizeFn = cResize,
};

fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
    assert(new_align <= @alignOf(c_longdouble));
    const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr);
    const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory;
    return @ptrCast([*]u8, buf)[0..new_size];
fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
    assert(ptr_align <= @alignOf(c_longdouble));
    const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
    if (len_align == 0) {
        return ptr[0..len];
    }
    const full_len = init: {
        if (supports_malloc_size) {
            const s = malloc_size(ptr);
            assert(s >= len);
            break :init s;
        }
        break :init len;
    };
    return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
}

fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
    const old_ptr = @ptrCast(*c_void, old_mem.ptr);
    const buf = c.realloc(old_ptr, new_size) orelse return old_mem[0..new_size];
    return @ptrCast([*]u8, buf)[0..new_size];
fn cResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
    if (new_len == 0) {
        c.free(buf.ptr);
        return 0;
    }
    if (new_len <= buf.len) {
        return mem.alignAllocLen(buf.len, new_len, len_align);
    }
    if (supports_malloc_size) {
        const full_len = malloc_size(buf.ptr);
        if (new_len <= full_len) {
            return mem.alignAllocLen(full_len, new_len, len_align);
        }
    }
    return error.OutOfMemory;
}
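
The public interface is untouched by the cAlloc/cResize rewrite: callers keep using the std.mem.Allocator methods, which now dispatch through allocFn and resizeFn. A minimal round trip:

test "c_allocator round trip (sketch)" {
    const allocator = std.heap.c_allocator;
    var buf = try allocator.alloc(u8, 256);
    defer allocator.free(buf);
    buf = try allocator.realloc(buf, 128); // shrinking lands in cResize's new_len <= buf.len branch
}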

/// This allocator makes a syscall directly for every allocation and free.
@@ -44,19 +80,27 @@ else
    &page_allocator_state;

var page_allocator_state = Allocator{
    .reallocFn = PageAllocator.realloc,
    .shrinkFn = PageAllocator.shrink,
    .allocFn = PageAllocator.alloc,
    .resizeFn = PageAllocator.resize,
};
var wasm_page_allocator_state = Allocator{
    .reallocFn = WasmPageAllocator.realloc,
    .shrinkFn = WasmPageAllocator.shrink,
    .allocFn = WasmPageAllocator.alloc,
    .resizeFn = WasmPageAllocator.resize,
};

pub const direct_allocator = @compileError("deprecated; use std.heap.page_allocator");

/// Verifies that the adjusted length will still map to the full length
pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
    const aligned_len = mem.alignAllocLen(full_len, len, len_align);
    assert(mem.alignForward(aligned_len, mem.page_size) == full_len);
    return aligned_len;
}
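
A worked example of the len_align contract, assuming mem.alignAllocLen returns the requested length when len_align == 0 and otherwise the largest usable length that is a multiple of len_align:

// One 4096-byte page backing a 100-byte request (page_size == 4096):
//   alignPageAllocLen(4096, 100, 0) == 100   // exact-length semantics
//   alignPageAllocLen(4096, 100, 1) == 4096  // caller may use the whole page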

const PageAllocator = struct {
    fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
        if (n == 0) return &[0]u8{};
    fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
        assert(n > 0);
        const alignedLen = mem.alignForward(n, mem.page_size);

        if (builtin.os.tag == .windows) {
            const w = os.windows;
@@ -68,21 +112,21 @@ const PageAllocator = struct {
            // see https://devblogs.microsoft.com/oldnewthing/?p=42223
            const addr = w.VirtualAlloc(
                null,
                n,
                alignedLen,
                w.MEM_COMMIT | w.MEM_RESERVE,
                w.PAGE_READWRITE,
            ) catch return error.OutOfMemory;

            // If the allocation is sufficiently aligned, use it.
            if (@ptrToInt(addr) & (alignment - 1) == 0) {
                return @ptrCast([*]u8, addr)[0..n];
                return @ptrCast([*]u8, addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
            }

            // If it wasn't, actually do an explicitly aligned allocation.
            w.VirtualFree(addr, 0, w.MEM_RELEASE);
            const alloc_size = n + alignment;
            const alloc_size = n + alignment - mem.page_size;

            const final_addr = while (true) {
            while (true) {
                // Reserve a range of memory large enough to find a sufficiently
                // aligned address.
                const reserved_addr = w.VirtualAlloc(
@@ -102,48 +146,49 @@ const PageAllocator = struct {
                // until it succeeds.
                const ptr = w.VirtualAlloc(
                    @intToPtr(*c_void, aligned_addr),
                    n,
                    alignedLen,
                    w.MEM_COMMIT | w.MEM_RESERVE,
                    w.PAGE_READWRITE,
                ) catch continue;

                return @ptrCast([*]u8, ptr)[0..n];
            };

            return @ptrCast([*]u8, final_addr)[0..n];
                return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(alignedLen, n, len_align)];
            }
        }

        const alloc_size = if (alignment <= mem.page_size) n else n + alignment;
        const maxDropLen = alignment - std.math.min(alignment, mem.page_size);
        const allocLen = if (maxDropLen <= alignedLen - n) alignedLen else mem.alignForward(alignedLen + maxDropLen, mem.page_size);
        const slice = os.mmap(
            null,
            mem.alignForward(alloc_size, mem.page_size),
            allocLen,
            os.PROT_READ | os.PROT_WRITE,
            os.MAP_PRIVATE | os.MAP_ANONYMOUS,
            -1,
            0,
        ) catch return error.OutOfMemory;
        if (alloc_size == n) return slice[0..n];
        assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size));

        const aligned_addr = mem.alignForward(@ptrToInt(slice.ptr), alignment);

        // Unmap the extra bytes that were only requested in order to guarantee
        // that the range of memory we were provided had a proper alignment in
        // it somewhere. The extra bytes could be at the beginning, or end, or both.
        const unused_start_len = aligned_addr - @ptrToInt(slice.ptr);
        if (unused_start_len != 0) {
            os.munmap(slice[0..unused_start_len]);
        }
        const aligned_end_addr = mem.alignForward(aligned_addr + n, mem.page_size);
        const unused_end_len = @ptrToInt(slice.ptr) + slice.len - aligned_end_addr;
        if (unused_end_len != 0) {
            os.munmap(@intToPtr([*]align(mem.page_size) u8, aligned_end_addr)[0..unused_end_len]);
        const dropLen = aligned_addr - @ptrToInt(slice.ptr);
        if (dropLen != 0) {
            os.munmap(slice[0..dropLen]);
        }

        return @intToPtr([*]u8, aligned_addr)[0..n];
        // Unmap extra pages
        const alignedBufferLen = allocLen - dropLen;
        if (alignedBufferLen > alignedLen) {
            os.munmap(@alignCast(mem.page_size, @intToPtr([*]u8, aligned_addr))[alignedLen..alignedBufferLen]);
        }

        return @intToPtr([*]u8, aligned_addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
    }

    fn shrink(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
    fn resize(allocator: *Allocator, buf_unaligned: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
        const new_size_aligned = mem.alignForward(new_size, mem.page_size);

        if (builtin.os.tag == .windows) {
            const w = os.windows;
            if (new_size == 0) {
@@ -153,100 +198,45 @@ const PageAllocator = struct {
                // is reserved in the initial allocation call to VirtualAlloc."
                // So we can only use MEM_RELEASE when actually releasing the
                // whole allocation.
                w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);
            } else {
                const base_addr = @ptrToInt(old_mem.ptr);
                const old_addr_end = base_addr + old_mem.len;
                const new_addr_end = base_addr + new_size;
                const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
                if (old_addr_end > new_addr_end_rounded) {
                w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE);
                return 0;
            }
            if (new_size < buf_unaligned.len) {
                const base_addr = @ptrToInt(buf_unaligned.ptr);
                const old_addr_end = base_addr + buf_unaligned.len;
                const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size);
                if (old_addr_end > new_addr_end) {
                    // For shrinking that is not releasing, we will only
                    // decommit the pages not needed anymore.
                    w.VirtualFree(
                        @intToPtr(*c_void, new_addr_end_rounded),
                        old_addr_end - new_addr_end_rounded,
                        @intToPtr(*c_void, new_addr_end),
                        old_addr_end - new_addr_end,
                        w.MEM_DECOMMIT,
                    );
                }
                return alignPageAllocLen(new_size_aligned, new_size, len_align);
            }
            return old_mem[0..new_size];
        }
        const base_addr = @ptrToInt(old_mem.ptr);
        const old_addr_end = base_addr + old_mem.len;
        const new_addr_end = base_addr + new_size;
        const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
        if (old_addr_end > new_addr_end_rounded) {
            const ptr = @intToPtr([*]align(mem.page_size) u8, new_addr_end_rounded);
            os.munmap(ptr[0 .. old_addr_end - new_addr_end_rounded]);
        }
        return old_mem[0..new_size];
    }

    fn realloc(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
        if (builtin.os.tag == .windows) {
            if (old_mem.len == 0) {
                return alloc(allocator, new_size, new_align);
            if (new_size == buf_unaligned.len) {
                return alignPageAllocLen(new_size_aligned, new_size, len_align);
            }

            if (new_size <= old_mem.len and new_align <= old_align) {
                return shrink(allocator, old_mem, old_align, new_size, new_align);
            }

            const w = os.windows;
            const base_addr = @ptrToInt(old_mem.ptr);

            if (new_align > old_align and base_addr & (new_align - 1) != 0) {
                // Current allocation doesn't satisfy the new alignment.
                // For now we'll do a new one no matter what, but maybe
                // there is something smarter to do instead.
                const result = try alloc(allocator, new_size, new_align);
                assert(old_mem.len != 0);
                @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
                w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);

                return result;
            }

            const old_addr_end = base_addr + old_mem.len;
            const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.page_size);
            const new_addr_end = base_addr + new_size;
            const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
            if (new_addr_end_rounded == old_addr_end_rounded) {
                // The reallocation fits in the already allocated pages.
                return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
            }
            assert(new_addr_end_rounded > old_addr_end_rounded);

            // We need to commit new pages.
            const additional_size = new_addr_end - old_addr_end_rounded;
            const realloc_addr = w.kernel32.VirtualAlloc(
                @intToPtr(*c_void, old_addr_end_rounded),
                additional_size,
                w.MEM_COMMIT | w.MEM_RESERVE,
                w.PAGE_READWRITE,
            ) orelse {
                // Committing new pages at the end of the existing allocation
                // failed, we need to try a new one.
                const new_alloc_mem = try alloc(allocator, new_size, new_align);
                @memcpy(new_alloc_mem.ptr, old_mem.ptr, old_mem.len);
                w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);

                return new_alloc_mem;
            };

            assert(@ptrToInt(realloc_addr) == old_addr_end_rounded);
            return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
            // new_size > buf_unaligned.len not implemented
            return error.OutOfMemory;
        }
        if (new_size <= old_mem.len and new_align <= old_align) {
            return shrink(allocator, old_mem, old_align, new_size, new_align);

        const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
        if (new_size_aligned == buf_aligned_len)
            return alignPageAllocLen(new_size_aligned, new_size, len_align);

        if (new_size_aligned < buf_aligned_len) {
            const ptr = @intToPtr([*]align(mem.page_size) u8, @ptrToInt(buf_unaligned.ptr) + new_size_aligned);
            os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
            if (new_size_aligned == 0)
                return 0;
            return alignPageAllocLen(new_size_aligned, new_size, len_align);
        }
        const result = try alloc(allocator, new_size, new_align);
        if (old_mem.len != 0) {
            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
            os.munmap(old_mem);
        }
        return result;

        // TODO: call mremap
        return error.OutOfMemory;
    }
};

@@ -299,7 +289,7 @@ const WasmPageAllocator = struct {
        // Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
        const not_found = std.math.maxInt(usize);

        fn useRecycled(self: FreeBlock, num_pages: usize) usize {
        fn useRecycled(self: FreeBlock, num_pages: usize, alignment: u29) usize {
            @setCold(true);
            for (self.data) |segment, i| {
                const spills_into_next = @bitCast(i128, segment) < 0;
@@ -312,7 +302,8 @@ const WasmPageAllocator = struct {
                    var count: usize = 0;
                    while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
                        count += 1;
                        if (count >= num_pages) {
                        const addr = j * mem.page_size;
                        if (count >= num_pages and mem.isAligned(addr, alignment)) {
                            self.setBits(j, num_pages, .used);
                            return j;
                        }
@@ -338,73 +329,72 @@ const WasmPageAllocator = struct {
    }

    fn nPages(memsize: usize) usize {
        return std.mem.alignForward(memsize, std.mem.page_size) / std.mem.page_size;
        return mem.alignForward(memsize, mem.page_size) / mem.page_size;
    }

    fn alloc(allocator: *Allocator, page_count: usize, alignment: u29) error{OutOfMemory}!usize {
        var idx = conventional.useRecycled(page_count);
        if (idx != FreeBlock.not_found) {
            return idx;
    fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
        const page_count = nPages(len);
        const page_idx = try allocPages(page_count, alignment);
        return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
    }
    fn allocPages(page_count: usize, alignment: u29) !usize {
        {
            const idx = conventional.useRecycled(page_count, alignment);
            if (idx != FreeBlock.not_found) {
                return idx;
            }
        }

        idx = extended.useRecycled(page_count);
        const idx = extended.useRecycled(page_count, alignment);
        if (idx != FreeBlock.not_found) {
            return idx + extendedOffset();
        }

        const prev_page_count = @wasmMemoryGrow(0, @intCast(u32, page_count));
        if (prev_page_count <= 0) {
        const next_page_idx = @wasmMemorySize(0);
        const next_page_addr = next_page_idx * mem.page_size;
        const aligned_addr = mem.alignForward(next_page_addr, alignment);
        const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
        const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count));
        if (result <= 0)
            return error.OutOfMemory;
        assert(result == next_page_idx);
        const aligned_page_idx = next_page_idx + drop_page_count;
        if (drop_page_count > 0) {
            freePages(next_page_idx, aligned_page_idx);
        }

        return @intCast(usize, prev_page_count);
        return @intCast(usize, aligned_page_idx);
    }

    pub fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
        if (new_align > std.mem.page_size) {
            return error.OutOfMemory;
    fn freePages(start: usize, end: usize) void {
        if (start < extendedOffset()) {
            conventional.recycle(start, std.math.min(extendedOffset(), end) - start);
        }
        if (end > extendedOffset()) {
            var new_end = end;
            if (!extended.isInitialized()) {
                // Steal the last page from the memory currently being recycled
                // TODO: would it be better if we use the first page instead?
                new_end -= 1;

        if (nPages(new_size) == nPages(old_mem.len)) {
            return old_mem.ptr[0..new_size];
        } else if (new_size < old_mem.len) {
            return shrink(allocator, old_mem, old_align, new_size, new_align);
        } else {
            const page_idx = try alloc(allocator, nPages(new_size), new_align);
            const new_mem = @intToPtr([*]u8, page_idx * std.mem.page_size)[0..new_size];
            std.mem.copy(u8, new_mem, old_mem);
            _ = shrink(allocator, old_mem, old_align, 0, 0);
            return new_mem;
                extended.data = @intToPtr([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)];
                // Since this is the first page being freed and we consume it, assume *nothing* is free.
                mem.set(u128, extended.data, PageStatus.none_free);
            }
            const clamped_start = std.math.max(extendedOffset(), start);
            extended.recycle(clamped_start - extendedOffset(), new_end - clamped_start);
        }
    }

    pub fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        @setCold(true);
        const free_start = nPages(@ptrToInt(old_mem.ptr) + new_size);
        var free_end = nPages(@ptrToInt(old_mem.ptr) + old_mem.len);

        if (free_end > free_start) {
            if (free_start < extendedOffset()) {
                const clamped_end = std.math.min(extendedOffset(), free_end);
                conventional.recycle(free_start, clamped_end - free_start);
            }

            if (free_end > extendedOffset()) {
                if (!extended.isInitialized()) {
                    // Steal the last page from the memory currently being recycled
                    // TODO: would it be better if we use the first page instead?
                    free_end -= 1;

                    extended.data = @intToPtr([*]u128, free_end * std.mem.page_size)[0 .. std.mem.page_size / @sizeOf(u128)];
                    // Since this is the first page being freed and we consume it, assume *nothing* is free.
                    std.mem.set(u128, extended.data, PageStatus.none_free);
                }
                const clamped_start = std.math.max(extendedOffset(), free_start);
                extended.recycle(clamped_start - extendedOffset(), free_end - clamped_start);
            }
    fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
        const aligned_len = mem.alignForward(buf.len, mem.page_size);
        if (new_len > aligned_len) return error.OutOfMemory;
        const current_n = nPages(aligned_len);
        const new_n = nPages(new_len);
        if (new_n != current_n) {
            const base = nPages(@ptrToInt(buf.ptr));
            freePages(base + new_n, base + current_n);
        }

        return old_mem[0..new_size];
        return if (new_len == 0) 0 else alignPageAllocLen(new_n * mem.page_size, new_len, len_align);
    }
};

@@ -418,8 +408,8 @@ pub const HeapAllocator = switch (builtin.os.tag) {
        pub fn init() HeapAllocator {
            return HeapAllocator{
                .allocator = Allocator{
                    .reallocFn = realloc,
                    .shrinkFn = shrink,
                    .allocFn = alloc,
                    .resizeFn = resize,
                },
                .heap_handle = null,
            };
@@ -431,11 +421,14 @@ pub const HeapAllocator = switch (builtin.os.tag) {
            }
        }

        fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
            const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
            if (n == 0) return &[0]u8{};
        fn getRecordPtr(buf: []u8) *align(1) usize {
            return @intToPtr(*align(1) usize, @ptrToInt(buf.ptr) + buf.len);
        }

            const amt = n + alignment + @sizeOf(usize);
        fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
            const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);

            const amt = n + ptr_align - 1 + @sizeOf(usize);
            const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
            const heap_handle = optional_heap_handle orelse blk: {
                const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
@@ -446,66 +439,60 @@ pub const HeapAllocator = switch (builtin.os.tag) {
            };
            const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
            const root_addr = @ptrToInt(ptr);
            const adjusted_addr = mem.alignForward(root_addr, alignment);
            const record_addr = adjusted_addr + n;
            @intToPtr(*align(1) usize, record_addr).* = root_addr;
            return @intToPtr([*]u8, adjusted_addr)[0..n];
        }

        fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
            return realloc(allocator, old_mem, old_align, new_size, new_align) catch {
                const old_adjusted_addr = @ptrToInt(old_mem.ptr);
                const old_record_addr = old_adjusted_addr + old_mem.len;
                const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
                const old_ptr = @intToPtr(*c_void, root_addr);
                const new_record_addr = old_record_addr - new_size + old_mem.len;
                @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
                return old_mem[0..new_size];
            const aligned_addr = mem.alignForward(root_addr, ptr_align);
            const return_len = init: {
                if (len_align == 0) break :init n;
                const full_len = os.windows.kernel32.HeapSize(heap_handle, 0, ptr);
                assert(full_len != std.math.maxInt(usize));
                assert(full_len >= amt);
                break :init mem.alignBackwardAnyAlign(full_len - (aligned_addr - root_addr), len_align);
            };
            const buf = @intToPtr([*]u8, aligned_addr)[0..return_len];
            getRecordPtr(buf).* = root_addr;
            return buf;
        }

        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
            if (old_mem.len == 0) return alloc(allocator, new_size, new_align);

        fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
            const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
            const old_adjusted_addr = @ptrToInt(old_mem.ptr);
            const old_record_addr = old_adjusted_addr + old_mem.len;
            const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
            const old_ptr = @intToPtr(*c_void, root_addr);

            if (new_size == 0) {
                os.windows.HeapFree(self.heap_handle.?, 0, old_ptr);
                return old_mem[0..0];
                os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
                return 0;
            }

            const amt = new_size + new_align + @sizeOf(usize);
            const root_addr = getRecordPtr(buf).*;
            const align_offset = @ptrToInt(buf.ptr) - root_addr;
            const amt = align_offset + new_size + @sizeOf(usize);
            const new_ptr = os.windows.kernel32.HeapReAlloc(
                self.heap_handle.?,
                0,
                old_ptr,
                os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
                @intToPtr(*c_void, root_addr),
                amt,
            ) orelse return error.OutOfMemory;
            const offset = old_adjusted_addr - root_addr;
            const new_root_addr = @ptrToInt(new_ptr);
            var new_adjusted_addr = new_root_addr + offset;
            const offset_is_valid = new_adjusted_addr + new_size + @sizeOf(usize) <= new_root_addr + amt;
            const offset_is_aligned = new_adjusted_addr % new_align == 0;
            if (!offset_is_valid or !offset_is_aligned) {
                // If HeapReAlloc didn't happen to move the memory to the new alignment,
                // or the memory starting at the old offset would be outside of the new allocation,
                // then we need to copy the memory to a valid aligned address and use that
                const new_aligned_addr = mem.alignForward(new_root_addr, new_align);
                @memcpy(@intToPtr([*]u8, new_aligned_addr), @intToPtr([*]u8, new_adjusted_addr), std.math.min(old_mem.len, new_size));
                new_adjusted_addr = new_aligned_addr;
            }
            const new_record_addr = new_adjusted_addr + new_size;
            @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
            return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
            assert(new_ptr == @intToPtr(*c_void, root_addr));
            const return_len = init: {
                if (len_align == 0) break :init new_size;
                const full_len = os.windows.kernel32.HeapSize(self.heap_handle.?, 0, new_ptr);
                assert(full_len != std.math.maxInt(usize));
                assert(full_len >= amt);
                break :init mem.alignBackwardAnyAlign(full_len - align_offset, len_align);
            };
            getRecordPtr(buf.ptr[0..return_len]).* = root_addr;
            return return_len;
        }
    },
    else => @compileError("Unsupported OS"),
};

fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
    return @ptrToInt(ptr) >= @ptrToInt(container.ptr) and
        @ptrToInt(ptr) < (@ptrToInt(container.ptr) + container.len);
}

fn sliceContainsSlice(container: []u8, slice: []u8) bool {
    return @ptrToInt(slice.ptr) >= @ptrToInt(container.ptr) and
        (@ptrToInt(slice.ptr) + slice.len) <= (@ptrToInt(container.ptr) + container.len);
}

pub const FixedBufferAllocator = struct {
    allocator: Allocator,
    end_index: usize,
@@ -514,19 +501,33 @@ pub const FixedBufferAllocator = struct {
    pub fn init(buffer: []u8) FixedBufferAllocator {
        return FixedBufferAllocator{
            .allocator = Allocator{
                .reallocFn = realloc,
                .shrinkFn = shrink,
                .allocFn = alloc,
                .resizeFn = resize,
            },
            .buffer = buffer,
            .end_index = 0,
        };
    }

    fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
    pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
        return sliceContainsPtr(self.buffer, ptr);
    }

    pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool {
        return sliceContainsSlice(self.buffer, slice);
    }

    /// NOTE: this will not work in all cases: if the last allocation had an adjusted_index,
    /// then we won't be able to determine what the last allocation was. This is because
    /// the alignForward operation done in alloc is not reversible.
    pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
        return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
    }

    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
        const adjusted_addr = mem.alignForward(addr, alignment);
        const adjusted_index = self.end_index + (adjusted_addr - addr);
        const aligned_addr = mem.alignForward(@ptrToInt(self.buffer.ptr) + self.end_index, ptr_align);
        const adjusted_index = aligned_addr - @ptrToInt(self.buffer.ptr);
        const new_end_index = adjusted_index + n;
        if (new_end_index > self.buffer.len) {
            return error.OutOfMemory;
@@ -537,30 +538,28 @@ pub const FixedBufferAllocator = struct {
        return result;
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
    fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        assert(old_mem.len <= self.end_index);
        if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and
            mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr))
        {
            const start_index = self.end_index - old_mem.len;
            const new_end_index = start_index + new_size;
            if (new_end_index > self.buffer.len) return error.OutOfMemory;
            const result = self.buffer[start_index..new_end_index];
            self.end_index = new_end_index;
            return result;
        } else if (new_size <= old_mem.len and new_align <= old_align) {
            // We can't do anything with the memory, so tell the client to keep it.
            return error.OutOfMemory;
        } else {
            const result = try alloc(allocator, new_size, new_align);
            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
            return result;
        }
    }
        assert(self.ownsSlice(buf)); // sanity check

    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        return old_mem[0..new_size];
        if (!self.isLastAllocation(buf)) {
            if (new_size > buf.len)
                return error.OutOfMemory;
            return if (new_size == 0) 0 else mem.alignAllocLen(buf.len, new_size, len_align);
        }

        if (new_size <= buf.len) {
            const sub = buf.len - new_size;
            self.end_index -= sub;
            return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align);
        }

        const add = new_size - buf.len;
        if (add + self.end_index > self.buffer.len) {
            return error.OutOfMemory;
        }
        self.end_index += add;
        return new_size;
    }
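
The isLastAllocation check above is what lets resize hand bytes back; a sketch of shrinking the most recent allocation in place (assuming Allocator.shrink routes through resizeFn):

test "FixedBufferAllocator reclaims the last allocation (sketch)" {
    var buffer: [64]u8 = undefined;
    var fba = FixedBufferAllocator.init(buffer[0..]);
    var slice = try fba.allocator.alloc(u8, 32);
    slice = fba.allocator.shrink(slice, 8); // last allocation, so end_index moves back
    std.testing.expect(fba.end_index == 8);
}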

    pub fn reset(self: *FixedBufferAllocator) void {
@@ -581,20 +580,20 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
        pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
            return ThreadSafeFixedBufferAllocator{
                .allocator = Allocator{
                    .reallocFn = realloc,
                    .shrinkFn = shrink,
                    .allocFn = alloc,
                    .resizeFn = Allocator.noResize,
                },
                .buffer = buffer,
                .end_index = 0,
            };
        }

        fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
        fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
            const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
            var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
            while (true) {
                const addr = @ptrToInt(self.buffer.ptr) + end_index;
                const adjusted_addr = mem.alignForward(addr, alignment);
                const adjusted_addr = mem.alignForward(addr, ptr_align);
                const adjusted_index = end_index + (adjusted_addr - addr);
                const new_end_index = adjusted_index + n;
                if (new_end_index > self.buffer.len) {
@@ -604,21 +603,6 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
            }
        }

        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
            if (new_size <= old_mem.len and new_align <= old_align) {
                // We can't do anything useful with the memory, tell the client to keep it.
                return error.OutOfMemory;
            } else {
                const result = try alloc(allocator, new_size, new_align);
                @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
                return result;
            }
        }

        fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
            return old_mem[0..new_size];
        }

        pub fn reset(self: *ThreadSafeFixedBufferAllocator) void {
            self.end_index = 0;
        }
@@ -632,8 +616,8 @@ pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) Stack
        .fallback_allocator = fallback_allocator,
        .fixed_buffer_allocator = undefined,
        .allocator = Allocator{
            .reallocFn = StackFallbackAllocator(size).realloc,
            .shrinkFn = StackFallbackAllocator(size).shrink,
            .allocFn = StackFallbackAllocator(size).realloc,
            .resizeFn = StackFallbackAllocator(size).resize,
        },
    };
}
@@ -652,58 +636,19 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
            return &self.allocator;
        }

        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![*]u8 {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
                @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
            if (in_buffer) {
                return FixedBufferAllocator.realloc(
                    &self.fixed_buffer_allocator.allocator,
                    old_mem,
                    old_align,
                    new_size,
                    new_align,
                ) catch {
                    const result = try self.fallback_allocator.reallocFn(
                        self.fallback_allocator,
                        &[0]u8{},
                        undefined,
                        new_size,
                        new_align,
                    );
                    mem.copy(u8, result, old_mem);
                    return result;
                };
            }
            return self.fallback_allocator.reallocFn(
                self.fallback_allocator,
                old_mem,
                old_align,
                new_size,
                new_align,
            );
            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align) catch
                return fallback_allocator.alloc(len, ptr_align);
        }

        fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        fn resize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!void {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
                @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
            if (in_buffer) {
                return FixedBufferAllocator.shrink(
                    &self.fixed_buffer_allocator.allocator,
                    old_mem,
                    old_align,
                    new_size,
                    new_align,
                );
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                try self.fixed_buffer_allocator.callResizeFn(buf, new_len);
            } else {
                try self.fallback_allocator.callResizeFn(buf, new_len);
            }
            return self.fallback_allocator.shrinkFn(
                self.fallback_allocator,
                old_mem,
                old_align,
                new_size,
                new_align,
            );
        }
    };
}
@@ -718,8 +663,8 @@ test "c_allocator" {

test "WasmPageAllocator internals" {
    if (comptime std.Target.current.isWasm()) {
        const conventional_memsize = WasmPageAllocator.conventional.totalPages() * std.mem.page_size;
        const initial = try page_allocator.alloc(u8, std.mem.page_size);
        const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
        const initial = try page_allocator.alloc(u8, mem.page_size);
        std.debug.assert(@ptrToInt(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.

        var inplace = try page_allocator.realloc(initial, 1);
@@ -772,6 +717,11 @@ test "PageAllocator" {
        slice[127] = 0x34;
        allocator.free(slice);
    }
    {
        var buf = try allocator.alloc(u8, mem.page_size + 1);
        defer allocator.free(buf);
        buf = try allocator.realloc(buf, 1); // shrink past the page boundary
    }
}

test "HeapAllocator" {
@@ -799,7 +749,7 @@ test "ArenaAllocator" {

var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
    var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
    var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));

    try testAllocator(&fixed_buffer_allocator.allocator);
    try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
@@ -865,7 +815,10 @@ test "ThreadSafeFixedBufferAllocator" {
    try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
}

fn testAllocator(allocator: *mem.Allocator) !void {
pub fn testAllocator(base_allocator: *mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = &validationAllocator.allocator;

    var slice = try allocator.alloc(*i32, 100);
    testing.expect(slice.len == 100);
    for (slice) |*item, i| {
@@ -893,7 +846,10 @@ fn testAllocator(allocator: *mem.Allocator) !void {
    allocator.free(slice);
}

fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void {
pub fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = &validationAllocator.allocator;

    // initial
    var slice = try allocator.alignedAlloc(u8, alignment, 10);
    testing.expect(slice.len == 10);
@@ -917,7 +873,10 @@ fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !voi
    testing.expect(slice.len == 0);
}

fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator.Error!void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = &validationAllocator.allocator;

    //Maybe a platform's page_size is actually the same as or
    // very near usize?
    if (mem.page_size << 2 > maxInt(usize)) return;
@@ -946,7 +905,10 @@ fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!vo
    allocator.free(slice);
}

fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!void {
pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) mem.Allocator.Error!void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = &validationAllocator.allocator;

    var debug_buffer: [1000]u8 = undefined;
    const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;

@@ -20,8 +20,8 @@ pub const ArenaAllocator = struct {
    pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator {
        return .{
            .allocator = Allocator{
                .reallocFn = realloc,
                .shrinkFn = shrink,
                .allocFn = alloc,
                .resizeFn = Allocator.noResize,
            },
            .child_allocator = child_allocator,
            .state = self,
@@ -49,9 +49,8 @@ pub const ArenaAllocator = struct {
        const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
        const big_enough_len = prev_len + actual_min_size;
        const len = big_enough_len + big_enough_len / 2;
        const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
        const buf_node_slice = mem.bytesAsSlice(BufNode, buf[0..@sizeOf(BufNode)]);
        const buf_node = &buf_node_slice[0];
        const buf = try self.child_allocator.callAllocFn(len, @alignOf(BufNode), 1);
        const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
        buf_node.* = BufNode{
            .data = buf,
            .next = null,
@@ -61,18 +60,18 @@ pub const ArenaAllocator = struct {
        return buf_node;
    }

    fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
        const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);

        var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + alignment);
        var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
        while (true) {
            const cur_buf = cur_node.data[@sizeOf(BufNode)..];
            const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index;
            const adjusted_addr = mem.alignForward(addr, alignment);
            const adjusted_addr = mem.alignForward(addr, ptr_align);
            const adjusted_index = self.state.end_index + (adjusted_addr - addr);
            const new_end_index = adjusted_index + n;
            if (new_end_index > cur_buf.len) {
                cur_node = try self.createNode(cur_buf.len, n + alignment);
                cur_node = try self.createNode(cur_buf.len, n + ptr_align);
                continue;
            }
            const result = cur_buf[adjusted_index..new_end_index];
@@ -80,19 +79,4 @@ pub const ArenaAllocator = struct {
            return result;
        }
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        if (new_size <= old_mem.len and new_align <= new_size) {
            // We can't do anything with the memory, so tell the client to keep it.
            return error.OutOfMemory;
        } else {
            const result = try alloc(allocator, new_size, new_align);
            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
            return result;
        }
    }

    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        return old_mem[0..new_size];
    }
};
|
||||
|
||||
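The arena's old realloc/shrink pair collapses into the single `resizeFn` hook above (`Allocator.noResize`), since arena memory is only reclaimed all at once. A minimal usage sketch, assuming `std.heap.ArenaAllocator` and `std.heap.page_allocator` from this std version:

```zig
const std = @import("std");

test "arena usage sketch" {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    // deinit() walks the BufNode list and frees every buffer in one pass;
    // individual frees inside the arena are no-ops.
    defer arena.deinit();
    const allocator = &arena.allocator;

    const slice = try allocator.alloc(u8, 100);
    std.debug.assert(slice.len == 100);
}
```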
@@ -15,62 +15,75 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
        pub fn init(parent_allocator: *Allocator, out_stream: OutStreamType) Self {
            return Self{
                .allocator = Allocator{
                    .reallocFn = realloc,
                    .shrinkFn = shrink,
                    .allocFn = alloc,
                    .resizeFn = resize,
                },
                .parent_allocator = parent_allocator,
                .out_stream = out_stream,
            };
        }

        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            if (old_mem.len == 0) {
                self.out_stream.print("allocation of {} ", .{new_size}) catch {};
            } else {
                self.out_stream.print("resize from {} to {} ", .{ old_mem.len, new_size }) catch {};
            }
            const result = self.parent_allocator.reallocFn(self.parent_allocator, old_mem, old_align, new_size, new_align);
            self.out_stream.print("alloc : {}", .{len}) catch {};
            const result = self.parent_allocator.callAllocFn(len, ptr_align, len_align);
            if (result) |buff| {
                self.out_stream.print("success!\n", .{}) catch {};
                self.out_stream.print(" success!\n", .{}) catch {};
            } else |err| {
                self.out_stream.print("failure!\n", .{}) catch {};
                self.out_stream.print(" failure!\n", .{}) catch {};
            }
            return result;
        }

        fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            const result = self.parent_allocator.shrinkFn(self.parent_allocator, old_mem, old_align, new_size, new_align);
            if (new_size == 0) {
                self.out_stream.print("free of {} bytes success!\n", .{old_mem.len}) catch {};
            if (new_len == 0) {
                self.out_stream.print("free : {}\n", .{buf.len}) catch {};
            } else if (new_len <= buf.len) {
                self.out_stream.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
            } else {
                self.out_stream.print("shrink from {} bytes to {} bytes success!\n", .{ old_mem.len, new_size }) catch {};
                self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
            }
            if (self.parent_allocator.callResizeFn(buf, new_len, len_align)) |resized_len| {
                if (new_len > buf.len) {
                    self.out_stream.print(" success!\n", .{}) catch {};
                }
                return resized_len;
            } else |e| {
                std.debug.assert(new_len > buf.len);
                self.out_stream.print(" failure!\n", .{}) catch {};
                return e;
            }
            return result;
        }
    };
}

pub fn loggingAllocator(
    parent_allocator: *Allocator,
    out_stream: var,
    out_stream: anytype,
) LoggingAllocator(@TypeOf(out_stream)) {
    return LoggingAllocator(@TypeOf(out_stream)).init(parent_allocator, out_stream);
}

test "LoggingAllocator" {
    var buf: [255]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    var log_buf: [255]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&log_buf);

    const allocator = &loggingAllocator(std.testing.allocator, fbs.outStream()).allocator;
    var allocator_buf: [10]u8 = undefined;
    var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
    const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator;

    const ptr = try allocator.alloc(u8, 10);
    allocator.free(ptr);
    var a = try allocator.alloc(u8, 10);
    a.len = allocator.shrinkBytes(a, 5, 0);
    std.debug.assert(a.len == 5);
    std.testing.expectError(error.OutOfMemory, allocator.callResizeFn(a, 20, 0));
    allocator.free(a);

    std.testing.expectEqualSlices(u8,
        \\allocation of 10 success!
        \\free of 10 bytes success!
        \\alloc : 10 success!
        \\shrink: 10 to 5
        \\expand: 5 to 20 failure!
        \\free : 5
        \\
    , fbs.getWritten());
}

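A minimal wrapping sketch, assuming the `std.heap.loggingAllocator` export and the stream style used in the test above; every alloc, shrink, expand, and free on the wrapped allocator is echoed to the stream:

```zig
const std = @import("std");

test "loggingAllocator wrapping sketch" {
    var log_buf: [255]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&log_buf);

    // Wrap any parent allocator; allocations are forwarded, messages logged.
    var logging = std.heap.loggingAllocator(std.testing.allocator, fbs.outStream());
    const allocator = &logging.allocator;

    const buf = try allocator.alloc(u8, 16); // logs "alloc : 16 success!"
    allocator.free(buf); // logs "free : 16"
}
```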
@@ -27,7 +27,6 @@ fn never_index_default(name: []const u8) bool {
}

const HeaderEntry = struct {
    allocator: *Allocator,
    name: []const u8,
    value: []u8,
    never_index: bool,
@@ -36,23 +35,22 @@ const HeaderEntry = struct {

    fn init(allocator: *Allocator, name: []const u8, value: []const u8, never_index: ?bool) !Self {
        return Self{
            .allocator = allocator,
            .name = name, // takes reference
            .value = try mem.dupe(allocator, u8, value),
            .value = try allocator.dupe(u8, value),
            .never_index = never_index orelse never_index_default(name),
        };
    }

    fn deinit(self: Self) void {
        self.allocator.free(self.value);
    fn deinit(self: Self, allocator: *Allocator) void {
        allocator.free(self.value);
    }

    pub fn modify(self: *Self, value: []const u8, never_index: ?bool) !void {
    pub fn modify(self: *Self, allocator: *Allocator, value: []const u8, never_index: ?bool) !void {
        const old_len = self.value.len;
        if (value.len > old_len) {
            self.value = try self.allocator.realloc(self.value, value.len);
            self.value = try allocator.realloc(self.value, value.len);
        } else if (value.len < old_len) {
            self.value = self.allocator.shrink(self.value, value.len);
            self.value = allocator.shrink(self.value, value.len);
        }
        mem.copy(u8, self.value, value);
        self.never_index = never_index orelse never_index_default(self.name);
@@ -85,22 +83,22 @@ const HeaderEntry = struct {

test "HeaderEntry" {
    var e = try HeaderEntry.init(testing.allocator, "foo", "bar", null);
    defer e.deinit();
    defer e.deinit(testing.allocator);
    testing.expectEqualSlices(u8, "foo", e.name);
    testing.expectEqualSlices(u8, "bar", e.value);
    testing.expectEqual(false, e.never_index);

    try e.modify("longer value", null);
    try e.modify(testing.allocator, "longer value", null);
    testing.expectEqualSlices(u8, "longer value", e.value);

    // shorter value
    try e.modify("x", null);
    try e.modify(testing.allocator, "x", null);
    testing.expectEqualSlices(u8, "x", e.value);
}

const HeaderList = std.ArrayList(HeaderEntry);
const HeaderIndexList = std.ArrayList(usize);
const HeaderIndex = std.StringHashMap(HeaderIndexList);
const HeaderList = std.ArrayListUnmanaged(HeaderEntry);
const HeaderIndexList = std.ArrayListUnmanaged(usize);
const HeaderIndex = std.StringHashMapUnmanaged(HeaderIndexList);

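The switch from `std.ArrayList` to `std.ArrayListUnmanaged` is why every call site below now threads an allocator through explicitly: an unmanaged container stores no allocator of its own. A minimal sketch, assuming `std.ArrayListUnmanaged` from this std version:

```zig
const std = @import("std");

test "unmanaged list sketch" {
    // No allocator is captured at init; each mutating call takes one,
    // and deinit must be given the same allocator.
    var list = std.ArrayListUnmanaged(usize){};
    defer list.deinit(std.testing.allocator);

    try list.append(std.testing.allocator, 42);
    std.debug.assert(list.items[0] == 42);
}
```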
pub const Headers = struct {
    // the owned header field name is stored in the index as part of the key
@@ -113,62 +111,62 @@ pub const Headers = struct {
    pub fn init(allocator: *Allocator) Self {
        return Self{
            .allocator = allocator,
            .data = HeaderList.init(allocator),
            .index = HeaderIndex.init(allocator),
            .data = HeaderList{},
            .index = HeaderIndex{},
        };
    }

    pub fn deinit(self: Self) void {
    pub fn deinit(self: *Self) void {
        {
            var it = self.index.iterator();
            while (it.next()) |kv| {
                var dex = &kv.value;
                dex.deinit();
                self.allocator.free(kv.key);
            for (self.index.items()) |*entry| {
                const dex = &entry.value;
                dex.deinit(self.allocator);
                self.allocator.free(entry.key);
            }
            self.index.deinit();
            self.index.deinit(self.allocator);
        }
        {
            for (self.data.span()) |entry| {
                entry.deinit();
            for (self.data.items) |entry| {
                entry.deinit(self.allocator);
            }
            self.data.deinit();
            self.data.deinit(self.allocator);
        }
        self.* = undefined;
    }

    pub fn clone(self: Self, allocator: *Allocator) !Self {
        var other = Headers.init(allocator);
        errdefer other.deinit();
        try other.data.ensureCapacity(self.data.items.len);
        try other.index.initCapacity(self.index.entries.len);
        for (self.data.span()) |entry| {
        try other.data.ensureCapacity(allocator, self.data.items.len);
        try other.index.initCapacity(allocator, self.index.entries.len);
        for (self.data.items) |entry| {
            try other.append(entry.name, entry.value, entry.never_index);
        }
        return other;
    }

    pub fn toSlice(self: Self) []const HeaderEntry {
        return self.data.span();
        return self.data.items;
    }

    pub fn append(self: *Self, name: []const u8, value: []const u8, never_index: ?bool) !void {
        const n = self.data.items.len + 1;
        try self.data.ensureCapacity(n);
        try self.data.ensureCapacity(self.allocator, n);
        var entry: HeaderEntry = undefined;
        if (self.index.get(name)) |kv| {
        if (self.index.getEntry(name)) |kv| {
            entry = try HeaderEntry.init(self.allocator, kv.key, value, never_index);
            errdefer entry.deinit();
            var dex = &kv.value;
            try dex.append(n - 1);
            errdefer entry.deinit(self.allocator);
            const dex = &kv.value;
            try dex.append(self.allocator, n - 1);
        } else {
            const name_dup = try mem.dupe(self.allocator, u8, name);
            const name_dup = try self.allocator.dupe(u8, name);
            errdefer self.allocator.free(name_dup);
            entry = try HeaderEntry.init(self.allocator, name_dup, value, never_index);
            errdefer entry.deinit();
            var dex = HeaderIndexList.init(self.allocator);
            try dex.append(n - 1);
            errdefer dex.deinit();
            _ = try self.index.put(name_dup, dex);
            errdefer entry.deinit(self.allocator);
            var dex = HeaderIndexList{};
            try dex.append(self.allocator, n - 1);
            errdefer dex.deinit(self.allocator);
            _ = try self.index.put(self.allocator, name_dup, dex);
        }
        self.data.appendAssumeCapacity(entry);
    }
@@ -194,8 +192,8 @@ pub const Headers = struct {

    /// Returns boolean indicating if something was deleted.
    pub fn delete(self: *Self, name: []const u8) bool {
        if (self.index.remove(name)) |kv| {
            var dex = &kv.value;
        if (self.index.remove(name)) |*kv| {
            const dex = &kv.value;
            // iterate backwards
            var i = dex.items.len;
            while (i > 0) {
@@ -203,11 +201,11 @@ pub const Headers = struct {
                const data_index = dex.items[i];
                const removed = self.data.orderedRemove(data_index);
                assert(mem.eql(u8, removed.name, name));
                removed.deinit();
                removed.deinit(self.allocator);
            }
            dex.deinit();
            dex.deinit(self.allocator);
            self.allocator.free(kv.key);
            self.rebuild_index();
            self.rebuildIndex();
            return true;
        } else {
            return false;
@@ -216,45 +214,52 @@ pub const Headers = struct {

    /// Removes the element at the specified index.
    /// Moves items down to fill the empty space.
    /// TODO this implementation can be replaced by adding
    /// orderedRemove to the new hash table implementation as an
    /// alternative to swapRemove.
    pub fn orderedRemove(self: *Self, i: usize) void {
        const removed = self.data.orderedRemove(i);
        const kv = self.index.get(removed.name).?;
        var dex = &kv.value;
        const kv = self.index.getEntry(removed.name).?;
        const dex = &kv.value;
        if (dex.items.len == 1) {
            // was last item; delete the index
            _ = self.index.remove(kv.key);
            dex.deinit();
            removed.deinit();
            self.allocator.free(kv.key);
            dex.deinit(self.allocator);
            removed.deinit(self.allocator);
            const key = kv.key;
            _ = self.index.remove(key); // invalidates `kv` and `dex`
            self.allocator.free(key);
        } else {
            dex.shrink(dex.items.len - 1);
            removed.deinit();
            dex.shrink(self.allocator, dex.items.len - 1);
            removed.deinit(self.allocator);
        }
        // if it was the last item; no need to rebuild index
        if (i != self.data.items.len) {
            self.rebuild_index();
            self.rebuildIndex();
        }
    }

    /// Removes the element at the specified index.
    /// The empty slot is filled from the end of the list.
    /// TODO this implementation can be replaced by simply using the
    /// new hash table which does swap removal.
    pub fn swapRemove(self: *Self, i: usize) void {
        const removed = self.data.swapRemove(i);
        const kv = self.index.get(removed.name).?;
        var dex = &kv.value;
        const kv = self.index.getEntry(removed.name).?;
        const dex = &kv.value;
        if (dex.items.len == 1) {
            // was last item; delete the index
            _ = self.index.remove(kv.key);
            dex.deinit();
            removed.deinit();
            self.allocator.free(kv.key);
            dex.deinit(self.allocator);
            removed.deinit(self.allocator);
            const key = kv.key;
            _ = self.index.remove(key); // invalidates `kv` and `dex`
            self.allocator.free(key);
        } else {
            dex.shrink(dex.items.len - 1);
            removed.deinit();
            dex.shrink(self.allocator, dex.items.len - 1);
            removed.deinit(self.allocator);
        }
        // if it was the last item; no need to rebuild index
        if (i != self.data.items.len) {
            self.rebuild_index();
            self.rebuildIndex();
        }
    }

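The recurring `get` → `getEntry` rewrite above follows the new hash map API: `get` returns a copy of the value, while `getEntry` returns a pointer into the table, which is what you need to mutate the stored value in place (and which `remove` invalidates, hence the key copies above). A small sketch with the managed variant, under the assumption that this std version's `put` and `getEntry` behave as the diff shows:

```zig
const std = @import("std");

test "get vs getEntry sketch" {
    var map = std.StringHashMap(usize).init(std.testing.allocator);
    defer map.deinit();

    _ = try map.put("a", 1);
    if (map.getEntry("a")) |entry| entry.value += 1; // mutate in place
    std.debug.assert(map.get("a").? == 2); // get hands back a copy
}
```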
@@ -266,11 +271,7 @@ pub const Headers = struct {
    /// Returns a list of indices containing headers with the given name.
    /// The returned list should not be modified by the caller.
    pub fn getIndices(self: Self, name: []const u8) ?HeaderIndexList {
        if (self.index.get(name)) |kv| {
            return kv.value;
        } else {
            return null;
        }
        return self.index.get(name);
    }

    /// Returns a slice containing each header with the given name.
@@ -279,7 +280,7 @@ pub const Headers = struct {

        const buf = try allocator.alloc(HeaderEntry, dex.items.len);
        var n: usize = 0;
        for (dex.span()) |idx| {
        for (dex.items) |idx| {
            buf[n] = self.data.items[idx];
            n += 1;
        }
@@ -302,7 +303,7 @@ pub const Headers = struct {
        // adapted from mem.join
        const total_len = blk: {
            var sum: usize = dex.items.len - 1; // space for separator(s)
            for (dex.span()) |idx|
            for (dex.items) |idx|
                sum += self.data.items[idx].value.len;
            break :blk sum;
        };
@@ -325,32 +326,27 @@ pub const Headers = struct {
        return buf;
    }

    fn rebuild_index(self: *Self) void {
        { // clear out the indexes
            var it = self.index.iterator();
            while (it.next()) |kv| {
                var dex = &kv.value;
                dex.items.len = 0; // keeps capacity available
            }
    fn rebuildIndex(self: *Self) void {
        // clear out the indexes
        for (self.index.items()) |*entry| {
            entry.value.shrinkRetainingCapacity(0);
        }
        { // fill up indexes again; we know capacity is fine from before
            for (self.data.span()) |entry, i| {
                var dex = &self.index.get(entry.name).?.value;
                dex.appendAssumeCapacity(i);
            }
        // fill up indexes again; we know capacity is fine from before
        for (self.data.items) |entry, i| {
            self.index.getEntry(entry.name).?.value.appendAssumeCapacity(i);
        }
    }

    pub fn sort(self: *Self) void {
        std.sort.sort(HeaderEntry, self.data.items, {}, HeaderEntry.compare);
        self.rebuild_index();
        self.rebuildIndex();
    }

    pub fn format(
        self: Self,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        out_stream: var,
        out_stream: anytype,
    ) !void {
        for (self.toSlice()) |entry| {
            try out_stream.writeAll(entry.name);
@@ -495,8 +491,8 @@ test "Headers.getIndices" {
    try h.append("set-cookie", "y=2", null);

    testing.expect(null == h.getIndices("not-present"));
    testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("foo").?.span());
    testing.expectEqualSlices(usize, &[_]usize{ 1, 2 }, h.getIndices("set-cookie").?.span());
    testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("foo").?.items);
    testing.expectEqualSlices(usize, &[_]usize{ 1, 2 }, h.getIndices("set-cookie").?.items);
}

test "Headers.get" {

@@ -170,7 +170,7 @@ pub fn BitReader(endian: builtin.Endian, comptime ReaderType: type) type {

pub fn bitReader(
    comptime endian: builtin.Endian,
    underlying_stream: var,
    underlying_stream: anytype,
) BitReader(endian, @TypeOf(underlying_stream)) {
    return BitReader(endian, @TypeOf(underlying_stream)).init(underlying_stream);
}

@@ -34,7 +34,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type {
        /// Write the specified number of bits to the stream from the least significant bits of
        /// the specified unsigned int value. Bits will only be written to the stream when there
        /// are enough to fill a byte.
        pub fn writeBits(self: *Self, value: var, bits: usize) Error!void {
        pub fn writeBits(self: *Self, value: anytype, bits: usize) Error!void {
            if (bits == 0) return;

            const U = @TypeOf(value);
@@ -145,7 +145,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type {

pub fn bitWriter(
    comptime endian: builtin.Endian,
    underlying_stream: var,
    underlying_stream: anytype,
) BitWriter(endian, @TypeOf(underlying_stream)) {
    return BitWriter(endian, @TypeOf(underlying_stream)).init(underlying_stream);
}

@@ -2,4 +2,4 @@
pub const BufferedOutStream = @import("./buffered_writer.zig").BufferedWriter;

/// Deprecated: use `std.io.buffered_writer.bufferedWriter`
pub const bufferedOutStream = @import("./buffered_writer.zig").bufferedWriter
pub const bufferedOutStream = @import("./buffered_writer.zig").bufferedWriter;

@@ -48,7 +48,7 @@ pub fn BufferedReader(comptime buffer_size: usize, comptime ReaderType: type) ty
    };
}

pub fn bufferedReader(underlying_stream: var) BufferedReader(4096, @TypeOf(underlying_stream)) {
pub fn bufferedReader(underlying_stream: anytype) BufferedReader(4096, @TypeOf(underlying_stream)) {
    return .{ .unbuffered_reader = underlying_stream };
}

@@ -43,6 +43,6 @@ pub fn BufferedWriter(comptime buffer_size: usize, comptime WriterType: type) ty
    };
}

pub fn bufferedWriter(underlying_stream: var) BufferedWriter(4096, @TypeOf(underlying_stream)) {
pub fn bufferedWriter(underlying_stream: anytype) BufferedWriter(4096, @TypeOf(underlying_stream)) {
    return .{ .unbuffered_writer = underlying_stream };
}

@@ -32,7 +32,7 @@ pub fn CountingWriter(comptime WriterType: type) type {
    };
}

pub fn countingWriter(child_stream: var) CountingWriter(@TypeOf(child_stream)) {
pub fn countingWriter(child_stream: anytype) CountingWriter(@TypeOf(child_stream)) {
    return .{ .bytes_written = 0, .child_stream = child_stream };
}

@@ -127,7 +127,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
    };
}

pub fn fixedBufferStream(buffer: var) FixedBufferStream(NonSentinelSpan(@TypeOf(buffer))) {
pub fn fixedBufferStream(buffer: anytype) FixedBufferStream(NonSentinelSpan(@TypeOf(buffer))) {
    return .{ .buffer = mem.span(buffer), .pos = 0 };
}

@@ -43,7 +43,7 @@ pub fn MultiWriter(comptime Writers: type) type {
    };
}

pub fn multiWriter(streams: var) MultiWriter(@TypeOf(streams)) {
pub fn multiWriter(streams: anytype) MultiWriter(@TypeOf(streams)) {
    return .{ .streams = streams };
}

@@ -80,7 +80,7 @@ pub fn PeekStream(

pub fn peekStream(
    comptime lookahead: comptime_int,
    underlying_stream: var,
    underlying_stream: anytype,
) PeekStream(.{ .Static = lookahead }, @TypeOf(underlying_stream)) {
    return PeekStream(.{ .Static = lookahead }, @TypeOf(underlying_stream)).init(underlying_stream);
}

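All of these one-liner constructors now take `anytype`, so any stream can be passed straight through. A small sketch using `fixedBufferStream` and its writer (still named `outStream` at this point in the std API):

```zig
const std = @import("std");

test "fixedBufferStream writer sketch" {
    var buf: [64]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);

    // The writer is a plain value; print formats into the backing buffer.
    try fbs.outStream().print("{}", .{42});
    std.testing.expectEqualSlices(u8, "42", fbs.getWritten());
}
```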
@@ -40,8 +40,7 @@ pub fn Reader(
        return index;
    }

    /// Returns the number of bytes read. If the number read would be smaller than buf.len,
    /// error.EndOfStream is returned instead.
    /// If the number read would be smaller than `buf.len`, `error.EndOfStream` is returned instead.
    pub fn readNoEof(self: Self, buf: []u8) !void {
        const amt_read = try self.readAll(buf);
        if (amt_read < buf.len) return error.EndOfStream;

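A usage sketch for the contract documented above: the buffer is either filled completely or the call fails with `error.EndOfStream` (this assumes the stream's reader is still exposed as `inStream()` at this commit):

```zig
const std = @import("std");

test "readNoEof sketch" {
    var fbs = std.io.fixedBufferStream("abcd");
    var header: [4]u8 = undefined;

    // Fills all 4 bytes or errors; no short reads to check for.
    try fbs.inStream().readNoEof(&header);
    std.testing.expectEqualSlices(u8, "abcd", &header);
}
```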
@@ -16,14 +16,16 @@ pub const Packing = enum {
};

/// Creates a deserializer that deserializes types from any stream.
/// If `is_packed` is true, the data stream is treated as bit-packed,
/// otherwise data is expected to be packed to the smallest byte.
/// Types may implement a custom deserialization routine with a
/// function named `deserialize` in the form of:
///     pub fn deserialize(self: *Self, deserializer: var) !void
/// which will be called when the deserializer is used to deserialize
/// that type. It will pass a pointer to the type instance to deserialize
/// into and a pointer to the deserializer struct.
/// If `is_packed` is true, the data stream is treated as bit-packed,
/// otherwise data is expected to be packed to the smallest byte.
/// Types may implement a custom deserialization routine with a
/// function named `deserialize` in the form of:
/// ```
/// pub fn deserialize(self: *Self, deserializer: anytype) !void
/// ```
/// which will be called when the deserializer is used to deserialize
/// that type. It will pass a pointer to the type instance to deserialize
/// into and a pointer to the deserializer struct.
pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing, comptime ReaderType: type) type {
    return struct {
        in_stream: if (packing == .Bit) io.BitReader(endian, ReaderType) else ReaderType,
@@ -93,7 +95,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
        }

        /// Deserializes data into the type pointed to by `ptr`
        pub fn deserializeInto(self: *Self, ptr: var) !void {
        pub fn deserializeInto(self: *Self, ptr: anytype) !void {
            const T = @TypeOf(ptr);
            comptime assert(trait.is(.Pointer)(T));

@@ -108,7 +110,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
            const C = comptime meta.Child(T);
            const child_type_id = @typeInfo(C);

            //custom deserializer: fn(self: *Self, deserializer: var) !void
            //custom deserializer: fn(self: *Self, deserializer: anytype) !void
            if (comptime trait.hasFn("deserialize")(C)) return C.deserialize(ptr, self);

            if (comptime trait.isPacked(C) and packing != .Bit) {
@@ -190,24 +192,26 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
pub fn deserializer(
    comptime endian: builtin.Endian,
    comptime packing: Packing,
    in_stream: var,
    in_stream: anytype,
) Deserializer(endian, packing, @TypeOf(in_stream)) {
    return Deserializer(endian, packing, @TypeOf(in_stream)).init(in_stream);
}

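A sketch of the hook described in the doc comment above; the type and field names are illustrative. When present, the deserializer calls this instead of its default field-by-field logic:

```zig
const Point = struct {
    x: i32,
    y: i32,

    // Called by Deserializer.deserializeInto for values of this type.
    pub fn deserialize(self: *@This(), deserializer: anytype) !void {
        try deserializer.deserializeInto(&self.x);
        self.y = 0; // not stored in the stream; reset on load
    }
};
```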
/// Creates a serializer that serializes types to any stream.
/// If `is_packed` is true, the data will be bit-packed into the stream.
/// Note that the you must call `serializer.flush()` when you are done
/// writing bit-packed data in order ensure any unwritten bits are committed.
/// If `is_packed` is false, data is packed to the smallest byte. In the case
/// of packed structs, the struct will written bit-packed and with the specified
/// endianess, after which data will resume being written at the next byte boundary.
/// Types may implement a custom serialization routine with a
/// function named `serialize` in the form of:
///     pub fn serialize(self: Self, serializer: var) !void
/// which will be called when the serializer is used to serialize that type. It will
/// pass a const pointer to the type instance to be serialized and a pointer
/// to the serializer struct.
/// If `is_packed` is true, the data will be bit-packed into the stream.
/// Note that you must call `serializer.flush()` when you are done
/// writing bit-packed data in order to ensure any unwritten bits are committed.
/// If `is_packed` is false, data is packed to the smallest byte. In the case
/// of packed structs, the struct will be written bit-packed and with the specified
/// endianness, after which data will resume being written at the next byte boundary.
/// Types may implement a custom serialization routine with a
/// function named `serialize` in the form of:
/// ```
/// pub fn serialize(self: Self, serializer: anytype) !void
/// ```
/// which will be called when the serializer is used to serialize that type. It will
/// pass a const pointer to the type instance to be serialized and a pointer
/// to the serializer struct.
pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, comptime OutStreamType: type) type {
    return struct {
        out_stream: if (packing == .Bit) io.BitOutStream(endian, OutStreamType) else OutStreamType,
@@ -229,7 +233,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
            if (packing == .Bit) return self.out_stream.flushBits();
        }

        fn serializeInt(self: *Self, value: var) Error!void {
        fn serializeInt(self: *Self, value: anytype) Error!void {
            const T = @TypeOf(value);
            comptime assert(trait.is(.Int)(T) or trait.is(.Float)(T));

@@ -261,7 +265,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
        }

        /// Serializes the passed value into the stream
        pub fn serialize(self: *Self, value: var) Error!void {
        pub fn serialize(self: *Self, value: anytype) Error!void {
            const T = comptime @TypeOf(value);

            if (comptime trait.isIndexable(T)) {
@@ -270,7 +274,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
                return;
            }

            //custom serializer: fn(self: Self, serializer: var) !void
            //custom serializer: fn(self: Self, serializer: anytype) !void
            if (comptime trait.hasFn("serialize")(T)) return T.serialize(value, self);

            if (comptime trait.isPacked(T) and packing != .Bit) {
@@ -346,7 +350,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
pub fn serializer(
    comptime endian: builtin.Endian,
    comptime packing: Packing,
    out_stream: var,
    out_stream: anytype,
) Serializer(endian, packing, @TypeOf(out_stream)) {
    return Serializer(endian, packing, @TypeOf(out_stream)).init(out_stream);
}
@@ -462,7 +466,7 @@ test "Serializer/Deserializer Int: Inf/NaN" {
    try testIntSerializerDeserializerInfNaN(.Little, .Bit);
}

fn testAlternateSerializer(self: var, _serializer: var) !void {
fn testAlternateSerializer(self: anytype, _serializer: anytype) !void {
    try _serializer.serialize(self.f_f16);
}

@@ -503,7 +507,7 @@ fn testSerializerDeserializer(comptime endian: builtin.Endian, comptime packing:
    f_f16: f16,
    f_unused_u32: u32,

    pub fn deserialize(self: *@This(), _deserializer: var) !void {
    pub fn deserialize(self: *@This(), _deserializer: anytype) !void {
        try _deserializer.deserializeInto(&self.f_f16);
        self.f_unused_u32 = 47;
    }

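And the matching sketch for the `serialize` hook documented above (illustrative names again); only `x` is written, keeping the wire format in sync with the `deserialize` sketch earlier:

```zig
const Point = struct {
    x: i32,
    y: i32,

    // Called by Serializer.serialize for values of this type.
    pub fn serialize(self: @This(), serializer: anytype) !void {
        try serializer.serialize(self.x);
    }
};
```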
@@ -24,7 +24,7 @@ pub fn Writer(
        }
    }

    pub fn print(self: Self, comptime format: []const u8, args: var) Error!void {
    pub fn print(self: Self, comptime format: []const u8, args: anytype) Error!void {
        return std.fmt.format(self, format, args);
    }

@@ -239,7 +239,7 @@ pub const StreamingParser = struct {
    NullLiteral3,

    // Only call this function to generate array/object final state.
    pub fn fromInt(x: var) State {
    pub fn fromInt(x: anytype) State {
        debug.assert(x == 0 or x == 1);
        const T = @TagType(State);
        return @intToEnum(State, @intCast(T, x));
@@ -1236,7 +1236,7 @@ pub const Value = union(enum) {
    pub fn jsonStringify(
        value: @This(),
        options: StringifyOptions,
        out_stream: var,
        out_stream: anytype,
    ) @TypeOf(out_stream).Error!void {
        switch (value) {
            .Null => try stringify(null, options, out_stream),
@@ -1288,7 +1288,7 @@ pub const Value = union(enum) {
        var held = std.debug.getStderrMutex().acquire();
        defer held.release();

        const stderr = std.debug.getStderrStream();
        const stderr = io.getStdErr().writer();
        std.json.stringify(self, std.json.StringifyOptions{ .whitespace = null }, stderr) catch return;
    }
};
@@ -1535,7 +1535,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
            const allocator = options.allocator orelse return error.AllocatorRequired;
            switch (ptrInfo.size) {
                .One => {
                    const r: T = allocator.create(ptrInfo.child);
                    const r: T = try allocator.create(ptrInfo.child);
                    r.* = try parseInternal(ptrInfo.child, token, tokens, options);
                    return r;
                },
@@ -1567,7 +1567,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                    if (ptrInfo.child != u8) return error.UnexpectedToken;
                    const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
                    switch (stringToken.escapes) {
                        .None => return mem.dupe(allocator, u8, source_slice),
                        .None => return allocator.dupe(u8, source_slice),
                        .Some => |some_escapes| {
                            const output = try allocator.alloc(u8, stringToken.decodedLength());
                            errdefer allocator.free(output);
@@ -1629,7 +1629,7 @@ pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
            switch (ptrInfo.size) {
                .One => {
                    parseFree(ptrInfo.child, value.*, options);
                    allocator.destroy(v);
                    allocator.destroy(value);
                },
                .Slice => {
                    for (value) |v| {
@@ -2043,7 +2043,7 @@ pub const Parser = struct {
    fn parseString(p: *Parser, allocator: *Allocator, s: std.meta.TagPayloadType(Token, Token.String), input: []const u8, i: usize) !Value {
        const slice = s.slice(input, i);
        switch (s.escapes) {
            .None => return Value{ .String = if (p.copy_strings) try mem.dupe(allocator, u8, slice) else slice },
            .None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice },
            .Some => |some_escapes| {
                const output = try allocator.alloc(u8, s.decodedLength());
                errdefer allocator.free(output);
@@ -2149,27 +2149,27 @@ test "json.parser.dynamic" {

    var root = tree.root;

    var image = root.Object.get("Image").?.value;
    var image = root.Object.get("Image").?;

    const width = image.Object.get("Width").?.value;
    const width = image.Object.get("Width").?;
    testing.expect(width.Integer == 800);

    const height = image.Object.get("Height").?.value;
    const height = image.Object.get("Height").?;
    testing.expect(height.Integer == 600);

    const title = image.Object.get("Title").?.value;
    const title = image.Object.get("Title").?;
    testing.expect(mem.eql(u8, title.String, "View from 15th Floor"));

    const animated = image.Object.get("Animated").?.value;
    const animated = image.Object.get("Animated").?;
    testing.expect(animated.Bool == false);

    const array_of_object = image.Object.get("ArrayOfObject").?.value;
    const array_of_object = image.Object.get("ArrayOfObject").?;
    testing.expect(array_of_object.Array.items.len == 1);

    const obj0 = array_of_object.Array.items[0].Object.get("n").?.value;
    const obj0 = array_of_object.Array.items[0].Object.get("n").?;
    testing.expect(mem.eql(u8, obj0.String, "m"));

    const double = image.Object.get("double").?.value;
    const double = image.Object.get("double").?;
    testing.expect(double.Float == 1.3412);
}

@@ -2217,12 +2217,12 @@ test "write json then parse it" {
    var tree = try parser.parse(fixed_buffer_stream.getWritten());
    defer tree.deinit();

    testing.expect(tree.root.Object.get("f").?.value.Bool == false);
    testing.expect(tree.root.Object.get("t").?.value.Bool == true);
    testing.expect(tree.root.Object.get("int").?.value.Integer == 1234);
    testing.expect(tree.root.Object.get("array").?.value.Array.items[0].Null == {});
    testing.expect(tree.root.Object.get("array").?.value.Array.items[1].Float == 12.34);
    testing.expect(mem.eql(u8, tree.root.Object.get("str").?.value.String, "hello"));
    testing.expect(tree.root.Object.get("f").?.Bool == false);
    testing.expect(tree.root.Object.get("t").?.Bool == true);
    testing.expect(tree.root.Object.get("int").?.Integer == 1234);
    testing.expect(tree.root.Object.get("array").?.Array.items[0].Null == {});
    testing.expect(tree.root.Object.get("array").?.Array.items[1].Float == 12.34);
    testing.expect(mem.eql(u8, tree.root.Object.get("str").?.String, "hello"));
}

fn test_parse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
@@ -2245,7 +2245,7 @@ test "integer after float has proper type" {
        \\ "ints": [1, 2, 3]
        \\}
    );
    std.testing.expect(json.Object.getValue("ints").?.Array.items[0] == .Integer);
    std.testing.expect(json.Object.get("ints").?.Array.items[0] == .Integer);
}

test "escaped characters" {
|
||||
@@ -2271,16 +2271,16 @@ test "escaped characters" {
|
||||
|
||||
const obj = (try test_parse(&arena_allocator.allocator, input)).Object;
|
||||
|
||||
testing.expectEqualSlices(u8, obj.get("backslash").?.value.String, "\\");
|
||||
testing.expectEqualSlices(u8, obj.get("forwardslash").?.value.String, "/");
|
||||
testing.expectEqualSlices(u8, obj.get("newline").?.value.String, "\n");
|
||||
testing.expectEqualSlices(u8, obj.get("carriagereturn").?.value.String, "\r");
|
||||
testing.expectEqualSlices(u8, obj.get("tab").?.value.String, "\t");
|
||||
testing.expectEqualSlices(u8, obj.get("formfeed").?.value.String, "\x0C");
|
||||
testing.expectEqualSlices(u8, obj.get("backspace").?.value.String, "\x08");
|
||||
testing.expectEqualSlices(u8, obj.get("doublequote").?.value.String, "\"");
|
||||
testing.expectEqualSlices(u8, obj.get("unicode").?.value.String, "ą");
|
||||
testing.expectEqualSlices(u8, obj.get("surrogatepair").?.value.String, "😂");
|
||||
testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\");
|
||||
testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/");
|
||||
testing.expectEqualSlices(u8, obj.get("newline").?.String, "\n");
|
||||
testing.expectEqualSlices(u8, obj.get("carriagereturn").?.String, "\r");
|
||||
testing.expectEqualSlices(u8, obj.get("tab").?.String, "\t");
|
||||
testing.expectEqualSlices(u8, obj.get("formfeed").?.String, "\x0C");
|
||||
testing.expectEqualSlices(u8, obj.get("backspace").?.String, "\x08");
|
||||
testing.expectEqualSlices(u8, obj.get("doublequote").?.String, "\"");
|
||||
testing.expectEqualSlices(u8, obj.get("unicode").?.String, "ą");
|
||||
testing.expectEqualSlices(u8, obj.get("surrogatepair").?.String, "😂");
|
||||
}
|
||||
|
||||
test "string copy option" {
|
||||
@@ -2306,11 +2306,11 @@ test "string copy option" {
|
||||
const obj_copy = tree_copy.root.Object;
|
||||
|
||||
for ([_][]const u8{ "noescape", "simple", "unicode", "surrogatepair" }) |field_name| {
|
||||
testing.expectEqualSlices(u8, obj_nocopy.getValue(field_name).?.String, obj_copy.getValue(field_name).?.String);
|
||||
testing.expectEqualSlices(u8, obj_nocopy.get(field_name).?.String, obj_copy.get(field_name).?.String);
|
||||
}
|
||||
|
||||
const nocopy_addr = &obj_nocopy.getValue("noescape").?.String[0];
|
||||
const copy_addr = &obj_copy.getValue("noescape").?.String[0];
|
||||
const nocopy_addr = &obj_nocopy.get("noescape").?.String[0];
|
||||
const copy_addr = &obj_copy.get("noescape").?.String[0];
|
||||
|
||||
var found_nocopy = false;
|
||||
for (input) |_, index| {
|
||||
@@ -2338,7 +2338,7 @@ pub const StringifyOptions = struct {
|
||||
|
||||
pub fn outputIndent(
|
||||
whitespace: @This(),
|
||||
out_stream: var,
|
||||
out_stream: anytype,
|
||||
) @TypeOf(out_stream).Error!void {
|
||||
var char: u8 = undefined;
|
||||
var n_chars: usize = undefined;
|
||||
@@ -2380,7 +2380,7 @@ pub const StringifyOptions = struct {
|
||||
|
||||
fn outputUnicodeEscape(
|
||||
codepoint: u21,
|
||||
out_stream: var,
|
||||
out_stream: anytype,
|
||||
) !void {
|
||||
if (codepoint <= 0xFFFF) {
|
||||
// If the character is in the Basic Multilingual Plane (U+0000 through U+FFFF),
|
||||
@@ -2402,9 +2402,9 @@ fn outputUnicodeEscape(
|
||||
}
|
||||
|
||||
pub fn stringify(
|
||||
value: var,
|
||||
value: anytype,
|
||||
options: StringifyOptions,
|
||||
out_stream: var,
|
||||
out_stream: anytype,
|
||||
) @TypeOf(out_stream).Error!void {
|
||||
const T = @TypeOf(value);
|
||||
switch (@typeInfo(T)) {
|
||||
@@ -2576,15 +2576,15 @@ pub fn stringify(
|
||||
},
|
||||
.Array => return stringify(&value, options, out_stream),
|
||||
.Vector => |info| {
|
||||
const array: [info.len]info.child = value;
|
||||
return stringify(&array, options, out_stream);
|
||||
const array: [info.len]info.child = value;
|
||||
return stringify(&array, options, out_stream);
|
||||
},
|
||||
else => @compileError("Unable to stringify type '" ++ @typeName(T) ++ "'"),
|
||||
}
|
||||
unreachable;
|
||||
}
|
||||
|
||||
fn teststringify(expected: []const u8, value: var, options: StringifyOptions) !void {
|
||||
fn teststringify(expected: []const u8, value: anytype, options: StringifyOptions) !void {
|
||||
const ValidationOutStream = struct {
|
||||
const Self = @This();
|
||||
pub const OutStream = std.io.OutStream(*Self, Error, write);
|
||||
@@ -2758,7 +2758,7 @@ test "stringify struct with custom stringifier" {
|
||||
pub fn jsonStringify(
|
||||
value: Self,
|
||||
options: StringifyOptions,
|
||||
out_stream: var,
|
||||
out_stream: anytype,
|
||||
) !void {
|
||||
try out_stream.writeAll("[\"something special\",");
|
||||
try stringify(42, options, out_stream);
|
||||
@@ -2770,4 +2770,3 @@ test "stringify struct with custom stringifier" {
|
||||
test "stringify vector" {
|
||||
try teststringify("[1,1]", @splat(2, @as(u32, 1)), StringifyOptions{});
|
||||
}
|
||||
|
||||
|
||||
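A minimal end-to-end sketch of the `stringify` entry point shown above, writing compact JSON (the default when `whitespace` is null) into a fixed buffer:

```zig
const std = @import("std");

test "stringify usage sketch" {
    const T = struct { x: i32 };
    var buf: [64]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);

    try std.json.stringify(T{ .x = 1 }, std.json.StringifyOptions{}, fbs.outStream());
    std.testing.expectEqualSlices(u8, "{\"x\":1}", fbs.getWritten());
}
```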
@@ -152,7 +152,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
            self: *Self,
            /// An integer, float, or `std.math.BigInt`. Emitted as a bare number if it fits losslessly
            /// in an IEEE 754 double float, otherwise emitted as a string to the full precision.
            value: var,
            value: anytype,
        ) !void {
            assert(self.state[self.state_index] == State.Value);
            switch (@typeInfo(@TypeOf(value))) {
@@ -215,7 +215,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
            self.state_index -= 1;
        }

        fn stringify(self: *Self, value: var) !void {
        fn stringify(self: *Self, value: anytype) !void {
            try std.json.stringify(value, std.json.StringifyOptions{
                .whitespace = self.whitespace,
            }, self.stream);
@@ -224,7 +224,7 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
}

pub fn writeStream(
    out_stream: var,
    out_stream: anytype,
    comptime max_depth: usize,
) WriteStream(@TypeOf(out_stream), max_depth) {
    return WriteStream(@TypeOf(out_stream), max_depth).init(out_stream);

202
lib/std/log.zig
Normal file
@@ -0,0 +1,202 @@
const std = @import("std.zig");
const builtin = std.builtin;
const root = @import("root");

//! std.log is a standardized interface for logging which allows for the logging
//! of programs and libraries using this interface to be formatted and filtered
//! by the implementer of the root.log function.
//!
//! The scope parameter should be used to give context to the logging. For
//! example, a library called 'libfoo' might use .libfoo as its scope.
//!
//! An example root.log might look something like this:
//!
//! ```
//! const std = @import("std");
//!
//! // Set the log level to warning
//! pub const log_level: std.log.Level = .warn;
//!
//! // Define root.log to override the std implementation
//! pub fn log(
//!     comptime level: std.log.Level,
//!     comptime scope: @TypeOf(.EnumLiteral),
//!     comptime format: []const u8,
//!     args: anytype,
//! ) void {
//!     // Ignore all non-critical logging from sources other than
//!     // .my_project and .nice_library
//!     const scope_prefix = "(" ++ switch (scope) {
//!         .my_project, .nice_library => @tagName(scope),
//!         else => if (@enumToInt(level) <= @enumToInt(std.log.Level.crit))
//!             @tagName(scope)
//!         else
//!             return,
//!     } ++ "): ";
//!
//!     const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;
//!
//!     // Print the message to stderr, silently ignoring any errors
//!     const held = std.debug.getStderrMutex().acquire();
//!     defer held.release();
//!     const stderr = std.debug.getStderrStream();
//!     nosuspend stderr.print(prefix ++ format, args) catch return;
//! }
//!
//! pub fn main() void {
//!     // Won't be printed as log_level is .warn
//!     std.log.info(.my_project, "Starting up.\n", .{});
//!     std.log.err(.nice_library, "Something went very wrong, sorry.\n", .{});
//!     // Won't be printed as it gets filtered out by our log function
//!     std.log.err(.lib_that_logs_too_much, "Added 1 + 1\n", .{});
//! }
//! ```
//! Which produces the following output:
//! ```
//! [err] (nice_library): Something went very wrong, sorry.
//! ```

pub const Level = enum {
    /// Emergency: a condition that cannot be handled, usually followed by a
    /// panic.
    emerg,
    /// Alert: a condition that should be corrected immediately (e.g. database
    /// corruption).
    alert,
    /// Critical: A bug has been detected or something has gone wrong and it
    /// will have an effect on the operation of the program.
    crit,
    /// Error: A bug has been detected or something has gone wrong but it is
    /// recoverable.
    err,
    /// Warning: it is uncertain if something has gone wrong or not, but the
    /// circumstances would be worth investigating.
    warn,
    /// Notice: non-error but significant conditions.
    notice,
    /// Informational: general messages about the state of the program.
    info,
    /// Debug: messages only useful for debugging.
    debug,
};

/// The default log level is based on build mode. Note that in ReleaseSmall
/// builds the default level is emerg but no messages will be stored/logged
/// by the default logger to save space.
pub const default_level: Level = switch (builtin.mode) {
    .Debug => .debug,
    .ReleaseSafe => .notice,
    .ReleaseFast => .err,
    .ReleaseSmall => .emerg,
};

/// The current log level. This is set to root.log_level if present, otherwise
/// log.default_level.
pub const level: Level = if (@hasDecl(root, "log_level"))
    root.log_level
else
    default_level;

fn log(
    comptime message_level: Level,
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    if (@enumToInt(message_level) <= @enumToInt(level)) {
        if (@hasDecl(root, "log")) {
            root.log(message_level, scope, format, args);
        } else if (builtin.mode != .ReleaseSmall) {
            const held = std.debug.getStderrMutex().acquire();
            defer held.release();
            const stderr = std.io.getStdErr().writer();
            nosuspend stderr.print(format, args) catch return;
        }
    }
}

/// Log an emergency message to stderr. This log level is intended to be used
/// for conditions that cannot be handled and is usually followed by a panic.
pub fn emerg(
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    @setCold(true);
    log(.emerg, scope, format, args);
}

/// Log an alert message to stderr. This log level is intended to be used for
/// conditions that should be corrected immediately (e.g. database corruption).
pub fn alert(
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    @setCold(true);
    log(.alert, scope, format, args);
}

/// Log a critical message to stderr. This log level is intended to be used
/// when a bug has been detected or something has gone wrong and it will have
/// an effect on the operation of the program.
pub fn crit(
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    @setCold(true);
    log(.crit, scope, format, args);
}

/// Log an error message to stderr. This log level is intended to be used when
/// a bug has been detected or something has gone wrong but it is recoverable.
pub fn err(
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    @setCold(true);
    log(.err, scope, format, args);
}

/// Log a warning message to stderr. This log level is intended to be used if
/// it is uncertain whether something has gone wrong or not, but the
/// circumstances would be worth investigating.
pub fn warn(
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    log(.warn, scope, format, args);
}

/// Log a notice message to stderr. This log level is intended to be used for
/// non-error but significant conditions.
pub fn notice(
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    log(.notice, scope, format, args);
}

/// Log an info message to stderr. This log level is intended to be used for
/// general messages about the state of the program.
pub fn info(
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    log(.info, scope, format, args);
}

/// Log a debug message to stderr. This log level is intended to be used for
/// messages which are only useful for debugging.
pub fn debug(
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    log(.debug, scope, format, args);
}
@@ -104,7 +104,7 @@ pub fn approxEq(comptime T: type, x: T, y: T, epsilon: T) bool {
}

// TODO: Hide the following in an internal module.
pub fn forceEval(value: var) void {
pub fn forceEval(value: anytype) void {
    const T = @TypeOf(value);
    switch (T) {
        f16 => {
@@ -122,6 +122,11 @@ pub fn forceEval(value: var) void {
            const p = @ptrCast(*volatile f64, &x);
            p.* = x;
        },
        f128 => {
            var x: f128 = undefined;
            const p = @ptrCast(*volatile f128, &x);
            p.* = x;
        },
        else => {
            @compileError("forceEval not implemented for " ++ @typeName(T));
        },
@@ -254,7 +259,7 @@ pub fn Min(comptime A: type, comptime B: type) type {

/// Returns the smaller number. When one of the parameter's type's full range fits in the other,
/// the return type is the smaller type.
pub fn min(x: var, y: var) Min(@TypeOf(x), @TypeOf(y)) {
pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) {
    const Result = Min(@TypeOf(x), @TypeOf(y));
    if (x < y) {
        // TODO Zig should allow this as an implicit cast because x is immutable and in this
@@ -305,7 +310,7 @@ test "math.min" {
    }
}

pub fn max(x: var, y: var) @TypeOf(x, y) {
pub fn max(x: anytype, y: anytype) @TypeOf(x, y) {
    return if (x > y) x else y;
}

@@ -313,7 +318,7 @@ test "math.max" {
    testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2);
}

pub fn clamp(val: var, lower: var, upper: var) @TypeOf(val, lower, upper) {
pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) {
    assert(lower <= upper);
    return max(lower, min(val, upper));
}
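A sketch of the return-type rule documented on `min`: u8's full range fits in u16, so the result type collapses to u8:

```zig
const std = @import("std");

test "math.min return type sketch" {
    const a: u8 = 200;
    const b: u16 = 500;
    const m = std.math.min(a, b);

    comptime std.debug.assert(@TypeOf(m) == u8); // smaller type wins
    std.testing.expect(m == 200);
}
```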
@@ -349,7 +354,7 @@ pub fn sub(comptime T: type, a: T, b: T) (error{Overflow}!T) {
    return if (@subWithOverflow(T, a, b, &answer)) error.Overflow else answer;
}

pub fn negate(x: var) !@TypeOf(x) {
pub fn negate(x: anytype) !@TypeOf(x) {
    return sub(@TypeOf(x), 0, x);
}

@@ -360,7 +365,7 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {

/// Shifts left. Overflowed bits are truncated.
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: var) T {
pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
    const abs_shift_amt = absCast(shift_amt);
    const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
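A sketch of the sign convention documented above: a negative amount turns `shl` into a right shift (and, symmetrically, `shr` into a left shift):

```zig
const std = @import("std");

test "math.shl negative shift sketch" {
    std.testing.expect(std.math.shl(u8, 0b0100, 2) == 0b10000);
    std.testing.expect(std.math.shl(u8, 0b0100, -2) == 0b0001); // shifts right
}
```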
@@ -386,7 +391,7 @@ test "math.shl" {

/// Shifts right. Overflowed bits are truncated.
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: var) T {
pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
    const abs_shift_amt = absCast(shift_amt);
    const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);

@@ -414,7 +419,7 @@ test "math.shr" {

/// Rotates right. Only unsigned values can be rotated.
/// Negative shift values result in shift modulo the bit count.
pub fn rotr(comptime T: type, x: T, r: var) T {
pub fn rotr(comptime T: type, x: T, r: anytype) T {
    if (T.is_signed) {
        @compileError("cannot rotate signed integer");
    } else {
@@ -433,7 +438,7 @@ test "math.rotr" {

/// Rotates left. Only unsigned values can be rotated.
/// Negative shift values result in shift modulo the bit count.
pub fn rotl(comptime T: type, x: T, r: var) T {
pub fn rotl(comptime T: type, x: T, r: anytype) T {
    if (T.is_signed) {
        @compileError("cannot rotate signed integer");
    } else {
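A sketch of the modulo rule on `rotr`: rotating a u8 by 9 is the same as rotating by 1:

```zig
const std = @import("std");

test "math.rotr sketch" {
    // 9 mod 8 == 1, so both calls rotate the low bit into the high bit.
    std.testing.expect(std.math.rotr(u8, 0b0000_0001, @as(u32, 9)) == 0b1000_0000);
    std.testing.expect(std.math.rotr(u8, 0b0000_0001, @as(u32, 1)) == 0b1000_0000);
}
```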
@@ -536,7 +541,7 @@ fn testOverflow() void {
    testing.expect((shlExact(i32, 0b11, 4) catch unreachable) == 0b110000);
}

pub fn absInt(x: var) !@TypeOf(x) {
pub fn absInt(x: anytype) !@TypeOf(x) {
    const T = @TypeOf(x);
    comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt
    comptime assert(T.is_signed); // must pass a signed integer to absInt
@@ -684,7 +689,7 @@ fn testRem() void {

/// Returns the absolute value of the integer parameter.
/// Result is an unsigned integer.
pub fn absCast(x: var) switch (@typeInfo(@TypeOf(x))) {
pub fn absCast(x: anytype) switch (@typeInfo(@TypeOf(x))) {
    .ComptimeInt => comptime_int,
    .Int => |intInfo| std.meta.Int(false, intInfo.bits),
    else => @compileError("absCast only accepts integers"),
@@ -719,7 +724,7 @@ test "math.absCast" {

/// Returns the negation of the integer parameter.
/// Result is a signed integer.
pub fn negateCast(x: var) !std.meta.Int(true, @TypeOf(x).bit_count) {
pub fn negateCast(x: anytype) !std.meta.Int(true, @TypeOf(x).bit_count) {
    if (@TypeOf(x).is_signed) return negate(x);

    const int = std.meta.Int(true, @TypeOf(x).bit_count);
@@ -742,7 +747,7 @@ test "math.negateCast" {

/// Cast an integer to a different integer type. If the value doesn't fit,
/// return an error.
pub fn cast(comptime T: type, x: var) (error{Overflow}!T) {
pub fn cast(comptime T: type, x: anytype) (error{Overflow}!T) {
    comptime assert(@typeInfo(T) == .Int); // must pass an integer
    comptime assert(@typeInfo(@TypeOf(x)) == .Int); // must pass an integer
    if (maxInt(@TypeOf(x)) > maxInt(T) and x > maxInt(T)) {
@@ -767,7 +772,7 @@ test "math.cast" {
pub const AlignCastError = error{UnalignedMemory};

/// Align cast a pointer but return an error if it's the wrong alignment
pub fn alignCast(comptime alignment: u29, ptr: var) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) {
pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) {
    const addr = @ptrToInt(ptr);
    if (addr % alignment != 0) {
        return error.UnalignedMemory;
@@ -775,7 +780,7 @@ pub fn alignCast(comptime alignment: u29, ptr: var) AlignCastError!@TypeOf(@alig
    return @alignCast(alignment, ptr);
}

pub fn isPowerOfTwo(v: var) bool {
pub fn isPowerOfTwo(v: anytype) bool {
    assert(v != 0);
    return (v & (v - 1)) == 0;
}
@@ -892,7 +897,7 @@ test "std.math.log2_int_ceil" {
    testing.expect(log2_int_ceil(u32, 10) == 4);
}

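A sketch of the checked cast documented above: values that fit convert, values that do not return `error.Overflow`:

```zig
const std = @import("std");

test "math.cast sketch" {
    std.testing.expectEqual(@as(u8, 255), try std.math.cast(u8, @as(u32, 255)));
    std.testing.expectError(error.Overflow, std.math.cast(u8, @as(u32, 300)));
}
```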
pub fn lossyCast(comptime T: type, value: var) T {
|
||||
pub fn lossyCast(comptime T: type, value: anytype) T {
|
||||
switch (@typeInfo(@TypeOf(value))) {
|
||||
.Int => return @intToFloat(T, value),
|
||||
.Float => return @floatCast(T, value),
|
||||
@@ -1026,7 +1031,7 @@ pub const Order = enum {
};

/// Given two numbers, returns their ordering with respect to each other.
pub fn order(a: var, b: var) Order {
pub fn order(a: anytype, b: anytype) Order {
    if (a == b) {
        return .eq;
    } else if (a < b) {
@@ -1042,19 +1047,14 @@ pub fn order(a: var, b: var) Order {
pub const CompareOperator = enum {
    /// Less than (`<`)
    lt,

    /// Less than or equal (`<=`)
    lte,

    /// Equal (`==`)
    eq,

    /// Greater than or equal (`>=`)
    gte,

    /// Greater than (`>`)
    gt,

    /// Not equal (`!=`)
    neq,
};
@@ -1062,7 +1062,7 @@ pub const CompareOperator = enum {
/// This function does the same thing as comparison operators, except that the
/// operator is a runtime-known enum value. Works on any operands that
/// support comparison operators.
pub fn compare(a: var, op: CompareOperator, b: var) bool {
pub fn compare(a: anytype, op: CompareOperator, b: anytype) bool {
    return switch (op) {
        .lt => a < b,
        .lte => a <= b,
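A sketch of `order` and `compare` together; note that the operator really is an ordinary runtime value:

```zig
const std = @import("std");
const math = std.math;
const testing = std.testing;

test "order and compare usage sketch" {
    testing.expect(math.order(1, 2) == .lt);
    // the operator is selected at runtime, not comptime
    var op: math.CompareOperator = .gte;
    testing.expect(math.compare(@as(i32, 5), op, @as(i32, 5)));
}
```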
@@ -12,7 +12,7 @@ const expect = std.testing.expect;
///
/// Special cases:
/// - acos(x) = nan if x < -1 or x > 1
pub fn acos(x: var) @TypeOf(x) {
pub fn acos(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => acos32(x),

@@ -14,7 +14,7 @@ const expect = std.testing.expect;
/// Special cases:
/// - acosh(x) = snan if x < 1
/// - acosh(nan) = nan
pub fn acosh(x: var) @TypeOf(x) {
pub fn acosh(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => acosh32(x),

@@ -13,7 +13,7 @@ const expect = std.testing.expect;
/// Special Cases:
/// - asin(+-0) = +-0
/// - asin(x) = nan if x < -1 or x > 1
pub fn asin(x: var) @TypeOf(x) {
pub fn asin(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => asin32(x),

@@ -15,7 +15,7 @@ const maxInt = std.math.maxInt;
/// - asinh(+-0) = +-0
/// - asinh(+-inf) = +-inf
/// - asinh(nan) = nan
pub fn asinh(x: var) @TypeOf(x) {
pub fn asinh(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => asinh32(x),

@@ -13,7 +13,7 @@ const expect = std.testing.expect;
/// Special Cases:
/// - atan(+-0) = +-0
/// - atan(+-inf) = +-pi/2
pub fn atan(x: var) @TypeOf(x) {
pub fn atan(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => atan32(x),

@@ -15,7 +15,7 @@ const maxInt = std.math.maxInt;
/// - atanh(+-1) = +-inf with signal
/// - atanh(x) = nan if |x| > 1 with signal
/// - atanh(nan) = nan
pub fn atanh(x: var) @TypeOf(x) {
pub fn atanh(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => atanh_32(x),

@@ -12,7 +12,7 @@ const assert = std.debug.assert;

/// Returns the number of limbs needed to store `scalar`, which must be a
/// primitive integer value.
pub fn calcLimbLen(scalar: var) usize {
pub fn calcLimbLen(scalar: anytype) usize {
    const T = @TypeOf(scalar);
    switch (@typeInfo(T)) {
        .Int => |info| {
@@ -110,7 +110,7 @@ pub const Mutable = struct {
    /// `value` is a primitive integer type.
    /// Asserts the value fits within the provided `limbs_buffer`.
    /// Note: `calcLimbLen` can be used to figure out how big an array to allocate for `limbs_buffer`.
    pub fn init(limbs_buffer: []Limb, value: var) Mutable {
    pub fn init(limbs_buffer: []Limb, value: anytype) Mutable {
        limbs_buffer[0] = 0;
        var self: Mutable = .{
            .limbs = limbs_buffer,
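A sketch of allocation-free big-integer construction, using `calcLimbLen` to size a stack buffer as the doc comment suggests (the `std.math.big.int` and `std.math.big.Limb` paths are assumptions based on this module's layout):

```zig
const std = @import("std");
const big = std.math.big;

test "Mutable.init usage sketch" {
    // comptime-known scalar, so the limb buffer can live on the stack
    var limbs: [big.int.calcLimbLen(@as(u64, 0xdeadbeef))]big.Limb = undefined;
    const m = big.int.Mutable.init(&limbs, @as(u64, 0xdeadbeef));
    std.testing.expect(m.toConst().orderAgainstScalar(@as(u64, 0xdeadbeef)) == .eq);
}
```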
@@ -169,7 +169,7 @@ pub const Mutable = struct {
    /// Asserts the value fits within the limbs buffer.
    /// Note: `calcLimbLen` can be used to figure out how big the limbs buffer
    /// needs to be to store a specific value.
    pub fn set(self: *Mutable, value: var) void {
    pub fn set(self: *Mutable, value: anytype) void {
        const T = @TypeOf(value);

        switch (@typeInfo(T)) {
@@ -281,7 +281,7 @@ pub const Mutable = struct {
    ///
    /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
    /// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`.
    pub fn addScalar(r: *Mutable, a: Const, scalar: var) void {
    pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void {
        var limbs: [calcLimbLen(scalar)]Limb = undefined;
        const operand = init(&limbs, scalar).toConst();
        return add(r, a, operand);
@@ -1058,7 +1058,7 @@ pub const Const = struct {
        self: Const,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        out_stream: var,
        out_stream: anytype,
    ) !void {
        comptime var radix = 10;
        comptime var uppercase = false;
@@ -1105,7 +1105,7 @@ pub const Const = struct {
        assert(base <= 16);

        if (self.eqZero()) {
            return mem.dupe(allocator, u8, "0");
            return allocator.dupe(u8, "0");
        }
        const string = try allocator.alloc(u8, self.sizeInBaseUpperBound(base));
        errdefer allocator.free(string);
@@ -1261,7 +1261,7 @@ pub const Const = struct {
    }

    /// Same as `order` but the right-hand operand is a primitive integer.
    pub fn orderAgainstScalar(lhs: Const, scalar: var) math.Order {
    pub fn orderAgainstScalar(lhs: Const, scalar: anytype) math.Order {
        var limbs: [calcLimbLen(scalar)]Limb = undefined;
        const rhs = Mutable.init(&limbs, scalar);
        return order(lhs, rhs.toConst());
@@ -1333,7 +1333,7 @@ pub const Managed = struct {
    /// Creates a new `Managed` with value `value`.
    ///
    /// This is identical to an `init`, followed by a `set`.
    pub fn initSet(allocator: *Allocator, value: var) !Managed {
    pub fn initSet(allocator: *Allocator, value: anytype) !Managed {
        var s = try Managed.init(allocator);
        try s.set(value);
        return s;
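A sketch of the allocator-backed `Managed` API, using `std.testing.allocator` as other std tests do (the module path is an assumption as above):

```zig
const std = @import("std");
const big = std.math.big;

test "Managed.initSet usage sketch" {
    var a = try big.int.Managed.initSet(std.testing.allocator, @as(u64, 123456789));
    defer a.deinit();
    var b = try big.int.Managed.init(std.testing.allocator);
    defer b.deinit();
    // b = a + 1; capacity growth is handled internally via ensureCapacity
    try b.addScalar(a.toConst(), @as(u32, 1));
    std.testing.expect(b.toConst().orderAgainstScalar(@as(u64, 123456790)) == .eq);
}
```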
@@ -1496,7 +1496,7 @@ pub const Managed = struct {
    }

    /// Sets a Managed to value. Value must be a primitive integer type.
    pub fn set(self: *Managed, value: var) Allocator.Error!void {
    pub fn set(self: *Managed, value: anytype) Allocator.Error!void {
        try self.ensureCapacity(calcLimbLen(value));
        var m = self.toMutable();
        m.set(value);
@@ -1549,7 +1549,7 @@ pub const Managed = struct {
        self: Managed,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        out_stream: var,
        out_stream: anytype,
    ) !void {
        return self.toConst().format(fmt, options, out_stream);
    }
@@ -1607,7 +1607,7 @@ pub const Managed = struct {
    /// scalar is a primitive integer type.
    ///
    /// Returns an error if memory could not be allocated.
    pub fn addScalar(r: *Managed, a: Const, scalar: var) Allocator.Error!void {
    pub fn addScalar(r: *Managed, a: Const, scalar: anytype) Allocator.Error!void {
        try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1);
        var m = r.toMutable();
        m.addScalar(a, scalar);

@@ -43,7 +43,7 @@ pub const Rational = struct {
    }

    /// Set a Rational from a primitive integer type.
    pub fn setInt(self: *Rational, a: var) !void {
    pub fn setInt(self: *Rational, a: anytype) !void {
        try self.p.set(a);
        try self.q.set(1);
    }
@@ -280,7 +280,7 @@ pub const Rational = struct {
    }

    /// Set a rational from an integer ratio.
    pub fn setRatio(self: *Rational, p: var, q: var) !void {
    pub fn setRatio(self: *Rational, p: anytype, q: anytype) !void {
        try self.p.set(p);
        try self.q.set(q);
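A sketch of the `Rational` setters (whether `setRatio` reduces the fraction to lowest terms is left as an assumption, so the example does not depend on it):

```zig
const std = @import("std");
const big = std.math.big;

test "Rational setters usage sketch" {
    var r = try big.Rational.init(std.testing.allocator);
    defer r.deinit();
    try r.setRatio(@as(u32, 2), @as(u32, 4)); // 2/4
    try r.setInt(@as(i32, -3)); // -3/1
}
```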
@@ -14,7 +14,7 @@ const expect = std.testing.expect;
/// - cbrt(+-0) = +-0
/// - cbrt(+-inf) = +-inf
/// - cbrt(nan) = nan
pub fn cbrt(x: var) @TypeOf(x) {
pub fn cbrt(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => cbrt32(x),

@@ -15,11 +15,12 @@ const expect = std.testing.expect;
/// - ceil(+-0) = +-0
/// - ceil(+-inf) = +-inf
/// - ceil(nan) = nan
pub fn ceil(x: var) @TypeOf(x) {
pub fn ceil(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => ceil32(x),
        f64 => ceil64(x),
        f128 => ceil128(x),
        else => @compileError("ceil not implemented for " ++ @typeName(T)),
    };
}
@@ -86,9 +87,37 @@ fn ceil64(x: f64) f64 {
    }
}

fn ceil128(x: f128) f128 {
    const u = @bitCast(u128, x);
    const e = (u >> 112) & 0x7FFF;
    var y: f128 = undefined;

    if (e >= 0x3FFF + 112 or x == 0) return x;

    if (u >> 127 != 0) {
        y = x - math.f128_toint + math.f128_toint - x;
    } else {
        y = x + math.f128_toint - math.f128_toint - x;
    }

    if (e <= 0x3FFF - 1) {
        math.forceEval(y);
        if (u >> 127 != 0) {
            return -0.0;
        } else {
            return 1.0;
        }
    } else if (y < 0) {
        return x + y + 1;
    } else {
        return x + y;
    }
}
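For context on the trick above: `math.f128_toint` is presumably 2^112, the smallest f128 magnitude at which every representable value is an integer, so adding and then subtracting it forces the fraction bits to be discarded by the rounding mode, leaving `y` as the signed rounding error. A minimal f64 analogue, with 2^52 playing the same role (the constant here is my own illustration, not part of the diff):

```zig
const std = @import("std");

test "toint rounding trick, f64 analogue" {
    const toint: f64 = 4503599627370496.0; // 2^52
    const x: f64 = 1.3;
    // round-to-nearest discards the fraction: (1.3 + 2^52) - 2^52 == 1.0
    const rounded = x + toint - toint;
    std.testing.expect(rounded == 1.0);
}
```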
test "math.ceil" {
|
||||
expect(ceil(@as(f32, 0.0)) == ceil32(0.0));
|
||||
expect(ceil(@as(f64, 0.0)) == ceil64(0.0));
|
||||
expect(ceil(@as(f128, 0.0)) == ceil128(0.0));
|
||||
}
|
||||
|
||||
test "math.ceil32" {
|
||||
@@ -103,6 +132,12 @@ test "math.ceil64" {
|
||||
expect(ceil64(0.2) == 1.0);
|
||||
}
|
||||
|
||||
test "math.ceil128" {
|
||||
expect(ceil128(1.3) == 2.0);
|
||||
expect(ceil128(-1.3) == -1.0);
|
||||
expect(ceil128(0.2) == 1.0);
|
||||
}
|
||||
|
||||
test "math.ceil32.special" {
|
||||
expect(ceil32(0.0) == 0.0);
|
||||
expect(ceil32(-0.0) == -0.0);
|
||||
@@ -118,3 +153,11 @@ test "math.ceil64.special" {
|
||||
expect(math.isNegativeInf(ceil64(-math.inf(f64))));
|
||||
expect(math.isNan(ceil64(math.nan(f64))));
|
||||
}
|
||||
|
||||
test "math.ceil128.special" {
|
||||
expect(ceil128(0.0) == 0.0);
|
||||
expect(ceil128(-0.0) == -0.0);
|
||||
expect(math.isPositiveInf(ceil128(math.inf(f128))));
|
||||
expect(math.isNegativeInf(ceil128(-math.inf(f128))));
|
||||
expect(math.isNan(ceil128(math.nan(f128))));
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the absolute value (modulus) of z.
pub fn abs(z: var) @TypeOf(z.re) {
pub fn abs(z: anytype) @TypeOf(z.re) {
    const T = @TypeOf(z.re);
    return math.hypot(T, z.re, z.im);
}
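A sketch tying the complex helpers together; `Complex(T).new` is the constructor used throughout this diff, and `math.approxEq` is assumed available as in other std.math tests of this era:

```zig
const std = @import("std");
const cmath = std.math.complex;

test "complex abs/arg usage sketch" {
    const z = cmath.Complex(f32).new(3.0, 4.0);
    // |3 + 4i| = 5, arg(3 + 4i) = atan2(4, 3)
    std.testing.expect(std.math.approxEq(f32, cmath.abs(z), 5.0, 1e-6));
    std.testing.expect(std.math.approxEq(f32, cmath.arg(z), 0.9272952, 1e-6));
}
```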
@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the arc-cosine of z.
pub fn acos(z: var) Complex(@TypeOf(z.re)) {
pub fn acos(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    const q = cmath.asin(z);
    return Complex(T).new(@as(T, math.pi) / 2 - q.re, -q.im);

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the hyperbolic arc-cosine of z.
pub fn acosh(z: var) Complex(@TypeOf(z.re)) {
pub fn acosh(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    const q = cmath.acos(z);
    return Complex(T).new(-q.im, q.re);

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the angular component (in radians) of z.
pub fn arg(z: var) @TypeOf(z.re) {
pub fn arg(z: anytype) @TypeOf(z.re) {
    const T = @TypeOf(z.re);
    return math.atan2(T, z.im, z.re);
}

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the arc-sine of z.
pub fn asin(z: var) Complex(@TypeOf(z.re)) {
pub fn asin(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    const x = z.re;
    const y = z.im;

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the hyperbolic arc-sine of z.
pub fn asinh(z: var) Complex(@TypeOf(z.re)) {
pub fn asinh(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    const q = Complex(T).new(-z.im, z.re);
    const r = cmath.asin(q);

@@ -12,7 +12,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the arc-tangent of z.
pub fn atan(z: var) @TypeOf(z) {
pub fn atan(z: anytype) @TypeOf(z) {
    const T = @TypeOf(z.re);
    return switch (T) {
        f32 => atan32(z),

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the hyperbolic arc-tangent of z.
pub fn atanh(z: var) Complex(@TypeOf(z.re)) {
pub fn atanh(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    const q = Complex(T).new(-z.im, z.re);
    const r = cmath.atan(q);

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the complex conjugate of z.
pub fn conj(z: var) Complex(@TypeOf(z.re)) {
pub fn conj(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    return Complex(T).new(z.re, -z.im);
}

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the cosine of z.
pub fn cos(z: var) Complex(@TypeOf(z.re)) {
pub fn cos(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    const p = Complex(T).new(-z.im, z.re);
    return cmath.cosh(p);

@@ -14,7 +14,7 @@ const Complex = cmath.Complex;
const ldexp_cexp = @import("ldexp.zig").ldexp_cexp;

/// Returns the hyperbolic cosine of z.
pub fn cosh(z: var) Complex(@TypeOf(z.re)) {
pub fn cosh(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    return switch (T) {
        f32 => cosh32(z),

@@ -14,7 +14,7 @@ const Complex = cmath.Complex;
const ldexp_cexp = @import("ldexp.zig").ldexp_cexp;

/// Returns e raised to the power of z (e^z).
pub fn exp(z: var) @TypeOf(z) {
pub fn exp(z: anytype) @TypeOf(z) {
    const T = @TypeOf(z.re);

    return switch (T) {

@@ -11,7 +11,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns exp(z) scaled to avoid overflow.
pub fn ldexp_cexp(z: var, expt: i32) @TypeOf(z) {
pub fn ldexp_cexp(z: anytype, expt: i32) @TypeOf(z) {
    const T = @TypeOf(z.re);

    return switch (T) {

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the natural logarithm of z.
pub fn log(z: var) Complex(@TypeOf(z.re)) {
pub fn log(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    const r = cmath.abs(z);
    const phi = cmath.arg(z);

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the projection of z onto the Riemann sphere.
pub fn proj(z: var) Complex(@TypeOf(z.re)) {
pub fn proj(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);

    if (math.isInf(z.re) or math.isInf(z.im)) {

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the sine of z.
pub fn sin(z: var) Complex(@TypeOf(z.re)) {
pub fn sin(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    const p = Complex(T).new(-z.im, z.re);
    const q = cmath.sinh(p);

@@ -14,7 +14,7 @@ const Complex = cmath.Complex;
const ldexp_cexp = @import("ldexp.zig").ldexp_cexp;

/// Returns the hyperbolic sine of z.
pub fn sinh(z: var) @TypeOf(z) {
pub fn sinh(z: anytype) @TypeOf(z) {
    const T = @TypeOf(z.re);
    return switch (T) {
        f32 => sinh32(z),

@@ -12,7 +12,7 @@ const Complex = cmath.Complex;

/// Returns the square root of z. The real and imaginary parts of the result have the same sign
/// as the imaginary part of z.
pub fn sqrt(z: var) @TypeOf(z) {
pub fn sqrt(z: anytype) @TypeOf(z) {
    const T = @TypeOf(z.re);

    return switch (T) {

@@ -5,7 +5,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the tangent of z.
pub fn tan(z: var) Complex(@TypeOf(z.re)) {
pub fn tan(z: anytype) Complex(@TypeOf(z.re)) {
    const T = @TypeOf(z.re);
    const q = Complex(T).new(-z.im, z.re);
    const r = cmath.tanh(q);

@@ -12,7 +12,7 @@ const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the hyperbolic tangent of z.
pub fn tanh(z: var) @TypeOf(z) {
pub fn tanh(z: anytype) @TypeOf(z) {
    const T = @TypeOf(z.re);
    return switch (T) {
        f32 => tanh32(z),

@@ -13,7 +13,7 @@ const expect = std.testing.expect;
/// Special Cases:
/// - cos(+-inf) = nan
/// - cos(nan) = nan
pub fn cos(x: var) @TypeOf(x) {
pub fn cos(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => cos_(f32, x),

@@ -17,7 +17,7 @@ const maxInt = std.math.maxInt;
/// - cosh(+-0) = 1
/// - cosh(+-inf) = +inf
/// - cosh(nan) = nan
pub fn cosh(x: var) @TypeOf(x) {
pub fn cosh(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => cosh32(x),

@@ -14,7 +14,7 @@ const builtin = @import("builtin");
/// Special Cases:
/// - exp(+inf) = +inf
/// - exp(nan) = nan
pub fn exp(x: var) @TypeOf(x) {
pub fn exp(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => exp32(x),

@@ -13,7 +13,7 @@ const expect = std.testing.expect;
/// Special Cases:
/// - exp2(+inf) = +inf
/// - exp2(nan) = nan
pub fn exp2(x: var) @TypeOf(x) {
pub fn exp2(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => exp2_32(x),

@@ -18,7 +18,7 @@ const expect = std.testing.expect;
/// - expm1(+inf) = +inf
/// - expm1(-inf) = -1
/// - expm1(nan) = nan
pub fn expm1(x: var) @TypeOf(x) {
pub fn expm1(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => expm1_32(x),

@@ -7,7 +7,7 @@
const math = @import("../math.zig");

/// Returns exp(x) / 2 for x >= log(maxFloat(T)).
pub fn expo2(x: var) @TypeOf(x) {
pub fn expo2(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => expo2f(x),

@@ -14,7 +14,7 @@ const maxInt = std.math.maxInt;
/// Special Cases:
/// - fabs(+-inf) = +inf
/// - fabs(nan) = nan
pub fn fabs(x: var) @TypeOf(x) {
pub fn fabs(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f16 => fabs16(x),
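All of these functions share one shape: an `anytype` entry point that switches on `@TypeOf(x)` at comptime and dispatches to a width-specific kernel. A sketch of the call-site ergonomics, which stay identical across widths:

```zig
const std = @import("std");
const math = std.math;
const testing = std.testing;

test "anytype float dispatch sketch" {
    // one call site, three widths; the kernel is chosen at comptime
    testing.expect(math.fabs(@as(f16, -1.0)) == 1.0);
    testing.expect(math.fabs(@as(f32, -1.0)) == 1.0);
    testing.expect(math.fabs(@as(f64, -1.0)) == 1.0);
}
```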