diff --git a/.builds/freebsd.yml b/.builds/freebsd.yml
index 37e6a65680..a149d060d1 100644
--- a/.builds/freebsd.yml
+++ b/.builds/freebsd.yml
@@ -1,7 +1,7 @@
image: freebsd/latest
secrets:
- 51bfddf5-86a6-4e01-8576-358c72a4a0a4
- - 5cfede76-914e-4071-893e-e5e2e6ae3cea
+ - 512ed797-0927-475a-83fd-bc997792860c
sources:
- https://github.com/ziglang/zig
tasks:
diff --git a/.builds/netbsd.yml b/.builds/netbsd.yml
index 69395b5e05..f1c1c19385 100644
--- a/.builds/netbsd.yml
+++ b/.builds/netbsd.yml
@@ -1,7 +1,7 @@
image: netbsd/latest
secrets:
- 51bfddf5-86a6-4e01-8576-358c72a4a0a4
- - 5cfede76-914e-4071-893e-e5e2e6ae3cea
+ - 512ed797-0927-475a-83fd-bc997792860c
sources:
- https://github.com/ziglang/zig
tasks:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 54ff9b6179..2839a3cffc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -537,41 +537,43 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/zig/system.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/system/x86.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/tokenizer.zig"
+ "${CMAKE_SOURCE_DIR}/src/Air.zig"
+ "${CMAKE_SOURCE_DIR}/src/AstGen.zig"
"${CMAKE_SOURCE_DIR}/src/Cache.zig"
"${CMAKE_SOURCE_DIR}/src/Compilation.zig"
"${CMAKE_SOURCE_DIR}/src/DepTokenizer.zig"
+ "${CMAKE_SOURCE_DIR}/src/Liveness.zig"
"${CMAKE_SOURCE_DIR}/src/Module.zig"
"${CMAKE_SOURCE_DIR}/src/Package.zig"
"${CMAKE_SOURCE_DIR}/src/RangeSet.zig"
+ "${CMAKE_SOURCE_DIR}/src/Sema.zig"
"${CMAKE_SOURCE_DIR}/src/ThreadPool.zig"
"${CMAKE_SOURCE_DIR}/src/TypedValue.zig"
"${CMAKE_SOURCE_DIR}/src/WaitGroup.zig"
- "${CMAKE_SOURCE_DIR}/src/AstGen.zig"
+ "${CMAKE_SOURCE_DIR}/src/Zir.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/aarch64/bits.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/arm/bits.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/riscv64/bits.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/x86_64/bits.zig"
"${CMAKE_SOURCE_DIR}/src/clang.zig"
"${CMAKE_SOURCE_DIR}/src/clang_options.zig"
"${CMAKE_SOURCE_DIR}/src/clang_options_data.zig"
"${CMAKE_SOURCE_DIR}/src/codegen.zig"
- "${CMAKE_SOURCE_DIR}/src/codegen/aarch64.zig"
- "${CMAKE_SOURCE_DIR}/src/codegen/arm.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/c.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/llvm.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/llvm/bindings.zig"
- "${CMAKE_SOURCE_DIR}/src/codegen/riscv64.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/wasm.zig"
- "${CMAKE_SOURCE_DIR}/src/codegen/x86_64.zig"
"${CMAKE_SOURCE_DIR}/src/glibc.zig"
"${CMAKE_SOURCE_DIR}/src/introspect.zig"
- "${CMAKE_SOURCE_DIR}/src/Air.zig"
"${CMAKE_SOURCE_DIR}/src/libc_installation.zig"
"${CMAKE_SOURCE_DIR}/src/libcxx.zig"
"${CMAKE_SOURCE_DIR}/src/libtsan.zig"
"${CMAKE_SOURCE_DIR}/src/libunwind.zig"
"${CMAKE_SOURCE_DIR}/src/link.zig"
"${CMAKE_SOURCE_DIR}/src/link/C.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/C/zig.h"
"${CMAKE_SOURCE_DIR}/src/link/Coff.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf.zig"
- "${CMAKE_SOURCE_DIR}/src/link/Plan9.zig"
- "${CMAKE_SOURCE_DIR}/src/link/Plan9/aout.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Archive.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Atom.zig"
@@ -582,20 +584,22 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/link/MachO/Trie.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/bind.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/commands.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/Plan9.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/Plan9/aout.zig"
"${CMAKE_SOURCE_DIR}/src/link/Wasm.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/msdos-stub.bin"
"${CMAKE_SOURCE_DIR}/src/link/tapi.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/tapi/Tokenizer.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/parse.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/parse/test.zig"
- "${CMAKE_SOURCE_DIR}/src/link/tapi/Tokenizer.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/yaml.zig"
- "${CMAKE_SOURCE_DIR}/src/link/C/zig.h"
- "${CMAKE_SOURCE_DIR}/src/link/msdos-stub.bin"
- "${CMAKE_SOURCE_DIR}/src/Liveness.zig"
"${CMAKE_SOURCE_DIR}/src/main.zig"
"${CMAKE_SOURCE_DIR}/src/mingw.zig"
"${CMAKE_SOURCE_DIR}/src/musl.zig"
+ "${CMAKE_SOURCE_DIR}/src/print_air.zig"
"${CMAKE_SOURCE_DIR}/src/print_env.zig"
"${CMAKE_SOURCE_DIR}/src/print_targets.zig"
+ "${CMAKE_SOURCE_DIR}/src/print_zir.zig"
"${CMAKE_SOURCE_DIR}/src/stage1.zig"
"${CMAKE_SOURCE_DIR}/src/target.zig"
"${CMAKE_SOURCE_DIR}/src/tracy.zig"
@@ -605,8 +609,6 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/value.zig"
"${CMAKE_SOURCE_DIR}/src/wasi_libc.zig"
"${CMAKE_SOURCE_DIR}/src/windows_sdk.zig"
- "${CMAKE_SOURCE_DIR}/src/Zir.zig"
- "${CMAKE_SOURCE_DIR}/src/Sema.zig"
)
if(MSVC)
diff --git a/build.zig b/build.zig
index 8bbf06e83a..acfb43337e 100644
--- a/build.zig
+++ b/build.zig
@@ -18,6 +18,7 @@ pub fn build(b: *Builder) !void {
const mode = b.standardReleaseOptions();
const target = b.standardTargetOptions(.{});
const single_threaded = b.option(bool, "single-threaded", "Build artifacts that run in single threaded mode") orelse false;
+ const use_zig_libcxx = b.option(bool, "use-zig-libcxx", "If libc++ is needed, use zig's bundled version, don't try to integrate with the system") orelse false;
var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
docgen_exe.single_threaded = single_threaded;
@@ -125,10 +126,18 @@ pub fn build(b: *Builder) !void {
exe.install();
exe.setBuildMode(mode);
exe.setTarget(target);
- toolchain_step.dependOn(&exe.step);
+ if (!skip_stage2_tests) {
+ toolchain_step.dependOn(&exe.step);
+ }
b.default_step.dependOn(&exe.step);
exe.single_threaded = single_threaded;
+ if (target.isWindows() and target.getAbi() == .gnu) {
+ // LTO is currently broken on MinGW; this can be removed once it is fixed upstream.
+ exe.want_lto = false;
+ test_stage2.want_lto = false;
+ }
+
const exe_options = b.addOptions();
exe.addOptions("build_options", exe_options);
@@ -182,8 +191,8 @@ pub fn build(b: *Builder) !void {
b.addSearchPrefix(cfg.cmake_prefix_path);
}
- try addCmakeCfgOptionsToExe(b, cfg, exe);
- try addCmakeCfgOptionsToExe(b, cfg, test_stage2);
+ try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
+ try addCmakeCfgOptionsToExe(b, cfg, test_stage2, use_zig_libcxx);
} else {
// Here we are -Denable-llvm but no cmake integration.
try addStaticLlvmOptionsToExe(exe);
@@ -260,12 +269,24 @@ pub fn build(b: *Builder) !void {
b.allocator,
&[_][]const u8{ tracy_path, "TracyClient.cpp" },
) catch unreachable;
+
+ // On MinGW, we must opt into Windows 7+ (0x0601) to get some APIs required by Tracy.
+ const tracy_c_flags: []const []const u8 = if (target.isWindows() and target.getAbi() == .gnu)
+ &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined", "-D_WIN32_WINNT=0x601" }
+ else
+ &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" };
+
exe.addIncludeDir(tracy_path);
- exe.addCSourceFile(client_cpp, &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" });
+ exe.addCSourceFile(client_cpp, tracy_c_flags);
if (!enable_llvm) {
exe.linkSystemLibraryName("c++");
}
exe.linkLibC();
+
+ if (target.isWindows()) {
+ exe.linkSystemLibrary("dbghelp");
+ exe.linkSystemLibrary("ws2_32");
+ }
}
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
@@ -434,6 +455,7 @@ fn addCmakeCfgOptionsToExe(
b: *Builder,
cfg: CMakeConfig,
exe: *std.build.LibExeObjStep,
+ use_zig_libcxx: bool,
) !void {
exe.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{
cfg.cmake_binary_dir,
@@ -446,28 +468,32 @@ fn addCmakeCfgOptionsToExe(
addCMakeLibraryList(exe, cfg.lld_libraries);
addCMakeLibraryList(exe, cfg.llvm_libraries);
- const need_cpp_includes = true;
+ if (use_zig_libcxx) {
+ exe.linkLibCpp();
+ } else {
+ const need_cpp_includes = true;
- // System -lc++ must be used because in this code path we are attempting to link
- // against system-provided LLVM, Clang, LLD.
- if (exe.target.getOsTag() == .linux) {
- // First we try to static link against gcc libstdc++. If that doesn't work,
- // we fall back to -lc++ and cross our fingers.
- addCxxKnownPath(b, cfg, exe, "libstdc++.a", "", need_cpp_includes) catch |err| switch (err) {
- error.RequiredLibraryNotFound => {
- exe.linkSystemLibrary("c++");
- },
- else => |e| return e,
- };
- exe.linkSystemLibrary("unwind");
- } else if (exe.target.isFreeBSD()) {
- try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
- exe.linkSystemLibrary("pthread");
- } else if (exe.target.getOsTag() == .openbsd) {
- try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
- try addCxxKnownPath(b, cfg, exe, "libc++abi.a", null, need_cpp_includes);
- } else if (exe.target.isDarwin()) {
- exe.linkSystemLibrary("c++");
+ // System -lc++ must be used because in this code path we are attempting to link
+ // against system-provided LLVM, Clang, LLD.
+ if (exe.target.getOsTag() == .linux) {
+ // First we try to static link against gcc libstdc++. If that doesn't work,
+ // we fall back to -lc++ and cross our fingers.
+ addCxxKnownPath(b, cfg, exe, "libstdc++.a", "", need_cpp_includes) catch |err| switch (err) {
+ error.RequiredLibraryNotFound => {
+ exe.linkSystemLibrary("c++");
+ },
+ else => |e| return e,
+ };
+ exe.linkSystemLibrary("unwind");
+ } else if (exe.target.isFreeBSD()) {
+ try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
+ exe.linkSystemLibrary("pthread");
+ } else if (exe.target.getOsTag() == .openbsd) {
+ try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
+ try addCxxKnownPath(b, cfg, exe, "libc++abi.a", null, need_cpp_includes);
+ } else if (exe.target.isDarwin()) {
+ exe.linkSystemLibrary("c++");
+ }
}
if (cfg.dia_guids_lib.len != 0) {
@@ -504,6 +530,7 @@ fn addStaticLlvmOptionsToExe(
if (exe.target.getOs().tag == .windows) {
exe.linkSystemLibrary("version");
exe.linkSystemLibrary("uuid");
+ exe.linkSystemLibrary("ole32");
}
}
diff --git a/ci/azure/linux_script b/ci/azure/linux_script
index 96676b928f..3a1f8b8928 100755
--- a/ci/azure/linux_script
+++ b/ci/azure/linux_script
@@ -20,7 +20,7 @@ cd $HOME
wget -nv "https://ziglang.org/deps/$CACHE_BASENAME.tar.xz"
tar xf "$CACHE_BASENAME.tar.xz"
-QEMUBASE="qemu-linux-x86_64-5.2.0.1"
+QEMUBASE="qemu-linux-x86_64-6.1.0.1"
wget -nv "https://ziglang.org/deps/$QEMUBASE.tar.xz"
tar xf "$QEMUBASE.tar.xz"
export PATH="$(pwd)/$QEMUBASE/bin:$PATH"
@@ -71,9 +71,22 @@ make $JOBS install
release/bin/zig test ../test/behavior.zig -fno-stage1 -fLLVM -I ../test
-release/bin/zig build test-toolchain -Denable-qemu -Denable-wasmtime
-release/bin/zig build test-std -Denable-qemu -Denable-wasmtime
-release/bin/zig build docs -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-behavior -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-compiler-rt -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-std -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-minilibc -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-compare-output -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-standalone -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-stack-traces -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-cli -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-asm-link -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-runtime-safety -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-translate-c -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-run-translated-c -Denable-qemu -Denable-wasmtime
+release/bin/zig build docs -Denable-qemu -Denable-wasmtime
+release/bin/zig build # test building self-hosted without LLVM
+release/bin/zig build test-fmt -Denable-qemu -Denable-wasmtime
+release/bin/zig build test-stage2 -Denable-qemu -Denable-wasmtime
# Look for HTML errors.
tidy -qe ../zig-cache/langref.html
diff --git a/ci/azure/pipelines.yml b/ci/azure/pipelines.yml
index 755020a6f4..f57ef14218 100644
--- a/ci/azure/pipelines.yml
+++ b/ci/azure/pipelines.yml
@@ -38,9 +38,8 @@ jobs:
timeoutInMinutes: 360
steps:
- powershell: |
- (New-Object Net.WebClient).DownloadFile("https://github.com/msys2/msys2-installer/releases/download/2021-06-04/msys2-base-x86_64-20210604.sfx.exe", "sfx.exe")
+ (New-Object Net.WebClient).DownloadFile("https://github.com/msys2/msys2-installer/releases/download/2021-07-25/msys2-base-x86_64-20210725.sfx.exe", "sfx.exe")
.\sfx.exe -y -o\
- del sfx.exe
displayName: Download/Extract/Install MSYS2
- script: |
@REM install updated filesystem package first without dependency checking
diff --git a/ci/azure/windows_msvc_script.bat b/ci/azure/windows_msvc_script.bat
index c7b6f5e733..623d17fe95 100644
--- a/ci/azure/windows_msvc_script.bat
+++ b/ci/azure/windows_msvc_script.bat
@@ -1,7 +1,7 @@
@echo on
SET "SRCROOT=%cd%"
SET "PREVPATH=%PATH%"
-SET "PREVMSYSEM=%MSYSTEM%"
+SET "PREVMSYSTEM=%MSYSTEM%"
set "PATH=%CD:~0,2%\msys64\usr\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem"
SET "MSYSTEM=MINGW64"
diff --git a/doc/docgen.zig b/doc/docgen.zig
index 79fd1519cf..148a8bedb7 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -901,6 +901,7 @@ fn tokenizeAndPrintRaw(
switch (token.tag) {
.eof => break,
+ .keyword_addrspace,
.keyword_align,
.keyword_and,
.keyword_asm,
@@ -1057,15 +1058,21 @@ fn tokenizeAndPrintRaw(
.plus_equal,
.plus_percent,
.plus_percent_equal,
+ .plus_pipe,
+ .plus_pipe_equal,
.minus,
.minus_equal,
.minus_percent,
.minus_percent_equal,
+ .minus_pipe,
+ .minus_pipe_equal,
.asterisk,
.asterisk_equal,
.asterisk_asterisk,
.asterisk_percent,
.asterisk_percent_equal,
+ .asterisk_pipe,
+ .asterisk_pipe_equal,
.arrow,
.colon,
.slash,
@@ -1078,6 +1085,8 @@ fn tokenizeAndPrintRaw(
.angle_bracket_left_equal,
.angle_bracket_angle_bracket_left,
.angle_bracket_angle_bracket_left_equal,
+ .angle_bracket_angle_bracket_left_pipe,
+ .angle_bracket_angle_bracket_left_pipe_equal,
.angle_bracket_right,
.angle_bracket_right_equal,
.angle_bracket_angle_bracket_right,
@@ -1222,9 +1231,7 @@ fn genHtml(
try printSourceBlock(allocator, tokenizer, out, syntax_block);
- // TODO: remove code.just_check_syntax after updating code samples
- // that have stopped working due to a change in the compiler.
- if (!do_code_tests or code.just_check_syntax) {
+ if (!do_code_tests) {
continue;
}
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 9a3eef2390..ec9d96d069 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -1244,8 +1244,9 @@ fn divide(a: i32, b: i32) i32 {
Operators such as {#syntax#}+{#endsyntax#} and {#syntax#}-{#endsyntax#} cause undefined behavior on
- integer overflow. Also available are operations such as {#syntax#}+%{#endsyntax#} and
- {#syntax#}-%{#endsyntax#} which are defined to have wrapping arithmetic on all targets.
+ integer overflow. Alternative operators are provided for wrapping and saturating arithmetic on all targets.
+ {#syntax#}+%{#endsyntax#} and {#syntax#}-%{#endsyntax#} perform wrapping arithmetic
+ while {#syntax#}+|{#endsyntax#} and {#syntax#}-|{#endsyntax#} perform saturating arithmetic.
Zig supports arbitrary bit-width integers, referenced by using
@@ -1395,6 +1396,23 @@ a +%= b{#endsyntax#}
{#syntax#}a >> b
a >>= b{#endsyntax#} |
@@ -1968,14 +2038,14 @@ const B = error{Two};
a!b
x{}
!x -x -%x ~x &x ?x
-* / % ** *% ||
-+ - ++ +% -%
-<< >>
+* / % ** *% *| ||
++ - ++ +% -% +| -|
+<< >> <<|
& ^ | orelse catch
== != < > <= >=
and
or
-= *= /= %= += -= <<= >>= &= ^= |={#endsyntax#}
+= *= *%= *|= /= %= += +%= +|= -= -%= -|= <<= <<|= >>= &= ^= |={#endsyntax#}
{#header_close#}
{#header_close#}
{#header_open|Arrays#}
@@ -2125,7 +2195,7 @@ fn dump(args: anytype) !void {
{#header_open|Multidimensional Arrays#}
- Mutlidimensional arrays can be created by nesting arrays:
+ Multidimensional arrays can be created by nesting arrays:
{#code_begin|test|multidimensional#}
const std = @import("std");
@@ -2898,7 +2968,7 @@ fn bar(x: *const u3) u3 {
}
{#code_end#}
- In this case, the function {#syntax#}bar{#endsyntax#} cannot be called becuse the pointer
+ In this case, the function {#syntax#}bar{#endsyntax#} cannot be called because the pointer
to the non-ABI-aligned field mentions the bit offset, but the function expects an ABI-aligned pointer.
@@ -4771,6 +4841,8 @@ test "parse u64" {
{#header_open|catch#}
If you want to provide a default value, you can use the {#syntax#}catch{#endsyntax#} binary operator:
{#code_begin|syntax#}
+const parseU64 = @import("error_union_parsing_u64.zig").parseU64;
+
fn doAThing(str: []u8) void {
const number = parseU64(str, 10) catch 13;
_ = number; // ...
@@ -4786,6 +4858,8 @@ fn doAThing(str: []u8) void {
Let's say you wanted to return the error if you got one, otherwise continue with the
function logic:
{#code_begin|syntax#}
+const parseU64 = @import("error_union_parsing_u64.zig").parseU64;
+
fn doAThing(str: []u8) !void {
const number = parseU64(str, 10) catch |err| return err;
_ = number; // ...
@@ -4795,6 +4869,8 @@ fn doAThing(str: []u8) !void {
There is a shortcut for this. The {#syntax#}try{#endsyntax#} expression:
{#code_begin|syntax#}
+const parseU64 = @import("error_union_parsing_u64.zig").parseU64;
+
fn doAThing(str: []u8) !void {
const number = try parseU64(str, 10);
_ = number; // ...
@@ -4810,7 +4886,7 @@ fn doAThing(str: []u8) !void {
Maybe you know with complete certainty that an expression will never be an error.
In this case you can do this:
- {#code_begin|syntax#}const number = parseU64("1234", 10) catch unreachable;{#code_end#}
+ {#syntax#}const number = parseU64("1234", 10) catch unreachable;{#endsyntax#}
Here we know for sure that "1234" will parse successfully. So we put the
{#syntax#}unreachable{#endsyntax#} value on the right hand side. {#syntax#}unreachable{#endsyntax#} generates
@@ -4822,7 +4898,7 @@ fn doAThing(str: []u8) !void {
Finally, you may want to take a different action for every situation. For that, we combine
the {#link|if#} and {#link|switch#} expression:
- {#code_begin|syntax#}
+ {#syntax_block|zig|handle_all_error_scenarios.zig#}
fn doAThing(str: []u8) void {
if (parseU64(str, 10)) |number| {
doSomethingWithNumber(number);
@@ -4834,7 +4910,7 @@ fn doAThing(str: []u8) void {
error.InvalidChar => unreachable,
}
}
- {#code_end#}
+ {#end_syntax_block#}
{#header_open|errdefer#}
The other component to error handling is defer statements.
@@ -4845,7 +4921,7 @@ fn doAThing(str: []u8) void {
Example:
- {#code_begin|syntax#}
+ {#syntax_block|zig|errdefer_example.zig#}
fn createFoo(param: i32) !Foo {
const foo = try tryToAllocateFoo();
// now we have allocated foo. we need to free it if the function fails.
@@ -4863,7 +4939,7 @@ fn createFoo(param: i32) !Foo {
// but the defer will run!
return foo;
}
- {#code_end#}
+ {#end_syntax_block#}
The neat thing about this is that you get robust error handling without
the verbosity and cognitive overhead of trying to make sure every exit path
@@ -4955,7 +5031,7 @@ test "merge error sets" {
{#header_open|Inferred Error Sets#}
Because many functions in Zig return a possible error, Zig supports inferring the error set.
- To infer the error set for a function, use this syntax:
+ To infer the error set for a function, prepend the {#syntax#}!{#endsyntax#} operator to the function’s return type, like {#syntax#}!T{#endsyntax#}:
{#code_begin|test|inferred_error_sets#}
// With an inferred error set
@@ -5132,12 +5208,12 @@ fn bang2() void {
For the case when no errors are returned, the cost is a single memory write operation, only in the first non-failable function in the call graph that calls a failable function, i.e. when a function returning {#syntax#}void{#endsyntax#} calls a function returning {#syntax#}error{#endsyntax#}.
This is to initialize this struct in the stack memory:
- {#code_begin|syntax#}
+ {#syntax_block|zig|stack_trace_struct.zig#}
pub const StackTrace = struct {
index: usize,
instruction_addresses: [N]usize,
};
- {#code_end#}
+ {#end_syntax_block#}
Here, N is the maximum function call depth as determined by call graph analysis. Recursion is ignored and counts for 2.
@@ -5150,13 +5226,13 @@ pub const StackTrace = struct {
When generating the code for a function that returns an error, just before the {#syntax#}return{#endsyntax#} statement (only for the {#syntax#}return{#endsyntax#} statements that return errors), Zig generates a call to this function:
- {#code_begin|syntax#}
+ {#syntax_block|zig|zig_return_error_fn.zig#}
// marked as "no-inline" in LLVM IR
fn __zig_return_error(stack_trace: *StackTrace) void {
stack_trace.instruction_addresses[stack_trace.index] = @returnAddress();
stack_trace.index = (stack_trace.index + 1) % N;
}
- {#code_end#}
+ {#end_syntax_block#}
The cost is 2 math operations plus some memory reads and writes. The memory accessed is constrained and should remain cached for the duration of the error return bubbling.
@@ -5206,16 +5282,16 @@ const optional_int: ?i32 = 5678;
Task: call malloc, if the result is null, return null.
C code
- // malloc prototype included for reference
+ {#syntax_block|c|call_malloc_in_c.c#}// malloc prototype included for reference
void *malloc(size_t size);
struct Foo *do_a_thing(void) {
char *ptr = malloc(1234);
if (!ptr) return NULL;
// ...
-}
+}{#end_syntax_block#}
Zig code
- {#code_begin|syntax#}
+ {#syntax_block|zig|call_malloc_from_zig.zig#}
// malloc prototype included for reference
extern fn malloc(size: size_t) ?*u8;
@@ -5223,7 +5299,7 @@ fn doAThing() ?*Foo {
const ptr = malloc(1234) orelse return null;
_ = ptr; // ...
}
- {#code_end#}
+ {#end_syntax_block#}
Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr"
is {#syntax#}*u8{#endsyntax#} not {#syntax#}?*u8{#endsyntax#}. The {#syntax#}orelse{#endsyntax#} keyword
@@ -5233,7 +5309,7 @@ fn doAThing() ?*Foo {
The other form of checking against NULL you might see looks like this:
- void do_a_thing(struct Foo *foo) {
+ {#syntax_block|c|checking_null_in_c.c#}void do_a_thing(struct Foo *foo) {
// do some stuff
if (foo) {
@@ -5241,11 +5317,14 @@ fn doAThing() ?*Foo {
}
// do some stuff
-}
+}{#end_syntax_block#}
In Zig you can accomplish the same thing:
- {#code_begin|syntax#}
+ {#code_begin|syntax|checking_null_in_zig#}
+const Foo = struct{};
+fn doSomethingWithFoo(foo: *Foo) void { _ = foo; }
+
fn doAThing(optional_foo: ?*Foo) void {
// do some stuff
@@ -5540,7 +5619,7 @@ test "coerce to optionals" {
}
{#code_end#}
It works nested inside the {#link|Error Union Type#}, too:
- {#code_begin|test|test_corerce_optional_wrapped_error_union#}
+ {#code_begin|test|test_coerce_optional_wrapped_error_union#}
const std = @import("std");
const expect = std.testing.expect;
@@ -6111,7 +6190,7 @@ test "perform fn" {
different code. In this example, the function {#syntax#}performFn{#endsyntax#} is generated three different times,
for the different values of {#syntax#}prefix_char{#endsyntax#} provided:
- {#code_begin|syntax#}
+ {#syntax_block|zig|performFn_1#}
// From the line:
// expect(performFn('t', 1) == 6);
fn performFn(start_value: i32) i32 {
@@ -6120,8 +6199,8 @@ fn performFn(start_value: i32) i32 {
result = three(result);
return result;
}
- {#code_end#}
- {#code_begin|syntax#}
+ {#end_syntax_block#}
+ {#syntax_block|zig|performFn_2#}
// From the line:
// expect(performFn('o', 0) == 1);
fn performFn(start_value: i32) i32 {
@@ -6129,15 +6208,15 @@ fn performFn(start_value: i32) i32 {
result = one(result);
return result;
}
- {#code_end#}
- {#code_begin|syntax#}
+ {#end_syntax_block#}
+ {#syntax_block|zig|performFn_3#}
// From the line:
// expect(performFn('w', 99) == 99);
fn performFn(start_value: i32) i32 {
var result: i32 = start_value;
return result;
}
- {#code_end#}
+ {#end_syntax_block#}
Note that this happens even in a debug build; in a release build these generated functions still
pass through rigorous LLVM optimizations. The important thing to note, however, is not that this
@@ -6367,11 +6446,11 @@ const Node = struct {
it works fine.
{#header_close#}
- {#header_open|Case Study: printf in Zig#}
+ {#header_open|Case Study: print in Zig#}
- Putting all of this together, let's see how {#syntax#}printf{#endsyntax#} works in Zig.
+ Putting all of this together, let's see how {#syntax#}print{#endsyntax#} works in Zig.
- {#code_begin|exe|printf#}
+ {#code_begin|exe|print#}
const print = @import("std").debug.print;
const a_number: i32 = 1234;
@@ -6386,67 +6465,84 @@ pub fn main() void {
Let's crack open the implementation of this and see how it works:
- {#code_begin|syntax#}
-/// Calls print and then flushes the buffer.
-pub fn printf(self: *Writer, comptime format: []const u8, args: anytype) anyerror!void {
- const State = enum {
- start,
- open_brace,
- close_brace,
- };
+ {#code_begin|syntax|poc_print_fn#}
+const Writer = struct {
+ /// Calls print and then flushes the buffer.
+ pub fn print(self: *Writer, comptime format: []const u8, args: anytype) anyerror!void {
+ const State = enum {
+ start,
+ open_brace,
+ close_brace,
+ };
- comptime var start_index: usize = 0;
- comptime var state = State.start;
- comptime var next_arg: usize = 0;
+ comptime var start_index: usize = 0;
+ comptime var state = State.start;
+ comptime var next_arg: usize = 0;
- inline for (format) |c, i| {
- switch (state) {
- State.start => switch (c) {
- '{' => {
- if (start_index < i) try self.write(format[start_index..i]);
- state = State.open_brace;
+ inline for (format) |c, i| {
+ switch (state) {
+ State.start => switch (c) {
+ '{' => {
+ if (start_index < i) try self.write(format[start_index..i]);
+ state = State.open_brace;
+ },
+ '}' => {
+ if (start_index < i) try self.write(format[start_index..i]);
+ state = State.close_brace;
+ },
+ else => {},
},
- '}' => {
- if (start_index < i) try self.write(format[start_index..i]);
- state = State.close_brace;
+ State.open_brace => switch (c) {
+ '{' => {
+ state = State.start;
+ start_index = i;
+ },
+ '}' => {
+ try self.printValue(args[next_arg]);
+ next_arg += 1;
+ state = State.start;
+ start_index = i + 1;
+ },
+ 's' => {
+ continue;
+ },
+ else => @compileError("Unknown format character: " ++ [1]u8{c}),
},
- else => {},
- },
- State.open_brace => switch (c) {
- '{' => {
- state = State.start;
- start_index = i;
+ State.close_brace => switch (c) {
+ '}' => {
+ state = State.start;
+ start_index = i;
+ },
+ else => @compileError("Single '}' encountered in format string"),
},
- '}' => {
- try self.printValue(args[next_arg]);
- next_arg += 1;
- state = State.start;
- start_index = i + 1;
- },
- else => @compileError("Unknown format character: " ++ c),
- },
- State.close_brace => switch (c) {
- '}' => {
- state = State.start;
- start_index = i;
- },
- else => @compileError("Single '}' encountered in format string"),
- },
+ }
}
- }
- comptime {
- if (args.len != next_arg) {
- @compileError("Unused arguments");
+ comptime {
+ if (args.len != next_arg) {
+ @compileError("Unused arguments");
+ }
+ if (state != State.start) {
+ @compileError("Incomplete format string: " ++ format);
+ }
}
- if (state != State.Start) {
- @compileError("Incomplete format string: " ++ format);
+ if (start_index < format.len) {
+ try self.write(format[start_index..format.len]);
}
+ try self.flush();
}
- if (start_index < format.len) {
- try self.write(format[start_index..format.len]);
+
+ fn write(self: *Writer, value: []const u8) !void {
+ _ = self;
+ _ = value;
}
- try self.flush();
-}
+ pub fn printValue(self: *Writer, value: anytype) !void {
+ _ = self;
+ _ = value;
+ }
+ fn flush(self: *Writer) !void {
+ _ = self;
+ }
+};
{#code_end#}
This is a proof of concept implementation; the actual function in the standard library has more
@@ -6459,8 +6555,8 @@ pub fn printf(self: *Writer, comptime format: []const u8, args: anytype) anyerro
When this function is analyzed from our example code above, Zig partially evaluates the function
and emits a function that actually looks like this:
- {#code_begin|syntax#}
-pub fn printf(self: *Writer, arg0: i32, arg1: []const u8) !void {
+ {#syntax_block|zig|Emitted print Function#}
+pub fn print(self: *Writer, arg0: []const u8, arg1: i32) !void {
try self.write("here is a string: '");
try self.printValue(arg0);
try self.write("' here is a number: ");
@@ -6468,28 +6564,46 @@ pub fn printf(self: *Writer, arg0: i32, arg1: []const u8) !void {
try self.write("\n");
try self.flush();
}
- {#code_end#}
+ {#end_syntax_block#}
{#syntax#}printValue{#endsyntax#} is a function that takes a parameter of any type, and does different things depending
on the type:
- {#code_begin|syntax#}
-pub fn printValue(self: *Writer, value: anytype) !void {
- switch (@typeInfo(@TypeOf(value))) {
- .Int => {
- return self.printInt(T, value);
- },
- .Float => {
- return self.printFloat(T, value);
- },
- else => {
- @compileError("Unable to print type '" ++ @typeName(T) ++ "'");
- },
+ {#code_begin|syntax|poc_printValue_fn#}
+ const Writer = struct {
+ pub fn printValue(self: *Writer, value: anytype) !void {
+ switch (@typeInfo(@TypeOf(value))) {
+ .Int => {
+ return self.writeInt(value);
+ },
+ .Float => {
+ return self.writeFloat(value);
+ },
+ .Pointer => {
+ return self.write(value);
+ },
+ else => {
+ @compileError("Unable to print type '" ++ @typeName(@TypeOf(value)) ++ "'");
+ },
+ }
}
-}
+
+ fn write(self: *Writer, value: []const u8) !void {
+ _ = self;
+ _ = value;
+ }
+ fn writeInt(self: *Writer, value: anytype) !void {
+ _ = self;
+ _ = value;
+ }
+ fn writeFloat(self: *Writer, value: anytype) !void {
+ _ = self;
+ _ = value;
+ }
+};
{#code_end#}
- And now, what happens if we give too many arguments to {#syntax#}printf{#endsyntax#}?
+ And now, what happens if we give too many arguments to {#syntax#}print{#endsyntax#}?
{#code_begin|test_err|Unused argument in 'here is a string: '{s}' here is a number: {}#}
const print = @import("std").debug.print;
@@ -6497,7 +6611,7 @@ const print = @import("std").debug.print;
const a_number: i32 = 1234;
const a_string = "foobar";
-test "printf too many arguments" {
+test "print too many arguments" {
print("here is a string: '{s}' here is a number: {}\n", .{
a_string,
a_number,
@@ -6512,7 +6626,7 @@ test "printf too many arguments" {
Zig doesn't care whether the format argument is a string literal,
only that it is a compile-time known value that can be coerced to a {#syntax#}[]const u8{#endsyntax#}:
- {#code_begin|exe|printf#}
+ {#code_begin|exe|print#}
const print = @import("std").debug.print;
const a_number: i32 = 1234;
@@ -7118,16 +7232,6 @@ fn readFile(allocator: *Allocator, filename: []const u8) ![]u8 {
If no overflow or underflow occurs, returns {#syntax#}false{#endsyntax#}.
{#header_close#}
- {#header_open|@addWithSaturation#}
- {#syntax#}@addWithSaturation(a: T, b: T) T{#endsyntax#}
-
- Returns {#syntax#}a + b{#endsyntax#}. The result will be clamped between the type maximum and minimum.
-
-
- Once Saturating arithmetic.
- is completed, the syntax {#syntax#}a +| b{#endsyntax#} will be equivalent to calling {#syntax#}@addWithSaturation(a, b){#endsyntax#}.
-
- {#header_close#}
{#header_open|@alignCast#}
{#syntax#}@alignCast(comptime alignment: u29, ptr: anytype) anytype{#endsyntax#}
@@ -7216,7 +7320,9 @@ fn func(y: *i32) void {
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
+ {#see_also|@atomicStore|@atomicRmw|@fence|@cmpxchgWeak|@cmpxchgStrong#}
{#header_close#}
+
{#header_open|@atomicRmw#}
{#syntax#}@atomicRmw(comptime T: type, ptr: *T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) T{#endsyntax#}
@@ -7242,7 +7348,9 @@ fn func(y: *i32) void {
{#syntax#}.Max{#endsyntax#} - stores the operand if it is larger. Supports integers and floats.
{#syntax#}.Min{#endsyntax#} - stores the operand if it is smaller. Supports integers and floats.
+ {#see_also|@atomicStore|@atomicLoad|@fence|@cmpxchgWeak|@cmpxchgStrong#}
{#header_close#}
+
{#header_open|@atomicStore#}
{#syntax#}@atomicStore(comptime T: type, ptr: *T, value: T, comptime ordering: builtin.AtomicOrder) void{#endsyntax#}
@@ -7252,6 +7360,7 @@ fn func(y: *i32) void {
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
+ {#see_also|@atomicLoad|@atomicRmw|@fence|@cmpxchgWeak|@cmpxchgStrong#}
{#header_close#}
{#header_open|@bitCast#}
@@ -7396,9 +7505,11 @@ fn add(a: i32, b: i32) i32 {
{#syntax#}@call{#endsyntax#} allows more flexibility than normal function call syntax does. The
{#syntax#}CallOptions{#endsyntax#} struct is reproduced here:
- {#code_begin|syntax#}
+ {#syntax_block|zig|builtin.CallOptions struct#}
pub const CallOptions = struct {
modifier: Modifier = .auto,
+
+ /// Only valid when `Modifier` is `Modifier.async_kw`.
stack: ?[]align(std.Target.stack_align) u8 = null,
pub const Modifier = enum {
@@ -7435,7 +7546,7 @@ pub const CallOptions = struct {
compile_time,
};
};
- {#code_end#}
+ {#end_syntax_block#}
{#header_close#}
{#header_open|@cDefine#}
@@ -7540,15 +7651,16 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v
an integer or an enum.
{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}
- {#see_also|Compile Variables|cmpxchgWeak#}
+ {#see_also|@atomicStore|@atomicLoad|@atomicRmw|@fence|@cmpxchgWeak#}
{#header_close#}
+
{#header_open|@cmpxchgWeak#}
{#syntax#}@cmpxchgWeak(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T{#endsyntax#}
This function performs a weak atomic compare exchange operation. It's the equivalent of this code,
except atomic:
- {#code_begin|syntax#}
+ {#syntax_block|zig|cmpxchgWeakButNotAtomic#}
fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T {
const old_value = ptr.*;
if (old_value == expected_value and usuallyTrueButSometimesFalse()) {
@@ -7558,7 +7670,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
return old_value;
}
}
- {#code_end#}
+ {#end_syntax_block#}
If you are using cmpxchg in a loop, the sporadic failure will be no problem, and {#syntax#}cmpxchgWeak{#endsyntax#}
is the better choice, because it can be implemented more efficiently in machine instructions.
@@ -7569,7 +7681,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
an integer or an enum.
{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}
- {#see_also|Compile Variables|cmpxchgStrong#}
+ {#see_also|@atomicStore|@atomicLoad|@atomicRmw|@fence|@cmpxchgStrong#}
{#header_close#}
{#header_open|@compileError#}
@@ -7617,7 +7729,7 @@ test "main" {
}
{#code_end#}
- will ouput:
+ will output:
If all {#syntax#}@compileLog{#endsyntax#} calls are removed or
@@ -7734,7 +7846,7 @@ test "main" {
the tag value is used as the enumeration value.
- If there is only one possible enum value, the resut is a {#syntax#}comptime_int{#endsyntax#}
+ If there is only one possible enum value, the result is a {#syntax#}comptime_int{#endsyntax#}
known at {#link|comptime#}.
{#see_also|@intToEnum#}
@@ -7849,7 +7961,7 @@ export fn @"A function name that is a complete sentence."() void {}
{#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("std").builtin.AtomicOrder{#endsyntax#}.
- {#see_also|Compile Variables#}
+ {#see_also|@atomicStore|@atomicLoad|@atomicRmw|@cmpxchgWeak|@cmpxchgStrong#}
{#header_close#}
{#header_open|@field#}
@@ -8241,22 +8353,6 @@ test "@wasmMemoryGrow" {
{#header_close#}
- {#header_open|@mulWithSaturation#}
- {#syntax#}@mulWithSaturation(a: T, b: T) T{#endsyntax#}
-
- Returns {#syntax#}a * b{#endsyntax#}. The result will be clamped between the type maximum and minimum.
-
-
- Once Saturating arithmetic.
- is completed, the syntax {#syntax#}a *| b{#endsyntax#} will be equivalent to calling {#syntax#}@mulWithSaturation(a, b){#endsyntax#}.
-
-
- NOTE: Currently there is a bug in the llvm.smul.fix.sat intrinsic which affects {#syntax#}@mulWithSaturation{#endsyntax#} of signed integers.
- This may result in an incorrect sign bit when there is overflow. This will be fixed in zig's 0.9.0 release.
- Check this issue for more information.
-
- {#header_close#}
-
{#header_open|@panic#}
{#syntax#}@panic(message: []const u8) noreturn{#endsyntax#}
@@ -8474,14 +8570,16 @@ test "@setRuntimeSafety" {
{#header_open|@shlExact#}
{#syntax#}@shlExact(value: T, shift_amt: Log2T) T{#endsyntax#}
- Performs the left shift operation ({#syntax#}<<{#endsyntax#}). Caller guarantees
- that the shift will not shift any 1 bits out.
+ Performs the left shift operation ({#syntax#}<<{#endsyntax#}).
+ For unsigned integers, the result is {#link|undefined#} if any 1 bits
+ are shifted out. For signed integers, the result is {#link|undefined#} if
+ any bits that disagree with the resultant sign bit are shifted out.
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior.
- {#see_also|@shrExact|@shlWithOverflow|@shlWithSaturation#}
+ {#see_also|@shrExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@shlWithOverflow#}
@@ -8495,24 +8593,9 @@ test "@setRuntimeSafety" {
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior.
- {#see_also|@shlExact|@shrExact|@shlWithSaturation#}
+ {#see_also|@shlExact|@shrExact#}
{#header_close#}
- {#header_open|@shlWithSaturation#}
- {#syntax#}@shlWithSaturation(a: T, shift_amt: T) T{#endsyntax#}
-
- Returns {#syntax#}a << b{#endsyntax#}. The result will be clamped between type minimum and maximum.
-
-
- Once Saturating arithmetic.
- is completed, the syntax {#syntax#}a <<| b{#endsyntax#} will be equivalent to calling {#syntax#}@shlWithSaturation(a, b){#endsyntax#}.
-
-
- Unlike other @shl builtins, shift_amt doesn't need to be a Log2T as saturated overshifting is well defined.
-
- {#see_also|@shlExact|@shrExact|@shlWithOverflow#}
- {#header_close#}
-
{#header_open|@shrExact#}
{#syntax#}@shrExact(value: T, shift_amt: Log2T) T{#endsyntax#}
@@ -8523,7 +8606,7 @@ test "@setRuntimeSafety" {
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior.
- {#see_also|@shlExact|@shlWithOverflow|@shlWithSaturation#}
+ {#see_also|@shlExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@shuffle#}
@@ -8684,7 +8767,7 @@ fn doTheTest() !void {
{#header_open|@sin#}
{#syntax#}@sin(value: anytype) @TypeOf(value){#endsyntax#}
- Sine trigometric function on a floating point number. Uses a dedicated hardware instruction
+ Sine trigonometric function on a floating point number. Uses a dedicated hardware instruction
when available.
@@ -8695,7 +8778,7 @@ fn doTheTest() !void {
{#header_open|@cos#}
{#syntax#}@cos(value: anytype) @TypeOf(value){#endsyntax#}
- Cosine trigometric function on a floating point number. Uses a dedicated hardware instruction
+ Cosine trigonometric function on a floating point number. Uses a dedicated hardware instruction
when available.
@@ -8783,7 +8866,7 @@ fn doTheTest() !void {
{#header_open|@ceil#}
{#syntax#}@ceil(value: anytype) @TypeOf(value){#endsyntax#}
- Returns the largest integral value not less than the given floating point number.
+ Returns the smallest integral value not less than the given floating point number.
Uses a dedicated hardware instruction when available.
@@ -8823,17 +8906,6 @@ fn doTheTest() !void {
{#header_close#}
- {#header_open|@subWithSaturation#}
- {#syntax#}@subWithSaturation(a: T, b: T) T{#endsyntax#}
-
- Returns {#syntax#}a - b{#endsyntax#}. The result will be clamped between the type maximum and minimum.
-
-
- Once Saturating arithmetic.
- is completed, the syntax {#syntax#}a -| b{#endsyntax#} will be equivalent to calling {#syntax#}@subWithSaturation(a, b){#endsyntax#}.
-
- {#header_close#}
-
{#header_open|@tagName#}
{#syntax#}@tagName(value: anytype) [:0]const u8{#endsyntax#}
@@ -10153,7 +10225,7 @@ pub fn main() void {
This expression is evaluated at compile-time and is used to control
preprocessor directives and include multiple .h files:
- {#code_begin|syntax#}
+ {#syntax_block|zig|@cImport Expression#}
const builtin = @import("builtin");
const c = @cImport({
@@ -10167,7 +10239,7 @@ const c = @cImport({
}
@cInclude("soundio.h");
});
- {#code_end#}
+ {#end_syntax_block#}
{#see_also|@cImport|@cInclude|@cDefine|@cUndef|@import#}
{#header_close#}
@@ -10273,7 +10345,7 @@ pub fn main() void {
Some C constructs cannot be translated to Zig - for example, goto,
structs with bitfields, and token-pasting macros. Zig employs demotion to allow translation
- to continue in the face of non-translateable entities.
+ to continue in the face of non-translatable entities.
Demotion comes in three varieties - {#link|opaque#}, extern, and
@@ -10283,13 +10355,13 @@ pub fn main() void {
Functions that contain opaque types or code constructs that cannot be translated will be demoted
to {#syntax#}extern{#endsyntax#} declarations.
- Thus, non-translateable types can still be used as pointers, and non-translateable functions
+ Thus, non-translatable types can still be used as pointers, and non-translatable functions
can be called so long as the linker is aware of the compiled function.
{#syntax#}@compileError{#endsyntax#} is used when top-level definitions (global variables,
function prototypes, macros) cannot be translated or demoted. Since Zig uses lazy analysis for
- top-level declarations, untranslateable entities will not cause a compile error in your code unless
+ top-level declarations, untranslatable entities will not cause a compile error in your code unless
you actually use them.
{#see_also|opaque|extern|@compileError#}
@@ -10301,7 +10373,7 @@ pub fn main() void {
can be translated to Zig. Macros that cannot be translated will be be demoted to
{#syntax#}@compileError{#endsyntax#}. Note that C code which uses macros will be
translated without any additional issues (since Zig operates on the pre-processed source
- with macros expanded). It is merely the macros themselves which may not be translateable to
+ with macros expanded). It is merely the macros themselves which may not be translatable to
Zig.
Consider the following example:
@@ -10321,7 +10393,7 @@ pub export fn foo() c_int {
}
pub const MAKELOCAL = @compileError("unable to translate C expr: unexpected token .Equal"); // macro.c:1:9
{#code_end#}
- Note that {#syntax#}foo{#endsyntax#} was translated correctly despite using a non-translateable
+
+ Note that {#syntax#}foo{#endsyntax#} was translated correctly despite using a non-translatable
macro. {#syntax#}MAKELOCAL{#endsyntax#} was demoted to {#syntax#}@compileError{#endsyntax#} since
it cannot be expressed as a Zig function; this simply means that you cannot directly use
{#syntax#}MAKELOCAL{#endsyntax#} from Zig.
@@ -10491,8 +10563,8 @@ const typedArray = new Uint8Array(source);
WebAssembly.instantiate(typedArray, {
env: {
- print: (result) => { console.log(`The result is ${result}`); }
- }}).then(result => {
+ print: (result) => { console.log(`The result is ${result}`); }
+ }}).then(result => {
const add = result.instance.exports.add;
add(1, 2);
});{#end_syntax_block#}
@@ -11787,6 +11859,7 @@ AssignOp
/ PLUSEQUAL
/ MINUSEQUAL
/ LARROW2EQUAL
+ / LARROW2PIPEEQUAL
/ RARROW2EQUAL
/ AMPERSANDEQUAL
/ CARETEQUAL
@@ -11821,6 +11894,8 @@ AdditionOp
/ PLUS2
/ PLUSPERCENT
/ MINUSPERCENT
+ / PLUSPIPE
+ / MINUSPIPE
MultiplyOp
<- PIPE2
@@ -11829,6 +11904,7 @@ MultiplyOp
/ PERCENT
/ ASTERISK2
/ ASTERISKPERCENT
+ / ASTERISKPIPE
PrefixOp
<- EXCLAMATIONMARK
@@ -11992,6 +12068,8 @@ ASTERISK2 <- '**' skip
ASTERISKEQUAL <- '*=' skip
ASTERISKPERCENT <- '*%' ![=] skip
ASTERISKPERCENTEQUAL <- '*%=' skip
+ASTERISKPIPE <- '*|' ![=] skip
+ASTERISKPIPEEQUAL <- '*|=' skip
CARET <- '^' ![=] skip
CARETEQUAL <- '^=' skip
COLON <- ':' skip
@@ -12008,6 +12086,8 @@ EXCLAMATIONMARK <- '!' ![=] skip
EXCLAMATIONMARKEQUAL <- '!=' skip
LARROW <- '<' ![<=] skip
LARROW2 <- '<<' ![=] skip
+LARROW2PIPE <- '<<|' ![=] skip
+LARROW2PIPEEQUAL <- '<<|=' skip
LARROW2EQUAL <- '<<=' skip
LARROWEQUAL <- '<=' skip
LBRACE <- '{' skip
@@ -12017,6 +12097,8 @@ MINUS <- '-' ![%=>] skip
MINUSEQUAL <- '-=' skip
MINUSPERCENT <- '-%' ![=] skip
MINUSPERCENTEQUAL <- '-%=' skip
+MINUSPIPE <- '-|' ![=] skip
+MINUSPIPEEQUAL <- '-|=' skip
MINUSRARROW <- '->' skip
PERCENT <- '%' ![=] skip
PERCENTEQUAL <- '%=' skip
@@ -12028,6 +12110,8 @@ PLUS2 <- '++' skip
PLUSEQUAL <- '+=' skip
PLUSPERCENT <- '+%' ![=] skip
PLUSPERCENTEQUAL <- '+%=' skip
+PLUSPIPE <- '+|' ![=] skip
+PLUSPIPEEQUAL <- '+|=' skip
LETTERC <- 'c' skip
QUESTIONMARK <- '?' skip
RARROW <- '>' ![>=] skip
diff --git a/lib/libc/mingw/stdio/fseeki64.c b/lib/libc/mingw/stdio/fseeki64.c
new file mode 100644
index 0000000000..f70062e391
--- /dev/null
+++ b/lib/libc/mingw/stdio/fseeki64.c
@@ -0,0 +1,50 @@
+/**
+ * This file has no copyright assigned and is placed in the Public Domain.
+ * This file is part of the mingw-w64 runtime package.
+ * No warranty is given; refer to the file DISCLAIMER.PD within this package.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include <io.h>
+
+#if !defined(__arm__) && !defined(__aarch64__) /* we have F_ARM_ANY(_fseeki64) in msvcrt.def.in */
+int __cdecl _fseeki64(FILE* stream, __int64 offset, int whence)
+{
+ fpos_t pos;
+ if (whence == SEEK_CUR)
+ {
+ /* If stream is invalid, fgetpos sets errno. */
+ if (fgetpos (stream, &pos))
+ return (-1);
+ pos += (fpos_t) offset;
+ }
+ else if (whence == SEEK_END)
+ {
+ /* If writing, we need to flush before getting file length. */
+ fflush (stream);
+ pos = (fpos_t) (_filelengthi64 (_fileno (stream)) + offset);
+ }
+ else if (whence == SEEK_SET)
+ pos = (fpos_t) offset;
+ else
+ {
+ errno = EINVAL;
+ return (-1);
+ }
+ return fsetpos (stream, &pos);
+}
+
+int __cdecl (*__MINGW_IMP_SYMBOL(_fseeki64))(FILE*, __int64, int) = _fseeki64;
+#endif /* !defined(__arm__) && !defined(__aarch64__) */
+
+__int64 __cdecl _ftelli64(FILE* stream)
+{
+ fpos_t pos;
+ if (fgetpos (stream, &pos))
+ return -1LL;
+ else
+ return (__int64) pos;
+}
+
+__int64 __cdecl (*__MINGW_IMP_SYMBOL(_ftelli64))(FILE*) = _ftelli64;
+
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index 1fe8ca89d2..ae3a3e9d9e 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -41,6 +41,7 @@ pub const max_name_len = switch (target.os.tag) {
.netbsd => 31,
.freebsd => 15,
.openbsd => 31,
+ .solaris => 31,
else => 0,
};
@@ -112,7 +113,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
else => |e| return os.unexpectedErrno(e),
}
},
- .netbsd => if (use_pthreads) {
+ .netbsd, .solaris => if (use_pthreads) {
const err = std.c.pthread_setname_np(self.getHandle(), name_with_terminator.ptr, null);
switch (err) {
.SUCCESS => return,
@@ -202,7 +203,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
else => |e| return os.unexpectedErrno(e),
}
},
- .netbsd => if (use_pthreads) {
+ .netbsd, .solaris => if (use_pthreads) {
const err = std.c.pthread_getname_np(self.getHandle(), buffer.ptr, max_name_len + 1);
switch (err) {
.SUCCESS => return std.mem.sliceTo(buffer, 0),
@@ -565,6 +566,16 @@ const PosixThreadImpl = struct {
};
return @intCast(usize, count);
},
+ .solaris => {
+ // The "proper" way to get the cpu count would be to query
+ // /dev/kstat via ioctls, and traverse a linked list for each
+ // cpu.
+ const rc = c.sysconf(os._SC.NPROCESSORS_ONLN);
+ return switch (os.errno(rc)) {
+ .SUCCESS => @intCast(usize, rc),
+ else => |err| os.unexpectedErrno(err),
+ };
+ },
.haiku => {
var count: u32 = undefined;
var system_info: os.system_info = undefined;
diff --git a/lib/std/Thread/Condition.zig b/lib/std/Thread/Condition.zig
index 647a50b913..d08a7c3c96 100644
--- a/lib/std/Thread/Condition.zig
+++ b/lib/std/Thread/Condition.zig
@@ -54,7 +54,7 @@ pub const WindowsCondition = struct {
pub fn wait(cond: *WindowsCondition, mutex: *Mutex) void {
const rc = windows.kernel32.SleepConditionVariableSRW(
&cond.cond,
- &mutex.srwlock,
+ &mutex.impl.srwlock,
windows.INFINITE,
@as(windows.ULONG, 0),
);
diff --git a/lib/std/Thread/Mutex.zig b/lib/std/Thread/Mutex.zig
index a337809a18..7473d9ec7f 100644
--- a/lib/std/Thread/Mutex.zig
+++ b/lib/std/Thread/Mutex.zig
@@ -33,17 +33,29 @@ const testing = std.testing;
const StaticResetEvent = std.thread.StaticResetEvent;
/// Try to acquire the mutex without blocking. Returns `null` if the mutex is
-/// unavailable. Otherwise returns `Held`. Call `release` on `Held`.
-pub fn tryAcquire(m: *Mutex) ?Impl.Held {
+/// unavailable. Otherwise returns `Held`. Call `release` on `Held`, or use
+/// releaseDirect().
+pub fn tryAcquire(m: *Mutex) ?Held {
return m.impl.tryAcquire();
}
/// Acquire the mutex. Deadlocks if the mutex is already
/// held by the calling thread.
-pub fn acquire(m: *Mutex) Impl.Held {
+pub fn acquire(m: *Mutex) Held {
return m.impl.acquire();
}
+/// Release the mutex. Prefer Held.release() if available.
+pub fn releaseDirect(m: *Mutex) void {
+ return m.impl.releaseDirect();
+}
+
+/// A held mutex handle. Call release to allow other threads to
+/// take the mutex. Do not call release() more than once.
+/// For more complex scenarios, this handle can be discarded
+/// and Mutex.releaseDirect can be called instead.
+pub const Held = Impl.Held;
+
const Impl = if (builtin.single_threaded)
Dummy
else if (builtin.os.tag == .windows)
@@ -53,6 +65,32 @@ else if (std.Thread.use_pthreads)
else
AtomicMutex;
+fn HeldInterface(comptime MutexType: type) type {
+ return struct {
+ const Mixin = @This();
+ pub const Held = struct {
+ mutex: *MutexType,
+
+ pub fn release(held: Mixin.Held) void {
+ held.mutex.releaseDirect();
+ }
+ };
+
+ pub fn tryAcquire(m: *MutexType) ?Mixin.Held {
+ if (m.tryAcquireDirect()) {
+ return Mixin.Held{ .mutex = m };
+ } else {
+ return null;
+ }
+ }
+
+ pub fn acquire(m: *MutexType) Mixin.Held {
+ m.acquireDirect();
+ return Mixin.Held{ .mutex = m };
+ }
+ };
+}
+
pub const AtomicMutex = struct {
state: State = .unlocked,
@@ -62,39 +100,32 @@ pub const AtomicMutex = struct {
waiting,
};
- pub const Held = struct {
- mutex: *AtomicMutex,
+ pub usingnamespace HeldInterface(@This());
- pub fn release(held: Held) void {
- switch (@atomicRmw(State, &held.mutex.state, .Xchg, .unlocked, .Release)) {
- .unlocked => unreachable,
- .locked => {},
- .waiting => held.mutex.unlockSlow(),
- }
- }
- };
-
- pub fn tryAcquire(m: *AtomicMutex) ?Held {
- if (@cmpxchgStrong(
+ fn tryAcquireDirect(m: *AtomicMutex) bool {
+ return @cmpxchgStrong(
State,
&m.state,
.unlocked,
.locked,
.Acquire,
.Monotonic,
- ) == null) {
- return Held{ .mutex = m };
- } else {
- return null;
- }
+ ) == null;
}
- pub fn acquire(m: *AtomicMutex) Held {
+ fn acquireDirect(m: *AtomicMutex) void {
switch (@atomicRmw(State, &m.state, .Xchg, .locked, .Acquire)) {
.unlocked => {},
else => |s| m.lockSlow(s),
}
- return Held{ .mutex = m };
+ }
+
+ fn releaseDirect(m: *AtomicMutex) void {
+ switch (@atomicRmw(State, &m.state, .Xchg, .unlocked, .Release)) {
+ .unlocked => unreachable,
+ .locked => {},
+ .waiting => m.unlockSlow(),
+ }
}
fn lockSlow(m: *AtomicMutex, current_state: State) void {
@@ -171,36 +202,20 @@ pub const AtomicMutex = struct {
pub const PthreadMutex = struct {
pthread_mutex: std.c.pthread_mutex_t = .{},
- pub const Held = struct {
- mutex: *PthreadMutex,
+ pub usingnamespace HeldInterface(@This());
- pub fn release(held: Held) void {
- switch (std.c.pthread_mutex_unlock(&held.mutex.pthread_mutex)) {
- .SUCCESS => return,
- .INVAL => unreachable,
- .AGAIN => unreachable,
- .PERM => unreachable,
- else => unreachable,
- }
- }
- };
-
- /// Try to acquire the mutex without blocking. Returns null if
- /// the mutex is unavailable. Otherwise returns Held. Call
- /// release on Held.
- pub fn tryAcquire(m: *PthreadMutex) ?Held {
- if (std.c.pthread_mutex_trylock(&m.pthread_mutex) == .SUCCESS) {
- return Held{ .mutex = m };
- } else {
- return null;
- }
+ /// Try to acquire the mutex without blocking. Returns false if
+ /// the mutex is unavailable. Otherwise returns true. Call
+ /// release when done.
+ fn tryAcquireDirect(m: *PthreadMutex) bool {
+ return std.c.pthread_mutex_trylock(&m.pthread_mutex) == .SUCCESS;
}
/// Acquire the mutex. Will deadlock if the mutex is already
/// held by the calling thread.
- pub fn acquire(m: *PthreadMutex) Held {
+ fn acquireDirect(m: *PthreadMutex) void {
switch (std.c.pthread_mutex_lock(&m.pthread_mutex)) {
- .SUCCESS => return Held{ .mutex = m },
+ .SUCCESS => {},
.INVAL => unreachable,
.BUSY => unreachable,
.AGAIN => unreachable,
@@ -209,6 +224,16 @@ pub const PthreadMutex = struct {
else => unreachable,
}
}
+
+ fn releaseDirect(m: *PthreadMutex) void {
+ switch (std.c.pthread_mutex_unlock(&m.pthread_mutex)) {
+ .SUCCESS => return,
+ .INVAL => unreachable,
+ .AGAIN => unreachable,
+ .PERM => unreachable,
+ else => unreachable,
+ }
+ }
};
/// This has the sematics as `Mutex`, however it does not actually do any
@@ -216,58 +241,50 @@ pub const PthreadMutex = struct {
pub const Dummy = struct {
lock: @TypeOf(lock_init) = lock_init,
+ pub usingnamespace HeldInterface(@This());
+
const lock_init = if (std.debug.runtime_safety) false else {};
- pub const Held = struct {
- mutex: *Dummy,
-
- pub fn release(held: Held) void {
- if (std.debug.runtime_safety) {
- held.mutex.lock = false;
- }
- }
- };
-
- /// Try to acquire the mutex without blocking. Returns null if
- /// the mutex is unavailable. Otherwise returns Held. Call
- /// release on Held.
- pub fn tryAcquire(m: *Dummy) ?Held {
+ /// Try to acquire the mutex without blocking. Returns false if
+ /// the mutex is unavailable. Otherwise returns true.
+ fn tryAcquireDirect(m: *Dummy) bool {
if (std.debug.runtime_safety) {
- if (m.lock) return null;
+ if (m.lock) return false;
m.lock = true;
}
- return Held{ .mutex = m };
+ return true;
}
/// Acquire the mutex. Will deadlock if the mutex is already
/// held by the calling thread.
- pub fn acquire(m: *Dummy) Held {
- return m.tryAcquire() orelse @panic("deadlock detected");
+ fn acquireDirect(m: *Dummy) void {
+ if (!m.tryAcquireDirect()) {
+ @panic("deadlock detected");
+ }
+ }
+
+ fn releaseDirect(m: *Dummy) void {
+ if (std.debug.runtime_safety) {
+ m.lock = false;
+ }
}
};
const WindowsMutex = struct {
srwlock: windows.SRWLOCK = windows.SRWLOCK_INIT,
- pub const Held = struct {
- mutex: *WindowsMutex,
+ pub usingnamespace HeldInterface(@This());
- pub fn release(held: Held) void {
- windows.kernel32.ReleaseSRWLockExclusive(&held.mutex.srwlock);
- }
- };
-
- pub fn tryAcquire(m: *WindowsMutex) ?Held {
- if (windows.kernel32.TryAcquireSRWLockExclusive(&m.srwlock) != windows.FALSE) {
- return Held{ .mutex = m };
- } else {
- return null;
- }
+ fn tryAcquireDirect(m: *WindowsMutex) bool {
+ return windows.kernel32.TryAcquireSRWLockExclusive(&m.srwlock) != windows.FALSE;
}
- pub fn acquire(m: *WindowsMutex) Held {
+ fn acquireDirect(m: *WindowsMutex) void {
windows.kernel32.AcquireSRWLockExclusive(&m.srwlock);
- return Held{ .mutex = m };
+ }
+
+ fn releaseDirect(m: *WindowsMutex) void {
+ windows.kernel32.ReleaseSRWLockExclusive(&m.srwlock);
}
};
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index 91e0c4d883..42443f2138 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -90,7 +90,7 @@ pub fn ArrayHashMap(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
- /// unless `ensureCapacity` was previously used.
+ /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const Entry = Unmanaged.Entry;
/// A KV pair which has been copied out of the backing store
@@ -110,7 +110,7 @@ pub fn ArrayHashMap(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
- /// unless `ensureCapacity` was previously used.
+ /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const GetOrPutResult = Unmanaged.GetOrPutResult;
/// An Iterator over Entry pointers.
@@ -478,7 +478,7 @@ pub fn ArrayHashMapUnmanaged(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
- /// unless `ensureCapacity` was previously used.
+ /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const Entry = struct {
key_ptr: *K,
value_ptr: *V,
@@ -509,7 +509,7 @@ pub fn ArrayHashMapUnmanaged(
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
- /// unless `ensureCapacity` was previously used.
+ /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const GetOrPutResult = struct {
key_ptr: *K,
value_ptr: *V,
@@ -759,20 +759,20 @@ pub fn ArrayHashMapUnmanaged(
}
pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void {
if (new_capacity <= linear_scan_max) {
- try self.entries.ensureCapacity(allocator, new_capacity);
+ try self.entries.ensureTotalCapacity(allocator, new_capacity);
return;
}
if (self.index_header) |header| {
if (new_capacity <= header.capacity()) {
- try self.entries.ensureCapacity(allocator, new_capacity);
+ try self.entries.ensureTotalCapacity(allocator, new_capacity);
return;
}
}
const new_bit_index = try IndexHeader.findBitIndex(new_capacity);
const new_header = try IndexHeader.alloc(allocator, new_bit_index);
- try self.entries.ensureCapacity(allocator, new_capacity);
+ try self.entries.ensureTotalCapacity(allocator, new_capacity);
if (self.index_header) |old_header| old_header.free(allocator);
self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header);
@@ -1441,7 +1441,7 @@ pub fn ArrayHashMapUnmanaged(
unreachable;
}
- /// Must ensureCapacity before calling this.
+ /// Must `ensureTotalCapacity`/`ensureUnusedCapacity` before calling this.
fn getOrPutInternal(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type) GetOrPutResult {
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
@@ -1485,7 +1485,7 @@ pub fn ArrayHashMapUnmanaged(
}
// This pointer survives the following append because we call
- // entries.ensureCapacity before getOrPutInternal.
+ // entries.ensureTotalCapacity before getOrPutInternal.
const hash_match = if (store_hash) h == hashes_array[slot_data.entry_index] else true;
if (hash_match and checkedEql(ctx, key, keys_array[slot_data.entry_index])) {
return .{
@@ -1946,7 +1946,7 @@ test "iterator hash map" {
var reset_map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer reset_map.deinit();
- // test ensureCapacity with a 0 parameter
+ // test ensureTotalCapacity with a 0 parameter
try reset_map.ensureTotalCapacity(0);
try reset_map.putNoClobber(0, 11);
diff --git a/lib/std/build.zig b/lib/std/build.zig
index 7b976405dc..58fc94503f 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -684,7 +684,11 @@ pub const Builder = struct {
);
const mcpu = self.option([]const u8, "cpu", "Target CPU features to add or subtract");
- const triple = maybe_triple orelse return args.default_target;
+ if (maybe_triple == null and mcpu == null) {
+ return args.default_target;
+ }
+
+ const triple = maybe_triple orelse "native";
var diags: CrossTarget.ParseOptions.Diagnostics = .{};
const selected_target = CrossTarget.parse(.{
@@ -2432,11 +2436,8 @@ pub const LibExeObjStep = struct {
if (populated_cpu_features.eql(cross.cpu.features)) {
// The CPU name alone is sufficient.
- // If it is the baseline CPU, no command line args are required.
- if (cross.cpu.model != std.Target.Cpu.baseline(cross.cpu.arch).model) {
- try zig_args.append("-mcpu");
- try zig_args.append(cross.cpu.model.name);
- }
+ try zig_args.append("-mcpu");
+ try zig_args.append(cross.cpu.model.name);
} else {
var mcpu_buffer = std.ArrayList(u8).init(builder.allocator);
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 7f6c40c6f5..c0d2e9f725 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -166,6 +166,15 @@ pub const CallingConvention = enum {
SysV,
};
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const AddressSpace = enum {
+ generic,
+ gs,
+ fs,
+ ss,
+};
+
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const SourceLocation = struct {
@@ -226,6 +235,7 @@ pub const TypeInfo = union(enum) {
is_const: bool,
is_volatile: bool,
alignment: comptime_int,
+ address_space: AddressSpace,
child: type,
is_allowzero: bool,
@@ -700,7 +710,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace) noreturn
},
else => {
const first_trace_addr = @returnAddress();
- std.debug.panicExtra(error_return_trace, first_trace_addr, "{s}", .{msg});
+ std.debug.panicImpl(error_return_trace, first_trace_addr, msg);
},
}
}
diff --git a/lib/std/c.zig b/lib/std/c.zig
index 6a2624a93e..84fbb59640 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -252,6 +252,33 @@ pub extern "c" fn kevent(
timeout: ?*const c.timespec,
) c_int;
+pub extern "c" fn port_create() c.port_t;
+pub extern "c" fn port_associate(
+ port: c.port_t,
+ source: u32,
+ object: usize,
+ events: u32,
+ user_var: ?*c_void,
+) c_int;
+pub extern "c" fn port_dissociate(port: c.port_t, source: u32, object: usize) c_int;
+pub extern "c" fn port_send(port: c.port_t, events: u32, user_var: ?*c_void) c_int;
+pub extern "c" fn port_sendn(
+ ports: [*]c.port_t,
+ errors: []u32,
+ num_ports: u32,
+ events: u32,
+ user_var: ?*c_void,
+) c_int;
+pub extern "c" fn port_get(port: c.port_t, event: *c.port_event, timeout: ?*c.timespec) c_int;
+pub extern "c" fn port_getn(
+ port: c.port_t,
+ event_list: []c.port_event,
+ max_events: u32,
+ events_retrieved: *u32,
+ timeout: ?*c.timespec,
+) c_int;
+pub extern "c" fn port_alert(port: c.port_t, flags: u32, events: u32, user_var: ?*c_void) c_int;
+
pub extern "c" fn getaddrinfo(
noalias node: ?[*:0]const u8,
noalias service: ?[*:0]const u8,
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index 8bb30efab3..b85a5bc40f 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -291,6 +291,7 @@ pub const sockaddr = extern struct {
family: sa_family_t,
data: [14]u8,
+ pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {
len: u8 = @sizeOf(in),
diff --git a/lib/std/c/dragonfly.zig b/lib/std/c/dragonfly.zig
index 35bbeac6a3..a2b7e31b4f 100644
--- a/lib/std/c/dragonfly.zig
+++ b/lib/std/c/dragonfly.zig
@@ -465,6 +465,7 @@ pub const sockaddr = extern struct {
family: u8,
data: [14]u8,
+ pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {
diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig
index f65af3f915..ecc9690069 100644
--- a/lib/std/c/freebsd.zig
+++ b/lib/std/c/freebsd.zig
@@ -323,6 +323,7 @@ pub const sockaddr = extern struct {
/// actually longer; address value
data: [14]u8,
+ pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {
diff --git a/lib/std/c/haiku.zig b/lib/std/c/haiku.zig
index 1ad51cfadd..dcebeea95e 100644
--- a/lib/std/c/haiku.zig
+++ b/lib/std/c/haiku.zig
@@ -339,6 +339,7 @@ pub const sockaddr = extern struct {
/// actually longer; address value
data: [14]u8,
+ pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {
diff --git a/lib/std/c/netbsd.zig b/lib/std/c/netbsd.zig
index d76a9ecdf5..042d540bcc 100644
--- a/lib/std/c/netbsd.zig
+++ b/lib/std/c/netbsd.zig
@@ -476,6 +476,7 @@ pub const sockaddr = extern struct {
/// actually longer; address value
data: [14]u8,
+ pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {
diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig
index b3919d4724..39425b5e0e 100644
--- a/lib/std/c/openbsd.zig
+++ b/lib/std/c/openbsd.zig
@@ -279,6 +279,7 @@ pub const sockaddr = extern struct {
/// actually longer; address value
data: [14]u8,
+ pub const SS_MAXSIZE = 256;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {
diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig
index 7c70a01fc4..283ea792cd 100644
--- a/lib/std/c/solaris.zig
+++ b/lib/std/c/solaris.zig
@@ -1,15 +1,1913 @@
+const std = @import("../std.zig");
+const builtin = @import("builtin");
+const maxInt = std.math.maxInt;
+const iovec = std.os.iovec;
+const iovec_const = std.os.iovec_const;
+const timezone = std.c.timezone;
+
+extern "c" fn ___errno() *c_int;
+pub const _errno = ___errno;
+
+pub const dl_iterate_phdr_callback = fn (info: *dl_phdr_info, size: usize, data: ?*c_void) callconv(.C) c_int;
+pub extern "c" fn dl_iterate_phdr(callback: dl_iterate_phdr_callback, data: ?*c_void) c_int;
+
+pub extern "c" fn getdents(fd: c_int, buf_ptr: [*]u8, nbytes: usize) usize;
+pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
+pub extern "c" fn pipe2(fds: *[2]fd_t, flags: u32) c_int;
+pub extern "c" fn arc4random_buf(buf: [*]u8, len: usize) void;
+pub extern "c" fn posix_memalign(memptr: *?*c_void, alignment: usize, size: usize) c_int;
+pub extern "c" fn sysconf(sc: c_int) i64;
+pub extern "c" fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) c_int;
+pub extern "c" fn madvise(address: [*]u8, len: usize, advise: u32) c_int;
+
pub const pthread_mutex_t = extern struct {
- __pthread_mutex_flag1: u16 = 0,
- __pthread_mutex_flag2: u8 = 0,
- __pthread_mutex_ceiling: u8 = 0,
- __pthread_mutex_type: u16 = 0,
- __pthread_mutex_magic: u16 = 0x4d58,
- __pthread_mutex_lock: u64 = 0,
- __pthread_mutex_data: u64 = 0,
+ flag1: u16 = 0,
+ flag2: u8 = 0,
+ ceiling: u8 = 0,
+ @"type": u16 = 0,
+ magic: u16 = 0x4d58,
+ lock: u64 = 0,
+ data: u64 = 0,
};
pub const pthread_cond_t = extern struct {
- __pthread_cond_flag: u32 = 0,
- __pthread_cond_type: u16 = 0,
- __pthread_cond_magic: u16 = 0x4356,
- __pthread_cond_data: u64 = 0,
+ flag: [4]u8 = [_]u8{0} ** 4,
+ @"type": u16 = 0,
+ magic: u16 = 0x4356,
+ data: u64 = 0,
};
+pub const pthread_rwlock_t = extern struct {
+ readers: i32 = 0,
+ @"type": u16 = 0,
+ magic: u16 = 0x5257,
+ mutex: pthread_mutex_t = .{},
+ readercv: pthread_cond_t = .{},
+ writercv: pthread_cond_t = .{},
+};
+pub const pthread_attr_t = extern struct {
+ mutexattr: ?*c_void = null,
+};
+pub const pthread_key_t = c_int;
+
+pub const sem_t = extern struct {
+ count: u32 = 0,
+ @"type": u16 = 0,
+ magic: u16 = 0x534d,
+ __pad1: [3]u64 = [_]u64{0} ** 3,
+ __pad2: [2]u64 = [_]u64{0} ** 2,
+};
+
+pub extern "c" fn pthread_setname_np(thread: std.c.pthread_t, name: [*:0]const u8, arg: ?*c_void) E;
+pub extern "c" fn pthread_getname_np(thread: std.c.pthread_t, name: [*:0]u8, len: usize) E;
+
+pub const blkcnt_t = i64;
+pub const blksize_t = i32;
+pub const clock_t = i64;
+pub const dev_t = i32;
+pub const fd_t = c_int;
+pub const gid_t = u32;
+pub const ino_t = u64;
+pub const mode_t = u32;
+pub const nlink_t = u32;
+pub const off_t = i64;
+pub const pid_t = i32;
+pub const socklen_t = u32;
+pub const time_t = i64;
+pub const suseconds_t = i64;
+pub const uid_t = u32;
+pub const major_t = u32;
+pub const minor_t = u32;
+pub const port_t = c_int;
+pub const nfds_t = usize;
+pub const id_t = i32;
+pub const taskid_t = id_t;
+pub const projid_t = id_t;
+pub const poolid_t = id_t;
+pub const zoneid_t = id_t;
+pub const ctid_t = id_t;
+
+pub const dl_phdr_info = extern struct {
+ dlpi_addr: std.elf.Addr,
+ dlpi_name: ?[*:0]const u8,
+ dlpi_phdr: [*]std.elf.Phdr,
+ dlpi_phnum: std.elf.Half,
+ /// Incremented when a new object is mapped into the process.
+ dlpi_adds: u64,
+ /// Incremented when an object is unmapped from the process.
+ dlpi_subs: u64,
+};
+
+pub const RTLD = struct {
+ pub const LAZY = 0x00001;
+ pub const NOW = 0x00002;
+ pub const NOLOAD = 0x00004;
+ pub const GLOBAL = 0x00100;
+ pub const LOCAL = 0x00000;
+ pub const PARENT = 0x00200;
+ pub const GROUP = 0x00400;
+ pub const WORLD = 0x00800;
+ pub const NODELETE = 0x01000;
+ pub const FIRST = 0x02000;
+ pub const CONFGEN = 0x10000;
+
+ pub const NEXT = @intToPtr(*c_void, @bitCast(usize, @as(isize, -1)));
+ pub const DEFAULT = @intToPtr(*c_void, @bitCast(usize, @as(isize, -2)));
+ pub const SELF = @intToPtr(*c_void, @bitCast(usize, @as(isize, -3)));
+ pub const PROBE = @intToPtr(*c_void, @bitCast(usize, @as(isize, -4)));
+};
+
+pub const Flock = extern struct {
+ l_type: c_short,
+ l_whence: c_short,
+ l_start: off_t,
+ // len == 0 means until end of file.
+ l_len: off_t,
+ l_sysid: c_int,
+ l_pid: pid_t,
+ __pad: [4]c_long,
+};
+
+pub const utsname = extern struct {
+ sysname: [256:0]u8,
+ nodename: [256:0]u8,
+ release: [256:0]u8,
+ version: [256:0]u8,
+ machine: [256:0]u8,
+ domainname: [256:0]u8,
+};
+
+pub const addrinfo = extern struct {
+ flags: i32,
+ family: i32,
+ socktype: i32,
+ protocol: i32,
+ addrlen: socklen_t,
+ canonname: ?[*:0]u8,
+ addr: ?*sockaddr,
+ next: ?*addrinfo,
+};
+
+pub const EAI = enum(c_int) {
+ /// address family for hostname not supported
+ ADDRFAMILY = 1,
+ /// name could not be resolved at this time
+ AGAIN = 2,
+ /// flags parameter had an invalid value
+ BADFLAGS = 3,
+ /// non-recoverable failure in name resolution
+ FAIL = 4,
+ /// address family not recognized
+ FAMILY = 5,
+ /// memory allocation failure
+ MEMORY = 6,
+ /// no address associated with hostname
+ NODATA = 7,
+ /// name does not resolve
+ NONAME = 8,
+ /// service not recognized for socket type
+ SERVICE = 9,
+ /// intended socket type was not recognized
+ SOCKTYPE = 10,
+ /// system error returned in errno
+ SYSTEM = 11,
+ /// argument buffer overflow
+ OVERFLOW = 12,
+ /// resolved protocol is unknown
+ PROTOCOL = 13,
+
+ _,
+};
+
+pub const EAI_MAX = 14;
+
+pub const msghdr = extern struct {
+ /// optional address
+ msg_name: ?*sockaddr,
+ /// size of address
+ msg_namelen: socklen_t,
+ /// scatter/gather array
+ msg_iov: [*]iovec,
+ /// # elements in msg_iov
+ msg_iovlen: i32,
+ /// ancillary data
+ msg_control: ?*c_void,
+ /// ancillary data buffer len
+ msg_controllen: socklen_t,
+ /// flags on received message
+ msg_flags: i32,
+};
+
+pub const msghdr_const = extern struct {
+ /// optional address
+ msg_name: ?*const sockaddr,
+ /// size of address
+ msg_namelen: socklen_t,
+ /// scatter/gather array
+ msg_iov: [*]iovec_const,
+ /// # elements in msg_iov
+ msg_iovlen: i32,
+ /// ancillary data
+ msg_control: ?*c_void,
+ /// ancillary data buffer len
+ msg_controllen: socklen_t,
+ /// flags on received message
+ msg_flags: i32,
+};
+
+pub const cmsghdr = extern struct {
+ cmsg_len: socklen_t,
+ cmsg_level: i32,
+ cmsg_type: i32,
+};
+
+/// The stat structure used by libc.
+pub const Stat = extern struct {
+ dev: dev_t,
+ ino: ino_t,
+ mode: mode_t,
+ nlink: nlink_t,
+ uid: uid_t,
+ gid: gid_t,
+ rdev: dev_t,
+ size: off_t,
+ atim: timespec,
+ mtim: timespec,
+ ctim: timespec,
+ blksize: blksize_t,
+ blocks: blkcnt_t,
+ fstype: [16]u8,
+
+ pub fn atime(self: @This()) timespec {
+ return self.atim;
+ }
+
+ pub fn mtime(self: @This()) timespec {
+ return self.mtim;
+ }
+
+ pub fn ctime(self: @This()) timespec {
+ return self.ctim;
+ }
+};
+
+pub const timespec = extern struct {
+ tv_sec: i64,
+ tv_nsec: isize,
+};
+
+pub const timeval = extern struct {
+ /// seconds
+ tv_sec: time_t,
+ /// microseconds
+ tv_usec: suseconds_t,
+};
+
+pub const MAXNAMLEN = 511;
+
+pub const dirent = extern struct {
+ /// Inode number of entry.
+ d_ino: ino_t,
+ /// Offset of this entry on disk.
+ d_off: off_t,
+ /// Length of this record.
+ d_reclen: u16,
+ /// File name.
+ d_name: [MAXNAMLEN:0]u8,
+
+ pub fn reclen(self: dirent) u16 {
+ return self.d_reclen;
+ }
+};
+
+pub const SOCK = struct {
+ /// Datagram.
+ pub const DGRAM = 1;
+ /// STREAM.
+ pub const STREAM = 2;
+ /// Raw-protocol interface.
+ pub const RAW = 4;
+ /// Reliably-delivered message.
+ pub const RDM = 5;
+ /// Sequenced packed stream.
+ pub const SEQPACKET = 6;
+
+ pub const NONBLOCK = 0x100000;
+ pub const NDELAY = 0x200000;
+ pub const CLOEXEC = 0x080000;
+};
+
+pub const SO = struct {
+ pub const DEBUG = 0x0001;
+ pub const ACCEPTCONN = 0x0002;
+ pub const REUSEADDR = 0x0004;
+ pub const KEEPALIVE = 0x0008;
+ pub const DONTROUTE = 0x0010;
+ pub const BROADCAST = 0x0020;
+ pub const USELOOPBACK = 0x0040;
+ pub const LINGER = 0x0080;
+ pub const OOBINLINE = 0x0100;
+ pub const DGRAM_ERRIND = 0x0200;
+ pub const RECVUCRED = 0x0400;
+
+ pub const SNDBUF = 0x1001;
+ pub const RCVBUF = 0x1002;
+ pub const SNDLOWAT = 0x1003;
+ pub const RCVLOWAT = 0x1004;
+ pub const SNDTIMEO = 0x1005;
+ pub const RCVTIMEO = 0x1006;
+ pub const ERROR = 0x1007;
+ pub const TYPE = 0x1008;
+ pub const PROTOTYPE = 0x1009;
+ pub const ANON_MLP = 0x100a;
+ pub const MAC_EXEMPT = 0x100b;
+ pub const DOMAIN = 0x100c;
+ pub const RCVPSH = 0x100d;
+
+ pub const SECATTR = 0x1011;
+ pub const TIMESTAMP = 0x1013;
+ pub const ALLZONES = 0x1014;
+ pub const EXCLBIND = 0x1015;
+ pub const MAC_IMPLICIT = 0x1016;
+ pub const VRRP = 0x1017;
+};
+
+pub const SOMAXCONN = 128;
+
+pub const SCM = struct {
+ pub const UCRED = 0x1012;
+ pub const RIGHTS = 0x1010;
+ pub const TIMESTAMP = SO.TIMESTAMP;
+};
+
+pub const AF = struct {
+ pub const UNSPEC = 0;
+ pub const UNIX = 1;
+ pub const LOCAL = UNIX;
+ pub const FILE = UNIX;
+ pub const INET = 2;
+ pub const IMPLINK = 3;
+ pub const PUP = 4;
+ pub const CHAOS = 5;
+ pub const NS = 6;
+ pub const NBS = 7;
+ pub const ECMA = 8;
+ pub const DATAKIT = 9;
+ pub const CCITT = 10;
+ pub const SNA = 11;
+ pub const DECnet = 12;
+ pub const DLI = 13;
+ pub const LAT = 14;
+ pub const HYLINK = 15;
+ pub const APPLETALK = 16;
+ pub const NIT = 17;
+ pub const @"802" = 18;
+ pub const OSI = 19;
+ pub const X25 = 20;
+ pub const OSINET = 21;
+ pub const GOSIP = 22;
+ pub const IPX = 23;
+ pub const ROUTE = 24;
+ pub const LINK = 25;
+ pub const INET6 = 26;
+ pub const KEY = 27;
+ pub const NCA = 28;
+ pub const POLICY = 29;
+ pub const INET_OFFLOAD = 30;
+ pub const TRILL = 31;
+ pub const PACKET = 32;
+ pub const LX_NETLINK = 33;
+ pub const MAX = 33;
+};
+
+pub const SOL = struct {
+ pub const SOCKET = 0xffff;
+ pub const ROUTE = 0xfffe;
+ pub const PACKET = 0xfffd;
+ pub const FILTER = 0xfffc;
+};
+
+pub const PF = struct {
+ pub const UNSPEC = AF.UNSPEC;
+ pub const UNIX = AF.UNIX;
+ pub const LOCAL = UNIX;
+ pub const FILE = UNIX;
+ pub const INET = AF.INET;
+ pub const IMPLINK = AF.IMPLINK;
+ pub const PUP = AF.PUP;
+ pub const CHAOS = AF.CHAOS;
+ pub const NS = AF.NS;
+ pub const NBS = AF.NBS;
+ pub const ECMA = AF.ECMA;
+ pub const DATAKIT = AF.DATAKIT;
+ pub const CCITT = AF.CCITT;
+ pub const SNA = AF.SNA;
+ pub const DECnet = AF.DECnet;
+ pub const DLI = AF.DLI;
+ pub const LAT = AF.LAT;
+ pub const HYLINK = AF.HYLINK;
+ pub const APPLETALK = AF.APPLETALK;
+ pub const NIT = AF.NIT;
+ pub const @"802" = AF.@"802";
+ pub const OSI = AF.OSI;
+ pub const X25 = AF.X25;
+ pub const OSINET = AF.OSINET;
+ pub const GOSIP = AF.GOSIP;
+ pub const IPX = AF.IPX;
+ pub const ROUTE = AF.ROUTE;
+ pub const LINK = AF.LINK;
+ pub const INET6 = AF.INET6;
+ pub const KEY = AF.KEY;
+ pub const NCA = AF.NCA;
+ pub const POLICY = AF.POLICY;
+ pub const TRILL = AF.TRILL;
+ pub const PACKET = AF.PACKET;
+ pub const LX_NETLINK = AF.LX_NETLINK;
+ pub const MAX = AF.MAX;
+};
+
+pub const in_port_t = u16;
+pub const sa_family_t = u16;
+
+pub const sockaddr = extern struct {
+ /// address family
+ family: sa_family_t,
+
+ /// actually longer; address value
+ data: [14]u8,
+
+ pub const SS_MAXSIZE = 256;
+ pub const storage = std.x.os.Socket.Address.Native.Storage;
+
+ pub const in = extern struct {
+ family: sa_family_t = AF.INET,
+ port: in_port_t,
+ addr: u32,
+ zero: [8]u8 = [8]u8{ 0, 0, 0, 0, 0, 0, 0, 0 },
+ };
+
+ pub const in6 = extern struct {
+ family: sa_family_t = AF.INET6,
+ port: in_port_t,
+ flowinfo: u32,
+ addr: [16]u8,
+ scope_id: u32,
+ __src_id: u32 = 0,
+ };
+
+ /// Definitions for UNIX IPC domain.
+ pub const un = extern struct {
+ family: sa_family_t = AF.UNIX,
+ path: [108]u8,
+ };
+};
+
+pub const AI = struct {
+ /// IPv4-mapped IPv6 address
+ pub const V4MAPPED = 0x0001;
+ pub const ALL = 0x0002;
+ /// only if any address is assigned
+ pub const ADDRCONFIG = 0x0004;
+ /// get address to use bind()
+ pub const PASSIVE = 0x0008;
+ /// fill ai_canonname
+ pub const CANONNAME = 0x0010;
+ /// prevent host name resolution
+ pub const NUMERICHOST = 0x0020;
+ /// prevent service name resolution
+ pub const NUMERICSERV = 0x0040;
+};
+
+pub const NI = struct {
+ pub const NOFQDN = 0x0001;
+ pub const NUMERICHOST = 0x0002;
+ pub const NAMEREQD = 0x0004;
+ pub const NUMERICSERV = 0x0008;
+ pub const DGRAM = 0x0010;
+ pub const WITHSCOPEID = 0x0020;
+ pub const NUMERICSCOPE = 0x0040;
+
+ pub const MAXHOST = 1025;
+ pub const MAXSERV = 32;
+};
+
+pub const PATH_MAX = 1024;
+pub const IOV_MAX = 1024;
+
+pub const STDIN_FILENO = 0;
+pub const STDOUT_FILENO = 1;
+pub const STDERR_FILENO = 2;
+
+pub const PROT = struct {
+ pub const NONE = 0;
+ pub const READ = 1;
+ pub const WRITE = 2;
+ pub const EXEC = 4;
+};
+
+pub const CLOCK = struct {
+ pub const VIRTUAL = 1;
+ pub const THREAD_CPUTIME_ID = 2;
+ pub const REALTIME = 3;
+ pub const MONOTONIC = 4;
+ pub const PROCESS_CPUTIME_ID = 5;
+ pub const HIGHRES = MONOTONIC;
+ pub const PROF = THREAD_CPUTIME_ID;
+};
+
+pub const MAP = struct {
+ pub const FAILED = @intToPtr(*c_void, maxInt(usize));
+ pub const SHARED = 0x0001;
+ pub const PRIVATE = 0x0002;
+ pub const TYPE = 0x000f;
+
+ pub const FILE = 0x0000;
+ pub const FIXED = 0x0010;
+ // Unimplemented
+ pub const RENAME = 0x0020;
+ pub const NORESERVE = 0x0040;
+ /// Force mapping in lower 4G address space
+ pub const @"32BIT" = 0x0080;
+
+ pub const ANON = 0x0100;
+ pub const ANONYMOUS = ANON;
+ pub const ALIGN = 0x0200;
+ pub const TEXT = 0x0400;
+ pub const INITDATA = 0x0800;
+};
+
+pub const MADV = struct {
+ /// no further special treatment
+ pub const NORMAL = 0;
+ /// expect random page references
+ pub const RANDOM = 1;
+ /// expect sequential page references
+ pub const SEQUENTIAL = 2;
+ /// will need these pages
+ pub const WILLNEED = 3;
+ /// don't need these pages
+ pub const DONTNEED = 4;
+ /// contents can be freed
+ pub const FREE = 5;
+ /// default access
+ pub const ACCESS_DEFAULT = 6;
+ /// next LWP to access heavily
+ pub const ACCESS_LWP = 7;
+ /// many processes to access heavily
+ pub const ACCESS_MANY = 8;
+ /// contents will be purged
+ pub const PURGE = 9;
+};
+
+pub const W = struct {
+ pub const EXITED = 0o001;
+ pub const TRAPPED = 0o002;
+ pub const UNTRACED = 0o004;
+ pub const STOPPED = UNTRACED;
+ pub const CONTINUED = 0o010;
+ pub const NOHANG = 0o100;
+ pub const NOWAIT = 0o200;
+
+ pub fn EXITSTATUS(s: u32) u8 {
+ return @intCast(u8, (s >> 8) & 0xff);
+ }
+ pub fn TERMSIG(s: u32) u32 {
+ return s & 0x7f;
+ }
+ pub fn STOPSIG(s: u32) u32 {
+ return EXITSTATUS(s);
+ }
+ pub fn IFEXITED(s: u32) bool {
+ return TERMSIG(s) == 0;
+ }
+
+ pub fn IFCONTINUED(s: u32) bool {
+ return ((s & 0o177777) == 0o177777);
+ }
+
+ pub fn IFSTOPPED(s: u32) bool {
+ return (s & 0x00ff == 0o177) and (s & 0xff00 != 0);
+ }
+
+ pub fn IFSIGNALED(s: u32) bool {
+ return s & 0x00ff > 0 and s & 0xff00 == 0;
+ }
+};
+
+pub const SA = struct {
+ pub const ONSTACK = 0x00000001;
+ pub const RESETHAND = 0x00000002;
+ pub const RESTART = 0x00000004;
+ pub const SIGINFO = 0x00000008;
+ pub const NODEFER = 0x00000010;
+ pub const NOCLDWAIT = 0x00010000;
+};
+
+// access function
+pub const F_OK = 0; // test for existence of file
+pub const X_OK = 1; // test for execute or search permission
+pub const W_OK = 2; // test for write permission
+pub const R_OK = 4; // test for read permission
+
+pub const F = struct {
+ /// Unlock a previously locked region
+ pub const ULOCK = 0;
+ /// Lock a region for exclusive use
+ pub const LOCK = 1;
+ /// Test and lock a region for exclusive use
+ pub const TLOCK = 2;
+ /// Test a region for other processes locks
+ pub const TEST = 3;
+
+ /// Duplicate fildes
+ pub const DUPFD = 0;
+ /// Get fildes flags
+ pub const GETFD = 1;
+ /// Set fildes flags
+ pub const SETFD = 2;
+ /// Get file flags
+ pub const GETFL = 3;
+ /// Get file flags including open-only flags
+ pub const GETXFL = 45;
+ /// Set file flags
+ pub const SETFL = 4;
+
+ /// Unused
+ pub const CHKFL = 8;
+ /// Duplicate fildes at third arg
+ pub const DUP2FD = 9;
+ /// Like DUP2FD with O_CLOEXEC set EINVAL is fildes matches arg1
+ pub const DUP2FD_CLOEXEC = 36;
+ /// Like DUPFD with O_CLOEXEC set
+ pub const DUPFD_CLOEXEC = 37;
+
+ /// Is the file desc. a stream ?
+ pub const ISSTREAM = 13;
+ /// Turn on private access to file
+ pub const PRIV = 15;
+ /// Turn off private access to file
+ pub const NPRIV = 16;
+ /// UFS quota call
+ pub const QUOTACTL = 17;
+ /// Get number of BLKSIZE blocks allocated
+ pub const BLOCKS = 18;
+ /// Get optimal I/O block size
+ pub const BLKSIZE = 19;
+ /// Get owner (socket emulation)
+ pub const GETOWN = 23;
+ /// Set owner (socket emulation)
+ pub const SETOWN = 24;
+ /// Object reuse revoke access to file desc.
+ pub const REVOKE = 25;
+ /// Does vp have NFS locks private to lock manager
+ pub const HASREMOTELOCKS = 26;
+
+ /// Set file lock
+ pub const SETLK = 6;
+ /// Set file lock and wait
+ pub const SETLKW = 7;
+ /// Allocate file space
+ pub const ALLOCSP = 10;
+ /// Free file space
+ pub const FREESP = 11;
+ /// Get file lock
+ pub const GETLK = 14;
+ /// Get file lock owned by file
+ pub const OFD_GETLK = 47;
+ /// Set file lock owned by file
+ pub const OFD_SETLK = 48;
+ /// Set file lock owned by file and wait
+ pub const OFD_SETLKW = 49;
+ /// Set a file share reservation
+ pub const SHARE = 40;
+ /// Remove a file share reservation
+ pub const UNSHARE = 41;
+ /// Create Poison FD
+ pub const BADFD = 46;
+
+ /// Read lock
+ pub const RDLCK = 1;
+ /// Write lock
+ pub const WRLCK = 2;
+ /// Remove lock(s)
+ pub const UNLCK = 3;
+ /// remove remote locks for a given system
+ pub const UNLKSYS = 4;
+
+ // f_access values
+ /// Read-only share access
+ pub const RDACC = 0x1;
+ /// Write-only share access
+ pub const WRACC = 0x2;
+ /// Read-Write share access
+ pub const RWACC = 0x3;
+
+ // f_deny values
+ /// Don't deny others access
+ pub const NODNY = 0x0;
+ /// Deny others read share access
+ pub const RDDNY = 0x1;
+ /// Deny others write share access
+ pub const WRDNY = 0x2;
+ /// Deny others read or write share access
+ pub const RWDNY = 0x3;
+ /// private flag: Deny delete share access
+ pub const RMDNY = 0x4;
+};
+
+pub const O = struct {
+ pub const RDONLY = 0;
+ pub const WRONLY = 1;
+ pub const RDWR = 2;
+ pub const SEARCH = 0x200000;
+ pub const EXEC = 0x400000;
+ pub const NDELAY = 0x04;
+ pub const APPEND = 0x08;
+ pub const SYNC = 0x10;
+ pub const DSYNC = 0x40;
+ pub const RSYNC = 0x8000;
+ pub const NONBLOCK = 0x80;
+ pub const LARGEFILE = 0x2000;
+
+ pub const CREAT = 0x100;
+ pub const TRUNC = 0x200;
+ pub const EXCL = 0x400;
+ pub const NOCTTY = 0x800;
+ pub const XATTR = 0x4000;
+ pub const NOFOLLOW = 0x20000;
+ pub const NOLINKS = 0x40000;
+ pub const CLOEXEC = 0x800000;
+ pub const DIRECTORY = 0x1000000;
+ pub const DIRECT = 0x2000000;
+};
+
+pub const LOCK = struct {
+ pub const SH = 1;
+ pub const EX = 2;
+ pub const NB = 4;
+ pub const UN = 8;
+};
+
+pub const FD_CLOEXEC = 1;
+
+pub const SEEK = struct {
+ pub const SET = 0;
+ pub const CUR = 1;
+ pub const END = 2;
+ pub const DATA = 3;
+ pub const HOLE = 4;
+};
+
+pub const tcflag_t = c_uint;
+pub const cc_t = u8;
+pub const speed_t = c_uint;
+
+pub const NCCS = 19;
+
+pub const termios = extern struct {
+ c_iflag: tcflag_t,
+ c_oflag: tcflag_t,
+ c_cflag: tcflag_t,
+ c_lflag: tcflag_t,
+ c_cc: [NCCS]cc_t,
+};
+
+fn tioc(t: u16, num: u8) u16 {
+ return (t << 8) | num;
+}
+
+pub const T = struct {
+ pub const CGETA = tioc('T', 1);
+ pub const CSETA = tioc('T', 2);
+ pub const CSETAW = tioc('T', 3);
+ pub const CSETAF = tioc('T', 4);
+ pub const CSBRK = tioc('T', 5);
+ pub const CXONC = tioc('T', 6);
+ pub const CFLSH = tioc('T', 7);
+ pub const IOCGWINSZ = tioc('T', 104);
+ pub const IOCSWINSZ = tioc('T', 103);
+ // Softcarrier ioctls
+ pub const IOCGSOFTCAR = tioc('T', 105);
+ pub const IOCSSOFTCAR = tioc('T', 106);
+ // termios ioctls
+ pub const CGETS = tioc('T', 13);
+ pub const CSETS = tioc('T', 14);
+ pub const CSANOW = tioc('T', 14);
+ pub const CSETSW = tioc('T', 15);
+ pub const CSADRAIN = tioc('T', 15);
+ pub const CSETSF = tioc('T', 16);
+ pub const IOCSETLD = tioc('T', 123);
+ pub const IOCGETLD = tioc('T', 124);
+ // NTP PPS ioctls
+ pub const IOCGPPS = tioc('T', 125);
+ pub const IOCSPPS = tioc('T', 126);
+ pub const IOCGPPSEV = tioc('T', 127);
+
+ pub const IOCGETD = tioc('t', 0);
+ pub const IOCSETD = tioc('t', 1);
+ pub const IOCHPCL = tioc('t', 2);
+ pub const IOCGETP = tioc('t', 8);
+ pub const IOCSETP = tioc('t', 9);
+ pub const IOCSETN = tioc('t', 10);
+ pub const IOCEXCL = tioc('t', 13);
+ pub const IOCNXCL = tioc('t', 14);
+ pub const IOCFLUSH = tioc('t', 16);
+ pub const IOCSETC = tioc('t', 17);
+ pub const IOCGETC = tioc('t', 18);
+ /// bis local mode bits
+ pub const IOCLBIS = tioc('t', 127);
+ /// bic local mode bits
+ pub const IOCLBIC = tioc('t', 126);
+ /// set entire local mode word
+ pub const IOCLSET = tioc('t', 125);
+ /// get local modes
+ pub const IOCLGET = tioc('t', 124);
+ /// set break bit
+ pub const IOCSBRK = tioc('t', 123);
+ /// clear break bit
+ pub const IOCCBRK = tioc('t', 122);
+ /// set data terminal ready
+ pub const IOCSDTR = tioc('t', 121);
+ /// clear data terminal ready
+ pub const IOCCDTR = tioc('t', 120);
+ /// set local special chars
+ pub const IOCSLTC = tioc('t', 117);
+ /// get local special chars
+ pub const IOCGLTC = tioc('t', 116);
+ /// driver output queue size
+ pub const IOCOUTQ = tioc('t', 115);
+ /// void tty association
+ pub const IOCNOTTY = tioc('t', 113);
+ /// get a ctty
+ pub const IOCSCTTY = tioc('t', 132);
+ /// stop output, like ^S
+ pub const IOCSTOP = tioc('t', 111);
+ /// start output, like ^Q
+ pub const IOCSTART = tioc('t', 110);
+ /// get pgrp of tty
+ pub const IOCGPGRP = tioc('t', 20);
+ /// set pgrp of tty
+ pub const IOCSPGRP = tioc('t', 21);
+ /// get session id on ctty
+ pub const IOCGSID = tioc('t', 22);
+ /// simulate terminal input
+ pub const IOCSTI = tioc('t', 23);
+ /// set all modem bits
+ pub const IOCMSET = tioc('t', 26);
+ /// bis modem bits
+ pub const IOCMBIS = tioc('t', 27);
+ /// bic modem bits
+ pub const IOCMBIC = tioc('t', 28);
+ /// get all modem bits
+ pub const IOCMGET = tioc('t', 29);
+};
+
+pub const winsize = extern struct {
+ ws_row: u16,
+ ws_col: u16,
+ ws_xpixel: u16,
+ ws_ypixel: u16,
+};
+
+const NSIG = 75;
+
+pub const SIG = struct {
+ pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
+ pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
+ pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
+ pub const HOLD = @intToPtr(?Sigaction.sigaction_fn, 2);
+
+ pub const WORDS = 4;
+ pub const MAXSIG = 75;
+
+ pub const SIG_BLOCK = 1;
+ pub const SIG_UNBLOCK = 2;
+ pub const SIG_SETMASK = 3;
+
+ pub const HUP = 1;
+ pub const INT = 2;
+ pub const QUIT = 3;
+ pub const ILL = 4;
+ pub const TRAP = 5;
+ pub const IOT = 6;
+ pub const ABRT = 6;
+ pub const EMT = 7;
+ pub const FPE = 8;
+ pub const KILL = 9;
+ pub const BUS = 10;
+ pub const SEGV = 11;
+ pub const SYS = 12;
+ pub const PIPE = 13;
+ pub const ALRM = 14;
+ pub const TERM = 15;
+ pub const USR1 = 16;
+ pub const USR2 = 17;
+ pub const CLD = 18;
+ pub const CHLD = 18;
+ pub const PWR = 19;
+ pub const WINCH = 20;
+ pub const URG = 21;
+ pub const POLL = 22;
+ pub const IO = POLL;
+ pub const STOP = 23;
+ pub const TSTP = 24;
+ pub const CONT = 25;
+ pub const TTIN = 26;
+ pub const TTOU = 27;
+ pub const VTALRM = 28;
+ pub const PROF = 29;
+ pub const XCPU = 30;
+ pub const XFSZ = 31;
+ pub const WAITING = 32;
+ pub const LWP = 33;
+ pub const FREEZE = 34;
+ pub const THAW = 35;
+ pub const CANCEL = 36;
+ pub const LOST = 37;
+ pub const XRES = 38;
+ pub const JVM1 = 39;
+ pub const JVM2 = 40;
+ pub const INFO = 41;
+
+ pub const RTMIN = 42;
+ pub const RTMAX = 74;
+
+ pub inline fn IDX(sig: usize) usize {
+ return sig - 1;
+ }
+ pub inline fn WORD(sig: usize) usize {
+ return IDX(sig) >> 5;
+ }
+ pub inline fn BIT(sig: usize) usize {
+ return @as(usize, 1) << @intCast(u6, IDX(sig) & 31);
+ }
+ pub inline fn VALID(sig: usize) bool {
+ return sig <= MAXSIG and sig > 0;
+ }
+};
+
+/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
+pub const Sigaction = extern struct {
+ pub const handler_fn = fn (c_int) callconv(.C) void;
+ pub const sigaction_fn = fn (c_int, *const siginfo_t, ?*const c_void) callconv(.C) void;
+
+ /// signal options
+ flags: c_uint,
+ /// signal handler
+ handler: extern union {
+ handler: ?handler_fn,
+ sigaction: ?sigaction_fn,
+ },
+ /// signal mask to apply
+ mask: sigset_t,
+};
+
+pub const sigval_t = extern union {
+ int: c_int,
+ ptr: ?*c_void,
+};
+
+pub const siginfo_t = extern struct {
+ signo: c_int,
+ code: c_int,
+ errno: c_int,
+ // 64bit architectures insert 4bytes of padding here, this is done by
+ // correctly aligning the reason field
+ reason: extern union {
+ proc: extern struct {
+ pid: pid_t,
+ pdata: extern union {
+ kill: extern struct {
+ uid: uid_t,
+ value: sigval_t,
+ },
+ cld: extern struct {
+ utime: clock_t,
+ status: c_int,
+ stime: clock_t,
+ },
+ },
+ contract: ctid_t,
+ zone: zoneid_t,
+ },
+ fault: extern struct {
+ addr: ?*c_void,
+ trapno: c_int,
+ pc: ?*c_void,
+ },
+ file: extern struct {
+ // fd not currently available for SIGPOLL.
+ fd: c_int,
+ band: c_long,
+ },
+ prof: extern struct {
+ addr: ?*c_void,
+ timestamp: timespec,
+ syscall: c_short,
+ sysarg: u8,
+ fault: u8,
+ args: [8]c_long,
+ state: [10]c_int,
+ },
+ rctl: extern struct {
+ entity: i32,
+ },
+ __pad: [256 - 4 * @sizeOf(c_int)]u8,
+ } align(@sizeOf(usize)),
+};
+
+comptime {
+ std.debug.assert(@sizeOf(siginfo_t) == 256);
+ std.debug.assert(@alignOf(siginfo_t) == @sizeOf(usize));
+}
+
+pub const sigset_t = extern struct {
+ __bits: [SIG.WORDS]u32,
+};
+
+pub const empty_sigset = sigset_t{ .__bits = [_]u32{0} ** SIG.WORDS };
+
+pub const fpregset_t = extern union {
+ regs: [130]u32,
+ chip_state: extern struct {
+ cw: u16,
+ sw: u16,
+ fctw: u8,
+ __fx_rsvd: u8,
+ fop: u16,
+ rip: u64,
+ rdp: u64,
+ mxcsr: u32,
+ mxcsr_mask: u32,
+ st: [8]extern union {
+ fpr_16: [5]u16,
+ __fpr_pad: u128,
+ },
+ xmm: [16]u128,
+ __fx_ign2: [6]u128,
+ status: u32,
+ xstatus: u32,
+ },
+};
+
+pub const mcontext_t = extern struct {
+ gregs: [28]u64,
+ fpregs: fpregset_t,
+};
+
+pub const REG = struct {
+ pub const RBP = 10;
+ pub const RIP = 17;
+ pub const RSP = 20;
+};
+
+pub const ucontext_t = extern struct {
+ flags: u64,
+ link: ?*ucontext_t,
+ sigmask: sigset_t,
+ stack: stack_t,
+ mcontext: mcontext_t,
+ brand_data: [3]?*c_void,
+ filler: [2]i64,
+};
+
+pub const GETCONTEXT = 0;
+pub const SETCONTEXT = 1;
+pub const GETUSTACK = 2;
+pub const SETUSTACK = 3;
+
+pub const E = enum(u16) {
+ /// No error occurred.
+ SUCCESS = 0,
+ /// Not super-user
+ PERM = 1,
+ /// No such file or directory
+ NOENT = 2,
+ /// No such process
+ SRCH = 3,
+ /// interrupted system call
+ INTR = 4,
+ /// I/O error
+ IO = 5,
+ /// No such device or address
+ NXIO = 6,
+ /// Arg list too long
+ @"2BIG" = 7,
+ /// Exec format error
+ NOEXEC = 8,
+ /// Bad file number
+ BADF = 9,
+ /// No children
+ CHILD = 10,
+ /// Resource temporarily unavailable.
+ /// also: WOULDBLOCK: Operation would block.
+ AGAIN = 11,
+ /// Not enough core
+ NOMEM = 12,
+ /// Permission denied
+ ACCES = 13,
+ /// Bad address
+ FAULT = 14,
+ /// Block device required
+ NOTBLK = 15,
+ /// Mount device busy
+ BUSY = 16,
+ /// File exists
+ EXIST = 17,
+ /// Cross-device link
+ XDEV = 18,
+ /// No such device
+ NODEV = 19,
+ /// Not a directory
+ NOTDIR = 20,
+ /// Is a directory
+ ISDIR = 21,
+ /// Invalid argument
+ INVAL = 22,
+ /// File table overflow
+ NFILE = 23,
+ /// Too many open files
+ MFILE = 24,
+ /// Inappropriate ioctl for device
+ NOTTY = 25,
+ /// Text file busy
+ TXTBSY = 26,
+ /// File too large
+ FBIG = 27,
+ /// No space left on device
+ NOSPC = 28,
+ /// Illegal seek
+ SPIPE = 29,
+ /// Read only file system
+ ROFS = 30,
+ /// Too many links
+ MLINK = 31,
+ /// Broken pipe
+ PIPE = 32,
+ /// Math arg out of domain of func
+ DOM = 33,
+ /// Math result not representable
+ RANGE = 34,
+ /// No message of desired type
+ NOMSG = 35,
+ /// Identifier removed
+ IDRM = 36,
+ /// Channel number out of range
+ CHRNG = 37,
+ /// Level 2 not synchronized
+ L2NSYNC = 38,
+ /// Level 3 halted
+ L3HLT = 39,
+ /// Level 3 reset
+ L3RST = 40,
+ /// Link number out of range
+ LNRNG = 41,
+ /// Protocol driver not attached
+ UNATCH = 42,
+ /// No CSI structure available
+ NOCSI = 43,
+ /// Level 2 halted
+ L2HLT = 44,
+ /// Deadlock condition.
+ DEADLK = 45,
+ /// No record locks available.
+ NOLCK = 46,
+ /// Operation canceled
+ CANCELED = 47,
+ /// Operation not supported
+ NOTSUP = 48,
+
+ // Filesystem Quotas
+ /// Disc quota exceeded
+ DQUOT = 49,
+
+ // Convergent Error Returns
+ /// invalid exchange
+ BADE = 50,
+ /// invalid request descriptor
+ BADR = 51,
+ /// exchange full
+ XFULL = 52,
+ /// no anode
+ NOANO = 53,
+ /// invalid request code
+ BADRQC = 54,
+ /// invalid slot
+ BADSLT = 55,
+ /// file locking deadlock error
+ DEADLOCK = 56,
+ /// bad font file fmt
+ BFONT = 57,
+
+ // Interprocess Robust Locks
+ /// process died with the lock
+ OWNERDEAD = 58,
+ /// lock is not recoverable
+ NOTRECOVERABLE = 59,
+ /// locked lock was unmapped
+ LOCKUNMAPPED = 72,
+ /// Facility is not active
+ NOTACTIVE = 73,
+ /// multihop attempted
+ MULTIHOP = 74,
+ /// trying to read unreadable message
+ BADMSG = 77,
+ /// path name is too long
+ NAMETOOLONG = 78,
+ /// value too large to be stored in data type
+ OVERFLOW = 79,
+ /// given log. name not unique
+ NOTUNIQ = 80,
+ /// f.d. invalid for this operation
+ BADFD = 81,
+ /// Remote address changed
+ REMCHG = 82,
+
+ // Stream Problems
+ /// Device not a stream
+ NOSTR = 60,
+ /// no data (for no delay io)
+ NODATA = 61,
+ /// timer expired
+ TIME = 62,
+ /// out of streams resources
+ NOSR = 63,
+ /// Machine is not on the network
+ NONET = 64,
+ /// Package not installed
+ NOPKG = 65,
+ /// The object is remote
+ REMOTE = 66,
+ /// the link has been severed
+ NOLINK = 67,
+ /// advertise error
+ ADV = 68,
+ /// srmount error
+ SRMNT = 69,
+ /// Communication error on send
+ COMM = 70,
+ /// Protocol error
+ PROTO = 71,
+
+ // Shared Library Problems
+ /// Can't access a needed shared lib.
+ LIBACC = 83,
+ /// Accessing a corrupted shared lib.
+ LIBBAD = 84,
+ /// .lib section in a.out corrupted.
+ LIBSCN = 85,
+ /// Attempting to link in too many libs.
+ LIBMAX = 86,
+ /// Attempting to exec a shared library.
+ LIBEXEC = 87,
+ /// Illegal byte sequence.
+ ILSEQ = 88,
+ /// Unsupported file system operation
+ NOSYS = 89,
+ /// Symbolic link loop
+ LOOP = 90,
+ /// Restartable system call
+ RESTART = 91,
+ /// if pipe/FIFO, don't sleep in stream head
+ STRPIPE = 92,
+ /// directory not empty
+ NOTEMPTY = 93,
+ /// Too many users (for UFS)
+ USERS = 94,
+
+ // BSD Networking Software
+ // Argument Errors
+ /// Socket operation on non-socket
+ NOTSOCK = 95,
+ /// Destination address required
+ DESTADDRREQ = 96,
+ /// Message too long
+ MSGSIZE = 97,
+ /// Protocol wrong type for socket
+ PROTOTYPE = 98,
+ /// Protocol not available
+ NOPROTOOPT = 99,
+ /// Protocol not supported
+ PROTONOSUPPORT = 120,
+ /// Socket type not supported
+ SOCKTNOSUPPORT = 121,
+ /// Operation not supported on socket
+ OPNOTSUPP = 122,
+ /// Protocol family not supported
+ PFNOSUPPORT = 123,
+ /// Address family not supported by protocol family
+ AFNOSUPPORT = 124,
+ /// Address already in use
+ ADDRINUSE = 125,
+ /// Can't assign requested address
+ ADDRNOTAVAIL = 126,
+
+ // Operational Errors
+ /// Network is down
+ NETDOWN = 127,
+ /// Network is unreachable
+ NETUNREACH = 128,
+ /// Network dropped connection because of reset
+ NETRESET = 129,
+ /// Software caused connection abort
+ CONNABORTED = 130,
+ /// Connection reset by peer
+ CONNRESET = 131,
+ /// No buffer space available
+ NOBUFS = 132,
+ /// Socket is already connected
+ ISCONN = 133,
+ /// Socket is not connected
+ NOTCONN = 134,
+ /// Can't send after socket shutdown
+ SHUTDOWN = 143,
+ /// Too many references: can't splice
+ TOOMANYREFS = 144,
+ /// Connection timed out
+ TIMEDOUT = 145,
+ /// Connection refused
+ CONNREFUSED = 146,
+ /// Host is down
+ HOSTDOWN = 147,
+ /// No route to host
+ HOSTUNREACH = 148,
+ /// operation already in progress
+ ALREADY = 149,
+ /// operation now in progress
+ INPROGRESS = 150,
+
+ // SUN Network File System
+ /// Stale NFS file handle
+ STALE = 151,
+
+ _,
+};
+
+pub const MINSIGSTKSZ = 2048;
+pub const SIGSTKSZ = 8192;
+
+pub const SS_ONSTACK = 0x1;
+pub const SS_DISABLE = 0x2;
+
+pub const stack_t = extern struct {
+ sp: [*]u8,
+ size: isize,
+ flags: i32,
+};
+
+pub const S = struct {
+ pub const IFMT = 0o170000;
+
+ pub const IFIFO = 0o010000;
+ pub const IFCHR = 0o020000;
+ pub const IFDIR = 0o040000;
+ pub const IFBLK = 0o060000;
+ pub const IFREG = 0o100000;
+ pub const IFLNK = 0o120000;
+ pub const IFSOCK = 0o140000;
+ /// SunOS 2.6 Door
+ pub const IFDOOR = 0o150000;
+ /// Solaris 10 Event Port
+ pub const IFPORT = 0o160000;
+
+ pub const ISUID = 0o4000;
+ pub const ISGID = 0o2000;
+ pub const ISVTX = 0o1000;
+ pub const IRWXU = 0o700;
+ pub const IRUSR = 0o400;
+ pub const IWUSR = 0o200;
+ pub const IXUSR = 0o100;
+ pub const IRWXG = 0o070;
+ pub const IRGRP = 0o040;
+ pub const IWGRP = 0o020;
+ pub const IXGRP = 0o010;
+ pub const IRWXO = 0o007;
+ pub const IROTH = 0o004;
+ pub const IWOTH = 0o002;
+ pub const IXOTH = 0o001;
+
+ pub fn ISFIFO(m: u32) bool {
+ return m & IFMT == IFIFO;
+ }
+
+ pub fn ISCHR(m: u32) bool {
+ return m & IFMT == IFCHR;
+ }
+
+ pub fn ISDIR(m: u32) bool {
+ return m & IFMT == IFDIR;
+ }
+
+ pub fn ISBLK(m: u32) bool {
+ return m & IFMT == IFBLK;
+ }
+
+ pub fn ISREG(m: u32) bool {
+ return m & IFMT == IFREG;
+ }
+
+ pub fn ISLNK(m: u32) bool {
+ return m & IFMT == IFLNK;
+ }
+
+ pub fn ISSOCK(m: u32) bool {
+ return m & IFMT == IFSOCK;
+ }
+
+ pub fn ISDOOR(m: u32) bool {
+ return m & IFMT == IFDOOR;
+ }
+
+ pub fn ISPORT(m: u32) bool {
+ return m & IFMT == IFPORT;
+ }
+};
+
+pub const AT = struct {
+ /// Magic value that specify the use of the current working directory
+ /// to determine the target of relative file paths in the openat() and
+ /// similar syscalls.
+ pub const FDCWD = @bitCast(fd_t, @as(u32, 0xffd19553));
+
+ /// Do not follow symbolic links
+ pub const SYMLINK_NOFOLLOW = 0x1000;
+ /// Follow symbolic link
+ pub const SYMLINK_FOLLOW = 0x2000;
+ /// Remove directory instead of file
+ pub const REMOVEDIR = 0x1;
+ pub const TRIGGER = 0x2;
+ /// Check access using effective user and group ID
+ pub const EACCESS = 0x4;
+};
+
+pub const POSIX_FADV = struct {
+ pub const NORMAL = 0;
+ pub const RANDOM = 1;
+ pub const SEQUENTIAL = 2;
+ pub const WILLNEED = 3;
+ pub const DONTNEED = 4;
+ pub const NOREUSE = 5;
+};
+
+pub const HOST_NAME_MAX = 255;
+
+pub const IPPROTO = struct {
+ /// dummy for IP
+ pub const IP = 0;
+ /// Hop by hop header for IPv6
+ pub const HOPOPTS = 0;
+ /// control message protocol
+ pub const ICMP = 1;
+ /// group control protocol
+ pub const IGMP = 2;
+ /// gateway^2 (deprecated)
+ pub const GGP = 3;
+ /// IP in IP encapsulation
+ pub const ENCAP = 4;
+ /// tcp
+ pub const TCP = 6;
+ /// exterior gateway protocol
+ pub const EGP = 8;
+ /// pup
+ pub const PUP = 12;
+ /// user datagram protocol
+ pub const UDP = 17;
+ /// xns idp
+ pub const IDP = 22;
+ /// IPv6 encapsulated in IP
+ pub const IPV6 = 41;
+ /// Routing header for IPv6
+ pub const ROUTING = 43;
+ /// Fragment header for IPv6
+ pub const FRAGMENT = 44;
+ /// rsvp
+ pub const RSVP = 46;
+ /// IPsec Encap. Sec. Payload
+ pub const ESP = 50;
+ /// IPsec Authentication Hdr.
+ pub const AH = 51;
+ /// ICMP for IPv6
+ pub const ICMPV6 = 58;
+ /// No next header for IPv6
+ pub const NONE = 59;
+ /// Destination options
+ pub const DSTOPTS = 60;
+ /// "hello" routing protocol
+ pub const HELLO = 63;
+ /// UNOFFICIAL net disk proto
+ pub const ND = 77;
+ /// ISO clnp
+ pub const EON = 80;
+ /// OSPF
+ pub const OSPF = 89;
+ /// PIM routing protocol
+ pub const PIM = 103;
+ /// Stream Control
+ pub const SCTP = 132;
+ /// raw IP packet
+ pub const RAW = 255;
+ /// Sockets Direct Protocol
+ pub const PROTO_SDP = 257;
+};
+
+pub const priority = enum(c_int) {
+ PROCESS = 0,
+ PGRP = 1,
+ USER = 2,
+ GROUP = 3,
+ SESSION = 4,
+ LWP = 5,
+ TASK = 6,
+ PROJECT = 7,
+ ZONE = 8,
+ CONTRACT = 9,
+};
+
+pub const rlimit_resource = enum(c_int) {
+ CPU = 0,
+ FSIZE = 1,
+ DATA = 2,
+ STACK = 3,
+ CORE = 4,
+ NOFILE = 5,
+ VMEM = 6,
+ _,
+
+ pub const AS: rlimit_resource = .VMEM;
+};
+
+pub const rlim_t = u64;
+
+pub const RLIM = struct {
+ /// No limit
+ pub const INFINITY: rlim_t = (1 << 63) - 3;
+ pub const SAVED_MAX: rlim_t = (1 << 63) - 2;
+ pub const SAVED_CUR: rlim_t = (1 << 63) - 1;
+};
+
+pub const rlimit = extern struct {
+ /// Soft limit
+ cur: rlim_t,
+ /// Hard limit
+ max: rlim_t,
+};
+
+pub const RUSAGE_SELF = 0;
+pub const RUSAGE_CHILDREN = -1;
+pub const RUSAGE_THREAD = 1;
+
+pub const rusage = extern struct {
+ utime: timeval,
+ stime: timeval,
+ maxrss: isize,
+ ixrss: isize,
+ idrss: isize,
+ isrss: isize,
+ minflt: isize,
+ majflt: isize,
+ nswap: isize,
+ inblock: isize,
+ oublock: isize,
+ msgsnd: isize,
+ msgrcv: isize,
+ nsignals: isize,
+ nvcsw: isize,
+ nivcsw: isize,
+};
+
+pub const SHUT = struct {
+ pub const RD = 0;
+ pub const WR = 1;
+ pub const RDWR = 2;
+};
+
+pub const pollfd = extern struct {
+ fd: fd_t,
+ events: i16,
+ revents: i16,
+};
+
+/// Testable events (may be specified in ::pollfd::events).
+pub const POLL = struct {
+ pub const IN = 0x0001;
+ pub const PRI = 0x0002;
+ pub const OUT = 0x0004;
+ pub const RDNORM = 0x0040;
+ pub const WRNORM = OUT;
+ pub const RDBAND = 0x0080;
+ pub const WRBAND = 0x0100;
+ /// Read-side hangup.
+ pub const RDHUP = 0x4000;
+
+ /// Non-testable events (may not be specified in events).
+ pub const ERR = 0x0008;
+ pub const HUP = 0x0010;
+ pub const NVAL = 0x0020;
+
+ /// Events to control `/dev/poll` (not specified in revents)
+ pub const REMOVE = 0x0800;
+ pub const ONESHOT = 0x1000;
+ pub const ET = 0x2000;
+};
+
+/// Extensions to the ELF auxiliary vector.
+pub const AT_SUN = struct {
+ /// effective user id
+ pub const UID = 2000;
+ /// real user id
+ pub const RUID = 2001;
+ /// effective group id
+ pub const GID = 2002;
+ /// real group id
+ pub const RGID = 2003;
+ /// dynamic linker's ELF header
+ pub const LDELF = 2004;
+ /// dynamic linker's section headers
+ pub const LDSHDR = 2005;
+ /// name of dynamic linker
+ pub const LDNAME = 2006;
+ /// large pagesize
+ pub const LPAGESZ = 2007;
+ /// platform name
+ pub const PLATFORM = 2008;
+ /// hints about hardware capabilities.
+ pub const HWCAP = 2009;
+ pub const HWCAP2 = 2023;
+ /// flush icache?
+ pub const IFLUSH = 2010;
+ /// cpu name
+ pub const CPU = 2011;
+ /// exec() path name in the auxv, null terminated.
+ pub const EXECNAME = 2014;
+ /// mmu module name
+ pub const MMU = 2015;
+ /// dynamic linkers data segment
+ pub const LDDATA = 2016;
+ /// AF_SUN_ flags passed from the kernel
+ pub const AUXFLAGS = 2017;
+ /// name of the emulation binary for the linker
+ pub const EMULATOR = 2018;
+ /// name of the brand library for the linker
+ pub const BRANDNAME = 2019;
+ /// vectors for brand modules.
+ pub const BRAND_AUX1 = 2020;
+ pub const BRAND_AUX2 = 2021;
+ pub const BRAND_AUX3 = 2022;
+ pub const BRAND_AUX4 = 2025;
+ pub const BRAND_NROOT = 2024;
+ /// vector for comm page.
+ pub const COMMPAGE = 2026;
+ /// information about the x86 FPU.
+ pub const FPTYPE = 2027;
+ pub const FPSIZE = 2028;
+};
+
+/// ELF auxiliary vector flags.
+pub const AF_SUN = struct {
+ /// tell ld.so.1 to run "secure" and ignore the environment.
+ pub const SETUGID = 0x00000001;
+ /// hardware capabilities can be verified against AT_SUN_HWCAP
+ pub const HWCAPVERIFY = 0x00000002;
+ pub const NOPLM = 0x00000004;
+};
+
+// TODO: Add sysconf numbers when the other OSs do.
+pub const _SC = struct {
+ pub const NPROCESSORS_ONLN = 15;
+};
+
+pub const procfs = struct {
+ pub const misc_header = extern struct {
+ size: u32,
+ @"type": enum(u32) {
+ Pathname,
+ Socketname,
+ Peersockname,
+ SockoptsBoolOpts,
+ SockoptLinger,
+ SockoptSndbuf,
+ SockoptRcvbuf,
+ SockoptIpNexthop,
+ SockoptIpv6Nexthop,
+ SockoptType,
+ SockoptTcpCongestion,
+ SockfiltersPriv = 14,
+ },
+ };
+
+ pub const fdinfo = extern struct {
+ fd: fd_t,
+ mode: mode_t,
+ ino: ino_t,
+ size: off_t,
+ offset: off_t,
+ uid: uid_t,
+ gid: gid_t,
+ dev_major: major_t,
+ dev_minor: minor_t,
+ special_major: major_t,
+ special_minor: minor_t,
+ fileflags: i32,
+ fdflags: i32,
+ locktype: i16,
+ lockpid: pid_t,
+ locksysid: i32,
+ peerpid: pid_t,
+ __filler: [25]c_int,
+ peername: [15:0]u8,
+ misc: [1]u8,
+ };
+};
+
+pub const SFD = struct {
+ pub const CLOEXEC = 0o2000000;
+ pub const NONBLOCK = 0o4000;
+};
+
+pub const signalfd_siginfo = extern struct {
+ signo: u32,
+ errno: i32,
+ code: i32,
+ pid: u32,
+ uid: uid_t,
+ fd: i32,
+ tid: u32, // unused
+ band: u32,
+ overrun: u32, // unused
+ trapno: u32,
+ status: i32,
+ int: i32, // unused
+ ptr: u64, // unused
+ utime: u64,
+ stime: u64,
+ addr: u64,
+ __pad: [48]u8,
+};
+
+pub const PORT_SOURCE = struct {
+ pub const AIO = 1;
+ pub const TIMER = 2;
+ pub const USER = 3;
+ pub const FD = 4;
+ pub const ALERT = 5;
+ pub const MQ = 6;
+ pub const FILE = 7;
+};
+
+pub const PORT_ALERT = struct {
+ pub const SET = 0x01;
+ pub const UPDATE = 0x02;
+};
+
+/// User watchable file events.
+pub const FILE_EVENT = struct {
+ pub const ACCESS = 0x00000001;
+ pub const MODIFIED = 0x00000002;
+ pub const ATTRIB = 0x00000004;
+ pub const DELETE = 0x00000010;
+ pub const RENAME_TO = 0x00000020;
+ pub const RENAME_FROM = 0x00000040;
+ pub const TRUNC = 0x00100000;
+ pub const NOFOLLOW = 0x10000000;
+ /// The filesystem holding the watched file was unmounted.
+ pub const UNMOUNTED = 0x20000000;
+ /// Some other file/filesystem got mounted over the watched file/directory.
+ pub const MOUNTEDOVER = 0x40000000;
+
+ pub fn isException(event: u32) bool {
+ return event & (UNMOUNTED | DELETE | RENAME_TO | RENAME_FROM | MOUNTEDOVER) > 0;
+ }
+};
+
+pub const port_event = extern struct {
+ events: u32,
+ /// Event source.
+ source: u16,
+ __pad: u16,
+ /// Source-specific object.
+ object: ?*c_void,
+ /// User cookie.
+ cookie: ?*c_void,
+};
+
+pub const port_notify = extern struct {
+ /// Bind request(s) to port.
+ port: u32,
+ /// User defined variable.
+ user: ?*c_void,
+};
+
+pub const file_obj = extern struct {
+ /// Access time.
+ atim: timespec,
+ /// Modification time
+ mtim: timespec,
+ /// Change time
+ ctim: timespec,
+ __pad: [3]usize,
+ name: [*:0]u8,
+};
+
+// struct ifreq is marked obsolete, with struct lifreq preferred for interface requests.
+// Here we alias lifreq to ifreq to avoid changing existing code in os and x.os.IPv6.
+pub const SIOCGLIFINDEX = IOWR('i', 133, lifreq);
+pub const SIOCGIFINDEX = SIOCGLIFINDEX;
+pub const MAX_HDW_LEN = 64;
+pub const IFNAMESIZE = 32;
+
+pub const lif_nd_req = extern struct {
+ addr: sockaddr.storage,
+ state_create: u8,
+ state_same_lla: u8,
+ state_diff_lla: u8,
+ hdw_len: i32,
+ flags: i32,
+ __pad: i32,
+ hdw_addr: [MAX_HDW_LEN]u8,
+};
+
+pub const lif_ifinfo_req = extern struct {
+ maxhops: u8,
+ reachtime: u32,
+ reachretrans: u32,
+ maxmtu: u32,
+};
+
+/// IP interface request. See if_tcp(7p) for more info.
+pub const lifreq = extern struct {
+ // Not actually in a union, but the stdlib expects one for ifreq
+ ifrn: extern union {
+ /// Interface name, e.g. "lo0", "en0".
+ name: [IFNAMESIZE]u8,
+ },
+ ru1: extern union {
+ /// For subnet/token etc.
+ addrlen: i32,
+ /// Driver's PPA (physical point of attachment).
+ ppa: u32,
+ },
+ /// One of the IFT types, e.g. IFT_ETHER.
+ @"type": u32,
+ ifru: extern union {
+ /// Address.
+ addr: sockaddr.storage,
+ /// Other end of a peer-to-peer link.
+ dstaddr: sockaddr.storage,
+ /// Broadcast address.
+ broadaddr: sockaddr.storage,
+ /// Address token.
+ token: sockaddr.storage,
+ /// Subnet prefix.
+ subnet: sockaddr.storage,
+ /// Interface index.
+ ivalue: i32,
+ /// Flags for SIOC?LIFFLAGS.
+ flags: u64,
+ /// Hop count metric
+ metric: i32,
+ /// Maximum transmission unit
+ mtu: u32,
+ // Technically [2]i32
+ muxid: packed struct { ip: i32, arp: i32 },
+ /// Neighbor reachability determination entries
+ nd_req: lif_nd_req,
+ /// Link info
+ ifinfo_req: lif_ifinfo_req,
+ /// Name of the multipath interface group
+ groupname: [IFNAMESIZE]u8,
+ binding: [IFNAMESIZE]u8,
+ /// Zone id associated with this interface.
+ zoneid: zoneid_t,
+ /// Duplicate address detection state. Either in progress or completed.
+ dadstate: u32,
+ },
+};
+
+pub const ifreq = lifreq;
+
+const IoCtlCommand = enum(u32) {
+ none = 0x20000000, // no parameters
+ write = 0x40000000, // copy out parameters
+ read = 0x80000000, // copy in parameters
+ read_write = 0xc0000000,
+};
+
+fn ioImpl(cmd: IoCtlCommand, io_type: u8, nr: u8, comptime IOT: type) i32 {
+ const size = @intCast(u32, @truncate(u8, @sizeOf(IOT))) << 16;
+ const t = @intCast(u32, io_type) << 8;
+ return @bitCast(i32, @enumToInt(cmd) | size | t | nr);
+}
+
+pub fn IO(io_type: u8, nr: u8) i32 {
+ return ioImpl(.none, io_type, nr, void);
+}
+
+pub fn IOR(io_type: u8, nr: u8, comptime IOT: type) i32 {
+ return ioImpl(.write, io_type, nr, IOT);
+}
+
+pub fn IOW(io_type: u8, nr: u8, comptime IOT: type) i32 {
+ return ioImpl(.read, io_type, nr, IOT);
+}
+
+pub fn IOWR(io_type: u8, nr: u8, comptime IOT: type) i32 {
+ return ioImpl(.read_write, io_type, nr, IOT);
+}
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 530a9b68a6..b6f4f4c516 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -195,7 +195,7 @@ pub const ChildProcess = struct {
};
var dead_fds: usize = 0;
- // We ask for ensureCapacity with this much extra space. This has more of an
+ // We ask for ensureTotalCapacity with this much extra space. This has more of an
// effect on small reads because once the reads start to get larger the amount
// of space an ArrayList will allocate grows exponentially.
const bump_amt = 512;
@@ -215,7 +215,7 @@ pub const ChildProcess = struct {
if (poll_fds[0].revents & os.POLL.IN != 0) {
// stdout is ready.
const new_capacity = std.math.min(stdout.items.len + bump_amt, max_output_bytes);
- try stdout.ensureCapacity(new_capacity);
+ try stdout.ensureTotalCapacity(new_capacity);
const buf = stdout.unusedCapacitySlice();
if (buf.len == 0) return error.StdoutStreamTooLong;
const nread = try os.read(poll_fds[0].fd, buf);
@@ -230,7 +230,7 @@ pub const ChildProcess = struct {
if (poll_fds[1].revents & os.POLL.IN != 0) {
// stderr is ready.
const new_capacity = std.math.min(stderr.items.len + bump_amt, max_output_bytes);
- try stderr.ensureCapacity(new_capacity);
+ try stderr.ensureTotalCapacity(new_capacity);
const buf = stderr.unusedCapacitySlice();
if (buf.len == 0) return error.StderrStreamTooLong;
const nread = try os.read(poll_fds[1].fd, buf);
@@ -276,7 +276,8 @@ pub const ChildProcess = struct {
// Windows Async IO requires an initial call to ReadFile before waiting on the handle
for ([_]u1{ 0, 1 }) |i| {
- try outs[i].ensureCapacity(bump_amt);
+ const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes);
+ try outs[i].ensureTotalCapacity(new_capacity);
const buf = outs[i].unusedCapacitySlice();
_ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
wait_objects[wait_object_count] = handles[i];
@@ -318,7 +319,7 @@ pub const ChildProcess = struct {
outs[i].items.len += read_bytes;
const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes);
- try outs[i].ensureCapacity(new_capacity);
+ try outs[i].ensureTotalCapacity(new_capacity);
const buf = outs[i].unusedCapacitySlice();
if (buf.len == 0) return if (i == 0) error.StdoutStreamTooLong else error.StderrStreamTooLong;
_ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]);
diff --git a/lib/std/coff.zig b/lib/std/coff.zig
index 6caf214728..c8b0a44044 100644
--- a/lib/std/coff.zig
+++ b/lib/std/coff.zig
@@ -277,7 +277,7 @@ pub const Coff = struct {
if (self.sections.items.len == self.coff_header.number_of_sections)
return;
- try self.sections.ensureCapacity(self.coff_header.number_of_sections);
+ try self.sections.ensureTotalCapacity(self.coff_header.number_of_sections);
const in = self.in_file.reader();
diff --git a/lib/std/compress/deflate.zig b/lib/std/compress/deflate.zig
index b443c2971f..2fef0b32bc 100644
--- a/lib/std/compress/deflate.zig
+++ b/lib/std/compress/deflate.zig
@@ -58,8 +58,10 @@ const Huffman = struct {
}
// All zero.
- if (self.count[0] == code_length.len)
+ if (self.count[0] == code_length.len) {
+ self.min_code_len = 0;
return;
+ }
var left: isize = 1;
for (self.count[1..]) |val| {
@@ -280,7 +282,7 @@ pub fn InflateStream(comptime ReaderType: type) type {
return self.bits & mask;
}
fn readBits(self: *Self, bits: usize) !u32 {
- const val = self.peekBits(bits);
+ const val = try self.peekBits(bits);
self.discardBits(bits);
return val;
}
@@ -487,6 +489,8 @@ pub fn InflateStream(comptime ReaderType: type) type {
// We can't read PREFIX_LUT_BITS as we don't want to read past the
// deflate stream end, use an incremental approach instead.
var code_len = h.min_code_len;
+ if (code_len == 0)
+ return error.OutOfCodes;
while (true) {
_ = try self.peekBits(code_len);
// Small optimization win, use as many bits as possible in the
@@ -658,11 +662,27 @@ test "lengths overflow" {
// f dy hlit hdist hclen 16 17 18 0 (18) x138 (18) x138 (18) x39 (16) x6
// 1 10 11101 11101 0000 010 010 010 010 (11) 1111111 (11) 1111111 (11) 0011100 (01) 11
const stream = [_]u8{ 0b11101101, 0b00011101, 0b00100100, 0b11101001, 0b11111111, 0b11111111, 0b00111001, 0b00001110 };
-
- const reader = std.io.fixedBufferStream(&stream).reader();
- var window: [0x8000]u8 = undefined;
- var inflate = inflateStream(reader, &window);
-
- var buf: [1]u8 = undefined;
- try std.testing.expectError(error.InvalidLength, inflate.read(&buf));
+ try std.testing.expectError(error.InvalidLength, testInflate(stream[0..]));
+}
+
+test "empty distance alphabet" {
+ // dynamic block with empty distance alphabet is valid if end of data symbol is used immediately
+ // f dy hlit hdist hclen 16 17 18 0 8 7 9 6 10 5 11 4 12 3 13 2 14 1 15 (18) x128 (18) x128 (1) ( 0) (256)
+ // 1 10 00000 00000 1111 000 000 010 010 000 000 000 000 000 000 000 000 000 000 000 000 000 001 000 (11) 1110101 (11) 1110101 (0) (10) (0)
+ const stream = [_]u8{ 0b00000101, 0b11100000, 0b00000001, 0b00001001, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00010000, 0b01011100, 0b10111111, 0b00101110 };
+ try testInflate(stream[0..]);
+}
+
+test "inflateStream fuzzing" {
+ // see https://github.com/ziglang/zig/issues/9842
+ try std.testing.expectError(error.EndOfStream, testInflate("\x950000"));
+ try std.testing.expectError(error.OutOfCodes, testInflate("\x950\x00\x0000000"));
+}
+
+fn testInflate(data: []const u8) !void {
+ var window: [0x8000]u8 = undefined;
+ const reader = std.io.fixedBufferStream(data).reader();
+ var inflate = inflateStream(reader, &window);
+ var inflated = try inflate.reader().readAllAlloc(std.testing.allocator, std.math.maxInt(usize));
+ defer std.testing.allocator.free(inflated);
}
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index d9ea4b172a..65e7ecb680 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -1,4 +1,4 @@
-// zig run benchmark.zig --release-fast --zig-lib-dir ..
+// zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig
const std = @import("../std.zig");
const builtin = std.builtin;
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index 6a2950645a..2e3f3f3c7b 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -398,10 +398,10 @@ pub const Blake3 = struct {
return Blake3.init_internal(context_key_words, DERIVE_KEY_MATERIAL);
}
- pub fn hash(in: []const u8, out: []u8, options: Options) void {
- var hasher = Blake3.init(options);
- hasher.update(in);
- hasher.final(out);
+ pub fn hash(b: []const u8, out: []u8, options: Options) void {
+ var d = Blake3.init(options);
+ d.update(b);
+ d.final(out);
}
fn pushCv(self: *Blake3, cv: [8]u32) void {
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 8517703566..2194886c0a 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -228,9 +228,32 @@ pub fn assert(ok: bool) void {
pub fn panic(comptime format: []const u8, args: anytype) noreturn {
@setCold(true);
- // TODO: remove conditional once wasi / LLVM defines __builtin_return_address
- const first_trace_addr = if (native_os == .wasi) null else @returnAddress();
- panicExtra(null, first_trace_addr, format, args);
+
+ panicExtra(null, format, args);
+}
+
+/// `panicExtra` is useful when you want to print out an `@errorReturnTrace`
+/// and also print out some values.
+pub fn panicExtra(
+ trace: ?*builtin.StackTrace,
+ comptime format: []const u8,
+ args: anytype,
+) noreturn {
+ @setCold(true);
+
+ const size = 0x1000;
+ const trunc_msg = "(msg truncated)";
+ var buf: [size + trunc_msg.len]u8 = undefined;
+ // a minor annoyance with this is that it will result in the NoSpaceLeft
+ // error being part of the @panic stack trace (but that error should
+ // only happen rarely)
+ const msg = std.fmt.bufPrint(buf[0..size], format, args) catch |err| switch (err) {
+ std.fmt.BufPrintError.NoSpaceLeft => blk: {
+ std.mem.copy(u8, buf[size..], trunc_msg);
+ break :blk &buf;
+ },
+ };
+ builtin.panic(msg, trace);
}
/// Non-zero whenever the program triggered a panic.
@@ -244,7 +267,9 @@ var panic_mutex = std.Thread.Mutex{};
/// This is used to catch and handle panics triggered by the panic handler.
threadlocal var panic_stage: usize = 0;
-pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: anytype) noreturn {
+// `panicImpl` could be useful in implementing a custom panic handler which
+// calls the default handler (on supported platforms)
+pub fn panicImpl(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, msg: []const u8) noreturn {
@setCold(true);
if (enable_segfault_handler) {
@@ -271,7 +296,7 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c
const current_thread_id = std.Thread.getCurrentId();
stderr.print("thread {} panic: ", .{current_thread_id}) catch os.abort();
}
- stderr.print(format ++ "\n", args) catch os.abort();
+ stderr.print("{s}\n", .{msg}) catch os.abort();
if (trace) |t| {
dumpStackTrace(t.*);
}
@@ -654,6 +679,7 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
.openbsd,
.macos,
.windows,
+ .solaris,
=> return DebugInfo.init(allocator),
else => return error.UnsupportedDebugInfo,
}
@@ -1420,7 +1446,7 @@ pub const ModuleDebugInfo = switch (native_os) {
};
}
},
- .linux, .netbsd, .freebsd, .dragonfly, .openbsd, .haiku => struct {
+ .linux, .netbsd, .freebsd, .dragonfly, .openbsd, .haiku, .solaris => struct {
base_address: usize,
dwarf: DW.DwarfInfo,
mapped_memory: []const u8,
@@ -1468,7 +1494,7 @@ fn getDebugInfoAllocator() *mem.Allocator {
/// Whether or not the current target can print useful debug information when a segfault occurs.
pub const have_segfault_handling_support = switch (native_os) {
- .linux, .netbsd => true,
+ .linux, .netbsd, .solaris => true,
.windows => true,
.freebsd, .openbsd => @hasDecl(os.system, "ucontext_t"),
else => false,
@@ -1535,6 +1561,7 @@ fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_v
.freebsd => @ptrToInt(info.addr),
.netbsd => @ptrToInt(info.info.reason.fault.addr),
.openbsd => @ptrToInt(info.data.fault.addr),
+ .solaris => @ptrToInt(info.reason.fault.addr),
else => unreachable,
};
@@ -1559,13 +1586,13 @@ fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_v
.x86_64 => {
const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
const ip = switch (native_os) {
- .linux, .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]),
+ .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]),
.freebsd => @intCast(usize, ctx.mcontext.rip),
.openbsd => @intCast(usize, ctx.sc_rip),
else => unreachable,
};
const bp = switch (native_os) {
- .linux, .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]),
+ .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]),
.openbsd => @intCast(usize, ctx.sc_rbp),
.freebsd => @intCast(usize, ctx.mcontext.rbp),
else => unreachable,
@@ -1624,9 +1651,14 @@ fn handleSegfaultWindowsExtra(info: *windows.EXCEPTION_POINTERS, comptime msg: u
os.abort();
} else {
switch (msg) {
- 0 => panicExtra(null, exception_address, format.?, .{}),
- 1 => panicExtra(null, exception_address, "Segmentation fault at address 0x{x}", .{info.ExceptionRecord.ExceptionInformation[1]}),
- 2 => panicExtra(null, exception_address, "Illegal Instruction", .{}),
+ 0 => panicImpl(null, exception_address, format.?),
+ 1 => {
+ const format_item = "Segmentation fault at address 0x{x}";
+ var buf: [format_item.len + 64]u8 = undefined; // 64 is arbitrary, but sufficiently large
+ const to_print = std.fmt.bufPrint(buf[0..buf.len], format_item, .{info.ExceptionRecord.ExceptionInformation[1]}) catch unreachable;
+ panicImpl(null, exception_address, to_print);
+ },
+ 2 => panicImpl(null, exception_address, "Illegal Instruction"),
else => unreachable,
}
}
diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index fb4cefed0a..91c66503f1 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -14,7 +14,7 @@ const max = std.math.max;
pub const DynLib = switch (builtin.os.tag) {
.linux => if (builtin.link_libc) DlDynlib else ElfDynLib,
.windows => WindowsDynLib,
- .macos, .tvos, .watchos, .ios, .freebsd, .netbsd, .openbsd, .dragonfly => DlDynlib,
+ .macos, .tvos, .watchos, .ios, .freebsd, .netbsd, .openbsd, .dragonfly, .solaris => DlDynlib,
else => void,
};
diff --git a/lib/std/elf.zig b/lib/std/elf.zig
index 69f04868e8..52d03c6c01 100644
--- a/lib/std/elf.zig
+++ b/lib/std/elf.zig
@@ -1591,3 +1591,100 @@ pub const PF_MASKOS = 0x0ff00000;
/// Bits for processor-specific semantics.
pub const PF_MASKPROC = 0xf0000000;
+
+// Special section indexes used in Elf{32,64}_Sym.
+pub const SHN_UNDEF = 0;
+pub const SHN_LORESERVE = 0xff00;
+pub const SHN_LOPROC = 0xff00;
+pub const SHN_HIPROC = 0xff1f;
+pub const SHN_LIVEPATCH = 0xff20;
+pub const SHN_ABS = 0xfff1;
+pub const SHN_COMMON = 0xfff2;
+pub const SHN_HIRESERVE = 0xffff;
+
+/// AMD x86-64 relocations.
+/// No reloc
+pub const R_X86_64_NONE = 0;
+/// Direct 64 bit
+pub const R_X86_64_64 = 1;
+/// PC relative 32 bit signed
+pub const R_X86_64_PC32 = 2;
+/// 32 bit GOT entry
+pub const R_X86_64_GOT32 = 3;
+/// 32 bit PLT address
+pub const R_X86_64_PLT32 = 4;
+/// Copy symbol at runtime
+pub const R_X86_64_COPY = 5;
+/// Create GOT entry
+pub const R_X86_64_GLOB_DAT = 6;
+/// Create PLT entry
+pub const R_X86_64_JUMP_SLOT = 7;
+/// Adjust by program base
+pub const R_X86_64_RELATIVE = 8;
+/// 32 bit signed PC relative offset to GOT
+pub const R_X86_64_GOTPCREL = 9;
+/// Direct 32 bit zero extended
+pub const R_X86_64_32 = 10;
+/// Direct 32 bit sign extended
+pub const R_X86_64_32S = 11;
+/// Direct 16 bit zero extended
+pub const R_X86_64_16 = 12;
+/// 16 bit sign extended pc relative
+pub const R_X86_64_PC16 = 13;
+/// Direct 8 bit sign extended
+pub const R_X86_64_8 = 14;
+/// 8 bit sign extended pc relative
+pub const R_X86_64_PC8 = 15;
+/// ID of module containing symbol
+pub const R_X86_64_DTPMOD64 = 16;
+/// Offset in module's TLS block
+pub const R_X86_64_DTPOFF64 = 17;
+/// Offset in initial TLS block
+pub const R_X86_64_TPOFF64 = 18;
+/// 32 bit signed PC relative offset to two GOT entries for GD symbol
+pub const R_X86_64_TLSGD = 19;
+/// 32 bit signed PC relative offset to two GOT entries for LD symbol
+pub const R_X86_64_TLSLD = 20;
+/// Offset in TLS block
+pub const R_X86_64_DTPOFF32 = 21;
+/// 32 bit signed PC relative offset to GOT entry for IE symbol
+pub const R_X86_64_GOTTPOFF = 22;
+/// Offset in initial TLS block
+pub const R_X86_64_TPOFF32 = 23;
+/// PC relative 64 bit
+pub const R_X86_64_PC64 = 24;
+/// 64 bit offset to GOT
+pub const R_X86_64_GOTOFF64 = 25;
+/// 32 bit signed pc relative offset to GOT
+pub const R_X86_64_GOTPC32 = 26;
+/// 64 bit GOT entry offset
+pub const R_X86_64_GOT64 = 27;
+/// 64 bit PC relative offset to GOT entry
+pub const R_X86_64_GOTPCREL64 = 28;
+/// 64 bit PC relative offset to GOT
+pub const R_X86_64_GOTPC64 = 29;
+/// Like GOT64, says PLT entry needed
+pub const R_X86_64_GOTPLT64 = 30;
+/// 64-bit GOT relative offset to PLT entry
+pub const R_X86_64_PLTOFF64 = 31;
+/// Size of symbol plus 32-bit addend
+pub const R_X86_64_SIZE32 = 32;
+/// Size of symbol plus 64-bit addend
+pub const R_X86_64_SIZE64 = 33;
+/// GOT offset for TLS descriptor
+pub const R_X86_64_GOTPC32_TLSDESC = 34;
+/// Marker for call through TLS descriptor
+pub const R_X86_64_TLSDESC_CALL = 35;
+/// TLS descriptor
+pub const R_X86_64_TLSDESC = 36;
+/// Adjust indirectly by program base
+pub const R_X86_64_IRELATIVE = 37;
+/// 64-bit adjust by program base
+pub const R_X86_64_RELATIVE64 = 38;
+/// 39 Reserved was R_X86_64_PC32_BND
+/// 40 Reserved was R_X86_64_PLT32_BND
+/// Load from 32 bit signed pc relative offset to GOT entry without REX prefix, relaxable
+pub const R_X86_64_GOTPCRELX = 41;
+/// Load from 32 bit signed PC relative offset to GOT entry with REX prefix, relaxable
+pub const R_X86_64_REX_GOTPCRELX = 42;
+pub const R_X86_64_NUM = 43;
diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig
index 93ef50b861..e4b2cd8be9 100644
--- a/lib/std/fifo.zig
+++ b/lib/std/fifo.zig
@@ -119,8 +119,11 @@ pub fn LinearFifo(
}
}
+ /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`.
+ pub const ensureCapacity = ensureTotalCapacity;
+
/// Ensure that the buffer can fit at least `size` items
- pub fn ensureCapacity(self: *Self, size: usize) !void {
+ pub fn ensureTotalCapacity(self: *Self, size: usize) !void {
if (self.buf.len >= size) return;
if (buffer_type == .Dynamic) {
self.realign();
@@ -135,7 +138,7 @@ pub fn LinearFifo(
pub fn ensureUnusedCapacity(self: *Self, size: usize) error{OutOfMemory}!void {
if (self.writableLength() >= size) return;
- return try self.ensureCapacity(math.add(usize, self.count, size) catch return error.OutOfMemory);
+ return try self.ensureTotalCapacity(math.add(usize, self.count, size) catch return error.OutOfMemory);
}
/// Returns number of items currently in fifo
@@ -471,7 +474,7 @@ test "LinearFifo(u8, .Dynamic)" {
}
{
- try fifo.ensureCapacity(1);
+ try fifo.ensureTotalCapacity(1);
var in_fbs = std.io.fixedBufferStream("pump test");
var out_buf: [50]u8 = undefined;
var out_fbs = std.io.fixedBufferStream(&out_buf);
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 66eb1d6642..7aa5dd3976 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -35,7 +35,7 @@ pub const Watch = @import("fs/watch.zig").Watch;
/// fit into a UTF-8 encoded array of this length.
/// The byte count includes room for a null sentinel byte.
pub const MAX_PATH_BYTES = switch (builtin.os.tag) {
- .linux, .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .haiku => os.PATH_MAX,
+ .linux, .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .haiku, .solaris => os.PATH_MAX,
// Each UTF-16LE character may be expanded to 3 UTF-8 bytes.
// If it would require 4 UTF-8 bytes, then there would be a surrogate
// pair in the UTF-16LE, and we (over)account 3 bytes for it that way.
@@ -298,10 +298,10 @@ pub const Dir = struct {
pub const Kind = File.Kind;
};
- const IteratorError = error{AccessDenied} || os.UnexpectedError;
+ const IteratorError = error{ AccessDenied, SystemResources } || os.UnexpectedError;
pub const Iterator = switch (builtin.os.tag) {
- .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd => struct {
+ .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => struct {
dir: Dir,
seek: i64,
buf: [8192]u8, // TODO align(@alignOf(os.system.dirent)),
@@ -318,6 +318,7 @@ pub const Dir = struct {
switch (builtin.os.tag) {
.macos, .ios => return self.nextDarwin(),
.freebsd, .netbsd, .dragonfly, .openbsd => return self.nextBsd(),
+ .solaris => return self.nextSolaris(),
else => @compileError("unimplemented"),
}
}
@@ -372,6 +373,60 @@ pub const Dir = struct {
}
}
+ fn nextSolaris(self: *Self) !?Entry {
+ start_over: while (true) {
+ if (self.index >= self.end_index) {
+ const rc = os.system.getdents(self.dir.fd, &self.buf, self.buf.len);
+ switch (os.errno(rc)) {
+ .SUCCESS => {},
+ .BADF => unreachable, // Dir is invalid or was opened without iteration ability
+ .FAULT => unreachable,
+ .NOTDIR => unreachable,
+ .INVAL => unreachable,
+ else => |err| return os.unexpectedErrno(err),
+ }
+ if (rc == 0) return null;
+ self.index = 0;
+ self.end_index = @intCast(usize, rc);
+ }
+ const entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]);
+ const next_index = self.index + entry.reclen();
+ self.index = next_index;
+
+ const name = mem.spanZ(@ptrCast([*:0]u8, &entry.d_name));
+ if (mem.eql(u8, name, ".") or mem.eql(u8, name, ".."))
+ continue :start_over;
+
+ // Solaris dirent doesn't expose d_type, so we have to call stat to get it.
+ const stat_info = os.fstatat(
+ self.dir.fd,
+ name,
+ os.AT.SYMLINK_NOFOLLOW,
+ ) catch |err| switch (err) {
+ error.NameTooLong => unreachable,
+ error.SymLinkLoop => unreachable,
+ error.FileNotFound => unreachable, // lost the race
+ else => |e| return e,
+ };
+ const entry_kind = switch (stat_info.mode & os.S.IFMT) {
+ os.S.IFIFO => Entry.Kind.NamedPipe,
+ os.S.IFCHR => Entry.Kind.CharacterDevice,
+ os.S.IFDIR => Entry.Kind.Directory,
+ os.S.IFBLK => Entry.Kind.BlockDevice,
+ os.S.IFREG => Entry.Kind.File,
+ os.S.IFLNK => Entry.Kind.SymLink,
+ os.S.IFSOCK => Entry.Kind.UnixDomainSocket,
+ os.S.IFDOOR => Entry.Kind.Door,
+ os.S.IFPORT => Entry.Kind.EventPort,
+ else => Entry.Kind.Unknown,
+ };
+ return Entry{
+ .name = name,
+ .kind = entry_kind,
+ };
+ }
+ }
+
fn nextBsd(self: *Self) !?Entry {
start_over: while (true) {
if (self.index >= self.end_index) {
@@ -704,6 +759,7 @@ pub const Dir = struct {
.netbsd,
.dragonfly,
.openbsd,
+ .solaris,
=> return Iterator{
.dir = self,
.seek = 0,
@@ -1556,7 +1612,7 @@ pub const Dir = struct {
error.AccessDenied => |e| switch (builtin.os.tag) {
// non-Linux POSIX systems return EPERM when trying to delete a directory, so
// we need to handle that case specifically and translate the error
- .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd => {
+ .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => {
// Don't follow symlinks to match unlinkat (which acts on symlinks rather than follows them)
const fstat = os.fstatatZ(self.fd, sub_path_c, os.AT.SYMLINK_NOFOLLOW) catch return e;
const is_dir = fstat.mode & os.S.IFMT == os.S.IFDIR;
@@ -2441,6 +2497,7 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 {
}
switch (builtin.os.tag) {
.linux => return os.readlinkZ("/proc/self/exe", out_buffer),
+ .solaris => return os.readlinkZ("/proc/self/path/a.out", out_buffer),
.freebsd, .dragonfly => {
var mib = [4]c_int{ os.CTL.KERN, os.KERN.PROC, os.KERN.PROC_PATHNAME, -1 };
var out_len: usize = out_buffer.len;
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index d08b743919..a71c9ae0d2 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -41,6 +41,8 @@ pub const File = struct {
File,
UnixDomainSocket,
Whiteout,
+ Door,
+ EventPort,
Unknown,
};
@@ -320,28 +322,40 @@ pub const File = struct {
const atime = st.atime();
const mtime = st.mtime();
const ctime = st.ctime();
+ const kind: Kind = if (builtin.os.tag == .wasi and !builtin.link_libc) switch (st.filetype) {
+ .BLOCK_DEVICE => Kind.BlockDevice,
+ .CHARACTER_DEVICE => Kind.CharacterDevice,
+ .DIRECTORY => Kind.Directory,
+ .SYMBOLIC_LINK => Kind.SymLink,
+ .REGULAR_FILE => Kind.File,
+ .SOCKET_STREAM, .SOCKET_DGRAM => Kind.UnixDomainSocket,
+ else => Kind.Unknown,
+ } else blk: {
+ const m = st.mode & os.S.IFMT;
+ switch (m) {
+ os.S.IFBLK => break :blk Kind.BlockDevice,
+ os.S.IFCHR => break :blk Kind.CharacterDevice,
+ os.S.IFDIR => break :blk Kind.Directory,
+ os.S.IFIFO => break :blk Kind.NamedPipe,
+ os.S.IFLNK => break :blk Kind.SymLink,
+ os.S.IFREG => break :blk Kind.File,
+ os.S.IFSOCK => break :blk Kind.UnixDomainSocket,
+ else => {},
+ }
+ if (builtin.os.tag == .solaris) switch (m) {
+ os.S.IFDOOR => break :blk Kind.Door,
+ os.S.IFPORT => break :blk Kind.EventPort,
+ else => {},
+ };
+
+ break :blk .Unknown;
+ };
+
return Stat{
.inode = st.ino,
.size = @bitCast(u64, st.size),
.mode = st.mode,
- .kind = if (builtin.os.tag == .wasi and !builtin.link_libc) switch (st.filetype) {
- .BLOCK_DEVICE => Kind.BlockDevice,
- .CHARACTER_DEVICE => Kind.CharacterDevice,
- .DIRECTORY => Kind.Directory,
- .SYMBOLIC_LINK => Kind.SymLink,
- .REGULAR_FILE => Kind.File,
- .SOCKET_STREAM, .SOCKET_DGRAM => Kind.UnixDomainSocket,
- else => Kind.Unknown,
- } else switch (st.mode & os.S.IFMT) {
- os.S.IFBLK => Kind.BlockDevice,
- os.S.IFCHR => Kind.CharacterDevice,
- os.S.IFDIR => Kind.Directory,
- os.S.IFIFO => Kind.NamedPipe,
- os.S.IFLNK => Kind.SymLink,
- os.S.IFREG => Kind.File,
- os.S.IFSOCK => Kind.UnixDomainSocket,
- else => Kind.Unknown,
- },
+ .kind = kind,
.atime = @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec,
.mtime = @as(i128, mtime.tv_sec) * std.time.ns_per_s + mtime.tv_nsec,
.ctime = @as(i128, ctime.tv_sec) * std.time.ns_per_s + ctime.tv_nsec,
@@ -852,6 +866,7 @@ pub const File = struct {
pub const LockError = error{
SystemResources,
+ FileLocksNotSupported,
} || os.UnexpectedError;
/// Blocks when an incompatible lock is held by another process.
@@ -914,6 +929,7 @@ pub const File = struct {
return os.flock(file.handle, os.LOCK.UN) catch |err| switch (err) {
error.WouldBlock => unreachable, // unlocking can't block
error.SystemResources => unreachable, // We are deallocating resources.
+ error.FileLocksNotSupported => unreachable, // We already got the lock.
error.Unexpected => unreachable, // Resource deallocation must succeed.
};
}
diff --git a/lib/std/fs/get_app_data_dir.zig b/lib/std/fs/get_app_data_dir.zig
index fed1c85f39..491b6fe824 100644
--- a/lib/std/fs/get_app_data_dir.zig
+++ b/lib/std/fs/get_app_data_dir.zig
@@ -44,7 +44,7 @@ pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataD
};
return fs.path.join(allocator, &[_][]const u8{ home_dir, "Library", "Application Support", appname });
},
- .linux, .freebsd, .netbsd, .dragonfly, .openbsd => {
+ .linux, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => {
const home_dir = os.getenv("HOME") orelse {
// TODO look in /etc/passwd
return error.AppDataDirUnavailable;
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 0ce416454b..a6e1e440fd 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -188,7 +188,7 @@ fn contains(entries: *const std.ArrayList(Dir.Entry), el: Dir.Entry) bool {
test "Dir.realpath smoke test" {
switch (builtin.os.tag) {
- .linux, .windows, .macos, .ios, .watchos, .tvos => {},
+ .linux, .windows, .macos, .ios, .watchos, .tvos, .solaris => {},
else => return error.SkipZigTest,
}
diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig
index b4952a9260..d9865bcdd8 100644
--- a/lib/std/hash/benchmark.zig
+++ b/lib/std/hash/benchmark.zig
@@ -1,4 +1,4 @@
-// zig run benchmark.zig --release-fast --zig-lib-dir ..
+// zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig
const builtin = std.builtin;
const std = @import("std");
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index 644429f871..a75178d428 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -1568,11 +1568,11 @@ test "std.hash_map basic usage" {
try expectEqual(total, sum);
}
-test "std.hash_map ensureCapacity" {
+test "std.hash_map ensureTotalCapacity" {
var map = AutoHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
- try map.ensureCapacity(20);
+ try map.ensureTotalCapacity(20);
const initial_capacity = map.capacity();
try testing.expect(initial_capacity >= 20);
var i: i32 = 0;
@@ -1583,13 +1583,13 @@ test "std.hash_map ensureCapacity" {
try testing.expect(initial_capacity == map.capacity());
}
-test "std.hash_map ensureCapacity with tombstones" {
+test "std.hash_map ensureUnusedCapacity with tombstones" {
var map = AutoHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
var i: i32 = 0;
while (i < 100) : (i += 1) {
- try map.ensureCapacity(@intCast(u32, map.count() + 1));
+ try map.ensureUnusedCapacity(1);
map.putAssumeCapacity(i, i);
// Remove to create tombstones that still count as load in the hashmap.
_ = map.remove(i);
@@ -1669,7 +1669,7 @@ test "std.hash_map clone" {
try expectEqual(b.get(3).?, 3);
}
-test "std.hash_map ensureCapacity with existing elements" {
+test "std.hash_map ensureTotalCapacity with existing elements" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();
@@ -1677,16 +1677,16 @@ test "std.hash_map ensureCapacity with existing elements" {
try expectEqual(map.count(), 1);
try expectEqual(map.capacity(), @TypeOf(map).Unmanaged.minimal_capacity);
- try map.ensureCapacity(65);
+ try map.ensureTotalCapacity(65);
try expectEqual(map.count(), 1);
try expectEqual(map.capacity(), 128);
}
-test "std.hash_map ensureCapacity satisfies max load factor" {
+test "std.hash_map ensureTotalCapacity satisfies max load factor" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();
- try map.ensureCapacity(127);
+ try map.ensureTotalCapacity(127);
try expectEqual(map.capacity(), 256);
}
@@ -1870,7 +1870,7 @@ test "std.hash_map putAssumeCapacity" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();
- try map.ensureCapacity(20);
+ try map.ensureTotalCapacity(20);
var i: u32 = 0;
while (i < 20) : (i += 1) {
map.putAssumeCapacityNoClobber(i, i);
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index d51c349be2..3dd6b9db3d 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -746,10 +746,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const new_aligned_size = math.max(len, ptr_align);
if (new_aligned_size > largest_bucket_object_size) {
- try self.large_allocations.ensureCapacity(
- self.backing_allocator,
- self.large_allocations.count() + 1,
- );
+ try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig
index 3e8eb8ec24..f8e7650bc4 100644
--- a/lib/std/io/reader.zig
+++ b/lib/std/io/reader.zig
@@ -61,7 +61,7 @@ pub fn Reader(
array_list: *std.ArrayListAligned(u8, alignment),
max_append_size: usize,
) !void {
- try array_list.ensureCapacity(math.min(max_append_size, 4096));
+ try array_list.ensureTotalCapacity(math.min(max_append_size, 4096));
const original_len = array_list.items.len;
var start_index: usize = original_len;
while (true) {
@@ -81,7 +81,7 @@ pub fn Reader(
}
// This will trigger ArrayList to expand superlinearly at whatever its growth rate is.
- try array_list.ensureCapacity(start_index + 1);
+ try array_list.ensureTotalCapacity(start_index + 1);
}
}
diff --git a/lib/std/json.zig b/lib/std/json.zig
index e8f9d9d395..acae9e7d1f 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -1838,7 +1838,7 @@ fn parseInternal(
else => {},
}
- try arraylist.ensureCapacity(arraylist.items.len + 1);
+ try arraylist.ensureUnusedCapacity(1);
const v = try parseInternal(ptrInfo.child, tok, tokens, options);
arraylist.appendAssumeCapacity(v);
}
diff --git a/lib/std/leb128.zig b/lib/std/leb128.zig
index c908b56f69..470bdc85ed 100644
--- a/lib/std/leb128.zig
+++ b/lib/std/leb128.zig
@@ -76,6 +76,14 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
+ } else {
+ // If we don't overflow and this is the last byte and the number being decoded
+ // is negative, check that the remaining bits are 1
+ if ((byte & 0x80 == 0) and (@bitCast(S, temp) < 0)) {
+ const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
+ const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
+ if (remaining_bits != -1) return error.Overflow;
+ }
}
value |= temp;
@@ -215,6 +223,8 @@ test "deserialize signed LEB128" {
try testing.expectError(error.Overflow, test_read_ileb128(i32, "\x80\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_ileb128(i8, "\xff\x7e"));
+ try testing.expectError(error.Overflow, test_read_ileb128(i32, "\x80\x80\x80\x80\x08"));
+ try testing.expectError(error.Overflow, test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01"));
// Decode SLEB128
try testing.expect((try test_read_ileb128(i64, "\x00")) == 0);
@@ -233,8 +243,8 @@ test "deserialize signed LEB128" {
try testing.expect((try test_read_ileb128(i8, "\xff\x7f")) == -1);
try testing.expect((try test_read_ileb128(i16, "\xff\xff\x7f")) == -1);
try testing.expect((try test_read_ileb128(i32, "\xff\xff\xff\xff\x7f")) == -1);
- try testing.expect((try test_read_ileb128(i32, "\x80\x80\x80\x80\x08")) == -0x80000000);
- try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01")) == @bitCast(i64, @intCast(u64, 0x8000000000000000)));
+ try testing.expect((try test_read_ileb128(i32, "\x80\x80\x80\x80\x78")) == -0x80000000);
+ try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @bitCast(i64, @intCast(u64, 0x8000000000000000)));
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x40")) == -0x4000000000000000);
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == -0x8000000000000000);
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 9efd4d5752..b7ea284004 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -552,47 +552,78 @@ pub const Mutable = struct {
r.positive = a.positive;
}
- /// r = a | b
+ /// r = a | b under 2s complement semantics.
/// r may alias with a or b.
///
/// a and b are zero-extended to the longer of a or b.
///
/// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`.
pub fn bitOr(r: *Mutable, a: Const, b: Const) void {
- if (a.limbs.len > b.limbs.len) {
- llor(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]);
- r.len = a.limbs.len;
+ // Trivial cases, llsignedor does not support zero.
+ if (a.eqZero()) {
+ r.copy(b);
+ return;
+ } else if (b.eqZero()) {
+ r.copy(a);
+ return;
+ }
+
+ if (a.limbs.len >= b.limbs.len) {
+ r.positive = llsignedor(r.limbs, a.limbs, a.positive, b.limbs, b.positive);
+ r.normalize(if (b.positive) a.limbs.len else b.limbs.len);
} else {
- llor(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]);
- r.len = b.limbs.len;
+ r.positive = llsignedor(r.limbs, b.limbs, b.positive, a.limbs, a.positive);
+ r.normalize(if (a.positive) b.limbs.len else a.limbs.len);
}
}
- /// r = a & b
+ /// r = a & b under 2s complement semantics.
/// r may alias with a or b.
///
- /// Asserts that r has enough limbs to store the result. Upper bound is `math.min(a.limbs.len, b.limbs.len)`.
+ /// Asserts that r has enough limbs to store the result.
+ /// If a or b is positive, the upper bound is `math.min(a.limbs.len, b.limbs.len)`.
+ /// If a and b are negative, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`.
pub fn bitAnd(r: *Mutable, a: Const, b: Const) void {
- if (a.limbs.len > b.limbs.len) {
- lland(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]);
- r.normalize(b.limbs.len);
+ // Trivial cases, llsignedand does not support zero.
+ if (a.eqZero()) {
+ r.copy(a);
+ return;
+ } else if (b.eqZero()) {
+ r.copy(b);
+ return;
+ }
+
+ if (a.limbs.len >= b.limbs.len) {
+ r.positive = llsignedand(r.limbs, a.limbs, a.positive, b.limbs, b.positive);
+ r.normalize(if (a.positive or b.positive) b.limbs.len else a.limbs.len + 1);
} else {
- lland(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]);
- r.normalize(a.limbs.len);
+ r.positive = llsignedand(r.limbs, b.limbs, b.positive, a.limbs, a.positive);
+ r.normalize(if (a.positive or b.positive) a.limbs.len else b.limbs.len + 1);
}
}
- /// r = a ^ b
+ /// r = a ^ b under 2s complement semantics.
/// r may alias with a or b.
///
- /// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`.
+ /// Asserts that r has enough limbs to store the result. If a and b share the same signedness, the
+ /// upper bound is `math.max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative
+ /// but not both, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`.
pub fn bitXor(r: *Mutable, a: Const, b: Const) void {
+ // Trivial cases, because llsignedxor does not support negative zero.
+ if (a.eqZero()) {
+ r.copy(b);
+ return;
+ } else if (b.eqZero()) {
+ r.copy(a);
+ return;
+ }
+
if (a.limbs.len > b.limbs.len) {
- llxor(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]);
- r.normalize(a.limbs.len);
+ r.positive = llsignedxor(r.limbs, a.limbs, a.positive, b.limbs, b.positive);
+ r.normalize(a.limbs.len + @boolToInt(a.positive != b.positive));
} else {
- llxor(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]);
- r.normalize(b.limbs.len);
+ r.positive = llsignedxor(r.limbs, b.limbs, b.positive, a.limbs, a.positive);
+ r.normalize(b.limbs.len + @boolToInt(a.positive != b.positive));
}
}
@@ -1834,7 +1865,11 @@ pub const Managed = struct {
/// r = a & b
pub fn bitAnd(r: *Managed, a: Managed, b: Managed) !void {
- try r.ensureCapacity(math.min(a.len(), b.len()));
+ const cap = if (a.isPositive() or b.isPositive())
+ math.min(a.len(), b.len())
+ else
+ math.max(a.len(), b.len()) + 1;
+ try r.ensureCapacity(cap);
var m = r.toMutable();
m.bitAnd(a.toConst(), b.toConst());
r.setMetadata(m.positive, m.len);
@@ -1842,7 +1877,9 @@ pub const Managed = struct {
/// r = a ^ b
pub fn bitXor(r: *Managed, a: Managed, b: Managed) !void {
- try r.ensureCapacity(math.max(a.len(), b.len()));
+ var cap = math.max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive());
+ try r.ensureCapacity(cap);
+
var m = r.toMutable();
m.bitXor(a.toConst(), b.toConst());
r.setMetadata(m.positive, m.len);
@@ -2221,42 +2258,299 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
}
}
-fn llor(r: []Limb, a: []const Limb, b: []const Limb) void {
+// r = a | b with 2s complement semantics.
+// r may alias.
+// a and b must not be 0.
+// Returns `true` when the result is positive.
+// When b is positive, r requires at least `a.len` limbs of storage.
+// When b is negative, r requires at least `b.len` limbs of storage.
+fn llsignedor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool {
@setRuntimeSafety(debug_safety);
assert(r.len >= a.len);
assert(a.len >= b.len);
- var i: usize = 0;
- while (i < b.len) : (i += 1) {
- r[i] = a[i] | b[i];
- }
- while (i < a.len) : (i += 1) {
- r[i] = a[i];
+ if (a_positive and b_positive) {
+ // Trivial case, result is positive.
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] | b[i];
+ }
+ while (i < a.len) : (i += 1) {
+ r[i] = a[i];
+ }
+
+ return true;
+ } else if (!a_positive and b_positive) {
+ // Result is negative.
+ // r = (--a) | b
+ // = ~(-a - 1) | b
+ // = ~(-a - 1) | ~~b
+ // = ~((-a - 1) & ~b)
+ // = -(((-a - 1) & ~b) + 1)
+
+ var i: usize = 0;
+ var a_borrow: u1 = 1;
+ var r_carry: u1 = 1;
+
+ while (i < b.len) : (i += 1) {
+ var a_limb: Limb = undefined;
+ a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
+
+ r[i] = a_limb & ~b[i];
+ r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ }
+
+ // In order for r_carry to be nonzero at this point, ~b[i] would need to be
+ // all ones, which would require b[i] to be zero. This cannot be when
+ // b is normalized, so there cannot be a carry here.
+ // Also, x & ~b can only clear bits, so (x & ~b) <= x, meaning (-a - 1) + 1 never overflows.
+ assert(r_carry == 0);
+
+ // With b = 0, we get (-a - 1) & ~0 = -a - 1.
+ // Note, if a_borrow is zero we do not need to compute anything for
+ // the higher limbs so we can early return here.
+ while (i < a.len and a_borrow == 1) : (i += 1) {
+ a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &r[i]));
+ }
+
+ assert(a_borrow == 0); // a was 0.
+
+ return false;
+ } else if (a_positive and !b_positive) {
+ // Result is negative.
+ // r = a | (--b)
+ // = a | ~(-b - 1)
+ // = ~~a | ~(-b - 1)
+ // = ~(~a & (-b - 1))
+ // = -((~a & (-b - 1)) + 1)
+
+ var i: usize = 0;
+ var b_borrow: u1 = 1;
+ var r_carry: u1 = 1;
+
+ while (i < b.len) : (i += 1) {
+ var b_limb: Limb = undefined;
+ b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
+
+ r[i] = ~a[i] & b_limb;
+ r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ }
+
+ // b is at least 1, so this should never underflow.
+ assert(b_borrow == 0); // b was 0
+
+ // x & ~a can only clear bits, so (x & ~a) <= x, meaning (-b - 1) + 1 never overflows.
+ assert(r_carry == 0);
+
+ // With b = 0 and b_borrow = 0, we get ~a & (-0 - 0) = ~a & 0 = 0.
+        // Omit setting the upper limbs, just deal with those when calling llsignedor.
+
+ return false;
+ } else {
+ // Result is negative.
+ // r = (--a) | (--b)
+ // = ~(-a - 1) | ~(-b - 1)
+ // = ~((-a - 1) & (-b - 1))
+ // = -(~(~((-a - 1) & (-b - 1))) + 1)
+ // = -((-a - 1) & (-b - 1) + 1)
+
+ var i: usize = 0;
+ var a_borrow: u1 = 1;
+ var b_borrow: u1 = 1;
+ var r_carry: u1 = 1;
+
+ while (i < b.len) : (i += 1) {
+ var a_limb: Limb = undefined;
+ a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
+
+ var b_limb: Limb = undefined;
+ b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
+
+ r[i] = a_limb & b_limb;
+ r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ }
+
+ // b is at least 1, so this should never underflow.
+ assert(b_borrow == 0); // b was 0
+
+ // Can never overflow because in order for b_limb to be maxInt(Limb),
+ // b_borrow would need to equal 1.
+
+ // x & y can only clear bits, meaning x & y <= x and x & y <= y. This implies that
+ // for x = a - 1 and y = b - 1, the +1 term would never cause an overflow.
+ assert(r_carry == 0);
+
+ // With b = 0 and b_borrow = 0 we get (-a - 1) & (-0 - 0) = (-a - 1) & 0 = 0.
+        // Omit setting the upper limbs, just deal with those when calling llsignedor.
+ return false;
}
}
-fn lland(r: []Limb, a: []const Limb, b: []const Limb) void {
+// r = a & b with 2s complement semantics.
+// r may alias.
+// a and b must not be 0.
+// Returns `true` when the result is positive.
+// When either or both of a and b are positive, r requires at least `b.len` limbs of storage.
+// When both a and b are negative, r requires at least `a.len + 1` limbs of storage.
+fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool {
@setRuntimeSafety(debug_safety);
- assert(r.len >= b.len);
- assert(a.len >= b.len);
-
- var i: usize = 0;
- while (i < b.len) : (i += 1) {
- r[i] = a[i] & b[i];
- }
-}
-
-fn llxor(r: []Limb, a: []const Limb, b: []const Limb) void {
+ assert(a.len != 0 and b.len != 0);
assert(r.len >= a.len);
assert(a.len >= b.len);
+ if (a_positive and b_positive) {
+ // Trivial case, result is positive.
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] & b[i];
+ }
+
+        // With b = 0 we have a & 0 = 0, so the upper limbs are zero.
+ // Omit setting them here and simply discard them whenever
+ // llsignedand is called.
+
+ return true;
+ } else if (!a_positive and b_positive) {
+ // Result is positive.
+ // r = (--a) & b
+ // = ~(-a - 1) & b
+
+ var i: usize = 0;
+ var a_borrow: u1 = 1;
+
+ while (i < b.len) : (i += 1) {
+ var a_limb: Limb = undefined;
+ a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
+ r[i] = ~a_limb & b[i];
+ }
+
+        // With b = 0 we have ~(-a - 1) & 0 = 0, so the upper limbs are zero.
+ // Omit setting them here and simply discard them whenever
+ // llsignedand is called.
+
+ return true;
+ } else if (a_positive and !b_positive) {
+ // Result is positive.
+ // r = a & (--b)
+ // = a & ~(-b - 1)
+
+ var i: usize = 0;
+ var b_borrow: u1 = 1;
+
+ while (i < b.len) : (i += 1) {
+ var a_limb: Limb = undefined;
+ b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &a_limb));
+ r[i] = a[i] & ~a_limb;
+ }
+
+ assert(b_borrow == 0); // b was 0
+
+        // With b = 0 and b_borrow = 0 we have a & ~(-0 - 0) = a & ~0 = a, so
+        // the upper limbs equal those of a. NOTE(review): they are never
+        // written to r here — confirm callers preserve a's limbs beyond b.len.
+
+ return true;
+ } else {
+ // Result is negative.
+ // r = (--a) & (--b)
+ // = ~(-a - 1) & ~(-b - 1)
+ // = ~((-a - 1) | (-b - 1))
+ // = -(((-a - 1) | (-b - 1)) + 1)
+
+ var i: usize = 0;
+ var a_borrow: u1 = 1;
+ var b_borrow: u1 = 1;
+ var r_carry: u1 = 1;
+
+ while (i < b.len) : (i += 1) {
+ var a_limb: Limb = undefined;
+ a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
+
+ var b_limb: Limb = undefined;
+ b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
+
+ r[i] = a_limb | b_limb;
+ r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ }
+
+ // b is at least 1, so this should never underflow.
+ assert(b_borrow == 0); // b was 0
+
+ // With b = 0 and b_borrow = 0 we get (-a - 1) | (-0 - 0) = (-a - 1) | 0 = -a - 1.
+ while (i < a.len) : (i += 1) {
+ a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &r[i]));
+ r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
+ }
+
+ assert(a_borrow == 0); // a was 0.
+
+ // The final addition can overflow here, so we need to keep that in mind.
+ r[i] = r_carry;
+
+ return false;
+ }
+}
+
+// r = a ^ b with 2s complement semantics.
+// r may alias.
+// a and b must not be -0.
+// Returns `true` when the result is positive.
+// If a and b have the same sign, r requires at least `max(a.len, b.len)` limbs of storage.
+// Otherwise, r requires at least `max(a.len, b.len) + 1` limbs.
+fn llsignedxor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool {
+ @setRuntimeSafety(debug_safety);
+ assert(a.len != 0 and b.len != 0);
+ assert(r.len >= a.len);
+ assert(a.len >= b.len);
+
+ // If a and b are positive, the result is positive and r = a ^ b.
+ // If a negative, b positive, result is negative and we have
+ // r = --(--a ^ b)
+ // = --(~(-a - 1) ^ b)
+ // = -(~(~(-a - 1) ^ b) + 1)
+ // = -(((-a - 1) ^ b) + 1)
+ // Same if a is positive and b is negative, sides switched.
+ // If both a and b are negative, the result is positive and we have
+ // r = (--a) ^ (--b)
+ // = ~(-a - 1) ^ ~(-b - 1)
+ // = (-a - 1) ^ (-b - 1)
+ // These operations can be made more generic as follows:
+ // - If a is negative, subtract 1 from |a| before the xor.
+ // - If b is negative, subtract 1 from |b| before the xor.
+ // - if the result is supposed to be negative, add 1.
+
var i: usize = 0;
+ var a_borrow = @boolToInt(!a_positive);
+ var b_borrow = @boolToInt(!b_positive);
+ var r_carry = @boolToInt(a_positive != b_positive);
+
while (i < b.len) : (i += 1) {
- r[i] = a[i] ^ b[i];
+ var a_limb: Limb = undefined;
+ a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &a_limb));
+
+ var b_limb: Limb = undefined;
+ b_borrow = @boolToInt(@subWithOverflow(Limb, b[i], b_borrow, &b_limb));
+
+ r[i] = a_limb ^ b_limb;
+ r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
}
+
while (i < a.len) : (i += 1) {
- r[i] = a[i];
+ a_borrow = @boolToInt(@subWithOverflow(Limb, a[i], a_borrow, &r[i]));
+ r_carry = @boolToInt(@addWithOverflow(Limb, r[i], r_carry, &r[i]));
}
+
+ // If both inputs don't share the same sign, an extra limb is required.
+ if (a_positive != b_positive) {
+ r[i] = r_carry;
+ } else {
+ assert(r_carry == 0);
+ }
+
+ assert(a_borrow == 0);
+ assert(b_borrow == 0);
+
+ return a_positive == b_positive;
}
/// r MUST NOT alias x.
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index 757f994f7e..7b5e14b808 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -5,6 +5,7 @@ const Managed = std.math.big.int.Managed;
const Mutable = std.math.big.int.Mutable;
const Limb = std.math.big.Limb;
const DoubleLimb = std.math.big.DoubleLimb;
+const SignedDoubleLimb = std.math.big.SignedDoubleLimb;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
@@ -1364,6 +1365,83 @@ test "big.int bitwise and multi-limb" {
try testing.expect((try a.to(u128)) == 0);
}
+test "big.int bitwise and negative-positive simple" {
+ var a = try Managed.initSet(testing.allocator, -0xffffffff11111111);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, 0xeeeeeeee22222222);
+ defer b.deinit();
+
+ try a.bitAnd(a, b);
+
+ try testing.expect((try a.to(u64)) == 0x22222222);
+}
+
+test "big.int bitwise and negative-positive multi-limb" {
+ var a = try Managed.initSet(testing.allocator, -maxInt(Limb) - 1);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, maxInt(Limb));
+ defer b.deinit();
+
+ try a.bitAnd(a, b);
+
+ try testing.expect(a.eqZero());
+}
+
+test "big.int bitwise and positive-negative simple" {
+ var a = try Managed.initSet(testing.allocator, 0xffffffff11111111);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -0xeeeeeeee22222222);
+ defer b.deinit();
+
+ try a.bitAnd(a, b);
+
+ try testing.expect((try a.to(u64)) == 0x1111111111111110);
+}
+
+test "big.int bitwise and positive-negative multi-limb" {
+ var a = try Managed.initSet(testing.allocator, maxInt(Limb));
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -maxInt(Limb) - 1);
+ defer b.deinit();
+
+ try a.bitAnd(a, b);
+
+ try testing.expect(a.eqZero());
+}
+
+test "big.int bitwise and negative-negative simple" {
+ var a = try Managed.initSet(testing.allocator, -0xffffffff11111111);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -0xeeeeeeee22222222);
+ defer b.deinit();
+
+ try a.bitAnd(a, b);
+
+ try testing.expect((try a.to(i128)) == -0xffffffff33333332);
+}
+
+test "big.int bitwise and negative-negative multi-limb" {
+ var a = try Managed.initSet(testing.allocator, -maxInt(Limb) - 1);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -maxInt(Limb) - 2);
+ defer b.deinit();
+
+ try a.bitAnd(a, b);
+
+ try testing.expect((try a.to(i128)) == -maxInt(Limb) * 2 - 2);
+}
+
+test "big.int bitwise and negative overflow" {
+ var a = try Managed.initSet(testing.allocator, -maxInt(Limb));
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -2);
+ defer b.deinit();
+
+ try a.bitAnd(a, b);
+
+ try testing.expect((try a.to(SignedDoubleLimb)) == -maxInt(Limb) - 1);
+}
+
test "big.int bitwise xor simple" {
var a = try Managed.initSet(testing.allocator, 0xffffffff11111111);
defer a.deinit();
@@ -1386,6 +1464,72 @@ test "big.int bitwise xor multi-limb" {
try testing.expect((try a.to(DoubleLimb)) == (maxInt(Limb) + 1) ^ maxInt(Limb));
}
+test "big.int bitwise xor single negative simple" {
+ var a = try Managed.initSet(testing.allocator, 0x6b03e381328a3154);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -0x45fd3acef9191fad);
+ defer b.deinit();
+
+ try a.bitXor(a, b);
+
+ try testing.expect((try a.to(i64)) == -0x2efed94fcb932ef9);
+}
+
+test "big.int bitwise xor single negative zero" {
+ var a = try Managed.initSet(testing.allocator, 0);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -0);
+ defer b.deinit();
+
+ try a.bitXor(a, b);
+
+ try testing.expect(a.eqZero());
+}
+
+test "big.int bitwise xor single negative multi-limb" {
+ var a = try Managed.initSet(testing.allocator, -0x9849c6e7a10d66d0e4260d4846254c32);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, 0xf2194e7d1c855272a997fcde16f6d5a8);
+ defer b.deinit();
+
+ try a.bitXor(a, b);
+
+ try testing.expect((try a.to(i128)) == -0x6a50889abd8834a24db1f19650d3999a);
+}
+
+test "big.int bitwise xor single negative overflow" {
+ var a = try Managed.initSet(testing.allocator, maxInt(Limb));
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -1);
+ defer b.deinit();
+
+ try a.bitXor(a, b);
+
+ try testing.expect((try a.to(SignedDoubleLimb)) == -(maxInt(Limb) + 1));
+}
+
+test "big.int bitwise xor double negative simple" {
+ var a = try Managed.initSet(testing.allocator, -0x8e48bd5f755ef1f3);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -0x4dd4fa576f3046ac);
+ defer b.deinit();
+
+ try a.bitXor(a, b);
+
+ try testing.expect((try a.to(u64)) == 0xc39c47081a6eb759);
+}
+
+test "big.int bitwise xor double negative multi-limb" {
+ var a = try Managed.initSet(testing.allocator, -0x684e5da8f500ec8ca7204c33ccc51c9c);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -0xcb07736a7b62289c78d967c3985eebeb);
+ defer b.deinit();
+
+ try a.bitXor(a, b);
+
+ try testing.expect((try a.to(u128)) == 0xa3492ec28e62c410dff92bf0549bf771);
+}
+
test "big.int bitwise or simple" {
var a = try Managed.initSet(testing.allocator, 0xffffffff11111111);
defer a.deinit();
@@ -1409,6 +1553,72 @@ test "big.int bitwise or multi-limb" {
try testing.expect((try a.to(DoubleLimb)) == (maxInt(Limb) + 1) + maxInt(Limb));
}
+test "big.int bitwise or negative-positive simple" {
+ var a = try Managed.initSet(testing.allocator, -0xffffffff11111111);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, 0xeeeeeeee22222222);
+ defer b.deinit();
+
+ try a.bitOr(a, b);
+
+ try testing.expect((try a.to(i64)) == -0x1111111111111111);
+}
+
+test "big.int bitwise or negative-positive multi-limb" {
+ var a = try Managed.initSet(testing.allocator, -maxInt(Limb) - 1);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, 1);
+ defer b.deinit();
+
+ try a.bitOr(a, b);
+
+ try testing.expect((try a.to(SignedDoubleLimb)) == -maxInt(Limb));
+}
+
+test "big.int bitwise or positive-negative simple" {
+ var a = try Managed.initSet(testing.allocator, 0xffffffff11111111);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -0xeeeeeeee22222222);
+ defer b.deinit();
+
+ try a.bitOr(a, b);
+
+ try testing.expect((try a.to(i64)) == -0x22222221);
+}
+
+test "big.int bitwise or positive-negative multi-limb" {
+ var a = try Managed.initSet(testing.allocator, maxInt(Limb) + 1);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -1);
+ defer b.deinit();
+
+ try a.bitOr(a, b);
+
+ try testing.expect((try a.to(SignedDoubleLimb)) == -1);
+}
+
+test "big.int bitwise or negative-negative simple" {
+ var a = try Managed.initSet(testing.allocator, -0xffffffff11111111);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -0xeeeeeeee22222222);
+ defer b.deinit();
+
+ try a.bitOr(a, b);
+
+ try testing.expect((try a.to(i128)) == -0xeeeeeeee00000001);
+}
+
+test "big.int bitwise or negative-negative multi-limb" {
+ var a = try Managed.initSet(testing.allocator, -maxInt(Limb) - 1);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, -maxInt(Limb));
+ defer b.deinit();
+
+ try a.bitOr(a, b);
+
+ try testing.expect((try a.to(SignedDoubleLimb)) == -maxInt(Limb));
+}
+
test "big.int var args" {
var a = try Managed.initSet(testing.allocator, 5);
defer a.deinit();
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 9eaf185119..b3d0755adc 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -147,6 +147,46 @@ test "mem.Allocator basics" {
try testing.expectError(error.OutOfMemory, failAllocator.allocSentinel(u8, 1, 0));
}
+test "Allocator.resize" {
+ const primitiveIntTypes = .{
+ i8,
+ u8,
+ i16,
+ u16,
+ i32,
+ u32,
+ i64,
+ u64,
+ i128,
+ u128,
+ isize,
+ usize,
+ };
+ inline for (primitiveIntTypes) |T| {
+ var values = try testing.allocator.alloc(T, 100);
+ defer testing.allocator.free(values);
+
+ for (values) |*v, i| v.* = @intCast(T, i);
+ values = try testing.allocator.resize(values, values.len + 10);
+ try testing.expect(values.len == 110);
+ }
+
+ const primitiveFloatTypes = .{
+ f16,
+ f32,
+ f64,
+ f128,
+ };
+ inline for (primitiveFloatTypes) |T| {
+ var values = try testing.allocator.alloc(T, 100);
+ defer testing.allocator.free(values);
+
+ for (values) |*v, i| v.* = @intToFloat(T, i);
+ values = try testing.allocator.resize(values, values.len + 10);
+ try testing.expect(values.len == 110);
+ }
+}
+
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
/// If the slices overlap, dest.ptr must be <= src.ptr.
@@ -2472,6 +2512,7 @@ fn CopyPtrAttrs(comptime source: type, comptime size: std.builtin.TypeInfo.Point
.is_volatile = info.is_volatile,
.is_allowzero = info.is_allowzero,
.alignment = info.alignment,
+ .address_space = info.address_space,
.child = child,
.sentinel = null,
},
@@ -2960,6 +3001,7 @@ fn AlignedSlice(comptime AttributeSource: type, comptime new_alignment: u29) typ
.is_volatile = info.is_volatile,
.is_allowzero = info.is_allowzero,
.alignment = new_alignment,
+ .address_space = info.address_space,
.child = info.child,
.sentinel = null,
},
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index 9ea7aeb90e..a76c27f5a0 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -313,7 +313,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
assert(rc == new_byte_count);
- const new_byte_slice = old_mem.ptr[0..new_byte_count];
+ const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
return mem.bytesAsSlice(T, new_byte_slice);
}
diff --git a/lib/std/meta.zig b/lib/std/meta.zig
index a1bfacf597..62866bb711 100644
--- a/lib/std/meta.zig
+++ b/lib/std/meta.zig
@@ -235,6 +235,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
.is_const = info.is_const,
.is_volatile = info.is_volatile,
.alignment = info.alignment,
+ .address_space = info.address_space,
.child = @Type(.{
.Array = .{
.len = array_info.len,
@@ -254,6 +255,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
.is_const = info.is_const,
.is_volatile = info.is_volatile,
.alignment = info.alignment,
+ .address_space = info.address_space,
.child = info.child,
.is_allowzero = info.is_allowzero,
.sentinel = sentinel_val,
@@ -271,6 +273,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
.is_const = ptr_info.is_const,
.is_volatile = ptr_info.is_volatile,
.alignment = ptr_info.alignment,
+ .address_space = ptr_info.address_space,
.child = ptr_info.child,
.is_allowzero = ptr_info.is_allowzero,
.sentinel = sentinel_val,
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 693937b399..243a7413fa 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -189,7 +189,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// sets the given index to the specified element. May reallocate
/// and invalidate iterators.
pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) void {
- try self.ensureCapacity(gpa, self.len + 1);
+ try self.ensureUnusedCapacity(gpa, 1);
self.insertAssumeCapacity(index, elem);
}
@@ -376,7 +376,7 @@ pub fn MultiArrayList(comptime S: type) type {
pub fn clone(self: Self, gpa: *Allocator) !Self {
var result = Self{};
errdefer result.deinit(gpa);
- try result.ensureCapacity(gpa, self.len);
+ try result.ensureTotalCapacity(gpa, self.len);
result.len = self.len;
const self_slice = self.slice();
const result_slice = result.slice();
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 1f1a020028..94df1532d6 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -157,7 +157,7 @@ pub const Address = extern union {
unreachable;
}
- try std.fmt.format(out_stream, "{s}", .{&self.un.path});
+ try std.fmt.format(out_stream, "{s}", .{std.mem.sliceTo(&self.un.path, 0)});
},
else => unreachable,
}
diff --git a/lib/std/net/test.zig b/lib/std/net/test.zig
index 2e63fc9329..16a43fa421 100644
--- a/lib/std/net/test.zig
+++ b/lib/std/net/test.zig
@@ -90,6 +90,19 @@ test "parse and render IPv4 addresses" {
try testing.expectError(error.NonCanonical, net.Address.parseIp4("127.01.0.1", 0));
}
+test "parse and render UNIX addresses" {
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
+ if (!net.has_unix_sockets) return error.SkipZigTest;
+
+ var buffer: [14]u8 = undefined;
+ const addr = net.Address.initUnix("/tmp/testpath") catch unreachable;
+ const fmt_addr = std.fmt.bufPrint(buffer[0..], "{}", .{addr}) catch unreachable;
+ try std.testing.expectEqualSlices(u8, "/tmp/testpath", fmt_addr);
+
+ const too_long = [_]u8{'a'} ** (addr.un.path.len + 1);
+ try testing.expectError(error.NameTooLong, net.Address.initUnix(too_long[0..]));
+}
+
test "resolve DNS" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 5a4828286d..a4fad9bc20 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -31,6 +31,7 @@ pub const freebsd = std.c;
pub const haiku = std.c;
pub const netbsd = std.c;
pub const openbsd = std.c;
+pub const solaris = std.c;
pub const linux = @import("os/linux.zig");
pub const uefi = @import("os/uefi.zig");
pub const wasi = @import("os/wasi.zig");
@@ -64,8 +65,10 @@ else switch (builtin.os.tag) {
};
pub const AF = system.AF;
+pub const AF_SUN = system.AF_SUN;
pub const ARCH = system.ARCH;
pub const AT = system.AT;
+pub const AT_SUN = system.AT_SUN;
pub const CLOCK = system.CLOCK;
pub const CPU_COUNT = system.CPU_COUNT;
pub const CTL = system.CTL;
@@ -101,6 +104,7 @@ pub const RR = system.RR;
pub const S = system.S;
pub const SA = system.SA;
pub const SC = system.SC;
+pub const _SC = system._SC;
pub const SEEK = system.SEEK;
pub const SHUT = system.SHUT;
pub const SIG = system.SIG;
@@ -143,6 +147,10 @@ pub const off_t = system.off_t;
pub const oflags_t = system.oflags_t;
pub const pid_t = system.pid_t;
pub const pollfd = system.pollfd;
+pub const port_t = system.port_t;
+pub const port_event = system.port_event;
+pub const port_notify = system.port_notify;
+pub const file_obj = system.file_obj;
pub const rights_t = system.rights_t;
pub const rlim_t = system.rlim_t;
pub const rlimit = system.rlimit;
@@ -2038,6 +2046,7 @@ pub fn unlinkatZ(dirfd: fd_t, file_path_c: [*:0]const u8, flags: u32) UnlinkatEr
.NOTDIR => return error.NotDir,
.NOMEM => return error.SystemResources,
.ROFS => return error.ReadOnlyFileSystem,
+ .EXIST => return error.DirNotEmpty,
.NOTEMPTY => return error.DirNotEmpty,
.INVAL => unreachable, // invalid flags, or pathname has . as last component
@@ -4492,8 +4501,12 @@ pub const FlockError = error{
/// The kernel ran out of memory for allocating file locks
SystemResources,
+
+ /// The underlying filesystem does not support file locks
+ FileLocksNotSupported,
} || UnexpectedError;
+/// Depending on the operating system `flock` may or may not interact with `fcntl` locks made by other processes.
pub fn flock(fd: fd_t, operation: i32) FlockError!void {
while (true) {
const rc = system.flock(fd, operation);
@@ -4504,6 +4517,7 @@ pub fn flock(fd: fd_t, operation: i32) FlockError!void {
.INVAL => unreachable, // invalid parameters
.NOLCK => return error.SystemResources,
.AGAIN => return error.WouldBlock, // TODO: integrate with async instead of just returning an error
+ .OPNOTSUPP => return error.FileLocksNotSupported,
else => |err| return unexpectedErrno(err),
}
}
@@ -4667,6 +4681,16 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
};
return target;
},
+ .solaris => {
+ var procfs_buf: ["/proc/self/path/-2147483648".len:0]u8 = undefined;
+ const proc_path = std.fmt.bufPrintZ(procfs_buf[0..], "/proc/self/path/{d}", .{fd}) catch unreachable;
+
+ const target = readlinkZ(proc_path, out_buffer) catch |err| switch (err) {
+ error.UnsupportedReparsePointType => unreachable,
+ else => |e| return e,
+ };
+ return target;
+ },
else => @compileError("querying for canonical path of a handle is unsupported on this host"),
}
}
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index df7166a4ff..f3e4495220 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -89,6 +89,7 @@ pub const user_desc = arch_bits.user_desc;
pub const tls = @import("linux/tls.zig");
pub const pie = @import("linux/start_pie.zig");
pub const BPF = @import("linux/bpf.zig");
+pub const IOCTL = @import("linux/ioctl.zig");
pub const MAP = struct {
pub usingnamespace arch_bits.MAP;
@@ -2585,18 +2586,18 @@ pub const T = struct {
pub const IOCGSID = 0x5429;
pub const IOCGRS485 = 0x542E;
pub const IOCSRS485 = 0x542F;
- pub const IOCGPTN = 0x80045430;
- pub const IOCSPTLCK = 0x40045431;
- pub const IOCGDEV = 0x80045432;
+ pub const IOCGPTN = IOCTL.IOR('T', 0x30, c_uint);
+ pub const IOCSPTLCK = IOCTL.IOW('T', 0x31, c_int);
+ pub const IOCGDEV = IOCTL.IOR('T', 0x32, c_uint);
pub const CGETX = 0x5432;
pub const CSETX = 0x5433;
pub const CSETXF = 0x5434;
pub const CSETXW = 0x5435;
- pub const IOCSIG = 0x40045436;
+ pub const IOCSIG = IOCTL.IOW('T', 0x36, c_int);
pub const IOCVHANGUP = 0x5437;
- pub const IOCGPKT = 0x80045438;
- pub const IOCGPTLCK = 0x80045439;
- pub const IOCGEXCL = 0x80045440;
+ pub const IOCGPKT = IOCTL.IOR('T', 0x38, c_int);
+ pub const IOCGPTLCK = IOCTL.IOR('T', 0x39, c_int);
+ pub const IOCGEXCL = IOCTL.IOR('T', 0x40, c_int);
};
pub const EPOLL = struct {
@@ -2923,6 +2924,7 @@ pub const sockaddr = extern struct {
family: sa_family_t,
data: [14]u8,
+ pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
/// IPv4 socket address
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
index a98ce009a9..9653e2a08c 100644
--- a/lib/std/os/linux/io_uring.zig
+++ b/lib/std/os/linux/io_uring.zig
@@ -404,6 +404,25 @@ pub const IO_Uring = struct {
return sqe;
}
+ /// Queues (but does not submit) an SQE to perform an IORING_OP_READ_FIXED.
+ /// The `buffer` provided must be registered with the kernel by calling `register_buffers` first.
+ /// The `buffer_index` must be the same as its index in the array provided to `register_buffers`.
+ ///
+ /// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
+ pub fn read_fixed(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ buffer: *os.iovec,
+ offset: u64,
+ buffer_index: u16,
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_read_fixed(sqe, fd, buffer, offset, buffer_index);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
/// Queues (but does not submit) an SQE to perform a `pwritev()`.
/// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
/// For example, if you want to do a `pwritev2()` then set `rw_flags` on the returned SQE.
@@ -421,6 +440,25 @@ pub const IO_Uring = struct {
return sqe;
}
+ /// Queues (but does not submit) an SQE to perform an IORING_OP_WRITE_FIXED.
+ /// The `buffer` provided must be registered with the kernel by calling `register_buffers` first.
+ /// The `buffer_index` must be the same as its index in the array provided to `register_buffers`.
+ ///
+ /// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
+ pub fn write_fixed(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ buffer: *os.iovec,
+ offset: u64,
+ buffer_index: u16,
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_write_fixed(sqe, fd, buffer, offset, buffer_index);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
/// Queues (but does not submit) an SQE to perform an `accept4(2)` on a socket.
/// Returns a pointer to the SQE.
pub fn accept(
@@ -674,6 +712,29 @@ pub const IO_Uring = struct {
try handle_registration_result(res);
}
+ /// Registers an array of buffers for use with `read_fixed` and `write_fixed`.
+ pub fn register_buffers(self: *IO_Uring, buffers: []const os.iovec) !void {
+ assert(self.fd >= 0);
+ const res = linux.io_uring_register(
+ self.fd,
+ .REGISTER_BUFFERS,
+ buffers.ptr,
+ @intCast(u32, buffers.len),
+ );
+ try handle_registration_result(res);
+ }
+
+ /// Unregister the registered buffers.
+ pub fn unregister_buffers(self: *IO_Uring) !void {
+ assert(self.fd >= 0);
+ const res = linux.io_uring_register(self.fd, .UNREGISTER_BUFFERS, null, 0);
+ switch (linux.getErrno(res)) {
+ .SUCCESS => {},
+ .NXIO => return error.BuffersNotRegistered,
+ else => |errno| return os.unexpectedErrno(errno),
+ }
+ }
+
fn handle_registration_result(res: usize) !void {
switch (linux.getErrno(res)) {
.SUCCESS => {},
@@ -905,6 +966,16 @@ pub fn io_uring_prep_writev(
io_uring_prep_rw(.WRITEV, sqe, fd, @ptrToInt(iovecs.ptr), iovecs.len, offset);
}
+pub fn io_uring_prep_read_fixed(sqe: *io_uring_sqe, fd: os.fd_t, buffer: *os.iovec, offset: u64, buffer_index: u16) void {
+ io_uring_prep_rw(.READ_FIXED, sqe, fd, @ptrToInt(buffer.iov_base), buffer.iov_len, offset);
+ sqe.buf_index = buffer_index;
+}
+
+pub fn io_uring_prep_write_fixed(sqe: *io_uring_sqe, fd: os.fd_t, buffer: *os.iovec, offset: u64, buffer_index: u16) void {
+ io_uring_prep_rw(.WRITE_FIXED, sqe, fd, @ptrToInt(buffer.iov_base), buffer.iov_len, offset);
+ sqe.buf_index = buffer_index;
+}
+
pub fn io_uring_prep_accept(
sqe: *io_uring_sqe,
fd: os.fd_t,
@@ -1282,6 +1353,63 @@ test "write/read" {
try testing.expectEqualSlices(u8, buffer_write[0..], buffer_read[0..]);
}
+test "write_fixed/read_fixed" {
+ if (builtin.os.tag != .linux) return error.SkipZigTest;
+
+ var ring = IO_Uring.init(2, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const path = "test_io_uring_write_read_fixed";
+ const file = try std.fs.cwd().createFile(path, .{ .read = true, .truncate = true });
+ defer file.close();
+ defer std.fs.cwd().deleteFile(path) catch {};
+ const fd = file.handle;
+
+ var raw_buffers: [2][11]u8 = undefined;
+ // First buffer will be written to the file.
+ std.mem.set(u8, &raw_buffers[0], 'z');
+ std.mem.copy(u8, &raw_buffers[0], "foobar");
+
+ var buffers = [2]os.iovec{
+ .{ .iov_base = &raw_buffers[0], .iov_len = raw_buffers[0].len },
+ .{ .iov_base = &raw_buffers[1], .iov_len = raw_buffers[1].len },
+ };
+ try ring.register_buffers(&buffers);
+
+ const sqe_write = try ring.write_fixed(0x45454545, fd, &buffers[0], 3, 0);
+ try testing.expectEqual(linux.IORING_OP.WRITE_FIXED, sqe_write.opcode);
+ try testing.expectEqual(@as(u64, 3), sqe_write.off);
+ sqe_write.flags |= linux.IOSQE_IO_LINK;
+
+ const sqe_read = try ring.read_fixed(0x12121212, fd, &buffers[1], 0, 1);
+ try testing.expectEqual(linux.IORING_OP.READ_FIXED, sqe_read.opcode);
+ try testing.expectEqual(@as(u64, 0), sqe_read.off);
+
+ try testing.expectEqual(@as(u32, 2), try ring.submit());
+
+ const cqe_write = try ring.copy_cqe();
+ const cqe_read = try ring.copy_cqe();
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x45454545,
+ .res = @intCast(i32, buffers[0].iov_len),
+ .flags = 0,
+ }, cqe_write);
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x12121212,
+ .res = @intCast(i32, buffers[1].iov_len),
+ .flags = 0,
+ }, cqe_read);
+
+ try testing.expectEqualSlices(u8, "\x00\x00\x00", buffers[1].iov_base[0..3]);
+ try testing.expectEqualSlices(u8, "foobar", buffers[1].iov_base[3..9]);
+ try testing.expectEqualSlices(u8, "zz", buffers[1].iov_base[9..11]);
+}
+
test "openat" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
diff --git a/lib/std/os/linux/ioctl.zig b/lib/std/os/linux/ioctl.zig
new file mode 100644
index 0000000000..35ff1bfc32
--- /dev/null
+++ b/lib/std/os/linux/ioctl.zig
@@ -0,0 +1,56 @@
+const std = @import("../../std.zig");
+
+const bits = switch (@import("builtin").cpu.arch) {
+ .mips,
+ .mipsel,
+ .mips64,
+ .mips64el,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ .sparc,
+ .sparcv9,
+ .sparcel,
+ => .{ .size = 13, .dir = 3, .none = 1, .read = 2, .write = 4 },
+ else => .{ .size = 14, .dir = 2, .none = 0, .read = 2, .write = 1 },
+};
+
+const Direction = std.meta.Int(.unsigned, bits.dir);
+
+pub const Request = packed struct {
+ nr: u8,
+ io_type: u8,
+ size: std.meta.Int(.unsigned, bits.size),
+ dir: Direction,
+};
+
+fn io_impl(dir: Direction, io_type: u8, nr: u8, comptime T: type) u32 {
+ const request = Request{
+ .dir = dir,
+ .size = @sizeOf(T),
+ .io_type = io_type,
+ .nr = nr,
+ };
+ return @bitCast(u32, request);
+}
+
+pub fn IO(io_type: u8, nr: u8) u32 {
+ return io_impl(bits.none, io_type, nr, void);
+}
+
+pub fn IOR(io_type: u8, nr: u8, comptime T: type) u32 {
+ return io_impl(bits.read, io_type, nr, T);
+}
+
+pub fn IOW(io_type: u8, nr: u8, comptime T: type) u32 {
+ return io_impl(bits.write, io_type, nr, T);
+}
+
+pub fn IOWR(io_type: u8, nr: u8, comptime T: type) u32 {
+ return io_impl(bits.read | bits.write, io_type, nr, T);
+}
+
+comptime {
+ std.debug.assert(@bitSizeOf(Request) == 32);
+}
diff --git a/lib/std/os/linux/sparc64.zig b/lib/std/os/linux/sparc64.zig
index 3ae490f5e9..b1f96f144c 100644
--- a/lib/std/os/linux/sparc64.zig
+++ b/lib/std/os/linux/sparc64.zig
@@ -674,6 +674,10 @@ pub const msghdr_const = extern struct {
pub const off_t = i64;
pub const ino_t = u64;
pub const mode_t = u32;
+pub const dev_t = usize;
+pub const nlink_t = u32;
+pub const blksize_t = isize;
+pub const blkcnt_t = isize;
// The `stat64` definition used by the kernel.
pub const Stat = extern struct {
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index 3a0187f735..d3c8d13bd1 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -188,7 +188,10 @@ fn testReadlink(target_path: []const u8, symlink_path: []const u8) !void {
}
test "link with relative paths" {
- if (native_os != .linux) return error.SkipZigTest;
+ switch (native_os) {
+ .linux, .solaris => {},
+ else => return error.SkipZigTest,
+ }
var cwd = fs.cwd();
cwd.deleteFile("example.txt") catch {};
@@ -222,7 +225,10 @@ test "link with relative paths" {
}
test "linkat with different directories" {
- if (native_os != .linux) return error.SkipZigTest;
+ switch (native_os) {
+ .linux, .solaris => {},
+ else => return error.SkipZigTest,
+ }
var cwd = fs.cwd();
var tmp = tmpDir(.{});
@@ -634,8 +640,10 @@ test "fcntl" {
}
test "signalfd" {
- if (native_os != .linux)
- return error.SkipZigTest;
+ switch (native_os) {
+ .linux, .solaris => {},
+ else => return error.SkipZigTest,
+ }
_ = std.os.signalfd;
}
@@ -658,8 +666,10 @@ test "sync" {
}
test "fsync" {
- if (native_os != .linux and native_os != .windows)
- return error.SkipZigTest;
+ switch (native_os) {
+ .linux, .windows, .solaris => {},
+ else => return error.SkipZigTest,
+ }
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -754,7 +764,10 @@ test "sigaction" {
}
test "dup & dup2" {
- if (native_os != .linux) return error.SkipZigTest;
+ switch (native_os) {
+ .linux, .solaris => {},
+ else => return error.SkipZigTest,
+ }
var tmp = tmpDir(.{});
defer tmp.cleanup();
diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig
index 89a00ed892..cde4a4906b 100644
--- a/lib/std/os/windows/kernel32.zig
+++ b/lib/std/os/windows/kernel32.zig
@@ -152,6 +152,8 @@ pub extern "kernel32" fn GetCommandLineW() callconv(WINAPI) LPWSTR;
pub extern "kernel32" fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) callconv(WINAPI) BOOL;
+pub extern "kernel32" fn GetConsoleOutputCP() callconv(WINAPI) UINT;
+
pub extern "kernel32" fn GetConsoleScreenBufferInfo(hConsoleOutput: HANDLE, lpConsoleScreenBufferInfo: *CONSOLE_SCREEN_BUFFER_INFO) callconv(WINAPI) BOOL;
pub extern "kernel32" fn FillConsoleOutputCharacterA(hConsoleOutput: HANDLE, cCharacter: CHAR, nLength: DWORD, dwWriteCoord: COORD, lpNumberOfCharsWritten: *DWORD) callconv(WINAPI) BOOL;
pub extern "kernel32" fn FillConsoleOutputCharacterW(hConsoleOutput: HANDLE, cCharacter: WCHAR, nLength: DWORD, dwWriteCoord: COORD, lpNumberOfCharsWritten: *DWORD) callconv(WINAPI) BOOL;
@@ -286,6 +288,8 @@ pub extern "kernel32" fn SetConsoleCtrlHandler(
Add: BOOL,
) callconv(WINAPI) BOOL;
+pub extern "kernel32" fn SetConsoleOutputCP(wCodePageID: UINT) callconv(WINAPI) BOOL;
+
pub extern "kernel32" fn SetFileCompletionNotificationModes(
FileHandle: HANDLE,
Flags: UCHAR,
diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig
index 168a098397..a6eb9e07ff 100644
--- a/lib/std/os/windows/ws2_32.zig
+++ b/lib/std/os/windows/ws2_32.zig
@@ -1105,6 +1105,7 @@ pub const sockaddr = extern struct {
family: ADDRESS_FAMILY,
data: [14]u8,
+ pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
/// IPv4 socket address
diff --git a/lib/std/priority_dequeue.zig b/lib/std/priority_dequeue.zig
index d154f5df5e..5bde0a36d0 100644
--- a/lib/std/priority_dequeue.zig
+++ b/lib/std/priority_dequeue.zig
@@ -43,13 +43,13 @@ pub fn PriorityDequeue(comptime T: type) type {
/// Insert a new element, maintaining priority.
pub fn add(self: *Self, elem: T) !void {
- try ensureCapacity(self, self.len + 1);
+ try self.ensureUnusedCapacity(1);
addUnchecked(self, elem);
}
/// Add each element in `items` to the dequeue.
pub fn addSlice(self: *Self, items: []const T) !void {
- try self.ensureCapacity(self.len + items.len);
+ try self.ensureUnusedCapacity(items.len);
for (items) |e| {
self.addUnchecked(e);
}
@@ -359,7 +359,11 @@ pub fn PriorityDequeue(comptime T: type) type {
return queue;
}
- pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
+ /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`.
+ pub const ensureCapacity = ensureTotalCapacity;
+
+ /// Ensure that the dequeue can fit at least `new_capacity` items.
+ pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void {
var better_capacity = self.capacity();
if (better_capacity >= new_capacity) return;
while (true) {
@@ -369,6 +373,11 @@ pub fn PriorityDequeue(comptime T: type) type {
self.items = try self.allocator.realloc(self.items, better_capacity);
}
+ /// Ensure that the dequeue can fit at least `additional_count` **more** items.
+ pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void {
+ return self.ensureTotalCapacity(self.len + additional_count);
+ }
+
/// Reduce allocated capacity to `new_len`.
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
@@ -824,7 +833,7 @@ test "std.PriorityDequeue: shrinkAndFree" {
var queue = PDQ.init(testing.allocator, lessThanComparison);
defer queue.deinit();
- try queue.ensureCapacity(4);
+ try queue.ensureTotalCapacity(4);
try expect(queue.capacity() >= 4);
try queue.add(1);
@@ -940,7 +949,7 @@ fn fuzzTestMinMax(rng: *std.rand.Random, queue_size: usize) !void {
fn generateRandomSlice(allocator: *std.mem.Allocator, rng: *std.rand.Random, size: usize) ![]u32 {
var array = std.ArrayList(u32).init(allocator);
- try array.ensureCapacity(size);
+ try array.ensureTotalCapacity(size);
var i: usize = 0;
while (i < size) : (i += 1) {
diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig
index 228c07cadb..fcdd81b1dd 100644
--- a/lib/std/priority_queue.zig
+++ b/lib/std/priority_queue.zig
@@ -42,7 +42,7 @@ pub fn PriorityQueue(comptime T: type) type {
/// Insert a new element, maintaining priority.
pub fn add(self: *Self, elem: T) !void {
- try ensureCapacity(self, self.len + 1);
+ try self.ensureUnusedCapacity(1);
addUnchecked(self, elem);
}
@@ -69,7 +69,7 @@ pub fn PriorityQueue(comptime T: type) type {
/// Add each element in `items` to the queue.
pub fn addSlice(self: *Self, items: []const T) !void {
- try self.ensureCapacity(self.len + items.len);
+ try self.ensureUnusedCapacity(items.len);
for (items) |e| {
self.addUnchecked(e);
}
@@ -175,7 +175,11 @@ pub fn PriorityQueue(comptime T: type) type {
return queue;
}
- pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
+ /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`.
+ pub const ensureCapacity = ensureTotalCapacity;
+
+ /// Ensure that the queue can fit at least `new_capacity` items.
+ pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void {
var better_capacity = self.capacity();
if (better_capacity >= new_capacity) return;
while (true) {
@@ -185,6 +189,11 @@ pub fn PriorityQueue(comptime T: type) type {
self.items = try self.allocator.realloc(self.items, better_capacity);
}
+ /// Ensure that the queue can fit at least `additional_count` **more** items.
+ pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void {
+ return self.ensureTotalCapacity(self.len + additional_count);
+ }
+
/// Reduce allocated capacity to `new_len`.
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
@@ -483,7 +492,7 @@ test "std.PriorityQueue: shrinkAndFree" {
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
- try queue.ensureCapacity(4);
+ try queue.ensureTotalCapacity(4);
try expect(queue.capacity() >= 4);
try queue.add(1);
diff --git a/lib/std/process.zig b/lib/std/process.zig
index f8b986d695..8dce2462a2 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -625,7 +625,7 @@ pub const UserInfo = struct {
/// POSIX function which gets a uid from username.
pub fn getUserInfo(name: []const u8) !UserInfo {
return switch (builtin.os.tag) {
- .linux, .macos, .watchos, .tvos, .ios, .freebsd, .netbsd, .openbsd, .haiku => posixGetUserInfo(name),
+ .linux, .macos, .watchos, .tvos, .ios, .freebsd, .netbsd, .openbsd, .haiku, .solaris => posixGetUserInfo(name),
else => @compileError("Unsupported OS"),
};
}
@@ -753,6 +753,7 @@ pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]
.netbsd,
.dragonfly,
.openbsd,
+ .solaris,
=> {
var paths = List.init(allocator);
errdefer {
diff --git a/lib/std/special/c.zig b/lib/std/special/c.zig
index d34e7a8a46..a7821643da 100644
--- a/lib/std/special/c.zig
+++ b/lib/std/special/c.zig
@@ -1,177 +1,36 @@
-// This is Zig's multi-target implementation of libc.
-// When builtin.link_libc is true, we need to export all the functions and
-// provide an entire C API.
-// Otherwise, only the functions which LLVM generates calls to need to be generated,
-// such as memcpy, memset, and some math functions.
+//! This is Zig's multi-target implementation of libc.
+//! When builtin.link_libc is true, we need to export all the functions and
+//! provide an entire C API.
+//! Otherwise, only the functions which LLVM generates calls to need to be generated,
+//! such as memcpy, memset, and some math functions.
const std = @import("std");
-const builtin = std.builtin;
-const maxInt = std.math.maxInt;
-const isNan = std.math.isNan;
-const native_arch = std.Target.current.cpu.arch;
-const native_abi = std.Target.current.abi;
-const native_os = std.Target.current.os.tag;
+const builtin = @import("builtin");
+const native_os = builtin.os.tag;
-const is_wasm = switch (native_arch) {
- .wasm32, .wasm64 => true,
- else => false,
-};
-const is_msvc = switch (native_abi) {
- .msvc => true,
- else => false,
-};
-const is_freestanding = switch (native_os) {
- .freestanding => true,
- else => false,
-};
comptime {
- if (is_freestanding and is_wasm and builtin.link_libc) {
- @export(wasm_start, .{ .name = "_start", .linkage = .Strong });
+ // When the self-hosted compiler is further along, all the logic from c_stage1.zig will
+ // be migrated to this file and then c_stage1.zig will be deleted. Until then we have a
+ // simpler implementation of c.zig that only uses features already implemented in self-hosted.
+ if (builtin.zig_is_stage2) {
+ @export(memset, .{ .name = "memset", .linkage = .Strong });
+ @export(memcpy, .{ .name = "memcpy", .linkage = .Strong });
+ } else {
+ _ = @import("c_stage1.zig");
}
- if (builtin.link_libc) {
- @export(strcmp, .{ .name = "strcmp", .linkage = .Strong });
- @export(strncmp, .{ .name = "strncmp", .linkage = .Strong });
- @export(strerror, .{ .name = "strerror", .linkage = .Strong });
- @export(strlen, .{ .name = "strlen", .linkage = .Strong });
- @export(strcpy, .{ .name = "strcpy", .linkage = .Strong });
- @export(strncpy, .{ .name = "strncpy", .linkage = .Strong });
- @export(strcat, .{ .name = "strcat", .linkage = .Strong });
- @export(strncat, .{ .name = "strncat", .linkage = .Strong });
- } else if (is_msvc) {
- @export(_fltused, .{ .name = "_fltused", .linkage = .Strong });
- }
-}
-
-var _fltused: c_int = 1;
-
-extern fn main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
-fn wasm_start() callconv(.C) void {
- _ = main(0, undefined);
-}
-
-fn strcpy(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
- var i: usize = 0;
- while (src[i] != 0) : (i += 1) {
- dest[i] = src[i];
- }
- dest[i] = 0;
-
- return dest;
-}
-
-test "strcpy" {
- var s1: [9:0]u8 = undefined;
-
- s1[0] = 0;
- _ = strcpy(&s1, "foobarbaz");
- try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.spanZ(&s1));
-}
-
-fn strncpy(dest: [*:0]u8, src: [*:0]const u8, n: usize) callconv(.C) [*:0]u8 {
- var i: usize = 0;
- while (i < n and src[i] != 0) : (i += 1) {
- dest[i] = src[i];
- }
- while (i < n) : (i += 1) {
- dest[i] = 0;
- }
-
- return dest;
-}
-
-test "strncpy" {
- var s1: [9:0]u8 = undefined;
-
- s1[0] = 0;
- _ = strncpy(&s1, "foobarbaz", @sizeOf(@TypeOf(s1)));
- try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.spanZ(&s1));
-}
-
-fn strcat(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
- var dest_end: usize = 0;
- while (dest[dest_end] != 0) : (dest_end += 1) {}
-
- var i: usize = 0;
- while (src[i] != 0) : (i += 1) {
- dest[dest_end + i] = src[i];
- }
- dest[dest_end + i] = 0;
-
- return dest;
-}
-
-test "strcat" {
- var s1: [9:0]u8 = undefined;
-
- s1[0] = 0;
- _ = strcat(&s1, "foo");
- _ = strcat(&s1, "bar");
- _ = strcat(&s1, "baz");
- try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.spanZ(&s1));
-}
-
-fn strncat(dest: [*:0]u8, src: [*:0]const u8, avail: usize) callconv(.C) [*:0]u8 {
- var dest_end: usize = 0;
- while (dest[dest_end] != 0) : (dest_end += 1) {}
-
- var i: usize = 0;
- while (i < avail and src[i] != 0) : (i += 1) {
- dest[dest_end + i] = src[i];
- }
- dest[dest_end + i] = 0;
-
- return dest;
-}
-
-test "strncat" {
- var s1: [9:0]u8 = undefined;
-
- s1[0] = 0;
- _ = strncat(&s1, "foo1111", 3);
- _ = strncat(&s1, "bar1111", 3);
- _ = strncat(&s1, "baz1111", 3);
- try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.spanZ(&s1));
-}
-
-fn strcmp(s1: [*:0]const u8, s2: [*:0]const u8) callconv(.C) c_int {
- return std.cstr.cmp(s1, s2);
-}
-
-fn strlen(s: [*:0]const u8) callconv(.C) usize {
- return std.mem.len(s);
-}
-
-fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.C) c_int {
- if (_n == 0) return 0;
- var l = _l;
- var r = _r;
- var n = _n - 1;
- while (l[0] != 0 and r[0] != 0 and n != 0 and l[0] == r[0]) {
- l += 1;
- r += 1;
- n -= 1;
- }
- return @as(c_int, l[0]) - @as(c_int, r[0]);
-}
-
-fn strerror(errnum: c_int) callconv(.C) [*:0]const u8 {
- _ = errnum;
- return "TODO strerror implementation";
-}
-
-test "strncmp" {
- try std.testing.expect(strncmp("a", "b", 1) == -1);
- try std.testing.expect(strncmp("a", "c", 1) == -2);
- try std.testing.expect(strncmp("b", "a", 1) == 1);
- try std.testing.expect(strncmp("\xff", "\x02", 1) == 253);
}
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
-pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
+ @setCold(true);
_ = error_return_trace;
+ if (builtin.zig_is_stage2) {
+ while (true) {
+ @breakpoint();
+ }
+ }
if (builtin.is_test) {
- @setCold(true);
std.debug.panic("{s}", .{msg});
}
if (native_os != .freestanding and native_os != .other) {
@@ -180,1028 +39,38 @@ pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn
while (true) {}
}
-export fn memset(dest: ?[*]u8, c: u8, n: usize) callconv(.C) ?[*]u8 {
+fn memset(dest: ?[*]u8, c: u8, len: usize) callconv(.C) ?[*]u8 {
@setRuntimeSafety(false);
- var index: usize = 0;
- while (index != n) : (index += 1)
- dest.?[index] = c;
-
- return dest;
-}
-
-export fn __memset(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
- if (dest_n < n)
- @panic("buffer overflow");
- return memset(dest, c, n);
-}
-
-export fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
- @setRuntimeSafety(false);
-
- var index: usize = 0;
- while (index != n) : (index += 1)
- dest.?[index] = src.?[index];
-
- return dest;
-}
-
-export fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
- @setRuntimeSafety(false);
-
- if (@ptrToInt(dest) < @ptrToInt(src)) {
- var index: usize = 0;
- while (index != n) : (index += 1) {
- dest.?[index] = src.?[index];
- }
- } else {
- var index = n;
- while (index != 0) {
- index -= 1;
- dest.?[index] = src.?[index];
+ if (len != 0) {
+ var d = dest.?;
+ var n = len;
+ while (true) {
+ d.* = c;
+ n -= 1;
+ if (n == 0) break;
+ d += 1;
}
}
return dest;
}
-export fn memcmp(vl: ?[*]const u8, vr: ?[*]const u8, n: usize) callconv(.C) c_int {
+fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(.C) ?[*]u8 {
@setRuntimeSafety(false);
- var index: usize = 0;
- while (index != n) : (index += 1) {
- const compare_val = @bitCast(i8, vl.?[index] -% vr.?[index]);
- if (compare_val != 0) {
- return compare_val;
+ if (len != 0) {
+ var d = dest.?;
+ var s = src.?;
+ var n = len;
+ while (true) {
+ d.* = s.*;
+ n -= 1;
+ if (n == 0) break;
+ d += 1;
+ s += 1;
}
}
- return 0;
-}
-
-test "memcmp" {
- const base_arr = &[_]u8{ 1, 1, 1 };
- const arr1 = &[_]u8{ 1, 1, 1 };
- const arr2 = &[_]u8{ 1, 0, 1 };
- const arr3 = &[_]u8{ 1, 2, 1 };
-
- try std.testing.expect(memcmp(base_arr[0..], arr1[0..], base_arr.len) == 0);
- try std.testing.expect(memcmp(base_arr[0..], arr2[0..], base_arr.len) > 0);
- try std.testing.expect(memcmp(base_arr[0..], arr3[0..], base_arr.len) < 0);
-}
-
-export fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.C) c_int {
- @setRuntimeSafety(false);
-
- var index: usize = 0;
- while (index != n) : (index += 1) {
- if (vl[index] != vr[index]) {
- return 1;
- }
- }
-
- return 0;
-}
-
-test "bcmp" {
- const base_arr = &[_]u8{ 1, 1, 1 };
- const arr1 = &[_]u8{ 1, 1, 1 };
- const arr2 = &[_]u8{ 1, 0, 1 };
- const arr3 = &[_]u8{ 1, 2, 1 };
-
- try std.testing.expect(bcmp(base_arr[0..], arr1[0..], base_arr.len) == 0);
- try std.testing.expect(bcmp(base_arr[0..], arr2[0..], base_arr.len) != 0);
- try std.testing.expect(bcmp(base_arr[0..], arr3[0..], base_arr.len) != 0);
-}
-
-comptime {
- if (native_os == .linux) {
- @export(clone, .{ .name = "clone" });
- }
-}
-
-// TODO we should be able to put this directly in std/linux/x86_64.zig but
-// it causes a segfault in release mode. this is a workaround of calling it
-// across .o file boundaries. fix comptime @ptrCast of nakedcc functions.
-fn clone() callconv(.Naked) void {
- switch (native_arch) {
- .i386 => {
- // __clone(func, stack, flags, arg, ptid, tls, ctid)
- // +8, +12, +16, +20, +24, +28, +32
- // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
- // eax, ebx, ecx, edx, esi, edi
- asm volatile (
- \\ push %%ebp
- \\ mov %%esp,%%ebp
- \\ push %%ebx
- \\ push %%esi
- \\ push %%edi
- \\ // Setup the arguments
- \\ mov 16(%%ebp),%%ebx
- \\ mov 12(%%ebp),%%ecx
- \\ and $-16,%%ecx
- \\ sub $20,%%ecx
- \\ mov 20(%%ebp),%%eax
- \\ mov %%eax,4(%%ecx)
- \\ mov 8(%%ebp),%%eax
- \\ mov %%eax,0(%%ecx)
- \\ mov 24(%%ebp),%%edx
- \\ mov 28(%%ebp),%%esi
- \\ mov 32(%%ebp),%%edi
- \\ mov $120,%%eax
- \\ int $128
- \\ test %%eax,%%eax
- \\ jnz 1f
- \\ pop %%eax
- \\ xor %%ebp,%%ebp
- \\ call *%%eax
- \\ mov %%eax,%%ebx
- \\ xor %%eax,%%eax
- \\ inc %%eax
- \\ int $128
- \\ hlt
- \\1:
- \\ pop %%edi
- \\ pop %%esi
- \\ pop %%ebx
- \\ pop %%ebp
- \\ ret
- );
- },
- .x86_64 => {
- asm volatile (
- \\ xor %%eax,%%eax
- \\ mov $56,%%al // SYS_clone
- \\ mov %%rdi,%%r11
- \\ mov %%rdx,%%rdi
- \\ mov %%r8,%%rdx
- \\ mov %%r9,%%r8
- \\ mov 8(%%rsp),%%r10
- \\ mov %%r11,%%r9
- \\ and $-16,%%rsi
- \\ sub $8,%%rsi
- \\ mov %%rcx,(%%rsi)
- \\ syscall
- \\ test %%eax,%%eax
- \\ jnz 1f
- \\ xor %%ebp,%%ebp
- \\ pop %%rdi
- \\ call *%%r9
- \\ mov %%eax,%%edi
- \\ xor %%eax,%%eax
- \\ mov $60,%%al // SYS_exit
- \\ syscall
- \\ hlt
- \\1: ret
- \\
- );
- },
- .aarch64 => {
- // __clone(func, stack, flags, arg, ptid, tls, ctid)
- // x0, x1, w2, x3, x4, x5, x6
-
- // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
- // x8, x0, x1, x2, x3, x4
- asm volatile (
- \\ // align stack and save func,arg
- \\ and x1,x1,#-16
- \\ stp x0,x3,[x1,#-16]!
- \\
- \\ // syscall
- \\ uxtw x0,w2
- \\ mov x2,x4
- \\ mov x3,x5
- \\ mov x4,x6
- \\ mov x8,#220 // SYS_clone
- \\ svc #0
- \\
- \\ cbz x0,1f
- \\ // parent
- \\ ret
- \\ // child
- \\1: ldp x1,x0,[sp],#16
- \\ blr x1
- \\ mov x8,#93 // SYS_exit
- \\ svc #0
- );
- },
- .arm, .thumb => {
- // __clone(func, stack, flags, arg, ptid, tls, ctid)
- // r0, r1, r2, r3, +0, +4, +8
-
- // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
- // r7 r0, r1, r2, r3, r4
- asm volatile (
- \\ stmfd sp!,{r4,r5,r6,r7}
- \\ mov r7,#120
- \\ mov r6,r3
- \\ mov r5,r0
- \\ mov r0,r2
- \\ and r1,r1,#-16
- \\ ldr r2,[sp,#16]
- \\ ldr r3,[sp,#20]
- \\ ldr r4,[sp,#24]
- \\ svc 0
- \\ tst r0,r0
- \\ beq 1f
- \\ ldmfd sp!,{r4,r5,r6,r7}
- \\ bx lr
- \\
- \\1: mov r0,r6
- \\ bl 3f
- \\2: mov r7,#1
- \\ svc 0
- \\ b 2b
- \\3: bx r5
- );
- },
- .riscv64 => {
- // __clone(func, stack, flags, arg, ptid, tls, ctid)
- // a0, a1, a2, a3, a4, a5, a6
-
- // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
- // a7 a0, a1, a2, a3, a4
- asm volatile (
- \\ # Save func and arg to stack
- \\ addi a1, a1, -16
- \\ sd a0, 0(a1)
- \\ sd a3, 8(a1)
- \\
- \\ # Call SYS_clone
- \\ mv a0, a2
- \\ mv a2, a4
- \\ mv a3, a5
- \\ mv a4, a6
- \\ li a7, 220 # SYS_clone
- \\ ecall
- \\
- \\ beqz a0, 1f
- \\ # Parent
- \\ ret
- \\
- \\ # Child
- \\1: ld a1, 0(sp)
- \\ ld a0, 8(sp)
- \\ jalr a1
- \\
- \\ # Exit
- \\ li a7, 93 # SYS_exit
- \\ ecall
- );
- },
- .mips, .mipsel => {
- // __clone(func, stack, flags, arg, ptid, tls, ctid)
- // 3, 4, 5, 6, 7, 8, 9
-
- // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
- // 2 4, 5, 6, 7, 8
- asm volatile (
- \\ # Save function pointer and argument pointer on new thread stack
- \\ and $5, $5, -8
- \\ subu $5, $5, 16
- \\ sw $4, 0($5)
- \\ sw $7, 4($5)
- \\ # Shuffle (fn,sp,fl,arg,ptid,tls,ctid) to (fl,sp,ptid,tls,ctid)
- \\ move $4, $6
- \\ lw $6, 16($sp)
- \\ lw $7, 20($sp)
- \\ lw $9, 24($sp)
- \\ subu $sp, $sp, 16
- \\ sw $9, 16($sp)
- \\ li $2, 4120
- \\ syscall
- \\ beq $7, $0, 1f
- \\ nop
- \\ addu $sp, $sp, 16
- \\ jr $ra
- \\ subu $2, $0, $2
- \\1:
- \\ beq $2, $0, 1f
- \\ nop
- \\ addu $sp, $sp, 16
- \\ jr $ra
- \\ nop
- \\1:
- \\ lw $25, 0($sp)
- \\ lw $4, 4($sp)
- \\ jalr $25
- \\ nop
- \\ move $4, $2
- \\ li $2, 4001
- \\ syscall
- );
- },
- .powerpc => {
- // __clone(func, stack, flags, arg, ptid, tls, ctid)
- // 3, 4, 5, 6, 7, 8, 9
-
- // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
- // 0 3, 4, 5, 6, 7
- asm volatile (
- \\# store non-volatile regs r30, r31 on stack in order to put our
- \\# start func and its arg there
- \\stwu 30, -16(1)
- \\stw 31, 4(1)
- \\
- \\# save r3 (func) into r30, and r6(arg) into r31
- \\mr 30, 3
- \\mr 31, 6
- \\
- \\# create initial stack frame for new thread
- \\clrrwi 4, 4, 4
- \\li 0, 0
- \\stwu 0, -16(4)
- \\
- \\#move c into first arg
- \\mr 3, 5
- \\#mr 4, 4
- \\mr 5, 7
- \\mr 6, 8
- \\mr 7, 9
- \\
- \\# move syscall number into r0
- \\li 0, 120
- \\
- \\sc
- \\
- \\# check for syscall error
- \\bns+ 1f # jump to label 1 if no summary overflow.
- \\#else
- \\neg 3, 3 #negate the result (errno)
- \\1:
- \\# compare sc result with 0
- \\cmpwi cr7, 3, 0
- \\
- \\# if not 0, jump to end
- \\bne cr7, 2f
- \\
- \\#else: we're the child
- \\#call funcptr: move arg (d) into r3
- \\mr 3, 31
- \\#move r30 (funcptr) into CTR reg
- \\mtctr 30
- \\# call CTR reg
- \\bctrl
- \\# mov SYS_exit into r0 (the exit param is already in r3)
- \\li 0, 1
- \\sc
- \\
- \\2:
- \\
- \\# restore stack
- \\lwz 30, 0(1)
- \\lwz 31, 4(1)
- \\addi 1, 1, 16
- \\
- \\blr
- );
- },
- .powerpc64, .powerpc64le => {
- // __clone(func, stack, flags, arg, ptid, tls, ctid)
- // 3, 4, 5, 6, 7, 8, 9
-
- // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
- // 0 3, 4, 5, 6, 7
- asm volatile (
- \\ # create initial stack frame for new thread
- \\ clrrdi 4, 4, 4
- \\ li 0, 0
- \\ stdu 0,-32(4)
- \\
- \\ # save fn and arg to child stack
- \\ std 3, 8(4)
- \\ std 6, 16(4)
- \\
- \\ # shuffle args into correct registers and call SYS_clone
- \\ mr 3, 5
- \\ #mr 4, 4
- \\ mr 5, 7
- \\ mr 6, 8
- \\ mr 7, 9
- \\ li 0, 120 # SYS_clone = 120
- \\ sc
- \\
- \\ # if error, negate return (errno)
- \\ bns+ 1f
- \\ neg 3, 3
- \\
- \\1:
- \\ # if we're the parent, return
- \\ cmpwi cr7, 3, 0
- \\ bnelr cr7
- \\
- \\ # we're the child. call fn(arg)
- \\ ld 3, 16(1)
- \\ ld 12, 8(1)
- \\ mtctr 12
- \\ bctrl
- \\
- \\ # call SYS_exit. exit code is already in r3 from fn return value
- \\ li 0, 1 # SYS_exit = 1
- \\ sc
- );
- },
- .sparcv9 => {
- // __clone(func, stack, flags, arg, ptid, tls, ctid)
- // i0, i1, i2, i3, i4, i5, sp
- // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
- // g1 o0, o1, o2, o3, o4
- asm volatile (
- \\ save %%sp, -192, %%sp
- \\ # Save the func pointer and the arg pointer
- \\ mov %%i0, %%g2
- \\ mov %%i3, %%g3
- \\ # Shuffle the arguments
- \\ mov 217, %%g1
- \\ mov %%i2, %%o0
- \\ # Add some extra space for the initial frame
- \\ sub %%i1, 176 + 2047, %%o1
- \\ mov %%i4, %%o2
- \\ mov %%i5, %%o3
- \\ ldx [%%fp + 0x8af], %%o4
- \\ t 0x6d
- \\ bcs,pn %%xcc, 2f
- \\ nop
- \\ # The child pid is returned in o0 while o1 tells if this
- \\ # process is # the child (=1) or the parent (=0).
- \\ brnz %%o1, 1f
- \\ nop
- \\ # Parent process, return the child pid
- \\ mov %%o0, %%i0
- \\ ret
- \\ restore
- \\1:
- \\ # Child process, call func(arg)
- \\ mov %%g0, %%fp
- \\ call %%g2
- \\ mov %%g3, %%o0
- \\ # Exit
- \\ mov 1, %%g1
- \\ t 0x6d
- \\2:
- \\ # The syscall failed
- \\ sub %%g0, %%o0, %%i0
- \\ ret
- \\ restore
- );
- },
- else => @compileError("Implement clone() for this arch."),
- }
-}
-
-const math = std.math;
-
-export fn fmodf(x: f32, y: f32) f32 {
- return generic_fmod(f32, x, y);
-}
-export fn fmod(x: f64, y: f64) f64 {
- return generic_fmod(f64, x, y);
-}
-
-// TODO add intrinsics for these (and probably the double version too)
-// and have the math stuff use the intrinsic. same as @mod and @rem
-export fn floorf(x: f32) f32 {
- return math.floor(x);
-}
-
-export fn ceilf(x: f32) f32 {
- return math.ceil(x);
-}
-
-export fn floor(x: f64) f64 {
- return math.floor(x);
-}
-
-export fn ceil(x: f64) f64 {
- return math.ceil(x);
-}
-
-export fn fma(a: f64, b: f64, c: f64) f64 {
- return math.fma(f64, a, b, c);
-}
-
-export fn fmaf(a: f32, b: f32, c: f32) f32 {
- return math.fma(f32, a, b, c);
-}
-
-export fn sin(a: f64) f64 {
- return math.sin(a);
-}
-
-export fn sinf(a: f32) f32 {
- return math.sin(a);
-}
-
-export fn cos(a: f64) f64 {
- return math.cos(a);
-}
-
-export fn cosf(a: f32) f32 {
- return math.cos(a);
-}
-
-export fn sincos(a: f64, r_sin: *f64, r_cos: *f64) void {
- r_sin.* = math.sin(a);
- r_cos.* = math.cos(a);
-}
-
-export fn sincosf(a: f32, r_sin: *f32, r_cos: *f32) void {
- r_sin.* = math.sin(a);
- r_cos.* = math.cos(a);
-}
-
-export fn exp(a: f64) f64 {
- return math.exp(a);
-}
-
-export fn expf(a: f32) f32 {
- return math.exp(a);
-}
-
-export fn exp2(a: f64) f64 {
- return math.exp2(a);
-}
-
-export fn exp2f(a: f32) f32 {
- return math.exp2(a);
-}
-
-export fn log(a: f64) f64 {
- return math.ln(a);
-}
-
-export fn logf(a: f32) f32 {
- return math.ln(a);
-}
-
-export fn log2(a: f64) f64 {
- return math.log2(a);
-}
-
-export fn log2f(a: f32) f32 {
- return math.log2(a);
-}
-
-export fn log10(a: f64) f64 {
- return math.log10(a);
-}
-
-export fn log10f(a: f32) f32 {
- return math.log10(a);
-}
-
-export fn fabs(a: f64) f64 {
- return math.fabs(a);
-}
-
-export fn fabsf(a: f32) f32 {
- return math.fabs(a);
-}
-
-export fn trunc(a: f64) f64 {
- return math.trunc(a);
-}
-
-export fn truncf(a: f32) f32 {
- return math.trunc(a);
-}
-
-export fn round(a: f64) f64 {
- return math.round(a);
-}
-
-export fn roundf(a: f32) f32 {
- return math.round(a);
-}
-
-fn generic_fmod(comptime T: type, x: T, y: T) T {
- @setRuntimeSafety(false);
-
- const bits = @typeInfo(T).Float.bits;
- const uint = std.meta.Int(.unsigned, bits);
- const log2uint = math.Log2Int(uint);
- const digits = if (T == f32) 23 else 52;
- const exp_bits = if (T == f32) 9 else 12;
- const bits_minus_1 = bits - 1;
- const mask = if (T == f32) 0xff else 0x7ff;
- var ux = @bitCast(uint, x);
- var uy = @bitCast(uint, y);
- var ex = @intCast(i32, (ux >> digits) & mask);
- var ey = @intCast(i32, (uy >> digits) & mask);
- const sx = if (T == f32) @intCast(u32, ux & 0x80000000) else @intCast(i32, ux >> bits_minus_1);
- var i: uint = undefined;
-
- if (uy << 1 == 0 or isNan(@bitCast(T, uy)) or ex == mask)
- return (x * y) / (x * y);
-
- if (ux << 1 <= uy << 1) {
- if (ux << 1 == uy << 1)
- return 0 * x;
- return x;
- }
-
- // normalize x and y
- if (ex == 0) {
- i = ux << exp_bits;
- while (i >> bits_minus_1 == 0) : ({
- ex -= 1;
- i <<= 1;
- }) {}
- ux <<= @intCast(log2uint, @bitCast(u32, -ex + 1));
- } else {
- ux &= maxInt(uint) >> exp_bits;
- ux |= 1 << digits;
- }
- if (ey == 0) {
- i = uy << exp_bits;
- while (i >> bits_minus_1 == 0) : ({
- ey -= 1;
- i <<= 1;
- }) {}
- uy <<= @intCast(log2uint, @bitCast(u32, -ey + 1));
- } else {
- uy &= maxInt(uint) >> exp_bits;
- uy |= 1 << digits;
- }
-
- // x mod y
- while (ex > ey) : (ex -= 1) {
- i = ux -% uy;
- if (i >> bits_minus_1 == 0) {
- if (i == 0)
- return 0 * x;
- ux = i;
- }
- ux <<= 1;
- }
- i = ux -% uy;
- if (i >> bits_minus_1 == 0) {
- if (i == 0)
- return 0 * x;
- ux = i;
- }
- while (ux >> digits == 0) : ({
- ux <<= 1;
- ex -= 1;
- }) {}
-
- // scale result up
- if (ex > 0) {
- ux -%= 1 << digits;
- ux |= @as(uint, @bitCast(u32, ex)) << digits;
- } else {
- ux >>= @intCast(log2uint, @bitCast(u32, -ex + 1));
- }
- if (T == f32) {
- ux |= sx;
- } else {
- ux |= @intCast(uint, sx) << bits_minus_1;
- }
- return @bitCast(T, ux);
-}
-
-test "fmod, fmodf" {
- inline for ([_]type{ f32, f64 }) |T| {
- const nan_val = math.nan(T);
- const inf_val = math.inf(T);
-
- try std.testing.expect(isNan(generic_fmod(T, nan_val, 1.0)));
- try std.testing.expect(isNan(generic_fmod(T, 1.0, nan_val)));
- try std.testing.expect(isNan(generic_fmod(T, inf_val, 1.0)));
- try std.testing.expect(isNan(generic_fmod(T, 0.0, 0.0)));
- try std.testing.expect(isNan(generic_fmod(T, 1.0, 0.0)));
-
- try std.testing.expectEqual(@as(T, 0.0), generic_fmod(T, 0.0, 2.0));
- try std.testing.expectEqual(@as(T, -0.0), generic_fmod(T, -0.0, 2.0));
-
- try std.testing.expectEqual(@as(T, -2.0), generic_fmod(T, -32.0, 10.0));
- try std.testing.expectEqual(@as(T, -2.0), generic_fmod(T, -32.0, -10.0));
- try std.testing.expectEqual(@as(T, 2.0), generic_fmod(T, 32.0, 10.0));
- try std.testing.expectEqual(@as(T, 2.0), generic_fmod(T, 32.0, -10.0));
- }
-}
-
-fn generic_fmin(comptime T: type, x: T, y: T) T {
- if (isNan(x))
- return y;
- if (isNan(y))
- return x;
- return if (x < y) x else y;
-}
-
-export fn fminf(x: f32, y: f32) callconv(.C) f32 {
- return generic_fmin(f32, x, y);
-}
-
-export fn fmin(x: f64, y: f64) callconv(.C) f64 {
- return generic_fmin(f64, x, y);
-}
-
-test "fmin, fminf" {
- inline for ([_]type{ f32, f64 }) |T| {
- const nan_val = math.nan(T);
-
- try std.testing.expect(isNan(generic_fmin(T, nan_val, nan_val)));
- try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, nan_val, 1.0));
- try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, 1.0, nan_val));
-
- try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, 1.0, 10.0));
- try std.testing.expectEqual(@as(T, -1.0), generic_fmin(T, 1.0, -1.0));
- }
-}
-
-fn generic_fmax(comptime T: type, x: T, y: T) T {
- if (isNan(x))
- return y;
- if (isNan(y))
- return x;
- return if (x < y) y else x;
-}
-
-export fn fmaxf(x: f32, y: f32) callconv(.C) f32 {
- return generic_fmax(f32, x, y);
-}
-
-export fn fmax(x: f64, y: f64) callconv(.C) f64 {
- return generic_fmax(f64, x, y);
-}
-
-test "fmax, fmaxf" {
- inline for ([_]type{ f32, f64 }) |T| {
- const nan_val = math.nan(T);
-
- try std.testing.expect(isNan(generic_fmax(T, nan_val, nan_val)));
- try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, nan_val, 1.0));
- try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, 1.0, nan_val));
-
- try std.testing.expectEqual(@as(T, 10.0), generic_fmax(T, 1.0, 10.0));
- try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, 1.0, -1.0));
- }
-}
-
-// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound
-// behaviour. Most intermediate i32 values are changed to u32 where appropriate but there are
-// potentially some edge cases remaining that are not handled in the same way.
-export fn sqrt(x: f64) f64 {
- const tiny: f64 = 1.0e-300;
- const sign: u32 = 0x80000000;
- const u = @bitCast(u64, x);
-
- var ix0 = @intCast(u32, u >> 32);
- var ix1 = @intCast(u32, u & 0xFFFFFFFF);
-
- // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = nan
- if (ix0 & 0x7FF00000 == 0x7FF00000) {
- return x * x + x;
- }
-
- // sqrt(+-0) = +-0
- if (x == 0.0) {
- return x;
- }
- // sqrt(-ve) = snan
- if (ix0 & sign != 0) {
- return math.snan(f64);
- }
-
- // normalize x
- var m = @intCast(i32, ix0 >> 20);
- if (m == 0) {
- // subnormal
- while (ix0 == 0) {
- m -= 21;
- ix0 |= ix1 >> 11;
- ix1 <<= 21;
- }
-
- // subnormal
- var i: u32 = 0;
- while (ix0 & 0x00100000 == 0) : (i += 1) {
- ix0 <<= 1;
- }
- m -= @intCast(i32, i) - 1;
- ix0 |= ix1 >> @intCast(u5, 32 - i);
- ix1 <<= @intCast(u5, i);
- }
-
- // unbias exponent
- m -= 1023;
- ix0 = (ix0 & 0x000FFFFF) | 0x00100000;
- if (m & 1 != 0) {
- ix0 += ix0 + (ix1 >> 31);
- ix1 = ix1 +% ix1;
- }
- m >>= 1;
-
- // sqrt(x) bit by bit
- ix0 += ix0 + (ix1 >> 31);
- ix1 = ix1 +% ix1;
-
- var q: u32 = 0;
- var q1: u32 = 0;
- var s0: u32 = 0;
- var s1: u32 = 0;
- var r: u32 = 0x00200000;
- var t: u32 = undefined;
- var t1: u32 = undefined;
-
- while (r != 0) {
- t = s0 +% r;
- if (t <= ix0) {
- s0 = t + r;
- ix0 -= t;
- q += r;
- }
- ix0 = ix0 +% ix0 +% (ix1 >> 31);
- ix1 = ix1 +% ix1;
- r >>= 1;
- }
-
- r = sign;
- while (r != 0) {
- t1 = s1 +% r;
- t = s0;
- if (t < ix0 or (t == ix0 and t1 <= ix1)) {
- s1 = t1 +% r;
- if (t1 & sign == sign and s1 & sign == 0) {
- s0 += 1;
- }
- ix0 -= t;
- if (ix1 < t1) {
- ix0 -= 1;
- }
- ix1 = ix1 -% t1;
- q1 += r;
- }
- ix0 = ix0 +% ix0 +% (ix1 >> 31);
- ix1 = ix1 +% ix1;
- r >>= 1;
- }
-
- // rounding direction
- if (ix0 | ix1 != 0) {
- var z = 1.0 - tiny; // raise inexact
- if (z >= 1.0) {
- z = 1.0 + tiny;
- if (q1 == 0xFFFFFFFF) {
- q1 = 0;
- q += 1;
- } else if (z > 1.0) {
- if (q1 == 0xFFFFFFFE) {
- q += 1;
- }
- q1 += 2;
- } else {
- q1 += q1 & 1;
- }
- }
- }
-
- ix0 = (q >> 1) + 0x3FE00000;
- ix1 = q1 >> 1;
- if (q & 1 != 0) {
- ix1 |= 0x80000000;
- }
-
- // NOTE: musl here appears to rely on signed twos-complement wraparound. +% has the same
- // behaviour at least.
- var iix0 = @intCast(i32, ix0);
- iix0 = iix0 +% (m << 20);
-
- const uz = (@intCast(u64, iix0) << 32) | ix1;
- return @bitCast(f64, uz);
-}
-
-test "sqrt" {
- const V = [_]f64{
- 0.0,
- 4.089288054930154,
- 7.538757127071935,
- 8.97780793672623,
- 5.304443821913729,
- 5.682408965311888,
- 0.5846878579110049,
- 3.650338664297043,
- 0.3178091951800732,
- 7.1505232436382835,
- 3.6589165881946464,
- };
-
- // Note that @sqrt will either generate the sqrt opcode (if supported by the
- // target ISA) or a call to `sqrtf` otherwise.
- for (V) |val|
- try std.testing.expectEqual(@sqrt(val), sqrt(val));
-}
-
-test "sqrt special" {
- try std.testing.expect(std.math.isPositiveInf(sqrt(std.math.inf(f64))));
- try std.testing.expect(sqrt(0.0) == 0.0);
- try std.testing.expect(sqrt(-0.0) == -0.0);
- try std.testing.expect(isNan(sqrt(-1.0)));
- try std.testing.expect(isNan(sqrt(std.math.nan(f64))));
-}
-
-export fn sqrtf(x: f32) f32 {
- const tiny: f32 = 1.0e-30;
- const sign: i32 = @bitCast(i32, @as(u32, 0x80000000));
- var ix: i32 = @bitCast(i32, x);
-
- if ((ix & 0x7F800000) == 0x7F800000) {
- return x * x + x; // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = snan
- }
-
- // zero
- if (ix <= 0) {
- if (ix & ~sign == 0) {
- return x; // sqrt (+-0) = +-0
- }
- if (ix < 0) {
- return math.snan(f32);
- }
- }
-
- // normalize
- var m = ix >> 23;
- if (m == 0) {
- // subnormal
- var i: i32 = 0;
- while (ix & 0x00800000 == 0) : (i += 1) {
- ix <<= 1;
- }
- m -= i - 1;
- }
-
- m -= 127; // unbias exponent
- ix = (ix & 0x007FFFFF) | 0x00800000;
-
- if (m & 1 != 0) { // odd m, double x to even
- ix += ix;
- }
-
- m >>= 1; // m = [m / 2]
-
- // sqrt(x) bit by bit
- ix += ix;
- var q: i32 = 0; // q = sqrt(x)
- var s: i32 = 0;
- var r: i32 = 0x01000000; // r = moving bit right -> left
-
- while (r != 0) {
- const t = s + r;
- if (t <= ix) {
- s = t + r;
- ix -= t;
- q += r;
- }
- ix += ix;
- r >>= 1;
- }
-
- // floating add to find rounding direction
- if (ix != 0) {
- var z = 1.0 - tiny; // inexact
- if (z >= 1.0) {
- z = 1.0 + tiny;
- if (z > 1.0) {
- q += 2;
- } else {
- if (q & 1 != 0) {
- q += 1;
- }
- }
- }
- }
-
- ix = (q >> 1) + 0x3f000000;
- ix += m << 23;
- return @bitCast(f32, ix);
-}
-
-test "sqrtf" {
- const V = [_]f32{
- 0.0,
- 4.089288054930154,
- 7.538757127071935,
- 8.97780793672623,
- 5.304443821913729,
- 5.682408965311888,
- 0.5846878579110049,
- 3.650338664297043,
- 0.3178091951800732,
- 7.1505232436382835,
- 3.6589165881946464,
- };
-
- // Note that @sqrt will either generate the sqrt opcode (if supported by the
- // target ISA) or a call to `sqrtf` otherwise.
- for (V) |val|
- try std.testing.expectEqual(@sqrt(val), sqrtf(val));
-}
-
-test "sqrtf special" {
- try std.testing.expect(std.math.isPositiveInf(sqrtf(std.math.inf(f32))));
- try std.testing.expect(sqrtf(0.0) == 0.0);
- try std.testing.expect(sqrtf(-0.0) == -0.0);
- try std.testing.expect(isNan(sqrtf(-1.0)));
- try std.testing.expect(isNan(sqrtf(std.math.nan(f32))));
+ return dest;
}
diff --git a/lib/std/special/c_stage1.zig b/lib/std/special/c_stage1.zig
new file mode 100644
index 0000000000..7ea2b95704
--- /dev/null
+++ b/lib/std/special/c_stage1.zig
@@ -0,0 +1,1187 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const maxInt = std.math.maxInt;
+const isNan = std.math.isNan;
+const native_arch = builtin.cpu.arch;
+const native_abi = builtin.abi;
+const native_os = builtin.os.tag;
+
+const is_wasm = switch (native_arch) {
+ .wasm32, .wasm64 => true,
+ else => false,
+};
+const is_msvc = switch (native_abi) {
+ .msvc => true,
+ else => false,
+};
+const is_freestanding = switch (native_os) {
+ .freestanding => true,
+ else => false,
+};
+comptime {
+ if (is_freestanding and is_wasm and builtin.link_libc) {
+ @export(wasm_start, .{ .name = "_start", .linkage = .Strong });
+ }
+ if (builtin.link_libc) {
+ @export(strcmp, .{ .name = "strcmp", .linkage = .Strong });
+ @export(strncmp, .{ .name = "strncmp", .linkage = .Strong });
+ @export(strerror, .{ .name = "strerror", .linkage = .Strong });
+ @export(strlen, .{ .name = "strlen", .linkage = .Strong });
+ @export(strcpy, .{ .name = "strcpy", .linkage = .Strong });
+ @export(strncpy, .{ .name = "strncpy", .linkage = .Strong });
+ @export(strcat, .{ .name = "strcat", .linkage = .Strong });
+ @export(strncat, .{ .name = "strncat", .linkage = .Strong });
+ } else if (is_msvc) {
+ @export(_fltused, .{ .name = "_fltused", .linkage = .Strong });
+ }
+}
+
+var _fltused: c_int = 1;
+
+extern fn main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
+fn wasm_start() callconv(.C) void {
+ _ = main(0, undefined);
+}
+
+fn strcpy(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
+ var i: usize = 0;
+ while (src[i] != 0) : (i += 1) {
+ dest[i] = src[i];
+ }
+ dest[i] = 0;
+
+ return dest;
+}
+
+test "strcpy" {
+ var s1: [9:0]u8 = undefined;
+
+ s1[0] = 0;
+ _ = strcpy(&s1, "foobarbaz");
+ try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.spanZ(&s1));
+}
+
+fn strncpy(dest: [*:0]u8, src: [*:0]const u8, n: usize) callconv(.C) [*:0]u8 {
+ var i: usize = 0;
+ while (i < n and src[i] != 0) : (i += 1) {
+ dest[i] = src[i];
+ }
+ while (i < n) : (i += 1) {
+ dest[i] = 0;
+ }
+
+ return dest;
+}
+
+test "strncpy" {
+ var s1: [9:0]u8 = undefined;
+
+ s1[0] = 0;
+ _ = strncpy(&s1, "foobarbaz", @sizeOf(@TypeOf(s1)));
+ try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.spanZ(&s1));
+}
+
+fn strcat(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
+ var dest_end: usize = 0;
+ while (dest[dest_end] != 0) : (dest_end += 1) {}
+
+ var i: usize = 0;
+ while (src[i] != 0) : (i += 1) {
+ dest[dest_end + i] = src[i];
+ }
+ dest[dest_end + i] = 0;
+
+ return dest;
+}
+
+test "strcat" {
+ var s1: [9:0]u8 = undefined;
+
+ s1[0] = 0;
+ _ = strcat(&s1, "foo");
+ _ = strcat(&s1, "bar");
+ _ = strcat(&s1, "baz");
+ try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.spanZ(&s1));
+}
+
+fn strncat(dest: [*:0]u8, src: [*:0]const u8, avail: usize) callconv(.C) [*:0]u8 {
+ var dest_end: usize = 0;
+ while (dest[dest_end] != 0) : (dest_end += 1) {}
+
+ var i: usize = 0;
+ while (i < avail and src[i] != 0) : (i += 1) {
+ dest[dest_end + i] = src[i];
+ }
+ dest[dest_end + i] = 0;
+
+ return dest;
+}
+
+test "strncat" {
+ var s1: [9:0]u8 = undefined;
+
+ s1[0] = 0;
+ _ = strncat(&s1, "foo1111", 3);
+ _ = strncat(&s1, "bar1111", 3);
+ _ = strncat(&s1, "baz1111", 3);
+ try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.spanZ(&s1));
+}
+
+fn strcmp(s1: [*:0]const u8, s2: [*:0]const u8) callconv(.C) c_int {
+ return std.cstr.cmp(s1, s2);
+}
+
+fn strlen(s: [*:0]const u8) callconv(.C) usize {
+ return std.mem.len(s);
+}
+
+fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.C) c_int {
+ if (_n == 0) return 0;
+ var l = _l;
+ var r = _r;
+ var n = _n - 1;
+ while (l[0] != 0 and r[0] != 0 and n != 0 and l[0] == r[0]) {
+ l += 1;
+ r += 1;
+ n -= 1;
+ }
+ return @as(c_int, l[0]) - @as(c_int, r[0]);
+}
+
+fn strerror(errnum: c_int) callconv(.C) [*:0]const u8 {
+ _ = errnum;
+ return "TODO strerror implementation";
+}
+
+test "strncmp" {
+ try std.testing.expect(strncmp("a", "b", 1) == -1);
+ try std.testing.expect(strncmp("a", "c", 1) == -2);
+ try std.testing.expect(strncmp("b", "a", 1) == 1);
+ try std.testing.expect(strncmp("\xff", "\x02", 1) == 253);
+}
+
+export fn memset(dest: ?[*]u8, c: u8, n: usize) callconv(.C) ?[*]u8 {
+ @setRuntimeSafety(false);
+
+ var index: usize = 0;
+ while (index != n) : (index += 1)
+ dest.?[index] = c;
+
+ return dest;
+}
+
+export fn __memset(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
+ if (dest_n < n)
+ @panic("buffer overflow");
+ return memset(dest, c, n);
+}
+
+export fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
+ @setRuntimeSafety(false);
+
+ var index: usize = 0;
+ while (index != n) : (index += 1)
+ dest.?[index] = src.?[index];
+
+ return dest;
+}
+
+export fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
+ @setRuntimeSafety(false);
+
+ if (@ptrToInt(dest) < @ptrToInt(src)) {
+ var index: usize = 0;
+ while (index != n) : (index += 1) {
+ dest.?[index] = src.?[index];
+ }
+ } else {
+ var index = n;
+ while (index != 0) {
+ index -= 1;
+ dest.?[index] = src.?[index];
+ }
+ }
+
+ return dest;
+}
+
+export fn memcmp(vl: ?[*]const u8, vr: ?[*]const u8, n: usize) callconv(.C) c_int {
+ @setRuntimeSafety(false);
+
+ var index: usize = 0;
+ while (index != n) : (index += 1) {
+ const compare_val = @bitCast(i8, vl.?[index] -% vr.?[index]);
+ if (compare_val != 0) {
+ return compare_val;
+ }
+ }
+
+ return 0;
+}
+
+test "memcmp" {
+ const base_arr = &[_]u8{ 1, 1, 1 };
+ const arr1 = &[_]u8{ 1, 1, 1 };
+ const arr2 = &[_]u8{ 1, 0, 1 };
+ const arr3 = &[_]u8{ 1, 2, 1 };
+
+ try std.testing.expect(memcmp(base_arr[0..], arr1[0..], base_arr.len) == 0);
+ try std.testing.expect(memcmp(base_arr[0..], arr2[0..], base_arr.len) > 0);
+ try std.testing.expect(memcmp(base_arr[0..], arr3[0..], base_arr.len) < 0);
+}
+
+export fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.C) c_int {
+ @setRuntimeSafety(false);
+
+ var index: usize = 0;
+ while (index != n) : (index += 1) {
+ if (vl[index] != vr[index]) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+test "bcmp" {
+ const base_arr = &[_]u8{ 1, 1, 1 };
+ const arr1 = &[_]u8{ 1, 1, 1 };
+ const arr2 = &[_]u8{ 1, 0, 1 };
+ const arr3 = &[_]u8{ 1, 2, 1 };
+
+ try std.testing.expect(bcmp(base_arr[0..], arr1[0..], base_arr.len) == 0);
+ try std.testing.expect(bcmp(base_arr[0..], arr2[0..], base_arr.len) != 0);
+ try std.testing.expect(bcmp(base_arr[0..], arr3[0..], base_arr.len) != 0);
+}
+
+comptime {
+ if (native_os == .linux) {
+ @export(clone, .{ .name = "clone" });
+ }
+}
+
+// TODO we should be able to put this directly in std/linux/x86_64.zig but
+// it causes a segfault in release mode. As a workaround, we call it across
+// .o file boundaries. Fix comptime @ptrCast of callconv(.Naked) functions.
+fn clone() callconv(.Naked) void {
+ switch (native_arch) {
+ .i386 => {
+ // __clone(func, stack, flags, arg, ptid, tls, ctid)
+ // +8, +12, +16, +20, +24, +28, +32
+ // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
+ // eax, ebx, ecx, edx, esi, edi
+ asm volatile (
+ \\ push %%ebp
+ \\ mov %%esp,%%ebp
+ \\ push %%ebx
+ \\ push %%esi
+ \\ push %%edi
+ \\ // Setup the arguments
+ \\ mov 16(%%ebp),%%ebx
+ \\ mov 12(%%ebp),%%ecx
+ \\ and $-16,%%ecx
+ \\ sub $20,%%ecx
+ \\ mov 20(%%ebp),%%eax
+ \\ mov %%eax,4(%%ecx)
+ \\ mov 8(%%ebp),%%eax
+ \\ mov %%eax,0(%%ecx)
+ \\ mov 24(%%ebp),%%edx
+ \\ mov 28(%%ebp),%%esi
+ \\ mov 32(%%ebp),%%edi
+ \\ mov $120,%%eax
+ \\ int $128
+ \\ test %%eax,%%eax
+ \\ jnz 1f
+ \\ pop %%eax
+ \\ xor %%ebp,%%ebp
+ \\ call *%%eax
+ \\ mov %%eax,%%ebx
+ \\ xor %%eax,%%eax
+ \\ inc %%eax
+ \\ int $128
+ \\ hlt
+ \\1:
+ \\ pop %%edi
+ \\ pop %%esi
+ \\ pop %%ebx
+ \\ pop %%ebp
+ \\ ret
+ );
+ },
+ .x86_64 => {
+ asm volatile (
+ \\ xor %%eax,%%eax
+ \\ mov $56,%%al // SYS_clone
+ \\ mov %%rdi,%%r11
+ \\ mov %%rdx,%%rdi
+ \\ mov %%r8,%%rdx
+ \\ mov %%r9,%%r8
+ \\ mov 8(%%rsp),%%r10
+ \\ mov %%r11,%%r9
+ \\ and $-16,%%rsi
+ \\ sub $8,%%rsi
+ \\ mov %%rcx,(%%rsi)
+ \\ syscall
+ \\ test %%eax,%%eax
+ \\ jnz 1f
+ \\ xor %%ebp,%%ebp
+ \\ pop %%rdi
+ \\ call *%%r9
+ \\ mov %%eax,%%edi
+ \\ xor %%eax,%%eax
+ \\ mov $60,%%al // SYS_exit
+ \\ syscall
+ \\ hlt
+ \\1: ret
+ \\
+ );
+ },
+ .aarch64 => {
+ // __clone(func, stack, flags, arg, ptid, tls, ctid)
+ // x0, x1, w2, x3, x4, x5, x6
+
+ // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
+ // x8, x0, x1, x2, x3, x4
+ asm volatile (
+ \\ // align stack and save func,arg
+ \\ and x1,x1,#-16
+ \\ stp x0,x3,[x1,#-16]!
+ \\
+ \\ // syscall
+ \\ uxtw x0,w2
+ \\ mov x2,x4
+ \\ mov x3,x5
+ \\ mov x4,x6
+ \\ mov x8,#220 // SYS_clone
+ \\ svc #0
+ \\
+ \\ cbz x0,1f
+ \\ // parent
+ \\ ret
+ \\ // child
+ \\1: ldp x1,x0,[sp],#16
+ \\ blr x1
+ \\ mov x8,#93 // SYS_exit
+ \\ svc #0
+ );
+ },
+ .arm, .thumb => {
+ // __clone(func, stack, flags, arg, ptid, tls, ctid)
+ // r0, r1, r2, r3, +0, +4, +8
+
+ // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
+ // r7 r0, r1, r2, r3, r4
+ asm volatile (
+ \\ stmfd sp!,{r4,r5,r6,r7}
+ \\ mov r7,#120
+ \\ mov r6,r3
+ \\ mov r5,r0
+ \\ mov r0,r2
+ \\ and r1,r1,#-16
+ \\ ldr r2,[sp,#16]
+ \\ ldr r3,[sp,#20]
+ \\ ldr r4,[sp,#24]
+ \\ svc 0
+ \\ tst r0,r0
+ \\ beq 1f
+ \\ ldmfd sp!,{r4,r5,r6,r7}
+ \\ bx lr
+ \\
+ \\1: mov r0,r6
+ \\ bl 3f
+ \\2: mov r7,#1
+ \\ svc 0
+ \\ b 2b
+ \\3: bx r5
+ );
+ },
+ .riscv64 => {
+ // __clone(func, stack, flags, arg, ptid, tls, ctid)
+ // a0, a1, a2, a3, a4, a5, a6
+
+ // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
+ // a7 a0, a1, a2, a3, a4
+ asm volatile (
+ \\ # Save func and arg to stack
+ \\ addi a1, a1, -16
+ \\ sd a0, 0(a1)
+ \\ sd a3, 8(a1)
+ \\
+ \\ # Call SYS_clone
+ \\ mv a0, a2
+ \\ mv a2, a4
+ \\ mv a3, a5
+ \\ mv a4, a6
+ \\ li a7, 220 # SYS_clone
+ \\ ecall
+ \\
+ \\ beqz a0, 1f
+ \\ # Parent
+ \\ ret
+ \\
+ \\ # Child
+ \\1: ld a1, 0(sp)
+ \\ ld a0, 8(sp)
+ \\ jalr a1
+ \\
+ \\ # Exit
+ \\ li a7, 93 # SYS_exit
+ \\ ecall
+ );
+ },
+ .mips, .mipsel => {
+ // __clone(func, stack, flags, arg, ptid, tls, ctid)
+ // 3, 4, 5, 6, 7, 8, 9
+
+ // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
+ // 2 4, 5, 6, 7, 8
+ asm volatile (
+ \\ # Save function pointer and argument pointer on new thread stack
+ \\ and $5, $5, -8
+ \\ subu $5, $5, 16
+ \\ sw $4, 0($5)
+ \\ sw $7, 4($5)
+ \\ # Shuffle (fn,sp,fl,arg,ptid,tls,ctid) to (fl,sp,ptid,tls,ctid)
+ \\ move $4, $6
+ \\ lw $6, 16($sp)
+ \\ lw $7, 20($sp)
+ \\ lw $9, 24($sp)
+ \\ subu $sp, $sp, 16
+ \\ sw $9, 16($sp)
+ \\ li $2, 4120
+ \\ syscall
+ \\ beq $7, $0, 1f
+ \\ nop
+ \\ addu $sp, $sp, 16
+ \\ jr $ra
+ \\ subu $2, $0, $2
+ \\1:
+ \\ beq $2, $0, 1f
+ \\ nop
+ \\ addu $sp, $sp, 16
+ \\ jr $ra
+ \\ nop
+ \\1:
+ \\ lw $25, 0($sp)
+ \\ lw $4, 4($sp)
+ \\ jalr $25
+ \\ nop
+ \\ move $4, $2
+ \\ li $2, 4001
+ \\ syscall
+ );
+ },
+ .powerpc => {
+ // __clone(func, stack, flags, arg, ptid, tls, ctid)
+ // 3, 4, 5, 6, 7, 8, 9
+
+ // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
+ // 0 3, 4, 5, 6, 7
+ asm volatile (
+ \\# store non-volatile regs r30, r31 on stack in order to put our
+ \\# start func and its arg there
+ \\stwu 30, -16(1)
+ \\stw 31, 4(1)
+ \\
+ \\# save r3 (func) into r30, and r6(arg) into r31
+ \\mr 30, 3
+ \\mr 31, 6
+ \\
+ \\# create initial stack frame for new thread
+ \\clrrwi 4, 4, 4
+ \\li 0, 0
+ \\stwu 0, -16(4)
+ \\
+ \\#move c into first arg
+ \\mr 3, 5
+ \\#mr 4, 4
+ \\mr 5, 7
+ \\mr 6, 8
+ \\mr 7, 9
+ \\
+ \\# move syscall number into r0
+ \\li 0, 120
+ \\
+ \\sc
+ \\
+ \\# check for syscall error
+ \\bns+ 1f # jump to label 1 if no summary overflow.
+ \\#else
+ \\neg 3, 3 #negate the result (errno)
+ \\1:
+ \\# compare sc result with 0
+ \\cmpwi cr7, 3, 0
+ \\
+ \\# if not 0, jump to end
+ \\bne cr7, 2f
+ \\
+ \\#else: we're the child
+ \\#call funcptr: move arg (d) into r3
+ \\mr 3, 31
+ \\#move r30 (funcptr) into CTR reg
+ \\mtctr 30
+ \\# call CTR reg
+ \\bctrl
+ \\# mov SYS_exit into r0 (the exit param is already in r3)
+ \\li 0, 1
+ \\sc
+ \\
+ \\2:
+ \\
+ \\# restore stack
+ \\lwz 30, 0(1)
+ \\lwz 31, 4(1)
+ \\addi 1, 1, 16
+ \\
+ \\blr
+ );
+ },
+ .powerpc64, .powerpc64le => {
+ // __clone(func, stack, flags, arg, ptid, tls, ctid)
+ // 3, 4, 5, 6, 7, 8, 9
+
+ // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
+ // 0 3, 4, 5, 6, 7
+ asm volatile (
+ \\ # create initial stack frame for new thread
+ \\ clrrdi 4, 4, 4
+ \\ li 0, 0
+ \\ stdu 0,-32(4)
+ \\
+ \\ # save fn and arg to child stack
+ \\ std 3, 8(4)
+ \\ std 6, 16(4)
+ \\
+ \\ # shuffle args into correct registers and call SYS_clone
+ \\ mr 3, 5
+ \\ #mr 4, 4
+ \\ mr 5, 7
+ \\ mr 6, 8
+ \\ mr 7, 9
+ \\ li 0, 120 # SYS_clone = 120
+ \\ sc
+ \\
+ \\ # if error, negate return (errno)
+ \\ bns+ 1f
+ \\ neg 3, 3
+ \\
+ \\1:
+ \\ # if we're the parent, return
+ \\ cmpwi cr7, 3, 0
+ \\ bnelr cr7
+ \\
+ \\ # we're the child. call fn(arg)
+ \\ ld 3, 16(1)
+ \\ ld 12, 8(1)
+ \\ mtctr 12
+ \\ bctrl
+ \\
+ \\ # call SYS_exit. exit code is already in r3 from fn return value
+ \\ li 0, 1 # SYS_exit = 1
+ \\ sc
+ );
+ },
+ .sparcv9 => {
+ // __clone(func, stack, flags, arg, ptid, tls, ctid)
+ // i0, i1, i2, i3, i4, i5, sp
+ // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
+ // g1 o0, o1, o2, o3, o4
+ asm volatile (
+ \\ save %%sp, -192, %%sp
+ \\ # Save the func pointer and the arg pointer
+ \\ mov %%i0, %%g2
+ \\ mov %%i3, %%g3
+ \\ # Shuffle the arguments
+ \\ mov 217, %%g1
+ \\ mov %%i2, %%o0
+ \\ # Add some extra space for the initial frame
+ \\ sub %%i1, 176 + 2047, %%o1
+ \\ mov %%i4, %%o2
+ \\ mov %%i5, %%o3
+ \\ ldx [%%fp + 0x8af], %%o4
+ \\ t 0x6d
+ \\ bcs,pn %%xcc, 2f
+ \\ nop
+ \\ # The child pid is returned in o0 while o1 tells if this
+                \\ # process is the child (=1) or the parent (=0).
+ \\ brnz %%o1, 1f
+ \\ nop
+ \\ # Parent process, return the child pid
+ \\ mov %%o0, %%i0
+ \\ ret
+ \\ restore
+ \\1:
+ \\ # Child process, call func(arg)
+ \\ mov %%g0, %%fp
+ \\ call %%g2
+ \\ mov %%g3, %%o0
+ \\ # Exit
+ \\ mov 1, %%g1
+ \\ t 0x6d
+ \\2:
+ \\ # The syscall failed
+ \\ sub %%g0, %%o0, %%i0
+ \\ ret
+ \\ restore
+ );
+ },
+ else => @compileError("Implement clone() for this arch."),
+ }
+}
+
+const math = std.math;
+
+export fn fmodf(x: f32, y: f32) f32 {
+ return generic_fmod(f32, x, y);
+}
+export fn fmod(x: f64, y: f64) f64 {
+ return generic_fmod(f64, x, y);
+}
+
+// TODO add intrinsics for these (and probably the double version too)
+// and have the math stuff use the intrinsic. same as @mod and @rem
+export fn floorf(x: f32) f32 {
+ return math.floor(x);
+}
+
+export fn ceilf(x: f32) f32 {
+ return math.ceil(x);
+}
+
+export fn floor(x: f64) f64 {
+ return math.floor(x);
+}
+
+export fn ceil(x: f64) f64 {
+ return math.ceil(x);
+}
+
+export fn fma(a: f64, b: f64, c: f64) f64 {
+ return math.fma(f64, a, b, c);
+}
+
+export fn fmaf(a: f32, b: f32, c: f32) f32 {
+ return math.fma(f32, a, b, c);
+}
+
+export fn sin(a: f64) f64 {
+ return math.sin(a);
+}
+
+export fn sinf(a: f32) f32 {
+ return math.sin(a);
+}
+
+export fn cos(a: f64) f64 {
+ return math.cos(a);
+}
+
+export fn cosf(a: f32) f32 {
+ return math.cos(a);
+}
+
+export fn sincos(a: f64, r_sin: *f64, r_cos: *f64) void {
+ r_sin.* = math.sin(a);
+ r_cos.* = math.cos(a);
+}
+
+export fn sincosf(a: f32, r_sin: *f32, r_cos: *f32) void {
+ r_sin.* = math.sin(a);
+ r_cos.* = math.cos(a);
+}
+
+export fn exp(a: f64) f64 {
+ return math.exp(a);
+}
+
+export fn expf(a: f32) f32 {
+ return math.exp(a);
+}
+
+export fn exp2(a: f64) f64 {
+ return math.exp2(a);
+}
+
+export fn exp2f(a: f32) f32 {
+ return math.exp2(a);
+}
+
+export fn log(a: f64) f64 {
+ return math.ln(a);
+}
+
+export fn logf(a: f32) f32 {
+ return math.ln(a);
+}
+
+export fn log2(a: f64) f64 {
+ return math.log2(a);
+}
+
+export fn log2f(a: f32) f32 {
+ return math.log2(a);
+}
+
+export fn log10(a: f64) f64 {
+ return math.log10(a);
+}
+
+export fn log10f(a: f32) f32 {
+ return math.log10(a);
+}
+
+export fn fabs(a: f64) f64 {
+ return math.fabs(a);
+}
+
+export fn fabsf(a: f32) f32 {
+ return math.fabs(a);
+}
+
+export fn trunc(a: f64) f64 {
+ return math.trunc(a);
+}
+
+export fn truncf(a: f32) f32 {
+ return math.trunc(a);
+}
+
+export fn round(a: f64) f64 {
+ return math.round(a);
+}
+
+export fn roundf(a: f32) f32 {
+ return math.round(a);
+}
+
+fn generic_fmod(comptime T: type, x: T, y: T) T {
+ @setRuntimeSafety(false);
+
+ const bits = @typeInfo(T).Float.bits;
+ const uint = std.meta.Int(.unsigned, bits);
+ const log2uint = math.Log2Int(uint);
+ const digits = if (T == f32) 23 else 52;
+ const exp_bits = if (T == f32) 9 else 12;
+ const bits_minus_1 = bits - 1;
+ const mask = if (T == f32) 0xff else 0x7ff;
+ var ux = @bitCast(uint, x);
+ var uy = @bitCast(uint, y);
+ var ex = @intCast(i32, (ux >> digits) & mask);
+ var ey = @intCast(i32, (uy >> digits) & mask);
+ const sx = if (T == f32) @intCast(u32, ux & 0x80000000) else @intCast(i32, ux >> bits_minus_1);
+ var i: uint = undefined;
+
+ if (uy << 1 == 0 or isNan(@bitCast(T, uy)) or ex == mask)
+ return (x * y) / (x * y);
+
+ if (ux << 1 <= uy << 1) {
+ if (ux << 1 == uy << 1)
+ return 0 * x;
+ return x;
+ }
+
+ // normalize x and y
+ if (ex == 0) {
+ i = ux << exp_bits;
+ while (i >> bits_minus_1 == 0) : ({
+ ex -= 1;
+ i <<= 1;
+ }) {}
+ ux <<= @intCast(log2uint, @bitCast(u32, -ex + 1));
+ } else {
+ ux &= maxInt(uint) >> exp_bits;
+ ux |= 1 << digits;
+ }
+ if (ey == 0) {
+ i = uy << exp_bits;
+ while (i >> bits_minus_1 == 0) : ({
+ ey -= 1;
+ i <<= 1;
+ }) {}
+ uy <<= @intCast(log2uint, @bitCast(u32, -ey + 1));
+ } else {
+ uy &= maxInt(uint) >> exp_bits;
+ uy |= 1 << digits;
+ }
+
+ // x mod y
+ while (ex > ey) : (ex -= 1) {
+ i = ux -% uy;
+ if (i >> bits_minus_1 == 0) {
+ if (i == 0)
+ return 0 * x;
+ ux = i;
+ }
+ ux <<= 1;
+ }
+ i = ux -% uy;
+ if (i >> bits_minus_1 == 0) {
+ if (i == 0)
+ return 0 * x;
+ ux = i;
+ }
+ while (ux >> digits == 0) : ({
+ ux <<= 1;
+ ex -= 1;
+ }) {}
+
+ // scale result up
+ if (ex > 0) {
+ ux -%= 1 << digits;
+ ux |= @as(uint, @bitCast(u32, ex)) << digits;
+ } else {
+ ux >>= @intCast(log2uint, @bitCast(u32, -ex + 1));
+ }
+ if (T == f32) {
+ ux |= sx;
+ } else {
+ ux |= @intCast(uint, sx) << bits_minus_1;
+ }
+ return @bitCast(T, ux);
+}
+
+test "fmod, fmodf" {
+ inline for ([_]type{ f32, f64 }) |T| {
+ const nan_val = math.nan(T);
+ const inf_val = math.inf(T);
+
+ try std.testing.expect(isNan(generic_fmod(T, nan_val, 1.0)));
+ try std.testing.expect(isNan(generic_fmod(T, 1.0, nan_val)));
+ try std.testing.expect(isNan(generic_fmod(T, inf_val, 1.0)));
+ try std.testing.expect(isNan(generic_fmod(T, 0.0, 0.0)));
+ try std.testing.expect(isNan(generic_fmod(T, 1.0, 0.0)));
+
+ try std.testing.expectEqual(@as(T, 0.0), generic_fmod(T, 0.0, 2.0));
+ try std.testing.expectEqual(@as(T, -0.0), generic_fmod(T, -0.0, 2.0));
+
+ try std.testing.expectEqual(@as(T, -2.0), generic_fmod(T, -32.0, 10.0));
+ try std.testing.expectEqual(@as(T, -2.0), generic_fmod(T, -32.0, -10.0));
+ try std.testing.expectEqual(@as(T, 2.0), generic_fmod(T, 32.0, 10.0));
+ try std.testing.expectEqual(@as(T, 2.0), generic_fmod(T, 32.0, -10.0));
+ }
+}
+
+fn generic_fmin(comptime T: type, x: T, y: T) T {
+ if (isNan(x))
+ return y;
+ if (isNan(y))
+ return x;
+ return if (x < y) x else y;
+}
+
+export fn fminf(x: f32, y: f32) callconv(.C) f32 {
+ return generic_fmin(f32, x, y);
+}
+
+export fn fmin(x: f64, y: f64) callconv(.C) f64 {
+ return generic_fmin(f64, x, y);
+}
+
+test "fmin, fminf" {
+ inline for ([_]type{ f32, f64 }) |T| {
+ const nan_val = math.nan(T);
+
+ try std.testing.expect(isNan(generic_fmin(T, nan_val, nan_val)));
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, nan_val, 1.0));
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, 1.0, nan_val));
+
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, 1.0, 10.0));
+ try std.testing.expectEqual(@as(T, -1.0), generic_fmin(T, 1.0, -1.0));
+ }
+}
+
+fn generic_fmax(comptime T: type, x: T, y: T) T {
+ if (isNan(x))
+ return y;
+ if (isNan(y))
+ return x;
+ return if (x < y) y else x;
+}
+
+export fn fmaxf(x: f32, y: f32) callconv(.C) f32 {
+ return generic_fmax(f32, x, y);
+}
+
+export fn fmax(x: f64, y: f64) callconv(.C) f64 {
+ return generic_fmax(f64, x, y);
+}
+
+test "fmax, fmaxf" {
+ inline for ([_]type{ f32, f64 }) |T| {
+ const nan_val = math.nan(T);
+
+ try std.testing.expect(isNan(generic_fmax(T, nan_val, nan_val)));
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, nan_val, 1.0));
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, 1.0, nan_val));
+
+ try std.testing.expectEqual(@as(T, 10.0), generic_fmax(T, 1.0, 10.0));
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, 1.0, -1.0));
+ }
+}
+
+// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound
+// behaviour. Most intermediate i32 values are changed to u32 where appropriate but there are
+// potentially some edge cases remaining that are not handled in the same way.
+export fn sqrt(x: f64) f64 {
+ const tiny: f64 = 1.0e-300;
+ const sign: u32 = 0x80000000;
+ const u = @bitCast(u64, x);
+
+ var ix0 = @intCast(u32, u >> 32);
+ var ix1 = @intCast(u32, u & 0xFFFFFFFF);
+
+ // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = nan
+ if (ix0 & 0x7FF00000 == 0x7FF00000) {
+ return x * x + x;
+ }
+
+ // sqrt(+-0) = +-0
+ if (x == 0.0) {
+ return x;
+ }
+ // sqrt(-ve) = snan
+ if (ix0 & sign != 0) {
+ return math.snan(f64);
+ }
+
+ // normalize x
+ var m = @intCast(i32, ix0 >> 20);
+ if (m == 0) {
+ // subnormal
+ while (ix0 == 0) {
+ m -= 21;
+ ix0 |= ix1 >> 11;
+ ix1 <<= 21;
+ }
+
+ // subnormal
+ var i: u32 = 0;
+ while (ix0 & 0x00100000 == 0) : (i += 1) {
+ ix0 <<= 1;
+ }
+ m -= @intCast(i32, i) - 1;
+ ix0 |= ix1 >> @intCast(u5, 32 - i);
+ ix1 <<= @intCast(u5, i);
+ }
+
+ // unbias exponent
+ m -= 1023;
+ ix0 = (ix0 & 0x000FFFFF) | 0x00100000;
+ if (m & 1 != 0) {
+ ix0 += ix0 + (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+ }
+ m >>= 1;
+
+ // sqrt(x) bit by bit
+ ix0 += ix0 + (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+
+ var q: u32 = 0;
+ var q1: u32 = 0;
+ var s0: u32 = 0;
+ var s1: u32 = 0;
+ var r: u32 = 0x00200000;
+ var t: u32 = undefined;
+ var t1: u32 = undefined;
+
+ while (r != 0) {
+ t = s0 +% r;
+ if (t <= ix0) {
+ s0 = t + r;
+ ix0 -= t;
+ q += r;
+ }
+ ix0 = ix0 +% ix0 +% (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+ r >>= 1;
+ }
+
+ r = sign;
+ while (r != 0) {
+ t1 = s1 +% r;
+ t = s0;
+ if (t < ix0 or (t == ix0 and t1 <= ix1)) {
+ s1 = t1 +% r;
+ if (t1 & sign == sign and s1 & sign == 0) {
+ s0 += 1;
+ }
+ ix0 -= t;
+ if (ix1 < t1) {
+ ix0 -= 1;
+ }
+ ix1 = ix1 -% t1;
+ q1 += r;
+ }
+ ix0 = ix0 +% ix0 +% (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+ r >>= 1;
+ }
+
+ // rounding direction
+ if (ix0 | ix1 != 0) {
+ var z = 1.0 - tiny; // raise inexact
+ if (z >= 1.0) {
+ z = 1.0 + tiny;
+ if (q1 == 0xFFFFFFFF) {
+ q1 = 0;
+ q += 1;
+ } else if (z > 1.0) {
+ if (q1 == 0xFFFFFFFE) {
+ q += 1;
+ }
+ q1 += 2;
+ } else {
+ q1 += q1 & 1;
+ }
+ }
+ }
+
+ ix0 = (q >> 1) + 0x3FE00000;
+ ix1 = q1 >> 1;
+ if (q & 1 != 0) {
+ ix1 |= 0x80000000;
+ }
+
+ // NOTE: musl here appears to rely on signed twos-complement wraparound. +% has the same
+ // behaviour at least.
+ var iix0 = @intCast(i32, ix0);
+ iix0 = iix0 +% (m << 20);
+
+ const uz = (@intCast(u64, iix0) << 32) | ix1;
+ return @bitCast(f64, uz);
+}
+
+test "sqrt" {
+ const V = [_]f64{
+ 0.0,
+ 4.089288054930154,
+ 7.538757127071935,
+ 8.97780793672623,
+ 5.304443821913729,
+ 5.682408965311888,
+ 0.5846878579110049,
+ 3.650338664297043,
+ 0.3178091951800732,
+ 7.1505232436382835,
+ 3.6589165881946464,
+ };
+
+ // Note that @sqrt will either generate the sqrt opcode (if supported by the
+    // target ISA) or a call to `sqrt` otherwise.
+ for (V) |val|
+ try std.testing.expectEqual(@sqrt(val), sqrt(val));
+}
+
+test "sqrt special" {
+ try std.testing.expect(std.math.isPositiveInf(sqrt(std.math.inf(f64))));
+ try std.testing.expect(sqrt(0.0) == 0.0);
+ try std.testing.expect(sqrt(-0.0) == -0.0);
+ try std.testing.expect(isNan(sqrt(-1.0)));
+ try std.testing.expect(isNan(sqrt(std.math.nan(f64))));
+}
+
+export fn sqrtf(x: f32) f32 {
+ const tiny: f32 = 1.0e-30;
+ const sign: i32 = @bitCast(i32, @as(u32, 0x80000000));
+ var ix: i32 = @bitCast(i32, x);
+
+ if ((ix & 0x7F800000) == 0x7F800000) {
+ return x * x + x; // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = snan
+ }
+
+ // zero
+ if (ix <= 0) {
+ if (ix & ~sign == 0) {
+ return x; // sqrt (+-0) = +-0
+ }
+ if (ix < 0) {
+ return math.snan(f32);
+ }
+ }
+
+ // normalize
+ var m = ix >> 23;
+ if (m == 0) {
+ // subnormal
+ var i: i32 = 0;
+ while (ix & 0x00800000 == 0) : (i += 1) {
+ ix <<= 1;
+ }
+ m -= i - 1;
+ }
+
+ m -= 127; // unbias exponent
+ ix = (ix & 0x007FFFFF) | 0x00800000;
+
+ if (m & 1 != 0) { // odd m, double x to even
+ ix += ix;
+ }
+
+ m >>= 1; // m = [m / 2]
+
+ // sqrt(x) bit by bit
+ ix += ix;
+ var q: i32 = 0; // q = sqrt(x)
+ var s: i32 = 0;
+ var r: i32 = 0x01000000; // r = moving bit right -> left
+
+ while (r != 0) {
+ const t = s + r;
+ if (t <= ix) {
+ s = t + r;
+ ix -= t;
+ q += r;
+ }
+ ix += ix;
+ r >>= 1;
+ }
+
+ // floating add to find rounding direction
+ if (ix != 0) {
+ var z = 1.0 - tiny; // inexact
+ if (z >= 1.0) {
+ z = 1.0 + tiny;
+ if (z > 1.0) {
+ q += 2;
+ } else {
+ if (q & 1 != 0) {
+ q += 1;
+ }
+ }
+ }
+ }
+
+ ix = (q >> 1) + 0x3f000000;
+ ix += m << 23;
+ return @bitCast(f32, ix);
+}
+
+test "sqrtf" {
+ const V = [_]f32{
+ 0.0,
+ 4.089288054930154,
+ 7.538757127071935,
+ 8.97780793672623,
+ 5.304443821913729,
+ 5.682408965311888,
+ 0.5846878579110049,
+ 3.650338664297043,
+ 0.3178091951800732,
+ 7.1505232436382835,
+ 3.6589165881946464,
+ };
+
+ // Note that @sqrt will either generate the sqrt opcode (if supported by the
+ // target ISA) or a call to `sqrtf` otherwise.
+ for (V) |val|
+ try std.testing.expectEqual(@sqrt(val), sqrtf(val));
+}
+
+test "sqrtf special" {
+ try std.testing.expect(std.math.isPositiveInf(sqrtf(std.math.inf(f32))));
+ try std.testing.expect(sqrtf(0.0) == 0.0);
+ try std.testing.expect(sqrtf(-0.0) == -0.0);
+ try std.testing.expect(isNan(sqrtf(-1.0)));
+ try std.testing.expect(isNan(sqrtf(std.math.nan(f32))));
+}
diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig
index ed7f9d0c1c..2fb68f85dc 100644
--- a/lib/std/special/compiler_rt.zig
+++ b/lib/std/special/compiler_rt.zig
@@ -1,171 +1,24 @@
const std = @import("std");
-const builtin = std.builtin;
+const builtin = @import("builtin");
const is_test = builtin.is_test;
const os_tag = std.Target.current.os.tag;
-const arch = std.Target.current.cpu.arch;
+const arch = builtin.stage2_arch;
const abi = std.Target.current.abi;
const is_gnu = abi.isGnu();
const is_mingw = os_tag == .windows and is_gnu;
+const linkage = if (is_test)
+ std.builtin.GlobalLinkage.Internal
+else
+ std.builtin.GlobalLinkage.Weak;
+
+const strong_linkage = if (is_test)
+ std.builtin.GlobalLinkage.Internal
+else
+ std.builtin.GlobalLinkage.Strong;
+
comptime {
- const linkage = if (is_test) builtin.GlobalLinkage.Internal else builtin.GlobalLinkage.Weak;
- const strong_linkage = if (is_test) builtin.GlobalLinkage.Internal else builtin.GlobalLinkage.Strong;
-
- switch (arch) {
- .i386,
- .x86_64,
- => {
- const zig_probe_stack = @import("compiler_rt/stack_probe.zig").zig_probe_stack;
- @export(zig_probe_stack, .{
- .name = "__zig_probe_stack",
- .linkage = linkage,
- });
- },
-
- else => {},
- }
-
- // __clear_cache manages its own logic about whether to be exported or not.
- _ = @import("compiler_rt/clear_cache.zig").clear_cache;
-
- const __lesf2 = @import("compiler_rt/compareXf2.zig").__lesf2;
- @export(__lesf2, .{ .name = "__lesf2", .linkage = linkage });
- const __ledf2 = @import("compiler_rt/compareXf2.zig").__ledf2;
- @export(__ledf2, .{ .name = "__ledf2", .linkage = linkage });
- const __letf2 = @import("compiler_rt/compareXf2.zig").__letf2;
- @export(__letf2, .{ .name = "__letf2", .linkage = linkage });
-
- const __gesf2 = @import("compiler_rt/compareXf2.zig").__gesf2;
- @export(__gesf2, .{ .name = "__gesf2", .linkage = linkage });
- const __gedf2 = @import("compiler_rt/compareXf2.zig").__gedf2;
- @export(__gedf2, .{ .name = "__gedf2", .linkage = linkage });
- const __getf2 = @import("compiler_rt/compareXf2.zig").__getf2;
- @export(__getf2, .{ .name = "__getf2", .linkage = linkage });
-
- if (!is_test) {
- @export(__lesf2, .{ .name = "__cmpsf2", .linkage = linkage });
- @export(__ledf2, .{ .name = "__cmpdf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__cmptf2", .linkage = linkage });
-
- const __eqsf2 = @import("compiler_rt/compareXf2.zig").__eqsf2;
- @export(__eqsf2, .{ .name = "__eqsf2", .linkage = linkage });
- const __eqdf2 = @import("compiler_rt/compareXf2.zig").__eqdf2;
- @export(__eqdf2, .{ .name = "__eqdf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__eqtf2", .linkage = linkage });
-
- const __ltsf2 = @import("compiler_rt/compareXf2.zig").__ltsf2;
- @export(__ltsf2, .{ .name = "__ltsf2", .linkage = linkage });
- const __ltdf2 = @import("compiler_rt/compareXf2.zig").__ltdf2;
- @export(__ltdf2, .{ .name = "__ltdf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__lttf2", .linkage = linkage });
-
- const __nesf2 = @import("compiler_rt/compareXf2.zig").__nesf2;
- @export(__nesf2, .{ .name = "__nesf2", .linkage = linkage });
- const __nedf2 = @import("compiler_rt/compareXf2.zig").__nedf2;
- @export(__nedf2, .{ .name = "__nedf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__netf2", .linkage = linkage });
-
- const __gtsf2 = @import("compiler_rt/compareXf2.zig").__gtsf2;
- @export(__gtsf2, .{ .name = "__gtsf2", .linkage = linkage });
- const __gtdf2 = @import("compiler_rt/compareXf2.zig").__gtdf2;
- @export(__gtdf2, .{ .name = "__gtdf2", .linkage = linkage });
- @export(__getf2, .{ .name = "__gttf2", .linkage = linkage });
-
- const __extendhfsf2 = @import("compiler_rt/extendXfYf2.zig").__extendhfsf2;
- @export(__extendhfsf2, .{ .name = "__gnu_h2f_ieee", .linkage = linkage });
- const __truncsfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncsfhf2;
- @export(__truncsfhf2, .{ .name = "__gnu_f2h_ieee", .linkage = linkage });
- }
-
- const __unordsf2 = @import("compiler_rt/compareXf2.zig").__unordsf2;
- @export(__unordsf2, .{ .name = "__unordsf2", .linkage = linkage });
- const __unorddf2 = @import("compiler_rt/compareXf2.zig").__unorddf2;
- @export(__unorddf2, .{ .name = "__unorddf2", .linkage = linkage });
- const __unordtf2 = @import("compiler_rt/compareXf2.zig").__unordtf2;
- @export(__unordtf2, .{ .name = "__unordtf2", .linkage = linkage });
-
- const __addsf3 = @import("compiler_rt/addXf3.zig").__addsf3;
- @export(__addsf3, .{ .name = "__addsf3", .linkage = linkage });
- const __adddf3 = @import("compiler_rt/addXf3.zig").__adddf3;
- @export(__adddf3, .{ .name = "__adddf3", .linkage = linkage });
- const __addtf3 = @import("compiler_rt/addXf3.zig").__addtf3;
- @export(__addtf3, .{ .name = "__addtf3", .linkage = linkage });
- const __subsf3 = @import("compiler_rt/addXf3.zig").__subsf3;
- @export(__subsf3, .{ .name = "__subsf3", .linkage = linkage });
- const __subdf3 = @import("compiler_rt/addXf3.zig").__subdf3;
- @export(__subdf3, .{ .name = "__subdf3", .linkage = linkage });
- const __subtf3 = @import("compiler_rt/addXf3.zig").__subtf3;
- @export(__subtf3, .{ .name = "__subtf3", .linkage = linkage });
-
- const __mulsf3 = @import("compiler_rt/mulXf3.zig").__mulsf3;
- @export(__mulsf3, .{ .name = "__mulsf3", .linkage = linkage });
- const __muldf3 = @import("compiler_rt/mulXf3.zig").__muldf3;
- @export(__muldf3, .{ .name = "__muldf3", .linkage = linkage });
- const __multf3 = @import("compiler_rt/mulXf3.zig").__multf3;
- @export(__multf3, .{ .name = "__multf3", .linkage = linkage });
-
- const __divsf3 = @import("compiler_rt/divsf3.zig").__divsf3;
- @export(__divsf3, .{ .name = "__divsf3", .linkage = linkage });
- const __divdf3 = @import("compiler_rt/divdf3.zig").__divdf3;
- @export(__divdf3, .{ .name = "__divdf3", .linkage = linkage });
- const __divtf3 = @import("compiler_rt/divtf3.zig").__divtf3;
- @export(__divtf3, .{ .name = "__divtf3", .linkage = linkage });
-
- const __ashldi3 = @import("compiler_rt/shift.zig").__ashldi3;
- @export(__ashldi3, .{ .name = "__ashldi3", .linkage = linkage });
- const __ashlti3 = @import("compiler_rt/shift.zig").__ashlti3;
- @export(__ashlti3, .{ .name = "__ashlti3", .linkage = linkage });
- const __ashrdi3 = @import("compiler_rt/shift.zig").__ashrdi3;
- @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = linkage });
- const __ashrti3 = @import("compiler_rt/shift.zig").__ashrti3;
- @export(__ashrti3, .{ .name = "__ashrti3", .linkage = linkage });
- const __lshrdi3 = @import("compiler_rt/shift.zig").__lshrdi3;
- @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = linkage });
- const __lshrti3 = @import("compiler_rt/shift.zig").__lshrti3;
- @export(__lshrti3, .{ .name = "__lshrti3", .linkage = linkage });
-
- const __floatsidf = @import("compiler_rt/floatsiXf.zig").__floatsidf;
- @export(__floatsidf, .{ .name = "__floatsidf", .linkage = linkage });
- const __floatsisf = @import("compiler_rt/floatsiXf.zig").__floatsisf;
- @export(__floatsisf, .{ .name = "__floatsisf", .linkage = linkage });
- const __floatdidf = @import("compiler_rt/floatdidf.zig").__floatdidf;
- @export(__floatdidf, .{ .name = "__floatdidf", .linkage = linkage });
- const __floatsitf = @import("compiler_rt/floatsiXf.zig").__floatsitf;
- @export(__floatsitf, .{ .name = "__floatsitf", .linkage = linkage });
-
- const __floatunsisf = @import("compiler_rt/floatunsisf.zig").__floatunsisf;
- @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = linkage });
- const __floatundisf = @import("compiler_rt/floatundisf.zig").__floatundisf;
- @export(__floatundisf, .{ .name = "__floatundisf", .linkage = linkage });
- const __floatunsidf = @import("compiler_rt/floatunsidf.zig").__floatunsidf;
- @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = linkage });
- const __floatundidf = @import("compiler_rt/floatundidf.zig").__floatundidf;
- @export(__floatundidf, .{ .name = "__floatundidf", .linkage = linkage });
-
- const __floatditf = @import("compiler_rt/floatditf.zig").__floatditf;
- @export(__floatditf, .{ .name = "__floatditf", .linkage = linkage });
- const __floattitf = @import("compiler_rt/floattitf.zig").__floattitf;
- @export(__floattitf, .{ .name = "__floattitf", .linkage = linkage });
- const __floattidf = @import("compiler_rt/floattidf.zig").__floattidf;
- @export(__floattidf, .{ .name = "__floattidf", .linkage = linkage });
- const __floattisf = @import("compiler_rt/floatXisf.zig").__floattisf;
- @export(__floattisf, .{ .name = "__floattisf", .linkage = linkage });
- const __floatdisf = @import("compiler_rt/floatXisf.zig").__floatdisf;
- @export(__floatdisf, .{ .name = "__floatdisf", .linkage = linkage });
-
- const __floatunditf = @import("compiler_rt/floatunditf.zig").__floatunditf;
- @export(__floatunditf, .{ .name = "__floatunditf", .linkage = linkage });
- const __floatunsitf = @import("compiler_rt/floatunsitf.zig").__floatunsitf;
- @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = linkage });
-
- const __floatuntitf = @import("compiler_rt/floatuntitf.zig").__floatuntitf;
- @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = linkage });
- const __floatuntidf = @import("compiler_rt/floatuntidf.zig").__floatuntidf;
- @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = linkage });
- const __floatuntisf = @import("compiler_rt/floatuntisf.zig").__floatuntisf;
- @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = linkage });
-
const __extenddftf2 = @import("compiler_rt/extendXfYf2.zig").__extenddftf2;
@export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = linkage });
const __extendsftf2 = @import("compiler_rt/extendXfYf2.zig").__extendsftf2;
@@ -175,446 +28,611 @@ comptime {
const __extendhftf2 = @import("compiler_rt/extendXfYf2.zig").__extendhftf2;
@export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = linkage });
- const __truncsfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncsfhf2;
- @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = linkage });
- const __truncdfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfhf2;
- @export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = linkage });
- const __trunctfhf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfhf2;
- @export(__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = linkage });
- const __trunctfdf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfdf2;
- @export(__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = linkage });
- const __trunctfsf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfsf2;
- @export(__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = linkage });
-
- const __truncdfsf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfsf2;
- @export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = linkage });
-
- const __extendsfdf2 = @import("compiler_rt/extendXfYf2.zig").__extendsfdf2;
- @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = linkage });
-
- const __fixunssfsi = @import("compiler_rt/fixunssfsi.zig").__fixunssfsi;
- @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = linkage });
- const __fixunssfdi = @import("compiler_rt/fixunssfdi.zig").__fixunssfdi;
- @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = linkage });
- const __fixunssfti = @import("compiler_rt/fixunssfti.zig").__fixunssfti;
- @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = linkage });
-
- const __fixunsdfsi = @import("compiler_rt/fixunsdfsi.zig").__fixunsdfsi;
- @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = linkage });
- const __fixunsdfdi = @import("compiler_rt/fixunsdfdi.zig").__fixunsdfdi;
- @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = linkage });
- const __fixunsdfti = @import("compiler_rt/fixunsdfti.zig").__fixunsdfti;
- @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = linkage });
-
- const __fixunstfsi = @import("compiler_rt/fixunstfsi.zig").__fixunstfsi;
- @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = linkage });
- const __fixunstfdi = @import("compiler_rt/fixunstfdi.zig").__fixunstfdi;
- @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = linkage });
- const __fixunstfti = @import("compiler_rt/fixunstfti.zig").__fixunstfti;
- @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = linkage });
-
- const __fixdfdi = @import("compiler_rt/fixdfdi.zig").__fixdfdi;
- @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = linkage });
- const __fixdfsi = @import("compiler_rt/fixdfsi.zig").__fixdfsi;
- @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = linkage });
- const __fixdfti = @import("compiler_rt/fixdfti.zig").__fixdfti;
- @export(__fixdfti, .{ .name = "__fixdfti", .linkage = linkage });
- const __fixsfdi = @import("compiler_rt/fixsfdi.zig").__fixsfdi;
- @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = linkage });
- const __fixsfsi = @import("compiler_rt/fixsfsi.zig").__fixsfsi;
- @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = linkage });
- const __fixsfti = @import("compiler_rt/fixsfti.zig").__fixsfti;
- @export(__fixsfti, .{ .name = "__fixsfti", .linkage = linkage });
- const __fixtfdi = @import("compiler_rt/fixtfdi.zig").__fixtfdi;
- @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = linkage });
- const __fixtfsi = @import("compiler_rt/fixtfsi.zig").__fixtfsi;
- @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = linkage });
- const __fixtfti = @import("compiler_rt/fixtfti.zig").__fixtfti;
- @export(__fixtfti, .{ .name = "__fixtfti", .linkage = linkage });
-
- const __udivmoddi4 = @import("compiler_rt/int.zig").__udivmoddi4;
- @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = linkage });
- const __popcountdi2 = @import("compiler_rt/popcountdi2.zig").__popcountdi2;
- @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = linkage });
-
- const __mulsi3 = @import("compiler_rt/int.zig").__mulsi3;
- @export(__mulsi3, .{ .name = "__mulsi3", .linkage = linkage });
- const __muldi3 = @import("compiler_rt/muldi3.zig").__muldi3;
- @export(__muldi3, .{ .name = "__muldi3", .linkage = linkage });
- const __divmoddi4 = @import("compiler_rt/int.zig").__divmoddi4;
- @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = linkage });
- const __divsi3 = @import("compiler_rt/int.zig").__divsi3;
- @export(__divsi3, .{ .name = "__divsi3", .linkage = linkage });
- const __divdi3 = @import("compiler_rt/int.zig").__divdi3;
- @export(__divdi3, .{ .name = "__divdi3", .linkage = linkage });
- const __udivsi3 = @import("compiler_rt/int.zig").__udivsi3;
- @export(__udivsi3, .{ .name = "__udivsi3", .linkage = linkage });
- const __udivdi3 = @import("compiler_rt/int.zig").__udivdi3;
- @export(__udivdi3, .{ .name = "__udivdi3", .linkage = linkage });
- const __modsi3 = @import("compiler_rt/int.zig").__modsi3;
- @export(__modsi3, .{ .name = "__modsi3", .linkage = linkage });
- const __moddi3 = @import("compiler_rt/int.zig").__moddi3;
- @export(__moddi3, .{ .name = "__moddi3", .linkage = linkage });
- const __umodsi3 = @import("compiler_rt/int.zig").__umodsi3;
- @export(__umodsi3, .{ .name = "__umodsi3", .linkage = linkage });
- const __umoddi3 = @import("compiler_rt/int.zig").__umoddi3;
- @export(__umoddi3, .{ .name = "__umoddi3", .linkage = linkage });
- const __divmodsi4 = @import("compiler_rt/int.zig").__divmodsi4;
- @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = linkage });
- const __udivmodsi4 = @import("compiler_rt/int.zig").__udivmodsi4;
- @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = linkage });
-
- const __negsf2 = @import("compiler_rt/negXf2.zig").__negsf2;
- @export(__negsf2, .{ .name = "__negsf2", .linkage = linkage });
- const __negdf2 = @import("compiler_rt/negXf2.zig").__negdf2;
- @export(__negdf2, .{ .name = "__negdf2", .linkage = linkage });
-
- const __clzsi2 = @import("compiler_rt/count0bits.zig").__clzsi2;
- @export(__clzsi2, .{ .name = "__clzsi2", .linkage = linkage });
- const __clzdi2 = @import("compiler_rt/count0bits.zig").__clzdi2;
- @export(__clzdi2, .{ .name = "__clzdi2", .linkage = linkage });
- const __clzti2 = @import("compiler_rt/count0bits.zig").__clzti2;
- @export(__clzti2, .{ .name = "__clzti2", .linkage = linkage });
-
- if (builtin.link_libc and os_tag == .openbsd) {
- const __emutls_get_address = @import("compiler_rt/emutls.zig").__emutls_get_address;
- @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = linkage });
- }
-
- if ((arch.isARM() or arch.isThumb()) and !is_test) {
- const __aeabi_unwind_cpp_pr0 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr0;
- @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = linkage });
- const __aeabi_unwind_cpp_pr1 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr1;
- @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = linkage });
- const __aeabi_unwind_cpp_pr2 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr2;
- @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = linkage });
-
- @export(__muldi3, .{ .name = "__aeabi_lmul", .linkage = linkage });
-
- const __aeabi_ldivmod = @import("compiler_rt/arm.zig").__aeabi_ldivmod;
- @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = linkage });
- const __aeabi_uldivmod = @import("compiler_rt/arm.zig").__aeabi_uldivmod;
- @export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = linkage });
-
- @export(__divsi3, .{ .name = "__aeabi_idiv", .linkage = linkage });
- const __aeabi_idivmod = @import("compiler_rt/arm.zig").__aeabi_idivmod;
- @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = linkage });
- @export(__udivsi3, .{ .name = "__aeabi_uidiv", .linkage = linkage });
- const __aeabi_uidivmod = @import("compiler_rt/arm.zig").__aeabi_uidivmod;
- @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = linkage });
-
- const __aeabi_memcpy = @import("compiler_rt/arm.zig").__aeabi_memcpy;
- @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = linkage });
- @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy4", .linkage = linkage });
- @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy8", .linkage = linkage });
-
- const __aeabi_memmove = @import("compiler_rt/arm.zig").__aeabi_memmove;
- @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = linkage });
- @export(__aeabi_memmove, .{ .name = "__aeabi_memmove4", .linkage = linkage });
- @export(__aeabi_memmove, .{ .name = "__aeabi_memmove8", .linkage = linkage });
-
- const __aeabi_memset = @import("compiler_rt/arm.zig").__aeabi_memset;
- @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = linkage });
- @export(__aeabi_memset, .{ .name = "__aeabi_memset4", .linkage = linkage });
- @export(__aeabi_memset, .{ .name = "__aeabi_memset8", .linkage = linkage });
-
- const __aeabi_memclr = @import("compiler_rt/arm.zig").__aeabi_memclr;
- @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = linkage });
- @export(__aeabi_memclr, .{ .name = "__aeabi_memclr4", .linkage = linkage });
- @export(__aeabi_memclr, .{ .name = "__aeabi_memclr8", .linkage = linkage });
-
- if (os_tag == .linux) {
- const __aeabi_read_tp = @import("compiler_rt/arm.zig").__aeabi_read_tp;
- @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = linkage });
- }
-
- const __aeabi_f2d = @import("compiler_rt/extendXfYf2.zig").__aeabi_f2d;
- @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = linkage });
- const __aeabi_i2d = @import("compiler_rt/floatsiXf.zig").__aeabi_i2d;
- @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = linkage });
- const __aeabi_l2d = @import("compiler_rt/floatdidf.zig").__aeabi_l2d;
- @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = linkage });
- const __aeabi_l2f = @import("compiler_rt/floatXisf.zig").__aeabi_l2f;
- @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = linkage });
- const __aeabi_ui2d = @import("compiler_rt/floatunsidf.zig").__aeabi_ui2d;
- @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = linkage });
- const __aeabi_ul2d = @import("compiler_rt/floatundidf.zig").__aeabi_ul2d;
- @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = linkage });
- const __aeabi_ui2f = @import("compiler_rt/floatunsisf.zig").__aeabi_ui2f;
- @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = linkage });
- const __aeabi_ul2f = @import("compiler_rt/floatundisf.zig").__aeabi_ul2f;
- @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = linkage });
-
- const __aeabi_fneg = @import("compiler_rt/negXf2.zig").__aeabi_fneg;
- @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = linkage });
- const __aeabi_dneg = @import("compiler_rt/negXf2.zig").__aeabi_dneg;
- @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = linkage });
-
- const __aeabi_fmul = @import("compiler_rt/mulXf3.zig").__aeabi_fmul;
- @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = linkage });
- const __aeabi_dmul = @import("compiler_rt/mulXf3.zig").__aeabi_dmul;
- @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = linkage });
-
- const __aeabi_d2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2h;
- @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = linkage });
-
- const __aeabi_f2ulz = @import("compiler_rt/fixunssfdi.zig").__aeabi_f2ulz;
- @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = linkage });
- const __aeabi_d2ulz = @import("compiler_rt/fixunsdfdi.zig").__aeabi_d2ulz;
- @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = linkage });
-
- const __aeabi_f2lz = @import("compiler_rt/fixsfdi.zig").__aeabi_f2lz;
- @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = linkage });
- const __aeabi_d2lz = @import("compiler_rt/fixdfdi.zig").__aeabi_d2lz;
- @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = linkage });
-
- const __aeabi_d2uiz = @import("compiler_rt/fixunsdfsi.zig").__aeabi_d2uiz;
- @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = linkage });
-
- const __aeabi_h2f = @import("compiler_rt/extendXfYf2.zig").__aeabi_h2f;
- @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = linkage });
- const __aeabi_f2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_f2h;
- @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = linkage });
-
- const __aeabi_i2f = @import("compiler_rt/floatsiXf.zig").__aeabi_i2f;
- @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = linkage });
- const __aeabi_d2f = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2f;
- @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = linkage });
-
- const __aeabi_fadd = @import("compiler_rt/addXf3.zig").__aeabi_fadd;
- @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = linkage });
- const __aeabi_dadd = @import("compiler_rt/addXf3.zig").__aeabi_dadd;
- @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = linkage });
- const __aeabi_fsub = @import("compiler_rt/addXf3.zig").__aeabi_fsub;
- @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = linkage });
- const __aeabi_dsub = @import("compiler_rt/addXf3.zig").__aeabi_dsub;
- @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = linkage });
-
- const __aeabi_f2uiz = @import("compiler_rt/fixunssfsi.zig").__aeabi_f2uiz;
- @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = linkage });
-
- const __aeabi_f2iz = @import("compiler_rt/fixsfsi.zig").__aeabi_f2iz;
- @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = linkage });
- const __aeabi_d2iz = @import("compiler_rt/fixdfsi.zig").__aeabi_d2iz;
- @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = linkage });
-
- const __aeabi_fdiv = @import("compiler_rt/divsf3.zig").__aeabi_fdiv;
- @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = linkage });
- const __aeabi_ddiv = @import("compiler_rt/divdf3.zig").__aeabi_ddiv;
- @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = linkage });
-
- const __aeabi_llsl = @import("compiler_rt/shift.zig").__aeabi_llsl;
- @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = linkage });
- const __aeabi_lasr = @import("compiler_rt/shift.zig").__aeabi_lasr;
- @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = linkage });
- const __aeabi_llsr = @import("compiler_rt/shift.zig").__aeabi_llsr;
- @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = linkage });
-
- const __aeabi_fcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpeq;
- @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = linkage });
- const __aeabi_fcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmplt;
- @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = linkage });
- const __aeabi_fcmple = @import("compiler_rt/compareXf2.zig").__aeabi_fcmple;
- @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = linkage });
- const __aeabi_fcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpge;
- @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = linkage });
- const __aeabi_fcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpgt;
- @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = linkage });
- const __aeabi_fcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpun;
- @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = linkage });
-
- const __aeabi_dcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpeq;
- @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = linkage });
- const __aeabi_dcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmplt;
- @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = linkage });
- const __aeabi_dcmple = @import("compiler_rt/compareXf2.zig").__aeabi_dcmple;
- @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = linkage });
- const __aeabi_dcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpge;
- @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = linkage });
- const __aeabi_dcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpgt;
- @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = linkage });
- const __aeabi_dcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpun;
- @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = linkage });
- }
-
- if (arch == .i386 and abi == .msvc) {
- // Don't let LLVM apply the stdcall name mangling on those MSVC builtins
- const _alldiv = @import("compiler_rt/aulldiv.zig")._alldiv;
- @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = strong_linkage });
- const _aulldiv = @import("compiler_rt/aulldiv.zig")._aulldiv;
- @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = strong_linkage });
- const _allrem = @import("compiler_rt/aullrem.zig")._allrem;
- @export(_allrem, .{ .name = "\x01__allrem", .linkage = strong_linkage });
- const _aullrem = @import("compiler_rt/aullrem.zig")._aullrem;
- @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = strong_linkage });
- }
-
- if (arch.isSPARC()) {
- // SPARC systems use a different naming scheme
- const _Qp_add = @import("compiler_rt/sparc.zig")._Qp_add;
- @export(_Qp_add, .{ .name = "_Qp_add", .linkage = linkage });
- const _Qp_div = @import("compiler_rt/sparc.zig")._Qp_div;
- @export(_Qp_div, .{ .name = "_Qp_div", .linkage = linkage });
- const _Qp_mul = @import("compiler_rt/sparc.zig")._Qp_mul;
- @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = linkage });
- const _Qp_sub = @import("compiler_rt/sparc.zig")._Qp_sub;
- @export(_Qp_sub, .{ .name = "_Qp_sub", .linkage = linkage });
-
- const _Qp_cmp = @import("compiler_rt/sparc.zig")._Qp_cmp;
- @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = linkage });
- const _Qp_feq = @import("compiler_rt/sparc.zig")._Qp_feq;
- @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = linkage });
- const _Qp_fne = @import("compiler_rt/sparc.zig")._Qp_fne;
- @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = linkage });
- const _Qp_flt = @import("compiler_rt/sparc.zig")._Qp_flt;
- @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = linkage });
- const _Qp_fle = @import("compiler_rt/sparc.zig")._Qp_fle;
- @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = linkage });
- const _Qp_fgt = @import("compiler_rt/sparc.zig")._Qp_fgt;
- @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = linkage });
- const _Qp_fge = @import("compiler_rt/sparc.zig")._Qp_fge;
- @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = linkage });
-
- const _Qp_itoq = @import("compiler_rt/sparc.zig")._Qp_itoq;
- @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = linkage });
- const _Qp_uitoq = @import("compiler_rt/sparc.zig")._Qp_uitoq;
- @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = linkage });
- const _Qp_xtoq = @import("compiler_rt/sparc.zig")._Qp_xtoq;
- @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = linkage });
- const _Qp_uxtoq = @import("compiler_rt/sparc.zig")._Qp_uxtoq;
- @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = linkage });
- const _Qp_stoq = @import("compiler_rt/sparc.zig")._Qp_stoq;
- @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = linkage });
- const _Qp_dtoq = @import("compiler_rt/sparc.zig")._Qp_dtoq;
- @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = linkage });
- const _Qp_qtoi = @import("compiler_rt/sparc.zig")._Qp_qtoi;
- @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = linkage });
- const _Qp_qtoui = @import("compiler_rt/sparc.zig")._Qp_qtoui;
- @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = linkage });
- const _Qp_qtox = @import("compiler_rt/sparc.zig")._Qp_qtox;
- @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = linkage });
- const _Qp_qtoux = @import("compiler_rt/sparc.zig")._Qp_qtoux;
- @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = linkage });
- const _Qp_qtos = @import("compiler_rt/sparc.zig")._Qp_qtos;
- @export(_Qp_qtos, .{ .name = "_Qp_qtos", .linkage = linkage });
- const _Qp_qtod = @import("compiler_rt/sparc.zig")._Qp_qtod;
- @export(_Qp_qtod, .{ .name = "_Qp_qtod", .linkage = linkage });
- }
-
- if ((arch == .powerpc or arch.isPPC64()) and !is_test) {
- @export(__addtf3, .{ .name = "__addkf3", .linkage = linkage });
- @export(__subtf3, .{ .name = "__subkf3", .linkage = linkage });
- @export(__multf3, .{ .name = "__mulkf3", .linkage = linkage });
- @export(__divtf3, .{ .name = "__divkf3", .linkage = linkage });
- @export(__extendsftf2, .{ .name = "__extendsfkf2", .linkage = linkage });
- @export(__extenddftf2, .{ .name = "__extenddfkf2", .linkage = linkage });
- @export(__trunctfsf2, .{ .name = "__trunckfsf2", .linkage = linkage });
- @export(__trunctfdf2, .{ .name = "__trunckfdf2", .linkage = linkage });
- @export(__fixtfdi, .{ .name = "__fixkfdi", .linkage = linkage });
- @export(__fixtfsi, .{ .name = "__fixkfsi", .linkage = linkage });
- @export(__fixunstfsi, .{ .name = "__fixunskfsi", .linkage = linkage });
- @export(__fixunstfdi, .{ .name = "__fixunskfdi", .linkage = linkage });
- @export(__floatsitf, .{ .name = "__floatsikf", .linkage = linkage });
- @export(__floatditf, .{ .name = "__floatdikf", .linkage = linkage });
- @export(__floatunditf, .{ .name = "__floatundikf", .linkage = linkage });
- @export(__floatunsitf, .{ .name = "__floatunsikf", .linkage = linkage });
-
- @export(__letf2, .{ .name = "__eqkf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__nekf2", .linkage = linkage });
- @export(__getf2, .{ .name = "__gekf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__ltkf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__lekf2", .linkage = linkage });
- @export(__getf2, .{ .name = "__gtkf2", .linkage = linkage });
- @export(__unordtf2, .{ .name = "__unordkf2", .linkage = linkage });
- }
-
- if (builtin.os.tag == .windows) {
- // Default stack-probe functions emitted by LLVM
- if (is_mingw) {
- const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
- @export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage });
- const ___chkstk_ms = @import("compiler_rt/stack_probe.zig").___chkstk_ms;
- @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage });
- } else if (!builtin.link_libc) {
- // This symbols are otherwise exported by MSVCRT.lib
- const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
- @export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage });
- const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
- @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
- }
-
+ if (!builtin.zig_is_stage2) {
switch (arch) {
- .i386 => {
- const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
- @export(__divti3, .{ .name = "__divti3", .linkage = linkage });
- const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
- @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
- const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
- @export(__multi3, .{ .name = "__multi3", .linkage = linkage });
- const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
- @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
- const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
- @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
- const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
- @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
- },
- .x86_64 => {
- // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
- // that LLVM expects compiler-rt to have.
- const __divti3_windows_x86_64 = @import("compiler_rt/divti3.zig").__divti3_windows_x86_64;
- @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = linkage });
- const __modti3_windows_x86_64 = @import("compiler_rt/modti3.zig").__modti3_windows_x86_64;
- @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = linkage });
- const __multi3_windows_x86_64 = @import("compiler_rt/multi3.zig").__multi3_windows_x86_64;
- @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = linkage });
- const __udivti3_windows_x86_64 = @import("compiler_rt/udivti3.zig").__udivti3_windows_x86_64;
- @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = linkage });
- const __udivmodti4_windows_x86_64 = @import("compiler_rt/udivmodti4.zig").__udivmodti4_windows_x86_64;
- @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = linkage });
- const __umodti3_windows_x86_64 = @import("compiler_rt/umodti3.zig").__umodti3_windows_x86_64;
- @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = linkage });
+ .i386,
+ .x86_64,
+ => {
+ const zig_probe_stack = @import("compiler_rt/stack_probe.zig").zig_probe_stack;
+ @export(zig_probe_stack, .{
+ .name = "__zig_probe_stack",
+ .linkage = linkage,
+ });
},
+
else => {},
}
- if (arch.isAARCH64()) {
- const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
- @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
- const __divti3_windows = @import("compiler_rt/divti3.zig").__divti3;
- @export(__divti3_windows, .{ .name = "__divti3", .linkage = linkage });
+
+ // __clear_cache manages its own logic about whether to be exported or not.
+ _ = @import("compiler_rt/clear_cache.zig").clear_cache;
+
+ const __lesf2 = @import("compiler_rt/compareXf2.zig").__lesf2;
+ @export(__lesf2, .{ .name = "__lesf2", .linkage = linkage });
+ const __ledf2 = @import("compiler_rt/compareXf2.zig").__ledf2;
+ @export(__ledf2, .{ .name = "__ledf2", .linkage = linkage });
+ const __letf2 = @import("compiler_rt/compareXf2.zig").__letf2;
+ @export(__letf2, .{ .name = "__letf2", .linkage = linkage });
+
+ const __gesf2 = @import("compiler_rt/compareXf2.zig").__gesf2;
+ @export(__gesf2, .{ .name = "__gesf2", .linkage = linkage });
+ const __gedf2 = @import("compiler_rt/compareXf2.zig").__gedf2;
+ @export(__gedf2, .{ .name = "__gedf2", .linkage = linkage });
+ const __getf2 = @import("compiler_rt/compareXf2.zig").__getf2;
+ @export(__getf2, .{ .name = "__getf2", .linkage = linkage });
+
+ if (!is_test) {
+ @export(__lesf2, .{ .name = "__cmpsf2", .linkage = linkage });
+ @export(__ledf2, .{ .name = "__cmpdf2", .linkage = linkage });
+ @export(__letf2, .{ .name = "__cmptf2", .linkage = linkage });
+
+ const __eqsf2 = @import("compiler_rt/compareXf2.zig").__eqsf2;
+ @export(__eqsf2, .{ .name = "__eqsf2", .linkage = linkage });
+ const __eqdf2 = @import("compiler_rt/compareXf2.zig").__eqdf2;
+ @export(__eqdf2, .{ .name = "__eqdf2", .linkage = linkage });
+ @export(__letf2, .{ .name = "__eqtf2", .linkage = linkage });
+
+ const __ltsf2 = @import("compiler_rt/compareXf2.zig").__ltsf2;
+ @export(__ltsf2, .{ .name = "__ltsf2", .linkage = linkage });
+ const __ltdf2 = @import("compiler_rt/compareXf2.zig").__ltdf2;
+ @export(__ltdf2, .{ .name = "__ltdf2", .linkage = linkage });
+ @export(__letf2, .{ .name = "__lttf2", .linkage = linkage });
+
+ const __nesf2 = @import("compiler_rt/compareXf2.zig").__nesf2;
+ @export(__nesf2, .{ .name = "__nesf2", .linkage = linkage });
+ const __nedf2 = @import("compiler_rt/compareXf2.zig").__nedf2;
+ @export(__nedf2, .{ .name = "__nedf2", .linkage = linkage });
+ @export(__letf2, .{ .name = "__netf2", .linkage = linkage });
+
+ const __gtsf2 = @import("compiler_rt/compareXf2.zig").__gtsf2;
+ @export(__gtsf2, .{ .name = "__gtsf2", .linkage = linkage });
+ const __gtdf2 = @import("compiler_rt/compareXf2.zig").__gtdf2;
+ @export(__gtdf2, .{ .name = "__gtdf2", .linkage = linkage });
+ @export(__getf2, .{ .name = "__gttf2", .linkage = linkage });
+
+ @export(@import("compiler_rt/extendXfYf2.zig").__extendhfsf2, .{
+ .name = "__gnu_h2f_ieee",
+ .linkage = linkage,
+ });
+ @export(@import("compiler_rt/truncXfYf2.zig").__truncsfhf2, .{
+ .name = "__gnu_f2h_ieee",
+ .linkage = linkage,
+ });
+ }
+
+ const __unordsf2 = @import("compiler_rt/compareXf2.zig").__unordsf2;
+ @export(__unordsf2, .{ .name = "__unordsf2", .linkage = linkage });
+ const __unorddf2 = @import("compiler_rt/compareXf2.zig").__unorddf2;
+ @export(__unorddf2, .{ .name = "__unorddf2", .linkage = linkage });
+ const __unordtf2 = @import("compiler_rt/compareXf2.zig").__unordtf2;
+ @export(__unordtf2, .{ .name = "__unordtf2", .linkage = linkage });
+
+ const __addsf3 = @import("compiler_rt/addXf3.zig").__addsf3;
+ @export(__addsf3, .{ .name = "__addsf3", .linkage = linkage });
+ const __adddf3 = @import("compiler_rt/addXf3.zig").__adddf3;
+ @export(__adddf3, .{ .name = "__adddf3", .linkage = linkage });
+ const __addtf3 = @import("compiler_rt/addXf3.zig").__addtf3;
+ @export(__addtf3, .{ .name = "__addtf3", .linkage = linkage });
+ const __subsf3 = @import("compiler_rt/addXf3.zig").__subsf3;
+ @export(__subsf3, .{ .name = "__subsf3", .linkage = linkage });
+ const __subdf3 = @import("compiler_rt/addXf3.zig").__subdf3;
+ @export(__subdf3, .{ .name = "__subdf3", .linkage = linkage });
+ const __subtf3 = @import("compiler_rt/addXf3.zig").__subtf3;
+ @export(__subtf3, .{ .name = "__subtf3", .linkage = linkage });
+
+ const __mulsf3 = @import("compiler_rt/mulXf3.zig").__mulsf3;
+ @export(__mulsf3, .{ .name = "__mulsf3", .linkage = linkage });
+ const __muldf3 = @import("compiler_rt/mulXf3.zig").__muldf3;
+ @export(__muldf3, .{ .name = "__muldf3", .linkage = linkage });
+ const __multf3 = @import("compiler_rt/mulXf3.zig").__multf3;
+ @export(__multf3, .{ .name = "__multf3", .linkage = linkage });
+
+ const __divsf3 = @import("compiler_rt/divsf3.zig").__divsf3;
+ @export(__divsf3, .{ .name = "__divsf3", .linkage = linkage });
+ const __divdf3 = @import("compiler_rt/divdf3.zig").__divdf3;
+ @export(__divdf3, .{ .name = "__divdf3", .linkage = linkage });
+ const __divtf3 = @import("compiler_rt/divtf3.zig").__divtf3;
+ @export(__divtf3, .{ .name = "__divtf3", .linkage = linkage });
+
+ const __ashldi3 = @import("compiler_rt/shift.zig").__ashldi3;
+ @export(__ashldi3, .{ .name = "__ashldi3", .linkage = linkage });
+ const __ashlti3 = @import("compiler_rt/shift.zig").__ashlti3;
+ @export(__ashlti3, .{ .name = "__ashlti3", .linkage = linkage });
+ const __ashrdi3 = @import("compiler_rt/shift.zig").__ashrdi3;
+ @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = linkage });
+ const __ashrti3 = @import("compiler_rt/shift.zig").__ashrti3;
+ @export(__ashrti3, .{ .name = "__ashrti3", .linkage = linkage });
+ const __lshrdi3 = @import("compiler_rt/shift.zig").__lshrdi3;
+ @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = linkage });
+ const __lshrti3 = @import("compiler_rt/shift.zig").__lshrti3;
+ @export(__lshrti3, .{ .name = "__lshrti3", .linkage = linkage });
+
+ const __floatsidf = @import("compiler_rt/floatsiXf.zig").__floatsidf;
+ @export(__floatsidf, .{ .name = "__floatsidf", .linkage = linkage });
+ const __floatsisf = @import("compiler_rt/floatsiXf.zig").__floatsisf;
+ @export(__floatsisf, .{ .name = "__floatsisf", .linkage = linkage });
+ const __floatdidf = @import("compiler_rt/floatdidf.zig").__floatdidf;
+ @export(__floatdidf, .{ .name = "__floatdidf", .linkage = linkage });
+ const __floatsitf = @import("compiler_rt/floatsiXf.zig").__floatsitf;
+ @export(__floatsitf, .{ .name = "__floatsitf", .linkage = linkage });
+
+ const __floatunsisf = @import("compiler_rt/floatunsisf.zig").__floatunsisf;
+ @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = linkage });
+ const __floatundisf = @import("compiler_rt/floatundisf.zig").__floatundisf;
+ @export(__floatundisf, .{ .name = "__floatundisf", .linkage = linkage });
+ const __floatunsidf = @import("compiler_rt/floatunsidf.zig").__floatunsidf;
+ @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = linkage });
+ const __floatundidf = @import("compiler_rt/floatundidf.zig").__floatundidf;
+ @export(__floatundidf, .{ .name = "__floatundidf", .linkage = linkage });
+
+ const __floatditf = @import("compiler_rt/floatditf.zig").__floatditf;
+ @export(__floatditf, .{ .name = "__floatditf", .linkage = linkage });
+ const __floattitf = @import("compiler_rt/floattitf.zig").__floattitf;
+ @export(__floattitf, .{ .name = "__floattitf", .linkage = linkage });
+ const __floattidf = @import("compiler_rt/floattidf.zig").__floattidf;
+ @export(__floattidf, .{ .name = "__floattidf", .linkage = linkage });
+ const __floattisf = @import("compiler_rt/floatXisf.zig").__floattisf;
+ @export(__floattisf, .{ .name = "__floattisf", .linkage = linkage });
+ const __floatdisf = @import("compiler_rt/floatXisf.zig").__floatdisf;
+ @export(__floatdisf, .{ .name = "__floatdisf", .linkage = linkage });
+
+ const __floatunditf = @import("compiler_rt/floatunditf.zig").__floatunditf;
+ @export(__floatunditf, .{ .name = "__floatunditf", .linkage = linkage });
+ const __floatunsitf = @import("compiler_rt/floatunsitf.zig").__floatunsitf;
+ @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = linkage });
+
+ const __floatuntitf = @import("compiler_rt/floatuntitf.zig").__floatuntitf;
+ @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = linkage });
+ const __floatuntidf = @import("compiler_rt/floatuntidf.zig").__floatuntidf;
+ @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = linkage });
+ const __floatuntisf = @import("compiler_rt/floatuntisf.zig").__floatuntisf;
+ @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = linkage });
+
+ const __truncsfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncsfhf2;
+ @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = linkage });
+ const __truncdfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfhf2;
+ @export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = linkage });
+ const __trunctfhf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfhf2;
+ @export(__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = linkage });
+ const __trunctfdf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfdf2;
+ @export(__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = linkage });
+ const __trunctfsf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfsf2;
+ @export(__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = linkage });
+
+ const __truncdfsf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfsf2;
+ @export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = linkage });
+
+ const __extendsfdf2 = @import("compiler_rt/extendXfYf2.zig").__extendsfdf2;
+ @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = linkage });
+
+ const __fixunssfsi = @import("compiler_rt/fixunssfsi.zig").__fixunssfsi;
+ @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = linkage });
+ const __fixunssfdi = @import("compiler_rt/fixunssfdi.zig").__fixunssfdi;
+ @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = linkage });
+ const __fixunssfti = @import("compiler_rt/fixunssfti.zig").__fixunssfti;
+ @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = linkage });
+
+ const __fixunsdfsi = @import("compiler_rt/fixunsdfsi.zig").__fixunsdfsi;
+ @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = linkage });
+ const __fixunsdfdi = @import("compiler_rt/fixunsdfdi.zig").__fixunsdfdi;
+ @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = linkage });
+ const __fixunsdfti = @import("compiler_rt/fixunsdfti.zig").__fixunsdfti;
+ @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = linkage });
+
+ const __fixunstfsi = @import("compiler_rt/fixunstfsi.zig").__fixunstfsi;
+ @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = linkage });
+ const __fixunstfdi = @import("compiler_rt/fixunstfdi.zig").__fixunstfdi;
+ @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = linkage });
+ const __fixunstfti = @import("compiler_rt/fixunstfti.zig").__fixunstfti;
+ @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = linkage });
+
+ const __fixdfdi = @import("compiler_rt/fixdfdi.zig").__fixdfdi;
+ @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = linkage });
+ const __fixdfsi = @import("compiler_rt/fixdfsi.zig").__fixdfsi;
+ @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = linkage });
+ const __fixdfti = @import("compiler_rt/fixdfti.zig").__fixdfti;
+ @export(__fixdfti, .{ .name = "__fixdfti", .linkage = linkage });
+ const __fixsfdi = @import("compiler_rt/fixsfdi.zig").__fixsfdi;
+ @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = linkage });
+ const __fixsfsi = @import("compiler_rt/fixsfsi.zig").__fixsfsi;
+ @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = linkage });
+ const __fixsfti = @import("compiler_rt/fixsfti.zig").__fixsfti;
+ @export(__fixsfti, .{ .name = "__fixsfti", .linkage = linkage });
+ const __fixtfdi = @import("compiler_rt/fixtfdi.zig").__fixtfdi;
+ @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = linkage });
+ const __fixtfsi = @import("compiler_rt/fixtfsi.zig").__fixtfsi;
+ @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = linkage });
+ const __fixtfti = @import("compiler_rt/fixtfti.zig").__fixtfti;
+ @export(__fixtfti, .{ .name = "__fixtfti", .linkage = linkage });
+
+ const __udivmoddi4 = @import("compiler_rt/int.zig").__udivmoddi4;
+ @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = linkage });
+ const __popcountdi2 = @import("compiler_rt/popcountdi2.zig").__popcountdi2;
+ @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = linkage });
+
+ const __mulsi3 = @import("compiler_rt/int.zig").__mulsi3;
+ @export(__mulsi3, .{ .name = "__mulsi3", .linkage = linkage });
+ const __muldi3 = @import("compiler_rt/muldi3.zig").__muldi3;
+ @export(__muldi3, .{ .name = "__muldi3", .linkage = linkage });
+ const __divmoddi4 = @import("compiler_rt/int.zig").__divmoddi4;
+ @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = linkage });
+ const __divsi3 = @import("compiler_rt/int.zig").__divsi3;
+ @export(__divsi3, .{ .name = "__divsi3", .linkage = linkage });
+ const __divdi3 = @import("compiler_rt/int.zig").__divdi3;
+ @export(__divdi3, .{ .name = "__divdi3", .linkage = linkage });
+ const __udivsi3 = @import("compiler_rt/int.zig").__udivsi3;
+ @export(__udivsi3, .{ .name = "__udivsi3", .linkage = linkage });
+ const __udivdi3 = @import("compiler_rt/int.zig").__udivdi3;
+ @export(__udivdi3, .{ .name = "__udivdi3", .linkage = linkage });
+ const __modsi3 = @import("compiler_rt/int.zig").__modsi3;
+ @export(__modsi3, .{ .name = "__modsi3", .linkage = linkage });
+ const __moddi3 = @import("compiler_rt/int.zig").__moddi3;
+ @export(__moddi3, .{ .name = "__moddi3", .linkage = linkage });
+ const __umodsi3 = @import("compiler_rt/int.zig").__umodsi3;
+ @export(__umodsi3, .{ .name = "__umodsi3", .linkage = linkage });
+ const __umoddi3 = @import("compiler_rt/int.zig").__umoddi3;
+ @export(__umoddi3, .{ .name = "__umoddi3", .linkage = linkage });
+ const __divmodsi4 = @import("compiler_rt/int.zig").__divmodsi4;
+ @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = linkage });
+ const __udivmodsi4 = @import("compiler_rt/int.zig").__udivmodsi4;
+ @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = linkage });
+
+ const __negsf2 = @import("compiler_rt/negXf2.zig").__negsf2;
+ @export(__negsf2, .{ .name = "__negsf2", .linkage = linkage });
+ const __negdf2 = @import("compiler_rt/negXf2.zig").__negdf2;
+ @export(__negdf2, .{ .name = "__negdf2", .linkage = linkage });
+
+ const __clzsi2 = @import("compiler_rt/count0bits.zig").__clzsi2;
+ @export(__clzsi2, .{ .name = "__clzsi2", .linkage = linkage });
+ const __clzdi2 = @import("compiler_rt/count0bits.zig").__clzdi2;
+ @export(__clzdi2, .{ .name = "__clzdi2", .linkage = linkage });
+ const __clzti2 = @import("compiler_rt/count0bits.zig").__clzti2;
+ @export(__clzti2, .{ .name = "__clzti2", .linkage = linkage });
+
+ if (builtin.link_libc and os_tag == .openbsd) {
+ const __emutls_get_address = @import("compiler_rt/emutls.zig").__emutls_get_address;
+ @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = linkage });
+ }
+
+ if ((arch.isARM() or arch.isThumb()) and !is_test) {
+ const __aeabi_unwind_cpp_pr0 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr0;
+ @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = linkage });
+ const __aeabi_unwind_cpp_pr1 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr1;
+ @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = linkage });
+ const __aeabi_unwind_cpp_pr2 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr2;
+ @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = linkage });
+
+ @export(__muldi3, .{ .name = "__aeabi_lmul", .linkage = linkage });
+
+ const __aeabi_ldivmod = @import("compiler_rt/arm.zig").__aeabi_ldivmod;
+ @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = linkage });
+ const __aeabi_uldivmod = @import("compiler_rt/arm.zig").__aeabi_uldivmod;
+ @export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = linkage });
+
+ @export(__divsi3, .{ .name = "__aeabi_idiv", .linkage = linkage });
+ const __aeabi_idivmod = @import("compiler_rt/arm.zig").__aeabi_idivmod;
+ @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = linkage });
+ @export(__udivsi3, .{ .name = "__aeabi_uidiv", .linkage = linkage });
+ const __aeabi_uidivmod = @import("compiler_rt/arm.zig").__aeabi_uidivmod;
+ @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = linkage });
+
+ const __aeabi_memcpy = @import("compiler_rt/arm.zig").__aeabi_memcpy;
+ @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = linkage });
+ @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy4", .linkage = linkage });
+ @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy8", .linkage = linkage });
+
+ const __aeabi_memmove = @import("compiler_rt/arm.zig").__aeabi_memmove;
+ @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = linkage });
+ @export(__aeabi_memmove, .{ .name = "__aeabi_memmove4", .linkage = linkage });
+ @export(__aeabi_memmove, .{ .name = "__aeabi_memmove8", .linkage = linkage });
+
+ const __aeabi_memset = @import("compiler_rt/arm.zig").__aeabi_memset;
+ @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = linkage });
+ @export(__aeabi_memset, .{ .name = "__aeabi_memset4", .linkage = linkage });
+ @export(__aeabi_memset, .{ .name = "__aeabi_memset8", .linkage = linkage });
+
+ const __aeabi_memclr = @import("compiler_rt/arm.zig").__aeabi_memclr;
+ @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = linkage });
+ @export(__aeabi_memclr, .{ .name = "__aeabi_memclr4", .linkage = linkage });
+ @export(__aeabi_memclr, .{ .name = "__aeabi_memclr8", .linkage = linkage });
+
+ if (os_tag == .linux) {
+ const __aeabi_read_tp = @import("compiler_rt/arm.zig").__aeabi_read_tp;
+ @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = linkage });
+ }
+
+ const __aeabi_f2d = @import("compiler_rt/extendXfYf2.zig").__aeabi_f2d;
+ @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = linkage });
+ const __aeabi_i2d = @import("compiler_rt/floatsiXf.zig").__aeabi_i2d;
+ @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = linkage });
+ const __aeabi_l2d = @import("compiler_rt/floatdidf.zig").__aeabi_l2d;
+ @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = linkage });
+ const __aeabi_l2f = @import("compiler_rt/floatXisf.zig").__aeabi_l2f;
+ @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = linkage });
+ const __aeabi_ui2d = @import("compiler_rt/floatunsidf.zig").__aeabi_ui2d;
+ @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = linkage });
+ const __aeabi_ul2d = @import("compiler_rt/floatundidf.zig").__aeabi_ul2d;
+ @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = linkage });
+ const __aeabi_ui2f = @import("compiler_rt/floatunsisf.zig").__aeabi_ui2f;
+ @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = linkage });
+ const __aeabi_ul2f = @import("compiler_rt/floatundisf.zig").__aeabi_ul2f;
+ @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = linkage });
+
+ const __aeabi_fneg = @import("compiler_rt/negXf2.zig").__aeabi_fneg;
+ @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = linkage });
+ const __aeabi_dneg = @import("compiler_rt/negXf2.zig").__aeabi_dneg;
+ @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = linkage });
+
+ const __aeabi_fmul = @import("compiler_rt/mulXf3.zig").__aeabi_fmul;
+ @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = linkage });
+ const __aeabi_dmul = @import("compiler_rt/mulXf3.zig").__aeabi_dmul;
+ @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = linkage });
+
+ const __aeabi_d2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2h;
+ @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = linkage });
+
+ const __aeabi_f2ulz = @import("compiler_rt/fixunssfdi.zig").__aeabi_f2ulz;
+ @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = linkage });
+ const __aeabi_d2ulz = @import("compiler_rt/fixunsdfdi.zig").__aeabi_d2ulz;
+ @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = linkage });
+
+ const __aeabi_f2lz = @import("compiler_rt/fixsfdi.zig").__aeabi_f2lz;
+ @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = linkage });
+ const __aeabi_d2lz = @import("compiler_rt/fixdfdi.zig").__aeabi_d2lz;
+ @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = linkage });
+
+ const __aeabi_d2uiz = @import("compiler_rt/fixunsdfsi.zig").__aeabi_d2uiz;
+ @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = linkage });
+
+ const __aeabi_h2f = @import("compiler_rt/extendXfYf2.zig").__aeabi_h2f;
+ @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = linkage });
+ const __aeabi_f2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_f2h;
+ @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = linkage });
+
+ const __aeabi_i2f = @import("compiler_rt/floatsiXf.zig").__aeabi_i2f;
+ @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = linkage });
+ const __aeabi_d2f = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2f;
+ @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = linkage });
+
+ const __aeabi_fadd = @import("compiler_rt/addXf3.zig").__aeabi_fadd;
+ @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = linkage });
+ const __aeabi_dadd = @import("compiler_rt/addXf3.zig").__aeabi_dadd;
+ @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = linkage });
+ const __aeabi_fsub = @import("compiler_rt/addXf3.zig").__aeabi_fsub;
+ @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = linkage });
+ const __aeabi_dsub = @import("compiler_rt/addXf3.zig").__aeabi_dsub;
+ @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = linkage });
+
+ const __aeabi_f2uiz = @import("compiler_rt/fixunssfsi.zig").__aeabi_f2uiz;
+ @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = linkage });
+
+ const __aeabi_f2iz = @import("compiler_rt/fixsfsi.zig").__aeabi_f2iz;
+ @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = linkage });
+ const __aeabi_d2iz = @import("compiler_rt/fixdfsi.zig").__aeabi_d2iz;
+ @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = linkage });
+
+ const __aeabi_fdiv = @import("compiler_rt/divsf3.zig").__aeabi_fdiv;
+ @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = linkage });
+ const __aeabi_ddiv = @import("compiler_rt/divdf3.zig").__aeabi_ddiv;
+ @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = linkage });
+
+ const __aeabi_llsl = @import("compiler_rt/shift.zig").__aeabi_llsl;
+ @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = linkage });
+ const __aeabi_lasr = @import("compiler_rt/shift.zig").__aeabi_lasr;
+ @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = linkage });
+ const __aeabi_llsr = @import("compiler_rt/shift.zig").__aeabi_llsr;
+ @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = linkage });
+
+ const __aeabi_fcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpeq;
+ @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = linkage });
+ const __aeabi_fcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmplt;
+ @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = linkage });
+ const __aeabi_fcmple = @import("compiler_rt/compareXf2.zig").__aeabi_fcmple;
+ @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = linkage });
+ const __aeabi_fcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpge;
+ @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = linkage });
+ const __aeabi_fcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpgt;
+ @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = linkage });
+ const __aeabi_fcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpun;
+ @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = linkage });
+
+ const __aeabi_dcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpeq;
+ @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = linkage });
+ const __aeabi_dcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmplt;
+ @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = linkage });
+ const __aeabi_dcmple = @import("compiler_rt/compareXf2.zig").__aeabi_dcmple;
+ @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = linkage });
+ const __aeabi_dcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpge;
+ @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = linkage });
+ const __aeabi_dcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpgt;
+ @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = linkage });
+ const __aeabi_dcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpun;
+ @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = linkage });
+ }
+
+ if (arch == .i386 and abi == .msvc) {
+ // Don't let LLVM apply the stdcall name mangling on those MSVC builtins
+ const _alldiv = @import("compiler_rt/aulldiv.zig")._alldiv;
+ @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = strong_linkage });
+ const _aulldiv = @import("compiler_rt/aulldiv.zig")._aulldiv;
+ @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = strong_linkage });
+ const _allrem = @import("compiler_rt/aullrem.zig")._allrem;
+ @export(_allrem, .{ .name = "\x01__allrem", .linkage = strong_linkage });
+ const _aullrem = @import("compiler_rt/aullrem.zig")._aullrem;
+ @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = strong_linkage });
+ }
+
+ if (arch.isSPARC()) {
+ // SPARC systems use a different naming scheme
+ const _Qp_add = @import("compiler_rt/sparc.zig")._Qp_add;
+ @export(_Qp_add, .{ .name = "_Qp_add", .linkage = linkage });
+ const _Qp_div = @import("compiler_rt/sparc.zig")._Qp_div;
+ @export(_Qp_div, .{ .name = "_Qp_div", .linkage = linkage });
+ const _Qp_mul = @import("compiler_rt/sparc.zig")._Qp_mul;
+ @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = linkage });
+ const _Qp_sub = @import("compiler_rt/sparc.zig")._Qp_sub;
+ @export(_Qp_sub, .{ .name = "_Qp_sub", .linkage = linkage });
+
+ const _Qp_cmp = @import("compiler_rt/sparc.zig")._Qp_cmp;
+ @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = linkage });
+ const _Qp_feq = @import("compiler_rt/sparc.zig")._Qp_feq;
+ @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = linkage });
+ const _Qp_fne = @import("compiler_rt/sparc.zig")._Qp_fne;
+ @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = linkage });
+ const _Qp_flt = @import("compiler_rt/sparc.zig")._Qp_flt;
+ @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = linkage });
+ const _Qp_fle = @import("compiler_rt/sparc.zig")._Qp_fle;
+ @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = linkage });
+ const _Qp_fgt = @import("compiler_rt/sparc.zig")._Qp_fgt;
+ @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = linkage });
+ const _Qp_fge = @import("compiler_rt/sparc.zig")._Qp_fge;
+ @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = linkage });
+
+ const _Qp_itoq = @import("compiler_rt/sparc.zig")._Qp_itoq;
+ @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = linkage });
+ const _Qp_uitoq = @import("compiler_rt/sparc.zig")._Qp_uitoq;
+ @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = linkage });
+ const _Qp_xtoq = @import("compiler_rt/sparc.zig")._Qp_xtoq;
+ @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = linkage });
+ const _Qp_uxtoq = @import("compiler_rt/sparc.zig")._Qp_uxtoq;
+ @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = linkage });
+ const _Qp_stoq = @import("compiler_rt/sparc.zig")._Qp_stoq;
+ @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = linkage });
+ const _Qp_dtoq = @import("compiler_rt/sparc.zig")._Qp_dtoq;
+ @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = linkage });
+ const _Qp_qtoi = @import("compiler_rt/sparc.zig")._Qp_qtoi;
+ @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = linkage });
+ const _Qp_qtoui = @import("compiler_rt/sparc.zig")._Qp_qtoui;
+ @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = linkage });
+ const _Qp_qtox = @import("compiler_rt/sparc.zig")._Qp_qtox;
+ @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = linkage });
+ const _Qp_qtoux = @import("compiler_rt/sparc.zig")._Qp_qtoux;
+ @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = linkage });
+ const _Qp_qtos = @import("compiler_rt/sparc.zig")._Qp_qtos;
+ @export(_Qp_qtos, .{ .name = "_Qp_qtos", .linkage = linkage });
+ const _Qp_qtod = @import("compiler_rt/sparc.zig")._Qp_qtod;
+ @export(_Qp_qtod, .{ .name = "_Qp_qtod", .linkage = linkage });
+ }
+
+ if ((arch == .powerpc or arch.isPPC64()) and !is_test) {
+ @export(__addtf3, .{ .name = "__addkf3", .linkage = linkage });
+ @export(__subtf3, .{ .name = "__subkf3", .linkage = linkage });
+ @export(__multf3, .{ .name = "__mulkf3", .linkage = linkage });
+ @export(__divtf3, .{ .name = "__divkf3", .linkage = linkage });
+ @export(__extendsftf2, .{ .name = "__extendsfkf2", .linkage = linkage });
+ @export(__extenddftf2, .{ .name = "__extenddfkf2", .linkage = linkage });
+ @export(__trunctfsf2, .{ .name = "__trunckfsf2", .linkage = linkage });
+ @export(__trunctfdf2, .{ .name = "__trunckfdf2", .linkage = linkage });
+ @export(__fixtfdi, .{ .name = "__fixkfdi", .linkage = linkage });
+ @export(__fixtfsi, .{ .name = "__fixkfsi", .linkage = linkage });
+ @export(__fixunstfsi, .{ .name = "__fixunskfsi", .linkage = linkage });
+ @export(__fixunstfdi, .{ .name = "__fixunskfdi", .linkage = linkage });
+ @export(__floatsitf, .{ .name = "__floatsikf", .linkage = linkage });
+ @export(__floatditf, .{ .name = "__floatdikf", .linkage = linkage });
+ @export(__floatunditf, .{ .name = "__floatundikf", .linkage = linkage });
+ @export(__floatunsitf, .{ .name = "__floatunsikf", .linkage = linkage });
+
+ @export(__letf2, .{ .name = "__eqkf2", .linkage = linkage });
+ @export(__letf2, .{ .name = "__nekf2", .linkage = linkage });
+ @export(__getf2, .{ .name = "__gekf2", .linkage = linkage });
+ @export(__letf2, .{ .name = "__ltkf2", .linkage = linkage });
+ @export(__letf2, .{ .name = "__lekf2", .linkage = linkage });
+ @export(__getf2, .{ .name = "__gtkf2", .linkage = linkage });
+ @export(__unordtf2, .{ .name = "__unordkf2", .linkage = linkage });
+ }
+
+ if (builtin.os.tag == .windows) {
+ // Default stack-probe functions emitted by LLVM
+ if (is_mingw) {
+ const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
+ @export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage });
+ const ___chkstk_ms = @import("compiler_rt/stack_probe.zig").___chkstk_ms;
+ @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage });
+ } else if (!builtin.link_libc) {
+ // These symbols are otherwise exported by MSVCRT.lib
+ const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
+ @export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage });
+ const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
+ @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
+ }
+
+ switch (arch) {
+ .i386 => {
+ const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
+ @export(__divti3, .{ .name = "__divti3", .linkage = linkage });
+ const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
+ @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
+ const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
+ @export(__multi3, .{ .name = "__multi3", .linkage = linkage });
+ const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
+ @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
+ const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
+ @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
+ const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
+ @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ const __divti3_windows_x86_64 = @import("compiler_rt/divti3.zig").__divti3_windows_x86_64;
+ @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = linkage });
+ const __modti3_windows_x86_64 = @import("compiler_rt/modti3.zig").__modti3_windows_x86_64;
+ @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = linkage });
+ const __multi3_windows_x86_64 = @import("compiler_rt/multi3.zig").__multi3_windows_x86_64;
+ @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = linkage });
+ const __udivti3_windows_x86_64 = @import("compiler_rt/udivti3.zig").__udivti3_windows_x86_64;
+ @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = linkage });
+ const __udivmodti4_windows_x86_64 = @import("compiler_rt/udivmodti4.zig").__udivmodti4_windows_x86_64;
+ @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = linkage });
+ const __umodti3_windows_x86_64 = @import("compiler_rt/umodti3.zig").__umodti3_windows_x86_64;
+ @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = linkage });
+ },
+ else => {},
+ }
+ if (arch.isAARCH64()) {
+ const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
+ @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
+ const __divti3_windows = @import("compiler_rt/divti3.zig").__divti3;
+ @export(__divti3_windows, .{ .name = "__divti3", .linkage = linkage });
+ const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
+ @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
+ const __udivti3_windows = @import("compiler_rt/udivti3.zig").__udivti3;
+ @export(__udivti3_windows, .{ .name = "__udivti3", .linkage = linkage });
+ const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
+ @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
+ }
+ } else {
+ const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
+ @export(__divti3, .{ .name = "__divti3", .linkage = linkage });
const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
@export(__modti3, .{ .name = "__modti3", .linkage = linkage });
- const __udivti3_windows = @import("compiler_rt/udivti3.zig").__udivti3;
- @export(__udivti3_windows, .{ .name = "__udivti3", .linkage = linkage });
+ const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
+ @export(__multi3, .{ .name = "__multi3", .linkage = linkage });
+ const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
+ @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
+ const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
+ @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
@export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
}
- } else {
- const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
- @export(__divti3, .{ .name = "__divti3", .linkage = linkage });
- const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
- @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
- const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
- @export(__multi3, .{ .name = "__multi3", .linkage = linkage });
- const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
- @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
- const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
- @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
- const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
- @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
- }
- const __muloti4 = @import("compiler_rt/muloti4.zig").__muloti4;
- @export(__muloti4, .{ .name = "__muloti4", .linkage = linkage });
- const __mulodi4 = @import("compiler_rt/mulodi4.zig").__mulodi4;
- @export(__mulodi4, .{ .name = "__mulodi4", .linkage = linkage });
+ const __muloti4 = @import("compiler_rt/muloti4.zig").__muloti4;
+ @export(__muloti4, .{ .name = "__muloti4", .linkage = linkage });
+ const __mulodi4 = @import("compiler_rt/mulodi4.zig").__mulodi4;
+ @export(__mulodi4, .{ .name = "__mulodi4", .linkage = linkage });
- _ = @import("compiler_rt/atomics.zig");
+ _ = @import("compiler_rt/atomics.zig");
+ }
}
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
-pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
_ = error_return_trace;
@setCold(true);
+ if (builtin.zig_is_stage2) {
+ while (true) {
+ @breakpoint();
+ }
+ }
if (is_test) {
std.debug.panic("{s}", .{msg});
} else {
diff --git a/lib/std/special/compiler_rt/extendXfYf2.zig b/lib/std/special/compiler_rt/extendXfYf2.zig
index 5571bd9ed3..7afb6b1645 100644
--- a/lib/std/special/compiler_rt/extendXfYf2.zig
+++ b/lib/std/special/compiler_rt/extendXfYf2.zig
@@ -3,23 +3,23 @@ const builtin = @import("builtin");
const is_test = builtin.is_test;
pub fn __extendsfdf2(a: f32) callconv(.C) f64 {
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, a) });
+ return extendXfYf2(f64, f32, @bitCast(u32, a));
}
pub fn __extenddftf2(a: f64) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f64, @bitCast(u64, a) });
+ return extendXfYf2(f128, f64, @bitCast(u64, a));
}
pub fn __extendsftf2(a: f32) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f32, @bitCast(u32, a) });
+ return extendXfYf2(f128, f32, @bitCast(u32, a));
}
pub fn __extendhfsf2(a: u16) callconv(.C) f32 {
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, a });
+ return extendXfYf2(f32, f16, a);
}
pub fn __extendhftf2(a: u16) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f16, a });
+ return extendXfYf2(f128, f16, a);
}
pub fn __aeabi_h2f(arg: u16) callconv(.AAPCS) f32 {
@@ -34,7 +34,7 @@ pub fn __aeabi_f2d(arg: f32) callconv(.AAPCS) f64 {
const CHAR_BIT = 8;
-fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) dst_t {
+inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) dst_t {
@setRuntimeSafety(builtin.is_test);
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
diff --git a/lib/std/special/compiler_rt/stack_probe.zig b/lib/std/special/compiler_rt/stack_probe.zig
index db2a86bdfb..256207d6e6 100644
--- a/lib/std/special/compiler_rt/stack_probe.zig
+++ b/lib/std/special/compiler_rt/stack_probe.zig
@@ -53,19 +53,6 @@ pub fn zig_probe_stack() callconv(.Naked) void {
},
else => {},
}
- if (comptime native_arch.isAARCH64()) {
- asm volatile (
- \\ lsl x16, x15, #4
- \\ mov x17, sp
- \\1:
- \\ sub x17, x17, #PAGE_SIZE
- \\ subs x16, x16, #PAGE_SIZE
- \\ ldr xzr, [x17]
- \\ b.gt 1b
- \\
- \\ ret
- );
- }
unreachable;
}
@@ -118,6 +105,21 @@ fn win_probe_stack_only() void {
},
else => {},
}
+ if (comptime native_arch.isAARCH64()) {
+ // NOTE: page size hardcoded to 4096 for now
+ asm volatile (
+ \\ lsl x16, x15, #4
+ \\ mov x17, sp
+ \\1:
+ \\
+ \\ sub x17, x17, 4096
+ \\ subs x16, x16, 4096
+ \\ ldr xzr, [x17]
+ \\ b.gt 1b
+ \\
+ \\ ret
+ );
+ }
unreachable;
}
@@ -199,7 +201,9 @@ pub fn _chkstk() callconv(.Naked) void {
}
pub fn __chkstk() callconv(.Naked) void {
@setRuntimeSafety(false);
- switch (native_arch) {
+ if (comptime native_arch.isAARCH64()) {
+ @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
+ } else switch (native_arch) {
.i386 => @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{}),
.x86_64 => @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{}),
else => unreachable,
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index 4ca627d133..e72204377f 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -13,6 +13,12 @@ fn processArgs() void {
const args = std.process.argsAlloc(&args_allocator.allocator) catch {
@panic("Too many bytes passed over the CLI to the test runner");
};
+ if (args.len != 2) {
+ const self_name = if (args.len >= 1) args[0] else if (builtin.os.tag == .windows) "test.exe" else "test";
+ const zig_ext = if (builtin.os.tag == .windows) ".exe" else "";
+ std.debug.print("Usage: {s} path/to/zig{s}\n", .{ self_name, zig_ext });
+ @panic("Wrong number of command line arguments");
+ }
std.testing.zig_exe_path = args[1];
}
@@ -56,7 +62,7 @@ pub fn main() void {
.evented => blk: {
if (async_frame_buffer.len < size) {
std.heap.page_allocator.free(async_frame_buffer);
- async_frame_buffer = try std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size);
+ async_frame_buffer = std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size) catch @panic("out of memory");
}
const casted_fn = @ptrCast(fn () callconv(.Async) anyerror!void, test_fn.func);
break :blk await @asyncCall(async_frame_buffer, {}, casted_fn, .{});
@@ -123,8 +129,16 @@ pub fn log(
}
pub fn main2() anyerror!void {
+ var bad = false;
// Simpler main(), exercising fewer language features, so that stage2 can handle it.
for (builtin.test_functions) |test_fn| {
- try test_fn.func();
+ test_fn.func() catch |err| {
+ if (err != error.SkipZigTest) {
+ bad = true;
+ }
+ };
+ }
+ if (bad) {
+ return error.TestsFailed;
}
}
diff --git a/lib/std/start.zig b/lib/std/start.zig
index d8e5796d37..cd2cf230af 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -28,6 +28,8 @@ comptime {
if (@typeInfo(@TypeOf(root.main)).Fn.calling_convention != .C) {
@export(main2, .{ .name = "main" });
}
+ } else if (builtin.os.tag == .windows) {
+ @export(wWinMainCRTStartup2, .{ .name = "wWinMainCRTStartup" });
} else {
if (!@hasDecl(root, "_start")) {
@export(_start2, .{ .name = "_start" });
@@ -87,6 +89,16 @@ fn main2() callconv(.C) c_int {
}
fn _start2() callconv(.Naked) noreturn {
+ callMain2();
+}
+
+fn callMain2() noreturn {
+ @setAlignStack(16);
+ root.main();
+ exit2(0);
+}
+
+fn wWinMainCRTStartup2() callconv(.C) noreturn {
root.main();
exit2(0);
}
@@ -143,11 +155,16 @@ fn exit2(code: usize) noreturn {
},
else => @compileError("TODO"),
},
+ .windows => {
+ ExitProcess(@truncate(u32, code));
+ },
else => @compileError("TODO"),
}
unreachable;
}
+extern "kernel32" fn ExitProcess(exit_code: u32) callconv(.C) noreturn;
+
////////////////////////////////////////////////////////////////////////////////
fn _DllMainCRTStartup(
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 333088887b..6be8e9d77c 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -235,7 +235,6 @@ pub const Target = struct {
.fuchsia,
.kfreebsd,
.lv2,
- .solaris,
.zos,
.haiku,
.minix,
@@ -310,6 +309,12 @@ pub const Target = struct {
.max = .{ .major = 6, .minor = 0 },
},
},
+ .solaris => return .{
+ .semver = .{
+ .min = .{ .major = 5, .minor = 11 },
+ .max = .{ .major = 5, .minor = 11 },
+ },
+ },
.linux => return .{
.linux = .{
@@ -353,6 +358,7 @@ pub const Target = struct {
.netbsd,
.openbsd,
.dragonfly,
+ .solaris,
=> return TaggedVersionRange{ .semver = self.version_range.semver },
else => return .none,
@@ -385,6 +391,7 @@ pub const Target = struct {
.dragonfly,
.openbsd,
.haiku,
+ .solaris,
=> true,
.linux,
@@ -395,7 +402,6 @@ pub const Target = struct {
.fuchsia,
.kfreebsd,
.lv2,
- .solaris,
.zos,
.minix,
.rtems,
@@ -1523,6 +1529,7 @@ pub const Target = struct {
.netbsd => return copy(&result, "/libexec/ld.elf_so"),
.openbsd => return copy(&result, "/usr/libexec/ld.so"),
.dragonfly => return copy(&result, "/libexec/ld-elf.so.2"),
+ .solaris => return copy(&result, "/lib/64/ld.so.1"),
.linux => switch (self.cpu.arch) {
.i386,
.sparc,
@@ -1642,7 +1649,6 @@ pub const Target = struct {
.fuchsia,
.kfreebsd,
.lv2,
- .solaris,
.zos,
.minix,
.rtems,
diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig
index b93b5e361f..5da7686d66 100644
--- a/lib/std/unicode.zig
+++ b/lib/std/unicode.zig
@@ -553,8 +553,9 @@ fn testDecode(bytes: []const u8) !u21 {
/// Caller must free returned memory.
pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
var result = std.ArrayList(u8).init(allocator);
+ errdefer result.deinit();
// optimistically guess that it will all be ascii.
- try result.ensureCapacity(utf16le.len);
+ try result.ensureTotalCapacity(utf16le.len);
var out_index: usize = 0;
var it = Utf16LeIterator.init(utf16le);
while (try it.nextCodepoint()) |codepoint| {
@@ -569,9 +570,10 @@ pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8
/// Caller must free returned memory.
pub fn utf16leToUtf8AllocZ(allocator: *mem.Allocator, utf16le: []const u16) ![:0]u8 {
- var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
+ var result = std.ArrayList(u8).init(allocator);
+ errdefer result.deinit();
// optimistically guess that it will all be ascii.
- try result.ensureCapacity(utf16le.len);
+ try result.ensureTotalCapacity(utf16le.len);
var out_index: usize = 0;
var it = Utf16LeIterator.init(utf16le);
while (try it.nextCodepoint()) |codepoint| {
@@ -653,12 +655,20 @@ test "utf16leToUtf8" {
defer std.testing.allocator.free(utf8);
try testing.expect(mem.eql(u8, utf8, "\xf4\x8f\xb0\x80"));
}
+
+ {
+ mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xdcdc);
+ mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xdcdc);
+ const result = utf16leToUtf8Alloc(std.testing.allocator, &utf16le);
+ try std.testing.expectError(error.UnexpectedSecondSurrogateHalf, result);
+ }
}
pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u16 {
var result = std.ArrayList(u16).init(allocator);
+ errdefer result.deinit();
// optimistically guess that it will not require surrogate pairs
- try result.ensureCapacity(utf8.len + 1);
+ try result.ensureTotalCapacity(utf8.len + 1);
const view = try Utf8View.init(utf8);
var it = view.iterator();
@@ -718,6 +728,10 @@ test "utf8ToUtf16Le" {
try testing.expectEqual(@as(usize, 2), length);
try testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(utf16le[0..]));
}
+ {
+ const result = utf8ToUtf16Le(utf16le[0..], "\xf4\x90\x80\x80");
+ try testing.expectError(error.InvalidUtf8, result);
+ }
}
test "utf8ToUtf16LeWithNull" {
@@ -733,6 +747,10 @@ test "utf8ToUtf16LeWithNull" {
try testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(utf16[0..]));
try testing.expect(utf16[2] == 0);
}
+ {
+ const result = utf8ToUtf16LeWithNull(testing.allocator, "\xf4\x90\x80\x80");
+ try testing.expectError(error.InvalidUtf8, result);
+ }
}
/// Converts a UTF-8 string literal into a UTF-16LE string literal.
diff --git a/lib/std/x/os/net.zig b/lib/std/x/os/net.zig
index 5b06d492a5..a529396c3c 100644
--- a/lib/std/x/os/net.zig
+++ b/lib/std/x/os/net.zig
@@ -27,7 +27,7 @@ pub fn resolveScopeId(name: []const u8) !u32 {
return rc;
}
- const fd = try os.socket(os.AF.UNIX, os.SOCK.DGRAM, 0);
+ const fd = try os.socket(os.AF.INET, os.SOCK.DGRAM, 0);
defer os.closeSocket(fd);
var f: os.ifreq = undefined;
@@ -566,21 +566,17 @@ test "ipv6: parse & format" {
test "ipv6: parse & format addresses with scope ids" {
if (!have_ifnamesize) return error.SkipZigTest;
+ const iface = if (native_os.tag == .linux)
+ "lo"
+ else
+ "lo0";
+ const input = "FF01::FB%" ++ iface;
+ const output = "ff01::fb%1";
- const inputs = [_][]const u8{
- "FF01::FB%lo",
+ const parsed = IPv6.parse(input) catch |err| switch (err) {
+ error.InterfaceNotFound => return,
+ else => return err,
};
- const outputs = [_][]const u8{
- "ff01::fb%1",
- };
-
- for (inputs) |input, i| {
- const parsed = IPv6.parse(input) catch |err| switch (err) {
- error.InterfaceNotFound => continue,
- else => return err,
- };
-
- try testing.expectFmt(outputs[i], "{}", .{parsed});
- }
+ try testing.expectFmt(output, "{}", .{parsed});
}
diff --git a/lib/std/x/os/socket.zig b/lib/std/x/os/socket.zig
index 5930b8cb9a..529fd19598 100644
--- a/lib/std/x/os/socket.zig
+++ b/lib/std/x/os/socket.zig
@@ -37,7 +37,7 @@ pub const Socket = struct {
/// POSIX `sockaddr.storage`. The expected size and alignment is specified in IETF RFC 2553.
pub const Storage = extern struct {
- pub const expected_size = 128;
+ pub const expected_size = os.sockaddr.SS_MAXSIZE;
pub const expected_alignment = 8;
pub const padding_size = expected_size -
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 35e75f4db2..4ee3a45221 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -262,6 +262,9 @@ pub fn renderError(tree: Tree, parse_error: Error, stream: anytype) !void {
token_tags[parse_error.token].symbol(),
});
},
+ .extra_addrspace_qualifier => {
+ return stream.writeAll("extra addrspace qualifier");
+ },
.extra_align_qualifier => {
return stream.writeAll("extra align qualifier");
},
@@ -392,14 +395,18 @@ pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex {
.assign_mod,
.assign_add,
.assign_sub,
- .assign_bit_shift_left,
- .assign_bit_shift_right,
+ .assign_shl,
+ .assign_shl_sat,
+ .assign_shr,
.assign_bit_and,
.assign_bit_xor,
.assign_bit_or,
.assign_mul_wrap,
.assign_add_wrap,
.assign_sub_wrap,
+ .assign_mul_sat,
+ .assign_add_sat,
+ .assign_sub_sat,
.assign,
.merge_error_sets,
.mul,
@@ -407,13 +414,17 @@ pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex {
.mod,
.array_mult,
.mul_wrap,
+ .mul_sat,
.add,
.sub,
.array_cat,
.add_wrap,
.sub_wrap,
- .bit_shift_left,
- .bit_shift_right,
+ .add_sat,
+ .sub_sat,
+ .shl,
+ .shl_sat,
+ .shr,
.bit_and,
.bit_xor,
.bit_or,
@@ -648,14 +659,18 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
.assign_mod,
.assign_add,
.assign_sub,
- .assign_bit_shift_left,
- .assign_bit_shift_right,
+ .assign_shl,
+ .assign_shl_sat,
+ .assign_shr,
.assign_bit_and,
.assign_bit_xor,
.assign_bit_or,
.assign_mul_wrap,
.assign_add_wrap,
.assign_sub_wrap,
+ .assign_mul_sat,
+ .assign_add_sat,
+ .assign_sub_sat,
.assign,
.merge_error_sets,
.mul,
@@ -663,13 +678,17 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
.mod,
.array_mult,
.mul_wrap,
+ .mul_sat,
.add,
.sub,
.array_cat,
.add_wrap,
.sub_wrap,
- .bit_shift_left,
- .bit_shift_right,
+ .add_sat,
+ .sub_sat,
+ .shl,
+ .shl_sat,
+ .shr,
.bit_and,
.bit_xor,
.bit_or,
@@ -1021,7 +1040,7 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
},
.fn_proto_one => {
const extra = tree.extraData(datas[n].lhs, Node.FnProtoOne);
- // linksection, callconv, align can appear in any order, so we
+ // addrspace, linksection, callconv, align can appear in any order, so we
// find the last one here.
var max_node: Node.Index = datas[n].rhs;
var max_start = token_starts[main_tokens[max_node]];
@@ -1034,6 +1053,14 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
max_offset = 1; // for the rparen
}
}
+ if (extra.addrspace_expr != 0) {
+ const start = token_starts[main_tokens[extra.addrspace_expr]];
+ if (start > max_start) {
+ max_node = extra.addrspace_expr;
+ max_start = start;
+ max_offset = 1; // for the rparen
+ }
+ }
if (extra.section_expr != 0) {
const start = token_starts[main_tokens[extra.section_expr]];
if (start > max_start) {
@@ -1055,7 +1082,7 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
},
.fn_proto => {
const extra = tree.extraData(datas[n].lhs, Node.FnProto);
- // linksection, callconv, align can appear in any order, so we
+ // addrspace, linksection, callconv, align can appear in any order, so we
// find the last one here.
var max_node: Node.Index = datas[n].rhs;
var max_start = token_starts[main_tokens[max_node]];
@@ -1068,6 +1095,14 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex {
max_offset = 1; // for the rparen
}
}
+ if (extra.addrspace_expr != 0) {
+ const start = token_starts[main_tokens[extra.addrspace_expr]];
+ if (start > max_start) {
+ max_node = extra.addrspace_expr;
+ max_start = start;
+ max_offset = 1; // for the rparen
+ }
+ }
if (extra.section_expr != 0) {
const start = token_starts[main_tokens[extra.section_expr]];
if (start > max_start) {
@@ -1138,6 +1173,7 @@ pub fn globalVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
return tree.fullVarDecl(.{
.type_node = extra.type_node,
.align_node = extra.align_node,
+ .addrspace_node = extra.addrspace_node,
.section_node = extra.section_node,
.init_node = data.rhs,
.mut_token = tree.nodes.items(.main_token)[node],
@@ -1151,6 +1187,7 @@ pub fn localVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
return tree.fullVarDecl(.{
.type_node = extra.type_node,
.align_node = extra.align_node,
+ .addrspace_node = 0,
.section_node = 0,
.init_node = data.rhs,
.mut_token = tree.nodes.items(.main_token)[node],
@@ -1163,6 +1200,7 @@ pub fn simpleVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
return tree.fullVarDecl(.{
.type_node = data.lhs,
.align_node = 0,
+ .addrspace_node = 0,
.section_node = 0,
.init_node = data.rhs,
.mut_token = tree.nodes.items(.main_token)[node],
@@ -1175,6 +1213,7 @@ pub fn alignedVarDecl(tree: Tree, node: Node.Index) full.VarDecl {
return tree.fullVarDecl(.{
.type_node = 0,
.align_node = data.lhs,
+ .addrspace_node = 0,
.section_node = 0,
.init_node = data.rhs,
.mut_token = tree.nodes.items(.main_token)[node],
@@ -1249,6 +1288,7 @@ pub fn fnProtoSimple(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.
.return_type = data.rhs,
.params = params,
.align_expr = 0,
+ .addrspace_expr = 0,
.section_expr = 0,
.callconv_expr = 0,
});
@@ -1265,6 +1305,7 @@ pub fn fnProtoMulti(tree: Tree, node: Node.Index) full.FnProto {
.return_type = data.rhs,
.params = params,
.align_expr = 0,
+ .addrspace_expr = 0,
.section_expr = 0,
.callconv_expr = 0,
});
@@ -1282,6 +1323,7 @@ pub fn fnProtoOne(tree: Tree, buffer: *[1]Node.Index, node: Node.Index) full.FnP
.return_type = data.rhs,
.params = params,
.align_expr = extra.align_expr,
+ .addrspace_expr = extra.addrspace_expr,
.section_expr = extra.section_expr,
.callconv_expr = extra.callconv_expr,
});
@@ -1298,6 +1340,7 @@ pub fn fnProto(tree: Tree, node: Node.Index) full.FnProto {
.return_type = data.rhs,
.params = params,
.align_expr = extra.align_expr,
+ .addrspace_expr = extra.addrspace_expr,
.section_expr = extra.section_expr,
.callconv_expr = extra.callconv_expr,
});
@@ -1453,6 +1496,7 @@ pub fn ptrTypeAligned(tree: Tree, node: Node.Index) full.PtrType {
return tree.fullPtrType(.{
.main_token = tree.nodes.items(.main_token)[node],
.align_node = data.lhs,
+ .addrspace_node = 0,
.sentinel = 0,
.bit_range_start = 0,
.bit_range_end = 0,
@@ -1466,6 +1510,7 @@ pub fn ptrTypeSentinel(tree: Tree, node: Node.Index) full.PtrType {
return tree.fullPtrType(.{
.main_token = tree.nodes.items(.main_token)[node],
.align_node = 0,
+ .addrspace_node = 0,
.sentinel = data.lhs,
.bit_range_start = 0,
.bit_range_end = 0,
@@ -1480,6 +1525,7 @@ pub fn ptrType(tree: Tree, node: Node.Index) full.PtrType {
return tree.fullPtrType(.{
.main_token = tree.nodes.items(.main_token)[node],
.align_node = extra.align_node,
+ .addrspace_node = extra.addrspace_node,
.sentinel = extra.sentinel,
.bit_range_start = 0,
.bit_range_end = 0,
@@ -1494,6 +1540,7 @@ pub fn ptrTypeBitRange(tree: Tree, node: Node.Index) full.PtrType {
return tree.fullPtrType(.{
.main_token = tree.nodes.items(.main_token)[node],
.align_node = extra.align_node,
+ .addrspace_node = extra.addrspace_node,
.sentinel = extra.sentinel,
.bit_range_start = extra.bit_range_start,
.bit_range_end = extra.bit_range_end,
@@ -2063,6 +2110,7 @@ pub const full = struct {
mut_token: TokenIndex,
type_node: Node.Index,
align_node: Node.Index,
+ addrspace_node: Node.Index,
section_node: Node.Index,
init_node: Node.Index,
};
@@ -2130,6 +2178,7 @@ pub const full = struct {
return_type: Node.Index,
params: []const Node.Index,
align_expr: Node.Index,
+ addrspace_expr: Node.Index,
section_expr: Node.Index,
callconv_expr: Node.Index,
};
@@ -2288,6 +2337,7 @@ pub const full = struct {
pub const Components = struct {
main_token: TokenIndex,
align_node: Node.Index,
+ addrspace_node: Node.Index,
sentinel: Node.Index,
bit_range_start: Node.Index,
bit_range_end: Node.Index,
@@ -2397,6 +2447,7 @@ pub const Error = struct {
expected_var_decl_or_fn,
expected_loop_payload,
expected_container,
+ extra_addrspace_qualifier,
extra_align_qualifier,
extra_allowzero_qualifier,
extra_const_qualifier,
@@ -2489,9 +2540,11 @@ pub const Node = struct {
/// `lhs -= rhs`. main_token is op.
assign_sub,
/// `lhs <<= rhs`. main_token is op.
- assign_bit_shift_left,
+ assign_shl,
+ /// `lhs <<|= rhs`. main_token is op.
+ assign_shl_sat,
/// `lhs >>= rhs`. main_token is op.
- assign_bit_shift_right,
+ assign_shr,
/// `lhs &= rhs`. main_token is op.
assign_bit_and,
/// `lhs ^= rhs`. main_token is op.
@@ -2504,6 +2557,12 @@ pub const Node = struct {
assign_add_wrap,
/// `lhs -%= rhs`. main_token is op.
assign_sub_wrap,
+ /// `lhs *|= rhs`. main_token is op.
+ assign_mul_sat,
+ /// `lhs +|= rhs`. main_token is op.
+ assign_add_sat,
+ /// `lhs -|= rhs`. main_token is op.
+ assign_sub_sat,
/// `lhs = rhs`. main_token is op.
assign,
/// `lhs || rhs`. main_token is the `||`.
@@ -2518,6 +2577,8 @@ pub const Node = struct {
array_mult,
/// `lhs *% rhs`. main_token is the `*%`.
mul_wrap,
+ /// `lhs *| rhs`. main_token is the `*|`.
+ mul_sat,
/// `lhs + rhs`. main_token is the `+`.
add,
/// `lhs - rhs`. main_token is the `-`.
@@ -2528,10 +2589,16 @@ pub const Node = struct {
add_wrap,
/// `lhs -% rhs`. main_token is the `-%`.
sub_wrap,
+ /// `lhs +| rhs`. main_token is the `+|`.
+ add_sat,
+ /// `lhs -| rhs`. main_token is the `-|`.
+ sub_sat,
/// `lhs << rhs`. main_token is the `<<`.
- bit_shift_left,
+ shl,
+ /// `lhs <<| rhs`. main_token is the `<<|`.
+ shl_sat,
/// `lhs >> rhs`. main_token is the `>>`.
- bit_shift_right,
+ shr,
/// `lhs & rhs`. main_token is the `&`.
bit_and,
/// `lhs ^ rhs`. main_token is the `^`.
@@ -2723,13 +2790,13 @@ pub const Node = struct {
/// main_token is the `fn` keyword.
/// extern function declarations use this tag.
fn_proto_multi,
- /// `fn(a: b) rhs linksection(e) callconv(f)`. `FnProtoOne[lhs]`.
+ /// `fn(a: b) rhs addrspace(e) linksection(f) callconv(g)`. `FnProtoOne[lhs]`.
/// zero or one parameters.
/// anytype and ... parameters are omitted from the AST tree.
/// main_token is the `fn` keyword.
/// extern function declarations use this tag.
fn_proto_one,
- /// `fn(a: b, c: d) rhs linksection(e) callconv(f)`. `FnProto[lhs]`.
+ /// `fn(a: b, c: d) rhs addrspace(e) linksection(f) callconv(g)`. `FnProto[lhs]`.
/// anytype and ... parameters are omitted from the AST tree.
/// main_token is the `fn` keyword.
/// extern function declarations use this tag.
@@ -2893,11 +2960,13 @@ pub const Node = struct {
pub const PtrType = struct {
sentinel: Index,
align_node: Index,
+ addrspace_node: Index,
};
pub const PtrTypeBitRange = struct {
sentinel: Index,
align_node: Index,
+ addrspace_node: Index,
bit_range_start: Index,
bit_range_end: Index,
};
@@ -2920,8 +2989,13 @@ pub const Node = struct {
};
pub const GlobalVarDecl = struct {
+ /// Populated if there is an explicit type ascription.
type_node: Index,
+ /// Populated if align(A) is present.
align_node: Index,
+ /// Populated if addrspace(A) is present.
+ addrspace_node: Index,
+ /// Populated if linksection(A) is present.
section_node: Index,
};
@@ -2953,6 +3027,8 @@ pub const Node = struct {
param: Index,
/// Populated if align(A) is present.
align_expr: Index,
+ /// Populated if addrspace(A) is present.
+ addrspace_expr: Index,
/// Populated if linksection(A) is present.
section_expr: Index,
/// Populated if callconv(A) is present.
@@ -2964,6 +3040,8 @@ pub const Node = struct {
params_end: Index,
/// Populated if align(A) is present.
align_expr: Index,
+ /// Populated if addrspace(A) is present.
+ addrspace_expr: Index,
/// Populated if linksection(A) is present.
section_expr: Index,
/// Populated if callconv(A) is present.
diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig
index bb8a699f69..999572d212 100644
--- a/lib/std/zig/c_translation.zig
+++ b/lib/std/zig/c_translation.zig
@@ -325,6 +325,7 @@ pub fn FlexibleArrayType(comptime SelfType: type, ElementType: type) type {
.is_const = ptr.is_const,
.is_volatile = ptr.is_volatile,
.alignment = @alignOf(ElementType),
+ .address_space = .generic,
.child = ElementType,
.is_allowzero = true,
.sentinel = null,
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 842847a295..a449f6ae0f 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -17,7 +17,7 @@ pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
// Empirically, the zig std lib has an 8:1 ratio of source bytes to token count.
const estimated_token_count = source.len / 8;
- try tokens.ensureCapacity(gpa, estimated_token_count);
+ try tokens.ensureTotalCapacity(gpa, estimated_token_count);
var tokenizer = std.zig.Tokenizer.init(source);
while (true) {
@@ -48,7 +48,7 @@ pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
// Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes.
// Make sure at least 1 so we can use appendAssumeCapacity on the root node below.
const estimated_node_count = (tokens.len + 2) / 2;
- try parser.nodes.ensureCapacity(gpa, estimated_node_count);
+ try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);
// Root node must be index 0.
// Root <- skip ContainerMembers eof
@@ -138,7 +138,7 @@ const Parser = struct {
fn addExtra(p: *Parser, extra: anytype) Allocator.Error!Node.Index {
const fields = std.meta.fields(@TypeOf(extra));
- try p.extra_data.ensureCapacity(p.gpa, p.extra_data.items.len + fields.len);
+ try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
const result = @intCast(u32, p.extra_data.items.len);
inline for (fields) |field| {
comptime assert(field.field_type == Node.Index);
@@ -629,7 +629,7 @@ const Parser = struct {
};
}
- /// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
+ /// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? AddrSpace? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
fn parseFnProto(p: *Parser) !Node.Index {
const fn_token = p.eatToken(.keyword_fn) orelse return null_node;
@@ -639,6 +639,7 @@ const Parser = struct {
_ = p.eatToken(.identifier);
const params = try p.parseParamDeclList();
const align_expr = try p.parseByteAlign();
+ const addrspace_expr = try p.parseAddrSpace();
const section_expr = try p.parseLinkSection();
const callconv_expr = try p.parseCallconv();
_ = p.eatToken(.bang);
@@ -650,7 +651,7 @@ const Parser = struct {
try p.warn(.expected_return_type);
}
- if (align_expr == 0 and section_expr == 0 and callconv_expr == 0) {
+ if (align_expr == 0 and section_expr == 0 and callconv_expr == 0 and addrspace_expr == 0) {
switch (params) {
.zero_or_one => |param| return p.setNode(fn_proto_index, .{
.tag = .fn_proto_simple,
@@ -683,6 +684,7 @@ const Parser = struct {
.lhs = try p.addExtra(Node.FnProtoOne{
.param = param,
.align_expr = align_expr,
+ .addrspace_expr = addrspace_expr,
.section_expr = section_expr,
.callconv_expr = callconv_expr,
}),
@@ -698,6 +700,7 @@ const Parser = struct {
.params_start = span.start,
.params_end = span.end,
.align_expr = align_expr,
+ .addrspace_expr = addrspace_expr,
.section_expr = section_expr,
.callconv_expr = callconv_expr,
}),
@@ -708,7 +711,7 @@ const Parser = struct {
}
}
- /// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? LinkSection? (EQUAL Expr)? SEMICOLON
+ /// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? AddrSpace? LinkSection? (EQUAL Expr)? SEMICOLON
fn parseVarDecl(p: *Parser) !Node.Index {
const mut_token = p.eatToken(.keyword_const) orelse
p.eatToken(.keyword_var) orelse
@@ -717,9 +720,10 @@ const Parser = struct {
_ = try p.expectToken(.identifier);
const type_node: Node.Index = if (p.eatToken(.colon) == null) 0 else try p.expectTypeExpr();
const align_node = try p.parseByteAlign();
+ const addrspace_node = try p.parseAddrSpace();
const section_node = try p.parseLinkSection();
const init_node: Node.Index = if (p.eatToken(.equal) == null) 0 else try p.expectExpr();
- if (section_node == 0) {
+ if (section_node == 0 and addrspace_node == 0) {
if (align_node == 0) {
return p.addNode(.{
.tag = .simple_var_decl,
@@ -759,6 +763,7 @@ const Parser = struct {
.lhs = try p.addExtra(Node.GlobalVarDecl{
.type_node = type_node,
.align_node = align_node,
+ .addrspace_node = addrspace_node,
.section_node = section_node,
}),
.rhs = init_node,
@@ -1263,14 +1268,18 @@ const Parser = struct {
.percent_equal => .assign_mod,
.plus_equal => .assign_add,
.minus_equal => .assign_sub,
- .angle_bracket_angle_bracket_left_equal => .assign_bit_shift_left,
- .angle_bracket_angle_bracket_right_equal => .assign_bit_shift_right,
+ .angle_bracket_angle_bracket_left_equal => .assign_shl,
+ .angle_bracket_angle_bracket_left_pipe_equal => .assign_shl_sat,
+ .angle_bracket_angle_bracket_right_equal => .assign_shr,
.ampersand_equal => .assign_bit_and,
.caret_equal => .assign_bit_xor,
.pipe_equal => .assign_bit_or,
.asterisk_percent_equal => .assign_mul_wrap,
.plus_percent_equal => .assign_add_wrap,
.minus_percent_equal => .assign_sub_wrap,
+ .asterisk_pipe_equal => .assign_mul_sat,
+ .plus_pipe_equal => .assign_add_sat,
+ .minus_pipe_equal => .assign_sub_sat,
.equal => .assign,
else => return expr,
};
@@ -1337,14 +1346,17 @@ const Parser = struct {
.keyword_orelse = .{ .prec = 40, .tag = .@"orelse" },
.keyword_catch = .{ .prec = 40, .tag = .@"catch" },
- .angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .bit_shift_left },
- .angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .bit_shift_right },
+ .angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .shl },
+ .angle_bracket_angle_bracket_left_pipe = .{ .prec = 50, .tag = .shl_sat },
+ .angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .shr },
.plus = .{ .prec = 60, .tag = .add },
.minus = .{ .prec = 60, .tag = .sub },
.plus_plus = .{ .prec = 60, .tag = .array_cat },
.plus_percent = .{ .prec = 60, .tag = .add_wrap },
.minus_percent = .{ .prec = 60, .tag = .sub_wrap },
+ .plus_pipe = .{ .prec = 60, .tag = .add_sat },
+ .minus_pipe = .{ .prec = 60, .tag = .sub_sat },
.pipe_pipe = .{ .prec = 70, .tag = .merge_error_sets },
.asterisk = .{ .prec = 70, .tag = .mul },
@@ -1352,6 +1364,7 @@ const Parser = struct {
.percent = .{ .prec = 70, .tag = .mod },
.asterisk_asterisk = .{ .prec = 70, .tag = .array_mult },
.asterisk_percent = .{ .prec = 70, .tag = .mul_wrap },
+ .asterisk_pipe = .{ .prec = 70, .tag = .mul_sat },
});
fn parseExprPrecedence(p: *Parser, min_prec: i32) Error!Node.Index {
@@ -1440,8 +1453,8 @@ const Parser = struct {
/// PrefixTypeOp
/// <- QUESTIONMARK
/// / KEYWORD_anyframe MINUSRARROW
- /// / SliceTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
- /// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
+ /// / SliceTypeStart (ByteAlign / AddrSpace / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
+ /// / PtrTypeStart (AddrSpace / KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/// / ArrayTypeStart
/// SliceTypeStart <- LBRACKET (COLON Expr)? RBRACKET
/// PtrTypeStart
@@ -1474,16 +1487,7 @@ const Parser = struct {
const asterisk = p.nextToken();
const mods = try p.parsePtrModifiers();
const elem_type = try p.expectTypeExpr();
- if (mods.bit_range_start == 0) {
- return p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = asterisk,
- .data = .{
- .lhs = mods.align_node,
- .rhs = elem_type,
- },
- });
- } else {
+ if (mods.bit_range_start != 0) {
return p.addNode(.{
.tag = .ptr_type_bit_range,
.main_token = asterisk,
@@ -1491,12 +1495,35 @@ const Parser = struct {
.lhs = try p.addExtra(Node.PtrTypeBitRange{
.sentinel = 0,
.align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
.bit_range_start = mods.bit_range_start,
.bit_range_end = mods.bit_range_end,
}),
.rhs = elem_type,
},
});
+ } else if (mods.addrspace_node != 0) {
+ return p.addNode(.{
+ .tag = .ptr_type,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = try p.addExtra(Node.PtrType{
+ .sentinel = 0,
+ .align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
+ }),
+ .rhs = elem_type,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .ptr_type_aligned,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = mods.align_node,
+ .rhs = elem_type,
+ },
+ });
}
},
.asterisk_asterisk => {
@@ -1504,16 +1531,7 @@ const Parser = struct {
const mods = try p.parsePtrModifiers();
const elem_type = try p.expectTypeExpr();
const inner: Node.Index = inner: {
- if (mods.bit_range_start == 0) {
- break :inner try p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = asterisk,
- .data = .{
- .lhs = mods.align_node,
- .rhs = elem_type,
- },
- });
- } else {
+ if (mods.bit_range_start != 0) {
break :inner try p.addNode(.{
.tag = .ptr_type_bit_range,
.main_token = asterisk,
@@ -1521,12 +1539,35 @@ const Parser = struct {
.lhs = try p.addExtra(Node.PtrTypeBitRange{
.sentinel = 0,
.align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
.bit_range_start = mods.bit_range_start,
.bit_range_end = mods.bit_range_end,
}),
.rhs = elem_type,
},
});
+ } else if (mods.addrspace_node != 0) {
+ break :inner try p.addNode(.{
+ .tag = .ptr_type,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = try p.addExtra(Node.PtrType{
+ .sentinel = 0,
+ .align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
+ }),
+ .rhs = elem_type,
+ },
+ });
+ } else {
+ break :inner try p.addNode(.{
+ .tag = .ptr_type_aligned,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = mods.align_node,
+ .rhs = elem_type,
+ },
+ });
}
};
return p.addNode(.{
@@ -1543,24 +1584,19 @@ const Parser = struct {
_ = p.nextToken();
const asterisk = p.nextToken();
var sentinel: Node.Index = 0;
- prefix: {
- if (p.eatToken(.identifier)) |ident| {
- const token_slice = p.source[p.token_starts[ident]..][0..2];
- if (!std.mem.eql(u8, token_slice, "c]")) {
- p.tok_i -= 1;
- } else {
- break :prefix;
- }
- }
- if (p.eatToken(.colon)) |_| {
- sentinel = try p.expectExpr();
+ if (p.eatToken(.identifier)) |ident| {
+ const ident_slice = p.source[p.token_starts[ident]..p.token_starts[ident + 1]];
+ if (!std.mem.eql(u8, std.mem.trimRight(u8, ident_slice, &std.ascii.spaces), "c")) {
+ p.tok_i -= 1;
}
+ } else if (p.eatToken(.colon)) |_| {
+ sentinel = try p.expectExpr();
}
_ = try p.expectToken(.r_bracket);
const mods = try p.parsePtrModifiers();
const elem_type = try p.expectTypeExpr();
if (mods.bit_range_start == 0) {
- if (sentinel == 0) {
+ if (sentinel == 0 and mods.addrspace_node == 0) {
return p.addNode(.{
.tag = .ptr_type_aligned,
.main_token = asterisk,
@@ -1569,7 +1605,7 @@ const Parser = struct {
.rhs = elem_type,
},
});
- } else if (mods.align_node == 0) {
+ } else if (mods.align_node == 0 and mods.addrspace_node == 0) {
return p.addNode(.{
.tag = .ptr_type_sentinel,
.main_token = asterisk,
@@ -1586,6 +1622,7 @@ const Parser = struct {
.lhs = try p.addExtra(Node.PtrType{
.sentinel = sentinel,
.align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
}),
.rhs = elem_type,
},
@@ -1599,6 +1636,7 @@ const Parser = struct {
.lhs = try p.addExtra(Node.PtrTypeBitRange{
.sentinel = sentinel,
.align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
.bit_range_start = mods.bit_range_start,
.bit_range_end = mods.bit_range_end,
}),
@@ -1624,7 +1662,7 @@ const Parser = struct {
.token = p.nodes.items(.main_token)[mods.bit_range_start],
});
}
- if (sentinel == 0) {
+ if (sentinel == 0 and mods.addrspace_node == 0) {
return p.addNode(.{
.tag = .ptr_type_aligned,
.main_token = lbracket,
@@ -1633,7 +1671,7 @@ const Parser = struct {
.rhs = elem_type,
},
});
- } else if (mods.align_node == 0) {
+ } else if (mods.align_node == 0 and mods.addrspace_node == 0) {
return p.addNode(.{
.tag = .ptr_type_sentinel,
.main_token = lbracket,
@@ -1650,6 +1688,7 @@ const Parser = struct {
.lhs = try p.addExtra(Node.PtrType{
.sentinel = sentinel,
.align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
}),
.rhs = elem_type,
},
@@ -1661,6 +1700,7 @@ const Parser = struct {
.keyword_const,
.keyword_volatile,
.keyword_allowzero,
+ .keyword_addrspace,
=> return p.fail(.ptr_mod_on_array_child_type),
else => {},
}
@@ -2879,6 +2919,15 @@ const Parser = struct {
return expr_node;
}
+ /// AddrSpace <- KEYWORD_addrspace LPAREN Expr RPAREN
+ fn parseAddrSpace(p: *Parser) !Node.Index {
+ _ = p.eatToken(.keyword_addrspace) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const expr_node = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ return expr_node;
+ }
+
/// ParamDecl
/// <- (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
/// / DOT3
@@ -3011,6 +3060,7 @@ const Parser = struct {
const PtrModifiers = struct {
align_node: Node.Index,
+ addrspace_node: Node.Index,
bit_range_start: Node.Index,
bit_range_end: Node.Index,
};
@@ -3018,12 +3068,14 @@ const Parser = struct {
fn parsePtrModifiers(p: *Parser) !PtrModifiers {
var result: PtrModifiers = .{
.align_node = 0,
+ .addrspace_node = 0,
.bit_range_start = 0,
.bit_range_end = 0,
};
var saw_const = false;
var saw_volatile = false;
var saw_allowzero = false;
+ var saw_addrspace = false;
while (true) {
switch (p.token_tags[p.tok_i]) {
.keyword_align => {
@@ -3063,6 +3115,12 @@ const Parser = struct {
p.tok_i += 1;
saw_allowzero = true;
},
+ .keyword_addrspace => {
+ if (saw_addrspace) {
+ try p.warn(.extra_addrspace_qualifier);
+ }
+ result.addrspace_node = try p.parseAddrSpace();
+ },
else => return result,
}
}
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 615648d1ad..f69f0598dd 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -404,6 +404,10 @@ test "zig fmt: trailing comma in fn parameter list" {
\\pub fn f(
\\ a: i32,
\\ b: i32,
+ \\) addrspace(.generic) i32 {}
+ \\pub fn f(
+ \\ a: i32,
+ \\ b: i32,
\\) linksection(".text") i32 {}
\\pub fn f(
\\ a: i32,
@@ -553,8 +557,8 @@ test "zig fmt: sentinel-terminated slice type" {
test "zig fmt: pointer-to-one with modifiers" {
try testCanonical(
\\const x: *u32 = undefined;
- \\const y: *allowzero align(8) const volatile u32 = undefined;
- \\const z: *allowzero align(8:4:2) const volatile u32 = undefined;
+ \\const y: *allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
+ \\const z: *allowzero align(8:4:2) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@@ -562,8 +566,8 @@ test "zig fmt: pointer-to-one with modifiers" {
test "zig fmt: pointer-to-many with modifiers" {
try testCanonical(
\\const x: [*]u32 = undefined;
- \\const y: [*]allowzero align(8) const volatile u32 = undefined;
- \\const z: [*]allowzero align(8:4:2) const volatile u32 = undefined;
+ \\const y: [*]allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
+ \\const z: [*]allowzero align(8:4:2) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@@ -571,8 +575,8 @@ test "zig fmt: pointer-to-many with modifiers" {
test "zig fmt: sentinel pointer with modifiers" {
try testCanonical(
\\const x: [*:42]u32 = undefined;
- \\const y: [*:42]allowzero align(8) const volatile u32 = undefined;
- \\const y: [*:42]allowzero align(8:4:2) const volatile u32 = undefined;
+ \\const y: [*:42]allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
+ \\const y: [*:42]allowzero align(8:4:2) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@@ -580,8 +584,8 @@ test "zig fmt: sentinel pointer with modifiers" {
test "zig fmt: c pointer with modifiers" {
try testCanonical(
\\const x: [*c]u32 = undefined;
- \\const y: [*c]allowzero align(8) const volatile u32 = undefined;
- \\const z: [*c]allowzero align(8:4:2) const volatile u32 = undefined;
+ \\const y: [*c]allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
+ \\const z: [*c]allowzero align(8:4:2) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@@ -589,7 +593,7 @@ test "zig fmt: c pointer with modifiers" {
test "zig fmt: slice with modifiers" {
try testCanonical(
\\const x: []u32 = undefined;
- \\const y: []allowzero align(8) const volatile u32 = undefined;
+ \\const y: []allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@@ -597,7 +601,7 @@ test "zig fmt: slice with modifiers" {
test "zig fmt: sentinel slice with modifiers" {
try testCanonical(
\\const x: [:42]u32 = undefined;
- \\const y: [:42]allowzero align(8) const volatile u32 = undefined;
+ \\const y: [:42]allowzero align(8) addrspace(.generic) const volatile u32 = undefined;
\\
);
}
@@ -1129,6 +1133,16 @@ test "zig fmt: linksection" {
);
}
+test "zig fmt: addrspace" {
+ try testCanonical(
+ \\export var python_length: u64 align(1) addrspace(.generic);
+ \\export var python_color: Color addrspace(.generic) = .green;
+ \\export var python_legs: u0 align(8) addrspace(.generic) linksection(".python") = 0;
+ \\export fn python_hiss() align(8) addrspace(.generic) linksection(".python") void;
+ \\
+ );
+}
+
test "zig fmt: correctly space struct fields with doc comments" {
try testTransform(
\\pub const S = struct {
@@ -4725,6 +4739,26 @@ test "zig fmt: assignment with inline for and inline while" {
);
}
+test "zig fmt: saturating arithmetic" {
+ try testCanonical(
+ \\test {
+ \\ const actual = switch (op) {
+ \\ .add => a +| b,
+ \\ .sub => a -| b,
+ \\ .mul => a *| b,
+ \\ .shl => a <<| b,
+ \\ };
+ \\ switch (op) {
+ \\ .add => actual +|= b,
+ \\ .sub => actual -|= b,
+ \\ .mul => actual *|= b,
+ \\ .shl => actual <<|= b,
+ \\ }
+ \\}
+ \\
+ );
+}
+
test "zig fmt: insert trailing comma if there are comments between switch values" {
try testTransform(
\\const a = switch (b) {
@@ -5225,6 +5259,14 @@ test "recovery: nonfinal varargs" {
});
}
+test "recovery: eof in c pointer" {
+ try testError(
+ \\const Ptr = [*c
+ , &[_]Error{
+ .expected_token,
+ });
+}
+
const std = @import("std");
const mem = std.mem;
const print = std.debug.print;
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 265049e1f9..4357960251 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -333,27 +333,33 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
.add,
.add_wrap,
+ .add_sat,
.array_cat,
.array_mult,
.assign,
.assign_bit_and,
.assign_bit_or,
- .assign_bit_shift_left,
- .assign_bit_shift_right,
+ .assign_shl,
+ .assign_shl_sat,
+ .assign_shr,
.assign_bit_xor,
.assign_div,
.assign_sub,
.assign_sub_wrap,
+ .assign_sub_sat,
.assign_mod,
.assign_add,
.assign_add_wrap,
+ .assign_add_sat,
.assign_mul,
.assign_mul_wrap,
+ .assign_mul_sat,
.bang_equal,
.bit_and,
.bit_or,
- .bit_shift_left,
- .bit_shift_right,
+ .shl,
+ .shl_sat,
+ .shr,
.bit_xor,
.bool_and,
.bool_or,
@@ -367,8 +373,10 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
.mod,
.mul,
.mul_wrap,
+ .mul_sat,
.sub,
.sub_wrap,
+ .sub_sat,
.@"orelse",
=> {
const infix = datas[node];
@@ -797,6 +805,14 @@ fn renderPtrType(
}
}
+ if (ptr_type.ast.addrspace_node != 0) {
+ const addrspace_first = tree.firstToken(ptr_type.ast.addrspace_node);
+ try renderToken(ais, tree, addrspace_first - 2, .none); // addrspace
+ try renderToken(ais, tree, addrspace_first - 1, .none); // lparen
+ try renderExpression(gpa, ais, tree, ptr_type.ast.addrspace_node, .none);
+ try renderToken(ais, tree, tree.lastToken(ptr_type.ast.addrspace_node) + 1, .space); // rparen
+ }
+
if (ptr_type.const_token) |const_token| {
try renderToken(ais, tree, const_token, .space);
}
@@ -921,6 +937,7 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
const name_space = if (var_decl.ast.type_node == 0 and
(var_decl.ast.align_node != 0 or
+ var_decl.ast.addrspace_node != 0 or
var_decl.ast.section_node != 0 or
var_decl.ast.init_node != 0))
Space.space
@@ -930,8 +947,8 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
if (var_decl.ast.type_node != 0) {
try renderToken(ais, tree, var_decl.ast.mut_token + 2, Space.space); // :
- if (var_decl.ast.align_node != 0 or var_decl.ast.section_node != 0 or
- var_decl.ast.init_node != 0)
+ if (var_decl.ast.align_node != 0 or var_decl.ast.addrspace_node != 0 or
+ var_decl.ast.section_node != 0 or var_decl.ast.init_node != 0)
{
try renderExpression(gpa, ais, tree, var_decl.ast.type_node, .space);
} else {
@@ -948,6 +965,23 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
try renderToken(ais, tree, align_kw, Space.none); // align
try renderToken(ais, tree, lparen, Space.none); // (
try renderExpression(gpa, ais, tree, var_decl.ast.align_node, Space.none);
+ if (var_decl.ast.addrspace_node != 0 or var_decl.ast.section_node != 0 or
+ var_decl.ast.init_node != 0)
+ {
+ try renderToken(ais, tree, rparen, .space); // )
+ } else {
+ try renderToken(ais, tree, rparen, .none); // )
+ return renderToken(ais, tree, rparen + 1, Space.newline); // ;
+ }
+ }
+
+ if (var_decl.ast.addrspace_node != 0) {
+ const lparen = tree.firstToken(var_decl.ast.addrspace_node) - 1;
+ const addrspace_kw = lparen - 1;
+ const rparen = tree.lastToken(var_decl.ast.addrspace_node) + 1;
+ try renderToken(ais, tree, addrspace_kw, Space.none); // addrspace
+ try renderToken(ais, tree, lparen, Space.none); // (
+ try renderExpression(gpa, ais, tree, var_decl.ast.addrspace_node, Space.none);
if (var_decl.ast.section_node != 0 or var_decl.ast.init_node != 0) {
try renderToken(ais, tree, rparen, .space); // )
} else {
@@ -1267,6 +1301,14 @@ fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnPro
smallest_start = start;
}
}
+ if (fn_proto.ast.addrspace_expr != 0) {
+ const tok = tree.firstToken(fn_proto.ast.addrspace_expr) - 3;
+ const start = token_starts[tok];
+ if (start < smallest_start) {
+ rparen = tok;
+ smallest_start = start;
+ }
+ }
if (fn_proto.ast.section_expr != 0) {
const tok = tree.firstToken(fn_proto.ast.section_expr) - 3;
const start = token_starts[tok];
@@ -1407,6 +1449,16 @@ fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnPro
try renderToken(ais, tree, align_rparen, .space); // )
}
+ if (fn_proto.ast.addrspace_expr != 0) {
+ const align_lparen = tree.firstToken(fn_proto.ast.addrspace_expr) - 1;
+ const align_rparen = tree.lastToken(fn_proto.ast.addrspace_expr) + 1;
+
+ try renderToken(ais, tree, align_lparen - 1, .none); // addrspace
+ try renderToken(ais, tree, align_lparen, .none); // (
+ try renderExpression(gpa, ais, tree, fn_proto.ast.addrspace_expr, .none);
+ try renderToken(ais, tree, align_rparen, .space); // )
+ }
+
if (fn_proto.ast.section_expr != 0) {
const section_lparen = tree.firstToken(fn_proto.ast.section_expr) - 1;
const section_rparen = tree.lastToken(fn_proto.ast.section_expr) + 1;
@@ -2476,8 +2528,8 @@ fn nodeCausesSliceOpSpace(tag: Ast.Node.Tag) bool {
.assign,
.assign_bit_and,
.assign_bit_or,
- .assign_bit_shift_left,
- .assign_bit_shift_right,
+ .assign_shl,
+ .assign_shr,
.assign_bit_xor,
.assign_div,
.assign_sub,
@@ -2490,8 +2542,8 @@ fn nodeCausesSliceOpSpace(tag: Ast.Node.Tag) bool {
.bang_equal,
.bit_and,
.bit_or,
- .bit_shift_left,
- .bit_shift_right,
+ .shl,
+ .shr,
.bit_xor,
.bool_and,
.bool_or,
diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig
index 64242038d3..2a38195b1f 100644
--- a/lib/std/zig/string_literal.zig
+++ b/lib/std/zig/string_literal.zig
@@ -29,7 +29,7 @@ pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory
const slice = bytes[1..];
const prev_len = buf.items.len;
- try buf.ensureCapacity(prev_len + slice.len - 1);
+ try buf.ensureUnusedCapacity(slice.len - 1);
errdefer buf.shrinkRetainingCapacity(prev_len);
const State = enum {
diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig
index da4057d6e1..a86d6a2d45 100644
--- a/lib/std/zig/system.zig
+++ b/lib/std/zig/system.zig
@@ -101,6 +101,17 @@ pub const NativePaths = struct {
return self;
}
+ if (comptime native_target.os.tag == .solaris) {
+ try self.addLibDir("/usr/lib/64");
+ try self.addLibDir("/usr/local/lib/64");
+ try self.addLibDir("/lib/64");
+
+ try self.addIncludeDir("/usr/include");
+ try self.addIncludeDir("/usr/local/include");
+
+ return self;
+ }
+
if (native_target.os.tag != .windows) {
const triple = try native_target.linuxTriple(allocator);
const qual = native_target.cpu.arch.ptrBitWidth();
@@ -243,6 +254,18 @@ pub const NativeTargetInfo = struct {
error.InvalidVersion => {},
}
},
+ .solaris => {
+ const uts = std.os.uname();
+ const release = mem.spanZ(&uts.release);
+ if (std.builtin.Version.parse(release)) |ver| {
+ os.version_range.semver.min = ver;
+ os.version_range.semver.max = ver;
+ } else |err| switch (err) {
+ error.Overflow => {},
+ error.InvalidCharacter => {},
+ error.InvalidVersion => {},
+ }
+ },
.windows => {
const detected_version = windows.detectRuntimeVersion();
os.version_range.windows.min = detected_version;
diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig
index 3fdbb3ec7b..02fa3dd381 100644
--- a/lib/std/zig/tokenizer.zig
+++ b/lib/std/zig/tokenizer.zig
@@ -11,6 +11,7 @@ pub const Token = struct {
};
pub const keywords = std.ComptimeStringMap(Tag, .{
+ .{ "addrspace", .keyword_addrspace },
.{ "align", .keyword_align },
.{ "allowzero", .keyword_allowzero },
.{ "and", .keyword_and },
@@ -102,15 +103,21 @@ pub const Token = struct {
plus_equal,
plus_percent,
plus_percent_equal,
+ plus_pipe,
+ plus_pipe_equal,
minus,
minus_equal,
minus_percent,
minus_percent_equal,
+ minus_pipe,
+ minus_pipe_equal,
asterisk,
asterisk_equal,
asterisk_asterisk,
asterisk_percent,
asterisk_percent_equal,
+ asterisk_pipe,
+ asterisk_pipe_equal,
arrow,
colon,
slash,
@@ -123,6 +130,8 @@ pub const Token = struct {
angle_bracket_left_equal,
angle_bracket_angle_bracket_left,
angle_bracket_angle_bracket_left_equal,
+ angle_bracket_angle_bracket_left_pipe,
+ angle_bracket_angle_bracket_left_pipe_equal,
angle_bracket_right,
angle_bracket_right_equal,
angle_bracket_angle_bracket_right,
@@ -132,6 +141,7 @@ pub const Token = struct {
float_literal,
doc_comment,
container_doc_comment,
+ keyword_addrspace,
keyword_align,
keyword_allowzero,
keyword_and,
@@ -225,15 +235,21 @@ pub const Token = struct {
.plus_equal => "+=",
.plus_percent => "+%",
.plus_percent_equal => "+%=",
+ .plus_pipe => "+|",
+ .plus_pipe_equal => "+|=",
.minus => "-",
.minus_equal => "-=",
.minus_percent => "-%",
.minus_percent_equal => "-%=",
+ .minus_pipe => "-|",
+ .minus_pipe_equal => "-|=",
.asterisk => "*",
.asterisk_equal => "*=",
.asterisk_asterisk => "**",
.asterisk_percent => "*%",
.asterisk_percent_equal => "*%=",
+ .asterisk_pipe => "*|",
+ .asterisk_pipe_equal => "*|=",
.arrow => "->",
.colon => ":",
.slash => "/",
@@ -246,11 +262,14 @@ pub const Token = struct {
.angle_bracket_left_equal => "<=",
.angle_bracket_angle_bracket_left => "<<",
.angle_bracket_angle_bracket_left_equal => "<<=",
+ .angle_bracket_angle_bracket_left_pipe => "<<|",
+ .angle_bracket_angle_bracket_left_pipe_equal => "<<|=",
.angle_bracket_right => ">",
.angle_bracket_right_equal => ">=",
.angle_bracket_angle_bracket_right => ">>",
.angle_bracket_angle_bracket_right_equal => ">>=",
.tilde => "~",
+ .keyword_addrspace => "addrspace",
.keyword_align => "align",
.keyword_allowzero => "allowzero",
.keyword_and => "and",
@@ -349,8 +368,10 @@ pub const Tokenizer = struct {
pipe,
minus,
minus_percent,
+ minus_pipe,
asterisk,
asterisk_percent,
+ asterisk_pipe,
slash,
line_comment_start,
line_comment,
@@ -379,8 +400,10 @@ pub const Tokenizer = struct {
percent,
plus,
plus_percent,
+ plus_pipe,
angle_bracket_left,
angle_bracket_angle_bracket_left,
+ angle_bracket_angle_bracket_left_pipe,
angle_bracket_right,
angle_bracket_angle_bracket_right,
period,
@@ -581,6 +604,9 @@ pub const Tokenizer = struct {
'%' => {
state = .asterisk_percent;
},
+ '|' => {
+ state = .asterisk_pipe;
+ },
else => {
result.tag = .asterisk;
break;
@@ -599,6 +625,18 @@ pub const Tokenizer = struct {
},
},
+ .asterisk_pipe => switch (c) {
+ '=' => {
+ result.tag = .asterisk_pipe_equal;
+ self.index += 1;
+ break;
+ },
+ else => {
+ result.tag = .asterisk_pipe;
+ break;
+ },
+ },
+
.percent => switch (c) {
'=' => {
result.tag = .percent_equal;
@@ -625,6 +663,9 @@ pub const Tokenizer = struct {
'%' => {
state = .plus_percent;
},
+ '|' => {
+ state = .plus_pipe;
+ },
else => {
result.tag = .plus;
break;
@@ -643,6 +684,18 @@ pub const Tokenizer = struct {
},
},
+ .plus_pipe => switch (c) {
+ '=' => {
+ result.tag = .plus_pipe_equal;
+ self.index += 1;
+ break;
+ },
+ else => {
+ result.tag = .plus_pipe;
+ break;
+ },
+ },
+
.caret => switch (c) {
'=' => {
result.tag = .caret_equal;
@@ -700,7 +753,7 @@ pub const Tokenizer = struct {
},
.string_literal_backslash => switch (c) {
- '\n' => {
+ 0, '\n' => {
result.tag = .invalid;
break;
},
@@ -769,6 +822,10 @@ pub const Tokenizer = struct {
},
.char_literal_unicode_escape_saw_u => switch (c) {
+ 0 => {
+ result.tag = .invalid;
+ break;
+ },
'{' => {
state = .char_literal_unicode_escape;
},
@@ -779,6 +836,10 @@ pub const Tokenizer = struct {
},
.char_literal_unicode_escape => switch (c) {
+ 0 => {
+ result.tag = .invalid;
+ break;
+ },
'0'...'9', 'a'...'f', 'A'...'F' => {},
'}' => {
state = .char_literal_end; // too many/few digits handled later
@@ -892,6 +953,9 @@ pub const Tokenizer = struct {
'%' => {
state = .minus_percent;
},
+ '|' => {
+ state = .minus_pipe;
+ },
else => {
result.tag = .minus;
break;
@@ -909,6 +973,17 @@ pub const Tokenizer = struct {
break;
},
},
+ .minus_pipe => switch (c) {
+ '=' => {
+ result.tag = .minus_pipe_equal;
+ self.index += 1;
+ break;
+ },
+ else => {
+ result.tag = .minus_pipe;
+ break;
+ },
+ },
.angle_bracket_left => switch (c) {
'<' => {
@@ -931,12 +1006,27 @@ pub const Tokenizer = struct {
self.index += 1;
break;
},
+ '|' => {
+ state = .angle_bracket_angle_bracket_left_pipe;
+ },
else => {
result.tag = .angle_bracket_angle_bracket_left;
break;
},
},
+ .angle_bracket_angle_bracket_left_pipe => switch (c) {
+ '=' => {
+ result.tag = .angle_bracket_angle_bracket_left_pipe_equal;
+ self.index += 1;
+ break;
+ },
+ else => {
+ result.tag = .angle_bracket_angle_bracket_left_pipe;
+ break;
+ },
+ },
+
.angle_bracket_right => switch (c) {
'>' => {
state = .angle_bracket_angle_bracket_right;
@@ -1919,6 +2009,30 @@ test "tokenizer - invalid builtin identifiers" {
try testTokenize("@0()", &.{ .invalid, .integer_literal, .l_paren, .r_paren });
}
+test "tokenizer - invalid token with unfinished escape right before eof" {
+ try testTokenize("\"\\", &.{.invalid});
+ try testTokenize("'\\", &.{.invalid});
+ try testTokenize("'\\u", &.{.invalid});
+}
+
+test "tokenizer - saturating" {
+ try testTokenize("<<", &.{.angle_bracket_angle_bracket_left});
+ try testTokenize("<<|", &.{.angle_bracket_angle_bracket_left_pipe});
+ try testTokenize("<<|=", &.{.angle_bracket_angle_bracket_left_pipe_equal});
+
+ try testTokenize("*", &.{.asterisk});
+ try testTokenize("*|", &.{.asterisk_pipe});
+ try testTokenize("*|=", &.{.asterisk_pipe_equal});
+
+ try testTokenize("+", &.{.plus});
+ try testTokenize("+|", &.{.plus_pipe});
+ try testTokenize("+|=", &.{.plus_pipe_equal});
+
+ try testTokenize("-", &.{.minus});
+ try testTokenize("-|", &.{.minus_pipe});
+ try testTokenize("-|=", &.{.minus_pipe_equal});
+}
+
fn testTokenize(source: [:0]const u8, expected_tokens: []const Token.Tag) !void {
var tokenizer = Tokenizer.init(source);
for (expected_tokens) |expected_token_id| {
diff --git a/src/Air.zig b/src/Air.zig
index 29deb9a523..f7eccfd5a5 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -44,6 +44,11 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
addwrap,
+ /// Saturating integer addition.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// Uses the `bin_op` field.
+ add_sat,
/// Float or integer subtraction. For integers, wrapping is undefined behavior.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@@ -54,6 +59,11 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
subwrap,
+ /// Saturating integer subtraction.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// Uses the `bin_op` field.
+ sub_sat,
/// Float or integer multiplication. For integers, wrapping is undefined behavior.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@@ -64,15 +74,26 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
mulwrap,
+ /// Saturating integer multiplication.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// Uses the `bin_op` field.
+ mul_sat,
/// Integer or float division. For integers, wrapping is undefined behavior.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
div,
- /// Integer or float remainder.
- /// Both operands are guaranteed to be the same type, and the result type is the same as both operands.
+ /// Integer or float remainder division.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
/// Uses the `bin_op` field.
rem,
+ /// Integer or float modulus division.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// Uses the `bin_op` field.
+ mod,
/// Add an offset to a pointer, returning a new pointer.
/// The offset is in element type units, not bytes.
/// Wrapping is undefined behavior.
@@ -104,6 +125,14 @@ pub const Inst = struct {
/// Shift left. `<<`
/// Uses the `bin_op` field.
shl,
+ /// Shift left; For unsigned integers, the shift produces a poison value if it shifts
+ /// out any non-zero bits. For signed integers, the shift produces a poison value if
+ /// it shifts out any bits that disagree with the resultant sign bit.
+ /// Uses the `bin_op` field.
+ shl_exact,
+ /// Saturating integer shift left. `<<|`
+ /// Uses the `bin_op` field.
+ shl_sat,
/// Bitwise XOR. `^`
/// Uses the `bin_op` field.
xor,
@@ -131,6 +160,15 @@ pub const Inst = struct {
/// Result type is the return type of the function being called.
/// Uses the `pl_op` field with the `Call` payload. operand is the callee.
call,
+ /// Count leading zeroes of an integer according to its representation in twos complement.
+ /// Result type will always be an unsigned integer big enough to fit the answer.
+ /// Uses the `ty_op` field.
+ clz,
+ /// Count trailing zeroes of an integer according to its representation in twos complement.
+ /// Result type will always be an unsigned integer big enough to fit the answer.
+ /// Uses the `ty_op` field.
+ ctz,
+
/// `<`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_lt,
@@ -149,6 +187,7 @@ pub const Inst = struct {
/// `!=`. Result type is always bool.
/// Uses the `bin_op` field.
cmp_neq,
+
/// Conditional branch.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`.
@@ -225,9 +264,12 @@ pub const Inst = struct {
/// Indicates the program counter will never get to this instruction.
/// Result type is always noreturn; no instructions in a block follow this one.
unreach,
- /// Convert from one float type to another.
+ /// Convert from a float type to a smaller one.
/// Uses the `ty_op` field.
- floatcast,
+ fptrunc,
+ /// Convert from a float type to a wider one.
+ /// Uses the `ty_op` field.
+ fpext,
/// Returns an integer with a different type than the operand. The new type may have
/// fewer, the same, or more bits than the operand type. However, the instruction
/// guarantees that the same integer value fits in both types.
@@ -265,19 +307,29 @@ pub const Inst = struct {
/// wrap from E to E!T
/// Uses the `ty_op` field.
wrap_errunion_err,
- /// Given a pointer to a struct and a field index, returns a pointer to the field.
+ /// Given a pointer to a struct or union and a field index, returns a pointer to the field.
/// Uses the `ty_pl` field, payload is `StructField`.
+ /// TODO rename to `agg_field_ptr`.
struct_field_ptr,
- /// Given a pointer to a struct, returns a pointer to the field.
+ /// Given a pointer to a struct or union, returns a pointer to the field.
/// The field index is the number at the end of the name.
/// Uses `ty_op` field.
+ /// TODO rename to `agg_field_ptr_index_X`
struct_field_ptr_index_0,
struct_field_ptr_index_1,
struct_field_ptr_index_2,
struct_field_ptr_index_3,
- /// Given a byval struct and a field index, returns the field byval.
+ /// Given a byval struct or union and a field index, returns the field byval.
/// Uses the `ty_pl` field, payload is `StructField`.
+ /// TODO rename to `agg_field_val`
struct_field_val,
+ /// Given a pointer to a tagged union, set its tag to the provided value.
+ /// Result type is always void.
+ /// Uses the `bin_op` field. LHS is union pointer, RHS is new tag value.
+ set_union_tag,
+ /// Given a tagged union value, get its tag value.
+ /// Uses the `ty_op` field.
+ get_union_tag,
/// Given a slice value, return the length.
/// Result type is always usize.
/// Uses the `ty_op` field.
@@ -309,10 +361,52 @@ pub const Inst = struct {
/// Given a pointer to an array, return a slice.
/// Uses the `ty_op` field.
array_to_slice,
+ /// Given a float operand, return the integer with the closest mathematical meaning.
+ /// Uses the `ty_op` field.
+ float_to_int,
+ /// Given an integer operand, return the float with the closest mathematical meaning.
+ /// Uses the `ty_op` field.
+ int_to_float,
+
+ /// Given dest ptr, value, and len, set all elements at dest to value.
+ /// Result type is always void.
+ /// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the
+ /// value, `rhs` is the length.
+ /// The element type may be any type, not just u8.
+ memset,
+ /// Given dest ptr, src ptr, and len, copy len elements from src to dest.
+ /// Result type is always void.
+ /// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the
+ /// src ptr, `rhs` is the length.
+ /// The element type may be any type, not just u8.
+ memcpy,
+
/// Uses the `ty_pl` field with payload `Cmpxchg`.
cmpxchg_weak,
/// Uses the `ty_pl` field with payload `Cmpxchg`.
cmpxchg_strong,
+ /// Lowers to a memory fence instruction.
+ /// Result type is always void.
+ /// Uses the `fence` field.
+ fence,
+ /// Atomically load from a pointer.
+ /// Result type is the element type of the pointer.
+ /// Uses the `atomic_load` field.
+ atomic_load,
+ /// Atomically store through a pointer.
+ /// Result type is always `void`.
+ /// Uses the `bin_op` field. LHS is pointer, RHS is element.
+ atomic_store_unordered,
+ /// Same as `atomic_store_unordered` but with `AtomicOrder.Monotonic`.
+ atomic_store_monotonic,
+ /// Same as `atomic_store_unordered` but with `AtomicOrder.Release`.
+ atomic_store_release,
+ /// Same as `atomic_store_unordered` but with `AtomicOrder.SeqCst`.
+ atomic_store_seq_cst,
+ /// Atomically read-modify-write via a pointer.
+ /// Result type is the element type of the pointer.
+ /// Uses the `pl_op` field with payload `AtomicRmw`. Operand is `ptr`.
+ atomic_rmw,
pub fn fromCmpOp(op: std.math.CompareOperator) Tag {
return switch (op) {
@@ -380,6 +474,11 @@ pub const Inst = struct {
line: u32,
column: u32,
},
+ fence: std.builtin.AtomicOrder,
+ atomic_load: struct {
+ ptr: Ref,
+ order: std.builtin.AtomicOrder,
+ },
// Make sure we don't accidentally add a field to make this union
// bigger than expected. Note that in Debug builds, Zig is allowed
@@ -464,6 +563,21 @@ pub const Cmpxchg = struct {
}
};
+pub const AtomicRmw = struct {
+ operand: Inst.Ref,
+ /// 0b00000000000000000000000000000XXX - ordering
+ /// 0b0000000000000000000000000XXXX000 - op
+ flags: u32,
+
+ pub fn ordering(self: AtomicRmw) std.builtin.AtomicOrder {
+ return @intToEnum(std.builtin.AtomicOrder, @truncate(u3, self.flags));
+ }
+
+ pub fn op(self: AtomicRmw) std.builtin.AtomicRmwOp {
+ return @intToEnum(std.builtin.AtomicRmwOp, @truncate(u4, self.flags >> 3));
+ }
+};
+
pub fn getMainBody(air: Air) []const Air.Inst.Index {
const body_index = air.extra[@enumToInt(ExtraIndex.main_block)];
const extra = air.extraData(Block, body_index);
@@ -485,12 +599,16 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.add,
.addwrap,
+ .add_sat,
.sub,
.subwrap,
+ .sub_sat,
.mul,
.mulwrap,
+ .mul_sat,
.div,
.rem,
+ .mod,
.bit_and,
.bit_or,
.xor,
@@ -498,6 +616,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.ptr_sub,
.shr,
.shl,
+ .shl_exact,
+ .shl_sat,
=> return air.typeOf(datas[inst].bin_op.lhs),
.cmp_lt,
@@ -535,7 +655,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.not,
.bitcast,
.load,
- .floatcast,
+ .fpext,
+ .fptrunc,
.intcast,
.trunc,
.optional_payload,
@@ -553,6 +674,11 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.array_to_slice,
+ .float_to_int,
+ .int_to_float,
+ .get_union_tag,
+ .clz,
+ .ctz,
=> return air.getRefType(datas[inst].ty_op.ty),
.loop,
@@ -566,6 +692,14 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.breakpoint,
.dbg_stmt,
.store,
+ .fence,
+ .atomic_store_unordered,
+ .atomic_store_monotonic,
+ .atomic_store_release,
+ .atomic_store_seq_cst,
+ .memset,
+ .memcpy,
+ .set_union_tag,
=> return Type.initTag(.void),
.ptrtoint,
@@ -588,6 +722,14 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
const inner_ptr_ty = outer_ptr_ty.elemType();
return inner_ptr_ty.elemType();
},
+ .atomic_load => {
+ const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr);
+ return ptr_ty.elemType();
+ },
+ .atomic_rmw => {
+ const ptr_ty = air.typeOf(datas[inst].pl_op.operand);
+ return ptr_ty.elemType();
+ },
}
}
diff --git a/src/AstGen.zig b/src/AstGen.zig
index b9d7d6f5be..387364cb82 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -56,6 +56,7 @@ fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 {
u32 => @field(extra, field.name),
Zir.Inst.Ref => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
+ Zir.Inst.Call.Flags => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
});
}
@@ -124,7 +125,7 @@ pub fn generate(gpa: *Allocator, tree: Ast) Allocator.Error!Zir {
container_decl,
.Auto,
)) |struct_decl_ref| {
- astgen.extra.items[@enumToInt(Zir.ExtraIndex.main_struct)] = @enumToInt(struct_decl_ref);
+ assert(refToIndex(struct_decl_ref).? == 0);
} else |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {}, // Handled via compile_errors below.
@@ -197,7 +198,7 @@ pub const ResultLoc = union(enum) {
none_or_ref,
/// The expression will be coerced into this type, but it will be evaluated as an rvalue.
ty: Zir.Inst.Ref,
- /// Same as `ty` but it is guaranteed that Sema will additionall perform the coercion,
+ /// Same as `ty` but it is guaranteed that Sema will additionally perform the coercion,
/// so no `as` instruction needs to be emitted.
coerced_ty: Zir.Inst.Ref,
/// The expression must store its result into this typed pointer. The result instruction
@@ -316,29 +317,37 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Ins
.assign,
.assign_bit_and,
.assign_bit_or,
- .assign_bit_shift_left,
- .assign_bit_shift_right,
+ .assign_shl,
+ .assign_shl_sat,
+ .assign_shr,
.assign_bit_xor,
.assign_div,
.assign_sub,
.assign_sub_wrap,
+ .assign_sub_sat,
.assign_mod,
.assign_add,
.assign_add_wrap,
+ .assign_add_sat,
.assign_mul,
.assign_mul_wrap,
+ .assign_mul_sat,
.add,
.add_wrap,
+ .add_sat,
.sub,
.sub_wrap,
+ .sub_sat,
.mul,
.mul_wrap,
+ .mul_sat,
.div,
.mod,
.bit_and,
.bit_or,
- .bit_shift_left,
- .bit_shift_right,
+ .shl,
+ .shl_sat,
+ .shr,
.bit_xor,
.bang_equal,
.equal_equal,
@@ -479,7 +488,7 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Ins
return expr(gz, scope, .ref, node);
}
-/// Turn Zig AST into untyped ZIR istructions.
+/// Turn Zig AST into untyped ZIR instructions.
/// When `rl` is discard, ptr, inferred_ptr, or inferred_ptr, the
/// result instruction can be used to inspect whether it is isNoReturn() but that is it,
/// it must otherwise not be used.
@@ -521,11 +530,15 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr
return rvalue(gz, rl, .void_value, node);
},
- .assign_bit_shift_left => {
+ .assign_shl => {
try assignShift(gz, scope, node, .shl);
return rvalue(gz, rl, .void_value, node);
},
- .assign_bit_shift_right => {
+ .assign_shl_sat => {
+ try assignShiftSat(gz, scope, node);
+ return rvalue(gz, rl, .void_value, node);
+ },
+ .assign_shr => {
try assignShift(gz, scope, node, .shr);
return rvalue(gz, rl, .void_value, node);
},
@@ -554,6 +567,10 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr
try assignOp(gz, scope, node, .subwrap);
return rvalue(gz, rl, .void_value, node);
},
+ .assign_sub_sat => {
+ try assignOp(gz, scope, node, .sub_sat);
+ return rvalue(gz, rl, .void_value, node);
+ },
.assign_mod => {
try assignOp(gz, scope, node, .mod_rem);
return rvalue(gz, rl, .void_value, node);
@@ -566,6 +583,10 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr
try assignOp(gz, scope, node, .addwrap);
return rvalue(gz, rl, .void_value, node);
},
+ .assign_add_sat => {
+ try assignOp(gz, scope, node, .add_sat);
+ return rvalue(gz, rl, .void_value, node);
+ },
.assign_mul => {
try assignOp(gz, scope, node, .mul);
return rvalue(gz, rl, .void_value, node);
@@ -574,19 +595,28 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr
try assignOp(gz, scope, node, .mulwrap);
return rvalue(gz, rl, .void_value, node);
},
+ .assign_mul_sat => {
+ try assignOp(gz, scope, node, .mul_sat);
+ return rvalue(gz, rl, .void_value, node);
+ },
// zig fmt: off
- .bit_shift_left => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl),
- .bit_shift_right => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shr),
+ .shl => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl),
+ .shr => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shr),
.add => return simpleBinOp(gz, scope, rl, node, .add),
.add_wrap => return simpleBinOp(gz, scope, rl, node, .addwrap),
+ .add_sat => return simpleBinOp(gz, scope, rl, node, .add_sat),
.sub => return simpleBinOp(gz, scope, rl, node, .sub),
.sub_wrap => return simpleBinOp(gz, scope, rl, node, .subwrap),
+ .sub_sat => return simpleBinOp(gz, scope, rl, node, .sub_sat),
.mul => return simpleBinOp(gz, scope, rl, node, .mul),
.mul_wrap => return simpleBinOp(gz, scope, rl, node, .mulwrap),
+ .mul_sat => return simpleBinOp(gz, scope, rl, node, .mul_sat),
.div => return simpleBinOp(gz, scope, rl, node, .div),
.mod => return simpleBinOp(gz, scope, rl, node, .mod_rem),
+ .shl_sat => return simpleBinOp(gz, scope, rl, node, .shl_sat),
+
.bit_and => {
const current_ampersand_token = main_tokens[node];
if (token_tags[current_ampersand_token + 1] == .ampersand) {
@@ -1116,6 +1146,11 @@ fn fnProtoExpr(
const align_inst: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
break :inst try expr(gz, scope, align_rl, fn_proto.ast.align_expr);
};
+
+ if (fn_proto.ast.addrspace_expr != 0) {
+ return astgen.failNode(fn_proto.ast.addrspace_expr, "addrspace not allowed on function prototypes", .{});
+ }
+
if (fn_proto.ast.section_expr != 0) {
return astgen.failNode(fn_proto.ast.section_expr, "linksection not allowed on function prototypes", .{});
}
@@ -1352,7 +1387,14 @@ fn structInitExpr(
const array_type: Ast.full.ArrayType = switch (node_tags[struct_init.ast.type_expr]) {
.array_type => tree.arrayType(struct_init.ast.type_expr),
.array_type_sentinel => tree.arrayTypeSentinel(struct_init.ast.type_expr),
- else => break :array,
+ else => {
+ if (struct_init.ast.fields.len == 0) {
+ const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
+ const result = try gz.addUnNode(.struct_init_empty, ty_inst, node);
+ return rvalue(gz, rl, result, node);
+ }
+ break :array;
+ },
};
const is_inferred_array_len = node_tags[array_type.ast.elem_count] == .identifier and
// This intentionally does not support `@"_"` syntax.
@@ -1414,8 +1456,8 @@ fn structInitExpr(
const result = try structInitExprRlTy(gz, scope, node, struct_init, inner_ty_inst, .struct_init);
return rvalue(gz, rl, result, node);
},
- .ptr, .inferred_ptr => |ptr_inst| return structInitExprRlPtr(gz, scope, node, struct_init, ptr_inst),
- .block_ptr => |block_gz| return structInitExprRlPtr(gz, scope, node, struct_init, block_gz.rl_ptr),
+ .ptr, .inferred_ptr => |ptr_inst| return structInitExprRlPtr(gz, scope, rl, node, struct_init, ptr_inst),
+ .block_ptr => |block_gz| return structInitExprRlPtr(gz, scope, rl, node, struct_init, block_gz.rl_ptr),
}
}
@@ -1454,6 +1496,26 @@ fn structInitExprRlNone(
}
fn structInitExprRlPtr(
+ gz: *GenZir,
+ scope: *Scope,
+ rl: ResultLoc,
+ node: Ast.Node.Index,
+ struct_init: Ast.full.StructInit,
+ result_ptr: Zir.Inst.Ref,
+) InnerError!Zir.Inst.Ref {
+ if (struct_init.ast.type_expr == 0) {
+ return structInitExprRlPtrInner(gz, scope, node, struct_init, result_ptr);
+ }
+ const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
+
+ var as_scope = try gz.makeCoercionScope(scope, ty_inst, result_ptr);
+ defer as_scope.instructions.deinit(gz.astgen.gpa);
+
+ const result = try structInitExprRlPtrInner(&as_scope, scope, node, struct_init, as_scope.rl_ptr);
+ return as_scope.finishCoercion(gz, rl, node, result, ty_inst);
+}
+
+fn structInitExprRlPtrInner(
gz: *GenZir,
scope: *Scope,
node: Ast.Node.Index,
@@ -1467,9 +1529,6 @@ fn structInitExprRlPtr(
const field_ptr_list = try gpa.alloc(Zir.Inst.Index, struct_init.ast.fields.len);
defer gpa.free(field_ptr_list);
- if (struct_init.ast.type_expr != 0)
- _ = try typeExpr(gz, scope, struct_init.ast.type_expr);
-
for (struct_init.ast.fields) |field_init, i| {
const name_token = tree.firstToken(field_init) - 2;
const str_index = try astgen.identAsString(name_token);
@@ -1484,7 +1543,7 @@ fn structInitExprRlPtr(
.body_len = @intCast(u32, field_ptr_list.len),
});
try astgen.extra.appendSlice(gpa, field_ptr_list);
- return .void_value;
+ return Zir.Inst.Ref.void_value;
}
fn structInitExprRlTy(
@@ -1868,8 +1927,8 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod
.assign => try assign(gz, scope, statement),
- .assign_bit_shift_left => try assignShift(gz, scope, statement, .shl),
- .assign_bit_shift_right => try assignShift(gz, scope, statement, .shr),
+ .assign_shl => try assignShift(gz, scope, statement, .shl),
+ .assign_shr => try assignShift(gz, scope, statement, .shr),
.assign_bit_and => try assignOp(gz, scope, statement, .bit_and),
.assign_bit_or => try assignOp(gz, scope, statement, .bit_or),
@@ -1905,17 +1964,21 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
// in the above while loop.
const zir_tags = gz.astgen.instructions.items(.tag);
switch (zir_tags[inst]) {
- // For some instructions, swap in a slightly different ZIR tag
+ // For some instructions, modify the ZIR data in place
// so we can avoid a separate ensure_result_used instruction.
- .call_chkused => unreachable,
.call => {
- zir_tags[inst] = .call_chkused;
+ const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index;
+ const slot = &gz.astgen.extra.items[extra_index];
+ var flags = @bitCast(Zir.Inst.Call.Flags, slot.*);
+ flags.ensure_result_used = true;
+ slot.* = @bitCast(u32, flags);
break :b true;
},
// ZIR instructions that might be a type other than `noreturn` or `void`.
.add,
.addwrap,
+ .add_sat,
.param,
.param_comptime,
.param_anytype,
@@ -1947,9 +2010,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.bool_br_and,
.bool_br_or,
.bool_not,
- .call_compile_time,
- .call_nosuspend,
- .call_async,
.cmp_lt,
.cmp_lte,
.cmp_eq,
@@ -1967,8 +2027,10 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.elem_val_node,
.field_ptr,
.field_val,
+ .field_call_bind,
.field_ptr_named,
.field_val_named,
+ .field_call_bind_named,
.func,
.func_inferred,
.int,
@@ -1983,13 +2045,15 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.mod_rem,
.mul,
.mulwrap,
- .param_type,
+ .mul_sat,
.ref,
.shl,
+ .shl_sat,
.shr,
.str,
.sub,
.subwrap,
+ .sub_sat,
.negate,
.negate_wrap,
.typeof,
@@ -2049,9 +2113,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.union_init_ptr,
.field_type,
.field_type_ref,
- .opaque_decl,
- .opaque_decl_anon,
- .opaque_decl_func,
.error_set_decl,
.error_set_decl_anon,
.error_set_decl_func,
@@ -2118,14 +2179,11 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.select,
.atomic_load,
.atomic_rmw,
- .atomic_store,
.mul_add,
.builtin_call,
.field_ptr_type,
.field_parent_ptr,
.maximum,
- .memcpy,
- .memset,
.minimum,
.builtin_async_call,
.c_import,
@@ -2134,6 +2192,7 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.await_nosuspend,
.ret_err_value_code,
.extended,
+ .closure_get,
=> break :b false,
// ZIR instructions that are always `noreturn`.
@@ -2162,8 +2221,10 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.ensure_result_used,
.ensure_result_non_error,
.@"export",
+ .export_value,
.set_eval_branch_quota,
.ensure_err_payload_void,
+ .atomic_store,
.store,
.store_node,
.store_to_block_ptr,
@@ -2175,6 +2236,9 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.set_cold,
.set_float_mode,
.set_runtime_safety,
+ .closure_capture,
+ .memcpy,
+ .memset,
=> break :b true,
}
} else switch (maybe_unused_result) {
@@ -2371,6 +2435,7 @@ fn varDecl(
const gpa = astgen.gpa;
const tree = astgen.tree;
const token_tags = tree.tokens.items(.tag);
+ const main_tokens = tree.nodes.items(.main_token);
const name_token = var_decl.ast.mut_token + 1;
const ident_name_raw = tree.tokenSlice(name_token);
@@ -2385,6 +2450,14 @@ fn varDecl(
return astgen.failNode(node, "variables must be initialized", .{});
}
+ if (var_decl.ast.addrspace_node != 0) {
+ return astgen.failTok(main_tokens[var_decl.ast.addrspace_node], "cannot set address space of local variable '{s}'", .{ident_name_raw});
+ }
+
+ if (var_decl.ast.section_node != 0) {
+ return astgen.failTok(main_tokens[var_decl.ast.section_node], "cannot set section of local variable '{s}'", .{ident_name_raw});
+ }
+
const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node != 0)
try expr(gz, scope, align_rl, var_decl.ast.align_node)
else
@@ -2494,7 +2567,11 @@ fn varDecl(
for (init_scope.instructions.items) |src_inst| {
if (zir_tags[src_inst] == .store_to_block_ptr) {
if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) {
- zir_tags[src_inst] = .store_to_inferred_ptr;
+ if (var_decl.ast.type_node != 0) {
+ zir_tags[src_inst] = .store;
+ } else {
+ zir_tags[src_inst] = .store_to_inferred_ptr;
+ }
}
}
parent_zir.appendAssumeCapacity(src_inst);
@@ -2668,6 +2745,24 @@ fn assignShift(
_ = try gz.addBin(.store, lhs_ptr, result);
}
+fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void {
+ try emitDbgNode(gz, infix_node);
+ const astgen = gz.astgen;
+ const tree = astgen.tree;
+ const node_datas = tree.nodes.items(.data);
+
+ const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs);
+ const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node);
+ // Saturating shift-left allows any integer type for both the LHS and RHS.
+ const rhs = try expr(gz, scope, .none, node_datas[infix_node].rhs);
+
+ const result = try gz.addPlNode(.shl_sat, infix_node, Zir.Inst.Bin{
+ .lhs = lhs,
+ .rhs = rhs,
+ });
+ _ = try gz.addBin(.store, lhs_ptr, result);
+}
+
fn boolNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -2714,6 +2809,7 @@ fn ptrType(
const elem_type = try typeExpr(gz, scope, ptr_info.ast.child_type);
const simple = ptr_info.ast.align_node == 0 and
+ ptr_info.ast.addrspace_node == 0 and
ptr_info.ast.sentinel == 0 and
ptr_info.ast.bit_range_start == 0;
@@ -2732,6 +2828,7 @@ fn ptrType(
var sentinel_ref: Zir.Inst.Ref = .none;
var align_ref: Zir.Inst.Ref = .none;
+ var addrspace_ref: Zir.Inst.Ref = .none;
var bit_start_ref: Zir.Inst.Ref = .none;
var bit_end_ref: Zir.Inst.Ref = .none;
var trailing_count: u32 = 0;
@@ -2744,6 +2841,10 @@ fn ptrType(
align_ref = try expr(gz, scope, align_rl, ptr_info.ast.align_node);
trailing_count += 1;
}
+ if (ptr_info.ast.addrspace_node != 0) {
+ addrspace_ref = try expr(gz, scope, .{ .ty = .address_space_type }, ptr_info.ast.addrspace_node);
+ trailing_count += 1;
+ }
if (ptr_info.ast.bit_range_start != 0) {
assert(ptr_info.ast.bit_range_end != 0);
bit_start_ref = try expr(gz, scope, .none, ptr_info.ast.bit_range_start);
@@ -2764,6 +2865,9 @@ fn ptrType(
if (align_ref != .none) {
gz.astgen.extra.appendAssumeCapacity(@enumToInt(align_ref));
}
+ if (addrspace_ref != .none) {
+ gz.astgen.extra.appendAssumeCapacity(@enumToInt(addrspace_ref));
+ }
if (bit_start_ref != .none) {
gz.astgen.extra.appendAssumeCapacity(@enumToInt(bit_start_ref));
gz.astgen.extra.appendAssumeCapacity(@enumToInt(bit_end_ref));
@@ -2779,6 +2883,7 @@ fn ptrType(
.is_volatile = ptr_info.volatile_token != null,
.has_sentinel = sentinel_ref != .none,
.has_align = align_ref != .none,
+ .has_addrspace = addrspace_ref != .none,
.has_bit_range = bit_start_ref != .none,
},
.size = ptr_info.size,
@@ -2847,7 +2952,7 @@ const WipDecls = struct {
is_pub: bool,
is_export: bool,
has_align: bool,
- has_section: bool,
+ has_section_or_addrspace: bool,
) Allocator.Error!void {
if (wip_decls.decl_index % fields_per_u32 == 0 and wip_decls.decl_index != 0) {
try wip_decls.bit_bag.append(gpa, wip_decls.cur_bit_bag);
@@ -2857,7 +2962,7 @@ const WipDecls = struct {
(@as(u32, @boolToInt(is_pub)) << 28) |
(@as(u32, @boolToInt(is_export)) << 29) |
(@as(u32, @boolToInt(has_align)) << 30) |
- (@as(u32, @boolToInt(has_section)) << 31);
+ (@as(u32, @boolToInt(has_section_or_addrspace)) << 31);
wip_decls.decl_index += 1;
}
@@ -2922,7 +3027,8 @@ fn fnDecl(
const maybe_inline_token = fn_proto.extern_export_inline_token orelse break :blk false;
break :blk token_tags[maybe_inline_token] == .keyword_inline;
};
- try wip_decls.next(gpa, is_pub, is_export, fn_proto.ast.align_expr != 0, fn_proto.ast.section_expr != 0);
+ const has_section_or_addrspace = fn_proto.ast.section_expr != 0 or fn_proto.ast.addrspace_expr != 0;
+ try wip_decls.next(gpa, is_pub, is_export, fn_proto.ast.align_expr != 0, has_section_or_addrspace);
var params_scope = &fn_gz.base;
const is_var_args = is_var_args: {
@@ -3011,6 +3117,9 @@ fn fnDecl(
const align_inst: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
break :inst try expr(&decl_gz, params_scope, align_rl, fn_proto.ast.align_expr);
};
+ const addrspace_inst: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: {
+ break :inst try expr(&decl_gz, params_scope, .{ .ty = .address_space_type }, fn_proto.ast.addrspace_expr);
+ };
const section_inst: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: {
break :inst try comptimeExpr(&decl_gz, params_scope, .{ .ty = .const_slice_u8_type }, fn_proto.ast.section_expr);
};
@@ -3112,7 +3221,7 @@ fn fnDecl(
_ = try decl_gz.addBreak(.break_inline, block_inst, func_inst);
try decl_gz.setBlockBody(block_inst);
- try wip_decls.payload.ensureUnusedCapacity(gpa, 9);
+ try wip_decls.payload.ensureUnusedCapacity(gpa, 10);
{
const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node));
const casted = @bitCast([4]u32, contents_hash);
@@ -3127,8 +3236,10 @@ fn fnDecl(
if (align_inst != .none) {
wip_decls.payload.appendAssumeCapacity(@enumToInt(align_inst));
}
- if (section_inst != .none) {
+
+ if (has_section_or_addrspace) {
wip_decls.payload.appendAssumeCapacity(@enumToInt(section_inst));
+ wip_decls.payload.appendAssumeCapacity(@enumToInt(addrspace_inst));
}
}
@@ -3175,10 +3286,14 @@ fn globalVarDecl(
const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node == 0) .none else inst: {
break :inst try expr(&block_scope, &block_scope.base, align_rl, var_decl.ast.align_node);
};
+ const addrspace_inst: Zir.Inst.Ref = if (var_decl.ast.addrspace_node == 0) .none else inst: {
+ break :inst try expr(&block_scope, &block_scope.base, .{ .ty = .address_space_type }, var_decl.ast.addrspace_node);
+ };
const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: {
break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .ty = .const_slice_u8_type }, var_decl.ast.section_node);
};
- try wip_decls.next(gpa, is_pub, is_export, align_inst != .none, section_inst != .none);
+ const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none;
+ try wip_decls.next(gpa, is_pub, is_export, align_inst != .none, has_section_or_addrspace);
const is_threadlocal = if (var_decl.threadlocal_token) |tok| blk: {
if (!is_mutable) {
@@ -3256,7 +3371,7 @@ fn globalVarDecl(
_ = try block_scope.addBreak(.break_inline, block_inst, var_inst);
try block_scope.setBlockBody(block_inst);
- try wip_decls.payload.ensureUnusedCapacity(gpa, 9);
+ try wip_decls.payload.ensureUnusedCapacity(gpa, 10);
{
const contents_hash = std.zig.hashSrc(tree.getNodeSource(node));
const casted = @bitCast([4]u32, contents_hash);
@@ -3271,8 +3386,9 @@ fn globalVarDecl(
if (align_inst != .none) {
wip_decls.payload.appendAssumeCapacity(@enumToInt(align_inst));
}
- if (section_inst != .none) {
+ if (has_section_or_addrspace) {
wip_decls.payload.appendAssumeCapacity(@enumToInt(section_inst));
+ wip_decls.payload.appendAssumeCapacity(@enumToInt(addrspace_inst));
}
}
@@ -3474,8 +3590,9 @@ fn structDeclInner(
container_decl: Ast.full.ContainerDecl,
layout: std.builtin.TypeInfo.ContainerLayout,
) InnerError!Zir.Inst.Ref {
+ const decl_inst = try gz.reserveInstructionIndex();
+
if (container_decl.ast.members.len == 0) {
- const decl_inst = try gz.reserveInstructionIndex();
try gz.setStruct(decl_inst, .{
.src_node = node,
.layout = layout,
@@ -3493,11 +3610,19 @@ fn structDeclInner(
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
+ var namespace: Scope.Namespace = .{
+ .parent = scope,
+ .node = node,
+ .inst = decl_inst,
+ .declaring_gz = gz,
+ };
+ defer namespace.deinit(gpa);
+
// The struct_decl instruction introduces a scope in which the decls of the struct
// are in scope, so that field types, alignments, and default value expressions
// can refer to decls within the struct itself.
var block_scope: GenZir = .{
- .parent = scope,
+ .parent = &namespace.base,
.decl_node_index = node,
.decl_line = gz.calcLine(node),
.astgen = astgen,
@@ -3506,16 +3631,13 @@ fn structDeclInner(
};
defer block_scope.instructions.deinit(gpa);
- var namespace: Scope.Namespace = .{ .parent = scope, .node = node };
- defer namespace.decls.deinit(gpa);
-
try astgen.scanDecls(&namespace, container_decl.ast.members);
var wip_decls: WipDecls = .{};
defer wip_decls.deinit(gpa);
// We don't know which members are fields until we iterate, so cannot do
- // an accurate ensureCapacity yet.
+ // an accurate ensureTotalCapacity yet.
var fields_data = ArrayListUnmanaged(u32){};
defer fields_data.deinit(gpa);
@@ -3713,7 +3835,6 @@ fn structDeclInner(
}
}
- const decl_inst = try gz.reserveInstructionIndex();
if (block_scope.instructions.items.len != 0) {
_ = try block_scope.addBreak(.break_inline, decl_inst, .void_value);
}
@@ -3727,11 +3848,18 @@ fn structDeclInner(
.known_has_bits = known_has_bits,
});
- try astgen.extra.ensureUnusedCapacity(gpa, bit_bag.items.len +
- @boolToInt(field_index != 0) + fields_data.items.len +
+ // zig fmt: off
+ try astgen.extra.ensureUnusedCapacity(gpa,
+ bit_bag.items.len +
+ @boolToInt(wip_decls.decl_index != 0) +
+ wip_decls.payload.items.len +
block_scope.instructions.items.len +
- wip_decls.bit_bag.items.len + @boolToInt(wip_decls.decl_index != 0) +
- wip_decls.payload.items.len);
+ wip_decls.bit_bag.items.len +
+ @boolToInt(field_index != 0) +
+ fields_data.items.len
+ );
+ // zig fmt: on
+
astgen.extra.appendSliceAssumeCapacity(wip_decls.bit_bag.items); // Likely empty.
if (wip_decls.decl_index != 0) {
astgen.extra.appendAssumeCapacity(wip_decls.cur_bit_bag);
@@ -3758,17 +3886,27 @@ fn unionDeclInner(
arg_node: Ast.Node.Index,
have_auto_enum: bool,
) InnerError!Zir.Inst.Ref {
+ const decl_inst = try gz.reserveInstructionIndex();
+
const astgen = gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
+ var namespace: Scope.Namespace = .{
+ .parent = scope,
+ .node = node,
+ .inst = decl_inst,
+ .declaring_gz = gz,
+ };
+ defer namespace.deinit(gpa);
+
// The union_decl instruction introduces a scope in which the decls of the union
// are in scope, so that field types, alignments, and default value expressions
// can refer to decls within the union itself.
var block_scope: GenZir = .{
- .parent = scope,
+ .parent = &namespace.base,
.decl_node_index = node,
.decl_line = gz.calcLine(node),
.astgen = astgen,
@@ -3777,13 +3915,10 @@ fn unionDeclInner(
};
defer block_scope.instructions.deinit(gpa);
- var namespace: Scope.Namespace = .{ .parent = scope, .node = node };
- defer namespace.decls.deinit(gpa);
-
try astgen.scanDecls(&namespace, members);
const arg_inst: Zir.Inst.Ref = if (arg_node != 0)
- try typeExpr(gz, &namespace.base, arg_node)
+ try typeExpr(&block_scope, &namespace.base, arg_node)
else
.none;
@@ -3791,7 +3926,7 @@ fn unionDeclInner(
defer wip_decls.deinit(gpa);
// We don't know which members are fields until we iterate, so cannot do
- // an accurate ensureCapacity yet.
+ // an accurate ensureTotalCapacity yet.
var fields_data = ArrayListUnmanaged(u32){};
defer fields_data.deinit(gpa);
@@ -3996,7 +4131,6 @@ fn unionDeclInner(
}
}
- const decl_inst = try gz.reserveInstructionIndex();
if (block_scope.instructions.items.len != 0) {
_ = try block_scope.addBreak(.break_inline, decl_inst, .void_value);
}
@@ -4011,11 +4145,18 @@ fn unionDeclInner(
.auto_enum_tag = have_auto_enum,
});
- try astgen.extra.ensureUnusedCapacity(gpa, bit_bag.items.len +
- 1 + fields_data.items.len +
+ // zig fmt: off
+ try astgen.extra.ensureUnusedCapacity(gpa,
+ bit_bag.items.len +
+ @boolToInt(wip_decls.decl_index != 0) +
+ wip_decls.payload.items.len +
block_scope.instructions.items.len +
- wip_decls.bit_bag.items.len + @boolToInt(wip_decls.decl_index != 0) +
- wip_decls.payload.items.len);
+ wip_decls.bit_bag.items.len +
+ 1 + // cur_bit_bag
+ fields_data.items.len
+ );
+ // zig fmt: on
+
astgen.extra.appendSliceAssumeCapacity(wip_decls.bit_bag.items); // Likely empty.
if (wip_decls.decl_index != 0) {
astgen.extra.appendAssumeCapacity(wip_decls.cur_bit_bag);
@@ -4178,10 +4319,20 @@ fn containerDecl(
// how structs are handled above.
const nonexhaustive = counts.nonexhaustive_node != 0;
+ const decl_inst = try gz.reserveInstructionIndex();
+
+ var namespace: Scope.Namespace = .{
+ .parent = scope,
+ .node = node,
+ .inst = decl_inst,
+ .declaring_gz = gz,
+ };
+ defer namespace.deinit(gpa);
+
// The enum_decl instruction introduces a scope in which the decls of the enum
// are in scope, so that tag values can refer to decls within the enum itself.
var block_scope: GenZir = .{
- .parent = scope,
+ .parent = &namespace.base,
.decl_node_index = node,
.decl_line = gz.calcLine(node),
.astgen = astgen,
@@ -4190,13 +4341,10 @@ fn containerDecl(
};
defer block_scope.instructions.deinit(gpa);
- var namespace: Scope.Namespace = .{ .parent = scope, .node = node };
- defer namespace.decls.deinit(gpa);
-
try astgen.scanDecls(&namespace, container_decl.ast.members);
const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0)
- try comptimeExpr(gz, &namespace.base, .{ .ty = .type_type }, container_decl.ast.arg)
+ try comptimeExpr(&block_scope, &namespace.base, .{ .ty = .type_type }, container_decl.ast.arg)
else
.none;
@@ -4391,7 +4539,6 @@ fn containerDecl(
}
}
- const decl_inst = try gz.reserveInstructionIndex();
if (block_scope.instructions.items.len != 0) {
_ = try block_scope.addBreak(.break_inline, decl_inst, .void_value);
}
@@ -4405,11 +4552,18 @@ fn containerDecl(
.decls_len = @intCast(u32, wip_decls.decl_index),
});
- try astgen.extra.ensureUnusedCapacity(gpa, bit_bag.items.len +
- 1 + fields_data.items.len +
+ // zig fmt: off
+ try astgen.extra.ensureUnusedCapacity(gpa,
+ bit_bag.items.len +
+ @boolToInt(wip_decls.decl_index != 0) +
+ wip_decls.payload.items.len +
block_scope.instructions.items.len +
- wip_decls.bit_bag.items.len + @boolToInt(wip_decls.decl_index != 0) +
- wip_decls.payload.items.len);
+ wip_decls.bit_bag.items.len +
+ 1 + // cur_bit_bag
+ fields_data.items.len
+ );
+ // zig fmt: on
+
astgen.extra.appendSliceAssumeCapacity(wip_decls.bit_bag.items); // Likely empty.
if (wip_decls.decl_index != 0) {
astgen.extra.appendAssumeCapacity(wip_decls.cur_bit_bag);
@@ -4426,8 +4580,15 @@ fn containerDecl(
.keyword_opaque => {
assert(container_decl.ast.arg == 0);
- var namespace: Scope.Namespace = .{ .parent = scope, .node = node };
- defer namespace.decls.deinit(gpa);
+ const decl_inst = try gz.reserveInstructionIndex();
+
+ var namespace: Scope.Namespace = .{
+ .parent = scope,
+ .node = node,
+ .inst = decl_inst,
+ .declaring_gz = gz,
+ };
+ defer namespace.deinit(gpa);
try astgen.scanDecls(&namespace, container_decl.ast.members);
@@ -4565,21 +4726,20 @@ fn containerDecl(
wip_decls.cur_bit_bag >>= @intCast(u5, empty_slot_count * WipDecls.bits_per_field);
}
}
- const tag: Zir.Inst.Tag = switch (gz.anon_name_strategy) {
- .parent => .opaque_decl,
- .anon => .opaque_decl_anon,
- .func => .opaque_decl_func,
- };
- const decl_inst = try gz.addBlock(tag, node);
- try gz.instructions.append(gpa, decl_inst);
- try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.OpaqueDecl).Struct.fields.len +
- wip_decls.bit_bag.items.len + @boolToInt(wip_decls.decl_index != 0) +
- wip_decls.payload.items.len);
- const zir_datas = astgen.instructions.items(.data);
- zir_datas[decl_inst].pl_node.payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.OpaqueDecl{
+ try gz.setOpaque(decl_inst, .{
+ .src_node = node,
.decls_len = @intCast(u32, wip_decls.decl_index),
});
+
+ // zig fmt: off
+ try astgen.extra.ensureUnusedCapacity(gpa,
+ wip_decls.bit_bag.items.len +
+ @boolToInt(wip_decls.decl_index != 0) +
+ wip_decls.payload.items.len
+ );
+ // zig fmt: on
+
astgen.extra.appendSliceAssumeCapacity(wip_decls.bit_bag.items); // Likely empty.
if (wip_decls.decl_index != 0) {
astgen.extra.appendAssumeCapacity(wip_decls.cur_bit_bag);
@@ -4865,6 +5025,21 @@ fn fieldAccess(
scope: *Scope,
rl: ResultLoc,
node: Ast.Node.Index,
+) InnerError!Zir.Inst.Ref {
+ if (rl == .ref) {
+ return addFieldAccess(.field_ptr, gz, scope, .ref, node);
+ } else {
+ const access = try addFieldAccess(.field_val, gz, scope, .none_or_ref, node);
+ return rvalue(gz, rl, access, node);
+ }
+}
+
+fn addFieldAccess(
+ tag: Zir.Inst.Tag,
+ gz: *GenZir,
+ scope: *Scope,
+ lhs_rl: ResultLoc,
+ node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const tree = astgen.tree;
@@ -4875,16 +5050,11 @@ fn fieldAccess(
const dot_token = main_tokens[node];
const field_ident = dot_token + 1;
const str_index = try astgen.identAsString(field_ident);
- switch (rl) {
- .ref => return gz.addPlNode(.field_ptr, node, Zir.Inst.Field{
- .lhs = try expr(gz, scope, .ref, object_node),
- .field_name_start = str_index,
- }),
- else => return rvalue(gz, rl, try gz.addPlNode(.field_val, node, Zir.Inst.Field{
- .lhs = try expr(gz, scope, .none_or_ref, object_node),
- .field_name_start = str_index,
- }), node),
- }
+
+ return gz.addPlNode(tag, node, Zir.Inst.Field{
+ .lhs = try expr(gz, scope, lhs_rl, object_node),
+ .field_name_start = str_index,
+ });
}
fn arrayAccess(
@@ -6320,6 +6490,7 @@ fn identifier(
const astgen = gz.astgen;
const tree = astgen.tree;
+ const gpa = astgen.gpa;
const main_tokens = tree.nodes.items(.main_token);
const ident_token = main_tokens[ident];
@@ -6366,16 +6537,28 @@ fn identifier(
const name_str_index = try astgen.identAsString(ident_token);
var s = scope;
var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already
- var hit_namespace: Ast.Node.Index = 0;
+ var num_namespaces_out: u32 = 0;
+ var capturing_namespace: ?*Scope.Namespace = null;
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
if (local_val.name == name_str_index) {
- local_val.used = true;
// Locals cannot shadow anything, so we do not need to look for ambiguous
// references in this case.
- return rvalue(gz, rl, local_val.inst, ident);
+ local_val.used = true;
+
+ const value_inst = try tunnelThroughClosure(
+ gz,
+ ident,
+ num_namespaces_out,
+ capturing_namespace,
+ local_val.inst,
+ local_val.token_src,
+ gpa,
+ );
+
+ return rvalue(gz, rl, value_inst, ident);
}
s = local_val.parent;
},
@@ -6383,16 +6566,29 @@ fn identifier(
const local_ptr = s.cast(Scope.LocalPtr).?;
if (local_ptr.name == name_str_index) {
local_ptr.used = true;
- if (hit_namespace != 0 and !local_ptr.maybe_comptime) {
+
+ // Can't close over a runtime variable
+ if (num_namespaces_out != 0 and !local_ptr.maybe_comptime) {
return astgen.failNodeNotes(ident, "mutable '{s}' not accessible from here", .{ident_name}, &.{
try astgen.errNoteTok(local_ptr.token_src, "declared mutable here", .{}),
- try astgen.errNoteNode(hit_namespace, "crosses namespace boundary here", .{}),
+ try astgen.errNoteNode(capturing_namespace.?.node, "crosses namespace boundary here", .{}),
});
}
+
+ const ptr_inst = try tunnelThroughClosure(
+ gz,
+ ident,
+ num_namespaces_out,
+ capturing_namespace,
+ local_ptr.ptr,
+ local_ptr.token_src,
+ gpa,
+ );
+
switch (rl) {
- .ref, .none_or_ref => return local_ptr.ptr,
+ .ref, .none_or_ref => return ptr_inst,
else => {
- const loaded = try gz.addUnNode(.load, local_ptr.ptr, ident);
+ const loaded = try gz.addUnNode(.load, ptr_inst, ident);
return rvalue(gz, rl, loaded, ident);
},
}
@@ -6413,7 +6609,8 @@ fn identifier(
// We found a match but must continue looking for ambiguous references to decls.
found_already = i;
}
- hit_namespace = ns.node;
+ num_namespaces_out += 1;
+ capturing_namespace = ns;
s = ns.parent;
},
.top => break,
@@ -6433,6 +6630,37 @@ fn identifier(
}
}
+/// Adds a capture to a namespace, if needed.
+/// Returns the index of the closure_capture instruction.
+fn tunnelThroughClosure(
+ gz: *GenZir,
+ inner_ref_node: Ast.Node.Index,
+ num_tunnels: u32,
+ ns: ?*Scope.Namespace,
+ value: Zir.Inst.Ref,
+ token: Ast.TokenIndex,
+ gpa: *Allocator,
+) !Zir.Inst.Ref {
+ // For trivial values, we don't need a tunnel.
+ // Just return the ref.
+ if (num_tunnels == 0 or refToIndex(value) == null) {
+ return value;
+ }
+
+ // Otherwise we need a tunnel. Check if this namespace
+ // already has one for this value.
+ const gop = try ns.?.captures.getOrPut(gpa, refToIndex(value).?);
+ if (!gop.found_existing) {
+ // Make a new capture for this value
+ const capture_ref = try ns.?.declaring_gz.?.addUnTok(.closure_capture, value, token);
+ gop.value_ptr.* = refToIndex(capture_ref).?;
+ }
+
+ // Add an instruction to get the value from the closure into
+ // our current context
+ return try gz.addInstNode(.closure_get, gop.value_ptr.*, inner_ref_node);
+}
+
fn stringLiteral(
gz: *GenZir,
rl: ResultLoc,
@@ -6866,35 +7094,13 @@ fn asRlPtr(
operand_node: Ast.Node.Index,
dest_type: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
- // Detect whether this expr() call goes into rvalue() to store the result into the
- // result location. If it does, elide the coerce_result_ptr instruction
- // as well as the store instruction, instead passing the result as an rvalue.
const astgen = parent_gz.astgen;
- var as_scope = parent_gz.makeSubBlock(scope);
+ var as_scope = try parent_gz.makeCoercionScope(scope, dest_type, result_ptr);
defer as_scope.instructions.deinit(astgen.gpa);
- as_scope.rl_ptr = try as_scope.addBin(.coerce_result_ptr, dest_type, result_ptr);
const result = try reachableExpr(&as_scope, &as_scope.base, .{ .block_ptr = &as_scope }, operand_node, src_node);
- const parent_zir = &parent_gz.instructions;
- if (as_scope.rvalue_rl_count == 1) {
- // Busted! This expression didn't actually need a pointer.
- const zir_tags = astgen.instructions.items(.tag);
- const zir_datas = astgen.instructions.items(.data);
- try parent_zir.ensureUnusedCapacity(astgen.gpa, as_scope.instructions.items.len);
- for (as_scope.instructions.items) |src_inst| {
- if (indexToRef(src_inst) == as_scope.rl_ptr) continue;
- if (zir_tags[src_inst] == .store_to_block_ptr) {
- if (zir_datas[src_inst].bin.lhs == as_scope.rl_ptr) continue;
- }
- parent_zir.appendAssumeCapacity(src_inst);
- }
- const casted_result = try parent_gz.addBin(.as, dest_type, result);
- return rvalue(parent_gz, rl, casted_result, operand_node);
- } else {
- try parent_zir.appendSlice(astgen.gpa, as_scope.instructions.items);
- return result;
- }
+ return as_scope.finishCoercion(parent_gz, rl, operand_node, result, dest_type);
}
fn bitCast(
@@ -7030,16 +7236,15 @@ fn builtinCall(
return rvalue(gz, rl, result, node);
},
.field => {
- const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
if (rl == .ref) {
return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .ref, params[0]),
- .field_name = field_name,
+ .field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]),
});
}
const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .none, params[0]),
- .field_name = field_name,
+ .field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]),
});
return rvalue(gz, rl, result, node);
},
@@ -7047,7 +7252,7 @@ fn builtinCall(
.bit_cast => return bitCast( gz, scope, rl, node, params[0], params[1]),
.TypeOf => return typeOf( gz, scope, rl, node, params),
.union_init => return unionInit(gz, scope, rl, node, params),
- .c_import => return cImport( gz, scope, rl, node, params[0]),
+ .c_import => return cImport( gz, scope, node, params[0]),
.@"export" => {
const node_tags = tree.nodes.items(.tag);
@@ -7060,32 +7265,55 @@ fn builtinCall(
.identifier => {
const ident_token = main_tokens[params[0]];
decl_name = try astgen.identAsString(ident_token);
- {
- var s = scope;
- while (true) switch (s.tag) {
- .local_val => {
- const local_val = s.cast(Scope.LocalVal).?;
- if (local_val.name == decl_name) {
- local_val.used = true;
- break;
+
+ var s = scope;
+ var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already
+ while (true) switch (s.tag) {
+ .local_val => {
+ const local_val = s.cast(Scope.LocalVal).?;
+ if (local_val.name == decl_name) {
+ local_val.used = true;
+ _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{
+ .operand = local_val.inst,
+ .options = try comptimeExpr(gz, scope, .{ .coerced_ty = .export_options_type }, params[1]),
+ });
+ return rvalue(gz, rl, .void_value, node);
+ }
+ s = local_val.parent;
+ },
+ .local_ptr => {
+ const local_ptr = s.cast(Scope.LocalPtr).?;
+ if (local_ptr.name == decl_name) {
+ if (!local_ptr.maybe_comptime)
+ return astgen.failNode(params[0], "unable to export runtime-known value", .{});
+ local_ptr.used = true;
+ const loaded = try gz.addUnNode(.load, local_ptr.ptr, node);
+ _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{
+ .operand = loaded,
+ .options = try comptimeExpr(gz, scope, .{ .coerced_ty = .export_options_type }, params[1]),
+ });
+ return rvalue(gz, rl, .void_value, node);
+ }
+ s = local_ptr.parent;
+ },
+ .gen_zir => s = s.cast(GenZir).?.parent,
+ .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
+ .namespace => {
+ const ns = s.cast(Scope.Namespace).?;
+ if (ns.decls.get(decl_name)) |i| {
+ if (found_already) |f| {
+ return astgen.failNodeNotes(node, "ambiguous reference", .{}, &.{
+ try astgen.errNoteNode(f, "declared here", .{}),
+ try astgen.errNoteNode(i, "also declared here", .{}),
+ });
}
- s = local_val.parent;
- },
- .local_ptr => {
- const local_ptr = s.cast(Scope.LocalPtr).?;
- if (local_ptr.name == decl_name) {
- if (!local_ptr.maybe_comptime)
- return astgen.failNode(params[0], "unable to export runtime-known value", .{});
- local_ptr.used = true;
- break;
- }
- s = local_ptr.parent;
- },
- .gen_zir => s = s.cast(GenZir).?.parent,
- .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
- .namespace, .top => break,
- };
- }
+ // We found a match but must continue looking for ambiguous references to decls.
+ found_already = i;
+ }
+ s = ns.parent;
+ },
+ .top => break,
+ };
},
.field_access => {
const namespace_node = node_datas[params[0]].lhs;
@@ -7116,9 +7344,13 @@ fn builtinCall(
});
return rvalue(gz, rl, result, node);
},
+ .fence => {
+ const order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[0]);
+ const result = try gz.addUnNode(.fence, order, node);
+ return rvalue(gz, rl, result, node);
+ },
.breakpoint => return simpleNoOpVoid(gz, rl, node, .breakpoint),
- .fence => return simpleNoOpVoid(gz, rl, node, .fence),
.This => return rvalue(gz, rl, try gz.addNodeExtended(.this, node), node),
.return_address => return rvalue(gz, rl, try gz.addNodeExtended(.ret_addr, node), node),
@@ -7230,6 +7462,7 @@ fn builtinCall(
return rvalue(gz, rl, result, node);
},
.c_define => {
+ if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{});
const name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[0]);
const value = try comptimeExpr(gz, scope, .none, params[1]);
const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{
@@ -7305,13 +7538,9 @@ fn builtinCall(
return rvalue(gz, rl, result, node);
},
- .add_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .add_with_saturation),
- .sub_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .sub_with_saturation),
- .mul_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .mul_with_saturation),
- .shl_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .shl_with_saturation),
-
.atomic_load => {
const int_type = try typeExpr(gz, scope, params[0]);
+ // TODO allow this pointer type to be volatile
const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
.ptr_type_simple = .{
.is_allowzero = false,
@@ -7321,16 +7550,17 @@ fn builtinCall(
.elem_type = int_type,
},
} });
- const ptr = try expr(gz, scope, .{ .ty = ptr_type }, params[1]);
- const ordering = try expr(gz, scope, .{ .ty = .atomic_order_type }, params[2]);
const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.Bin{
- .lhs = ptr,
- .rhs = ordering,
+ // zig fmt: off
+ .lhs = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
+ .rhs = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[2]),
+ // zig fmt: on
});
return rvalue(gz, rl, result, node);
},
.atomic_rmw => {
const int_type = try typeExpr(gz, scope, params[0]);
+ // TODO allow this pointer type to be volatile
const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
.ptr_type_simple = .{
.is_allowzero = false,
@@ -7340,20 +7570,19 @@ fn builtinCall(
.elem_type = int_type,
},
} });
- const ptr = try expr(gz, scope, .{ .ty = ptr_type }, params[1]);
- const operation = try expr(gz, scope, .{ .ty = .atomic_rmw_op_type }, params[2]);
- const operand = try expr(gz, scope, .{ .ty = int_type }, params[3]);
- const ordering = try expr(gz, scope, .{ .ty = .atomic_order_type }, params[4]);
const result = try gz.addPlNode(.atomic_rmw, node, Zir.Inst.AtomicRmw{
- .ptr = ptr,
- .operation = operation,
- .operand = operand,
- .ordering = ordering,
+ // zig fmt: off
+ .ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
+ .operation = try expr(gz, scope, .{ .coerced_ty = .atomic_rmw_op_type }, params[2]),
+ .operand = try expr(gz, scope, .{ .coerced_ty = int_type }, params[3]),
+ .ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[4]),
+ // zig fmt: on
});
return rvalue(gz, rl, result, node);
},
.atomic_store => {
const int_type = try typeExpr(gz, scope, params[0]);
+ // TODO allow this pointer type to be volatile
const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
.ptr_type_simple = .{
.is_allowzero = false,
@@ -7363,13 +7592,12 @@ fn builtinCall(
.elem_type = int_type,
},
} });
- const ptr = try expr(gz, scope, .{ .ty = ptr_type }, params[1]);
- const operand = try expr(gz, scope, .{ .ty = int_type }, params[2]);
- const ordering = try expr(gz, scope, .{ .ty = .atomic_order_type }, params[3]);
const result = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{
- .ptr = ptr,
- .operand = operand,
- .ordering = ordering,
+ // zig fmt: off
+ .ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
+ .operand = try expr(gz, scope, .{ .coerced_ty = int_type }, params[2]),
+ .ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[3]),
+ // zig fmt: on
});
return rvalue(gz, rl, result, node);
},
@@ -7387,7 +7615,7 @@ fn builtinCall(
},
.call => {
const options = try comptimeExpr(gz, scope, .{ .ty = .call_options_type }, params[0]);
- const callee = try expr(gz, scope, .none, params[1]);
+ const callee = try calleeExpr(gz, scope, params[1]);
const args = try expr(gz, scope, .none, params[2]);
const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{
.options = options,
@@ -7409,17 +7637,17 @@ fn builtinCall(
},
.memcpy => {
const result = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
- .dest = try expr(gz, scope, .{ .ty = .manyptr_u8_type }, params[0]),
- .source = try expr(gz, scope, .{ .ty = .manyptr_const_u8_type }, params[1]),
- .byte_count = try expr(gz, scope, .{ .ty = .usize_type }, params[2]),
+ .dest = try expr(gz, scope, .{ .coerced_ty = .manyptr_u8_type }, params[0]),
+ .source = try expr(gz, scope, .{ .coerced_ty = .manyptr_const_u8_type }, params[1]),
+ .byte_count = try expr(gz, scope, .{ .coerced_ty = .usize_type }, params[2]),
});
return rvalue(gz, rl, result, node);
},
.memset => {
const result = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
- .dest = try expr(gz, scope, .{ .ty = .manyptr_u8_type }, params[0]),
- .byte = try expr(gz, scope, .{ .ty = .u8_type }, params[1]),
- .byte_count = try expr(gz, scope, .{ .ty = .usize_type }, params[2]),
+ .dest = try expr(gz, scope, .{ .coerced_ty = .manyptr_u8_type }, params[0]),
+ .byte = try expr(gz, scope, .{ .coerced_ty = .u8_type }, params[1]),
+ .byte_count = try expr(gz, scope, .{ .coerced_ty = .usize_type }, params[2]),
});
return rvalue(gz, rl, result, node);
},
@@ -7452,12 +7680,11 @@ fn builtinCall(
},
.Vector => {
const result = try gz.addPlNode(.vector_type, node, Zir.Inst.Bin{
- .lhs = try comptimeExpr(gz, scope, .{.ty = .u32_type}, params[0]),
+ .lhs = try comptimeExpr(gz, scope, .{ .ty = .u32_type }, params[0]),
.rhs = try typeExpr(gz, scope, params[1]),
});
return rvalue(gz, rl, result, node);
},
-
}
// zig fmt: on
}
@@ -7603,6 +7830,8 @@ fn simpleCBuiltin(
operand_node: Ast.Node.Index,
tag: Zir.Inst.Extended,
) InnerError!Zir.Inst.Ref {
+ const name: []const u8 = if (tag == .c_undef) "C undef" else "C include";
+ if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name});
const operand = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, operand_node);
_ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
@@ -7651,7 +7880,6 @@ fn shiftOp(
fn cImport(
gz: *GenZir,
scope: *Scope,
- rl: ResultLoc,
node: Ast.Node.Index,
body_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
@@ -7660,6 +7888,7 @@ fn cImport(
var block_scope = gz.makeSubBlock(scope);
block_scope.force_comptime = true;
+ block_scope.c_import = true;
defer block_scope.instructions.deinit(gpa);
const block_inst = try gz.addBlock(.c_import, node);
@@ -7670,7 +7899,7 @@ fn cImport(
try block_scope.setBlockBody(block_inst);
try gz.instructions.append(gpa, block_inst);
- return rvalue(gz, rl, .void_value, node);
+ return indexToRef(block_inst);
}
fn overflowArithmetic(
@@ -7703,24 +7932,6 @@ fn overflowArithmetic(
return rvalue(gz, rl, result, node);
}
-fn saturatingArithmetic(
- gz: *GenZir,
- scope: *Scope,
- rl: ResultLoc,
- node: Ast.Node.Index,
- params: []const Ast.Node.Index,
- tag: Zir.Inst.Extended,
-) InnerError!Zir.Inst.Ref {
- const lhs = try expr(gz, scope, .none, params[0]);
- const rhs = try expr(gz, scope, .none, params[1]);
- const result = try gz.addExtendedPayload(tag, Zir.Inst.SaturatingArithmetic{
- .node = gz.nodeIndexToRelative(node),
- .lhs = lhs,
- .rhs = rhs,
- });
- return rvalue(gz, rl, result, node);
-}
-
fn callExpr(
gz: *GenZir,
scope: *Scope,
@@ -7729,20 +7940,16 @@ fn callExpr(
call: Ast.full.Call,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
- const lhs = try expr(gz, scope, .none, call.ast.fn_expr);
+
+ const callee = try calleeExpr(gz, scope, call.ast.fn_expr);
const args = try astgen.gpa.alloc(Zir.Inst.Ref, call.ast.params.len);
defer astgen.gpa.free(args);
for (call.ast.params) |param_node, i| {
- const param_type = try gz.add(.{
- .tag = .param_type,
- .data = .{ .param_type = .{
- .callee = lhs,
- .param_index = @intCast(u32, i),
- } },
- });
- args[i] = try expr(gz, scope, .{ .coerced_ty = param_type }, param_node);
+ // Parameters are always temporary values; they have no
+ // meaningful result location. Sema will coerce them.
+ args[i] = try expr(gz, scope, .none, param_node);
}
const modifier: std.builtin.CallOptions.Modifier = blk: {
@@ -7757,20 +7964,72 @@ fn callExpr(
}
break :blk .auto;
};
- const result: Zir.Inst.Ref = res: {
- const tag: Zir.Inst.Tag = switch (modifier) {
- .auto => .call,
- .async_kw => .call_async,
- .never_tail => unreachable,
- .never_inline => unreachable,
- .no_async => .call_nosuspend,
- .always_tail => unreachable,
- .always_inline => unreachable,
- .compile_time => .call_compile_time,
- };
- break :res try gz.addCall(tag, lhs, args, node);
- };
- return rvalue(gz, rl, result, node); // TODO function call with result location
+ const call_inst = try gz.addCall(modifier, callee, args, node);
+ return rvalue(gz, rl, call_inst, node); // TODO function call with result location
+}
+
+/// calleeExpr generates the function part of a call expression (f in f(x)), or the
+/// callee argument to the @call() builtin. If the lhs is a field access or the
+/// @field() builtin, we need to generate a special field_call_bind instruction
+/// instead of the normal field_val or field_ptr. If this is an inst.func() call,
+/// this instruction will capture the value of the first argument before evaluating
+/// the other arguments. We need to use .ref here to guarantee we will be able to
+/// promote an lvalue to an address if the first parameter requires it. This
+/// unfortunately also means we need to take a reference to any types on the lhs.
+fn calleeExpr(
+ gz: *GenZir,
+ scope: *Scope,
+ node: Ast.Node.Index,
+) InnerError!Zir.Inst.Ref {
+ const astgen = gz.astgen;
+ const tree = astgen.tree;
+
+ const tag = tree.nodes.items(.tag)[node];
+ switch (tag) {
+ .field_access => return addFieldAccess(.field_call_bind, gz, scope, .ref, node),
+
+ .builtin_call_two,
+ .builtin_call_two_comma,
+ .builtin_call,
+ .builtin_call_comma,
+ => {
+ const node_datas = tree.nodes.items(.data);
+ const main_tokens = tree.nodes.items(.main_token);
+ const builtin_token = main_tokens[node];
+ const builtin_name = tree.tokenSlice(builtin_token);
+
+ var inline_params: [2]Ast.Node.Index = undefined;
+ var params: []Ast.Node.Index = switch (tag) {
+ .builtin_call,
+ .builtin_call_comma,
+ => tree.extra_data[node_datas[node].lhs..node_datas[node].rhs],
+
+ .builtin_call_two,
+ .builtin_call_two_comma,
+ => blk: {
+ inline_params = .{ node_datas[node].lhs, node_datas[node].rhs };
+ const len: usize = if (inline_params[0] == 0) @as(usize, 0) else if (inline_params[1] == 0) @as(usize, 1) else @as(usize, 2);
+ break :blk inline_params[0..len];
+ },
+
+ else => unreachable,
+ };
+
+ // If anything is wrong, fall back to builtinCall.
+ // It will emit any necessary compile errors and notes.
+ if (std.mem.eql(u8, builtin_name, "@field") and params.len == 2) {
+ const lhs = try expr(gz, scope, .ref, params[0]);
+ const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
+ return gz.addPlNode(.field_call_bind_named, node, Zir.Inst.FieldNamed{
+ .lhs = lhs,
+ .field_name = field_name,
+ });
+ }
+
+ return builtinCall(gz, scope, .none, node, params);
+ },
+ else => return expr(gz, scope, .none, node),
+ }
}
pub const simple_types = std.ComptimeStringMap(Zir.Inst.Ref, .{
@@ -7892,27 +8151,33 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool
.asm_simple,
.add,
.add_wrap,
+ .add_sat,
.array_cat,
.array_mult,
.assign,
.assign_bit_and,
.assign_bit_or,
- .assign_bit_shift_left,
- .assign_bit_shift_right,
+ .assign_shl,
+ .assign_shl_sat,
+ .assign_shr,
.assign_bit_xor,
.assign_div,
.assign_sub,
.assign_sub_wrap,
+ .assign_sub_sat,
.assign_mod,
.assign_add,
.assign_add_wrap,
+ .assign_add_sat,
.assign_mul,
.assign_mul_wrap,
+ .assign_mul_sat,
.bang_equal,
.bit_and,
.bit_or,
- .bit_shift_left,
- .bit_shift_right,
+ .shl,
+ .shl_sat,
+ .shr,
.bit_xor,
.bool_and,
.bool_or,
@@ -7927,10 +8192,12 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool
.mod,
.mul,
.mul_wrap,
+ .mul_sat,
.switch_range,
.field_access,
.sub,
.sub_wrap,
+ .sub_sat,
.slice,
.slice_open,
.slice_sentinel,
@@ -8008,17 +8275,31 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool
}
},
- .builtin_call,
- .builtin_call_comma,
- .builtin_call_two,
- .builtin_call_two_comma,
- => {
+ .builtin_call_two, .builtin_call_two_comma => {
const builtin_token = main_tokens[node];
const builtin_name = tree.tokenSlice(builtin_token);
// If the builtin is an invalid name, we don't cause an error here; instead
// let it pass, and the error will be "invalid builtin function" later.
const builtin_info = BuiltinFn.list.get(builtin_name) orelse return false;
- return builtin_info.needs_mem_loc;
+ switch (builtin_info.needs_mem_loc) {
+ .never => return false,
+ .always => return true,
+ .forward1 => node = node_datas[node].rhs,
+ }
+ },
+
+ .builtin_call, .builtin_call_comma => {
+ const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs];
+ const builtin_token = main_tokens[node];
+ const builtin_name = tree.tokenSlice(builtin_token);
+ // If the builtin is an invalid name, we don't cause an error here; instead
+ // let it pass, and the error will be "invalid builtin function" later.
+ const builtin_info = BuiltinFn.list.get(builtin_name) orelse return false;
+ switch (builtin_info.needs_mem_loc) {
+ .never => return false,
+ .always => return true,
+ .forward1 => node = params[1],
+ }
},
}
}
@@ -8125,27 +8406,33 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never
.tagged_union_enum_tag_trailing,
.add,
.add_wrap,
+ .add_sat,
.array_cat,
.array_mult,
.assign,
.assign_bit_and,
.assign_bit_or,
- .assign_bit_shift_left,
- .assign_bit_shift_right,
+ .assign_shl,
+ .assign_shl_sat,
+ .assign_shr,
.assign_bit_xor,
.assign_div,
.assign_sub,
.assign_sub_wrap,
+ .assign_sub_sat,
.assign_mod,
.assign_add,
.assign_add_wrap,
+ .assign_add_sat,
.assign_mul,
.assign_mul_wrap,
+ .assign_mul_sat,
.bang_equal,
.bit_and,
.bit_or,
- .bit_shift_left,
- .bit_shift_right,
+ .shl,
+ .shl_sat,
+ .shr,
.bit_xor,
.bool_and,
.bool_or,
@@ -8160,9 +8447,11 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never
.mod,
.mul,
.mul_wrap,
+ .mul_sat,
.switch_range,
.sub,
.sub_wrap,
+ .sub_sat,
.slice,
.slice_open,
.slice_sentinel,
@@ -8297,27 +8586,33 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
.asm_simple,
.add,
.add_wrap,
+ .add_sat,
.array_cat,
.array_mult,
.assign,
.assign_bit_and,
.assign_bit_or,
- .assign_bit_shift_left,
- .assign_bit_shift_right,
+ .assign_shl,
+ .assign_shl_sat,
+ .assign_shr,
.assign_bit_xor,
.assign_div,
.assign_sub,
.assign_sub_wrap,
+ .assign_sub_sat,
.assign_mod,
.assign_add,
.assign_add_wrap,
+ .assign_add_sat,
.assign_mul,
.assign_mul_wrap,
+ .assign_mul_sat,
.bang_equal,
.bit_and,
.bit_or,
- .bit_shift_left,
- .bit_shift_right,
+ .shl,
+ .shl_sat,
+ .shr,
.bit_xor,
.bool_and,
.bool_or,
@@ -8332,10 +8627,12 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
.mod,
.mul,
.mul_wrap,
+ .mul_sat,
.switch_range,
.field_access,
.sub,
.sub_wrap,
+ .sub_sat,
.slice,
.slice_open,
.slice_sentinel,
@@ -8894,6 +9191,17 @@ const Scope = struct {
return @fieldParentPtr(T, "base", base);
}
+ fn parent(base: *Scope) ?*Scope {
+ return switch (base.tag) {
+ .gen_zir => base.cast(GenZir).?.parent,
+ .local_val => base.cast(LocalVal).?.parent,
+ .local_ptr => base.cast(LocalPtr).?.parent,
+ .defer_normal, .defer_error => base.cast(Defer).?.parent,
+ .namespace => base.cast(Namespace).?.parent,
+ .top => null,
+ };
+ }
+
const Tag = enum {
gen_zir,
local_val,
@@ -8919,7 +9227,7 @@ const Scope = struct {
const LocalVal = struct {
const base_tag: Tag = .local_val;
base: Scope = Scope{ .tag = base_tag },
- /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`.
+ /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
parent: *Scope,
gen_zir: *GenZir,
inst: Zir.Inst.Ref,
@@ -8938,7 +9246,7 @@ const Scope = struct {
const LocalPtr = struct {
const base_tag: Tag = .local_ptr;
base: Scope = Scope{ .tag = base_tag },
- /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`.
+ /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
parent: *Scope,
gen_zir: *GenZir,
ptr: Zir.Inst.Ref,
@@ -8956,7 +9264,7 @@ const Scope = struct {
const Defer = struct {
base: Scope,
- /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`.
+ /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
parent: *Scope,
defer_node: Ast.Node.Index,
};
@@ -8967,11 +9275,27 @@ const Scope = struct {
const base_tag: Tag = .namespace;
base: Scope = Scope{ .tag = base_tag },
+ /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
parent: *Scope,
/// Maps string table index to the source location of declaration,
/// for the purposes of reporting name shadowing compile errors.
decls: std.AutoHashMapUnmanaged(u32, Ast.Node.Index) = .{},
node: Ast.Node.Index,
+ inst: Zir.Inst.Index,
+
+ /// The astgen scope containing this namespace.
+ /// Only valid during astgen.
+ declaring_gz: ?*GenZir,
+
+ /// Map from the raw captured value to the instruction
+ /// ref of the capture for decls in this namespace
+ captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
+
+ pub fn deinit(self: *Namespace, gpa: *Allocator) void {
+ self.decls.deinit(gpa);
+ self.captures.deinit(gpa);
+ self.* = undefined;
+ }
};
const Top = struct {
@@ -8987,12 +9311,14 @@ const GenZir = struct {
base: Scope = Scope{ .tag = base_tag },
force_comptime: bool,
in_defer: bool,
+ c_import: bool = false,
/// How decls created in this scope should be named.
anon_name_strategy: Zir.Inst.NameStrategy = .anon,
/// The containing decl AST node.
decl_node_index: Ast.Node.Index,
/// The containing decl line index, absolute.
decl_line: u32,
+ /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
parent: *Scope,
/// All `GenZir` scopes for the same ZIR share this.
astgen: *AstGen,
@@ -9028,10 +9354,17 @@ const GenZir = struct {
suspend_node: Ast.Node.Index = 0,
nosuspend_node: Ast.Node.Index = 0,
+ /// Namespace members are lazy. When executing a decl within a namespace,
+ /// any references to external instructions need to be treated specially.
+ /// This list tracks those references. See also .closure_capture and .closure_get.
+ /// Keys are the raw instruction index, values are the closure_capture instruction.
+ captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
+
fn makeSubBlock(gz: *GenZir, scope: *Scope) GenZir {
return .{
.force_comptime = gz.force_comptime,
.in_defer = gz.in_defer,
+ .c_import = gz.c_import,
.decl_node_index = gz.decl_node_index,
.decl_line = gz.decl_line,
.parent = scope,
@@ -9041,6 +9374,52 @@ const GenZir = struct {
};
}
+ fn makeCoercionScope(
+ parent_gz: *GenZir,
+ scope: *Scope,
+ dest_type: Zir.Inst.Ref,
+ result_ptr: Zir.Inst.Ref,
+ ) !GenZir {
+ // Detect whether this expr() call goes into rvalue() to store the result into the
+ // result location. If it does, elide the coerce_result_ptr instruction
+ // as well as the store instruction, instead passing the result as an rvalue.
+ var as_scope = parent_gz.makeSubBlock(scope);
+ errdefer as_scope.instructions.deinit(parent_gz.astgen.gpa);
+ as_scope.rl_ptr = try as_scope.addBin(.coerce_result_ptr, dest_type, result_ptr);
+
+ return as_scope;
+ }
+
+ fn finishCoercion(
+ as_scope: *GenZir,
+ parent_gz: *GenZir,
+ rl: ResultLoc,
+ src_node: Ast.Node.Index,
+ result: Zir.Inst.Ref,
+ dest_type: Zir.Inst.Ref,
+ ) !Zir.Inst.Ref {
+ const astgen = as_scope.astgen;
+ const parent_zir = &parent_gz.instructions;
+ if (as_scope.rvalue_rl_count == 1) {
+ // Busted! This expression didn't actually need a pointer.
+ const zir_tags = astgen.instructions.items(.tag);
+ const zir_datas = astgen.instructions.items(.data);
+ try parent_zir.ensureUnusedCapacity(astgen.gpa, as_scope.instructions.items.len);
+ for (as_scope.instructions.items) |src_inst| {
+ if (indexToRef(src_inst) == as_scope.rl_ptr) continue;
+ if (zir_tags[src_inst] == .store_to_block_ptr) {
+ if (zir_datas[src_inst].bin.lhs == as_scope.rl_ptr) continue;
+ }
+ parent_zir.appendAssumeCapacity(src_inst);
+ }
+ const casted_result = try parent_gz.addBin(.as, dest_type, result);
+ return rvalue(parent_gz, rl, casted_result, src_node);
+ } else {
+ try parent_zir.appendSlice(astgen.gpa, as_scope.instructions.items);
+ return result;
+ }
+ }
+
const Label = struct {
token: Ast.TokenIndex,
block_inst: Zir.Inst.Index,
@@ -9357,7 +9736,7 @@ const GenZir = struct {
fn addCall(
gz: *GenZir,
- tag: Zir.Inst.Tag,
+ modifier: std.builtin.CallOptions.Modifier,
callee: Zir.Inst.Ref,
args: []const Zir.Inst.Ref,
/// Absolute node index. This function does the conversion to offset from Decl.
@@ -9366,20 +9745,24 @@ const GenZir = struct {
assert(callee != .none);
assert(src_node != 0);
const gpa = gz.astgen.gpa;
+ const Call = Zir.Inst.Call;
try gz.instructions.ensureUnusedCapacity(gpa, 1);
try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
- try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Call).Struct.fields.len +
+ try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Call).Struct.fields.len +
args.len);
- const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Call{
+ const payload_index = gz.astgen.addExtraAssumeCapacity(Call{
.callee = callee,
- .args_len = @intCast(u32, args.len),
+ .flags = .{
+ .packed_modifier = @intCast(Call.Flags.PackedModifier, @enumToInt(modifier)),
+ .args_len = @intCast(Call.Flags.PackedArgsLen, args.len),
+ },
});
gz.astgen.appendRefsAssumeCapacity(args);
const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
gz.astgen.instructions.appendAssumeCapacity(.{
- .tag = tag,
+ .tag = .call,
.data = .{ .pl_node = .{
.src_node = gz.nodeIndexToRelative(src_node),
.payload_index = payload_index,
@@ -9695,6 +10078,22 @@ const GenZir = struct {
});
}
+ fn addInstNode(
+ gz: *GenZir,
+ tag: Zir.Inst.Tag,
+ inst: Zir.Inst.Index,
+ /// Absolute node index. This function does the conversion to offset from Decl.
+ src_node: Ast.Node.Index,
+ ) !Zir.Inst.Ref {
+ return gz.add(.{
+ .tag = tag,
+ .data = .{ .inst_node = .{
+ .inst = inst,
+ .src_node = gz.nodeIndexToRelative(src_node),
+ } },
+ });
+ }
+
fn addNodeExtended(
gz: *GenZir,
opcode: Zir.Inst.Extended,
@@ -9996,6 +10395,37 @@ const GenZir = struct {
});
}
+ fn setOpaque(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
+ src_node: Ast.Node.Index,
+ decls_len: u32,
+ }) !void {
+ const astgen = gz.astgen;
+ const gpa = astgen.gpa;
+
+ try astgen.extra.ensureUnusedCapacity(gpa, 2);
+ const payload_index = @intCast(u32, astgen.extra.items.len);
+
+ if (args.src_node != 0) {
+ const node_offset = gz.nodeIndexToRelative(args.src_node);
+ astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset));
+ }
+ if (args.decls_len != 0) {
+ astgen.extra.appendAssumeCapacity(args.decls_len);
+ }
+ astgen.instructions.set(inst, .{
+ .tag = .extended,
+ .data = .{ .extended = .{
+ .opcode = .opaque_decl,
+ .small = @bitCast(u16, Zir.Inst.OpaqueDecl.Small{
+ .has_src_node = args.src_node != 0,
+ .has_decls_len = args.decls_len != 0,
+ .name_strategy = gz.anon_name_strategy,
+ }),
+ .operand = payload_index,
+ } },
+ });
+ }
+
fn add(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Ref {
return indexToRef(try gz.addAsIndex(inst));
}
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index e415d27a3a..e1f4f5bd16 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -2,7 +2,6 @@ const std = @import("std");
pub const Tag = enum {
add_with_overflow,
- add_with_saturation,
align_cast,
align_of,
as,
@@ -66,7 +65,6 @@ pub const Tag = enum {
wasm_memory_grow,
mod,
mul_with_overflow,
- mul_with_saturation,
panic,
pop_count,
ptr_cast,
@@ -81,12 +79,10 @@ pub const Tag = enum {
set_runtime_safety,
shl_exact,
shl_with_overflow,
- shl_with_saturation,
shr_exact,
shuffle,
size_of,
splat,
- sub_with_saturation,
reduce,
src,
sqrt,
@@ -114,10 +110,19 @@ pub const Tag = enum {
Vector,
};
+pub const MemLocRequirement = enum {
+ /// The builtin never needs a memory location.
+ never,
+ /// The builtin always needs a memory location.
+ always,
+ /// The builtin forwards the question to the argument at index 1.
+ forward1,
+};
+
tag: Tag,
-/// `true` if the builtin call can take advantage of a result location pointer.
-needs_mem_loc: bool = false,
+/// Info about the builtin call's ability to take advantage of a result location pointer.
+needs_mem_loc: MemLocRequirement = .never,
/// `true` if the builtin call can be the left-hand side of an expression (assigned to).
allows_lvalue: bool = false,
/// The number of parameters to this builtin function. `null` means variable number
@@ -152,7 +157,7 @@ pub const list = list: {
"@as",
.{
.tag = .as,
- .needs_mem_loc = true,
+ .needs_mem_loc = .forward1,
.param_count = 2,
},
},
@@ -188,7 +193,7 @@ pub const list = list: {
"@bitCast",
.{
.tag = .bit_cast,
- .needs_mem_loc = true,
+ .needs_mem_loc = .forward1,
.param_count = 2,
},
},
@@ -252,7 +257,7 @@ pub const list = list: {
"@call",
.{
.tag = .call,
- .needs_mem_loc = true,
+ .needs_mem_loc = .always,
.param_count = 3,
},
},
@@ -414,7 +419,7 @@ pub const list = list: {
"@field",
.{
.tag = .field,
- .needs_mem_loc = true,
+ .needs_mem_loc = .always,
.param_count = 2,
.allows_lvalue = true,
},
@@ -531,34 +536,6 @@ pub const list = list: {
.param_count = 2,
},
},
- .{
- "@addWithSaturation",
- .{
- .tag = .add_with_saturation,
- .param_count = 2,
- },
- },
- .{
- "@subWithSaturation",
- .{
- .tag = .sub_with_saturation,
- .param_count = 2,
- },
- },
- .{
- "@mulWithSaturation",
- .{
- .tag = .mul_with_saturation,
- .param_count = 2,
- },
- },
- .{
- "@shlWithSaturation",
- .{
- .tag = .shl_with_saturation,
- .param_count = 2,
- },
- },
.{
"@memcpy",
.{
@@ -731,7 +708,6 @@ pub const list = list: {
"@splat",
.{
.tag = .splat,
- .needs_mem_loc = true,
.param_count = 2,
},
},
@@ -746,7 +722,7 @@ pub const list = list: {
"@src",
.{
.tag = .src,
- .needs_mem_loc = true,
+ .needs_mem_loc = .always,
.param_count = 0,
},
},
@@ -901,7 +877,7 @@ pub const list = list: {
"@unionInit",
.{
.tag = .union_init,
- .needs_mem_loc = true,
+ .needs_mem_loc = .always,
.param_count = 3,
},
},
diff --git a/src/Compilation.zig b/src/Compilation.zig
index eb041c7536..95337de1e1 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -812,7 +812,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
const needs_c_symbols = !options.skip_linker_dependencies and is_exe_or_dyn_lib;
- // WASI-only. Resolve the optinal exec-model option, defaults to command.
+ // WASI-only. Resolve the optional exec-model option, defaults to command.
const wasi_exec_model = if (options.target.os.tag != .wasi) undefined else options.wasi_exec_model orelse .command;
const comp: *Compilation = comp: {
@@ -849,10 +849,6 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
if (options.use_llvm) |explicit|
break :blk explicit;
- // If we have no zig code to compile, no need for LLVM.
- if (options.main_pkg == null)
- break :blk false;
-
// If we are outputting .c code we must use Zig backend.
if (ofmt == .c)
break :blk false;
@@ -861,6 +857,10 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
if (options.emit_llvm_ir != null or options.emit_llvm_bc != null)
break :blk true;
+ // If we have no zig code to compile, no need for LLVM.
+ if (options.main_pkg == null)
+ break :blk false;
+
// The stage1 compiler depends on the stage1 C++ LLVM backend
// to compile zig code.
if (use_stage1)
@@ -876,9 +876,6 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
if (options.use_llvm == true) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
- if (options.machine_code_model != .default) {
- return error.MachineCodeModelNotSupportedWithoutLlvm;
- }
if (options.emit_llvm_ir != null or options.emit_llvm_bc != null) {
return error.EmittingLlvmModuleRequiresUsingLlvmBackend;
}
@@ -1100,7 +1097,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
if (feature.llvm_name) |llvm_name| {
const plus_or_minus = "-+"[@boolToInt(is_enabled)];
- try buf.ensureCapacity(buf.items.len + 2 + llvm_name.len);
+ try buf.ensureUnusedCapacity(2 + llvm_name.len);
buf.appendAssumeCapacity(plus_or_minus);
buf.appendSliceAssumeCapacity(llvm_name);
buf.appendSliceAssumeCapacity(",");
@@ -1180,6 +1177,8 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
}
hash.add(valgrind);
hash.add(single_threaded);
+ hash.add(use_stage1);
+ hash.add(use_llvm);
hash.add(dll_export_fns);
hash.add(options.is_test);
hash.add(options.skip_linker_dependencies);
@@ -1350,7 +1349,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
var system_libs: std.StringArrayHashMapUnmanaged(void) = .{};
errdefer system_libs.deinit(gpa);
- try system_libs.ensureCapacity(gpa, options.system_libs.len);
+ try system_libs.ensureTotalCapacity(gpa, options.system_libs.len);
for (options.system_libs) |lib_name| {
system_libs.putAssumeCapacity(lib_name, {});
}
@@ -1486,7 +1485,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
errdefer comp.astgen_wait_group.deinit();
// Add a `CObject` for each `c_source_files`.
- try comp.c_object_table.ensureCapacity(gpa, options.c_source_files.len);
+ try comp.c_object_table.ensureTotalCapacity(gpa, options.c_source_files.len);
for (options.c_source_files) |c_source_file| {
const c_object = try gpa.create(CObject);
errdefer gpa.destroy(c_object);
@@ -1577,25 +1576,30 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
// also test the use case of `build-obj -fcompiler-rt` with the self-hosted compiler
// and make sure the compiler-rt symbols are emitted. Currently this is hooked up for
// stage1 but not stage2.
- if (comp.bin_file.options.use_stage1) {
- if (comp.bin_file.options.include_compiler_rt) {
- if (is_exe_or_dyn_lib) {
- try comp.work_queue.writeItem(.{ .compiler_rt_lib = {} });
- } else if (options.output_mode != .Obj) {
- // If build-obj with -fcompiler-rt is requested, that is handled specially
- // elsewhere. In this case we are making a static library, so we ask
- // for a compiler-rt object to put in it.
- try comp.work_queue.writeItem(.{ .compiler_rt_obj = {} });
- }
+ const capable_of_building_compiler_rt = comp.bin_file.options.use_stage1 or
+ comp.bin_file.options.use_llvm;
+ const capable_of_building_zig_libc = comp.bin_file.options.use_stage1 or
+ comp.bin_file.options.use_llvm;
+ const capable_of_building_ssp = comp.bin_file.options.use_stage1;
+
+ if (comp.bin_file.options.include_compiler_rt and capable_of_building_compiler_rt) {
+ if (is_exe_or_dyn_lib) {
+ try comp.work_queue.writeItem(.{ .compiler_rt_lib = {} });
+ } else if (options.output_mode != .Obj) {
+ // If build-obj with -fcompiler-rt is requested, that is handled specially
+ // elsewhere. In this case we are making a static library, so we ask
+ // for a compiler-rt object to put in it.
+ try comp.work_queue.writeItem(.{ .compiler_rt_obj = {} });
}
- if (needs_c_symbols) {
- // MinGW provides no libssp, use our own implementation.
- if (comp.getTarget().isMinGW()) {
- try comp.work_queue.writeItem(.{ .libssp = {} });
- }
- if (!comp.bin_file.options.link_libc) {
- try comp.work_queue.writeItem(.{ .zig_libc = {} });
- }
+ }
+ if (needs_c_symbols) {
+ // MinGW provides no libssp, use our own implementation.
+ if (comp.getTarget().isMinGW() and capable_of_building_ssp) {
+ try comp.work_queue.writeItem(.{ .libssp = {} });
+ }
+
+ if (!comp.bin_file.options.link_libc and capable_of_building_zig_libc) {
+ try comp.work_queue.writeItem(.{ .zig_libc = {} });
}
}
}
@@ -1647,6 +1651,9 @@ pub fn destroy(self: *Compilation) void {
if (self.compiler_rt_static_lib) |*crt_file| {
crt_file.deinit(gpa);
}
+ if (self.compiler_rt_obj) |*crt_file| {
+ crt_file.deinit(gpa);
+ }
if (self.libssp_static_lib) |*crt_file| {
crt_file.deinit(gpa);
}
@@ -1793,6 +1800,10 @@ pub fn update(self: *Compilation) !void {
}
}
+ // Flush takes care of -femit-bin, but we still have -femit-llvm-ir, -femit-llvm-bc, and
+ // -femit-asm to handle, in the case of C objects.
+ try self.emitOthers();
+
// If there are any errors, we anticipate the source files being loaded
// to report error messages. Otherwise we unload all source files to save memory.
// The ZIR needs to stay loaded in memory because (1) Decl objects contain references
@@ -1808,6 +1819,37 @@ pub fn update(self: *Compilation) !void {
}
}
+fn emitOthers(comp: *Compilation) !void {
+ if (comp.bin_file.options.output_mode != .Obj or comp.bin_file.options.module != null or
+ comp.c_object_table.count() == 0)
+ {
+ return;
+ }
+ const obj_path = comp.c_object_table.keys()[0].status.success.object_path;
+ const cwd = std.fs.cwd();
+ const ext = std.fs.path.extension(obj_path);
+ const basename = obj_path[0 .. obj_path.len - ext.len];
+ // This obj path always ends with the object file extension, but if we change the
+ // extension to .ll, .bc, or .s, then it will be the path to the corresponding emitted file.
+ const outs = [_]struct {
+ emit: ?EmitLoc,
+ ext: []const u8,
+ }{
+ .{ .emit = comp.emit_asm, .ext = ".s" },
+ .{ .emit = comp.emit_llvm_ir, .ext = ".ll" },
+ .{ .emit = comp.emit_llvm_bc, .ext = ".bc" },
+ };
+ for (outs) |out| {
+ if (out.emit) |loc| {
+ if (loc.directory) |directory| {
+ const src_path = try std.fmt.allocPrint(comp.gpa, "{s}{s}", .{ basename, out.ext });
+ defer comp.gpa.free(src_path);
+ try cwd.copyFile(src_path, directory.handle, loc.basename, .{});
+ }
+ }
+ }
+}
+
/// Having the file open for writing is problematic as far as executing the
/// binary is concerned. This will remove the write flag, or close the file,
/// or whatever is needed so that it can be executed.
@@ -2113,7 +2155,11 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
const module = self.bin_file.options.module.?;
const decl = func.owner_decl;
- var air = module.analyzeFnBody(decl, func) catch |err| switch (err) {
+ var tmp_arena = std.heap.ArenaAllocator.init(gpa);
+ defer tmp_arena.deinit();
+ const sema_arena = &tmp_arena.allocator;
+
+ var air = module.analyzeFnBody(decl, func, sema_arena) catch |err| switch (err) {
error.AnalysisFail => {
assert(func.state != .in_progress);
continue;
@@ -2175,16 +2221,20 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
const decl_emit_h = decl.getEmitH(module);
const fwd_decl = &decl_emit_h.fwd_decl;
fwd_decl.shrinkRetainingCapacity(0);
+ var typedefs_arena = std.heap.ArenaAllocator.init(gpa);
+ defer typedefs_arena.deinit();
var dg: c_codegen.DeclGen = .{
+ .gpa = gpa,
.module = module,
.error_msg = null,
.decl = decl,
.fwd_decl = fwd_decl.toManaged(gpa),
- // we don't want to emit optionals and error unions to headers since they have no ABI
- .typedefs = undefined,
+ .typedefs = c_codegen.TypedefMap.init(gpa),
+ .typedefs_arena = &typedefs_arena.allocator,
};
defer dg.fwd_decl.deinit();
+ defer dg.typedefs.deinit();
c_codegen.genHeader(&dg) catch |err| switch (err) {
error.AnalysisFail => {
@@ -2612,7 +2662,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
const dep_basename = std.fs.path.basename(out_dep_path);
try man.addDepFilePost(zig_cache_tmp_dir, dep_basename);
- try comp.stage1_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename);
+ if (build_options.is_stage1 and comp.bin_file.options.use_stage1) try comp.stage1_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename);
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
@@ -2721,16 +2771,12 @@ fn reportRetryableAstGenError(
try Module.ErrorMsg.create(
gpa,
src_loc,
- "unable to load '{'}" ++ std.fs.path.sep_str ++ "{'}': {s}",
- .{
- std.zig.fmtEscapes(dir_path),
- std.zig.fmtEscapes(file.sub_file_path),
- @errorName(err),
- },
+ "unable to load '{s}" ++ std.fs.path.sep_str ++ "{s}': {s}",
+ .{ dir_path, file.sub_file_path, @errorName(err) },
)
else
- try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{'}': {s}", .{
- std.zig.fmtEscapes(file.sub_file_path), @errorName(err),
+ try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{s}': {s}", .{
+ file.sub_file_path, @errorName(err),
});
errdefer err_msg.destroy(gpa);
@@ -2766,6 +2812,9 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
defer man.deinit();
man.hash.add(comp.clang_preprocessor_mode);
+ man.hash.addOptionalEmitLoc(comp.emit_asm);
+ man.hash.addOptionalEmitLoc(comp.emit_llvm_ir);
+ man.hash.addOptionalEmitLoc(comp.emit_llvm_bc);
try man.hashCSource(c_object.src);
@@ -2789,16 +2838,29 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
comp.bin_file.options.root_name
else
c_source_basename[0 .. c_source_basename.len - std.fs.path.extension(c_source_basename).len];
- const o_basename = try std.fmt.allocPrint(arena, "{s}{s}", .{
- o_basename_noext,
- comp.bin_file.options.object_format.fileExt(comp.bin_file.options.target.cpu.arch),
- });
+ const o_ext = comp.bin_file.options.object_format.fileExt(comp.bin_file.options.target.cpu.arch);
const digest = if (!comp.disable_c_depfile and try man.hit()) man.final() else blk: {
var argv = std.ArrayList([]const u8).init(comp.gpa);
defer argv.deinit();
- // We can't know the digest until we do the C compiler invocation, so we need a temporary filename.
+ // In case we are doing passthrough mode, we need to detect -S and -emit-llvm.
+ const out_ext = e: {
+ if (!comp.clang_passthrough_mode)
+ break :e o_ext;
+ if (comp.emit_asm != null)
+ break :e ".s";
+ if (comp.emit_llvm_ir != null)
+ break :e ".ll";
+ if (comp.emit_llvm_bc != null)
+ break :e ".bc";
+
+ break :e o_ext;
+ };
+ const o_basename = try std.fmt.allocPrint(arena, "{s}{s}", .{ o_basename_noext, out_ext });
+
+ // We can't know the digest until we do the C compiler invocation,
+ // so we need a temporary filename.
const out_obj_path = try comp.tmpFilePath(arena, o_basename);
var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{});
defer zig_cache_tmp_dir.close();
@@ -2812,15 +2874,23 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
try std.fmt.allocPrint(arena, "{s}.d", .{out_obj_path});
try comp.addCCArgs(arena, &argv, ext, out_dep_path);
- try argv.ensureCapacity(argv.items.len + 3);
+ try argv.ensureUnusedCapacity(6 + c_object.src.extra_flags.len);
switch (comp.clang_preprocessor_mode) {
.no => argv.appendSliceAssumeCapacity(&[_][]const u8{ "-c", "-o", out_obj_path }),
.yes => argv.appendSliceAssumeCapacity(&[_][]const u8{ "-E", "-o", out_obj_path }),
.stdout => argv.appendAssumeCapacity("-E"),
}
-
- try argv.append(c_object.src.src_path);
- try argv.appendSlice(c_object.src.extra_flags);
+ if (comp.clang_passthrough_mode) {
+ if (comp.emit_asm != null) {
+ argv.appendAssumeCapacity("-S");
+ } else if (comp.emit_llvm_ir != null) {
+ argv.appendSliceAssumeCapacity(&[_][]const u8{ "-emit-llvm", "-S" });
+ } else if (comp.emit_llvm_bc != null) {
+ argv.appendAssumeCapacity("-emit-llvm");
+ }
+ }
+ argv.appendAssumeCapacity(c_object.src.src_path);
+ argv.appendSliceAssumeCapacity(c_object.src.extra_flags);
if (comp.verbose_cc) {
dump_argv(argv.items);
@@ -2840,8 +2910,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
switch (term) {
.Exited => |code| {
if (code != 0) {
- // TODO https://github.com/ziglang/zig/issues/6342
- std.process.exit(1);
+ std.process.exit(code);
}
if (comp.clang_preprocessor_mode == .stdout)
std.process.exit(0);
@@ -2857,9 +2926,6 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
const stderr_reader = child.stderr.?.reader();
- // TODO https://github.com/ziglang/zig/issues/6343
- // Please uncomment and use stdout once this issue is fixed
- // const stdout = try stdout_reader.readAllAlloc(arena, std.math.maxInt(u32));
const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
const term = child.wait() catch |err| {
@@ -2909,6 +2975,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
break :blk digest;
};
+ const o_basename = try std.fmt.allocPrint(arena, "{s}{s}", .{ o_basename_noext, o_ext });
+
c_object.status = .{
.success = .{
.object_path = try comp.local_cache_directory.join(comp.gpa, &[_][]const u8{
@@ -3032,7 +3100,7 @@ pub fn addCCArgs(
// It would be really nice if there was a more compact way to communicate this info to Clang.
const all_features_list = target.cpu.arch.allFeaturesList();
- try argv.ensureCapacity(argv.items.len + all_features_list.len * 4);
+ try argv.ensureUnusedCapacity(all_features_list.len * 4);
for (all_features_list) |feature, index_usize| {
const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = target.cpu.features.isEnabled(index);
@@ -3533,7 +3601,7 @@ fn detectLibCIncludeDirs(
fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
var list = std.ArrayList([]const u8).init(arena);
- try list.ensureCapacity(4);
+ try list.ensureTotalCapacity(4);
list.appendAssumeCapacity(lci.include_dir.?);
@@ -3640,7 +3708,7 @@ fn setMiscFailure(
comptime format: []const u8,
args: anytype,
) Allocator.Error!void {
- try comp.misc_failures.ensureCapacity(comp.gpa, comp.misc_failures.count() + 1);
+ try comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1);
const msg = try std.fmt.allocPrint(comp.gpa, format, args);
comp.misc_failures.putAssumeCapacityNoClobber(tag, .{ .msg = msg });
}
@@ -3662,6 +3730,8 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Alloc
const target = comp.getTarget();
const generic_arch_name = target.cpu.arch.genericName();
const use_stage1 = build_options.is_stage1 and comp.bin_file.options.use_stage1;
+ const stage2_x86_cx16 = target.cpu.arch == .x86_64 and
+ std.Target.x86.featureSetHas(target.cpu.features, .cx16);
@setEvalBranchQuota(4000);
try buffer.writer().print(
@@ -3673,6 +3743,8 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Alloc
\\pub const zig_is_stage2 = {};
\\/// Temporary until self-hosted supports the `cpu.arch` value.
\\pub const stage2_arch: std.Target.Cpu.Arch = .{};
+ \\/// Temporary until self-hosted can call `std.Target.x86.featureSetHas` at comptime.
+ \\pub const stage2_x86_cx16 = {};
\\
\\pub const output_mode = std.builtin.OutputMode.{};
\\pub const link_mode = std.builtin.LinkMode.{};
@@ -3688,6 +3760,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Alloc
build_options.version,
!use_stage1,
std.zig.fmtId(@tagName(target.cpu.arch)),
+ stage2_x86_cx16,
std.zig.fmtId(@tagName(comp.bin_file.options.output_mode)),
std.zig.fmtId(@tagName(comp.bin_file.options.link_mode)),
comp.bin_file.options.is_test,
@@ -3912,6 +3985,7 @@ fn buildOutputFromZig(
},
.root_src_path = src_basename,
};
+ defer main_pkg.deinitTable(comp.gpa);
const root_name = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len];
const target = comp.getTarget();
const bin_basename = try std.zig.binNameAlloc(comp.gpa, .{
@@ -3970,7 +4044,7 @@ fn buildOutputFromZig(
defer if (!keep_errors) errors.deinit(sub_compilation.gpa);
if (errors.list.len != 0) {
- try comp.misc_failures.ensureCapacity(comp.gpa, comp.misc_failures.count() + 1);
+ try comp.misc_failures.ensureUnusedCapacity(comp.gpa, 1);
comp.misc_failures.putAssumeCapacityNoClobber(misc_task_tag, .{
.msg = try std.fmt.allocPrint(comp.gpa, "sub-compilation of {s} failed", .{
@tagName(misc_task_tag),
@@ -4402,7 +4476,7 @@ pub fn build_crt_file(
try sub_compilation.updateSubCompilation();
- try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
+ try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
comp.crt_files.putAssumeCapacityNoClobber(basename, .{
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(comp.gpa, &[_][]const u8{
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 5d8e3eed34..71a0414383 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -30,7 +30,7 @@ tomb_bits: []usize,
/// The main tomb bits are still used and the extra ones are starting with the lsb of the
/// value here.
special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
-/// Auxilliary data. The way this data is interpreted is determined contextually.
+/// Auxiliary data. The way this data is interpreted is determined contextually.
extra: []const u32,
/// Trailing is the set of instructions whose lifetimes end at the start of the then branch,
@@ -226,12 +226,16 @@ fn analyzeInst(
switch (inst_tags[inst]) {
.add,
.addwrap,
+ .add_sat,
.sub,
.subwrap,
+ .sub_sat,
.mul,
.mulwrap,
+ .mul_sat,
.div,
.rem,
+ .mod,
.ptr_add,
.ptr_sub,
.bit_and,
@@ -251,7 +255,14 @@ fn analyzeInst(
.ptr_elem_val,
.ptr_ptr_elem_val,
.shl,
+ .shl_exact,
+ .shl_sat,
.shr,
+ .atomic_store_unordered,
+ .atomic_store_monotonic,
+ .atomic_store_release,
+ .atomic_store_seq_cst,
+ .set_union_tag,
=> {
const o = inst_datas[inst].bin_op;
return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none });
@@ -264,12 +275,14 @@ fn analyzeInst(
.breakpoint,
.dbg_stmt,
.unreach,
+ .fence,
=> return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }),
.not,
.bitcast,
.load,
- .floatcast,
+ .fpext,
+ .fptrunc,
.intcast,
.trunc,
.optional_payload,
@@ -288,6 +301,11 @@ fn analyzeInst(
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.array_to_slice,
+ .float_to_int,
+ .int_to_float,
+ .get_union_tag,
+ .clz,
+ .ctz,
=> {
const o = inst_datas[inst].ty_op;
return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none });
@@ -344,6 +362,20 @@ fn analyzeInst(
const extra = a.air.extraData(Air.Cmpxchg, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.ptr, extra.expected_value, extra.new_value });
},
+ .atomic_load => {
+ const ptr = inst_datas[inst].atomic_load.ptr;
+ return trackOperands(a, new_set, inst, main_tomb, .{ ptr, .none, .none });
+ },
+ .atomic_rmw => {
+ const pl_op = inst_datas[inst].pl_op;
+ const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+ return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none });
+ },
+ .memset, .memcpy => {
+ const pl_op = inst_datas[inst].pl_op;
+ const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
+ return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs });
+ },
.br => {
const br = inst_datas[inst].br;
return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none });
@@ -440,7 +472,7 @@ fn analyzeInst(
}
// Now we have to correctly populate new_set.
if (new_set) |ns| {
- try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count()));
+ try ns.ensureUnusedCapacity(gpa, @intCast(u32, then_table.count() + else_table.count()));
var it = then_table.keyIterator();
while (it.next()) |key| {
_ = ns.putAssumeCapacity(key.*, {});
diff --git a/src/Module.zig b/src/Module.zig
index ec3bb2bbd3..6c790d3804 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -64,11 +64,16 @@ import_table: std.StringArrayHashMapUnmanaged(*Scope.File) = .{},
/// The set of all the generic function instantiations. This is used so that when a generic
/// function is called twice with the same comptime parameter arguments, both calls dispatch
/// to the same function.
+/// TODO: remove functions from this set when they are destroyed.
monomorphed_funcs: MonomorphedFuncsSet = .{},
-
/// The set of all comptime function calls that have been cached so that future calls
/// with the same parameters will get the same return value.
memoized_calls: MemoizedCallSet = .{},
+/// Contains the values from `@setAlignStack`. A sparse table is used here
+/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
+/// functions are many.
+/// TODO: remove functions from this set when they are destroyed.
+align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{},
/// We optimize memory usage for a compilation with no compile errors by storing the
/// error messages and mapping outside of `Decl`.
@@ -215,6 +220,13 @@ pub const MemoizedCall = struct {
}
};
+pub const SetAlignStack = struct {
+ alignment: u32,
+ /// TODO: This needs to store a non-lazy source location for the case of an inline function
+ /// which does `@setAlignStack` (applying it to the caller).
+ src: LazySrcLoc,
+};
+
/// A `Module` has zero or one of these depending on whether `-femit-h` is enabled.
pub const GlobalEmitH = struct {
/// Where to put the output.
@@ -236,6 +248,9 @@ pub const Export = struct {
link: link.File.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: *Decl,
+ /// The Decl containing the export statement. Inline function calls
+ /// may cause this to be different from the owner_decl.
+ src_decl: *Decl,
/// The Decl being exported. Note this is *not* the Decl performing the export.
exported_decl: *Decl,
status: enum {
@@ -249,8 +264,8 @@ pub const Export = struct {
pub fn getSrcLoc(exp: Export) SrcLoc {
return .{
- .file_scope = exp.owner_decl.namespace.file_scope,
- .parent_decl_node = exp.owner_decl.src_node,
+ .file_scope = exp.src_decl.namespace.file_scope,
+ .parent_decl_node = exp.src_decl.src_node,
.lazy = exp.src,
};
}
@@ -263,6 +278,56 @@ pub const DeclPlusEmitH = struct {
emit_h: EmitH,
};
+pub const CaptureScope = struct {
+ parent: ?*CaptureScope,
+
+ /// Values from this decl's evaluation that will be closed over in
+ /// child decls. Values stored in the value_arena of the linked decl.
+ /// During sema, this map is backed by the gpa. Once sema completes,
+ /// it is reallocated using the value_arena.
+ captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, TypedValue) = .{},
+};
+
+pub const WipCaptureScope = struct {
+ scope: *CaptureScope,
+ finalized: bool,
+ gpa: *Allocator,
+ perm_arena: *Allocator,
+
+ pub fn init(gpa: *Allocator, perm_arena: *Allocator, parent: ?*CaptureScope) !@This() {
+ const scope = try perm_arena.create(CaptureScope);
+ scope.* = .{ .parent = parent };
+ return @This(){
+ .scope = scope,
+ .finalized = false,
+ .gpa = gpa,
+ .perm_arena = perm_arena,
+ };
+ }
+
+ pub fn finalize(noalias self: *@This()) !void {
+ assert(!self.finalized);
+ // use a temp to avoid unintentional aliasing due to RLS
+ const tmp = try self.scope.captures.clone(self.perm_arena);
+ self.scope.captures = tmp;
+ self.finalized = true;
+ }
+
+ pub fn reset(noalias self: *@This(), parent: ?*CaptureScope) !void {
+ if (!self.finalized) try self.finalize();
+ self.scope = try self.perm_arena.create(CaptureScope);
+ self.scope.* = .{ .parent = parent };
+ self.finalized = false;
+ }
+
+ pub fn deinit(noalias self: *@This()) void {
+ if (!self.finalized) {
+ self.scope.captures.deinit(self.gpa);
+ }
+ self.* = undefined;
+ }
+};
+
pub const Decl = struct {
/// Allocated with Module's allocator; outlives the ZIR code.
name: [*:0]const u8,
@@ -276,7 +341,9 @@ pub const Decl = struct {
align_val: Value,
/// Populated when `has_tv`.
linksection_val: Value,
- /// The memory for ty, val, align_val, linksection_val.
+ /// Populated when `has_tv`.
+ @"addrspace": std.builtin.AddressSpace,
+ /// The memory for ty, val, align_val, linksection_val, and captures.
/// If this is `null` then there is no memory management needed.
value_arena: ?*std.heap.ArenaAllocator.State = null,
/// The direct parent namespace of the Decl.
@@ -285,6 +352,11 @@ pub const Decl = struct {
/// the namespace of the struct, since there is no parent.
namespace: *Scope.Namespace,
+ /// The scope which lexically contains this decl. A decl must depend
+ /// on its lexical parent, in order to ensure that this pointer is valid.
+ /// This scope is allocated out of the arena of the parent decl.
+ src_scope: ?*CaptureScope,
+
/// An integer that can be checked against the corresponding incrementing
/// generation field of Module. This is used to determine whether `complete` status
/// represents pre- or post- re-analysis.
@@ -339,7 +411,7 @@ pub const Decl = struct {
/// to require re-analysis.
outdated,
},
- /// Whether `typed_value`, `align_val`, and `linksection_val` are populated.
+ /// Whether `typed_value`, `align_val`, `linksection_val` and `addrspace` are populated.
has_tv: bool,
/// If `true` it means the `Decl` is the resource owner of the type/value associated
/// with it. That means when `Decl` is destroyed, the cleanup code should additionally
@@ -354,8 +426,8 @@ pub const Decl = struct {
is_exported: bool,
/// Whether the ZIR code provides an align instruction.
has_align: bool,
- /// Whether the ZIR code provides a linksection instruction.
- has_linksection: bool,
+ /// Whether the ZIR code provides linksection and address space instructions.
+ has_linksection_or_addrspace: bool,
/// Flag used by garbage collection to mark and sweep.
/// Decls which correspond to an AST node always have this field set to `true`.
/// Anonymous Decls are initialized with this field set to `false` and then it
@@ -477,14 +549,22 @@ pub const Decl = struct {
if (!decl.has_align) return .none;
assert(decl.zir_decl_index != 0);
const zir = decl.namespace.file_scope.zir;
- return @intToEnum(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 6]);
+ return @intToEnum(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 7]);
}
pub fn zirLinksectionRef(decl: Decl) Zir.Inst.Ref {
- if (!decl.has_linksection) return .none;
+ if (!decl.has_linksection_or_addrspace) return .none;
assert(decl.zir_decl_index != 0);
const zir = decl.namespace.file_scope.zir;
- const extra_index = decl.zir_decl_index + 6 + @boolToInt(decl.has_align);
+ const extra_index = decl.zir_decl_index + 7 + @boolToInt(decl.has_align);
+ return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
+ }
+
+ pub fn zirAddrspaceRef(decl: Decl) Zir.Inst.Ref {
+ if (!decl.has_linksection_or_addrspace) return .none;
+ assert(decl.zir_decl_index != 0);
+ const zir = decl.namespace.file_scope.zir;
+ const extra_index = decl.zir_decl_index + 7 + @boolToInt(decl.has_align) + 1;
return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
}
@@ -542,11 +622,11 @@ pub const Decl = struct {
return decl.namespace.renderFullyQualifiedName(unqualified_name, writer);
}
- pub fn getFullyQualifiedName(decl: Decl, gpa: *Allocator) ![]u8 {
+ pub fn getFullyQualifiedName(decl: Decl, gpa: *Allocator) ![:0]u8 {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
try decl.renderFullyQualifiedName(buffer.writer());
- return buffer.toOwnedSlice();
+ return buffer.toOwnedSliceSentinel(0);
}
pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue {
@@ -588,7 +668,7 @@ pub const Decl = struct {
/// If the Decl has a value and it is a function, return it,
/// otherwise null.
- pub fn getFunction(decl: *Decl) ?*Fn {
+ pub fn getFunction(decl: *const Decl) ?*Fn {
if (!decl.owns_tv) return null;
const func = (decl.val.castTag(.function) orelse return null).data;
assert(func.owner_decl == decl);
@@ -733,7 +813,7 @@ pub const Struct = struct {
is_comptime: bool,
};
- pub fn getFullyQualifiedName(s: *Struct, gpa: *Allocator) ![]u8 {
+ pub fn getFullyQualifiedName(s: *Struct, gpa: *Allocator) ![:0]u8 {
return s.owner_decl.getFullyQualifiedName(gpa);
}
@@ -779,6 +859,36 @@ pub const EnumSimple = struct {
}
};
+/// Represents the data that an enum declaration provides, when there are no
+/// declarations. However an integer tag type is provided, and the enum tag values
+/// are explicitly provided.
+pub const EnumNumbered = struct {
+ /// The Decl that corresponds to the enum itself.
+ owner_decl: *Decl,
+ /// An integer type which is used for the numerical value of the enum.
+ /// Whether zig chooses this type or the user specifies it, it is stored here.
+ tag_ty: Type,
+ /// Set of field names in declaration order.
+ fields: NameMap,
+ /// Maps integer tag value to field index.
+ /// Entries are in declaration order, same as `fields`.
+ /// If this hash map is empty, it means the enum tags are auto-numbered.
+ values: ValueMap,
+ /// Offset from `owner_decl`, points to the enum decl AST node.
+ node_offset: i32,
+
+ pub const NameMap = EnumFull.NameMap;
+ pub const ValueMap = EnumFull.ValueMap;
+
+ pub fn srcLoc(self: EnumNumbered) SrcLoc {
+ return .{
+ .file_scope = self.owner_decl.getFileScope(),
+ .parent_decl_node = self.owner_decl.src_node,
+ .lazy = .{ .node_offset = self.node_offset },
+ };
+ }
+};
+
/// Represents the data that an enum declaration provides, when there is
/// at least one tag value explicitly specified, or at least one declaration.
pub const EnumFull = struct {
@@ -788,16 +898,17 @@ pub const EnumFull = struct {
/// Whether zig chooses this type or the user specifies it, it is stored here.
tag_ty: Type,
/// Set of field names in declaration order.
- fields: std.StringArrayHashMapUnmanaged(void),
+ fields: NameMap,
/// Maps integer tag value to field index.
/// Entries are in declaration order, same as `fields`.
/// If this hash map is empty, it means the enum tags are auto-numbered.
values: ValueMap,
- /// Represents the declarations inside this struct.
+ /// Represents the declarations inside this enum.
namespace: Scope.Namespace,
/// Offset from `owner_decl`, points to the enum decl AST node.
node_offset: i32,
+ pub const NameMap = std.StringArrayHashMapUnmanaged(void);
pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false);
pub fn srcLoc(self: EnumFull) SrcLoc {
@@ -853,6 +964,44 @@ pub const Union = struct {
.lazy = .{ .node_offset = self.node_offset },
};
}
+
+ pub fn haveFieldTypes(u: Union) bool {
+ return switch (u.status) {
+ .none,
+ .field_types_wip,
+ => false,
+ .have_field_types,
+ .layout_wip,
+ .have_layout,
+ => true,
+ };
+ }
+
+ pub fn onlyTagHasCodegenBits(u: Union) bool {
+ assert(u.haveFieldTypes());
+ for (u.fields.values()) |field| {
+ if (field.ty.hasCodeGenBits()) return false;
+ }
+ return true;
+ }
+
+ pub fn mostAlignedField(u: Union, target: Target) u32 {
+ assert(u.haveFieldTypes());
+ var most_alignment: u64 = 0;
+ var most_index: usize = undefined;
+ for (u.fields.values()) |field, i| {
+ if (!field.ty.hasCodeGenBits()) continue;
+ const field_align = if (field.abi_align.tag() == .abi_align_default)
+ field.ty.abiAlignment(target)
+ else
+ field.abi_align.toUnsignedInt();
+ if (field_align > most_alignment) {
+ most_alignment = field_align;
+ most_index = i;
+ }
+ }
+ return @intCast(u32, most_index);
+ }
};
/// Some Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
@@ -881,6 +1030,7 @@ pub const Fn = struct {
state: Analysis,
is_cold: bool = false,
+ is_noinline: bool = false,
pub const Analysis = enum {
queued,
@@ -936,14 +1086,7 @@ pub const Scope = struct {
return @fieldParentPtr(T, "base", base);
}
- pub fn ownerDecl(scope: *Scope) ?*Decl {
- return switch (scope.tag) {
- .block => scope.cast(Block).?.sema.owner_decl,
- .file => null,
- .namespace => null,
- };
- }
-
+ /// Get the decl which contains this decl, for the purposes of source reporting.
pub fn srcDecl(scope: *Scope) ?*Decl {
return switch (scope.tag) {
.block => scope.cast(Block).?.src_decl,
@@ -952,6 +1095,15 @@ pub const Scope = struct {
};
}
+ /// Get the scope which contains this decl, for resolving closure_get instructions.
+ pub fn srcScope(scope: *Scope) ?*CaptureScope {
+ return switch (scope.tag) {
+ .block => scope.cast(Block).?.wip_capture_scope,
+ .file => null,
+ .namespace => scope.cast(Namespace).?.getDecl().src_scope,
+ };
+ }
+
/// Asserts the scope has a parent which is a Namespace and returns it.
pub fn namespace(scope: *Scope) *Namespace {
switch (scope.tag) {
@@ -1288,6 +1440,9 @@ pub const Scope = struct {
instructions: ArrayListUnmanaged(Air.Inst.Index),
// `param` instructions are collected here to be used by the `func` instruction.
params: std.ArrayListUnmanaged(Param) = .{},
+
+ wip_capture_scope: *CaptureScope,
+
label: ?*Label = null,
inlining: ?*Inlining,
/// If runtime_index is not 0 then one of these is guaranteed to be non null.
@@ -1302,6 +1457,8 @@ pub const Scope = struct {
/// when null, it is determined by build mode, changed by @setRuntimeSafety
want_safety: ?bool = null,
+ c_import_buf: ?*std.ArrayList(u8) = null,
+
const Param = struct {
/// `noreturn` means `anytype`.
ty: Type,
@@ -1347,6 +1504,7 @@ pub const Scope = struct {
.sema = parent.sema,
.src_decl = parent.src_decl,
.instructions = .{},
+ .wip_capture_scope = parent.wip_capture_scope,
.label = null,
.inlining = parent.inlining,
.is_comptime = parent.is_comptime,
@@ -1354,6 +1512,7 @@ pub const Scope = struct {
.runtime_loop = parent.runtime_loop,
.runtime_index = parent.runtime_index,
.want_safety = parent.want_safety,
+ .c_import_buf = parent.c_import_buf,
};
}
@@ -1453,6 +1612,40 @@ pub const Scope = struct {
});
}
+ pub fn addStructFieldPtr(
+ block: *Block,
+ struct_ptr: Air.Inst.Ref,
+ field_index: u32,
+ ptr_field_ty: Type,
+ ) !Air.Inst.Ref {
+ const ty = try block.sema.addType(ptr_field_ty);
+ const tag: Air.Inst.Tag = switch (field_index) {
+ 0 => .struct_field_ptr_index_0,
+ 1 => .struct_field_ptr_index_1,
+ 2 => .struct_field_ptr_index_2,
+ 3 => .struct_field_ptr_index_3,
+ else => {
+ return block.addInst(.{
+ .tag = .struct_field_ptr,
+ .data = .{ .ty_pl = .{
+ .ty = ty,
+ .payload = try block.sema.addExtra(Air.StructField{
+ .struct_operand = struct_ptr,
+ .field_index = @intCast(u32, field_index),
+ }),
+ } },
+ });
+ },
+ };
+ return block.addInst(.{
+ .tag = tag,
+ .data = .{ .ty_op = .{
+ .ty = ty,
+ .operand = struct_ptr,
+ } },
+ });
+ }
+
pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
return Air.indexToRef(try block.addInstAsIndex(inst));
}
@@ -2092,39 +2285,39 @@ pub const LazySrcLoc = union(enum) {
node_offset_bin_op: i32,
/// The source location points to the LHS of a binary expression, found
/// by taking this AST node index offset from the containing Decl AST node,
- /// which points to a binary expression AST node. Next, nagivate to the LHS.
+ /// which points to a binary expression AST node. Next, navigate to the LHS.
/// The Decl is determined contextually.
node_offset_bin_lhs: i32,
/// The source location points to the RHS of a binary expression, found
/// by taking this AST node index offset from the containing Decl AST node,
- /// which points to a binary expression AST node. Next, nagivate to the RHS.
+ /// which points to a binary expression AST node. Next, navigate to the RHS.
/// The Decl is determined contextually.
node_offset_bin_rhs: i32,
/// The source location points to the operand of a switch expression, found
/// by taking this AST node index offset from the containing Decl AST node,
- /// which points to a switch expression AST node. Next, nagivate to the operand.
+ /// which points to a switch expression AST node. Next, navigate to the operand.
/// The Decl is determined contextually.
node_offset_switch_operand: i32,
/// The source location points to the else/`_` prong of a switch expression, found
/// by taking this AST node index offset from the containing Decl AST node,
- /// which points to a switch expression AST node. Next, nagivate to the else/`_` prong.
+ /// which points to a switch expression AST node. Next, navigate to the else/`_` prong.
/// The Decl is determined contextually.
node_offset_switch_special_prong: i32,
/// The source location points to all the ranges of a switch expression, found
/// by taking this AST node index offset from the containing Decl AST node,
- /// which points to a switch expression AST node. Next, nagivate to any of the
+ /// which points to a switch expression AST node. Next, navigate to any of the
/// range nodes. The error applies to all of them.
/// The Decl is determined contextually.
node_offset_switch_range: i32,
/// The source location points to the calling convention of a function type
/// expression, found by taking this AST node index offset from the containing
- /// Decl AST node, which points to a function type AST node. Next, nagivate to
+ /// Decl AST node, which points to a function type AST node. Next, navigate to
/// the calling convention node.
/// The Decl is determined contextually.
node_offset_fn_type_cc: i32,
/// The source location points to the return type of a function type
/// expression, found by taking this AST node index offset from the containing
- /// Decl AST node, which points to a function type AST node. Next, nagivate to
+ /// Decl AST node, which points to a function type AST node. Next, navigate to
/// the return type node.
/// The Decl is determined contextually.
node_offset_fn_type_ret_ty: i32,
@@ -2347,6 +2540,7 @@ pub fn deinit(mod: *Module) void {
mod.error_name_list.deinit(gpa);
mod.test_functions.deinit(gpa);
+ mod.align_stack_fns.deinit(gpa);
mod.monomorphed_funcs.deinit(gpa);
{
@@ -2362,6 +2556,7 @@ pub fn deinit(mod: *Module) void {
fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
for (export_list) |exp| {
gpa.free(exp.options.name);
+ if (exp.options.section) |s| gpa.free(s);
gpa.destroy(exp);
}
gpa.free(export_list);
@@ -2372,7 +2567,7 @@ const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;
// We need a better language feature for initializing a union with
// a runtime known tag.
const Stage1DataLayout = extern struct {
- data: [8]u8 align(8),
+ data: [8]u8 align(@alignOf(Zir.Inst.Data)),
safety_tag: u8,
};
comptime {
@@ -2873,12 +3068,10 @@ pub fn mapOldZirToNew(
var match_stack: std.ArrayListUnmanaged(MatchedZirDecl) = .{};
defer match_stack.deinit(gpa);
- const old_main_struct_inst = old_zir.getMainStruct();
- const new_main_struct_inst = new_zir.getMainStruct();
-
+ // Main struct inst is always the same
try match_stack.append(gpa, .{
- .old_inst = old_main_struct_inst,
- .new_inst = new_main_struct_inst,
+ .old_inst = Zir.main_struct_inst,
+ .new_inst = Zir.main_struct_inst,
});
var old_decls = std.ArrayList(Zir.Inst.Index).init(gpa);
@@ -3036,6 +3229,7 @@ pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void {
const struct_obj = try new_decl_arena.allocator.create(Module.Struct);
const struct_ty = try Type.Tag.@"struct".create(&new_decl_arena.allocator, struct_obj);
const struct_val = try Value.Tag.ty.create(&new_decl_arena.allocator, struct_ty);
+ const ty_ty = comptime Type.initTag(.type);
struct_obj.* = .{
.owner_decl = undefined, // set below
.fields = .{},
@@ -3050,7 +3244,7 @@ pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void {
.file_scope = file,
},
};
- const new_decl = try mod.allocateNewDecl(&struct_obj.namespace, 0);
+ const new_decl = try mod.allocateNewDecl(&struct_obj.namespace, 0, null);
file.root_decl = new_decl;
struct_obj.owner_decl = new_decl;
new_decl.src_line = 0;
@@ -3058,8 +3252,8 @@ pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void {
new_decl.is_pub = true;
new_decl.is_exported = false;
new_decl.has_align = false;
- new_decl.has_linksection = false;
- new_decl.ty = struct_ty;
+ new_decl.has_linksection_or_addrspace = false;
+ new_decl.ty = ty_ty;
new_decl.val = struct_val;
new_decl.has_tv = true;
new_decl.owns_tv = true;
@@ -3069,7 +3263,7 @@ pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void {
if (file.status == .success_zir) {
assert(file.zir_loaded);
- const main_struct_inst = file.zir.getMainStruct();
+ const main_struct_inst = Zir.main_struct_inst;
struct_obj.zir_index = main_struct_inst;
var sema_arena = std.heap.ArenaAllocator.init(gpa);
@@ -3079,6 +3273,7 @@ pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void {
.mod = mod,
.gpa = gpa,
.arena = &sema_arena.allocator,
+ .perm_arena = &new_decl_arena.allocator,
.code = file.zir,
.owner_decl = new_decl,
.namespace = &struct_obj.namespace,
@@ -3087,10 +3282,15 @@ pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void {
.owner_func = null,
};
defer sema.deinit();
+
+ var wip_captures = try WipCaptureScope.init(gpa, &new_decl_arena.allocator, null);
+ defer wip_captures.deinit();
+
var block_scope: Scope.Block = .{
.parent = null,
.sema = &sema,
.src_decl = new_decl,
+ .wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -3098,6 +3298,7 @@ pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void {
defer block_scope.instructions.deinit(gpa);
if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_obj)) |_| {
+ try wip_captures.finalize();
new_decl.analysis = .complete;
} else |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@@ -3127,6 +3328,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.analysis = .in_progress;
+ // We need the memory for the Type to go into the arena for the Decl
+ var decl_arena = std.heap.ArenaAllocator.init(gpa);
+ errdefer decl_arena.deinit();
+
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
@@ -3134,6 +3339,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
.mod = mod,
.gpa = gpa,
.arena = &analysis_arena.allocator,
+ .perm_arena = &decl_arena.allocator,
.code = zir,
.owner_decl = decl,
.namespace = decl.namespace,
@@ -3145,7 +3351,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
if (decl.isRoot()) {
log.debug("semaDecl root {*} ({s})", .{ decl, decl.name });
- const main_struct_inst = zir.getMainStruct();
+ const main_struct_inst = Zir.main_struct_inst;
const struct_obj = decl.getStruct().?;
// This might not have gotten set in `semaFile` if the first time had
// a ZIR failure, so we set it here in case.
@@ -3157,10 +3363,14 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
}
log.debug("semaDecl {*} ({s})", .{ decl, decl.name });
+ var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope);
+ defer wip_captures.deinit();
+
var block_scope: Scope.Block = .{
.parent = null,
.sema = &sema,
.src_decl = decl,
+ .wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -3175,6 +3385,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
const extra = zir.extraData(Zir.Inst.Block, inst_data.payload_index);
const body = zir.extra[extra.end..][0..extra.data.body_len];
const break_index = try sema.analyzeBody(&block_scope, body);
+ try wip_captures.finalize();
const result_ref = zir_datas[break_index].@"break".operand;
const src: LazySrcLoc = .{ .node_offset = 0 };
const decl_tv = try sema.resolveInstValue(&block_scope, src, result_ref);
@@ -3188,14 +3399,29 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
if (linksection_ref == .none) break :blk Value.initTag(.null_value);
break :blk (try sema.resolveInstConst(&block_scope, src, linksection_ref)).val;
};
+ const address_space = blk: {
+ const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.tag()) {
+ .function, .extern_fn => .function,
+ .variable => .variable,
+ else => .constant,
+ };
+
+ break :blk switch (decl.zirAddrspaceRef()) {
+ .none => switch (addrspace_ctx) {
+ .function => target_util.defaultAddressSpace(sema.mod.getTarget(), .function),
+ .variable => target_util.defaultAddressSpace(sema.mod.getTarget(), .global_mutable),
+ .constant => target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant),
+ else => unreachable,
+ },
+ else => |addrspace_ref| try sema.analyzeAddrspace(&block_scope, src, addrspace_ref, addrspace_ctx),
+ };
+ };
+
// Note this resolves the type of the Decl, not the value; if this Decl
// is a struct, for example, this resolves `type` (which needs no resolution),
// not the struct itself.
try sema.resolveTypeLayout(&block_scope, src, decl_tv.ty);
- // We need the memory for the Type to go into the arena for the Decl
- var decl_arena = std.heap.ArenaAllocator.init(gpa);
- errdefer decl_arena.deinit();
const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
if (decl.is_usingnamespace) {
@@ -3244,6 +3470,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.val = try decl_tv.val.copy(&decl_arena.allocator);
decl.align_val = try align_val.copy(&decl_arena.allocator);
decl.linksection_val = try linksection_val.copy(&decl_arena.allocator);
+ decl.@"addrspace" = address_space;
decl.has_tv = true;
decl.owns_tv = owns_tv;
decl_arena_state.* = decl_arena.state;
@@ -3271,7 +3498,8 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
return mod.fail(&block_scope.base, export_src, "export of inline function", .{});
}
// The scope needs to have the decl in it.
- try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl);
+ const options: std.builtin.ExportOptions = .{ .name = mem.spanZ(decl.name) };
+ try mod.analyzeExport(&block_scope, export_src, options, decl);
}
return type_changed or is_inline != prev_is_inline;
}
@@ -3305,6 +3533,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.val = try decl_tv.val.copy(&decl_arena.allocator);
decl.align_val = try align_val.copy(&decl_arena.allocator);
decl.linksection_val = try linksection_val.copy(&decl_arena.allocator);
+ decl.@"addrspace" = address_space;
decl.has_tv = true;
decl_arena_state.* = decl_arena.state;
decl.value_arena = decl_arena_state;
@@ -3329,7 +3558,8 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
if (decl.is_exported) {
const export_src = src; // TODO point to the export token
// The scope needs to have the decl in it.
- try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl);
+ const options: std.builtin.ExportOptions = .{ .name = mem.spanZ(decl.name) };
+ try mod.analyzeExport(&block_scope, export_src, options, decl);
}
return type_changed;
@@ -3490,7 +3720,7 @@ pub fn scanNamespace(
const zir = namespace.file_scope.zir;
try mod.comp.work_queue.ensureUnusedCapacity(decls_len);
- try namespace.decls.ensureCapacity(gpa, decls_len);
+ try namespace.decls.ensureTotalCapacity(gpa, decls_len);
const bit_bags_count = std.math.divCeil(usize, decls_len, 8) catch unreachable;
var extra_index = extra_start + bit_bags_count;
@@ -3512,8 +3742,8 @@ pub fn scanNamespace(
const decl_sub_index = extra_index;
extra_index += 7; // src_hash(4) + line(1) + name(1) + value(1)
- extra_index += @truncate(u1, flags >> 2);
- extra_index += @truncate(u1, flags >> 3);
+ extra_index += @truncate(u1, flags >> 2); // Align
+ extra_index += @as(u2, @truncate(u1, flags >> 3)) * 2; // Link section or address space, which consists of 2 Refs
try scanDecl(&scan_decl_iter, decl_sub_index, flags);
}
@@ -3539,10 +3769,10 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
const zir = namespace.file_scope.zir;
// zig fmt: off
- const is_pub = (flags & 0b0001) != 0;
- const export_bit = (flags & 0b0010) != 0;
- const has_align = (flags & 0b0100) != 0;
- const has_linksection = (flags & 0b1000) != 0;
+ const is_pub = (flags & 0b0001) != 0;
+ const export_bit = (flags & 0b0010) != 0;
+ const has_align = (flags & 0b0100) != 0;
+ const has_linksection_or_addrspace = (flags & 0b1000) != 0;
// zig fmt: on
const line = iter.parent_decl.relativeToLine(zir.extra[decl_sub_index + 4]);
@@ -3588,7 +3818,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
// We create a Decl for it regardless of analysis status.
const gop = try namespace.decls.getOrPut(gpa, decl_name);
if (!gop.found_existing) {
- const new_decl = try mod.allocateNewDecl(namespace, decl_node);
+ const new_decl = try mod.allocateNewDecl(namespace, decl_node, iter.parent_decl.src_scope);
if (is_usingnamespace) {
namespace.usingnamespace_set.putAssumeCapacity(new_decl, is_pub);
}
@@ -3625,7 +3855,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
new_decl.is_exported = is_exported;
new_decl.is_usingnamespace = is_usingnamespace;
new_decl.has_align = has_align;
- new_decl.has_linksection = has_linksection;
+ new_decl.has_linksection_or_addrspace = has_linksection_or_addrspace;
new_decl.zir_decl_index = @intCast(u32, decl_sub_index);
new_decl.alive = true; // This Decl corresponds to an AST node and therefore always alive.
return;
@@ -3642,7 +3872,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
decl.is_exported = is_exported;
decl.is_usingnamespace = is_usingnamespace;
decl.has_align = has_align;
- decl.has_linksection = has_linksection;
+ decl.has_linksection_or_addrspace = has_linksection_or_addrspace;
decl.zir_decl_index = @intCast(u32, decl_sub_index);
if (decl.getFunction()) |_| {
switch (mod.comp.bin_file.tag) {
@@ -3660,7 +3890,9 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
},
.plan9 => {
- // TODO implement for plan9
+ // TODO Look into detecting when this would be unnecessary by storing enough state
+ // in `Decl` to notice that the line number did not change.
+ mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
},
.c, .wasm, .spirv => {},
}
@@ -3706,13 +3938,6 @@ pub fn clearDecl(
dep.removeDependency(decl);
if (outdated_decls) |map| {
map.putAssumeCapacity(dep, {});
- } else if (std.debug.runtime_safety) {
- // If `outdated_decls` is `null`, it means we're being called from
- // `Compilation` after `performAllTheWork` and we cannot queue up any
- // more work. `dep` must necessarily be another Decl that is no longer
- // being referenced, and will be in the `deletion_set`. Otherwise,
- // something has gone wrong.
- assert(mod.deletion_set.contains(dep));
}
}
decl.dependants.clearRetainingCapacity();
@@ -3740,7 +3965,7 @@ pub fn clearDecl(
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
- .c => .{ .c = link.File.C.DeclBlock.empty },
+ .c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
.spirv => .{ .spirv = {} },
};
@@ -3749,7 +3974,7 @@ pub fn clearDecl(
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
.macho => .{ .macho = link.File.MachO.SrcFn.empty },
.plan9 => .{ .plan9 = {} },
- .c => .{ .c = link.File.C.FnBlock.empty },
+ .c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
.spirv => .{ .spirv = .{} },
};
@@ -3779,10 +4004,13 @@ pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void {
// about the Decl in the first place.
// Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we
// must call `freeDecl` in the linker backend now.
- if (decl.has_tv) {
- if (decl.ty.hasCodeGenBits()) {
- mod.comp.bin_file.freeDecl(decl);
- }
+ switch (mod.comp.bin_file.tag) {
+ .c => {}, // this linker backend has already migrated to the new API
+ else => if (decl.has_tv) {
+ if (decl.ty.hasCodeGenBits()) {
+ mod.comp.bin_file.freeDecl(decl);
+ }
+ },
}
const dependants = decl.dependants.keys();
@@ -3800,7 +4028,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void {
pub fn deleteAnonDecl(mod: *Module, scope: *Scope, decl: *Decl) void {
log.debug("deleteAnonDecl {*} ({s})", .{ decl, decl.name });
- const scope_decl = scope.ownerDecl().?;
+ const scope_decl = scope.srcDecl().?;
assert(scope_decl.namespace.anon_decls.swapRemove(decl));
decl.destroy(mod);
}
@@ -3844,22 +4072,21 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void {
mod.gpa.free(kv.value);
}
-pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air {
+pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: *Allocator) SemaError!Air {
const tracy = trace(@src());
defer tracy.end();
const gpa = mod.gpa;
- // Use the Decl's arena for function memory.
- var arena = decl.value_arena.?.promote(gpa);
- defer decl.value_arena.?.* = arena.state;
-
- const fn_ty = decl.ty;
+ // Use the Decl's arena for captured values.
+ var decl_arena = decl.value_arena.?.promote(gpa);
+ defer decl.value_arena.?.* = decl_arena.state;
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena,
+ .perm_arena = &decl_arena.allocator,
.code = decl.namespace.file_scope.zir,
.owner_decl = decl,
.namespace = decl.namespace,
@@ -3874,10 +4101,14 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air {
try sema.air_extra.ensureTotalCapacity(gpa, reserved_count);
sema.air_extra.items.len += reserved_count;
+ var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope);
+ defer wip_captures.deinit();
+
var inner_block: Scope.Block = .{
.parent = null,
.sema = &sema,
.src_decl = decl,
+ .wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = false,
@@ -3893,6 +4124,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air {
// This could be a generic function instantiation, however, in which case we need to
// map the comptime parameters to constant values and only emit arg AIR instructions
// for the runtime ones.
+ const fn_ty = decl.ty;
const runtime_params_len = @intCast(u32, fn_ty.fnParamLen());
try inner_block.instructions.ensureTotalCapacity(gpa, runtime_params_len);
try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType`
@@ -3952,6 +4184,8 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air {
else => |e| return e,
};
+ try wip_captures.finalize();
+
// Copy the block into place and mark that as the main block.
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
inner_block.instructions.items.len);
@@ -3977,6 +4211,12 @@ fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
if (mod.failed_decls.fetchSwapRemove(decl)) |kv| {
kv.value.destroy(mod.gpa);
}
+ if (decl.has_tv and decl.owns_tv) {
+ if (decl.val.castTag(.function)) |payload| {
+ const func = payload.data;
+ _ = mod.align_stack_fns.remove(func);
+ }
+ }
if (mod.emit_h) |emit_h| {
if (emit_h.failed_decls.fetchSwapRemove(decl)) |kv| {
kv.value.destroy(mod.gpa);
@@ -3986,7 +4226,7 @@ fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
decl.analysis = .outdated;
}
-pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: Ast.Node.Index) !*Decl {
+pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: Ast.Node.Index, src_scope: ?*CaptureScope) !*Decl {
// If we have emit-h then we must allocate a bigger structure to store the emit-h state.
const new_decl: *Decl = if (mod.emit_h != null) blk: {
const parent_struct = try mod.gpa.create(DeclPlusEmitH);
@@ -4008,15 +4248,17 @@ pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: Ast.
.val = undefined,
.align_val = undefined,
.linksection_val = undefined,
+ .@"addrspace" = undefined,
.analysis = .unreferenced,
.deletion_flag = false,
.zir_decl_index = 0,
+ .src_scope = src_scope,
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.TextBlock.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
- .c => .{ .c = link.File.C.DeclBlock.empty },
+ .c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
.spirv => .{ .spirv = {} },
},
@@ -4025,18 +4267,19 @@ pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: Ast.
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
.macho => .{ .macho = link.File.MachO.SrcFn.empty },
.plan9 => .{ .plan9 = {} },
- .c => .{ .c = link.File.C.FnBlock.empty },
+ .c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
.spirv => .{ .spirv = .{} },
},
.generation = 0,
.is_pub = false,
.is_exported = false,
- .has_linksection = false,
+ .has_linksection_or_addrspace = false,
.has_align = false,
.alive = false,
.is_usingnamespace = false,
};
+
return new_decl;
}
@@ -4051,7 +4294,7 @@ pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged
}
errdefer assert(mod.global_error_set.remove(name));
- try mod.error_name_list.ensureCapacity(mod.gpa, mod.error_name_list.items.len + 1);
+ try mod.error_name_list.ensureUnusedCapacity(mod.gpa, 1);
gop.key_ptr.* = try mod.gpa.dupe(u8, name);
gop.value_ptr.* = @intCast(ErrorInt, mod.error_name_list.items.len);
mod.error_name_list.appendAssumeCapacity(gop.key_ptr.*);
@@ -4063,34 +4306,44 @@ pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged
pub fn analyzeExport(
mod: *Module,
- scope: *Scope,
+ block: *Scope.Block,
src: LazySrcLoc,
- borrowed_symbol_name: []const u8,
+ borrowed_options: std.builtin.ExportOptions,
exported_decl: *Decl,
) !void {
try mod.ensureDeclAnalyzed(exported_decl);
switch (exported_decl.ty.zigTypeTag()) {
.Fn => {},
- else => return mod.fail(scope, src, "unable to export type '{}'", .{exported_decl.ty}),
+ else => return mod.fail(&block.base, src, "unable to export type '{}'", .{exported_decl.ty}),
}
- try mod.decl_exports.ensureUnusedCapacity(mod.gpa, 1);
- try mod.export_owners.ensureUnusedCapacity(mod.gpa, 1);
+ const gpa = mod.gpa;
- const new_export = try mod.gpa.create(Export);
- errdefer mod.gpa.destroy(new_export);
+ try mod.decl_exports.ensureUnusedCapacity(gpa, 1);
+ try mod.export_owners.ensureUnusedCapacity(gpa, 1);
- const symbol_name = try mod.gpa.dupe(u8, borrowed_symbol_name);
- errdefer mod.gpa.free(symbol_name);
+ const new_export = try gpa.create(Export);
+ errdefer gpa.destroy(new_export);
- const owner_decl = scope.ownerDecl().?;
+ const symbol_name = try gpa.dupe(u8, borrowed_options.name);
+ errdefer gpa.free(symbol_name);
+
+ const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null;
+ errdefer if (section) |s| gpa.free(s);
+
+ const src_decl = block.src_decl;
+ const owner_decl = block.sema.owner_decl;
log.debug("exporting Decl '{s}' as symbol '{s}' from Decl '{s}'", .{
- exported_decl.name, borrowed_symbol_name, owner_decl.name,
+ exported_decl.name, symbol_name, owner_decl.name,
});
new_export.* = .{
- .options = .{ .name = symbol_name },
+ .options = .{
+ .name = symbol_name,
+ .linkage = borrowed_options.linkage,
+ .section = section,
+ },
.src = src,
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = {} },
@@ -4102,6 +4355,7 @@ pub fn analyzeExport(
.spirv => .{ .spirv = {} },
},
.owner_decl = owner_decl,
+ .src_decl = src_decl,
.exported_decl = exported_decl,
.status = .in_progress,
};
@@ -4111,18 +4365,18 @@ pub fn analyzeExport(
if (!eo_gop.found_existing) {
eo_gop.value_ptr.* = &[0]*Export{};
}
- eo_gop.value_ptr.* = try mod.gpa.realloc(eo_gop.value_ptr.*, eo_gop.value_ptr.len + 1);
+ eo_gop.value_ptr.* = try gpa.realloc(eo_gop.value_ptr.*, eo_gop.value_ptr.len + 1);
eo_gop.value_ptr.*[eo_gop.value_ptr.len - 1] = new_export;
- errdefer eo_gop.value_ptr.* = mod.gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1);
+ errdefer eo_gop.value_ptr.* = gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1);
// Add to exported_decl table.
const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl);
if (!de_gop.found_existing) {
de_gop.value_ptr.* = &[0]*Export{};
}
- de_gop.value_ptr.* = try mod.gpa.realloc(de_gop.value_ptr.*, de_gop.value_ptr.len + 1);
+ de_gop.value_ptr.* = try gpa.realloc(de_gop.value_ptr.*, de_gop.value_ptr.len + 1);
de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export;
- errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1);
+ errdefer de_gop.value_ptr.* = gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1);
}
/// Takes ownership of `name` even if it returns an error.
@@ -4132,39 +4386,43 @@ pub fn createAnonymousDeclNamed(
typed_value: TypedValue,
name: [:0]u8,
) !*Decl {
- return mod.createAnonymousDeclFromDeclNamed(scope.ownerDecl().?, typed_value, name);
+ return mod.createAnonymousDeclFromDeclNamed(scope.srcDecl().?, scope.srcScope(), typed_value, name);
}
pub fn createAnonymousDecl(mod: *Module, scope: *Scope, typed_value: TypedValue) !*Decl {
- return mod.createAnonymousDeclFromDecl(scope.ownerDecl().?, typed_value);
+ return mod.createAnonymousDeclFromDecl(scope.srcDecl().?, scope.srcScope(), typed_value);
}
-pub fn createAnonymousDeclFromDecl(mod: *Module, owner_decl: *Decl, tv: TypedValue) !*Decl {
+pub fn createAnonymousDeclFromDecl(mod: *Module, src_decl: *Decl, src_scope: ?*CaptureScope, tv: TypedValue) !*Decl {
const name_index = mod.getNextAnonNameIndex();
const name = try std.fmt.allocPrintZ(mod.gpa, "{s}__anon_{d}", .{
- owner_decl.name, name_index,
+ src_decl.name, name_index,
});
- return mod.createAnonymousDeclFromDeclNamed(owner_decl, tv, name);
+ return mod.createAnonymousDeclFromDeclNamed(src_decl, src_scope, tv, name);
}
/// Takes ownership of `name` even if it returns an error.
pub fn createAnonymousDeclFromDeclNamed(
mod: *Module,
- owner_decl: *Decl,
+ src_decl: *Decl,
+ src_scope: ?*CaptureScope,
typed_value: TypedValue,
name: [:0]u8,
) !*Decl {
errdefer mod.gpa.free(name);
- const namespace = owner_decl.namespace;
+ const namespace = src_decl.namespace;
try namespace.anon_decls.ensureUnusedCapacity(mod.gpa, 1);
- const new_decl = try mod.allocateNewDecl(namespace, owner_decl.src_node);
+ const new_decl = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope);
new_decl.name = name;
- new_decl.src_line = owner_decl.src_line;
+ new_decl.src_line = src_decl.src_line;
new_decl.ty = typed_value.ty;
new_decl.val = typed_value.val;
+ new_decl.align_val = Value.initTag(.null_value);
+ new_decl.linksection_val = Value.initTag(.null_value);
+ new_decl.@"addrspace" = .generic; // default global addrspace
new_decl.has_tv = true;
new_decl.analysis = .complete;
new_decl.generation = mod.generation;
@@ -4305,61 +4563,6 @@ pub fn failWithOwnedErrorMsg(mod: *Module, scope: *Scope, err_msg: *ErrorMsg) Co
return error.AnalysisFail;
}
-pub fn simplePtrType(
- arena: *Allocator,
- elem_ty: Type,
- mutable: bool,
- size: std.builtin.TypeInfo.Pointer.Size,
-) Allocator.Error!Type {
- if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) {
- return Type.initTag(.const_slice_u8);
- }
- // TODO stage1 type inference bug
- const T = Type.Tag;
-
- const type_payload = try arena.create(Type.Payload.ElemType);
- type_payload.* = .{
- .base = .{
- .tag = switch (size) {
- .One => if (mutable) T.single_mut_pointer else T.single_const_pointer,
- .Many => if (mutable) T.many_mut_pointer else T.many_const_pointer,
- .C => if (mutable) T.c_mut_pointer else T.c_const_pointer,
- .Slice => if (mutable) T.mut_slice else T.const_slice,
- },
- },
- .data = elem_ty,
- };
- return Type.initPayload(&type_payload.base);
-}
-
-pub fn ptrType(
- arena: *Allocator,
- elem_ty: Type,
- sentinel: ?Value,
- @"align": u32,
- bit_offset: u16,
- host_size: u16,
- mutable: bool,
- @"allowzero": bool,
- @"volatile": bool,
- size: std.builtin.TypeInfo.Pointer.Size,
-) Allocator.Error!Type {
- assert(host_size == 0 or bit_offset < host_size * 8);
-
- // TODO check if type can be represented by simplePtrType
- return Type.Tag.pointer.create(arena, .{
- .pointee_type = elem_ty,
- .sentinel = sentinel,
- .@"align" = @"align",
- .bit_offset = bit_offset,
- .host_size = host_size,
- .@"allowzero" = @"allowzero",
- .mutable = mutable,
- .@"volatile" = @"volatile",
- .size = size,
- });
-}
-
pub fn optionalType(arena: *Allocator, child_type: Type) Allocator.Error!Type {
switch (child_type.tag()) {
.single_const_pointer => return Type.Tag.optional_single_const_pointer.create(
@@ -4689,7 +4892,7 @@ pub fn populateTestFunctions(mod: *Module) !void {
const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file;
const builtin_namespace = builtin_file.root_decl.?.namespace;
const decl = builtin_namespace.decls.get("test_functions").?;
- var buf: Type.Payload.ElemType = undefined;
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType();
const array_decl = d: {
@@ -4700,7 +4903,7 @@ pub fn populateTestFunctions(mod: *Module) !void {
const arena = &new_decl_arena.allocator;
const test_fn_vals = try arena.alloc(Value, mod.test_functions.count());
- const array_decl = try mod.createAnonymousDeclFromDecl(decl, .{
+ const array_decl = try mod.createAnonymousDeclFromDecl(decl, null, .{
.ty = try Type.Tag.array.create(arena, .{
.len = test_fn_vals.len,
.elem_type = try tmp_test_fn_ty.copy(arena),
@@ -4713,7 +4916,7 @@ pub fn populateTestFunctions(mod: *Module) !void {
var name_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer name_decl_arena.deinit();
const bytes = try name_decl_arena.allocator.dupe(u8, test_name_slice);
- const test_name_decl = try mod.createAnonymousDeclFromDecl(array_decl, .{
+ const test_name_decl = try mod.createAnonymousDeclFromDecl(array_decl, null, .{
.ty = try Type.Tag.array_u8.create(&name_decl_arena.allocator, bytes.len),
.val = try Value.Tag.bytes.create(&name_decl_arena.allocator, bytes),
});
diff --git a/src/Package.zig b/src/Package.zig
index 1f19c1d43a..f5380aaacb 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -99,19 +99,22 @@ pub fn destroy(pkg: *Package, gpa: *Allocator) void {
}
}
- {
- var it = pkg.table.keyIterator();
- while (it.next()) |key| {
- gpa.free(key.*);
- }
- }
-
- pkg.table.deinit(gpa);
+ pkg.deinitTable(gpa);
gpa.destroy(pkg);
}
+/// Only frees memory associated with the table.
+pub fn deinitTable(pkg: *Package, gpa: *Allocator) void {
+ var it = pkg.table.keyIterator();
+ while (it.next()) |key| {
+ gpa.free(key.*);
+ }
+
+ pkg.table.deinit(gpa);
+}
+
pub fn add(pkg: *Package, gpa: *Allocator, name: []const u8, package: *Package) !void {
- try pkg.table.ensureCapacity(gpa, pkg.table.count() + 1);
+ try pkg.table.ensureUnusedCapacity(gpa, 1);
const name_dupe = try mem.dupe(gpa, u8, name);
pkg.table.putAssumeCapacityNoClobber(name_dupe, package);
}
diff --git a/src/Sema.zig b/src/Sema.zig
index de0d0b7c88..7c2ef32ad3 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -8,8 +8,12 @@
mod: *Module,
/// Alias to `mod.gpa`.
gpa: *Allocator,
-/// Points to the arena allocator of the Decl.
+/// Points to the temporary arena allocator of the Sema.
+/// This arena will be cleared when the sema is destroyed.
arena: *Allocator,
+/// Points to the arena allocator for the owner_decl.
+/// This arena will persist until the decl is invalidated.
+perm_arena: *Allocator,
code: Zir,
air_instructions: std.MultiArrayList(Air.Inst) = .{},
air_extra: std.ArrayListUnmanaged(u32) = .{},
@@ -80,9 +84,13 @@ const Scope = Module.Scope;
const CompileError = Module.CompileError;
const SemaError = Module.SemaError;
const Decl = Module.Decl;
+const CaptureScope = Module.CaptureScope;
+const WipCaptureScope = Module.WipCaptureScope;
const LazySrcLoc = Module.LazySrcLoc;
const RangeSet = @import("RangeSet.zig");
const target_util = @import("target.zig");
+const Package = @import("Package.zig");
+const crash_report = @import("crash_report.zig");
pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref);
@@ -128,15 +136,34 @@ pub fn analyzeBody(
) CompileError!Zir.Inst.Index {
// No tracy calls here, to avoid interfering with the tail call mechanism.
+ const parent_capture_scope = block.wip_capture_scope;
+
+ var wip_captures = WipCaptureScope{
+ .finalized = true,
+ .scope = parent_capture_scope,
+ .perm_arena = sema.perm_arena,
+ .gpa = sema.gpa,
+ };
+ defer if (wip_captures.scope != parent_capture_scope) {
+ wip_captures.deinit();
+ };
+
const map = &block.sema.inst_map;
const tags = block.sema.code.instructions.items(.tag);
const datas = block.sema.code.instructions.items(.data);
+ var orig_captures: usize = parent_capture_scope.captures.count();
+
+ var crash_info = crash_report.prepAnalyzeBody(sema, block, body);
+ crash_info.push();
+ defer crash_info.pop();
+
// We use a while(true) loop here to avoid a redundant way of breaking out of
// the loop. The only way to break out of the loop is with a `noreturn`
// instruction.
var i: usize = 0;
- while (true) {
+ const result = while (true) {
+ crash_info.setBodyIndex(i);
const inst = body[i];
const air_inst: Air.Inst.Ref = switch (tags[inst]) {
// zig fmt: off
@@ -164,11 +191,8 @@ pub fn analyzeBody(
.bool_br_and => try sema.zirBoolBr(block, inst, false),
.bool_br_or => try sema.zirBoolBr(block, inst, true),
.c_import => try sema.zirCImport(block, inst),
- .call => try sema.zirCall(block, inst, .auto, false),
- .call_chkused => try sema.zirCall(block, inst, .auto, true),
- .call_compile_time => try sema.zirCall(block, inst, .compile_time, false),
- .call_nosuspend => try sema.zirCall(block, inst, .no_async, false),
- .call_async => try sema.zirCall(block, inst, .async_kw, false),
+ .call => try sema.zirCall(block, inst),
+ .closure_get => try sema.zirClosureGet(block, inst),
.cmp_lt => try sema.zirCmp(block, inst, .lt),
.cmp_lte => try sema.zirCmp(block, inst, .lte),
.cmp_eq => try sema.zirCmpEq(block, inst, .eq, .cmp_eq),
@@ -201,6 +225,8 @@ pub fn analyzeBody(
.field_ptr_named => try sema.zirFieldPtrNamed(block, inst),
.field_val => try sema.zirFieldVal(block, inst),
.field_val_named => try sema.zirFieldValNamed(block, inst),
+ .field_call_bind => try sema.zirFieldCallBind(block, inst),
+ .field_call_bind_named => try sema.zirFieldCallBindNamed(block, inst),
.func => try sema.zirFunc(block, inst, false),
.func_inferred => try sema.zirFunc(block, inst, true),
.import => try sema.zirImport(block, inst),
@@ -222,12 +248,10 @@ pub fn analyzeBody(
.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false),
.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false),
.optional_type => try sema.zirOptionalType(block, inst),
- .param_type => try sema.zirParamType(block, inst),
.ptr_type => try sema.zirPtrType(block, inst),
.ptr_type_simple => try sema.zirPtrTypeSimple(block, inst),
.ref => try sema.zirRef(block, inst),
.ret_err_value_code => try sema.zirRetErrValueCode(block, inst),
- .shl => try sema.zirShl(block, inst),
.shr => try sema.zirShr(block, inst),
.slice_end => try sema.zirSliceEnd(block, inst),
.slice_sentinel => try sema.zirSliceSentinel(block, inst),
@@ -300,9 +324,6 @@ pub fn analyzeBody(
.div_exact => try sema.zirDivExact(block, inst),
.div_floor => try sema.zirDivFloor(block, inst),
.div_trunc => try sema.zirDivTrunc(block, inst),
- .mod => try sema.zirMod(block, inst),
- .rem => try sema.zirRem(block, inst),
- .shl_exact => try sema.zirShlExact(block, inst),
.shr_exact => try sema.zirShrExact(block, inst),
.bit_offset_of => try sema.zirBitOffsetOf(block, inst),
.offset_of => try sema.zirOffsetOf(block, inst),
@@ -314,14 +335,11 @@ pub fn analyzeBody(
.select => try sema.zirSelect(block, inst),
.atomic_load => try sema.zirAtomicLoad(block, inst),
.atomic_rmw => try sema.zirAtomicRmw(block, inst),
- .atomic_store => try sema.zirAtomicStore(block, inst),
.mul_add => try sema.zirMulAdd(block, inst),
.builtin_call => try sema.zirBuiltinCall(block, inst),
.field_ptr_type => try sema.zirFieldPtrType(block, inst),
.field_parent_ptr => try sema.zirFieldParentPtr(block, inst),
.maximum => try sema.zirMaximum(block, inst),
- .memcpy => try sema.zirMemcpy(block, inst),
- .memset => try sema.zirMemset(block, inst),
.minimum => try sema.zirMinimum(block, inst),
.builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst),
.@"resume" => try sema.zirResume(block, inst),
@@ -343,32 +361,38 @@ pub fn analyzeBody(
.trunc => try sema.zirUnaryMath(block, inst),
.round => try sema.zirUnaryMath(block, inst),
- .opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent),
- .opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon),
- .opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func),
.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent),
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func),
- .add => try sema.zirArithmetic(block, inst),
- .addwrap => try sema.zirArithmetic(block, inst),
- .div => try sema.zirArithmetic(block, inst),
- .mod_rem => try sema.zirArithmetic(block, inst),
- .mul => try sema.zirArithmetic(block, inst),
- .mulwrap => try sema.zirArithmetic(block, inst),
- .sub => try sema.zirArithmetic(block, inst),
- .subwrap => try sema.zirArithmetic(block, inst),
+ .add => try sema.zirArithmetic(block, inst, .add),
+ .addwrap => try sema.zirArithmetic(block, inst, .addwrap),
+ .add_sat => try sema.zirArithmetic(block, inst, .add_sat),
+ .div => try sema.zirArithmetic(block, inst, .div),
+ .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem),
+ .mod => try sema.zirArithmetic(block, inst, .mod),
+ .rem => try sema.zirArithmetic(block, inst, .rem),
+ .mul => try sema.zirArithmetic(block, inst, .mul),
+ .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap),
+ .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat),
+ .sub => try sema.zirArithmetic(block, inst, .sub),
+ .subwrap => try sema.zirArithmetic(block, inst, .subwrap),
+ .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat),
+
+ .shl => try sema.zirShl(block, inst, .shl),
+ .shl_exact => try sema.zirShl(block, inst, .shl_exact),
+ .shl_sat => try sema.zirShl(block, inst, .shl_sat),
// Instructions that we know to *always* be noreturn based solely on their tag.
// These functions match the return type of analyzeBody so that we can
// tail call them here.
- .compile_error => return sema.zirCompileError(block, inst),
- .ret_coerce => return sema.zirRetCoerce(block, inst),
- .ret_node => return sema.zirRetNode(block, inst),
- .ret_load => return sema.zirRetLoad(block, inst),
- .ret_err_value => return sema.zirRetErrValue(block, inst),
- .@"unreachable" => return sema.zirUnreachable(block, inst),
- .panic => return sema.zirPanic(block, inst),
+ .compile_error => break sema.zirCompileError(block, inst),
+ .ret_coerce => break sema.zirRetCoerce(block, inst),
+ .ret_node => break sema.zirRetNode(block, inst),
+ .ret_load => break sema.zirRetLoad(block, inst),
+ .ret_err_value => break sema.zirRetErrValue(block, inst),
+ .@"unreachable" => break sema.zirUnreachable(block, inst),
+ .panic => break sema.zirPanic(block, inst),
// zig fmt: on
// Instructions that we know can *never* be noreturn based solely on
@@ -377,7 +401,9 @@ pub fn analyzeBody(
// We also know that they cannot be referenced later, so we avoid
// putting them into the map.
.breakpoint => {
- try sema.zirBreakpoint(block, inst);
+ if (!block.is_comptime) {
+ _ = try block.addNoOp(.breakpoint);
+ }
i += 1;
continue;
},
@@ -411,6 +437,11 @@ pub fn analyzeBody(
i += 1;
continue;
},
+ .atomic_store => {
+ try sema.zirAtomicStore(block, inst);
+ i += 1;
+ continue;
+ },
.store => {
try sema.zirStore(block, inst);
i += 1;
@@ -451,6 +482,11 @@ pub fn analyzeBody(
i += 1;
continue;
},
+ .export_value => {
+ try sema.zirExportValue(block, inst);
+ i += 1;
+ continue;
+ },
.set_align_stack => {
try sema.zirSetAlignStack(block, inst);
i += 1;
@@ -491,34 +527,59 @@ pub fn analyzeBody(
i += 1;
continue;
},
+ .closure_capture => {
+ try sema.zirClosureCapture(block, inst);
+ i += 1;
+ continue;
+ },
+ .memcpy => {
+ try sema.zirMemcpy(block, inst);
+ i += 1;
+ continue;
+ },
+ .memset => {
+ try sema.zirMemset(block, inst);
+ i += 1;
+ continue;
+ },
// Special case instructions to handle comptime control flow.
.@"break" => {
if (block.is_comptime) {
- return inst; // same as break_inline
+ break inst; // same as break_inline
} else {
- return sema.zirBreak(block, inst);
+ break sema.zirBreak(block, inst);
}
},
- .break_inline => return inst,
+ .break_inline => break inst,
.repeat => {
if (block.is_comptime) {
// Send comptime control flow back to the beginning of this block.
const src: LazySrcLoc = .{ .node_offset = datas[inst].node };
try sema.emitBackwardBranch(block, src);
+ if (wip_captures.scope.captures.count() != orig_captures) {
+ try wip_captures.reset(parent_capture_scope);
+ block.wip_capture_scope = wip_captures.scope;
+ orig_captures = 0;
+ }
i = 0;
continue;
} else {
const src_node = sema.code.instructions.items(.data)[inst].node;
const src: LazySrcLoc = .{ .node_offset = src_node };
try sema.requireRuntimeBlock(block, src);
- return always_noreturn;
+ break always_noreturn;
}
},
.repeat_inline => {
// Send comptime control flow back to the beginning of this block.
const src: LazySrcLoc = .{ .node_offset = datas[inst].node };
try sema.emitBackwardBranch(block, src);
+ if (wip_captures.scope.captures.count() != orig_captures) {
+ try wip_captures.reset(parent_capture_scope);
+ block.wip_capture_scope = wip_captures.scope;
+ orig_captures = 0;
+ }
i = 0;
continue;
},
@@ -533,7 +594,7 @@ pub fn analyzeBody(
if (inst == break_data.block_inst) {
break :blk sema.resolveInst(break_data.operand);
} else {
- return break_inst;
+ break break_inst;
}
},
.block => blk: {
@@ -547,7 +608,7 @@ pub fn analyzeBody(
if (inst == break_data.block_inst) {
break :blk sema.resolveInst(break_data.operand);
} else {
- return break_inst;
+ break break_inst;
}
},
.block_inline => blk: {
@@ -560,11 +621,11 @@ pub fn analyzeBody(
if (inst == break_data.block_inst) {
break :blk sema.resolveInst(break_data.operand);
} else {
- return break_inst;
+ break break_inst;
}
},
.condbr => blk: {
- if (!block.is_comptime) return sema.zirCondbr(block, inst);
+ if (!block.is_comptime) break sema.zirCondbr(block, inst);
// Same as condbr_inline. TODO https://github.com/ziglang/zig/issues/8220
const inst_data = datas[inst].pl_node;
const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
@@ -578,7 +639,7 @@ pub fn analyzeBody(
if (inst == break_data.block_inst) {
break :blk sema.resolveInst(break_data.operand);
} else {
- return break_inst;
+ break break_inst;
}
},
.condbr_inline => blk: {
@@ -594,15 +655,22 @@ pub fn analyzeBody(
if (inst == break_data.block_inst) {
break :blk sema.resolveInst(break_data.operand);
} else {
- return break_inst;
+ break break_inst;
}
},
};
if (sema.typeOf(air_inst).isNoReturn())
- return always_noreturn;
+ break always_noreturn;
try map.put(sema.gpa, inst, air_inst);
i += 1;
+ } else unreachable;
+
+ if (!wip_captures.finalized) {
+ try wip_captures.finalize();
+ block.wip_capture_scope = parent_capture_scope;
}
+
+ return result;
}
fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -614,6 +682,7 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
.struct_decl => return sema.zirStructDecl( block, extended, inst),
.enum_decl => return sema.zirEnumDecl( block, extended),
.union_decl => return sema.zirUnionDecl( block, extended, inst),
+ .opaque_decl => return sema.zirOpaqueDecl( block, extended, inst),
.ret_ptr => return sema.zirRetPtr( block, extended),
.ret_type => return sema.zirRetType( block, extended),
.this => return sema.zirThis( block, extended),
@@ -636,10 +705,6 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
.c_define => return sema.zirCDefine( block, extended),
.wasm_memory_size => return sema.zirWasmMemorySize( block, extended),
.wasm_memory_grow => return sema.zirWasmMemoryGrow( block, extended),
- .add_with_saturation=> return sema.zirSatArithmetic( block, extended),
- .sub_with_saturation=> return sema.zirSatArithmetic( block, extended),
- .mul_with_saturation=> return sema.zirSatArithmetic( block, extended),
- .shl_with_saturation=> return sema.zirSatArithmetic( block, extended),
// zig fmt: on
}
}
@@ -828,9 +893,18 @@ fn failWithUseOfUndef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) Compile
return sema.mod.fail(&block.base, src, "use of undefined value here causes undefined behavior", .{});
}
+fn failWithDivideByZero(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) CompileError {
+ return sema.mod.fail(&block.base, src, "division by zero here causes undefined behavior", .{});
+}
+
+fn failWithModRemNegative(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError {
+ return sema.mod.fail(&block.base, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty });
+}
+
/// Appropriate to call when the coercion has already been done by result
/// location semantics. Asserts the value fits in the provided `Int` type.
/// Only supports `Int` types 64 bits or less.
+/// TODO don't ever call this since we're migrating towards ResultLoc.coerced_ty.
fn resolveAlreadyCoercedInt(
sema: *Sema,
block: *Scope.Block,
@@ -847,6 +921,23 @@ fn resolveAlreadyCoercedInt(
}
}
+fn resolveAlign(
+ sema: *Sema,
+ block: *Scope.Block,
+ src: LazySrcLoc,
+ zir_ref: Zir.Inst.Ref,
+) !u16 {
+ const alignment_big = try sema.resolveInt(block, src, zir_ref, Type.initTag(.u16));
+ const alignment = @intCast(u16, alignment_big); // We coerce to u16 in the prev line.
+ if (alignment == 0) return sema.mod.fail(&block.base, src, "alignment must be >= 1", .{});
+ if (!std.math.isPowerOfTwo(alignment)) {
+ return sema.mod.fail(&block.base, src, "alignment value {d} is not a power of two", .{
+ alignment,
+ });
+ }
+ return alignment;
+}
+
fn resolveInt(
sema: *Sema,
block: *Scope.Block,
@@ -900,10 +991,38 @@ fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) C
}
fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- _ = inst;
const tracy = trace(@src());
defer tracy.end();
- return sema.mod.fail(&block.base, sema.src, "TODO implement zirCoerceResultPtr", .{});
+
+ const src: LazySrcLoc = sema.src;
+ const bin_inst = sema.code.instructions.items(.data)[inst].bin;
+ const pointee_ty = try sema.resolveType(block, src, bin_inst.lhs);
+ const ptr = sema.resolveInst(bin_inst.rhs);
+
+ // Create a runtime bitcast instruction with exactly the type the pointer wants.
+ const ptr_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = pointee_ty,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
+ try sema.requireRuntimeBlock(block, src);
+ const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr);
+
+ if (Air.refToIndex(ptr)) |ptr_inst| {
+ if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) {
+ const air_datas = sema.air_instructions.items(.data);
+ const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
+ if (ptr_val.castTag(.inferred_alloc)) |inferred_alloc| {
+ // Add the stored instruction to the set we will use to resolve peer types
+ // for the inferred allocation.
+ // This instruction will not make it to codegen; it is only to participate
+ // in the `stored_inst_list` of the `inferred_alloc`.
+ const operand = try block.addTyOp(.bitcast, pointee_ty, .void_value);
+ try inferred_alloc.data.stored_inst_list.append(sema.arena, operand);
+ }
+ }
+ }
+
+ return bitcasted_ptr;
}
pub fn analyzeStructDecl(
@@ -981,7 +1100,6 @@ fn zirStructDecl(
}
fn createTypeName(sema: *Sema, block: *Scope.Block, name_strategy: Zir.Inst.NameStrategy) ![:0]u8 {
- _ = block;
switch (name_strategy) {
.anon => {
// It would be neat to have "struct:line:column" but this name has
@@ -990,14 +1108,14 @@ fn createTypeName(sema: *Sema, block: *Scope.Block, name_strategy: Zir.Inst.Name
// semantically analyzed.
const name_index = sema.mod.getNextAnonNameIndex();
return std.fmt.allocPrintZ(sema.gpa, "{s}__anon_{d}", .{
- sema.owner_decl.name, name_index,
+ block.src_decl.name, name_index,
});
},
- .parent => return sema.gpa.dupeZ(u8, mem.spanZ(sema.owner_decl.name)),
+ .parent => return sema.gpa.dupeZ(u8, mem.spanZ(block.src_decl.name)),
.func => {
const name_index = sema.mod.getNextAnonNameIndex();
const name = try std.fmt.allocPrintZ(sema.gpa, "{s}__anon_{d}", .{
- sema.owner_decl.name, name_index,
+ block.src_decl.name, name_index,
});
log.warn("TODO: handle NameStrategy.func correctly instead of using anon name '{s}'", .{
name,
@@ -1053,17 +1171,6 @@ fn zirEnumDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const tag_ty = blk: {
- if (tag_type_ref != .none) {
- // TODO better source location
- // TODO (needs AstGen fix too) move this eval to the block so it gets allocated
- // in the new decl arena.
- break :blk try sema.resolveType(block, src, tag_type_ref);
- }
- const bits = std.math.log2_int_ceil(usize, fields_len);
- break :blk try Type.Tag.int_unsigned.create(&new_decl_arena.allocator, bits);
- };
-
const enum_obj = try new_decl_arena.allocator.create(Module.EnumFull);
const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumFull);
enum_ty_payload.* = .{
@@ -1082,7 +1189,7 @@ fn zirEnumDecl(
enum_obj.* = .{
.owner_decl = new_decl,
- .tag_ty = tag_ty,
+ .tag_ty = Type.initTag(.@"null"),
.fields = .{},
.values = .{},
.node_offset = src.node_offset,
@@ -1110,16 +1217,6 @@ fn zirEnumDecl(
const body_end = extra_index;
extra_index += bit_bags_count;
- try enum_obj.fields.ensureCapacity(&new_decl_arena.allocator, fields_len);
- const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
- if (bag != 0) break true;
- } else false;
- if (any_values) {
- try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{
- .ty = tag_ty,
- });
- }
-
{
// We create a block for the field type instructions because they
// may need to reference Decls from inside the enum namespace.
@@ -1142,10 +1239,14 @@ fn zirEnumDecl(
sema.func = null;
defer sema.func = prev_func;
+ var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope);
+ defer wip_captures.deinit();
+
var enum_block: Scope.Block = .{
.parent = null,
.sema = sema,
.src_decl = new_decl,
+ .wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -1155,7 +1256,30 @@ fn zirEnumDecl(
if (body.len != 0) {
_ = try sema.analyzeBody(&enum_block, body);
}
+
+ try wip_captures.finalize();
+
+ const tag_ty = blk: {
+ if (tag_type_ref != .none) {
+ // TODO better source location
+ break :blk try sema.resolveType(block, src, tag_type_ref);
+ }
+ const bits = std.math.log2_int_ceil(usize, fields_len);
+ break :blk try Type.Tag.int_unsigned.create(&new_decl_arena.allocator, bits);
+ };
+ enum_obj.tag_ty = tag_ty;
}
+
+ try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
+ const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
+ if (bag != 0) break true;
+ } else false;
+ if (any_values) {
+ try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{
+ .ty = enum_obj.tag_ty,
+ });
+ }
+
var bit_bag_index: usize = body_end;
var cur_bit_bag: u32 = undefined;
var field_i: u32 = 0;
@@ -1194,10 +1318,10 @@ fn zirEnumDecl(
// that points to this default value expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
const tag_val = (try sema.resolveInstConst(block, src, tag_val_ref)).val;
- enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = tag_ty });
+ enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = enum_obj.tag_ty });
} else if (any_values) {
const tag_val = try Value.Tag.int_u64.create(&new_decl_arena.allocator, field_i);
- enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = tag_ty });
+ enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = enum_obj.tag_ty });
}
}
@@ -1237,7 +1361,13 @@ fn zirUnionDecl(
errdefer new_decl_arena.deinit();
const union_obj = try new_decl_arena.allocator.create(Module.Union);
- const union_ty = try Type.Tag.@"union".create(&new_decl_arena.allocator, union_obj);
+ const type_tag: Type.Tag = if (small.has_tag_type or small.auto_enum_tag) .union_tagged else .@"union";
+ const union_payload = try new_decl_arena.allocator.create(Type.Payload.Union);
+ union_payload.* = .{
+ .base = .{ .tag = type_tag },
+ .data = union_obj,
+ };
+ const union_ty = Type.initPayload(&union_payload.base);
const union_val = try Value.Tag.ty.create(&new_decl_arena.allocator, union_ty);
const type_name = try sema.createTypeName(block, small.name_strategy);
const new_decl = try sema.mod.createAnonymousDeclNamed(&block.base, .{
@@ -1275,20 +1405,14 @@ fn zirUnionDecl(
fn zirOpaqueDecl(
sema: *Sema,
block: *Scope.Block,
+ extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
- name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
-
- _ = name_strategy;
- _ = inst_data;
- _ = src;
- _ = extra;
+ _ = extended;
+ _ = inst;
return sema.mod.fail(&block.base, sema.src, "TODO implement zirOpaqueDecl", .{});
}
@@ -1349,7 +1473,10 @@ fn zirRetPtr(
return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty);
}
- const ptr_type = try Module.simplePtrType(sema.arena, sema.fn_ret_ty, true, .One);
+ const ptr_type = try Type.ptr(sema.arena, .{
+ .pointee_type = sema.fn_ret_ty,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
return block.addTy(.alloc, ptr_type);
}
@@ -1466,7 +1593,42 @@ fn zirAllocExtended(
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
- return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{});
+ const ty_src = src; // TODO better source location
+ const align_src = src; // TODO better source location
+ const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small);
+
+ var extra_index: usize = extra.end;
+
+ const var_ty: Type = if (small.has_type) blk: {
+ const type_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk try sema.resolveType(block, ty_src, type_ref);
+ } else {
+ return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended inferred", .{});
+ };
+
+ const alignment: u16 = if (small.has_align) blk: {
+ const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
+ extra_index += 1;
+ const alignment = try sema.resolveAlign(block, align_src, align_ref);
+ break :blk alignment;
+ } else 0;
+
+ if (small.is_comptime) {
+ return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended comptime", .{});
+ }
+
+ if (!small.is_const) {
+ return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended var", .{});
+ }
+
+ const ptr_type = try Type.ptr(sema.arena, .{
+ .pointee_type = var_ty,
+ .@"align" = alignment,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
+ try sema.requireRuntimeBlock(block, src);
+ return block.addTy(.alloc, ptr_type);
}
fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -1497,7 +1659,10 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const var_decl_src = inst_data.src();
const var_type = try sema.resolveType(block, ty_src, inst_data.operand);
- const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One);
+ const ptr_type = try Type.ptr(sema.arena, .{
+ .pointee_type = var_type,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
try sema.requireRuntimeBlock(block, var_decl_src);
return block.addTy(.alloc, ptr_type);
}
@@ -1513,8 +1678,11 @@ fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
if (block.is_comptime) {
return sema.analyzeComptimeAlloc(block, var_type);
}
- try sema.validateVarType(block, ty_src, var_type);
- const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One);
+ try sema.validateVarType(block, ty_src, var_type, false);
+ const ptr_type = try Type.ptr(sema.arena, .{
+ .pointee_type = var_type,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
try sema.requireRuntimeBlock(block, var_decl_src);
return block.addTy(.alloc, ptr_type);
}
@@ -1574,7 +1742,10 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde
try sema.mod.declareDeclDependency(sema.owner_decl, decl);
const final_elem_ty = try decl.ty.copy(sema.arena);
- const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One);
+ const final_ptr_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = final_elem_ty,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
const final_ptr_ty_inst = try sema.addType(final_ptr_ty);
sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst;
@@ -1593,10 +1764,13 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde
const peer_inst_list = inferred_alloc.data.stored_inst_list.items;
const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none);
if (var_is_mut) {
- try sema.validateVarType(block, ty_src, final_elem_ty);
+ try sema.validateVarType(block, ty_src, final_elem_ty, false);
}
// Change it to a normal alloc.
- const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One);
+ const final_ptr_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = final_elem_ty,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
sema.air_instructions.set(ptr_inst, .{
.tag = .alloc,
.data = .{ .ty = final_ptr_ty },
@@ -1609,19 +1783,82 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind
const tracy = trace(@src());
defer tracy.end();
- const gpa = sema.gpa;
- const mod = sema.mod;
const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
- const struct_init_src = validate_inst.src();
+ const init_src = validate_inst.src();
const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len];
+ const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node;
+ const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
+ const object_ptr = sema.resolveInst(field_ptr_extra.lhs);
+ const agg_ty = sema.typeOf(object_ptr).elemType();
+ switch (agg_ty.zigTypeTag()) {
+ .Struct => return sema.validateStructInitPtr(
+ block,
+ agg_ty.castTag(.@"struct").?.data,
+ init_src,
+ instrs,
+ ),
+ .Union => return sema.validateUnionInitPtr(
+ block,
+ agg_ty.cast(Type.Payload.Union).?.data,
+ init_src,
+ instrs,
+ object_ptr,
+ ),
+ else => unreachable,
+ }
+}
- const struct_obj: *Module.Struct = s: {
- const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node;
- const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
- const object_ptr = sema.resolveInst(field_ptr_extra.lhs);
- break :s sema.typeOf(object_ptr).elemType().castTag(.@"struct").?.data;
- };
+fn validateUnionInitPtr(
+ sema: *Sema,
+ block: *Scope.Block,
+ union_obj: *Module.Union,
+ init_src: LazySrcLoc,
+ instrs: []const Zir.Inst.Index,
+ union_ptr: Air.Inst.Ref,
+) CompileError!void {
+ const mod = sema.mod;
+
+ if (instrs.len != 1) {
+ // TODO add note for other field
+ // TODO add note for union declared here
+ return mod.fail(&block.base, init_src, "only one union field can be active at once", .{});
+ }
+
+ const field_ptr = instrs[0];
+ const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
+ const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_ptr_data.src_node };
+ const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
+ const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start);
+ const field_index_big = union_obj.fields.getIndex(field_name) orelse
+ return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
+ const field_index = @intCast(u32, field_index_big);
+
+ // TODO here we need to go back and see if we need to convert the union
+ // to a comptime-known value. This will involve editing the AIR code we have
+ // generated so far - in particular deleting some runtime pointer bitcast
+ // instructions which are not actually needed if the initialization expression
+ // ends up being comptime-known.
+
+ // Otherwise, we set the new union tag now.
+ const new_tag = try sema.addConstant(
+ union_obj.tag_ty,
+ try Value.Tag.enum_field_index.create(sema.arena, field_index),
+ );
+
+ try sema.requireRuntimeBlock(block, init_src);
+ _ = try block.addBinOp(.set_union_tag, union_ptr, new_tag);
+}
+
+fn validateStructInitPtr(
+ sema: *Sema,
+ block: *Scope.Block,
+ struct_obj: *Module.Struct,
+ init_src: LazySrcLoc,
+ instrs: []const Zir.Inst.Index,
+) CompileError!void {
+ const gpa = sema.gpa;
+ const mod = sema.mod;
// Maps field index to field_ptr index of where it was already initialized.
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.count());
@@ -1660,9 +1897,9 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind
const template = "missing struct field: {s}";
const args = .{field_name};
if (root_msg) |msg| {
- try mod.errNote(&block.base, struct_init_src, msg, template, args);
+ try mod.errNote(&block.base, init_src, msg, template, args);
} else {
- root_msg = try mod.errMsg(&block.base, struct_init_src, template, args);
+ root_msg = try mod.errMsg(&block.base, init_src, template, args);
}
}
if (root_msg) |msg| {
@@ -1750,7 +1987,11 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co
}
const ptr = sema.resolveInst(bin_inst.lhs);
const value = sema.resolveInst(bin_inst.rhs);
- const ptr_ty = try Module.simplePtrType(sema.arena, sema.typeOf(value), true, .One);
+ const ptr_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = sema.typeOf(value),
+ // TODO figure out which address space is appropriate here
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
// TODO detect when this store should be done at compile-time. For example,
// if expressions should force it when the condition is compile-time known.
const src: LazySrcLoc = .unneeded;
@@ -1797,7 +2038,10 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index)
// for the inferred allocation.
try inferred_alloc.data.stored_inst_list.append(sema.arena, operand);
// Create a runtime bitcast instruction with exactly the type the pointer wants.
- const ptr_ty = try Module.simplePtrType(sema.arena, operand_ty, true, .One);
+ const ptr_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = operand_ty,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr);
return sema.storePtr(block, src, bitcasted_ptr, operand);
}
@@ -1834,45 +2078,6 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE
return sema.storePtr(block, src, ptr, value);
}
-fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const tracy = trace(@src());
- defer tracy.end();
-
- const src = sema.src;
- const fn_inst_src = sema.src;
-
- const inst_data = sema.code.instructions.items(.data)[inst].param_type;
- const fn_inst = sema.resolveInst(inst_data.callee);
- const fn_inst_ty = sema.typeOf(fn_inst);
- const param_index = inst_data.param_index;
-
- const fn_ty: Type = switch (fn_inst_ty.zigTypeTag()) {
- .Fn => fn_inst_ty,
- .BoundFn => {
- return sema.mod.fail(&block.base, fn_inst_src, "TODO implement zirParamType for method call syntax", .{});
- },
- else => {
- return sema.mod.fail(&block.base, fn_inst_src, "expected function, found '{}'", .{fn_inst_ty});
- },
- };
-
- const param_count = fn_ty.fnParamLen();
- if (param_index >= param_count) {
- if (fn_ty.fnIsVarArgs()) {
- return sema.addType(Type.initTag(.var_args_param));
- }
- return sema.mod.fail(&block.base, src, "arg index {d} out of bounds; '{}' has {d} argument(s)", .{
- param_index,
- fn_ty,
- param_count,
- });
- }
-
- // TODO support generic functions
- const param_type = fn_ty.fnParamType(param_index);
- return sema.addType(param_type);
-}
-
fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -2070,10 +2275,78 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Com
const tracy = trace(@src());
defer tracy.end();
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
+ const pl_node = sema.code.instructions.items(.data)[inst].pl_node;
+ const src = pl_node.src();
+ const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
+ const body = sema.code.extra[extra.end..][0..extra.data.body_len];
- return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{});
+ // we check this here to avoid undefined symbols
+ if (!@import("build_options").have_llvm)
+ return sema.mod.fail(&parent_block.base, src, "cannot do C import on Zig compiler not built with LLVM-extension", .{});
+
+ var c_import_buf = std.ArrayList(u8).init(sema.gpa);
+ defer c_import_buf.deinit();
+
+ var child_block: Scope.Block = .{
+ .parent = parent_block,
+ .sema = sema,
+ .src_decl = parent_block.src_decl,
+ .wip_capture_scope = parent_block.wip_capture_scope,
+ .instructions = .{},
+ .inlining = parent_block.inlining,
+ .is_comptime = parent_block.is_comptime,
+ .c_import_buf = &c_import_buf,
+ };
+ defer child_block.instructions.deinit(sema.gpa);
+
+ _ = try sema.analyzeBody(&child_block, body);
+
+ const c_import_res = sema.mod.comp.cImport(c_import_buf.items) catch |err|
+ return sema.mod.fail(&child_block.base, src, "C import failed: {s}", .{@errorName(err)});
+
+ if (c_import_res.errors.len != 0) {
+ const msg = msg: {
+ const msg = try sema.mod.errMsg(&child_block.base, src, "C import failed", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ if (!sema.mod.comp.bin_file.options.link_libc)
+ try sema.mod.errNote(&child_block.base, src, msg, "libc headers not available; compilation does not link against libc", .{});
+
+ for (c_import_res.errors) |_| {
+ // TODO integrate with LazySrcLoc
+ // try sema.mod.errNoteNonLazy(.{}, msg, "{s}", .{clang_err.msg_ptr[0..clang_err.msg_len]});
+ // if (clang_err.filename_ptr) |p| p[0..clang_err.filename_len] else "(no file)",
+ // clang_err.line + 1,
+ // clang_err.column + 1,
+ }
+ @import("clang.zig").Stage2ErrorMsg.delete(c_import_res.errors.ptr, c_import_res.errors.len);
+ break :msg msg;
+ };
+ return sema.mod.failWithOwnedErrorMsg(&child_block.base, msg);
+ }
+ const c_import_pkg = Package.create(
+ sema.gpa,
+ null,
+ c_import_res.out_zig_path,
+ ) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => unreachable, // we pass null for root_src_dir_path
+ };
+ const std_pkg = sema.mod.main_pkg.table.get("std").?;
+ const builtin_pkg = sema.mod.main_pkg.table.get("builtin").?;
+ try c_import_pkg.add(sema.gpa, "builtin", builtin_pkg);
+ try c_import_pkg.add(sema.gpa, "std", std_pkg);
+
+ const result = sema.mod.importPkg(c_import_pkg) catch |err|
+ return sema.mod.fail(&child_block.base, src, "C import failed: {s}", .{@errorName(err)});
+
+ sema.mod.astGenFile(result.file) catch |err|
+ return sema.mod.fail(&child_block.base, src, "C import failed: {s}", .{@errorName(err)});
+
+ try sema.mod.semaFile(result.file);
+ const file_root_decl = result.file.root_decl.?;
+ try sema.mod.declareDeclDependency(sema.owner_decl, file_root_decl);
+ return sema.addConstant(file_root_decl.ty, file_root_decl.val);
}
fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -2118,6 +2391,7 @@ fn zirBlock(
.parent = parent_block,
.sema = sema,
.src_decl = parent_block.src_decl,
+ .wip_capture_scope = parent_block.wip_capture_scope,
.instructions = .{},
.label = &label,
.inlining = parent_block.inlining,
@@ -2142,8 +2416,12 @@ fn resolveBlockBody(
body: []const Zir.Inst.Index,
merges: *Scope.Block.Merges,
) CompileError!Air.Inst.Ref {
- _ = try sema.analyzeBody(child_block, body);
- return sema.analyzeBlockBody(parent_block, src, child_block, merges);
+ if (child_block.is_comptime) {
+ return sema.resolveBody(child_block, body);
+ } else {
+ _ = try sema.analyzeBody(child_block, body);
+ return sema.analyzeBlockBody(parent_block, src, child_block, merges);
+ }
}
fn analyzeBlockBody(
@@ -2256,36 +2534,66 @@ fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErro
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data;
const src = inst_data.src();
- const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
- const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const decl_name = sema.code.nullTerminatedString(extra.decl_name);
if (extra.namespace != .none) {
return sema.mod.fail(&block.base, src, "TODO: implement exporting with field access", .{});
}
- const decl = try sema.lookupIdentifier(block, lhs_src, decl_name);
- const options = try sema.resolveInstConst(block, rhs_src, extra.options);
- const struct_obj = options.ty.castTag(.@"struct").?.data;
- const fields = options.val.castTag(.@"struct").?.data[0..struct_obj.fields.count()];
- const name_index = struct_obj.fields.getIndex("name").?;
- const linkage_index = struct_obj.fields.getIndex("linkage").?;
- const section_index = struct_obj.fields.getIndex("section").?;
- const export_name = try fields[name_index].toAllocatedBytes(sema.arena);
- const linkage = fields[linkage_index].toEnum(std.builtin.GlobalLinkage);
+ const decl = try sema.lookupIdentifier(block, operand_src, decl_name);
+ const options = try sema.resolveExportOptions(block, options_src, extra.options);
+ try sema.mod.analyzeExport(block, src, options, decl);
+}
- if (linkage != .Strong) {
- return sema.mod.fail(&block.base, src, "TODO: implement exporting with non-strong linkage", .{});
- }
- if (!fields[section_index].isNull()) {
- return sema.mod.fail(&block.base, src, "TODO: implement exporting with linksection", .{});
- }
+fn zirExportValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
+ const tracy = trace(@src());
+ defer tracy.end();
- try sema.mod.analyzeExport(&block.base, src, export_name, decl);
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data;
+ const src = inst_data.src();
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const operand = try sema.resolveInstConst(block, operand_src, extra.operand);
+ const options = try sema.resolveExportOptions(block, options_src, extra.options);
+ const decl = switch (operand.val.tag()) {
+ .function => operand.val.castTag(.function).?.data.owner_decl,
+ else => return sema.mod.fail(&block.base, operand_src, "TODO implement exporting arbitrary Value objects", .{}), // TODO put this Value into an anonymous Decl and then export it.
+ };
+ try sema.mod.analyzeExport(block, src, options, decl);
}
fn zirSetAlignStack(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const src: LazySrcLoc = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirSetAlignStack", .{});
+ const alignment = try sema.resolveAlign(block, operand_src, inst_data.operand);
+ if (alignment > 256) {
+ return mod.fail(&block.base, src, "attempt to @setAlignStack({d}); maximum is 256", .{
+ alignment,
+ });
+ }
+ const func = sema.owner_func orelse
+ return mod.fail(&block.base, src, "@setAlignStack outside function body", .{});
+
+ switch (func.owner_decl.ty.fnCallingConvention()) {
+ .Naked => return mod.fail(&block.base, src, "@setAlignStack in naked function", .{}),
+ .Inline => return mod.fail(&block.base, src, "@setAlignStack in inline function", .{}),
+ else => {},
+ }
+
+ const gop = try mod.align_stack_fns.getOrPut(mod.gpa, func);
+ if (gop.found_existing) {
+ const msg = msg: {
+ const msg = try mod.errMsg(&block.base, src, "multiple @setAlignStack in the same function body", .{});
+ errdefer msg.destroy(mod.gpa);
+ try mod.errNote(&block.base, src, msg, "other instance here", .{});
+ break :msg msg;
+ };
+ return mod.failWithOwnedErrorMsg(&block.base, msg);
+ }
+ gop.value_ptr.* = .{ .alignment = alignment, .src = src };
}
fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
@@ -2308,20 +2616,21 @@ fn zirSetRuntimeSafety(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) C
block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand);
}
-fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
- const tracy = trace(@src());
- defer tracy.end();
-
- const src_node = sema.code.instructions.items(.data)[inst].node;
- const src: LazySrcLoc = .{ .node_offset = src_node };
- try sema.requireRuntimeBlock(block, src);
- _ = try block.addNoOp(.breakpoint);
-}
-
fn zirFence(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
- const src_node = sema.code.instructions.items(.data)[inst].node;
- const src: LazySrcLoc = .{ .node_offset = src_node };
- return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirFence", .{});
+ if (block.is_comptime) return;
+
+ const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+ const order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const order = try sema.resolveAtomicOrder(block, order_src, inst_data.operand);
+
+ if (@enumToInt(order) < @enumToInt(std.builtin.AtomicOrder.Acquire)) {
+ return sema.mod.fail(&block.base, order_src, "atomic ordering must be Acquire or stricter", .{});
+ }
+
+ _ = try block.addInst(.{
+ .tag = .fence,
+ .data = .{ .fence = order },
+ });
}
fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
@@ -2489,8 +2798,6 @@ fn zirCall(
sema: *Sema,
block: *Scope.Block,
inst: Zir.Inst.Index,
- modifier: std.builtin.CallOptions.Modifier,
- ensure_result_used: bool,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -2499,14 +2806,31 @@ fn zirCall(
const func_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
const call_src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index);
- const args = sema.code.refSlice(extra.end, extra.data.args_len);
+ const args = sema.code.refSlice(extra.end, extra.data.flags.args_len);
- const func = sema.resolveInst(extra.data.callee);
- // TODO handle function calls of generic functions
- const resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len);
- for (args) |zir_arg, i| {
- // the args are already casted to the result of a param type instruction.
- resolved_args[i] = sema.resolveInst(zir_arg);
+ const modifier = @intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier);
+ const ensure_result_used = extra.data.flags.ensure_result_used;
+
+ var func = sema.resolveInst(extra.data.callee);
+ var resolved_args: []Air.Inst.Ref = undefined;
+
+ const func_type = sema.typeOf(func);
+
+ // Desugar bound functions here
+ if (func_type.tag() == .bound_fn) {
+ const bound_func = try sema.resolveValue(block, func_src, func);
+ const bound_data = &bound_func.cast(Value.Payload.BoundFn).?.data;
+ func = bound_data.func_inst;
+ resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len + 1);
+ resolved_args[0] = bound_data.arg0_inst;
+ for (args) |zir_arg, i| {
+ resolved_args[i + 1] = sema.resolveInst(zir_arg);
+ }
+ } else {
+ resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len);
+ for (args) |zir_arg, i| {
+ resolved_args[i] = sema.resolveInst(zir_arg);
+ }
}
return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args);
@@ -2694,10 +3018,14 @@ fn analyzeCall(
sema.func = module_fn;
defer sema.func = parent_func;
+ var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, module_fn.owner_decl.src_scope);
+ defer wip_captures.deinit();
+
var child_block: Scope.Block = .{
.parent = null,
.sema = sema,
.src_decl = module_fn.owner_decl,
+ .wip_capture_scope = wip_captures.scope,
.instructions = .{},
.label = null,
.inlining = &inlining,
@@ -2835,6 +3163,8 @@ fn analyzeCall(
// TODO: check whether any external comptime memory was mutated by the
// comptime function call. If so, then do not memoize the call here.
+ // TODO: re-evaluate whether memoized_calls needs its own arena. I think
+ // it should be fine to use the Decl arena for the function.
{
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
@@ -2845,7 +3175,7 @@ fn analyzeCall(
}
try mod.memoized_calls.put(gpa, memoized_call_key, .{
- .val = result_val,
+ .val = try result_val.copy(arena),
.arena = arena_allocator.state,
});
delete_memoized_call_key = false;
@@ -2860,6 +3190,9 @@ fn analyzeCall(
break :res2 result;
};
+
+ try wip_captures.finalize();
+
break :res res2;
} else if (func_ty_info.is_generic) res: {
const func_val = try sema.resolveConstValue(block, func_src, func);
@@ -2942,7 +3275,8 @@ fn analyzeCall(
try namespace.anon_decls.ensureUnusedCapacity(gpa, 1);
// Create a Decl for the new function.
- const new_decl = try mod.allocateNewDecl(namespace, module_fn.owner_decl.src_node);
+ const src_decl = namespace.getDecl();
+ const new_decl = try mod.allocateNewDecl(namespace, module_fn.owner_decl.src_node, src_decl.src_scope);
// TODO better names for generic function instantiations
const name_index = mod.getNextAnonNameIndex();
new_decl.name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{
@@ -2952,7 +3286,8 @@ fn analyzeCall(
new_decl.is_pub = module_fn.owner_decl.is_pub;
new_decl.is_exported = module_fn.owner_decl.is_exported;
new_decl.has_align = module_fn.owner_decl.has_align;
- new_decl.has_linksection = module_fn.owner_decl.has_linksection;
+ new_decl.has_linksection_or_addrspace = module_fn.owner_decl.has_linksection_or_addrspace;
+ new_decl.@"addrspace" = module_fn.owner_decl.@"addrspace";
new_decl.zir_decl_index = module_fn.owner_decl.zir_decl_index;
new_decl.alive = true; // This Decl is called at runtime.
new_decl.has_tv = true;
@@ -2973,6 +3308,7 @@ fn analyzeCall(
.mod = mod,
.gpa = gpa,
.arena = sema.arena,
+ .perm_arena = &new_decl_arena.allocator,
.code = fn_zir,
.owner_decl = new_decl,
.namespace = namespace,
@@ -2985,10 +3321,14 @@ fn analyzeCall(
};
defer child_sema.deinit();
+ var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope);
+ defer wip_captures.deinit();
+
var child_block: Scope.Block = .{
.parent = null,
.sema = &child_sema,
.src_decl = new_decl,
+ .wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
@@ -3022,14 +3362,16 @@ fn analyzeCall(
}
const arg_src = call_src; // TODO: better source location
const arg = uncasted_args[arg_i];
- if (try sema.resolveMaybeUndefVal(block, arg_src, arg)) |arg_val| {
- const child_arg = try child_sema.addConstant(sema.typeOf(arg), arg_val);
- child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
- } else if (is_comptime) {
- return sema.failWithNeededComptime(block, arg_src);
+ if (is_comptime) {
+ if (try sema.resolveMaybeUndefVal(block, arg_src, arg)) |arg_val| {
+ const child_arg = try child_sema.addConstant(sema.typeOf(arg), arg_val);
+ child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
+ } else {
+ return sema.failWithNeededComptime(block, arg_src);
+ }
} else if (is_anytype) {
// We insert into the map an instruction which is runtime-known
- // but has the type of the comptime argument.
+ // but has the type of the argument.
const child_arg = try child_block.addArg(sema.typeOf(arg), 0);
child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
}
@@ -3076,6 +3418,8 @@ fn analyzeCall(
arg_i += 1;
}
+ try wip_captures.finalize();
+
// Populate the Decl ty/val with the function and its type.
new_decl.ty = try child_sema.typeOf(new_func_inst).copy(&new_decl_arena.allocator);
new_decl.val = try Value.Tag.function.create(&new_decl_arena.allocator, new_func);
@@ -3436,7 +3780,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com
},
.error_set => {
const lhs_set = lhs_ty.castTag(.error_set).?.data;
- try set.ensureCapacity(sema.gpa, set.count() + lhs_set.names_len);
+ try set.ensureUnusedCapacity(sema.gpa, lhs_set.names_len);
for (lhs_set.names_ptr[0..lhs_set.names_len]) |name| {
set.putAssumeCapacityNoClobber(name, {});
}
@@ -3450,7 +3794,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com
},
.error_set => {
const rhs_set = rhs_ty.castTag(.error_set).?.data;
- try set.ensureCapacity(sema.gpa, set.count() + rhs_set.names_len);
+ try set.ensureUnusedCapacity(sema.gpa, rhs_set.names_len);
for (rhs_set.names_ptr[0..rhs_set.names_len]) |name| {
set.putAssumeCapacity(name, {});
}
@@ -3606,7 +3950,11 @@ fn zirOptionalPayloadPtr(
}
const child_type = try opt_type.optionalChildAlloc(sema.arena);
- const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr_ty.isConstPtr(), .One);
+ const child_pointer = try Type.ptr(sema.arena, .{
+ .pointee_type = child_type,
+ .mutable = !optional_ptr_ty.isConstPtr(),
+ .@"addrspace" = optional_ptr_ty.ptrAddressSpace(),
+ });
if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| {
if (try pointer_val.pointerDeref(sema.arena)) |val| {
@@ -3721,7 +4069,11 @@ fn zirErrUnionPayloadPtr(
return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand_ty.elemType()});
const payload_ty = operand_ty.elemType().errorUnionPayload();
- const operand_pointer_ty = try Module.simplePtrType(sema.arena, payload_ty, !operand_ty.isConstPtr(), .One);
+ const operand_pointer_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = payload_ty,
+ .mutable = !operand_ty.isConstPtr(),
+ .@"addrspace" = operand_ty.ptrAddressSpace(),
+ });
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
if (try pointer_val.pointerDeref(sema.arena)) |val| {
@@ -4236,6 +4588,19 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src);
}
+fn zirFieldCallBind(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const src = inst_data.src();
+ const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
+ const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
+ const field_name = sema.code.nullTerminatedString(extra.field_name_start);
+ const object_ptr = sema.resolveInst(extra.lhs);
+ return sema.fieldCallBind(block, src, object_ptr, field_name, field_name_src);
+}
+
fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -4262,6 +4627,19 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Comp
return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src);
}
+fn zirFieldCallBindNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const src = inst_data.src();
+ const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
+ const object_ptr = sema.resolveInst(extra.lhs);
+ const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name);
+ return sema.fieldCallBind(block, src, object_ptr, field_name, field_name_src);
+}
+
fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -4275,8 +4653,8 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr
const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = sema.resolveInst(extra.rhs);
- const dest_is_comptime_int = try sema.requireIntegerType(block, dest_ty_src, dest_type);
- _ = try sema.requireIntegerType(block, operand_src, sema.typeOf(operand));
+ const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_type);
+ _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));
if (try sema.isComptimeKnown(block, operand_src, operand)) {
return sema.coerce(block, dest_type, operand, operand_src);
@@ -4284,7 +4662,8 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr
return sema.mod.fail(&block.base, src, "unable to cast runtime value to 'comptime_int'", .{});
}
- return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{});
+ try sema.requireRuntimeBlock(block, operand_src);
+ return block.addTyOp(.intcast, dest_type, operand);
}
fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4338,11 +4717,18 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE
if (try sema.isComptimeKnown(block, operand_src, operand)) {
return sema.coerce(block, dest_type, operand, operand_src);
- } else if (dest_is_comptime_float) {
+ }
+ if (dest_is_comptime_float) {
return sema.mod.fail(&block.base, src, "unable to cast runtime value to 'comptime_float'", .{});
}
-
- return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{});
+ const target = sema.mod.getTarget();
+ const src_bits = operand_ty.floatBits(target);
+ const dst_bits = dest_type.floatBits(target);
+ if (dst_bits >= src_bits) {
+ return sema.coerce(block, dest_type, operand, operand_src);
+ }
+ try sema.requireRuntimeBlock(block, operand_src);
+ return block.addTyOp(.fptrunc, dest_type, operand);
}
fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4778,8 +5164,8 @@ fn analyzeSwitch(
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();
- const min_int = try operand_ty.minInt(&arena, mod.getTarget());
- const max_int = try operand_ty.maxInt(&arena, mod.getTarget());
+ const min_int = try operand_ty.minInt(&arena.allocator, mod.getTarget());
+ const max_int = try operand_ty.maxInt(&arena.allocator, mod.getTarget());
if (try range_set.spans(min_int, max_int, operand_ty)) {
if (special_prong == .@"else") {
return mod.fail(
@@ -4971,6 +5357,7 @@ fn analyzeSwitch(
.parent = block,
.sema = sema,
.src_decl = block.src_decl,
+ .wip_capture_scope = block.wip_capture_scope,
.instructions = .{},
.label = &label,
.inlining = block.inlining,
@@ -5075,12 +5462,19 @@ fn analyzeSwitch(
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
+ var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
+ defer wip_captures.deinit();
+
case_block.instructions.shrinkRetainingCapacity(0);
+ case_block.wip_capture_scope = wip_captures.scope;
+
const item = sema.resolveInst(item_ref);
// `item` is already guaranteed to be constant known.
_ = try sema.analyzeBody(&case_block, body);
+ try wip_captures.finalize();
+
try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
cases_extra.appendAssumeCapacity(1); // items_len
cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
@@ -5108,6 +5502,7 @@ fn analyzeSwitch(
extra_index += items_len;
case_block.instructions.shrinkRetainingCapacity(0);
+ case_block.wip_capture_scope = child_block.wip_capture_scope;
var any_ok: Air.Inst.Ref = .none;
@@ -5186,11 +5581,18 @@ fn analyzeSwitch(
var cond_body = case_block.instructions.toOwnedSlice(gpa);
defer gpa.free(cond_body);
+ var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
+ defer wip_captures.deinit();
+
case_block.instructions.shrinkRetainingCapacity(0);
+ case_block.wip_capture_scope = wip_captures.scope;
+
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
_ = try sema.analyzeBody(&case_block, body);
+ try wip_captures.finalize();
+
if (is_first) {
is_first = false;
first_else_body = cond_body;
@@ -5216,9 +5618,16 @@ fn analyzeSwitch(
var final_else_body: []const Air.Inst.Index = &.{};
if (special.body.len != 0) {
+ var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
+ defer wip_captures.deinit();
+
case_block.instructions.shrinkRetainingCapacity(0);
+ case_block.wip_capture_scope = wip_captures.scope;
+
_ = try sema.analyzeBody(&case_block, special.body);
+ try wip_captures.finalize();
+
if (is_first) {
final_else_body = case_block.instructions.items;
} else {
@@ -5500,7 +5909,7 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErro
try mod.semaFile(result.file);
const file_root_decl = result.file.root_decl.?;
try sema.mod.declareDeclDependency(sema.owner_decl, file_root_decl);
- return sema.addType(file_root_decl.ty);
+ return sema.addConstant(file_root_decl.ty, file_root_decl.val);
}
fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5509,29 +5918,57 @@ fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co
return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{});
}
-fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirShl(
+ sema: *Sema,
+ block: *Scope.Block,
+ inst: Zir.Inst.Index,
+ air_tag: Air.Inst.Tag,
+) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs = sema.resolveInst(extra.lhs);
const rhs = sema.resolveInst(extra.rhs);
- if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
- if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
- if (lhs_val.isUndef() or rhs_val.isUndef()) {
- return sema.addConstUndef(sema.typeOf(lhs));
- }
- return sema.mod.fail(&block.base, src, "TODO implement comptime shl", .{});
- }
- }
+ // TODO coerce rhs if air_tag is not shl_sat
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.shl, lhs, rhs);
+ const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs);
+ const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs);
+
+ const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
+ const lhs_ty = sema.typeOf(lhs);
+
+ if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
+ const rhs_val = maybe_rhs_val orelse break :rs rhs_src;
+ if (rhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
+
+ // If rhs is 0, return lhs without doing any calculations.
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(lhs_ty, lhs_val);
+ }
+ const val = try lhs_val.shl(rhs_val, sema.arena);
+ switch (air_tag) {
+ .shl_exact => return sema.mod.fail(&block.base, lhs_src, "TODO implement Sema for comptime shl_exact", .{}),
+ .shl_sat => return sema.mod.fail(&block.base, lhs_src, "TODO implement Sema for comptime shl_sat", .{}),
+ .shl => {},
+ else => unreachable,
+ }
+ return sema.addConstant(lhs_ty, val);
+ } else rs: {
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) return sema.addConstUndef(sema.typeOf(lhs));
+ }
+ break :rs lhs_src;
+ };
+
+ // TODO: insert runtime safety check for shl_exact
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addBinOp(air_tag, lhs, rhs);
}
fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5619,10 +6056,13 @@ fn zirBitwise(
if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| {
- if (lhs_val.isUndef() or rhs_val.isUndef()) {
- return sema.addConstUndef(resolved_type);
- }
- return sema.mod.fail(&block.base, src, "TODO implement comptime bitwise operations", .{});
+ const result_val = switch (air_tag) {
+ .bit_and => try lhs_val.bitwiseAnd(rhs_val, sema.arena),
+ .bit_or => try lhs_val.bitwiseOr(rhs_val, sema.arena),
+ .xor => try lhs_val.bitwiseXor(rhs_val, sema.arena),
+ else => unreachable,
+ };
+ return sema.addConstant(scalar_type, result_val);
}
}
@@ -5671,38 +6111,36 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| {
const final_len = lhs_info.len + rhs_info.len;
- if (lhs_ty.zigTypeTag() == .Pointer) {
- var anon_decl = try block.startAnonDecl();
- defer anon_decl.deinit();
+ const is_pointer = lhs_ty.zigTypeTag() == .Pointer;
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
- const lhs_sub_val = (try lhs_val.pointerDeref(anon_decl.arena())).?;
- const rhs_sub_val = (try rhs_val.pointerDeref(anon_decl.arena())).?;
- const buf = try anon_decl.arena().alloc(Value, final_len);
- {
- var i: u64 = 0;
- while (i < lhs_info.len) : (i += 1) {
- const val = try lhs_sub_val.elemValue(sema.arena, i);
- buf[i] = try val.copy(anon_decl.arena());
- }
+ const lhs_sub_val = if (is_pointer) (try lhs_val.pointerDeref(anon_decl.arena())).? else lhs_val;
+ const rhs_sub_val = if (is_pointer) (try rhs_val.pointerDeref(anon_decl.arena())).? else rhs_val;
+ const buf = try anon_decl.arena().alloc(Value, final_len);
+ {
+ var i: u64 = 0;
+ while (i < lhs_info.len) : (i += 1) {
+ const val = try lhs_sub_val.elemValue(sema.arena, i);
+ buf[i] = try val.copy(anon_decl.arena());
}
- {
- var i: u64 = 0;
- while (i < rhs_info.len) : (i += 1) {
- const val = try rhs_sub_val.elemValue(sema.arena, i);
- buf[lhs_info.len + i] = try val.copy(anon_decl.arena());
- }
- }
- const ty = if (res_sent) |rs|
- try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = lhs_info.elem_type, .sentinel = rs })
- else
- try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = lhs_info.elem_type });
- const val = try Value.Tag.array.create(anon_decl.arena(), buf);
- return sema.analyzeDeclRef(try anon_decl.finish(
- ty,
- val,
- ));
}
- return sema.mod.fail(&block.base, lhs_src, "TODO array_cat more types of Values", .{});
+ {
+ var i: u64 = 0;
+ while (i < rhs_info.len) : (i += 1) {
+ const val = try rhs_sub_val.elemValue(sema.arena, i);
+ buf[lhs_info.len + i] = try val.copy(anon_decl.arena());
+ }
+ }
+ const ty = if (res_sent) |rs|
+ try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = lhs_info.elem_type, .sentinel = rs })
+ else
+ try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = lhs_info.elem_type });
+ const val = try Value.Tag.array.create(anon_decl.arena(), buf);
+ return if (is_pointer)
+ sema.analyzeDeclRef(try anon_decl.finish(ty, val))
+ else
+ sema.analyzeDeclVal(block, .unneeded, try anon_decl.finish(ty, val));
} else {
return sema.mod.fail(&block.base, lhs_src, "TODO runtime array_cat", .{});
}
@@ -5742,32 +6180,30 @@ fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
const final_len = std.math.mul(u64, mulinfo.len, tomulby) catch return sema.mod.fail(&block.base, rhs_src, "operation results in overflow", .{});
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
- if (lhs_ty.zigTypeTag() == .Pointer) {
- var anon_decl = try block.startAnonDecl();
- defer anon_decl.deinit();
- const lhs_sub_val = (try lhs_val.pointerDeref(anon_decl.arena())).?;
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+ const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try lhs_val.pointerDeref(anon_decl.arena())).? else lhs_val;
+ const final_ty = if (mulinfo.sentinel) |sent|
+ try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = mulinfo.elem_type, .sentinel = sent })
+ else
+ try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = mulinfo.elem_type });
+ const buf = try anon_decl.arena().alloc(Value, final_len);
- const final_ty = if (mulinfo.sentinel) |sent|
- try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = mulinfo.elem_type, .sentinel = sent })
- else
- try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = mulinfo.elem_type });
-
- const buf = try anon_decl.arena().alloc(Value, final_len);
- var i: u64 = 0;
- while (i < tomulby) : (i += 1) {
- var j: u64 = 0;
- while (j < mulinfo.len) : (j += 1) {
- const val = try lhs_sub_val.elemValue(sema.arena, j);
- buf[mulinfo.len * i + j] = try val.copy(anon_decl.arena());
- }
+            // Fill buf by repeating the source array `tomulby` times.
+ var i: u64 = 0;
+ while (i < tomulby) : (i += 1) {
+ var j: u64 = 0;
+ while (j < mulinfo.len) : (j += 1) {
+ const val = try lhs_sub_val.elemValue(sema.arena, j);
+ buf[mulinfo.len * i + j] = try val.copy(anon_decl.arena());
}
- const val = try Value.Tag.array.create(anon_decl.arena(), buf);
- return sema.analyzeDeclRef(try anon_decl.finish(
- final_ty,
- val,
- ));
}
- return sema.mod.fail(&block.base, lhs_src, "TODO array_mul more types of Values", .{});
+ const val = try Value.Tag.array.create(anon_decl.arena(), buf);
+ if (lhs_ty.zigTypeTag() == .Pointer) {
+ return sema.analyzeDeclRef(try anon_decl.finish(final_ty, val));
+ } else {
+ return sema.analyzeDeclVal(block, .unneeded, try anon_decl.finish(final_ty, val));
+ }
}
return sema.mod.fail(&block.base, lhs_src, "TODO runtime array_mul", .{});
}
@@ -5791,11 +6227,15 @@ fn zirNegate(
return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src);
}
-fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirArithmetic(
+ sema: *Sema,
+ block: *Scope.Block,
+ inst: Zir.Inst.Index,
+ zir_tag: Zir.Inst.Tag,
+) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
- const tag_override = block.sema.code.instructions.items(.tag)[inst];
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
sema.src = .{ .node_offset_bin_op = inst_data.src_node };
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -5804,7 +6244,7 @@ fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile
const lhs = sema.resolveInst(extra.lhs);
const rhs = sema.resolveInst(extra.rhs);
- return sema.analyzeArithmetic(block, tag_override, lhs, rhs, sema.src, lhs_src, rhs_src);
+ return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src);
}
fn zirOverflowArithmetic(
@@ -5821,22 +6261,10 @@ fn zirOverflowArithmetic(
return sema.mod.fail(&block.base, src, "TODO implement Sema.zirOverflowArithmetic", .{});
}
-fn zirSatArithmetic(
- sema: *Sema,
- block: *Scope.Block,
- extended: Zir.Inst.Extended.InstData,
-) CompileError!Air.Inst.Ref {
- const tracy = trace(@src());
- defer tracy.end();
-
- const extra = sema.code.extraData(Zir.Inst.SaturatingArithmetic, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = extra.node };
- return sema.mod.fail(&block.base, src, "TODO implement Sema.zirSatArithmetic", .{});
-}
-
fn analyzeArithmetic(
sema: *Sema,
block: *Scope.Block,
+ /// TODO performance investigation: make this comptime?
zir_tag: Zir.Inst.Tag,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
@@ -5854,7 +6282,7 @@ fn analyzeArithmetic(
lhs_ty.arrayLen(), rhs_ty.arrayLen(),
});
}
- return sema.mod.fail(&block.base, src, "TODO implement support for vectors in zirBinOp", .{});
+ return sema.mod.fail(&block.base, src, "TODO implement support for vectors in Sema.analyzeArithmetic", .{});
} else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) {
return sema.mod.fail(&block.base, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{
lhs_ty, rhs_ty,
@@ -5897,7 +6325,9 @@ fn analyzeArithmetic(
};
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
- const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } });
+ const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
+ .override = &[_]LazySrcLoc{ lhs_src, rhs_src },
+ });
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -5917,86 +6347,581 @@ fn analyzeArithmetic(
});
}
- if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| {
- if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| {
- if (lhs_val.isUndef() or rhs_val.isUndef()) {
- return sema.addConstUndef(resolved_type);
- }
- // incase rhs is 0, simply return lhs without doing any calculations
- // TODO Once division is implemented we should throw an error when dividing by 0.
- if (rhs_val.compareWithZero(.eq)) {
- switch (zir_tag) {
- .add, .addwrap, .sub, .subwrap => {
- return sema.addConstant(scalar_type, lhs_val);
- },
- else => {},
+ const target = sema.mod.getTarget();
+ const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs);
+ const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs);
+ const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
+ switch (zir_tag) {
+ .add => {
+ // For integers:
+ // If either of the operands are zero, then the other operand is
+ // returned, even if it is undefined.
+ // If either of the operands are undefined, it's a compile error
+ // because there is a possible value for which the addition would
+ // overflow (max_int), causing illegal behavior.
+ // For floats: either operand being undef makes the result undef.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
+ return casted_rhs;
+ }
}
- }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ if (is_int) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ } else {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return casted_lhs;
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ if (is_int) {
+ return sema.failWithUseOfUndef(block, lhs_src);
+ } else {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (is_int) {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intAdd(rhs_val, sema.arena),
+ );
+ } else {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena),
+ );
+ }
+ } else break :rs .{ .src = rhs_src, .air_tag = .add };
+ } else break :rs .{ .src = lhs_src, .air_tag = .add };
+ },
+ .addwrap => {
+ // Integers only; floats are checked above.
+ // If either of the operands are zero, the other operand is returned.
+ // If either of the operands are undefined, the result is undefined.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
+ return casted_rhs;
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return casted_lhs;
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.numberAddWrap(rhs_val, scalar_type, sema.arena, target),
+ );
+ } else break :rs .{ .src = lhs_src, .air_tag = .addwrap };
+ } else break :rs .{ .src = rhs_src, .air_tag = .addwrap };
+ },
+ .add_sat => {
+ // Integers only; floats are checked above.
+ // If either of the operands are zero, then the other operand is returned.
+ // If either of the operands are undefined, the result is undefined.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) {
+ return casted_rhs;
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return casted_lhs;
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intAddSat(rhs_val, scalar_type, sema.arena, target),
+ );
+ } else break :rs .{ .src = lhs_src, .air_tag = .add_sat };
+ } else break :rs .{ .src = rhs_src, .air_tag = .add_sat };
+ },
+ .sub => {
+ // For integers:
+ // If the rhs is zero, then the other operand is
+ // returned, even if it is undefined.
+ // If either of the operands are undefined, it's a compile error
+ // because there is a possible value for which the subtraction would
+ // overflow, causing illegal behavior.
+ // For floats: either operand being undef makes the result undef.
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ if (is_int) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ } else {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return casted_lhs;
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ if (is_int) {
+ return sema.failWithUseOfUndef(block, lhs_src);
+ } else {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (is_int) {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intSub(rhs_val, sema.arena),
+ );
+ } else {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatSub(rhs_val, scalar_type, sema.arena),
+ );
+ }
+ } else break :rs .{ .src = rhs_src, .air_tag = .sub };
+ } else break :rs .{ .src = lhs_src, .air_tag = .sub };
+ },
+ .subwrap => {
+ // Integers only; floats are checked above.
+ // If the RHS is zero, then the other operand is returned, even if it is undefined.
+ // If either of the operands are undefined, the result is undefined.
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return casted_lhs;
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.numberSubWrap(rhs_val, scalar_type, sema.arena, target),
+ );
+ } else break :rs .{ .src = rhs_src, .air_tag = .subwrap };
+ } else break :rs .{ .src = lhs_src, .air_tag = .subwrap };
+ },
+ .sub_sat => {
+ // Integers only; floats are checked above.
+ // If the RHS is zero, result is LHS.
+ // If either of the operands are undefined, result is undefined.
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return casted_lhs;
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intSubSat(rhs_val, scalar_type, sema.arena, target),
+ );
+ } else break :rs .{ .src = rhs_src, .air_tag = .sub_sat };
+ } else break :rs .{ .src = lhs_src, .air_tag = .sub_sat };
+ },
+ .div => {
+ // For integers:
+ // If the lhs is zero, then zero is returned regardless of rhs.
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined:
+ // * if lhs type is signed:
+ // * if rhs is comptime-known and not -1, result is undefined
+ // * if rhs is -1 or runtime-known, compile error because there is a
+            //     possible value (min_int / -1) for which division would be
+ // illegal behavior.
+ // * if lhs type is unsigned, undef is returned regardless of rhs.
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef()) {
+ if (lhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) {
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ return sema.addConstUndef(scalar_type);
+ }
- const value = switch (zir_tag) {
- .add => blk: {
- const val = if (is_int)
- try lhs_val.intAdd(rhs_val, sema.arena)
- else
- try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena);
- break :blk val;
- },
- .sub => blk: {
- const val = if (is_int)
- try lhs_val.intSub(rhs_val, sema.arena)
- else
- try lhs_val.floatSub(rhs_val, scalar_type, sema.arena);
- break :blk val;
- },
- .div => blk: {
- const val = if (is_int)
- try lhs_val.intDiv(rhs_val, sema.arena)
- else
- try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena);
- break :blk val;
- },
- .mul => blk: {
- const val = if (is_int)
- try lhs_val.intMul(rhs_val, sema.arena)
- else
- try lhs_val.floatMul(rhs_val, scalar_type, sema.arena);
- break :blk val;
- },
- else => return sema.mod.fail(&block.base, src, "TODO implement comptime arithmetic for operand '{s}'", .{@tagName(zir_tag)}),
- };
-
- log.debug("{s}({}, {}) result: {}", .{ @tagName(zir_tag), lhs_val, rhs_val, value });
-
- return sema.addConstant(scalar_type, value);
- } else {
- try sema.requireRuntimeBlock(block, rhs_src);
+ if (maybe_rhs_val) |rhs_val| {
+ if (is_int) {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intDiv(rhs_val, sema.arena),
+ );
+ } else {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena),
+ );
+ }
+ } else break :rs .{ .src = rhs_src, .air_tag = .div };
+ } else break :rs .{ .src = lhs_src, .air_tag = .div };
+ },
+ .mul => {
+ // For integers:
+ // If either of the operands are zero, the result is zero.
+ // If either of the operands are one, the result is the other
+ // operand, even if it is undefined.
+ // If either of the operands are undefined, it's a compile error
+            // because there is a possible value for which the multiplication would
+ // overflow (max_int), causing illegal behavior.
+ // For floats: either operand being undef makes the result undef.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef()) {
+ if (lhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ if (lhs_val.compare(.eq, Value.one, scalar_type)) {
+ return casted_rhs;
+ }
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ if (is_int) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ } else {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ if (rhs_val.compare(.eq, Value.one, scalar_type)) {
+ return casted_lhs;
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ if (is_int) {
+ return sema.failWithUseOfUndef(block, lhs_src);
+ } else {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ if (is_int) {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intMul(rhs_val, sema.arena),
+ );
+ } else {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatMul(rhs_val, scalar_type, sema.arena),
+ );
+ }
+ } else break :rs .{ .src = lhs_src, .air_tag = .mul };
+ } else break :rs .{ .src = rhs_src, .air_tag = .mul };
+ },
+ .mulwrap => {
+ // Integers only; floats are handled above.
+ // If either of the operands are zero, result is zero.
+ // If either of the operands are one, result is the other operand.
+ // If either of the operands are undefined, result is undefined.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef()) {
+ if (lhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ if (lhs_val.compare(.eq, Value.one, scalar_type)) {
+ return casted_rhs;
+ }
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ if (rhs_val.compare(.eq, Value.one, scalar_type)) {
+ return casted_lhs;
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.numberMulWrap(rhs_val, scalar_type, sema.arena, target),
+ );
+ } else break :rs .{ .src = lhs_src, .air_tag = .mulwrap };
+ } else break :rs .{ .src = rhs_src, .air_tag = .mulwrap };
+ },
+ .mul_sat => {
+ // Integers only; floats are checked above.
+ // If either of the operands are zero, result is zero.
+ // If either of the operands are one, result is the other operand.
+ // If either of the operands are undefined, result is undefined.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef()) {
+ if (lhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ if (lhs_val.compare(.eq, Value.one, scalar_type)) {
+ return casted_rhs;
+ }
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ if (rhs_val.compare(.eq, Value.one, scalar_type)) {
+ return casted_lhs;
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intMulSat(rhs_val, scalar_type, sema.arena, target),
+ );
+ } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat };
+ } else break :rs .{ .src = rhs_src, .air_tag = .mul_sat };
+ },
+ .mod_rem => {
+ // For integers:
+ // Either operand being undef is a compile error because there exists
+ // a possible value (TODO what is it?) that would invoke illegal behavior.
+ // TODO: can lhs zero be handled better?
+ // TODO: can lhs undef be handled better?
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ //
+ // For either one: if the result would be different between @mod and @rem,
+ // then emit a compile error saying you have to pick one.
+ if (is_int) {
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, lhs_src);
+ }
+ if (lhs_val.compareWithZero(.lt)) {
+ return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
+ }
+ } else if (lhs_ty.isSignedInt()) {
+ return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.lt)) {
+ return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intRem(rhs_val, sema.arena),
+ );
+ }
+ break :rs .{ .src = lhs_src, .air_tag = .rem };
+ } else if (rhs_ty.isSignedInt()) {
+ return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
+ } else {
+ break :rs .{ .src = rhs_src, .air_tag = .rem };
+ }
+ }
+ // float operands
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.lt)) {
+ return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef() or lhs_val.compareWithZero(.lt)) {
+ return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
+ }
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatRem(rhs_val, sema.arena),
+ );
+ } else {
+ return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
+ }
+ } else {
+ return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
+ }
+ },
+ .rem => {
+ // For integers:
+ // Either operand being undef is a compile error because there exists
+ // a possible value (TODO what is it?) that would invoke illegal behavior.
+ // TODO: can lhs zero be handled better?
+ // TODO: can lhs undef be handled better?
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ if (is_int) {
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, lhs_src);
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intRem(rhs_val, sema.arena),
+ );
+ }
+ break :rs .{ .src = lhs_src, .air_tag = .rem };
+ } else {
+ break :rs .{ .src = rhs_src, .air_tag = .rem };
+ }
+ }
+ // float operands
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatRem(rhs_val, sema.arena),
+ );
+ } else break :rs .{ .src = rhs_src, .air_tag = .rem };
+ } else break :rs .{ .src = lhs_src, .air_tag = .rem };
+ },
+ .mod => {
+ // For integers:
+ // Either operand being undef is a compile error because there exists
+ // a possible value (TODO what is it?) that would invoke illegal behavior.
+ // TODO: can lhs zero be handled better?
+ // TODO: can lhs undef be handled better?
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ if (is_int) {
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, lhs_src);
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intMod(rhs_val, sema.arena),
+ );
+ }
+ break :rs .{ .src = lhs_src, .air_tag = .mod };
+ } else {
+ break :rs .{ .src = rhs_src, .air_tag = .mod };
+ }
+ }
+ // float operands
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatMod(rhs_val, sema.arena),
+ );
+ } else break :rs .{ .src = rhs_src, .air_tag = .mod };
+ } else break :rs .{ .src = lhs_src, .air_tag = .mod };
+ },
+ else => unreachable,
}
- } else {
- try sema.requireRuntimeBlock(block, lhs_src);
- }
-
- if (zir_tag == .mod_rem) {
- const dirty_lhs = lhs_ty.isSignedInt() or lhs_ty.isFloat();
- const dirty_rhs = rhs_ty.isSignedInt() or rhs_ty.isFloat();
- if (dirty_lhs or dirty_rhs) {
- return sema.mod.fail(&block.base, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty });
- }
- }
-
- const air_tag: Air.Inst.Tag = switch (zir_tag) {
- .add => .add,
- .addwrap => .addwrap,
- .sub => .sub,
- .subwrap => .subwrap,
- .mul => .mul,
- .mulwrap => .mulwrap,
- .div => .div,
- .mod_rem => .rem,
- .rem => .rem,
- else => return sema.mod.fail(&block.base, src, "TODO implement arithmetic for operand '{s}'", .{@tagName(zir_tag)}),
};
- return block.addBinOp(air_tag, casted_lhs, casted_rhs);
+ try sema.requireRuntimeBlock(block, rs.src);
+ return block.addBinOp(rs.air_tag, casted_lhs, casted_rhs);
}
fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -6134,10 +7059,11 @@ fn zirCmpEq(
const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty;
return mod.fail(&block.base, src, "comparison of '{}' with null", .{non_null_type});
}
- if (((lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) or
- (rhs_ty_tag == .EnumLiteral and lhs_ty_tag == .Union)))
- {
- return mod.fail(&block.base, src, "TODO implement equality comparison between a union's tag value and an enum literal", .{});
+ if (lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) {
+ return sema.analyzeCmpUnionTag(block, rhs, rhs_src, lhs, lhs_src, op);
+ }
+ if (rhs_ty_tag == .EnumLiteral and lhs_ty_tag == .Union) {
+ return sema.analyzeCmpUnionTag(block, lhs, lhs_src, rhs, rhs_src, op);
}
if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) {
const runtime_src: LazySrcLoc = src: {
@@ -6178,6 +7104,28 @@ fn zirCmpEq(
return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true);
}
+fn analyzeCmpUnionTag(
+ sema: *Sema,
+ block: *Scope.Block,
+ un: Air.Inst.Ref,
+ un_src: LazySrcLoc,
+ tag: Air.Inst.Ref,
+ tag_src: LazySrcLoc,
+ op: std.math.CompareOperator,
+) CompileError!Air.Inst.Ref {
+ const union_ty = sema.typeOf(un);
+ const union_tag_ty = union_ty.unionTagType() orelse {
+ // TODO note at declaration site that says "union foo is not tagged"
+ return sema.mod.fail(&block.base, un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
+ };
+ // Coerce both the union and the tag to the union's tag type, and then execute the
+ // enum comparison codepath.
+ const coerced_tag = try sema.coerce(block, union_tag_ty, tag, tag_src);
+ const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);
+
+ return sema.cmpSelf(block, coerced_union, coerced_tag, op, un_src, tag_src);
+}
+
/// Only called for non-equality operators. See also `zirCmpEq`.
fn zirCmp(
sema: *Sema,
@@ -6224,10 +7172,21 @@ fn analyzeCmp(
@tagName(op), resolved_type,
});
}
-
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
+ return sema.cmpSelf(block, casted_lhs, casted_rhs, op, lhs_src, rhs_src);
+}
+fn cmpSelf(
+ sema: *Sema,
+ block: *Scope.Block,
+ casted_lhs: Air.Inst.Ref,
+ casted_rhs: Air.Inst.Ref,
+ op: std.math.CompareOperator,
+ lhs_src: LazySrcLoc,
+ rhs_src: LazySrcLoc,
+) CompileError!Air.Inst.Ref {
+ const resolved_type = sema.typeOf(casted_lhs);
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| {
if (lhs_val.isUndef()) return sema.addConstUndef(resolved_type);
@@ -6294,10 +7253,42 @@ fn runtimeBoolCmp(
fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+ const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
+ try sema.resolveTypeLayout(block, src, operand_ty);
const target = sema.mod.getTarget();
- const abi_size = operand_ty.abiSize(target);
+ const abi_size = switch (operand_ty.zigTypeTag()) {
+ .Fn => unreachable,
+ .NoReturn,
+ .Undefined,
+ .Null,
+ .BoundFn,
+ .Opaque,
+ => return sema.mod.fail(&block.base, src, "no size available for type '{}'", .{operand_ty}),
+ .Type,
+ .EnumLiteral,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Void,
+ => 0,
+
+ .Bool,
+ .Int,
+ .Float,
+ .Pointer,
+ .Array,
+ .Struct,
+ .Optional,
+ .ErrorUnion,
+ .ErrorSet,
+ .Enum,
+ .Union,
+ .Vector,
+ .Frame,
+ .AnyFrame,
+ => operand_ty.abiSize(target),
+ };
return sema.addIntUnsigned(Type.initTag(.comptime_int), abi_size);
}
@@ -6315,8 +7306,45 @@ fn zirThis(
block: *Scope.Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
+ const this_decl = block.base.namespace().getDecl();
const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
- return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{});
+ return sema.analyzeDeclVal(block, src, this_decl);
+}
+
+fn zirClosureCapture(
+ sema: *Sema,
+ block: *Scope.Block,
+ inst: Zir.Inst.Index,
+) CompileError!void {
+ // TODO: Compile error when closed over values are modified
+ const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
+ const tv = try sema.resolveInstConst(block, inst_data.src(), inst_data.operand);
+ try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{
+ .ty = try tv.ty.copy(sema.perm_arena),
+ .val = try tv.val.copy(sema.perm_arena),
+ });
+}
+
+fn zirClosureGet(
+ sema: *Sema,
+ block: *Scope.Block,
+ inst: Zir.Inst.Index,
+) CompileError!Air.Inst.Ref {
+ // TODO CLOSURE: Test this with inline functions
+ const inst_data = sema.code.instructions.items(.data)[inst].inst_node;
+ var scope: *CaptureScope = block.src_decl.src_scope.?;
+ // Note: The target closure must be in this scope list.
+ // If it's not here, the zir is invalid, or the list is broken.
+ const tv = while (true) {
+ // Note: We don't need to add a dependency here, because
+ // decls always depend on their lexical parents.
+ if (scope.captures.getPtr(inst_data.inst)) |tv| {
+ break tv;
+ }
+ scope = scope.parent.?;
+ } else unreachable;
+
+ return sema.addConstant(tv.ty, tv.val);
}
fn zirRetAddr(
@@ -6345,19 +7373,80 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
const target = sema.mod.getTarget();
switch (ty.zigTypeTag()) {
+ .Type => return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Type)),
+ .val = Value.initTag(.unreachable_value),
+ }),
+ ),
+ .Void => return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Void)),
+ .val = Value.initTag(.unreachable_value),
+ }),
+ ),
+ .Bool => return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Bool)),
+ .val = Value.initTag(.unreachable_value),
+ }),
+ ),
+ .NoReturn => return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.NoReturn)),
+ .val = Value.initTag(.unreachable_value),
+ }),
+ ),
+ .ComptimeFloat => return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeFloat)),
+ .val = Value.initTag(.unreachable_value),
+ }),
+ ),
+ .ComptimeInt => return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeInt)),
+ .val = Value.initTag(.unreachable_value),
+ }),
+ ),
+ .Undefined => return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Undefined)),
+ .val = Value.initTag(.unreachable_value),
+ }),
+ ),
+ .Null => return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Null)),
+ .val = Value.initTag(.unreachable_value),
+ }),
+ ),
+ .EnumLiteral => return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.EnumLiteral)),
+ .val = Value.initTag(.unreachable_value),
+ }),
+ ),
.Fn => {
+ const info = ty.fnInfo();
const field_values = try sema.arena.alloc(Value, 6);
// calling_convention: CallingConvention,
- field_values[0] = try Value.Tag.enum_field_index.create(
- sema.arena,
- @enumToInt(ty.fnCallingConvention()),
- );
+ field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc));
// alignment: comptime_int,
field_values[1] = try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target));
// is_generic: bool,
- field_values[2] = Value.initTag(.bool_false); // TODO
+ field_values[2] = if (info.is_generic) Value.initTag(.bool_true) else Value.initTag(.bool_false);
// is_var_args: bool,
- field_values[3] = Value.initTag(.bool_false); // TODO
+ field_values[3] = if (info.is_var_args) Value.initTag(.bool_true) else Value.initTag(.bool_false);
// return_type: ?type,
field_values[4] = try Value.Tag.ty.create(sema.arena, ty.fnReturnType());
// args: []const FnArg,
@@ -6366,10 +7455,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(
- sema.arena,
- @enumToInt(@typeInfo(std.builtin.TypeInfo).Union.tag_type.?.Fn),
- ),
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Fn)),
.val = try Value.Tag.@"struct".create(sema.arena, field_values),
}),
);
@@ -6388,10 +7474,92 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(
- sema.arena,
- @enumToInt(@typeInfo(std.builtin.TypeInfo).Union.tag_type.?.Int),
- ),
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Int)),
+ .val = try Value.Tag.@"struct".create(sema.arena, field_values),
+ }),
+ );
+ },
+ .Float => {
+ const field_values = try sema.arena.alloc(Value, 1);
+ // bits: comptime_int,
+ field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(target));
+
+ return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Float)),
+ .val = try Value.Tag.@"struct".create(sema.arena, field_values),
+ }),
+ );
+ },
+ .Pointer => {
+ const info = ty.ptrInfo().data;
+ const field_values = try sema.arena.alloc(Value, 7);
+ // size: Size,
+ field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size));
+ // is_const: bool,
+ field_values[1] = if (!info.mutable) Value.initTag(.bool_true) else Value.initTag(.bool_false);
+ // is_volatile: bool,
+ field_values[2] = if (info.@"volatile") Value.initTag(.bool_true) else Value.initTag(.bool_false);
+ // alignment: comptime_int,
+ field_values[3] = try Value.Tag.int_u64.create(sema.arena, info.@"align");
+ // child: type,
+ field_values[4] = try Value.Tag.ty.create(sema.arena, info.pointee_type);
+ // is_allowzero: bool,
+ field_values[5] = if (info.@"allowzero") Value.initTag(.bool_true) else Value.initTag(.bool_false);
+ // sentinel: anytype,
+ field_values[6] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.initTag(.null_value);
+
+ return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Pointer)),
+ .val = try Value.Tag.@"struct".create(sema.arena, field_values),
+ }),
+ );
+ },
+ .Array => {
+ const info = ty.arrayInfo();
+ const field_values = try sema.arena.alloc(Value, 3);
+ // len: comptime_int,
+ field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len);
+ // child: type,
+ field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type);
+ // sentinel: anytype,
+ field_values[2] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.initTag(.null_value);
+
+ return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Array)),
+ .val = try Value.Tag.@"struct".create(sema.arena, field_values),
+ }),
+ );
+ },
+ .Optional => {
+ const field_values = try sema.arena.alloc(Value, 1);
+ // child: type,
+ field_values[0] = try Value.Tag.ty.create(sema.arena, try ty.optionalChildAlloc(sema.arena));
+
+ return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Optional)),
+ .val = try Value.Tag.@"struct".create(sema.arena, field_values),
+ }),
+ );
+ },
+ .ErrorUnion => {
+ const field_values = try sema.arena.alloc(Value, 2);
+ // error_set: type,
+ field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet());
+ // payload: type,
+ field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload());
+
+ return sema.addConstant(
+ type_info_ty,
+ try Value.Tag.@"union".create(sema.arena, .{
+ .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorUnion)),
.val = try Value.Tag.@"struct".create(sema.arena, field_values),
}),
);
@@ -6808,7 +7976,7 @@ fn analyzeRet(
fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
// extend this swich as additional operators are implemented
return switch (tag) {
- .add, .sub, .mul, .div => true,
+ .add, .sub, .mul, .div, .mod, .rem, .mod_rem => true,
else => false,
};
}
@@ -6819,18 +7987,14 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Comp
const inst_data = sema.code.instructions.items(.data)[inst].ptr_type_simple;
const elem_type = try sema.resolveType(block, .unneeded, inst_data.elem_type);
- const ty = try Module.ptrType(
- sema.arena,
- elem_type,
- null,
- 0,
- 0,
- 0,
- inst_data.is_mutable,
- inst_data.is_allowzero,
- inst_data.is_volatile,
- inst_data.size,
- );
+ const ty = try Type.ptr(sema.arena, .{
+ .pointee_type = elem_type,
+ .@"addrspace" = .generic,
+ .mutable = inst_data.is_mutable,
+ .@"allowzero" = inst_data.is_allowzero,
+ .@"volatile" = inst_data.is_volatile,
+ .size = inst_data.size,
+ });
return sema.addType(ty);
}
@@ -6856,6 +8020,12 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr
break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u32);
} else 0;
+ const address_space = if (inst_data.flags.has_addrspace) blk: {
+ const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
+ extra_i += 1;
+ break :blk try sema.analyzeAddrspace(block, .unneeded, ref, .pointer);
+ } else .generic;
+
const bit_start = if (inst_data.flags.has_bit_range) blk: {
const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
@@ -6873,18 +8043,18 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr
const elem_type = try sema.resolveType(block, .unneeded, extra.data.elem_type);
- const ty = try Module.ptrType(
- sema.arena,
- elem_type,
- sentinel,
- abi_align,
- bit_start,
- bit_end,
- inst_data.flags.is_mutable,
- inst_data.flags.is_allowzero,
- inst_data.flags.is_volatile,
- inst_data.size,
- );
+ const ty = try Type.ptr(sema.arena, .{
+ .pointee_type = elem_type,
+ .sentinel = sentinel,
+ .@"align" = abi_align,
+ .@"addrspace" = address_space,
+ .bit_offset = bit_start,
+ .host_size = bit_end,
+ .mutable = inst_data.flags.is_mutable,
+ .@"allowzero" = inst_data.flags.is_allowzero,
+ .@"volatile" = inst_data.flags.is_volatile,
+ .size = inst_data.size,
+ });
return sema.addType(ty);
}
@@ -7059,8 +8229,49 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- _ = is_ref;
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{});
+ const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
+ const args = sema.code.refSlice(extra.end, extra.data.operands_len);
+
+ var resolved_args = try sema.mod.gpa.alloc(Air.Inst.Ref, args.len);
+ for (args) |arg, i| resolved_args[i] = sema.resolveInst(arg);
+
+ var all_args_comptime = for (resolved_args) |arg| {
+ if ((try sema.resolveMaybeUndefVal(block, src, arg)) == null) break false;
+ } else true;
+
+ if (all_args_comptime) {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+ assert(!(resolved_args.len == 0));
+ const final_ty = try Type.Tag.array.create(anon_decl.arena(), .{ .len = resolved_args.len, .elem_type = sema.typeOf(resolved_args[0]) });
+ const buf = try anon_decl.arena().alloc(Value, resolved_args.len);
+ for (resolved_args) |arg, i| {
+ buf[i] = (try sema.resolveMaybeUndefVal(block, src, arg)).?;
+ }
+
+ const val = try Value.Tag.array.create(anon_decl.arena(), buf);
+ if (is_ref)
+ return sema.analyzeDeclRef(try anon_decl.finish(final_ty, val))
+ else
+ return sema.analyzeDeclVal(block, .unneeded, try anon_decl.finish(final_ty, val));
+ }
+
+ assert(!(resolved_args.len == 0));
+ const array_ty = try Type.Tag.array.create(sema.arena, .{ .len = resolved_args.len, .elem_type = sema.typeOf(resolved_args[0]) });
+ const final_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = array_ty,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
+ const alloc = try block.addTy(.alloc, final_ty);
+
+ for (resolved_args) |arg, i| {
+ const pointer_to_array_at_index = try block.addBinOp(.ptr_elem_ptr, alloc, try sema.addIntUnsigned(Type.initTag(.u64), i));
+ _ = try block.addBinOp(.store, pointer_to_array_at_index, arg);
+ }
+ return if (is_ref)
+ alloc
+ else
+ try sema.analyzeLoad(block, .unneeded, alloc, .unneeded);
}
fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
@@ -7132,8 +8343,11 @@ fn zirFrameAddress(
fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{});
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const ty = try sema.resolveType(block, operand_src, inst_data.operand);
+ const target = sema.mod.getTarget();
+ const abi_align = ty.abiAlignment(target);
+ return sema.addIntUnsigned(Type.comptime_int, abi_align);
}
fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -7246,13 +8460,30 @@ fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE
fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
+ // TODO don't forget the safety check!
return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{});
}
fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{});
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const dest_ty = try sema.resolveType(block, ty_src, extra.lhs);
+ const operand = sema.resolveInst(extra.rhs);
+ const operand_ty = sema.typeOf(operand);
+
+ _ = try sema.checkIntType(block, ty_src, dest_ty);
+ try sema.checkFloatType(block, operand_src, operand_ty);
+
+ if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
+ const target = sema.mod.getTarget();
+ const result_val = try val.intToFloat(sema.arena, dest_ty, target);
+ return sema.addConstant(dest_ty, result_val);
+ }
+
+ try sema.requireRuntimeBlock(block, operand_src);
+ return block.addTyOp(.int_to_float, dest_ty, operand);
}
fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -7351,8 +8582,8 @@ fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
const operand = sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
const mod = sema.mod;
- const dest_is_comptime_int = try sema.requireIntegerType(block, dest_ty_src, dest_ty);
- const src_is_comptime_int = try sema.requireIntegerType(block, operand_src, operand_ty);
+ const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_ty);
+ const src_is_comptime_int = try sema.checkIntType(block, operand_src, operand_ty);
if (dest_is_comptime_int) {
return sema.coerce(block, dest_ty, operand, operand_src);
@@ -7410,14 +8641,56 @@ fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE
fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{});
+ const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const operand = sema.resolveInst(inst_data.operand);
+ const operand_ty = sema.typeOf(operand);
+ // TODO implement support for vectors
+ if (operand_ty.zigTypeTag() != .Int) {
+ return sema.mod.fail(&block.base, ty_src, "expected integer type, found '{}'", .{
+ operand_ty,
+ });
+ }
+ const target = sema.mod.getTarget();
+ const bits = operand_ty.intInfo(target).bits;
+ if (bits == 0) return Air.Inst.Ref.zero;
+
+ const result_ty = try Type.smallestUnsignedInt(sema.arena, bits);
+
+ const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
+ if (val.isUndef()) return sema.addConstUndef(result_ty);
+ return sema.addIntUnsigned(result_ty, val.clz(operand_ty, target));
+ } else operand_src;
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addTyOp(.clz, result_ty, operand);
}
fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{});
+ const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const operand = sema.resolveInst(inst_data.operand);
+ const operand_ty = sema.typeOf(operand);
+ // TODO implement support for vectors
+ if (operand_ty.zigTypeTag() != .Int) {
+ return sema.mod.fail(&block.base, ty_src, "expected integer type, found '{}'", .{
+ operand_ty,
+ });
+ }
+ const target = sema.mod.getTarget();
+ const bits = operand_ty.intInfo(target).bits;
+ if (bits == 0) return Air.Inst.Ref.zero;
+
+ const result_ty = try Type.smallestUnsignedInt(sema.arena, bits);
+
+ const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
+ if (val.isUndef()) return sema.addConstUndef(result_ty);
+ return sema.mod.fail(&block.base, operand_src, "TODO: implement comptime @ctz", .{});
+ } else operand_src;
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addTyOp(.ctz, result_ty, operand);
}
fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -7456,22 +8729,6 @@ fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{});
}
-fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{});
-}
-
-fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- return sema.zirArithmetic(block, inst);
-}
-
-fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{});
-}
-
fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
@@ -7490,6 +8747,29 @@ fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr
return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{});
}
+/// Returns `true` if the type was a comptime_int.
+fn checkIntType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) CompileError!bool {
+ switch (ty.zigTypeTag()) {
+ .ComptimeInt => return true,
+ .Int => return false,
+ else => return sema.mod.fail(&block.base, src, "expected integer type, found '{}'", .{ty}),
+ }
+}
+
+fn checkFloatType(
+ sema: *Sema,
+ block: *Scope.Block,
+ ty_src: LazySrcLoc,
+ ty: Type,
+) CompileError!void {
+ switch (ty.zigTypeTag()) {
+ .ComptimeFloat, .Float => {},
+ else => return sema.mod.fail(&block.base, ty_src, "expected float type, found '{}'", .{
+ ty,
+ }),
+ }
+}
+
fn checkAtomicOperandType(
sema: *Sema,
block: *Scope.Block,
@@ -7501,7 +8781,7 @@ fn checkAtomicOperandType(
const max_atomic_bits = target_util.largestAtomicBits(target);
const int_ty = switch (ty.zigTypeTag()) {
.Int => ty,
- .Enum => ty.enumTagType(&buffer),
+ .Enum => ty.intTagType(&buffer),
.Float => {
const bit_count = ty.floatBits(target);
if (bit_count > max_atomic_bits) {
@@ -7515,12 +8795,16 @@ fn checkAtomicOperandType(
return;
},
.Bool => return, // Will be treated as `u8`.
- else => return sema.mod.fail(
- &block.base,
- ty_src,
- "expected bool, integer, float, enum, or pointer type; found {}",
- .{ty},
- ),
+ else => {
+ if (ty.isPtrAtRuntime()) return;
+
+ return sema.mod.fail(
+ &block.base,
+ ty_src,
+ "expected bool, integer, float, enum, or pointer type; found {}",
+ .{ty},
+ );
+ },
};
const bit_count = int_ty.intInfo(target).bits;
if (bit_count > max_atomic_bits) {
@@ -7533,6 +8817,31 @@ fn checkAtomicOperandType(
}
}
+fn resolveExportOptions(
+ sema: *Sema,
+ block: *Scope.Block,
+ src: LazySrcLoc,
+ zir_ref: Zir.Inst.Ref,
+) CompileError!std.builtin.ExportOptions {
+ const export_options_ty = try sema.getBuiltinType(block, src, "ExportOptions");
+ const air_ref = sema.resolveInst(zir_ref);
+ const coerced = try sema.coerce(block, export_options_ty, air_ref, src);
+ const val = try sema.resolveConstValue(block, src, coerced);
+ const fields = val.castTag(.@"struct").?.data;
+ const struct_obj = export_options_ty.castTag(.@"struct").?.data;
+ const name_index = struct_obj.fields.getIndex("name").?;
+ const linkage_index = struct_obj.fields.getIndex("linkage").?;
+ const section_index = struct_obj.fields.getIndex("section").?;
+ if (!fields[section_index].isNull()) {
+ return sema.mod.fail(&block.base, src, "TODO: implement exporting with linksection", .{});
+ }
+ return std.builtin.ExportOptions{
+ .name = try fields[name_index].toAllocatedBytes(sema.arena),
+ .linkage = fields[linkage_index].toEnum(std.builtin.GlobalLinkage),
+ .section = null, // TODO
+ };
+}
+
fn resolveAtomicOrder(
sema: *Sema,
block: *Scope.Block,
@@ -7546,6 +8855,19 @@ fn resolveAtomicOrder(
return val.toEnum(std.builtin.AtomicOrder);
}
+fn resolveAtomicRmwOp(
+ sema: *Sema,
+ block: *Scope.Block,
+ src: LazySrcLoc,
+ zir_ref: Zir.Inst.Ref,
+) CompileError!std.builtin.AtomicRmwOp {
+ const atomic_rmw_op_ty = try sema.getBuiltinType(block, src, "AtomicRmwOp");
+ const air_ref = sema.resolveInst(zir_ref);
+ const coerced = try sema.coerce(block, atomic_rmw_op_ty, air_ref, src);
+ const val = try sema.resolveConstValue(block, src, coerced);
+ return val.toEnum(std.builtin.AtomicRmwOp);
+}
+
fn zirCmpxchg(
sema: *Sema,
block: *Scope.Block,
@@ -7604,6 +8926,8 @@ fn zirCmpxchg(
if (try sema.resolveMaybeUndefVal(block, expected_src, expected_value)) |expected_val| {
if (try sema.resolveMaybeUndefVal(block, new_value_src, new_value)) |new_val| {
if (expected_val.isUndef() or new_val.isUndef()) {
+ // TODO: this should probably cause the memory stored at the pointer
+ // to become undef as well
return sema.addConstUndef(result_ty);
}
const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src;
@@ -7661,20 +8985,160 @@ fn zirSelect(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErro
fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{});
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ // zig fmt: off
+ const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
+ // zig fmt: on
+ const ptr = sema.resolveInst(extra.lhs);
+ const elem_ty = sema.typeOf(ptr).elemType();
+ try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
+ const order = try sema.resolveAtomicOrder(block, order_src, extra.rhs);
+
+ switch (order) {
+ .Release, .AcqRel => {
+ return sema.mod.fail(
+ &block.base,
+ order_src,
+ "@atomicLoad atomic ordering must not be Release or AcqRel",
+ .{},
+ );
+ },
+ else => {},
+ }
+
+ if (try sema.typeHasOnePossibleValue(block, elem_ty_src, elem_ty)) |val| {
+ return sema.addConstant(elem_ty, val);
+ }
+
+ if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
+ if (try ptr_val.pointerDeref(sema.arena)) |elem_val| {
+ return sema.addConstant(elem_ty, elem_val);
+ }
+ }
+
+ try sema.requireRuntimeBlock(block, ptr_src);
+ return block.addInst(.{
+ .tag = .atomic_load,
+ .data = .{ .atomic_load = .{
+ .ptr = ptr,
+ .order = order,
+ } },
+ });
}
fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{});
+ // zig fmt: off
+ const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const op_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
+ const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
+ const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
+ // zig fmt: on
+ const ptr = sema.resolveInst(extra.ptr);
+ const operand_ty = sema.typeOf(ptr).elemType();
+ try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty);
+ const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);
+
+ switch (operand_ty.zigTypeTag()) {
+ .Enum => if (op != .Xchg) {
+ return mod.fail(&block.base, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
+ },
+ .Bool => if (op != .Xchg) {
+ return mod.fail(&block.base, op_src, "@atomicRmw with bool only allowed with .Xchg", .{});
+ },
+ .Float => switch (op) {
+ .Xchg, .Add, .Sub => {},
+ else => return mod.fail(&block.base, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, and .Sub", .{}),
+ },
+ else => {},
+ }
+ const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src);
+ const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering);
+
+ if (order == .Unordered) {
+ return mod.fail(&block.base, order_src, "@atomicRmw atomic ordering must not be Unordered", .{});
+ }
+
+ // special case zero bit types
+ if (try sema.typeHasOnePossibleValue(block, operand_ty_src, operand_ty)) |val| {
+ return sema.addConstant(operand_ty, val);
+ }
+
+ const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
+ if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |operand_val| {
+ const target = sema.mod.getTarget();
+ const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src;
+ const new_val = switch (op) {
+ // zig fmt: off
+ .Xchg => operand_val,
+ .Add => try stored_val.numberAddWrap(operand_val, operand_ty, sema.arena, target),
+ .Sub => try stored_val.numberSubWrap(operand_val, operand_ty, sema.arena, target),
+ .And => try stored_val.bitwiseAnd (operand_val, sema.arena),
+ .Nand => try stored_val.bitwiseNand (operand_val, operand_ty, sema.arena, target),
+ .Or => try stored_val.bitwiseOr (operand_val, sema.arena),
+ .Xor => try stored_val.bitwiseXor (operand_val, sema.arena),
+ .Max => try stored_val.numberMax (operand_val, sema.arena),
+ .Min => try stored_val.numberMin (operand_val, sema.arena),
+ // zig fmt: on
+ };
+ try sema.storePtrVal(block, src, ptr_val, new_val, operand_ty);
+ return sema.addConstant(operand_ty, stored_val);
+ } else break :rs operand_src;
+ } else ptr_src;
+
+ const flags: u32 = @as(u32, @enumToInt(order)) | (@as(u32, @enumToInt(op)) << 3);
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addInst(.{
+ .tag = .atomic_rmw,
+ .data = .{ .pl_op = .{
+ .operand = ptr,
+ .payload = try sema.addExtra(Air.AtomicRmw{
+ .operand = operand,
+ .flags = flags,
+ }),
+ } },
+ });
}
-fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data;
const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{});
+ // zig fmt: off
+ const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
+ const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
+ // zig fmt: on
+ const ptr = sema.resolveInst(extra.ptr);
+ const operand_ty = sema.typeOf(ptr).elemType();
+ try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty);
+ const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src);
+ const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering);
+
+ const air_tag: Air.Inst.Tag = switch (order) {
+ .Acquire, .AcqRel => {
+ return sema.mod.fail(
+ &block.base,
+ order_src,
+ "@atomicStore atomic ordering must not be Acquire or AcqRel",
+ .{},
+ );
+ },
+ .Unordered => .atomic_store_unordered,
+ .Monotonic => .atomic_store_monotonic,
+ .Release => .atomic_store_release,
+ .SeqCst => .atomic_store_seq_cst,
+ };
+
+ return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
}
fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -7707,16 +9171,115 @@ fn zirMaximum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr
return sema.mod.fail(&block.base, src, "TODO: Sema.zirMaximum", .{});
}
-fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data;
const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{});
+ const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
+ const dest_ptr = sema.resolveInst(extra.dest);
+ const dest_ptr_ty = sema.typeOf(dest_ptr);
+
+ if (dest_ptr_ty.zigTypeTag() != .Pointer) {
+ return sema.mod.fail(&block.base, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty});
+ }
+ if (dest_ptr_ty.isConstPtr()) {
+ return sema.mod.fail(&block.base, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty});
+ }
+
+ const uncasted_src_ptr = sema.resolveInst(extra.source);
+ const uncasted_src_ptr_ty = sema.typeOf(uncasted_src_ptr);
+ if (uncasted_src_ptr_ty.zigTypeTag() != .Pointer) {
+ return sema.mod.fail(&block.base, src_src, "expected pointer, found '{}'", .{
+ uncasted_src_ptr_ty,
+ });
+ }
+ const src_ptr_info = uncasted_src_ptr_ty.ptrInfo().data;
+ const wanted_src_ptr_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = dest_ptr_ty.elemType2(),
+ .@"align" = src_ptr_info.@"align",
+ .@"addrspace" = src_ptr_info.@"addrspace",
+ .mutable = false,
+ .@"allowzero" = src_ptr_info.@"allowzero",
+ .@"volatile" = src_ptr_info.@"volatile",
+ .size = .Many,
+ });
+ const src_ptr = try sema.coerce(block, wanted_src_ptr_ty, uncasted_src_ptr, src_src);
+ const len = try sema.coerce(block, Type.initTag(.usize), sema.resolveInst(extra.byte_count), len_src);
+
+ const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr);
+ const maybe_src_ptr_val = try sema.resolveDefinedValue(block, src_src, src_ptr);
+ const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len);
+
+ const runtime_src = if (maybe_dest_ptr_val) |dest_ptr_val| rs: {
+ if (maybe_src_ptr_val) |src_ptr_val| {
+ if (maybe_len_val) |len_val| {
+ _ = dest_ptr_val;
+ _ = src_ptr_val;
+ _ = len_val;
+ return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy at comptime", .{});
+ } else break :rs len_src;
+ } else break :rs src_src;
+ } else dest_src;
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ _ = try block.addInst(.{
+ .tag = .memcpy,
+ .data = .{ .pl_op = .{
+ .operand = dest_ptr,
+ .payload = try sema.addExtra(Air.Bin{
+ .lhs = src_ptr,
+ .rhs = len,
+ }),
+ } },
+ });
}
-fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data;
const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{});
+ const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
+ const dest_ptr = sema.resolveInst(extra.dest);
+ const dest_ptr_ty = sema.typeOf(dest_ptr);
+ if (dest_ptr_ty.zigTypeTag() != .Pointer) {
+ return sema.mod.fail(&block.base, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty});
+ }
+ if (dest_ptr_ty.isConstPtr()) {
+ return sema.mod.fail(&block.base, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty});
+ }
+ const elem_ty = dest_ptr_ty.elemType2();
+ const value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.byte), value_src);
+ const len = try sema.coerce(block, Type.initTag(.usize), sema.resolveInst(extra.byte_count), len_src);
+
+ const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr);
+ const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len);
+
+ const runtime_src = if (maybe_dest_ptr_val) |ptr_val| rs: {
+ if (maybe_len_val) |len_val| {
+ if (try sema.resolveMaybeUndefVal(block, value_src, value)) |val| {
+ _ = ptr_val;
+ _ = len_val;
+ _ = val;
+ return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset at comptime", .{});
+ } else break :rs value_src;
+ } else break :rs len_src;
+ } else dest_src;
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ _ = try block.addInst(.{
+ .tag = .memset,
+ .data = .{ .pl_op = .{
+ .operand = dest_ptr,
+ .payload = try sema.addExtra(Air.Bin{
+ .lhs = value,
+ .rhs = len,
+ }),
+ } },
+ });
}
fn zirMinimum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -7761,7 +9324,6 @@ fn zirVarExtended(
const mut_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at mut token
const init_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at init expr
const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small);
- const var_ty = try sema.resolveType(block, ty_src, extra.data.var_type);
var extra_index: usize = extra.end;
@@ -7781,19 +9343,29 @@ fn zirVarExtended(
// break :blk align_tv.val;
//} else Value.initTag(.null_value);
- const init_val: Value = if (small.has_init) blk: {
+ const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: {
const init_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
- const init_air_inst = sema.resolveInst(init_ref);
- break :blk (try sema.resolveMaybeUndefVal(block, init_src, init_air_inst)) orelse
+ break :blk sema.resolveInst(init_ref);
+ } else .none;
+
+ const have_ty = extra.data.var_type != .none;
+ const var_ty = if (have_ty)
+ try sema.resolveType(block, ty_src, extra.data.var_type)
+ else
+ sema.typeOf(uncasted_init);
+
+ const init_val = if (uncasted_init != .none) blk: {
+ const init = if (have_ty)
+ try sema.coerce(block, var_ty, uncasted_init, init_src)
+ else
+ uncasted_init;
+
+ break :blk (try sema.resolveMaybeUndefVal(block, init_src, init)) orelse
return sema.failWithNeededComptime(block, init_src);
} else Value.initTag(.unreachable_value);
- if (!var_ty.isValidVarType(small.is_extern)) {
- return sema.mod.fail(&block.base, mut_src, "variable of type '{}' must be const", .{
- var_ty,
- });
- }
+ try sema.validateVarType(block, mut_src, var_ty, small.is_extern);
if (lib_name != null) {
// Look at the sema code for functions which has this logic, it just needs to
@@ -7890,7 +9462,10 @@ fn zirCUndef(
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src: LazySrcLoc = .{ .node_offset = extra.node };
- return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{});
+
+ const name = try sema.resolveConstString(block, src, extra.operand);
+ try block.c_import_buf.?.writer().print("#undef {s}\n", .{name});
+ return Air.Inst.Ref.void_value;
}
fn zirCInclude(
@@ -7900,7 +9475,10 @@ fn zirCInclude(
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src: LazySrcLoc = .{ .node_offset = extra.node };
- return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{});
+
+ const name = try sema.resolveConstString(block, src, extra.operand);
+ try block.c_import_buf.?.writer().print("#include <{s}>\n", .{name});
+ return Air.Inst.Ref.void_value;
}
fn zirCDefine(
@@ -7910,7 +9488,15 @@ fn zirCDefine(
) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src: LazySrcLoc = .{ .node_offset = extra.node };
- return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{});
+
+ const name = try sema.resolveConstString(block, src, extra.lhs);
+ if (sema.typeOf(sema.resolveInst(extra.rhs)).zigTypeTag() != .Void) {
+ const value = try sema.resolveConstString(block, src, extra.rhs);
+ try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value });
+ } else {
+ try block.c_import_buf.?.writer().print("#define {s}\n", .{name});
+ }
+ return Air.Inst.Ref.void_value;
}
fn zirWasmMemorySize(
@@ -7956,17 +9542,54 @@ fn requireRuntimeBlock(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) !void
try sema.requireFunctionBlock(block, src);
}
-fn requireIntegerType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) !bool {
- switch (ty.zigTypeTag()) {
- .ComptimeInt => return true,
- .Int => return false,
- else => return sema.mod.fail(&block.base, src, "expected integer type, found '{}'", .{ty}),
- }
-}
+/// Emit a compile error if type cannot be used for a runtime variable.
+fn validateVarType(
+ sema: *Sema,
+ block: *Scope.Block,
+ src: LazySrcLoc,
+ var_ty: Type,
+ is_extern: bool,
+) CompileError!void {
+ var ty = var_ty;
+ const ok: bool = while (true) switch (ty.zigTypeTag()) {
+ .Bool,
+ .Int,
+ .Float,
+ .ErrorSet,
+ .Enum,
+ .Frame,
+ .AnyFrame,
+ => break true,
-fn validateVarType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) !void {
- if (!ty.isValidVarType(false)) {
- return sema.mod.fail(&block.base, src, "variable of type '{}' must be const or comptime", .{ty});
+ .BoundFn,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .EnumLiteral,
+ .NoReturn,
+ .Type,
+ .Void,
+ .Undefined,
+ .Null,
+ => break false,
+
+ .Opaque => break is_extern,
+
+ .Optional => {
+ var buf: Type.Payload.ElemType = undefined;
+ const child_ty = ty.optionalChild(&buf);
+ return validateVarType(sema, block, src, child_ty, is_extern);
+ },
+ .Pointer, .Array, .Vector => ty = ty.elemType(),
+ .ErrorUnion => ty = ty.errorUnionPayload(),
+
+ .Fn => @panic("TODO fn validateVarType"),
+ .Struct, .Union => {
+ const resolved_ty = try sema.resolveTypeFields(block, src, ty);
+ break !resolved_ty.requiresComptime();
+ },
+ } else unreachable; // TODO should not need else unreachable
+ if (!ok) {
+ return sema.mod.fail(&block.base, src, "variable of type '{}' must be const or comptime", .{var_ty});
}
}
@@ -7990,6 +9613,7 @@ fn addSafetyCheck(
var fail_block: Scope.Block = .{
.parent = parent_block,
.sema = sema,
+ .wip_capture_scope = parent_block.wip_capture_scope,
.src_decl = parent_block.src_decl,
.instructions = .{},
.inlining = parent_block.inlining,
@@ -8067,7 +9691,10 @@ fn panicWithMsg(
const panic_fn = try sema.getBuiltin(block, src, "panic");
const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty);
- const ptr_stack_trace_ty = try Module.simplePtrType(arena, stack_trace_ty, true, .One);
+ const ptr_stack_trace_ty = try Type.ptr(arena, .{
+ .pointee_type = stack_trace_ty,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant), // TODO might need a place that is more dynamic
+ });
const null_stack_trace = try sema.addConstant(
try Module.optionalType(arena, ptr_stack_trace_ty),
Value.initTag(.null_value),
@@ -8083,7 +9710,7 @@ fn safetyPanic(
block: *Scope.Block,
src: LazySrcLoc,
panic_id: PanicId,
-) !Zir.Inst.Index {
+) CompileError!Zir.Inst.Index {
const msg = switch (panic_id) {
.unreach => "reached unreachable code",
.unwrap_null => "attempt to use null value",
@@ -8151,7 +9778,7 @@ fn fieldVal(
.Pointer => switch (object_ty.ptrSize()) {
.Slice => {
if (mem.eql(u8, field_name, "ptr")) {
- const buf = try arena.create(Type.Payload.ElemType);
+ const buf = try arena.create(Type.SlicePtrFieldTypeBuffer);
const result_ty = object_ty.slicePtrFieldType(buf);
if (try sema.resolveMaybeUndefVal(block, object_src, object)) |val| {
if (val.isUndef()) return sema.addConstUndef(result_ty);
@@ -8185,21 +9812,32 @@ fn fieldVal(
}
},
.One => {
- const elem_ty = object_ty.elemType();
- if (elem_ty.zigTypeTag() == .Array) {
- if (mem.eql(u8, field_name, "len")) {
- return sema.addConstant(
- Type.initTag(.comptime_int),
- try Value.Tag.int_u64.create(arena, elem_ty.arrayLen()),
- );
- } else {
- return mod.fail(
- &block.base,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
- }
+ const ptr_child = object_ty.elemType();
+ switch (ptr_child.zigTypeTag()) {
+ .Array => {
+ if (mem.eql(u8, field_name, "len")) {
+ return sema.addConstant(
+ Type.initTag(.comptime_int),
+ try Value.Tag.int_u64.create(arena, ptr_child.arrayLen()),
+ );
+ } else {
+ return mod.fail(
+ &block.base,
+ field_name_src,
+ "no member named '{s}' in '{}'",
+ .{ field_name, object_ty },
+ );
+ }
+ },
+ .Struct => {
+ const struct_ptr_deref = try sema.analyzeLoad(block, src, object, object_src);
+ return sema.structFieldVal(block, src, struct_ptr_deref, field_name, field_name_src, ptr_child);
+ },
+ .Union => {
+ const union_ptr_deref = try sema.analyzeLoad(block, src, object, object_src);
+ return sema.unionFieldVal(block, src, union_ptr_deref, field_name, field_name_src, ptr_child);
+ },
+ else => {},
}
},
.Many, .C => {},
@@ -8323,9 +9961,8 @@ fn fieldPtr(
);
}
},
- .Pointer => {
- const ptr_child = object_ty.elemType();
- if (ptr_child.isSlice()) {
+ .Pointer => switch (object_ty.ptrSize()) {
+ .Slice => {
// Here for the ptr and len fields what we need to do is the situation
// when a temporary has its address taken, e.g. `&a[c..d].len`.
// This value may be known at compile-time or runtime. In the former
@@ -8355,26 +9992,39 @@ fn fieldPtr(
.{ field_name, object_ty },
);
}
- } else switch (ptr_child.zigTypeTag()) {
- .Array => {
- if (mem.eql(u8, field_name, "len")) {
- var anon_decl = try block.startAnonDecl();
- defer anon_decl.deinit();
- return sema.analyzeDeclRef(try anon_decl.finish(
- Type.initTag(.comptime_int),
- try Value.Tag.int_u64.create(anon_decl.arena(), ptr_child.arrayLen()),
- ));
- } else {
- return mod.fail(
- &block.base,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
- }
- },
- else => {},
- }
+ },
+ .One => {
+ const ptr_child = object_ty.elemType();
+ switch (ptr_child.zigTypeTag()) {
+ .Array => {
+ if (mem.eql(u8, field_name, "len")) {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+ return sema.analyzeDeclRef(try anon_decl.finish(
+ Type.initTag(.comptime_int),
+ try Value.Tag.int_u64.create(anon_decl.arena(), ptr_child.arrayLen()),
+ ));
+ } else {
+ return mod.fail(
+ &block.base,
+ field_name_src,
+ "no member named '{s}' in '{}'",
+ .{ field_name, object_ty },
+ );
+ }
+ },
+ .Struct => {
+ const struct_ptr_deref = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
+ return sema.structFieldPtr(block, src, struct_ptr_deref, field_name, field_name_src, ptr_child);
+ },
+ .Union => {
+ const union_ptr_deref = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
+ return sema.unionFieldPtr(block, src, union_ptr_deref, field_name, field_name_src, ptr_child);
+ },
+ else => {},
+ }
+ },
+ .Many, .C => {},
},
.Type => {
_ = try sema.resolveConstValue(block, object_ptr_src, object_ptr);
@@ -8467,6 +10117,148 @@ fn fieldPtr(
return mod.fail(&block.base, src, "type '{}' does not support field access", .{object_ty});
}
+fn fieldCallBind(
+ sema: *Sema,
+ block: *Scope.Block,
+ src: LazySrcLoc,
+ raw_ptr: Air.Inst.Ref,
+ field_name: []const u8,
+ field_name_src: LazySrcLoc,
+) CompileError!Air.Inst.Ref {
+ // When editing this function, note that there is corresponding logic to be edited
+ // in `fieldVal`. This function takes a pointer and returns a pointer.
+
+ const mod = sema.mod;
+ const raw_ptr_src = src; // TODO better source location
+ const raw_ptr_ty = sema.typeOf(raw_ptr);
+ const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and raw_ptr_ty.ptrSize() == .One)
+ raw_ptr_ty.childType()
+ else
+ return mod.fail(&block.base, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty});
+
+ // Optionally dereference a second pointer to get the concrete type.
+ const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One;
+ const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty;
+ const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
+ const object_ptr = if (is_double_ptr)
+ try sema.analyzeLoad(block, src, raw_ptr, src)
+ else
+ raw_ptr;
+
+ const arena = sema.arena;
+ find_field: {
+ switch (concrete_ty.zigTypeTag()) {
+ .Struct => {
+ const struct_ty = try sema.resolveTypeFields(block, src, concrete_ty);
+ const struct_obj = struct_ty.castTag(.@"struct").?.data;
+
+ const field_index = struct_obj.fields.getIndex(field_name) orelse
+ break :find_field;
+ const field = struct_obj.fields.values()[field_index];
+
+ const ptr_field_ty = try Type.ptr(arena, .{
+ .pointee_type = field.ty,
+ .mutable = ptr_ty.ptrIsMutable(),
+ .@"addrspace" = ptr_ty.ptrAddressSpace(),
+ });
+
+ if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
+ const pointer = try sema.addConstant(
+ ptr_field_ty,
+ try Value.Tag.field_ptr.create(arena, .{
+ .container_ptr = struct_ptr_val,
+ .field_index = field_index,
+ }),
+ );
+ return sema.analyzeLoad(block, src, pointer, src);
+ }
+
+ try sema.requireRuntimeBlock(block, src);
+ const ptr_inst = ptr_inst: {
+ const tag: Air.Inst.Tag = switch (field_index) {
+ 0 => .struct_field_ptr_index_0,
+ 1 => .struct_field_ptr_index_1,
+ 2 => .struct_field_ptr_index_2,
+ 3 => .struct_field_ptr_index_3,
+ else => {
+ break :ptr_inst try block.addInst(.{
+ .tag = .struct_field_ptr,
+ .data = .{ .ty_pl = .{
+ .ty = try sema.addType(ptr_field_ty),
+ .payload = try sema.addExtra(Air.StructField{
+ .struct_operand = object_ptr,
+ .field_index = @intCast(u32, field_index),
+ }),
+ } },
+ });
+ },
+ };
+ break :ptr_inst try block.addInst(.{
+ .tag = tag,
+ .data = .{ .ty_op = .{
+ .ty = try sema.addType(ptr_field_ty),
+ .operand = object_ptr,
+ } },
+ });
+ };
+ return sema.analyzeLoad(block, src, ptr_inst, src);
+ },
+ .Union => return sema.mod.fail(&block.base, src, "TODO implement field calls on unions", .{}),
+ .Type => {
+ const namespace = try sema.analyzeLoad(block, src, object_ptr, src);
+ return sema.fieldVal(block, src, namespace, field_name, field_name_src);
+ },
+ else => {},
+ }
+ }
+
+ // If we get here, we need to look for a decl in the struct type instead.
+ switch (concrete_ty.zigTypeTag()) {
+ .Struct, .Opaque, .Union, .Enum => {
+ if (concrete_ty.getNamespace()) |namespace| {
+ if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
+ const decl_val = try sema.analyzeLoad(block, src, inst, src);
+ const decl_type = sema.typeOf(decl_val);
+ if (decl_type.zigTypeTag() == .Fn and
+ decl_type.fnParamLen() >= 1)
+ {
+ const first_param_type = decl_type.fnParamType(0);
+ const first_param_tag = first_param_type.tag();
+ // zig fmt: off
+ if (first_param_tag == .var_args_param or
+ first_param_tag == .generic_poison or (
+ first_param_type.zigTypeTag() == .Pointer and
+ first_param_type.ptrSize() == .One and
+ first_param_type.childType().eql(concrete_ty)))
+ {
+ // zig fmt: on
+ // TODO: bound fn calls on rvalues should probably
+ // generate a by-value argument somehow.
+ const ty = Type.Tag.bound_fn.init();
+ const value = try Value.Tag.bound_fn.create(arena, .{
+ .func_inst = decl_val,
+ .arg0_inst = object_ptr,
+ });
+ return sema.addConstant(ty, value);
+ } else if (first_param_type.eql(concrete_ty)) {
+ var deref = try sema.analyzeLoad(block, src, object_ptr, src);
+ const ty = Type.Tag.bound_fn.init();
+ const value = try Value.Tag.bound_fn.create(arena, .{
+ .func_inst = decl_val,
+ .arg0_inst = deref,
+ });
+ return sema.addConstant(ty, value);
+ }
+ }
+ }
+ }
+ },
+ else => {},
+ }
+
+ return mod.fail(&block.base, src, "type '{}' has no field or member function named '{s}'", .{ concrete_ty, field_name });
+}
+
fn namespaceLookup(
sema: *Sema,
block: *Scope.Block,
@@ -8516,13 +10308,19 @@ fn structFieldPtr(
const arena = sema.arena;
assert(unresolved_struct_ty.zigTypeTag() == .Struct);
+ const struct_ptr_ty = sema.typeOf(struct_ptr);
const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty);
const struct_obj = struct_ty.castTag(.@"struct").?.data;
- const field_index = struct_obj.fields.getIndex(field_name) orelse
+ const field_index_big = struct_obj.fields.getIndex(field_name) orelse
return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name);
+ const field_index = @intCast(u32, field_index_big);
const field = struct_obj.fields.values()[field_index];
- const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One);
+ const ptr_field_ty = try Type.ptr(arena, .{
+ .pointee_type = field.ty,
+ .mutable = struct_ptr_ty.ptrIsMutable(),
+ .@"addrspace" = struct_ptr_ty.ptrAddressSpace(),
+ });
if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
return sema.addConstant(
@@ -8535,31 +10333,7 @@ fn structFieldPtr(
}
try sema.requireRuntimeBlock(block, src);
- const tag: Air.Inst.Tag = switch (field_index) {
- 0 => .struct_field_ptr_index_0,
- 1 => .struct_field_ptr_index_1,
- 2 => .struct_field_ptr_index_2,
- 3 => .struct_field_ptr_index_3,
- else => {
- return block.addInst(.{
- .tag = .struct_field_ptr,
- .data = .{ .ty_pl = .{
- .ty = try sema.addType(ptr_field_ty),
- .payload = try sema.addExtra(Air.StructField{
- .struct_operand = struct_ptr,
- .field_index = @intCast(u32, field_index),
- }),
- } },
- });
- },
- };
- return block.addInst(.{
- .tag = tag,
- .data = .{ .ty_op = .{
- .ty = try sema.addType(ptr_field_ty),
- .operand = struct_ptr,
- } },
- });
+ return block.addStructFieldPtr(struct_ptr, field_index, ptr_field_ty);
}
fn structFieldVal(
@@ -8609,18 +10383,23 @@ fn unionFieldPtr(
field_name_src: LazySrcLoc,
unresolved_union_ty: Type,
) CompileError!Air.Inst.Ref {
- const mod = sema.mod;
const arena = sema.arena;
assert(unresolved_union_ty.zigTypeTag() == .Union);
+ const union_ptr_ty = sema.typeOf(union_ptr);
const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty);
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
- const field_index = union_obj.fields.getIndex(field_name) orelse
+ const field_index_big = union_obj.fields.getIndex(field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name);
+ const field_index = @intCast(u32, field_index_big);
const field = union_obj.fields.values()[field_index];
- const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One);
+ const ptr_field_ty = try Type.ptr(arena, .{
+ .pointee_type = field.ty,
+ .mutable = union_ptr_ty.ptrIsMutable(),
+ .@"addrspace" = union_ptr_ty.ptrAddressSpace(),
+ });
if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| {
// TODO detect inactive union field and emit compile error
@@ -8634,7 +10413,7 @@ fn unionFieldPtr(
}
try sema.requireRuntimeBlock(block, src);
- return mod.fail(&block.base, src, "TODO implement runtime union field access", .{});
+ return block.addStructFieldPtr(union_ptr, field_index, ptr_field_ty);
}
fn unionFieldVal(
@@ -8796,10 +10575,11 @@ fn elemPtrArray(
) CompileError!Air.Inst.Ref {
const array_ptr_ty = sema.typeOf(array_ptr);
const pointee_type = array_ptr_ty.elemType().elemType();
- const result_ty = if (array_ptr_ty.ptrIsMutable())
- try Type.Tag.single_mut_pointer.create(sema.arena, pointee_type)
- else
- try Type.Tag.single_const_pointer.create(sema.arena, pointee_type);
+ const result_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = pointee_type,
+ .mutable = array_ptr_ty.ptrIsMutable(),
+ .@"addrspace" = array_ptr_ty.ptrAddressSpace(),
+ });
if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| {
if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| {
@@ -8845,14 +10625,14 @@ fn coerce(
if (dest_type.eql(inst_ty))
return inst;
- const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty);
+ const mod = sema.mod;
+ const arena = sema.arena;
+
+ const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty, false, mod.getTarget());
if (in_memory_result == .ok) {
return sema.bitcast(block, dest_type, inst, inst_src);
}
- const mod = sema.mod;
- const arena = sema.arena;
-
// undefined to anything
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) {
@@ -8887,11 +10667,13 @@ fn coerce(
const array_type = inst_ty.elemType();
if (array_type.zigTypeTag() != .Array) break :src_array_ptr;
const array_elem_type = array_type.elemType();
- if (inst_ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr;
+ const dest_is_mut = !dest_type.isConstPtr();
+ if (inst_ty.isConstPtr() and dest_is_mut) break :src_array_ptr;
if (inst_ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr;
+ if (inst_ty.ptrAddressSpace() != dest_type.ptrAddressSpace()) break :src_array_ptr;
const dst_elem_type = dest_type.elemType();
- switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type)) {
+ switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, mod.getTarget())) {
.ok => {},
.no_match => break :src_array_ptr,
}
@@ -8932,7 +10714,7 @@ fn coerce(
const src_info = inst_ty.intInfo(target);
if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
// small enough unsigned ints can get casted to large enough signed ints
- (src_info.signedness == .signed and dst_info.signedness == .unsigned and dst_info.bits > src_info.bits))
+ (dst_info.signedness == .signed and dst_info.bits > src_info.bits))
{
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.intcast, dest_type, inst);
@@ -8948,13 +10730,13 @@ fn coerce(
const dst_bits = dest_type.floatBits(target);
if (dst_bits >= src_bits) {
try sema.requireRuntimeBlock(block, inst_src);
- return block.addTyOp(.floatcast, dest_type, inst);
+ return block.addTyOp(.fpext, dest_type, inst);
}
}
},
- .Enum => {
- // enum literal to enum
- if (inst_ty.zigTypeTag() == .EnumLiteral) {
+ .Enum => switch (inst_ty.zigTypeTag()) {
+ .EnumLiteral => {
+ // enum literal to enum
const val = try sema.resolveConstValue(block, inst_src, inst);
const bytes = val.castTag(.enum_literal).?.data;
const resolved_dest_type = try sema.resolveTypeFields(block, inst_src, dest_type);
@@ -8981,7 +10763,15 @@ fn coerce(
resolved_dest_type,
try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)),
);
- }
+ },
+ .Union => blk: {
+ // union to its own tag type
+ const union_tag_ty = inst_ty.unionTagType() orelse break :blk;
+ if (union_tag_ty.eql(dest_type)) {
+ return sema.unionToTag(block, dest_type, inst, inst_src);
+ }
+ },
+ else => {},
},
.ErrorUnion => {
// T to E!T or E to E!T
@@ -8998,10 +10788,92 @@ const InMemoryCoercionResult = enum {
no_match,
};
-fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult {
+/// If pointers have the same representation in runtime memory, a bitcast AIR instruction
+/// may be used for the coercion.
+/// * `const` attribute can be gained
+/// * `volatile` attribute can be gained
+/// * `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer) but only if !dest_is_mut
+/// * alignment can be decreased
+/// * bit offset attributes must match exactly
+/// * `*`/`[*]` must match exactly, but `[*c]` matches either one
+/// * sentinel-terminated pointers can coerce into `[*]`
+/// TODO improve this function to report recursive compile errors like it does in stage1.
+/// look at the function types_match_const_cast_only
+fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool, target: std.Target) InMemoryCoercionResult {
if (dest_type.eql(src_type))
return .ok;
+ if (dest_type.zigTypeTag() == .Pointer and
+ src_type.zigTypeTag() == .Pointer)
+ {
+ const dest_info = dest_type.ptrInfo().data;
+ const src_info = src_type.ptrInfo().data;
+
+ const child = coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target);
+ if (child == .no_match) {
+ return child;
+ }
+
+ if (dest_info.@"addrspace" != src_info.@"addrspace") {
+ return .no_match;
+ }
+
+ const ok_sent = dest_info.sentinel == null or src_info.size == .C or
+ (src_info.sentinel != null and
+ dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type));
+ if (!ok_sent) {
+ return .no_match;
+ }
+
+ const ok_ptr_size = src_info.size == dest_info.size or
+ src_info.size == .C or dest_info.size == .C;
+ if (!ok_ptr_size) {
+ return .no_match;
+ }
+
+ const ok_cv_qualifiers =
+ (src_info.mutable or !dest_info.mutable) and
+ (!src_info.@"volatile" or dest_info.@"volatile");
+
+ if (!ok_cv_qualifiers) {
+ return .no_match;
+ }
+
+ const ok_allows_zero = (dest_info.@"allowzero" and
+ (src_info.@"allowzero" or !dest_is_mut)) or
+ (!dest_info.@"allowzero" and !src_info.@"allowzero");
+ if (!ok_allows_zero) {
+ return .no_match;
+ }
+
+ if (dest_type.hasCodeGenBits() != src_type.hasCodeGenBits()) {
+ return .no_match;
+ }
+
+ if (src_info.host_size != dest_info.host_size or
+ src_info.bit_offset != dest_info.bit_offset)
+ {
+ return .no_match;
+ }
+
+ // If both pointers have alignment 0, it means they both want ABI alignment.
+ // In this case, if they share the same child type, no need to resolve
+ // pointee type alignment. Otherwise both pointee types must have their alignment
+ // resolved and we compare the alignment numerically.
+ if (src_info.@"align" != 0 or dest_info.@"align" != 0 or
+ !dest_info.pointee_type.eql(src_info.pointee_type))
+ {
+ const src_align = src_type.ptrAlignment(target);
+ const dest_align = dest_type.ptrAlignment(target);
+
+ if (dest_align > src_align) {
+ return .no_match;
+ }
+ }
+
+ return .ok;
+ }
+
// TODO: implement more of this function
return .no_match;
@@ -9021,33 +10893,56 @@ fn coerceNum(
const target = sema.mod.getTarget();
- if (dst_zig_tag == .ComptimeInt or dst_zig_tag == .Int) {
- if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
- if (val.floatHasFraction()) {
- return sema.mod.fail(&block.base, inst_src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst_ty });
- }
- return sema.mod.fail(&block.base, inst_src, "TODO float to int", .{});
- } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
- if (!val.intFitsInType(dest_type, target)) {
- return sema.mod.fail(&block.base, inst_src, "type {} cannot represent integer value {}", .{ dest_type, val });
- }
- return try sema.addConstant(dest_type, val);
- }
- } else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) {
- if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
- const res = val.floatCast(sema.arena, dest_type) catch |err| switch (err) {
- error.Overflow => return sema.mod.fail(
- &block.base,
- inst_src,
- "cast of value {} to type '{}' loses information",
- .{ val, dest_type },
- ),
- error.OutOfMemory => return error.OutOfMemory,
- };
- return try sema.addConstant(dest_type, res);
- } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
- return sema.mod.fail(&block.base, inst_src, "TODO int to float", .{});
- }
+ switch (dst_zig_tag) {
+ .ComptimeInt, .Int => switch (src_zig_tag) {
+ .Float, .ComptimeFloat => {
+ if (val.floatHasFraction()) {
+ return sema.mod.fail(&block.base, inst_src, "fractional component prevents float value {} from coercion to type '{}'", .{ val, dest_type });
+ }
+ return sema.mod.fail(&block.base, inst_src, "TODO float to int", .{});
+ },
+ .Int, .ComptimeInt => {
+ if (!val.intFitsInType(dest_type, target)) {
+ return sema.mod.fail(&block.base, inst_src, "type {} cannot represent integer value {}", .{ dest_type, val });
+ }
+ return try sema.addConstant(dest_type, val);
+ },
+ else => {},
+ },
+ .ComptimeFloat, .Float => switch (src_zig_tag) {
+ .ComptimeFloat => {
+ const result_val = try val.floatCast(sema.arena, dest_type);
+ return try sema.addConstant(dest_type, result_val);
+ },
+ .Float => {
+ const result_val = try val.floatCast(sema.arena, dest_type);
+ if (!val.eql(result_val, dest_type)) {
+ return sema.mod.fail(
+ &block.base,
+ inst_src,
+ "type {} cannot represent float value {}",
+ .{ dest_type, val },
+ );
+ }
+ return try sema.addConstant(dest_type, result_val);
+ },
+ .Int, .ComptimeInt => {
+ const result_val = try val.intToFloat(sema.arena, dest_type, target);
+ // TODO implement this compile error
+ //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty);
+ //if (!int_again_val.eql(val, inst_ty)) {
+ // return sema.mod.fail(
+ // &block.base,
+ // inst_src,
+ // "type {} cannot represent integer value {}",
+ // .{ dest_type, val },
+ // );
+ //}
+ return try sema.addConstant(dest_type, result_val);
+ },
+ else => {},
+ },
+ else => {},
}
return null;
}
@@ -9067,66 +10962,97 @@ fn coerceVarArgParam(
return inst;
}
+// TODO migrate callsites to use storePtr2 instead.
fn storePtr(
sema: *Sema,
block: *Scope.Block,
src: LazySrcLoc,
ptr: Air.Inst.Ref,
- uncasted_value: Air.Inst.Ref,
+ uncasted_operand: Air.Inst.Ref,
+) !void {
+ return sema.storePtr2(block, src, ptr, src, uncasted_operand, src, .store);
+}
+
+fn storePtr2(
+ sema: *Sema,
+ block: *Scope.Block,
+ src: LazySrcLoc,
+ ptr: Air.Inst.Ref,
+ ptr_src: LazySrcLoc,
+ uncasted_operand: Air.Inst.Ref,
+ operand_src: LazySrcLoc,
+ air_tag: Air.Inst.Tag,
) !void {
const ptr_ty = sema.typeOf(ptr);
if (ptr_ty.isConstPtr())
return sema.mod.fail(&block.base, src, "cannot assign to constant", .{});
const elem_ty = ptr_ty.elemType();
- const value = try sema.coerce(block, elem_ty, uncasted_value, src);
+ const operand = try sema.coerce(block, elem_ty, uncasted_operand, operand_src);
if ((try sema.typeHasOnePossibleValue(block, src, elem_ty)) != null)
return;
- if (try sema.resolveDefinedValue(block, src, ptr)) |ptr_val| {
- if (ptr_val.castTag(.decl_ref_mut)) |decl_ref_mut| {
- const const_val = (try sema.resolveMaybeUndefVal(block, src, value)) orelse
- return sema.mod.fail(&block.base, src, "cannot store runtime value in compile time variable", .{});
-
- if (decl_ref_mut.data.runtime_index < block.runtime_index) {
- if (block.runtime_cond) |cond_src| {
- const msg = msg: {
- const msg = try sema.mod.errMsg(&block.base, src, "store to comptime variable depends on runtime condition", .{});
- errdefer msg.destroy(sema.gpa);
- try sema.mod.errNote(&block.base, cond_src, msg, "runtime condition here", .{});
- break :msg msg;
- };
- return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
- }
- if (block.runtime_loop) |loop_src| {
- const msg = msg: {
- const msg = try sema.mod.errMsg(&block.base, src, "cannot store to comptime variable in non-inline loop", .{});
- errdefer msg.destroy(sema.gpa);
- try sema.mod.errNote(&block.base, loop_src, msg, "non-inline loop here", .{});
- break :msg msg;
- };
- return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
- }
- unreachable;
- }
- var new_arena = std.heap.ArenaAllocator.init(sema.gpa);
- errdefer new_arena.deinit();
- const new_ty = try elem_ty.copy(&new_arena.allocator);
- const new_val = try const_val.copy(&new_arena.allocator);
- const decl = decl_ref_mut.data.decl;
- var old_arena = decl.value_arena.?.promote(sema.gpa);
- decl.value_arena = null;
- try decl.finalizeNewArena(&new_arena);
- decl.ty = new_ty;
- decl.val = new_val;
- old_arena.deinit();
+ const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
+ const operand_val = (try sema.resolveMaybeUndefVal(block, operand_src, operand)) orelse
+ return sema.mod.fail(&block.base, src, "cannot store runtime value in compile time variable", .{});
+ if (ptr_val.tag() == .decl_ref_mut) {
+ try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty);
return;
}
- }
+ break :rs operand_src;
+ } else ptr_src;
+
// TODO handle if the element type requires comptime
- try sema.requireRuntimeBlock(block, src);
- _ = try block.addBinOp(.store, ptr, value);
+ try sema.requireRuntimeBlock(block, runtime_src);
+ _ = try block.addBinOp(air_tag, ptr, operand);
+}
+
+/// Call when you have Value objects rather than Air instructions, and you want to
+/// assert the store must be done at comptime.
+fn storePtrVal(
+ sema: *Sema,
+ block: *Scope.Block,
+ src: LazySrcLoc,
+ ptr_val: Value,
+ operand_val: Value,
+ operand_ty: Type,
+) !void {
+ if (ptr_val.castTag(.decl_ref_mut)) |decl_ref_mut| {
+ if (decl_ref_mut.data.runtime_index < block.runtime_index) {
+ if (block.runtime_cond) |cond_src| {
+ const msg = msg: {
+ const msg = try sema.mod.errMsg(&block.base, src, "store to comptime variable depends on runtime condition", .{});
+ errdefer msg.destroy(sema.gpa);
+ try sema.mod.errNote(&block.base, cond_src, msg, "runtime condition here", .{});
+ break :msg msg;
+ };
+ return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
+ }
+ if (block.runtime_loop) |loop_src| {
+ const msg = msg: {
+ const msg = try sema.mod.errMsg(&block.base, src, "cannot store to comptime variable in non-inline loop", .{});
+ errdefer msg.destroy(sema.gpa);
+ try sema.mod.errNote(&block.base, loop_src, msg, "non-inline loop here", .{});
+ break :msg msg;
+ };
+ return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
+ }
+ unreachable;
+ }
+ var new_arena = std.heap.ArenaAllocator.init(sema.gpa);
+ errdefer new_arena.deinit();
+ const new_ty = try operand_ty.copy(&new_arena.allocator);
+ const new_val = try operand_val.copy(&new_arena.allocator);
+ const decl = decl_ref_mut.data.decl;
+ var old_arena = decl.value_arena.?.promote(sema.gpa);
+ decl.value_arena = null;
+ try decl.finalizeNewArena(&new_arena);
+ decl.ty = new_ty;
+ decl.val = new_val;
+ old_arena.deinit();
+ return;
+ }
}
fn bitcast(
@@ -9171,7 +11097,8 @@ fn coerceArrayPtrToMany(
// The comptime Value representation is compatible with both types.
return sema.addConstant(dest_type, val);
}
- return sema.mod.fail(&block.base, inst_src, "TODO implement coerceArrayPtrToMany runtime instruction", .{});
+ try sema.requireRuntimeBlock(block, inst_src);
+ return sema.bitcast(block, dest_type, inst, inst_src);
}
fn analyzeDeclVal(
@@ -9211,11 +11138,19 @@ fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref {
const decl_tv = try decl.typedValue();
if (decl_tv.val.castTag(.variable)) |payload| {
const variable = payload.data;
- const ty = try Module.simplePtrType(sema.arena, decl_tv.ty, variable.is_mutable, .One);
+ const ty = try Type.ptr(sema.arena, .{
+ .pointee_type = decl_tv.ty,
+ .mutable = variable.is_mutable,
+ .@"addrspace" = decl.@"addrspace",
+ });
return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl));
}
return sema.addConstant(
- try Module.simplePtrType(sema.arena, decl_tv.ty, false, .One),
+ try Type.ptr(sema.arena, .{
+ .pointee_type = decl_tv.ty,
+ .mutable = false,
+ .@"addrspace" = decl.@"addrspace",
+ }),
try Value.Tag.decl_ref.create(sema.arena, decl),
);
}
@@ -9232,14 +11167,22 @@ fn analyzeRef(
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
- operand_ty,
+ try operand_ty.copy(anon_decl.arena()),
try val.copy(anon_decl.arena()),
));
}
try sema.requireRuntimeBlock(block, src);
- const ptr_type = try Module.simplePtrType(sema.arena, operand_ty, false, .One);
- const mut_ptr_type = try Module.simplePtrType(sema.arena, operand_ty, true, .One);
+ const address_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local);
+ const ptr_type = try Type.ptr(sema.arena, .{
+ .pointee_type = operand_ty,
+ .mutable = false,
+ .@"addrspace" = address_space,
+ });
+ const mut_ptr_type = try Type.ptr(sema.arena, .{
+ .pointee_type = operand_ty,
+ .@"addrspace" = address_space,
+ });
const alloc = try block.addTy(.alloc, mut_ptr_type);
try sema.storePtr(block, src, alloc, operand);
@@ -9395,18 +11338,16 @@ fn analyzeSlice(
}
}
}
- const return_type = try Module.ptrType(
- sema.arena,
- return_elem_type,
- if (end_opt == .none) slice_sentinel else null,
- 0, // TODO alignment
- 0,
- 0,
- !ptr_child.isConstPtr(),
- ptr_child.isAllowzeroPtr(),
- ptr_child.isVolatilePtr(),
- return_ptr_size,
- );
+ const return_type = try Type.ptr(sema.arena, .{
+ .pointee_type = return_elem_type,
+ .sentinel = if (end_opt == .none) slice_sentinel else null,
+ .@"align" = 0, // TODO alignment
+ .@"addrspace" = if (ptr_child.zigTypeTag() == .Pointer) ptr_child.ptrAddressSpace() else .generic,
+ .mutable = !ptr_child.isConstPtr(),
+ .@"allowzero" = ptr_child.isAllowzeroPtr(),
+ .@"volatile" = ptr_child.isVolatilePtr(),
+ .size = return_ptr_size,
+ });
_ = return_type;
return sema.mod.fail(&block.base, src, "TODO implement analysis of slice", .{});
@@ -9508,11 +11449,11 @@ fn cmpNumeric(
const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
lhs_val.compareWithZero(.lt)
else
- (lhs_ty.isFloat() or lhs_ty.isSignedInt());
+ (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt());
const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
rhs_val.compareWithZero(.lt)
else
- (rhs_ty.isFloat() or rhs_ty.isSignedInt());
+ (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt());
const dest_int_is_signed = lhs_is_signed or rhs_is_signed;
var dest_float_type: ?Type = null;
@@ -9692,6 +11633,20 @@ fn wrapErrorUnion(
}
}
+fn unionToTag(
+ sema: *Sema,
+ block: *Scope.Block,
+ dest_type: Type,
+ un: Air.Inst.Ref,
+ un_src: LazySrcLoc,
+) !Air.Inst.Ref {
+ if (try sema.resolveMaybeUndefVal(block, un_src, un)) |un_val| {
+ return sema.addConstant(dest_type, un_val.unionTag());
+ }
+ try sema.requireRuntimeBlock(block, un_src);
+ return block.addTyOp(.get_union_tag, dest_type, un);
+}
+
fn resolvePeerTypes(
sema: *Sema,
block: *Scope.Block,
@@ -9738,7 +11693,7 @@ fn resolvePeerTypes(
}
continue;
}
- if (chosen_ty.isFloat() and candidate_ty.isFloat()) {
+ if (chosen_ty.isRuntimeFloat() and candidate_ty.isRuntimeFloat()) {
if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) {
chosen = candidate;
chosen_i = candidate_i + 1;
@@ -9756,13 +11711,13 @@ fn resolvePeerTypes(
continue;
}
- if (chosen_ty.zigTypeTag() == .ComptimeFloat and candidate_ty.isFloat()) {
+ if (chosen_ty.zigTypeTag() == .ComptimeFloat and candidate_ty.isRuntimeFloat()) {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
}
- if (chosen_ty.isFloat() and candidate_ty.zigTypeTag() == .ComptimeFloat) {
+ if (chosen_ty.isRuntimeFloat() and candidate_ty.zigTypeTag() == .ComptimeFloat) {
continue;
}
@@ -9813,9 +11768,6 @@ pub fn resolveTypeLayout(
ty: Type,
) CompileError!void {
switch (ty.zigTypeTag()) {
- .Pointer => {
- return sema.resolveTypeLayout(block, src, ty.elemType());
- },
.Struct => {
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const struct_obj = resolved_ty.castTag(.@"struct").?.data;
@@ -9863,6 +11815,10 @@ pub fn resolveDeclFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty:
sema.namespace = &struct_obj.namespace;
defer sema.namespace = prev_namespace;
+ const old_src = block.src_decl;
+ defer block.src_decl = old_src;
+ block.src_decl = struct_obj.owner_decl;
+
struct_obj.status = .field_types_wip;
try sema.analyzeStructFields(block, struct_obj);
struct_obj.status = .have_field_types;
@@ -9881,6 +11837,10 @@ pub fn resolveDeclFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty:
sema.namespace = &union_obj.namespace;
defer sema.namespace = prev_namespace;
+ const old_src = block.src_decl;
+ defer block.src_decl = old_src;
+ block.src_decl = union_obj.owner_decl;
+
union_obj.status = .field_types_wip;
try sema.analyzeUnionFields(block, union_obj);
union_obj.status = .have_field_types;
@@ -9907,6 +11867,7 @@ fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type
.atomic_order => return sema.resolveBuiltinTypeFields(block, src, "AtomicOrder"),
.atomic_rmw_op => return sema.resolveBuiltinTypeFields(block, src, "AtomicRmwOp"),
.calling_convention => return sema.resolveBuiltinTypeFields(block, src, "CallingConvention"),
+ .address_space => return sema.resolveBuiltinTypeFields(block, src, "AddressSpace"),
.float_mode => return sema.resolveBuiltinTypeFields(block, src, "FloatMode"),
.reduce_op => return sema.resolveBuiltinTypeFields(block, src, "ReduceOp"),
.call_options => return sema.resolveBuiltinTypeFields(block, src, "CallOptions"),
@@ -10081,9 +12042,11 @@ fn analyzeUnionFields(
const src: LazySrcLoc = .{ .node_offset = union_obj.node_offset };
extra_index += @boolToInt(small.has_src_node);
- if (small.has_tag_type) {
+ const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: {
+ const ty_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
- }
+ break :blk ty_ref;
+ } else .none;
const body_len = if (small.has_body_len) blk: {
const body_len = zir.extra[extra_index];
@@ -10118,11 +12081,33 @@ fn analyzeUnionFields(
var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa);
defer union_obj.owner_decl.value_arena.?.* = decl_arena.state;
- try union_obj.fields.ensureCapacity(&decl_arena.allocator, fields_len);
+ try union_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len);
if (body.len != 0) {
_ = try sema.analyzeBody(block, body);
}
+ var int_tag_ty: Type = undefined;
+ var enum_field_names: ?*Module.EnumNumbered.NameMap = null;
+ var enum_value_map: ?*Module.EnumNumbered.ValueMap = null;
+ if (tag_type_ref != .none) {
+ const provided_ty = try sema.resolveType(block, src, tag_type_ref);
+ if (small.auto_enum_tag) {
+ // The provided type is an integer type and we must construct the enum tag type here.
+ int_tag_ty = provided_ty;
+ union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(block, fields_len, provided_ty);
+ enum_field_names = &union_obj.tag_ty.castTag(.enum_numbered).?.data.fields;
+ enum_value_map = &union_obj.tag_ty.castTag(.enum_numbered).?.data.values;
+ } else {
+ // The provided type is the enum tag type.
+ union_obj.tag_ty = provided_ty;
+ }
+ } else {
+ // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis
+ // purposes, we still auto-generate an enum tag type the same way. That the union is
+ // untagged is represented by the Type tag (union vs union_tagged).
+ union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, fields_len);
+ enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields;
+ }
const bits_per_field = 4;
const fields_per_u32 = 32 / bits_per_field;
@@ -10161,12 +12146,25 @@ fn analyzeUnionFields(
break :blk align_ref;
} else .none;
- if (has_tag) {
+ const tag_ref: Zir.Inst.Ref = if (has_tag) blk: {
+ const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
+ break :blk tag_ref;
+ } else .none;
+
+ if (enum_value_map) |map| {
+ const tag_src = src; // TODO better source location
+ const coerced = try sema.coerce(block, int_tag_ty, tag_ref, tag_src);
+ const val = try sema.resolveConstValue(block, tag_src, coerced);
+ map.putAssumeCapacityContext(val, {}, .{ .ty = int_tag_ty });
}
// This string needs to outlive the ZIR code.
const field_name = try decl_arena.allocator.dupe(u8, field_name_zir);
+ if (enum_field_names) |set| {
+ set.putAssumeCapacity(field_name, {});
+ }
+
const field_ty: Type = if (field_type_ref == .none)
Type.initTag(.void)
else
@@ -10188,10 +12186,84 @@ fn analyzeUnionFields(
// But only resolve the source location if we need to emit a compile error.
const abi_align_val = (try sema.resolveInstConst(block, src, align_ref)).val;
gop.value_ptr.abi_align = try abi_align_val.copy(&decl_arena.allocator);
+ } else {
+ gop.value_ptr.abi_align = Value.initTag(.abi_align_default);
}
}
+}
- // TODO resolve the union tag_type_ref
+fn generateUnionTagTypeNumbered(
+ sema: *Sema,
+ block: *Scope.Block,
+ fields_len: u32,
+ int_ty: Type,
+) !Type {
+ const mod = sema.mod;
+
+ var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
+ errdefer new_decl_arena.deinit();
+
+ const enum_obj = try new_decl_arena.allocator.create(Module.EnumNumbered);
+ const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumNumbered);
+ enum_ty_payload.* = .{
+ .base = .{ .tag = .enum_numbered },
+ .data = enum_obj,
+ };
+ const enum_ty = Type.initPayload(&enum_ty_payload.base);
+ const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty);
+ // TODO better type name
+ const new_decl = try mod.createAnonymousDecl(&block.base, .{
+ .ty = Type.initTag(.type),
+ .val = enum_val,
+ });
+ new_decl.owns_tv = true;
+ errdefer sema.mod.deleteAnonDecl(&block.base, new_decl);
+
+ enum_obj.* = .{
+ .owner_decl = new_decl,
+ .tag_ty = int_ty,
+ .fields = .{},
+ .values = .{},
+ .node_offset = 0,
+ };
+ // Here we pre-allocate the maps using the decl arena.
+ try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
+ try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{ .ty = int_ty });
+ try new_decl.finalizeNewArena(&new_decl_arena);
+ return enum_ty;
+}
+
+fn generateUnionTagTypeSimple(sema: *Sema, block: *Scope.Block, fields_len: u32) !Type {
+ const mod = sema.mod;
+
+ var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
+ errdefer new_decl_arena.deinit();
+
+ const enum_obj = try new_decl_arena.allocator.create(Module.EnumSimple);
+ const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumSimple);
+ enum_ty_payload.* = .{
+ .base = .{ .tag = .enum_simple },
+ .data = enum_obj,
+ };
+ const enum_ty = Type.initPayload(&enum_ty_payload.base);
+ const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty);
+ // TODO better type name
+ const new_decl = try mod.createAnonymousDecl(&block.base, .{
+ .ty = Type.initTag(.type),
+ .val = enum_val,
+ });
+ new_decl.owns_tv = true;
+ errdefer sema.mod.deleteAnonDecl(&block.base, new_decl);
+
+ enum_obj.* = .{
+ .owner_decl = new_decl,
+ .fields = .{},
+ .node_offset = 0,
+ };
+ // Here we pre-allocate the maps using the decl arena.
+ try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
+ try new_decl.finalizeNewArena(&new_decl_arena);
+ return enum_ty;
}
fn getBuiltin(
@@ -10231,7 +12303,7 @@ fn getBuiltinType(
}
/// There is another implementation of this in `Type.onePossibleValue`. This one
-/// in `Sema` is for calling during semantic analysis, and peforms field resolution
+/// in `Sema` is for calling during semantic analysis, and performs field resolution
/// to get the answer. The one in `Type` is for calling during codegen and asserts
/// that the types are already resolved.
fn typeHasOnePossibleValue(
@@ -10301,6 +12373,7 @@ fn typeHasOnePossibleValue(
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -10316,6 +12389,7 @@ fn typeHasOnePossibleValue(
.single_const_pointer,
.single_mut_pointer,
.pointer,
+ .bound_fn,
=> return null,
.@"struct" => {
@@ -10328,11 +12402,28 @@ fn typeHasOnePossibleValue(
}
return Value.initTag(.empty_struct_value);
},
+ .enum_numbered => {
+ const resolved_ty = try sema.resolveTypeFields(block, src, ty);
+ const enum_obj = resolved_ty.castTag(.enum_numbered).?.data;
+ if (enum_obj.fields.count() == 1) {
+ if (enum_obj.values.count() == 0) {
+ return Value.initTag(.zero); // auto-numbered
+ } else {
+ return enum_obj.values.keys()[0];
+ }
+ } else {
+ return null;
+ }
+ },
.enum_full => {
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
- const enum_full = resolved_ty.castTag(.enum_full).?.data;
- if (enum_full.fields.count() == 1) {
- return enum_full.values.keys()[0];
+ const enum_obj = resolved_ty.castTag(.enum_full).?.data;
+ if (enum_obj.fields.count() == 1) {
+ if (enum_obj.values.count() == 0) {
+ return Value.initTag(.zero); // auto-numbered
+ } else {
+ return enum_obj.values.keys()[0];
+ }
} else {
return null;
}
@@ -10486,6 +12577,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
.atomic_order => return .atomic_order_type,
.atomic_rmw_op => return .atomic_rmw_op_type,
.calling_convention => return .calling_convention_type,
+ .address_space => return .address_space_type,
.float_mode => return .float_mode_type,
.reduce_op => return .reduce_op_type,
.call_options => return .call_options_type,
@@ -10581,7 +12673,10 @@ fn analyzeComptimeAlloc(
block: *Scope.Block,
var_type: Type,
) CompileError!Air.Inst.Ref {
- const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One);
+ const ptr_type = try Type.ptr(sema.arena, .{
+ .pointee_type = var_type,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant),
+ });
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
@@ -10597,3 +12692,58 @@ fn analyzeComptimeAlloc(
.decl = decl,
}));
}
+
+/// The places where a user can specify an address space attribute
+pub const AddressSpaceContext = enum {
+ /// A function is specified to be placed in a certain address space.
+ function,
+
+ /// A (global) variable is specified to be placed in a certain address space.
+ /// In contrast to .constant, these values (and thus the address space they will be
+ /// placed in) are required to be mutable.
+ variable,
+
+ /// A (global) constant value is specified to be placed in a certain address space.
+ /// In contrast to .variable, values placed in this address space are not required to be mutable.
+ constant,
+
+    /// A pointer is ascribed to point into a certain address space.
+ pointer,
+};
+
+pub fn analyzeAddrspace(
+ sema: *Sema,
+ block: *Scope.Block,
+ src: LazySrcLoc,
+ zir_ref: Zir.Inst.Ref,
+ ctx: AddressSpaceContext,
+) !std.builtin.AddressSpace {
+ const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref);
+ const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace);
+ const target = sema.mod.getTarget();
+ const arch = target.cpu.arch;
+
+ const supported = switch (address_space) {
+ .generic => true,
+ .gs, .fs, .ss => (arch == .i386 or arch == .x86_64) and ctx == .pointer,
+ };
+
+ if (!supported) {
+ // TODO error messages could be made more elaborate here
+ const entity = switch (ctx) {
+ .function => "functions",
+ .variable => "mutable values",
+ .constant => "constant values",
+ .pointer => "pointers",
+ };
+
+ return sema.mod.fail(
+ &block.base,
+ src,
+ "{s} with address space '{s}' are not supported on {s}",
+ .{ entity, @tagName(address_space), arch.genericName() },
+ );
+ }
+
+ return address_space;
+}
diff --git a/src/Zir.zig b/src/Zir.zig
index 1f0e4e370b..e7359f9382 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -49,8 +49,6 @@ pub const Header = extern struct {
};
pub const ExtraIndex = enum(u32) {
- /// Ref. The main struct decl for this file.
- main_struct,
/// If this is 0, no compile errors. Otherwise there is a `CompileErrors`
/// payload at this index.
compile_errors,
@@ -61,11 +59,6 @@ pub const ExtraIndex = enum(u32) {
_,
};
-pub fn getMainStruct(zir: Zir) Inst.Index {
- return zir.extra[@enumToInt(ExtraIndex.main_struct)] -
- @intCast(u32, Inst.Ref.typed_value_map.len);
-}
-
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, end: usize } {
@@ -77,6 +70,7 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
u32 => code.extra[i],
Inst.Ref => @intToEnum(Inst.Ref, code.extra[i]),
i32 => @bitCast(i32, code.extra[i]),
+ Inst.Call.Flags => @bitCast(Inst.Call.Flags, code.extra[i]),
else => @compileError("bad field type"),
};
i += 1;
@@ -112,50 +106,9 @@ pub fn deinit(code: *Zir, gpa: *Allocator) void {
code.* = undefined;
}
-/// Write human-readable, debug formatted ZIR code to a file.
-pub fn renderAsTextToFile(
- gpa: *Allocator,
- scope_file: *Module.Scope.File,
- fs_file: std.fs.File,
-) !void {
- var arena = std.heap.ArenaAllocator.init(gpa);
- defer arena.deinit();
-
- var writer: Writer = .{
- .gpa = gpa,
- .arena = &arena.allocator,
- .file = scope_file,
- .code = scope_file.zir,
- .indent = 0,
- .parent_decl_node = 0,
- };
-
- const main_struct_inst = scope_file.zir.getMainStruct();
- try fs_file.writer().print("%{d} ", .{main_struct_inst});
- try writer.writeInstToStream(fs_file.writer(), main_struct_inst);
- try fs_file.writeAll("\n");
- const imports_index = scope_file.zir.extra[@enumToInt(ExtraIndex.imports)];
- if (imports_index != 0) {
- try fs_file.writeAll("Imports:\n");
-
- const extra = scope_file.zir.extraData(Inst.Imports, imports_index);
- var import_i: u32 = 0;
- var extra_index = extra.end;
-
- while (import_i < extra.data.imports_len) : (import_i += 1) {
- const item = scope_file.zir.extraData(Inst.Imports.Item, extra_index);
- extra_index = item.end;
-
- const src: LazySrcLoc = .{ .token_abs = item.data.token };
- const import_path = scope_file.zir.nullTerminatedString(item.data.name);
- try fs_file.writer().print(" @import(\"{}\") ", .{
- std.zig.fmtEscapes(import_path),
- });
- try writer.writeSrc(fs_file.writer(), src);
- try fs_file.writer().writeAll("\n");
- }
- }
-}
+/// ZIR is structured so that the outermost "main" struct of any file
+/// is always at index 0.
+pub const main_struct_inst: Inst.Index = 0;
/// These are untyped instructions generated from an Abstract Syntax Tree.
/// The data here is immutable because it is possible to have multiple
@@ -173,6 +126,64 @@ pub const Inst = struct {
/// Twos complement wrapping integer addition.
/// Uses the `pl_node` union field. Payload is `Bin`.
addwrap,
+ /// Saturating addition.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ add_sat,
+ /// Arithmetic subtraction. Asserts no integer overflow.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ sub,
+ /// Twos complement wrapping integer subtraction.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ subwrap,
+ /// Saturating subtraction.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ sub_sat,
+ /// Arithmetic multiplication. Asserts no integer overflow.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ mul,
+ /// Twos complement wrapping integer multiplication.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ mulwrap,
+ /// Saturating multiplication.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ mul_sat,
+ /// Implements the `@divExact` builtin.
+ /// Uses the `pl_node` union field with payload `Bin`.
+ div_exact,
+ /// Implements the `@divFloor` builtin.
+ /// Uses the `pl_node` union field with payload `Bin`.
+ div_floor,
+ /// Implements the `@divTrunc` builtin.
+ /// Uses the `pl_node` union field with payload `Bin`.
+ div_trunc,
+ /// Implements the `@mod` builtin.
+ /// Uses the `pl_node` union field with payload `Bin`.
+ mod,
+ /// Implements the `@rem` builtin.
+ /// Uses the `pl_node` union field with payload `Bin`.
+ rem,
+ /// Ambiguously remainder division or modulus. If the computation would possibly have
+ /// a different value depending on whether the operation is remainder division or modulus,
+ /// a compile error is emitted. Otherwise the computation is performed.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ mod_rem,
+ /// Integer shift-left. Zeroes are shifted in from the right hand side.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ shl,
+ /// Implements the `@shlExact` builtin.
+ /// Uses the `pl_node` union field with payload `Bin`.
+ shl_exact,
+ /// Saturating shift-left.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ shl_sat,
+ /// Integer shift-right. Arithmetic or logical depending on the signedness of
+ /// the integer type.
+ /// Uses the `pl_node` union field. Payload is `Bin`.
+ shr,
+ /// Implements the `@shrExact` builtin.
+ /// Uses the `pl_node` union field with payload `Bin`.
+ shr_exact,
+
/// Declares a parameter of the current function. Used for:
/// * debug info
/// * checking shadowing against declarations in the current namespace
@@ -270,17 +281,9 @@ pub const Inst = struct {
break_inline,
/// Uses the `node` union field.
breakpoint,
- /// Function call with modifier `.auto`.
+ /// Function call.
/// Uses `pl_node`. AST node is the function call. Payload is `Call`.
call,
- /// Same as `call` but it also does `ensure_result_used` on the return value.
- call_chkused,
- /// Same as `call` but with modifier `.compile_time`.
- call_compile_time,
- /// Same as `call` but with modifier `.no_suspend`.
- call_nosuspend,
- /// Same as `call` but with modifier `.async_kw`.
- call_async,
/// `<`
/// Uses the `pl_node` union field. Payload is `Bin`.
cmp_lt,
@@ -312,11 +315,6 @@ pub const Inst = struct {
/// only the taken branch is analyzed. The then block and else block must
/// terminate with an "inline" variant of a noreturn instruction.
condbr_inline,
- /// An opaque type definition. Provides an AST node only.
- /// Uses the `pl_node` union field. Payload is `OpaqueDecl`.
- opaque_decl,
- opaque_decl_anon,
- opaque_decl_func,
/// An error set type definition. Contains a list of field names.
/// Uses the `pl_node` union field. Payload is `ErrorSetDecl`.
error_set_decl,
@@ -364,9 +362,13 @@ pub const Inst = struct {
/// `error.Foo` syntax. Uses the `str_tok` field of the Data union.
error_value,
/// Implements the `@export` builtin function, based on either an identifier to a Decl,
- /// or field access of a Decl.
+ /// or field access of a Decl. The thing being exported is the Decl.
/// Uses the `pl_node` union field. Payload is `Export`.
@"export",
+ /// Implements the `@export` builtin function, based on a comptime-known value.
+ /// The thing being exported is the comptime-known value which is the operand.
+ /// Uses the `pl_node` union field. Payload is `ExportValue`.
+ export_value,
/// Given a pointer to a struct or object that contains virtual fields, returns a pointer
/// to the named field. The field name is stored in string_bytes. Used by a.b syntax.
/// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
@@ -376,6 +378,15 @@ pub const Inst = struct {
/// This instruction also accepts a pointer.
/// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
field_val,
+ /// Given a pointer to a struct or object that contains virtual fields, returns the
+ /// named field. If there is no named field, searches in the type for a decl that
+ /// matches the field name. The decl is resolved and we ensure that it's a function
+ /// which can accept the object as the first parameter, with one pointer fixup. If
+ /// all of that works, this instruction produces a special "bound function" value
+ /// which contains both the function and the saved first parameter value.
+ /// Bound functions may only be used as the function parameter to a `call` or
+ /// `builtin_call` instruction. Any other use is invalid zir and may crash the compiler. Uses the `pl_node` union field.
+ field_call_bind,
/// Given a pointer to a struct or object that contains virtual fields, returns a pointer
/// to the named field. The field name is a comptime instruction. Used by @field.
/// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
@@ -384,6 +395,15 @@ pub const Inst = struct {
/// The field name is a comptime instruction. Used by @field.
/// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
field_val_named,
+ /// Given a pointer to a struct or object that contains virtual fields, returns the
+ /// named field. If there is no named field, searches in the type for a decl that
+ /// matches the field name. The decl is resolved and we ensure that it's a function
+ /// which can accept the object as the first parameter, with one pointer fixup. If
+ /// all of that works, this instruction produces a special "bound function" value
+ /// which contains both the function and the saved first parameter value.
+ /// Bound functions may only be used as the function parameter to a `call` or
+ /// `builtin_call` instruction. Any other use is invalid zir and may crash the compiler. Uses the `pl_node` union field.
+ field_call_bind_named,
/// Returns a function type, or a function instance, depending on whether
/// the body_len is 0. Calling convention is auto.
/// Uses the `pl_node` union field. `payload_index` points to a `Func`.
@@ -433,25 +453,6 @@ pub const Inst = struct {
/// Merge two error sets into one, `E1 || E2`.
/// Uses the `pl_node` field with payload `Bin`.
merge_error_sets,
- /// Ambiguously remainder division or modulus. If the computation would possibly have
- /// a different value depending on whether the operation is remainder division or modulus,
- /// a compile error is emitted. Otherwise the computation is performed.
- /// Uses the `pl_node` union field. Payload is `Bin`.
- mod_rem,
- /// Arithmetic multiplication. Asserts no integer overflow.
- /// Uses the `pl_node` union field. Payload is `Bin`.
- mul,
- /// Twos complement wrapping integer multiplication.
- /// Uses the `pl_node` union field. Payload is `Bin`.
- mulwrap,
- /// Given a reference to a function and a parameter index, returns the
- /// type of the parameter. The only usage of this instruction is for the
- /// result location of parameters of function calls. In the case of a function's
- /// parameter type being `anytype`, it is the type coercion's job to detect this
- /// scenario and skip the coercion, so that semantic analysis of this instruction
- /// is not in a position where it must create an invalid type.
- /// Uses the `param_type` union field.
- param_type,
/// Turns an R-Value into a const L-Value. In other words, it takes a value,
/// stores it in a memory location, and returns a const pointer to it. If the value
/// is `comptime`, the memory location is global static constant data. Otherwise,
@@ -488,10 +489,10 @@ pub const Inst = struct {
/// this instruction; a following 'ret' instruction will do the diversion.
/// Uses the `str_tok` union field.
ret_err_value_code,
- /// Create a pointer type that does not have a sentinel, alignment, or bit range specified.
+ /// Create a pointer type that does not have a sentinel, alignment, address space, or bit range specified.
/// Uses the `ptr_type_simple` union field.
ptr_type_simple,
- /// Create a pointer type which can have a sentinel, alignment, and/or bit range.
+ /// Create a pointer type which can have a sentinel, alignment, address space, and/or bit range.
/// Uses the `ptr_type` union field.
ptr_type,
/// Slice operation `lhs[rhs..]`. No sentinel and no end offset.
@@ -528,12 +529,6 @@ pub const Inst = struct {
/// String Literal. Makes an anonymous Decl and then takes a pointer to it.
/// Uses the `str` union field.
str,
- /// Arithmetic subtraction. Asserts no integer overflow.
- /// Uses the `pl_node` union field. Payload is `Bin`.
- sub,
- /// Twos complement wrapping integer subtraction.
- /// Uses the `pl_node` union field. Payload is `Bin`.
- subwrap,
/// Arithmetic negation. Asserts no integer overflow.
/// Same as sub with a lhs of 0, split into a separate instruction to save memory.
/// Uses `un_node`.
@@ -731,7 +726,7 @@ pub const Inst = struct {
size_of,
/// Implements the `@bitSizeOf` builtin. Uses `un_node`.
bit_size_of,
- /// Implements the `@fence` builtin. Uses `node`.
+ /// Implements the `@fence` builtin. Uses `un_node`.
fence,
/// Implement builtin `@ptrToInt`. Uses `un_node`.
@@ -859,35 +854,6 @@ pub const Inst = struct {
/// Implements the `@bitReverse` builtin. Uses the `un_node` union field.
bit_reverse,
- /// Implements the `@divExact` builtin.
- /// Uses the `pl_node` union field with payload `Bin`.
- div_exact,
- /// Implements the `@divFloor` builtin.
- /// Uses the `pl_node` union field with payload `Bin`.
- div_floor,
- /// Implements the `@divTrunc` builtin.
- /// Uses the `pl_node` union field with payload `Bin`.
- div_trunc,
- /// Implements the `@mod` builtin.
- /// Uses the `pl_node` union field with payload `Bin`.
- mod,
- /// Implements the `@rem` builtin.
- /// Uses the `pl_node` union field with payload `Bin`.
- rem,
-
- /// Integer shift-left. Zeroes are shifted in from the right hand side.
- /// Uses the `pl_node` union field. Payload is `Bin`.
- shl,
- /// Implements the `@shlExact` builtin.
- /// Uses the `pl_node` union field with payload `Bin`.
- shl_exact,
- /// Integer shift-right. Arithmetic or logical depending on the signedness of the integer type.
- /// Uses the `pl_node` union field. Payload is `Bin`.
- shr,
- /// Implements the `@shrExact` builtin.
- /// Uses the `pl_node` union field with payload `Bin`.
- shr_exact,
-
/// Implements the `@bitOffsetOf` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
bit_offset_of,
@@ -982,6 +948,17 @@ pub const Inst = struct {
@"await",
await_nosuspend,
+ /// When a type or function refers to a comptime value from an outer
+ /// scope, that forms a closure over the comptime value. The outer scope
+ /// will record a capture of that value, which encodes its current state
+ /// and marks it to persist. Uses `un_tok` field. Operand is the
+ /// instruction value to capture.
+ closure_capture,
+ /// The inner scope of a closure uses closure_get to retrieve the value
+ /// stored by the outer scope. Uses `inst_node` field. Operand is the
+ /// closure_capture instruction ref.
+ closure_get,
+
/// The ZIR instruction tag is one of the `Extended` ones.
/// Uses the `extended` union field.
extended,
@@ -996,6 +973,7 @@ pub const Inst = struct {
.param_anytype_comptime,
.add,
.addwrap,
+ .add_sat,
.alloc,
.alloc_mut,
.alloc_comptime,
@@ -1026,10 +1004,6 @@ pub const Inst = struct {
.breakpoint,
.fence,
.call,
- .call_chkused,
- .call_compile_time,
- .call_nosuspend,
- .call_async,
.cmp_lt,
.cmp_lte,
.cmp_eq,
@@ -1037,9 +1011,6 @@ pub const Inst = struct {
.cmp_gt,
.cmp_neq,
.coerce_result_ptr,
- .opaque_decl,
- .opaque_decl_anon,
- .opaque_decl_func,
.error_set_decl,
.error_set_decl_anon,
.error_set_decl_func,
@@ -1055,10 +1026,13 @@ pub const Inst = struct {
.ensure_result_used,
.ensure_result_non_error,
.@"export",
+ .export_value,
.field_ptr,
.field_val,
+ .field_call_bind,
.field_ptr_named,
.field_val_named,
+ .field_call_bind_named,
.func,
.func_inferred,
.has_decl,
@@ -1074,9 +1048,10 @@ pub const Inst = struct {
.mod_rem,
.mul,
.mulwrap,
- .param_type,
+ .mul_sat,
.ref,
.shl,
+ .shl_sat,
.shr,
.store,
.store_node,
@@ -1085,6 +1060,7 @@ pub const Inst = struct {
.str,
.sub,
.subwrap,
+ .sub_sat,
.negate,
.negate_wrap,
.typeof,
@@ -1231,6 +1207,8 @@ pub const Inst = struct {
.await_nosuspend,
.ret_err_value_code,
.extended,
+ .closure_get,
+ .closure_capture,
=> false,
.@"break",
@@ -1256,6 +1234,14 @@ pub const Inst = struct {
break :list std.enums.directEnumArray(Tag, Data.FieldEnum, 0, .{
.add = .pl_node,
.addwrap = .pl_node,
+ .add_sat = .pl_node,
+ .sub = .pl_node,
+ .subwrap = .pl_node,
+ .sub_sat = .pl_node,
+ .mul = .pl_node,
+ .mulwrap = .pl_node,
+ .mul_sat = .pl_node,
+
.param = .pl_tok,
.param_comptime = .pl_tok,
.param_anytype = .str_tok,
@@ -1285,10 +1271,6 @@ pub const Inst = struct {
.break_inline = .@"break",
.breakpoint = .node,
.call = .pl_node,
- .call_chkused = .pl_node,
- .call_compile_time = .pl_node,
- .call_nosuspend = .pl_node,
- .call_async = .pl_node,
.cmp_lt = .pl_node,
.cmp_lte = .pl_node,
.cmp_eq = .pl_node,
@@ -1298,9 +1280,6 @@ pub const Inst = struct {
.coerce_result_ptr = .bin,
.condbr = .pl_node,
.condbr_inline = .pl_node,
- .opaque_decl = .pl_node,
- .opaque_decl_anon = .pl_node,
- .opaque_decl_func = .pl_node,
.error_set_decl = .pl_node,
.error_set_decl_anon = .pl_node,
.error_set_decl_func = .pl_node,
@@ -1318,10 +1297,13 @@ pub const Inst = struct {
.error_union_type = .pl_node,
.error_value = .str_tok,
.@"export" = .pl_node,
+ .export_value = .pl_node,
.field_ptr = .pl_node,
.field_val = .pl_node,
.field_ptr_named = .pl_node,
.field_val_named = .pl_node,
+ .field_call_bind = .pl_node,
+ .field_call_bind_named = .pl_node,
.func = .pl_node,
.func_inferred = .pl_node,
.import = .str_tok,
@@ -1339,9 +1321,6 @@ pub const Inst = struct {
.repeat_inline = .node,
.merge_error_sets = .pl_node,
.mod_rem = .pl_node,
- .mul = .pl_node,
- .mulwrap = .pl_node,
- .param_type = .param_type,
.ref = .un_tok,
.ret_node = .un_node,
.ret_load = .un_node,
@@ -1358,8 +1337,6 @@ pub const Inst = struct {
.store_to_block_ptr = .bin,
.store_to_inferred_ptr = .bin,
.str = .str,
- .sub = .pl_node,
- .subwrap = .pl_node,
.negate = .un_node,
.negate_wrap = .un_node,
.typeof = .un_node,
@@ -1416,7 +1393,7 @@ pub const Inst = struct {
.type_info = .un_node,
.size_of = .un_node,
.bit_size_of = .un_node,
- .fence = .node,
+ .fence = .un_node,
.ptr_to_int = .un_node,
.error_to_int = .un_node,
@@ -1480,6 +1457,7 @@ pub const Inst = struct {
.shl = .pl_node,
.shl_exact = .pl_node,
+ .shl_sat = .pl_node,
.shr = .pl_node,
.shr_exact = .pl_node,
@@ -1517,6 +1495,9 @@ pub const Inst = struct {
.@"await" = .un_node,
.await_nosuspend = .un_node,
+ .closure_capture = .un_tok,
+ .closure_get = .inst_node,
+
.extended = .extended,
});
};
@@ -1549,6 +1530,10 @@ pub const Inst = struct {
/// `operand` is payload index to `UnionDecl`.
/// `small` is `UnionDecl.Small`.
union_decl,
+ /// An opaque type definition. Contains references to decls and captures.
+ /// `operand` is payload index to `OpaqueDecl`.
+ /// `small` is `OpaqueDecl.Small`.
+ opaque_decl,
/// Obtains a pointer to the return value.
/// `operand` is `src_node: i32`.
ret_ptr,
@@ -1629,22 +1614,6 @@ pub const Inst = struct {
wasm_memory_size,
/// `operand` is payload index to `BinNode`.
wasm_memory_grow,
- /// Implements the `@addWithSaturation` builtin.
- /// `operand` is payload index to `SaturatingArithmetic`.
- /// `small` is unused.
- add_with_saturation,
- /// Implements the `@subWithSaturation` builtin.
- /// `operand` is payload index to `SaturatingArithmetic`.
- /// `small` is unused.
- sub_with_saturation,
- /// Implements the `@mulWithSaturation` builtin.
- /// `operand` is payload index to `SaturatingArithmetic`.
- /// `small` is unused.
- mul_with_saturation,
- /// Implements the `@shlWithSaturation` builtin.
- /// `operand` is payload index to `SaturatingArithmetic`.
- /// `small` is unused.
- shl_with_saturation,
pub const InstData = struct {
opcode: Extended,
@@ -1717,6 +1686,7 @@ pub const Inst = struct {
atomic_order_type,
atomic_rmw_op_type,
calling_convention_type,
+ address_space_type,
float_mode_type,
reduce_op_type,
call_options_type,
@@ -1973,6 +1943,10 @@ pub const Inst = struct {
.ty = Type.initTag(.type),
.val = Value.initTag(.calling_convention_type),
},
+ .address_space_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.address_space_type),
+ },
.float_mode_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.float_mode_type),
@@ -2174,8 +2148,9 @@ pub const Inst = struct {
is_volatile: bool,
has_sentinel: bool,
has_align: bool,
+ has_addrspace: bool,
has_bit_range: bool,
- _: u2 = undefined,
+ _: u1 = undefined,
},
size: std.builtin.TypeInfo.Pointer.Size,
/// Index into extra. See `PtrType`.
@@ -2197,10 +2172,6 @@ pub const Inst = struct {
/// Points to a `Block`.
payload_index: u32,
},
- param_type: struct {
- callee: Ref,
- param_index: u32,
- },
@"unreachable": struct {
/// Offset from Decl AST node index.
/// `Tag` determines which kind of AST node this points to.
@@ -2227,6 +2198,18 @@ pub const Inst = struct {
line: u32,
column: u32,
},
+ /// Used for unary operators which reference an inst,
+ /// with an AST node source location.
+ inst_node: struct {
+ /// Offset from Decl AST node index.
+ src_node: i32,
+ /// The meaning of this operand depends on the corresponding `Tag`.
+ inst: Index,
+
+ pub fn src(self: @This()) LazySrcLoc {
+ return .{ .node_offset = self.src_node };
+ }
+ },
// Make sure we don't accidentally add a field to make this union
// bigger than expected. Note that in Debug builds, Zig is allowed
@@ -2259,11 +2242,11 @@ pub const Inst = struct {
ptr_type,
int_type,
bool_br,
- param_type,
@"unreachable",
@"break",
switch_capture,
dbg_stmt,
+ inst_node,
};
};
@@ -2386,8 +2369,27 @@ pub const Inst = struct {
/// Stored inside extra, with trailing arguments according to `args_len`.
/// Each argument is a `Ref`.
pub const Call = struct {
+ // Note: Flags *must* come first so that unusedResultExpr
+ // can find it when it goes to modify them.
+ flags: Flags,
callee: Ref,
- args_len: u32,
+
+ pub const Flags = packed struct {
+ /// std.builtin.CallOptions.Modifier in packed form
+ pub const PackedModifier = u3;
+ pub const PackedArgsLen = u28;
+
+ packed_modifier: PackedModifier,
+ ensure_result_used: bool = false,
+ args_len: PackedArgsLen,
+
+ comptime {
+ if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32)
+ @compileError("Layout of Call.Flags needs to be updated!");
+ if (@bitSizeOf(std.builtin.CallOptions.Modifier) != @bitSizeOf(PackedModifier))
+ @compileError("Call.Flags.PackedModifier needs to be updated!");
+ }
+ };
};
pub const BuiltinCall = struct {
@@ -2405,12 +2407,13 @@ pub const Inst = struct {
else_body_len: u32,
};
- /// Stored in extra. Depending on the flags in Data, there will be up to 4
+ /// Stored in extra. Depending on the flags in Data, there will be up to 5
/// trailing Ref fields:
/// 0. sentinel: Ref // if `has_sentinel` flag is set
/// 1. align: Ref // if `has_align` flag is set
- /// 2. bit_start: Ref // if `has_bit_range` flag is set
- /// 3. bit_end: Ref // if `has_bit_range` flag is set
+ /// 2. address_space: Ref // if `has_addrspace` flag is set
+ /// 3. bit_start: Ref // if `has_bit_range` flag is set
+ /// 4. bit_end: Ref // if `has_bit_range` flag is set
pub const PtrType = struct {
elem_type: Ref,
};
@@ -2528,7 +2531,7 @@ pub const Inst = struct {
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
- /// 0bX000: whether corresponding decl has a linksection expression
+ /// 0bX000: whether corresponding decl has a linksection or an address space expression
/// 5. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
@@ -2540,7 +2543,10 @@ pub const Inst = struct {
/// this is a test decl, and the name starts at `name+1`.
/// value: Index,
/// align: Ref, // if corresponding bit is set
- /// link_section: Ref, // if corresponding bit is set
+ /// link_section_or_address_space: { // if corresponding bit is set.
+ /// link_section: Ref,
+ /// address_space: Ref,
+ /// }
/// }
/// 6. inst: Index // for every body_len
/// 7. flags: u32 // for every 8 fields
@@ -2592,7 +2598,7 @@ pub const Inst = struct {
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
- /// 0bX000: whether corresponding decl has a linksection expression
+ /// 0bX000: whether corresponding decl has a linksection or an address space expression
/// 6. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
@@ -2604,7 +2610,10 @@ pub const Inst = struct {
/// this is a test decl, and the name starts at `name+1`.
/// value: Index,
/// align: Ref, // if corresponding bit is set
- /// link_section: Ref, // if corresponding bit is set
+ /// link_section_or_address_space: { // if corresponding bit is set.
+ /// link_section: Ref,
+ /// address_space: Ref,
+ /// }
/// }
/// 7. inst: Index // for every body_len
/// 8. has_bits: u32 // for every 32 fields
@@ -2637,7 +2646,7 @@ pub const Inst = struct {
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
- /// 0bX000: whether corresponding decl has a linksection expression
+ /// 0bX000: whether corresponding decl has a linksection or an address space expression
/// 6. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
@@ -2649,7 +2658,10 @@ pub const Inst = struct {
/// this is a test decl, and the name starts at `name+1`.
/// value: Index,
/// align: Ref, // if corresponding bit is set
- /// link_section: Ref, // if corresponding bit is set
+ /// link_section_or_address_space: { // if corresponding bit is set.
+ /// link_section: Ref,
+ /// address_space: Ref,
+ /// }
/// }
/// 7. inst: Index // for every body_len
/// 8. has_bits: u32 // for every 8 fields
@@ -2673,21 +2685,27 @@ pub const Inst = struct {
has_decls_len: bool,
name_strategy: NameStrategy,
layout: std.builtin.TypeInfo.ContainerLayout,
- /// false: union(tag_type)
- /// true: union(enum(tag_type))
+ /// has_tag_type | auto_enum_tag | result
+ /// -------------------------------------
+ /// false | false | union { }
+ /// false | true | union(enum) { }
+ /// true | true | union(enum(T)) { }
+ /// true | false | union(T) { }
auto_enum_tag: bool,
_: u6 = undefined,
};
};
/// Trailing:
- /// 0. decl_bits: u32 // for every 8 decls
+ /// 0. src_node: i32, // if has_src_node
+ /// 1. decls_len: u32, // if has_decls_len
+ /// 2. decl_bits: u32 // for every 8 decls
/// - sets of 4 bits:
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
- /// 0bX000: whether corresponding decl has a linksection expression
- /// 1. decl: { // for every decls_len
+ /// 0bX000: whether corresponding decl has a linksection or an address space expression
+ /// 3. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
/// name: u32, // null terminated string index
@@ -2698,10 +2716,18 @@ pub const Inst = struct {
/// this is a test decl, and the name starts at `name+1`.
/// value: Index,
/// align: Ref, // if corresponding bit is set
- /// link_section: Ref, // if corresponding bit is set
+ /// link_section_or_address_space: { // if corresponding bit is set.
+ /// link_section: Ref,
+ /// address_space: Ref,
+ /// }
/// }
pub const OpaqueDecl = struct {
- decls_len: u32,
+ pub const Small = packed struct {
+ has_src_node: bool,
+ has_decls_len: bool,
+ name_strategy: NameStrategy,
+ _: u12 = undefined,
+ };
};
/// Trailing: field_name: u32 // for every field: null terminated string index
@@ -2767,12 +2793,6 @@ pub const Inst = struct {
ptr: Ref,
};
- pub const SaturatingArithmetic = struct {
- node: i32,
- lhs: Ref,
- rhs: Ref,
- };
-
pub const Cmpxchg = struct {
ptr: Ref,
expected_value: Ref,
@@ -2858,6 +2878,14 @@ pub const Inst = struct {
/// 1. align_inst: Ref, // if small 0b00X0 is set
pub const AllocExtended = struct {
src_node: i32,
+
+ pub const Small = packed struct {
+ has_type: bool,
+ has_align: bool,
+ is_const: bool,
+ is_comptime: bool,
+ _: u12 = undefined,
+ };
};
pub const Export = struct {
@@ -2869,6 +2897,12 @@ pub const Inst = struct {
options: Ref,
};
+ pub const ExportValue = struct {
+ /// The comptime value to export.
+ operand: Ref,
+ options: Ref,
+ };
+
/// Trailing: `CompileErrors.Item` for each `items_len`.
pub const CompileErrors = struct {
items_len: u32,
@@ -2904,1794 +2938,6 @@ pub const Inst = struct {
pub const SpecialProng = enum { none, @"else", under };
-const Writer = struct {
- gpa: *Allocator,
- arena: *Allocator,
- file: *Module.Scope.File,
- code: Zir,
- indent: u32,
- parent_decl_node: u32,
-
- fn relativeToNodeIndex(self: *Writer, offset: i32) Ast.Node.Index {
- return @bitCast(Ast.Node.Index, offset + @bitCast(i32, self.parent_decl_node));
- }
-
- fn writeInstToStream(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const tags = self.code.instructions.items(.tag);
- const tag = tags[inst];
- try stream.print("= {s}(", .{@tagName(tags[inst])});
- switch (tag) {
- .array_type,
- .as,
- .coerce_result_ptr,
- .elem_ptr,
- .elem_val,
- .store,
- .store_to_block_ptr,
- .store_to_inferred_ptr,
- .field_ptr_type,
- => try self.writeBin(stream, inst),
-
- .alloc,
- .alloc_mut,
- .alloc_comptime,
- .indexable_ptr_len,
- .anyframe_type,
- .bit_not,
- .bool_not,
- .negate,
- .negate_wrap,
- .load,
- .ensure_result_used,
- .ensure_result_non_error,
- .ret_node,
- .ret_load,
- .resolve_inferred_alloc,
- .optional_type,
- .optional_payload_safe,
- .optional_payload_unsafe,
- .optional_payload_safe_ptr,
- .optional_payload_unsafe_ptr,
- .err_union_payload_safe,
- .err_union_payload_unsafe,
- .err_union_payload_safe_ptr,
- .err_union_payload_unsafe_ptr,
- .err_union_code,
- .err_union_code_ptr,
- .is_non_null,
- .is_non_null_ptr,
- .is_non_err,
- .is_non_err_ptr,
- .typeof,
- .typeof_elem,
- .struct_init_empty,
- .type_info,
- .size_of,
- .bit_size_of,
- .typeof_log2_int_type,
- .log2_int_type,
- .ptr_to_int,
- .error_to_int,
- .int_to_error,
- .compile_error,
- .set_eval_branch_quota,
- .enum_to_int,
- .align_of,
- .bool_to_int,
- .embed_file,
- .error_name,
- .panic,
- .set_align_stack,
- .set_cold,
- .set_float_mode,
- .set_runtime_safety,
- .sqrt,
- .sin,
- .cos,
- .exp,
- .exp2,
- .log,
- .log2,
- .log10,
- .fabs,
- .floor,
- .ceil,
- .trunc,
- .round,
- .tag_name,
- .reify,
- .type_name,
- .frame_type,
- .frame_size,
- .clz,
- .ctz,
- .pop_count,
- .byte_swap,
- .bit_reverse,
- .elem_type,
- .@"resume",
- .@"await",
- .await_nosuspend,
- => try self.writeUnNode(stream, inst),
-
- .ref,
- .ret_coerce,
- .ensure_err_payload_void,
- => try self.writeUnTok(stream, inst),
-
- .bool_br_and,
- .bool_br_or,
- => try self.writeBoolBr(stream, inst),
-
- .array_type_sentinel => try self.writeArrayTypeSentinel(stream, inst),
- .param_type => try self.writeParamType(stream, inst),
- .ptr_type_simple => try self.writePtrTypeSimple(stream, inst),
- .ptr_type => try self.writePtrType(stream, inst),
- .int => try self.writeInt(stream, inst),
- .int_big => try self.writeIntBig(stream, inst),
- .float => try self.writeFloat(stream, inst),
- .float128 => try self.writeFloat128(stream, inst),
- .str => try self.writeStr(stream, inst),
- .int_type => try self.writeIntType(stream, inst),
-
- .@"break",
- .break_inline,
- => try self.writeBreak(stream, inst),
-
- .elem_ptr_node,
- .elem_val_node,
- .field_ptr_named,
- .field_val_named,
- .slice_start,
- .slice_end,
- .slice_sentinel,
- .array_init,
- .array_init_anon,
- .array_init_ref,
- .array_init_anon_ref,
- .union_init_ptr,
- .shuffle,
- .select,
- .atomic_rmw,
- .atomic_store,
- .mul_add,
- .builtin_call,
- .field_parent_ptr,
- .memcpy,
- .memset,
- .builtin_async_call,
- => try self.writePlNode(stream, inst),
-
- .struct_init,
- .struct_init_ref,
- => try self.writeStructInit(stream, inst),
-
- .cmpxchg_strong,
- .cmpxchg_weak,
- => try self.writeCmpxchg(stream, inst),
-
- .struct_init_anon,
- .struct_init_anon_ref,
- => try self.writeStructInitAnon(stream, inst),
-
- .field_type => try self.writeFieldType(stream, inst),
- .field_type_ref => try self.writeFieldTypeRef(stream, inst),
-
- .add,
- .addwrap,
- .array_cat,
- .array_mul,
- .mul,
- .mulwrap,
- .sub,
- .subwrap,
- .cmp_lt,
- .cmp_lte,
- .cmp_eq,
- .cmp_gte,
- .cmp_gt,
- .cmp_neq,
- .div,
- .has_decl,
- .has_field,
- .mod_rem,
- .shl,
- .shl_exact,
- .shr,
- .shr_exact,
- .xor,
- .store_node,
- .error_union_type,
- .merge_error_sets,
- .bit_and,
- .bit_or,
- .float_to_int,
- .int_to_float,
- .int_to_ptr,
- .int_to_enum,
- .float_cast,
- .int_cast,
- .err_set_cast,
- .ptr_cast,
- .truncate,
- .align_cast,
- .div_exact,
- .div_floor,
- .div_trunc,
- .mod,
- .rem,
- .bit_offset_of,
- .offset_of,
- .splat,
- .reduce,
- .atomic_load,
- .bitcast,
- .bitcast_result_ptr,
- .vector_type,
- .maximum,
- .minimum,
- => try self.writePlNodeBin(stream, inst),
-
- .@"export" => try self.writePlNodeExport(stream, inst),
-
- .call,
- .call_chkused,
- .call_compile_time,
- .call_nosuspend,
- .call_async,
- => try self.writePlNodeCall(stream, inst),
-
- .block,
- .block_inline,
- .suspend_block,
- .loop,
- .validate_struct_init_ptr,
- .validate_array_init_ptr,
- .c_import,
- => try self.writePlNodeBlock(stream, inst),
-
- .condbr,
- .condbr_inline,
- => try self.writePlNodeCondBr(stream, inst),
-
- .opaque_decl => try self.writeOpaqueDecl(stream, inst, .parent),
- .opaque_decl_anon => try self.writeOpaqueDecl(stream, inst, .anon),
- .opaque_decl_func => try self.writeOpaqueDecl(stream, inst, .func),
-
- .error_set_decl => try self.writeErrorSetDecl(stream, inst, .parent),
- .error_set_decl_anon => try self.writeErrorSetDecl(stream, inst, .anon),
- .error_set_decl_func => try self.writeErrorSetDecl(stream, inst, .func),
-
- .switch_block => try self.writePlNodeSwitchBr(stream, inst, .none),
- .switch_block_else => try self.writePlNodeSwitchBr(stream, inst, .@"else"),
- .switch_block_under => try self.writePlNodeSwitchBr(stream, inst, .under),
- .switch_block_ref => try self.writePlNodeSwitchBr(stream, inst, .none),
- .switch_block_ref_else => try self.writePlNodeSwitchBr(stream, inst, .@"else"),
- .switch_block_ref_under => try self.writePlNodeSwitchBr(stream, inst, .under),
-
- .switch_block_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .none),
- .switch_block_else_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .@"else"),
- .switch_block_under_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .under),
- .switch_block_ref_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .none),
- .switch_block_ref_else_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .@"else"),
- .switch_block_ref_under_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .under),
-
- .field_ptr,
- .field_val,
- => try self.writePlNodeField(stream, inst),
-
- .as_node => try self.writeAs(stream, inst),
-
- .breakpoint,
- .fence,
- .repeat,
- .repeat_inline,
- .alloc_inferred,
- .alloc_inferred_mut,
- .alloc_inferred_comptime,
- => try self.writeNode(stream, inst),
-
- .error_value,
- .enum_literal,
- .decl_ref,
- .decl_val,
- .import,
- .ret_err_value,
- .ret_err_value_code,
- .param_anytype,
- .param_anytype_comptime,
- => try self.writeStrTok(stream, inst),
-
- .param, .param_comptime => try self.writeParam(stream, inst),
-
- .func => try self.writeFunc(stream, inst, false),
- .func_inferred => try self.writeFunc(stream, inst, true),
-
- .@"unreachable" => try self.writeUnreachable(stream, inst),
-
- .switch_capture,
- .switch_capture_ref,
- .switch_capture_multi,
- .switch_capture_multi_ref,
- .switch_capture_else,
- .switch_capture_else_ref,
- => try self.writeSwitchCapture(stream, inst),
-
- .dbg_stmt => try self.writeDbgStmt(stream, inst),
-
- .extended => try self.writeExtended(stream, inst),
- }
- }
-
- fn writeExtended(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const extended = self.code.instructions.items(.data)[inst].extended;
- try stream.print("{s}(", .{@tagName(extended.opcode)});
- switch (extended.opcode) {
- .ret_ptr,
- .ret_type,
- .this,
- .ret_addr,
- .error_return_trace,
- .frame,
- .frame_address,
- .builtin_src,
- => try self.writeExtNode(stream, extended),
-
- .@"asm" => try self.writeAsm(stream, extended),
- .func => try self.writeFuncExtended(stream, extended),
- .variable => try self.writeVarExtended(stream, extended),
-
- .compile_log,
- .typeof_peer,
- => try self.writeNodeMultiOp(stream, extended),
-
- .add_with_overflow,
- .sub_with_overflow,
- .mul_with_overflow,
- .shl_with_overflow,
- => try self.writeOverflowArithmetic(stream, extended),
-
- .add_with_saturation,
- .sub_with_saturation,
- .mul_with_saturation,
- .shl_with_saturation,
- => try self.writeSaturatingArithmetic(stream, extended),
- .struct_decl => try self.writeStructDecl(stream, extended),
- .union_decl => try self.writeUnionDecl(stream, extended),
- .enum_decl => try self.writeEnumDecl(stream, extended),
-
- .alloc,
- .builtin_extern,
- .c_undef,
- .c_include,
- .c_define,
- .wasm_memory_size,
- .wasm_memory_grow,
- => try stream.writeAll("TODO))"),
- }
- }
-
- fn writeExtNode(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
- const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
- try stream.writeAll(")) ");
- try self.writeSrc(stream, src);
- }
-
- fn writeBin(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].bin;
- try self.writeInstRef(stream, inst_data.lhs);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, inst_data.rhs);
- try stream.writeByte(')');
- }
-
- fn writeUnNode(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].un_node;
- try self.writeInstRef(stream, inst_data.operand);
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeUnTok(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].un_tok;
- try self.writeInstRef(stream, inst_data.operand);
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeArrayTypeSentinel(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].array_type_sentinel;
- _ = inst_data;
- try stream.writeAll("TODO)");
- }
-
- fn writeParamType(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].param_type;
- try self.writeInstRef(stream, inst_data.callee);
- try stream.print(", {d})", .{inst_data.param_index});
- }
-
- fn writePtrTypeSimple(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].ptr_type_simple;
- const str_allowzero = if (inst_data.is_allowzero) "allowzero, " else "";
- const str_const = if (!inst_data.is_mutable) "const, " else "";
- const str_volatile = if (inst_data.is_volatile) "volatile, " else "";
- try self.writeInstRef(stream, inst_data.elem_type);
- try stream.print(", {s}{s}{s}{s})", .{
- str_allowzero,
- str_const,
- str_volatile,
- @tagName(inst_data.size),
- });
- }
-
- fn writePtrType(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].ptr_type;
- _ = inst_data;
- try stream.writeAll("TODO)");
- }
-
- fn writeInt(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].int;
- try stream.print("{d})", .{inst_data});
- }
-
- fn writeIntBig(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].str;
- const byte_count = inst_data.len * @sizeOf(std.math.big.Limb);
- const limb_bytes = self.code.string_bytes[inst_data.start..][0..byte_count];
- // limb_bytes is not aligned properly; we must allocate and copy the bytes
- // in order to accomplish this.
- const limbs = try self.gpa.alloc(std.math.big.Limb, inst_data.len);
- defer self.gpa.free(limbs);
-
- mem.copy(u8, mem.sliceAsBytes(limbs), limb_bytes);
- const big_int: std.math.big.int.Const = .{
- .limbs = limbs,
- .positive = true,
- };
- const as_string = try big_int.toStringAlloc(self.gpa, 10, .lower);
- defer self.gpa.free(as_string);
- try stream.print("{s})", .{as_string});
- }
-
- fn writeFloat(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const number = self.code.instructions.items(.data)[inst].float;
- try stream.print("{d})", .{number});
- }
-
- fn writeFloat128(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.Float128, inst_data.payload_index).data;
- const src = inst_data.src();
- const number = extra.get();
- // TODO improve std.format to be able to print f128 values
- try stream.print("{d}) ", .{@floatCast(f64, number)});
- try self.writeSrc(stream, src);
- }
-
- fn writeStr(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].str;
- const str = inst_data.get(self.code);
- try stream.print("\"{}\")", .{std.zig.fmtEscapes(str)});
- }
-
- fn writePlNode(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- try stream.writeAll("TODO) ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeParam(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_tok;
- const extra = self.code.extraData(Inst.Param, inst_data.payload_index);
- const body = self.code.extra[extra.end..][0..extra.data.body_len];
- try stream.print("\"{}\", ", .{
- std.zig.fmtEscapes(self.code.nullTerminatedString(extra.data.name)),
- });
- try stream.writeAll("{\n");
- self.indent += 2;
- try self.writeBody(stream, body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}) ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writePlNodeBin(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.Bin, inst_data.payload_index).data;
- try self.writeInstRef(stream, extra.lhs);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.rhs);
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writePlNodeExport(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.Export, inst_data.payload_index).data;
- const decl_name = self.code.nullTerminatedString(extra.decl_name);
-
- try self.writeInstRef(stream, extra.namespace);
- try stream.print(", {}, ", .{std.zig.fmtId(decl_name)});
- try self.writeInstRef(stream, extra.options);
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeStructInit(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.StructInit, inst_data.payload_index);
- var field_i: u32 = 0;
- var extra_index = extra.end;
-
- while (field_i < extra.data.fields_len) : (field_i += 1) {
- const item = self.code.extraData(Inst.StructInit.Item, extra_index);
- extra_index = item.end;
-
- if (field_i != 0) {
- try stream.writeAll(", [");
- } else {
- try stream.writeAll("[");
- }
- try self.writeInstIndex(stream, item.data.field_type);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, item.data.init);
- try stream.writeAll("]");
- }
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeCmpxchg(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.Cmpxchg, inst_data.payload_index).data;
-
- try self.writeInstRef(stream, extra.ptr);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.expected_value);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.new_value);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.success_order);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.failure_order);
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeStructInitAnon(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.StructInitAnon, inst_data.payload_index);
- var field_i: u32 = 0;
- var extra_index = extra.end;
-
- while (field_i < extra.data.fields_len) : (field_i += 1) {
- const item = self.code.extraData(Inst.StructInitAnon.Item, extra_index);
- extra_index = item.end;
-
- const field_name = self.code.nullTerminatedString(item.data.field_name);
-
- const prefix = if (field_i != 0) ", [" else "[";
- try stream.print("{s}[{s}=", .{ prefix, field_name });
- try self.writeInstRef(stream, item.data.init);
- try stream.writeAll("]");
- }
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeFieldType(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.FieldType, inst_data.payload_index).data;
- try self.writeInstRef(stream, extra.container_type);
- const field_name = self.code.nullTerminatedString(extra.name_start);
- try stream.print(", {s}) ", .{field_name});
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeFieldTypeRef(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.FieldTypeRef, inst_data.payload_index).data;
- try self.writeInstRef(stream, extra.container_type);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.field_name);
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeNodeMultiOp(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
- const extra = self.code.extraData(Inst.NodeMultiOp, extended.operand);
- const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
- const operands = self.code.refSlice(extra.end, extended.small);
-
- for (operands) |operand, i| {
- if (i != 0) try stream.writeAll(", ");
- try self.writeInstRef(stream, operand);
- }
- try stream.writeAll(")) ");
- try self.writeSrc(stream, src);
- }
-
- fn writeAsm(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
- const extra = self.code.extraData(Inst.Asm, extended.operand);
- const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
- const outputs_len = @truncate(u5, extended.small);
- const inputs_len = @truncate(u5, extended.small >> 5);
- const clobbers_len = @truncate(u5, extended.small >> 10);
- const is_volatile = @truncate(u1, extended.small >> 15) != 0;
- const asm_source = self.code.nullTerminatedString(extra.data.asm_source);
-
- try self.writeFlag(stream, "volatile, ", is_volatile);
- try stream.print("\"{}\", ", .{std.zig.fmtEscapes(asm_source)});
- try stream.writeAll(", ");
-
- var extra_i: usize = extra.end;
- var output_type_bits = extra.data.output_type_bits;
- {
- var i: usize = 0;
- while (i < outputs_len) : (i += 1) {
- const output = self.code.extraData(Inst.Asm.Output, extra_i);
- extra_i = output.end;
-
- const is_type = @truncate(u1, output_type_bits) != 0;
- output_type_bits >>= 1;
-
- const name = self.code.nullTerminatedString(output.data.name);
- const constraint = self.code.nullTerminatedString(output.data.constraint);
- try stream.print("output({}, \"{}\", ", .{
- std.zig.fmtId(name), std.zig.fmtEscapes(constraint),
- });
- try self.writeFlag(stream, "->", is_type);
- try self.writeInstRef(stream, output.data.operand);
- try stream.writeAll(")");
- if (i + 1 < outputs_len) {
- try stream.writeAll("), ");
- }
- }
- }
- {
- var i: usize = 0;
- while (i < inputs_len) : (i += 1) {
- const input = self.code.extraData(Inst.Asm.Input, extra_i);
- extra_i = input.end;
-
- const name = self.code.nullTerminatedString(input.data.name);
- const constraint = self.code.nullTerminatedString(input.data.constraint);
- try stream.print("input({}, \"{}\", ", .{
- std.zig.fmtId(name), std.zig.fmtEscapes(constraint),
- });
- try self.writeInstRef(stream, input.data.operand);
- try stream.writeAll(")");
- if (i + 1 < inputs_len) {
- try stream.writeAll(", ");
- }
- }
- }
- {
- var i: usize = 0;
- while (i < clobbers_len) : (i += 1) {
- const str_index = self.code.extra[extra_i];
- extra_i += 1;
- const clobber = self.code.nullTerminatedString(str_index);
- try stream.print("{}", .{std.zig.fmtId(clobber)});
- if (i + 1 < clobbers_len) {
- try stream.writeAll(", ");
- }
- }
- }
- try stream.writeAll(")) ");
- try self.writeSrc(stream, src);
- }
-
- fn writeOverflowArithmetic(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
- const extra = self.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = extra.node };
-
- try self.writeInstRef(stream, extra.lhs);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.rhs);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.ptr);
- try stream.writeAll(")) ");
- try self.writeSrc(stream, src);
- }
-
- fn writeSaturatingArithmetic(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
- const extra = self.code.extraData(Zir.Inst.SaturatingArithmetic, extended.operand).data;
- const src: LazySrcLoc = .{ .node_offset = extra.node };
-
- try self.writeInstRef(stream, extra.lhs);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.rhs);
- try stream.writeAll(", ");
- try stream.writeAll(") ");
- try self.writeSrc(stream, src);
- }
-
- fn writePlNodeCall(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.Call, inst_data.payload_index);
- const args = self.code.refSlice(extra.end, extra.data.args_len);
-
- try self.writeInstRef(stream, extra.data.callee);
- try stream.writeAll(", [");
- for (args) |arg, i| {
- if (i != 0) try stream.writeAll(", ");
- try self.writeInstRef(stream, arg);
- }
- try stream.writeAll("]) ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writePlNodeBlock(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- try self.writePlNodeBlockWithoutSrc(stream, inst);
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writePlNodeBlockWithoutSrc(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.Block, inst_data.payload_index);
- const body = self.code.extra[extra.end..][0..extra.data.body_len];
- try stream.writeAll("{\n");
- self.indent += 2;
- try self.writeBody(stream, body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}) ");
- }
-
- fn writePlNodeCondBr(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.CondBr, inst_data.payload_index);
- const then_body = self.code.extra[extra.end..][0..extra.data.then_body_len];
- const else_body = self.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- try self.writeInstRef(stream, extra.data.condition);
- try stream.writeAll(", {\n");
- self.indent += 2;
- try self.writeBody(stream, then_body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}, {\n");
- self.indent += 2;
- try self.writeBody(stream, else_body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}) ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeStructDecl(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
- const small = @bitCast(Inst.StructDecl.Small, extended.small);
-
- var extra_index: usize = extended.operand;
-
- const src_node: ?i32 = if (small.has_src_node) blk: {
- const src_node = @bitCast(i32, self.code.extra[extra_index]);
- extra_index += 1;
- break :blk src_node;
- } else null;
-
- const body_len = if (small.has_body_len) blk: {
- const body_len = self.code.extra[extra_index];
- extra_index += 1;
- break :blk body_len;
- } else 0;
-
- const fields_len = if (small.has_fields_len) blk: {
- const fields_len = self.code.extra[extra_index];
- extra_index += 1;
- break :blk fields_len;
- } else 0;
-
- const decls_len = if (small.has_decls_len) blk: {
- const decls_len = self.code.extra[extra_index];
- extra_index += 1;
- break :blk decls_len;
- } else 0;
-
- try self.writeFlag(stream, "known_has_bits, ", small.known_has_bits);
- try stream.print("{s}, {s}, ", .{
- @tagName(small.name_strategy), @tagName(small.layout),
- });
-
- if (decls_len == 0) {
- try stream.writeAll("{}, ");
- } else {
- try stream.writeAll("{\n");
- self.indent += 2;
- extra_index = try self.writeDecls(stream, decls_len, extra_index);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}, ");
- }
-
- const body = self.code.extra[extra_index..][0..body_len];
- extra_index += body.len;
-
- if (fields_len == 0) {
- assert(body.len == 0);
- try stream.writeAll("{}, {})");
- } else {
- const prev_parent_decl_node = self.parent_decl_node;
- if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
- self.indent += 2;
- if (body.len == 0) {
- try stream.writeAll("{}, {\n");
- } else {
- try stream.writeAll("{\n");
- try self.writeBody(stream, body);
-
- try stream.writeByteNTimes(' ', self.indent - 2);
- try stream.writeAll("}, {\n");
- }
-
- const bits_per_field = 4;
- const fields_per_u32 = 32 / bits_per_field;
- const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
- var bit_bag_index: usize = extra_index;
- extra_index += bit_bags_count;
- var cur_bit_bag: u32 = undefined;
- var field_i: u32 = 0;
- while (field_i < fields_len) : (field_i += 1) {
- if (field_i % fields_per_u32 == 0) {
- cur_bit_bag = self.code.extra[bit_bag_index];
- bit_bag_index += 1;
- }
- const has_align = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
- const has_default = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
- const is_comptime = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
- const unused = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
-
- _ = unused;
-
- const field_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
- extra_index += 1;
- const field_type = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
-
- try stream.writeByteNTimes(' ', self.indent);
- try self.writeFlag(stream, "comptime ", is_comptime);
- try stream.print("{}: ", .{std.zig.fmtId(field_name)});
- try self.writeInstRef(stream, field_type);
-
- if (has_align) {
- const align_ref = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
-
- try stream.writeAll(" align(");
- try self.writeInstRef(stream, align_ref);
- try stream.writeAll(")");
- }
- if (has_default) {
- const default_ref = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
-
- try stream.writeAll(" = ");
- try self.writeInstRef(stream, default_ref);
- }
- try stream.writeAll(",\n");
- }
-
- self.parent_decl_node = prev_parent_decl_node;
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("})");
- }
- try self.writeSrcNode(stream, src_node);
- }
-
- fn writeUnionDecl(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
- const small = @bitCast(Inst.UnionDecl.Small, extended.small);
-
- var extra_index: usize = extended.operand;
-
- const src_node: ?i32 = if (small.has_src_node) blk: {
- const src_node = @bitCast(i32, self.code.extra[extra_index]);
- extra_index += 1;
- break :blk src_node;
- } else null;
-
- const tag_type_ref = if (small.has_tag_type) blk: {
- const tag_type_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- break :blk tag_type_ref;
- } else .none;
-
- const body_len = if (small.has_body_len) blk: {
- const body_len = self.code.extra[extra_index];
- extra_index += 1;
- break :blk body_len;
- } else 0;
-
- const fields_len = if (small.has_fields_len) blk: {
- const fields_len = self.code.extra[extra_index];
- extra_index += 1;
- break :blk fields_len;
- } else 0;
-
- const decls_len = if (small.has_decls_len) blk: {
- const decls_len = self.code.extra[extra_index];
- extra_index += 1;
- break :blk decls_len;
- } else 0;
-
- try stream.print("{s}, {s}, ", .{
- @tagName(small.name_strategy), @tagName(small.layout),
- });
- try self.writeFlag(stream, "autoenum, ", small.auto_enum_tag);
-
- if (decls_len == 0) {
- try stream.writeAll("{}, ");
- } else {
- try stream.writeAll("{\n");
- self.indent += 2;
- extra_index = try self.writeDecls(stream, decls_len, extra_index);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}, ");
- }
-
- assert(fields_len != 0);
-
- if (tag_type_ref != .none) {
- try self.writeInstRef(stream, tag_type_ref);
- try stream.writeAll(", ");
- }
-
- const body = self.code.extra[extra_index..][0..body_len];
- extra_index += body.len;
-
- const prev_parent_decl_node = self.parent_decl_node;
- if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
- self.indent += 2;
- if (body.len == 0) {
- try stream.writeAll("{}, {\n");
- } else {
- try stream.writeAll("{\n");
- try self.writeBody(stream, body);
-
- try stream.writeByteNTimes(' ', self.indent - 2);
- try stream.writeAll("}, {\n");
- }
-
- const bits_per_field = 4;
- const fields_per_u32 = 32 / bits_per_field;
- const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
- const body_end = extra_index;
- extra_index += bit_bags_count;
- var bit_bag_index: usize = body_end;
- var cur_bit_bag: u32 = undefined;
- var field_i: u32 = 0;
- while (field_i < fields_len) : (field_i += 1) {
- if (field_i % fields_per_u32 == 0) {
- cur_bit_bag = self.code.extra[bit_bag_index];
- bit_bag_index += 1;
- }
- const has_type = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
- const has_align = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
- const has_value = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
- const unused = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
-
- _ = unused;
-
- const field_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
- extra_index += 1;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.print("{}", .{std.zig.fmtId(field_name)});
-
- if (has_type) {
- const field_type = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
-
- try stream.writeAll(": ");
- try self.writeInstRef(stream, field_type);
- }
- if (has_align) {
- const align_ref = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
-
- try stream.writeAll(" align(");
- try self.writeInstRef(stream, align_ref);
- try stream.writeAll(")");
- }
- if (has_value) {
- const default_ref = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
-
- try stream.writeAll(" = ");
- try self.writeInstRef(stream, default_ref);
- }
- try stream.writeAll(",\n");
- }
-
- self.parent_decl_node = prev_parent_decl_node;
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("})");
- try self.writeSrcNode(stream, src_node);
- }
-
- fn writeDecls(self: *Writer, stream: anytype, decls_len: u32, extra_start: usize) !usize {
- const parent_decl_node = self.parent_decl_node;
- const bit_bags_count = std.math.divCeil(usize, decls_len, 8) catch unreachable;
- var extra_index = extra_start + bit_bags_count;
- var bit_bag_index: usize = extra_start;
- var cur_bit_bag: u32 = undefined;
- var decl_i: u32 = 0;
- while (decl_i < decls_len) : (decl_i += 1) {
- if (decl_i % 8 == 0) {
- cur_bit_bag = self.code.extra[bit_bag_index];
- bit_bag_index += 1;
- }
- const is_pub = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
- const is_exported = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
- const has_align = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
- const has_section = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
-
- const sub_index = extra_index;
-
- const hash_u32s = self.code.extra[extra_index..][0..4];
- extra_index += 4;
- const line = self.code.extra[extra_index];
- extra_index += 1;
- const decl_name_index = self.code.extra[extra_index];
- extra_index += 1;
- const decl_index = self.code.extra[extra_index];
- extra_index += 1;
- const align_inst: Inst.Ref = if (!has_align) .none else inst: {
- const inst = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- break :inst inst;
- };
- const section_inst: Inst.Ref = if (!has_section) .none else inst: {
- const inst = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- break :inst inst;
- };
-
- const pub_str = if (is_pub) "pub " else "";
- const hash_bytes = @bitCast([16]u8, hash_u32s.*);
- try stream.writeByteNTimes(' ', self.indent);
- if (decl_name_index == 0) {
- const name = if (is_exported) "usingnamespace" else "comptime";
- try stream.writeAll(pub_str);
- try stream.writeAll(name);
- } else if (decl_name_index == 1) {
- try stream.writeAll("test");
- } else {
- const raw_decl_name = self.code.nullTerminatedString(decl_name_index);
- const decl_name = if (raw_decl_name.len == 0)
- self.code.nullTerminatedString(decl_name_index + 1)
- else
- raw_decl_name;
- const test_str = if (raw_decl_name.len == 0) "test " else "";
- const export_str = if (is_exported) "export " else "";
- try stream.print("[{d}] {s}{s}{s}{}", .{
- sub_index, pub_str, test_str, export_str, std.zig.fmtId(decl_name),
- });
- if (align_inst != .none) {
- try stream.writeAll(" align(");
- try self.writeInstRef(stream, align_inst);
- try stream.writeAll(")");
- }
- if (section_inst != .none) {
- try stream.writeAll(" linksection(");
- try self.writeInstRef(stream, section_inst);
- try stream.writeAll(")");
- }
- }
- const tag = self.code.instructions.items(.tag)[decl_index];
- try stream.print(" line({d}) hash({}): %{d} = {s}(", .{
- line, std.fmt.fmtSliceHexLower(&hash_bytes), decl_index, @tagName(tag),
- });
-
- const decl_block_inst_data = self.code.instructions.items(.data)[decl_index].pl_node;
- const sub_decl_node_off = decl_block_inst_data.src_node;
- self.parent_decl_node = self.relativeToNodeIndex(sub_decl_node_off);
- try self.writePlNodeBlockWithoutSrc(stream, decl_index);
- self.parent_decl_node = parent_decl_node;
- try self.writeSrc(stream, decl_block_inst_data.src());
- try stream.writeAll("\n");
- }
- return extra_index;
- }
-
- fn writeEnumDecl(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
- const small = @bitCast(Inst.EnumDecl.Small, extended.small);
- var extra_index: usize = extended.operand;
-
- const src_node: ?i32 = if (small.has_src_node) blk: {
- const src_node = @bitCast(i32, self.code.extra[extra_index]);
- extra_index += 1;
- break :blk src_node;
- } else null;
-
- const tag_type_ref = if (small.has_tag_type) blk: {
- const tag_type_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- break :blk tag_type_ref;
- } else .none;
-
- const body_len = if (small.has_body_len) blk: {
- const body_len = self.code.extra[extra_index];
- extra_index += 1;
- break :blk body_len;
- } else 0;
-
- const fields_len = if (small.has_fields_len) blk: {
- const fields_len = self.code.extra[extra_index];
- extra_index += 1;
- break :blk fields_len;
- } else 0;
-
- const decls_len = if (small.has_decls_len) blk: {
- const decls_len = self.code.extra[extra_index];
- extra_index += 1;
- break :blk decls_len;
- } else 0;
-
- try stream.print("{s}, ", .{@tagName(small.name_strategy)});
- try self.writeFlag(stream, "nonexhaustive, ", small.nonexhaustive);
-
- if (decls_len == 0) {
- try stream.writeAll("{}, ");
- } else {
- try stream.writeAll("{\n");
- self.indent += 2;
- extra_index = try self.writeDecls(stream, decls_len, extra_index);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}, ");
- }
-
- if (tag_type_ref != .none) {
- try self.writeInstRef(stream, tag_type_ref);
- try stream.writeAll(", ");
- }
-
- const body = self.code.extra[extra_index..][0..body_len];
- extra_index += body.len;
-
- if (fields_len == 0) {
- assert(body.len == 0);
- try stream.writeAll("{}, {})");
- } else {
- const prev_parent_decl_node = self.parent_decl_node;
- if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
- self.indent += 2;
- if (body.len == 0) {
- try stream.writeAll("{}, {\n");
- } else {
- try stream.writeAll("{\n");
- try self.writeBody(stream, body);
-
- try stream.writeByteNTimes(' ', self.indent - 2);
- try stream.writeAll("}, {\n");
- }
-
- const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
- const body_end = extra_index;
- extra_index += bit_bags_count;
- var bit_bag_index: usize = body_end;
- var cur_bit_bag: u32 = undefined;
- var field_i: u32 = 0;
- while (field_i < fields_len) : (field_i += 1) {
- if (field_i % 32 == 0) {
- cur_bit_bag = self.code.extra[bit_bag_index];
- bit_bag_index += 1;
- }
- const has_tag_value = @truncate(u1, cur_bit_bag) != 0;
- cur_bit_bag >>= 1;
-
- const field_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
- extra_index += 1;
-
- try stream.writeByteNTimes(' ', self.indent);
- try stream.print("{}", .{std.zig.fmtId(field_name)});
-
- if (has_tag_value) {
- const tag_value_ref = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
-
- try stream.writeAll(" = ");
- try self.writeInstRef(stream, tag_value_ref);
- }
- try stream.writeAll(",\n");
- }
- self.parent_decl_node = prev_parent_decl_node;
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("})");
- }
- try self.writeSrcNode(stream, src_node);
- }
-
- fn writeOpaqueDecl(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- name_strategy: Inst.NameStrategy,
- ) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.OpaqueDecl, inst_data.payload_index);
- const decls_len = extra.data.decls_len;
-
- try stream.print("{s}, ", .{@tagName(name_strategy)});
-
- if (decls_len == 0) {
- try stream.writeAll("}) ");
- } else {
- try stream.writeAll("\n");
- self.indent += 2;
- _ = try self.writeDecls(stream, decls_len, extra.end);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}) ");
- }
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeErrorSetDecl(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- name_strategy: Inst.NameStrategy,
- ) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.ErrorSetDecl, inst_data.payload_index);
- const fields = self.code.extra[extra.end..][0..extra.data.fields_len];
-
- try stream.print("{s}, ", .{@tagName(name_strategy)});
-
- try stream.writeAll("{\n");
- self.indent += 2;
- for (fields) |str_index| {
- const name = self.code.nullTerminatedString(str_index);
- try stream.writeByteNTimes(' ', self.indent);
- try stream.print("{},\n", .{std.zig.fmtId(name)});
- }
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}) ");
-
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writePlNodeSwitchBr(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- special_prong: SpecialProng,
- ) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.SwitchBlock, inst_data.payload_index);
- const special: struct {
- body: []const Inst.Index,
- end: usize,
- } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra.end },
- .under, .@"else" => blk: {
- const body_len = self.code.extra[extra.end];
- const extra_body_start = extra.end + 1;
- break :blk .{
- .body = self.code.extra[extra_body_start..][0..body_len],
- .end = extra_body_start + body_len,
- };
- },
- };
-
- try self.writeInstRef(stream, extra.data.operand);
-
- if (special.body.len != 0) {
- const prong_name = switch (special_prong) {
- .@"else" => "else",
- .under => "_",
- else => unreachable,
- };
- try stream.print(", {s} => {{\n", .{prong_name});
- self.indent += 2;
- try self.writeBody(stream, special.body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}");
- }
-
- var extra_index: usize = special.end;
- {
- var scalar_i: usize = 0;
- while (scalar_i < extra.data.cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- const body_len = self.code.extra[extra_index];
- extra_index += 1;
- const body = self.code.extra[extra_index..][0..body_len];
- extra_index += body_len;
-
- try stream.writeAll(", ");
- try self.writeInstRef(stream, item_ref);
- try stream.writeAll(" => {\n");
- self.indent += 2;
- try self.writeBody(stream, body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}");
- }
- }
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writePlNodeSwitchBlockMulti(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- special_prong: SpecialProng,
- ) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.SwitchBlockMulti, inst_data.payload_index);
- const special: struct {
- body: []const Inst.Index,
- end: usize,
- } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra.end },
- .under, .@"else" => blk: {
- const body_len = self.code.extra[extra.end];
- const extra_body_start = extra.end + 1;
- break :blk .{
- .body = self.code.extra[extra_body_start..][0..body_len],
- .end = extra_body_start + body_len,
- };
- },
- };
-
- try self.writeInstRef(stream, extra.data.operand);
-
- if (special.body.len != 0) {
- const prong_name = switch (special_prong) {
- .@"else" => "else",
- .under => "_",
- else => unreachable,
- };
- try stream.print(", {s} => {{\n", .{prong_name});
- self.indent += 2;
- try self.writeBody(stream, special.body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}");
- }
-
- var extra_index: usize = special.end;
- {
- var scalar_i: usize = 0;
- while (scalar_i < extra.data.scalar_cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- const body_len = self.code.extra[extra_index];
- extra_index += 1;
- const body = self.code.extra[extra_index..][0..body_len];
- extra_index += body_len;
-
- try stream.writeAll(", ");
- try self.writeInstRef(stream, item_ref);
- try stream.writeAll(" => {\n");
- self.indent += 2;
- try self.writeBody(stream, body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}");
- }
- }
- {
- var multi_i: usize = 0;
- while (multi_i < extra.data.multi_cases_len) : (multi_i += 1) {
- const items_len = self.code.extra[extra_index];
- extra_index += 1;
- const ranges_len = self.code.extra[extra_index];
- extra_index += 1;
- const body_len = self.code.extra[extra_index];
- extra_index += 1;
- const items = self.code.refSlice(extra_index, items_len);
- extra_index += items_len;
-
- for (items) |item_ref| {
- try stream.writeAll(", ");
- try self.writeInstRef(stream, item_ref);
- }
-
- var range_i: usize = 0;
- while (range_i < ranges_len) : (range_i += 1) {
- const item_first = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- const item_last = @intToEnum(Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
-
- try stream.writeAll(", ");
- try self.writeInstRef(stream, item_first);
- try stream.writeAll("...");
- try self.writeInstRef(stream, item_last);
- }
-
- const body = self.code.extra[extra_index..][0..body_len];
- extra_index += body_len;
- try stream.writeAll(" => {\n");
- self.indent += 2;
- try self.writeBody(stream, body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}");
- }
- }
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writePlNodeField(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.Field, inst_data.payload_index).data;
- const name = self.code.nullTerminatedString(extra.field_name_start);
- try self.writeInstRef(stream, extra.lhs);
- try stream.print(", \"{}\") ", .{std.zig.fmtEscapes(name)});
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeAs(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Inst.As, inst_data.payload_index).data;
- try self.writeInstRef(stream, extra.dest_type);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, extra.operand);
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeNode(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const src_node = self.code.instructions.items(.data)[inst].node;
- const src: LazySrcLoc = .{ .node_offset = src_node };
- try stream.writeAll(") ");
- try self.writeSrc(stream, src);
- }
-
- fn writeStrTok(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].str_tok;
- const str = inst_data.get(self.code);
- try stream.print("\"{}\") ", .{std.zig.fmtEscapes(str)});
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeFunc(
- self: *Writer,
- stream: anytype,
- inst: Inst.Index,
- inferred_error_set: bool,
- ) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- const extra = self.code.extraData(Inst.Func, inst_data.payload_index);
- var extra_index = extra.end;
-
- const ret_ty_body = self.code.extra[extra_index..][0..extra.data.ret_body_len];
- extra_index += ret_ty_body.len;
-
- const body = self.code.extra[extra_index..][0..extra.data.body_len];
- extra_index += body.len;
-
- var src_locs: Zir.Inst.Func.SrcLocs = undefined;
- if (body.len != 0) {
- src_locs = self.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
- }
- return self.writeFuncCommon(
- stream,
- ret_ty_body,
- inferred_error_set,
- false,
- false,
- .none,
- .none,
- body,
- src,
- src_locs,
- );
- }
-
- fn writeFuncExtended(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
- const extra = self.code.extraData(Inst.ExtendedFunc, extended.operand);
- const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
- const small = @bitCast(Inst.ExtendedFunc.Small, extended.small);
-
- var extra_index: usize = extra.end;
- if (small.has_lib_name) {
- const lib_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
- extra_index += 1;
- try stream.print("lib_name=\"{}\", ", .{std.zig.fmtEscapes(lib_name)});
- }
- try self.writeFlag(stream, "test, ", small.is_test);
- const cc: Inst.Ref = if (!small.has_cc) .none else blk: {
- const cc = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- break :blk cc;
- };
- const align_inst: Inst.Ref = if (!small.has_align) .none else blk: {
- const align_inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- break :blk align_inst;
- };
-
- const ret_ty_body = self.code.extra[extra_index..][0..extra.data.ret_body_len];
- extra_index += ret_ty_body.len;
-
- const body = self.code.extra[extra_index..][0..extra.data.body_len];
- extra_index += body.len;
-
- var src_locs: Zir.Inst.Func.SrcLocs = undefined;
- if (body.len != 0) {
- src_locs = self.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
- }
- return self.writeFuncCommon(
- stream,
- ret_ty_body,
- small.is_inferred_error,
- small.is_var_args,
- small.is_extern,
- cc,
- align_inst,
- body,
- src,
- src_locs,
- );
- }
-
- fn writeVarExtended(self: *Writer, stream: anytype, extended: Inst.Extended.InstData) !void {
- const extra = self.code.extraData(Inst.ExtendedVar, extended.operand);
- const small = @bitCast(Inst.ExtendedVar.Small, extended.small);
-
- try self.writeInstRef(stream, extra.data.var_type);
-
- var extra_index: usize = extra.end;
- if (small.has_lib_name) {
- const lib_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
- extra_index += 1;
- try stream.print(", lib_name=\"{}\"", .{std.zig.fmtEscapes(lib_name)});
- }
- const align_inst: Inst.Ref = if (!small.has_align) .none else blk: {
- const align_inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- break :blk align_inst;
- };
- const init_inst: Inst.Ref = if (!small.has_init) .none else blk: {
- const init_inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- break :blk init_inst;
- };
- try self.writeFlag(stream, ", is_extern", small.is_extern);
- try self.writeOptionalInstRef(stream, ", align=", align_inst);
- try self.writeOptionalInstRef(stream, ", init=", init_inst);
- try stream.writeAll("))");
- }
-
- fn writeBoolBr(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].bool_br;
- const extra = self.code.extraData(Inst.Block, inst_data.payload_index);
- const body = self.code.extra[extra.end..][0..extra.data.body_len];
- try self.writeInstRef(stream, inst_data.lhs);
- try stream.writeAll(", {\n");
- self.indent += 2;
- try self.writeBody(stream, body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("})");
- }
-
- fn writeIntType(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const int_type = self.code.instructions.items(.data)[inst].int_type;
- const prefix: u8 = switch (int_type.signedness) {
- .signed => 'i',
- .unsigned => 'u',
- };
- try stream.print("{c}{d}) ", .{ prefix, int_type.bit_count });
- try self.writeSrc(stream, int_type.src());
- }
-
- fn writeBreak(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].@"break";
-
- try self.writeInstIndex(stream, inst_data.block_inst);
- try stream.writeAll(", ");
- try self.writeInstRef(stream, inst_data.operand);
- try stream.writeAll(")");
- }
-
- fn writeUnreachable(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].@"unreachable";
- const safety_str = if (inst_data.safety) "safe" else "unsafe";
- try stream.print("{s}) ", .{safety_str});
- try self.writeSrc(stream, inst_data.src());
- }
-
- fn writeFuncCommon(
- self: *Writer,
- stream: anytype,
- ret_ty_body: []const Inst.Index,
- inferred_error_set: bool,
- var_args: bool,
- is_extern: bool,
- cc: Inst.Ref,
- align_inst: Inst.Ref,
- body: []const Inst.Index,
- src: LazySrcLoc,
- src_locs: Zir.Inst.Func.SrcLocs,
- ) !void {
- if (ret_ty_body.len == 0) {
- try stream.writeAll("ret_ty=void");
- } else {
- try stream.writeAll("ret_ty={\n");
- self.indent += 2;
- try self.writeBody(stream, ret_ty_body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}");
- }
-
- try self.writeOptionalInstRef(stream, ", cc=", cc);
- try self.writeOptionalInstRef(stream, ", align=", align_inst);
- try self.writeFlag(stream, ", vargs", var_args);
- try self.writeFlag(stream, ", extern", is_extern);
- try self.writeFlag(stream, ", inferror", inferred_error_set);
-
- if (body.len == 0) {
- try stream.writeAll(", body={}) ");
- } else {
- try stream.writeAll(", body={\n");
- self.indent += 2;
- try self.writeBody(stream, body);
- self.indent -= 2;
- try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}) ");
- }
- if (body.len != 0) {
- try stream.print("(lbrace={d}:{d},rbrace={d}:{d}) ", .{
- src_locs.lbrace_line, @truncate(u16, src_locs.columns),
- src_locs.rbrace_line, @truncate(u16, src_locs.columns >> 16),
- });
- }
- try self.writeSrc(stream, src);
- }
-
- fn writeSwitchCapture(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].switch_capture;
- try self.writeInstIndex(stream, inst_data.switch_inst);
- try stream.print(", {d})", .{inst_data.prong_index});
- }
-
- fn writeDbgStmt(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- const inst_data = self.code.instructions.items(.data)[inst].dbg_stmt;
- try stream.print("{d}, {d})", .{ inst_data.line, inst_data.column });
- }
-
- fn writeInstRef(self: *Writer, stream: anytype, ref: Inst.Ref) !void {
- var i: usize = @enumToInt(ref);
-
- if (i < Inst.Ref.typed_value_map.len) {
- return stream.print("@{}", .{ref});
- }
- i -= Inst.Ref.typed_value_map.len;
-
- return self.writeInstIndex(stream, @intCast(Inst.Index, i));
- }
-
- fn writeInstIndex(self: *Writer, stream: anytype, inst: Inst.Index) !void {
- _ = self;
- return stream.print("%{d}", .{inst});
- }
-
- fn writeOptionalInstRef(
- self: *Writer,
- stream: anytype,
- prefix: []const u8,
- inst: Inst.Ref,
- ) !void {
- if (inst == .none) return;
- try stream.writeAll(prefix);
- try self.writeInstRef(stream, inst);
- }
-
- fn writeFlag(
- self: *Writer,
- stream: anytype,
- name: []const u8,
- flag: bool,
- ) !void {
- _ = self;
- if (!flag) return;
- try stream.writeAll(name);
- }
-
- fn writeSrc(self: *Writer, stream: anytype, src: LazySrcLoc) !void {
- const tree = self.file.tree;
- const src_loc: Module.SrcLoc = .{
- .file_scope = self.file,
- .parent_decl_node = self.parent_decl_node,
- .lazy = src,
- };
- // Caller must ensure AST tree is loaded.
- const abs_byte_off = src_loc.byteOffset(self.gpa) catch unreachable;
- const delta_line = std.zig.findLineColumn(tree.source, abs_byte_off);
- try stream.print("{s}:{d}:{d}", .{
- @tagName(src), delta_line.line + 1, delta_line.column + 1,
- });
- }
-
- fn writeSrcNode(self: *Writer, stream: anytype, src_node: ?i32) !void {
- const node_offset = src_node orelse return;
- const src: LazySrcLoc = .{ .node_offset = node_offset };
- try stream.writeAll(" ");
- return self.writeSrc(stream, src);
- }
-
- fn writeBody(self: *Writer, stream: anytype, body: []const Inst.Index) !void {
- for (body) |inst| {
- try stream.writeByteNTimes(' ', self.indent);
- try stream.print("%{d} ", .{inst});
- try self.writeInstToStream(stream, inst);
- try stream.writeByte('\n');
- }
- }
-};
-
pub const DeclIterator = struct {
extra_index: usize,
bit_bag_index: usize,
@@ -4735,15 +2981,6 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator {
const tags = zir.instructions.items(.tag);
const datas = zir.instructions.items(.data);
switch (tags[decl_inst]) {
- .opaque_decl,
- .opaque_decl_anon,
- .opaque_decl_func,
- => {
- const inst_data = datas[decl_inst].pl_node;
- const extra = zir.extraData(Inst.OpaqueDecl, inst_data.payload_index);
- return declIteratorInner(zir, extra.end, extra.data.decls_len);
- },
-
// Functions are allowed and yield no iterations.
// There is one case matching this in the extended instruction set below.
.func,
@@ -4798,6 +3035,18 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator {
return declIteratorInner(zir, extra_index, decls_len);
},
+ .opaque_decl => {
+ const small = @bitCast(Inst.OpaqueDecl.Small, extended.small);
+ var extra_index: usize = extended.operand;
+ extra_index += @boolToInt(small.has_src_node);
+ const decls_len = if (small.has_decls_len) decls_len: {
+ const decls_len = zir.extra[extra_index];
+ extra_index += 1;
+ break :decls_len decls_len;
+ } else 0;
+
+ return declIteratorInner(zir, extra_index, decls_len);
+ },
else => unreachable,
}
},
@@ -4835,13 +3084,6 @@ fn findDeclsInner(
const datas = zir.instructions.items(.data);
switch (tags[inst]) {
- // Decl instructions are interesting but have no body.
- // TODO yes they do have a body actually. recurse over them just like block instructions.
- .opaque_decl,
- .opaque_decl_anon,
- .opaque_decl_func,
- => return list.append(inst),
-
// Functions instructions are interesting and have a body.
.func,
.func_inferred,
@@ -4869,9 +3111,12 @@ fn findDeclsInner(
return zir.findDeclsBody(list, body);
},
+ // Decl instructions are interesting but have no body.
+ // TODO yes they do have a body actually. recurse over them just like block instructions.
.struct_decl,
.union_decl,
.enum_decl,
+ .opaque_decl,
=> return list.append(inst),
else => return,
diff --git a/src/codegen/aarch64.zig b/src/arch/aarch64/bits.zig
similarity index 100%
rename from src/codegen/aarch64.zig
rename to src/arch/aarch64/bits.zig
diff --git a/src/codegen/arm.zig b/src/arch/arm/bits.zig
similarity index 99%
rename from src/codegen/arm.zig
rename to src/arch/arm/bits.zig
index ec9152f96b..279ce58005 100644
--- a/src/codegen/arm.zig
+++ b/src/arch/arm/bits.zig
@@ -2,7 +2,7 @@ const std = @import("std");
const DW = std.dwarf;
const testing = std.testing;
-/// The condition field specifies the flags neccessary for an
+/// The condition field specifies the flags necessary for an
/// Instruction to be executed
pub const Condition = enum(u4) {
/// equal
diff --git a/src/codegen/riscv64.zig b/src/arch/riscv64/bits.zig
similarity index 100%
rename from src/codegen/riscv64.zig
rename to src/arch/riscv64/bits.zig
diff --git a/src/codegen/x86.zig b/src/arch/x86/bits.zig
similarity index 100%
rename from src/codegen/x86.zig
rename to src/arch/x86/bits.zig
diff --git a/src/codegen/x86_64.zig b/src/arch/x86_64/bits.zig
similarity index 100%
rename from src/codegen/x86_64.zig
rename to src/arch/x86_64/bits.zig
diff --git a/src/clang_options_data.zig b/src/clang_options_data.zig
index 524374a7e9..ecaa383b69 100644
--- a/src/clang_options_data.zig
+++ b/src/clang_options_data.zig
@@ -2434,7 +2434,14 @@ flagpd1("emit-codegen-only"),
flagpd1("emit-header-module"),
flagpd1("emit-html"),
flagpd1("emit-interface-stubs"),
-flagpd1("emit-llvm"),
+.{
+ .name = "emit-llvm",
+ .syntax = .flag,
+ .zig_equivalent = .emit_llvm,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
flagpd1("emit-llvm-bc"),
flagpd1("emit-llvm-only"),
flagpd1("emit-llvm-uselists"),
diff --git a/src/codegen.zig b/src/codegen.zig
index 511d4c2301..dfaedf041a 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -21,7 +21,7 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const RegisterManager = @import("register_manager.zig").RegisterManager;
-const X8664Encoder = @import("codegen/x86_64.zig").Encoder;
+const X8664Encoder = @import("arch/x86_64/bits.zig").Encoder;
pub const FnResult = union(enum) {
/// The `code` parameter passed to `generateSymbol` has the value appended.
@@ -48,6 +48,28 @@ pub const DebugInfoOutput = union(enum) {
dbg_info: *std.ArrayList(u8),
dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
},
+ /// the plan9 debuginfo output is a bytecode with 4 opcodes
+ /// assume all numbers/variables are bytes
+ /// 0 w x y z -> interpret w x y z as a big-endian i32, and add it to the line offset
+ /// x when x < 65 -> add x to line offset
+ /// x when x < 129 -> subtract 64 from x and subtract it from the line offset
+ /// x -> subtract 129 from x, multiply it by the quanta of the instruction size
+ /// (1 on x86_64), and add it to the pc
+ /// after every opcode, add the quanta of the instruction size to the pc
+ plan9: struct {
+ /// the actual opcodes
+ dbg_line: *std.ArrayList(u8),
+ /// what line the debuginfo starts on
+ /// this helps because the linker might have to insert some opcodes to make sure that the line count starts at the right amount for the next decl
+ start_line: *?u32,
+ /// what the line count ends on after codegen
+ /// this helps because the linker might have to insert some opcodes to make sure that the line count starts at the right amount for the next decl
+ end_line: *u32,
+ /// the last pc change op
+ /// This is very useful for adding quanta
+ /// to it if it's not actually the last one.
+ pcop_change_index: *?u32,
+ },
none,
};
@@ -141,7 +163,7 @@ pub fn generateSymbol(
// TODO populate .debug_info for the array
if (typed_value.val.castTag(.bytes)) |payload| {
if (typed_value.ty.sentinel()) |sentinel| {
- try code.ensureCapacity(code.items.len + payload.data.len + 1);
+ try code.ensureUnusedCapacity(payload.data.len + 1);
code.appendSliceAssumeCapacity(payload.data);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = typed_value.ty.elemType(),
@@ -448,7 +470,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// A branch in the ARM instruction set
arm_branch: struct {
pos: usize,
- cond: @import("codegen/arm.zig").Condition,
+ cond: @import("arch/arm/bits.zig").Condition,
},
};
@@ -568,7 +590,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn gen(self: *Self) !void {
switch (arch) {
.x86_64 => {
- try self.code.ensureCapacity(self.code.items.len + 11);
+ try self.code.ensureUnusedCapacity(11);
const cc = self.fn_type.fnCallingConvention();
if (cc != .Naked) {
@@ -607,7 +629,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Important to be after the possible self.code.items.len -= 5 above.
try self.dbgSetEpilogueBegin();
- try self.code.ensureCapacity(self.code.items.len + 9);
+ try self.code.ensureUnusedCapacity(9);
// add rsp, x
if (aligned_stack_end > math.maxInt(i8)) {
// example: 48 81 c4 ff ff ff 7f add rsp,0x7fffffff
@@ -802,14 +824,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (air_tags[inst]) {
// zig fmt: off
- .add, .ptr_add => try self.airAdd(inst),
- .addwrap => try self.airAddWrap(inst),
- .sub, .ptr_sub => try self.airSub(inst),
- .subwrap => try self.airSubWrap(inst),
- .mul => try self.airMul(inst),
- .mulwrap => try self.airMulWrap(inst),
- .div => try self.airDiv(inst),
- .rem => try self.airRem(inst),
+ .add, .ptr_add => try self.airAdd(inst),
+ .addwrap => try self.airAddWrap(inst),
+ .add_sat => try self.airAddSat(inst),
+ .sub, .ptr_sub => try self.airSub(inst),
+ .subwrap => try self.airSubWrap(inst),
+ .sub_sat => try self.airSubSat(inst),
+ .mul => try self.airMul(inst),
+ .mulwrap => try self.airMulWrap(inst),
+ .mul_sat => try self.airMulSat(inst),
+ .div => try self.airDiv(inst),
+ .rem => try self.airRem(inst),
+ .mod => try self.airMod(inst),
+ .shl, .shl_exact => try self.airShl(inst),
+ .shl_sat => try self.airShlSat(inst),
.cmp_lt => try self.airCmp(inst, .lt),
.cmp_lte => try self.airCmp(inst, .lte),
@@ -824,7 +852,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.bit_or => try self.airBitOr(inst),
.xor => try self.airXor(inst),
.shr => try self.airShr(inst),
- .shl => try self.airShl(inst),
.alloc => try self.airAlloc(inst),
.arg => try self.airArg(inst),
@@ -833,10 +860,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.breakpoint => try self.airBreakpoint(),
+ .fence => try self.airFence(),
.call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
.dbg_stmt => try self.airDbgStmt(inst),
- .floatcast => try self.airFloatCast(inst),
+ .fptrunc => try self.airFptrunc(inst),
+ .fpext => try self.airFpext(inst),
.intcast => try self.airIntCast(inst),
.trunc => try self.airTrunc(inst),
.bool_to_int => try self.airBoolToInt(inst),
@@ -857,8 +886,23 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.struct_field_ptr=> try self.airStructFieldPtr(inst),
.struct_field_val=> try self.airStructFieldVal(inst),
.array_to_slice => try self.airArrayToSlice(inst),
+ .int_to_float => try self.airIntToFloat(inst),
+ .float_to_int => try self.airFloatToInt(inst),
.cmpxchg_strong => try self.airCmpxchg(inst),
.cmpxchg_weak => try self.airCmpxchg(inst),
+ .atomic_rmw => try self.airAtomicRmw(inst),
+ .atomic_load => try self.airAtomicLoad(inst),
+ .memcpy => try self.airMemcpy(inst),
+ .memset => try self.airMemset(inst),
+ .set_union_tag => try self.airSetUnionTag(inst),
+ .get_union_tag => try self.airGetUnionTag(inst),
+ .clz => try self.airClz(inst),
+ .ctz => try self.airCtz(inst),
+
+ .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
+ .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
+ .atomic_store_release => try self.airAtomicStore(inst, .Release),
+ .atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
@@ -905,6 +949,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try dbg_out.dbg_line.append(DW.LNS.set_prologue_end);
try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
},
+ .plan9 => {},
.none => {},
}
}
@@ -915,15 +960,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try dbg_out.dbg_line.append(DW.LNS.set_epilogue_begin);
try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
},
+ .plan9 => {},
.none => {},
}
}
fn dbgAdvancePCAndLine(self: *Self, line: u32, column: u32) InnerError!void {
+ const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
+ const delta_pc: usize = self.code.items.len - self.prev_di_pc;
switch (self.debug_output) {
.dwarf => |dbg_out| {
- const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
- const delta_pc = self.code.items.len - self.prev_di_pc;
// TODO Look into using the DWARF special opcodes to compress this data.
// It lets you emit single-byte opcodes that add different numbers to
// both the PC and the line number at the same time.
@@ -935,12 +981,39 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
leb128.writeILEB128(dbg_out.dbg_line.writer(), delta_line) catch unreachable;
}
dbg_out.dbg_line.appendAssumeCapacity(DW.LNS.copy);
+ self.prev_di_pc = self.code.items.len;
+ self.prev_di_line = line;
+ self.prev_di_column = column;
+ self.prev_di_pc = self.code.items.len;
+ },
+ .plan9 => |dbg_out| {
+ if (delta_pc <= 0) return; // only do this when the pc changes
+ // we have already checked the target in the linker to make sure it is compatible
+ const quant = @import("link/Plan9/aout.zig").getPCQuant(self.target.cpu.arch) catch unreachable;
+
+ // increasing the line number
+ try @import("link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
+ // increasing the pc
+ const d_pc_p9 = @intCast(i64, delta_pc) - quant;
+ if (d_pc_p9 > 0) {
+ // minus one because if it's the last one, we want to leave space to change the line, which is one quantum
+ try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant);
+ if (dbg_out.pcop_change_index.*) |pci|
+ dbg_out.dbg_line.items[pci] += 1;
+ dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
+ } else if (d_pc_p9 == 0) {
+ // we don't need to do anything, because adding the quant does it for us
+ } else unreachable;
+ if (dbg_out.start_line.* == null)
+ dbg_out.start_line.* = self.prev_di_line;
+ dbg_out.end_line.* = line;
+ // only do this if the pc changed
+ self.prev_di_line = line;
+ self.prev_di_column = column;
+ self.prev_di_pc = self.code.items.len;
},
.none => {},
}
- self.prev_di_line = line;
- self.prev_di_column = column;
- self.prev_di_pc = self.code.items.len;
}
/// Asserts there is already capacity to insert into top branch inst_table.
@@ -1024,6 +1097,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
try gop.value_ptr.relocs.append(self.gpa, @intCast(u32, index));
},
+ .plan9 => {},
.none => {},
}
}
@@ -1110,10 +1184,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none });
}
- fn airFloatCast(self: *Self, inst: Air.Inst.Index) !void {
+ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
- else => return self.fail("TODO implement floatCast for {}", .{self.target.cpu.arch}),
+ else => return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+
+ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch}),
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1226,6 +1308,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ }
+
fn airSub(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@@ -1244,6 +1334,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ }
+
fn airMul(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@@ -1262,6 +1360,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ }
+
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@@ -1278,6 +1384,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement mod for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ }
+
fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@@ -1316,6 +1430,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ }
+
fn airShr(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@@ -1470,6 +1592,38 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const result: MCValue = switch (arch) {
+ else => return self.fail("TODO implement airSetUnionTag for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ }
+
+ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement airGetUnionTag for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+
+ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+
+ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement airCtz for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+
fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
if (!self.liveness.operandDies(inst, op_index))
return false;
@@ -1848,15 +2002,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
},
.shl => {
assert(!swap_lhs_and_rhs);
- const shift_amout = switch (operand) {
+ const shift_amount = switch (operand) {
.Register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
.Immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
};
- writeInt(u32, try self.code.addManyAsArray(4), Instruction.lsl(.al, dst_reg, op1, shift_amout).toU32());
+ writeInt(u32, try self.code.addManyAsArray(4), Instruction.lsl(.al, dst_reg, op1, shift_amount).toU32());
},
.shr => {
assert(!swap_lhs_and_rhs);
- const shift_amout = switch (operand) {
+ const shift_amount = switch (operand) {
.Register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
.Immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
};
@@ -1865,7 +2019,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.signed => Instruction.asr,
.unsigned => Instruction.lsr,
};
- writeInt(u32, try self.code.addManyAsArray(4), shr(.al, dst_reg, op1, shift_amout).toU32());
+ writeInt(u32, try self.code.addManyAsArray(4), shr(.al, dst_reg, op1, shift_amount).toU32());
},
else => unreachable, // not a binary instruction
}
@@ -1952,7 +2106,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
//
// TODO: make this algorithm less bad
- try self.code.ensureCapacity(self.code.items.len + 8);
+ try self.code.ensureUnusedCapacity(8);
const lhs = try self.resolveInst(op_lhs);
const rhs = try self.resolveInst(op_rhs);
@@ -2439,16 +2593,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.register => |reg| {
switch (self.debug_output) {
.dwarf => |dbg_out| {
- try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 3);
+ try dbg_out.dbg_info.ensureUnusedCapacity(3);
dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
1, // ULEB128 dwarf expression length
reg.dwarfLocOp(),
});
- try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
+ try dbg_out.dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
},
+ .plan9 => {},
.none => {},
}
},
@@ -2476,13 +2631,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try dbg_out.dbg_info.append(DW.OP.breg11);
try leb128.writeILEB128(dbg_out.dbg_info.writer(), adjusted_stack_offset);
- try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
+ try dbg_out.dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
try self.addDbgInfoTypeReloc(ty); // DW.AT.type, DW.FORM.ref4
dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
},
else => {},
}
},
+ .plan9 => {},
.none => {},
}
},
@@ -2549,6 +2705,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAirBookkeeping();
}
+ fn airFence(self: *Self) !void {
+ return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch});
+ //return self.finishAirBookkeeping();
+ }
+
fn airCall(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const fn_ty = self.air.typeOf(pl_op.operand);
@@ -2613,7 +2774,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
unreachable;
// ff 14 25 xx xx xx xx call [addr]
- try self.code.ensureCapacity(self.code.items.len + 7);
+ try self.code.ensureUnusedCapacity(7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
} else if (func_value.castTag(.extern_fn)) |_| {
@@ -2826,7 +2987,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.memory = func.owner_decl.link.macho.local_sym_index,
});
// callq *%rax
- try self.code.ensureCapacity(self.code.items.len + 2);
+ try self.code.ensureUnusedCapacity(2);
self.code.appendSliceAssumeCapacity(&[2]u8{ 0xff, 0xd0 });
},
.aarch64 => {
@@ -2840,12 +3001,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const decl = func_payload.data;
- const where_index = try macho_file.addExternFn(mem.spanZ(decl.name));
+ const resolv = try macho_file.addExternFn(mem.spanZ(decl.name));
const offset = blk: {
switch (arch) {
.x86_64 => {
// callq
- try self.code.ensureCapacity(self.code.items.len + 5);
+ try self.code.ensureUnusedCapacity(5);
self.code.appendSliceAssumeCapacity(&[5]u8{ 0xe8, 0x0, 0x0, 0x0, 0x0 });
break :blk @intCast(u32, self.code.items.len) - 4;
},
@@ -2861,8 +3022,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Add relocation to the decl.
try macho_file.active_decl.?.link.macho.relocs.append(self.bin_file.allocator, .{
.offset = offset,
- .where = .undef,
- .where_index = where_index,
+ .where = switch (resolv.where) {
+ .local => .local,
+ .undef => .undef,
+ },
+ .where_index = resolv.where_index,
.payload = .{ .branch = .{
.arch = arch,
} },
@@ -2911,12 +3075,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
+ try p9.seeDecl(func_payload.data.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = func_payload.data.owner_decl.link.plan9.got_index.?;
// ff 14 25 xx xx xx xx call [addr]
- try self.code.ensureCapacity(self.code.items.len + 7);
+ try self.code.ensureUnusedCapacity(7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
const fn_got_addr = got_addr + got_index * ptr_bytes;
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), @intCast(u32, fn_got_addr));
@@ -2958,6 +3123,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
+ try p9.seeDecl(func_payload.data.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
@@ -3059,7 +3225,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const rhs = try self.resolveInst(bin_op.rhs);
const result: MCValue = switch (arch) {
.x86_64 => result: {
- try self.code.ensureCapacity(self.code.items.len + 8);
+ try self.code.ensureUnusedCapacity(8);
// There are 2 operands, destination and source.
// Either one, but not both, can be a memory operand.
@@ -3143,7 +3309,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const reloc: Reloc = switch (arch) {
.i386, .x86_64 => reloc: {
- try self.code.ensureCapacity(self.code.items.len + 6);
+ try self.code.ensureUnusedCapacity(6);
const opcode: u8 = switch (cond) {
.compare_flags_signed => |cmp_op| blk: {
@@ -3503,7 +3669,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn jump(self: *Self, index: usize) !void {
switch (arch) {
.i386, .x86_64 => {
- try self.code.ensureCapacity(self.code.items.len + 5);
+ try self.code.ensureUnusedCapacity(5);
if (math.cast(i8, @intCast(i32, index) - (@intCast(i32, self.code.items.len + 2)))) |delta| {
self.code.appendAssumeCapacity(0xeb); // jmp rel8
self.code.appendAssumeCapacity(@bitCast(u8, delta));
@@ -3535,7 +3701,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try self.blocks.putNoClobber(self.gpa, inst, .{
// A block is a setup to be able to jump to the end.
.relocs = .{},
- // It also acts as a receptical for break operands.
+ // It also acts as a receptacle for break operands.
// Here we use `MCValue.none` to represent a null value so that the first
// break instruction will choose a MCValue for the block result and overwrite
// this field. Following break instructions will use that MCValue to put their
@@ -3589,7 +3755,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail("TODO: enable larger branch offset", .{});
}
},
- else => unreachable, // attempting to perfrom an ARM relocation on a non-ARM target arch
+ else => unreachable, // attempting to perform an ARM relocation on a non-ARM target arch
}
},
}
@@ -3641,7 +3807,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const block_data = self.blocks.getPtr(block).?;
// Emit a jump with a relocation. It will be patched up after the block ends.
- try block_data.relocs.ensureCapacity(self.gpa, block_data.relocs.items.len + 1);
+ try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
switch (arch) {
.i386, .x86_64 => {
@@ -4025,7 +4191,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (adj_off > 128) {
return self.fail("TODO implement set stack variable with large stack offset", .{});
}
- try self.code.ensureCapacity(self.code.items.len + 8);
+ try self.code.ensureUnusedCapacity(8);
switch (abi_size) {
1 => {
return self.fail("TODO implement set abi_size=1 stack variable with immediate", .{});
@@ -4051,7 +4217,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// 64 bit write to memory would take two mov's anyways so we
// insted just use two 32 bit writes to avoid register allocation
- try self.code.ensureCapacity(self.code.items.len + 14);
+ try self.code.ensureUnusedCapacity(14);
var buf: [8]u8 = undefined;
mem.writeIntLittle(u64, &buf, x_big);
@@ -4753,6 +4919,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement airIntToFloat for {}", .{
+ self.target.cpu.arch,
+ }),
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+
+ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement airFloatToInt for {}", .{
+ self.target.cpu.arch,
+ }),
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+
fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
@@ -4764,6 +4950,32 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value });
}
+ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
+ _ = inst;
+ return self.fail("TODO implement airAtomicRmw for {}", .{self.target.cpu.arch});
+ }
+
+ fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
+ _ = inst;
+ return self.fail("TODO implement airAtomicLoad for {}", .{self.target.cpu.arch});
+ }
+
+ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
+ _ = inst;
+ _ = order;
+ return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch});
+ }
+
+ fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
+ _ = inst;
+ return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch});
+ }
+
+ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
+ _ = inst;
+ return self.fail("TODO implement airMemcpy for {}", .{self.target.cpu.arch});
+ }
+
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
@@ -4841,7 +5053,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
- var buf: Type.Payload.ElemType = undefined;
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
const slice_len = typed_value.val.sliceLen();
@@ -4869,6 +5081,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
+ try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
@@ -5216,11 +5429,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
const Register = switch (arch) {
- .i386 => @import("codegen/x86.zig").Register,
- .x86_64 => @import("codegen/x86_64.zig").Register,
- .riscv64 => @import("codegen/riscv64.zig").Register,
- .arm, .armeb => @import("codegen/arm.zig").Register,
- .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").Register,
+ .i386 => @import("arch/x86/bits.zig").Register,
+ .x86_64 => @import("arch/x86_64/bits.zig").Register,
+ .riscv64 => @import("arch/riscv64/bits.zig").Register,
+ .arm, .armeb => @import("arch/arm/bits.zig").Register,
+ .aarch64, .aarch64_be, .aarch64_32 => @import("arch/aarch64/bits.zig").Register,
else => enum {
dummy,
@@ -5232,39 +5445,39 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
const Instruction = switch (arch) {
- .riscv64 => @import("codegen/riscv64.zig").Instruction,
- .arm, .armeb => @import("codegen/arm.zig").Instruction,
- .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").Instruction,
+ .riscv64 => @import("arch/riscv64/bits.zig").Instruction,
+ .arm, .armeb => @import("arch/arm/bits.zig").Instruction,
+ .aarch64, .aarch64_be, .aarch64_32 => @import("arch/aarch64/bits.zig").Instruction,
else => void,
};
const Condition = switch (arch) {
- .arm, .armeb => @import("codegen/arm.zig").Condition,
+ .arm, .armeb => @import("arch/arm/bits.zig").Condition,
else => void,
};
const callee_preserved_regs = switch (arch) {
- .i386 => @import("codegen/x86.zig").callee_preserved_regs,
- .x86_64 => @import("codegen/x86_64.zig").callee_preserved_regs,
- .riscv64 => @import("codegen/riscv64.zig").callee_preserved_regs,
- .arm, .armeb => @import("codegen/arm.zig").callee_preserved_regs,
- .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").callee_preserved_regs,
+ .i386 => @import("arch/x86/bits.zig").callee_preserved_regs,
+ .x86_64 => @import("arch/x86_64/bits.zig").callee_preserved_regs,
+ .riscv64 => @import("arch/riscv64/bits.zig").callee_preserved_regs,
+ .arm, .armeb => @import("arch/arm/bits.zig").callee_preserved_regs,
+ .aarch64, .aarch64_be, .aarch64_32 => @import("arch/aarch64/bits.zig").callee_preserved_regs,
else => [_]Register{},
};
const c_abi_int_param_regs = switch (arch) {
- .i386 => @import("codegen/x86.zig").c_abi_int_param_regs,
- .x86_64 => @import("codegen/x86_64.zig").c_abi_int_param_regs,
- .arm, .armeb => @import("codegen/arm.zig").c_abi_int_param_regs,
- .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").c_abi_int_param_regs,
+ .i386 => @import("arch/x86/bits.zig").c_abi_int_param_regs,
+ .x86_64 => @import("arch/x86_64/bits.zig").c_abi_int_param_regs,
+ .arm, .armeb => @import("arch/arm/bits.zig").c_abi_int_param_regs,
+ .aarch64, .aarch64_be, .aarch64_32 => @import("arch/aarch64/bits.zig").c_abi_int_param_regs,
else => [_]Register{},
};
const c_abi_int_return_regs = switch (arch) {
- .i386 => @import("codegen/x86.zig").c_abi_int_return_regs,
- .x86_64 => @import("codegen/x86_64.zig").c_abi_int_return_regs,
- .arm, .armeb => @import("codegen/arm.zig").c_abi_int_return_regs,
- .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").c_abi_int_return_regs,
+ .i386 => @import("arch/x86/bits.zig").c_abi_int_return_regs,
+ .x86_64 => @import("arch/x86_64/bits.zig").c_abi_int_return_regs,
+ .arm, .armeb => @import("arch/arm/bits.zig").c_abi_int_return_regs,
+ .aarch64, .aarch64_be, .aarch64_32 => @import("arch/aarch64/bits.zig").c_abi_int_return_regs,
else => [_]Register{},
};
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index ff49b18f7b..d2ce9cc6de 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -91,55 +91,76 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) {
return .{ .data = ident };
}
-/// This data is available when outputting .c code for a Module.
+/// This data is available when outputting .c code for a `*Module.Fn`.
/// It is not available when generating .h file.
-pub const Object = struct {
- dg: DeclGen,
+pub const Function = struct {
air: Air,
liveness: Liveness,
- gpa: *mem.Allocator,
- code: std.ArrayList(u8),
value_map: CValueMap,
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
next_arg_index: usize = 0,
next_local_index: usize = 0,
next_block_index: usize = 0,
- indent_writer: IndentWriter(std.ArrayList(u8).Writer),
+ object: Object,
+ func: *Module.Fn,
- fn resolveInst(o: *Object, inst: Air.Inst.Ref) !CValue {
- if (o.air.value(inst)) |_| {
+ fn resolveInst(f: *Function, inst: Air.Inst.Ref) !CValue {
+ if (f.air.value(inst)) |_| {
return CValue{ .constant = inst };
}
const index = Air.refToIndex(inst).?;
- return o.value_map.get(index).?; // Assertion means instruction does not dominate usage.
+ return f.value_map.get(index).?; // Assertion means instruction does not dominate usage.
}
- fn allocLocalValue(o: *Object) CValue {
- const result = o.next_local_index;
- o.next_local_index += 1;
+ fn allocLocalValue(f: *Function) CValue {
+ const result = f.next_local_index;
+ f.next_local_index += 1;
return .{ .local = result };
}
- fn allocLocal(o: *Object, ty: Type, mutability: Mutability) !CValue {
- const local_value = o.allocLocalValue();
- try o.renderTypeAndName(o.writer(), ty, local_value, mutability);
+ fn allocLocal(f: *Function, ty: Type, mutability: Mutability) !CValue {
+ const local_value = f.allocLocalValue();
+ try f.object.renderTypeAndName(f.object.writer(), ty, local_value, mutability);
return local_value;
}
+ fn writeCValue(f: *Function, w: anytype, c_value: CValue) !void {
+ switch (c_value) {
+ .constant => |inst| {
+ const ty = f.air.typeOf(inst);
+ const val = f.air.value(inst).?;
+ return f.object.dg.renderValue(w, ty, val);
+ },
+ else => return Object.writeCValue(w, c_value),
+ }
+ }
+
+ fn fail(f: *Function, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
+ return f.object.dg.fail(format, args);
+ }
+
+ fn renderType(f: *Function, w: anytype, t: Type) !void {
+ return f.object.dg.renderType(w, t);
+ }
+};
+
+/// This data is available when outputting .c code for a `Module`.
+/// It is not available when generating .h file.
+pub const Object = struct {
+ dg: DeclGen,
+ code: std.ArrayList(u8),
+ indent_writer: IndentWriter(std.ArrayList(u8).Writer),
+
fn writer(o: *Object) IndentWriter(std.ArrayList(u8).Writer).Writer {
return o.indent_writer.writer();
}
- fn writeCValue(o: *Object, w: anytype, c_value: CValue) !void {
+ fn writeCValue(w: anytype, c_value: CValue) !void {
switch (c_value) {
.none => unreachable,
.local => |i| return w.print("t{d}", .{i}),
.local_ref => |i| return w.print("&t{d}", .{i}),
- .constant => |inst| {
- const ty = o.air.typeOf(inst);
- const val = o.air.value(inst).?;
- return o.dg.renderValue(w, ty, val);
- },
+ .constant => unreachable,
.arg => |i| return w.print("a{d}", .{i}),
.decl => |decl| return w.writeAll(mem.span(decl.name)),
.decl_ref => |decl| return w.print("&{s}", .{decl.name}),
@@ -153,7 +174,7 @@ pub const Object = struct {
name: CValue,
mutability: Mutability,
) error{ OutOfMemory, AnalysisFail }!void {
- var suffix = std.ArrayList(u8).init(o.gpa);
+ var suffix = std.ArrayList(u8).init(o.dg.gpa);
defer suffix.deinit();
var render_ty = ty;
@@ -177,7 +198,7 @@ pub const Object = struct {
.Const => try w.writeAll("const "),
.Mut => {},
}
- try o.writeCValue(w, name);
+ try writeCValue(w, name);
try w.writeAll(")(");
const param_len = render_ty.fnParamLen();
const is_var_args = render_ty.fnIsVarArgs();
@@ -205,7 +226,7 @@ pub const Object = struct {
.Mut => "",
};
try w.print(" {s}", .{const_prefix});
- try o.writeCValue(w, name);
+ try writeCValue(w, name);
}
try w.writeAll(suffix.items);
}
@@ -213,11 +234,14 @@ pub const Object = struct {
/// This data is available both when outputting .c code and when outputting an .h file.
pub const DeclGen = struct {
+ gpa: *std.mem.Allocator,
module: *Module,
decl: *Decl,
fwd_decl: std.ArrayList(u8),
error_msg: ?*Module.ErrorMsg,
+ /// The key of this map is Type which has references to typedefs_arena.
typedefs: TypedefMap,
+ typedefs_arena: *std.mem.Allocator,
fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
@@ -251,7 +275,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
try dg.renderType(writer, t);
try writer.writeAll("){");
- var buf: Type.Payload.ElemType = undefined;
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
try dg.renderValue(writer, t.slicePtrFieldType(&buf), val);
try writer.writeAll(", ");
try writer.print("{d}", .{val.sliceLen()});
@@ -545,7 +569,10 @@ pub const DeclGen = struct {
try dg.typedefs.ensureUnusedCapacity(1);
try w.writeAll(name);
- dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered });
+ dg.typedefs.putAssumeCapacityNoClobber(
+ try t.copy(dg.typedefs_arena),
+ .{ .name = name, .rendered = rendered },
+ );
} else {
try dg.renderType(w, t.elemType());
try w.writeAll(" *");
@@ -586,7 +613,10 @@ pub const DeclGen = struct {
try dg.typedefs.ensureUnusedCapacity(1);
try w.writeAll(name);
- dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered });
+ dg.typedefs.putAssumeCapacityNoClobber(
+ try t.copy(dg.typedefs_arena),
+ .{ .name = name, .rendered = rendered },
+ );
},
.ErrorSet => {
comptime std.debug.assert(Type.initTag(.anyerror).abiSize(std.Target.current) == 2);
@@ -626,7 +656,10 @@ pub const DeclGen = struct {
try dg.typedefs.ensureUnusedCapacity(1);
try w.writeAll(name);
- dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered });
+ dg.typedefs.putAssumeCapacityNoClobber(
+ try t.copy(dg.typedefs_arena),
+ .{ .name = name, .rendered = rendered },
+ );
},
.Struct => {
if (dg.typedefs.get(t)) |some| {
@@ -659,7 +692,10 @@ pub const DeclGen = struct {
try dg.typedefs.ensureUnusedCapacity(1);
try w.writeAll(name);
- dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered });
+ dg.typedefs.putAssumeCapacityNoClobber(
+ try t.copy(dg.typedefs_arena),
+ .{ .name = name, .rendered = rendered },
+ );
},
.Enum => {
// For enums, we simply use the integer tag type.
@@ -724,6 +760,29 @@ pub const DeclGen = struct {
}
};
+pub fn genFunc(f: *Function) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const o = &f.object;
+ const is_global = o.dg.module.decl_exports.contains(f.func.owner_decl);
+ const fwd_decl_writer = o.dg.fwd_decl.writer();
+ if (is_global) {
+ try fwd_decl_writer.writeAll("ZIG_EXTERN_C ");
+ }
+ try o.dg.renderFunctionSignature(fwd_decl_writer, is_global);
+ try fwd_decl_writer.writeAll(";\n");
+
+ try o.indent_writer.insertNewline();
+ try o.dg.renderFunctionSignature(o.writer(), is_global);
+
+ try o.writer().writeByte(' ');
+ const main_body = f.air.getMainBody();
+ try genBody(f, main_body);
+
+ try o.indent_writer.insertNewline();
+}
+
pub fn genDecl(o: *Object) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -732,28 +791,6 @@ pub fn genDecl(o: *Object) !void {
.ty = o.dg.decl.ty,
.val = o.dg.decl.val,
};
- if (tv.val.castTag(.function)) |func_payload| {
- const func: *Module.Fn = func_payload.data;
- if (func.owner_decl == o.dg.decl) {
- const is_global = o.dg.declIsGlobal(tv);
- const fwd_decl_writer = o.dg.fwd_decl.writer();
- if (is_global) {
- try fwd_decl_writer.writeAll("ZIG_EXTERN_C ");
- }
- try o.dg.renderFunctionSignature(fwd_decl_writer, is_global);
- try fwd_decl_writer.writeAll(";\n");
-
- try o.indent_writer.insertNewline();
- try o.dg.renderFunctionSignature(o.writer(), is_global);
-
- try o.writer().writeByte(' ');
- const main_body = o.air.getMainBody();
- try genBody(o, main_body);
-
- try o.indent_writer.insertNewline();
- return;
- }
- }
if (tv.val.tag() == .extern_fn) {
const writer = o.writer();
try writer.writeAll("ZIG_EXTERN_C ");
@@ -821,240 +858,263 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
}
}
-fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void {
- const writer = o.writer();
+fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void {
+ const writer = f.object.writer();
if (body.len == 0) {
try writer.writeAll("{}");
return;
}
try writer.writeAll("{\n");
- o.indent_writer.pushIndent();
+ f.object.indent_writer.pushIndent();
- const air_tags = o.air.instructions.items(.tag);
+ const air_tags = f.air.instructions.items(.tag);
for (body) |inst| {
const result_value = switch (air_tags[inst]) {
// zig fmt: off
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
- .arg => airArg(o),
+ .arg => airArg(f),
- .breakpoint => try airBreakpoint(o),
- .unreach => try airUnreach(o),
+ .breakpoint => try airBreakpoint(f),
+ .unreach => try airUnreach(f),
+ .fence => try airFence(f, inst),
// TODO use a different strategy for add that communicates to the optimizer
// that wrapping is UB.
- .add, .ptr_add => try airBinOp( o, inst, " + "),
- .addwrap => try airWrapOp(o, inst, " + ", "addw_"),
+ .add, .ptr_add => try airBinOp (f, inst, " + "),
// TODO use a different strategy for sub that communicates to the optimizer
// that wrapping is UB.
- .sub, .ptr_sub => try airBinOp( o, inst, " - "),
- .subwrap => try airWrapOp(o, inst, " - ", "subw_"),
+ .sub, .ptr_sub => try airBinOp (f, inst, " - "),
// TODO use a different strategy for mul that communicates to the optimizer
// that wrapping is UB.
- .mul => try airBinOp( o, inst, " * "),
- .mulwrap => try airWrapOp(o, inst, " * ", "mulw_"),
+ .mul => try airBinOp (f, inst, " * "),
// TODO use a different strategy for div that communicates to the optimizer
// that wrapping is UB.
- .div => try airBinOp( o, inst, " / "),
- .rem => try airBinOp( o, inst, " % "),
+ .div => try airBinOp( f, inst, " / "),
+ .rem => try airBinOp( f, inst, " % "),
+ .mod => try airBinOp( f, inst, " mod "), // TODO implement modulus division
- .cmp_eq => try airBinOp(o, inst, " == "),
- .cmp_gt => try airBinOp(o, inst, " > "),
- .cmp_gte => try airBinOp(o, inst, " >= "),
- .cmp_lt => try airBinOp(o, inst, " < "),
- .cmp_lte => try airBinOp(o, inst, " <= "),
- .cmp_neq => try airBinOp(o, inst, " != "),
+ .addwrap => try airWrapOp(f, inst, " + ", "addw_"),
+ .subwrap => try airWrapOp(f, inst, " - ", "subw_"),
+ .mulwrap => try airWrapOp(f, inst, " * ", "mulw_"),
+
+ .add_sat => try airSatOp(f, inst, "adds_"),
+ .sub_sat => try airSatOp(f, inst, "subs_"),
+ .mul_sat => try airSatOp(f, inst, "muls_"),
+ .shl_sat => try airSatOp(f, inst, "shls_"),
+
+ .cmp_eq => try airBinOp(f, inst, " == "),
+ .cmp_gt => try airBinOp(f, inst, " > "),
+ .cmp_gte => try airBinOp(f, inst, " >= "),
+ .cmp_lt => try airBinOp(f, inst, " < "),
+ .cmp_lte => try airBinOp(f, inst, " <= "),
+ .cmp_neq => try airBinOp(f, inst, " != "),
// bool_and and bool_or are non-short-circuit operations
- .bool_and => try airBinOp(o, inst, " & "),
- .bool_or => try airBinOp(o, inst, " | "),
- .bit_and => try airBinOp(o, inst, " & "),
- .bit_or => try airBinOp(o, inst, " | "),
- .xor => try airBinOp(o, inst, " ^ "),
+ .bool_and => try airBinOp(f, inst, " & "),
+ .bool_or => try airBinOp(f, inst, " | "),
+ .bit_and => try airBinOp(f, inst, " & "),
+ .bit_or => try airBinOp(f, inst, " | "),
+ .xor => try airBinOp(f, inst, " ^ "),
+ .shr => try airBinOp(f, inst, " >> "),
+ .shl, .shl_exact => try airBinOp(f, inst, " << "),
+ .not => try airNot (f, inst),
- .shr => try airBinOp(o, inst, " >> "),
- .shl => try airBinOp(o, inst, " << "),
+ .optional_payload => try airOptionalPayload(f, inst),
+ .optional_payload_ptr => try airOptionalPayload(f, inst),
- .not => try airNot( o, inst),
+ .is_err => try airIsErr(f, inst, "", ".", "!="),
+ .is_non_err => try airIsErr(f, inst, "", ".", "=="),
+ .is_err_ptr => try airIsErr(f, inst, "*", "->", "!="),
+ .is_non_err_ptr => try airIsErr(f, inst, "*", "->", "=="),
- .optional_payload => try airOptionalPayload(o, inst),
- .optional_payload_ptr => try airOptionalPayload(o, inst),
+ .is_null => try airIsNull(f, inst, "==", ""),
+ .is_non_null => try airIsNull(f, inst, "!=", ""),
+ .is_null_ptr => try airIsNull(f, inst, "==", "[0]"),
+ .is_non_null_ptr => try airIsNull(f, inst, "!=", "[0]"),
- .is_err => try airIsErr(o, inst, "", ".", "!="),
- .is_non_err => try airIsErr(o, inst, "", ".", "=="),
- .is_err_ptr => try airIsErr(o, inst, "*", "->", "!="),
- .is_non_err_ptr => try airIsErr(o, inst, "*", "->", "=="),
+ .alloc => try airAlloc(f, inst),
+ .assembly => try airAsm(f, inst),
+ .block => try airBlock(f, inst),
+ .bitcast => try airBitcast(f, inst),
+ .call => try airCall(f, inst),
+ .dbg_stmt => try airDbgStmt(f, inst),
+ .intcast => try airIntCast(f, inst),
+ .trunc => try airTrunc(f, inst),
+ .bool_to_int => try airBoolToInt(f, inst),
+ .load => try airLoad(f, inst),
+ .ret => try airRet(f, inst),
+ .store => try airStore(f, inst),
+ .loop => try airLoop(f, inst),
+ .cond_br => try airCondBr(f, inst),
+ .br => try airBr(f, inst),
+ .switch_br => try airSwitchBr(f, inst),
+ .wrap_optional => try airWrapOptional(f, inst),
+ .struct_field_ptr => try airStructFieldPtr(f, inst),
+ .array_to_slice => try airArrayToSlice(f, inst),
+ .cmpxchg_weak => try airCmpxchg(f, inst, "weak"),
+ .cmpxchg_strong => try airCmpxchg(f, inst, "strong"),
+ .atomic_rmw => try airAtomicRmw(f, inst),
+ .atomic_load => try airAtomicLoad(f, inst),
+ .memset => try airMemset(f, inst),
+ .memcpy => try airMemcpy(f, inst),
+ .set_union_tag => try airSetUnionTag(f, inst),
+ .get_union_tag => try airGetUnionTag(f, inst),
+ .clz => try airBuiltinCall(f, inst, "clz"),
+ .ctz => try airBuiltinCall(f, inst, "ctz"),
- .is_null => try airIsNull(o, inst, "==", ""),
- .is_non_null => try airIsNull(o, inst, "!=", ""),
- .is_null_ptr => try airIsNull(o, inst, "==", "[0]"),
- .is_non_null_ptr => try airIsNull(o, inst, "!=", "[0]"),
+ .int_to_float,
+ .float_to_int,
+ .fptrunc,
+ .fpext,
+ .ptrtoint,
+ => try airSimpleCast(f, inst),
- .alloc => try airAlloc(o, inst),
- .assembly => try airAsm(o, inst),
- .block => try airBlock(o, inst),
- .bitcast => try airBitcast(o, inst),
- .call => try airCall(o, inst),
- .dbg_stmt => try airDbgStmt(o, inst),
- .intcast => try airIntCast(o, inst),
- .trunc => try airTrunc(o, inst),
- .bool_to_int => try airBoolToInt(o, inst),
- .load => try airLoad(o, inst),
- .ret => try airRet(o, inst),
- .store => try airStore(o, inst),
- .loop => try airLoop(o, inst),
- .cond_br => try airCondBr(o, inst),
- .br => try airBr(o, inst),
- .switch_br => try airSwitchBr(o, inst),
- .wrap_optional => try airWrapOptional(o, inst),
- .struct_field_ptr => try airStructFieldPtr(o, inst),
- .array_to_slice => try airArrayToSlice(o, inst),
- .cmpxchg_weak => try airCmpxchg(o, inst, "weak"),
- .cmpxchg_strong => try airCmpxchg(o, inst, "strong"),
+ .atomic_store_unordered => try airAtomicStore(f, inst, toMemoryOrder(.Unordered)),
+ .atomic_store_monotonic => try airAtomicStore(f, inst, toMemoryOrder(.Monotonic)),
+ .atomic_store_release => try airAtomicStore(f, inst, toMemoryOrder(.Release)),
+ .atomic_store_seq_cst => try airAtomicStore(f, inst, toMemoryOrder(.SeqCst)),
- .struct_field_ptr_index_0 => try airStructFieldPtrIndex(o, inst, 0),
- .struct_field_ptr_index_1 => try airStructFieldPtrIndex(o, inst, 1),
- .struct_field_ptr_index_2 => try airStructFieldPtrIndex(o, inst, 2),
- .struct_field_ptr_index_3 => try airStructFieldPtrIndex(o, inst, 3),
+ .struct_field_ptr_index_0 => try airStructFieldPtrIndex(f, inst, 0),
+ .struct_field_ptr_index_1 => try airStructFieldPtrIndex(f, inst, 1),
+ .struct_field_ptr_index_2 => try airStructFieldPtrIndex(f, inst, 2),
+ .struct_field_ptr_index_3 => try airStructFieldPtrIndex(f, inst, 3),
- .struct_field_val => try airStructFieldVal(o, inst),
- .slice_ptr => try airSliceField(o, inst, ".ptr;\n"),
- .slice_len => try airSliceField(o, inst, ".len;\n"),
+ .struct_field_val => try airStructFieldVal(f, inst),
+ .slice_ptr => try airSliceField(f, inst, ".ptr;\n"),
+ .slice_len => try airSliceField(f, inst, ".len;\n"),
- .ptr_elem_val => try airPtrElemVal(o, inst, "["),
- .ptr_ptr_elem_val => try airPtrElemVal(o, inst, "[0]["),
- .ptr_elem_ptr => try airPtrElemPtr(o, inst),
- .slice_elem_val => try airSliceElemVal(o, inst, "["),
- .ptr_slice_elem_val => try airSliceElemVal(o, inst, "[0]["),
+ .ptr_elem_val => try airPtrElemVal(f, inst, "["),
+ .ptr_ptr_elem_val => try airPtrElemVal(f, inst, "[0]["),
+ .ptr_elem_ptr => try airPtrElemPtr(f, inst),
+ .slice_elem_val => try airSliceElemVal(f, inst, "["),
+ .ptr_slice_elem_val => try airSliceElemVal(f, inst, "[0]["),
- .unwrap_errunion_payload => try airUnwrapErrUnionPay(o, inst),
- .unwrap_errunion_err => try airUnwrapErrUnionErr(o, inst),
- .unwrap_errunion_payload_ptr => try airUnwrapErrUnionPay(o, inst),
- .unwrap_errunion_err_ptr => try airUnwrapErrUnionErr(o, inst),
- .wrap_errunion_payload => try airWrapErrUnionPay(o, inst),
- .wrap_errunion_err => try airWrapErrUnionErr(o, inst),
-
- .ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}),
- .floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}),
+ .unwrap_errunion_payload => try airUnwrapErrUnionPay(f, inst),
+ .unwrap_errunion_err => try airUnwrapErrUnionErr(f, inst),
+ .unwrap_errunion_payload_ptr => try airUnwrapErrUnionPay(f, inst),
+ .unwrap_errunion_err_ptr => try airUnwrapErrUnionErr(f, inst),
+ .wrap_errunion_payload => try airWrapErrUnionPay(f, inst),
+ .wrap_errunion_err => try airWrapErrUnionErr(f, inst),
// zig fmt: on
};
switch (result_value) {
.none => {},
- else => try o.value_map.putNoClobber(inst, result_value),
+ else => try f.value_map.putNoClobber(inst, result_value),
}
}
- o.indent_writer.popIndent();
+ f.object.indent_writer.popIndent();
try writer.writeAll("}");
}
-fn airSliceField(o: *Object, inst: Air.Inst.Index, suffix: []const u8) !CValue {
- if (o.liveness.isUnused(inst))
+fn airSliceField(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const operand = try o.resolveInst(ty_op.operand);
- const writer = o.writer();
- const local = try o.allocLocal(Type.initTag(.usize), .Const);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand = try f.resolveInst(ty_op.operand);
+ const writer = f.object.writer();
+ const local = try f.allocLocal(Type.initTag(.usize), .Const);
try writer.writeAll(" = ");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(suffix);
return local;
}
-fn airPtrElemVal(o: *Object, inst: Air.Inst.Index, prefix: []const u8) !CValue {
+fn airPtrElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CValue {
const is_volatile = false; // TODO
- if (!is_volatile and o.liveness.isUnused(inst))
+ if (!is_volatile and f.liveness.isUnused(inst))
return CValue.none;
_ = prefix;
- return o.dg.fail("TODO: C backend: airPtrElemVal", .{});
+ return f.fail("TODO: C backend: airPtrElemVal", .{});
}
-fn airPtrElemPtr(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- return o.dg.fail("TODO: C backend: airPtrElemPtr", .{});
+ return f.fail("TODO: C backend: airPtrElemPtr", .{});
}
-fn airSliceElemVal(o: *Object, inst: Air.Inst.Index, prefix: []const u8) !CValue {
+fn airSliceElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CValue {
const is_volatile = false; // TODO
- if (!is_volatile and o.liveness.isUnused(inst))
+ if (!is_volatile and f.liveness.isUnused(inst))
return CValue.none;
- const bin_op = o.air.instructions.items(.data)[inst].bin_op;
- const slice = try o.resolveInst(bin_op.lhs);
- const index = try o.resolveInst(bin_op.rhs);
- const writer = o.writer();
- const local = try o.allocLocal(o.air.typeOfIndex(inst), .Const);
+ const bin_op = f.air.instructions.items(.data)[inst].bin_op;
+ const slice = try f.resolveInst(bin_op.lhs);
+ const index = try f.resolveInst(bin_op.rhs);
+ const writer = f.object.writer();
+ const local = try f.allocLocal(f.air.typeOfIndex(inst), .Const);
try writer.writeAll(" = ");
- try o.writeCValue(writer, slice);
+ try f.writeCValue(writer, slice);
try writer.writeAll(prefix);
- try o.writeCValue(writer, index);
+ try f.writeCValue(writer, index);
try writer.writeAll("];\n");
return local;
}
-fn airAlloc(o: *Object, inst: Air.Inst.Index) !CValue {
- const writer = o.writer();
- const inst_ty = o.air.typeOfIndex(inst);
+fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
+ const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
// First line: the variable used as data storage.
const elem_type = inst_ty.elemType();
const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut;
- const local = try o.allocLocal(elem_type, mutability);
+ const local = try f.allocLocal(elem_type, mutability);
try writer.writeAll(";\n");
return CValue{ .local_ref = local.local };
}
-fn airArg(o: *Object) CValue {
- const i = o.next_arg_index;
- o.next_arg_index += 1;
+fn airArg(f: *Function) CValue {
+ const i = f.next_arg_index;
+ f.next_arg_index += 1;
return .{ .arg = i };
}
-fn airLoad(o: *Object, inst: Air.Inst.Index) !CValue {
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const is_volatile = o.air.typeOf(ty_op.operand).isVolatilePtr();
- if (!is_volatile and o.liveness.isUnused(inst))
+fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const is_volatile = f.air.typeOf(ty_op.operand).isVolatilePtr();
+ if (!is_volatile and f.liveness.isUnused(inst))
return CValue.none;
- const inst_ty = o.air.typeOfIndex(inst);
- const operand = try o.resolveInst(ty_op.operand);
- const writer = o.writer();
- const local = try o.allocLocal(inst_ty, .Const);
+ const inst_ty = f.air.typeOfIndex(inst);
+ const operand = try f.resolveInst(ty_op.operand);
+ const writer = f.object.writer();
+ const local = try f.allocLocal(inst_ty, .Const);
switch (operand) {
.local_ref => |i| {
const wrapped: CValue = .{ .local = i };
try writer.writeAll(" = ");
- try o.writeCValue(writer, wrapped);
+ try f.writeCValue(writer, wrapped);
try writer.writeAll(";\n");
},
.decl_ref => |decl| {
const wrapped: CValue = .{ .decl = decl };
try writer.writeAll(" = ");
- try o.writeCValue(writer, wrapped);
+ try f.writeCValue(writer, wrapped);
try writer.writeAll(";\n");
},
else => {
try writer.writeAll(" = *");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(";\n");
},
}
return local;
}
-fn airRet(o: *Object, inst: Air.Inst.Index) !CValue {
- const un_op = o.air.instructions.items(.data)[inst].un_op;
- const writer = o.writer();
- if (o.air.typeOf(un_op).hasCodeGenBits()) {
- const operand = try o.resolveInst(un_op);
+fn airRet(f: *Function, inst: Air.Inst.Index) !CValue {
+ const un_op = f.air.instructions.items(.data)[inst].un_op;
+ const writer = f.object.writer();
+ if (f.air.typeOf(un_op).hasCodeGenBits()) {
+ const operand = try f.resolveInst(un_op);
try writer.writeAll("return ");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(";\n");
} else {
try writer.writeAll("return;\n");
@@ -1062,75 +1122,75 @@ fn airRet(o: *Object, inst: Air.Inst.Index) !CValue {
return CValue.none;
}
-fn airIntCast(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const operand = try o.resolveInst(ty_op.operand);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand = try f.resolveInst(ty_op.operand);
- const writer = o.writer();
- const inst_ty = o.air.typeOfIndex(inst);
- const local = try o.allocLocal(inst_ty, .Const);
+ const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = (");
- try o.dg.renderType(writer, inst_ty);
+ try f.renderType(writer, inst_ty);
try writer.writeAll(")");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(";\n");
return local;
}
-fn airTrunc(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const operand = try o.resolveInst(ty_op.operand);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand = try f.resolveInst(ty_op.operand);
_ = operand;
- return o.dg.fail("TODO: C backend: airTrunc", .{});
+ return f.fail("TODO: C backend: airTrunc", .{});
}
-fn airBoolToInt(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const un_op = o.air.instructions.items(.data)[inst].un_op;
- const writer = o.writer();
- const inst_ty = o.air.typeOfIndex(inst);
- const operand = try o.resolveInst(un_op);
- const local = try o.allocLocal(inst_ty, .Const);
+ const un_op = f.air.instructions.items(.data)[inst].un_op;
+ const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
+ const operand = try f.resolveInst(un_op);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(";\n");
return local;
}
-fn airStore(o: *Object, inst: Air.Inst.Index) !CValue {
+fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
// *a = b;
- const bin_op = o.air.instructions.items(.data)[inst].bin_op;
- const dest_ptr = try o.resolveInst(bin_op.lhs);
- const src_val = try o.resolveInst(bin_op.rhs);
+ const bin_op = f.air.instructions.items(.data)[inst].bin_op;
+ const dest_ptr = try f.resolveInst(bin_op.lhs);
+ const src_val = try f.resolveInst(bin_op.rhs);
- const writer = o.writer();
+ const writer = f.object.writer();
switch (dest_ptr) {
.local_ref => |i| {
const dest: CValue = .{ .local = i };
- try o.writeCValue(writer, dest);
+ try f.writeCValue(writer, dest);
try writer.writeAll(" = ");
- try o.writeCValue(writer, src_val);
+ try f.writeCValue(writer, src_val);
try writer.writeAll(";\n");
},
.decl_ref => |decl| {
const dest: CValue = .{ .decl = decl };
- try o.writeCValue(writer, dest);
+ try f.writeCValue(writer, dest);
try writer.writeAll(" = ");
- try o.writeCValue(writer, src_val);
+ try f.writeCValue(writer, src_val);
try writer.writeAll(";\n");
},
else => {
try writer.writeAll("*");
- try o.writeCValue(writer, dest_ptr);
+ try f.writeCValue(writer, dest_ptr);
try writer.writeAll(" = ");
- try o.writeCValue(writer, src_val);
+ try f.writeCValue(writer, src_val);
try writer.writeAll(";\n");
},
}
@@ -1138,17 +1198,17 @@ fn airStore(o: *Object, inst: Air.Inst.Index) !CValue {
}
fn airWrapOp(
- o: *Object,
+ f: *Function,
inst: Air.Inst.Index,
str_op: [*:0]const u8,
fn_op: [*:0]const u8,
) !CValue {
- if (o.liveness.isUnused(inst))
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const bin_op = o.air.instructions.items(.data)[inst].bin_op;
- const inst_ty = o.air.typeOfIndex(inst);
- const int_info = inst_ty.intInfo(o.dg.module.getTarget());
+ const bin_op = f.air.instructions.items(.data)[inst].bin_op;
+ const inst_ty = f.air.typeOfIndex(inst);
+ const int_info = inst_ty.intInfo(f.object.dg.module.getTarget());
const bits = int_info.bits;
// if it's an unsigned int with non-arbitrary bit size then we can just add
@@ -1158,12 +1218,12 @@ fn airWrapOp(
else => false,
};
if (ok_bits or inst_ty.tag() != .int_unsigned) {
- return try airBinOp(o, inst, str_op);
+ return try airBinOp(f, inst, str_op);
}
}
if (bits > 64) {
- return o.dg.fail("TODO: C backend: airWrapOp for large integers", .{});
+ return f.fail("TODO: C backend: airWrapOp for large integers", .{});
}
var min_buf: [80]u8 = undefined;
@@ -1210,11 +1270,11 @@ fn airWrapOp(
},
};
- const lhs = try o.resolveInst(bin_op.lhs);
- const rhs = try o.resolveInst(bin_op.rhs);
- const w = o.writer();
+ const lhs = try f.resolveInst(bin_op.lhs);
+ const rhs = try f.resolveInst(bin_op.rhs);
+ const w = f.object.writer();
- const ret = try o.allocLocal(inst_ty, .Mut);
+ const ret = try f.allocLocal(inst_ty, .Mut);
try w.print(" = zig_{s}", .{fn_op});
switch (inst_ty.tag()) {
@@ -1240,71 +1300,179 @@ fn airWrapOp(
}
try w.writeByte('(');
- try o.writeCValue(w, lhs);
+ try f.writeCValue(w, lhs);
try w.writeAll(", ");
- try o.writeCValue(w, rhs);
+ try f.writeCValue(w, rhs);
if (int_info.signedness == .signed) {
try w.print(", {s}", .{min});
}
try w.print(", {s});", .{max});
- try o.indent_writer.insertNewline();
+ try f.object.indent_writer.insertNewline();
return ret;
}
-fn airNot(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airSatOp(f: *Function, inst: Air.Inst.Index, fn_op: [*:0]const u8) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const op = try o.resolveInst(ty_op.operand);
+ const bin_op = f.air.instructions.items(.data)[inst].bin_op;
+ const inst_ty = f.air.typeOfIndex(inst);
+ const int_info = inst_ty.intInfo(f.object.dg.module.getTarget());
+ const bits = int_info.bits;
- const writer = o.writer();
- const inst_ty = o.air.typeOfIndex(inst);
- const local = try o.allocLocal(inst_ty, .Const);
+ switch (bits) {
+ 8, 16, 32, 64, 128 => {},
+ else => return f.object.dg.fail("TODO: C backend: airSatOp for non power of 2 integers", .{}),
+ }
+
+    // The zig_*s_ saturating helper functions only exist for widths up to 64 bits.
+ if (bits > 64) {
+ return f.object.dg.fail("TODO: C backend: airSatOp for large integers", .{});
+ }
+
+ var min_buf: [80]u8 = undefined;
+ const min = switch (int_info.signedness) {
+ .unsigned => "0",
+ else => switch (inst_ty.tag()) {
+ .c_short => "SHRT_MIN",
+ .c_int => "INT_MIN",
+ .c_long => "LONG_MIN",
+ .c_longlong => "LLONG_MIN",
+ .isize => "INTPTR_MIN",
+ else => blk: {
+ // compute the type minimum based on the bitcount (bits)
+ const val = -1 * std.math.pow(i65, 2, @intCast(i65, bits - 1));
+ break :blk std.fmt.bufPrint(&min_buf, "{d}", .{val}) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ else => |e| return e,
+ };
+ },
+ },
+ };
+
+ var max_buf: [80]u8 = undefined;
+ const max = switch (inst_ty.tag()) {
+ .c_short => "SHRT_MAX",
+ .c_ushort => "USHRT_MAX",
+ .c_int => "INT_MAX",
+ .c_uint => "UINT_MAX",
+ .c_long => "LONG_MAX",
+ .c_ulong => "ULONG_MAX",
+ .c_longlong => "LLONG_MAX",
+ .c_ulonglong => "ULLONG_MAX",
+ .isize => "INTPTR_MAX",
+ .usize => "UINTPTR_MAX",
+ else => blk: {
+ const pow_bits = switch (int_info.signedness) {
+ .signed => bits - 1,
+ .unsigned => bits,
+ };
+ const val = std.math.pow(u65, 2, pow_bits) - 1;
+ break :blk std.fmt.bufPrint(&max_buf, "{}", .{val}) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ else => |e| return e,
+ };
+ },
+ };
+
+ const lhs = try f.resolveInst(bin_op.lhs);
+ const rhs = try f.resolveInst(bin_op.rhs);
+ const w = f.object.writer();
+
+ const ret = try f.allocLocal(inst_ty, .Mut);
+ try w.print(" = zig_{s}", .{fn_op});
+
+ switch (inst_ty.tag()) {
+ .isize => try w.writeAll("isize"),
+ .c_short => try w.writeAll("short"),
+ .c_int => try w.writeAll("int"),
+ .c_long => try w.writeAll("long"),
+ .c_longlong => try w.writeAll("longlong"),
+ else => {
+ const prefix_byte: u8 = switch (int_info.signedness) {
+ .signed => 'i',
+ .unsigned => 'u',
+ };
+ for ([_]u8{ 8, 16, 32, 64 }) |nbits| {
+ if (bits <= nbits) {
+ try w.print("{c}{d}", .{ prefix_byte, nbits });
+ break;
+ }
+ } else {
+ unreachable;
+ }
+ },
+ }
+
+ try w.writeByte('(');
+ try f.writeCValue(w, lhs);
+ try w.writeAll(", ");
+ try f.writeCValue(w, rhs);
+
+ if (int_info.signedness == .signed) {
+ try w.print(", {s}", .{min});
+ }
+
+ try w.print(", {s});", .{max});
+ try f.object.indent_writer.insertNewline();
+
+ return ret;
+}
+
+fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const op = try f.resolveInst(ty_op.operand);
+
+ const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
if (inst_ty.zigTypeTag() == .Bool)
try writer.writeAll("!")
else
try writer.writeAll("~");
- try o.writeCValue(writer, op);
+ try f.writeCValue(writer, op);
try writer.writeAll(";\n");
return local;
}
-fn airBinOp(o: *Object, inst: Air.Inst.Index, operator: [*:0]const u8) !CValue {
- if (o.liveness.isUnused(inst))
+fn airBinOp(f: *Function, inst: Air.Inst.Index, operator: [*:0]const u8) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const bin_op = o.air.instructions.items(.data)[inst].bin_op;
- const lhs = try o.resolveInst(bin_op.lhs);
- const rhs = try o.resolveInst(bin_op.rhs);
+ const bin_op = f.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try f.resolveInst(bin_op.lhs);
+ const rhs = try f.resolveInst(bin_op.rhs);
- const writer = o.writer();
- const inst_ty = o.air.typeOfIndex(inst);
- const local = try o.allocLocal(inst_ty, .Const);
+ const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
- try o.writeCValue(writer, lhs);
+ try f.writeCValue(writer, lhs);
try writer.print("{s}", .{operator});
- try o.writeCValue(writer, rhs);
+ try f.writeCValue(writer, rhs);
try writer.writeAll(";\n");
return local;
}
-fn airCall(o: *Object, inst: Air.Inst.Index) !CValue {
- const pl_op = o.air.instructions.items(.data)[inst].pl_op;
- const extra = o.air.extraData(Air.Call, pl_op.payload);
- const args = @bitCast([]const Air.Inst.Ref, o.air.extra[extra.end..][0..extra.data.args_len]);
- const fn_ty = o.air.typeOf(pl_op.operand);
+fn airCall(f: *Function, inst: Air.Inst.Index) !CValue {
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const extra = f.air.extraData(Air.Call, pl_op.payload);
+ const args = @bitCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]);
+ const fn_ty = f.air.typeOf(pl_op.operand);
const ret_ty = fn_ty.fnReturnType();
- const unused_result = o.liveness.isUnused(inst);
- const writer = o.writer();
+ const unused_result = f.liveness.isUnused(inst);
+ const writer = f.object.writer();
var result_local: CValue = .none;
if (unused_result) {
@@ -1312,11 +1480,11 @@ fn airCall(o: *Object, inst: Air.Inst.Index) !CValue {
try writer.print("(void)", .{});
}
} else {
- result_local = try o.allocLocal(ret_ty, .Const);
+ result_local = try f.allocLocal(ret_ty, .Const);
try writer.writeAll(" = ");
}
- if (o.air.value(pl_op.operand)) |func_val| {
+ if (f.air.value(pl_op.operand)) |func_val| {
const fn_decl = if (func_val.castTag(.extern_fn)) |extern_fn|
extern_fn.data
else if (func_val.castTag(.function)) |func_payload|
@@ -1326,8 +1494,8 @@ fn airCall(o: *Object, inst: Air.Inst.Index) !CValue {
try writer.writeAll(mem.spanZ(fn_decl.name));
} else {
- const callee = try o.resolveInst(pl_op.operand);
- try o.writeCValue(writer, callee);
+ const callee = try f.resolveInst(pl_op.operand);
+ try f.writeCValue(writer, callee);
}
try writer.writeAll("(");
@@ -1335,189 +1503,200 @@ fn airCall(o: *Object, inst: Air.Inst.Index) !CValue {
if (i != 0) {
try writer.writeAll(", ");
}
- if (o.air.value(arg)) |val| {
- try o.dg.renderValue(writer, o.air.typeOf(arg), val);
+ if (f.air.value(arg)) |val| {
+ try f.object.dg.renderValue(writer, f.air.typeOf(arg), val);
} else {
- const val = try o.resolveInst(arg);
- try o.writeCValue(writer, val);
+ const val = try f.resolveInst(arg);
+ try f.writeCValue(writer, val);
}
}
try writer.writeAll(");\n");
return result_local;
}
-fn airDbgStmt(o: *Object, inst: Air.Inst.Index) !CValue {
- const dbg_stmt = o.air.instructions.items(.data)[inst].dbg_stmt;
- const writer = o.writer();
+fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue {
+ const dbg_stmt = f.air.instructions.items(.data)[inst].dbg_stmt;
+ const writer = f.object.writer();
try writer.print("#line {d}\n", .{dbg_stmt.line + 1});
return CValue.none;
}
-fn airBlock(o: *Object, inst: Air.Inst.Index) !CValue {
- const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
- const extra = o.air.extraData(Air.Block, ty_pl.payload);
- const body = o.air.extra[extra.end..][0..extra.data.body_len];
+fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue {
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const extra = f.air.extraData(Air.Block, ty_pl.payload);
+ const body = f.air.extra[extra.end..][0..extra.data.body_len];
- const block_id: usize = o.next_block_index;
- o.next_block_index += 1;
- const writer = o.writer();
+ const block_id: usize = f.next_block_index;
+ f.next_block_index += 1;
+ const writer = f.object.writer();
- const inst_ty = o.air.typeOfIndex(inst);
- const result = if (inst_ty.tag() != .void and !o.liveness.isUnused(inst)) blk: {
+ const inst_ty = f.air.typeOfIndex(inst);
+ const result = if (inst_ty.tag() != .void and !f.liveness.isUnused(inst)) blk: {
// allocate a location for the result
- const local = try o.allocLocal(inst_ty, .Mut);
+ const local = try f.allocLocal(inst_ty, .Mut);
try writer.writeAll(";\n");
break :blk local;
} else CValue{ .none = {} };
- try o.blocks.putNoClobber(o.gpa, inst, .{
+ try f.blocks.putNoClobber(f.object.dg.gpa, inst, .{
.block_id = block_id,
.result = result,
});
- try genBody(o, body);
- try o.indent_writer.insertNewline();
+ try genBody(f, body);
+ try f.object.indent_writer.insertNewline();
// label must be followed by an expression, add an empty one.
try writer.print("zig_block_{d}:;\n", .{block_id});
return result;
}
-fn airBr(o: *Object, inst: Air.Inst.Index) !CValue {
- const branch = o.air.instructions.items(.data)[inst].br;
- const block = o.blocks.get(branch.block_inst).?;
+fn airBr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const branch = f.air.instructions.items(.data)[inst].br;
+ const block = f.blocks.get(branch.block_inst).?;
const result = block.result;
- const writer = o.writer();
+ const writer = f.object.writer();
// If result is .none then the value of the block is unused.
if (result != .none) {
- const operand = try o.resolveInst(branch.operand);
- try o.writeCValue(writer, result);
+ const operand = try f.resolveInst(branch.operand);
+ try f.writeCValue(writer, result);
try writer.writeAll(" = ");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(";\n");
}
- try o.writer().print("goto zig_block_{d};\n", .{block.block_id});
+ try f.object.writer().print("goto zig_block_{d};\n", .{block.block_id});
return CValue.none;
}
-fn airBitcast(o: *Object, inst: Air.Inst.Index) !CValue {
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const operand = try o.resolveInst(ty_op.operand);
+fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand = try f.resolveInst(ty_op.operand);
- const writer = o.writer();
- const inst_ty = o.air.typeOfIndex(inst);
+ const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
if (inst_ty.zigTypeTag() == .Pointer and
- o.air.typeOf(ty_op.operand).zigTypeTag() == .Pointer)
+ f.air.typeOf(ty_op.operand).zigTypeTag() == .Pointer)
{
- const local = try o.allocLocal(inst_ty, .Const);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = (");
- try o.dg.renderType(writer, inst_ty);
+ try f.renderType(writer, inst_ty);
try writer.writeAll(")");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(";\n");
return local;
}
- const local = try o.allocLocal(inst_ty, .Mut);
+ const local = try f.allocLocal(inst_ty, .Mut);
try writer.writeAll(";\n");
try writer.writeAll("memcpy(&");
- try o.writeCValue(writer, local);
+ try f.writeCValue(writer, local);
try writer.writeAll(", &");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(", sizeof ");
- try o.writeCValue(writer, local);
+ try f.writeCValue(writer, local);
try writer.writeAll(");\n");
return local;
}
-fn airBreakpoint(o: *Object) !CValue {
- try o.writer().writeAll("zig_breakpoint();\n");
+fn airBreakpoint(f: *Function) !CValue {
+ try f.object.writer().writeAll("zig_breakpoint();\n");
return CValue.none;
}
-fn airUnreach(o: *Object) !CValue {
- try o.writer().writeAll("zig_unreachable();\n");
+fn airFence(f: *Function, inst: Air.Inst.Index) !CValue {
+ const atomic_order = f.air.instructions.items(.data)[inst].fence;
+ const writer = f.object.writer();
+
+ try writer.writeAll("zig_fence(");
+ try writeMemoryOrder(writer, atomic_order);
+ try writer.writeAll(");\n");
+
return CValue.none;
}
-fn airLoop(o: *Object, inst: Air.Inst.Index) !CValue {
- const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
- const loop = o.air.extraData(Air.Block, ty_pl.payload);
- const body = o.air.extra[loop.end..][0..loop.data.body_len];
- try o.writer().writeAll("while (true) ");
- try genBody(o, body);
- try o.indent_writer.insertNewline();
+fn airUnreach(f: *Function) !CValue {
+ try f.object.writer().writeAll("zig_unreachable();\n");
return CValue.none;
}
-fn airCondBr(o: *Object, inst: Air.Inst.Index) !CValue {
- const pl_op = o.air.instructions.items(.data)[inst].pl_op;
- const cond = try o.resolveInst(pl_op.operand);
- const extra = o.air.extraData(Air.CondBr, pl_op.payload);
- const then_body = o.air.extra[extra.end..][0..extra.data.then_body_len];
- const else_body = o.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- const writer = o.writer();
+fn airLoop(f: *Function, inst: Air.Inst.Index) !CValue {
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const loop = f.air.extraData(Air.Block, ty_pl.payload);
+ const body = f.air.extra[loop.end..][0..loop.data.body_len];
+ try f.object.writer().writeAll("while (true) ");
+ try genBody(f, body);
+ try f.object.indent_writer.insertNewline();
+ return CValue.none;
+}
+
+fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const cond = try f.resolveInst(pl_op.operand);
+ const extra = f.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body = f.air.extra[extra.end..][0..extra.data.then_body_len];
+ const else_body = f.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
+ const writer = f.object.writer();
try writer.writeAll("if (");
- try o.writeCValue(writer, cond);
+ try f.writeCValue(writer, cond);
try writer.writeAll(") ");
- try genBody(o, then_body);
+ try genBody(f, then_body);
try writer.writeAll(" else ");
- try genBody(o, else_body);
- try o.indent_writer.insertNewline();
+ try genBody(f, else_body);
+ try f.object.indent_writer.insertNewline();
return CValue.none;
}
-fn airSwitchBr(o: *Object, inst: Air.Inst.Index) !CValue {
- const pl_op = o.air.instructions.items(.data)[inst].pl_op;
- const condition = try o.resolveInst(pl_op.operand);
- const condition_ty = o.air.typeOf(pl_op.operand);
- const switch_br = o.air.extraData(Air.SwitchBr, pl_op.payload);
- const writer = o.writer();
+fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const condition = try f.resolveInst(pl_op.operand);
+ const condition_ty = f.air.typeOf(pl_op.operand);
+ const switch_br = f.air.extraData(Air.SwitchBr, pl_op.payload);
+ const writer = f.object.writer();
try writer.writeAll("switch (");
- try o.writeCValue(writer, condition);
+ try f.writeCValue(writer, condition);
try writer.writeAll(") {");
- o.indent_writer.pushIndent();
+ f.object.indent_writer.pushIndent();
var extra_index: usize = switch_br.end;
var case_i: u32 = 0;
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
- const case = o.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @bitCast([]const Air.Inst.Ref, o.air.extra[case.end..][0..case.data.items_len]);
- const case_body = o.air.extra[case.end + items.len ..][0..case.data.body_len];
+ const case = f.air.extraData(Air.SwitchBr.Case, extra_index);
+ const items = @bitCast([]const Air.Inst.Ref, f.air.extra[case.end..][0..case.data.items_len]);
+ const case_body = f.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
for (items) |item| {
- try o.indent_writer.insertNewline();
+ try f.object.indent_writer.insertNewline();
try writer.writeAll("case ");
- try o.dg.renderValue(writer, condition_ty, o.air.value(item).?);
+ try f.object.dg.renderValue(writer, condition_ty, f.air.value(item).?);
try writer.writeAll(": ");
}
// The case body must be noreturn so we don't need to insert a break.
- try genBody(o, case_body);
+ try genBody(f, case_body);
}
- const else_body = o.air.extra[extra_index..][0..switch_br.data.else_body_len];
- try o.indent_writer.insertNewline();
+ const else_body = f.air.extra[extra_index..][0..switch_br.data.else_body_len];
+ try f.object.indent_writer.insertNewline();
try writer.writeAll("default: ");
- try genBody(o, else_body);
- try o.indent_writer.insertNewline();
+ try genBody(f, else_body);
+ try f.object.indent_writer.insertNewline();
- o.indent_writer.popIndent();
+ f.object.indent_writer.popIndent();
try writer.writeAll("}\n");
return CValue.none;
}
-fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue {
- const air_datas = o.air.instructions.items(.data);
- const air_extra = o.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload);
- const zir = o.dg.decl.namespace.file_scope.zir;
+fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
+ const air_datas = f.air.instructions.items(.data);
+ const air_extra = f.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload);
+ const zir = f.object.dg.decl.namespace.file_scope.zir;
const extended = zir.instructions.items(.data)[air_extra.data.zir_index].extended;
const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
@@ -1526,14 +1705,14 @@ fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue {
const clobbers_len = @truncate(u5, extended.small >> 10);
_ = clobbers_len; // TODO honor these
const is_volatile = @truncate(u1, extended.small >> 15) != 0;
- const outputs = @bitCast([]const Air.Inst.Ref, o.air.extra[air_extra.end..][0..outputs_len]);
- const args = @bitCast([]const Air.Inst.Ref, o.air.extra[air_extra.end + outputs.len ..][0..args_len]);
+ const outputs = @bitCast([]const Air.Inst.Ref, f.air.extra[air_extra.end..][0..outputs_len]);
+ const args = @bitCast([]const Air.Inst.Ref, f.air.extra[air_extra.end + outputs.len ..][0..args_len]);
if (outputs_len > 1) {
- return o.dg.fail("TODO implement codegen for asm with more than 1 output", .{});
+ return f.fail("TODO implement codegen for asm with more than 1 output", .{});
}
- if (o.liveness.isUnused(inst) and !is_volatile)
+ if (f.liveness.isUnused(inst) and !is_volatile)
return CValue.none;
var extra_i: usize = zir_extra.end;
@@ -1548,28 +1727,28 @@ fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue {
};
const args_extra_begin = extra_i;
- const writer = o.writer();
+ const writer = f.object.writer();
for (args) |arg| {
const input = zir.extraData(Zir.Inst.Asm.Input, extra_i);
extra_i = input.end;
const constraint = zir.nullTerminatedString(input.data.constraint);
if (constraint[0] == '{' and constraint[constraint.len - 1] == '}') {
const reg = constraint[1 .. constraint.len - 1];
- const arg_c_value = try o.resolveInst(arg);
+ const arg_c_value = try f.resolveInst(arg);
try writer.writeAll("register ");
- try o.dg.renderType(writer, o.air.typeOf(arg));
+ try f.renderType(writer, f.air.typeOf(arg));
try writer.print(" {s}_constant __asm__(\"{s}\") = ", .{ reg, reg });
- try o.writeCValue(writer, arg_c_value);
+ try f.writeCValue(writer, arg_c_value);
try writer.writeAll(";\n");
} else {
- return o.dg.fail("TODO non-explicit inline asm regs", .{});
+ return f.fail("TODO non-explicit inline asm regs", .{});
}
}
const volatile_string: []const u8 = if (is_volatile) "volatile " else "";
try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, asm_source });
if (output_constraint) |_| {
- return o.dg.fail("TODO: CBE inline asm output", .{});
+ return f.fail("TODO: CBE inline asm output", .{});
}
if (args.len > 0) {
if (output_constraint == null) {
@@ -1595,30 +1774,30 @@ fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue {
}
try writer.writeAll(");\n");
- if (o.liveness.isUnused(inst))
+ if (f.liveness.isUnused(inst))
return CValue.none;
- return o.dg.fail("TODO: C backend: inline asm expression result used", .{});
+ return f.fail("TODO: C backend: inline asm expression result used", .{});
}
fn airIsNull(
- o: *Object,
+ f: *Function,
inst: Air.Inst.Index,
operator: [*:0]const u8,
deref_suffix: [*:0]const u8,
) !CValue {
- if (o.liveness.isUnused(inst))
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const un_op = o.air.instructions.items(.data)[inst].un_op;
- const writer = o.writer();
- const operand = try o.resolveInst(un_op);
+ const un_op = f.air.instructions.items(.data)[inst].un_op;
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(un_op);
- const local = try o.allocLocal(Type.initTag(.bool), .Const);
+ const local = try f.allocLocal(Type.initTag(.bool), .Const);
try writer.writeAll(" = (");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
- if (o.air.typeOf(un_op).isPtrLikeOptional()) {
+ if (f.air.typeOf(un_op).isPtrLikeOptional()) {
// operand is a regular pointer, test `operand !=/== NULL`
try writer.print("){s} {s} NULL;\n", .{ deref_suffix, operator });
} else {
@@ -1627,14 +1806,14 @@ fn airIsNull(
return local;
}
-fn airOptionalPayload(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const writer = o.writer();
- const operand = try o.resolveInst(ty_op.operand);
- const operand_ty = o.air.typeOf(ty_op.operand);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(ty_op.operand);
+ const operand_ty = f.air.typeOf(ty_op.operand);
const opt_ty = if (operand_ty.zigTypeTag() == .Pointer)
operand_ty.elemType()
@@ -1647,98 +1826,98 @@ fn airOptionalPayload(o: *Object, inst: Air.Inst.Index) !CValue {
return operand;
}
- const inst_ty = o.air.typeOfIndex(inst);
+ const inst_ty = f.air.typeOfIndex(inst);
const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else ".";
const maybe_addrof = if (inst_ty.zigTypeTag() == .Pointer) "&" else "";
- const local = try o.allocLocal(inst_ty, .Const);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.print(" = {s}(", .{maybe_addrof});
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.print("){s}payload;\n", .{maybe_deref});
return local;
}
-fn airStructFieldPtr(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
// TODO this @as is needed because of a stage1 bug
return @as(CValue, CValue.none);
- const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
- const extra = o.air.extraData(Air.StructField, ty_pl.payload).data;
- const struct_ptr = try o.resolveInst(extra.struct_operand);
- const struct_ptr_ty = o.air.typeOf(extra.struct_operand);
- return structFieldPtr(o, inst, struct_ptr_ty, struct_ptr, extra.field_index);
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
+ const struct_ptr = try f.resolveInst(extra.struct_operand);
+ const struct_ptr_ty = f.air.typeOf(extra.struct_operand);
+ return structFieldPtr(f, inst, struct_ptr_ty, struct_ptr, extra.field_index);
}
-fn airStructFieldPtrIndex(o: *Object, inst: Air.Inst.Index, index: u8) !CValue {
- if (o.liveness.isUnused(inst))
+fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue {
+ if (f.liveness.isUnused(inst))
// TODO this @as is needed because of a stage1 bug
return @as(CValue, CValue.none);
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const struct_ptr = try o.resolveInst(ty_op.operand);
- const struct_ptr_ty = o.air.typeOf(ty_op.operand);
- return structFieldPtr(o, inst, struct_ptr_ty, struct_ptr, index);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const struct_ptr = try f.resolveInst(ty_op.operand);
+ const struct_ptr_ty = f.air.typeOf(ty_op.operand);
+ return structFieldPtr(f, inst, struct_ptr_ty, struct_ptr, index);
}
-fn structFieldPtr(o: *Object, inst: Air.Inst.Index, struct_ptr_ty: Type, struct_ptr: CValue, index: u32) !CValue {
- const writer = o.writer();
+fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struct_ptr: CValue, index: u32) !CValue {
+ const writer = f.object.writer();
const struct_obj = struct_ptr_ty.elemType().castTag(.@"struct").?.data;
const field_name = struct_obj.fields.keys()[index];
- const inst_ty = o.air.typeOfIndex(inst);
- const local = try o.allocLocal(inst_ty, .Const);
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
switch (struct_ptr) {
.local_ref => |i| {
try writer.print(" = &t{d}.{};\n", .{ i, fmtIdent(field_name) });
},
else => {
try writer.writeAll(" = &");
- try o.writeCValue(writer, struct_ptr);
+ try f.writeCValue(writer, struct_ptr);
try writer.print("->{};\n", .{fmtIdent(field_name)});
},
}
return local;
}
-fn airStructFieldVal(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
- const extra = o.air.extraData(Air.StructField, ty_pl.payload).data;
- const writer = o.writer();
- const struct_byval = try o.resolveInst(extra.struct_operand);
- const struct_ty = o.air.typeOf(extra.struct_operand);
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
+ const writer = f.object.writer();
+ const struct_byval = try f.resolveInst(extra.struct_operand);
+ const struct_ty = f.air.typeOf(extra.struct_operand);
const struct_obj = struct_ty.castTag(.@"struct").?.data;
const field_name = struct_obj.fields.keys()[extra.field_index];
- const inst_ty = o.air.typeOfIndex(inst);
- const local = try o.allocLocal(inst_ty, .Const);
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
- try o.writeCValue(writer, struct_byval);
+ try f.writeCValue(writer, struct_byval);
try writer.print(".{};\n", .{fmtIdent(field_name)});
return local;
}
// *(E!T) -> E NOT *E
-fn airUnwrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = o.air.typeOfIndex(inst);
- const writer = o.writer();
- const operand = try o.resolveInst(ty_op.operand);
- const operand_ty = o.air.typeOf(ty_op.operand);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const inst_ty = f.air.typeOfIndex(inst);
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(ty_op.operand);
+ const operand_ty = f.air.typeOf(ty_op.operand);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (operand_ty.zigTypeTag() == .Pointer) {
- const local = try o.allocLocal(inst_ty, .Const);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = *");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(";\n");
return local;
} else {
@@ -1748,154 +1927,189 @@ fn airUnwrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue {
const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else ".";
- const local = try o.allocLocal(inst_ty, .Const);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = (");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.print("){s}error;\n", .{maybe_deref});
return local;
}
-fn airUnwrapErrUnionPay(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const writer = o.writer();
- const operand = try o.resolveInst(ty_op.operand);
- const operand_ty = o.air.typeOf(ty_op.operand);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(ty_op.operand);
+ const operand_ty = f.air.typeOf(ty_op.operand);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
return CValue.none;
}
- const inst_ty = o.air.typeOfIndex(inst);
+ const inst_ty = f.air.typeOfIndex(inst);
const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else ".";
const maybe_addrof = if (inst_ty.zigTypeTag() == .Pointer) "&" else "";
- const local = try o.allocLocal(inst_ty, .Const);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.print(" = {s}(", .{maybe_addrof});
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.print("){s}payload;\n", .{maybe_deref});
return local;
}
-fn airWrapOptional(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const writer = o.writer();
- const operand = try o.resolveInst(ty_op.operand);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(ty_op.operand);
- const inst_ty = o.air.typeOfIndex(inst);
+ const inst_ty = f.air.typeOfIndex(inst);
if (inst_ty.isPtrLikeOptional()) {
// the operand is just a regular pointer, no need to do anything special.
return operand;
}
// .wrap_optional is used to convert non-optionals into optionals so it can never be null.
- const local = try o.allocLocal(inst_ty, .Const);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = { .is_null = false, .payload =");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll("};\n");
return local;
}
-fn airWrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const writer = o.writer();
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const operand = try o.resolveInst(ty_op.operand);
+ const writer = f.object.writer();
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand = try f.resolveInst(ty_op.operand);
- const inst_ty = o.air.typeOfIndex(inst);
- const local = try o.allocLocal(inst_ty, .Const);
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = { .error = ");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(" };\n");
return local;
}
-fn airWrapErrUnionPay(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const writer = o.writer();
- const operand = try o.resolveInst(ty_op.operand);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(ty_op.operand);
- const inst_ty = o.air.typeOfIndex(inst);
- const local = try o.allocLocal(inst_ty, .Const);
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = { .error = 0, .payload = ");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.writeAll(" };\n");
return local;
}
fn airIsErr(
- o: *Object,
+ f: *Function,
inst: Air.Inst.Index,
deref_prefix: [*:0]const u8,
deref_suffix: [*:0]const u8,
op_str: [*:0]const u8,
) !CValue {
- if (o.liveness.isUnused(inst))
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const un_op = o.air.instructions.items(.data)[inst].un_op;
- const writer = o.writer();
- const operand = try o.resolveInst(un_op);
- const operand_ty = o.air.typeOf(un_op);
- const local = try o.allocLocal(Type.initTag(.bool), .Const);
+ const un_op = f.air.instructions.items(.data)[inst].un_op;
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(un_op);
+ const operand_ty = f.air.typeOf(un_op);
+ const local = try f.allocLocal(Type.initTag(.bool), .Const);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
try writer.print(" = {s}", .{deref_prefix});
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.print(" {s} 0;\n", .{op_str});
} else {
try writer.writeAll(" = ");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.print("{s}error {s} 0;\n", .{ deref_suffix, op_str });
}
return local;
}
-fn airArrayToSlice(o: *Object, inst: Air.Inst.Index) !CValue {
- if (o.liveness.isUnused(inst))
+fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
return CValue.none;
- const inst_ty = o.air.typeOfIndex(inst);
- const local = try o.allocLocal(inst_ty, .Const);
- const ty_op = o.air.instructions.items(.data)[inst].ty_op;
- const writer = o.writer();
- const operand = try o.resolveInst(ty_op.operand);
- const array_len = o.air.typeOf(ty_op.operand).elemType().arrayLen();
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(ty_op.operand);
+ const array_len = f.air.typeOf(ty_op.operand).elemType().arrayLen();
try writer.writeAll(" = { .ptr = ");
- try o.writeCValue(writer, operand);
+ try f.writeCValue(writer, operand);
try writer.print(", .len = {d} }};\n", .{array_len});
return local;
}
-fn airCmpxchg(o: *Object, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
- const ty_pl = o.air.instructions.items(.data)[inst].ty_pl;
- const extra = o.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
- const inst_ty = o.air.typeOfIndex(inst);
- const ptr = try o.resolveInst(extra.ptr);
- const expected_value = try o.resolveInst(extra.expected_value);
- const new_value = try o.resolveInst(extra.new_value);
- const local = try o.allocLocal(inst_ty, .Const);
- const writer = o.writer();
+/// Emits a local variable with the result type and initializes it
+/// with the operand.
+fn airSimpleCast(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
+ return CValue.none;
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(ty_op.operand);
+
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, operand);
+ try writer.writeAll(";\n");
+ return local;
+}
+
+fn airBuiltinCall(f: *Function, inst: Air.Inst.Index, fn_name: [*:0]const u8) !CValue {
+ if (f.liveness.isUnused(inst)) return CValue.none;
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(ty_op.operand);
+
+ // TODO implement the function in zig.h and call it here
+
+ try writer.print(" = {s}(", .{fn_name});
+ try f.writeCValue(writer, operand);
+ try writer.writeAll(");\n");
+ return local;
+}
+
+fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
+ const inst_ty = f.air.typeOfIndex(inst);
+ const ptr = try f.resolveInst(extra.ptr);
+ const expected_value = try f.resolveInst(extra.expected_value);
+ const new_value = try f.resolveInst(extra.new_value);
+ const local = try f.allocLocal(inst_ty, .Const);
+ const writer = f.object.writer();
try writer.print(" = zig_cmpxchg_{s}(", .{flavor});
- try o.writeCValue(writer, ptr);
+ try f.writeCValue(writer, ptr);
try writer.writeAll(", ");
- try o.writeCValue(writer, expected_value);
+ try f.writeCValue(writer, expected_value);
try writer.writeAll(", ");
- try o.writeCValue(writer, new_value);
+ try f.writeCValue(writer, new_value);
try writer.writeAll(", ");
try writeMemoryOrder(writer, extra.successOrder());
try writer.writeAll(", ");
@@ -1905,8 +2119,134 @@ fn airCmpxchg(o: *Object, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
return local;
}
-fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void {
- const str = switch (order) {
+fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+ const inst_ty = f.air.typeOfIndex(inst);
+ const ptr = try f.resolveInst(pl_op.operand);
+ const operand = try f.resolveInst(extra.operand);
+ const local = try f.allocLocal(inst_ty, .Const);
+ const writer = f.object.writer();
+
+ try writer.print(" = zig_atomicrmw_{s}(", .{toAtomicRmwSuffix(extra.op())});
+ try f.writeCValue(writer, ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, operand);
+ try writer.writeAll(", ");
+ try writeMemoryOrder(writer, extra.ordering());
+ try writer.writeAll(");\n");
+
+ return local;
+}
+
+fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
+ const atomic_load = f.air.instructions.items(.data)[inst].atomic_load;
+ const ptr = try f.resolveInst(atomic_load.ptr);
+ const ptr_ty = f.air.typeOf(atomic_load.ptr);
+ if (!ptr_ty.isVolatilePtr() and f.liveness.isUnused(inst))
+ return CValue.none;
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
+ const writer = f.object.writer();
+
+ try writer.writeAll(" = zig_atomic_load(");
+ try f.writeCValue(writer, ptr);
+ try writer.writeAll(", ");
+ try writeMemoryOrder(writer, atomic_load.order);
+ try writer.writeAll(");\n");
+
+ return local;
+}
+
+fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue {
+ const bin_op = f.air.instructions.items(.data)[inst].bin_op;
+ const ptr = try f.resolveInst(bin_op.lhs);
+ const element = try f.resolveInst(bin_op.rhs);
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
+ const writer = f.object.writer();
+
+ try writer.writeAll(" = zig_atomic_store(");
+ try f.writeCValue(writer, ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, element);
+ try writer.print(", {s});\n", .{order});
+
+ return local;
+}
+
+fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue {
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const extra = f.air.extraData(Air.Bin, pl_op.payload).data;
+ const dest_ptr = try f.resolveInst(pl_op.operand);
+ const value = try f.resolveInst(extra.lhs);
+ const len = try f.resolveInst(extra.rhs);
+ const writer = f.object.writer();
+
+ try writer.writeAll("memset(");
+ try f.writeCValue(writer, dest_ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, value);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, len);
+ try writer.writeAll(");\n");
+
+ return CValue.none;
+}
+
+fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const extra = f.air.extraData(Air.Bin, pl_op.payload).data;
+ const dest_ptr = try f.resolveInst(pl_op.operand);
+ const src_ptr = try f.resolveInst(extra.lhs);
+ const len = try f.resolveInst(extra.rhs);
+ const writer = f.object.writer();
+
+ try writer.writeAll("memcpy(");
+ try f.writeCValue(writer, dest_ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, src_ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, len);
+ try writer.writeAll(");\n");
+
+ return CValue.none;
+}
+
+fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
+ const bin_op = f.air.instructions.items(.data)[inst].bin_op;
+ const union_ptr = try f.resolveInst(bin_op.lhs);
+ const new_tag = try f.resolveInst(bin_op.rhs);
+ const writer = f.object.writer();
+
+ try writer.writeAll("*");
+ try f.writeCValue(writer, union_ptr);
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, new_tag);
+ try writer.writeAll(";\n");
+
+ return CValue.none;
+}
+
+fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
+ return CValue.none;
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const writer = f.object.writer();
+ const operand = try f.resolveInst(ty_op.operand);
+
+ try writer.writeAll("get_union_tag(");
+ try f.writeCValue(writer, operand);
+ try writer.writeAll(");\n");
+ return local;
+}
+
+fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 {
+ return switch (order) {
.Unordered => "memory_order_relaxed",
.Monotonic => "memory_order_consume",
.Acquire => "memory_order_acquire",
@@ -1914,7 +2254,24 @@ fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void {
.AcqRel => "memory_order_acq_rel",
.SeqCst => "memory_order_seq_cst",
};
- return w.writeAll(str);
+}
+
+fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void {
+ return w.writeAll(toMemoryOrder(order));
+}
+
+fn toAtomicRmwSuffix(order: std.builtin.AtomicRmwOp) []const u8 {
+ return switch (order) {
+ .Xchg => "xchg",
+ .Add => "add",
+ .Sub => "sub",
+ .And => "and",
+ .Nand => "nand",
+ .Or => "or",
+ .Xor => "xor",
+ .Max => "max",
+ .Min => "min",
+ };
}
fn IndentWriter(comptime UnderlyingWriter: type) type {
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 1e190a53d0..d223569d0f 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -158,15 +158,42 @@ pub const Object = struct {
llvm_module: *const llvm.Module,
context: *const llvm.Context,
target_machine: *const llvm.TargetMachine,
+ /// Ideally we would use `llvm_module.getNamedFunction` to go from *Decl to LLVM function,
+ /// but that has some downsides:
+ /// * we have to compute the fully qualified name every time we want to do the lookup
+ /// * for externally linked functions, the name is not fully qualified, but when
+ /// a Decl goes from exported to not exported and vice-versa, we would use the wrong
+ /// version of the name and incorrectly get function not found in the llvm module.
+ /// * it works only for functions, not for all globals.
+ /// Therefore, this table keeps track of the mapping.
+ decl_map: std.AutoHashMapUnmanaged(*const Module.Decl, *const llvm.Value),
+ /// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of
+ /// the compiler, but the Type/Value memory here is backed by `type_map_arena`.
+ /// TODO we need to remove entries from this map in response to incremental compilation
+ /// but I think the frontend won't tell us about types that get deleted because
+ /// hasCodeGenBits() is false for types.
+ type_map: TypeMap,
+ /// The backing memory for `type_map`. Periodically garbage collected after flush().
+ /// The code for doing the periodical GC is not yet implemented.
+ type_map_arena: std.heap.ArenaAllocator,
+ /// Where to put the output object file, relative to bin_file.options.emit directory.
+ sub_path: []const u8,
- pub fn create(gpa: *Allocator, options: link.Options) !*Object {
+ pub const TypeMap = std.HashMapUnmanaged(
+ Type,
+ *const llvm.Type,
+ Type.HashContext64,
+ std.hash_map.default_max_load_percentage,
+ );
+
+ pub fn create(gpa: *Allocator, sub_path: []const u8, options: link.Options) !*Object {
const obj = try gpa.create(Object);
errdefer gpa.destroy(obj);
- obj.* = try Object.init(gpa, options);
+ obj.* = try Object.init(gpa, sub_path, options);
return obj;
}
- pub fn init(gpa: *Allocator, options: link.Options) !Object {
+ pub fn init(gpa: *Allocator, sub_path: []const u8, options: link.Options) !Object {
const context = llvm.Context.create();
errdefer context.dispose();
@@ -240,22 +267,34 @@ pub const Object = struct {
);
errdefer target_machine.dispose();
+ const target_data = target_machine.createTargetDataLayout();
+ defer target_data.dispose();
+
+ llvm_module.setModuleDataLayout(target_data);
+
return Object{
.llvm_module = llvm_module,
.context = context,
.target_machine = target_machine,
+ .decl_map = .{},
+ .type_map = .{},
+ .type_map_arena = std.heap.ArenaAllocator.init(gpa),
+ .sub_path = sub_path,
};
}
- pub fn deinit(self: *Object) void {
+ pub fn deinit(self: *Object, gpa: *Allocator) void {
self.target_machine.dispose();
self.llvm_module.dispose();
self.context.dispose();
+ self.decl_map.deinit(gpa);
+ self.type_map.deinit(gpa);
+ self.type_map_arena.deinit();
self.* = undefined;
}
pub fn destroy(self: *Object, gpa: *Allocator) void {
- self.deinit();
+ self.deinit(gpa);
gpa.destroy(self);
}
@@ -293,23 +332,18 @@ pub const Object = struct {
const mod = comp.bin_file.options.module.?;
const cache_dir = mod.zig_cache_artifact_directory;
- const emit_bin_path: ?[*:0]const u8 = if (comp.bin_file.options.emit != null) blk: {
- const obj_basename = try std.zig.binNameAlloc(arena, .{
- .root_name = comp.bin_file.options.root_name,
- .target = comp.bin_file.options.target,
- .output_mode = .Obj,
- });
- if (cache_dir.joinZ(arena, &[_][]const u8{obj_basename})) |p| {
- break :blk p.ptr;
- } else |err| {
- return err;
- }
- } else null;
+ const emit_bin_path: ?[*:0]const u8 = if (comp.bin_file.options.emit) |emit|
+ try emit.directory.joinZ(arena, &[_][]const u8{self.sub_path})
+ else
+ null;
const emit_asm_path = try locPath(arena, comp.emit_asm, cache_dir);
const emit_llvm_ir_path = try locPath(arena, comp.emit_llvm_ir, cache_dir);
const emit_llvm_bc_path = try locPath(arena, comp.emit_llvm_bc, cache_dir);
+ const debug_emit_path = emit_bin_path orelse "(none)";
+ log.debug("emit LLVM object to {s}", .{debug_emit_path});
+
var error_message: [*:0]const u8 = undefined;
if (self.target_machine.emitToFile(
self.llvm_module,
@@ -358,18 +392,31 @@ pub const Object = struct {
const llvm_func = try dg.resolveLlvmFunction(decl);
+ if (module.align_stack_fns.get(func)) |align_info| {
+ dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment);
+ dg.addFnAttr(llvm_func, "noinline");
+ } else {
+ DeclGen.removeFnAttr(llvm_func, "alignstack");
+ if (!func.is_noinline) DeclGen.removeFnAttr(llvm_func, "noinline");
+ }
+
+ if (func.is_cold) {
+ dg.addFnAttr(llvm_func, "cold");
+ } else {
+ DeclGen.removeFnAttr(llvm_func, "cold");
+ }
+
// This gets the LLVM values from the function and stores them in `dg.args`.
- const fn_param_len = decl.ty.fnParamLen();
- var args = try dg.gpa.alloc(*const llvm.Value, fn_param_len);
+ const fn_info = decl.ty.fnInfo();
+ var args = try dg.gpa.alloc(*const llvm.Value, fn_info.param_types.len);
for (args) |*arg, i| {
arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i));
}
- // We remove all the basic blocks of a function to support incremental
- // compilation!
- // TODO: remove all basic blocks if functions can have more than one
- if (llvm_func.getFirstBasicBlock()) |bb| {
+ // Remove all the basic blocks of a function in order to start over, generating
+ // LLVM IR from an empty function body.
+ while (llvm_func.getFirstBasicBlock()) |bb| {
bb.deleteBasicBlock();
}
@@ -440,30 +487,62 @@ pub const Object = struct {
) !void {
// If the module does not already have the function, we ignore this function call
// because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`.
- const llvm_fn = self.llvm_module.getNamedFunction(decl.name) orelse return;
+ const llvm_fn = self.decl_map.get(decl) orelse return;
const is_extern = decl.val.tag() == .extern_fn;
- if (is_extern or exports.len != 0) {
- llvm_fn.setLinkage(.External);
+ if (is_extern) {
+ llvm_fn.setValueName(decl.name);
llvm_fn.setUnnamedAddr(.False);
+ llvm_fn.setLinkage(.External);
+ } else if (exports.len != 0) {
+ const exp_name = exports[0].options.name;
+ llvm_fn.setValueName2(exp_name.ptr, exp_name.len);
+ llvm_fn.setUnnamedAddr(.False);
+ switch (exports[0].options.linkage) {
+ .Internal => unreachable,
+ .Strong => llvm_fn.setLinkage(.External),
+ .Weak => llvm_fn.setLinkage(.WeakODR),
+ .LinkOnce => llvm_fn.setLinkage(.LinkOnceODR),
+ }
+ // If a Decl is exported more than once (which is rare),
+ // we add aliases for all but the first export.
+ // TODO LLVM C API does not support deleting aliases. We need to
+ // patch it to support this or figure out how to wrap the C++ API ourselves.
+ // Until then we iterate over existing aliases and make them point
+ // to the correct decl, or otherwise add a new alias. Old aliases are leaked.
+ for (exports[1..]) |exp| {
+ const exp_name_z = try module.gpa.dupeZ(u8, exp.options.name);
+ defer module.gpa.free(exp_name_z);
+
+ if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
+ alias.setAliasee(llvm_fn);
+ } else {
+ const alias = self.llvm_module.addAlias(llvm_fn.typeOf(), llvm_fn, exp_name_z);
+ switch (exp.options.linkage) {
+ .Internal => alias.setLinkage(.Internal),
+ .Strong => alias.setLinkage(.External),
+ .Weak => {
+ if (is_extern) {
+ alias.setLinkage(.ExternalWeak);
+ } else {
+ alias.setLinkage(.WeakODR);
+ }
+ },
+ .LinkOnce => alias.setLinkage(.LinkOnceODR),
+ }
+ }
+ }
} else {
+ const fqn = try decl.getFullyQualifiedName(module.gpa);
+ defer module.gpa.free(fqn);
+ llvm_fn.setValueName2(fqn.ptr, fqn.len);
llvm_fn.setLinkage(.Internal);
llvm_fn.setUnnamedAddr(.True);
}
- // TODO LLVM C API does not support deleting aliases. We need to
- // patch it to support this or figure out how to wrap the C++ API ourselves.
- // Until then we iterate over existing aliases and make them point
- // to the correct decl, or otherwise add a new alias. Old aliases are leaked.
- for (exports) |exp| {
- const exp_name_z = try module.gpa.dupeZ(u8, exp.options.name);
- defer module.gpa.free(exp_name_z);
+ }
- if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
- alias.setAliasee(llvm_fn);
- } else {
- const alias = self.llvm_module.addAlias(llvm_fn.typeOf(), llvm_fn, exp_name_z);
- _ = alias;
- }
- }
+ pub fn freeDecl(self: *Object, decl: *Module.Decl) void {
+ const llvm_value = self.decl_map.get(decl) orelse return;
+ llvm_value.deleteGlobal();
}
};
@@ -472,9 +551,8 @@ pub const DeclGen = struct {
object: *Object,
module: *Module,
decl: *Module.Decl,
- err_msg: ?*Module.ErrorMsg,
-
gpa: *Allocator,
+ err_msg: ?*Module.ErrorMsg,
fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@setCold(true);
@@ -515,33 +593,47 @@ pub const DeclGen = struct {
}
}
- /// If the llvm function does not exist, create it
+ /// If the llvm function does not exist, create it.
+ /// Note that this can be called before the function's semantic analysis has
+ /// completed, so if any attributes rely on that, they must be done in updateFunc, not here.
fn resolveLlvmFunction(self: *DeclGen, decl: *Module.Decl) !*const llvm.Value {
- if (self.llvmModule().getNamedFunction(decl.name)) |llvm_fn| return llvm_fn;
+ const gop = try self.object.decl_map.getOrPut(self.gpa, decl);
+ if (gop.found_existing) return gop.value_ptr.*;
assert(decl.has_tv);
const zig_fn_type = decl.ty;
- const return_type = zig_fn_type.fnReturnType();
- const fn_param_len = zig_fn_type.fnParamLen();
+ const fn_info = zig_fn_type.fnInfo();
+ const return_type = fn_info.return_type;
- const fn_param_types = try self.gpa.alloc(Type, fn_param_len);
- defer self.gpa.free(fn_param_types);
- zig_fn_type.fnParamTypes(fn_param_types);
+ const llvm_param_buffer = try self.gpa.alloc(*const llvm.Type, fn_info.param_types.len);
+ defer self.gpa.free(llvm_param_buffer);
- const llvm_param = try self.gpa.alloc(*const llvm.Type, fn_param_len);
- defer self.gpa.free(llvm_param);
-
- for (fn_param_types) |fn_param, i| {
- llvm_param[i] = try self.llvmType(fn_param);
+ var llvm_params_len: c_uint = 0;
+ for (fn_info.param_types) |param_ty| {
+ if (param_ty.hasCodeGenBits()) {
+ llvm_param_buffer[llvm_params_len] = try self.llvmType(param_ty);
+ llvm_params_len += 1;
+ }
}
+ const llvm_ret_ty = if (!return_type.hasCodeGenBits())
+ self.context.voidType()
+ else
+ try self.llvmType(return_type);
+
const fn_type = llvm.functionType(
- try self.llvmType(return_type),
- llvm_param.ptr,
- @intCast(c_uint, fn_param_len),
+ llvm_ret_ty,
+ llvm_param_buffer.ptr,
+ llvm_params_len,
.False,
);
- const llvm_fn = self.llvmModule().addFunction(decl.name, fn_type);
+ const llvm_addrspace = self.llvmAddressSpace(decl.@"addrspace");
+
+ const fqn = try decl.getFullyQualifiedName(self.gpa);
+ defer self.gpa.free(fqn);
+
+ const llvm_fn = self.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace);
+ gop.value_ptr.* = llvm_fn;
const is_extern = decl.val.tag() == .extern_fn;
if (!is_extern) {
@@ -549,8 +641,85 @@ pub const DeclGen = struct {
llvm_fn.setUnnamedAddr(.True);
}
- // TODO: calling convention, linkage, tsan, etc. see codegen.cpp `make_fn_llvm_value`.
+ // TODO: more attributes. see codegen.cpp `make_fn_llvm_value`.
+ const target = self.module.getTarget();
+ switch (fn_info.cc) {
+ .Unspecified, .Inline, .Async => {
+ llvm_fn.setFunctionCallConv(.Fast);
+ },
+ .C => {
+ llvm_fn.setFunctionCallConv(.C);
+ },
+ .Naked => {
+ self.addFnAttr(llvm_fn, "naked");
+ },
+ .Stdcall => {
+ llvm_fn.setFunctionCallConv(.X86_StdCall);
+ },
+ .Fastcall => {
+ llvm_fn.setFunctionCallConv(.X86_FastCall);
+ },
+ .Vectorcall => {
+ switch (target.cpu.arch) {
+ .i386, .x86_64 => {
+ llvm_fn.setFunctionCallConv(.X86_VectorCall);
+ },
+ .aarch64, .aarch64_be, .aarch64_32 => {
+ llvm_fn.setFunctionCallConv(.AArch64_VectorCall);
+ },
+ else => unreachable,
+ }
+ },
+ .Thiscall => {
+ llvm_fn.setFunctionCallConv(.X86_ThisCall);
+ },
+ .APCS => {
+ llvm_fn.setFunctionCallConv(.ARM_APCS);
+ },
+ .AAPCS => {
+ llvm_fn.setFunctionCallConv(.ARM_AAPCS);
+ },
+ .AAPCSVFP => {
+ llvm_fn.setFunctionCallConv(.ARM_AAPCS_VFP);
+ },
+ .Interrupt => {
+ switch (target.cpu.arch) {
+ .i386, .x86_64 => {
+ llvm_fn.setFunctionCallConv(.X86_INTR);
+ },
+ .avr => {
+ llvm_fn.setFunctionCallConv(.AVR_INTR);
+ },
+ .msp430 => {
+ llvm_fn.setFunctionCallConv(.MSP430_INTR);
+ },
+ else => unreachable,
+ }
+ },
+ .Signal => {
+ llvm_fn.setFunctionCallConv(.AVR_SIGNAL);
+ },
+ .SysV => {
+ llvm_fn.setFunctionCallConv(.X86_64_SysV);
+ },
+ }
+ // Function attributes that are independent of analysis results of the function body.
+ if (!self.module.comp.bin_file.options.red_zone) {
+ self.addFnAttr(llvm_fn, "noredzone");
+ }
+ self.addFnAttr(llvm_fn, "nounwind");
+ if (self.module.comp.unwind_tables) {
+ self.addFnAttr(llvm_fn, "uwtable");
+ }
+ if (self.module.comp.bin_file.options.optimize_mode == .ReleaseSmall) {
+ self.addFnAttr(llvm_fn, "minsize");
+ self.addFnAttr(llvm_fn, "optsize");
+ }
+ if (self.module.comp.bin_file.options.tsan) {
+ self.addFnAttr(llvm_fn, "sanitize_thread");
+ }
+ // TODO add target-cpu and target-features fn attributes
if (return_type.isNoReturn()) {
self.addFnAttr(llvm_fn, "noreturn");
}
@@ -563,18 +732,41 @@ pub const DeclGen = struct {
if (llvm_module.getNamedGlobal(decl.name)) |val| return val;
// TODO: remove this redundant `llvmType`, it is also called in `genTypedValue`.
const llvm_type = try self.llvmType(decl.ty);
- return llvm_module.addGlobal(llvm_type, decl.name);
+ const llvm_addrspace = self.llvmAddressSpace(decl.@"addrspace");
+ return llvm_module.addGlobalInAddressSpace(llvm_type, decl.name, llvm_addrspace);
+ }
+
+ fn llvmAddressSpace(self: DeclGen, address_space: std.builtin.AddressSpace) c_uint {
+ const target = self.module.getTarget();
+ return switch (target.cpu.arch) {
+ .i386, .x86_64 => switch (address_space) {
+ .generic => llvm.address_space.default,
+ .gs => llvm.address_space.x86.gs,
+ .fs => llvm.address_space.x86.fs,
+ .ss => llvm.address_space.x86.ss,
+ },
+ else => switch (address_space) {
+ .generic => llvm.address_space.default,
+ else => unreachable,
+ },
+ };
}
fn llvmType(self: *DeclGen, t: Type) error{ OutOfMemory, CodegenFail }!*const llvm.Type {
+ const gpa = self.gpa;
log.debug("llvmType for {}", .{t});
switch (t.zigTypeTag()) {
- .Void => return self.context.voidType(),
- .NoReturn => return self.context.voidType(),
+ .Void, .NoReturn => return self.context.voidType(),
.Int => {
const info = t.intInfo(self.module.getTarget());
return self.context.intType(info.bits);
},
+ .Enum => {
+ var buffer: Type.Payload.Bits = undefined;
+ const int_ty = t.intTagType(&buffer);
+ const bit_count = int_ty.intInfo(self.module.getTarget()).bits;
+ return self.context.intType(bit_count);
+ },
.Float => switch (t.floatBits(self.module.getTarget())) {
16 => return self.context.halfType(),
32 => return self.context.floatType(),
@@ -586,7 +778,7 @@ pub const DeclGen = struct {
.Bool => return self.context.intType(1),
.Pointer => {
if (t.isSlice()) {
- var buf: Type.Payload.ElemType = undefined;
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_type = t.slicePtrFieldType(&buf);
const fields: [2]*const llvm.Type = .{
@@ -596,7 +788,8 @@ pub const DeclGen = struct {
return self.context.structType(&fields, fields.len, .False);
} else {
const elem_type = try self.llvmType(t.elemType());
- return elem_type.pointerType(0);
+ const llvm_addrspace = self.llvmAddressSpace(t.ptrAddressSpace());
+ return elem_type.pointerType(llvm_addrspace);
}
},
.Array => {
@@ -605,18 +798,18 @@ pub const DeclGen = struct {
return elem_type.arrayType(@intCast(c_uint, total_len));
},
.Optional => {
- if (!t.isPtrLikeOptional()) {
- var buf: Type.Payload.ElemType = undefined;
- const child_type = t.optionalChild(&buf);
+ var buf: Type.Payload.ElemType = undefined;
+ const child_type = t.optionalChild(&buf);
+ const payload_llvm_ty = try self.llvmType(child_type);
- const optional_types: [2]*const llvm.Type = .{
- try self.llvmType(child_type),
- self.context.intType(1),
- };
- return self.context.structType(&optional_types, 2, .False);
- } else {
- return self.todo("implement optional pointers as actual pointers", .{});
+ if (t.isPtrLikeOptional()) {
+ return payload_llvm_ty;
}
+
+ const fields: [2]*const llvm.Type = .{
+ payload_llvm_ty, self.context.intType(1),
+ };
+ return self.context.structType(&fields, fields.len, .False);
},
.ErrorUnion => {
const error_type = t.errorUnionSet();
@@ -634,24 +827,67 @@ pub const DeclGen = struct {
return self.context.intType(16);
},
.Struct => {
+ const gop = try self.object.type_map.getOrPut(gpa, t);
+ if (gop.found_existing) return gop.value_ptr.*;
+
+ // The Type memory is ephemeral; since we want to store a longer-lived
+ // reference, we need to copy it here.
+ gop.key_ptr.* = try t.copy(&self.object.type_map_arena.allocator);
+
const struct_obj = t.castTag(.@"struct").?.data;
assert(struct_obj.haveFieldTypes());
- const llvm_fields = try self.gpa.alloc(*const llvm.Type, struct_obj.fields.count());
- defer self.gpa.free(llvm_fields);
- for (struct_obj.fields.values()) |field, i| {
- llvm_fields[i] = try self.llvmType(field.ty);
+
+ const name = try struct_obj.getFullyQualifiedName(gpa);
+ defer gpa.free(name);
+
+ const llvm_struct_ty = self.context.structCreateNamed(name);
+ gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
+
+ var llvm_field_types: std.ArrayListUnmanaged(*const llvm.Type) = .{};
+ try llvm_field_types.ensureTotalCapacity(gpa, struct_obj.fields.count());
+ defer llvm_field_types.deinit(gpa);
+
+ for (struct_obj.fields.values()) |field| {
+ if (!field.ty.hasCodeGenBits()) continue;
+ llvm_field_types.appendAssumeCapacity(try self.llvmType(field.ty));
}
- return self.context.structType(
- llvm_fields.ptr,
- @intCast(c_uint, llvm_fields.len),
- .False,
+
+ llvm_struct_ty.structSetBody(
+ llvm_field_types.items.ptr,
+ @intCast(c_uint, llvm_field_types.items.len),
+ llvm.Bool.fromBool(struct_obj.layout == .Packed),
);
+
+ return llvm_struct_ty;
+ },
+ .Union => {
+ const union_obj = t.castTag(.@"union").?.data;
+ assert(union_obj.haveFieldTypes());
+
+ const enum_tag_ty = union_obj.tag_ty;
+ const enum_tag_llvm_ty = try self.llvmType(enum_tag_ty);
+
+ if (union_obj.onlyTagHasCodegenBits()) {
+ return enum_tag_llvm_ty;
+ }
+
+ const target = self.module.getTarget();
+ const most_aligned_field_index = union_obj.mostAlignedField(target);
+ const most_aligned_field = union_obj.fields.values()[most_aligned_field_index];
+ // TODO handle when the most aligned field is different from the
+ // biggest sized field.
+
+ const llvm_fields = [_]*const llvm.Type{
+ try self.llvmType(most_aligned_field.ty),
+ enum_tag_llvm_ty,
+ };
+ return self.context.structType(&llvm_fields, llvm_fields.len, .False);
},
.Fn => {
const ret_ty = try self.llvmType(t.fnReturnType());
const params_len = t.fnParamLen();
- const llvm_params = try self.gpa.alloc(*const llvm.Type, params_len);
- defer self.gpa.free(llvm_params);
+ const llvm_params = try gpa.alloc(*const llvm.Type, params_len);
+ defer gpa.free(llvm_params);
for (llvm_params) |*llvm_param, i| {
llvm_param.* = try self.llvmType(t.fnParamType(i));
}
@@ -662,7 +898,9 @@ pub const DeclGen = struct {
@intCast(c_uint, llvm_params.len),
llvm.Bool.fromBool(is_var_args),
);
- return llvm_fn_ty.pointerType(0);
+ // TODO make .Fn not both a pointer type and a prototype
+ const llvm_addrspace = self.llvmAddressSpace(.generic);
+ return llvm_fn_ty.pointerType(llvm_addrspace);
},
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
@@ -673,8 +911,6 @@ pub const DeclGen = struct {
.BoundFn => @panic("TODO remove BoundFn from the language"),
- .Enum,
- .Union,
.Opaque,
.Frame,
.AnyFrame,
@@ -701,13 +937,24 @@ pub const DeclGen = struct {
const llvm_type = try self.llvmType(tv.ty);
if (bigint.eqZero()) return llvm_type.constNull();
- if (bigint.limbs.len != 1) {
- return self.todo("implement bigger bigint", .{});
- }
- const llvm_int = llvm_type.constInt(bigint.limbs[0], .False);
+ const unsigned_val = if (bigint.limbs.len == 1)
+ llvm_type.constInt(bigint.limbs[0], .False)
+ else
+ llvm_type.constIntOfArbitraryPrecision(@intCast(c_uint, bigint.limbs.len), bigint.limbs.ptr);
if (!bigint.positive) {
- return llvm.constNeg(llvm_int);
+ return llvm.constNeg(unsigned_val);
}
+ return unsigned_val;
+ },
+ .Enum => {
+ const llvm_type = try self.llvmType(tv.ty);
+ const uint: u64 = uint: {
+ if (tv.val.castTag(.enum_field_index)) |payload| {
+ break :uint payload.data;
+ }
+ break :uint tv.val.toUnsignedInt();
+ };
+ const llvm_int = llvm_type.constInt(uint, .False);
return llvm_int;
},
.Float => {
@@ -720,7 +967,7 @@ pub const DeclGen = struct {
.Pointer => switch (tv.val.tag()) {
.decl_ref => {
if (tv.ty.isSlice()) {
- var buf: Type.Payload.ElemType = undefined;
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = tv.ty.slicePtrFieldType(&buf);
var slice_len: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
@@ -750,12 +997,13 @@ pub const DeclGen = struct {
decl.alive = true;
const val = try self.resolveGlobalDecl(decl);
const llvm_var_type = try self.llvmType(tv.ty);
- const llvm_type = llvm_var_type.pointerType(0);
+ const llvm_addrspace = self.llvmAddressSpace(decl.@"addrspace");
+ const llvm_type = llvm_var_type.pointerType(llvm_addrspace);
return val.constBitCast(llvm_type);
},
.slice => {
const slice = tv.val.castTag(.slice).?.data;
- var buf: Type.Payload.ElemType = undefined;
+ var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const fields: [2]*const llvm.Value = .{
try self.genTypedValue(.{
.ty = tv.ty.slicePtrFieldType(&buf),
@@ -873,42 +1121,80 @@ pub const DeclGen = struct {
return self.context.constStruct(&fields, fields.len, .False);
},
.Struct => {
- const fields_len = tv.ty.structFieldCount();
+ const llvm_struct_ty = try self.llvmType(tv.ty);
const field_vals = tv.val.castTag(.@"struct").?.data;
const gpa = self.gpa;
- const llvm_fields = try gpa.alloc(*const llvm.Value, fields_len);
- defer gpa.free(llvm_fields);
- for (llvm_fields) |*llvm_field, i| {
- llvm_field.* = try self.genTypedValue(.{
- .ty = tv.ty.structFieldType(i),
- .val = field_vals[i],
- });
+
+ var llvm_fields: std.ArrayListUnmanaged(*const llvm.Value) = .{};
+ try llvm_fields.ensureTotalCapacity(gpa, field_vals.len);
+ defer llvm_fields.deinit(gpa);
+
+ for (field_vals) |field_val, i| {
+ const field_ty = tv.ty.structFieldType(i);
+ if (!field_ty.hasCodeGenBits()) continue;
+
+ llvm_fields.appendAssumeCapacity(try self.genTypedValue(.{
+ .ty = field_ty,
+ .val = field_val,
+ }));
}
- return self.context.constStruct(
- llvm_fields.ptr,
- @intCast(c_uint, llvm_fields.len),
- .False,
+ return llvm_struct_ty.constNamedStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
);
},
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
.Type => unreachable,
.EnumLiteral => unreachable,
- else => return self.todo("implement const of type '{}'", .{tv.ty}),
+ .Void => unreachable,
+ .NoReturn => unreachable,
+ .Undefined => unreachable,
+ .Null => unreachable,
+ .BoundFn => unreachable,
+ .Opaque => unreachable,
+
+ .Union,
+ .Frame,
+ .AnyFrame,
+ .Vector,
+ => return self.todo("implement const of type '{}'", .{tv.ty}),
}
}
- // Helper functions
- fn addAttr(self: *DeclGen, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
+ fn addAttr(dg: *DeclGen, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
+ return dg.addAttrInt(val, index, name, 0);
+ }
+
+ fn removeAttr(val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
assert(kind_id != 0);
- const llvm_attr = self.context.createEnumAttribute(kind_id, 0);
+ val.removeEnumAttributeAtIndex(index, kind_id);
+ }
+
+ fn addAttrInt(
+ dg: *DeclGen,
+ val: *const llvm.Value,
+ index: llvm.AttributeIndex,
+ name: []const u8,
+ int: u64,
+ ) void {
+ const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
+ assert(kind_id != 0);
+ const llvm_attr = dg.context.createEnumAttribute(kind_id, int);
val.addAttributeAtIndex(index, llvm_attr);
}
- fn addFnAttr(self: *DeclGen, val: *const llvm.Value, attr_name: []const u8) void {
- // TODO: improve this API, `addAttr(-1, attr_name)`
- self.addAttr(val, std.math.maxInt(llvm.AttributeIndex), attr_name);
+ fn addFnAttr(dg: *DeclGen, val: *const llvm.Value, name: []const u8) void {
+ dg.addAttr(val, std.math.maxInt(llvm.AttributeIndex), name);
+ }
+
+ fn removeFnAttr(fn_val: *const llvm.Value, name: []const u8) void {
+ removeAttr(fn_val, std.math.maxInt(llvm.AttributeIndex), name);
+ }
+
+ fn addFnAttrInt(dg: *DeclGen, fn_val: *const llvm.Value, name: []const u8, int: u64) void {
+ return dg.addAttrInt(fn_val, std.math.maxInt(llvm.AttributeIndex), name, int);
}
/// If the operand type of an atomic operation is not byte sized we need to
@@ -920,7 +1206,7 @@ pub const DeclGen = struct {
var buffer: Type.Payload.Bits = undefined;
const int_ty = switch (ty.zigTypeTag()) {
.Int => ty,
- .Enum => ty.enumTagType(&buffer),
+ .Enum => ty.intTagType(&buffer),
.Float => {
if (!is_rmw_xchg) return null;
return dg.context.intType(@intCast(c_uint, ty.abiSize(target) * 8));
@@ -950,7 +1236,7 @@ pub const FuncGen = struct {
/// in other instructions. This table is cleared before every function is generated.
func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Index, *const llvm.Value),
- /// These fields are used to refer to the LLVM value of the function paramaters
+ /// These fields are used to refer to the LLVM value of the function parameters
/// in an Arg instruction.
args: []*const llvm.Value,
arg_index: usize,
@@ -1003,22 +1289,27 @@ pub const FuncGen = struct {
for (body) |inst| {
const opt_value: ?*const llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
- .add => try self.airAdd(inst, false),
- .addwrap => try self.airAdd(inst, true),
- .sub => try self.airSub(inst, false),
- .subwrap => try self.airSub(inst, true),
- .mul => try self.airMul(inst, false),
- .mulwrap => try self.airMul(inst, true),
- .div => try self.airDiv(inst),
- .rem => try self.airRem(inst),
- .ptr_add => try self.airPtrAdd(inst),
- .ptr_sub => try self.airPtrSub(inst),
+ .add => try self.airAdd(inst),
+ .addwrap => try self.airAddWrap(inst),
+ .add_sat => try self.airAddSat(inst),
+ .sub => try self.airSub(inst),
+ .subwrap => try self.airSubWrap(inst),
+ .sub_sat => try self.airSubSat(inst),
+ .mul => try self.airMul(inst),
+ .mulwrap => try self.airMulWrap(inst),
+ .mul_sat => try self.airMulSat(inst),
+ .div => try self.airDiv(inst),
+ .rem => try self.airRem(inst),
+ .mod => try self.airMod(inst),
+ .ptr_add => try self.airPtrAdd(inst),
+ .ptr_sub => try self.airPtrSub(inst),
+ .shl => try self.airShl(inst),
+ .shl_sat => try self.airShlSat(inst),
+ .shl_exact => try self.airShlExact(inst),
.bit_and, .bool_and => try self.airAnd(inst),
.bit_or, .bool_or => try self.airOr(inst),
.xor => try self.airXor(inst),
-
- .shl => try self.airShl(inst),
.shr => try self.airShr(inst),
.cmp_eq => try self.airCmp(inst, .eq),
@@ -1049,7 +1340,8 @@ pub const FuncGen = struct {
.cond_br => try self.airCondBr(inst),
.intcast => try self.airIntCast(inst),
.trunc => try self.airTrunc(inst),
- .floatcast => try self.airFloatCast(inst),
+ .fptrunc => try self.airFptrunc(inst),
+ .fpext => try self.airFpext(inst),
.ptrtoint => try self.airPtrToInt(inst),
.load => try self.airLoad(inst),
.loop => try self.airLoop(inst),
@@ -1060,8 +1352,24 @@ pub const FuncGen = struct {
.slice_ptr => try self.airSliceField(inst, 0),
.slice_len => try self.airSliceField(inst, 1),
.array_to_slice => try self.airArrayToSlice(inst),
+ .float_to_int => try self.airFloatToInt(inst),
+ .int_to_float => try self.airIntToFloat(inst),
.cmpxchg_weak => try self.airCmpxchg(inst, true),
.cmpxchg_strong => try self.airCmpxchg(inst, false),
+ .fence => try self.airFence(inst),
+ .atomic_rmw => try self.airAtomicRmw(inst),
+ .atomic_load => try self.airAtomicLoad(inst),
+ .memset => try self.airMemset(inst),
+ .memcpy => try self.airMemcpy(inst),
+ .set_union_tag => try self.airSetUnionTag(inst),
+ .get_union_tag => try self.airGetUnionTag(inst),
+ .clz => try self.airClzCtz(inst, "ctlz"),
+ .ctz => try self.airClzCtz(inst, "cttz"),
+
+ .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
+ .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
+ .atomic_store_release => try self.airAtomicStore(inst, .Release),
+ .atomic_store_seq_cst => try self.airAtomicStore(inst, .SequentiallyConsistent),
.struct_field_ptr => try self.airStructFieldPtr(inst),
.struct_field_val => try self.airStructFieldVal(inst),
@@ -1152,21 +1460,15 @@ pub const FuncGen = struct {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
+ const operand_ty = self.air.typeOf(bin_op.lhs);
- switch (self.air.typeOf(bin_op.lhs).zigTypeTag()) {
- .Int, .Bool, .Pointer, .ErrorSet => {
- const is_signed = inst_ty.isSignedInt();
- const operation = switch (op) {
- .eq => .EQ,
- .neq => .NE,
- .lt => @as(llvm.IntPredicate, if (is_signed) .SLT else .ULT),
- .lte => @as(llvm.IntPredicate, if (is_signed) .SLE else .ULE),
- .gt => @as(llvm.IntPredicate, if (is_signed) .SGT else .UGT),
- .gte => @as(llvm.IntPredicate, if (is_signed) .SGE else .UGE),
- };
- return self.builder.buildICmp(operation, lhs, rhs, "");
+ const int_ty = switch (operand_ty.zigTypeTag()) {
+ .Enum => blk: {
+ var buffer: Type.Payload.Bits = undefined;
+ const int_ty = operand_ty.intTagType(&buffer);
+ break :blk int_ty;
},
+ .Int, .Bool, .Pointer, .ErrorSet => operand_ty,
.Float => {
const operation: llvm.RealPredicate = switch (op) {
.eq => .OEQ,
@@ -1179,7 +1481,17 @@ pub const FuncGen = struct {
return self.builder.buildFCmp(operation, lhs, rhs, "");
},
else => unreachable,
- }
+ };
+ const is_signed = int_ty.isSignedInt();
+ const operation = switch (op) {
+ .eq => .EQ,
+ .neq => .NE,
+ .lt => @as(llvm.IntPredicate, if (is_signed) .SLT else .ULT),
+ .lte => @as(llvm.IntPredicate, if (is_signed) .SLE else .ULE),
+ .gt => @as(llvm.IntPredicate, if (is_signed) .SGT else .UGT),
+ .gte => @as(llvm.IntPredicate, if (is_signed) .SGE else .UGE),
+ };
+ return self.builder.buildICmp(operation, lhs, rhs, "");
}
fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -1285,19 +1597,57 @@ pub const FuncGen = struct {
return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand = try self.resolveInst(ty_op.operand);
- const array_len = self.air.typeOf(ty_op.operand).elemType().arrayLen();
- const usize_llvm_ty = try self.dg.llvmType(Type.initTag(.usize));
- const len = usize_llvm_ty.constInt(array_len, .False);
+ const operand_ty = self.air.typeOf(ty_op.operand);
+ const array_ty = operand_ty.childType();
+ const llvm_usize = try self.dg.llvmType(Type.usize);
+ const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
const slice_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+ if (!array_ty.hasCodeGenBits()) {
+ return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, "");
+ }
+ const operand = try self.resolveInst(ty_op.operand);
const indices: [2]*const llvm.Value = .{
- usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
+ llvm_usize.constNull(), llvm_usize.constNull(),
};
const ptr = self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr, 0, "");
return self.builder.buildInsertValue(partial, len, 1, "");
}
+ fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const dest_ty = self.air.typeOfIndex(inst);
+ const dest_llvm_ty = try self.dg.llvmType(dest_ty);
+
+ if (dest_ty.isSignedInt()) {
+ return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
+ } else {
+ return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
+ }
+ }
+
+ fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const dest_ty = self.air.typeOfIndex(inst);
+ const dest_llvm_ty = try self.dg.llvmType(dest_ty);
+
+ // TODO set fast math flag
+
+ if (dest_ty.isSignedInt()) {
+ return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
+ } else {
+ return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
+ }
+ }
+
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1398,27 +1748,31 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ptr = try self.resolveInst(struct_field.struct_operand);
- const field_index = @intCast(c_uint, struct_field.field_index);
- return self.builder.buildStructGEP(struct_ptr, field_index, "");
+ const struct_ptr_ty = self.air.typeOf(struct_field.struct_operand);
+ return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, struct_field.field_index);
}
- fn airStructFieldPtrIndex(self: *FuncGen, inst: Air.Inst.Index, field_index: c_uint) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ fn airStructFieldPtrIndex(
+ self: *FuncGen,
+ inst: Air.Inst.Index,
+ field_index: u32,
+ ) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const struct_ptr = try self.resolveInst(ty_op.operand);
- return self.builder.buildStructGEP(struct_ptr, field_index, "");
+ const struct_ptr_ty = self.air.typeOf(ty_op.operand);
+ return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index);
}
fn airStructFieldVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
+ const struct_ty = self.air.typeOf(struct_field.struct_operand);
const struct_byval = try self.resolveInst(struct_field.struct_operand);
- const field_index = @intCast(c_uint, struct_field.field_index);
+ const field_index = llvmFieldIndex(struct_ty, struct_field.field_index);
return self.builder.buildExtractValue(struct_byval, field_index, "");
}
@@ -1569,6 +1923,13 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(un_op);
if (operand_is_ptr) {
+ const operand_ty = self.air.typeOf(un_op).elemType();
+ if (operand_ty.isPtrLikeOptional()) {
+ const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const loaded = self.builder.buildLoad(operand, "");
+ return self.builder.buildICmp(.NE, loaded, operand_llvm_ty.constNull(), "");
+ }
+
const index_type = self.context.intType(32);
var indices: [2]*const llvm.Value = .{
@@ -1577,9 +1938,15 @@ pub const FuncGen = struct {
};
return self.builder.buildLoad(self.builder.buildInBoundsGEP(operand, &indices, indices.len, ""), "");
- } else {
- return self.builder.buildExtractValue(operand, 1, "");
}
+
+ const operand_ty = self.air.typeOf(un_op);
+ if (operand_ty.isPtrLikeOptional()) {
+ const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ return self.builder.buildICmp(.NE, operand, operand_llvm_ty.constNull(), "");
+ }
+
+ return self.builder.buildExtractValue(operand, 1, "");
}
fn airIsNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value {
@@ -1632,17 +1999,24 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
if (operand_is_ptr) {
+ const operand_ty = self.air.typeOf(ty_op.operand).elemType();
+ if (operand_ty.isPtrLikeOptional()) {
+ return self.builder.buildLoad(operand, "");
+ }
+
const index_type = self.context.intType(32);
-
var indices: [2]*const llvm.Value = .{
- index_type.constNull(),
- index_type.constNull(),
+ index_type.constNull(), index_type.constNull(),
};
-
return self.builder.buildInBoundsGEP(operand, &indices, 2, "");
- } else {
- return self.builder.buildExtractValue(operand, 0, "");
}
+
+ const operand_ty = self.air.typeOf(ty_op.operand);
+ if (operand_ty.isPtrLikeOptional()) {
+ return operand;
+ }
+
+ return self.builder.buildExtractValue(operand, 0, "");
}
fn airErrUnionPayload(
@@ -1716,51 +2090,115 @@ pub const FuncGen = struct {
return self.todo("implement llvm codegen for 'airWrapErrUnionErr'", .{});
}
- fn airAdd(self: *FuncGen, inst: Air.Inst.Index, wrap: bool) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (inst_ty.isFloat()) return self.builder.buildFAdd(lhs, rhs, "");
- if (wrap) return self.builder.buildAdd(lhs, rhs, "");
+ if (inst_ty.isAnyFloat()) return self.builder.buildFAdd(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, "");
return self.builder.buildNUWAdd(lhs, rhs, "");
}
- fn airSub(self: *FuncGen, inst: Air.Inst.Index, wrap: bool) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+
+ return self.builder.buildAdd(lhs, rhs, "");
+ }
+
+ fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (inst_ty.isFloat()) return self.builder.buildFSub(lhs, rhs, "");
- if (wrap) return self.builder.buildSub(lhs, rhs, "");
+ if (inst_ty.isAnyFloat()) return self.todo("saturating float add", .{});
+ if (inst_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, "");
+
+ return self.builder.buildUAddSat(lhs, rhs, "");
+ }
+
+ fn airSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isAnyFloat()) return self.builder.buildFSub(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, "");
return self.builder.buildNUWSub(lhs, rhs, "");
}
- fn airMul(self: *FuncGen, inst: Air.Inst.Index, wrap: bool) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+
+ return self.builder.buildSub(lhs, rhs, "");
+ }
+
+ fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (inst_ty.isFloat()) return self.builder.buildFMul(lhs, rhs, "");
- if (wrap) return self.builder.buildMul(lhs, rhs, "");
+ if (inst_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
+ if (inst_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, "");
+ return self.builder.buildUSubSat(lhs, rhs, "");
+ }
+
+ fn airMul(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isAnyFloat()) return self.builder.buildFMul(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, "");
return self.builder.buildNUWMul(lhs, rhs, "");
}
+ fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+
+ return self.builder.buildMul(lhs, rhs, "");
+ }
+
+ fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
+ if (inst_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, "");
+ return self.builder.buildUMulFixSat(lhs, rhs, "");
+ }
+
fn airDiv(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1770,7 +2208,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (inst_ty.isFloat()) return self.builder.buildFDiv(lhs, rhs, "");
+ if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
return self.builder.buildUDiv(lhs, rhs, "");
}
@@ -1783,11 +2221,39 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (inst_ty.isFloat()) return self.builder.buildFRem(lhs, rhs, "");
+ if (inst_ty.isRuntimeFloat()) return self.builder.buildFRem(lhs, rhs, "");
if (inst_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, "");
return self.builder.buildURem(lhs, rhs, "");
}
+ fn airMod(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+ const inst_llvm_ty = try self.dg.llvmType(inst_ty);
+
+ if (inst_ty.isRuntimeFloat()) {
+ const a = self.builder.buildFRem(lhs, rhs, "");
+ const b = self.builder.buildFAdd(a, rhs, "");
+ const c = self.builder.buildFRem(b, rhs, "");
+ const zero = inst_llvm_ty.constNull();
+ const ltz = self.builder.buildFCmp(.OLT, lhs, zero, "");
+ return self.builder.buildSelect(ltz, c, a, "");
+ }
+ if (inst_ty.isSignedInt()) {
+ const a = self.builder.buildSRem(lhs, rhs, "");
+ const b = self.builder.buildNSWAdd(a, rhs, "");
+ const c = self.builder.buildSRem(b, rhs, "");
+ const zero = inst_llvm_ty.constNull();
+ const ltz = self.builder.buildICmp(.SLT, lhs, zero, "");
+ return self.builder.buildSelect(ltz, c, a, "");
+ }
+ return self.builder.buildURem(lhs, rhs, "");
+ }
+
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1838,9 +2304,25 @@ pub const FuncGen = struct {
return self.builder.buildXor(lhs, rhs, "");
}
+ fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const lhs_type = self.air.typeOf(bin_op.lhs);
+ const tg = self.dg.module.getTarget();
+ const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg))
+ self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "")
+ else
+ rhs;
+ if (lhs_type.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, "");
+ return self.builder.buildNUWShl(lhs, casted_rhs, "");
+ }
+
fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
+
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -1853,6 +2335,22 @@ pub const FuncGen = struct {
return self.builder.buildShl(lhs, casted_rhs, "");
}
+ fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const lhs_type = self.air.typeOf(bin_op.lhs);
+ const tg = self.dg.module.getTarget();
+ const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg))
+ self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "")
+ else
+ rhs;
+ if (lhs_type.isSignedInt()) return self.builder.buildSShlSat(lhs, casted_rhs, "");
+ return self.builder.buildUShlSat(lhs, casted_rhs, "");
+ }
+
fn airShr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -1877,14 +2375,22 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst))
return null;
+ const target = self.dg.module.getTarget();
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const dest_ty = self.air.typeOfIndex(inst);
+ const dest_info = dest_ty.intInfo(target);
+ const dest_llvm_ty = try self.dg.llvmType(dest_ty);
const operand = try self.resolveInst(ty_op.operand);
- const inst_ty = self.air.typeOfIndex(inst);
+ const operand_ty = self.air.typeOf(ty_op.operand);
+ const operand_info = operand_ty.intInfo(target);
- const signed = inst_ty.isSignedInt();
- // TODO: Should we use intcast here or just a simple bitcast?
- // LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes
- return self.builder.buildIntCast2(operand, try self.dg.llvmType(inst_ty), llvm.Bool.fromBool(signed), "");
+ if (operand_info.bits < dest_info.bits) {
+ switch (operand_info.signedness) {
+ .signed => return self.builder.buildSExt(operand, dest_llvm_ty, ""),
+ .unsigned => return self.builder.buildZExt(operand, dest_llvm_ty, ""),
+ }
+ }
+ return self.builder.buildTrunc(operand, dest_llvm_ty, "");
}
fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -1897,12 +2403,26 @@ pub const FuncGen = struct {
return self.builder.buildTrunc(operand, dest_llvm_ty, "");
}
- fn airFloatCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
- // TODO split floatcast AIR into float_widen and float_shorten
- return self.todo("implement 'airFloatCast'", .{});
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+
+ return self.builder.buildFPTrunc(operand, dest_llvm_ty, "");
+ }
+
+ fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+
+ return self.builder.buildFPExt(operand, dest_llvm_ty, "");
}
fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -1947,16 +2467,17 @@ pub const FuncGen = struct {
}
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
// buildAlloca expects the pointee type, not the pointer type, so assert that
// a Payload.PointerSimple is passed to the alloc instruction.
- const inst_ty = self.air.typeOfIndex(inst);
- const pointee_type = inst_ty.castPointer().?.data;
-
- // TODO: figure out a way to get the name of the var decl.
- // TODO: set alignment and volatile
- return self.buildAlloca(try self.dg.llvmType(pointee_type));
+ const ptr_ty = self.air.typeOfIndex(inst);
+ const pointee_type = ptr_ty.elemType();
+ if (!pointee_type.hasCodeGenBits()) return null;
+ const pointee_llvm_ty = try self.dg.llvmType(pointee_type);
+ const target = self.dg.module.getTarget();
+ const alloca_inst = self.buildAlloca(pointee_llvm_ty);
+ alloca_inst.setAlignment(ptr_ty.ptrAlignment(target));
+ return alloca_inst;
}
/// Use this instead of builder.buildAlloca, because this function makes sure to
@@ -1986,20 +2507,19 @@ pub const FuncGen = struct {
fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dest_ptr = try self.resolveInst(bin_op.lhs);
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
const src_operand = try self.resolveInst(bin_op.rhs);
- // TODO set volatile on this store properly
- _ = self.builder.buildStore(src_operand, dest_ptr);
+ _ = self.store(dest_ptr, ptr_ty, src_operand);
return null;
}
fn airLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
- if (!is_volatile and self.liveness.isUnused(inst))
+ const ptr_ty = self.air.typeOf(ty_op.operand);
+ if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst))
return null;
const ptr = try self.resolveInst(ty_op.operand);
- // TODO set volatile on this load properly
- return self.builder.buildLoad(ptr, "");
+ return self.load(ptr, ptr_ty);
}
fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2009,6 +2529,14 @@ pub const FuncGen = struct {
return null;
}
+ fn airFence(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const atomic_order = self.air.instructions.items(.data)[inst].fence;
+ const llvm_memory_order = toLlvmAtomicOrdering(atomic_order);
+ const single_threaded = llvm.Bool.fromBool(self.single_threaded);
+ _ = self.builder.buildFence(llvm_memory_order, single_threaded, "");
+ return null;
+ }
+
fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*const llvm.Value {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
@@ -2039,8 +2567,6 @@ pub const FuncGen = struct {
result.setWeak(llvm.Bool.fromBool(is_weak));
const optional_ty = self.air.typeOfIndex(inst);
- var buffer: Type.Payload.ElemType = undefined;
- const child_ty = optional_ty.optionalChild(&buffer);
var payload = self.builder.buildExtractValue(result, 0, "");
if (opt_abi_ty != null) {
@@ -2049,8 +2575,7 @@ pub const FuncGen = struct {
const success_bit = self.builder.buildExtractValue(result, 1, "");
if (optional_ty.isPtrLikeOptional()) {
- const child_llvm_ty = try self.dg.llvmType(child_ty);
- return self.builder.buildSelect(success_bit, child_llvm_ty.constNull(), payload, "");
+ return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, "");
}
const optional_llvm_ty = try self.dg.llvmType(optional_ty);
@@ -2059,6 +2584,252 @@ pub const FuncGen = struct {
return self.builder.buildInsertValue(partial, non_null_bit, 1, "");
}
+ fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+ const ptr = try self.resolveInst(pl_op.operand);
+ const ptr_ty = self.air.typeOf(pl_op.operand);
+ const operand_ty = ptr_ty.elemType();
+ const operand = try self.resolveInst(extra.operand);
+ const is_signed_int = operand_ty.isSignedInt();
+ const is_float = operand_ty.isRuntimeFloat();
+ const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
+ const ordering = toLlvmAtomicOrdering(extra.ordering());
+ const single_threaded = llvm.Bool.fromBool(self.single_threaded);
+ const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, op == .Xchg);
+ if (opt_abi_ty) |abi_ty| {
+ // operand needs widening and truncating or bitcasting.
+ const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
+ const casted_operand = if (is_float)
+ self.builder.buildBitCast(operand, abi_ty, "")
+ else if (is_signed_int)
+ self.builder.buildSExt(operand, abi_ty, "")
+ else
+ self.builder.buildZExt(operand, abi_ty, "");
+
+ const uncasted_result = self.builder.buildAtomicRmw(
+ op,
+ casted_ptr,
+ casted_operand,
+ ordering,
+ single_threaded,
+ );
+ const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ if (is_float) {
+ return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, "");
+ } else {
+ return self.builder.buildTrunc(uncasted_result, operand_llvm_ty, "");
+ }
+ }
+
+ if (operand.typeOf().getTypeKind() != .Pointer) {
+ return self.builder.buildAtomicRmw(op, ptr, operand, ordering, single_threaded);
+ }
+
+ // It's a pointer but we need to treat it as an int.
+ const usize_llvm_ty = try self.dg.llvmType(Type.initTag(.usize));
+ const casted_ptr = self.builder.buildBitCast(ptr, usize_llvm_ty.pointerType(0), "");
+ const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, "");
+ const uncasted_result = self.builder.buildAtomicRmw(
+ op,
+ casted_ptr,
+ casted_operand,
+ ordering,
+ single_threaded,
+ );
+ const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, "");
+ }
+
+ fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
+ const ptr = try self.resolveInst(atomic_load.ptr);
+ const ptr_ty = self.air.typeOf(atomic_load.ptr);
+ if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst))
+ return null;
+ const ordering = toLlvmAtomicOrdering(atomic_load.order);
+ const operand_ty = ptr_ty.elemType();
+ const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
+
+ if (opt_abi_ty) |abi_ty| {
+ // operand needs widening and truncating
+ const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
+ const load_inst = self.load(casted_ptr, ptr_ty);
+ load_inst.setOrdering(ordering);
+ return self.builder.buildTrunc(load_inst, try self.dg.llvmType(operand_ty), "");
+ }
+ const load_inst = self.load(ptr, ptr_ty);
+ load_inst.setOrdering(ordering);
+ return load_inst;
+ }
+
+ fn airAtomicStore(
+ self: *FuncGen,
+ inst: Air.Inst.Index,
+ ordering: llvm.AtomicOrdering,
+ ) !?*const llvm.Value {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ var ptr = try self.resolveInst(bin_op.lhs);
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ var element = try self.resolveInst(bin_op.rhs);
+ const operand_ty = ptr_ty.elemType();
+ const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
+
+ if (opt_abi_ty) |abi_ty| {
+ // operand needs widening
+ ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
+ if (operand_ty.isSignedInt()) {
+ element = self.builder.buildSExt(element, abi_ty, "");
+ } else {
+ element = self.builder.buildZExt(element, abi_ty, "");
+ }
+ }
+ const store_inst = self.store(ptr, ptr_ty, element);
+ store_inst.setOrdering(ordering);
+ return null;
+ }
+
+ fn airMemset(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
+ const dest_ptr = try self.resolveInst(pl_op.operand);
+ const ptr_ty = self.air.typeOf(pl_op.operand);
+ const value = try self.resolveInst(extra.lhs);
+ const val_is_undef = if (self.air.value(extra.lhs)) |val| val.isUndef() else false;
+ const len = try self.resolveInst(extra.rhs);
+ const u8_llvm_ty = self.context.intType(8);
+ const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0);
+ const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, "");
+ const fill_char = if (val_is_undef) u8_llvm_ty.constInt(0xaa, .False) else value;
+ const target = self.dg.module.getTarget();
+ const dest_ptr_align = ptr_ty.ptrAlignment(target);
+ _ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
+
+ if (val_is_undef and self.dg.module.comp.bin_file.options.valgrind) {
+ // TODO generate valgrind client request to mark byte range as undefined
+ // see gen_valgrind_undef() in codegen.cpp
+ }
+ return null;
+ }
+
+ fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
+ const dest_ptr = try self.resolveInst(pl_op.operand);
+ const dest_ptr_ty = self.air.typeOf(pl_op.operand);
+ const src_ptr = try self.resolveInst(extra.lhs);
+ const src_ptr_ty = self.air.typeOf(extra.lhs);
+ const len = try self.resolveInst(extra.rhs);
+ const u8_llvm_ty = self.context.intType(8);
+ const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0);
+ const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, "");
+ const src_ptr_u8 = self.builder.buildBitCast(src_ptr, ptr_u8_llvm_ty, "");
+ const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr();
+ const target = self.dg.module.getTarget();
+ _ = self.builder.buildMemCpy(
+ dest_ptr_u8,
+ dest_ptr_ty.ptrAlignment(target),
+ src_ptr_u8,
+ src_ptr_ty.ptrAlignment(target),
+ len,
+ is_volatile,
+ );
+ return null;
+ }
+
+ fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const union_ptr = try self.resolveInst(bin_op.lhs);
+ // TODO handle when onlyTagHasCodegenBits() == true
+ const new_tag = try self.resolveInst(bin_op.rhs);
+ const tag_field_ptr = self.builder.buildStructGEP(union_ptr, 1, "");
+
+ _ = self.builder.buildStore(new_tag, tag_field_ptr);
+ return null;
+ }
+
+ fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst))
+ return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const un_ty = self.air.typeOf(ty_op.operand);
+ const un = try self.resolveInst(ty_op.operand);
+
+ _ = un_ty; // TODO handle when onlyTagHasCodegenBits() == true and other union forms
+ return self.builder.buildExtractValue(un, 1, "");
+ }
+
+ fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, prefix: [*:0]const u8) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand_ty = self.air.typeOf(ty_op.operand);
+ const operand = try self.resolveInst(ty_op.operand);
+ const target = self.dg.module.getTarget();
+ const bits = operand_ty.intInfo(target).bits;
+
+ var fn_name_buf: [100]u8 = undefined;
+ const llvm_fn_name = std.fmt.bufPrintZ(&fn_name_buf, "llvm.{s}.i{d}", .{
+ prefix, bits,
+ }) catch unreachable;
+ const llvm_i1 = self.context.intType(1);
+ const fn_val = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
+ const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const param_types = [_]*const llvm.Type{ operand_llvm_ty, llvm_i1 };
+ const fn_type = llvm.functionType(operand_llvm_ty, ¶m_types, param_types.len, .False);
+ break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ };
+
+ const params = [_]*const llvm.Value{ operand, llvm_i1.constNull() };
+ const wrong_size_result = self.builder.buildCall(fn_val, ¶ms, params.len, "");
+ const result_ty = self.air.typeOfIndex(inst);
+ const result_llvm_ty = try self.dg.llvmType(result_ty);
+ const result_bits = result_ty.intInfo(target).bits;
+ if (bits > result_bits) {
+ return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
+ } else if (bits < result_bits) {
+ return self.builder.buildZExt(wrong_size_result, result_llvm_ty, "");
+ } else {
+ return wrong_size_result;
+ }
+ }
+
+ fn fieldPtr(
+ self: *FuncGen,
+ inst: Air.Inst.Index,
+ struct_ptr: *const llvm.Value,
+ struct_ptr_ty: Type,
+ field_index: u32,
+ ) !?*const llvm.Value {
+ const struct_ty = struct_ptr_ty.childType();
+ switch (struct_ty.zigTypeTag()) {
+ .Struct => {
+ const llvm_field_index = llvmFieldIndex(struct_ty, field_index);
+ return self.builder.buildStructGEP(struct_ptr, llvm_field_index, "");
+ },
+ .Union => return self.unionFieldPtr(inst, struct_ptr, struct_ty, field_index),
+ else => unreachable,
+ }
+ }
+
+ fn unionFieldPtr(
+ self: *FuncGen,
+ inst: Air.Inst.Index,
+ union_ptr: *const llvm.Value,
+ union_ty: Type,
+ field_index: c_uint,
+ ) !?*const llvm.Value {
+ const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+ const field = &union_obj.fields.values()[field_index];
+ const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+ if (!field.ty.hasCodeGenBits()) {
+ return null;
+ }
+ const union_field_ptr = self.builder.buildStructGEP(union_ptr, 0, "");
+ return self.builder.buildBitCast(union_field_ptr, result_llvm_ty, "");
+ }
+
fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
@@ -2067,6 +2838,27 @@ pub const FuncGen = struct {
// `getIntrinsicDeclaration`
return self.llvmModule().getIntrinsicDeclaration(id, null, 0);
}
+
+ fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) *const llvm.Value {
+ const llvm_inst = self.builder.buildLoad(ptr, "");
+ const target = self.dg.module.getTarget();
+ llvm_inst.setAlignment(ptr_ty.ptrAlignment(target));
+ llvm_inst.setVolatile(llvm.Bool.fromBool(ptr_ty.isVolatilePtr()));
+ return llvm_inst;
+ }
+
+ fn store(
+ self: *FuncGen,
+ ptr: *const llvm.Value,
+ ptr_ty: Type,
+ elem: *const llvm.Value,
+ ) *const llvm.Value {
+ const llvm_inst = self.builder.buildStore(elem, ptr);
+ const target = self.dg.module.getTarget();
+ llvm_inst.setAlignment(ptr_ty.ptrAlignment(target));
+ llvm_inst.setVolatile(llvm.Bool.fromBool(ptr_ty.isVolatilePtr()));
+ return llvm_inst;
+ }
};
fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
@@ -2261,3 +3053,33 @@ fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) llvm.AtomicOrderi
.SeqCst => .SequentiallyConsistent,
};
}
+
+fn toLlvmAtomicRmwBinOp(
+ op: std.builtin.AtomicRmwOp,
+ is_signed: bool,
+ is_float: bool,
+) llvm.AtomicRMWBinOp {
+ return switch (op) {
+ .Xchg => .Xchg,
+ .Add => if (is_float) llvm.AtomicRMWBinOp.FAdd else return .Add,
+ .Sub => if (is_float) llvm.AtomicRMWBinOp.FSub else return .Sub,
+ .And => .And,
+ .Nand => .Nand,
+ .Or => .Or,
+ .Xor => .Xor,
+ .Max => if (is_signed) llvm.AtomicRMWBinOp.Max else return .UMax,
+ .Min => if (is_signed) llvm.AtomicRMWBinOp.Min else return .UMin,
+ };
+}
+
+/// Take into account 0 bit fields.
+fn llvmFieldIndex(ty: Type, index: u32) c_uint {
+ const struct_obj = ty.castTag(.@"struct").?.data;
+ var result: c_uint = 0;
+ for (struct_obj.fields.values()[0..index]) |field| {
+ if (field.ty.hasCodeGenBits()) {
+ result += 1;
+ }
+ }
+ return result;
+}
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index 1e6201a42a..496579008e 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -85,6 +85,9 @@ pub const Value = opaque {
pub const addAttributeAtIndex = LLVMAddAttributeAtIndex;
extern fn LLVMAddAttributeAtIndex(*const Value, Idx: AttributeIndex, A: *const Attribute) void;
+ pub const removeEnumAttributeAtIndex = LLVMRemoveEnumAttributeAtIndex;
+ extern fn LLVMRemoveEnumAttributeAtIndex(F: *const Value, Idx: AttributeIndex, KindID: c_uint) void;
+
pub const getFirstBasicBlock = LLVMGetFirstBasicBlock;
extern fn LLVMGetFirstBasicBlock(Fn: *const Value) ?*const BasicBlock;
@@ -136,6 +139,30 @@ pub const Value = opaque {
pub const setWeak = LLVMSetWeak;
extern fn LLVMSetWeak(CmpXchgInst: *const Value, IsWeak: Bool) void;
+
+ pub const setOrdering = LLVMSetOrdering;
+ extern fn LLVMSetOrdering(MemoryAccessInst: *const Value, Ordering: AtomicOrdering) void;
+
+ pub const setVolatile = LLVMSetVolatile;
+ extern fn LLVMSetVolatile(MemoryAccessInst: *const Value, IsVolatile: Bool) void;
+
+ pub const setAlignment = LLVMSetAlignment;
+ extern fn LLVMSetAlignment(V: *const Value, Bytes: c_uint) void;
+
+ pub const getFunctionCallConv = LLVMGetFunctionCallConv;
+ extern fn LLVMGetFunctionCallConv(Fn: *const Value) CallConv;
+
+ pub const setFunctionCallConv = LLVMSetFunctionCallConv;
+ extern fn LLVMSetFunctionCallConv(Fn: *const Value, CC: CallConv) void;
+
+ pub const setValueName = LLVMSetValueName;
+ extern fn LLVMSetValueName(Val: *const Value, Name: [*:0]const u8) void;
+
+ pub const setValueName2 = LLVMSetValueName2;
+ extern fn LLVMSetValueName2(Val: *const Value, Name: [*]const u8, NameLen: usize) void;
+
+ pub const deleteFunction = LLVMDeleteFunction;
+ extern fn LLVMDeleteFunction(Fn: *const Value) void;
};
pub const Type = opaque {
@@ -148,12 +175,22 @@ pub const Type = opaque {
pub const constInt = LLVMConstInt;
extern fn LLVMConstInt(IntTy: *const Type, N: c_ulonglong, SignExtend: Bool) *const Value;
+ pub const constIntOfArbitraryPrecision = LLVMConstIntOfArbitraryPrecision;
+ extern fn LLVMConstIntOfArbitraryPrecision(IntTy: *const Type, NumWords: c_uint, Words: [*]const u64) *const Value;
+
pub const constReal = LLVMConstReal;
extern fn LLVMConstReal(RealTy: *const Type, N: f64) *const Value;
pub const constArray = LLVMConstArray;
extern fn LLVMConstArray(ElementTy: *const Type, ConstantVals: [*]*const Value, Length: c_uint) *const Value;
+ pub const constNamedStruct = LLVMConstNamedStruct;
+ extern fn LLVMConstNamedStruct(
+ StructTy: *const Type,
+ ConstantVals: [*]const *const Value,
+ Count: c_uint,
+ ) *const Value;
+
pub const getUndef = LLVMGetUndef;
extern fn LLVMGetUndef(Ty: *const Type) *const Value;
@@ -170,6 +207,9 @@ pub const Type = opaque {
ElementCount: c_uint,
Packed: Bool,
) void;
+
+ pub const getTypeKind = LLVMGetTypeKind;
+ extern fn LLVMGetTypeKind(Ty: *const Type) TypeKind;
};
pub const Module = opaque {
@@ -182,9 +222,15 @@ pub const Module = opaque {
pub const verify = LLVMVerifyModule;
extern fn LLVMVerifyModule(*const Module, Action: VerifierFailureAction, OutMessage: *[*:0]const u8) Bool;
+ pub const setModuleDataLayout = LLVMSetModuleDataLayout;
+ extern fn LLVMSetModuleDataLayout(*const Module, *const TargetData) void;
+
pub const addFunction = LLVMAddFunction;
extern fn LLVMAddFunction(*const Module, Name: [*:0]const u8, FunctionTy: *const Type) *const Value;
+ pub const addFunctionInAddressSpace = ZigLLVMAddFunctionInAddressSpace;
+ extern fn ZigLLVMAddFunctionInAddressSpace(*const Module, Name: [*:0]const u8, FunctionTy: *const Type, AddressSpace: c_uint) *const Value;
+
pub const getNamedFunction = LLVMGetNamedFunction;
extern fn LLVMGetNamedFunction(*const Module, Name: [*:0]const u8) ?*const Value;
@@ -197,6 +243,9 @@ pub const Module = opaque {
pub const addGlobal = LLVMAddGlobal;
extern fn LLVMAddGlobal(M: *const Module, Ty: *const Type, Name: [*:0]const u8) *const Value;
+ pub const addGlobalInAddressSpace = LLVMAddGlobalInAddressSpace;
+ extern fn LLVMAddGlobalInAddressSpace(M: *const Module, Ty: *const Type, Name: [*:0]const u8, AddressSpace: c_uint) *const Value;
+
pub const getNamedGlobal = LLVMGetNamedGlobal;
extern fn LLVMGetNamedGlobal(M: *const Module, Name: [*:0]const u8) ?*const Value;
@@ -268,7 +317,7 @@ extern fn LLVMGetInlineAsm(
pub const functionType = LLVMFunctionType;
extern fn LLVMFunctionType(
ReturnType: *const Type,
- ParamTypes: [*]*const Type,
+ ParamTypes: [*]const *const Type,
ParamCount: c_uint,
IsVarArg: Bool,
) *const Type;
@@ -314,7 +363,7 @@ pub const Builder = opaque {
extern fn LLVMBuildCall(
*const Builder,
Fn: *const Value,
- Args: [*]*const Value,
+ Args: [*]const *const Value,
NumArgs: c_uint,
Name: [*:0]const u8,
) *const Value;
@@ -365,6 +414,12 @@ pub const Builder = opaque {
pub const buildNUWAdd = LLVMBuildNUWAdd;
extern fn LLVMBuildNUWAdd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+ pub const buildSAddSat = ZigLLVMBuildSAddSat;
+ extern fn ZigLLVMBuildSAddSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildUAddSat = ZigLLVMBuildUAddSat;
+ extern fn ZigLLVMBuildUAddSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
pub const buildFSub = LLVMBuildFSub;
extern fn LLVMBuildFSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
@@ -377,6 +432,12 @@ pub const Builder = opaque {
pub const buildNUWSub = LLVMBuildNUWSub;
extern fn LLVMBuildNUWSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+ pub const buildSSubSat = ZigLLVMBuildSSubSat;
+ extern fn ZigLLVMBuildSSubSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildUSubSat = ZigLLVMBuildUSubSat;
+ extern fn ZigLLVMBuildUSubSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
pub const buildFMul = LLVMBuildFMul;
extern fn LLVMBuildFMul(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
@@ -389,6 +450,12 @@ pub const Builder = opaque {
pub const buildNUWMul = LLVMBuildNUWMul;
extern fn LLVMBuildNUWMul(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+ pub const buildSMulFixSat = ZigLLVMBuildSMulFixSat;
+ extern fn ZigLLVMBuildSMulFixSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildUMulFixSat = ZigLLVMBuildUMulFixSat;
+ extern fn ZigLLVMBuildUMulFixSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
pub const buildUDiv = LLVMBuildUDiv;
extern fn LLVMBuildUDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
@@ -419,6 +486,18 @@ pub const Builder = opaque {
pub const buildShl = LLVMBuildShl;
extern fn LLVMBuildShl(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+ pub const buildNUWShl = ZigLLVMBuildNUWShl;
+ extern fn ZigLLVMBuildNUWShl(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildNSWShl = ZigLLVMBuildNSWShl;
+ extern fn ZigLLVMBuildNSWShl(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildSShlSat = ZigLLVMBuildSShlSat;
+ extern fn ZigLLVMBuildSShlSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildUShlSat = ZigLLVMBuildUShlSat;
+ extern fn ZigLLVMBuildUShlSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
pub const buildOr = LLVMBuildOr;
extern fn LLVMBuildOr(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
@@ -481,6 +560,14 @@ pub const Builder = opaque {
Name: [*:0]const u8,
) *const Value;
+ pub const buildIntToPtr = LLVMBuildIntToPtr;
+ extern fn LLVMBuildIntToPtr(
+ *const Builder,
+ Val: *const Value,
+ DestTy: *const Type,
+ Name: [*:0]const u8,
+ ) *const Value;
+
pub const buildStructGEP = LLVMBuildStructGEP;
extern fn LLVMBuildStructGEP(
B: *const Builder,
@@ -525,6 +612,93 @@ pub const Builder = opaque {
Else: *const Value,
Name: [*:0]const u8,
) *const Value;
+
+ pub const buildFence = LLVMBuildFence;
+ extern fn LLVMBuildFence(
+ B: *const Builder,
+ ordering: AtomicOrdering,
+ singleThread: Bool,
+ Name: [*:0]const u8,
+ ) *const Value;
+
+ pub const buildAtomicRmw = LLVMBuildAtomicRMW;
+ extern fn LLVMBuildAtomicRMW(
+ B: *const Builder,
+ op: AtomicRMWBinOp,
+ PTR: *const Value,
+ Val: *const Value,
+ ordering: AtomicOrdering,
+ singleThread: Bool,
+ ) *const Value;
+
+ pub const buildFPToUI = LLVMBuildFPToUI;
+ extern fn LLVMBuildFPToUI(
+ *const Builder,
+ Val: *const Value,
+ DestTy: *const Type,
+ Name: [*:0]const u8,
+ ) *const Value;
+
+ pub const buildFPToSI = LLVMBuildFPToSI;
+ extern fn LLVMBuildFPToSI(
+ *const Builder,
+ Val: *const Value,
+ DestTy: *const Type,
+ Name: [*:0]const u8,
+ ) *const Value;
+
+ pub const buildUIToFP = LLVMBuildUIToFP;
+ extern fn LLVMBuildUIToFP(
+ *const Builder,
+ Val: *const Value,
+ DestTy: *const Type,
+ Name: [*:0]const u8,
+ ) *const Value;
+
+ pub const buildSIToFP = LLVMBuildSIToFP;
+ extern fn LLVMBuildSIToFP(
+ *const Builder,
+ Val: *const Value,
+ DestTy: *const Type,
+ Name: [*:0]const u8,
+ ) *const Value;
+
+ pub const buildFPTrunc = LLVMBuildFPTrunc;
+ extern fn LLVMBuildFPTrunc(
+ *const Builder,
+ Val: *const Value,
+ DestTy: *const Type,
+ Name: [*:0]const u8,
+ ) *const Value;
+
+ pub const buildFPExt = LLVMBuildFPExt;
+ extern fn LLVMBuildFPExt(
+ *const Builder,
+ Val: *const Value,
+ DestTy: *const Type,
+ Name: [*:0]const u8,
+ ) *const Value;
+
+ pub const buildMemSet = ZigLLVMBuildMemSet;
+ extern fn ZigLLVMBuildMemSet(
+ B: *const Builder,
+ Ptr: *const Value,
+ Val: *const Value,
+ Len: *const Value,
+ Align: c_uint,
+ is_volatile: bool,
+ ) *const Value;
+
+ pub const buildMemCpy = ZigLLVMBuildMemCpy;
+ extern fn ZigLLVMBuildMemCpy(
+ B: *const Builder,
+ Dst: *const Value,
+ DstAlign: c_uint,
+ Src: *const Value,
+ SrcAlign: c_uint,
+ Size: *const Value,
+ is_volatile: bool,
+ ) *const Value;
};
pub const IntPredicate = enum(c_uint) {
@@ -598,6 +772,14 @@ pub const TargetMachine = opaque {
llvm_ir_filename: ?[*:0]const u8,
bitcode_filename: ?[*:0]const u8,
) bool;
+
+ pub const createTargetDataLayout = LLVMCreateTargetDataLayout;
+ extern fn LLVMCreateTargetDataLayout(*const TargetMachine) *const TargetData;
+};
+
+pub const TargetData = opaque {
+ pub const dispose = LLVMDisposeTargetData;
+ extern fn LLVMDisposeTargetData(*const TargetData) void;
};
pub const CodeModel = enum(c_int) {
@@ -915,3 +1097,151 @@ pub const AtomicOrdering = enum(c_uint) {
AcquireRelease = 6,
SequentiallyConsistent = 7,
};
+
+pub const AtomicRMWBinOp = enum(c_int) {
+ Xchg,
+ Add,
+ Sub,
+ And,
+ Nand,
+ Or,
+ Xor,
+ Max,
+ Min,
+ UMax,
+ UMin,
+ FAdd,
+ FSub,
+};
+
+pub const TypeKind = enum(c_int) {
+ Void,
+ Half,
+ Float,
+ Double,
+ X86_FP80,
+ FP128,
+ PPC_FP128,
+ Label,
+ Integer,
+ Function,
+ Struct,
+ Array,
+ Pointer,
+ Vector,
+ Metadata,
+ X86_MMX,
+ Token,
+ ScalableVector,
+ BFloat,
+ X86_AMX,
+};
+
+pub const CallConv = enum(c_uint) {
+ C = 0,
+ Fast = 8,
+ Cold = 9,
+ GHC = 10,
+ HiPE = 11,
+ WebKit_JS = 12,
+ AnyReg = 13,
+ PreserveMost = 14,
+ PreserveAll = 15,
+ Swift = 16,
+ CXX_FAST_TLS = 17,
+
+ X86_StdCall = 64,
+ X86_FastCall = 65,
+ ARM_APCS = 66,
+ ARM_AAPCS = 67,
+ ARM_AAPCS_VFP = 68,
+ MSP430_INTR = 69,
+ X86_ThisCall = 70,
+ PTX_Kernel = 71,
+ PTX_Device = 72,
+ SPIR_FUNC = 75,
+ SPIR_KERNEL = 76,
+ Intel_OCL_BI = 77,
+ X86_64_SysV = 78,
+ Win64 = 79,
+ X86_VectorCall = 80,
+ HHVM = 81,
+ HHVM_C = 82,
+ X86_INTR = 83,
+ AVR_INTR = 84,
+ AVR_SIGNAL = 85,
+ AVR_BUILTIN = 86,
+ AMDGPU_VS = 87,
+ AMDGPU_GS = 88,
+ AMDGPU_PS = 89,
+ AMDGPU_CS = 90,
+ AMDGPU_KERNEL = 91,
+ X86_RegCall = 92,
+ AMDGPU_HS = 93,
+ MSP430_BUILTIN = 94,
+ AMDGPU_LS = 95,
+ AMDGPU_ES = 96,
+ AArch64_VectorCall = 97,
+};
+
+pub const address_space = struct {
+ pub const default: c_uint = 0;
+
+ // See llvm/lib/Target/X86/X86.h
+ pub const x86_64 = x86;
+ pub const x86 = struct {
+ pub const gs: c_uint = 256;
+ pub const fs: c_uint = 257;
+ pub const ss: c_uint = 258;
+
+ pub const ptr32_sptr: c_uint = 270;
+ pub const ptr32_uptr: c_uint = 271;
+ pub const ptr64: c_uint = 272;
+ };
+
+ // See llvm/lib/Target/AVR/AVR.h
+ pub const avr = struct {
+ pub const data_memory: c_uint = 0;
+ pub const program_memory: c_uint = 1;
+ };
+
+ // See llvm/lib/Target/NVPTX/NVPTX.h
+ pub const nvptx = struct {
+ pub const generic: c_uint = 0;
+ pub const global: c_uint = 1;
+ pub const constant: c_uint = 2;
+ pub const shared: c_uint = 3;
+ pub const param: c_uint = 4;
+ pub const local: c_uint = 5;
+ };
+
+ // See llvm/lib/Target/AMDGPU/AMDGPU.h
+ pub const amdgpu = struct {
+ pub const flat: c_uint = 0;
+ pub const global: c_uint = 1;
+ pub const region: c_uint = 2;
+ pub const local: c_uint = 3;
+ pub const constant: c_uint = 4;
+ pub const private: c_uint = 5;
+ pub const constant_32bit: c_uint = 6;
+ pub const buffer_fat_pointer: c_uint = 7;
+ pub const param_d: c_uint = 6;
+ pub const param_i: c_uint = 7;
+ pub const constant_buffer_0: c_uint = 8;
+ pub const constant_buffer_1: c_uint = 9;
+ pub const constant_buffer_2: c_uint = 10;
+ pub const constant_buffer_3: c_uint = 11;
+ pub const constant_buffer_4: c_uint = 12;
+ pub const constant_buffer_5: c_uint = 13;
+ pub const constant_buffer_6: c_uint = 14;
+ pub const constant_buffer_7: c_uint = 15;
+ pub const constant_buffer_8: c_uint = 16;
+ pub const constant_buffer_9: c_uint = 17;
+ pub const constant_buffer_10: c_uint = 18;
+ pub const constant_buffer_11: c_uint = 19;
+ pub const constant_buffer_12: c_uint = 20;
+ pub const constant_buffer_13: c_uint = 21;
+ pub const constant_buffer_14: c_uint = 22;
+ pub const constant_buffer_15: c_uint = 23;
+ };
+};
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 7429e3c3b0..5826daa5a5 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -260,7 +260,7 @@ pub const DeclGen = struct {
};
}
- /// Generate the code for `decl`. If a reportable error occured during code generation,
+ /// Generate the code for `decl`. If a reportable error occurred during code generation,
/// a message is returned by this function. Callee owns the memory. If this function
/// returns such a reportable error, it is valid to be called again for a different decl.
pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
@@ -565,7 +565,7 @@ pub const DeclGen = struct {
}
},
// When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType.
- .Pointer => return self.fail("Cannot create pointer with unkown storage class", .{}),
+ .Pointer => return self.fail("Cannot create pointer with unknown storage class", .{}),
.Vector => {
// Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations
// which work on them), so simply use those.
@@ -629,7 +629,7 @@ pub const DeclGen = struct {
const params = decl.ty.fnParamLen();
var i: usize = 0;
- try self.args.ensureCapacity(params);
+ try self.args.ensureTotalCapacity(params);
while (i < params) : (i += 1) {
const param_type_id = self.spv.types.get(decl.ty.fnParamType(i)).?;
const arg_result_id = self.spv.allocResultId();
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index bb05567236..9bd80f7d84 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -1005,7 +1005,7 @@ pub const Context = struct {
const rhs = self.resolveInst(bin_op.rhs);
// it's possible for both lhs and/or rhs to return an offset as well,
- // in which case we return the first offset occurance we find.
+ // in which case we return the first offset occurrence we find.
const offset = blk: {
if (lhs == .code_offset) break :blk lhs.code_offset;
if (rhs == .code_offset) break :blk rhs.code_offset;
@@ -1031,7 +1031,7 @@ pub const Context = struct {
const rhs = self.resolveInst(bin_op.rhs);
// it's possible for both lhs and/or rhs to return an offset as well,
- // in which case we return the first offset occurance we find.
+ // in which case we return the first offset occurrence we find.
const offset = blk: {
if (lhs == .code_offset) break :blk lhs.code_offset;
if (rhs == .code_offset) break :blk rhs.code_offset;
@@ -1395,7 +1395,7 @@ pub const Context = struct {
}
// We map every block to its block index.
- // We then determine how far we have to jump to it by substracting it from current block depth
+ // We then determine how far we have to jump to it by subtracting it from current block depth
const idx: u32 = self.block_depth - self.blocks.get(br.block_inst).?;
const writer = self.code.writer();
try writer.writeByte(wasm.opcode(.br));
diff --git a/src/crash_report.zig b/src/crash_report.zig
new file mode 100644
index 0000000000..84f4b8db84
--- /dev/null
+++ b/src/crash_report.zig
@@ -0,0 +1,581 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const debug = std.debug;
+const os = std.os;
+const io = std.io;
+const print_zir = @import("print_zir.zig");
+
+const Module = @import("Module.zig");
+const Sema = @import("Sema.zig");
+const Zir = @import("Zir.zig");
+
+pub const is_enabled = builtin.mode == .Debug;
+
+/// To use these crash report diagnostics, publish these symbols in your main file.
+/// You will also need to call initialize() on startup, preferably as the very first operation in your program.
+pub const root_decls = struct {
+ pub const panic = if (is_enabled) compilerPanic else std.builtin.default_panic;
+ pub const enable_segfault_handler = if (is_enabled) false else debug.default_enable_segfault_handler;
+};
+
+/// Install signal handlers to identify crashes and report diagnostics.
+pub fn initialize() void {
+ if (is_enabled and debug.have_segfault_handling_support) {
+ attachSegfaultHandler();
+ }
+}
+
+fn En(comptime T: type) type {
+ return if (is_enabled) T else void;
+}
+
+fn en(val: anytype) En(@TypeOf(val)) {
+ return if (is_enabled) val else {};
+}
+
+pub const AnalyzeBody = struct {
+ parent: if (is_enabled) ?*AnalyzeBody else void,
+ sema: En(*Sema),
+ block: En(*Module.Scope.Block),
+ body: En([]const Zir.Inst.Index),
+ body_index: En(usize),
+
+ pub fn push(self: *@This()) void {
+ if (!is_enabled) return;
+ const head = &zir_state;
+ debug.assert(self.parent == null);
+ self.parent = head.*;
+ head.* = self;
+ }
+
+ pub fn pop(self: *@This()) void {
+ if (!is_enabled) return;
+ const head = &zir_state;
+ const old = head.*.?;
+ debug.assert(old == self);
+ head.* = old.parent;
+ }
+
+ pub fn setBodyIndex(self: *@This(), index: usize) void {
+ if (!is_enabled) return;
+ self.body_index = index;
+ }
+};
+
+threadlocal var zir_state: ?*AnalyzeBody = if (is_enabled) null else @compileError("Cannot use zir_state if crash_report is disabled.");
+
+pub fn prepAnalyzeBody(sema: *Sema, block: *Module.Scope.Block, body: []const Zir.Inst.Index) AnalyzeBody {
+ if (is_enabled) {
+ return .{
+ .parent = null,
+ .sema = sema,
+ .block = block,
+ .body = body,
+ .body_index = 0,
+ };
+ } else {
+ if (@sizeOf(AnalyzeBody) != 0)
+ @compileError("AnalyzeBody must have zero size when crash reports are disabled");
+ return undefined;
+ }
+}
+
+fn dumpStatusReport() !void {
+ const anal = zir_state orelse return;
+ // Note: We have the panic mutex here, so we can safely use the global crash heap.
+ var fba = std.heap.FixedBufferAllocator.init(&crash_heap);
+ const allocator = &fba.allocator;
+
+ const stderr = io.getStdErr().writer();
+ const block: *Scope.Block = anal.block;
+
+ try stderr.writeAll("Analyzing ");
+ try writeFullyQualifiedDeclWithFile(block.src_decl, stderr);
+ try stderr.writeAll("\n");
+
+ print_zir.renderInstructionContext(
+ allocator,
+ anal.body,
+ anal.body_index,
+ block.src_decl.getFileScope(),
+ block.src_decl.src_node,
+ 6, // indent
+ stderr,
+ ) catch |err| switch (err) {
+ error.OutOfMemory => try stderr.writeAll(" \n"),
+ else => |e| return e,
+ };
+ try stderr.writeAll(" For full context, use the command\n zig ast-check -t ");
+ try writeFilePath(block.src_decl.getFileScope(), stderr);
+ try stderr.writeAll("\n\n");
+
+ var parent = anal.parent;
+ while (parent) |curr| {
+ fba.reset();
+ try stderr.writeAll(" in ");
+ try writeFullyQualifiedDeclWithFile(curr.block.src_decl, stderr);
+ try stderr.writeAll("\n > ");
+ print_zir.renderSingleInstruction(
+ allocator,
+ curr.body[curr.body_index],
+ curr.block.src_decl.getFileScope(),
+ curr.block.src_decl.src_node,
+ 6, // indent
+ stderr,
+ ) catch |err| switch (err) {
+ error.OutOfMemory => try stderr.writeAll(" \n"),
+ else => |e| return e,
+ };
+ try stderr.writeAll("\n");
+
+ parent = curr.parent;
+ }
+
+ try stderr.writeAll("\n");
+}
+
+const Scope = Module.Scope;
+const Decl = Module.Decl;
+
+var crash_heap: [16 * 4096]u8 = undefined;
+
+fn writeFilePath(file: *Scope.File, stream: anytype) !void {
+ if (file.pkg.root_src_directory.path) |path| {
+ try stream.writeAll(path);
+ try stream.writeAll(std.fs.path.sep_str);
+ }
+ try stream.writeAll(file.sub_file_path);
+}
+
+fn writeFullyQualifiedDeclWithFile(decl: *Decl, stream: anytype) !void {
+ try writeFilePath(decl.getFileScope(), stream);
+ try stream.writeAll(": ");
+ try decl.namespace.renderFullyQualifiedName(std.mem.sliceTo(decl.name, 0), stream);
+}
+
+fn compilerPanic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
+ PanicSwitch.preDispatch();
+ @setCold(true);
+ const ret_addr = @returnAddress();
+ const stack_ctx: StackContext = .{ .current = .{ .ret_addr = ret_addr } };
+ PanicSwitch.dispatch(error_return_trace, stack_ctx, msg);
+}
+
+/// Attaches a global SIGSEGV handler
+pub fn attachSegfaultHandler() void {
+ if (!debug.have_segfault_handling_support) {
+ @compileError("segfault handler not supported for this target");
+ }
+ if (builtin.os.tag == .windows) {
+ _ = os.windows.kernel32.AddVectoredExceptionHandler(0, handleSegfaultWindows);
+ return;
+ }
+ var act = os.Sigaction{
+ .handler = .{ .sigaction = handleSegfaultLinux },
+ .mask = os.empty_sigset,
+ .flags = (os.SA.SIGINFO | os.SA.RESTART | os.SA.RESETHAND),
+ };
+
+ os.sigaction(os.SIG.SEGV, &act, null);
+ os.sigaction(os.SIG.ILL, &act, null);
+ os.sigaction(os.SIG.BUS, &act, null);
+}
+
+fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_void) callconv(.C) noreturn {
+ // TODO: use alarm() here to prevent infinite loops
+ PanicSwitch.preDispatch();
+
+ const addr = switch (builtin.os.tag) {
+ .linux => @ptrToInt(info.fields.sigfault.addr),
+ .freebsd => @ptrToInt(info.addr),
+ .netbsd => @ptrToInt(info.info.reason.fault.addr),
+ .openbsd => @ptrToInt(info.data.fault.addr),
+ .solaris => @ptrToInt(info.reason.fault.addr),
+ else => @compileError("TODO implement handleSegfaultLinux for new linux OS"),
+ };
+
+ var err_buffer: [128]u8 = undefined;
+ const error_msg = switch (sig) {
+ os.SIG.SEGV => std.fmt.bufPrint(&err_buffer, "Segmentation fault at address 0x{x}", .{addr}) catch "Segmentation fault",
+ os.SIG.ILL => std.fmt.bufPrint(&err_buffer, "Illegal instruction at address 0x{x}", .{addr}) catch "Illegal instruction",
+ os.SIG.BUS => std.fmt.bufPrint(&err_buffer, "Bus error at address 0x{x}", .{addr}) catch "Bus error",
+ else => std.fmt.bufPrint(&err_buffer, "Unknown error (signal {}) at address 0x{x}", .{ sig, addr }) catch "Unknown error",
+ };
+
+ const stack_ctx: StackContext = switch (builtin.cpu.arch) {
+ .i386 => ctx: {
+ const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+ const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]);
+ const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]);
+ break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
+ },
+ .x86_64 => ctx: {
+ const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+ const ip = switch (builtin.os.tag) {
+ .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]),
+ .freebsd => @intCast(usize, ctx.mcontext.rip),
+ .openbsd => @intCast(usize, ctx.sc_rip),
+ else => unreachable,
+ };
+ const bp = switch (builtin.os.tag) {
+ .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]),
+ .openbsd => @intCast(usize, ctx.sc_rbp),
+ .freebsd => @intCast(usize, ctx.mcontext.rbp),
+ else => unreachable,
+ };
+ break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
+ },
+ .arm => ctx: {
+ const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+ const ip = @intCast(usize, ctx.mcontext.arm_pc);
+ const bp = @intCast(usize, ctx.mcontext.arm_fp);
+ break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
+ },
+ .aarch64 => ctx: {
+ const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+ const ip = @intCast(usize, ctx.mcontext.pc);
+ // x29 is the ABI-designated frame pointer
+ const bp = @intCast(usize, ctx.mcontext.regs[29]);
+ break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
+ },
+ else => .not_supported,
+ };
+
+ PanicSwitch.dispatch(null, stack_ctx, error_msg);
+}
+
+const WindowsSegfaultMessage = union(enum) {
+ literal: []const u8,
+ segfault: void,
+ illegal_instruction: void,
+};
+
+fn handleSegfaultWindows(info: *os.windows.EXCEPTION_POINTERS) callconv(os.windows.WINAPI) c_long {
+ switch (info.ExceptionRecord.ExceptionCode) {
+ os.windows.EXCEPTION_DATATYPE_MISALIGNMENT => handleSegfaultWindowsExtra(info, .{ .literal = "Unaligned Memory Access" }),
+ os.windows.EXCEPTION_ACCESS_VIOLATION => handleSegfaultWindowsExtra(info, .segfault),
+ os.windows.EXCEPTION_ILLEGAL_INSTRUCTION => handleSegfaultWindowsExtra(info, .illegal_instruction),
+ os.windows.EXCEPTION_STACK_OVERFLOW => handleSegfaultWindowsExtra(info, .{ .literal = "Stack Overflow" }),
+ else => return os.windows.EXCEPTION_CONTINUE_SEARCH,
+ }
+}
+
+fn handleSegfaultWindowsExtra(info: *os.windows.EXCEPTION_POINTERS, comptime msg: WindowsSegfaultMessage) noreturn {
+ PanicSwitch.preDispatch();
+
+ const stack_ctx = if (@hasDecl(os.windows, "CONTEXT")) ctx: {
+ const regs = info.ContextRecord.getRegs();
+ break :ctx StackContext{ .exception = .{ .bp = regs.bp, .ip = regs.ip } };
+ } else ctx: {
+ const addr = @ptrToInt(info.ExceptionRecord.ExceptionAddress);
+ break :ctx StackContext{ .current = .{ .ret_addr = addr } };
+ };
+
+ switch (msg) {
+ .literal => |err| PanicSwitch.dispatch(null, stack_ctx, err),
+ .segfault => {
+ const format_item = "Segmentation fault at address 0x{x}";
+ var buf: [format_item.len + 32]u8 = undefined; // 32 is arbitrary, but sufficiently large
+ const to_print = std.fmt.bufPrint(&buf, format_item, .{info.ExceptionRecord.ExceptionInformation[1]}) catch unreachable;
+ PanicSwitch.dispatch(null, stack_ctx, to_print);
+ },
+ .illegal_instruction => {
+ const ip: ?usize = switch (stack_ctx) {
+ .exception => |ex| ex.ip,
+ .current => |cur| cur.ret_addr,
+ .not_supported => null,
+ };
+
+ if (ip) |addr| {
+ const format_item = "Illegal instruction at address 0x{x}";
+ var buf: [format_item.len + 32]u8 = undefined; // 32 is arbitrary, but sufficiently large
+ const to_print = std.fmt.bufPrint(&buf, format_item, .{addr}) catch unreachable;
+ PanicSwitch.dispatch(null, stack_ctx, to_print);
+ } else {
+ PanicSwitch.dispatch(null, stack_ctx, "Illegal Instruction");
+ }
+ },
+ }
+}
+
+const StackContext = union(enum) {
+ current: struct {
+ ret_addr: ?usize,
+ },
+ exception: struct {
+ bp: usize,
+ ip: usize,
+ },
+ not_supported: void,
+
+ pub fn dumpStackTrace(ctx: @This()) void {
+ switch (ctx) {
+ .current => |ct| {
+ debug.dumpCurrentStackTrace(ct.ret_addr);
+ },
+ .exception => |ex| {
+ debug.dumpStackTraceFromBase(ex.bp, ex.ip);
+ },
+ .not_supported => {
+ const stderr = io.getStdErr().writer();
+ stderr.writeAll("Stack trace not supported on this platform.\n") catch {};
+ },
+ }
+ }
+};
+
+const PanicSwitch = struct {
+ const RecoverStage = enum {
+ initialize,
+ report_stack,
+ release_mutex,
+ release_ref_count,
+ abort,
+ silent_abort,
+ };
+
+ const RecoverVerbosity = enum {
+ message_and_stack,
+ message_only,
+ silent,
+ };
+
+ const PanicState = struct {
+ recover_stage: RecoverStage = .initialize,
+ recover_verbosity: RecoverVerbosity = .message_and_stack,
+ panic_ctx: StackContext = undefined,
+ panic_trace: ?*const std.builtin.StackTrace = null,
+ awaiting_dispatch: bool = false,
+ };
+
+ /// Counter for the number of threads currently panicking.
+ /// Updated atomically before taking the panic_mutex.
+ /// In recoverable cases, the program will not abort
+ /// until all panicking threads have dumped their traces.
+ var panicking: u8 = 0;
+
+ // Locked to avoid interleaving panic messages from multiple threads.
+ var panic_mutex = std.Thread.Mutex{};
+
+ /// Tracks the state of the current panic. If the code within the
+ /// panic triggers a secondary panic, this allows us to recover.
+ threadlocal var panic_state_raw: PanicState = .{};
+
+ /// The segfault handlers above need to do some work before they can dispatch
+ /// this switch. Calling preDispatch() first makes that work fault tolerant.
+ pub fn preDispatch() void {
+ // TODO: We want segfaults to trigger the panic recursively here,
+ // but if there is a segfault accessing this TLS slot it will cause an
+ // infinite loop. We should use `alarm()` to prevent the infinite
+ // loop and maybe also use a non-thread-local global to detect if
+ // it's happening and print a message.
+ var panic_state: *volatile PanicState = &panic_state_raw;
+ if (panic_state.awaiting_dispatch) {
+ dispatch(null, .{ .current = .{ .ret_addr = null } }, "Panic while preparing callstack");
+ }
+ panic_state.awaiting_dispatch = true;
+ }
+
+ /// This is the entry point to a panic-tolerant panic handler.
+ /// preDispatch() *MUST* be called exactly once before calling this.
+ /// A threadlocal "recover_stage" is updated throughout the process.
+ /// If a panic happens during the panic, the recover_stage will be
+ /// used to select a recover* function to call to resume the panic.
+ /// The recover_verbosity field is used to handle panics while reporting
+ /// panics within panics. If the panic handler triggers a panic, it will
+ /// attempt to log an additional stack trace for the secondary panic. If
+ /// that panics, it will fall back to just logging the panic message. If
+    /// it can't even do that without panicking, it will recover without logging
+ /// anything about the internal panic. Depending on the state, "recover"
+ /// here may just mean "call abort".
+ pub fn dispatch(
+ trace: ?*const std.builtin.StackTrace,
+ stack_ctx: StackContext,
+ msg: []const u8,
+ ) noreturn {
+ var panic_state: *volatile PanicState = &panic_state_raw;
+ debug.assert(panic_state.awaiting_dispatch);
+ panic_state.awaiting_dispatch = false;
+ nosuspend switch (panic_state.recover_stage) {
+ .initialize => goTo(initPanic, .{ panic_state, trace, stack_ctx, msg }),
+ .report_stack => goTo(recoverReportStack, .{ panic_state, trace, stack_ctx, msg }),
+ .release_mutex => goTo(recoverReleaseMutex, .{ panic_state, trace, stack_ctx, msg }),
+ .release_ref_count => goTo(recoverReleaseRefCount, .{ panic_state, trace, stack_ctx, msg }),
+ .abort => goTo(recoverAbort, .{ panic_state, trace, stack_ctx, msg }),
+ .silent_abort => goTo(abort, .{}),
+ };
+ }
+
+ noinline fn initPanic(
+ state: *volatile PanicState,
+ trace: ?*const std.builtin.StackTrace,
+ stack: StackContext,
+ msg: []const u8,
+ ) noreturn {
+ // use a temporary so there's only one volatile store
+ const new_state = PanicState{
+ .recover_stage = .abort,
+ .panic_ctx = stack,
+ .panic_trace = trace,
+ };
+ state.* = new_state;
+
+ _ = @atomicRmw(u8, &panicking, .Add, 1, .SeqCst);
+
+ state.recover_stage = .release_ref_count;
+
+ _ = panic_mutex.acquire();
+
+ state.recover_stage = .release_mutex;
+
+ const stderr = io.getStdErr().writer();
+ if (builtin.single_threaded) {
+ stderr.print("panic: ", .{}) catch goTo(releaseMutex, .{state});
+ } else {
+ const current_thread_id = std.Thread.getCurrentId();
+ stderr.print("thread {} panic: ", .{current_thread_id}) catch goTo(releaseMutex, .{state});
+ }
+ stderr.print("{s}\n", .{msg}) catch goTo(releaseMutex, .{state});
+
+ state.recover_stage = .report_stack;
+
+ dumpStatusReport() catch |err| {
+ stderr.print("\nIntercepted error.{} while dumping current state. Continuing...\n", .{err}) catch {};
+ };
+
+ goTo(reportStack, .{state});
+ }
+
+ noinline fn recoverReportStack(
+ state: *volatile PanicState,
+ trace: ?*const std.builtin.StackTrace,
+ stack: StackContext,
+ msg: []const u8,
+ ) noreturn {
+ recover(state, trace, stack, msg);
+
+ state.recover_stage = .release_mutex;
+ const stderr = io.getStdErr().writer();
+ stderr.writeAll("\nOriginal Error:\n") catch {};
+ goTo(reportStack, .{state});
+ }
+
+ noinline fn reportStack(state: *volatile PanicState) noreturn {
+ state.recover_stage = .release_mutex;
+
+ if (state.panic_trace) |t| {
+ debug.dumpStackTrace(t.*);
+ }
+ state.panic_ctx.dumpStackTrace();
+
+ goTo(releaseMutex, .{state});
+ }
+
+ noinline fn recoverReleaseMutex(
+ state: *volatile PanicState,
+ trace: ?*const std.builtin.StackTrace,
+ stack: StackContext,
+ msg: []const u8,
+ ) noreturn {
+ recover(state, trace, stack, msg);
+ goTo(releaseMutex, .{state});
+ }
+
+ noinline fn releaseMutex(state: *volatile PanicState) noreturn {
+ state.recover_stage = .abort;
+
+ panic_mutex.releaseDirect();
+
+ goTo(releaseRefCount, .{state});
+ }
+
+ noinline fn recoverReleaseRefCount(
+ state: *volatile PanicState,
+ trace: ?*const std.builtin.StackTrace,
+ stack: StackContext,
+ msg: []const u8,
+ ) noreturn {
+ recover(state, trace, stack, msg);
+ goTo(releaseRefCount, .{state});
+ }
+
+ noinline fn releaseRefCount(state: *volatile PanicState) noreturn {
+ state.recover_stage = .abort;
+
+ if (@atomicRmw(u8, &panicking, .Sub, 1, .SeqCst) != 1) {
+ // Another thread is panicking, wait for the last one to finish
+ // and call abort()
+
+ // Sleep forever without hammering the CPU
+ var event: std.Thread.StaticResetEvent = .{};
+ event.wait();
+ // This should be unreachable, recurse into recoverAbort.
+ @panic("event.wait() returned");
+ }
+
+ goTo(abort, .{});
+ }
+
+ noinline fn recoverAbort(
+ state: *volatile PanicState,
+ trace: ?*const std.builtin.StackTrace,
+ stack: StackContext,
+ msg: []const u8,
+ ) noreturn {
+ recover(state, trace, stack, msg);
+
+ state.recover_stage = .silent_abort;
+ const stderr = io.getStdErr().writer();
+ stderr.writeAll("Aborting...\n") catch {};
+ goTo(abort, .{});
+ }
+
+ noinline fn abort() noreturn {
+ os.abort();
+ }
+
+ inline fn goTo(comptime func: anytype, args: anytype) noreturn {
+ // TODO: Tailcall is broken right now, but eventually this should be used
+ // to avoid blowing up the stack. It's ok for now though, there are no
+ // cycles in the state machine so the max stack usage is bounded.
+ //@call(.{.modifier = .always_tail}, func, args);
+ @call(.{}, func, args);
+ }
+
+ fn recover(
+ state: *volatile PanicState,
+ trace: ?*const std.builtin.StackTrace,
+ stack: StackContext,
+ msg: []const u8,
+ ) void {
+ switch (state.recover_verbosity) {
+ .message_and_stack => {
+ // lower the verbosity, and restore it at the end if we don't panic.
+ state.recover_verbosity = .message_only;
+
+ const stderr = io.getStdErr().writer();
+ stderr.writeAll("\nPanicked during a panic: ") catch {};
+ stderr.writeAll(msg) catch {};
+ stderr.writeAll("\nInner panic stack:\n") catch {};
+ if (trace) |t| {
+ debug.dumpStackTrace(t.*);
+ }
+ stack.dumpStackTrace();
+
+ state.recover_verbosity = .message_and_stack;
+ },
+ .message_only => {
+ state.recover_verbosity = .silent;
+
+ const stderr = io.getStdErr().writer();
+ stderr.writeAll("\nPanicked while dumping inner panic stack: ") catch {};
+ stderr.writeAll(msg) catch {};
+ stderr.writeAll("\n") catch {};
+
+ // If we succeed, restore all the way to dumping the stack.
+ state.recover_verbosity = .message_and_stack;
+ },
+ .silent => {},
+ }
+ }
+};
diff --git a/src/libc_installation.zig b/src/libc_installation.zig
index 62174930f8..d54af71415 100644
--- a/src/libc_installation.zig
+++ b/src/libc_installation.zig
@@ -221,6 +221,7 @@ pub const LibCInstallation = struct {
batch.add(&async self.findNativeIncludeDirPosix(args));
switch (Target.current.os.tag) {
.freebsd, .netbsd, .openbsd, .dragonfly => self.crt_dir = try std.mem.dupeZ(args.allocator, u8, "/usr/lib"),
+ .solaris => self.crt_dir = try std.mem.dupeZ(args.allocator, u8, "/usr/lib/64"),
.linux => batch.add(&async self.findNativeCrtDirPosix(args)),
else => {},
}
diff --git a/src/libcxx.zig b/src/libcxx.zig
index bd7da15fc1..faebca0922 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -110,7 +110,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
- try c_source_files.ensureCapacity(libcxx_files.len);
+ try c_source_files.ensureTotalCapacity(libcxx_files.len);
for (libcxx_files) |cxx_src| {
var cflags = std.ArrayList([]const u8).init(arena);
@@ -250,7 +250,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
- try c_source_files.ensureCapacity(libcxxabi_files.len);
+ try c_source_files.ensureTotalCapacity(libcxxabi_files.len);
for (libcxxabi_files) |cxxabi_src| {
var cflags = std.ArrayList([]const u8).init(arena);
diff --git a/src/libtsan.zig b/src/libtsan.zig
index d02b3f7520..57f1f8c78e 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -34,7 +34,7 @@ pub fn buildTsan(comp: *Compilation) !void {
};
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
- try c_source_files.ensureCapacity(c_source_files.items.len + tsan_sources.len);
+ try c_source_files.ensureUnusedCapacity(tsan_sources.len);
const tsan_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{"tsan"});
for (tsan_sources) |tsan_src| {
@@ -58,7 +58,7 @@ pub fn buildTsan(comp: *Compilation) !void {
&darwin_tsan_sources
else
&unix_tsan_sources;
- try c_source_files.ensureCapacity(c_source_files.items.len + platform_tsan_sources.len);
+ try c_source_files.ensureUnusedCapacity(platform_tsan_sources.len);
for (platform_tsan_sources) |tsan_src| {
var cflags = std.ArrayList([]const u8).init(arena);
@@ -96,7 +96,7 @@ pub fn buildTsan(comp: *Compilation) !void {
});
}
- try c_source_files.ensureCapacity(c_source_files.items.len + sanitizer_common_sources.len);
+ try c_source_files.ensureUnusedCapacity(sanitizer_common_sources.len);
const sanitizer_common_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"tsan", "sanitizer_common",
});
@@ -123,7 +123,7 @@ pub fn buildTsan(comp: *Compilation) !void {
&sanitizer_libcdep_sources
else
&sanitizer_nolibc_sources;
- try c_source_files.ensureCapacity(c_source_files.items.len + to_c_or_not_to_c_sources.len);
+ try c_source_files.ensureUnusedCapacity(to_c_or_not_to_c_sources.len);
for (to_c_or_not_to_c_sources) |c_src| {
var cflags = std.ArrayList([]const u8).init(arena);
@@ -143,7 +143,7 @@ pub fn buildTsan(comp: *Compilation) !void {
});
}
- try c_source_files.ensureCapacity(c_source_files.items.len + sanitizer_symbolizer_sources.len);
+ try c_source_files.ensureUnusedCapacity(sanitizer_symbolizer_sources.len);
for (sanitizer_symbolizer_sources) |c_src| {
var cflags = std.ArrayList([]const u8).init(arena);
@@ -168,7 +168,7 @@ pub fn buildTsan(comp: *Compilation) !void {
&[_][]const u8{"interception"},
);
- try c_source_files.ensureCapacity(c_source_files.items.len + interception_sources.len);
+ try c_source_files.ensureUnusedCapacity(interception_sources.len);
for (interception_sources) |c_src| {
var cflags = std.ArrayList([]const u8).init(arena);
diff --git a/src/link.zig b/src/link.zig
index 88159496f4..fe233e060f 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -149,7 +149,7 @@ pub const File = struct {
coff: Coff.TextBlock,
macho: MachO.TextBlock,
plan9: Plan9.DeclBlock,
- c: C.DeclBlock,
+ c: void,
wasm: Wasm.DeclBlock,
spirv: void,
};
@@ -159,7 +159,7 @@ pub const File = struct {
coff: Coff.SrcFn,
macho: MachO.SrcFn,
plan9: void,
- c: C.FnBlock,
+ c: void,
wasm: Wasm.FnData,
spirv: SpirV.FnData,
};
@@ -245,6 +245,9 @@ pub const File = struct {
};
if (use_lld) {
+ // TODO this intermediary_basename isn't enough; in the case of `zig build-exe`,
+ // we also want to put the intermediary object file in the cache while the
+ // main emit directory is the cwd.
file.intermediary_basename = sub_path;
}
@@ -372,16 +375,18 @@ pub const File = struct {
/// Must be called before any call to updateDecl or updateDeclExports for
/// any given Decl.
+ /// TODO we're transitioning to deleting this function and instead having
+ /// each linker backend notice the first time updateDecl or updateFunc is called, or
+ /// a callee referenced from AIR.
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
log.debug("allocateDeclIndexes {*} ({s})", .{ decl, decl.name });
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
- .c => return @fieldParentPtr(C, "base", base).allocateDeclIndexes(decl),
.wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl),
.plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl),
- .spirv => {},
+ .c, .spirv => {},
}
}
@@ -635,7 +640,7 @@ pub const File = struct {
var object_files = std.ArrayList([*:0]const u8).init(base.allocator);
defer object_files.deinit();
- try object_files.ensureCapacity(base.options.objects.len + comp.c_object_table.count() + 2);
+ try object_files.ensureTotalCapacity(base.options.objects.len + comp.c_object_table.count() + 2);
for (base.options.objects) |obj_path| {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, obj_path));
}
diff --git a/src/link/C.zig b/src/link/C.zig
index 09f789f7d1..8689a6859a 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -21,30 +21,34 @@ base: link.File,
/// This linker backend does not try to incrementally link output C source code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function, stitching pre-rendered pieces of C code together.
-decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{},
+decl_table: std.AutoArrayHashMapUnmanaged(*const Module.Decl, DeclBlock) = .{},
+/// Stores Type/Value data for `typedefs` to reference.
+/// Accumulates allocations and then there is a periodic garbage collection after flush().
+arena: std.heap.ArenaAllocator,
/// Per-declaration data. For functions this is the body, and
/// the forward declaration is stored in the FnBlock.
-pub const DeclBlock = struct {
- code: std.ArrayListUnmanaged(u8),
+const DeclBlock = struct {
+ code: std.ArrayListUnmanaged(u8) = .{},
+ fwd_decl: std.ArrayListUnmanaged(u8) = .{},
+ /// Each Decl stores a mapping of Zig Types to corresponding C types, for every
+ /// Zig Type used by the Decl. In flush(), we iterate over each Decl
+ /// and emit the typedef code for all types, making sure to not emit the same thing twice.
+ /// Any arena memory the Type points to lives in the `arena` field of `C`.
+ typedefs: codegen.TypedefMap.Unmanaged = .{},
- pub const empty: DeclBlock = .{
- .code = .{},
- };
+ fn deinit(db: *DeclBlock, gpa: *Allocator) void {
+ db.code.deinit(gpa);
+ db.fwd_decl.deinit(gpa);
+ for (db.typedefs.values()) |typedef| {
+ gpa.free(typedef.rendered);
+ }
+ db.typedefs.deinit(gpa);
+ db.* = undefined;
+ }
};
-/// Per-function data.
-pub const FnBlock = struct {
- fwd_decl: std.ArrayListUnmanaged(u8),
- typedefs: codegen.TypedefMap.Unmanaged,
-
- pub const empty: FnBlock = .{
- .fwd_decl = .{},
- .typedefs = .{},
- };
-};
-
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*C {
+pub fn openPath(gpa: *Allocator, sub_path: []const u8, options: link.Options) !*C {
assert(options.object_format == .c);
if (options.use_llvm) return error.LLVMHasNoCBackend;
@@ -57,15 +61,16 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
});
errdefer file.close();
- var c_file = try allocator.create(C);
- errdefer allocator.destroy(c_file);
+ var c_file = try gpa.create(C);
+ errdefer gpa.destroy(c_file);
c_file.* = C{
+ .arena = std.heap.ArenaAllocator.init(gpa),
.base = .{
.tag = .c,
.options = options,
.file = file,
- .allocator = allocator,
+ .allocator = gpa,
},
};
@@ -73,38 +78,105 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
}
pub fn deinit(self: *C) void {
- for (self.decl_table.keys()) |key| {
- deinitDecl(self.base.allocator, key);
- }
- self.decl_table.deinit(self.base.allocator);
-}
+ const gpa = self.base.allocator;
-pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {
- _ = self;
- _ = decl;
+ for (self.decl_table.values()) |*db| {
+ db.deinit(gpa);
+ }
+ self.decl_table.deinit(gpa);
+
+ self.arena.deinit();
}
pub fn freeDecl(self: *C, decl: *Module.Decl) void {
- _ = self.decl_table.swapRemove(decl);
- deinitDecl(self.base.allocator, decl);
-}
-
-fn deinitDecl(gpa: *Allocator, decl: *Module.Decl) void {
- decl.link.c.code.deinit(gpa);
- decl.fn_link.c.fwd_decl.deinit(gpa);
- for (decl.fn_link.c.typedefs.values()) |value| {
- gpa.free(value.rendered);
+ const gpa = self.base.allocator;
+ if (self.decl_table.fetchSwapRemove(decl)) |*kv| {
+ kv.value.deinit(gpa);
}
- decl.fn_link.c.typedefs.deinit(gpa);
}
-pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air, liveness: Liveness) !void {
- // Keep track of all decls so we can iterate over them on flush().
- _ = try self.decl_table.getOrPut(self.base.allocator, decl);
+pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
- const fwd_decl = &decl.fn_link.c.fwd_decl;
- const typedefs = &decl.fn_link.c.typedefs;
- const code = &decl.link.c.code;
+ const decl = func.owner_decl;
+ const gop = try self.decl_table.getOrPut(self.base.allocator, decl);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{};
+ }
+ const fwd_decl = &gop.value_ptr.fwd_decl;
+ const typedefs = &gop.value_ptr.typedefs;
+ const code = &gop.value_ptr.code;
+ fwd_decl.shrinkRetainingCapacity(0);
+ {
+ for (typedefs.values()) |value| {
+ module.gpa.free(value.rendered);
+ }
+ }
+ typedefs.clearRetainingCapacity();
+ code.shrinkRetainingCapacity(0);
+
+ var function: codegen.Function = .{
+ .value_map = codegen.CValueMap.init(module.gpa),
+ .air = air,
+ .liveness = liveness,
+ .func = func,
+ .object = .{
+ .dg = .{
+ .gpa = module.gpa,
+ .module = module,
+ .error_msg = null,
+ .decl = decl,
+ .fwd_decl = fwd_decl.toManaged(module.gpa),
+ .typedefs = typedefs.promote(module.gpa),
+ .typedefs_arena = &self.arena.allocator,
+ },
+ .code = code.toManaged(module.gpa),
+ .indent_writer = undefined, // set later so we can get a pointer to object.code
+ },
+ };
+
+ function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() };
+ defer {
+ function.value_map.deinit();
+ function.blocks.deinit(module.gpa);
+ function.object.code.deinit();
+ function.object.dg.fwd_decl.deinit();
+ for (function.object.dg.typedefs.values()) |value| {
+ module.gpa.free(value.rendered);
+ }
+ function.object.dg.typedefs.deinit();
+ }
+
+ codegen.genFunc(&function) catch |err| switch (err) {
+ error.AnalysisFail => {
+ try module.failed_decls.put(module.gpa, decl, function.object.dg.error_msg.?);
+ return;
+ },
+ else => |e| return e,
+ };
+
+ fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged();
+ typedefs.* = function.object.dg.typedefs.unmanaged;
+ function.object.dg.typedefs.unmanaged = .{};
+ code.* = function.object.code.moveToUnmanaged();
+
+ // Free excess allocated memory for this Decl.
+ fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len);
+ code.shrinkAndFree(module.gpa, code.items.len);
+}
+
+pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const gop = try self.decl_table.getOrPut(self.base.allocator, decl);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{};
+ }
+ const fwd_decl = &gop.value_ptr.fwd_decl;
+ const typedefs = &gop.value_ptr.typedefs;
+ const code = &gop.value_ptr.code;
fwd_decl.shrinkRetainingCapacity(0);
{
for (typedefs.values()) |value| {
@@ -116,23 +188,19 @@ pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air,
var object: codegen.Object = .{
.dg = .{
+ .gpa = module.gpa,
.module = module,
.error_msg = null,
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promote(module.gpa),
+ .typedefs_arena = &self.arena.allocator,
},
- .gpa = module.gpa,
.code = code.toManaged(module.gpa),
- .value_map = codegen.CValueMap.init(module.gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
- .air = air,
- .liveness = liveness,
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
defer {
- object.value_map.deinit();
- object.blocks.deinit(module.gpa);
object.code.deinit();
object.dg.fwd_decl.deinit();
for (object.dg.typedefs.values()) |value| {
@@ -159,24 +227,12 @@ pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air,
code.shrinkAndFree(module.gpa, code.items.len);
}
-pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
- const tracy = trace(@src());
- defer tracy.end();
-
- return self.finishUpdateDecl(module, func.owner_decl, air, liveness);
-}
-
-pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
- const tracy = trace(@src());
- defer tracy.end();
-
- return self.finishUpdateDecl(module, decl, undefined, undefined);
-}
-
pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
// The C backend does not have the ability to fix line numbers without re-generating
// the entire Decl.
- return self.updateDecl(module, decl);
+ _ = self;
+ _ = module;
+ _ = decl;
}
pub fn flush(self: *C, comp: *Compilation) !void {
@@ -197,7 +253,7 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
defer all_buffers.deinit();
// This is at least enough until we get to the function bodies without error handling.
- try all_buffers.ensureCapacity(self.decl_table.count() + 2);
+ try all_buffers.ensureTotalCapacity(self.decl_table.count() + 2);
var file_size: u64 = zig_h.len;
all_buffers.appendAssumeCapacity(.{
@@ -223,32 +279,42 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
var typedefs = std.HashMap(Type, void, Type.HashContext64, std.hash_map.default_max_load_percentage).init(comp.gpa);
defer typedefs.deinit();
- // Typedefs, forward decls and non-functions first.
+ // Typedefs, forward decls, and non-functions first.
// TODO: performance investigation: would keeping a list of Decls that we should
// generate, rather than querying here, be faster?
- for (self.decl_table.keys()) |decl| {
- if (!decl.has_tv) continue;
- const buf = buf: {
- if (decl.val.castTag(.function)) |_| {
- try typedefs.ensureUnusedCapacity(@intCast(u32, decl.fn_link.c.typedefs.count()));
- var it = decl.fn_link.c.typedefs.iterator();
- while (it.next()) |new| {
- const gop = typedefs.getOrPutAssumeCapacity(new.key_ptr.*);
- if (!gop.found_existing) {
- try err_typedef_writer.writeAll(new.value_ptr.rendered);
- }
+ const decl_keys = self.decl_table.keys();
+ const decl_values = self.decl_table.values();
+ for (decl_keys) |decl, i| {
+ if (!decl.has_tv) continue; // TODO do we really need this branch?
+
+ const decl_block = &decl_values[i];
+
+ if (decl_block.fwd_decl.items.len != 0) {
+ try typedefs.ensureUnusedCapacity(@intCast(u32, decl_block.typedefs.count()));
+ var it = decl_block.typedefs.iterator();
+ while (it.next()) |new| {
+ const gop = typedefs.getOrPutAssumeCapacity(new.key_ptr.*);
+ if (!gop.found_existing) {
+ try err_typedef_writer.writeAll(new.value_ptr.rendered);
}
- fn_count += 1;
- break :buf decl.fn_link.c.fwd_decl.items;
- } else {
- break :buf decl.link.c.code.items;
}
- };
- all_buffers.appendAssumeCapacity(.{
- .iov_base = buf.ptr,
- .iov_len = buf.len,
- });
- file_size += buf.len;
+ const buf = decl_block.fwd_decl.items;
+ all_buffers.appendAssumeCapacity(.{
+ .iov_base = buf.ptr,
+ .iov_len = buf.len,
+ });
+ file_size += buf.len;
+ }
+ if (decl.getFunction() != null) {
+ fn_count += 1;
+ } else if (decl_block.code.items.len != 0) {
+ const buf = decl_block.code.items;
+ all_buffers.appendAssumeCapacity(.{
+ .iov_base = buf.ptr,
+ .iov_len = buf.len,
+ });
+ file_size += buf.len;
+ }
}
err_typedef_item.* = .{
@@ -258,16 +324,18 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
file_size += err_typedef_buf.items.len;
// Now the function bodies.
- try all_buffers.ensureCapacity(all_buffers.items.len + fn_count);
- for (self.decl_table.keys()) |decl| {
- if (!decl.has_tv) continue;
- if (decl.val.castTag(.function)) |_| {
- const buf = decl.link.c.code.items;
- all_buffers.appendAssumeCapacity(.{
- .iov_base = buf.ptr,
- .iov_len = buf.len,
- });
- file_size += buf.len;
+ try all_buffers.ensureUnusedCapacity(fn_count);
+ for (decl_keys) |decl, i| {
+ if (decl.getFunction() != null) {
+ const decl_block = &decl_values[i];
+ const buf = decl_block.code.items;
+ if (buf.len != 0) {
+ all_buffers.appendAssumeCapacity(.{
+ .iov_base = buf.ptr,
+ .iov_len = buf.len,
+ });
+ file_size += buf.len;
+ }
}
}
@@ -286,7 +354,7 @@ pub fn flushEmitH(module: *Module) !void {
var all_buffers = std.ArrayList(std.os.iovec_const).init(module.gpa);
defer all_buffers.deinit();
- try all_buffers.ensureCapacity(emit_h.decl_table.count() + 1);
+ try all_buffers.ensureTotalCapacity(emit_h.decl_table.count() + 1);
var file_size: u64 = zig_h.len;
all_buffers.appendAssumeCapacity(.{
diff --git a/src/link/C/zig.h b/src/link/C/zig.h
index f3fb02b840..72868e4400 100644
--- a/src/link/C/zig.h
+++ b/src/link/C/zig.h
@@ -62,14 +62,62 @@
#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
#include <stdatomic.h>
-#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) atomic_compare_exchange_strong_explicit(obj, expected, desired, succ, fail)
-#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) atomic_compare_exchange_weak_explicit(obj, expected, desired, succ, fail)
+#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail)
+#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail)
+#define zig_atomicrmw_xchg(obj, arg, order) atomic_exchange_explicit (obj, arg, order)
+#define zig_atomicrmw_add(obj, arg, order) atomic_fetch_add_explicit (obj, arg, order)
+#define zig_atomicrmw_sub(obj, arg, order) atomic_fetch_sub_explicit (obj, arg, order)
+#define zig_atomicrmw_or(obj, arg, order) atomic_fetch_or_explicit (obj, arg, order)
+#define zig_atomicrmw_xor(obj, arg, order) atomic_fetch_xor_explicit (obj, arg, order)
+#define zig_atomicrmw_and(obj, arg, order) atomic_fetch_and_explicit (obj, arg, order)
+#define zig_atomicrmw_nand(obj, arg, order) atomic_fetch_nand_explicit(obj, arg, order)
+#define zig_atomicrmw_min(obj, arg, order) atomic_fetch_min_explicit (obj, arg, order)
+#define zig_atomicrmw_max(obj, arg, order) atomic_fetch_max_explicit (obj, arg, order)
+#define zig_atomic_store(obj, arg, order) atomic_store_explicit (obj, arg, order)
+#define zig_atomic_load(obj, order) atomic_load_explicit (obj, order)
+#define zig_fence(order) atomic_thread_fence(order)
#elif __GNUC__
-#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __sync_val_compare_and_swap(obj, expected, desired)
-#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __sync_val_compare_and_swap(obj, expected, desired)
+#define memory_order_relaxed __ATOMIC_RELAXED
+#define memory_order_consume __ATOMIC_CONSUME
+#define memory_order_acquire __ATOMIC_ACQUIRE
+#define memory_order_release __ATOMIC_RELEASE
+#define memory_order_acq_rel __ATOMIC_ACQ_REL
+#define memory_order_seq_cst __ATOMIC_SEQ_CST
+#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail)
+#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail)
+#define zig_atomicrmw_xchg(obj, arg, order) __atomic_exchange_n(obj, arg, order)
+#define zig_atomicrmw_add(obj, arg, order) __atomic_fetch_add (obj, arg, order)
+#define zig_atomicrmw_sub(obj, arg, order) __atomic_fetch_sub (obj, arg, order)
+#define zig_atomicrmw_or(obj, arg, order) __atomic_fetch_or (obj, arg, order)
+#define zig_atomicrmw_xor(obj, arg, order) __atomic_fetch_xor (obj, arg, order)
+#define zig_atomicrmw_and(obj, arg, order) __atomic_fetch_and (obj, arg, order)
+#define zig_atomicrmw_nand(obj, arg, order) __atomic_fetch_nand(obj, arg, order)
+#define zig_atomicrmw_min(obj, arg, order) __atomic_fetch_min (obj, arg, order)
+#define zig_atomicrmw_max(obj, arg, order) __atomic_fetch_max (obj, arg, order)
+#define zig_atomic_store(obj, arg, order) __atomic_store_n (obj, arg, order)
+#define zig_atomic_load(obj, order) __atomic_load_n (obj, order)
+#define zig_fence(order) __atomic_thread_fence(order)
#else
+#define memory_order_relaxed 0
+#define memory_order_consume 1
+#define memory_order_acquire 2
+#define memory_order_release 3
+#define memory_order_acq_rel 4
+#define memory_order_seq_cst 5
#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) zig_unimplemented()
-#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) zig_unimplemented()
+#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) zig_unimplemented()
+#define zig_atomicrmw_xchg(obj, arg, order) zig_unimplemented()
+#define zig_atomicrmw_add(obj, arg, order) zig_unimplemented()
+#define zig_atomicrmw_sub(obj, arg, order) zig_unimplemented()
+#define zig_atomicrmw_or(obj, arg, order) zig_unimplemented()
+#define zig_atomicrmw_xor(obj, arg, order) zig_unimplemented()
+#define zig_atomicrmw_and(obj, arg, order) zig_unimplemented()
+#define zig_atomicrmw_nand(obj, arg, order) zig_unimplemented()
+#define zig_atomicrmw_min(obj, arg, order) zig_unimplemented()
+#define zig_atomicrmw_max(obj, arg, order) zig_unimplemented()
+#define zig_atomic_store(obj, arg, order) zig_unimplemented()
+#define zig_atomic_load(obj, order) zig_unimplemented()
+#define zig_fence(order) zig_unimplemented()
#endif
#include <stdint.h>
@@ -78,6 +126,7 @@
#define int128_t __int128
#define uint128_t unsigned __int128
ZIG_EXTERN_C void *memcpy (void *ZIG_RESTRICT, const void *ZIG_RESTRICT, size_t);
+ZIG_EXTERN_C void *memset (void *, int, size_t);
static inline uint8_t zig_addw_u8(uint8_t lhs, uint8_t rhs, uint8_t max) {
uint8_t thresh = max - rhs;
@@ -307,3 +356,97 @@ static inline long long zig_subw_longlong(long long lhs, long long rhs, long lon
return (long long)(((unsigned long long)lhs) - ((unsigned long long)rhs));
}
+#define zig_add_sat_u(ZT, T) static inline T zig_adds_##ZT(T x, T y, T max) { \
+ return (x > max - y) ? max : x + y; \
+}
+
+#define zig_add_sat_s(ZT, T, T2) static inline T zig_adds_##ZT(T2 x, T2 y, T2 min, T2 max) { \
+ T2 res = x + y; \
+ return (res < min) ? min : (res > max) ? max : res; \
+}
+
+zig_add_sat_u( u8, uint8_t)
+zig_add_sat_s( i8, int8_t, int16_t)
+zig_add_sat_u(u16, uint16_t)
+zig_add_sat_s(i16, int16_t, int32_t)
+zig_add_sat_u(u32, uint32_t)
+zig_add_sat_s(i32, int32_t, int64_t)
+zig_add_sat_u(u64, uint64_t)
+zig_add_sat_s(i64, int64_t, int128_t)
+zig_add_sat_s(isize, intptr_t, int128_t)
+zig_add_sat_s(short, short, int)
+zig_add_sat_s(int, int, long)
+zig_add_sat_s(long, long, long long)
+
+#define zig_sub_sat_u(ZT, T) static inline T zig_subs_##ZT(T x, T y, T max) { \
+ return (x > max + y) ? max : x - y; \
+}
+
+#define zig_sub_sat_s(ZT, T, T2) static inline T zig_subs_##ZT(T2 x, T2 y, T2 min, T2 max) { \
+ T2 res = x - y; \
+ return (res < min) ? min : (res > max) ? max : res; \
+}
+
+zig_sub_sat_u( u8, uint8_t)
+zig_sub_sat_s( i8, int8_t, int16_t)
+zig_sub_sat_u(u16, uint16_t)
+zig_sub_sat_s(i16, int16_t, int32_t)
+zig_sub_sat_u(u32, uint32_t)
+zig_sub_sat_s(i32, int32_t, int64_t)
+zig_sub_sat_u(u64, uint64_t)
+zig_sub_sat_s(i64, int64_t, int128_t)
+zig_sub_sat_s(isize, intptr_t, int128_t)
+zig_sub_sat_s(short, short, int)
+zig_sub_sat_s(int, int, long)
+zig_sub_sat_s(long, long, long long)
+
+
+#define zig_mul_sat_u(ZT, T, T2) static inline T zig_muls_##ZT(T2 x, T2 y, T2 max) { \
+ T2 res = x * y; \
+ return (res > max) ? max : res; \
+}
+
+#define zig_mul_sat_s(ZT, T, T2) static inline T zig_muls_##ZT(T2 x, T2 y, T2 min, T2 max) { \
+ T2 res = x * y; \
+ return (res < min) ? min : (res > max) ? max : res; \
+}
+
+zig_mul_sat_u(u8, uint8_t, uint16_t)
+zig_mul_sat_s(i8, int8_t, int16_t)
+zig_mul_sat_u(u16, uint16_t, uint32_t)
+zig_mul_sat_s(i16, int16_t, int32_t)
+zig_mul_sat_u(u32, uint32_t, uint64_t)
+zig_mul_sat_s(i32, int32_t, int64_t)
+zig_mul_sat_u(u64, uint64_t, uint128_t)
+zig_mul_sat_s(i64, int64_t, int128_t)
+zig_mul_sat_s(isize, intptr_t, int128_t)
+zig_mul_sat_s(short, short, int)
+zig_mul_sat_s(int, int, long)
+zig_mul_sat_s(long, long, long long)
+
+#define zig_shl_sat_u(ZT, T, bits) static inline T zig_shls_##ZT(T x, T y, T max) { \
+ if(x == 0) return 0; \
+ T bits_set = 64 - __builtin_clzll(x); \
+ return (bits_set + y > bits) ? max : x << y; \
+}
+
+#define zig_shl_sat_s(ZT, T, bits) static inline T zig_shls_##ZT(T x, T y, T min, T max) { \
+ if(x == 0) return 0; \
+ T x_twos_comp = x < 0 ? -x : x; \
+ T bits_set = 64 - __builtin_clzll(x_twos_comp); \
+ T min_or_max = (x < 0) ? min : max; \
+ return (y + bits_set > bits ) ? min_or_max : x << y; \
+}
+
+zig_shl_sat_u(u8, uint8_t, 8)
+zig_shl_sat_s(i8, int8_t, 7)
+zig_shl_sat_u(u16, uint16_t, 16)
+zig_shl_sat_s(i16, int16_t, 15)
+zig_shl_sat_u(u32, uint32_t, 32)
+zig_shl_sat_s(i32, int32_t, 31)
+zig_shl_sat_u(u64, uint64_t, 64)
+zig_shl_sat_s(i64, int64_t, 63)
+zig_shl_sat_s(isize, intptr_t, ((sizeof(intptr_t)) * CHAR_BIT - 1))
+zig_shl_sat_s(short, short, ((sizeof(short )) * CHAR_BIT - 1))
+zig_shl_sat_s(int, int, ((sizeof(int )) * CHAR_BIT - 1))
+zig_shl_sat_s(long, long, ((sizeof(long )) * CHAR_BIT - 1))
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 4f5df73f8d..fd009ca9f8 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -50,7 +50,7 @@ last_text_block: ?*TextBlock = null,
section_table_offset: u32 = 0,
/// Section data file pointer.
section_data_offset: u32 = 0,
-/// Optiona header file pointer.
+/// Optional header file pointer.
optional_header_offset: u32 = 0,
/// Absolute virtual address of the offset table when the executable is loaded in memory.
@@ -132,7 +132,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
const self = try createEmpty(allocator, options);
errdefer self.base.destroy();
- self.llvm_object = try LlvmObject.create(allocator, options);
+ self.llvm_object = try LlvmObject.create(allocator, sub_path, options);
return self;
}
@@ -418,7 +418,7 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Coff {
pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
if (self.llvm_object) |_| return;
- try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
+ try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.coff.offset_table_index = i;
@@ -602,7 +602,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
const current_virtual_size = mem.alignForwardGeneric(u32, self.offset_table_size, section_alignment);
const new_virtual_size = mem.alignForwardGeneric(u32, new_raw_size, section_alignment);
// If we had to move in the virtual address space, we need to fix the VAs in the offset table, as well as the virtual address of the `.text` section
- // and the virutal size of the `.got` section
+ // and the virtual size of the `.got` section
if (new_virtual_size != current_virtual_size) {
log.debug("growing offset table from virtual size {} to {}\n", .{ current_virtual_size, new_virtual_size });
@@ -770,7 +770,9 @@ fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []co
}
pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
- if (self.llvm_object) |_| return;
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
+ }
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
self.freeTextBlock(&decl.link.coff);
@@ -793,7 +795,7 @@ pub fn updateDeclExports(
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
- try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+ try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
@@ -804,7 +806,7 @@ pub fn updateDeclExports(
if (mem.eql(u8, exp.options.name, "_start")) {
self.entry_addr = decl.link.coff.getVAddr(self.*) - default_image_base;
} else {
- try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+ try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: Exports other than '_start'", .{}),
@@ -882,11 +884,8 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
- // Both stage1 and stage2 LLVM backend put the object file in the cache directory.
- if (self.base.options.use_llvm) {
- // Stage2 has to call flushModule since that outputs the LLVM object file.
- if (!build_options.is_stage1 or !self.base.options.use_stage1) try self.flushModule(comp);
-
+ const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
+ if (use_stage1) {
const obj_basename = try std.zig.binNameAlloc(arena, .{
.root_name = self.base.options.root_name,
.target = self.base.options.target,
@@ -981,7 +980,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
if (self.base.options.output_mode == .Obj) {
- // LLD's COFF driver does not support the equvialent of `-r` so we do a simple file copy
+ // LLD's COFF driver does not support the equivalent of `-r` so we do a simple file copy
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
@@ -1267,22 +1266,23 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
try argv.append(comp.libunwind_static_lib.?.full_object_path);
}
- // TODO: remove when stage2 can build compiler_rt.zig, c.zig and ssp.zig
- // compiler-rt, libc and libssp
- if (is_exe_or_dyn_lib and
- !self.base.options.skip_linker_dependencies and
- build_options.is_stage1 and self.base.options.use_stage1)
- {
+ if (is_exe_or_dyn_lib and !self.base.options.skip_linker_dependencies) {
if (!self.base.options.link_libc) {
- try argv.append(comp.libc_static_lib.?.full_object_path);
+ if (comp.libc_static_lib) |lib| {
+ try argv.append(lib.full_object_path);
+ }
}
// MinGW doesn't provide libssp symbols
if (target.abi.isGnu()) {
- try argv.append(comp.libssp_static_lib.?.full_object_path);
+ if (comp.libssp_static_lib) |lib| {
+ try argv.append(lib.full_object_path);
+ }
}
// MSVC compiler_rt is missing some stuff, so we build it unconditionally but
// and rely on weak linkage to allow MSVC compiler_rt functions to override ours.
- try argv.append(comp.compiler_rt_static_lib.?.full_object_path);
+ if (comp.compiler_rt_static_lib) |lib| {
+ try argv.append(lib.full_object_path);
+ }
}
try argv.ensureUnusedCapacity(self.base.options.system_libs.count());
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index f8cf70104f..a8efa8dab9 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -235,7 +235,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
const self = try createEmpty(allocator, options);
errdefer self.base.destroy();
- self.llvm_object = try LlvmObject.create(allocator, options);
+ self.llvm_object = try LlvmObject.create(allocator, sub_path, options);
return self;
}
@@ -411,7 +411,7 @@ fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u16) u64 {
/// TODO Improve this to use a table.
fn makeString(self: *Elf, bytes: []const u8) !u32 {
- try self.shstrtab.ensureCapacity(self.base.allocator, self.shstrtab.items.len + bytes.len + 1);
+ try self.shstrtab.ensureUnusedCapacity(self.base.allocator, bytes.len + 1);
const result = self.shstrtab.items.len;
self.shstrtab.appendSliceAssumeCapacity(bytes);
self.shstrtab.appendAssumeCapacity(0);
@@ -420,7 +420,7 @@ fn makeString(self: *Elf, bytes: []const u8) !u32 {
/// TODO Improve this to use a table.
fn makeDebugString(self: *Elf, bytes: []const u8) !u32 {
- try self.debug_strtab.ensureCapacity(self.base.allocator, self.debug_strtab.items.len + bytes.len + 1);
+ try self.debug_strtab.ensureUnusedCapacity(self.base.allocator, bytes.len + 1);
const result = self.debug_strtab.items.len;
self.debug_strtab.appendSliceAssumeCapacity(bytes);
self.debug_strtab.appendAssumeCapacity(0);
@@ -856,7 +856,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
// We have a function to compute the upper bound size, because it's needed
// for determining where to put the offset of the first `LinkBlock`.
- try di_buf.ensureCapacity(self.dbgInfoNeededHeaderBytes());
+ try di_buf.ensureTotalCapacity(self.dbgInfoNeededHeaderBytes());
// initial length - length of the .debug_info contribution for this compilation unit,
// not including the initial length itself.
@@ -925,7 +925,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
// Enough for all the data without resizing. When support for more compilation units
// is added, the size of this section will become more variable.
- try di_buf.ensureCapacity(100);
+ try di_buf.ensureTotalCapacity(100);
// initial length - length of the .debug_aranges contribution for this compilation unit,
// not including the initial length itself.
@@ -1004,7 +1004,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation) !void {
// The size of this header is variable, depending on the number of directories,
// files, and padding. We have a function to compute the upper bound size, however,
// because it's needed for determining where to put the offset of the first `SrcFn`.
- try di_buf.ensureCapacity(self.dbgLineNeededHeaderBytes());
+ try di_buf.ensureTotalCapacity(self.dbgLineNeededHeaderBytes());
// initial length - length of the .debug_line contribution for this compilation unit,
// not including the initial length itself.
@@ -1254,11 +1254,8 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
- // Both stage1 and stage2 LLVM backend put the object file in the cache directory.
- if (self.base.options.use_llvm) {
- // Stage2 has to call flushModule since that outputs the LLVM object file.
- if (!build_options.is_stage1 or !self.base.options.use_stage1) try self.flushModule(comp);
-
+ // stage1 puts the object file in the cache directory.
+ if (self.base.options.use_stage1) {
const obj_basename = try std.zig.binNameAlloc(arena, .{
.root_name = self.base.options.root_name,
.target = self.base.options.target,
@@ -1285,20 +1282,11 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
const gc_sections = self.base.options.gc_sections orelse !is_obj;
const stack_size = self.base.options.stack_size_override orelse 16777216;
const allow_shlib_undefined = self.base.options.allow_shlib_undefined orelse !self.base.options.is_native_os;
- const compiler_rt_path: ?[]const u8 = if (self.base.options.include_compiler_rt) blk: {
- // TODO: remove when stage2 can build compiler_rt.zig
- if (!build_options.is_stage1 or !self.base.options.use_stage1) break :blk null;
-
- // In the case of build-obj we include the compiler-rt symbols directly alongside
- // the symbols of the root source file, in the same compilation unit.
- if (is_obj) break :blk null;
-
- if (is_exe_or_dyn_lib) {
- break :blk comp.compiler_rt_static_lib.?.full_object_path;
- } else {
- break :blk comp.compiler_rt_obj.?.full_object_path;
- }
- } else null;
+ const compiler_rt_path: ?[]const u8 = blk: {
+ if (comp.compiler_rt_static_lib) |x| break :blk x.full_object_path;
+ if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
+ break :blk null;
+ };
// Here we want to determine whether we can save time by not invoking LLD when the
// output is unchanged. None of the linker options or the object files that are being
@@ -1621,14 +1609,13 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
}
// libc
- // TODO: enable when stage2 can build c.zig
if (is_exe_or_dyn_lib and
!self.base.options.skip_linker_dependencies and
- !self.base.options.link_libc and
- build_options.is_stage1 and
- self.base.options.use_stage1)
+ !self.base.options.link_libc)
{
- try argv.append(comp.libc_static_lib.?.full_object_path);
+ if (comp.libc_static_lib) |lib| {
+ try argv.append(lib.full_object_path);
+ }
}
// compiler-rt
@@ -1639,7 +1626,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
// Shared libraries.
if (is_exe_or_dyn_lib) {
const system_libs = self.base.options.system_libs.keys();
- try argv.ensureCapacity(argv.items.len + system_libs.len);
+ try argv.ensureUnusedCapacity(system_libs.len);
for (system_libs) |link_lib| {
// By this time, we depend on these libs being dynamically linked libraries and not static libraries
// (the check for that needs to be earlier), but they could be full paths to .so files, in which
@@ -2113,8 +2100,8 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
if (decl.link.elf.local_sym_index != 0) return;
- try self.local_symbols.ensureCapacity(self.base.allocator, self.local_symbols.items.len + 1);
- try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
+ try self.local_symbols.ensureUnusedCapacity(self.base.allocator, 1);
+ try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
if (self.local_symbol_free_list.popOrNull()) |i| {
log.debug("reusing symbol index {d} for {s}", .{ i, decl.name });
@@ -2147,7 +2134,9 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
}
pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
- if (self.llvm_object) |_| return;
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
+ }
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
self.freeTextBlock(&decl.link.elf);
@@ -2316,7 +2305,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs);
// For functions we need to add a prologue to the debug line program.
- try dbg_line_buffer.ensureCapacity(26);
+ try dbg_line_buffer.ensureTotalCapacity(26);
const decl = func.owner_decl;
const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
@@ -2351,7 +2340,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
// .debug_info subprogram
const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1];
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len);
+ try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);
const fn_ret_type = decl.ty.fnReturnType();
const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
@@ -2593,7 +2582,7 @@ fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !vo
},
.Int => {
const info = ty.intInfo(self.base.options.target);
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
+ try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(switch (info.signedness) {
@@ -2607,7 +2596,7 @@ fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !vo
},
.Optional => {
if (ty.isPtrLikeOptional()) {
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
+ try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
@@ -2747,14 +2736,14 @@ pub fn updateDeclExports(
const tracy = trace(@src());
defer tracy.end();
- try self.global_symbols.ensureCapacity(self.base.allocator, self.global_symbols.items.len + exports.len);
+ try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
if (decl.link.elf.local_sym_index == 0) return;
const decl_sym = self.local_symbols.items[decl.link.elf.local_sym_index];
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
- try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+ try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
@@ -2772,7 +2761,7 @@ pub fn updateDeclExports(
},
.Weak => elf.STB_WEAK,
.LinkOnce => {
- try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
+ try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}),
@@ -3448,6 +3437,15 @@ const CsuObjects = struct {
.static_pie => result.set( "start_dyn.o", "crti.o", "crtbeginS.o", "crtendS.o", "crtn.o" ),
// zig fmt: on
},
+ .solaris => switch (mode) {
+ // zig fmt: off
+ .dynamic_lib => result.set( null, "crti.o", null, null, "crtn.o" ),
+ .dynamic_exe,
+ .dynamic_pie => result.set( "crt1.o", "crti.o", null, null, "crtn.o" ),
+ .static_exe,
+ .static_pie => result.set( null, null, null, null, null ),
+ // zig fmt: on
+ },
else => {},
}
}
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index ce00c85dea..9a2c462d55 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -12,7 +12,7 @@ const math = std.math;
const mem = std.mem;
const meta = std.meta;
-const aarch64 = @import("../codegen/aarch64.zig");
+const aarch64 = @import("../arch/aarch64/bits.zig");
const bind = @import("MachO/bind.zig");
const codegen = @import("../codegen.zig");
const commands = @import("MachO/commands.zig");
@@ -200,7 +200,7 @@ atoms: std.AutoHashMapUnmanaged(MatchingSection, *Atom) = .{},
/// List of atoms that are owned directly by the linker.
/// Currently these are only atoms that are the result of linking
-/// object files. Atoms which take part in incremental linking are
+/// object files. Atoms which take part in incremental linking are
/// at present owned by Module.Decl.
/// TODO consolidate this.
managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
@@ -213,7 +213,7 @@ decls: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{},
/// Currently active Module.Decl.
/// TODO this might not be necessary if we figure out how to pass Module.Decl instance
-/// to codegen.genSetReg() or alterntively move PIE displacement for MCValue{ .memory = x }
+/// to codegen.genSetReg() or alternatively move PIE displacement for MCValue{ .memory = x }
/// somewhere else in the codegen.
active_decl: ?*Module.Decl = null,
@@ -231,7 +231,7 @@ const SymbolWithLoc = struct {
},
where_index: u32,
local_sym_index: u32 = 0,
- file: u16 = 0,
+ file: ?u16 = null, // null means Zig module
};
pub const GotIndirectionKey = struct {
@@ -290,7 +290,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
const self = try createEmpty(allocator, options);
errdefer self.base.destroy();
- self.llvm_object = try LlvmObject.create(allocator, options);
+ self.llvm_object = try LlvmObject.create(allocator, sub_path, options);
return self;
}
@@ -512,7 +512,7 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
if (self.base.options.output_mode == .Obj) {
- // LLD's MachO driver does not support the equvialent of `-r` so we do a simple file copy
+ // LLD's MachO driver does not support the equivalent of `-r` so we do a simple file copy
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
@@ -543,9 +543,6 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
.mode = link.determineMode(self.base.options),
});
try self.populateMissingMetadata();
-
- // TODO mimicking insertion of null symbol from incremental linker.
- // This will need to moved.
try self.locals.append(self.base.allocator, .{
.n_strx = 0,
.n_type = macho.N_UNDF,
@@ -557,13 +554,56 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
}
if (needs_full_relink) {
+ for (self.objects.items) |*object| {
+ object.free(self.base.allocator, self);
+ object.deinit(self.base.allocator);
+ }
self.objects.clearRetainingCapacity();
+
+ for (self.archives.items) |*archive| {
+ archive.deinit(self.base.allocator);
+ }
self.archives.clearRetainingCapacity();
+
+ for (self.dylibs.items) |*dylib| {
+ dylib.deinit(self.base.allocator);
+ }
self.dylibs.clearRetainingCapacity();
self.dylibs_map.clearRetainingCapacity();
self.referenced_dylibs.clearRetainingCapacity();
- // TODO figure out how to clear atoms from objects, etc.
+ {
+ var to_remove = std.ArrayList(u32).init(self.base.allocator);
+ defer to_remove.deinit();
+ var it = self.symbol_resolver.iterator();
+ while (it.next()) |entry| {
+ const key = entry.key_ptr.*;
+ const value = entry.value_ptr.*;
+ if (value.file != null) {
+ try to_remove.append(key);
+ }
+ }
+
+ for (to_remove.items) |key| {
+ if (self.symbol_resolver.fetchRemove(key)) |entry| {
+ const resolv = entry.value;
+ switch (resolv.where) {
+ .global => {
+ self.globals_free_list.append(self.base.allocator, resolv.where_index) catch {};
+ const sym = &self.globals.items[resolv.where_index];
+ sym.n_strx = 0;
+ sym.n_type = 0;
+ sym.n_value = 0;
+ },
+ .undef => {
+ const sym = &self.undefs.items[resolv.where_index];
+ sym.n_strx = 0;
+ sym.n_desc = 0;
+ },
+ }
+ }
+ }
+ }
// Positional arguments to the linker such as object files and static archives.
var positionals = std.ArrayList([]const u8).init(arena);
@@ -802,13 +842,35 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
try self.createDsoHandleAtom();
try self.addCodeSignatureLC();
+ // log.warn("locals:", .{});
+ // for (self.locals.items) |sym, id| {
+ // log.warn(" {d}: {s}: {}", .{ id, self.getString(sym.n_strx), sym });
+ // }
+ // log.warn("globals:", .{});
+ // for (self.globals.items) |sym, id| {
+ // log.warn(" {d}: {s}: {}", .{ id, self.getString(sym.n_strx), sym });
+ // }
+ // log.warn("undefs:", .{});
+ // for (self.undefs.items) |sym, id| {
+ // log.warn(" {d}: {s}: {}", .{ id, self.getString(sym.n_strx), sym });
+ // }
+ // {
+ // log.warn("resolver:", .{});
+ // var it = self.symbol_resolver.iterator();
+ // while (it.next()) |entry| {
+ // log.warn(" {s} => {}", .{ self.getString(entry.key_ptr.*), entry.value_ptr.* });
+ // }
+ // }
+
for (self.unresolved.keys()) |index| {
const sym = self.undefs.items[index];
const sym_name = self.getString(sym.n_strx);
const resolv = self.symbol_resolver.get(sym.n_strx) orelse unreachable;
log.err("undefined reference to symbol '{s}'", .{sym_name});
- log.err(" first referenced in '{s}'", .{self.objects.items[resolv.file].name});
+ if (resolv.file) |file| {
+ log.err(" first referenced in '{s}'", .{self.objects.items[file].name});
+ }
}
if (self.unresolved.count() > 0) {
return error.UndefinedSymbolReference;
@@ -1807,7 +1869,7 @@ fn writeAtoms(self: *MachO) !void {
pub fn createGotAtom(self: *MachO, key: GotIndirectionKey) !*Atom {
const local_sym_index = @intCast(u32, self.locals.items.len);
try self.locals.append(self.base.allocator, .{
- .n_strx = try self.makeString("l_zld_got_entry"),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = 0,
.n_desc = 0,
@@ -1845,7 +1907,7 @@ fn createDyldPrivateAtom(self: *MachO) !void {
const local_sym_index = @intCast(u32, self.locals.items.len);
const sym = try self.locals.addOne(self.base.allocator);
sym.* = .{
- .n_strx = try self.makeString("l_zld_dyld_private"),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = 0,
.n_desc = 0,
@@ -1879,7 +1941,7 @@ fn createStubHelperPreambleAtom(self: *MachO) !void {
const local_sym_index = @intCast(u32, self.locals.items.len);
const sym = try self.locals.addOne(self.base.allocator);
sym.* = .{
- .n_strx = try self.makeString("l_zld_stub_preamble"),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = 0,
.n_desc = 0,
@@ -2021,7 +2083,7 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
};
const local_sym_index = @intCast(u32, self.locals.items.len);
try self.locals.append(self.base.allocator, .{
- .n_strx = try self.makeString("l_zld_stub_in_stub_helper"),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = 0,
.n_desc = 0,
@@ -2076,7 +2138,7 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, lazy_binding_sym_index: u32) !*Atom {
const local_sym_index = @intCast(u32, self.locals.items.len);
try self.locals.append(self.base.allocator, .{
- .n_strx = try self.makeString("l_zld_lazy_ptr"),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = 0,
.n_desc = 0,
@@ -2117,7 +2179,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
};
const local_sym_index = @intCast(u32, self.locals.items.len);
try self.locals.append(self.base.allocator, .{
- .n_strx = try self.makeString("l_zld_stub"),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = 0,
.n_desc = 0,
@@ -2183,7 +2245,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
fn createTentativeDefAtoms(self: *MachO) !void {
if (self.tentatives.count() == 0) return;
// Convert any tentative definition into a regular symbol and allocate
- // text blocks for each tentative defintion.
+ // text blocks for each tentative definition.
while (self.tentatives.popOrNull()) |entry| {
const match = MatchingSection{
.seg = self.data_segment_cmd_index.?,
@@ -2349,7 +2411,9 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void {
!(symbolIsWeakDef(global.*) or symbolIsPext(global.*)))
{
log.err("symbol '{s}' defined multiple times", .{sym_name});
- log.err(" first definition in '{s}'", .{self.objects.items[resolv.file].name});
+ if (resolv.file) |file| {
+ log.err(" first definition in '{s}'", .{self.objects.items[file].name});
+ }
log.err(" next definition in '{s}'", .{object.name});
return error.MultipleSymbolDefinitions;
} else if (symbolIsWeakDef(sym) or symbolIsPext(sym)) continue; // Current symbol is weak, so skip it.
@@ -2632,10 +2696,10 @@ fn parseObjectsIntoAtoms(self: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- var parsed_atoms = Object.ParsedAtoms.init(self.base.allocator);
+ var parsed_atoms = std.AutoArrayHashMap(MatchingSection, *Atom).init(self.base.allocator);
defer parsed_atoms.deinit();
- var first_atoms = Object.ParsedAtoms.init(self.base.allocator);
+ var first_atoms = std.AutoArrayHashMap(MatchingSection, *Atom).init(self.base.allocator);
defer first_atoms.deinit();
var section_metadata = std.AutoHashMap(MatchingSection, struct {
@@ -2644,13 +2708,12 @@ fn parseObjectsIntoAtoms(self: *MachO) !void {
}).init(self.base.allocator);
defer section_metadata.deinit();
- for (self.objects.items) |*object, object_id| {
+ for (self.objects.items) |*object| {
if (object.analyzed) continue;
- var atoms_in_objects = try object.parseIntoAtoms(self.base.allocator, @intCast(u16, object_id), self);
- defer atoms_in_objects.deinit();
+ try object.parseIntoAtoms(self.base.allocator, self);
- var it = atoms_in_objects.iterator();
+ var it = object.end_atoms.iterator();
while (it.next()) |entry| {
const match = entry.key_ptr.*;
const last_atom = entry.value_ptr.*;
@@ -3292,8 +3355,6 @@ pub fn updateDeclExports(
decl: *Module.Decl,
exports: []const *Module.Export,
) !void {
- // TODO If we are exporting with global linkage, check for already defined globals and flag
- // symbol duplicate/collision!
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@@ -3303,7 +3364,7 @@ pub fn updateDeclExports(
const tracy = trace(@src());
defer tracy.end();
- try self.globals.ensureCapacity(self.base.allocator, self.globals.items.len + exports.len);
+ try self.globals.ensureUnusedCapacity(self.base.allocator, exports.len);
if (decl.link.macho.local_sym_index == 0) return;
const decl_sym = &self.locals.items[decl.link.macho.local_sym_index];
@@ -3313,15 +3374,76 @@ pub fn updateDeclExports(
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, "__text")) {
- try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
- module.failed_exports.putAssumeCapacityNoClobber(
+ try module.failed_exports.putNoClobber(
+ module.gpa,
exp,
- try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
+ try Module.ErrorMsg.create(
+ self.base.allocator,
+ decl.srcLoc(),
+ "Unimplemented: ExportOptions.section",
+ .{},
+ ),
);
continue;
}
}
+ if (exp.options.linkage == .LinkOnce) {
+ try module.failed_exports.putNoClobber(
+ module.gpa,
+ exp,
+ try Module.ErrorMsg.create(
+ self.base.allocator,
+ decl.srcLoc(),
+ "Unimplemented: GlobalLinkage.LinkOnce",
+ .{},
+ ),
+ );
+ continue;
+ }
+
+ const is_weak = exp.options.linkage == .Internal or exp.options.linkage == .Weak;
+ const n_strx = try self.makeString(exp_name);
+ if (self.symbol_resolver.getPtr(n_strx)) |resolv| {
+ switch (resolv.where) {
+ .global => {
+ if (resolv.local_sym_index == decl.link.macho.local_sym_index) continue;
+
+ const sym = &self.globals.items[resolv.where_index];
+
+ if (symbolIsTentative(sym.*)) {
+ _ = self.tentatives.fetchSwapRemove(resolv.where_index);
+ } else if (!is_weak and !(symbolIsWeakDef(sym.*) or symbolIsPext(sym.*))) {
+ _ = try module.failed_exports.put(
+ module.gpa,
+ exp,
+ try Module.ErrorMsg.create(
+ self.base.allocator,
+ decl.srcLoc(),
+ \\LinkError: symbol '{s}' defined multiple times
+ \\ first definition in '{s}'
+ ,
+ .{ exp_name, self.objects.items[resolv.file.?].name },
+ ),
+ );
+ continue;
+ } else if (is_weak) continue; // Current symbol is weak, so skip it.
+
+ // Otherwise, update the resolver and the global symbol.
+ sym.n_type = macho.N_SECT | macho.N_EXT;
+ resolv.local_sym_index = decl.link.macho.local_sym_index;
+ resolv.file = null;
+ exp.link.macho.sym_index = resolv.where_index;
+
+ continue;
+ },
+ .undef => {
+ _ = self.unresolved.fetchSwapRemove(resolv.where_index);
+ _ = self.symbol_resolver.remove(n_strx);
+ },
+ }
+ }
+
var n_type: u8 = macho.N_SECT | macho.N_EXT;
var n_desc: u16 = 0;
@@ -3339,14 +3461,7 @@ pub fn updateDeclExports(
// Symbol's n_type is like for a symbol with strong linkage.
n_desc |= macho.N_WEAK_DEF;
},
- .LinkOnce => {
- try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
- module.failed_exports.putAssumeCapacityNoClobber(
- exp,
- try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}),
- );
- continue;
- },
+ else => unreachable,
}
const global_sym_index = if (exp.link.macho.sym_index) |i| i else blk: {
@@ -3356,8 +3471,6 @@ pub fn updateDeclExports(
};
break :blk i;
};
-
- const n_strx = try self.makeString(exp_name);
const sym = &self.globals.items[global_sym_index];
sym.* = .{
.n_strx = try self.makeString(exp_name),
@@ -3368,12 +3481,11 @@ pub fn updateDeclExports(
};
exp.link.macho.sym_index = global_sym_index;
- const resolv = try self.symbol_resolver.getOrPut(self.base.allocator, n_strx);
- resolv.value_ptr.* = .{
+ try self.symbol_resolver.putNoClobber(self.base.allocator, n_strx, .{
.where = .global,
.where_index = global_sym_index,
.local_sym_index = decl.link.macho.local_sym_index,
- };
+ });
}
}
@@ -3381,11 +3493,17 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
const sym_index = exp.sym_index orelse return;
self.globals_free_list.append(self.base.allocator, sym_index) catch {};
const global = &self.globals.items[sym_index];
- global.n_type = 0;
+ log.debug("deleting export '{s}': {}", .{ self.getString(global.n_strx), global });
assert(self.symbol_resolver.remove(global.n_strx));
+ global.n_type = 0;
+ global.n_strx = 0;
+ global.n_value = 0;
}
pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
+ }
log.debug("freeDecl {*}", .{decl});
_ = self.decls.swapRemove(decl);
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
@@ -3970,6 +4088,70 @@ fn findFreeSpace(self: MachO, segment_id: u16, alignment: u64, start: ?u64) u64
return mem.alignForwardGeneric(u64, final_off, alignment);
}
+fn growSegment(self: *MachO, seg_id: u16, new_size: u64) !void {
+ const seg = &self.load_commands.items[seg_id].Segment;
+ const new_seg_size = mem.alignForwardGeneric(u64, new_size, self.page_size);
+ assert(new_seg_size > seg.inner.filesize);
+ const offset_amt = new_seg_size - seg.inner.filesize;
+ log.debug("growing segment {s} from 0x{x} to 0x{x}", .{ seg.inner.segname, seg.inner.filesize, new_seg_size });
+ seg.inner.filesize = new_seg_size;
+ seg.inner.vmsize = new_seg_size;
+
+ log.debug(" (new segment file offsets from 0x{x} to 0x{x} (in memory 0x{x} to 0x{x}))", .{
+ seg.inner.fileoff,
+ seg.inner.fileoff + seg.inner.filesize,
+ seg.inner.vmaddr,
+ seg.inner.vmaddr + seg.inner.vmsize,
+ });
+
+ // TODO We should probably fill the newly expanded region with NOPs (or zeroes).
+
+ // TODO copyRangeAll doesn't automatically extend the file on macOS.
+ const ledit_seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
+ const new_filesize = offset_amt + ledit_seg.inner.fileoff + ledit_seg.inner.filesize;
+ try self.base.file.?.pwriteAll(&[_]u8{0}, new_filesize - 1);
+
+ var next: usize = seg_id + 1;
+ while (next < self.linkedit_segment_cmd_index.? + 1) : (next += 1) {
+ const next_seg = &self.load_commands.items[next].Segment;
+ _ = try self.base.file.?.copyRangeAll(
+ next_seg.inner.fileoff,
+ self.base.file.?,
+ next_seg.inner.fileoff + offset_amt,
+ next_seg.inner.filesize,
+ );
+ next_seg.inner.fileoff += offset_amt;
+ next_seg.inner.vmaddr += offset_amt;
+
+ log.debug(" (new {s} segment file offsets from 0x{x} to 0x{x} (in memory 0x{x} to 0x{x}))", .{
+ next_seg.inner.segname,
+ next_seg.inner.fileoff,
+ next_seg.inner.fileoff + next_seg.inner.filesize,
+ next_seg.inner.vmaddr,
+ next_seg.inner.vmaddr + next_seg.inner.vmsize,
+ });
+
+ for (next_seg.sections.items) |*moved_sect, moved_sect_id| {
+ moved_sect.offset += @intCast(u32, offset_amt);
+ moved_sect.addr += offset_amt;
+
+ log.debug(" (new {s},{s} file offsets from 0x{x} to 0x{x} (in memory 0x{x} to 0x{x}))", .{
+ commands.segmentName(moved_sect.*),
+ commands.sectionName(moved_sect.*),
+ moved_sect.offset,
+ moved_sect.offset + moved_sect.size,
+ moved_sect.addr,
+ moved_sect.addr + moved_sect.size,
+ });
+
+ try self.allocateLocalSymbols(.{
+ .seg = @intCast(u16, next),
+ .sect = @intCast(u16, moved_sect_id),
+ }, @intCast(i64, offset_amt));
+ }
+ }
+}
+
fn growSection(self: *MachO, match: MatchingSection, new_size: u32) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -3983,7 +4165,14 @@ fn growSection(self: *MachO, match: MatchingSection, new_size: u32) !void {
const needed_size = mem.alignForwardGeneric(u32, ideal_size, alignment);
if (needed_size > max_size) blk: {
- log.debug(" (need to grow!)", .{});
+ log.debug(" (need to grow! needed 0x{x}, max 0x{x})", .{ needed_size, max_size });
+
+ if (match.sect == seg.sections.items.len - 1) {
+ // This is the last section in the segment, so no later sections need to be shifted; just grow the segment itself.
+ try self.growSegment(match.seg, seg.inner.filesize + needed_size - max_size);
+ break :blk;
+ }
+
// Need to move all sections below in file and address spaces.
const offset_amt = offset: {
const max_alignment = try self.getSectionMaxAlignment(match.seg, match.sect + 1);
@@ -3999,70 +4188,10 @@ fn growSection(self: *MachO, match: MatchingSection, new_size: u32) !void {
if (last_sect_off + offset_amt > seg_off) {
// Need to grow segment first.
- log.debug(" (need to grow segment first)", .{});
const spill_size = (last_sect_off + offset_amt) - seg_off;
- const seg_offset_amt = mem.alignForwardGeneric(u64, spill_size, self.page_size);
- seg.inner.filesize += seg_offset_amt;
- seg.inner.vmsize += seg_offset_amt;
-
- log.debug(" (new {s} segment file offsets from 0x{x} to 0x{x} (in memory 0x{x} to 0x{x}))", .{
- seg.inner.segname,
- seg.inner.fileoff,
- seg.inner.fileoff + seg.inner.filesize,
- seg.inner.vmaddr,
- seg.inner.vmaddr + seg.inner.vmsize,
- });
-
- // TODO We should probably nop the expanded by distance, or put 0s.
-
- // TODO copyRangeAll doesn't automatically extend the file on macOS.
- const ledit_seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
- const new_filesize = seg_offset_amt + ledit_seg.inner.fileoff + ledit_seg.inner.filesize;
- try self.base.file.?.pwriteAll(&[_]u8{0}, new_filesize - 1);
-
- var next: usize = match.seg + 1;
- while (next < self.linkedit_segment_cmd_index.? + 1) : (next += 1) {
- const next_seg = &self.load_commands.items[next].Segment;
- _ = try self.base.file.?.copyRangeAll(
- next_seg.inner.fileoff,
- self.base.file.?,
- next_seg.inner.fileoff + seg_offset_amt,
- next_seg.inner.filesize,
- );
- next_seg.inner.fileoff += seg_offset_amt;
- next_seg.inner.vmaddr += seg_offset_amt;
-
- log.debug(" (new {s} segment file offsets from 0x{x} to 0x{x} (in memory 0x{x} to 0x{x}))", .{
- next_seg.inner.segname,
- next_seg.inner.fileoff,
- next_seg.inner.fileoff + next_seg.inner.filesize,
- next_seg.inner.vmaddr,
- next_seg.inner.vmaddr + next_seg.inner.vmsize,
- });
-
- for (next_seg.sections.items) |*moved_sect, moved_sect_id| {
- moved_sect.offset += @intCast(u32, seg_offset_amt);
- moved_sect.addr += seg_offset_amt;
-
- log.debug(" (new {s},{s} file offsets from 0x{x} to 0x{x} (in memory 0x{x} to 0x{x}))", .{
- commands.segmentName(moved_sect.*),
- commands.sectionName(moved_sect.*),
- moved_sect.offset,
- moved_sect.offset + moved_sect.size,
- moved_sect.addr,
- moved_sect.addr + moved_sect.size,
- });
-
- try self.allocateLocalSymbols(.{
- .seg = @intCast(u16, next),
- .sect = @intCast(u16, moved_sect_id),
- }, @intCast(i64, seg_offset_amt));
- }
- }
+ try self.growSegment(match.seg, seg.inner.filesize + spill_size);
}
- if (match.sect + 1 >= seg.sections.items.len) break :blk;
-
// We have enough space to expand within the segment, so move all sections by
// the required amount and update their header offsets.
const next_sect = seg.sections.items[match.sect + 1];
@@ -4228,20 +4357,34 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64, m
return vaddr;
}
-pub fn addExternFn(self: *MachO, name: []const u8) !u32 {
+const AddExternFnRes = struct {
+ where: enum {
+ local,
+ undef,
+ },
+ where_index: u32,
+};
+
+pub fn addExternFn(self: *MachO, name: []const u8) !AddExternFnRes {
const sym_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{name});
defer self.base.allocator.free(sym_name);
+ const n_strx = try self.makeString(sym_name);
- if (self.strtab_dir.getKeyAdapted(@as([]const u8, sym_name), StringIndexAdapter{
- .bytes = &self.strtab,
- })) |n_strx| {
- const resolv = self.symbol_resolver.get(n_strx) orelse unreachable;
- return resolv.where_index;
+ if (self.symbol_resolver.get(n_strx)) |resolv| {
+ return switch (resolv.where) {
+ .global => AddExternFnRes{
+ .where = .local,
+ .where_index = resolv.local_sym_index,
+ },
+ .undef => AddExternFnRes{
+ .where = .undef,
+ .where_index = resolv.where_index,
+ },
+ };
}
log.debug("adding new extern function '{s}'", .{sym_name});
const sym_index = @intCast(u32, self.undefs.items.len);
- const n_strx = try self.makeString(sym_name);
try self.undefs.append(self.base.allocator, .{
.n_strx = n_strx,
.n_type = macho.N_UNDF,
@@ -4255,7 +4398,10 @@ pub fn addExternFn(self: *MachO, name: []const u8) !u32 {
});
try self.unresolved.putNoClobber(self.base.allocator, sym_index, .stub);
- return sym_index;
+ return AddExternFnRes{
+ .where = .undef,
+ .where_index = sym_index,
+ };
}
const NextSegmentAddressAndOffset = struct {
@@ -4386,6 +4532,7 @@ fn writeDyldInfoData(self: *MachO) !void {
const base_address = text_segment.inner.vmaddr;
for (self.globals.items) |sym| {
+ if (sym.n_type == 0) continue;
const sym_name = self.getString(sym.n_strx);
log.debug(" (putting '{s}' defined at 0x{x})", .{ sym_name, sym.n_value });
@@ -4462,7 +4609,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, buffer: []const u8) !void {
// Because we insert lazy binding opcodes in reverse order (from last to the first atom),
// we need reverse the order of atom traversal here as well.
- // TODO figure out a less error prone mechanims for this!
+ // TODO figure out a less error prone mechanism for this!
var atom = last_atom;
while (atom.prev) |prev| {
atom = prev;
@@ -4608,7 +4755,12 @@ fn writeSymbolTable(self: *MachO) !void {
var locals = std.ArrayList(macho.nlist_64).init(self.base.allocator);
defer locals.deinit();
- try locals.appendSlice(self.locals.items);
+
+ for (self.locals.items) |sym| {
+ if (sym.n_strx == 0) continue;
+ if (symbolIsTemp(sym, self.getString(sym.n_strx))) continue;
+ try locals.append(sym);
+ }
if (self.has_stabs) {
for (self.objects.items) |object| {
@@ -4638,7 +4790,7 @@ fn writeSymbolTable(self: *MachO) !void {
.n_value = object.mtime orelse 0,
});
- for (object.atoms.items) |atom| {
+ for (object.contained_atoms.items) |atom| {
if (atom.stab) |stab| {
const nlists = try stab.asNlists(atom.local_sym_index, self);
defer self.base.allocator.free(nlists);
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index 298855934e..a98f624176 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -2,7 +2,7 @@ const Atom = @This();
const std = @import("std");
const build_options = @import("build_options");
-const aarch64 = @import("../../codegen/aarch64.zig");
+const aarch64 = @import("../../arch/aarch64/bits.zig");
const assert = std.debug.assert;
const commands = @import("commands.zig");
const log = std.log.scoped(.text_block);
@@ -41,7 +41,7 @@ code: std.ArrayListUnmanaged(u8) = .{},
size: u64,
/// Alignment of this atom as a power of 2.
-/// For instance, aligmment of 0 should be read as 2^0 = 1 byte aligned.
+/// For instance, alignment of 0 should be read as 2^0 = 1 byte aligned.
alignment: u32,
/// List of relocations belonging to this atom.
@@ -645,7 +645,6 @@ const RelocContext = struct {
allocator: *Allocator,
object: *Object,
macho_file: *MachO,
- parsed_atoms: *Object.ParsedAtoms,
};
fn initRelocFromObject(rel: macho.relocation_info, context: RelocContext) !Relocation {
@@ -664,15 +663,8 @@ fn initRelocFromObject(rel: macho.relocation_info, context: RelocContext) !Reloc
const sect = seg.sections.items[sect_id];
const match = (try context.macho_file.getMatchingSection(sect)) orelse unreachable;
const local_sym_index = @intCast(u32, context.macho_file.locals.items.len);
- const sym_name = try std.fmt.allocPrint(context.allocator, "l_{s}_{s}_{s}", .{
- context.object.name,
- commands.segmentName(sect),
- commands.sectionName(sect),
- });
- defer context.allocator.free(sym_name);
-
try context.macho_file.locals.append(context.allocator, .{
- .n_strx = try context.macho_file.makeString(sym_name),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = @intCast(u8, context.macho_file.section_ordinals.getIndex(match).? + 1),
.n_desc = 0,
@@ -877,12 +869,16 @@ pub fn parseRelocs(self: *Atom, relocs: []macho.relocation_info, context: RelocC
.sect = context.macho_file.got_section_index.?,
};
- if (context.parsed_atoms.getPtr(match)) |last| {
+ if (!context.object.start_atoms.contains(match)) {
+ try context.object.start_atoms.putNoClobber(context.allocator, match, atom);
+ }
+
+ if (context.object.end_atoms.getPtr(match)) |last| {
last.*.next = atom;
atom.prev = last.*;
last.* = atom;
} else {
- try context.parsed_atoms.putNoClobber(match, atom);
+ try context.object.end_atoms.putNoClobber(context.allocator, match, atom);
}
} else if (parsed_rel.payload == .unsigned) {
switch (parsed_rel.where) {
@@ -939,52 +935,63 @@ pub fn parseRelocs(self: *Atom, relocs: []macho.relocation_info, context: RelocC
if (parsed_rel.where != .undef) break :blk;
if (context.macho_file.stubs_map.contains(parsed_rel.where_index)) break :blk;
- const stub_helper_atom = try context.macho_file.createStubHelperAtom();
- const laptr_atom = try context.macho_file.createLazyPointerAtom(
- stub_helper_atom.local_sym_index,
- parsed_rel.where_index,
- );
- const stub_atom = try context.macho_file.createStubAtom(laptr_atom.local_sym_index);
- try context.macho_file.stubs_map.putNoClobber(context.allocator, parsed_rel.where_index, stub_atom);
// TODO clean this up!
- if (context.parsed_atoms.getPtr(.{
- .seg = context.macho_file.text_segment_cmd_index.?,
- .sect = context.macho_file.stub_helper_section_index.?,
- })) |last| {
- last.*.next = stub_helper_atom;
- stub_helper_atom.prev = last.*;
- last.* = stub_helper_atom;
- } else {
- try context.parsed_atoms.putNoClobber(.{
+ const stub_helper_atom = atom: {
+ const atom = try context.macho_file.createStubHelperAtom();
+ const match = MachO.MatchingSection{
.seg = context.macho_file.text_segment_cmd_index.?,
.sect = context.macho_file.stub_helper_section_index.?,
- }, stub_helper_atom);
- }
- if (context.parsed_atoms.getPtr(.{
- .seg = context.macho_file.text_segment_cmd_index.?,
- .sect = context.macho_file.stubs_section_index.?,
- })) |last| {
- last.*.next = stub_atom;
- stub_atom.prev = last.*;
- last.* = stub_atom;
- } else {
- try context.parsed_atoms.putNoClobber(.{
- .seg = context.macho_file.text_segment_cmd_index.?,
- .sect = context.macho_file.stubs_section_index.?,
- }, stub_atom);
- }
- if (context.parsed_atoms.getPtr(.{
- .seg = context.macho_file.data_segment_cmd_index.?,
- .sect = context.macho_file.la_symbol_ptr_section_index.?,
- })) |last| {
- last.*.next = laptr_atom;
- laptr_atom.prev = last.*;
- last.* = laptr_atom;
- } else {
- try context.parsed_atoms.putNoClobber(.{
+ };
+ if (!context.object.start_atoms.contains(match)) {
+ try context.object.start_atoms.putNoClobber(context.allocator, match, atom);
+ }
+ if (context.object.end_atoms.getPtr(match)) |last| {
+ last.*.next = atom;
+ atom.prev = last.*;
+ last.* = atom;
+ } else {
+ try context.object.end_atoms.putNoClobber(context.allocator, match, atom);
+ }
+ break :atom atom;
+ };
+ const laptr_atom = atom: {
+ const atom = try context.macho_file.createLazyPointerAtom(
+ stub_helper_atom.local_sym_index,
+ parsed_rel.where_index,
+ );
+ const match = MachO.MatchingSection{
.seg = context.macho_file.data_segment_cmd_index.?,
.sect = context.macho_file.la_symbol_ptr_section_index.?,
- }, laptr_atom);
+ };
+ if (!context.object.start_atoms.contains(match)) {
+ try context.object.start_atoms.putNoClobber(context.allocator, match, atom);
+ }
+ if (context.object.end_atoms.getPtr(match)) |last| {
+ last.*.next = atom;
+ atom.prev = last.*;
+ last.* = atom;
+ } else {
+ try context.object.end_atoms.putNoClobber(context.allocator, match, atom);
+ }
+ break :atom atom;
+ };
+ {
+ const atom = try context.macho_file.createStubAtom(laptr_atom.local_sym_index);
+ const match = MachO.MatchingSection{
+ .seg = context.macho_file.text_segment_cmd_index.?,
+ .sect = context.macho_file.stubs_section_index.?,
+ };
+ if (!context.object.start_atoms.contains(match)) {
+ try context.object.start_atoms.putNoClobber(context.allocator, match, atom);
+ }
+ if (context.object.end_atoms.getPtr(match)) |last| {
+ last.*.next = atom;
+ atom.prev = last.*;
+ last.* = atom;
+ } else {
+ try context.object.end_atoms.putNoClobber(context.allocator, match, atom);
+ }
+ try context.macho_file.stubs_map.putNoClobber(context.allocator, parsed_rel.where_index, atom);
}
}
}
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 6dd7e556b5..845122f5e3 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -102,7 +102,7 @@ pub fn calcAdhocSignature(
var buffer = try allocator.alloc(u8, page_size);
defer allocator.free(buffer);
- try cdir.data.ensureCapacity(allocator, total_pages * hash_size + id.len + 1);
+ try cdir.data.ensureTotalCapacity(allocator, total_pages * hash_size + id.len + 1);
// 1. Save the identifier and update offsets
cdir.inner.identOffset = cdir.inner.length;
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index a8c0138f60..3e940da85d 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -353,7 +353,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
// We have a function to compute the upper bound size, because it's needed
// for determining where to put the offset of the first `LinkBlock`.
- try di_buf.ensureCapacity(self.dbgInfoNeededHeaderBytes());
+ try di_buf.ensureTotalCapacity(self.dbgInfoNeededHeaderBytes());
// initial length - length of the .debug_info contribution for this compilation unit,
// not including the initial length itself.
@@ -408,7 +408,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
// Enough for all the data without resizing. When support for more compilation units
// is added, the size of this section will become more variable.
- try di_buf.ensureCapacity(100);
+ try di_buf.ensureTotalCapacity(100);
// initial length - length of the .debug_aranges contribution for this compilation unit,
// not including the initial length itself.
@@ -479,7 +479,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
// The size of this header is variable, depending on the number of directories,
// files, and padding. We have a function to compute the upper bound size, however,
// because it's needed for determining where to put the offset of the first `SrcFn`.
- try di_buf.ensureCapacity(self.dbgLineNeededHeaderBytes(module));
+ try di_buf.ensureTotalCapacity(self.dbgLineNeededHeaderBytes(module));
// initial length - length of the .debug_line contribution for this compilation unit,
// not including the initial length itself.
@@ -607,7 +607,7 @@ fn copySegmentCommand(self: *DebugSymbols, allocator: *Allocator, base_cmd: Segm
};
mem.copy(u8, &cmd.inner.segname, &base_cmd.inner.segname);
- try cmd.sections.ensureCapacity(allocator, cmd.inner.nsects);
+ try cmd.sections.ensureTotalCapacity(allocator, cmd.inner.nsects);
for (base_cmd.sections.items) |base_sect, i| {
var sect = macho.section_64{
.sectname = undefined,
@@ -855,7 +855,7 @@ pub fn initDeclDebugBuffers(
switch (decl.ty.zigTypeTag()) {
.Fn => {
// For functions we need to add a prologue to the debug line program.
- try dbg_line_buffer.ensureCapacity(26);
+ try dbg_line_buffer.ensureTotalCapacity(26);
const func = decl.val.castTag(.function).?.data;
const line_off = @intCast(u28, decl.src_line + func.lbrace_line);
@@ -889,7 +889,7 @@ pub fn initDeclDebugBuffers(
// .debug_info subprogram
const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1];
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 27 + decl_name_with_null.len);
+ try dbg_info_buffer.ensureUnusedCapacity(27 + decl_name_with_null.len);
const fn_ret_type = decl.ty.fnReturnType();
const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
@@ -1124,7 +1124,7 @@ fn addDbgInfoType(
},
.Int => {
const info = ty.intInfo(target);
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
+ try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(switch (info.signedness) {
@@ -1261,7 +1261,7 @@ fn getDebugLineProgramEnd(self: DebugSymbols) u32 {
/// TODO Improve this to use a table.
fn makeDebugString(self: *DebugSymbols, allocator: *Allocator, bytes: []const u8) !u32 {
- try self.debug_string_table.ensureCapacity(allocator, self.debug_string_table.items.len + bytes.len + 1);
+ try self.debug_string_table.ensureUnusedCapacity(allocator, bytes.len + 1);
const result = self.debug_string_table.items.len;
self.debug_string_table.appendSliceAssumeCapacity(bytes);
self.debug_string_table.appendAssumeCapacity(0);
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index 05d44559ce..6a1a74f79a 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -180,7 +180,7 @@ pub fn parse(self: *Dylib, allocator: *Allocator, target: std.Target) !void {
fn readLoadCommands(self: *Dylib, allocator: *Allocator, reader: anytype) !void {
const should_lookup_reexports = self.header.?.flags & macho.MH_NO_REEXPORTED_DYLIBS == 0;
- try self.load_commands.ensureCapacity(allocator, self.header.?.ncmds);
+ try self.load_commands.ensureTotalCapacity(allocator, self.header.?.ncmds);
var i: u16 = 0;
while (i < self.header.?.ncmds) : (i += 1) {
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 27da019be8..cfa994ecfb 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -31,14 +31,12 @@ header: ?macho.mach_header_64 = null,
load_commands: std.ArrayListUnmanaged(LoadCommand) = .{},
segment_cmd_index: ?u16 = null,
+text_section_index: ?u16 = null,
symtab_cmd_index: ?u16 = null,
dysymtab_cmd_index: ?u16 = null,
build_version_cmd_index: ?u16 = null,
data_in_code_cmd_index: ?u16 = null,
-text_section_index: ?u16 = null,
-mod_init_func_section_index: ?u16 = null,
-
// __DWARF segment sections
dwarf_debug_info_index: ?u16 = null,
dwarf_debug_abbrev_index: ?u16 = null,
@@ -56,7 +54,9 @@ tu_name: ?[]const u8 = null,
tu_comp_dir: ?[]const u8 = null,
mtime: ?u64 = null,
-atoms: std.ArrayListUnmanaged(*Atom) = .{},
+contained_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+start_atoms: std.AutoHashMapUnmanaged(MachO.MatchingSection, *Atom) = .{},
+end_atoms: std.AutoHashMapUnmanaged(MachO.MatchingSection, *Atom) = .{},
sections_as_symbols: std.AutoHashMapUnmanaged(u16, u32) = .{},
// TODO symbol mapping and its inverse can probably be simple arrays
@@ -138,12 +138,15 @@ pub fn deinit(self: *Object, allocator: *Allocator) void {
self.data_in_code_entries.deinit(allocator);
self.symtab.deinit(allocator);
self.strtab.deinit(allocator);
- self.atoms.deinit(allocator);
self.sections_as_symbols.deinit(allocator);
self.symbol_mapping.deinit(allocator);
self.reverse_symbol_mapping.deinit(allocator);
allocator.free(self.name);
+ self.contained_atoms.deinit(allocator);
+ self.start_atoms.deinit(allocator);
+ self.end_atoms.deinit(allocator);
+
if (self.debug_info) |*db| {
db.deinit(allocator);
}
@@ -157,6 +160,67 @@ pub fn deinit(self: *Object, allocator: *Allocator) void {
}
}
+pub fn free(self: *Object, allocator: *Allocator, macho_file: *MachO) void {
+ log.debug("freeObject {*}", .{self});
+
+ var it = self.end_atoms.iterator();
+ while (it.next()) |entry| {
+ const match = entry.key_ptr.*;
+ const first_atom = self.start_atoms.get(match).?;
+ const last_atom = entry.value_ptr.*;
+ var atom = first_atom;
+
+ while (true) {
+ if (atom.local_sym_index != 0) {
+ macho_file.locals_free_list.append(allocator, atom.local_sym_index) catch {};
+ const local = &macho_file.locals.items[atom.local_sym_index];
+ local.n_type = 0;
+ atom.local_sym_index = 0;
+ }
+ if (atom == last_atom) {
+ break;
+ }
+ if (atom.next) |next| {
+ atom = next;
+ } else break;
+ }
+ }
+
+ self.freeAtoms(macho_file);
+}
+
+fn freeAtoms(self: *Object, macho_file: *MachO) void {
+ var it = self.end_atoms.iterator();
+ while (it.next()) |entry| {
+ const match = entry.key_ptr.*;
+ var first_atom: *Atom = self.start_atoms.get(match).?;
+ var last_atom: *Atom = entry.value_ptr.*;
+
+ if (macho_file.atoms.getPtr(match)) |atom_ptr| {
+ if (atom_ptr.* == last_atom) {
+ if (first_atom.prev) |prev| {
+ // TODO shrink the section size here
+ atom_ptr.* = prev;
+ } else {
+ _ = macho_file.atoms.fetchRemove(match);
+ }
+ }
+ }
+
+ if (first_atom.prev) |prev| {
+ prev.next = last_atom.next;
+ } else {
+ first_atom.prev = null;
+ }
+
+ if (last_atom.next) |next| {
+ next.prev = last_atom.prev;
+ } else {
+ last_atom.next = null;
+ }
+ }
+}
+
pub fn parse(self: *Object, allocator: *Allocator, target: std.Target) !void {
const reader = self.file.reader();
if (self.file_offset) |offset| {
@@ -197,7 +261,7 @@ pub fn readLoadCommands(self: *Object, allocator: *Allocator, reader: anytype) !
const header = self.header orelse unreachable; // Unreachable here signifies a fatal unexplored condition.
const offset = self.file_offset orelse 0;
- try self.load_commands.ensureCapacity(allocator, header.ncmds);
+ try self.load_commands.ensureTotalCapacity(allocator, header.ncmds);
var i: u16 = 0;
while (i < header.ncmds) : (i += 1) {
@@ -226,10 +290,6 @@ pub fn readLoadCommands(self: *Object, allocator: *Allocator, reader: anytype) !
if (mem.eql(u8, sectname, "__text")) {
self.text_section_index = index;
}
- } else if (mem.eql(u8, segname, "__DATA")) {
- if (mem.eql(u8, sectname, "__mod_init_func")) {
- self.mod_init_func_section_index = index;
- }
}
sect.offset += offset;
@@ -315,166 +375,10 @@ fn filterDice(dices: []macho.data_in_code_entry, start_addr: u64, end_addr: u64)
return dices[start..end];
}
-const Context = struct {
- allocator: *Allocator,
- object: *Object,
- macho_file: *MachO,
- match: MachO.MatchingSection,
- parsed_atoms: *ParsedAtoms,
-};
-
-const AtomParser = struct {
- section: macho.section_64,
- code: []u8,
- relocs: []macho.relocation_info,
- nlists: []NlistWithIndex,
- index: u32 = 0,
-
- fn peek(self: AtomParser) ?NlistWithIndex {
- return if (self.index + 1 < self.nlists.len) self.nlists[self.index + 1] else null;
- }
-
- fn lessThanBySeniority(context: Context, lhs: NlistWithIndex, rhs: NlistWithIndex) bool {
- if (!MachO.symbolIsExt(rhs.nlist)) {
- return MachO.symbolIsTemp(lhs.nlist, context.object.getString(lhs.nlist.n_strx));
- } else if (MachO.symbolIsPext(rhs.nlist) or MachO.symbolIsWeakDef(rhs.nlist)) {
- return !MachO.symbolIsExt(lhs.nlist);
- } else {
- return false;
- }
- }
-
- pub fn next(self: *AtomParser, context: Context) !?*Atom {
- if (self.index == self.nlists.len) return null;
-
- const tracy = trace(@src());
- defer tracy.end();
-
- var aliases = std.ArrayList(NlistWithIndex).init(context.allocator);
- defer aliases.deinit();
-
- const next_nlist: ?NlistWithIndex = blk: while (true) {
- const curr_nlist = self.nlists[self.index];
- try aliases.append(curr_nlist);
-
- if (self.peek()) |next_nlist| {
- if (curr_nlist.nlist.n_value == next_nlist.nlist.n_value) {
- self.index += 1;
- continue;
- }
- break :blk next_nlist;
- }
- break :blk null;
- } else null;
-
- for (aliases.items) |*nlist_with_index| {
- nlist_with_index.index = context.object.symbol_mapping.get(nlist_with_index.index) orelse unreachable;
- }
-
- if (aliases.items.len > 1) {
- // Bubble-up senior symbol as the main link to the atom.
- sort.sort(
- NlistWithIndex,
- aliases.items,
- context,
- AtomParser.lessThanBySeniority,
- );
- }
-
- const senior_nlist = aliases.pop();
- const senior_sym = &context.macho_file.locals.items[senior_nlist.index];
- senior_sym.n_sect = @intCast(u8, context.macho_file.section_ordinals.getIndex(context.match).? + 1);
-
- const start_addr = senior_nlist.nlist.n_value - self.section.addr;
- const end_addr = if (next_nlist) |n| n.nlist.n_value - self.section.addr else self.section.size;
-
- const code = self.code[start_addr..end_addr];
- const size = code.len;
-
- const max_align = self.section.@"align";
- const actual_align = if (senior_nlist.nlist.n_value > 0)
- math.min(@ctz(u64, senior_nlist.nlist.n_value), max_align)
- else
- max_align;
-
- const stab: ?Atom.Stab = if (context.object.debug_info) |di| blk: {
- // TODO there has to be a better to handle this.
- for (di.inner.func_list.items) |func| {
- if (func.pc_range) |range| {
- if (senior_nlist.nlist.n_value >= range.start and senior_nlist.nlist.n_value < range.end) {
- break :blk Atom.Stab{
- .function = range.end - range.start,
- };
- }
- }
- }
- // TODO
- // if (self.macho_file.globals.contains(self.macho_file.getString(senior_sym.strx))) break :blk .global;
- break :blk .static;
- } else null;
-
- const atom = try context.macho_file.createEmptyAtom(senior_nlist.index, size, actual_align);
- atom.stab = stab;
-
- const is_zerofill = blk: {
- const section_type = commands.sectionType(self.section);
- break :blk section_type == macho.S_ZEROFILL or section_type == macho.S_THREAD_LOCAL_ZEROFILL;
- };
- if (!is_zerofill) {
- mem.copy(u8, atom.code.items, code);
- }
-
- try atom.aliases.ensureTotalCapacity(context.allocator, aliases.items.len);
- for (aliases.items) |alias| {
- atom.aliases.appendAssumeCapacity(alias.index);
- const sym = &context.macho_file.locals.items[alias.index];
- sym.n_sect = @intCast(u8, context.macho_file.section_ordinals.getIndex(context.match).? + 1);
- }
-
- try atom.parseRelocs(self.relocs, .{
- .base_addr = self.section.addr,
- .base_offset = start_addr,
- .allocator = context.allocator,
- .object = context.object,
- .macho_file = context.macho_file,
- .parsed_atoms = context.parsed_atoms,
- });
-
- if (context.macho_file.has_dices) {
- const dices = filterDice(
- context.object.data_in_code_entries.items,
- senior_nlist.nlist.n_value,
- senior_nlist.nlist.n_value + size,
- );
- try atom.dices.ensureTotalCapacity(context.allocator, dices.len);
-
- for (dices) |dice| {
- atom.dices.appendAssumeCapacity(.{
- .offset = dice.offset - try math.cast(u32, senior_nlist.nlist.n_value),
- .length = dice.length,
- .kind = dice.kind,
- });
- }
- }
-
- self.index += 1;
-
- return atom;
- }
-};
-
-pub const ParsedAtoms = std.AutoHashMap(MachO.MatchingSection, *Atom);
-
-pub fn parseIntoAtoms(
- self: *Object,
- allocator: *Allocator,
- object_id: u16,
- macho_file: *MachO,
-) !ParsedAtoms {
+pub fn parseIntoAtoms(self: *Object, allocator: *Allocator, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- var parsed_atoms = ParsedAtoms.init(allocator);
const seg = self.load_commands.items[self.segment_cmd_index.?].Segment;
log.debug("analysing {s}", .{self.name});
@@ -540,16 +444,6 @@ pub fn parseIntoAtoms(
// Symbols within this section only.
const filtered_nlists = NlistWithIndex.filterInSection(sorted_nlists, sect);
- // TODO rewrite and re-enable dead-code stripping optimisation. I think it might make sense
- // to do this in a standalone pass after we parse the sections as atoms.
- // In release mode, if the object file was generated with dead code stripping optimisations,
- // note it now and parse sections as atoms.
- // const is_splittable = blk: {
- // if (macho_file.base.options.optimize_mode == .Debug) break :blk false;
- // break :blk self.header.?.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS != 0;
- // };
- const is_splittable = false;
-
macho_file.has_dices = macho_file.has_dices or blk: {
if (self.text_section_index) |index| {
if (index != id) break :blk false;
@@ -560,237 +454,101 @@ pub fn parseIntoAtoms(
};
macho_file.has_stabs = macho_file.has_stabs or self.debug_info != null;
- next: {
- if (is_splittable) atoms: {
- if (filtered_nlists.len == 0) break :atoms;
-
- // If the first nlist does not match the start of the section,
- // then we need to encapsulate the memory range [section start, first symbol)
- // as a temporary symbol and insert the matching Atom.
- const first_nlist = filtered_nlists[0].nlist;
- if (first_nlist.n_value > sect.addr) {
- const sym_name = try std.fmt.allocPrint(allocator, "l_{s}_{s}_{s}", .{
- self.name,
- segmentName(sect),
- sectionName(sect),
- });
- defer allocator.free(sym_name);
-
- const atom_local_sym_index = self.sections_as_symbols.get(sect_id) orelse blk: {
- const atom_local_sym_index = @intCast(u32, macho_file.locals.items.len);
- try macho_file.locals.append(allocator, .{
- .n_strx = try macho_file.makeString(sym_name),
- .n_type = macho.N_SECT,
- .n_sect = @intCast(u8, macho_file.section_ordinals.getIndex(match).? + 1),
- .n_desc = 0,
- .n_value = 0,
- });
- try self.sections_as_symbols.putNoClobber(allocator, sect_id, atom_local_sym_index);
- break :blk atom_local_sym_index;
- };
- const atom_code = code[0 .. first_nlist.n_value - sect.addr];
- const atom_size = atom_code.len;
- const atom = try macho_file.createEmptyAtom(atom_local_sym_index, atom_size, sect.@"align");
-
- const is_zerofill = blk: {
- const section_type = commands.sectionType(sect);
- break :blk section_type == macho.S_ZEROFILL or section_type == macho.S_THREAD_LOCAL_ZEROFILL;
- };
- if (!is_zerofill) {
- mem.copy(u8, atom.code.items, atom_code);
- }
-
- try atom.parseRelocs(relocs, .{
- .base_addr = sect.addr,
- .base_offset = 0,
- .allocator = allocator,
- .object = self,
- .macho_file = macho_file,
- .parsed_atoms = &parsed_atoms,
- });
-
- if (macho_file.has_dices) {
- const dices = filterDice(self.data_in_code_entries.items, sect.addr, sect.addr + atom_size);
- try atom.dices.ensureTotalCapacity(allocator, dices.len);
-
- for (dices) |dice| {
- atom.dices.appendAssumeCapacity(.{
- .offset = dice.offset - try math.cast(u32, sect.addr),
- .length = dice.length,
- .kind = dice.kind,
- });
- }
- }
-
- if (parsed_atoms.getPtr(match)) |last| {
- last.*.next = atom;
- atom.prev = last.*;
- last.* = atom;
- } else {
- try parsed_atoms.putNoClobber(match, atom);
- }
- try self.atoms.append(allocator, atom);
- }
-
- var parser = AtomParser{
- .section = sect,
- .code = code,
- .relocs = relocs,
- .nlists = filtered_nlists,
- };
-
- while (try parser.next(.{
- .allocator = allocator,
- .object = self,
- .macho_file = macho_file,
- .match = match,
- .parsed_atoms = &parsed_atoms,
- })) |atom| {
- const sym = macho_file.locals.items[atom.local_sym_index];
- const is_ext = blk: {
- const orig_sym_id = self.reverse_symbol_mapping.get(atom.local_sym_index) orelse unreachable;
- break :blk MachO.symbolIsExt(self.symtab.items[orig_sym_id]);
- };
- if (is_ext) {
- if (macho_file.symbol_resolver.get(sym.n_strx)) |resolv| {
- assert(resolv.where == .global);
- if (resolv.file != object_id) {
- log.debug("deduping definition of {s} in {s}", .{
- macho_file.getString(sym.n_strx),
- self.name,
- });
- log.debug(" already defined in {s}", .{
- macho_file.objects.items[resolv.file].name,
- });
- continue;
- }
- }
- }
-
- if (sym.n_value == sect.addr) {
- if (self.sections_as_symbols.get(sect_id)) |alias| {
- // In x86_64 relocs, it can so happen that the compiler refers to the same
- // atom by both the actual assigned symbol and the start of the section. In this
- // case, we need to link the two together so add an alias.
- try atom.aliases.append(allocator, alias);
- }
- }
-
- if (parsed_atoms.getPtr(match)) |last| {
- last.*.next = atom;
- atom.prev = last.*;
- last.* = atom;
- } else {
- try parsed_atoms.putNoClobber(match, atom);
- }
- try self.atoms.append(allocator, atom);
- }
-
- break :next;
- }
-
- // Since there is no symbol to refer to this atom, we create
- // a temp one, unless we already did that when working out the relocations
- // of other atoms.
- const sym_name = try std.fmt.allocPrint(allocator, "l_{s}_{s}_{s}", .{
- self.name,
- segmentName(sect),
- sectionName(sect),
+ // Since there is no symbol to refer to this atom, we create
+ // a temp one, unless we already did that when working out the relocations
+ // of other atoms.
+ const atom_local_sym_index = self.sections_as_symbols.get(sect_id) orelse blk: {
+ const atom_local_sym_index = @intCast(u32, macho_file.locals.items.len);
+ try macho_file.locals.append(allocator, .{
+ .n_strx = 0,
+ .n_type = macho.N_SECT,
+ .n_sect = @intCast(u8, macho_file.section_ordinals.getIndex(match).? + 1),
+ .n_desc = 0,
+ .n_value = 0,
});
- defer allocator.free(sym_name);
+ try self.sections_as_symbols.putNoClobber(allocator, sect_id, atom_local_sym_index);
+ break :blk atom_local_sym_index;
+ };
+ const atom = try macho_file.createEmptyAtom(atom_local_sym_index, sect.size, sect.@"align");
- const atom_local_sym_index = self.sections_as_symbols.get(sect_id) orelse blk: {
- const atom_local_sym_index = @intCast(u32, macho_file.locals.items.len);
- try macho_file.locals.append(allocator, .{
- .n_strx = try macho_file.makeString(sym_name),
- .n_type = macho.N_SECT,
- .n_sect = @intCast(u8, macho_file.section_ordinals.getIndex(match).? + 1),
- .n_desc = 0,
- .n_value = 0,
- });
- try self.sections_as_symbols.putNoClobber(allocator, sect_id, atom_local_sym_index);
- break :blk atom_local_sym_index;
- };
- const atom = try macho_file.createEmptyAtom(atom_local_sym_index, sect.size, sect.@"align");
-
- const is_zerofill = blk: {
- const section_type = commands.sectionType(sect);
- break :blk section_type == macho.S_ZEROFILL or section_type == macho.S_THREAD_LOCAL_ZEROFILL;
- };
- if (!is_zerofill) {
- mem.copy(u8, atom.code.items, code);
- }
-
- try atom.parseRelocs(relocs, .{
- .base_addr = sect.addr,
- .base_offset = 0,
- .allocator = allocator,
- .object = self,
- .macho_file = macho_file,
- .parsed_atoms = &parsed_atoms,
- });
-
- if (macho_file.has_dices) {
- const dices = filterDice(self.data_in_code_entries.items, sect.addr, sect.addr + sect.size);
- try atom.dices.ensureTotalCapacity(allocator, dices.len);
-
- for (dices) |dice| {
- atom.dices.appendAssumeCapacity(.{
- .offset = dice.offset - try math.cast(u32, sect.addr),
- .length = dice.length,
- .kind = dice.kind,
- });
- }
- }
-
- // Since this is atom gets a helper local temporary symbol that didn't exist
- // in the object file which encompasses the entire section, we need traverse
- // the filtered symbols and note which symbol is contained within so that
- // we can properly allocate addresses down the line.
- // While we're at it, we need to update segment,section mapping of each symbol too.
- try atom.contained.ensureTotalCapacity(allocator, filtered_nlists.len);
-
- for (filtered_nlists) |nlist_with_index| {
- const nlist = nlist_with_index.nlist;
- const local_sym_index = self.symbol_mapping.get(nlist_with_index.index) orelse unreachable;
- const local = &macho_file.locals.items[local_sym_index];
- local.n_sect = @intCast(u8, macho_file.section_ordinals.getIndex(match).? + 1);
-
- const stab: ?Atom.Stab = if (self.debug_info) |di| blk: {
- // TODO there has to be a better to handle this.
- for (di.inner.func_list.items) |func| {
- if (func.pc_range) |range| {
- if (nlist.n_value >= range.start and nlist.n_value < range.end) {
- break :blk Atom.Stab{
- .function = range.end - range.start,
- };
- }
- }
- }
- // TODO
- // if (zld.globals.contains(zld.getString(sym.strx))) break :blk .global;
- break :blk .static;
- } else null;
-
- atom.contained.appendAssumeCapacity(.{
- .local_sym_index = local_sym_index,
- .offset = nlist.n_value - sect.addr,
- .stab = stab,
- });
- }
-
- if (parsed_atoms.getPtr(match)) |last| {
- last.*.next = atom;
- atom.prev = last.*;
- last.* = atom;
- } else {
- try parsed_atoms.putNoClobber(match, atom);
- }
- try self.atoms.append(allocator, atom);
+ const is_zerofill = blk: {
+ const section_type = commands.sectionType(sect);
+ break :blk section_type == macho.S_ZEROFILL or section_type == macho.S_THREAD_LOCAL_ZEROFILL;
+ };
+ if (!is_zerofill) {
+ mem.copy(u8, atom.code.items, code);
}
- }
- return parsed_atoms;
+ try atom.parseRelocs(relocs, .{
+ .base_addr = sect.addr,
+ .base_offset = 0,
+ .allocator = allocator,
+ .object = self,
+ .macho_file = macho_file,
+ });
+
+ if (macho_file.has_dices) {
+ const dices = filterDice(self.data_in_code_entries.items, sect.addr, sect.addr + sect.size);
+ try atom.dices.ensureTotalCapacity(allocator, dices.len);
+
+ for (dices) |dice| {
+ atom.dices.appendAssumeCapacity(.{
+ .offset = dice.offset - try math.cast(u32, sect.addr),
+ .length = dice.length,
+ .kind = dice.kind,
+ });
+ }
+ }
+
+ // Since this atom gets a helper local temporary symbol that didn't exist
+ // in the object file which encompasses the entire section, we need to traverse
+ // the filtered symbols and note which symbol is contained within so that
+ // we can properly allocate addresses down the line.
+ // While we're at it, we need to update segment,section mapping of each symbol too.
+ try atom.contained.ensureTotalCapacity(allocator, filtered_nlists.len);
+
+ for (filtered_nlists) |nlist_with_index| {
+ const nlist = nlist_with_index.nlist;
+ const local_sym_index = self.symbol_mapping.get(nlist_with_index.index) orelse unreachable;
+ const local = &macho_file.locals.items[local_sym_index];
+ local.n_sect = @intCast(u8, macho_file.section_ordinals.getIndex(match).? + 1);
+
+ const stab: ?Atom.Stab = if (self.debug_info) |di| blk: {
+ // TODO there has to be a better way to handle this.
+ for (di.inner.func_list.items) |func| {
+ if (func.pc_range) |range| {
+ if (nlist.n_value >= range.start and nlist.n_value < range.end) {
+ break :blk Atom.Stab{
+ .function = range.end - range.start,
+ };
+ }
+ }
+ }
+ // TODO
+ // if (zld.globals.contains(zld.getString(sym.strx))) break :blk .global;
+ break :blk .static;
+ } else null;
+
+ atom.contained.appendAssumeCapacity(.{
+ .local_sym_index = local_sym_index,
+ .offset = nlist.n_value - sect.addr,
+ .stab = stab,
+ });
+ }
+
+ if (!self.start_atoms.contains(match)) {
+ try self.start_atoms.putNoClobber(allocator, match, atom);
+ }
+
+ if (self.end_atoms.getPtr(match)) |last| {
+ last.*.next = atom;
+ atom.prev = last.*;
+ last.* = atom;
+ } else {
+ try self.end_atoms.putNoClobber(allocator, match, atom);
+ }
+ try self.contained_atoms.append(allocator, atom);
+ }
}
fn parseSymtab(self: *Object, allocator: *Allocator) !void {
diff --git a/src/link/MachO/Trie.zig b/src/link/MachO/Trie.zig
index ab3a97eb33..c166aaf432 100644
--- a/src/link/MachO/Trie.zig
+++ b/src/link/MachO/Trie.zig
@@ -326,7 +326,7 @@ pub fn finalize(self: *Trie, allocator: *Allocator) !void {
if (!self.trie_dirty) return;
self.ordered_nodes.shrinkRetainingCapacity(0);
- try self.ordered_nodes.ensureCapacity(allocator, self.node_count);
+ try self.ordered_nodes.ensureTotalCapacity(allocator, self.node_count);
var fifo = std.fifo.LinearFifo(*Node, .Dynamic).init(allocator);
defer fifo.deinit();
@@ -506,7 +506,7 @@ test "write Trie to a byte stream" {
});
try trie.finalize(gpa);
- try trie.finalize(gpa); // Finalizing mulitple times is a nop subsequently unless we add new nodes.
+ try trie.finalize(gpa); // Finalizing multiple times is a nop subsequently unless we add new nodes.
const exp_buffer = [_]u8{
0x0, 0x1, // node root
diff --git a/src/link/MachO/commands.zig b/src/link/MachO/commands.zig
index d9ca056c8e..35512886d4 100644
--- a/src/link/MachO/commands.zig
+++ b/src/link/MachO/commands.zig
@@ -223,7 +223,7 @@ pub const SegmentCommand = struct {
var segment = SegmentCommand{
.inner = inner,
};
- try segment.sections.ensureCapacity(alloc, inner.nsects);
+ try segment.sections.ensureTotalCapacity(alloc, inner.nsects);
var i: usize = 0;
while (i < inner.nsects) : (i += 1) {
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index bf49a238b6..384345ff67 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -20,6 +20,14 @@ const Allocator = std.mem.Allocator;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
+const FnDeclOutput = struct {
+ code: []const u8,
+ /// This might have to be modified in the linker, so that's why it's mutable.
+ lineinfo: []u8,
+ start_line: u32,
+ end_line: u32,
+};
+
base: link.File,
sixtyfour_bit: bool,
error_flags: File.ErrorFlags = File.ErrorFlags{},
@@ -27,16 +35,45 @@ bases: Bases,
/// A symbol's value is just casted down when compiling
/// for a 32 bit target.
+/// Does not represent the order or number of symbols in the file;
+/// it is just useful for storing symbols. Some other symbols are in
+/// file_segments.
syms: std.ArrayListUnmanaged(aout.Sym) = .{},
-fn_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{},
+/// The plan9 a.out format requires segments of
+/// filenames to be deduplicated, so we use this map to
+/// deduplicate them. The value is the ID of the path
+/// component.
+file_segments: std.StringArrayHashMapUnmanaged(u16) = .{},
+/// The value of a 'f' symbol increments by 1 every time, so that no 2 'f'
+/// symbols have the same value.
+file_segments_i: u16 = 1,
+
+path_arena: std.heap.ArenaAllocator,
+
+/// maps a file scope to a hash map of decl to codegen output
+/// this is useful for line debuginfo, since it makes sense to sort by file
+/// The debugger looks for the first file (aout.Sym.Type.z) preceding the text symbol
+/// of the function to know what file it came from.
+/// If we group the decls by file, it makes it really easy to do this (put the symbol in the correct place)
+fn_decl_table: std.AutoArrayHashMapUnmanaged(
+ *Module.Scope.File,
+ struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(*Module.Decl, FnDeclOutput) = .{} },
+) = .{},
data_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{},
hdr: aout.ExecHdr = undefined,
+magic: u32,
+
entry_val: ?u64 = null,
got_len: usize = 0,
+// A list of all the free got indexes, so that when making a new decl
+// we don't make a new one, but instead reuse one from here.
+got_index_free_list: std.ArrayListUnmanaged(u64) = .{},
+
+syms_index_free_list: std.ArrayListUnmanaged(u64) = .{},
const Bases = struct {
text: u64,
@@ -103,8 +140,12 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 {
33...64 => true,
else => return error.UnsupportedP9Architecture,
};
+
+ var arena_allocator = std.heap.ArenaAllocator.init(gpa);
+
const self = try gpa.create(Plan9);
self.* = .{
+ .path_arena = arena_allocator,
.base = .{
.tag = .plan9,
.options = options,
@@ -113,21 +154,102 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 {
},
.sixtyfour_bit = sixtyfour_bit,
.bases = undefined,
+ .magic = try aout.magicFromArch(self.base.options.target.cpu.arch),
};
+ // a / will always be in a file path
+ try self.file_segments.put(self.base.allocator, "/", 1);
return self;
}
+fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void {
+ const gpa = self.base.allocator;
+ const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.namespace.file_scope);
+ if (fn_map_res.found_existing) {
+ try fn_map_res.value_ptr.functions.put(gpa, decl, out);
+ } else {
+ const file = decl.namespace.file_scope;
+ const arena = &self.path_arena.allocator;
+ // each file gets a symbol
+ fn_map_res.value_ptr.* = .{
+ .sym_index = blk: {
+ try self.syms.append(gpa, undefined);
+ break :blk @intCast(u32, self.syms.items.len - 1);
+ },
+ };
+ try fn_map_res.value_ptr.functions.put(gpa, decl, out);
+
+ var a = std.ArrayList(u8).init(arena);
+ errdefer a.deinit();
+ // every 'z' starts with 0
+ try a.append(0);
+ // path component value of '/'
+ try a.writer().writeIntBig(u16, 1);
+
+ // getting the full file path
+ var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
+ const dir = file.pkg.root_src_directory.path orelse try std.os.getcwd(&buf);
+ const sub_path = try std.fs.path.join(arena, &.{ dir, file.sub_file_path });
+ try self.addPathComponents(sub_path, &a);
+
+ // null terminate
+ try a.append(0);
+ const final = a.toOwnedSlice();
+ self.syms.items[fn_map_res.value_ptr.sym_index] = .{
+ .type = .z,
+ .value = 1,
+ .name = final,
+ };
+ }
+}
+
+fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !void {
+ const sep = std.fs.path.sep;
+ var it = std.mem.tokenize(u8, path, &.{sep});
+ while (it.next()) |component| {
+ if (self.file_segments.get(component)) |num| {
+ try a.writer().writeIntBig(u16, num);
+ } else {
+ self.file_segments_i += 1;
+ try self.file_segments.put(self.base.allocator, component, self.file_segments_i);
+ try a.writer().writeIntBig(u16, self.file_segments_i);
+ }
+ }
+}
+
pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
const decl = func.owner_decl;
+
+ try self.seeDecl(decl);
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ .none = .{} });
+ var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer dbg_line_buffer.deinit();
+ var start_line: ?u32 = null;
+ var end_line: u32 = undefined;
+ var pcop_change_index: ?u32 = null;
+
+ const res = try codegen.generateFunction(
+ &self.base,
+ decl.srcLoc(),
+ func,
+ air,
+ liveness,
+ &code_buffer,
+ .{
+ .plan9 = .{
+ .dbg_line = &dbg_line_buffer,
+ .end_line = &end_line,
+ .start_line = &start_line,
+ .pcop_change_index = &pcop_change_index,
+ },
+ },
+ );
const code = switch (res) {
.appended => code_buffer.toOwnedSlice(),
.fail => |em| {
@@ -136,7 +258,13 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
return;
},
};
- try self.fn_decl_table.put(self.base.allocator, decl, code);
+ const out: FnDeclOutput = .{
+ .code = code,
+ .lineinfo = dbg_line_buffer.toOwnedSlice(),
+ .start_line = start_line.?,
+ .end_line = end_line,
+ };
+ try self.putFn(decl, out);
return self.updateFinish(decl);
}
@@ -151,6 +279,8 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
}
}
+ try self.seeDecl(decl);
+
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
@@ -192,8 +322,12 @@ fn updateFinish(self: *Plan9, decl: *Module.Decl) !void {
if (decl.link.plan9.sym_index) |s| {
self.syms.items[s] = sym;
} else {
- try self.syms.append(self.base.allocator, sym);
- decl.link.plan9.sym_index = self.syms.items.len - 1;
+ if (self.syms_index_free_list.popOrNull()) |i| {
+ decl.link.plan9.sym_index = i;
+ } else {
+ try self.syms.append(self.base.allocator, sym);
+ decl.link.plan9.sym_index = self.syms.items.len - 1;
+ }
}
}
@@ -209,6 +343,30 @@ pub fn flush(self: *Plan9, comp: *Compilation) !void {
return self.flushModule(comp);
}
+pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void {
+ if (delta_line > 0 and delta_line < 65) {
+ const toappend = @intCast(u8, delta_line);
+ try l.append(toappend);
+ } else if (delta_line < 0 and delta_line > -65) {
+ const toadd: u8 = @intCast(u8, -delta_line + 64);
+ try l.append(toadd);
+ } else if (delta_line != 0) {
+ try l.append(0);
+ try l.writer().writeIntBig(i32, delta_line);
+ }
+}
+
+fn declCount(self: *Plan9) u64 {
+ var fn_decl_count: u64 = 0;
+ var itf_files = self.fn_decl_table.iterator();
+ while (itf_files.next()) |ent| {
+ // get the submap
+ var submap = ent.value_ptr.functions;
+ fn_decl_count += submap.count();
+ }
+ return self.data_decl_table.count() + fn_decl_count;
+}
+
pub fn flushModule(self: *Plan9, comp: *Compilation) !void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@@ -224,15 +382,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void {
const mod = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
- // TODO I changed this assert from == to >= but this code all needs to be audited; see
- // the comment in `freeDecl`.
- assert(self.got_len >= self.fn_decl_table.count() + self.data_decl_table.count());
+ assert(self.got_len == self.declCount() + self.got_index_free_list.items.len);
const got_size = self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8;
var got_table = try self.base.allocator.alloc(u8, got_size);
defer self.base.allocator.free(got_table);
- // + 2 for header, got, symbols
- var iovecs = try self.base.allocator.alloc(std.os.iovec_const, self.fn_decl_table.count() + self.data_decl_table.count() + 3);
+ // + 4 for header, got, symbols, linecountinfo
+ var iovecs = try self.base.allocator.alloc(std.os.iovec_const, self.declCount() + 4);
defer self.base.allocator.free(iovecs);
const file = self.base.file.?;
@@ -245,30 +401,52 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void {
iovecs[0] = .{ .iov_base = hdr_slice.ptr, .iov_len = hdr_slice.len };
var iovecs_i: usize = 1;
var text_i: u64 = 0;
+
+ var linecountinfo = std.ArrayList(u8).init(self.base.allocator);
+ defer linecountinfo.deinit();
// text
{
- var it = self.fn_decl_table.iterator();
- while (it.next()) |entry| {
- const decl = entry.key_ptr.*;
- const code = entry.value_ptr.*;
- log.debug("write text decl {*} ({s})", .{ decl, decl.name });
- foff += code.len;
- iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len };
- iovecs_i += 1;
- const off = self.getAddr(text_i, .t);
- text_i += code.len;
- decl.link.plan9.offset = off;
- if (!self.sixtyfour_bit) {
- mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off));
- mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
- } else {
- mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
- }
- self.syms.items[decl.link.plan9.sym_index.?].value = off;
- if (mod.decl_exports.get(decl)) |exports| {
- try self.addDeclExports(mod, decl, exports);
+ var linecount: u32 = 0;
+ var it_file = self.fn_decl_table.iterator();
+ while (it_file.next()) |fentry| {
+ var it = fentry.value_ptr.functions.iterator();
+ while (it.next()) |entry| {
+ const decl = entry.key_ptr.*;
+ const out = entry.value_ptr.*;
+ log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line, out.end_line });
+ {
+ // connect the previous decl to the next
+ const delta_line = @intCast(i32, out.start_line) - @intCast(i32, linecount);
+
+ try changeLine(&linecountinfo, delta_line);
+ // TODO change the pc too (maybe?)
+
+ // write out the actual info that was generated in codegen now
+ try linecountinfo.appendSlice(out.lineinfo);
+ linecount = out.end_line;
+ }
+ foff += out.code.len;
+ iovecs[iovecs_i] = .{ .iov_base = out.code.ptr, .iov_len = out.code.len };
+ iovecs_i += 1;
+ const off = self.getAddr(text_i, .t);
+ text_i += out.code.len;
+ decl.link.plan9.offset = off;
+ if (!self.sixtyfour_bit) {
+ mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off));
+ mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ } else {
+ mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ }
+ self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ if (mod.decl_exports.get(decl)) |exports| {
+ try self.addDeclExports(mod, decl, exports);
+ }
}
}
+ if (linecountinfo.items.len & 1 == 1) {
+ // just a nop to make it even, the plan9 linker does this
+ try linecountinfo.append(129);
+ }
// etext symbol
self.syms.items[2].value = self.getAddr(text_i, .t);
}
@@ -306,20 +484,23 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void {
// edata
self.syms.items[1].value = self.getAddr(0x0, .b);
var sym_buf = std.ArrayList(u8).init(self.base.allocator);
- defer sym_buf.deinit();
try self.writeSyms(&sym_buf);
- assert(2 + self.fn_decl_table.count() + self.data_decl_table.count() == iovecs_i); // we didn't write all the decls
- iovecs[iovecs_i] = .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len };
+ const syms = sym_buf.toOwnedSlice();
+ defer self.base.allocator.free(syms);
+ assert(2 + self.declCount() == iovecs_i); // we didn't write all the decls
+ iovecs[iovecs_i] = .{ .iov_base = syms.ptr, .iov_len = syms.len };
+ iovecs_i += 1;
+ iovecs[iovecs_i] = .{ .iov_base = linecountinfo.items.ptr, .iov_len = linecountinfo.items.len };
iovecs_i += 1;
// generate the header
self.hdr = .{
- .magic = try aout.magicFromArch(self.base.options.target.cpu.arch),
+ .magic = self.magic,
.text = @intCast(u32, text_i),
.data = @intCast(u32, data_i),
- .syms = @intCast(u32, sym_buf.items.len),
+ .syms = @intCast(u32, syms.len),
.bss = 0,
- .pcsz = 0,
.spsz = 0,
+ .pcsz = @intCast(u32, linecountinfo.items.len),
.entry = @intCast(u32, self.entry_val.?),
};
std.mem.copy(u8, hdr_slice, self.hdr.toU8s()[0..hdr_size]);
@@ -360,18 +541,43 @@ fn addDeclExports(
}
pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void {
- // TODO this is not the correct check for being function body,
- // it could just be a function pointer.
// TODO audit the lifetimes of decls table entries. It's possible to get
// allocateDeclIndexes and then freeDecl without any updateDecl in between.
// However that is planned to change, see the TODO comment in Module.zig
// in the deleteUnusedDecl function.
- const is_fn = (decl.ty.zigTypeTag() == .Fn);
+ const is_fn = (decl.val.tag() == .function);
if (is_fn) {
- _ = self.fn_decl_table.swapRemove(decl);
+ var symidx_and_submap =
+ self.fn_decl_table.get(decl.namespace.file_scope).?;
+ var submap = symidx_and_submap.functions;
+ _ = submap.swapRemove(decl);
+ if (submap.count() == 0) {
+ self.syms.items[symidx_and_submap.sym_index] = aout.Sym.undefined_symbol;
+ self.syms_index_free_list.append(self.base.allocator, symidx_and_submap.sym_index) catch {};
+ submap.deinit(self.base.allocator);
+ }
} else {
_ = self.data_decl_table.swapRemove(decl);
}
+ if (decl.link.plan9.got_index) |i| {
+ // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
+ self.got_index_free_list.append(self.base.allocator, i) catch {};
+ }
+ if (decl.link.plan9.sym_index) |i| {
+ self.syms_index_free_list.append(self.base.allocator, i) catch {};
+ self.syms.items[i] = aout.Sym.undefined_symbol;
+ }
+}
+
+pub fn seeDecl(self: *Plan9, decl: *Module.Decl) !void {
+ if (decl.link.plan9.got_index == null) {
+ if (self.got_index_free_list.popOrNull()) |i| {
+ decl.link.plan9.got_index = i;
+ } else {
+ self.got_len += 1;
+ decl.link.plan9.got_index = self.got_len - 1;
+ }
+ }
}
pub fn updateDeclExports(
@@ -380,6 +586,7 @@ pub fn updateDeclExports(
decl: *Module.Decl,
exports: []const *Module.Export,
) !void {
+ try self.seeDecl(decl);
// we do all the things in flush
_ = self;
_ = module;
@@ -387,17 +594,29 @@ pub fn updateDeclExports(
_ = exports;
}
pub fn deinit(self: *Plan9) void {
- var itf = self.fn_decl_table.iterator();
- while (itf.next()) |entry| {
- self.base.allocator.free(entry.value_ptr.*);
+ const gpa = self.base.allocator;
+ var itf_files = self.fn_decl_table.iterator();
+ while (itf_files.next()) |ent| {
+ // get the submap
+ var submap = ent.value_ptr.functions;
+ defer submap.deinit(gpa);
+ var itf = submap.iterator();
+ while (itf.next()) |entry| {
+ gpa.free(entry.value_ptr.code);
+ gpa.free(entry.value_ptr.lineinfo);
+ }
}
- self.fn_decl_table.deinit(self.base.allocator);
+ self.fn_decl_table.deinit(gpa);
var itd = self.data_decl_table.iterator();
while (itd.next()) |entry| {
- self.base.allocator.free(entry.value_ptr.*);
+ gpa.free(entry.value_ptr.*);
}
- self.data_decl_table.deinit(self.base.allocator);
- self.syms.deinit(self.base.allocator);
+ self.data_decl_table.deinit(gpa);
+ self.syms.deinit(gpa);
+ self.got_index_free_list.deinit(gpa);
+ self.syms_index_free_list.deinit(gpa);
+ self.file_segments.deinit(gpa);
+ self.path_arena.deinit();
}
pub const Export = ?usize;
@@ -407,7 +626,6 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
return error.LLVMBackendDoesNotSupportPlan9;
assert(options.object_format == .plan9);
const file = try options.emit.?.directory.handle.createFile(sub_path, .{
- .truncate = false,
.read = true,
.mode = link.determineMode(options),
});
@@ -441,27 +659,77 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
return self;
}
+pub fn writeSym(self: *Plan9, w: anytype, sym: aout.Sym) !void {
+ log.debug("write sym.name: {s}", .{sym.name});
+ log.debug("write sym.value: {x}", .{sym.value});
+ if (sym.type == .bad) return; // we don't want to write free'd symbols
+ if (!self.sixtyfour_bit) {
+ try w.writeIntBig(u32, @intCast(u32, sym.value));
+ } else {
+ try w.writeIntBig(u64, sym.value);
+ }
+ try w.writeByte(@enumToInt(sym.type));
+ try w.writeAll(sym.name);
+ try w.writeByte(0);
+}
pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
const writer = buf.writer();
- for (self.syms.items) |sym| {
- log.debug("sym.name: {s}", .{sym.name});
- log.debug("sym.value: {x}", .{sym.value});
- if (mem.eql(u8, sym.name, "_start"))
- self.entry_val = sym.value;
- if (!self.sixtyfour_bit) {
- try writer.writeIntBig(u32, @intCast(u32, sym.value));
- } else {
- try writer.writeIntBig(u64, sym.value);
+ // write the f symbols
+ {
+ var it = self.file_segments.iterator();
+ while (it.next()) |entry| {
+ try self.writeSym(writer, .{
+ .type = .f,
+ .value = entry.value_ptr.*,
+ .name = entry.key_ptr.*,
+ });
+ }
+ }
+ // write the data symbols
+ {
+ var it = self.data_decl_table.iterator();
+ while (it.next()) |entry| {
+ const decl = entry.key_ptr.*;
+ const sym = self.syms.items[decl.link.plan9.sym_index.?];
+ try self.writeSym(writer, sym);
+ if (self.base.options.module.?.decl_exports.get(decl)) |exports| {
+ for (exports) |e| {
+ try self.writeSym(writer, self.syms.items[e.link.plan9.?]);
+ }
+ }
+ }
+ }
+ // text symbols are the hardest:
+ // the file of a text symbol is the .z symbol before it
+ // so we have to write everything in the right order
+ {
+ var it_file = self.fn_decl_table.iterator();
+ while (it_file.next()) |fentry| {
+ var symidx_and_submap = fentry.value_ptr;
+ // write the z symbol
+ try self.writeSym(writer, self.syms.items[symidx_and_submap.sym_index]);
+
+ // write all the decls that come from the file of the z symbol
+ var submap_it = symidx_and_submap.functions.iterator();
+ while (submap_it.next()) |entry| {
+ const decl = entry.key_ptr.*;
+ const sym = self.syms.items[decl.link.plan9.sym_index.?];
+ try self.writeSym(writer, sym);
+ if (self.base.options.module.?.decl_exports.get(decl)) |exports| {
+ for (exports) |e| {
+ const s = self.syms.items[e.link.plan9.?];
+ if (mem.eql(u8, s.name, "_start"))
+ self.entry_val = s.value;
+ try self.writeSym(writer, s);
+ }
+ }
+ }
}
- try writer.writeByte(@enumToInt(sym.type));
- try writer.writeAll(sym.name);
- try writer.writeByte(0);
}
}
+/// this will be removed, moved to updateFinish
pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void {
- if (decl.link.plan9.got_index == null) {
- self.got_len += 1;
- decl.link.plan9.got_index = self.got_len - 1;
- }
+ _ = self;
+ _ = decl;
}
diff --git a/src/link/Plan9/aout.zig b/src/link/Plan9/aout.zig
index f6dff7437c..39994516fa 100644
--- a/src/link/Plan9/aout.zig
+++ b/src/link/Plan9/aout.zig
@@ -16,7 +16,7 @@ pub const ExecHdr = extern struct {
comptime {
assert(@sizeOf(@This()) == 32);
}
- /// It is up to the caller to disgard the last 8 bytes if the header is not fat.
+ /// It is up to the caller to discard the last 8 bytes if the header is not fat.
pub fn toU8s(self: *@This()) [40]u8 {
var buf: [40]u8 = undefined;
var i: u8 = 0;
@@ -34,6 +34,12 @@ pub const Sym = struct {
type: Type,
name: []const u8,
+ pub const undefined_symbol: Sym = .{
+ .value = undefined,
+ .type = .bad,
+ .name = "undefined_symbol",
+ };
+
/// The type field is one of the following characters with the
/// high bit set:
/// T text segment symbol
@@ -65,6 +71,8 @@ pub const Sym = struct {
z = 0x80 | 'z',
Z = 0x80 | 'Z',
m = 0x80 | 'm',
+ /// represents an undefined symbol, to be removed in flush
+ bad = 0,
pub fn toGlobal(self: Type) Type {
return switch (self) {
@@ -112,3 +120,12 @@ pub fn magicFromArch(arch: std.Target.Cpu.Arch) !u32 {
else => error.ArchNotSupportedByPlan9,
};
}
+
+/// gets the quantization of pc for the arch
+pub fn getPCQuant(arch: std.Target.Cpu.Arch) !u8 {
+ return switch (arch) {
+ .i386, .x86_64 => 1,
+ .powerpc, .powerpc64, .mips, .sparc, .arm, .aarch64 => 4,
+ else => error.ArchNotSupportedByPlan9,
+ };
+}
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 17b656a06c..f9d3f7a1e6 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -12,7 +12,7 @@
//! - OpName and OpMemberName instructions.
//! - OpModuleProcessed instructions.
//! All annotation (decoration) instructions.
-//! All type declaration instructions, constant instructions, global variable declarations, (preferrably) OpUndef instructions.
+//! All type declaration instructions, constant instructions, global variable declarations, (preferably) OpUndef instructions.
//! All function declarations without a body (extern functions presumably).
//! All regular functions.
@@ -93,7 +93,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
if (options.use_llvm) return error.LLVM_BackendIsTODO_ForSpirV; // TODO: LLVM Doesn't support SpirV at all.
if (options.use_lld) return error.LLD_LinkingIsTODO_ForSpirV; // TODO: LLD Doesn't support SpirV at all.
- // TODO: read the file and keep vaild parts instead of truncating
+ // TODO: read the file and keep valid parts instead of truncating
const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = true, .read = true });
errdefer file.close();
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index fc559948c4..a75ad1b2f7 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -35,7 +35,7 @@ llvm_object: ?*LlvmObject = null,
/// TODO: can/should we access some data structure in Module directly?
funcs: std.ArrayListUnmanaged(*Module.Decl) = .{},
/// List of all extern function Decls to be written to the `import` section of the
-/// wasm binary. The positin in the list defines the function index
+/// wasm binary. The position in the list defines the function index
ext_funcs: std.ArrayListUnmanaged(*Module.Decl) = .{},
/// When importing objects from the host environment, a name must be supplied.
/// LLVM uses "env" by default when none is given. This would be a good default for Zig
@@ -121,7 +121,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
const self = try createEmpty(allocator, options);
errdefer self.base.destroy();
- self.llvm_object = try LlvmObject.create(allocator, options);
+ self.llvm_object = try LlvmObject.create(allocator, sub_path, options);
return self;
}
@@ -172,8 +172,8 @@ pub fn deinit(self: *Wasm) void {
pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void {
if (decl.link.wasm.init) return;
- try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
- try self.symbols.ensureCapacity(self.base.allocator, self.symbols.items.len + 1);
+ try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
+ try self.symbols.ensureUnusedCapacity(self.base.allocator, 1);
const block = &decl.link.wasm;
block.init = true;
@@ -339,6 +339,10 @@ pub fn updateDeclExports(
}
pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
+ }
+
if (self.getFuncidx(decl)) |func_idx| {
switch (decl.val.tag()) {
.function => _ = self.funcs.swapRemove(func_idx),
@@ -710,7 +714,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
if (self.base.options.output_mode == .Obj) {
- // LLD's WASM driver does not support the equvialent of `-r` so we do a simple file copy
+ // LLD's WASM driver does not support the equivalent of `-r` so we do a simple file copy
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
@@ -752,7 +756,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
if (self.base.options.output_mode == .Exe) {
// Increase the default stack size to a more reasonable value of 1MB instead of
- // the default of 1 Wasm page being 64KB, unless overriden by the user.
+ // the default of 1 Wasm page being 64KB, unless overridden by the user.
try argv.append("-z");
const stack_size = self.base.options.stack_size_override orelse 1048576;
const arg = try std.fmt.allocPrint(arena, "stack-size={d}", .{stack_size});
diff --git a/src/main.zig b/src/main.zig
index 6a76c9507f..e0be4b6021 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -20,6 +20,10 @@ const translate_c = @import("translate_c.zig");
const Cache = @import("Cache.zig");
const target_util = @import("target.zig");
const ThreadPool = @import("ThreadPool.zig");
+const crash_report = @import("crash_report.zig");
+
+// Crash report needs to override the panic handler and other root decls
+pub usingnamespace crash_report.root_decls;
pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
std.log.emerg(format, args);
@@ -134,6 +138,8 @@ var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{
}){};
pub fn main() anyerror!void {
+ crash_report.initialize();
+
var gpa_need_deinit = false;
const gpa = gpa: {
if (!std.builtin.link_libc) {
@@ -1151,6 +1157,7 @@ fn buildOutputType(
var is_shared_lib = false;
var linker_args = std.ArrayList([]const u8).init(arena);
var it = ClangArgIterator.init(arena, all_args);
+ var emit_llvm = false;
while (it.has_next) {
it.next() catch |err| {
fatal("unable to parse command line parameters: {s}", .{@errorName(err)});
@@ -1161,6 +1168,7 @@ fn buildOutputType(
.c => c_out_mode = .object, // -c
.asm_only => c_out_mode = .assembly, // -S
.preprocess_only => c_out_mode = .preprocessor, // -E
+ .emit_llvm => emit_llvm = true,
.other => {
try clang_argv.appendSlice(it.other_args);
},
@@ -1518,22 +1526,42 @@ fn buildOutputType(
output_mode = if (is_shared_lib) .Lib else .Exe;
emit_bin = if (out_path) |p| .{ .yes = p } else EmitBin.yes_a_out;
enable_cache = true;
+ if (emit_llvm) {
+ fatal("-emit-llvm cannot be used when linking", .{});
+ }
},
.object => {
output_mode = .Obj;
- if (out_path) |p| {
- emit_bin = .{ .yes = p };
+ if (emit_llvm) {
+ emit_bin = .no;
+ if (out_path) |p| {
+ emit_llvm_bc = .{ .yes = p };
+ } else {
+ emit_llvm_bc = .yes_default_path;
+ }
} else {
- emit_bin = .yes_default_path;
+ if (out_path) |p| {
+ emit_bin = .{ .yes = p };
+ } else {
+ emit_bin = .yes_default_path;
+ }
}
},
.assembly => {
output_mode = .Obj;
emit_bin = .no;
- if (out_path) |p| {
- emit_asm = .{ .yes = p };
+ if (emit_llvm) {
+ if (out_path) |p| {
+ emit_llvm_ir = .{ .yes = p };
+ } else {
+ emit_llvm_ir = .yes_default_path;
+ }
} else {
- emit_asm = .yes_default_path;
+ if (out_path) |p| {
+ emit_asm = .{ .yes = p };
+ } else {
+ emit_asm = .yes_default_path;
+ }
}
},
.preprocessor => {
@@ -1682,22 +1710,22 @@ fn buildOutputType(
} else true;
if (!should_get_sdk_path) break :outer false;
if (try std.zig.system.darwin.getSDKPath(arena, target_info.target)) |sdk_path| {
- try clang_argv.ensureCapacity(clang_argv.items.len + 2);
+ try clang_argv.ensureUnusedCapacity(2);
clang_argv.appendAssumeCapacity("-isysroot");
clang_argv.appendAssumeCapacity(sdk_path);
break :outer true;
} else break :outer false;
} else false;
- try clang_argv.ensureCapacity(clang_argv.items.len + paths.include_dirs.items.len * 2);
+ try clang_argv.ensureUnusedCapacity(paths.include_dirs.items.len * 2);
const isystem_flag = if (has_sysroot) "-iwithsysroot" else "-isystem";
for (paths.include_dirs.items) |include_dir| {
clang_argv.appendAssumeCapacity(isystem_flag);
clang_argv.appendAssumeCapacity(include_dir);
}
- try clang_argv.ensureCapacity(clang_argv.items.len + paths.framework_dirs.items.len * 2);
- try framework_dirs.ensureCapacity(framework_dirs.items.len + paths.framework_dirs.items.len);
+ try clang_argv.ensureUnusedCapacity(paths.framework_dirs.items.len * 2);
+ try framework_dirs.ensureUnusedCapacity(paths.framework_dirs.items.len);
const iframework_flag = if (has_sysroot) "-iframeworkwithsysroot" else "-iframework";
for (paths.framework_dirs.items) |framework_dir| {
clang_argv.appendAssumeCapacity(iframework_flag);
@@ -2761,7 +2789,7 @@ pub fn cmdInit(
fatal("unable to read template file 'build.zig': {s}", .{@errorName(err)});
};
var modified_build_zig_contents = std.ArrayList(u8).init(arena);
- try modified_build_zig_contents.ensureCapacity(build_zig_contents.len);
+ try modified_build_zig_contents.ensureTotalCapacity(build_zig_contents.len);
for (build_zig_contents) |c| {
if (c == '$') {
try modified_build_zig_contents.appendSlice(cwd_basename);
@@ -2778,6 +2806,12 @@ pub fn cmdInit(
error.FileNotFound => {},
else => fatal("unable to test existence of build.zig: {s}\n", .{@errorName(err)}),
}
+ if (fs.cwd().access("src" ++ s ++ "main.zig", .{})) |_| {
+ fatal("existing src" ++ s ++ "main.zig file would be overwritten", .{});
+ } else |err| switch (err) {
+ error.FileNotFound => {},
+ else => fatal("unable to test existence of src" ++ s ++ "main.zig: {s}\n", .{@errorName(err)}),
+ }
var src_dir = try fs.cwd().makeOpenPath("src", .{});
defer src_dir.close();
@@ -3442,7 +3476,7 @@ fn fmtPathFile(
// As a heuristic, we make enough capacity for the same as the input source.
fmt.out_buffer.shrinkRetainingCapacity(0);
- try fmt.out_buffer.ensureCapacity(source_code.len);
+ try fmt.out_buffer.ensureTotalCapacity(source_code.len);
try tree.renderToArrayList(&fmt.out_buffer);
if (mem.eql(u8, fmt.out_buffer.items, source_code))
@@ -3663,6 +3697,7 @@ pub const ClangArgIterator = struct {
no_red_zone,
strip,
exec_model,
+ emit_llvm,
};
const Args = struct {
@@ -4109,7 +4144,7 @@ pub fn cmdAstCheck(
// zig fmt: on
}
- return Zir.renderAsTextToFile(gpa, &file, io.getStdOut());
+ return @import("print_zir.zig").renderAsTextToFile(gpa, &file, io.getStdOut());
}
/// This is only enabled for debug builds.
diff --git a/src/mingw.zig b/src/mingw.zig
index 587f019270..7771065a5a 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -312,7 +312,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
if (try man.hit()) {
const digest = man.final();
- try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
+ try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
comp.crt_files.putAssumeCapacityNoClobber(final_lib_basename, .{
.full_object_path = try comp.global_cache_directory.join(comp.gpa, &[_][]const u8{
"o", &digest, final_lib_basename,
@@ -857,6 +857,7 @@ const mingwex_generic_src = [_][]const u8{
"stdio" ++ path.sep_str ++ "fopen64.c",
"stdio" ++ path.sep_str ++ "fseeko32.c",
"stdio" ++ path.sep_str ++ "fseeko64.c",
+ "stdio" ++ path.sep_str ++ "fseeki64.c",
"stdio" ++ path.sep_str ++ "fsetpos64.c",
"stdio" ++ path.sep_str ++ "ftello.c",
"stdio" ++ path.sep_str ++ "ftello64.c",
diff --git a/src/musl.zig b/src/musl.zig
index 7dbaf3ba3f..3b5915719b 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -112,7 +112,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
var source_table = std.StringArrayHashMap(Ext).init(comp.gpa);
defer source_table.deinit();
- try source_table.ensureCapacity(compat_time32_files.len + src_files.len);
+ try source_table.ensureTotalCapacity(compat_time32_files.len + src_files.len);
for (src_files) |src_file| {
try addSrcFile(arena, &source_table, src_file);
@@ -231,7 +231,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
try sub_compilation.updateSubCompilation();
- try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
+ try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
const basename = try comp.gpa.dupe(u8, "libc.so");
errdefer comp.gpa.free(basename);
diff --git a/src/print_air.zig b/src/print_air.zig
index 11cf1b7baa..dda3b4458b 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -104,12 +104,16 @@ const Writer = struct {
.add,
.addwrap,
+ .add_sat,
.sub,
.subwrap,
+ .sub_sat,
.mul,
.mulwrap,
+ .mul_sat,
.div,
.rem,
+ .mod,
.ptr_add,
.ptr_sub,
.bit_and,
@@ -129,7 +133,10 @@ const Writer = struct {
.ptr_elem_val,
.ptr_ptr_elem_val,
.shl,
+ .shl_exact,
+ .shl_sat,
.shr,
+ .set_union_tag,
=> try w.writeBinOp(s, inst),
.is_null,
@@ -156,7 +163,8 @@ const Writer = struct {
.not,
.bitcast,
.load,
- .floatcast,
+ .fptrunc,
+ .fpext,
.intcast,
.trunc,
.optional_payload,
@@ -175,6 +183,11 @@ const Writer = struct {
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.array_to_slice,
+ .int_to_float,
+ .float_to_int,
+ .get_union_tag,
+ .clz,
+ .ctz,
=> try w.writeTyOp(s, inst),
.block,
@@ -192,6 +205,15 @@ const Writer = struct {
.cond_br => try w.writeCondBr(s, inst),
.switch_br => try w.writeSwitchBr(s, inst),
.cmpxchg_weak, .cmpxchg_strong => try w.writeCmpxchg(s, inst),
+ .fence => try w.writeFence(s, inst),
+ .atomic_load => try w.writeAtomicLoad(s, inst),
+ .atomic_store_unordered => try w.writeAtomicStore(s, inst, .Unordered),
+ .atomic_store_monotonic => try w.writeAtomicStore(s, inst, .Monotonic),
+ .atomic_store_release => try w.writeAtomicStore(s, inst, .Release),
+ .atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .SeqCst),
+ .atomic_rmw => try w.writeAtomicRmw(s, inst),
+ .memcpy => try w.writeMemcpy(s, inst),
+ .memset => try w.writeMemset(s, inst),
}
}
@@ -276,6 +298,64 @@ const Writer = struct {
});
}
+ fn writeFence(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const atomic_order = w.air.instructions.items(.data)[inst].fence;
+
+ try s.print("{s}", .{@tagName(atomic_order)});
+ }
+
+ fn writeAtomicLoad(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const atomic_load = w.air.instructions.items(.data)[inst].atomic_load;
+
+ try w.writeOperand(s, inst, 0, atomic_load.ptr);
+ try s.print(", {s}", .{@tagName(atomic_load.order)});
+ }
+
+ fn writeAtomicStore(
+ w: *Writer,
+ s: anytype,
+ inst: Air.Inst.Index,
+ order: std.builtin.AtomicOrder,
+ ) @TypeOf(s).Error!void {
+ const bin_op = w.air.instructions.items(.data)[inst].bin_op;
+ try w.writeOperand(s, inst, 0, bin_op.lhs);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 1, bin_op.rhs);
+ try s.print(", {s}", .{@tagName(order)});
+ }
+
+ fn writeAtomicRmw(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const pl_op = w.air.instructions.items(.data)[inst].pl_op;
+ const extra = w.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+
+ try w.writeOperand(s, inst, 0, pl_op.operand);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 1, extra.operand);
+ try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) });
+ }
+
+ fn writeMemset(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const pl_op = w.air.instructions.items(.data)[inst].pl_op;
+ const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
+
+ try w.writeOperand(s, inst, 0, pl_op.operand);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 1, extra.lhs);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 2, extra.rhs);
+ }
+
+ fn writeMemcpy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const pl_op = w.air.instructions.items(.data)[inst].pl_op;
+ const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
+
+ try w.writeOperand(s, inst, 0, pl_op.operand);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 1, extra.lhs);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 2, extra.rhs);
+ }
+
fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const val = w.air.values[ty_pl.payload];
diff --git a/src/print_zir.zig b/src/print_zir.zig
new file mode 100644
index 0000000000..c53c92f6bf
--- /dev/null
+++ b/src/print_zir.zig
@@ -0,0 +1,2040 @@
+const std = @import("std");
+const mem = std.mem;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const Ast = std.zig.Ast;
+
+const Zir = @import("Zir.zig");
+const Module = @import("Module.zig");
+const LazySrcLoc = Module.LazySrcLoc;
+
+/// Write human-readable, debug formatted ZIR code to a file.
+pub fn renderAsTextToFile(
+ gpa: *Allocator,
+ scope_file: *Module.Scope.File,
+ fs_file: std.fs.File,
+) !void {
+ var arena = std.heap.ArenaAllocator.init(gpa);
+ defer arena.deinit();
+
+ var writer: Writer = .{
+ .gpa = gpa,
+ .arena = &arena.allocator,
+ .file = scope_file,
+ .code = scope_file.zir,
+ .indent = 0,
+ .parent_decl_node = 0,
+ .recurse_decls = true,
+ .recurse_blocks = true,
+ };
+
+ var raw_stream = std.io.bufferedWriter(fs_file.writer());
+ const stream = raw_stream.writer();
+
+ const main_struct_inst = Zir.main_struct_inst;
+ try stream.print("%{d} ", .{main_struct_inst});
+ try writer.writeInstToStream(stream, main_struct_inst);
+ try stream.writeAll("\n");
+ const imports_index = scope_file.zir.extra[@enumToInt(Zir.ExtraIndex.imports)];
+ if (imports_index != 0) {
+ try stream.writeAll("Imports:\n");
+
+ const extra = scope_file.zir.extraData(Zir.Inst.Imports, imports_index);
+ var import_i: u32 = 0;
+ var extra_index = extra.end;
+
+ while (import_i < extra.data.imports_len) : (import_i += 1) {
+ const item = scope_file.zir.extraData(Zir.Inst.Imports.Item, extra_index);
+ extra_index = item.end;
+
+ const src: LazySrcLoc = .{ .token_abs = item.data.token };
+ const import_path = scope_file.zir.nullTerminatedString(item.data.name);
+ try stream.print(" @import(\"{}\") ", .{
+ std.zig.fmtEscapes(import_path),
+ });
+ try writer.writeSrc(stream, src);
+ try stream.writeAll("\n");
+ }
+ }
+
+ try raw_stream.flush();
+}
+
+pub fn renderInstructionContext(
+ gpa: *Allocator,
+ block: []const Zir.Inst.Index,
+ block_index: usize,
+ scope_file: *Module.Scope.File,
+ parent_decl_node: Ast.Node.Index,
+ indent: u32,
+ stream: anytype,
+) !void {
+ var arena = std.heap.ArenaAllocator.init(gpa);
+ defer arena.deinit();
+
+ var writer: Writer = .{
+ .gpa = gpa,
+ .arena = &arena.allocator,
+ .file = scope_file,
+ .code = scope_file.zir,
+ .indent = if (indent < 2) 2 else indent,
+ .parent_decl_node = parent_decl_node,
+ .recurse_decls = false,
+ .recurse_blocks = true,
+ };
+
+ try writer.writeBody(stream, block[0..block_index]);
+ try stream.writeByteNTimes(' ', writer.indent - 2);
+ try stream.print("> %{d} ", .{block[block_index]});
+ try writer.writeInstToStream(stream, block[block_index]);
+ try stream.writeByte('\n');
+ if (block_index + 1 < block.len) {
+ try writer.writeBody(stream, block[block_index + 1 ..]);
+ }
+}
+
+pub fn renderSingleInstruction(
+ gpa: *Allocator,
+ inst: Zir.Inst.Index,
+ scope_file: *Module.Scope.File,
+ parent_decl_node: Ast.Node.Index,
+ indent: u32,
+ stream: anytype,
+) !void {
+ var arena = std.heap.ArenaAllocator.init(gpa);
+ defer arena.deinit();
+
+ var writer: Writer = .{
+ .gpa = gpa,
+ .arena = &arena.allocator,
+ .file = scope_file,
+ .code = scope_file.zir,
+ .indent = indent,
+ .parent_decl_node = parent_decl_node,
+ .recurse_decls = false,
+ .recurse_blocks = false,
+ };
+
+ try stream.print("%{d} ", .{inst});
+ try writer.writeInstToStream(stream, inst);
+}
+
+const Writer = struct {
+ gpa: *Allocator,
+ arena: *Allocator,
+ file: *Module.Scope.File,
+ code: Zir,
+ indent: u32,
+ parent_decl_node: Ast.Node.Index,
+ recurse_decls: bool,
+ recurse_blocks: bool,
+
+ fn relativeToNodeIndex(self: *Writer, offset: i32) Ast.Node.Index {
+ return @bitCast(Ast.Node.Index, offset + @bitCast(i32, self.parent_decl_node));
+ }
+
+ fn writeInstToStream(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
+ const tags = self.code.instructions.items(.tag);
+ const tag = tags[inst];
+ try stream.print("= {s}(", .{@tagName(tags[inst])});
+ switch (tag) {
+ .array_type,
+ .as,
+ .coerce_result_ptr,
+ .elem_ptr,
+ .elem_val,
+ .store,
+ .store_to_block_ptr,
+ .store_to_inferred_ptr,
+ .field_ptr_type,
+ => try self.writeBin(stream, inst),
+
+ .alloc,
+ .alloc_mut,
+ .alloc_comptime,
+ .indexable_ptr_len,
+ .anyframe_type,
+ .bit_not,
+ .bool_not,
+ .negate,
+ .negate_wrap,
+ .load,
+ .ensure_result_used,
+ .ensure_result_non_error,
+ .ret_node,
+ .ret_load,
+ .resolve_inferred_alloc,
+ .optional_type,
+ .optional_payload_safe,
+ .optional_payload_unsafe,
+ .optional_payload_safe_ptr,
+ .optional_payload_unsafe_ptr,
+ .err_union_payload_safe,
+ .err_union_payload_unsafe,
+ .err_union_payload_safe_ptr,
+ .err_union_payload_unsafe_ptr,
+ .err_union_code,
+ .err_union_code_ptr,
+ .is_non_null,
+ .is_non_null_ptr,
+ .is_non_err,
+ .is_non_err_ptr,
+ .typeof,
+ .typeof_elem,
+ .struct_init_empty,
+ .type_info,
+ .size_of,
+ .bit_size_of,
+ .typeof_log2_int_type,
+ .log2_int_type,
+ .ptr_to_int,
+ .error_to_int,
+ .int_to_error,
+ .compile_error,
+ .set_eval_branch_quota,
+ .enum_to_int,
+ .align_of,
+ .bool_to_int,
+ .embed_file,
+ .error_name,
+ .panic,
+ .set_align_stack,
+ .set_cold,
+ .set_float_mode,
+ .set_runtime_safety,
+ .sqrt,
+ .sin,
+ .cos,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .fabs,
+ .floor,
+ .ceil,
+ .trunc,
+ .round,
+ .tag_name,
+ .reify,
+ .type_name,
+ .frame_type,
+ .frame_size,
+ .clz,
+ .ctz,
+ .pop_count,
+ .byte_swap,
+ .bit_reverse,
+ .elem_type,
+ .@"resume",
+ .@"await",
+ .await_nosuspend,
+ .fence,
+ => try self.writeUnNode(stream, inst),
+
+ .ref,
+ .ret_coerce,
+ .ensure_err_payload_void,
+ .closure_capture,
+ => try self.writeUnTok(stream, inst),
+
+ .bool_br_and,
+ .bool_br_or,
+ => try self.writeBoolBr(stream, inst),
+
+ .array_type_sentinel => try self.writeArrayTypeSentinel(stream, inst),
+ .ptr_type_simple => try self.writePtrTypeSimple(stream, inst),
+ .ptr_type => try self.writePtrType(stream, inst),
+ .int => try self.writeInt(stream, inst),
+ .int_big => try self.writeIntBig(stream, inst),
+ .float => try self.writeFloat(stream, inst),
+ .float128 => try self.writeFloat128(stream, inst),
+ .str => try self.writeStr(stream, inst),
+ .int_type => try self.writeIntType(stream, inst),
+
+ .@"break",
+ .break_inline,
+ => try self.writeBreak(stream, inst),
+ .array_init,
+ .array_init_ref,
+ => try self.writeArrayInit(stream, inst),
+
+ .elem_ptr_node,
+ .elem_val_node,
+ .slice_start,
+ .slice_end,
+ .slice_sentinel,
+ .array_init_anon,
+ .array_init_anon_ref,
+ .union_init_ptr,
+ .shuffle,
+ .select,
+ .mul_add,
+ .builtin_call,
+ .field_parent_ptr,
+ .builtin_async_call,
+ => try self.writePlNode(stream, inst),
+
+ .struct_init,
+ .struct_init_ref,
+ => try self.writeStructInit(stream, inst),
+
+ .cmpxchg_strong, .cmpxchg_weak => try self.writeCmpxchg(stream, inst),
+ .atomic_store => try self.writeAtomicStore(stream, inst),
+ .atomic_rmw => try self.writeAtomicRmw(stream, inst),
+ .memcpy => try self.writeMemcpy(stream, inst),
+ .memset => try self.writeMemset(stream, inst),
+
+ .struct_init_anon,
+ .struct_init_anon_ref,
+ => try self.writeStructInitAnon(stream, inst),
+
+ .field_type => try self.writeFieldType(stream, inst),
+ .field_type_ref => try self.writeFieldTypeRef(stream, inst),
+
+ .add,
+ .addwrap,
+ .add_sat,
+ .array_cat,
+ .array_mul,
+ .mul,
+ .mulwrap,
+ .mul_sat,
+ .sub,
+ .subwrap,
+ .sub_sat,
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_eq,
+ .cmp_gte,
+ .cmp_gt,
+ .cmp_neq,
+ .div,
+ .has_decl,
+ .has_field,
+ .mod_rem,
+ .shl,
+ .shl_exact,
+ .shl_sat,
+ .shr,
+ .shr_exact,
+ .xor,
+ .store_node,
+ .error_union_type,
+ .merge_error_sets,
+ .bit_and,
+ .bit_or,
+ .float_to_int,
+ .int_to_float,
+ .int_to_ptr,
+ .int_to_enum,
+ .float_cast,
+ .int_cast,
+ .err_set_cast,
+ .ptr_cast,
+ .truncate,
+ .align_cast,
+ .div_exact,
+ .div_floor,
+ .div_trunc,
+ .mod,
+ .rem,
+ .bit_offset_of,
+ .offset_of,
+ .splat,
+ .reduce,
+ .atomic_load,
+ .bitcast,
+ .bitcast_result_ptr,
+ .vector_type,
+ .maximum,
+ .minimum,
+ => try self.writePlNodeBin(stream, inst),
+
+ .@"export" => try self.writePlNodeExport(stream, inst),
+ .export_value => try self.writePlNodeExportValue(stream, inst),
+
+ .call => try self.writePlNodeCall(stream, inst),
+
+ .block,
+ .block_inline,
+ .suspend_block,
+ .loop,
+ .validate_struct_init_ptr,
+ .validate_array_init_ptr,
+ .c_import,
+ => try self.writePlNodeBlock(stream, inst),
+
+ .condbr,
+ .condbr_inline,
+ => try self.writePlNodeCondBr(stream, inst),
+
+ .error_set_decl => try self.writeErrorSetDecl(stream, inst, .parent),
+ .error_set_decl_anon => try self.writeErrorSetDecl(stream, inst, .anon),
+ .error_set_decl_func => try self.writeErrorSetDecl(stream, inst, .func),
+
+ .switch_block => try self.writePlNodeSwitchBr(stream, inst, .none),
+ .switch_block_else => try self.writePlNodeSwitchBr(stream, inst, .@"else"),
+ .switch_block_under => try self.writePlNodeSwitchBr(stream, inst, .under),
+ .switch_block_ref => try self.writePlNodeSwitchBr(stream, inst, .none),
+ .switch_block_ref_else => try self.writePlNodeSwitchBr(stream, inst, .@"else"),
+ .switch_block_ref_under => try self.writePlNodeSwitchBr(stream, inst, .under),
+
+ .switch_block_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .none),
+ .switch_block_else_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .@"else"),
+ .switch_block_under_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .under),
+ .switch_block_ref_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .none),
+ .switch_block_ref_else_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .@"else"),
+ .switch_block_ref_under_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .under),
+
+ .field_ptr,
+ .field_val,
+ .field_call_bind,
+ => try self.writePlNodeField(stream, inst),
+
+ .field_ptr_named,
+ .field_val_named,
+ .field_call_bind_named,
+ => try self.writePlNodeFieldNamed(stream, inst),
+
+ .as_node => try self.writeAs(stream, inst),
+
+ .breakpoint,
+ .repeat,
+ .repeat_inline,
+ .alloc_inferred,
+ .alloc_inferred_mut,
+ .alloc_inferred_comptime,
+ => try self.writeNode(stream, inst),
+
+ .error_value,
+ .enum_literal,
+ .decl_ref,
+ .decl_val,
+ .import,
+ .ret_err_value,
+ .ret_err_value_code,
+ .param_anytype,
+ .param_anytype_comptime,
+ => try self.writeStrTok(stream, inst),
+
+ .param, .param_comptime => try self.writeParam(stream, inst),
+
+ .func => try self.writeFunc(stream, inst, false),
+ .func_inferred => try self.writeFunc(stream, inst, true),
+
+ .@"unreachable" => try self.writeUnreachable(stream, inst),
+
+ .switch_capture,
+ .switch_capture_ref,
+ .switch_capture_multi,
+ .switch_capture_multi_ref,
+ .switch_capture_else,
+ .switch_capture_else_ref,
+ => try self.writeSwitchCapture(stream, inst),
+
+ .dbg_stmt => try self.writeDbgStmt(stream, inst),
+
+ .closure_get => try self.writeInstNode(stream, inst),
+
+ .extended => try self.writeExtended(stream, inst),
+ }
+ }
+
+ fn writeExtended(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const extended = self.code.instructions.items(.data)[inst].extended;
+ try stream.print("{s}(", .{@tagName(extended.opcode)});
+ switch (extended.opcode) {
+ .ret_ptr,
+ .ret_type,
+ .this,
+ .ret_addr,
+ .error_return_trace,
+ .frame,
+ .frame_address,
+ .builtin_src,
+ => try self.writeExtNode(stream, extended),
+
+ .@"asm" => try self.writeAsm(stream, extended),
+ .func => try self.writeFuncExtended(stream, extended),
+ .variable => try self.writeVarExtended(stream, extended),
+ .alloc => try self.writeAllocExtended(stream, extended),
+
+ .compile_log,
+ .typeof_peer,
+ => try self.writeNodeMultiOp(stream, extended),
+
+ .add_with_overflow,
+ .sub_with_overflow,
+ .mul_with_overflow,
+ .shl_with_overflow,
+ => try self.writeOverflowArithmetic(stream, extended),
+
+ .struct_decl => try self.writeStructDecl(stream, extended),
+ .union_decl => try self.writeUnionDecl(stream, extended),
+ .enum_decl => try self.writeEnumDecl(stream, extended),
+ .opaque_decl => try self.writeOpaqueDecl(stream, extended),
+
+ .c_undef, .c_include => {
+ const inst_data = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
+ try self.writeInstRef(stream, inst_data.operand);
+ try stream.writeAll(") ");
+ },
+
+ .c_define => {
+ const inst_data = self.code.extraData(Zir.Inst.BinNode, extended.operand).data;
+ try self.writeInstRef(stream, inst_data.lhs);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, inst_data.rhs);
+ try stream.writeByte(')');
+ },
+
+ .builtin_extern,
+ .wasm_memory_size,
+ .wasm_memory_grow,
+ => try stream.writeAll("TODO))"),
+ }
+ }
+
+ fn writeExtNode(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
+ try stream.writeAll(")) ");
+ try self.writeSrc(stream, src);
+ }
+
+ fn writeBin(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].bin;
+ try self.writeInstRef(stream, inst_data.lhs);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, inst_data.rhs);
+ try stream.writeByte(')');
+ }
+
+ fn writeUnNode(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
+ const inst_data = self.code.instructions.items(.data)[inst].un_node;
+ try self.writeInstRef(stream, inst_data.operand);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeUnTok(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
+ const inst_data = self.code.instructions.items(.data)[inst].un_tok;
+ try self.writeInstRef(stream, inst_data.operand);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeArrayTypeSentinel(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
+ const inst_data = self.code.instructions.items(.data)[inst].array_type_sentinel;
+ _ = inst_data;
+ try stream.writeAll("TODO)");
+ }
+
+ fn writePtrTypeSimple(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
+ const inst_data = self.code.instructions.items(.data)[inst].ptr_type_simple;
+ const str_allowzero = if (inst_data.is_allowzero) "allowzero, " else "";
+ const str_const = if (!inst_data.is_mutable) "const, " else "";
+ const str_volatile = if (inst_data.is_volatile) "volatile, " else "";
+ try self.writeInstRef(stream, inst_data.elem_type);
+ try stream.print(", {s}{s}{s}{s})", .{
+ str_allowzero,
+ str_const,
+ str_volatile,
+ @tagName(inst_data.size),
+ });
+ }
+
+ fn writePtrType(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
+ const inst_data = self.code.instructions.items(.data)[inst].ptr_type;
+ _ = inst_data;
+ try stream.writeAll("TODO)");
+ }
+
+ fn writeInt(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].int;
+ try stream.print("{d})", .{inst_data});
+ }
+
+ fn writeIntBig(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].str;
+ const byte_count = inst_data.len * @sizeOf(std.math.big.Limb);
+ const limb_bytes = self.code.string_bytes[inst_data.start..][0..byte_count];
+ // limb_bytes is not aligned properly; we must allocate and copy the bytes
+ // in order to accomplish this.
+ const limbs = try self.gpa.alloc(std.math.big.Limb, inst_data.len);
+ defer self.gpa.free(limbs);
+
+ mem.copy(u8, mem.sliceAsBytes(limbs), limb_bytes);
+ const big_int: std.math.big.int.Const = .{
+ .limbs = limbs,
+ .positive = true,
+ };
+ const as_string = try big_int.toStringAlloc(self.gpa, 10, .lower);
+ defer self.gpa.free(as_string);
+ try stream.print("{s})", .{as_string});
+ }
+
+ fn writeFloat(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const number = self.code.instructions.items(.data)[inst].float;
+ try stream.print("{d})", .{number});
+ }
+
+ fn writeFloat128(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
+ const src = inst_data.src();
+ const number = extra.get();
+ // TODO improve std.format to be able to print f128 values
+ try stream.print("{d}) ", .{@floatCast(f64, number)});
+ try self.writeSrc(stream, src);
+ }
+
+ fn writeStr(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
+ const inst_data = self.code.instructions.items(.data)[inst].str;
+ const str = inst_data.get(self.code);
+ try stream.print("\"{}\")", .{std.zig.fmtEscapes(str)});
+ }
+
+ fn writePlNode(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ try stream.writeAll("TODO) ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeParam(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_tok;
+ const extra = self.code.extraData(Zir.Inst.Param, inst_data.payload_index);
+ const body = self.code.extra[extra.end..][0..extra.data.body_len];
+ try stream.print("\"{}\", ", .{
+ std.zig.fmtEscapes(self.code.nullTerminatedString(extra.data.name)),
+ });
+ try self.writeBracedBody(stream, body);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writePlNodeBin(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ try self.writeInstRef(stream, extra.lhs);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.rhs);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writePlNodeExport(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Export, inst_data.payload_index).data;
+ const decl_name = self.code.nullTerminatedString(extra.decl_name);
+
+ try self.writeInstRef(stream, extra.namespace);
+ try stream.print(", {}, ", .{std.zig.fmtId(decl_name)});
+ try self.writeInstRef(stream, extra.options);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writePlNodeExportValue(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data;
+
+ try self.writeInstRef(stream, extra.operand);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.options);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeStructInit(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.StructInit, inst_data.payload_index);
+ var field_i: u32 = 0;
+ var extra_index = extra.end;
+
+ while (field_i < extra.data.fields_len) : (field_i += 1) {
+ const item = self.code.extraData(Zir.Inst.StructInit.Item, extra_index);
+ extra_index = item.end;
+
+ if (field_i != 0) {
+ try stream.writeAll(", [");
+ } else {
+ try stream.writeAll("[");
+ }
+ try self.writeInstIndex(stream, item.data.field_type);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, item.data.init);
+ try stream.writeAll("]");
+ }
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeCmpxchg(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Cmpxchg, inst_data.payload_index).data;
+
+ try self.writeInstRef(stream, extra.ptr);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.expected_value);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.new_value);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.success_order);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.failure_order);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeAtomicStore(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data;
+
+ try self.writeInstRef(stream, extra.ptr);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.operand);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.ordering);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeAtomicRmw(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
+
+ try self.writeInstRef(stream, extra.ptr);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.operation);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.operand);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.ordering);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeMemcpy(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data;
+
+ try self.writeInstRef(stream, extra.dest);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.source);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.byte_count);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeMemset(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data;
+
+ try self.writeInstRef(stream, extra.dest);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.byte);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.byte_count);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeStructInitAnon(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index);
+ var field_i: u32 = 0;
+ var extra_index = extra.end;
+
+ while (field_i < extra.data.fields_len) : (field_i += 1) {
+ const item = self.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
+ extra_index = item.end;
+
+ const field_name = self.code.nullTerminatedString(item.data.field_name);
+
+ const prefix = if (field_i != 0) ", [" else "[";
+ try stream.print("{s}[{s}=", .{ prefix, field_name });
+ try self.writeInstRef(stream, item.data.init);
+ try stream.writeAll("]");
+ }
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeFieldType(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data;
+ try self.writeInstRef(stream, extra.container_type);
+ const field_name = self.code.nullTerminatedString(extra.name_start);
+ try stream.print(", {s}) ", .{field_name});
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeFieldTypeRef(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.FieldTypeRef, inst_data.payload_index).data;
+ try self.writeInstRef(stream, extra.container_type);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.field_name);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeNodeMultiOp(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const extra = self.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
+ const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
+ const operands = self.code.refSlice(extra.end, extended.small);
+
+ for (operands) |operand, i| {
+ if (i != 0) try stream.writeAll(", ");
+ try self.writeInstRef(stream, operand);
+ }
+ try stream.writeAll(")) ");
+ try self.writeSrc(stream, src);
+ }
+
+ fn writeInstNode(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
+ const inst_data = self.code.instructions.items(.data)[inst].inst_node;
+ try self.writeInstIndex(stream, inst_data.inst);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders an extended `asm` instruction: volatility flag, the escaped
+ /// asm source string, then the output, input, and clobber lists.
+ /// `extended.small` packs, from the low bits up: outputs_len (u5),
+ /// inputs_len (u5), clobbers_len (u5), and an is_volatile bit (u1),
+ /// as decoded by the @truncate/shift sequence below.
+ fn writeAsm(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const extra = self.code.extraData(Zir.Inst.Asm, extended.operand);
+ const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
+ const outputs_len = @truncate(u5, extended.small);
+ const inputs_len = @truncate(u5, extended.small >> 5);
+ const clobbers_len = @truncate(u5, extended.small >> 10);
+ const is_volatile = @truncate(u1, extended.small >> 15) != 0;
+ const asm_source = self.code.nullTerminatedString(extra.data.asm_source);
+
+ try self.writeFlag(stream, "volatile, ", is_volatile);
+ // NOTE(review): the print above already ends in ", ", so the extra
+ // writeAll(", ") emits a doubled separator — confirm this is intended.
+ try stream.print("\"{}\", ", .{std.zig.fmtEscapes(asm_source)});
+ try stream.writeAll(", ");
+
+ // Outputs, inputs, and clobbers follow the fixed payload in `extra`;
+ // extra_i walks them in order. One bit of output_type_bits per output
+ // says whether the operand is a result type ("->") rather than a value.
+ var extra_i: usize = extra.end;
+ var output_type_bits = extra.data.output_type_bits;
+ {
+ var i: usize = 0;
+ while (i < outputs_len) : (i += 1) {
+ const output = self.code.extraData(Zir.Inst.Asm.Output, extra_i);
+ extra_i = output.end;
+
+ const is_type = @truncate(u1, output_type_bits) != 0;
+ output_type_bits >>= 1;
+
+ const name = self.code.nullTerminatedString(output.data.name);
+ const constraint = self.code.nullTerminatedString(output.data.constraint);
+ try stream.print("output({}, \"{}\", ", .{
+ std.zig.fmtId(name), std.zig.fmtEscapes(constraint),
+ });
+ try self.writeFlag(stream, "->", is_type);
+ try self.writeInstRef(stream, output.data.operand);
+ try stream.writeAll(")");
+ // NOTE(review): between outputs this emits ")" then "), ", i.e. a
+ // doubled close-paren — verify against the inputs loop below, which
+ // uses a plain ", " separator.
+ if (i + 1 < outputs_len) {
+ try stream.writeAll("), ");
+ }
+ }
+ }
+ {
+ var i: usize = 0;
+ while (i < inputs_len) : (i += 1) {
+ const input = self.code.extraData(Zir.Inst.Asm.Input, extra_i);
+ extra_i = input.end;
+
+ const name = self.code.nullTerminatedString(input.data.name);
+ const constraint = self.code.nullTerminatedString(input.data.constraint);
+ try stream.print("input({}, \"{}\", ", .{
+ std.zig.fmtId(name), std.zig.fmtEscapes(constraint),
+ });
+ try self.writeInstRef(stream, input.data.operand);
+ try stream.writeAll(")");
+ if (i + 1 < inputs_len) {
+ try stream.writeAll(", ");
+ }
+ }
+ }
+ {
+ // Clobbers are stored as bare string-table indices, one u32 each.
+ var i: usize = 0;
+ while (i < clobbers_len) : (i += 1) {
+ const str_index = self.code.extra[extra_i];
+ extra_i += 1;
+ const clobber = self.code.nullTerminatedString(str_index);
+ try stream.print("{}", .{std.zig.fmtId(clobber)});
+ if (i + 1 < clobbers_len) {
+ try stream.writeAll(", ");
+ }
+ }
+ }
+ try stream.writeAll(")) ");
+ try self.writeSrc(stream, src);
+ }
+
+ /// Renders an extended overflow-arithmetic instruction: the lhs and rhs
+ /// operand refs and the result-pointer ref, then the source location.
+ fn writeOverflowArithmetic(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const extra = self.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data;
+ const src: LazySrcLoc = .{ .node_offset = extra.node };
+
+ try self.writeInstRef(stream, extra.lhs);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.rhs);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.ptr);
+ try stream.writeAll(")) ");
+ try self.writeSrc(stream, src);
+ }
+
+ /// Renders a call instruction: an optional "nodiscard " prefix when the
+ /// result must be used, the call modifier tag, the callee ref, and the
+ /// bracketed argument list, followed by the source location.
+ fn writePlNodeCall(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Call, inst_data.payload_index);
+ const args = self.code.refSlice(extra.end, extra.data.flags.args_len);
+
+ if (extra.data.flags.ensure_result_used) {
+ try stream.writeAll("nodiscard ");
+ }
+ // The modifier is stored packed in the flags; decode it back to the enum.
+ try stream.print(".{s}, ", .{@tagName(@intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier))});
+ try self.writeInstRef(stream, extra.data.callee);
+ try stream.writeAll(", [");
+ for (args) |arg, i| {
+ if (i != 0) try stream.writeAll(", ");
+ try self.writeInstRef(stream, arg);
+ }
+ try stream.writeAll("]) ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders a block instruction: the braced body (via the src-less helper
+ /// below) followed by the source location.
+ fn writePlNodeBlock(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ try self.writePlNodeBlockWithoutSrc(stream, inst);
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders only the braced body of a block instruction, with no trailing
+ /// source location — also reused by writeDecls, which prints the source
+ /// location itself after adjusting parent_decl_node.
+ fn writePlNodeBlockWithoutSrc(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Block, inst_data.payload_index);
+ const body = self.code.extra[extra.end..][0..extra.data.body_len];
+ try self.writeBracedBody(stream, body);
+ try stream.writeAll(") ");
+ }
+
+ /// Renders a conditional branch: the condition ref, then the braced
+ /// then-body and else-body (stored back-to-back in `extra`), then the
+ /// source location.
+ fn writePlNodeCondBr(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
+ const then_body = self.code.extra[extra.end..][0..extra.data.then_body_len];
+ // The else body immediately follows the then body in the extra array.
+ const else_body = self.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
+ try self.writeInstRef(stream, extra.data.condition);
+ try stream.writeAll(", ");
+ try self.writeBracedBody(stream, then_body);
+ try stream.writeAll(", ");
+ try self.writeBracedBody(stream, else_body);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders an extended struct-declaration instruction. The payload is a
+ /// variable-length sequence of u32s whose optional leading entries
+ /// (src_node, body_len, fields_len, decls_len) are gated by the `small`
+ /// flag bits, followed by the decls, the type body, and the fields.
+ fn writeStructDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+
+ var extra_index: usize = extended.operand;
+
+ const src_node: ?i32 = if (small.has_src_node) blk: {
+ const src_node = @bitCast(i32, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk src_node;
+ } else null;
+
+ const body_len = if (small.has_body_len) blk: {
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk body_len;
+ } else 0;
+
+ const fields_len = if (small.has_fields_len) blk: {
+ const fields_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk fields_len;
+ } else 0;
+
+ const decls_len = if (small.has_decls_len) blk: {
+ const decls_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk decls_len;
+ } else 0;
+
+ try self.writeFlag(stream, "known_has_bits, ", small.known_has_bits);
+ try stream.print("{s}, {s}, ", .{
+ @tagName(small.name_strategy), @tagName(small.layout),
+ });
+
+ if (decls_len == 0) {
+ try stream.writeAll("{}, ");
+ } else {
+ try stream.writeAll("{\n");
+ self.indent += 2;
+ extra_index = try self.writeDecls(stream, decls_len, extra_index);
+ self.indent -= 2;
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("}, ");
+ }
+
+ const body = self.code.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
+ if (fields_len == 0) {
+ assert(body.len == 0);
+ try stream.writeAll("{}, {})");
+ } else {
+ // Source locations inside the type body are relative to this decl's
+ // node, so temporarily re-anchor parent_decl_node while printing.
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ try self.writeBracedDecl(stream, body);
+ try stream.writeAll(", {\n");
+
+ self.indent += 2;
+ // Field flags are packed 4 bits per field into u32 "bit bags" that
+ // precede the per-field data; 8 fields fit per u32.
+ const bits_per_field = 4;
+ const fields_per_u32 = 32 / bits_per_field;
+ const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
+ var bit_bag_index: usize = extra_index;
+ extra_index += bit_bags_count;
+ var cur_bit_bag: u32 = undefined;
+ var field_i: u32 = 0;
+ while (field_i < fields_len) : (field_i += 1) {
+ if (field_i % fields_per_u32 == 0) {
+ cur_bit_bag = self.code.extra[bit_bag_index];
+ bit_bag_index += 1;
+ }
+ // Per-field flag order: has_align, has_default, is_comptime, unused.
+ const has_align = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+ const has_default = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+ const is_comptime = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+ const unused = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+
+ _ = unused;
+
+ const field_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
+ extra_index += 1;
+ const field_type = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+
+ try stream.writeByteNTimes(' ', self.indent);
+ try self.writeFlag(stream, "comptime ", is_comptime);
+ try stream.print("{}: ", .{std.zig.fmtId(field_name)});
+ try self.writeInstRef(stream, field_type);
+
+ // Optional trailing refs, present only when flagged above.
+ if (has_align) {
+ const align_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+
+ try stream.writeAll(" align(");
+ try self.writeInstRef(stream, align_ref);
+ try stream.writeAll(")");
+ }
+ if (has_default) {
+ const default_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+
+ try stream.writeAll(" = ");
+ try self.writeInstRef(stream, default_ref);
+ }
+ try stream.writeAll(",\n");
+ }
+
+ self.parent_decl_node = prev_parent_decl_node;
+ self.indent -= 2;
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("})");
+ }
+ try self.writeSrcNode(stream, src_node);
+ }
+
+ /// Renders an extended union-declaration instruction. Layout mirrors
+ /// writeStructDecl, with an extra optional tag-type ref and a different
+ /// per-field flag set (has_type, has_align, has_value, unused).
+ fn writeUnionDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
+
+ var extra_index: usize = extended.operand;
+
+ const src_node: ?i32 = if (small.has_src_node) blk: {
+ const src_node = @bitCast(i32, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk src_node;
+ } else null;
+
+ const tag_type_ref = if (small.has_tag_type) blk: {
+ const tag_type_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk tag_type_ref;
+ } else .none;
+
+ const body_len = if (small.has_body_len) blk: {
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk body_len;
+ } else 0;
+
+ const fields_len = if (small.has_fields_len) blk: {
+ const fields_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk fields_len;
+ } else 0;
+
+ const decls_len = if (small.has_decls_len) blk: {
+ const decls_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk decls_len;
+ } else 0;
+
+ try stream.print("{s}, {s}, ", .{
+ @tagName(small.name_strategy), @tagName(small.layout),
+ });
+ try self.writeFlag(stream, "autoenum, ", small.auto_enum_tag);
+
+ if (decls_len == 0) {
+ try stream.writeAll("{}, ");
+ } else {
+ try stream.writeAll("{\n");
+ self.indent += 2;
+ extra_index = try self.writeDecls(stream, decls_len, extra_index);
+ self.indent -= 2;
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("}, ");
+ }
+
+ // Unlike structs, a union declaration always has at least one field.
+ assert(fields_len != 0);
+
+ if (tag_type_ref != .none) {
+ try self.writeInstRef(stream, tag_type_ref);
+ try stream.writeAll(", ");
+ }
+
+ const body = self.code.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
+ // Re-anchor source locations to this decl's node while printing bodies.
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ try self.writeBracedDecl(stream, body);
+ try stream.writeAll(", {\n");
+
+ self.indent += 2;
+ // Field flags are packed 4 bits per field into u32 "bit bags".
+ const bits_per_field = 4;
+ const fields_per_u32 = 32 / bits_per_field;
+ const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
+ const body_end = extra_index;
+ extra_index += bit_bags_count;
+ var bit_bag_index: usize = body_end;
+ var cur_bit_bag: u32 = undefined;
+ var field_i: u32 = 0;
+ while (field_i < fields_len) : (field_i += 1) {
+ if (field_i % fields_per_u32 == 0) {
+ cur_bit_bag = self.code.extra[bit_bag_index];
+ bit_bag_index += 1;
+ }
+ // Per-field flag order: has_type, has_align, has_value, unused.
+ const has_type = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+ const has_align = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+ const has_value = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+ const unused = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+
+ _ = unused;
+
+ const field_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
+ extra_index += 1;
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.print("{}", .{std.zig.fmtId(field_name)});
+
+ // Optional trailing refs, present only when flagged above.
+ if (has_type) {
+ const field_type = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+
+ try stream.writeAll(": ");
+ try self.writeInstRef(stream, field_type);
+ }
+ if (has_align) {
+ const align_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+
+ try stream.writeAll(" align(");
+ try self.writeInstRef(stream, align_ref);
+ try stream.writeAll(")");
+ }
+ if (has_value) {
+ const default_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+
+ try stream.writeAll(" = ");
+ try self.writeInstRef(stream, default_ref);
+ }
+ try stream.writeAll(",\n");
+ }
+
+ self.parent_decl_node = prev_parent_decl_node;
+ self.indent -= 2;
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("})");
+ try self.writeSrcNode(stream, src_node);
+ }
+
+ /// Renders `decls_len` container declarations starting at `extra_start`
+ /// and returns the extra-array index just past them. Each decl has four
+ /// flag bits (is_pub, is_exported, has_align, has_section_or_addrspace)
+ /// packed 8 decls per u32 "bit bag" ahead of the per-decl data: a
+ /// 4-u32 (16-byte) hash, line, name index, decl instruction index, and
+ /// optional align/section/addrspace refs.
+ fn writeDecls(self: *Writer, stream: anytype, decls_len: u32, extra_start: usize) !usize {
+ const parent_decl_node = self.parent_decl_node;
+ const bit_bags_count = std.math.divCeil(usize, decls_len, 8) catch unreachable;
+ var extra_index = extra_start + bit_bags_count;
+ var bit_bag_index: usize = extra_start;
+ var cur_bit_bag: u32 = undefined;
+ var decl_i: u32 = 0;
+ while (decl_i < decls_len) : (decl_i += 1) {
+ if (decl_i % 8 == 0) {
+ cur_bit_bag = self.code.extra[bit_bag_index];
+ bit_bag_index += 1;
+ }
+ const is_pub = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+ const is_exported = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+ const has_align = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+ const has_section_or_addrspace = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+
+ const sub_index = extra_index;
+
+ const hash_u32s = self.code.extra[extra_index..][0..4];
+ extra_index += 4;
+ const line = self.code.extra[extra_index];
+ extra_index += 1;
+ const decl_name_index = self.code.extra[extra_index];
+ extra_index += 1;
+ const decl_index = self.code.extra[extra_index];
+ extra_index += 1;
+ const align_inst: Zir.Inst.Ref = if (!has_align) .none else inst: {
+ const inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :inst inst;
+ };
+ // Section and addrspace share one presence flag: both refs are
+ // present, or neither is.
+ const section_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: {
+ const inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :inst inst;
+ };
+ const addrspace_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: {
+ const inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :inst inst;
+ };
+
+ const pub_str = if (is_pub) "pub " else "";
+ const hash_bytes = @bitCast([16]u8, hash_u32s.*);
+ try stream.writeByteNTimes(' ', self.indent);
+ // Name-index encoding: 0 means a `comptime` (or, when exported, a
+ // `usingnamespace`) decl; 1 means an unnamed `test`; an index whose
+ // string is empty means a named test (the name follows at index + 1);
+ // anything else is an ordinary named decl.
+ if (decl_name_index == 0) {
+ const name = if (is_exported) "usingnamespace" else "comptime";
+ try stream.writeAll(pub_str);
+ try stream.writeAll(name);
+ } else if (decl_name_index == 1) {
+ try stream.writeAll("test");
+ } else {
+ const raw_decl_name = self.code.nullTerminatedString(decl_name_index);
+ const decl_name = if (raw_decl_name.len == 0)
+ self.code.nullTerminatedString(decl_name_index + 1)
+ else
+ raw_decl_name;
+ const test_str = if (raw_decl_name.len == 0) "test " else "";
+ const export_str = if (is_exported) "export " else "";
+ try stream.print("[{d}] {s}{s}{s}{}", .{
+ sub_index, pub_str, test_str, export_str, std.zig.fmtId(decl_name),
+ });
+ if (align_inst != .none) {
+ try stream.writeAll(" align(");
+ try self.writeInstRef(stream, align_inst);
+ try stream.writeAll(")");
+ }
+ if (addrspace_inst != .none) {
+ try stream.writeAll(" addrspace(");
+ try self.writeInstRef(stream, addrspace_inst);
+ try stream.writeAll(")");
+ }
+ if (section_inst != .none) {
+ try stream.writeAll(" linksection(");
+ try self.writeInstRef(stream, section_inst);
+ try stream.writeAll(")");
+ }
+ }
+
+ // Optionally recurse into the decl's block body; otherwise elide it.
+ if (self.recurse_decls) {
+ const tag = self.code.instructions.items(.tag)[decl_index];
+ try stream.print(" line({d}) hash({}): %{d} = {s}(", .{
+ line, std.fmt.fmtSliceHexLower(&hash_bytes), decl_index, @tagName(tag),
+ });
+
+ // Anchor nested source locations to this decl's own node, and
+ // restore the parent anchor afterwards.
+ const decl_block_inst_data = self.code.instructions.items(.data)[decl_index].pl_node;
+ const sub_decl_node_off = decl_block_inst_data.src_node;
+ self.parent_decl_node = self.relativeToNodeIndex(sub_decl_node_off);
+ try self.writePlNodeBlockWithoutSrc(stream, decl_index);
+ self.parent_decl_node = parent_decl_node;
+ try self.writeSrc(stream, decl_block_inst_data.src());
+ try stream.writeAll("\n");
+ } else {
+ try stream.print(" line({d}) hash({}): %{d} = ...\n", .{
+ line, std.fmt.fmtSliceHexLower(&hash_bytes), decl_index,
+ });
+ }
+ }
+ return extra_index;
+ }
+
+ /// Renders an extended enum-declaration instruction. Layout mirrors
+ /// writeStructDecl, but each field carries a single flag bit
+ /// (has_tag_value), so 32 fields fit per bit-bag u32.
+ fn writeEnumDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small);
+ var extra_index: usize = extended.operand;
+
+ const src_node: ?i32 = if (small.has_src_node) blk: {
+ const src_node = @bitCast(i32, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk src_node;
+ } else null;
+
+ const tag_type_ref = if (small.has_tag_type) blk: {
+ const tag_type_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk tag_type_ref;
+ } else .none;
+
+ const body_len = if (small.has_body_len) blk: {
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk body_len;
+ } else 0;
+
+ const fields_len = if (small.has_fields_len) blk: {
+ const fields_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk fields_len;
+ } else 0;
+
+ const decls_len = if (small.has_decls_len) blk: {
+ const decls_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk decls_len;
+ } else 0;
+
+ try stream.print("{s}, ", .{@tagName(small.name_strategy)});
+ try self.writeFlag(stream, "nonexhaustive, ", small.nonexhaustive);
+
+ if (decls_len == 0) {
+ try stream.writeAll("{}, ");
+ } else {
+ try stream.writeAll("{\n");
+ self.indent += 2;
+ extra_index = try self.writeDecls(stream, decls_len, extra_index);
+ self.indent -= 2;
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("}, ");
+ }
+
+ if (tag_type_ref != .none) {
+ try self.writeInstRef(stream, tag_type_ref);
+ try stream.writeAll(", ");
+ }
+
+ const body = self.code.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
+ if (fields_len == 0) {
+ assert(body.len == 0);
+ try stream.writeAll("{}, {})");
+ } else {
+ // Re-anchor source locations to this decl's node while printing.
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ try self.writeBracedDecl(stream, body);
+ try stream.writeAll(", {\n");
+
+ self.indent += 2;
+ // One bit per field => 32 fields per bit-bag u32.
+ const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
+ const body_end = extra_index;
+ extra_index += bit_bags_count;
+ var bit_bag_index: usize = body_end;
+ var cur_bit_bag: u32 = undefined;
+ var field_i: u32 = 0;
+ while (field_i < fields_len) : (field_i += 1) {
+ if (field_i % 32 == 0) {
+ cur_bit_bag = self.code.extra[bit_bag_index];
+ bit_bag_index += 1;
+ }
+ const has_tag_value = @truncate(u1, cur_bit_bag) != 0;
+ cur_bit_bag >>= 1;
+
+ const field_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
+ extra_index += 1;
+
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.print("{}", .{std.zig.fmtId(field_name)});
+
+ // Optional explicit tag value, present only when flagged above.
+ if (has_tag_value) {
+ const tag_value_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+
+ try stream.writeAll(" = ");
+ try self.writeInstRef(stream, tag_value_ref);
+ }
+ try stream.writeAll(",\n");
+ }
+ self.parent_decl_node = prev_parent_decl_node;
+ self.indent -= 2;
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("})");
+ }
+ try self.writeSrcNode(stream, src_node);
+ }
+
+ /// Renders an extended opaque-declaration instruction: optional source
+ /// node, name strategy, and the contained declarations (no fields).
+ fn writeOpaqueDecl(
+ self: *Writer,
+ stream: anytype,
+ extended: Zir.Inst.Extended.InstData,
+ ) !void {
+ const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small);
+ var extra_index: usize = extended.operand;
+
+ const src_node: ?i32 = if (small.has_src_node) blk: {
+ const src_node = @bitCast(i32, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk src_node;
+ } else null;
+
+ const decls_len = if (small.has_decls_len) blk: {
+ const decls_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk decls_len;
+ } else 0;
+
+ try stream.print("{s}, ", .{@tagName(small.name_strategy)});
+
+ if (decls_len == 0) {
+ try stream.writeAll("{})");
+ } else {
+ try stream.writeAll("{\n");
+ self.indent += 2;
+ // Resulting extra index is not needed: nothing follows the decls.
+ _ = try self.writeDecls(stream, decls_len, extra_index);
+ self.indent -= 2;
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("})");
+ }
+ try self.writeSrcNode(stream, src_node);
+ }
+
+ /// Renders an error-set declaration: the name strategy followed by a
+ /// braced, indented list of the error names, then the source location.
+ fn writeErrorSetDecl(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ name_strategy: Zir.Inst.NameStrategy,
+ ) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);
+ // Trailing extra data: one string-table index per error name.
+ const fields = self.code.extra[extra.end..][0..extra.data.fields_len];
+
+ try stream.print("{s}, ", .{@tagName(name_strategy)});
+
+ try stream.writeAll("{\n");
+ self.indent += 2;
+ for (fields) |str_index| {
+ const name = self.code.nullTerminatedString(str_index);
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.print("{},\n", .{std.zig.fmtId(name)});
+ }
+ self.indent -= 2;
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("}) ");
+
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders a switch block with only scalar cases. When a special prong
+ /// (`else` or `_`) exists, its body is stored first in the extra data
+ /// (length-prefixed); scalar cases follow as (item ref, body_len, body).
+ fn writePlNodeSwitchBr(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ special_prong: Zir.SpecialProng,
+ ) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
+ const special: struct {
+ body: []const Zir.Inst.Index,
+ end: usize,
+ } = switch (special_prong) {
+ .none => .{ .body = &.{}, .end = extra.end },
+ .under, .@"else" => blk: {
+ const body_len = self.code.extra[extra.end];
+ const extra_body_start = extra.end + 1;
+ break :blk .{
+ .body = self.code.extra[extra_body_start..][0..body_len],
+ .end = extra_body_start + body_len,
+ };
+ },
+ };
+
+ try self.writeInstRef(stream, extra.data.operand);
+
+ self.indent += 2;
+
+ if (special.body.len != 0) {
+ const prong_name = switch (special_prong) {
+ .@"else" => "else",
+ .under => "_",
+ else => unreachable,
+ };
+ try stream.writeAll(",\n");
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.print("{s} => ", .{prong_name});
+ try self.writeBracedBody(stream, special.body);
+ }
+
+ // Scalar cases: item ref, body length, then the body instructions.
+ var extra_index: usize = special.end;
+ {
+ var scalar_i: usize = 0;
+ while (scalar_i < extra.data.cases_len) : (scalar_i += 1) {
+ const item_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ const body = self.code.extra[extra_index..][0..body_len];
+ extra_index += body_len;
+
+ try stream.writeAll(",\n");
+ try stream.writeByteNTimes(' ', self.indent);
+ try self.writeInstRef(stream, item_ref);
+ try stream.writeAll(" => ");
+ try self.writeBracedBody(stream, body);
+ }
+ }
+
+ self.indent -= 2;
+
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders a switch block that also has multi-item cases. Extra layout:
+ /// optional special-prong body, then scalar cases as in
+ /// writePlNodeSwitchBr, then multi cases, each encoded as
+ /// (items_len, ranges_len, body_len, items..., range pairs..., body).
+ fn writePlNodeSwitchBlockMulti(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ special_prong: Zir.SpecialProng,
+ ) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index);
+ const special: struct {
+ body: []const Zir.Inst.Index,
+ end: usize,
+ } = switch (special_prong) {
+ .none => .{ .body = &.{}, .end = extra.end },
+ .under, .@"else" => blk: {
+ const body_len = self.code.extra[extra.end];
+ const extra_body_start = extra.end + 1;
+ break :blk .{
+ .body = self.code.extra[extra_body_start..][0..body_len],
+ .end = extra_body_start + body_len,
+ };
+ },
+ };
+
+ try self.writeInstRef(stream, extra.data.operand);
+
+ self.indent += 2;
+
+ if (special.body.len != 0) {
+ const prong_name = switch (special_prong) {
+ .@"else" => "else",
+ .under => "_",
+ else => unreachable,
+ };
+ try stream.writeAll(",\n");
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.print("{s} => ", .{prong_name});
+ try self.writeBracedBody(stream, special.body);
+ }
+
+ var extra_index: usize = special.end;
+ {
+ // Scalar cases: item ref, body length, then the body instructions.
+ var scalar_i: usize = 0;
+ while (scalar_i < extra.data.scalar_cases_len) : (scalar_i += 1) {
+ const item_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ const body = self.code.extra[extra_index..][0..body_len];
+ extra_index += body_len;
+
+ try stream.writeAll(",\n");
+ try stream.writeByteNTimes(' ', self.indent);
+ try self.writeInstRef(stream, item_ref);
+ try stream.writeAll(" => ");
+ try self.writeBracedBody(stream, body);
+ }
+ }
+ {
+ var multi_i: usize = 0;
+ while (multi_i < extra.data.multi_cases_len) : (multi_i += 1) {
+ const items_len = self.code.extra[extra_index];
+ extra_index += 1;
+ const ranges_len = self.code.extra[extra_index];
+ extra_index += 1;
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ const items = self.code.refSlice(extra_index, items_len);
+ extra_index += items_len;
+
+ try stream.writeAll(",\n");
+ try stream.writeByteNTimes(' ', self.indent);
+
+ for (items) |item_ref, item_i| {
+ if (item_i != 0) try stream.writeAll(", ");
+ try self.writeInstRef(stream, item_ref);
+ }
+
+ // Ranges are stored as (first, last) ref pairs after the items.
+ var range_i: usize = 0;
+ while (range_i < ranges_len) : (range_i += 1) {
+ const item_first = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ const item_last = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+
+ if (range_i != 0 or items.len != 0) {
+ try stream.writeAll(", ");
+ }
+ try self.writeInstRef(stream, item_first);
+ try stream.writeAll("...");
+ try self.writeInstRef(stream, item_last);
+ }
+
+ const body = self.code.extra[extra_index..][0..body_len];
+ extra_index += body_len;
+ try stream.writeAll(" => ");
+ try self.writeBracedBody(stream, body);
+ }
+ }
+
+ self.indent -= 2;
+
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders a field access whose name is a compile-time string: the lhs
+ /// ref and the escaped field name, then the source location.
+ fn writePlNodeField(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
+ const name = self.code.nullTerminatedString(extra.field_name_start);
+ try self.writeInstRef(stream, extra.lhs);
+ try stream.print(", \"{}\") ", .{std.zig.fmtEscapes(name)});
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders a field access whose name is itself a ref (runtime-computed
+ /// name): the lhs ref and the field-name ref, then the source location.
+ fn writePlNodeFieldNamed(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
+ try self.writeInstRef(stream, extra.lhs);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.field_name);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders an `as` coercion: the destination type ref and the operand
+ /// ref, then the source location.
+ fn writeAs(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.As, inst_data.payload_index).data;
+ try self.writeInstRef(stream, extra.dest_type);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.operand);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders an operand-less `node` instruction: closing paren and the
+ /// source location derived from the node offset only.
+ fn writeNode(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
+ const src_node = self.code.instructions.items(.data)[inst].node;
+ const src: LazySrcLoc = .{ .node_offset = src_node };
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, src);
+ }
+
+ /// Renders a `str_tok` instruction: its escaped string payload followed
+ /// by the source location.
+ fn writeStrTok(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
+ const inst_data = self.code.instructions.items(.data)[inst].str_tok;
+ const str = inst_data.get(self.code);
+ try stream.print("\"{}\") ", .{std.zig.fmtEscapes(str)});
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ /// Renders a plain function instruction. The extra data carries the
+ /// return-type body, then the function body, then (only when the body is
+ /// non-empty) the source-location record. Delegates the actual printing
+ /// to writeFuncCommon with no cc/align and non-extern, non-varargs flags.
+ fn writeFunc(
+ self: *Writer,
+ stream: anytype,
+ inst: Zir.Inst.Index,
+ inferred_error_set: bool,
+ ) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const src = inst_data.src();
+ const extra = self.code.extraData(Zir.Inst.Func, inst_data.payload_index);
+ var extra_index = extra.end;
+
+ const ret_ty_body = self.code.extra[extra_index..][0..extra.data.ret_body_len];
+ extra_index += ret_ty_body.len;
+
+ const body = self.code.extra[extra_index..][0..extra.data.body_len];
+ extra_index += body.len;
+
+ // src_locs only exists in the payload when there is a body; it is left
+ // undefined (and unread downstream) for bodiless declarations.
+ var src_locs: Zir.Inst.Func.SrcLocs = undefined;
+ if (body.len != 0) {
+ src_locs = self.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
+ }
+ return self.writeFuncCommon(
+ stream,
+ ret_ty_body,
+ inferred_error_set,
+ false,
+ false,
+ .none,
+ .none,
+ body,
+ src,
+ src_locs,
+ );
+ }
+
+ /// Renders an extended function instruction: like writeFunc but with
+ /// optional trailing lib_name, calling-convention ref, and align ref,
+ /// each gated by a flag bit in `small`.
+ fn writeFuncExtended(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const extra = self.code.extraData(Zir.Inst.ExtendedFunc, extended.operand);
+ const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
+ const small = @bitCast(Zir.Inst.ExtendedFunc.Small, extended.small);
+
+ var extra_index: usize = extra.end;
+ if (small.has_lib_name) {
+ const lib_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
+ extra_index += 1;
+ try stream.print("lib_name=\"{}\", ", .{std.zig.fmtEscapes(lib_name)});
+ }
+ try self.writeFlag(stream, "test, ", small.is_test);
+ const cc: Zir.Inst.Ref = if (!small.has_cc) .none else blk: {
+ const cc = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk cc;
+ };
+ const align_inst: Zir.Inst.Ref = if (!small.has_align) .none else blk: {
+ const align_inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk align_inst;
+ };
+
+ const ret_ty_body = self.code.extra[extra_index..][0..extra.data.ret_body_len];
+ extra_index += ret_ty_body.len;
+
+ const body = self.code.extra[extra_index..][0..extra.data.body_len];
+ extra_index += body.len;
+
+ // src_locs only exists in the payload when there is a body.
+ var src_locs: Zir.Inst.Func.SrcLocs = undefined;
+ if (body.len != 0) {
+ src_locs = self.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
+ }
+ return self.writeFuncCommon(
+ stream,
+ ret_ty_body,
+ small.is_inferred_error,
+ small.is_var_args,
+ small.is_extern,
+ cc,
+ align_inst,
+ body,
+ src,
+ src_locs,
+ );
+ }
+
+ /// Renders an extended variable declaration: the variable type ref plus
+ /// optional lib_name, align, and init operands gated by `small` flags,
+ /// and an is_extern flag. No source location is printed here.
+ fn writeVarExtended(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const extra = self.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
+ const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small);
+
+ try self.writeInstRef(stream, extra.data.var_type);
+
+ var extra_index: usize = extra.end;
+ if (small.has_lib_name) {
+ const lib_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
+ extra_index += 1;
+ try stream.print(", lib_name=\"{}\"", .{std.zig.fmtEscapes(lib_name)});
+ }
+ const align_inst: Zir.Inst.Ref = if (!small.has_align) .none else blk: {
+ const align_inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk align_inst;
+ };
+ const init_inst: Zir.Inst.Ref = if (!small.has_init) .none else blk: {
+ const init_inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk init_inst;
+ };
+ try self.writeFlag(stream, ", is_extern", small.is_extern);
+ try self.writeOptionalInstRef(stream, ", align=", align_inst);
+ try self.writeOptionalInstRef(stream, ", init=", init_inst);
+ try stream.writeAll("))");
+ }
+
+ /// Renders an extended alloc instruction: is_const/is_comptime flags and
+ /// optional type and align refs gated by `small` flags, then the source
+ /// location.
+ fn writeAllocExtended(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const extra = self.code.extraData(Zir.Inst.AllocExtended, extended.operand);
+ const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small);
+ const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
+
+ var extra_index: usize = extra.end;
+ const type_inst: Zir.Inst.Ref = if (!small.has_type) .none else blk: {
+ const type_inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk type_inst;
+ };
+ const align_inst: Zir.Inst.Ref = if (!small.has_align) .none else blk: {
+ const align_inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ break :blk align_inst;
+ };
+ try self.writeFlag(stream, ",is_const", small.is_const);
+ try self.writeFlag(stream, ",is_comptime", small.is_comptime);
+ try self.writeOptionalInstRef(stream, ",ty=", type_inst);
+ try self.writeOptionalInstRef(stream, ",align=", align_inst);
+ try stream.writeAll(")) ");
+ try self.writeSrc(stream, src);
+ }
+
+ fn writeBoolBr(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].bool_br;
+ const extra = self.code.extraData(Zir.Inst.Block, inst_data.payload_index);
+ const body = self.code.extra[extra.end..][0..extra.data.body_len];
+ try self.writeInstRef(stream, inst_data.lhs);
+ try stream.writeAll(", ");
+ try self.writeBracedBody(stream, body);
+ }
+
+ fn writeIntType(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const int_type = self.code.instructions.items(.data)[inst].int_type;
+ const prefix: u8 = switch (int_type.signedness) {
+ .signed => 'i',
+ .unsigned => 'u',
+ };
+ try stream.print("{c}{d}) ", .{ prefix, int_type.bit_count });
+ try self.writeSrc(stream, int_type.src());
+ }
+
+ fn writeBreak(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].@"break";
+
+ try self.writeInstIndex(stream, inst_data.block_inst);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, inst_data.operand);
+ try stream.writeAll(")");
+ }
+
+ fn writeArrayInit(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+
+ const extra = self.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
+ const args = self.code.refSlice(extra.end, extra.data.operands_len);
+
+ try stream.writeAll(".{");
+ for (args) |arg, i| {
+ if (i != 0) try stream.writeAll(", ");
+ try self.writeInstRef(stream, arg);
+ }
+ try stream.writeAll("})");
+ }
+
+ fn writeUnreachable(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].@"unreachable";
+ const safety_str = if (inst_data.safety) "safe" else "unsafe";
+ try stream.print("{s}) ", .{safety_str});
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeFuncCommon(
+ self: *Writer,
+ stream: anytype,
+ ret_ty_body: []const Zir.Inst.Index,
+ inferred_error_set: bool,
+ var_args: bool,
+ is_extern: bool,
+ cc: Zir.Inst.Ref,
+ align_inst: Zir.Inst.Ref,
+ body: []const Zir.Inst.Index,
+ src: LazySrcLoc,
+ src_locs: Zir.Inst.Func.SrcLocs,
+ ) !void {
+ if (ret_ty_body.len == 0) {
+ try stream.writeAll("ret_ty=void");
+ } else {
+ try stream.writeAll("ret_ty=");
+ try self.writeBracedBody(stream, ret_ty_body);
+ }
+
+ try self.writeOptionalInstRef(stream, ", cc=", cc);
+ try self.writeOptionalInstRef(stream, ", align=", align_inst);
+ try self.writeFlag(stream, ", vargs", var_args);
+ try self.writeFlag(stream, ", extern", is_extern);
+ try self.writeFlag(stream, ", inferror", inferred_error_set);
+
+ try stream.writeAll(", body=");
+ try self.writeBracedBody(stream, body);
+ try stream.writeAll(") ");
+ if (body.len != 0) {
+ try stream.print("(lbrace={d}:{d},rbrace={d}:{d}) ", .{
+ src_locs.lbrace_line, @truncate(u16, src_locs.columns),
+ src_locs.rbrace_line, @truncate(u16, src_locs.columns >> 16),
+ });
+ }
+ try self.writeSrc(stream, src);
+ }
+
+ fn writeSwitchCapture(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].switch_capture;
+ try self.writeInstIndex(stream, inst_data.switch_inst);
+ try stream.print(", {d})", .{inst_data.prong_index});
+ }
+
+ fn writeDbgStmt(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].dbg_stmt;
+ try stream.print("{d}, {d})", .{ inst_data.line, inst_data.column });
+ }
+
+ fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void {
+ var i: usize = @enumToInt(ref);
+
+ if (i < Zir.Inst.Ref.typed_value_map.len) {
+ return stream.print("@{}", .{ref});
+ }
+ i -= Zir.Inst.Ref.typed_value_map.len;
+
+ return self.writeInstIndex(stream, @intCast(Zir.Inst.Index, i));
+ }
+
+ fn writeInstIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ _ = self;
+ return stream.print("%{d}", .{inst});
+ }
+
+ fn writeOptionalInstRef(
+ self: *Writer,
+ stream: anytype,
+ prefix: []const u8,
+ inst: Zir.Inst.Ref,
+ ) !void {
+ if (inst == .none) return;
+ try stream.writeAll(prefix);
+ try self.writeInstRef(stream, inst);
+ }
+
+ fn writeFlag(
+ self: *Writer,
+ stream: anytype,
+ name: []const u8,
+ flag: bool,
+ ) !void {
+ _ = self;
+ if (!flag) return;
+ try stream.writeAll(name);
+ }
+
+ fn writeSrc(self: *Writer, stream: anytype, src: LazySrcLoc) !void {
+ if (self.file.tree_loaded) {
+ const tree = self.file.tree;
+ const src_loc: Module.SrcLoc = .{
+ .file_scope = self.file,
+ .parent_decl_node = self.parent_decl_node,
+ .lazy = src,
+ };
+ const abs_byte_off = src_loc.byteOffset(self.gpa) catch unreachable;
+ const delta_line = std.zig.findLineColumn(tree.source, abs_byte_off);
+ try stream.print("{s}:{d}:{d}", .{
+ @tagName(src), delta_line.line + 1, delta_line.column + 1,
+ });
+ }
+ }
+
+ fn writeSrcNode(self: *Writer, stream: anytype, src_node: ?i32) !void {
+ const node_offset = src_node orelse return;
+ const src: LazySrcLoc = .{ .node_offset = node_offset };
+ try stream.writeAll(" ");
+ return self.writeSrc(stream, src);
+ }
+
+ fn writeBracedDecl(self: *Writer, stream: anytype, body: []const Zir.Inst.Index) !void {
+ try self.writeBracedBodyConditional(stream, body, self.recurse_decls);
+ }
+
+ fn writeBracedBody(self: *Writer, stream: anytype, body: []const Zir.Inst.Index) !void {
+ try self.writeBracedBodyConditional(stream, body, self.recurse_blocks);
+ }
+
+ fn writeBracedBodyConditional(self: *Writer, stream: anytype, body: []const Zir.Inst.Index, enabled: bool) !void {
+ if (body.len == 0) {
+ try stream.writeAll("{}");
+ } else if (enabled) {
+ try stream.writeAll("{\n");
+ self.indent += 2;
+ try self.writeBody(stream, body);
+ self.indent -= 2;
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("}");
+ } else if (body.len == 1) {
+ try stream.writeByte('{');
+ try self.writeInstIndex(stream, body[0]);
+ try stream.writeByte('}');
+ } else if (body.len == 2) {
+ try stream.writeByte('{');
+ try self.writeInstIndex(stream, body[0]);
+ try stream.writeAll(", ");
+ try self.writeInstIndex(stream, body[1]);
+ try stream.writeByte('}');
+ } else {
+ try stream.writeByte('{');
+ try self.writeInstIndex(stream, body[0]);
+ try stream.writeAll("..");
+ try self.writeInstIndex(stream, body[body.len - 1]);
+ try stream.writeByte('}');
+ }
+ }
+
+ fn writeBody(self: *Writer, stream: anytype, body: []const Zir.Inst.Index) !void {
+ for (body) |inst| {
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.print("%{d} ", .{inst});
+ try self.writeInstToStream(stream, inst);
+ try stream.writeByte('\n');
+ }
+ }
+};
diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp
index 4004199eb6..5b58766df9 100644
--- a/src/stage1/all_types.hpp
+++ b/src/stage1/all_types.hpp
@@ -86,6 +86,14 @@ enum CallingConvention {
CallingConventionSysV
};
+// Stage 1 supports only the generic address space
+enum AddressSpace {
+ AddressSpaceGeneric,
+ AddressSpaceGS,
+ AddressSpaceFS,
+ AddressSpaceSS,
+};
+
// This one corresponds to the builtin.zig enum.
enum BuiltinPtrSize {
BuiltinPtrSizeOne,
@@ -804,14 +812,18 @@ enum BinOpType {
BinOpTypeInvalid,
BinOpTypeAssign,
BinOpTypeAssignTimes,
+ BinOpTypeAssignTimesSat,
BinOpTypeAssignTimesWrap,
BinOpTypeAssignDiv,
BinOpTypeAssignMod,
BinOpTypeAssignPlus,
+ BinOpTypeAssignPlusSat,
BinOpTypeAssignPlusWrap,
BinOpTypeAssignMinus,
+ BinOpTypeAssignMinusSat,
BinOpTypeAssignMinusWrap,
BinOpTypeAssignBitShiftLeft,
+ BinOpTypeAssignBitShiftLeftSat,
BinOpTypeAssignBitShiftRight,
BinOpTypeAssignBitAnd,
BinOpTypeAssignBitXor,
@@ -828,12 +840,16 @@ enum BinOpType {
BinOpTypeBinXor,
BinOpTypeBinAnd,
BinOpTypeBitShiftLeft,
+ BinOpTypeBitShiftLeftSat,
BinOpTypeBitShiftRight,
BinOpTypeAdd,
+ BinOpTypeAddSat,
BinOpTypeAddWrap,
BinOpTypeSub,
+ BinOpTypeSubSat,
BinOpTypeSubWrap,
BinOpTypeMult,
+ BinOpTypeMultSat,
BinOpTypeMultWrap,
BinOpTypeDiv,
BinOpTypeMod,
@@ -1802,10 +1818,6 @@ enum BuiltinFnId {
BuiltinFnIdReduce,
BuiltinFnIdMaximum,
BuiltinFnIdMinimum,
- BuiltinFnIdSatAdd,
- BuiltinFnIdSatSub,
- BuiltinFnIdSatMul,
- BuiltinFnIdSatShl,
};
struct BuiltinFnEntry {
@@ -2950,10 +2962,10 @@ enum IrBinOp {
IrBinOpArrayMult,
IrBinOpMaximum,
IrBinOpMinimum,
- IrBinOpSatAdd,
- IrBinOpSatSub,
- IrBinOpSatMul,
- IrBinOpSatShl,
+ IrBinOpAddSat,
+ IrBinOpSubSat,
+ IrBinOpMultSat,
+ IrBinOpShlSat,
};
struct Stage1ZirInstBinOp {
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index 2eb609ef1a..ec4ff8fc9e 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -1019,6 +1019,16 @@ bool calling_convention_allows_zig_types(CallingConvention cc) {
zig_unreachable();
}
+const char *address_space_name(AddressSpace as) {
+ switch (as) {
+ case AddressSpaceGeneric: return "generic";
+ case AddressSpaceGS: return "gs";
+ case AddressSpaceFS: return "fs";
+ case AddressSpaceSS: return "ss";
+ }
+ zig_unreachable();
+}
+
ZigType *get_stack_trace_type(CodeGen *g) {
if (g->stack_trace_type == nullptr) {
g->stack_trace_type = get_builtin_type(g, "StackTrace");
@@ -6794,7 +6804,7 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
// Since this frame is async, an await might represent a suspend point, and
// therefore need to spill. It also needs to mark expr scopes as having to spill.
// For example: foo() + await z
- // The funtion call result of foo() must be spilled.
+ // The function call result of foo() must be spilled.
for (size_t i = 0; i < fn->await_list.length; i += 1) {
Stage1AirInstAwait *await = fn->await_list.at(i);
if (await->is_nosuspend) {
diff --git a/src/stage1/analyze.hpp b/src/stage1/analyze.hpp
index 8290ef572c..6d584ff361 100644
--- a/src/stage1/analyze.hpp
+++ b/src/stage1/analyze.hpp
@@ -242,6 +242,8 @@ Error get_primitive_type(CodeGen *g, Buf *name, ZigType **result);
bool calling_convention_allows_zig_types(CallingConvention cc);
const char *calling_convention_name(CallingConvention cc);
+const char *address_space_name(AddressSpace as);
+
Error ATTRIBUTE_MUST_USE file_fetch(CodeGen *g, Buf *resolved_path, Buf *contents);
void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk);
diff --git a/src/stage1/astgen.cpp b/src/stage1/astgen.cpp
index 9e5d9da9ee..8fbd02c688 100644
--- a/src/stage1/astgen.cpp
+++ b/src/stage1/astgen.cpp
@@ -3672,6 +3672,8 @@ static Stage1ZirInst *astgen_bin_op(Stage1AstGen *ag, Scope *scope, AstNode *nod
return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpMult), lval, result_loc);
case BinOpTypeAssignTimesWrap:
return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpMultWrap), lval, result_loc);
+ case BinOpTypeAssignTimesSat:
+ return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpMultSat), lval, result_loc);
case BinOpTypeAssignDiv:
return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpDivUnspecified), lval, result_loc);
case BinOpTypeAssignMod:
@@ -3680,12 +3682,18 @@ static Stage1ZirInst *astgen_bin_op(Stage1AstGen *ag, Scope *scope, AstNode *nod
return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpAdd), lval, result_loc);
case BinOpTypeAssignPlusWrap:
return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpAddWrap), lval, result_loc);
+ case BinOpTypeAssignPlusSat:
+ return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpAddSat), lval, result_loc);
case BinOpTypeAssignMinus:
return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpSub), lval, result_loc);
case BinOpTypeAssignMinusWrap:
return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpSubWrap), lval, result_loc);
+ case BinOpTypeAssignMinusSat:
+ return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpSubSat), lval, result_loc);
case BinOpTypeAssignBitShiftLeft:
return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpBitShiftLeftLossy), lval, result_loc);
+ case BinOpTypeAssignBitShiftLeftSat:
+ return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpShlSat), lval, result_loc);
case BinOpTypeAssignBitShiftRight:
return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpBitShiftRightLossy), lval, result_loc);
case BinOpTypeAssignBitAnd:
@@ -3718,20 +3726,28 @@ static Stage1ZirInst *astgen_bin_op(Stage1AstGen *ag, Scope *scope, AstNode *nod
return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpBinAnd), lval, result_loc);
case BinOpTypeBitShiftLeft:
return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpBitShiftLeftLossy), lval, result_loc);
+ case BinOpTypeBitShiftLeftSat:
+ return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpShlSat), lval, result_loc);
case BinOpTypeBitShiftRight:
return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpBitShiftRightLossy), lval, result_loc);
case BinOpTypeAdd:
return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpAdd), lval, result_loc);
case BinOpTypeAddWrap:
return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpAddWrap), lval, result_loc);
+ case BinOpTypeAddSat:
+ return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpAddSat), lval, result_loc);
case BinOpTypeSub:
return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpSub), lval, result_loc);
case BinOpTypeSubWrap:
return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpSubWrap), lval, result_loc);
+ case BinOpTypeSubSat:
+ return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpSubSat), lval, result_loc);
case BinOpTypeMult:
return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpMult), lval, result_loc);
case BinOpTypeMultWrap:
return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpMultWrap), lval, result_loc);
+ case BinOpTypeMultSat:
+ return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpMultSat), lval, result_loc);
case BinOpTypeDiv:
return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpDivUnspecified), lval, result_loc);
case BinOpTypeMod:
@@ -4704,66 +4720,6 @@ static Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast
Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpMaximum, arg0_value, arg1_value, true);
return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
}
- case BuiltinFnIdSatAdd:
- {
- AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
- Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope);
- if (arg0_value == ag->codegen->invalid_inst_src)
- return arg0_value;
-
- AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
- Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope);
- if (arg1_value == ag->codegen->invalid_inst_src)
- return arg1_value;
-
- Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatAdd, arg0_value, arg1_value, true);
- return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
- }
- case BuiltinFnIdSatSub:
- {
- AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
- Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope);
- if (arg0_value == ag->codegen->invalid_inst_src)
- return arg0_value;
-
- AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
- Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope);
- if (arg1_value == ag->codegen->invalid_inst_src)
- return arg1_value;
-
- Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatSub, arg0_value, arg1_value, true);
- return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
- }
- case BuiltinFnIdSatMul:
- {
- AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
- Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope);
- if (arg0_value == ag->codegen->invalid_inst_src)
- return arg0_value;
-
- AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
- Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope);
- if (arg1_value == ag->codegen->invalid_inst_src)
- return arg1_value;
-
- Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatMul, arg0_value, arg1_value, true);
- return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
- }
- case BuiltinFnIdSatShl:
- {
- AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
- Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope);
- if (arg0_value == ag->codegen->invalid_inst_src)
- return arg0_value;
-
- AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
- Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope);
- if (arg1_value == ag->codegen->invalid_inst_src)
- return arg1_value;
-
- Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatShl, arg0_value, arg1_value, true);
- return ir_lval_wrap(ag, scope, bin_op, lval, result_loc);
- }
case BuiltinFnIdMemcpy:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index f3683adfce..a3f87c9f5a 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -487,9 +487,7 @@ static LLVMValueRef make_fn_llvm_value(CodeGen *g, ZigFn *fn) {
if (mangled_symbol_buf) buf_destroy(mangled_symbol_buf);
}
} else {
- if (llvm_fn == nullptr) {
- llvm_fn = LLVMAddFunction(g->module, symbol_name, fn_llvm_type);
- }
+ llvm_fn = LLVMAddFunction(g->module, symbol_name, fn_llvm_type);
for (size_t i = 1; i < fn->export_list.length; i += 1) {
GlobalExport *fn_export = &fn->export_list.items[i];
@@ -3335,7 +3333,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
} else {
zig_unreachable();
}
- case IrBinOpSatAdd:
+ case IrBinOpAddSat:
if (scalar_type->id == ZigTypeIdInt) {
if (scalar_type->data.integral.is_signed) {
return ZigLLVMBuildSAddSat(g->builder, op1_value, op2_value, "");
@@ -3345,7 +3343,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
} else {
zig_unreachable();
}
- case IrBinOpSatSub:
+ case IrBinOpSubSat:
if (scalar_type->id == ZigTypeIdInt) {
if (scalar_type->data.integral.is_signed) {
return ZigLLVMBuildSSubSat(g->builder, op1_value, op2_value, "");
@@ -3355,7 +3353,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
} else {
zig_unreachable();
}
- case IrBinOpSatMul:
+ case IrBinOpMultSat:
if (scalar_type->id == ZigTypeIdInt) {
if (scalar_type->data.integral.is_signed) {
return ZigLLVMBuildSMulFixSat(g->builder, op1_value, op2_value, "");
@@ -3365,7 +3363,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
} else {
zig_unreachable();
}
- case IrBinOpSatShl:
+ case IrBinOpShlSat:
if (scalar_type->id == ZigTypeIdInt) {
if (scalar_type->data.integral.is_signed) {
return ZigLLVMBuildSShlSat(g->builder, op1_value, op2_value, "");
@@ -9134,10 +9132,6 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdReduce, "reduce", 2);
create_builtin_fn(g, BuiltinFnIdMaximum, "maximum", 2);
create_builtin_fn(g, BuiltinFnIdMinimum, "minimum", 2);
- create_builtin_fn(g, BuiltinFnIdSatAdd, "addWithSaturation", 2);
- create_builtin_fn(g, BuiltinFnIdSatSub, "subWithSaturation", 2);
- create_builtin_fn(g, BuiltinFnIdSatMul, "mulWithSaturation", 2);
- create_builtin_fn(g, BuiltinFnIdSatShl, "shlWithSaturation", 2);
}
static const char *bool_to_str(bool b) {
@@ -9323,6 +9317,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
buf_appendf(contents, "pub const single_threaded = %s;\n", bool_to_str(g->is_single_threaded));
buf_appendf(contents, "pub const abi = std.Target.Abi.%s;\n", cur_abi);
buf_appendf(contents, "pub const cpu = std.Target.Cpu.baseline(.%s);\n", cur_arch);
+ buf_appendf(contents, "pub const stage2_arch: std.Target.Cpu.Arch = .%s;\n", cur_arch);
buf_appendf(contents, "pub const os = std.Target.Os.Tag.defaultVersionRange(.%s);\n", cur_os);
buf_appendf(contents,
"pub const target = std.Target{\n"
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index 0604c05c46..2f2cfe08f3 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -6374,7 +6374,7 @@ static Stage1AirInst *ir_analyze_enum_to_union(IrAnalyze *ira, Scope *scope, Ast
if (target->value->type->data.enumeration.non_exhaustive) {
ir_add_error_node(ira, source_node,
- buf_sprintf("runtime cast to union '%s' from non-exhustive enum",
+ buf_sprintf("runtime cast to union '%s' from non-exhaustive enum",
buf_ptr(&wanted_type->name)));
return ira->codegen->invalid_inst_gen;
}
@@ -9820,28 +9820,28 @@ static ErrorMsg *ir_eval_math_op_scalar(IrAnalyze *ira, Scope *scope, AstNode *s
float_min(out_val, op1_val, op2_val);
}
break;
- case IrBinOpSatAdd:
+ case IrBinOpAddSat:
if (is_int) {
bigint_add_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
} else {
zig_unreachable();
}
break;
- case IrBinOpSatSub:
+ case IrBinOpSubSat:
if (is_int) {
bigint_sub_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
} else {
zig_unreachable();
}
break;
- case IrBinOpSatMul:
+ case IrBinOpMultSat:
if (is_int) {
bigint_mul_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
} else {
zig_unreachable();
}
break;
- case IrBinOpSatShl:
+ case IrBinOpShlSat:
if (is_int) {
bigint_shl_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed);
} else {
@@ -10069,10 +10069,10 @@ static bool ok_float_op(IrBinOp op) {
case IrBinOpBitShiftRightExact:
case IrBinOpAddWrap:
case IrBinOpSubWrap:
- case IrBinOpSatAdd:
- case IrBinOpSatSub:
- case IrBinOpSatMul:
- case IrBinOpSatShl:
+ case IrBinOpAddSat:
+ case IrBinOpSubSat:
+ case IrBinOpMultSat:
+ case IrBinOpShlSat:
case IrBinOpMultWrap:
case IrBinOpArrayCat:
case IrBinOpArrayMult:
@@ -11046,10 +11046,10 @@ static Stage1AirInst *ir_analyze_instruction_bin_op(IrAnalyze *ira, Stage1ZirIns
case IrBinOpRemMod:
case IrBinOpMaximum:
case IrBinOpMinimum:
- case IrBinOpSatAdd:
- case IrBinOpSatSub:
- case IrBinOpSatMul:
- case IrBinOpSatShl:
+ case IrBinOpAddSat:
+ case IrBinOpSubSat:
+ case IrBinOpMultSat:
+ case IrBinOpShlSat:
return ir_analyze_bin_op_math(ira, bin_op_instruction);
case IrBinOpArrayCat:
return ir_analyze_array_cat(ira, bin_op_instruction);
@@ -15189,7 +15189,7 @@ static Stage1AirInst *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_
return ir_analyze_inferred_field_ptr(ira, field_name, scope, source_node, container_ptr, bare_type);
}
- // Tracks wether we should return an undefined value of the correct type.
+ // Tracks whether we should return an undefined value of the correct type.
// We do this if the container pointer is undefined and we are in a TypeOf call.
bool return_undef = container_ptr->value->special == ConstValSpecialUndef && \
get_scope_typeof(scope) != nullptr;
@@ -15248,7 +15248,7 @@ static Stage1AirInst *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_
if (type_is_invalid(union_val->type))
return ira->codegen->invalid_inst_gen;
- // Reject undefined values unless we're intializing the union:
+ // Reject undefined values unless we're initializing the union:
// a undefined union means also the tag is undefined, accessing
// its payload slot is UB.
const UndefAllowed allow_undef = initializing ? UndefOk : UndefBad;
@@ -16124,7 +16124,7 @@ static Stage1AirInst *ir_analyze_instruction_optional_unwrap_ptr(IrAnalyze *ira,
static Stage1AirInst *ir_analyze_instruction_ctz(IrAnalyze *ira, Stage1ZirInstCtz *instruction) {
Error err;
-
+
ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child);
if (type_is_invalid(int_type))
return ira->codegen->invalid_inst_gen;
@@ -16166,7 +16166,7 @@ static Stage1AirInst *ir_analyze_instruction_ctz(IrAnalyze *ira, Stage1ZirInstCt
return ira->codegen->invalid_inst_gen;
if (val->special == ConstValSpecialUndef)
return ir_const_undef(ira, instruction->base.scope, instruction->base.source_node, ira->codegen->builtin_types.entry_num_lit_int);
-
+
if (is_vector) {
ZigType *smallest_vec_type = get_vector_type(ira->codegen, vector_len, smallest_type);
Stage1AirInst *result = ir_const(ira, instruction->base.scope, instruction->base.source_node, smallest_vec_type);
@@ -16200,7 +16200,7 @@ static Stage1AirInst *ir_analyze_instruction_ctz(IrAnalyze *ira, Stage1ZirInstCt
static Stage1AirInst *ir_analyze_instruction_clz(IrAnalyze *ira, Stage1ZirInstClz *instruction) {
Error err;
-
+
ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child);
if (type_is_invalid(int_type))
return ira->codegen->invalid_inst_gen;
@@ -16242,7 +16242,7 @@ static Stage1AirInst *ir_analyze_instruction_clz(IrAnalyze *ira, Stage1ZirInstCl
return ira->codegen->invalid_inst_gen;
if (val->special == ConstValSpecialUndef)
return ir_const_undef(ira, instruction->base.scope, instruction->base.source_node, ira->codegen->builtin_types.entry_num_lit_int);
-
+
if (is_vector) {
ZigType *smallest_vec_type = get_vector_type(ira->codegen, vector_len, smallest_type);
Stage1AirInst *result = ir_const(ira, instruction->base.scope, instruction->base.source_node, smallest_vec_type);
@@ -16276,7 +16276,7 @@ static Stage1AirInst *ir_analyze_instruction_clz(IrAnalyze *ira, Stage1ZirInstCl
static Stage1AirInst *ir_analyze_instruction_pop_count(IrAnalyze *ira, Stage1ZirInstPopCount *instruction) {
Error err;
-
+
ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child);
if (type_is_invalid(int_type))
return ira->codegen->invalid_inst_gen;
@@ -16318,7 +16318,7 @@ static Stage1AirInst *ir_analyze_instruction_pop_count(IrAnalyze *ira, Stage1Zir
return ira->codegen->invalid_inst_gen;
if (val->special == ConstValSpecialUndef)
return ir_const_undef(ira, instruction->base.scope, instruction->base.source_node, ira->codegen->builtin_types.entry_num_lit_int);
-
+
if (is_vector) {
ZigType *smallest_vec_type = get_vector_type(ira->codegen, vector_len, smallest_type);
Stage1AirInst *result = ir_const(ira, instruction->base.scope, instruction->base.source_node, smallest_vec_type);
@@ -17904,7 +17904,7 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, Scope *scope, AstNode
result->special = ConstValSpecialStatic;
result->type = type_info_pointer_type;
- ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 7);
+ ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 8);
result->data.x_struct.fields = fields;
// size: Size
@@ -17939,24 +17939,29 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, Scope *scope, AstNode
lazy_align_of->base.id = LazyValueIdAlignOf;
lazy_align_of->target_type = ir_const_type(ira, scope, source_node, attrs_type->data.pointer.child_type);
}
- // child: type
- ensure_field_index(result->type, "child", 4);
+ // address_space: AddressSpace,
+ ensure_field_index(result->type, "address_space", 4);
fields[4]->special = ConstValSpecialStatic;
- fields[4]->type = ira->codegen->builtin_types.entry_type;
- fields[4]->data.x_type = attrs_type->data.pointer.child_type;
- // is_allowzero: bool
- ensure_field_index(result->type, "is_allowzero", 5);
+ fields[4]->type = get_builtin_type(ira->codegen, "AddressSpace");
+ bigint_init_unsigned(&fields[4]->data.x_enum_tag, AddressSpaceGeneric);
+ // child: type
+ ensure_field_index(result->type, "child", 5);
fields[5]->special = ConstValSpecialStatic;
- fields[5]->type = ira->codegen->builtin_types.entry_bool;
- fields[5]->data.x_bool = attrs_type->data.pointer.allow_zero;
- // sentinel: anytype
- ensure_field_index(result->type, "sentinel", 6);
+ fields[5]->type = ira->codegen->builtin_types.entry_type;
+ fields[5]->data.x_type = attrs_type->data.pointer.child_type;
+ // is_allowzero: bool
+ ensure_field_index(result->type, "is_allowzero", 6);
fields[6]->special = ConstValSpecialStatic;
+ fields[6]->type = ira->codegen->builtin_types.entry_bool;
+ fields[6]->data.x_bool = attrs_type->data.pointer.allow_zero;
+ // sentinel: anytype
+ ensure_field_index(result->type, "sentinel", 7);
+ fields[7]->special = ConstValSpecialStatic;
if (attrs_type->data.pointer.sentinel != nullptr) {
- fields[6]->type = get_optional_type(ira->codegen, attrs_type->data.pointer.child_type);
- set_optional_payload(fields[6], attrs_type->data.pointer.sentinel);
+ fields[7]->type = get_optional_type(ira->codegen, attrs_type->data.pointer.child_type);
+ set_optional_payload(fields[7], attrs_type->data.pointer.sentinel);
} else {
- fields[6]->type = ira->codegen->builtin_types.entry_null;
+ fields[7]->type = ira->codegen->builtin_types.entry_null;
}
return result;
@@ -18465,7 +18470,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Fn", nullptr);
- ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 6);
+ ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 7);
result->data.x_struct.fields = fields;
// calling_convention: TypeInfo.CallingConvention
@@ -18826,11 +18831,11 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
assert(size_value->type == ir_type_info_get_type(ira, "Size", type_info_pointer_type));
BuiltinPtrSize size_enum_index = (BuiltinPtrSize)bigint_as_u32(&size_value->data.x_enum_tag);
PtrLen ptr_len = size_enum_index_to_ptr_len(size_enum_index);
- ZigType *elem_type = get_const_field_meta_type(ira, source_node, payload, "child", 4);
+ ZigType *elem_type = get_const_field_meta_type(ira, source_node, payload, "child", 5);
if (type_is_invalid(elem_type))
return ira->codegen->invalid_inst_gen->value->type;
ZigValue *sentinel;
- if ((err = get_const_field_sentinel(ira, scope, source_node, payload, "sentinel", 6,
+ if ((err = get_const_field_sentinel(ira, scope, source_node, payload, "sentinel", 7,
elem_type, &sentinel)))
{
return ira->codegen->invalid_inst_gen->value->type;
@@ -18845,6 +18850,19 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
if (alignment == nullptr)
return ira->codegen->invalid_inst_gen->value->type;
+ ZigValue *as_value = get_const_field(ira, source_node, payload, "address_space", 4);
+ if (as_value == nullptr)
+ return ira->codegen->invalid_inst_gen->value->type;
+ assert(as_value->special == ConstValSpecialStatic);
+ assert(as_value->type == get_builtin_type(ira->codegen, "AddressSpace"));
+ AddressSpace as = (AddressSpace)bigint_as_u32(&as_value->data.x_enum_tag);
+ if (as != AddressSpaceGeneric) {
+ ir_add_error_node(ira, source_node, buf_sprintf(
+ "address space '%s' not available in stage 1 compiler, must be .generic",
+ address_space_name(as)));
+ return ira->codegen->invalid_inst_gen->value->type;
+ }
+
bool is_const;
if ((err = get_const_field_bool(ira, source_node, payload, "is_const", 1, &is_const)))
return ira->codegen->invalid_inst_gen->value->type;
@@ -18857,13 +18875,12 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
}
bool is_allowzero;
- if ((err = get_const_field_bool(ira, source_node, payload, "is_allowzero", 5,
+ if ((err = get_const_field_bool(ira, source_node, payload, "is_allowzero", 6,
&is_allowzero)))
{
return ira->codegen->invalid_inst_gen->value->type;
}
-
ZigType *ptr_type = get_pointer_to_type_extra2(ira->codegen,
elem_type,
is_const,
diff --git a/src/stage1/ir_print.cpp b/src/stage1/ir_print.cpp
index 152221926d..f92f146d84 100644
--- a/src/stage1/ir_print.cpp
+++ b/src/stage1/ir_print.cpp
@@ -558,7 +558,7 @@ const char* ir_inst_gen_type_str(Stage1AirInstId id) {
case Stage1AirInstIdWasmMemoryGrow:
return "GenWasmMemoryGrow";
case Stage1AirInstIdExtern:
- return "GenExtrern";
+ return "GenExtern";
}
zig_unreachable();
}
@@ -737,13 +737,13 @@ static const char *ir_bin_op_id_str(IrBinOp op_id) {
return "@maximum";
case IrBinOpMinimum:
return "@minimum";
- case IrBinOpSatAdd:
+ case IrBinOpAddSat:
return "@addWithSaturation";
- case IrBinOpSatSub:
+ case IrBinOpSubSat:
return "@subWithSaturation";
- case IrBinOpSatMul:
+ case IrBinOpMultSat:
return "@mulWithSaturation";
- case IrBinOpSatShl:
+ case IrBinOpShlSat:
return "@shlWithSaturation";
}
zig_unreachable();
@@ -829,7 +829,7 @@ static const char *cast_op_str(CastOp op) {
case CastOpIntToFloat: return "IntToFloat";
case CastOpFloatToInt: return "FloatToInt";
case CastOpBoolToInt: return "BoolToInt";
- case CastOpNumLitToConcrete: return "NumLitToConcrate";
+ case CastOpNumLitToConcrete: return "NumLitToConcrete";
case CastOpErrSet: return "ErrSet";
case CastOpBitCast: return "BitCast";
}
diff --git a/src/stage1/os.hpp b/src/stage1/os.hpp
index f1022778d4..6d086c8901 100644
--- a/src/stage1/os.hpp
+++ b/src/stage1/os.hpp
@@ -33,6 +33,8 @@
#define ZIG_OS_OPENBSD
#elif defined(__HAIKU__)
#define ZIG_OS_HAIKU
+#elif defined(__sun)
+#define ZIG_OS_SOLARIS
#else
#define ZIG_OS_UNKNOWN
#endif
diff --git a/src/stage1/parser.cpp b/src/stage1/parser.cpp
index b06a944172..fdc0777aff 100644
--- a/src/stage1/parser.cpp
+++ b/src/stage1/parser.cpp
@@ -2073,7 +2073,7 @@ static AstNode *ast_parse_field_init(ParseContext *pc) {
return nullptr;
}
if (eat_token_if(pc, TokenIdEq) == 0) {
- // Because ".Name" can also be intepreted as an enum literal, we should put back
+ // Because ".Name" can also be interpreted as an enum literal, we should put back
// those two tokens again so that the parser can try to parse them as the enum
// literal later.
put_back_token(pc);
@@ -2381,6 +2381,7 @@ static AstNode *ast_parse_switch_item(ParseContext *pc) {
// / PLUSEQUAL
// / MINUSEQUAL
// / LARROW2EQUAL
+// / LARROW2PIPEEQUAL
// / RARROW2EQUAL
// / AMPERSANDEQUAL
// / CARETEQUAL
@@ -2388,6 +2389,9 @@ static AstNode *ast_parse_switch_item(ParseContext *pc) {
// / ASTERISKPERCENTEQUAL
// / PLUSPERCENTEQUAL
// / MINUSPERCENTEQUAL
+// / ASTERISKPIPEEQUAL
+// / PLUSPIPEEQUAL
+// / MINUSPIPEEQUAL
// / EQUAL
static AstNode *ast_parse_assign_op(ParseContext *pc) {
// In C, we have `T arr[N] = {[i] = T{}};` but it doesn't
@@ -2396,17 +2400,21 @@ static AstNode *ast_parse_assign_op(ParseContext *pc) {
table[TokenIdBitAndEq] = BinOpTypeAssignBitAnd;
table[TokenIdBitOrEq] = BinOpTypeAssignBitOr;
table[TokenIdBitShiftLeftEq] = BinOpTypeAssignBitShiftLeft;
+ table[TokenIdBitShiftLeftPipeEq] = BinOpTypeAssignBitShiftLeftSat;
table[TokenIdBitShiftRightEq] = BinOpTypeAssignBitShiftRight;
table[TokenIdBitXorEq] = BinOpTypeAssignBitXor;
table[TokenIdDivEq] = BinOpTypeAssignDiv;
table[TokenIdEq] = BinOpTypeAssign;
table[TokenIdMinusEq] = BinOpTypeAssignMinus;
table[TokenIdMinusPercentEq] = BinOpTypeAssignMinusWrap;
+ table[TokenIdMinusPipeEq] = BinOpTypeAssignMinusSat;
table[TokenIdModEq] = BinOpTypeAssignMod;
table[TokenIdPlusEq] = BinOpTypeAssignPlus;
table[TokenIdPlusPercentEq] = BinOpTypeAssignPlusWrap;
+ table[TokenIdPlusPipeEq] = BinOpTypeAssignPlusSat;
table[TokenIdTimesEq] = BinOpTypeAssignTimes;
table[TokenIdTimesPercentEq] = BinOpTypeAssignTimesWrap;
+ table[TokenIdTimesPipeEq] = BinOpTypeAssignTimesSat;
BinOpType op = table[pc->token_ids[pc->current_token]];
if (op != BinOpTypeInvalid) {
@@ -2483,10 +2491,12 @@ static AstNode *ast_parse_bitwise_op(ParseContext *pc) {
// BitShiftOp
// <- LARROW2
+// / LARROW2PIPE
// / RARROW2
static AstNode *ast_parse_bit_shift_op(ParseContext *pc) {
BinOpType table[TokenIdCount] = {};
table[TokenIdBitShiftLeft] = BinOpTypeBitShiftLeft;
+ table[TokenIdBitShiftLeftPipe] = BinOpTypeBitShiftLeftSat;
table[TokenIdBitShiftRight] = BinOpTypeBitShiftRight;
BinOpType op = table[pc->token_ids[pc->current_token]];
@@ -2506,6 +2516,8 @@ static AstNode *ast_parse_bit_shift_op(ParseContext *pc) {
// / PLUS2
// / PLUSPERCENT
// / MINUSPERCENT
+// / PLUSPIPE
+// / MINUSPIPE
static AstNode *ast_parse_addition_op(ParseContext *pc) {
BinOpType table[TokenIdCount] = {};
table[TokenIdPlus] = BinOpTypeAdd;
@@ -2513,6 +2525,8 @@ static AstNode *ast_parse_addition_op(ParseContext *pc) {
table[TokenIdPlusPlus] = BinOpTypeArrayCat;
table[TokenIdPlusPercent] = BinOpTypeAddWrap;
table[TokenIdMinusPercent] = BinOpTypeSubWrap;
+ table[TokenIdPlusPipe] = BinOpTypeAddSat;
+ table[TokenIdMinusPipe] = BinOpTypeSubSat;
BinOpType op = table[pc->token_ids[pc->current_token]];
if (op != BinOpTypeInvalid) {
@@ -2532,6 +2546,7 @@ static AstNode *ast_parse_addition_op(ParseContext *pc) {
// / PERCENT
// / ASTERISK2
// / ASTERISKPERCENT
+// / ASTERISKPIPE
static AstNode *ast_parse_multiply_op(ParseContext *pc) {
BinOpType table[TokenIdCount] = {};
table[TokenIdBarBar] = BinOpTypeMergeErrorSets;
@@ -2540,6 +2555,7 @@ static AstNode *ast_parse_multiply_op(ParseContext *pc) {
table[TokenIdPercent] = BinOpTypeMod;
table[TokenIdStarStar] = BinOpTypeArrayMult;
table[TokenIdTimesPercent] = BinOpTypeMultWrap;
+ table[TokenIdTimesPipe] = BinOpTypeMultSat;
BinOpType op = table[pc->token_ids[pc->current_token]];
if (op != BinOpTypeInvalid) {
diff --git a/src/stage1/target.cpp b/src/stage1/target.cpp
index 85ea2f1d3d..eca468e5c0 100644
--- a/src/stage1/target.cpp
+++ b/src/stage1/target.cpp
@@ -401,6 +401,9 @@ Error target_parse_os(Os *out_os, const char *os_ptr, size_t os_len) {
#elif defined(ZIG_OS_HAIKU)
*out_os = OsHaiku;
return ErrorNone;
+#elif defined(ZIG_OS_SOLARIS)
+ *out_os = OsSolaris;
+ return ErrorNone;
#else
zig_panic("stage1 is unable to detect native target for this OS");
#endif
@@ -674,6 +677,7 @@ uint32_t target_c_type_size_in_bits(const ZigTarget *target, CIntType id) {
case OsOpenBSD:
case OsWASI:
case OsHaiku:
+ case OsSolaris:
case OsEmscripten:
case OsPlan9:
switch (id) {
@@ -732,7 +736,6 @@ uint32_t target_c_type_size_in_bits(const ZigTarget *target, CIntType id) {
case OsCloudABI:
case OsKFreeBSD:
case OsLv2:
- case OsSolaris:
case OsZOS:
case OsMinix:
case OsRTEMS:
diff --git a/src/stage1/tokenizer.cpp b/src/stage1/tokenizer.cpp
index f10579c966..47e324c933 100644
--- a/src/stage1/tokenizer.cpp
+++ b/src/stage1/tokenizer.cpp
@@ -226,8 +226,10 @@ enum TokenizeState {
TokenizeState_pipe,
TokenizeState_minus,
TokenizeState_minus_percent,
+ TokenizeState_minus_pipe,
TokenizeState_asterisk,
TokenizeState_asterisk_percent,
+ TokenizeState_asterisk_pipe,
TokenizeState_slash,
TokenizeState_line_comment_start,
TokenizeState_line_comment,
@@ -257,8 +259,10 @@ enum TokenizeState {
TokenizeState_percent,
TokenizeState_plus,
TokenizeState_plus_percent,
+ TokenizeState_plus_pipe,
TokenizeState_angle_bracket_left,
TokenizeState_angle_bracket_angle_bracket_left,
+ TokenizeState_angle_bracket_angle_bracket_left_pipe,
TokenizeState_angle_bracket_right,
TokenizeState_angle_bracket_angle_bracket_right,
TokenizeState_period,
@@ -548,6 +552,9 @@ void tokenize(const char *source, Tokenization *out) {
case '%':
t.state = TokenizeState_asterisk_percent;
break;
+ case '|':
+ t.state = TokenizeState_asterisk_pipe;
+ break;
default:
t.state = TokenizeState_start;
continue;
@@ -568,6 +575,21 @@ void tokenize(const char *source, Tokenization *out) {
continue;
}
break;
+ case TokenizeState_asterisk_pipe:
+ switch (c) {
+ case 0:
+ t.out->ids.last() = TokenIdTimesPipe;
+ goto eof;
+ case '=':
+ t.out->ids.last() = TokenIdTimesPipeEq;
+ t.state = TokenizeState_start;
+ break;
+ default:
+ t.out->ids.last() = TokenIdTimesPipe;
+ t.state = TokenizeState_start;
+ continue;
+ }
+ break;
case TokenizeState_percent:
switch (c) {
case 0:
@@ -596,6 +618,9 @@ void tokenize(const char *source, Tokenization *out) {
case '%':
t.state = TokenizeState_plus_percent;
break;
+ case '|':
+ t.state = TokenizeState_plus_pipe;
+ break;
default:
t.state = TokenizeState_start;
continue;
@@ -616,6 +641,21 @@ void tokenize(const char *source, Tokenization *out) {
continue;
}
break;
+ case TokenizeState_plus_pipe:
+ switch (c) {
+ case 0:
+ t.out->ids.last() = TokenIdPlusPipe;
+ goto eof;
+ case '=':
+ t.out->ids.last() = TokenIdPlusPipeEq;
+ t.state = TokenizeState_start;
+ break;
+ default:
+ t.out->ids.last() = TokenIdPlusPipe;
+ t.state = TokenizeState_start;
+ continue;
+ }
+ break;
case TokenizeState_caret:
switch (c) {
case 0:
@@ -891,6 +931,9 @@ void tokenize(const char *source, Tokenization *out) {
case '%':
t.state = TokenizeState_minus_percent;
break;
+ case '|':
+ t.state = TokenizeState_minus_pipe;
+ break;
default:
t.state = TokenizeState_start;
continue;
@@ -911,6 +954,21 @@ void tokenize(const char *source, Tokenization *out) {
continue;
}
break;
+ case TokenizeState_minus_pipe:
+ switch (c) {
+ case 0:
+ t.out->ids.last() = TokenIdMinusPipe;
+ goto eof;
+ case '=':
+ t.out->ids.last() = TokenIdMinusPipeEq;
+ t.state = TokenizeState_start;
+ break;
+ default:
+ t.out->ids.last() = TokenIdMinusPipe;
+ t.state = TokenizeState_start;
+ continue;
+ }
+ break;
case TokenizeState_angle_bracket_left:
switch (c) {
case 0:
@@ -936,12 +994,30 @@ void tokenize(const char *source, Tokenization *out) {
t.out->ids.last() = TokenIdBitShiftLeftEq;
t.state = TokenizeState_start;
break;
+ case '|':
+ t.state = TokenizeState_angle_bracket_angle_bracket_left_pipe;
+ break;
default:
t.out->ids.last() = TokenIdBitShiftLeft;
t.state = TokenizeState_start;
continue;
}
break;
+ case TokenizeState_angle_bracket_angle_bracket_left_pipe:
+ switch (c) {
+ case 0:
+ t.out->ids.last() = TokenIdBitShiftLeftPipe;
+ goto eof;
+ case '=':
+ t.out->ids.last() = TokenIdBitShiftLeftPipeEq;
+ t.state = TokenizeState_start;
+ break;
+ default:
+ t.out->ids.last() = TokenIdBitShiftLeftPipe;
+ t.state = TokenizeState_start;
+ continue;
+ }
+ break;
case TokenizeState_angle_bracket_right:
switch (c) {
case 0:
@@ -1437,6 +1513,8 @@ const char * token_name(TokenId id) {
case TokenIdBitOrEq: return "|=";
case TokenIdBitShiftLeft: return "<<";
case TokenIdBitShiftLeftEq: return "<<=";
+ case TokenIdBitShiftLeftPipe: return "<<|";
+ case TokenIdBitShiftLeftPipeEq: return "<<|=";
case TokenIdBitShiftRight: return ">>";
case TokenIdBitShiftRightEq: return ">>=";
case TokenIdBitXorEq: return "^=";
@@ -1521,12 +1599,16 @@ const char * token_name(TokenId id) {
case TokenIdMinusEq: return "-=";
case TokenIdMinusPercent: return "-%";
case TokenIdMinusPercentEq: return "-%=";
+ case TokenIdMinusPipe: return "-|";
+ case TokenIdMinusPipeEq: return "-|=";
case TokenIdModEq: return "%=";
case TokenIdPercent: return "%";
case TokenIdPlus: return "+";
case TokenIdPlusEq: return "+=";
case TokenIdPlusPercent: return "+%";
case TokenIdPlusPercentEq: return "+%=";
+ case TokenIdPlusPipe: return "+|";
+ case TokenIdPlusPipeEq: return "+|=";
case TokenIdPlusPlus: return "++";
case TokenIdRBrace: return "}";
case TokenIdRBracket: return "]";
@@ -1542,6 +1624,8 @@ const char * token_name(TokenId id) {
case TokenIdTimesEq: return "*=";
case TokenIdTimesPercent: return "*%";
case TokenIdTimesPercentEq: return "*%=";
+ case TokenIdTimesPipe: return "*|";
+ case TokenIdTimesPipeEq: return "*|=";
case TokenIdBuiltin: return "Builtin";
case TokenIdCount:
zig_unreachable();
diff --git a/src/stage1/tokenizer.hpp b/src/stage1/tokenizer.hpp
index 0e196597eb..56605c1764 100644
--- a/src/stage1/tokenizer.hpp
+++ b/src/stage1/tokenizer.hpp
@@ -23,6 +23,8 @@ enum TokenId : uint8_t {
TokenIdBitOrEq,
TokenIdBitShiftLeft,
TokenIdBitShiftLeftEq,
+ TokenIdBitShiftLeftPipe,
+ TokenIdBitShiftLeftPipeEq,
TokenIdBitShiftRight,
TokenIdBitShiftRightEq,
TokenIdBitXorEq,
@@ -108,12 +110,16 @@ enum TokenId : uint8_t {
TokenIdMinusEq,
TokenIdMinusPercent,
TokenIdMinusPercentEq,
+ TokenIdMinusPipe,
+ TokenIdMinusPipeEq,
TokenIdModEq,
TokenIdPercent,
TokenIdPlus,
TokenIdPlusEq,
TokenIdPlusPercent,
TokenIdPlusPercentEq,
+ TokenIdPlusPipe,
+ TokenIdPlusPipeEq,
TokenIdPlusPlus,
TokenIdRBrace,
TokenIdRBracket,
@@ -129,6 +135,8 @@ enum TokenId : uint8_t {
TokenIdTimesEq,
TokenIdTimesPercent,
TokenIdTimesPercentEq,
+ TokenIdTimesPipe,
+ TokenIdTimesPipeEq,
TokenIdCount,
};
diff --git a/src/target.zig b/src/target.zig
index 4f9898cfa8..6b6ed2fbc5 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -438,6 +438,13 @@ pub fn libcFullLinkFlags(target: std.Target) []const []const u8 {
"-lc",
"-lutil",
},
+ .solaris => &[_][]const u8{
+ "-lm",
+ "-lsocket",
+ "-lnsl",
+ // Solaris releases after 10 merged the threading libraries into libc.
+ "-lc",
+ },
.haiku => &[_][]const u8{
"-lm",
"-lroot",
@@ -550,3 +557,21 @@ pub fn largestAtomicBits(target: std.Target) u32 {
.x86_64 => 128,
};
}
+
+pub fn defaultAddressSpace(
+ target: std.Target,
+ context: enum {
+ /// Query the default address space for global constant values.
+ global_constant,
+ /// Query the default address space for global mutable values.
+ global_mutable,
+ /// Query the default address space for function-local values.
+ local,
+ /// Query the default address space for functions themselves.
+ function,
+ },
+) std.builtin.AddressSpace {
+ _ = target;
+ _ = context;
+ return .generic;
+}
diff --git a/src/translate_c.zig b/src/translate_c.zig
index d980fa657e..7247ed50a9 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -4882,7 +4882,7 @@ fn finishTransFnProto(
var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
defer fn_params.deinit();
const param_count: usize = if (fn_proto_ty != null) fn_proto_ty.?.getNumParams() else 0;
- try fn_params.ensureCapacity(param_count);
+ try fn_params.ensureTotalCapacity(param_count);
var i: usize = 0;
while (i < param_count) : (i += 1) {
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index 3686b90bda..dbd9367d1a 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -728,13 +728,13 @@ pub fn render(gpa: *Allocator, nodes: []const Node) !std.zig.Ast {
// Estimate that each top level node has 10 child nodes.
const estimated_node_count = nodes.len * 10;
- try ctx.nodes.ensureCapacity(gpa, estimated_node_count);
+ try ctx.nodes.ensureTotalCapacity(gpa, estimated_node_count);
// Estimate that each each node has 2 tokens.
const estimated_tokens_count = estimated_node_count * 2;
- try ctx.tokens.ensureCapacity(gpa, estimated_tokens_count);
+ try ctx.tokens.ensureTotalCapacity(gpa, estimated_tokens_count);
// Estimate that each each token is 3 bytes long.
const estimated_buf_len = estimated_tokens_count * 3;
- try ctx.buf.ensureCapacity(estimated_buf_len);
+ try ctx.buf.ensureTotalCapacity(estimated_buf_len);
ctx.nodes.appendAssumeCapacity(.{
.tag = .root,
@@ -839,7 +839,7 @@ const Context = struct {
fn addExtra(c: *Context, extra: anytype) Allocator.Error!NodeIndex {
const fields = std.meta.fields(@TypeOf(extra));
- try c.extra_data.ensureCapacity(c.gpa, c.extra_data.items.len + fields.len);
+ try c.extra_data.ensureUnusedCapacity(c.gpa, fields.len);
const result = @intCast(u32, c.extra_data.items.len);
inline for (fields) |field| {
comptime std.debug.assert(field.field_type == NodeIndex);
@@ -1462,10 +1462,10 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
.mul_wrap_assign => return renderBinOp(c, node, .assign_mul_wrap, .asterisk_percent_equal, "*%="),
.div => return renderBinOpGrouped(c, node, .div, .slash, "/"),
.div_assign => return renderBinOp(c, node, .assign_div, .slash_equal, "/="),
- .shl => return renderBinOpGrouped(c, node, .bit_shift_left, .angle_bracket_angle_bracket_left, "<<"),
- .shl_assign => return renderBinOp(c, node, .assign_bit_shift_left, .angle_bracket_angle_bracket_left_equal, "<<="),
- .shr => return renderBinOpGrouped(c, node, .bit_shift_right, .angle_bracket_angle_bracket_right, ">>"),
- .shr_assign => return renderBinOp(c, node, .assign_bit_shift_right, .angle_bracket_angle_bracket_right_equal, ">>="),
+ .shl => return renderBinOpGrouped(c, node, .shl, .angle_bracket_angle_bracket_left, "<<"),
+ .shl_assign => return renderBinOp(c, node, .assign_shl, .angle_bracket_angle_bracket_left_equal, "<<="),
+ .shr => return renderBinOpGrouped(c, node, .shr, .angle_bracket_angle_bracket_right, ">>"),
+ .shr_assign => return renderBinOp(c, node, .assign_shr, .angle_bracket_angle_bracket_right_equal, ">>="),
.mod => return renderBinOpGrouped(c, node, .mod, .percent, "%"),
.mod_assign => return renderBinOp(c, node, .assign_mod, .percent_equal, "%="),
.@"and" => return renderBinOpGrouped(c, node, .bool_and, .keyword_and, "and"),
@@ -2614,6 +2614,7 @@ fn renderVar(c: *Context, node: Node) !NodeIndex {
.type_node = type_node,
.align_node = align_node,
.section_node = section_node,
+ .addrspace_node = 0,
}),
.rhs = init_node,
},
@@ -2705,6 +2706,7 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex {
.lhs = try c.addExtra(std.zig.Ast.Node.FnProtoOne{
.param = params.items[0],
.align_expr = align_expr,
+ .addrspace_expr = 0, // TODO
.section_expr = section_expr,
.callconv_expr = callconv_expr,
}),
@@ -2720,6 +2722,7 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex {
.params_start = span.start,
.params_end = span.end,
.align_expr = align_expr,
+ .addrspace_expr = 0, // TODO
.section_expr = section_expr,
.callconv_expr = callconv_expr,
}),
@@ -2797,7 +2800,7 @@ fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.Ar
_ = try c.addToken(.l_paren, "(");
var rendered = std.ArrayList(NodeIndex).init(c.gpa);
errdefer rendered.deinit();
- try rendered.ensureCapacity(std.math.max(params.len, 1));
+ try rendered.ensureTotalCapacity(std.math.max(params.len, 1));
for (params) |param, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
diff --git a/src/type.zig b/src/type.zig
index 2403893133..dacde84167 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -124,9 +124,11 @@ pub const Type = extern union {
.enum_full,
.enum_nonexhaustive,
.enum_simple,
+ .enum_numbered,
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
=> return .Enum,
@@ -136,6 +138,7 @@ pub const Type = extern union {
.type_info,
=> return .Union,
+ .bound_fn => unreachable,
.var_args_param => unreachable, // can be any type
}
}
@@ -288,6 +291,7 @@ pub const Type = extern union {
.pointee_type = Type.initTag(.comptime_int),
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -299,6 +303,7 @@ pub const Type = extern union {
.pointee_type = Type.initTag(.u8),
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -310,6 +315,7 @@ pub const Type = extern union {
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -321,6 +327,7 @@ pub const Type = extern union {
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -332,6 +339,7 @@ pub const Type = extern union {
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -343,6 +351,7 @@ pub const Type = extern union {
.pointee_type = Type.initTag(.u8),
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -354,6 +363,7 @@ pub const Type = extern union {
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -365,6 +375,7 @@ pub const Type = extern union {
.pointee_type = Type.initTag(.u8),
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -376,6 +387,7 @@ pub const Type = extern union {
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -387,6 +399,7 @@ pub const Type = extern union {
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -398,6 +411,7 @@ pub const Type = extern union {
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -409,6 +423,7 @@ pub const Type = extern union {
.pointee_type = self.castPointer().?.data,
.sentinel = null,
.@"align" = 0,
+ .@"addrspace" = .generic,
.bit_offset = 0,
.host_size = 0,
.@"allowzero" = false,
@@ -461,6 +476,8 @@ pub const Type = extern union {
return false;
if (info_a.host_size != info_b.host_size)
return false;
+ if (info_a.@"addrspace" != info_b.@"addrspace")
+ return false;
const sentinel_a = info_a.sentinel;
const sentinel_b = info_b.sentinel;
@@ -587,8 +604,8 @@ pub const Type = extern union {
}
return false;
},
+ .Float => return a.tag() == b.tag(),
.Opaque,
- .Float,
.BoundFn,
.Frame,
=> std.debug.panic("TODO implement Type equality comparison of {} and {}", .{ a, b }),
@@ -746,6 +763,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -754,6 +772,7 @@ pub const Type = extern union {
.type_info,
.@"anyframe",
.generic_poison,
+ .bound_fn,
=> unreachable,
.array_u8,
@@ -835,6 +854,7 @@ pub const Type = extern union {
.pointee_type = try payload.pointee_type.copy(allocator),
.sentinel = sent,
.@"align" = payload.@"align",
+ .@"addrspace" = payload.@"addrspace",
.bit_offset = payload.bit_offset,
.host_size = payload.host_size,
.@"allowzero" = payload.@"allowzero",
@@ -857,6 +877,7 @@ pub const Type = extern union {
.@"struct" => return self.copyPayloadShallow(allocator, Payload.Struct),
.@"union", .union_tagged => return self.copyPayloadShallow(allocator, Payload.Union),
.enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple),
+ .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered),
.enum_full, .enum_nonexhaustive => return self.copyPayloadShallow(allocator, Payload.EnumFull),
.@"opaque" => return self.copyPayloadShallow(allocator, Payload.Opaque),
}
@@ -917,6 +938,7 @@ pub const Type = extern union {
.comptime_float,
.noreturn,
.var_args_param,
+ .bound_fn,
=> return writer.writeAll(@tagName(t)),
.enum_literal => return writer.writeAll("@Type(.EnumLiteral)"),
@@ -941,6 +963,10 @@ pub const Type = extern union {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.owner_decl.renderFullyQualifiedName(writer);
},
+ .enum_numbered => {
+ const enum_numbered = ty.castTag(.enum_numbered).?.data;
+ return enum_numbered.owner_decl.renderFullyQualifiedName(writer);
+ },
.@"opaque" => {
// TODO use declaration name
return writer.writeAll("opaque {}");
@@ -958,6 +984,7 @@ pub const Type = extern union {
.atomic_order => return writer.writeAll("std.builtin.AtomicOrder"),
.atomic_rmw_op => return writer.writeAll("std.builtin.AtomicRmwOp"),
.calling_convention => return writer.writeAll("std.builtin.CallingConvention"),
+ .address_space => return writer.writeAll("std.builtin.AddressSpace"),
.float_mode => return writer.writeAll("std.builtin.FloatMode"),
.reduce_op => return writer.writeAll("std.builtin.ReduceOp"),
.call_options => return writer.writeAll("std.builtin.CallOptions"),
@@ -1111,6 +1138,9 @@ pub const Type = extern union {
}
try writer.writeAll(") ");
}
+ if (payload.@"addrspace" != .generic) {
+ try writer.print("addrspace(.{s}) ", .{@tagName(payload.@"addrspace")});
+ }
if (!payload.mutable) try writer.writeAll("const ");
if (payload.@"volatile") try writer.writeAll("volatile ");
if (payload.@"allowzero") try writer.writeAll("allowzero ");
@@ -1186,6 +1216,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -1220,6 +1251,7 @@ pub const Type = extern union {
.var_args_param => unreachable,
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
+ .bound_fn => unreachable,
.array_u8,
.array_u8_sentinel_0,
@@ -1246,6 +1278,7 @@ pub const Type = extern union {
.@"union",
.union_tagged,
.enum_simple,
+ .enum_numbered,
.enum_full,
.enum_nonexhaustive,
=> false, // TODO some of these should be `true` depending on their child types
@@ -1301,6 +1334,7 @@ pub const Type = extern union {
.atomic_order => return Value.initTag(.atomic_order_type),
.atomic_rmw_op => return Value.initTag(.atomic_rmw_op_type),
.calling_convention => return Value.initTag(.calling_convention_type),
+ .address_space => return Value.initTag(.address_space_type),
.float_mode => return Value.initTag(.float_mode_type),
.reduce_op => return Value.initTag(.reduce_op_type),
.call_options => return Value.initTag(.call_options_type),
@@ -1313,6 +1347,8 @@ pub const Type = extern union {
}
}
+ /// For structs and unions, if the type does not have their fields resolved
+ /// this will return `false`.
pub fn hasCodeGenBits(self: Type) bool {
return switch (self.tag()) {
.u1,
@@ -1343,10 +1379,6 @@ pub const Type = extern union {
.f128,
.bool,
.anyerror,
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.array_u8_sentinel_0,
@@ -1362,6 +1394,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -1373,15 +1406,17 @@ pub const Type = extern union {
.function => !self.castTag(.function).?.data.is_generic,
+ .fn_noreturn_no_args,
+ .fn_void_no_args,
+ .fn_naked_noreturn_no_args,
+ .fn_ccc_void_no_args,
+ => true,
+
.@"struct" => {
- // TODO introduce lazy value mechanism
const struct_obj = self.castTag(.@"struct").?.data;
if (struct_obj.known_has_bits) {
return true;
}
- assert(struct_obj.status == .have_field_types or
- struct_obj.status == .layout_wip or
- struct_obj.status == .have_layout);
for (struct_obj.fields.values()) |value| {
if (value.ty.hasCodeGenBits())
return true;
@@ -1397,7 +1432,7 @@ pub const Type = extern union {
const enum_simple = self.castTag(.enum_simple).?.data;
return enum_simple.fields.count() >= 2;
},
- .enum_nonexhaustive => {
+ .enum_numbered, .enum_nonexhaustive => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
return int_tag_ty.hasCodeGenBits();
@@ -1448,6 +1483,7 @@ pub const Type = extern union {
.empty_struct_literal,
.@"opaque",
.type_info,
+ .bound_fn,
=> false,
.inferred_alloc_const => unreachable,
@@ -1458,7 +1494,9 @@ pub const Type = extern union {
}
pub fn isNoReturn(self: Type) bool {
- const definitely_correct_result = self.zigTypeTag() == .NoReturn;
+ const definitely_correct_result =
+ self.tag_if_small_enough != .bound_fn and
+ self.zigTypeTag() == .NoReturn;
const fast_result = self.tag_if_small_enough == Tag.noreturn;
assert(fast_result == definitely_correct_result);
return fast_result;
@@ -1496,6 +1534,30 @@ pub const Type = extern union {
}
}
+ pub fn ptrAddressSpace(self: Type) std.builtin.AddressSpace {
+ return switch (self.tag()) {
+ .single_const_pointer_to_comptime_int,
+ .const_slice_u8,
+ .single_const_pointer,
+ .single_mut_pointer,
+ .many_const_pointer,
+ .many_mut_pointer,
+ .c_const_pointer,
+ .c_mut_pointer,
+ .const_slice,
+ .mut_slice,
+ .inferred_alloc_const,
+ .inferred_alloc_mut,
+ .manyptr_u8,
+ .manyptr_const_u8,
+ => .generic,
+
+ .pointer => self.castTag(.pointer).?.data.@"addrspace",
+
+ else => unreachable,
+ };
+ }
+
/// Asserts that hasCodeGenBits() is true.
pub fn abiAlignment(self: Type, target: Target) u32 {
return switch (self.tag()) {
@@ -1508,6 +1570,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -1583,7 +1646,11 @@ pub const Type = extern union {
.int_signed, .int_unsigned => {
const bits: u16 = self.cast(Payload.Bits).?.data;
- return std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8);
+ if (bits <= 8) return 1;
+ if (bits <= 16) return 2;
+ if (bits <= 32) return 4;
+ if (bits <= 64) return 8;
+ return 16;
},
.optional => {
@@ -1629,7 +1696,7 @@ pub const Type = extern union {
assert(biggest != 0);
return biggest;
},
- .enum_full, .enum_nonexhaustive, .enum_simple => {
+ .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
return int_tag_ty.abiAlignment(target);
@@ -1676,6 +1743,7 @@ pub const Type = extern union {
.@"opaque",
.var_args_param,
.type_info,
+ .bound_fn,
=> unreachable,
.generic_poison => unreachable,
@@ -1708,13 +1776,30 @@ pub const Type = extern union {
.var_args_param => unreachable,
.generic_poison => unreachable,
.type_info => unreachable,
+ .bound_fn => unreachable,
.@"struct" => {
const s = self.castTag(.@"struct").?.data;
assert(s.status == .have_layout);
- @panic("TODO abiSize struct");
+ const is_packed = s.layout == .Packed;
+ if (is_packed) @panic("TODO packed structs");
+ var size: u64 = 0;
+ for (s.fields.values()) |field| {
+ if (!field.ty.hasCodeGenBits()) continue;
+
+ const field_align = a: {
+ if (field.abi_align.tag() == .abi_align_default) {
+ break :a field.ty.abiAlignment(target);
+ } else {
+ break :a field.abi_align.toUnsignedInt();
+ }
+ };
+ size = std.mem.alignForwardGeneric(u64, size, field_align);
+ size += field.ty.abiSize(target);
+ }
+ return size;
},
- .enum_simple, .enum_full, .enum_nonexhaustive => {
+ .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
return int_tag_ty.abiSize(target);
@@ -1730,6 +1815,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -1818,6 +1904,7 @@ pub const Type = extern union {
.int_signed, .int_unsigned => {
const bits: u16 = self.cast(Payload.Bits).?.data;
+ if (bits == 0) return 0;
return std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8);
},
@@ -1875,11 +1962,12 @@ pub const Type = extern union {
.@"opaque" => unreachable,
.var_args_param => unreachable,
.generic_poison => unreachable,
+ .bound_fn => unreachable,
.@"struct" => {
@panic("TODO bitSize struct");
},
- .enum_simple, .enum_full, .enum_nonexhaustive => {
+ .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
return int_tag_ty.bitSize(target);
@@ -2014,6 +2102,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -2024,23 +2113,6 @@ pub const Type = extern union {
};
}
- /// Asserts the type is an enum.
- pub fn intTagType(self: Type, buffer: *Payload.Bits) Type {
- switch (self.tag()) {
- .enum_full, .enum_nonexhaustive => return self.cast(Payload.EnumFull).?.data.tag_ty,
- .enum_simple => {
- const enum_simple = self.castTag(.enum_simple).?.data;
- const bits = std.math.log2_int_ceil(usize, enum_simple.fields.count());
- buffer.* = .{
- .base = .{ .tag = .int_unsigned },
- .data = bits,
- };
- return Type.initPayload(&buffer.base);
- },
- else => unreachable,
- }
- }
-
pub fn isSinglePointer(self: Type) bool {
return switch (self.tag()) {
.single_const_pointer,
@@ -2100,42 +2172,82 @@ pub const Type = extern union {
};
}
- pub fn slicePtrFieldType(self: Type, buffer: *Payload.ElemType) Type {
+ pub const SlicePtrFieldTypeBuffer = union {
+ elem_type: Payload.ElemType,
+ pointer: Payload.Pointer,
+ };
+
+ pub fn slicePtrFieldType(self: Type, buffer: *SlicePtrFieldTypeBuffer) Type {
switch (self.tag()) {
.const_slice_u8 => return Type.initTag(.manyptr_const_u8),
.const_slice => {
const elem_type = self.castTag(.const_slice).?.data;
buffer.* = .{
- .base = .{ .tag = .many_const_pointer },
- .data = elem_type,
+ .elem_type = .{
+ .base = .{ .tag = .many_const_pointer },
+ .data = elem_type,
+ },
};
- return Type.initPayload(&buffer.base);
+ return Type.initPayload(&buffer.elem_type.base);
},
.mut_slice => {
const elem_type = self.castTag(.mut_slice).?.data;
buffer.* = .{
- .base = .{ .tag = .many_mut_pointer },
- .data = elem_type,
+ .elem_type = .{
+ .base = .{ .tag = .many_mut_pointer },
+ .data = elem_type,
+ },
};
- return Type.initPayload(&buffer.base);
+ return Type.initPayload(&buffer.elem_type.base);
},
.pointer => {
const payload = self.castTag(.pointer).?.data;
assert(payload.size == .Slice);
- if (payload.mutable) {
+
+ if (payload.sentinel != null or
+ payload.@"align" != 0 or
+ payload.@"addrspace" != .generic or
+ payload.bit_offset != 0 or
+ payload.host_size != 0 or
+ payload.@"allowzero" or
+ payload.@"volatile")
+ {
buffer.* = .{
- .base = .{ .tag = .many_mut_pointer },
- .data = payload.pointee_type,
+ .pointer = .{
+ .data = .{
+ .pointee_type = payload.pointee_type,
+ .sentinel = payload.sentinel,
+ .@"align" = payload.@"align",
+ .@"addrspace" = payload.@"addrspace",
+ .bit_offset = payload.bit_offset,
+ .host_size = payload.host_size,
+ .@"allowzero" = payload.@"allowzero",
+ .mutable = payload.mutable,
+ .@"volatile" = payload.@"volatile",
+ .size = .Many,
+ },
+ },
};
+ return Type.initPayload(&buffer.pointer.base);
+ } else if (payload.mutable) {
+ buffer.* = .{
+ .elem_type = .{
+ .base = .{ .tag = .many_mut_pointer },
+ .data = payload.pointee_type,
+ },
+ };
+ return Type.initPayload(&buffer.elem_type.base);
} else {
buffer.* = .{
- .base = .{ .tag = .many_const_pointer },
- .data = payload.pointee_type,
+ .elem_type = .{
+ .base = .{ .tag = .many_const_pointer },
+ .data = payload.pointee_type,
+ },
};
+ return Type.initPayload(&buffer.elem_type.base);
}
- return Type.initPayload(&buffer.base);
},
else => unreachable,
@@ -2191,6 +2303,44 @@ pub const Type = extern union {
};
}
+ pub fn isPtrAtRuntime(self: Type) bool {
+ switch (self.tag()) {
+ .c_const_pointer,
+ .c_mut_pointer,
+ .many_const_pointer,
+ .many_mut_pointer,
+ .manyptr_const_u8,
+ .manyptr_u8,
+ .optional_single_const_pointer,
+ .optional_single_mut_pointer,
+ .single_const_pointer,
+ .single_const_pointer_to_comptime_int,
+ .single_mut_pointer,
+ => return true,
+
+ .pointer => switch (self.castTag(.pointer).?.data.size) {
+ .Slice => return false,
+ .One, .Many, .C => return true,
+ },
+
+ .optional => {
+ var buf: Payload.ElemType = undefined;
+ const child_type = self.optionalChild(&buf);
+ // optionals of zero sized pointers behave like bools
+ if (!child_type.hasCodeGenBits()) return false;
+ if (child_type.zigTypeTag() != .Pointer) return false;
+
+ const info = child_type.ptrInfo().data;
+ switch (info.size) {
+ .Slice, .C => return false,
+ .Many, .One => return !info.@"allowzero",
+ }
+ },
+
+ else => return false,
+ }
+ }
+
/// Asserts that the type is an optional
pub fn isPtrLikeOptional(self: Type) bool {
switch (self.tag()) {
@@ -2203,8 +2353,13 @@ pub const Type = extern union {
const child_type = self.optionalChild(&buf);
// optionals of zero sized pointers behave like bools
if (!child_type.hasCodeGenBits()) return false;
+ if (child_type.zigTypeTag() != .Pointer) return false;
- return child_type.zigTypeTag() == .Pointer and !child_type.isCPtr();
+ const info = child_type.ptrInfo().data;
+ switch (info.size) {
+ .Slice, .C => return false,
+ .Many, .One => return !info.@"allowzero",
+ }
},
else => unreachable,
}
@@ -2252,12 +2407,14 @@ pub const Type = extern union {
};
}
- /// Asserts the type is a pointer or array type.
- pub fn elemType(self: Type) Type {
- return switch (self.tag()) {
- .vector => self.castTag(.vector).?.data.elem_type,
- .array => self.castTag(.array).?.data.elem_type,
- .array_sentinel => self.castTag(.array_sentinel).?.data.elem_type,
+ /// For *[N]T, returns [N]T.
+ /// For *T, returns T.
+ /// For [*]T, returns T.
+ pub fn childType(ty: Type) Type {
+ return switch (ty.tag()) {
+ .vector => ty.castTag(.vector).?.data.elem_type,
+ .array => ty.castTag(.array).?.data.elem_type,
+ .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type,
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
@@ -2266,7 +2423,7 @@ pub const Type = extern union {
.c_mut_pointer,
.const_slice,
.mut_slice,
- => self.castPointer().?.data,
+ => ty.castPointer().?.data,
.array_u8,
.array_u8_sentinel_0,
@@ -2276,12 +2433,70 @@ pub const Type = extern union {
=> Type.initTag(.u8),
.single_const_pointer_to_comptime_int => Type.initTag(.comptime_int),
- .pointer => self.castTag(.pointer).?.data.pointee_type,
+ .pointer => ty.castTag(.pointer).?.data.pointee_type,
else => unreachable,
};
}
+ /// Asserts the type is a pointer or array type.
+ /// TODO this is deprecated in favor of `childType`.
+ pub const elemType = childType;
+
+ /// For *[N]T, returns T.
+ /// For ?*T, returns T.
+ /// For ?*[N]T, returns T.
+ /// For ?[*]T, returns T.
+ /// For *T, returns T.
+ /// For [*]T, returns T.
+ pub fn elemType2(ty: Type) Type {
+ return switch (ty.tag()) {
+ .vector => ty.castTag(.vector).?.data.elem_type,
+ .array => ty.castTag(.array).?.data.elem_type,
+ .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type,
+ .many_const_pointer,
+ .many_mut_pointer,
+ .c_const_pointer,
+ .c_mut_pointer,
+ .const_slice,
+ .mut_slice,
+ => ty.castPointer().?.data,
+
+ .single_const_pointer,
+ .single_mut_pointer,
+ => ty.castPointer().?.data.shallowElemType(),
+
+ .array_u8,
+ .array_u8_sentinel_0,
+ .const_slice_u8,
+ .manyptr_u8,
+ .manyptr_const_u8,
+ => Type.initTag(.u8),
+
+ .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int),
+ .pointer => {
+ const info = ty.castTag(.pointer).?.data;
+ const child_ty = info.pointee_type;
+ if (info.size == .One) {
+ return child_ty.shallowElemType();
+ } else {
+ return child_ty;
+ }
+ },
+
+ // TODO handle optionals
+
+ else => unreachable,
+ };
+ }
+
+ fn shallowElemType(child_ty: Type) Type {
+ return switch (child_ty.zigTypeTag()) {
+ .Array, .Vector => child_ty.childType(),
+ else => child_ty,
+ };
+ }
+
/// Asserts that the type is an optional.
/// Resulting `Type` will have inner memory referencing `buf`.
pub fn optionalChild(self: Type, buf: *Payload.ElemType) Type {
@@ -2320,6 +2535,21 @@ pub const Type = extern union {
}
}
+ /// Returns the tag type of a union, if the type is a union and it has a tag type.
+ /// Otherwise, returns `null`.
+ pub fn unionTagType(ty: Type) ?Type {
+ return switch (ty.tag()) {
+ .union_tagged => ty.castTag(.union_tagged).?.data.tag_ty,
+ else => null,
+ };
+ }
+
+ pub fn unionFieldType(ty: Type, enum_tag: Value) Type {
+ const union_obj = ty.cast(Payload.Union).?.data;
+ const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag).?;
+ return union_obj.fields.values()[index].ty;
+ }
+
/// Asserts that the type is an error union.
pub fn errorUnionPayload(self: Type) Type {
return switch (self.tag()) {
@@ -2476,7 +2706,8 @@ pub const Type = extern union {
};
}
- pub fn isFloat(self: Type) bool {
+ /// Returns `false` for `comptime_float`.
+ pub fn isRuntimeFloat(self: Type) bool {
return switch (self.tag()) {
.f16,
.f32,
@@ -2489,13 +2720,29 @@ pub const Type = extern union {
};
}
- /// Asserts the type is a fixed-size float.
+ /// Returns `true` for `comptime_float`.
+ pub fn isAnyFloat(self: Type) bool {
+ return switch (self.tag()) {
+ .f16,
+ .f32,
+ .f64,
+ .f128,
+ .c_longdouble,
+ .comptime_float,
+ => true,
+
+ else => false,
+ };
+ }
+
+ /// Asserts the type is a fixed-size float or comptime_float.
+ /// Returns 128 for comptime_float types.
pub fn floatBits(self: Type, target: Target) u16 {
return switch (self.tag()) {
.f16 => 16,
.f32 => 32,
.f64 => 64,
- .f128 => 128,
+ .f128, .comptime_float => 128,
.c_longdouble => CType.longdouble.sizeInBits(target),
else => unreachable,
@@ -2728,6 +2975,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -2743,6 +2991,7 @@ pub const Type = extern union {
.single_const_pointer,
.single_mut_pointer,
.pointer,
+ .bound_fn,
=> return null,
.@"struct" => {
@@ -2772,6 +3021,7 @@ pub const Type = extern union {
}
},
.enum_nonexhaustive => ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty,
+ .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty,
.@"union" => {
return null; // TODO
},
@@ -2832,7 +3082,7 @@ pub const Type = extern union {
}
/// Asserts that self.zigTypeTag() == .Int.
- pub fn minInt(self: Type, arena: *std.heap.ArenaAllocator, target: Target) !Value {
+ pub fn minInt(self: Type, arena: *Allocator, target: Target) !Value {
assert(self.zigTypeTag() == .Int);
const info = self.intInfo(target);
@@ -2842,35 +3092,35 @@ pub const Type = extern union {
if ((info.bits - 1) <= std.math.maxInt(u6)) {
const n: i64 = -(@as(i64, 1) << @truncate(u6, info.bits - 1));
- return Value.Tag.int_i64.create(&arena.allocator, n);
+ return Value.Tag.int_i64.create(arena, n);
}
- var res = try std.math.big.int.Managed.initSet(&arena.allocator, 1);
+ var res = try std.math.big.int.Managed.initSet(arena, 1);
try res.shiftLeft(res, info.bits - 1);
res.negate();
const res_const = res.toConst();
if (res_const.positive) {
- return Value.Tag.int_big_positive.create(&arena.allocator, res_const.limbs);
+ return Value.Tag.int_big_positive.create(arena, res_const.limbs);
} else {
- return Value.Tag.int_big_negative.create(&arena.allocator, res_const.limbs);
+ return Value.Tag.int_big_negative.create(arena, res_const.limbs);
}
}
/// Asserts that self.zigTypeTag() == .Int.
- pub fn maxInt(self: Type, arena: *std.heap.ArenaAllocator, target: Target) !Value {
+ pub fn maxInt(self: Type, arena: *Allocator, target: Target) !Value {
assert(self.zigTypeTag() == .Int);
const info = self.intInfo(target);
if (info.signedness == .signed and (info.bits - 1) <= std.math.maxInt(u6)) {
const n: i64 = (@as(i64, 1) << @truncate(u6, info.bits - 1)) - 1;
- return Value.Tag.int_i64.create(&arena.allocator, n);
+ return Value.Tag.int_i64.create(arena, n);
} else if (info.signedness == .signed and info.bits <= std.math.maxInt(u6)) {
const n: u64 = (@as(u64, 1) << @truncate(u6, info.bits)) - 1;
- return Value.Tag.int_u64.create(&arena.allocator, n);
+ return Value.Tag.int_u64.create(arena, n);
}
- var res = try std.math.big.int.Managed.initSet(&arena.allocator, 1);
+ var res = try std.math.big.int.Managed.initSet(arena, 1);
try res.shiftLeft(res, info.bits - @boolToInt(info.signedness == .signed));
const one = std.math.big.int.Const{
.limbs = &[_]std.math.big.Limb{1},
@@ -2880,37 +3130,27 @@ pub const Type = extern union {
const res_const = res.toConst();
if (res_const.positive) {
- return Value.Tag.int_big_positive.create(&arena.allocator, res_const.limbs);
+ return Value.Tag.int_big_positive.create(arena, res_const.limbs);
} else {
- return Value.Tag.int_big_negative.create(&arena.allocator, res_const.limbs);
+ return Value.Tag.int_big_negative.create(arena, res_const.limbs);
}
}
- /// Returns the integer tag type of the enum.
- pub fn enumTagType(ty: Type, buffer: *Payload.Bits) Type {
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Payload.EnumFull).?.data;
- return enum_full.tag_ty;
- },
+ /// Asserts the type is an enum or a union.
+ /// TODO support unions
+ pub fn intTagType(self: Type, buffer: *Payload.Bits) Type {
+ switch (self.tag()) {
+ .enum_full, .enum_nonexhaustive => return self.cast(Payload.EnumFull).?.data.tag_ty,
+ .enum_numbered => return self.castTag(.enum_numbered).?.data.tag_ty,
.enum_simple => {
- const enum_simple = ty.castTag(.enum_simple).?.data;
+ const enum_simple = self.castTag(.enum_simple).?.data;
+ const bits = std.math.log2_int_ceil(usize, enum_simple.fields.count());
buffer.* = .{
.base = .{ .tag = .int_unsigned },
- .data = std.math.log2_int_ceil(usize, enum_simple.fields.count()),
+ .data = bits,
};
return Type.initPayload(&buffer.base);
},
- .atomic_order,
- .atomic_rmw_op,
- .calling_convention,
- .float_mode,
- .reduce_op,
- .call_options,
- .export_options,
- .extern_options,
- => @panic("TODO resolve std.builtin types"),
-
else => unreachable,
}
}
@@ -2928,13 +3168,12 @@ pub const Type = extern union {
const enum_full = ty.cast(Payload.EnumFull).?.data;
return enum_full.fields.count();
},
- .enum_simple => {
- const enum_simple = ty.castTag(.enum_simple).?.data;
- return enum_simple.fields.count();
- },
+ .enum_simple => return ty.castTag(.enum_simple).?.data.fields.count(),
+ .enum_numbered => return ty.castTag(.enum_numbered).?.data.fields.count(),
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -2956,9 +3195,14 @@ pub const Type = extern union {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.fields.keys()[field_index];
},
+ .enum_numbered => {
+ const enum_numbered = ty.castTag(.enum_numbered).?.data;
+ return enum_numbered.fields.keys()[field_index];
+ },
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -2979,9 +3223,14 @@ pub const Type = extern union {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.fields.getIndex(field_name);
},
+ .enum_numbered => {
+ const enum_numbered = ty.castTag(.enum_numbered).?.data;
+ return enum_numbered.fields.getIndex(field_name);
+ },
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -3021,6 +3270,15 @@ pub const Type = extern union {
return enum_full.values.getIndexContext(enum_tag, .{ .ty = tag_ty });
}
},
+ .enum_numbered => {
+ const enum_obj = ty.castTag(.enum_numbered).?.data;
+ const tag_ty = enum_obj.tag_ty;
+ if (enum_obj.values.count() == 0) {
+ return S.fieldWithRange(tag_ty, enum_tag, enum_obj.fields.count());
+ } else {
+ return enum_obj.values.getIndexContext(enum_tag, .{ .ty = tag_ty });
+ }
+ },
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
const fields_len = enum_simple.fields.count();
@@ -3035,6 +3293,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -3071,6 +3330,7 @@ pub const Type = extern union {
const enum_full = ty.cast(Payload.EnumFull).?.data;
return enum_full.srcLoc();
},
+ .enum_numbered => return ty.castTag(.enum_numbered).?.data.srcLoc(),
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.srcLoc();
@@ -3090,6 +3350,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -3107,6 +3368,7 @@ pub const Type = extern union {
const enum_full = ty.cast(Payload.EnumFull).?.data;
return enum_full.owner_decl;
},
+ .enum_numbered => return ty.castTag(.enum_numbered).?.data.owner_decl,
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.owner_decl;
@@ -3127,6 +3389,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -3163,6 +3426,15 @@ pub const Type = extern union {
return enum_full.values.containsContext(int, .{ .ty = tag_ty });
}
},
+ .enum_numbered => {
+ const enum_obj = ty.castTag(.enum_numbered).?.data;
+ const tag_ty = enum_obj.tag_ty;
+ if (enum_obj.values.count() == 0) {
+ return S.intInRange(tag_ty, int, enum_obj.fields.count());
+ } else {
+ return enum_obj.values.containsContext(int, .{ .ty = tag_ty });
+ }
+ },
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
const fields_len = enum_simple.fields.count();
@@ -3177,6 +3449,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -3237,6 +3510,7 @@ pub const Type = extern union {
atomic_order,
atomic_rmw_op,
calling_convention,
+ address_space,
float_mode,
reduce_op,
call_options,
@@ -3254,7 +3528,7 @@ pub const Type = extern union {
anyerror_void_error_union,
generic_poison,
/// This is a special type for variadic parameters of a function call.
- /// Casts to it will validate that the type can be passed to a c calling convetion function.
+ /// Casts to it will validate that the type can be passed to a c calling convention function.
var_args_param,
/// Same as `empty_struct` except it has an empty namespace.
empty_struct_literal,
@@ -3264,6 +3538,7 @@ pub const Type = extern union {
inferred_alloc_mut,
/// Same as `inferred_alloc_mut` but the local is `var` not `const`.
inferred_alloc_const, // See last_no_payload_tag below.
+ bound_fn,
// After this, the tag requires a payload.
array_u8,
@@ -3298,10 +3573,11 @@ pub const Type = extern union {
@"union",
union_tagged,
enum_simple,
+ enum_numbered,
enum_full,
enum_nonexhaustive,
- pub const last_no_payload_tag = Tag.inferred_alloc_const;
+ pub const last_no_payload_tag = Tag.bound_fn;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
pub fn Type(comptime t: Tag) type {
@@ -3360,6 +3636,7 @@ pub const Type = extern union {
.atomic_order,
.atomic_rmw_op,
.calling_convention,
+ .address_space,
.float_mode,
.reduce_op,
.call_options,
@@ -3367,6 +3644,7 @@ pub const Type = extern union {
.extern_options,
.type_info,
.@"anyframe",
+ .bound_fn,
=> @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
.array_u8,
@@ -3405,6 +3683,7 @@ pub const Type = extern union {
.@"union", .union_tagged => Payload.Union,
.enum_full, .enum_nonexhaustive => Payload.EnumFull,
.enum_simple => Payload.EnumSimple,
+ .enum_numbered => Payload.EnumNumbered,
.empty_struct => Payload.ContainerScope,
};
}
@@ -3415,12 +3694,12 @@ pub const Type = extern union {
}
pub fn create(comptime t: Tag, ally: *Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type {
- const ptr = try ally.create(t.Type());
- ptr.* = .{
+ const p = try ally.create(t.Type());
+ p.* = .{
.base = .{ .tag = t },
.data = data,
};
- return file_struct.Type{ .ptr_otherwise = &ptr.base };
+ return file_struct.Type{ .ptr_otherwise = &p.base };
}
pub fn Data(comptime t: Tag) type {
@@ -3510,18 +3789,23 @@ pub const Type = extern union {
pub const base_tag = Tag.pointer;
base: Payload = Payload{ .tag = base_tag },
- data: struct {
+ data: Data,
+
+ pub const Data = struct {
pointee_type: Type,
- sentinel: ?Value,
+ sentinel: ?Value = null,
/// If zero use pointee_type.AbiAlign()
- @"align": u32,
- bit_offset: u16,
- host_size: u16,
- @"allowzero": bool,
- mutable: bool,
- @"volatile": bool,
- size: std.builtin.TypeInfo.Pointer.Size,
- },
+ @"align": u32 = 0,
+ /// See src/target.zig defaultAddressSpace function for how to obtain
+ /// an appropriate value for this field.
+ @"addrspace": std.builtin.AddressSpace,
+ bit_offset: u16 = 0,
+ host_size: u16 = 0,
+ @"allowzero": bool = false,
+ mutable: bool = true, // TODO change this to const, not mutable
+ @"volatile": bool = false,
+ size: std.builtin.TypeInfo.Pointer.Size = .One,
+ };
};
pub const ErrorUnion = struct {
@@ -3576,7 +3860,64 @@ pub const Type = extern union {
base: Payload = .{ .tag = .enum_simple },
data: *Module.EnumSimple,
};
+
+ pub const EnumNumbered = struct {
+ base: Payload = .{ .tag = .enum_numbered },
+ data: *Module.EnumNumbered,
+ };
};
+
+ pub const @"bool" = initTag(.bool);
+ pub const @"usize" = initTag(.usize);
+ pub const @"comptime_int" = initTag(.comptime_int);
+
+ pub fn ptr(arena: *Allocator, d: Payload.Pointer.Data) !Type {
+ assert(d.host_size == 0 or d.bit_offset < d.host_size * 8);
+
+ if (d.sentinel != null or d.@"align" != 0 or d.@"addrspace" != .generic or
+ d.bit_offset != 0 or d.host_size != 0 or d.@"allowzero" or d.@"volatile")
+ {
+ return Type.Tag.pointer.create(arena, d);
+ }
+
+ if (!d.mutable and d.size == .Slice and d.pointee_type.eql(Type.initTag(.u8))) {
+ return Type.initTag(.const_slice_u8);
+ }
+
+ // TODO stage1 type inference bug
+ const T = Type.Tag;
+
+ const type_payload = try arena.create(Type.Payload.ElemType);
+ type_payload.* = .{
+ .base = .{
+ .tag = switch (d.size) {
+ .One => if (d.mutable) T.single_mut_pointer else T.single_const_pointer,
+ .Many => if (d.mutable) T.many_mut_pointer else T.many_const_pointer,
+ .C => if (d.mutable) T.c_mut_pointer else T.c_const_pointer,
+ .Slice => if (d.mutable) T.mut_slice else T.const_slice,
+ },
+ },
+ .data = d.pointee_type,
+ };
+ return Type.initPayload(&type_payload.base);
+ }
+
+ pub fn smallestUnsignedInt(arena: *Allocator, max: u64) !Type {
+ const bits = bits: {
+ if (max == 0) break :bits 0;
+ const base = std.math.log2(max);
+ const upper = (@as(u64, 1) << @intCast(u6, base)) - 1;
+ break :bits base + @boolToInt(upper < max);
+ };
+ return switch (@intCast(u16, bits)) {
+ 1 => initTag(.u1),
+ 8 => initTag(.u8),
+ 16 => initTag(.u16),
+ 32 => initTag(.u32),
+ 64 => initTag(.u64),
+ else => |b| return Tag.int_unsigned.create(arena, b),
+ };
+ }
};
pub const CType = enum {
@@ -3633,6 +3974,7 @@ pub const CType = enum {
.wasi,
.emscripten,
.plan9,
+ .solaris,
=> switch (self) {
.short,
.ushort,
@@ -3684,7 +4026,6 @@ pub const CType = enum {
.fuchsia,
.kfreebsd,
.lv2,
- .solaris,
.zos,
.haiku,
.minix,
diff --git a/src/value.zig b/src/value.zig
index 88d0d04086..ac52654041 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -63,6 +63,7 @@ pub const Value = extern union {
atomic_order_type,
atomic_rmw_op_type,
calling_convention_type,
+ address_space_type,
float_mode_type,
reduce_op_type,
call_options_type,
@@ -158,6 +159,10 @@ pub const Value = extern union {
/// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc
/// instructions for comptime code.
inferred_alloc_comptime,
+ /// Used sometimes as the result of field_call_bind. This value is always temporary,
+ /// and refers directly to the air. It will never be referenced by the air itself.
+ /// TODO: This is probably a bad encoding, maybe put temp data in the sema instead.
+ bound_fn,
pub const last_no_payload_tag = Tag.empty_array;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
@@ -226,6 +231,7 @@ pub const Value = extern union {
.atomic_order_type,
.atomic_rmw_op_type,
.calling_convention_type,
+ .address_space_type,
.float_mode_type,
.reduce_op_type,
.call_options_type,
@@ -277,6 +283,7 @@ pub const Value = extern union {
.inferred_alloc => Payload.InferredAlloc,
.@"struct" => Payload.Struct,
.@"union" => Payload.Union,
+ .bound_fn => Payload.BoundFn,
};
}
@@ -412,6 +419,7 @@ pub const Value = extern union {
.atomic_order_type,
.atomic_rmw_op_type,
.calling_convention_type,
+ .address_space_type,
.float_mode_type,
.reduce_op_type,
.call_options_type,
@@ -419,6 +427,7 @@ pub const Value = extern union {
.extern_options_type,
.type_info_type,
.generic_poison,
+ .bound_fn,
=> unreachable,
.ty => {
@@ -625,6 +634,7 @@ pub const Value = extern union {
.atomic_order_type => return out_stream.writeAll("std.builtin.AtomicOrder"),
.atomic_rmw_op_type => return out_stream.writeAll("std.builtin.AtomicRmwOp"),
.calling_convention_type => return out_stream.writeAll("std.builtin.CallingConvention"),
+ .address_space_type => return out_stream.writeAll("std.builtin.AddressSpace"),
.float_mode_type => return out_stream.writeAll("std.builtin.FloatMode"),
.reduce_op_type => return out_stream.writeAll("std.builtin.ReduceOp"),
.call_options_type => return out_stream.writeAll("std.builtin.CallOptions"),
@@ -712,6 +722,10 @@ pub const Value = extern union {
try out_stream.writeAll("(opt_payload_ptr)");
val = val.castTag(.opt_payload_ptr).?.data;
},
+ .bound_fn => {
+ const bound_func = val.castTag(.bound_fn).?.data;
+ return out_stream.print("(bound_fn %{}(%{})", .{ bound_func.func_inst, bound_func.arg0_inst });
+ },
};
}
@@ -792,6 +806,7 @@ pub const Value = extern union {
.atomic_order_type => Type.initTag(.atomic_order),
.atomic_rmw_op_type => Type.initTag(.atomic_rmw_op),
.calling_convention_type => Type.initTag(.calling_convention),
+ .address_space_type => Type.initTag(.address_space),
.float_mode_type => Type.initTag(.float_mode),
.reduce_op_type => Type.initTag(.reduce_op),
.call_options_type => Type.initTag(.call_options),
@@ -932,7 +947,7 @@ pub const Value = extern union {
/// Asserts that the value is a float or an integer.
pub fn toFloat(self: Value, comptime T: type) T {
return switch (self.tag()) {
- .float_16 => @panic("TODO soft float"),
+ .float_16 => @floatCast(T, self.castTag(.float_16).?.data),
.float_32 => @floatCast(T, self.castTag(.float_32).?.data),
.float_64 => @floatCast(T, self.castTag(.float_64).?.data),
.float_128 => @floatCast(T, self.castTag(.float_128).?.data),
@@ -947,6 +962,45 @@ pub const Value = extern union {
};
}
+ pub fn clz(val: Value, ty: Type, target: Target) u64 {
+ const ty_bits = ty.intInfo(target).bits;
+ switch (val.tag()) {
+ .zero, .bool_false => return ty_bits,
+ .one, .bool_true => return ty_bits - 1,
+
+ .int_u64 => {
+ const big = @clz(u64, val.castTag(.int_u64).?.data);
+ return big + ty_bits - 64;
+ },
+ .int_i64 => {
+ @panic("TODO implement i64 Value clz");
+ },
+ .int_big_positive => {
+ // TODO: move this code into std lib big ints
+ const bigint = val.castTag(.int_big_positive).?.asBigInt();
+ // Limbs are stored in little-endian order but we need
+ // to iterate big-endian.
+ var total_limb_lz: u64 = 0;
+ var i: usize = bigint.limbs.len;
+ const bits_per_limb = @sizeOf(std.math.big.Limb) * 8;
+ while (i != 0) {
+ i -= 1;
+ const limb = bigint.limbs[i];
+ const this_limb_lz = @clz(std.math.big.Limb, limb);
+ total_limb_lz += this_limb_lz;
+ if (this_limb_lz != bits_per_limb) break;
+ }
+ const total_limb_bits = bigint.limbs.len * bits_per_limb;
+ return total_limb_lz + ty_bits - total_limb_bits;
+ },
+ .int_big_negative => {
+ @panic("TODO implement int_big_negative Value clz");
+ },
+
+ else => unreachable,
+ }
+ }
+
/// Asserts the value is an integer and not undefined.
/// Returns the number of bits the value requires to represent stored in twos complement form.
pub fn intBitCountTwosComp(self: Value) usize {
@@ -1036,31 +1090,15 @@ pub const Value = extern union {
}
}
- /// Converts an integer or a float to a float.
- /// Returns `error.Overflow` if the value does not fit in the new type.
- pub fn floatCast(self: Value, allocator: *Allocator, dest_ty: Type) !Value {
+ /// Converts an integer or a float to a float. May result in a loss of information.
+ /// Caller can find out by equality checking the result against the operand.
+ pub fn floatCast(self: Value, arena: *Allocator, dest_ty: Type) !Value {
switch (dest_ty.tag()) {
- .f16 => {
- @panic("TODO add __trunctfhf2 to compiler-rt");
- //const res = try Value.Tag.float_16.create(allocator, self.toFloat(f16));
- //if (!self.eql(res))
- // return error.Overflow;
- //return res;
- },
- .f32 => {
- const res = try Value.Tag.float_32.create(allocator, self.toFloat(f32));
- if (!self.eql(res, dest_ty))
- return error.Overflow;
- return res;
- },
- .f64 => {
- const res = try Value.Tag.float_64.create(allocator, self.toFloat(f64));
- if (!self.eql(res, dest_ty))
- return error.Overflow;
- return res;
- },
+ .f16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
+ .f32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
+ .f64 => return Value.Tag.float_64.create(arena, self.toFloat(f64)),
.f128, .comptime_float, .c_longdouble => {
- return Value.Tag.float_128.create(allocator, self.toFloat(f128));
+ return Value.Tag.float_128.create(arena, self.toFloat(f128));
},
else => unreachable,
}
@@ -1286,7 +1324,12 @@ pub const Value = extern union {
}
},
.Union => {
- @panic("TODO implement hashing union values");
+ const union_obj = val.castTag(.@"union").?.data;
+ if (ty.unionTagType()) |tag_ty| {
+ union_obj.tag.hash(tag_ty, hasher);
+ }
+ const active_field_ty = ty.unionFieldType(union_obj.tag);
+ union_obj.val.hash(active_field_ty, hasher);
},
.Fn => {
@panic("TODO implement hashing function values");
@@ -1442,6 +1485,14 @@ pub const Value = extern union {
}
}
+ pub fn unionTag(val: Value) Value {
+ switch (val.tag()) {
+ .undef => return val,
+ .@"union" => return val.castTag(.@"union").?.data.tag,
+ else => unreachable,
+ }
+ }
+
/// Returns a pointer to the element value at the index.
pub fn elemPtr(self: Value, allocator: *Allocator, index: usize) !Value {
if (self.castTag(.elem_ptr)) |elem_ptr| {
@@ -1524,6 +1575,341 @@ pub const Value = extern union {
};
}
+ pub fn intToFloat(val: Value, allocator: *Allocator, dest_ty: Type, target: Target) !Value {
+ switch (val.tag()) {
+ .undef, .zero, .one => return val,
+ .int_u64 => {
+ return intToFloatInner(val.castTag(.int_u64).?.data, allocator, dest_ty, target);
+ },
+ .int_i64 => {
+ return intToFloatInner(val.castTag(.int_i64).?.data, allocator, dest_ty, target);
+ },
+ .int_big_positive, .int_big_negative => @panic("big int to float"),
+ else => unreachable,
+ }
+ }
+
+ fn intToFloatInner(x: anytype, arena: *Allocator, dest_ty: Type, target: Target) !Value {
+ switch (dest_ty.floatBits(target)) {
+ 16 => return Value.Tag.float_16.create(arena, @intToFloat(f16, x)),
+ 32 => return Value.Tag.float_32.create(arena, @intToFloat(f32, x)),
+ 64 => return Value.Tag.float_64.create(arena, @intToFloat(f64, x)),
+ 128 => return Value.Tag.float_128.create(arena, @intToFloat(f128, x)),
+ else => unreachable,
+ }
+ }
+
+ /// Supports both floats and ints; handles undefined.
+ pub fn numberAddWrap(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: *Allocator,
+ target: Target,
+ ) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ if (ty.isAnyFloat()) {
+ return floatAdd(lhs, rhs, ty, arena);
+ }
+ const result = try intAdd(lhs, rhs, arena);
+
+ const max = try ty.maxInt(arena, target);
+ if (compare(result, .gt, max, ty)) {
+ @panic("TODO comptime wrapping integer addition");
+ }
+
+ const min = try ty.minInt(arena, target);
+ if (compare(result, .lt, min, ty)) {
+ @panic("TODO comptime wrapping integer addition");
+ }
+
+ return result;
+ }
+
+ /// Supports integers only; asserts neither operand is undefined.
+ pub fn intAddSat(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: *Allocator,
+ target: Target,
+ ) !Value {
+ assert(!lhs.isUndef());
+ assert(!rhs.isUndef());
+
+ const result = try intAdd(lhs, rhs, arena);
+
+ const max = try ty.maxInt(arena, target);
+ if (compare(result, .gt, max, ty)) {
+ return max;
+ }
+
+ const min = try ty.minInt(arena, target);
+ if (compare(result, .lt, min, ty)) {
+ return min;
+ }
+
+ return result;
+ }
+
+ /// Supports both floats and ints; handles undefined.
+ pub fn numberSubWrap(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: *Allocator,
+ target: Target,
+ ) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ if (ty.isAnyFloat()) {
+ return floatSub(lhs, rhs, ty, arena);
+ }
+ const result = try intSub(lhs, rhs, arena);
+
+ const max = try ty.maxInt(arena, target);
+ if (compare(result, .gt, max, ty)) {
+ @panic("TODO comptime wrapping integer subtraction");
+ }
+
+ const min = try ty.minInt(arena, target);
+ if (compare(result, .lt, min, ty)) {
+ @panic("TODO comptime wrapping integer subtraction");
+ }
+
+ return result;
+ }
+
+ /// Supports integers only; asserts neither operand is undefined.
+ pub fn intSubSat(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: *Allocator,
+ target: Target,
+ ) !Value {
+ assert(!lhs.isUndef());
+ assert(!rhs.isUndef());
+
+ const result = try intSub(lhs, rhs, arena);
+
+ const max = try ty.maxInt(arena, target);
+ if (compare(result, .gt, max, ty)) {
+ return max;
+ }
+
+ const min = try ty.minInt(arena, target);
+ if (compare(result, .lt, min, ty)) {
+ return min;
+ }
+
+ return result;
+ }
+
+ /// Supports both floats and ints; handles undefined.
+ pub fn numberMulWrap(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: *Allocator,
+ target: Target,
+ ) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ if (ty.isAnyFloat()) {
+ return floatMul(lhs, rhs, ty, arena);
+ }
+ const result = try intMul(lhs, rhs, arena);
+
+ const max = try ty.maxInt(arena, target);
+ if (compare(result, .gt, max, ty)) {
+ @panic("TODO comptime wrapping integer multiplication");
+ }
+
+ const min = try ty.minInt(arena, target);
+ if (compare(result, .lt, min, ty)) {
+ @panic("TODO comptime wrapping integer multiplication");
+ }
+
+ return result;
+ }
+
+ /// Supports integers only; asserts neither operand is undefined.
+ pub fn intMulSat(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: *Allocator,
+ target: Target,
+ ) !Value {
+ assert(!lhs.isUndef());
+ assert(!rhs.isUndef());
+
+ const result = try intMul(lhs, rhs, arena);
+
+ const max = try ty.maxInt(arena, target);
+ if (compare(result, .gt, max, ty)) {
+ return max;
+ }
+
+ const min = try ty.minInt(arena, target);
+ if (compare(result, .lt, min, ty)) {
+ return min;
+ }
+
+ return result;
+ }
+
+ /// Supports both floats and ints; handles undefined.
+ pub fn numberMax(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs = try arena.alloc(
+ std.math.big.Limb,
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+
+ switch (lhs_bigint.order(rhs_bigint)) {
+ .lt => result_bigint.copy(rhs_bigint),
+ .gt, .eq => result_bigint.copy(lhs_bigint),
+ }
+
+ const result_limbs = result_bigint.limbs[0..result_bigint.len];
+
+ if (result_bigint.positive) {
+ return Value.Tag.int_big_positive.create(arena, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(arena, result_limbs);
+ }
+ }
+
+ /// Supports both floats and ints; handles undefined.
+ pub fn numberMin(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs = try arena.alloc(
+ std.math.big.Limb,
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+
+ switch (lhs_bigint.order(rhs_bigint)) {
+ .lt => result_bigint.copy(lhs_bigint),
+ .gt, .eq => result_bigint.copy(rhs_bigint),
+ }
+
+ const result_limbs = result_bigint.limbs[0..result_bigint.len];
+
+ if (result_bigint.positive) {
+ return Value.Tag.int_big_positive.create(arena, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(arena, result_limbs);
+ }
+ }
+
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs = try arena.alloc(
+ std.math.big.Limb,
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ result_bigint.bitAnd(lhs_bigint, rhs_bigint);
+ const result_limbs = result_bigint.limbs[0..result_bigint.len];
+
+ if (result_bigint.positive) {
+ return Value.Tag.int_big_positive.create(arena, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(arena, result_limbs);
+ }
+ }
+
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: *Allocator, target: Target) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ const anded = try bitwiseAnd(lhs, rhs, arena);
+
+ const all_ones = if (ty.isSignedInt())
+ try Value.Tag.int_i64.create(arena, -1)
+ else
+ try ty.maxInt(arena, target);
+
+ return bitwiseXor(anded, all_ones, arena);
+ }
+
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseOr(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs = try arena.alloc(
+ std.math.big.Limb,
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ result_bigint.bitOr(lhs_bigint, rhs_bigint);
+ const result_limbs = result_bigint.limbs[0..result_bigint.len];
+
+ if (result_bigint.positive) {
+ return Value.Tag.int_big_positive.create(arena, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(arena, result_limbs);
+ }
+ }
+
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseXor(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+ if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs = try arena.alloc(
+ std.math.big.Limb,
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ result_bigint.bitXor(lhs_bigint, rhs_bigint);
+ const result_limbs = result_bigint.limbs[0..result_bigint.len];
+
+ if (result_bigint.positive) {
+ return Value.Tag.int_big_positive.create(arena, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(arena, result_limbs);
+ }
+ }
+
pub fn intAdd(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
@@ -1599,6 +1985,82 @@ pub const Value = extern union {
}
}
+ pub fn intRem(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs_q = try allocator.alloc(
+ std.math.big.Limb,
+ lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1,
+ );
+ const limbs_r = try allocator.alloc(
+ std.math.big.Limb,
+ lhs_bigint.limbs.len,
+ );
+ const limbs_buffer = try allocator.alloc(
+ std.math.big.Limb,
+ std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
+ var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
+ result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer, null);
+ const result_limbs = result_r.limbs[0..result_r.len];
+
+ if (result_r.positive) {
+ return Value.Tag.int_big_positive.create(allocator, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(allocator, result_limbs);
+ }
+ }
+
+ pub fn intMod(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs_q = try allocator.alloc(
+ std.math.big.Limb,
+ lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1,
+ );
+ const limbs_r = try allocator.alloc(
+ std.math.big.Limb,
+ lhs_bigint.limbs.len,
+ );
+ const limbs_buffer = try allocator.alloc(
+ std.math.big.Limb,
+ std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
+ var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
+ result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer, null);
+ const result_limbs = result_r.limbs[0..result_r.len];
+
+ if (result_r.positive) {
+ return Value.Tag.int_big_positive.create(allocator, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(allocator, result_limbs);
+ }
+ }
+
+ pub fn floatRem(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ _ = lhs;
+ _ = rhs;
+ _ = allocator;
+ @panic("TODO implement Value.floatRem");
+ }
+
+ pub fn floatMod(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ _ = lhs;
+ _ = rhs;
+ _ = allocator;
+ @panic("TODO implement Value.floatMod");
+ }
+
pub fn intMul(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
@@ -1634,6 +2096,31 @@ pub const Value = extern union {
return Tag.int_u64.create(arena, truncated);
}
+ pub fn shl(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const shift = rhs.toUnsignedInt();
+ const limbs = try allocator.alloc(
+ std.math.big.Limb,
+ lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
+ );
+ var result_bigint = BigIntMutable{
+ .limbs = limbs,
+ .positive = undefined,
+ .len = undefined,
+ };
+ result_bigint.shiftLeft(lhs_bigint, shift);
+ const result_limbs = result_bigint.limbs[0..result_bigint.len];
+
+ if (result_bigint.positive) {
+ return Value.Tag.int_big_positive.create(allocator, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(allocator, result_limbs);
+ }
+ }
+
pub fn shr(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
@@ -1667,10 +2154,9 @@ pub const Value = extern union {
) !Value {
switch (float_type.tag()) {
.f16 => {
- @panic("TODO add __trunctfhf2 to compiler-rt");
- //const lhs_val = lhs.toFloat(f16);
- //const rhs_val = rhs.toFloat(f16);
- //return Value.Tag.float_16.create(arena, lhs_val + rhs_val);
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, lhs_val + rhs_val);
},
.f32 => {
const lhs_val = lhs.toFloat(f32);
@@ -1699,10 +2185,9 @@ pub const Value = extern union {
) !Value {
switch (float_type.tag()) {
.f16 => {
- @panic("TODO add __trunctfhf2 to compiler-rt");
- //const lhs_val = lhs.toFloat(f16);
- //const rhs_val = rhs.toFloat(f16);
- //return Value.Tag.float_16.create(arena, lhs_val - rhs_val);
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, lhs_val - rhs_val);
},
.f32 => {
const lhs_val = lhs.toFloat(f32);
@@ -1731,10 +2216,9 @@ pub const Value = extern union {
) !Value {
switch (float_type.tag()) {
.f16 => {
- @panic("TODO add __trunctfhf2 to compiler-rt");
- //const lhs_val = lhs.toFloat(f16);
- //const rhs_val = rhs.toFloat(f16);
- //return Value.Tag.float_16.create(arena, lhs_val / rhs_val);
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, lhs_val / rhs_val);
},
.f32 => {
const lhs_val = lhs.toFloat(f32);
@@ -1763,10 +2247,9 @@ pub const Value = extern union {
) !Value {
switch (float_type.tag()) {
.f16 => {
- @panic("TODO add __trunctfhf2 to compiler-rt");
- //const lhs_val = lhs.toFloat(f16);
- //const rhs_val = rhs.toFloat(f16);
- //return Value.Tag.float_16.create(arena, lhs_val * rhs_val);
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, lhs_val * rhs_val);
},
.f32 => {
const lhs_val = lhs.toFloat(f32);
@@ -1972,6 +2455,16 @@ pub const Value = extern union {
val: Value,
},
};
+
+ pub const BoundFn = struct {
+ pub const base_tag = Tag.bound_fn;
+
+ base: Payload = Payload{ .tag = base_tag },
+ data: struct {
+ func_inst: Air.Inst.Ref,
+ arg0_inst: Air.Inst.Ref,
+ },
+ };
};
/// Big enough to fit any non-BigInt value
@@ -1980,4 +2473,13 @@ pub const Value = extern union {
/// are possible without using an allocator.
limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
};
+
+ pub const zero = initTag(.zero);
+ pub const one = initTag(.one);
+ pub const negative_one: Value = .{ .ptr_otherwise = &negative_one_payload.base };
+};
+
+var negative_one_payload: Value.Payload.I64 = .{
+ .base = .{ .tag = .int_i64 },
+ .data = -1,
};
diff --git a/src/windows_com.hpp b/src/windows_com.hpp
index f9833e0912..5f0f5565f7 100644
--- a/src/windows_com.hpp
+++ b/src/windows_com.hpp
@@ -352,7 +352,7 @@ extern "C" {
///
/// Gets product-specific properties.
///
- /// A pointer to an instance of . This may be NULL if no properties are defined.
+ /// A pointer to an instance of . This may be NULL if no properties are defined.
/// Standard HRESULT indicating success or failure, including E_FILENOTFOUND if the instance state does not exist.
STDMETHOD(GetProperties)(
_Outptr_result_maybenull_ ISetupPropertyStore** ppProperties
diff --git a/src/zig_clang.cpp b/src/zig_clang.cpp
index 98d9259e0d..a6b4683a4c 100644
--- a/src/zig_clang.cpp
+++ b/src/zig_clang.cpp
@@ -1691,10 +1691,10 @@ void ZigClang_detect_enum_ConstantExprKind(clang::Expr::ConstantExprKind x) {
break;
}
}
-static_assert((clang::Expr::ConstantExprKind)ZigClangExpr_ContantExprKind_Normal == clang::Expr::ConstantExprKind::Normal, "");
-static_assert((clang::Expr::ConstantExprKind)ZigClangExpr_ContantExprKind_NonClassTemplateArgument == clang::Expr::ConstantExprKind::NonClassTemplateArgument, "");
-static_assert((clang::Expr::ConstantExprKind)ZigClangExpr_ContantExprKind_ClassTemplateArgument == clang::Expr::ConstantExprKind::ClassTemplateArgument, "");
-static_assert((clang::Expr::ConstantExprKind)ZigClangExpr_ContantExprKind_ImmediateInvocation == clang::Expr::ConstantExprKind::ImmediateInvocation, "");
+static_assert((clang::Expr::ConstantExprKind)ZigClangExpr_ConstantExprKind_Normal == clang::Expr::ConstantExprKind::Normal, "");
+static_assert((clang::Expr::ConstantExprKind)ZigClangExpr_ConstantExprKind_NonClassTemplateArgument == clang::Expr::ConstantExprKind::NonClassTemplateArgument, "");
+static_assert((clang::Expr::ConstantExprKind)ZigClangExpr_ConstantExprKind_ClassTemplateArgument == clang::Expr::ConstantExprKind::ClassTemplateArgument, "");
+static_assert((clang::Expr::ConstantExprKind)ZigClangExpr_ConstantExprKind_ImmediateInvocation == clang::Expr::ConstantExprKind::ImmediateInvocation, "");
static_assert(sizeof(ZigClangAPValue) == sizeof(clang::APValue), "");
diff --git a/src/zig_clang.h b/src/zig_clang.h
index 19fe81be91..f704b50b18 100644
--- a/src/zig_clang.h
+++ b/src/zig_clang.h
@@ -1011,10 +1011,10 @@ enum ZigClangPreprocessedEntity_EntityKind {
};
enum ZigClangExpr_ConstantExprKind {
- ZigClangExpr_ContantExprKind_Normal,
- ZigClangExpr_ContantExprKind_NonClassTemplateArgument,
- ZigClangExpr_ContantExprKind_ClassTemplateArgument,
- ZigClangExpr_ContantExprKind_ImmediateInvocation,
+ ZigClangExpr_ConstantExprKind_Normal,
+ ZigClangExpr_ConstantExprKind_NonClassTemplateArgument,
+ ZigClangExpr_ConstantExprKind_ClassTemplateArgument,
+ ZigClangExpr_ConstantExprKind_ImmediateInvocation,
};
enum ZigClangUnaryExprOrTypeTrait_Kind {
diff --git a/src/zig_llvm-ar.cpp b/src/zig_llvm-ar.cpp
index b48cd811d9..9516af4096 100644
--- a/src/zig_llvm-ar.cpp
+++ b/src/zig_llvm-ar.cpp
@@ -232,7 +232,7 @@ static std::string ArchiveName;
static std::vector<std::unique_ptr<MemoryBuffer>> ArchiveBuffers;
static std::vector<std::unique_ptr<object::Archive>> Archives;
-// This variable holds the list of member files to proecess, as given
+// This variable holds the list of member files to process, as given
// on the command line.
static std::vector<StringRef> Members;
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index 4d40c45adc..11cbf38368 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -412,6 +412,11 @@ ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref) {
return wrap(Type::getTokenTy(*unwrap(context_ref)));
}
+LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name, LLVMTypeRef FunctionTy, unsigned AddressSpace) {
+ Function* func = Function::Create(unwrap(FunctionTy), GlobalValue::ExternalLinkage, AddressSpace, Name, unwrap(M));
+ return wrap(func);
+}
+
LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args,
unsigned NumArgs, ZigLLVM_CallingConv CC, ZigLLVM_CallAttr attr, const char *Name)
{
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index 7bda2bcbe7..d1e4fa2556 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -65,6 +65,9 @@ ZIG_EXTERN_C LLVMTargetMachineRef ZigLLVMCreateTargetMachine(LLVMTargetRef T, co
ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref);
+ZIG_EXTERN_C LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name,
+ LLVMTypeRef FunctionTy, unsigned AddressSpace);
+
enum ZigLLVM_CallingConv {
ZigLLVM_C = 0,
ZigLLVM_Fast = 8,
diff --git a/test/behavior.zig b/test/behavior.zig
index 366753c3bf..1855ff5cf7 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -2,19 +2,38 @@ const builtin = @import("builtin");
test {
// Tests that pass for both.
- _ = @import("behavior/bool.zig");
- _ = @import("behavior/basic.zig");
- _ = @import("behavior/generics.zig");
- _ = @import("behavior/eval.zig");
- _ = @import("behavior/pointers.zig");
- _ = @import("behavior/if.zig");
- _ = @import("behavior/cast.zig");
_ = @import("behavior/array.zig");
- _ = @import("behavior/usingnamespace.zig");
_ = @import("behavior/atomics.zig");
+ _ = @import("behavior/basic.zig");
+ _ = @import("behavior/bool.zig");
+ _ = @import("behavior/bugs/655.zig");
+ _ = @import("behavior/bugs/1277.zig");
+ _ = @import("behavior/bugs/1741.zig");
+ _ = @import("behavior/bugs/2346.zig");
+ _ = @import("behavior/bugs/2692.zig");
+ _ = @import("behavior/bugs/4769_a.zig");
+ _ = @import("behavior/bugs/4769_b.zig");
+ _ = @import("behavior/bugs/6850.zig");
+ _ = @import("behavior/bugs/9584.zig");
+ _ = @import("behavior/cast.zig");
+ _ = @import("behavior/eval.zig");
+ _ = @import("behavior/generics.zig");
+ _ = @import("behavior/if.zig");
+ _ = @import("behavior/math.zig");
+ _ = @import("behavior/member_func.zig");
+ _ = @import("behavior/pointers.zig");
+ _ = @import("behavior/sizeof_and_typeof.zig");
+ _ = @import("behavior/struct.zig");
+ _ = @import("behavior/this.zig");
+ _ = @import("behavior/translate_c_macros.zig");
+ _ = @import("behavior/union.zig");
+ _ = @import("behavior/usingnamespace.zig");
+ _ = @import("behavior/widening.zig");
- if (!builtin.zig_is_stage2) {
- // Tests that only pass for stage1.
+ if (builtin.zig_is_stage2) {
+ // When all comptime_memory.zig tests pass, #9646 can be closed.
+ // _ = @import("behavior/comptime_memory.zig");
+ } else {
_ = @import("behavior/align.zig");
_ = @import("behavior/alignof.zig");
_ = @import("behavior/array_stage1.zig");
@@ -22,16 +41,25 @@ test {
_ = @import("behavior/asm.zig");
_ = @import("behavior/async_fn.zig");
}
- _ = @import("behavior/atomics_stage1.zig");
_ = @import("behavior/await_struct.zig");
_ = @import("behavior/bit_shifting.zig");
_ = @import("behavior/bitcast.zig");
_ = @import("behavior/bitreverse.zig");
+ _ = @import("behavior/bugs/394.zig");
+ _ = @import("behavior/bugs/421.zig");
+ _ = @import("behavior/bugs/529.zig");
+ _ = @import("behavior/bugs/624.zig");
+ _ = @import("behavior/bugs/656.zig");
+ _ = @import("behavior/bugs/679.zig");
+ _ = @import("behavior/bugs/704.zig");
+ _ = @import("behavior/bugs/718.zig");
+ _ = @import("behavior/bugs/726.zig");
+ _ = @import("behavior/bugs/828.zig");
+ _ = @import("behavior/bugs/920.zig");
_ = @import("behavior/bugs/1025.zig");
_ = @import("behavior/bugs/1076.zig");
_ = @import("behavior/bugs/1111.zig");
_ = @import("behavior/bugs/1120.zig");
- _ = @import("behavior/bugs/1277.zig");
_ = @import("behavior/bugs/1310.zig");
_ = @import("behavior/bugs/1322.zig");
_ = @import("behavior/bugs/1381.zig");
@@ -41,14 +69,11 @@ test {
_ = @import("behavior/bugs/1500.zig");
_ = @import("behavior/bugs/1607.zig");
_ = @import("behavior/bugs/1735.zig");
- _ = @import("behavior/bugs/1741.zig");
_ = @import("behavior/bugs/1851.zig");
_ = @import("behavior/bugs/1914.zig");
_ = @import("behavior/bugs/2006.zig");
_ = @import("behavior/bugs/2114.zig");
- _ = @import("behavior/bugs/2346.zig");
_ = @import("behavior/bugs/2578.zig");
- _ = @import("behavior/bugs/2692.zig");
_ = @import("behavior/bugs/2889.zig");
_ = @import("behavior/bugs/3007.zig");
_ = @import("behavior/bugs/3046.zig");
@@ -60,8 +85,6 @@ test {
_ = @import("behavior/bugs/3779.zig");
_ = @import("behavior/bugs/4328.zig");
_ = @import("behavior/bugs/4560.zig");
- _ = @import("behavior/bugs/4769_a.zig");
- _ = @import("behavior/bugs/4769_b.zig");
_ = @import("behavior/bugs/4954.zig");
_ = @import("behavior/bugs/5398.zig");
_ = @import("behavior/bugs/5413.zig");
@@ -69,30 +92,14 @@ test {
_ = @import("behavior/bugs/5487.zig");
_ = @import("behavior/bugs/6456.zig");
_ = @import("behavior/bugs/6781.zig");
- _ = @import("behavior/bugs/6850.zig");
_ = @import("behavior/bugs/7027.zig");
_ = @import("behavior/bugs/7047.zig");
_ = @import("behavior/bugs/7003.zig");
_ = @import("behavior/bugs/7250.zig");
- _ = @import("behavior/bugs/9584.zig");
- _ = @import("behavior/bugs/394.zig");
- _ = @import("behavior/bugs/421.zig");
- _ = @import("behavior/bugs/529.zig");
- _ = @import("behavior/bugs/624.zig");
- _ = @import("behavior/bugs/655.zig");
- _ = @import("behavior/bugs/656.zig");
- _ = @import("behavior/bugs/679.zig");
- _ = @import("behavior/bugs/704.zig");
- _ = @import("behavior/bugs/718.zig");
- _ = @import("behavior/bugs/726.zig");
- _ = @import("behavior/bugs/828.zig");
- _ = @import("behavior/bugs/920.zig");
_ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/call.zig");
_ = @import("behavior/cast_stage1.zig");
- // When these tests pass, #9646 can be closed.
- // _ = @import("behavior/comptime_memory.zig");
_ = @import("behavior/const_slice_child.zig");
_ = @import("behavior/defer.zig");
_ = @import("behavior/enum.zig");
@@ -113,7 +120,7 @@ test {
_ = @import("behavior/incomplete_struct_param_tld.zig");
_ = @import("behavior/inttoptr.zig");
_ = @import("behavior/ir_block_deps.zig");
- _ = @import("behavior/math.zig");
+ _ = @import("behavior/math_stage1.zig");
_ = @import("behavior/maximum_minimum.zig");
_ = @import("behavior/merge_error_sets.zig");
_ = @import("behavior/misc.zig");
@@ -130,16 +137,15 @@ test {
_ = @import("behavior/saturating_arithmetic.zig");
_ = @import("behavior/shuffle.zig");
_ = @import("behavior/select.zig");
- _ = @import("behavior/sizeof_and_typeof.zig");
+ _ = @import("behavior/sizeof_and_typeof_stage1.zig");
_ = @import("behavior/slice.zig");
_ = @import("behavior/slice_sentinel_comptime.zig");
- _ = @import("behavior/struct.zig");
+ _ = @import("behavior/struct_stage1.zig");
_ = @import("behavior/struct_contains_null_ptr_itself.zig");
_ = @import("behavior/struct_contains_slice_of_itself.zig");
_ = @import("behavior/switch.zig");
_ = @import("behavior/switch_prong_err_enum.zig");
_ = @import("behavior/switch_prong_implicit_cast.zig");
- _ = @import("behavior/this.zig");
_ = @import("behavior/truncate.zig");
_ = @import("behavior/try.zig");
_ = @import("behavior/tuple.zig");
@@ -148,7 +154,7 @@ test {
_ = @import("behavior/typename.zig");
_ = @import("behavior/undefined.zig");
_ = @import("behavior/underscore.zig");
- _ = @import("behavior/union.zig");
+ _ = @import("behavior/union_stage1.zig");
_ = @import("behavior/usingnamespace_stage1.zig");
_ = @import("behavior/var_args.zig");
_ = @import("behavior/vector.zig");
@@ -157,8 +163,7 @@ test {
_ = @import("behavior/wasm.zig");
}
_ = @import("behavior/while.zig");
- _ = @import("behavior/widening.zig");
_ = @import("behavior/src.zig");
- _ = @import("behavior/translate_c_macros.zig");
+ _ = @import("behavior/translate_c_macros_stage1.zig");
}
}
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index 8cf0042d5f..8250cdea06 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -27,3 +27,9 @@ test "arrays" {
fn getArrayLen(a: []const u32) usize {
return a.len;
}
+
+test "array init with mult" {
+ const a = 'a';
+ var i: [8]u8 = [2]u8{ a, 'b' } ** 4;
+ try expect(std.mem.eql(u8, &i, "abababab"));
+}
diff --git a/test/behavior/array_stage1.zig b/test/behavior/array_stage1.zig
index d3a97665ac..c290ef9a08 100644
--- a/test/behavior/array_stage1.zig
+++ b/test/behavior/array_stage1.zig
@@ -140,7 +140,7 @@ fn testArrayByValAtComptime(b: [2]u8) u8 {
return b[0];
}
-test "comptime evalutating function that takes array by value" {
+test "comptime evaluating function that takes array by value" {
const arr = [_]u8{ 0, 1 };
_ = comptime testArrayByValAtComptime(arr);
_ = comptime testArrayByValAtComptime(arr);
diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig
index 444ff56438..b22183bdbd 100644
--- a/test/behavior/atomics.zig
+++ b/test/behavior/atomics.zig
@@ -24,3 +24,198 @@ fn testCmpxchg() !void {
try expect(@cmpxchgStrong(i32, &x, 5678, 42, .SeqCst, .SeqCst) == null);
try expect(x == 42);
}
+
+test "fence" {
+ var x: i32 = 1234;
+ @fence(.SeqCst);
+ x = 5678;
+}
+
+test "atomicrmw and atomicload" {
+ var data: u8 = 200;
+ try testAtomicRmw(&data);
+ try expect(data == 42);
+ try testAtomicLoad(&data);
+}
+
+fn testAtomicRmw(ptr: *u8) !void {
+ const prev_value = @atomicRmw(u8, ptr, .Xchg, 42, .SeqCst);
+ try expect(prev_value == 200);
+ comptime {
+ var x: i32 = 1234;
+ const y: i32 = 12345;
+ try expect(@atomicLoad(i32, &x, .SeqCst) == 1234);
+ try expect(@atomicLoad(i32, &y, .SeqCst) == 12345);
+ }
+}
+
+fn testAtomicLoad(ptr: *u8) !void {
+ const x = @atomicLoad(u8, ptr, .SeqCst);
+ try expect(x == 42);
+}
+
+test "cmpxchg with ptr" {
+ var data1: i32 = 1234;
+ var data2: i32 = 5678;
+ var data3: i32 = 9101;
+ var x: *i32 = &data1;
+ if (@cmpxchgWeak(*i32, &x, &data2, &data3, .SeqCst, .SeqCst)) |x1| {
+ try expect(x1 == &data1);
+ } else {
+ @panic("cmpxchg should have failed");
+ }
+
+ while (@cmpxchgWeak(*i32, &x, &data1, &data3, .SeqCst, .SeqCst)) |x1| {
+ try expect(x1 == &data1);
+ }
+ try expect(x == &data3);
+
+ try expect(@cmpxchgStrong(*i32, &x, &data3, &data2, .SeqCst, .SeqCst) == null);
+ try expect(x == &data2);
+}
+
+test "cmpxchg with ignored result" {
+ var x: i32 = 1234;
+
+ _ = @cmpxchgStrong(i32, &x, 1234, 5678, .Monotonic, .Monotonic);
+
+ try expect(5678 == x);
+}
+
+test "128-bit cmpxchg" {
+ try test_u128_cmpxchg();
+ comptime try test_u128_cmpxchg();
+}
+
+fn test_u128_cmpxchg() !void {
+ if (builtin.zig_is_stage2) {
+ if (builtin.stage2_arch != .x86_64) return error.SkipZigTest;
+ if (!builtin.stage2_x86_cx16) return error.SkipZigTest;
+ } else {
+ if (builtin.cpu.arch != .x86_64) return error.SkipZigTest;
+ if (comptime !std.Target.x86.featureSetHas(builtin.cpu.features, .cx16)) return error.SkipZigTest;
+ }
+
+ var x: u128 = 1234;
+ if (@cmpxchgWeak(u128, &x, 99, 5678, .SeqCst, .SeqCst)) |x1| {
+ try expect(x1 == 1234);
+ } else {
+ @panic("cmpxchg should have failed");
+ }
+
+ while (@cmpxchgWeak(u128, &x, 1234, 5678, .SeqCst, .SeqCst)) |x1| {
+ try expect(x1 == 1234);
+ }
+ try expect(x == 5678);
+
+ try expect(@cmpxchgStrong(u128, &x, 5678, 42, .SeqCst, .SeqCst) == null);
+ try expect(x == 42);
+}
+
+var a_global_variable = @as(u32, 1234);
+
+test "cmpxchg on a global variable" {
+ _ = @cmpxchgWeak(u32, &a_global_variable, 1234, 42, .Acquire, .Monotonic);
+ try expect(a_global_variable == 42);
+}
+
+test "atomic load and rmw with enum" {
+ const Value = enum(u8) { a, b, c };
+ var x = Value.a;
+
+ try expect(@atomicLoad(Value, &x, .SeqCst) != .b);
+
+ _ = @atomicRmw(Value, &x, .Xchg, .c, .SeqCst);
+ try expect(@atomicLoad(Value, &x, .SeqCst) == .c);
+ try expect(@atomicLoad(Value, &x, .SeqCst) != .a);
+ try expect(@atomicLoad(Value, &x, .SeqCst) != .b);
+}
+
+test "atomic store" {
+ var x: u32 = 0;
+ @atomicStore(u32, &x, 1, .SeqCst);
+ try expect(@atomicLoad(u32, &x, .SeqCst) == 1);
+ @atomicStore(u32, &x, 12345678, .SeqCst);
+ try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
+}
+
+test "atomic store comptime" {
+ comptime try testAtomicStore();
+ try testAtomicStore();
+}
+
+fn testAtomicStore() !void {
+ var x: u32 = 0;
+ @atomicStore(u32, &x, 1, .SeqCst);
+ try expect(@atomicLoad(u32, &x, .SeqCst) == 1);
+ @atomicStore(u32, &x, 12345678, .SeqCst);
+ try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
+}
+
+test "atomicrmw with floats" {
+ try testAtomicRmwFloat();
+ comptime try testAtomicRmwFloat();
+}
+
+fn testAtomicRmwFloat() !void {
+ var x: f32 = 0;
+ try expect(x == 0);
+ _ = @atomicRmw(f32, &x, .Xchg, 1, .SeqCst);
+ try expect(x == 1);
+ _ = @atomicRmw(f32, &x, .Add, 5, .SeqCst);
+ try expect(x == 6);
+ _ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst);
+ try expect(x == 4);
+}
+
+test "atomicrmw with ints" {
+ try testAtomicRmwInt();
+ comptime try testAtomicRmwInt();
+}
+
+fn testAtomicRmwInt() !void {
+ var x: u8 = 1;
+ var res = @atomicRmw(u8, &x, .Xchg, 3, .SeqCst);
+ try expect(x == 3 and res == 1);
+ _ = @atomicRmw(u8, &x, .Add, 3, .SeqCst);
+ try expect(x == 6);
+ _ = @atomicRmw(u8, &x, .Sub, 1, .SeqCst);
+ try expect(x == 5);
+ _ = @atomicRmw(u8, &x, .And, 4, .SeqCst);
+ try expect(x == 4);
+ _ = @atomicRmw(u8, &x, .Nand, 4, .SeqCst);
+ try expect(x == 0xfb);
+ _ = @atomicRmw(u8, &x, .Or, 6, .SeqCst);
+ try expect(x == 0xff);
+ _ = @atomicRmw(u8, &x, .Xor, 2, .SeqCst);
+ try expect(x == 0xfd);
+
+ _ = @atomicRmw(u8, &x, .Max, 1, .SeqCst);
+ try expect(x == 0xfd);
+ _ = @atomicRmw(u8, &x, .Min, 1, .SeqCst);
+ try expect(x == 1);
+}
+
+test "atomics with different types" {
+ try testAtomicsWithType(bool, true, false);
+
+ try testAtomicsWithType(u1, 0, 1);
+ try testAtomicsWithType(i4, 0, 1);
+ try testAtomicsWithType(u5, 0, 1);
+ try testAtomicsWithType(i15, 0, 1);
+ try testAtomicsWithType(u24, 0, 1);
+
+ try testAtomicsWithType(u0, 0, 0);
+ try testAtomicsWithType(i0, 0, 0);
+}
+
+fn testAtomicsWithType(comptime T: type, a: T, b: T) !void {
+ var x: T = b;
+ @atomicStore(T, &x, a, .SeqCst);
+ try expect(x == a);
+ try expect(@atomicLoad(T, &x, .SeqCst) == a);
+ try expect(@atomicRmw(T, &x, .Xchg, b, .SeqCst) == a);
+ try expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst) == null);
+ if (@sizeOf(T) != 0)
+ try expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst).? == a);
+}
diff --git a/test/behavior/atomics_stage1.zig b/test/behavior/atomics_stage1.zig
deleted file mode 100644
index 6e754e30cd..0000000000
--- a/test/behavior/atomics_stage1.zig
+++ /dev/null
@@ -1,194 +0,0 @@
-const std = @import("std");
-const expect = std.testing.expect;
-const expectEqual = std.testing.expectEqual;
-const builtin = @import("builtin");
-
-test "fence" {
- var x: i32 = 1234;
- @fence(.SeqCst);
- x = 5678;
-}
-
-test "atomicrmw and atomicload" {
- var data: u8 = 200;
- try testAtomicRmw(&data);
- try expect(data == 42);
- try testAtomicLoad(&data);
-}
-
-fn testAtomicRmw(ptr: *u8) !void {
- const prev_value = @atomicRmw(u8, ptr, .Xchg, 42, .SeqCst);
- try expect(prev_value == 200);
- comptime {
- var x: i32 = 1234;
- const y: i32 = 12345;
- try expect(@atomicLoad(i32, &x, .SeqCst) == 1234);
- try expect(@atomicLoad(i32, &y, .SeqCst) == 12345);
- }
-}
-
-fn testAtomicLoad(ptr: *u8) !void {
- const x = @atomicLoad(u8, ptr, .SeqCst);
- try expect(x == 42);
-}
-
-test "cmpxchg with ptr" {
- var data1: i32 = 1234;
- var data2: i32 = 5678;
- var data3: i32 = 9101;
- var x: *i32 = &data1;
- if (@cmpxchgWeak(*i32, &x, &data2, &data3, .SeqCst, .SeqCst)) |x1| {
- try expect(x1 == &data1);
- } else {
- @panic("cmpxchg should have failed");
- }
-
- while (@cmpxchgWeak(*i32, &x, &data1, &data3, .SeqCst, .SeqCst)) |x1| {
- try expect(x1 == &data1);
- }
- try expect(x == &data3);
-
- try expect(@cmpxchgStrong(*i32, &x, &data3, &data2, .SeqCst, .SeqCst) == null);
- try expect(x == &data2);
-}
-
-test "128-bit cmpxchg" {
- try test_u128_cmpxchg();
- comptime try test_u128_cmpxchg();
-}
-
-fn test_u128_cmpxchg() !void {
- if (std.Target.current.cpu.arch != .x86_64) return error.SkipZigTest;
- if (comptime !std.Target.x86.featureSetHas(std.Target.current.cpu.features, .cx16)) return error.SkipZigTest;
-
- var x: u128 = 1234;
- if (@cmpxchgWeak(u128, &x, 99, 5678, .SeqCst, .SeqCst)) |x1| {
- try expect(x1 == 1234);
- } else {
- @panic("cmpxchg should have failed");
- }
-
- while (@cmpxchgWeak(u128, &x, 1234, 5678, .SeqCst, .SeqCst)) |x1| {
- try expect(x1 == 1234);
- }
- try expect(x == 5678);
-
- try expect(@cmpxchgStrong(u128, &x, 5678, 42, .SeqCst, .SeqCst) == null);
- try expect(x == 42);
-}
-
-test "cmpxchg with ignored result" {
- var x: i32 = 1234;
-
- _ = @cmpxchgStrong(i32, &x, 1234, 5678, .Monotonic, .Monotonic);
-
- try expectEqual(@as(i32, 5678), x);
-}
-
-var a_global_variable = @as(u32, 1234);
-
-test "cmpxchg on a global variable" {
- _ = @cmpxchgWeak(u32, &a_global_variable, 1234, 42, .Acquire, .Monotonic);
- try expectEqual(@as(u32, 42), a_global_variable);
-}
-
-test "atomic load and rmw with enum" {
- const Value = enum(u8) {
- a,
- b,
- c,
- };
- var x = Value.a;
-
- try expect(@atomicLoad(Value, &x, .SeqCst) != .b);
-
- _ = @atomicRmw(Value, &x, .Xchg, .c, .SeqCst);
- try expect(@atomicLoad(Value, &x, .SeqCst) == .c);
- try expect(@atomicLoad(Value, &x, .SeqCst) != .a);
- try expect(@atomicLoad(Value, &x, .SeqCst) != .b);
-}
-
-test "atomic store" {
- var x: u32 = 0;
- @atomicStore(u32, &x, 1, .SeqCst);
- try expect(@atomicLoad(u32, &x, .SeqCst) == 1);
- @atomicStore(u32, &x, 12345678, .SeqCst);
- try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
-}
-
-test "atomic store comptime" {
- comptime try testAtomicStore();
- try testAtomicStore();
-}
-
-fn testAtomicStore() !void {
- var x: u32 = 0;
- @atomicStore(u32, &x, 1, .SeqCst);
- try expect(@atomicLoad(u32, &x, .SeqCst) == 1);
- @atomicStore(u32, &x, 12345678, .SeqCst);
- try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
-}
-
-test "atomicrmw with floats" {
- try testAtomicRmwFloat();
- comptime try testAtomicRmwFloat();
-}
-
-fn testAtomicRmwFloat() !void {
- var x: f32 = 0;
- try expect(x == 0);
- _ = @atomicRmw(f32, &x, .Xchg, 1, .SeqCst);
- try expect(x == 1);
- _ = @atomicRmw(f32, &x, .Add, 5, .SeqCst);
- try expect(x == 6);
- _ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst);
- try expect(x == 4);
-}
-
-test "atomicrmw with ints" {
- try testAtomicRmwInt();
- comptime try testAtomicRmwInt();
-}
-
-fn testAtomicRmwInt() !void {
- var x: u8 = 1;
- var res = @atomicRmw(u8, &x, .Xchg, 3, .SeqCst);
- try expect(x == 3 and res == 1);
- _ = @atomicRmw(u8, &x, .Add, 3, .SeqCst);
- try expect(x == 6);
- _ = @atomicRmw(u8, &x, .Sub, 1, .SeqCst);
- try expect(x == 5);
- _ = @atomicRmw(u8, &x, .And, 4, .SeqCst);
- try expect(x == 4);
- _ = @atomicRmw(u8, &x, .Nand, 4, .SeqCst);
- try expect(x == 0xfb);
- _ = @atomicRmw(u8, &x, .Or, 6, .SeqCst);
- try expect(x == 0xff);
- _ = @atomicRmw(u8, &x, .Xor, 2, .SeqCst);
- try expect(x == 0xfd);
-
- _ = @atomicRmw(u8, &x, .Max, 1, .SeqCst);
- try expect(x == 0xfd);
- _ = @atomicRmw(u8, &x, .Min, 1, .SeqCst);
- try expect(x == 1);
-}
-
-test "atomics with different types" {
- try testAtomicsWithType(bool, true, false);
- inline for (.{ u1, i4, u5, i15, u24 }) |T| {
- try testAtomicsWithType(T, 0, 1);
- }
- try testAtomicsWithType(u0, 0, 0);
- try testAtomicsWithType(i0, 0, 0);
-}
-
-fn testAtomicsWithType(comptime T: type, a: T, b: T) !void {
- var x: T = b;
- @atomicStore(T, &x, a, .SeqCst);
- try expect(x == a);
- try expect(@atomicLoad(T, &x, .SeqCst) == a);
- try expect(@atomicRmw(T, &x, .Xchg, b, .SeqCst) == a);
- try expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst) == null);
- if (@sizeOf(T) != 0)
- try expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst).? == a);
-}
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 517162c8d4..f6876e29ad 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -170,3 +170,21 @@ test "string concatenation" {
test "array mult operator" {
try expect(mem.eql(u8, "ab" ** 5, "ababababab"));
}
+
+test "memcpy and memset intrinsics" {
+ try testMemcpyMemset();
+ // TODO add comptime test coverage
+ //comptime try testMemcpyMemset();
+}
+
+fn testMemcpyMemset() !void {
+ var foo: [20]u8 = undefined;
+ var bar: [20]u8 = undefined;
+
+ @memset(&foo, 'A', foo.len);
+ @memcpy(&bar, &foo, bar.len);
+
+ try expect(bar[0] == 'A');
+ try expect(bar[11] == 'A');
+ try expect(bar[19] == 'A');
+}
diff --git a/test/behavior/bugs/1735.zig b/test/behavior/bugs/1735.zig
index f3aa6eb9ec..1f6e3c99f4 100644
--- a/test/behavior/bugs/1735.zig
+++ b/test/behavior/bugs/1735.zig
@@ -40,7 +40,7 @@ const a = struct {
}
};
-test "intialization" {
+test "initialization" {
var t = a.init();
try std.testing.expect(t.foo.len == 0);
}
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index 0d103bc49a..6acdf15e89 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -155,3 +155,31 @@ fn MakeType(comptime T: type) type {
field: T,
};
}
+
+test "try to trick eval with runtime if" {
+ try expect(testTryToTrickEvalWithRuntimeIf(true) == 10);
+}
+
+fn testTryToTrickEvalWithRuntimeIf(b: bool) usize {
+ comptime var i: usize = 0;
+ inline while (i < 10) : (i += 1) {
+ const result = if (b) false else true;
+ _ = result;
+ }
+ comptime {
+ return i;
+ }
+}
+
+test "@setEvalBranchQuota" {
+ comptime {
+ // 1001 for the loop and then 1 more for the expect fn call
+ @setEvalBranchQuota(1002);
+ var i = 0;
+ var sum = 0;
+ while (i < 1001) : (i += 1) {
+ sum += i;
+ }
+ try expect(sum == 500500);
+ }
+}
diff --git a/test/behavior/eval_stage1.zig b/test/behavior/eval_stage1.zig
index 644de50fd0..4e945d7af0 100644
--- a/test/behavior/eval_stage1.zig
+++ b/test/behavior/eval_stage1.zig
@@ -88,7 +88,7 @@ var st_init_str_foo = StInitStrFoo{
.y = true,
};
-test "statically initalized array literal" {
+test "statically initialized array literal" {
const y: [4]u8 = st_init_arr_lit_x;
try expect(y[3] == 4);
}
@@ -109,21 +109,6 @@ test "const slice" {
}
}
-test "try to trick eval with runtime if" {
- try expect(testTryToTrickEvalWithRuntimeIf(true) == 10);
-}
-
-fn testTryToTrickEvalWithRuntimeIf(b: bool) usize {
- comptime var i: usize = 0;
- inline while (i < 10) : (i += 1) {
- const result = if (b) false else true;
- _ = result;
- }
- comptime {
- return i;
- }
-}
-
test "inlined loop has array literal with elided runtime scope on first iteration but not second iteration" {
var runtime = [1]i32{3};
comptime var i: usize = 0;
@@ -276,19 +261,6 @@ fn assertEqualPtrs(ptr1: *const u8, ptr2: *const u8) !void {
try expect(ptr1 == ptr2);
}
-test "@setEvalBranchQuota" {
- comptime {
- // 1001 for the loop and then 1 more for the expect fn call
- @setEvalBranchQuota(1002);
- var i = 0;
- var sum = 0;
- while (i < 1001) : (i += 1) {
- sum += i;
- }
- try expect(sum == 500500);
- }
-}
-
test "float literal at compile time not lossy" {
try expect(16777216.0 + 1.0 == 16777217.0);
try expect(9007199254740992.0 + 1.0 == 9007199254740993.0);
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index f642028e54..76ad5cede7 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -118,3 +118,19 @@ pub fn SmallList(comptime T: type, comptime STATIC_SIZE: usize) type {
prealloc_items: [STATIC_SIZE]T,
};
}
+
+test "const decls in struct" {
+ try expect(GenericDataThing(3).count_plus_one == 4);
+}
+fn GenericDataThing(comptime count: isize) type {
+ return struct {
+ const count_plus_one = count + 1;
+ };
+}
+
+test "use generic param in generic param" {
+ try expect(aGenericFn(i32, 3, 4) == 7);
+}
+fn aGenericFn(comptime T: type, comptime a: T, b: T) T {
+ return a + b;
+}
diff --git a/test/behavior/generics_stage1.zig b/test/behavior/generics_stage1.zig
index 4fa52f5377..c4b9687aa6 100644
--- a/test/behavior/generics_stage1.zig
+++ b/test/behavior/generics_stage1.zig
@@ -26,22 +26,6 @@ fn GenNode(comptime T: type) type {
};
}
-test "const decls in struct" {
- try expect(GenericDataThing(3).count_plus_one == 4);
-}
-fn GenericDataThing(comptime count: isize) type {
- return struct {
- const count_plus_one = count + 1;
- };
-}
-
-test "use generic param in generic param" {
- try expect(aGenericFn(i32, 3, 4) == 7);
-}
-fn aGenericFn(comptime T: type, comptime a: T, b: T) T {
- return a + b;
-}
-
test "generic fn with implicit cast" {
try expect(getFirstByte(u8, &[_]u8{13}) == 13);
try expect(getFirstByte(u16, &[_]u16{
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 7a5c31f67a..56fbdc124d 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -6,171 +6,6 @@ const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const mem = std.mem;
-test "division" {
- try testDivision();
- comptime try testDivision();
-}
-fn testDivision() !void {
- try expect(div(u32, 13, 3) == 4);
- try expect(div(f16, 1.0, 2.0) == 0.5);
- try expect(div(f32, 1.0, 2.0) == 0.5);
-
- try expect(divExact(u32, 55, 11) == 5);
- try expect(divExact(i32, -55, 11) == -5);
- try expect(divExact(f16, 55.0, 11.0) == 5.0);
- try expect(divExact(f16, -55.0, 11.0) == -5.0);
- try expect(divExact(f32, 55.0, 11.0) == 5.0);
- try expect(divExact(f32, -55.0, 11.0) == -5.0);
-
- try expect(divFloor(i32, 5, 3) == 1);
- try expect(divFloor(i32, -5, 3) == -2);
- try expect(divFloor(f16, 5.0, 3.0) == 1.0);
- try expect(divFloor(f16, -5.0, 3.0) == -2.0);
- try expect(divFloor(f32, 5.0, 3.0) == 1.0);
- try expect(divFloor(f32, -5.0, 3.0) == -2.0);
- try expect(divFloor(i32, -0x80000000, -2) == 0x40000000);
- try expect(divFloor(i32, 0, -0x80000000) == 0);
- try expect(divFloor(i32, -0x40000001, 0x40000000) == -2);
- try expect(divFloor(i32, -0x80000000, 1) == -0x80000000);
- try expect(divFloor(i32, 10, 12) == 0);
- try expect(divFloor(i32, -14, 12) == -2);
- try expect(divFloor(i32, -2, 12) == -1);
-
- try expect(divTrunc(i32, 5, 3) == 1);
- try expect(divTrunc(i32, -5, 3) == -1);
- try expect(divTrunc(f16, 5.0, 3.0) == 1.0);
- try expect(divTrunc(f16, -5.0, 3.0) == -1.0);
- try expect(divTrunc(f32, 5.0, 3.0) == 1.0);
- try expect(divTrunc(f32, -5.0, 3.0) == -1.0);
- try expect(divTrunc(f64, 5.0, 3.0) == 1.0);
- try expect(divTrunc(f64, -5.0, 3.0) == -1.0);
- try expect(divTrunc(i32, 10, 12) == 0);
- try expect(divTrunc(i32, -14, 12) == -1);
- try expect(divTrunc(i32, -2, 12) == 0);
-
- try expect(mod(i32, 10, 12) == 10);
- try expect(mod(i32, -14, 12) == 10);
- try expect(mod(i32, -2, 12) == 10);
-
- comptime {
- try expect(
- 1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600,
- );
- try expect(
- @rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600,
- );
- try expect(
- 1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2,
- );
- try expect(
- @divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2,
- );
- try expect(
- @divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2,
- );
- try expect(
- @divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2,
- );
- try expect(
- 4126227191251978491697987544882340798050766755606969681711 % 10 == 1,
- );
- }
-}
-fn div(comptime T: type, a: T, b: T) T {
- return a / b;
-}
-fn divExact(comptime T: type, a: T, b: T) T {
- return @divExact(a, b);
-}
-fn divFloor(comptime T: type, a: T, b: T) T {
- return @divFloor(a, b);
-}
-fn divTrunc(comptime T: type, a: T, b: T) T {
- return @divTrunc(a, b);
-}
-fn mod(comptime T: type, a: T, b: T) T {
- return @mod(a, b);
-}
-
-test "@addWithOverflow" {
- var result: u8 = undefined;
- try expect(@addWithOverflow(u8, 250, 100, &result));
- try expect(!@addWithOverflow(u8, 100, 150, &result));
- try expect(result == 250);
-}
-
-// TODO test mulWithOverflow
-// TODO test subWithOverflow
-
-test "@shlWithOverflow" {
- var result: u16 = undefined;
- try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result));
- try expect(!@shlWithOverflow(u16, 0b0010111111111111, 2, &result));
- try expect(result == 0b1011111111111100);
-}
-
-test "@*WithOverflow with u0 values" {
- var result: u0 = undefined;
- try expect(!@addWithOverflow(u0, 0, 0, &result));
- try expect(!@subWithOverflow(u0, 0, 0, &result));
- try expect(!@mulWithOverflow(u0, 0, 0, &result));
- try expect(!@shlWithOverflow(u0, 0, 0, &result));
-}
-
-test "@clz" {
- try testClz();
- comptime try testClz();
-}
-
-fn testClz() !void {
- try expect(@clz(u8, 0b10001010) == 0);
- try expect(@clz(u8, 0b00001010) == 4);
- try expect(@clz(u8, 0b00011010) == 3);
- try expect(@clz(u8, 0b00000000) == 8);
- try expect(@clz(u128, 0xffffffffffffffff) == 64);
- try expect(@clz(u128, 0x10000000000000000) == 63);
-}
-
-test "@clz vectors" {
- try testClzVectors();
- comptime try testClzVectors();
-}
-
-fn testClzVectors() !void {
- @setEvalBranchQuota(10_000);
- try expectEqual(@clz(u8, @splat(64, @as(u8, 0b10001010))), @splat(64, @as(u4, 0)));
- try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00001010))), @splat(64, @as(u4, 4)));
- try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00011010))), @splat(64, @as(u4, 3)));
- try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00000000))), @splat(64, @as(u4, 8)));
- try expectEqual(@clz(u128, @splat(64, @as(u128, 0xffffffffffffffff))), @splat(64, @as(u8, 64)));
- try expectEqual(@clz(u128, @splat(64, @as(u128, 0x10000000000000000))), @splat(64, @as(u8, 63)));
-}
-
-test "@ctz" {
- try testCtz();
- comptime try testCtz();
-}
-
-fn testCtz() !void {
- try expect(@ctz(u8, 0b10100000) == 5);
- try expect(@ctz(u8, 0b10001010) == 1);
- try expect(@ctz(u8, 0b00000000) == 8);
- try expect(@ctz(u16, 0b00000000) == 16);
-}
-
-test "@ctz vectors" {
- try testClzVectors();
- comptime try testClzVectors();
-}
-
-fn testCtzVectors() !void {
- @setEvalBranchQuota(10_000);
- try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b10100000))), @splat(64, @as(u4, 5)));
- try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b10001010))), @splat(64, @as(u4, 1)));
- try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b00000000))), @splat(64, @as(u4, 8)));
- try expectEqual(@ctz(u16, @splat(64, @as(u16, 0b00000000))), @splat(64, @as(u5, 16)));
-}
-
test "assignment operators" {
var i: u32 = 0;
i += 5;
@@ -219,6 +54,24 @@ fn assertFalse(b: bool) !void {
try expect(!b);
}
+test "@clz" {
+ try testClz();
+ comptime try testClz();
+}
+
+fn testClz() !void {
+ try expect(testOneClz(u8, 0b10001010) == 0);
+ try expect(testOneClz(u8, 0b00001010) == 4);
+ try expect(testOneClz(u8, 0b00011010) == 3);
+ try expect(testOneClz(u8, 0b00000000) == 8);
+ try expect(testOneClz(u128, 0xffffffffffffffff) == 64);
+ try expect(testOneClz(u128, 0x10000000000000000) == 63);
+}
+
+fn testOneClz(comptime T: type, x: T) u32 {
+ return @clz(T, x);
+}
+
test "const number literal" {
const one = 1;
const eleven = ten + one;
@@ -227,101 +80,6 @@ test "const number literal" {
}
const ten = 10;
-test "unsigned wrapping" {
- try testUnsignedWrappingEval(maxInt(u32));
- comptime try testUnsignedWrappingEval(maxInt(u32));
-}
-fn testUnsignedWrappingEval(x: u32) !void {
- const zero = x +% 1;
- try expect(zero == 0);
- const orig = zero -% 1;
- try expect(orig == maxInt(u32));
-}
-
-test "signed wrapping" {
- try testSignedWrappingEval(maxInt(i32));
- comptime try testSignedWrappingEval(maxInt(i32));
-}
-fn testSignedWrappingEval(x: i32) !void {
- const min_val = x +% 1;
- try expect(min_val == minInt(i32));
- const max_val = min_val -% 1;
- try expect(max_val == maxInt(i32));
-}
-
-test "signed negation wrapping" {
- try testSignedNegationWrappingEval(minInt(i16));
- comptime try testSignedNegationWrappingEval(minInt(i16));
-}
-fn testSignedNegationWrappingEval(x: i16) !void {
- try expect(x == -32768);
- const neg = -%x;
- try expect(neg == -32768);
-}
-
-test "unsigned negation wrapping" {
- try testUnsignedNegationWrappingEval(1);
- comptime try testUnsignedNegationWrappingEval(1);
-}
-fn testUnsignedNegationWrappingEval(x: u16) !void {
- try expect(x == 1);
- const neg = -%x;
- try expect(neg == maxInt(u16));
-}
-
-test "unsigned 64-bit division" {
- try test_u64_div();
- comptime try test_u64_div();
-}
-fn test_u64_div() !void {
- const result = divWithResult(1152921504606846976, 34359738365);
- try expect(result.quotient == 33554432);
- try expect(result.remainder == 100663296);
-}
-fn divWithResult(a: u64, b: u64) DivResult {
- return DivResult{
- .quotient = a / b,
- .remainder = a % b,
- };
-}
-const DivResult = struct {
- quotient: u64,
- remainder: u64,
-};
-
-test "binary not" {
- try expect(comptime x: {
- break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101;
- });
- try expect(comptime x: {
- break :x ~@as(u64, 2147483647) == 18446744071562067968;
- });
- try testBinaryNot(0b1010101010101010);
-}
-
-fn testBinaryNot(x: u16) !void {
- try expect(~x == 0b0101010101010101);
-}
-
-test "small int addition" {
- var x: u2 = 0;
- try expect(x == 0);
-
- x += 1;
- try expect(x == 1);
-
- x += 1;
- try expect(x == 2);
-
- x += 1;
- try expect(x == 3);
-
- var result: @TypeOf(x) = 3;
- try expect(@addWithOverflow(@TypeOf(x), x, 1, &result));
-
- try expect(result == 0);
-}
-
test "float equality" {
const x: f64 = 0.012;
const y: f64 = x + 1.0;
@@ -335,15 +93,6 @@ fn testFloatEqualityImpl(x: f64, y: f64) !void {
try expect(y == y2);
}
-test "allow signed integer division/remainder when values are comptime known and positive or exact" {
- try expect(5 / 3 == 1);
- try expect(-5 / -3 == 1);
- try expect(-6 / 3 == -2);
-
- try expect(5 % 3 == 2);
- try expect(-6 % 3 == 0);
-}
-
test "hex float literal parsing" {
comptime try expect(0x1.0 == 1.0);
}
@@ -353,102 +102,10 @@ test "quad hex float literal parsing in range" {
const b = 0x1.dedafcff354b6ae9758763545432p-9;
const c = 0x1.2f34dd5f437e849b4baab754cdefp+4534;
const d = 0x1.edcbff8ad76ab5bf46463233214fp-435;
- if (false) {
- a;
- b;
- c;
- d;
- }
-}
-
-test "quad hex float literal parsing accurate" {
- const a: f128 = 0x1.1111222233334444555566667777p+0;
-
- // implied 1 is dropped, with an exponent of 0 (0x3fff) after biasing.
- const expected: u128 = 0x3fff1111222233334444555566667777;
- try expect(@bitCast(u128, a) == expected);
-
- // non-normalized
- const b: f128 = 0x11.111222233334444555566667777p-4;
- try expect(@bitCast(u128, b) == expected);
-
- const S = struct {
- fn doTheTest() !void {
- {
- var f: f128 = 0x1.2eab345678439abcdefea56782346p+5;
- try expect(@bitCast(u128, f) == 0x40042eab345678439abcdefea5678234);
- }
- {
- var f: f128 = 0x1.edcb34a235253948765432134674fp-1;
- try expect(@bitCast(u128, f) == 0x3ffeedcb34a235253948765432134674);
- }
- {
- var f: f128 = 0x1.353e45674d89abacc3a2ebf3ff4ffp-50;
- try expect(@bitCast(u128, f) == 0x3fcd353e45674d89abacc3a2ebf3ff50);
- }
- {
- var f: f128 = 0x1.ed8764648369535adf4be3214567fp-9;
- try expect(@bitCast(u128, f) == 0x3ff6ed8764648369535adf4be3214568);
- }
- const exp2ft = [_]f64{
- 0x1.6a09e667f3bcdp-1,
- 0x1.7a11473eb0187p-1,
- 0x1.8ace5422aa0dbp-1,
- 0x1.9c49182a3f090p-1,
- 0x1.ae89f995ad3adp-1,
- 0x1.c199bdd85529cp-1,
- 0x1.d5818dcfba487p-1,
- 0x1.ea4afa2a490dap-1,
- 0x1.0000000000000p+0,
- 0x1.0b5586cf9890fp+0,
- 0x1.172b83c7d517bp+0,
- 0x1.2387a6e756238p+0,
- 0x1.306fe0a31b715p+0,
- 0x1.3dea64c123422p+0,
- 0x1.4bfdad5362a27p+0,
- 0x1.5ab07dd485429p+0,
- 0x1.8p23,
- 0x1.62e430p-1,
- 0x1.ebfbe0p-3,
- 0x1.c6b348p-5,
- 0x1.3b2c9cp-7,
- 0x1.0p127,
- -0x1.0p-149,
- };
-
- const answers = [_]u64{
- 0x3fe6a09e667f3bcd,
- 0x3fe7a11473eb0187,
- 0x3fe8ace5422aa0db,
- 0x3fe9c49182a3f090,
- 0x3feae89f995ad3ad,
- 0x3fec199bdd85529c,
- 0x3fed5818dcfba487,
- 0x3feea4afa2a490da,
- 0x3ff0000000000000,
- 0x3ff0b5586cf9890f,
- 0x3ff172b83c7d517b,
- 0x3ff2387a6e756238,
- 0x3ff306fe0a31b715,
- 0x3ff3dea64c123422,
- 0x3ff4bfdad5362a27,
- 0x3ff5ab07dd485429,
- 0x4168000000000000,
- 0x3fe62e4300000000,
- 0x3fcebfbe00000000,
- 0x3fac6b3480000000,
- 0x3f83b2c9c0000000,
- 0x47e0000000000000,
- 0xb6a0000000000000,
- };
-
- for (exp2ft) |x, i| {
- try expect(@bitCast(u64, x) == answers[i]);
- }
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
+ _ = a;
+ _ = b;
+ _ = c;
+ _ = d;
}
test "underscore separator parsing" {
@@ -483,66 +140,9 @@ test "hex float literal within range" {
const a = 0x1.0p16383;
const b = 0x0.1p16387;
const c = 0x1.0p-16382;
- if (false) {
- a;
- b;
- c;
- }
-}
-
-test "truncating shift left" {
- try testShlTrunc(maxInt(u16));
- comptime try testShlTrunc(maxInt(u16));
-}
-fn testShlTrunc(x: u16) !void {
- const shifted = x << 1;
- try expect(shifted == 65534);
-}
-
-test "truncating shift right" {
- try testShrTrunc(maxInt(u16));
- comptime try testShrTrunc(maxInt(u16));
-}
-fn testShrTrunc(x: u16) !void {
- const shifted = x >> 1;
- try expect(shifted == 32767);
-}
-
-test "exact shift left" {
- try testShlExact(0b00110101);
- comptime try testShlExact(0b00110101);
-}
-fn testShlExact(x: u8) !void {
- const shifted = @shlExact(x, 2);
- try expect(shifted == 0b11010100);
-}
-
-test "exact shift right" {
- try testShrExact(0b10110100);
- comptime try testShrExact(0b10110100);
-}
-fn testShrExact(x: u8) !void {
- const shifted = @shrExact(x, 2);
- try expect(shifted == 0b00101101);
-}
-
-test "shift left/right on u0 operand" {
- const S = struct {
- fn doTheTest() !void {
- var x: u0 = 0;
- var y: u0 = 0;
- try expectEqual(@as(u0, 0), x << 0);
- try expectEqual(@as(u0, 0), x >> 0);
- try expectEqual(@as(u0, 0), x << y);
- try expectEqual(@as(u0, 0), x >> y);
- try expectEqual(@as(u0, 0), @shlExact(x, 0));
- try expectEqual(@as(u0, 0), @shrExact(x, 0));
- try expectEqual(@as(u0, 0), @shlExact(x, y));
- try expectEqual(@as(u0, 0), @shrExact(x, y));
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
+ _ = a;
+ _ = b;
+ _ = c;
}
test "comptime_int addition" {
@@ -600,11 +200,15 @@ test "xor" {
}
fn test_xor() !void {
- try expect(0xFF ^ 0x00 == 0xFF);
- try expect(0xF0 ^ 0x0F == 0xFF);
- try expect(0xFF ^ 0xF0 == 0x0F);
- try expect(0xFF ^ 0x0F == 0xF0);
- try expect(0xFF ^ 0xFF == 0x00);
+ try testOneXor(0xFF, 0x00, 0xFF);
+ try testOneXor(0xF0, 0x0F, 0xFF);
+ try testOneXor(0xFF, 0xF0, 0x0F);
+ try testOneXor(0xFF, 0x0F, 0xF0);
+ try testOneXor(0xFF, 0xFF, 0x00);
+}
+
+fn testOneXor(a: u8, b: u8, c: u8) !void {
+ try expect(a ^ b == c);
}
test "comptime_int xor" {
@@ -620,191 +224,6 @@ test "comptime_int xor" {
}
}
-test "f128" {
- try test_f128();
- comptime try test_f128();
-}
-
-fn make_f128(x: f128) f128 {
- return x;
-}
-
-fn test_f128() !void {
- try expect(@sizeOf(f128) == 16);
- try expect(make_f128(1.0) == 1.0);
- try expect(make_f128(1.0) != 1.1);
- try expect(make_f128(1.0) > 0.9);
- try expect(make_f128(1.0) >= 0.9);
- try expect(make_f128(1.0) >= 1.0);
- try should_not_be_zero(1.0);
-}
-
-fn should_not_be_zero(x: f128) !void {
- try expect(x != 0.0);
-}
-
-test "comptime float rem int" {
- comptime {
- var x = @as(f32, 1) % 2;
- try expect(x == 1.0);
- }
-}
-
-test "remainder division" {
- comptime try remdiv(f16);
- comptime try remdiv(f32);
- comptime try remdiv(f64);
- comptime try remdiv(f128);
- try remdiv(f16);
- try remdiv(f64);
- try remdiv(f128);
-}
-
-fn remdiv(comptime T: type) !void {
- try expect(@as(T, 1) == @as(T, 1) % @as(T, 2));
- try expect(@as(T, 1) == @as(T, 7) % @as(T, 3));
-}
-
-test "@sqrt" {
- try testSqrt(f64, 12.0);
- comptime try testSqrt(f64, 12.0);
- try testSqrt(f32, 13.0);
- comptime try testSqrt(f32, 13.0);
- try testSqrt(f16, 13.0);
- comptime try testSqrt(f16, 13.0);
-
- const x = 14.0;
- const y = x * x;
- const z = @sqrt(y);
- comptime try expect(z == x);
-}
-
-fn testSqrt(comptime T: type, x: T) !void {
- try expect(@sqrt(x * x) == x);
-}
-
-test "@fabs" {
- try testFabs(f128, 12.0);
- comptime try testFabs(f128, 12.0);
- try testFabs(f64, 12.0);
- comptime try testFabs(f64, 12.0);
- try testFabs(f32, 12.0);
- comptime try testFabs(f32, 12.0);
- try testFabs(f16, 12.0);
- comptime try testFabs(f16, 12.0);
-
- const x = 14.0;
- const y = -x;
- const z = @fabs(y);
- comptime try expectEqual(x, z);
-}
-
-fn testFabs(comptime T: type, x: T) !void {
- const y = -x;
- const z = @fabs(y);
- try expectEqual(x, z);
-}
-
-test "@floor" {
- // FIXME: Generates a floorl function call
- // testFloor(f128, 12.0);
- comptime try testFloor(f128, 12.0);
- try testFloor(f64, 12.0);
- comptime try testFloor(f64, 12.0);
- try testFloor(f32, 12.0);
- comptime try testFloor(f32, 12.0);
- try testFloor(f16, 12.0);
- comptime try testFloor(f16, 12.0);
-
- const x = 14.0;
- const y = x + 0.7;
- const z = @floor(y);
- comptime try expectEqual(x, z);
-}
-
-fn testFloor(comptime T: type, x: T) !void {
- const y = x + 0.6;
- const z = @floor(y);
- try expectEqual(x, z);
-}
-
-test "@ceil" {
- // FIXME: Generates a ceill function call
- //testCeil(f128, 12.0);
- comptime try testCeil(f128, 12.0);
- try testCeil(f64, 12.0);
- comptime try testCeil(f64, 12.0);
- try testCeil(f32, 12.0);
- comptime try testCeil(f32, 12.0);
- try testCeil(f16, 12.0);
- comptime try testCeil(f16, 12.0);
-
- const x = 14.0;
- const y = x - 0.7;
- const z = @ceil(y);
- comptime try expectEqual(x, z);
-}
-
-fn testCeil(comptime T: type, x: T) !void {
- const y = x - 0.8;
- const z = @ceil(y);
- try expectEqual(x, z);
-}
-
-test "@trunc" {
- // FIXME: Generates a truncl function call
- //testTrunc(f128, 12.0);
- comptime try testTrunc(f128, 12.0);
- try testTrunc(f64, 12.0);
- comptime try testTrunc(f64, 12.0);
- try testTrunc(f32, 12.0);
- comptime try testTrunc(f32, 12.0);
- try testTrunc(f16, 12.0);
- comptime try testTrunc(f16, 12.0);
-
- const x = 14.0;
- const y = x + 0.7;
- const z = @trunc(y);
- comptime try expectEqual(x, z);
-}
-
-fn testTrunc(comptime T: type, x: T) !void {
- {
- const y = x + 0.8;
- const z = @trunc(y);
- try expectEqual(x, z);
- }
-
- {
- const y = -x - 0.8;
- const z = @trunc(y);
- try expectEqual(-x, z);
- }
-}
-
-test "@round" {
- // FIXME: Generates a roundl function call
- //testRound(f128, 12.0);
- comptime try testRound(f128, 12.0);
- try testRound(f64, 12.0);
- comptime try testRound(f64, 12.0);
- try testRound(f32, 12.0);
- comptime try testRound(f32, 12.0);
- try testRound(f16, 12.0);
- comptime try testRound(f16, 12.0);
-
- const x = 14.0;
- const y = x + 0.4;
- const z = @round(y);
- comptime try expectEqual(x, z);
-}
-
-fn testRound(comptime T: type, x: T) !void {
- const y = x - 0.5;
- const z = @round(y);
- try expectEqual(x, z);
-}
-
test "comptime_int param and return" {
const a = comptimeAdd(35361831660712422535336160538497375248, 101752735581729509668353361206450473702);
try expect(a == 137114567242441932203689521744947848950);
@@ -816,88 +235,3 @@ test "comptime_int param and return" {
fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int {
return a + b;
}
-
-test "vector integer addition" {
- const S = struct {
- fn doTheTest() !void {
- var a: std.meta.Vector(4, i32) = [_]i32{ 1, 2, 3, 4 };
- var b: std.meta.Vector(4, i32) = [_]i32{ 5, 6, 7, 8 };
- var result = a + b;
- var result_array: [4]i32 = result;
- const expected = [_]i32{ 6, 8, 10, 12 };
- try expectEqualSlices(i32, &expected, &result_array);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "NaN comparison" {
- try testNanEqNan(f16);
- try testNanEqNan(f32);
- try testNanEqNan(f64);
- try testNanEqNan(f128);
- comptime try testNanEqNan(f16);
- comptime try testNanEqNan(f32);
- comptime try testNanEqNan(f64);
- comptime try testNanEqNan(f128);
-}
-
-fn testNanEqNan(comptime F: type) !void {
- var nan1 = std.math.nan(F);
- var nan2 = std.math.nan(F);
- try expect(nan1 != nan2);
- try expect(!(nan1 == nan2));
- try expect(!(nan1 > nan2));
- try expect(!(nan1 >= nan2));
- try expect(!(nan1 < nan2));
- try expect(!(nan1 <= nan2));
-}
-
-test "128-bit multiplication" {
- var a: i128 = 3;
- var b: i128 = 2;
- var c = a * b;
- try expect(c == 6);
-}
-
-test "vector comparison" {
- const S = struct {
- fn doTheTest() !void {
- var a: std.meta.Vector(6, i32) = [_]i32{ 1, 3, -1, 5, 7, 9 };
- var b: std.meta.Vector(6, i32) = [_]i32{ -1, 3, 0, 6, 10, -10 };
- try expect(mem.eql(bool, &@as([6]bool, a < b), &[_]bool{ false, false, true, true, true, false }));
- try expect(mem.eql(bool, &@as([6]bool, a <= b), &[_]bool{ false, true, true, true, true, false }));
- try expect(mem.eql(bool, &@as([6]bool, a == b), &[_]bool{ false, true, false, false, false, false }));
- try expect(mem.eql(bool, &@as([6]bool, a != b), &[_]bool{ true, false, true, true, true, true }));
- try expect(mem.eql(bool, &@as([6]bool, a > b), &[_]bool{ true, false, false, false, false, true }));
- try expect(mem.eql(bool, &@as([6]bool, a >= b), &[_]bool{ true, true, false, false, false, true }));
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "compare undefined literal with comptime_int" {
- var x = undefined == 1;
- // x is now undefined with type bool
- x = true;
- try expect(x);
-}
-
-test "signed zeros are represented properly" {
- const S = struct {
- fn doTheTest() !void {
- inline for ([_]type{ f16, f32, f64, f128 }) |T| {
- const ST = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
- var as_fp_val = -@as(T, 0.0);
- var as_uint_val = @bitCast(ST, as_fp_val);
- // Ensure the sign bit is set.
- try expect(as_uint_val >> (@typeInfo(T).Float.bits - 1) == 1);
- }
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
diff --git a/test/behavior/math_stage1.zig b/test/behavior/math_stage1.zig
new file mode 100644
index 0000000000..f0c160ebc4
--- /dev/null
+++ b/test/behavior/math_stage1.zig
@@ -0,0 +1,677 @@
+const std = @import("std");
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
+const expectEqualSlices = std.testing.expectEqualSlices;
+const maxInt = std.math.maxInt;
+const minInt = std.math.minInt;
+const mem = std.mem;
+
+test "division" {
+ try testDivision();
+ comptime try testDivision();
+}
+fn testDivision() !void {
+ try expect(div(u32, 13, 3) == 4);
+ try expect(div(f16, 1.0, 2.0) == 0.5);
+ try expect(div(f32, 1.0, 2.0) == 0.5);
+
+ try expect(divExact(u32, 55, 11) == 5);
+ try expect(divExact(i32, -55, 11) == -5);
+ try expect(divExact(f16, 55.0, 11.0) == 5.0);
+ try expect(divExact(f16, -55.0, 11.0) == -5.0);
+ try expect(divExact(f32, 55.0, 11.0) == 5.0);
+ try expect(divExact(f32, -55.0, 11.0) == -5.0);
+
+ try expect(divFloor(i32, 5, 3) == 1);
+ try expect(divFloor(i32, -5, 3) == -2);
+ try expect(divFloor(f16, 5.0, 3.0) == 1.0);
+ try expect(divFloor(f16, -5.0, 3.0) == -2.0);
+ try expect(divFloor(f32, 5.0, 3.0) == 1.0);
+ try expect(divFloor(f32, -5.0, 3.0) == -2.0);
+ try expect(divFloor(i32, -0x80000000, -2) == 0x40000000);
+ try expect(divFloor(i32, 0, -0x80000000) == 0);
+ try expect(divFloor(i32, -0x40000001, 0x40000000) == -2);
+ try expect(divFloor(i32, -0x80000000, 1) == -0x80000000);
+ try expect(divFloor(i32, 10, 12) == 0);
+ try expect(divFloor(i32, -14, 12) == -2);
+ try expect(divFloor(i32, -2, 12) == -1);
+
+ try expect(divTrunc(i32, 5, 3) == 1);
+ try expect(divTrunc(i32, -5, 3) == -1);
+ try expect(divTrunc(f16, 5.0, 3.0) == 1.0);
+ try expect(divTrunc(f16, -5.0, 3.0) == -1.0);
+ try expect(divTrunc(f32, 5.0, 3.0) == 1.0);
+ try expect(divTrunc(f32, -5.0, 3.0) == -1.0);
+ try expect(divTrunc(f64, 5.0, 3.0) == 1.0);
+ try expect(divTrunc(f64, -5.0, 3.0) == -1.0);
+ try expect(divTrunc(i32, 10, 12) == 0);
+ try expect(divTrunc(i32, -14, 12) == -1);
+ try expect(divTrunc(i32, -2, 12) == 0);
+
+ try expect(mod(i32, 10, 12) == 10);
+ try expect(mod(i32, -14, 12) == 10);
+ try expect(mod(i32, -2, 12) == 10);
+
+ comptime {
+ try expect(
+ 1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600,
+ );
+ try expect(
+ @rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600,
+ );
+ try expect(
+ 1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2,
+ );
+ try expect(
+ @divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2,
+ );
+ try expect(
+ @divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2,
+ );
+ try expect(
+ @divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2,
+ );
+ try expect(
+ 4126227191251978491697987544882340798050766755606969681711 % 10 == 1,
+ );
+ }
+}
+fn div(comptime T: type, a: T, b: T) T {
+ return a / b;
+}
+fn divExact(comptime T: type, a: T, b: T) T {
+ return @divExact(a, b);
+}
+fn divFloor(comptime T: type, a: T, b: T) T {
+ return @divFloor(a, b);
+}
+fn divTrunc(comptime T: type, a: T, b: T) T {
+ return @divTrunc(a, b);
+}
+fn mod(comptime T: type, a: T, b: T) T {
+ return @mod(a, b);
+}
+
+test "@addWithOverflow" {
+ var result: u8 = undefined;
+ try expect(@addWithOverflow(u8, 250, 100, &result));
+ try expect(!@addWithOverflow(u8, 100, 150, &result));
+ try expect(result == 250);
+}
+
+// TODO test mulWithOverflow
+// TODO test subWithOverflow
+
+test "@shlWithOverflow" {
+ var result: u16 = undefined;
+ try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result));
+ try expect(!@shlWithOverflow(u16, 0b0010111111111111, 2, &result));
+ try expect(result == 0b1011111111111100);
+}
+
+test "@*WithOverflow with u0 values" {
+ var result: u0 = undefined;
+ try expect(!@addWithOverflow(u0, 0, 0, &result));
+ try expect(!@subWithOverflow(u0, 0, 0, &result));
+ try expect(!@mulWithOverflow(u0, 0, 0, &result));
+ try expect(!@shlWithOverflow(u0, 0, 0, &result));
+}
+
+test "@clz vectors" {
+ try testClzVectors();
+ comptime try testClzVectors();
+}
+
+fn testClzVectors() !void {
+ @setEvalBranchQuota(10_000);
+ try expectEqual(@clz(u8, @splat(64, @as(u8, 0b10001010))), @splat(64, @as(u4, 0)));
+ try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00001010))), @splat(64, @as(u4, 4)));
+ try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00011010))), @splat(64, @as(u4, 3)));
+ try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00000000))), @splat(64, @as(u4, 8)));
+ try expectEqual(@clz(u128, @splat(64, @as(u128, 0xffffffffffffffff))), @splat(64, @as(u8, 64)));
+ try expectEqual(@clz(u128, @splat(64, @as(u128, 0x10000000000000000))), @splat(64, @as(u8, 63)));
+}
+
+test "@ctz" {
+ try testCtz();
+ comptime try testCtz();
+}
+
+fn testCtz() !void {
+ try expect(@ctz(u8, 0b10100000) == 5);
+ try expect(@ctz(u8, 0b10001010) == 1);
+ try expect(@ctz(u8, 0b00000000) == 8);
+ try expect(@ctz(u16, 0b00000000) == 16);
+}
+
+test "@ctz vectors" {
+ try testCtzVectors();
+ comptime try testCtzVectors();
+}
+
+fn testCtzVectors() !void {
+ @setEvalBranchQuota(10_000);
+ try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b10100000))), @splat(64, @as(u4, 5)));
+ try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b10001010))), @splat(64, @as(u4, 1)));
+ try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b00000000))), @splat(64, @as(u4, 8)));
+ try expectEqual(@ctz(u16, @splat(64, @as(u16, 0b00000000))), @splat(64, @as(u5, 16)));
+}
+
+test "unsigned wrapping" {
+ try testUnsignedWrappingEval(maxInt(u32));
+ comptime try testUnsignedWrappingEval(maxInt(u32));
+}
+fn testUnsignedWrappingEval(x: u32) !void {
+ const zero = x +% 1;
+ try expect(zero == 0);
+ const orig = zero -% 1;
+ try expect(orig == maxInt(u32));
+}
+
+test "signed wrapping" {
+ try testSignedWrappingEval(maxInt(i32));
+ comptime try testSignedWrappingEval(maxInt(i32));
+}
+fn testSignedWrappingEval(x: i32) !void {
+ const min_val = x +% 1;
+ try expect(min_val == minInt(i32));
+ const max_val = min_val -% 1;
+ try expect(max_val == maxInt(i32));
+}
+
+test "signed negation wrapping" {
+ try testSignedNegationWrappingEval(minInt(i16));
+ comptime try testSignedNegationWrappingEval(minInt(i16));
+}
+fn testSignedNegationWrappingEval(x: i16) !void {
+ try expect(x == -32768);
+ const neg = -%x;
+ try expect(neg == -32768);
+}
+
+test "unsigned negation wrapping" {
+ try testUnsignedNegationWrappingEval(1);
+ comptime try testUnsignedNegationWrappingEval(1);
+}
+fn testUnsignedNegationWrappingEval(x: u16) !void {
+ try expect(x == 1);
+ const neg = -%x;
+ try expect(neg == maxInt(u16));
+}
+
+test "unsigned 64-bit division" {
+ try test_u64_div();
+ comptime try test_u64_div();
+}
+fn test_u64_div() !void {
+ const result = divWithResult(1152921504606846976, 34359738365);
+ try expect(result.quotient == 33554432);
+ try expect(result.remainder == 100663296);
+}
+fn divWithResult(a: u64, b: u64) DivResult {
+ return DivResult{
+ .quotient = a / b,
+ .remainder = a % b,
+ };
+}
+const DivResult = struct {
+ quotient: u64,
+ remainder: u64,
+};
+
+test "binary not" {
+ try expect(comptime x: {
+ break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101;
+ });
+ try expect(comptime x: {
+ break :x ~@as(u64, 2147483647) == 18446744071562067968;
+ });
+ try testBinaryNot(0b1010101010101010);
+}
+
+fn testBinaryNot(x: u16) !void {
+ try expect(~x == 0b0101010101010101);
+}
+
+test "small int addition" {
+ var x: u2 = 0;
+ try expect(x == 0);
+
+ x += 1;
+ try expect(x == 1);
+
+ x += 1;
+ try expect(x == 2);
+
+ x += 1;
+ try expect(x == 3);
+
+ var result: @TypeOf(x) = 3;
+ try expect(@addWithOverflow(@TypeOf(x), x, 1, &result));
+
+ try expect(result == 0);
+}
+
+test "allow signed integer division/remainder when values are comptime known and positive or exact" {
+ try expect(5 / 3 == 1);
+ try expect(-5 / -3 == 1);
+ try expect(-6 / 3 == -2);
+
+ try expect(5 % 3 == 2);
+ try expect(-6 % 3 == 0);
+}
+
+test "quad hex float literal parsing accurate" {
+ const a: f128 = 0x1.1111222233334444555566667777p+0;
+
+ // implied 1 is dropped, with an exponent of 0 (0x3fff) after biasing.
+ const expected: u128 = 0x3fff1111222233334444555566667777;
+ try expect(@bitCast(u128, a) == expected);
+
+ // non-normalized
+ const b: f128 = 0x11.111222233334444555566667777p-4;
+ try expect(@bitCast(u128, b) == expected);
+
+ const S = struct {
+ fn doTheTest() !void {
+ {
+ var f: f128 = 0x1.2eab345678439abcdefea56782346p+5;
+ try expect(@bitCast(u128, f) == 0x40042eab345678439abcdefea5678234);
+ }
+ {
+ var f: f128 = 0x1.edcb34a235253948765432134674fp-1;
+ try expect(@bitCast(u128, f) == 0x3ffeedcb34a235253948765432134674);
+ }
+ {
+ var f: f128 = 0x1.353e45674d89abacc3a2ebf3ff4ffp-50;
+ try expect(@bitCast(u128, f) == 0x3fcd353e45674d89abacc3a2ebf3ff50);
+ }
+ {
+ var f: f128 = 0x1.ed8764648369535adf4be3214567fp-9;
+ try expect(@bitCast(u128, f) == 0x3ff6ed8764648369535adf4be3214568);
+ }
+ const exp2ft = [_]f64{
+ 0x1.6a09e667f3bcdp-1,
+ 0x1.7a11473eb0187p-1,
+ 0x1.8ace5422aa0dbp-1,
+ 0x1.9c49182a3f090p-1,
+ 0x1.ae89f995ad3adp-1,
+ 0x1.c199bdd85529cp-1,
+ 0x1.d5818dcfba487p-1,
+ 0x1.ea4afa2a490dap-1,
+ 0x1.0000000000000p+0,
+ 0x1.0b5586cf9890fp+0,
+ 0x1.172b83c7d517bp+0,
+ 0x1.2387a6e756238p+0,
+ 0x1.306fe0a31b715p+0,
+ 0x1.3dea64c123422p+0,
+ 0x1.4bfdad5362a27p+0,
+ 0x1.5ab07dd485429p+0,
+ 0x1.8p23,
+ 0x1.62e430p-1,
+ 0x1.ebfbe0p-3,
+ 0x1.c6b348p-5,
+ 0x1.3b2c9cp-7,
+ 0x1.0p127,
+ -0x1.0p-149,
+ };
+
+ const answers = [_]u64{
+ 0x3fe6a09e667f3bcd,
+ 0x3fe7a11473eb0187,
+ 0x3fe8ace5422aa0db,
+ 0x3fe9c49182a3f090,
+ 0x3feae89f995ad3ad,
+ 0x3fec199bdd85529c,
+ 0x3fed5818dcfba487,
+ 0x3feea4afa2a490da,
+ 0x3ff0000000000000,
+ 0x3ff0b5586cf9890f,
+ 0x3ff172b83c7d517b,
+ 0x3ff2387a6e756238,
+ 0x3ff306fe0a31b715,
+ 0x3ff3dea64c123422,
+ 0x3ff4bfdad5362a27,
+ 0x3ff5ab07dd485429,
+ 0x4168000000000000,
+ 0x3fe62e4300000000,
+ 0x3fcebfbe00000000,
+ 0x3fac6b3480000000,
+ 0x3f83b2c9c0000000,
+ 0x47e0000000000000,
+ 0xb6a0000000000000,
+ };
+
+ for (exp2ft) |x, i| {
+ try expect(@bitCast(u64, x) == answers[i]);
+ }
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "truncating shift left" {
+ try testShlTrunc(maxInt(u16));
+ comptime try testShlTrunc(maxInt(u16));
+}
+fn testShlTrunc(x: u16) !void {
+ const shifted = x << 1;
+ try expect(shifted == 65534);
+}
+
+test "truncating shift right" {
+ try testShrTrunc(maxInt(u16));
+ comptime try testShrTrunc(maxInt(u16));
+}
+fn testShrTrunc(x: u16) !void {
+ const shifted = x >> 1;
+ try expect(shifted == 32767);
+}
+
+test "exact shift left" {
+ try testShlExact(0b00110101);
+ comptime try testShlExact(0b00110101);
+}
+fn testShlExact(x: u8) !void {
+ const shifted = @shlExact(x, 2);
+ try expect(shifted == 0b11010100);
+}
+
+test "exact shift right" {
+ try testShrExact(0b10110100);
+ comptime try testShrExact(0b10110100);
+}
+fn testShrExact(x: u8) !void {
+ const shifted = @shrExact(x, 2);
+ try expect(shifted == 0b00101101);
+}
+
+test "shift left/right on u0 operand" {
+ const S = struct {
+ fn doTheTest() !void {
+ var x: u0 = 0;
+ var y: u0 = 0;
+ try expectEqual(@as(u0, 0), x << 0);
+ try expectEqual(@as(u0, 0), x >> 0);
+ try expectEqual(@as(u0, 0), x << y);
+ try expectEqual(@as(u0, 0), x >> y);
+ try expectEqual(@as(u0, 0), @shlExact(x, 0));
+ try expectEqual(@as(u0, 0), @shrExact(x, 0));
+ try expectEqual(@as(u0, 0), @shlExact(x, y));
+ try expectEqual(@as(u0, 0), @shrExact(x, y));
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "f128" {
+ try test_f128();
+ comptime try test_f128();
+}
+
+fn make_f128(x: f128) f128 {
+ return x;
+}
+
+fn test_f128() !void {
+ try expect(@sizeOf(f128) == 16);
+ try expect(make_f128(1.0) == 1.0);
+ try expect(make_f128(1.0) != 1.1);
+ try expect(make_f128(1.0) > 0.9);
+ try expect(make_f128(1.0) >= 0.9);
+ try expect(make_f128(1.0) >= 1.0);
+ try should_not_be_zero(1.0);
+}
+
+fn should_not_be_zero(x: f128) !void {
+ try expect(x != 0.0);
+}
+
+test "comptime float rem int" {
+ comptime {
+ var x = @as(f32, 1) % 2;
+ try expect(x == 1.0);
+ }
+}
+
+test "remainder division" {
+ comptime try remdiv(f16);
+ comptime try remdiv(f32);
+ comptime try remdiv(f64);
+ comptime try remdiv(f128);
+ try remdiv(f16);
+ try remdiv(f64);
+ try remdiv(f128);
+}
+
+fn remdiv(comptime T: type) !void {
+ try expect(@as(T, 1) == @as(T, 1) % @as(T, 2));
+ try expect(@as(T, 1) == @as(T, 7) % @as(T, 3));
+}
+
+test "@sqrt" {
+ try testSqrt(f64, 12.0);
+ comptime try testSqrt(f64, 12.0);
+ try testSqrt(f32, 13.0);
+ comptime try testSqrt(f32, 13.0);
+ try testSqrt(f16, 13.0);
+ comptime try testSqrt(f16, 13.0);
+
+ const x = 14.0;
+ const y = x * x;
+ const z = @sqrt(y);
+ comptime try expect(z == x);
+}
+
+fn testSqrt(comptime T: type, x: T) !void {
+ try expect(@sqrt(x * x) == x);
+}
+
+test "@fabs" {
+ try testFabs(f128, 12.0);
+ comptime try testFabs(f128, 12.0);
+ try testFabs(f64, 12.0);
+ comptime try testFabs(f64, 12.0);
+ try testFabs(f32, 12.0);
+ comptime try testFabs(f32, 12.0);
+ try testFabs(f16, 12.0);
+ comptime try testFabs(f16, 12.0);
+
+ const x = 14.0;
+ const y = -x;
+ const z = @fabs(y);
+ comptime try expectEqual(x, z);
+}
+
+fn testFabs(comptime T: type, x: T) !void {
+ const y = -x;
+ const z = @fabs(y);
+ try expectEqual(x, z);
+}
+
+test "@floor" {
+ // FIXME: Generates a floorl function call
+ // testFloor(f128, 12.0);
+ comptime try testFloor(f128, 12.0);
+ try testFloor(f64, 12.0);
+ comptime try testFloor(f64, 12.0);
+ try testFloor(f32, 12.0);
+ comptime try testFloor(f32, 12.0);
+ try testFloor(f16, 12.0);
+ comptime try testFloor(f16, 12.0);
+
+ const x = 14.0;
+ const y = x + 0.7;
+ const z = @floor(y);
+ comptime try expectEqual(x, z);
+}
+
+fn testFloor(comptime T: type, x: T) !void {
+ const y = x + 0.6;
+ const z = @floor(y);
+ try expectEqual(x, z);
+}
+
+test "@ceil" {
+ // FIXME: Generates a ceill function call
+ //testCeil(f128, 12.0);
+ comptime try testCeil(f128, 12.0);
+ try testCeil(f64, 12.0);
+ comptime try testCeil(f64, 12.0);
+ try testCeil(f32, 12.0);
+ comptime try testCeil(f32, 12.0);
+ try testCeil(f16, 12.0);
+ comptime try testCeil(f16, 12.0);
+
+ const x = 14.0;
+ const y = x - 0.7;
+ const z = @ceil(y);
+ comptime try expectEqual(x, z);
+}
+
+fn testCeil(comptime T: type, x: T) !void {
+ const y = x - 0.8;
+ const z = @ceil(y);
+ try expectEqual(x, z);
+}
+
+test "@trunc" {
+ // FIXME: Generates a truncl function call
+ //testTrunc(f128, 12.0);
+ comptime try testTrunc(f128, 12.0);
+ try testTrunc(f64, 12.0);
+ comptime try testTrunc(f64, 12.0);
+ try testTrunc(f32, 12.0);
+ comptime try testTrunc(f32, 12.0);
+ try testTrunc(f16, 12.0);
+ comptime try testTrunc(f16, 12.0);
+
+ const x = 14.0;
+ const y = x + 0.7;
+ const z = @trunc(y);
+ comptime try expectEqual(x, z);
+}
+
+fn testTrunc(comptime T: type, x: T) !void {
+ {
+ const y = x + 0.8;
+ const z = @trunc(y);
+ try expectEqual(x, z);
+ }
+
+ {
+ const y = -x - 0.8;
+ const z = @trunc(y);
+ try expectEqual(-x, z);
+ }
+}
+
+test "@round" {
+ // FIXME: Generates a roundl function call
+ //testRound(f128, 12.0);
+ comptime try testRound(f128, 12.0);
+ try testRound(f64, 12.0);
+ comptime try testRound(f64, 12.0);
+ try testRound(f32, 12.0);
+ comptime try testRound(f32, 12.0);
+ try testRound(f16, 12.0);
+ comptime try testRound(f16, 12.0);
+
+ const x = 14.0;
+ const y = x + 0.4;
+ const z = @round(y);
+ comptime try expectEqual(x, z);
+}
+
+fn testRound(comptime T: type, x: T) !void {
+ const y = x - 0.5;
+ const z = @round(y);
+ try expectEqual(x, z);
+}
+
+test "vector integer addition" {
+ const S = struct {
+ fn doTheTest() !void {
+ var a: std.meta.Vector(4, i32) = [_]i32{ 1, 2, 3, 4 };
+ var b: std.meta.Vector(4, i32) = [_]i32{ 5, 6, 7, 8 };
+ var result = a + b;
+ var result_array: [4]i32 = result;
+ const expected = [_]i32{ 6, 8, 10, 12 };
+ try expectEqualSlices(i32, &expected, &result_array);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "NaN comparison" {
+ try testNanEqNan(f16);
+ try testNanEqNan(f32);
+ try testNanEqNan(f64);
+ try testNanEqNan(f128);
+ comptime try testNanEqNan(f16);
+ comptime try testNanEqNan(f32);
+ comptime try testNanEqNan(f64);
+ comptime try testNanEqNan(f128);
+}
+
+fn testNanEqNan(comptime F: type) !void {
+ var nan1 = std.math.nan(F);
+ var nan2 = std.math.nan(F);
+ try expect(nan1 != nan2);
+ try expect(!(nan1 == nan2));
+ try expect(!(nan1 > nan2));
+ try expect(!(nan1 >= nan2));
+ try expect(!(nan1 < nan2));
+ try expect(!(nan1 <= nan2));
+}
+
+test "128-bit multiplication" {
+ var a: i128 = 3;
+ var b: i128 = 2;
+ var c = a * b;
+ try expect(c == 6);
+}
+
+test "vector comparison" {
+ const S = struct {
+ fn doTheTest() !void {
+ var a: std.meta.Vector(6, i32) = [_]i32{ 1, 3, -1, 5, 7, 9 };
+ var b: std.meta.Vector(6, i32) = [_]i32{ -1, 3, 0, 6, 10, -10 };
+ try expect(mem.eql(bool, &@as([6]bool, a < b), &[_]bool{ false, false, true, true, true, false }));
+ try expect(mem.eql(bool, &@as([6]bool, a <= b), &[_]bool{ false, true, true, true, true, false }));
+ try expect(mem.eql(bool, &@as([6]bool, a == b), &[_]bool{ false, true, false, false, false, false }));
+ try expect(mem.eql(bool, &@as([6]bool, a != b), &[_]bool{ true, false, true, true, true, true }));
+ try expect(mem.eql(bool, &@as([6]bool, a > b), &[_]bool{ true, false, false, false, false, true }));
+ try expect(mem.eql(bool, &@as([6]bool, a >= b), &[_]bool{ true, true, false, false, false, true }));
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "compare undefined literal with comptime_int" {
+ var x = undefined == 1;
+ // x is now undefined with type bool
+ x = true;
+ try expect(x);
+}
+
+test "signed zeros are represented properly" {
+ const S = struct {
+ fn doTheTest() !void {
+ inline for ([_]type{ f16, f32, f64, f128 }) |T| {
+ const ST = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
+ var as_fp_val = -@as(T, 0.0);
+ var as_uint_val = @bitCast(ST, as_fp_val);
+ // Ensure the sign bit is set.
+ try expect(as_uint_val >> (@typeInfo(T).Float.bits - 1) == 1);
+ }
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
diff --git a/test/behavior/member_func.zig b/test/behavior/member_func.zig
new file mode 100644
index 0000000000..092a691901
--- /dev/null
+++ b/test/behavior/member_func.zig
@@ -0,0 +1,103 @@
+const expect = @import("std").testing.expect;
+
+const HasFuncs = struct {
+ state: u32,
+ func_field: fn (u32) u32,
+
+ fn inc(self: *HasFuncs) void {
+ self.state += 1;
+ }
+
+ fn get(self: HasFuncs) u32 {
+ return self.state;
+ }
+
+ fn getPtr(self: *const HasFuncs) *const u32 {
+ return &self.state;
+ }
+
+ fn one(_: u32) u32 {
+ return 1;
+ }
+ fn two(_: u32) u32 {
+ return 2;
+ }
+};
+
+test "standard field calls" {
+ try expect(HasFuncs.one(0) == 1);
+ try expect(HasFuncs.two(0) == 2);
+
+ var v: HasFuncs = undefined;
+ v.state = 0;
+ v.func_field = HasFuncs.one;
+
+ const pv = &v;
+ const pcv: *const HasFuncs = pv;
+
+ try expect(v.get() == 0);
+ v.inc();
+ try expect(v.state == 1);
+ try expect(v.get() == 1);
+
+ pv.inc();
+ try expect(v.state == 2);
+ try expect(pv.get() == 2);
+ try expect(v.getPtr().* == 2);
+ try expect(pcv.get() == 2);
+ try expect(pcv.getPtr().* == 2);
+
+ v.func_field = HasFuncs.one;
+ try expect(v.func_field(0) == 1);
+ try expect(pv.func_field(0) == 1);
+ try expect(pcv.func_field(0) == 1);
+
+ try expect(pcv.func_field(blk: {
+ pv.func_field = HasFuncs.two;
+ break :blk 0;
+ }) == 1);
+
+ v.func_field = HasFuncs.two;
+ try expect(v.func_field(0) == 2);
+ try expect(pv.func_field(0) == 2);
+ try expect(pcv.func_field(0) == 2);
+}
+
+test "@field field calls" {
+ try expect(@field(HasFuncs, "one")(0) == 1);
+ try expect(@field(HasFuncs, "two")(0) == 2);
+
+ var v: HasFuncs = undefined;
+ v.state = 0;
+ v.func_field = HasFuncs.one;
+
+ const pv = &v;
+ const pcv: *const HasFuncs = pv;
+
+ try expect(@field(v, "get")() == 0);
+ @field(v, "inc")();
+ try expect(v.state == 1);
+ try expect(@field(v, "get")() == 1);
+
+ @field(pv, "inc")();
+ try expect(v.state == 2);
+ try expect(@field(pv, "get")() == 2);
+ try expect(@field(v, "getPtr")().* == 2);
+ try expect(@field(pcv, "get")() == 2);
+ try expect(@field(pcv, "getPtr")().* == 2);
+
+ v.func_field = HasFuncs.one;
+ try expect(@field(v, "func_field")(0) == 1);
+ try expect(@field(pv, "func_field")(0) == 1);
+ try expect(@field(pcv, "func_field")(0) == 1);
+
+ try expect(@field(pcv, "func_field")(blk: {
+ pv.func_field = HasFuncs.two;
+ break :blk 0;
+ }) == 1);
+
+ v.func_field = HasFuncs.two;
+ try expect(@field(v, "func_field")(0) == 2);
+ try expect(@field(pv, "func_field")(0) == 2);
+ try expect(@field(pcv, "func_field")(0) == 2);
+}
diff --git a/test/behavior/misc.zig b/test/behavior/misc.zig
index 5394e6fd14..9b3bf48366 100644
--- a/test/behavior/misc.zig
+++ b/test/behavior/misc.zig
@@ -5,16 +5,6 @@ const expectEqualStrings = std.testing.expectEqualStrings;
const mem = std.mem;
const builtin = @import("builtin");
-test "memcpy and memset intrinsics" {
- var foo: [20]u8 = undefined;
- var bar: [20]u8 = undefined;
-
- @memset(&foo, 'A', foo.len);
- @memcpy(&bar, &foo, bar.len);
-
- if (bar[11] != 'A') unreachable;
-}
-
test "slicing" {
var array: [20]i32 = undefined;
diff --git a/test/behavior/saturating_arithmetic.zig b/test/behavior/saturating_arithmetic.zig
index 553e9ff21a..91f9c17fb9 100644
--- a/test/behavior/saturating_arithmetic.zig
+++ b/test/behavior/saturating_arithmetic.zig
@@ -11,16 +11,28 @@ fn testSaturatingOp(comptime op: Op, comptime T: type, test_data: [3]T) !void {
const a = test_data[0];
const b = test_data[1];
const expected = test_data[2];
- const actual = switch (op) {
- .add => @addWithSaturation(a, b),
- .sub => @subWithSaturation(a, b),
- .mul => @mulWithSaturation(a, b),
- .shl => @shlWithSaturation(a, b),
- };
- try expectEqual(expected, actual);
+ {
+ const actual = switch (op) {
+ .add => a +| b,
+ .sub => a -| b,
+ .mul => a *| b,
+ .shl => a <<| b,
+ };
+ try expectEqual(expected, actual);
+ }
+ {
+ var actual = a;
+ switch (op) {
+ .add => actual +|= b,
+ .sub => actual -|= b,
+ .mul => actual *|= b,
+ .shl => actual <<|= b,
+ }
+ try expectEqual(expected, actual);
+ }
}
-test "@addWithSaturation" {
+test "saturating add" {
const S = struct {
fn doTheTest() !void {
// .{a, b, expected a+b}
@@ -38,22 +50,16 @@ test "@addWithSaturation" {
try testSaturatingOp(.add, u128, .{ maxInt(u128), 1, maxInt(u128) });
const u8x3 = std.meta.Vector(3, u8);
- try expectEqual(u8x3{ 255, 255, 255 }, @addWithSaturation(
- u8x3{ 255, 254, 1 },
- u8x3{ 1, 2, 255 },
- ));
+ try expectEqual(u8x3{ 255, 255, 255 }, (u8x3{ 255, 254, 1 } +| u8x3{ 1, 2, 255 }));
const i8x3 = std.meta.Vector(3, i8);
- try expectEqual(i8x3{ 127, 127, 127 }, @addWithSaturation(
- i8x3{ 127, 126, 1 },
- i8x3{ 1, 2, 127 },
- ));
+ try expectEqual(i8x3{ 127, 127, 127 }, (i8x3{ 127, 126, 1 } +| i8x3{ 1, 2, 127 }));
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
-test "@subWithSaturation" {
+test "saturating subtraction" {
const S = struct {
fn doTheTest() !void {
// .{a, b, expected a-b}
@@ -69,17 +75,14 @@ test "@subWithSaturation" {
try testSaturatingOp(.sub, u128, .{ 0, maxInt(u128), 0 });
const u8x3 = std.meta.Vector(3, u8);
- try expectEqual(u8x3{ 0, 0, 0 }, @subWithSaturation(
- u8x3{ 0, 0, 0 },
- u8x3{ 255, 255, 255 },
- ));
+ try expectEqual(u8x3{ 0, 0, 0 }, (u8x3{ 0, 0, 0 } -| u8x3{ 255, 255, 255 }));
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
-test "@mulWithSaturation" {
+test "saturating multiplication" {
// TODO: once #9660 has been solved, remove this line
if (std.builtin.target.cpu.arch == .wasm32) return error.SkipZigTest;
@@ -100,10 +103,7 @@ test "@mulWithSaturation" {
try testSaturatingOp(.mul, u128, .{ maxInt(u128), maxInt(u128), maxInt(u128) });
const u8x3 = std.meta.Vector(3, u8);
- try expectEqual(u8x3{ 255, 255, 255 }, @mulWithSaturation(
- u8x3{ 2, 2, 2 },
- u8x3{ 255, 255, 255 },
- ));
+ try expectEqual(u8x3{ 255, 255, 255 }, (u8x3{ 2, 2, 2 } *| u8x3{ 255, 255, 255 }));
}
};
@@ -111,7 +111,7 @@ test "@mulWithSaturation" {
comptime try S.doTheTest();
}
-test "@shlWithSaturation" {
+test "saturating shift-left" {
const S = struct {
fn doTheTest() !void {
// .{a, b, expected a< 0);
- }
- };
- // Zero-sized type
- try S.doTheTest(u0, false);
- try S.doTheTest(*u0, false);
- // Non byte-sized type
- try S.doTheTest(u1, true);
- try S.doTheTest(*u1, true);
- // Regular type
- try S.doTheTest(u8, true);
- try S.doTheTest(*u8, true);
- try S.doTheTest(f32, true);
- try S.doTheTest(*f32, true);
- // Container with ptr pointing to themselves
- try S.doTheTest(S0, true);
- try S.doTheTest(U0, true);
- try S.doTheTest(S1, true);
- try S.doTheTest(U1, true);
-}
diff --git a/test/behavior/sizeof_and_typeof_stage1.zig b/test/behavior/sizeof_and_typeof_stage1.zig
new file mode 100644
index 0000000000..974e79015f
--- /dev/null
+++ b/test/behavior/sizeof_and_typeof_stage1.zig
@@ -0,0 +1,218 @@
+const std = @import("std");
+const builtin = std.builtin;
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
+
+const A = struct {
+ a: u8,
+ b: u32,
+ c: u8,
+ d: u3,
+ e: u5,
+ f: u16,
+ g: u16,
+ h: u9,
+ i: u7,
+};
+
+const P = packed struct {
+ a: u8,
+ b: u32,
+ c: u8,
+ d: u3,
+ e: u5,
+ f: u16,
+ g: u16,
+ h: u9,
+ i: u7,
+};
+
+test "@offsetOf" {
+ // Packed structs have fixed memory layout
+ try expect(@offsetOf(P, "a") == 0);
+ try expect(@offsetOf(P, "b") == 1);
+ try expect(@offsetOf(P, "c") == 5);
+ try expect(@offsetOf(P, "d") == 6);
+ try expect(@offsetOf(P, "e") == 6);
+ try expect(@offsetOf(P, "f") == 7);
+ try expect(@offsetOf(P, "g") == 9);
+ try expect(@offsetOf(P, "h") == 11);
+ try expect(@offsetOf(P, "i") == 12);
+
+ // Normal struct fields can be moved/padded
+ var a: A = undefined;
+ try expect(@ptrToInt(&a.a) - @ptrToInt(&a) == @offsetOf(A, "a"));
+ try expect(@ptrToInt(&a.b) - @ptrToInt(&a) == @offsetOf(A, "b"));
+ try expect(@ptrToInt(&a.c) - @ptrToInt(&a) == @offsetOf(A, "c"));
+ try expect(@ptrToInt(&a.d) - @ptrToInt(&a) == @offsetOf(A, "d"));
+ try expect(@ptrToInt(&a.e) - @ptrToInt(&a) == @offsetOf(A, "e"));
+ try expect(@ptrToInt(&a.f) - @ptrToInt(&a) == @offsetOf(A, "f"));
+ try expect(@ptrToInt(&a.g) - @ptrToInt(&a) == @offsetOf(A, "g"));
+ try expect(@ptrToInt(&a.h) - @ptrToInt(&a) == @offsetOf(A, "h"));
+ try expect(@ptrToInt(&a.i) - @ptrToInt(&a) == @offsetOf(A, "i"));
+}
+
+test "@offsetOf packed struct, array length not power of 2 or multiple of native pointer width in bytes" {
+ const p3a_len = 3;
+ const P3 = packed struct {
+ a: [p3a_len]u8,
+ b: usize,
+ };
+ try std.testing.expectEqual(0, @offsetOf(P3, "a"));
+ try std.testing.expectEqual(p3a_len, @offsetOf(P3, "b"));
+
+ const p5a_len = 5;
+ const P5 = packed struct {
+ a: [p5a_len]u8,
+ b: usize,
+ };
+ try std.testing.expectEqual(0, @offsetOf(P5, "a"));
+ try std.testing.expectEqual(p5a_len, @offsetOf(P5, "b"));
+
+ const p6a_len = 6;
+ const P6 = packed struct {
+ a: [p6a_len]u8,
+ b: usize,
+ };
+ try std.testing.expectEqual(0, @offsetOf(P6, "a"));
+ try std.testing.expectEqual(p6a_len, @offsetOf(P6, "b"));
+
+ const p7a_len = 7;
+ const P7 = packed struct {
+ a: [p7a_len]u8,
+ b: usize,
+ };
+ try std.testing.expectEqual(0, @offsetOf(P7, "a"));
+ try std.testing.expectEqual(p7a_len, @offsetOf(P7, "b"));
+
+ const p9a_len = 9;
+ const P9 = packed struct {
+ a: [p9a_len]u8,
+ b: usize,
+ };
+ try std.testing.expectEqual(0, @offsetOf(P9, "a"));
+ try std.testing.expectEqual(p9a_len, @offsetOf(P9, "b"));
+
+ // 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 25 etc. are further cases
+}
+
+test "@bitOffsetOf" {
+ // Packed structs have fixed memory layout
+ try expect(@bitOffsetOf(P, "a") == 0);
+ try expect(@bitOffsetOf(P, "b") == 8);
+ try expect(@bitOffsetOf(P, "c") == 40);
+ try expect(@bitOffsetOf(P, "d") == 48);
+ try expect(@bitOffsetOf(P, "e") == 51);
+ try expect(@bitOffsetOf(P, "f") == 56);
+ try expect(@bitOffsetOf(P, "g") == 72);
+
+ try expect(@offsetOf(A, "a") * 8 == @bitOffsetOf(A, "a"));
+ try expect(@offsetOf(A, "b") * 8 == @bitOffsetOf(A, "b"));
+ try expect(@offsetOf(A, "c") * 8 == @bitOffsetOf(A, "c"));
+ try expect(@offsetOf(A, "d") * 8 == @bitOffsetOf(A, "d"));
+ try expect(@offsetOf(A, "e") * 8 == @bitOffsetOf(A, "e"));
+ try expect(@offsetOf(A, "f") * 8 == @bitOffsetOf(A, "f"));
+ try expect(@offsetOf(A, "g") * 8 == @bitOffsetOf(A, "g"));
+}
+
+test "@sizeOf(T) == 0 doesn't force resolving struct size" {
+ const S = struct {
+ const Foo = struct {
+ y: if (@sizeOf(Foo) == 0) u64 else u32,
+ };
+ const Bar = struct {
+ x: i32,
+ y: if (0 == @sizeOf(Bar)) u64 else u32,
+ };
+ };
+
+ try expect(@sizeOf(S.Foo) == 4);
+ try expect(@sizeOf(S.Bar) == 8);
+}
+
+test "@TypeOf() has no runtime side effects" {
+ const S = struct {
+ fn foo(comptime T: type, ptr: *T) T {
+ ptr.* += 1;
+ return ptr.*;
+ }
+ };
+ var data: i32 = 0;
+ const T = @TypeOf(S.foo(i32, &data));
+ comptime try expect(T == i32);
+ try expect(data == 0);
+}
+
+test "branching logic inside @TypeOf" {
+ const S = struct {
+ var data: i32 = 0;
+ fn foo() anyerror!i32 {
+ data += 1;
+ return undefined;
+ }
+ };
+ const T = @TypeOf(S.foo() catch undefined);
+ comptime try expect(T == i32);
+ try expect(S.data == 0);
+}
+
+test "@bitSizeOf" {
+ try expect(@bitSizeOf(u2) == 2);
+ try expect(@bitSizeOf(u8) == @sizeOf(u8) * 8);
+ try expect(@bitSizeOf(struct {
+ a: u2,
+ }) == 8);
+ try expect(@bitSizeOf(packed struct {
+ a: u2,
+ }) == 2);
+}
+
+test "@sizeOf comparison against zero" {
+ const S0 = struct {
+ f: *@This(),
+ };
+ const U0 = union {
+ f: *@This(),
+ };
+ const S1 = struct {
+ fn H(comptime T: type) type {
+ return struct {
+ x: T,
+ };
+ }
+ f0: H(*@This()),
+ f1: H(**@This()),
+ f2: H(***@This()),
+ };
+ const U1 = union {
+ fn H(comptime T: type) type {
+ return struct {
+ x: T,
+ };
+ }
+ f0: H(*@This()),
+ f1: H(**@This()),
+ f2: H(***@This()),
+ };
+ const S = struct {
+ fn doTheTest(comptime T: type, comptime result: bool) !void {
+ try expectEqual(result, @sizeOf(T) > 0);
+ }
+ };
+ // Zero-sized type
+ try S.doTheTest(u0, false);
+ try S.doTheTest(*u0, false);
+ // Non byte-sized type
+ try S.doTheTest(u1, true);
+ try S.doTheTest(*u1, true);
+ // Regular type
+ try S.doTheTest(u8, true);
+ try S.doTheTest(*u8, true);
+ try S.doTheTest(f32, true);
+ try S.doTheTest(*f32, true);
+ // Container with ptr pointing to themselves
+ try S.doTheTest(S0, true);
+ try S.doTheTest(U0, true);
+ try S.doTheTest(S1, true);
+ try S.doTheTest(U1, true);
+}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index d9e1c02aa1..d755c92c72 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -5,28 +5,26 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualSlices = std.testing.expectEqualSlices;
const maxInt = std.math.maxInt;
+
const StructWithNoFields = struct {
fn add(a: i32, b: i32) i32 {
return a + b;
}
};
-const empty_global_instance = StructWithNoFields{};
-
-top_level_field: i32,
-
-test "top level fields" {
- var instance = @This(){
- .top_level_field = 1234,
- };
- instance.top_level_field += 1;
- try expectEqual(@as(i32, 1235), instance.top_level_field);
-}
test "call struct static method" {
const result = StructWithNoFields.add(3, 4);
try expect(result == 7);
}
+const should_be_11 = StructWithNoFields.add(5, 6);
+
+test "invoke static method in global scope" {
+ try expect(should_be_11 == 11);
+}
+
+const empty_global_instance = StructWithNoFields{};
+
test "return empty struct instance" {
_ = returnEmptyStructInstance();
}
@@ -34,10 +32,76 @@ fn returnEmptyStructInstance() StructWithNoFields {
return empty_global_instance;
}
-const should_be_11 = StructWithNoFields.add(5, 6);
+const StructFoo = struct {
+ a: i32,
+ b: bool,
+ c: f32,
+};
+test "structs" {
+ var foo: StructFoo = undefined;
+ @memset(@ptrCast([*]u8, &foo), 0, @sizeOf(StructFoo));
+ foo.a += 1;
+ foo.b = foo.a == 1;
+ try testFoo(foo);
+ testMutation(&foo);
+ try expect(foo.c == 100);
+}
+fn testFoo(foo: StructFoo) !void {
+ try expect(foo.b);
+}
+fn testMutation(foo: *StructFoo) void {
+ foo.c = 100;
+}
-test "invoke static method in global scope" {
- try expect(should_be_11 == 11);
+test "struct byval assign" {
+ var foo1: StructFoo = undefined;
+ var foo2: StructFoo = undefined;
+
+ foo1.a = 1234;
+ foo2.a = 0;
+ try expect(foo2.a == 0);
+ foo2 = foo1;
+ try expect(foo2.a == 1234);
+}
+
+const Node = struct {
+ val: Val,
+ next: *Node,
+};
+
+const Val = struct {
+ x: i32,
+};
+
+test "struct initializer" {
+ const val = Val{ .x = 42 };
+ try expect(val.x == 42);
+}
+
+const MemberFnTestFoo = struct {
+ x: i32,
+ fn member(foo: MemberFnTestFoo) i32 {
+ return foo.x;
+ }
+};
+
+test "call member function directly" {
+ const instance = MemberFnTestFoo{ .x = 1234 };
+ const result = MemberFnTestFoo.member(instance);
+ try expect(result == 1234);
+}
+
+test "struct point to self" {
+ var root: Node = undefined;
+ root.val.x = 1;
+
+ var node: Node = undefined;
+ node.next = &root;
+ node.val.x = 2;
+
+ root.next = &node;
+
+ try expect(node.next.next.next.val.x == 1);
}
test "void struct fields" {
@@ -55,101 +119,6 @@ const VoidStructFieldsFoo = struct {
c: void,
};
-test "structs" {
- var foo: StructFoo = undefined;
- @memset(@ptrCast([*]u8, &foo), 0, @sizeOf(StructFoo));
- foo.a += 1;
- foo.b = foo.a == 1;
- try testFoo(foo);
- testMutation(&foo);
- try expect(foo.c == 100);
-}
-const StructFoo = struct {
- a: i32,
- b: bool,
- c: f32,
-};
-fn testFoo(foo: StructFoo) !void {
- try expect(foo.b);
-}
-fn testMutation(foo: *StructFoo) void {
- foo.c = 100;
-}
-
-const Node = struct {
- val: Val,
- next: *Node,
-};
-
-const Val = struct {
- x: i32,
-};
-
-test "struct point to self" {
- var root: Node = undefined;
- root.val.x = 1;
-
- var node: Node = undefined;
- node.next = &root;
- node.val.x = 2;
-
- root.next = &node;
-
- try expect(node.next.next.next.val.x == 1);
-}
-
-test "struct byval assign" {
- var foo1: StructFoo = undefined;
- var foo2: StructFoo = undefined;
-
- foo1.a = 1234;
- foo2.a = 0;
- try expect(foo2.a == 0);
- foo2 = foo1;
- try expect(foo2.a == 1234);
-}
-
-fn structInitializer() void {
- const val = Val{ .x = 42 };
- try expect(val.x == 42);
-}
-
-test "fn call of struct field" {
- const Foo = struct {
- ptr: fn () i32,
- };
- const S = struct {
- fn aFunc() i32 {
- return 13;
- }
-
- fn callStructField(foo: Foo) i32 {
- return foo.ptr();
- }
- };
-
- try expect(S.callStructField(Foo{ .ptr = S.aFunc }) == 13);
-}
-
-test "store member function in variable" {
- const instance = MemberFnTestFoo{ .x = 1234 };
- const memberFn = MemberFnTestFoo.member;
- const result = memberFn(instance);
- try expect(result == 1234);
-}
-const MemberFnTestFoo = struct {
- x: i32,
- fn member(foo: MemberFnTestFoo) i32 {
- return foo.x;
- }
-};
-
-test "call member function directly" {
- const instance = MemberFnTestFoo{ .x = 1234 };
- const result = MemberFnTestFoo.member(instance);
- try expect(result == 1234);
-}
-
test "member functions" {
const r = MemberFnRand{ .seed = 1234 };
try expect(r.getSeed() == 1234);
@@ -160,794 +129,3 @@ const MemberFnRand = struct {
return r.seed;
}
};
-
-test "return struct byval from function" {
- const bar = makeBar2(1234, 5678);
- try expect(bar.y == 5678);
-}
-const Bar = struct {
- x: i32,
- y: i32,
-};
-fn makeBar2(x: i32, y: i32) Bar {
- return Bar{
- .x = x,
- .y = y,
- };
-}
-
-test "empty struct method call" {
- const es = EmptyStruct{};
- try expect(es.method() == 1234);
-}
-const EmptyStruct = struct {
- fn method(es: *const EmptyStruct) i32 {
- _ = es;
- return 1234;
- }
-};
-
-test "return empty struct from fn" {
- _ = testReturnEmptyStructFromFn();
-}
-const EmptyStruct2 = struct {};
-fn testReturnEmptyStructFromFn() EmptyStruct2 {
- return EmptyStruct2{};
-}
-
-test "pass slice of empty struct to fn" {
- try expect(testPassSliceOfEmptyStructToFn(&[_]EmptyStruct2{EmptyStruct2{}}) == 1);
-}
-fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
- return slice.len;
-}
-
-const APackedStruct = packed struct {
- x: u8,
- y: u8,
-};
-
-test "packed struct" {
- var foo = APackedStruct{
- .x = 1,
- .y = 2,
- };
- foo.y += 1;
- const four = foo.x + foo.y;
- try expect(four == 4);
-}
-
-const BitField1 = packed struct {
- a: u3,
- b: u3,
- c: u2,
-};
-
-const bit_field_1 = BitField1{
- .a = 1,
- .b = 2,
- .c = 3,
-};
-
-test "bit field access" {
- var data = bit_field_1;
- try expect(getA(&data) == 1);
- try expect(getB(&data) == 2);
- try expect(getC(&data) == 3);
- comptime try expect(@sizeOf(BitField1) == 1);
-
- data.b += 1;
- try expect(data.b == 3);
-
- data.a += 1;
- try expect(data.a == 2);
- try expect(data.b == 3);
-}
-
-fn getA(data: *const BitField1) u3 {
- return data.a;
-}
-
-fn getB(data: *const BitField1) u3 {
- return data.b;
-}
-
-fn getC(data: *const BitField1) u2 {
- return data.c;
-}
-
-const Foo24Bits = packed struct {
- field: u24,
-};
-const Foo96Bits = packed struct {
- a: u24,
- b: u24,
- c: u24,
- d: u24,
-};
-
-test "packed struct 24bits" {
- comptime {
- try expect(@sizeOf(Foo24Bits) == 4);
- if (@sizeOf(usize) == 4) {
- try expect(@sizeOf(Foo96Bits) == 12);
- } else {
- try expect(@sizeOf(Foo96Bits) == 16);
- }
- }
-
- var value = Foo96Bits{
- .a = 0,
- .b = 0,
- .c = 0,
- .d = 0,
- };
- value.a += 1;
- try expect(value.a == 1);
- try expect(value.b == 0);
- try expect(value.c == 0);
- try expect(value.d == 0);
-
- value.b += 1;
- try expect(value.a == 1);
- try expect(value.b == 1);
- try expect(value.c == 0);
- try expect(value.d == 0);
-
- value.c += 1;
- try expect(value.a == 1);
- try expect(value.b == 1);
- try expect(value.c == 1);
- try expect(value.d == 0);
-
- value.d += 1;
- try expect(value.a == 1);
- try expect(value.b == 1);
- try expect(value.c == 1);
- try expect(value.d == 1);
-}
-
-const Foo32Bits = packed struct {
- field: u24,
- pad: u8,
-};
-
-const FooArray24Bits = packed struct {
- a: u16,
- b: [2]Foo32Bits,
- c: u16,
-};
-
-// TODO revisit this test when doing https://github.com/ziglang/zig/issues/1512
-test "packed array 24bits" {
- comptime {
- try expect(@sizeOf([9]Foo32Bits) == 9 * 4);
- try expect(@sizeOf(FooArray24Bits) == 2 + 2 * 4 + 2);
- }
-
- var bytes = [_]u8{0} ** (@sizeOf(FooArray24Bits) + 1);
- bytes[bytes.len - 1] = 0xaa;
- const ptr = &std.mem.bytesAsSlice(FooArray24Bits, bytes[0 .. bytes.len - 1])[0];
- try expect(ptr.a == 0);
- try expect(ptr.b[0].field == 0);
- try expect(ptr.b[1].field == 0);
- try expect(ptr.c == 0);
-
- ptr.a = maxInt(u16);
- try expect(ptr.a == maxInt(u16));
- try expect(ptr.b[0].field == 0);
- try expect(ptr.b[1].field == 0);
- try expect(ptr.c == 0);
-
- ptr.b[0].field = maxInt(u24);
- try expect(ptr.a == maxInt(u16));
- try expect(ptr.b[0].field == maxInt(u24));
- try expect(ptr.b[1].field == 0);
- try expect(ptr.c == 0);
-
- ptr.b[1].field = maxInt(u24);
- try expect(ptr.a == maxInt(u16));
- try expect(ptr.b[0].field == maxInt(u24));
- try expect(ptr.b[1].field == maxInt(u24));
- try expect(ptr.c == 0);
-
- ptr.c = maxInt(u16);
- try expect(ptr.a == maxInt(u16));
- try expect(ptr.b[0].field == maxInt(u24));
- try expect(ptr.b[1].field == maxInt(u24));
- try expect(ptr.c == maxInt(u16));
-
- try expect(bytes[bytes.len - 1] == 0xaa);
-}
-
-const FooStructAligned = packed struct {
- a: u8,
- b: u8,
-};
-
-const FooArrayOfAligned = packed struct {
- a: [2]FooStructAligned,
-};
-
-test "aligned array of packed struct" {
- comptime {
- try expect(@sizeOf(FooStructAligned) == 2);
- try expect(@sizeOf(FooArrayOfAligned) == 2 * 2);
- }
-
- var bytes = [_]u8{0xbb} ** @sizeOf(FooArrayOfAligned);
- const ptr = &std.mem.bytesAsSlice(FooArrayOfAligned, bytes[0..])[0];
-
- try expect(ptr.a[0].a == 0xbb);
- try expect(ptr.a[0].b == 0xbb);
- try expect(ptr.a[1].a == 0xbb);
- try expect(ptr.a[1].b == 0xbb);
-}
-
-test "runtime struct initialization of bitfield" {
- const s1 = Nibbles{
- .x = x1,
- .y = x1,
- };
- const s2 = Nibbles{
- .x = @intCast(u4, x2),
- .y = @intCast(u4, x2),
- };
-
- try expect(s1.x == x1);
- try expect(s1.y == x1);
- try expect(s2.x == @intCast(u4, x2));
- try expect(s2.y == @intCast(u4, x2));
-}
-
-var x1 = @as(u4, 1);
-var x2 = @as(u8, 2);
-
-const Nibbles = packed struct {
- x: u4,
- y: u4,
-};
-
-const Bitfields = packed struct {
- f1: u16,
- f2: u16,
- f3: u8,
- f4: u8,
- f5: u4,
- f6: u4,
- f7: u8,
-};
-
-test "native bit field understands endianness" {
- var all: u64 = if (native_endian != .Little)
- 0x1111222233445677
- else
- 0x7765443322221111;
- var bytes: [8]u8 = undefined;
- @memcpy(&bytes, @ptrCast([*]u8, &all), 8);
- var bitfields = @ptrCast(*Bitfields, &bytes).*;
-
- try expect(bitfields.f1 == 0x1111);
- try expect(bitfields.f2 == 0x2222);
- try expect(bitfields.f3 == 0x33);
- try expect(bitfields.f4 == 0x44);
- try expect(bitfields.f5 == 0x5);
- try expect(bitfields.f6 == 0x6);
- try expect(bitfields.f7 == 0x77);
-}
-
-test "align 1 field before self referential align 8 field as slice return type" {
- const result = alloc(Expr);
- try expect(result.len == 0);
-}
-
-const Expr = union(enum) {
- Literal: u8,
- Question: *Expr,
-};
-
-fn alloc(comptime T: type) []T {
- return &[_]T{};
-}
-
-test "call method with mutable reference to struct with no fields" {
- const S = struct {
- fn doC(s: *const @This()) bool {
- _ = s;
- return true;
- }
- fn do(s: *@This()) bool {
- _ = s;
- return true;
- }
- };
-
- var s = S{};
- try expect(S.doC(&s));
- try expect(s.doC());
- try expect(S.do(&s));
- try expect(s.do());
-}
-
-test "implicit cast packed struct field to const ptr" {
- const LevelUpMove = packed struct {
- move_id: u9,
- level: u7,
-
- fn toInt(value: u7) u7 {
- return value;
- }
- };
-
- var lup: LevelUpMove = undefined;
- lup.level = 12;
- const res = LevelUpMove.toInt(lup.level);
- try expect(res == 12);
-}
-
-test "pointer to packed struct member in a stack variable" {
- const S = packed struct {
- a: u2,
- b: u2,
- };
-
- var s = S{ .a = 2, .b = 0 };
- var b_ptr = &s.b;
- try expect(s.b == 0);
- b_ptr.* = 2;
- try expect(s.b == 2);
-}
-
-test "non-byte-aligned array inside packed struct" {
- const Foo = packed struct {
- a: bool,
- b: [0x16]u8,
- };
- const S = struct {
- fn bar(slice: []const u8) !void {
- try expectEqualSlices(u8, slice, "abcdefghijklmnopqurstu");
- }
- fn doTheTest() !void {
- var foo = Foo{
- .a = true,
- .b = "abcdefghijklmnopqurstu".*,
- };
- const value = foo.b;
- try bar(&value);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "packed struct with u0 field access" {
- const S = packed struct {
- f0: u0,
- };
- var s = S{ .f0 = 0 };
- comptime try expect(s.f0 == 0);
-}
-
-const S0 = struct {
- bar: S1,
-
- pub const S1 = struct {
- value: u8,
- };
-
- fn init() @This() {
- return S0{ .bar = S1{ .value = 123 } };
- }
-};
-
-var g_foo: S0 = S0.init();
-
-test "access to global struct fields" {
- g_foo.bar.value = 42;
- try expect(g_foo.bar.value == 42);
-}
-
-test "packed struct with fp fields" {
- const S = packed struct {
- data: [3]f32,
-
- pub fn frob(self: *@This()) void {
- self.data[0] += self.data[1] + self.data[2];
- self.data[1] += self.data[0] + self.data[2];
- self.data[2] += self.data[0] + self.data[1];
- }
- };
-
- var s: S = undefined;
- s.data[0] = 1.0;
- s.data[1] = 2.0;
- s.data[2] = 3.0;
- s.frob();
- try expectEqual(@as(f32, 6.0), s.data[0]);
- try expectEqual(@as(f32, 11.0), s.data[1]);
- try expectEqual(@as(f32, 20.0), s.data[2]);
-}
-
-test "use within struct scope" {
- const S = struct {
- usingnamespace struct {
- pub fn inner() i32 {
- return 42;
- }
- };
- };
- try expectEqual(@as(i32, 42), S.inner());
-}
-
-test "default struct initialization fields" {
- const S = struct {
- a: i32 = 1234,
- b: i32,
- };
- const x = S{
- .b = 5,
- };
- var five: i32 = 5;
- const y = S{
- .b = five,
- };
- if (x.a + x.b != 1239) {
- @compileError("it should be comptime known");
- }
- try expectEqual(y, x);
- try expectEqual(1239, x.a + x.b);
-}
-
-test "fn with C calling convention returns struct by value" {
- const S = struct {
- fn entry() !void {
- var x = makeBar(10);
- try expectEqual(@as(i32, 10), x.handle);
- }
-
- const ExternBar = extern struct {
- handle: i32,
- };
-
- fn makeBar(t: i32) callconv(.C) ExternBar {
- return ExternBar{
- .handle = t,
- };
- }
- };
- try S.entry();
- comptime try S.entry();
-}
-
-test "for loop over pointers to struct, getting field from struct pointer" {
- const S = struct {
- const Foo = struct {
- name: []const u8,
- };
-
- var ok = true;
-
- fn eql(a: []const u8) bool {
- _ = a;
- return true;
- }
-
- const ArrayList = struct {
- fn toSlice(self: *ArrayList) []*Foo {
- _ = self;
- return @as([*]*Foo, undefined)[0..0];
- }
- };
-
- fn doTheTest() !void {
- var objects: ArrayList = undefined;
-
- for (objects.toSlice()) |obj| {
- if (eql(obj.name)) {
- ok = false;
- }
- }
-
- try expect(ok);
- }
- };
- try S.doTheTest();
-}
-
-test "zero-bit field in packed struct" {
- const S = packed struct {
- x: u10,
- y: void,
- };
- var x: S = undefined;
- _ = x;
-}
-
-test "struct field init with catch" {
- const S = struct {
- fn doTheTest() !void {
- var x: anyerror!isize = 1;
- var req = Foo{
- .field = x catch undefined,
- };
- try expect(req.field == 1);
- }
-
- pub const Foo = extern struct {
- field: isize,
- };
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "packed struct with non-ABI-aligned field" {
- const S = packed struct {
- x: u9,
- y: u183,
- };
- var s: S = undefined;
- s.x = 1;
- s.y = 42;
- try expect(s.x == 1);
- try expect(s.y == 42);
-}
-
-test "non-packed struct with u128 entry in union" {
- const U = union(enum) {
- Num: u128,
- Void,
- };
-
- const S = struct {
- f1: U,
- f2: U,
- };
-
- var sx: S = undefined;
- var s = &sx;
- try std.testing.expect(@ptrToInt(&s.f2) - @ptrToInt(&s.f1) == @offsetOf(S, "f2"));
- var v2 = U{ .Num = 123 };
- s.f2 = v2;
- try std.testing.expect(s.f2.Num == 123);
-}
-
-test "packed struct field passed to generic function" {
- const S = struct {
- const P = packed struct {
- b: u5,
- g: u5,
- r: u5,
- a: u1,
- };
-
- fn genericReadPackedField(ptr: anytype) u5 {
- return ptr.*;
- }
- };
-
- var p: S.P = undefined;
- p.b = 29;
- var loaded = S.genericReadPackedField(&p.b);
- try expect(loaded == 29);
-}
-
-test "anonymous struct literal syntax" {
- const S = struct {
- const Point = struct {
- x: i32,
- y: i32,
- };
-
- fn doTheTest() !void {
- var p: Point = .{
- .x = 1,
- .y = 2,
- };
- try expect(p.x == 1);
- try expect(p.y == 2);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "fully anonymous struct" {
- const S = struct {
- fn doTheTest() !void {
- try dump(.{
- .int = @as(u32, 1234),
- .float = @as(f64, 12.34),
- .b = true,
- .s = "hi",
- });
- }
- fn dump(args: anytype) !void {
- try expect(args.int == 1234);
- try expect(args.float == 12.34);
- try expect(args.b);
- try expect(args.s[0] == 'h');
- try expect(args.s[1] == 'i');
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "fully anonymous list literal" {
- const S = struct {
- fn doTheTest() !void {
- try dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi" });
- }
- fn dump(args: anytype) !void {
- try expect(args.@"0" == 1234);
- try expect(args.@"1" == 12.34);
- try expect(args.@"2");
- try expect(args.@"3"[0] == 'h');
- try expect(args.@"3"[1] == 'i');
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "anonymous struct literal assigned to variable" {
- var vec = .{ @as(i32, 22), @as(i32, 55), @as(i32, 99) };
- try expect(vec.@"0" == 22);
- try expect(vec.@"1" == 55);
- try expect(vec.@"2" == 99);
-}
-
-test "struct with var field" {
- const Point = struct {
- x: anytype,
- y: anytype,
- };
- const pt = Point{
- .x = 1,
- .y = 2,
- };
- try expect(pt.x == 1);
- try expect(pt.y == 2);
-}
-
-test "comptime struct field" {
- const T = struct {
- a: i32,
- comptime b: i32 = 1234,
- };
-
- var foo: T = undefined;
- comptime try expect(foo.b == 1234);
-}
-
-test "anon struct literal field value initialized with fn call" {
- const S = struct {
- fn doTheTest() !void {
- var x = .{foo()};
- try expectEqualSlices(u8, x[0], "hi");
- }
- fn foo() []const u8 {
- return "hi";
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "self-referencing struct via array member" {
- const T = struct {
- children: [1]*@This(),
- };
- var x: T = undefined;
- x = T{ .children = .{&x} };
- try expect(x.children[0] == &x);
-}
-
-test "struct with union field" {
- const Value = struct {
- ref: u32 = 2,
- kind: union(enum) {
- None: usize,
- Bool: bool,
- },
- };
-
- var True = Value{
- .kind = .{ .Bool = true },
- };
- try expectEqual(@as(u32, 2), True.ref);
- try expectEqual(true, True.kind.Bool);
-}
-
-test "type coercion of anon struct literal to struct" {
- const S = struct {
- const S2 = struct {
- A: u32,
- B: []const u8,
- C: void,
- D: Foo = .{},
- };
-
- const Foo = struct {
- field: i32 = 1234,
- };
-
- fn doTheTest() !void {
- var y: u32 = 42;
- const t0 = .{ .A = 123, .B = "foo", .C = {} };
- const t1 = .{ .A = y, .B = "foo", .C = {} };
- const y0: S2 = t0;
- var y1: S2 = t1;
- try expect(y0.A == 123);
- try expect(std.mem.eql(u8, y0.B, "foo"));
- try expect(y0.C == {});
- try expect(y0.D.field == 1234);
- try expect(y1.A == y);
- try expect(std.mem.eql(u8, y1.B, "foo"));
- try expect(y1.C == {});
- try expect(y1.D.field == 1234);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "type coercion of pointer to anon struct literal to pointer to struct" {
- const S = struct {
- const S2 = struct {
- A: u32,
- B: []const u8,
- C: void,
- D: Foo = .{},
- };
-
- const Foo = struct {
- field: i32 = 1234,
- };
-
- fn doTheTest() !void {
- var y: u32 = 42;
- const t0 = &.{ .A = 123, .B = "foo", .C = {} };
- const t1 = &.{ .A = y, .B = "foo", .C = {} };
- const y0: *const S2 = t0;
- var y1: *const S2 = t1;
- try expect(y0.A == 123);
- try expect(std.mem.eql(u8, y0.B, "foo"));
- try expect(y0.C == {});
- try expect(y0.D.field == 1234);
- try expect(y1.A == y);
- try expect(std.mem.eql(u8, y1.B, "foo"));
- try expect(y1.C == {});
- try expect(y1.D.field == 1234);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "packed struct with undefined initializers" {
- const S = struct {
- const P = packed struct {
- a: u3,
- _a: u3 = undefined,
- b: u3,
- _b: u3 = undefined,
- c: u3,
- _c: u3 = undefined,
- };
-
- fn doTheTest() !void {
- var p: P = undefined;
- p = P{ .a = 2, .b = 4, .c = 6 };
- // Make sure the compiler doesn't touch the unprefixed fields.
- // Use expect since i386-linux doesn't like expectEqual
- try expect(p.a == 2);
- try expect(p.b == 4);
- try expect(p.c == 6);
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
diff --git a/test/behavior/struct_stage1.zig b/test/behavior/struct_stage1.zig
new file mode 100644
index 0000000000..3c4aaf58ec
--- /dev/null
+++ b/test/behavior/struct_stage1.zig
@@ -0,0 +1,853 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const native_endian = builtin.target.cpu.arch.endian();
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
+const expectEqualSlices = std.testing.expectEqualSlices;
+const maxInt = std.math.maxInt;
+
+top_level_field: i32,
+
+test "top level fields" {
+ var instance = @This(){
+ .top_level_field = 1234,
+ };
+ instance.top_level_field += 1;
+ try expectEqual(@as(i32, 1235), instance.top_level_field);
+}
+
+const StructFoo = struct {
+ a: i32,
+ b: bool,
+ c: f32,
+};
+
+const Node = struct {
+ val: Val,
+ next: *Node,
+};
+
+const Val = struct {
+ x: i32,
+};
+
+test "fn call of struct field" {
+ const Foo = struct {
+ ptr: fn () i32,
+ };
+ const S = struct {
+ fn aFunc() i32 {
+ return 13;
+ }
+
+ fn callStructField(foo: Foo) i32 {
+ return foo.ptr();
+ }
+ };
+
+ try expect(S.callStructField(Foo{ .ptr = S.aFunc }) == 13);
+}
+
+const MemberFnTestFoo = struct {
+ x: i32,
+ fn member(foo: MemberFnTestFoo) i32 {
+ return foo.x;
+ }
+};
+test "store member function in variable" {
+ const instance = MemberFnTestFoo{ .x = 1234 };
+ const memberFn = MemberFnTestFoo.member;
+ const result = memberFn(instance);
+ try expect(result == 1234);
+}
+
+test "return struct byval from function" {
+ const bar = makeBar2(1234, 5678);
+ try expect(bar.y == 5678);
+}
+const Bar = struct {
+ x: i32,
+ y: i32,
+};
+fn makeBar2(x: i32, y: i32) Bar {
+ return Bar{
+ .x = x,
+ .y = y,
+ };
+}
+
+test "empty struct method call" {
+ const es = EmptyStruct{};
+ try expect(es.method() == 1234);
+}
+const EmptyStruct = struct {
+ fn method(es: *const EmptyStruct) i32 {
+ _ = es;
+ return 1234;
+ }
+};
+
+test "return empty struct from fn" {
+ _ = testReturnEmptyStructFromFn();
+}
+const EmptyStruct2 = struct {};
+fn testReturnEmptyStructFromFn() EmptyStruct2 {
+ return EmptyStruct2{};
+}
+
+test "pass slice of empty struct to fn" {
+ try expect(testPassSliceOfEmptyStructToFn(&[_]EmptyStruct2{EmptyStruct2{}}) == 1);
+}
+fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
+ return slice.len;
+}
+
+const APackedStruct = packed struct {
+ x: u8,
+ y: u8,
+};
+
+test "packed struct" {
+ var foo = APackedStruct{
+ .x = 1,
+ .y = 2,
+ };
+ foo.y += 1;
+ const four = foo.x + foo.y;
+ try expect(four == 4);
+}
+
+const BitField1 = packed struct {
+ a: u3,
+ b: u3,
+ c: u2,
+};
+
+const bit_field_1 = BitField1{
+ .a = 1,
+ .b = 2,
+ .c = 3,
+};
+
+test "bit field access" {
+ var data = bit_field_1;
+ try expect(getA(&data) == 1);
+ try expect(getB(&data) == 2);
+ try expect(getC(&data) == 3);
+ comptime try expect(@sizeOf(BitField1) == 1);
+
+ data.b += 1;
+ try expect(data.b == 3);
+
+ data.a += 1;
+ try expect(data.a == 2);
+ try expect(data.b == 3);
+}
+
+fn getA(data: *const BitField1) u3 {
+ return data.a;
+}
+
+fn getB(data: *const BitField1) u3 {
+ return data.b;
+}
+
+fn getC(data: *const BitField1) u2 {
+ return data.c;
+}
+
+const Foo24Bits = packed struct {
+ field: u24,
+};
+const Foo96Bits = packed struct {
+ a: u24,
+ b: u24,
+ c: u24,
+ d: u24,
+};
+
+test "packed struct 24bits" {
+ comptime {
+ try expect(@sizeOf(Foo24Bits) == 4);
+ if (@sizeOf(usize) == 4) {
+ try expect(@sizeOf(Foo96Bits) == 12);
+ } else {
+ try expect(@sizeOf(Foo96Bits) == 16);
+ }
+ }
+
+ var value = Foo96Bits{
+ .a = 0,
+ .b = 0,
+ .c = 0,
+ .d = 0,
+ };
+ value.a += 1;
+ try expect(value.a == 1);
+ try expect(value.b == 0);
+ try expect(value.c == 0);
+ try expect(value.d == 0);
+
+ value.b += 1;
+ try expect(value.a == 1);
+ try expect(value.b == 1);
+ try expect(value.c == 0);
+ try expect(value.d == 0);
+
+ value.c += 1;
+ try expect(value.a == 1);
+ try expect(value.b == 1);
+ try expect(value.c == 1);
+ try expect(value.d == 0);
+
+ value.d += 1;
+ try expect(value.a == 1);
+ try expect(value.b == 1);
+ try expect(value.c == 1);
+ try expect(value.d == 1);
+}
+
+const Foo32Bits = packed struct {
+ field: u24,
+ pad: u8,
+};
+
+const FooArray24Bits = packed struct {
+ a: u16,
+ b: [2]Foo32Bits,
+ c: u16,
+};
+
+// TODO revisit this test when doing https://github.com/ziglang/zig/issues/1512
+test "packed array 24bits" {
+ comptime {
+ try expect(@sizeOf([9]Foo32Bits) == 9 * 4);
+ try expect(@sizeOf(FooArray24Bits) == 2 + 2 * 4 + 2);
+ }
+
+ var bytes = [_]u8{0} ** (@sizeOf(FooArray24Bits) + 1);
+ bytes[bytes.len - 1] = 0xaa;
+ const ptr = &std.mem.bytesAsSlice(FooArray24Bits, bytes[0 .. bytes.len - 1])[0];
+ try expect(ptr.a == 0);
+ try expect(ptr.b[0].field == 0);
+ try expect(ptr.b[1].field == 0);
+ try expect(ptr.c == 0);
+
+ ptr.a = maxInt(u16);
+ try expect(ptr.a == maxInt(u16));
+ try expect(ptr.b[0].field == 0);
+ try expect(ptr.b[1].field == 0);
+ try expect(ptr.c == 0);
+
+ ptr.b[0].field = maxInt(u24);
+ try expect(ptr.a == maxInt(u16));
+ try expect(ptr.b[0].field == maxInt(u24));
+ try expect(ptr.b[1].field == 0);
+ try expect(ptr.c == 0);
+
+ ptr.b[1].field = maxInt(u24);
+ try expect(ptr.a == maxInt(u16));
+ try expect(ptr.b[0].field == maxInt(u24));
+ try expect(ptr.b[1].field == maxInt(u24));
+ try expect(ptr.c == 0);
+
+ ptr.c = maxInt(u16);
+ try expect(ptr.a == maxInt(u16));
+ try expect(ptr.b[0].field == maxInt(u24));
+ try expect(ptr.b[1].field == maxInt(u24));
+ try expect(ptr.c == maxInt(u16));
+
+ try expect(bytes[bytes.len - 1] == 0xaa);
+}
+
+const FooStructAligned = packed struct {
+ a: u8,
+ b: u8,
+};
+
+const FooArrayOfAligned = packed struct {
+ a: [2]FooStructAligned,
+};
+
+test "aligned array of packed struct" {
+ comptime {
+ try expect(@sizeOf(FooStructAligned) == 2);
+ try expect(@sizeOf(FooArrayOfAligned) == 2 * 2);
+ }
+
+ var bytes = [_]u8{0xbb} ** @sizeOf(FooArrayOfAligned);
+ const ptr = &std.mem.bytesAsSlice(FooArrayOfAligned, bytes[0..])[0];
+
+ try expect(ptr.a[0].a == 0xbb);
+ try expect(ptr.a[0].b == 0xbb);
+ try expect(ptr.a[1].a == 0xbb);
+ try expect(ptr.a[1].b == 0xbb);
+}
+
+test "runtime struct initialization of bitfield" {
+ const s1 = Nibbles{
+ .x = x1,
+ .y = x1,
+ };
+ const s2 = Nibbles{
+ .x = @intCast(u4, x2),
+ .y = @intCast(u4, x2),
+ };
+
+ try expect(s1.x == x1);
+ try expect(s1.y == x1);
+ try expect(s2.x == @intCast(u4, x2));
+ try expect(s2.y == @intCast(u4, x2));
+}
+
+var x1 = @as(u4, 1);
+var x2 = @as(u8, 2);
+
+const Nibbles = packed struct {
+ x: u4,
+ y: u4,
+};
+
+const Bitfields = packed struct {
+ f1: u16,
+ f2: u16,
+ f3: u8,
+ f4: u8,
+ f5: u4,
+ f6: u4,
+ f7: u8,
+};
+
+test "native bit field understands endianness" {
+ var all: u64 = if (native_endian != .Little)
+ 0x1111222233445677
+ else
+ 0x7765443322221111;
+ var bytes: [8]u8 = undefined;
+ @memcpy(&bytes, @ptrCast([*]u8, &all), 8);
+ var bitfields = @ptrCast(*Bitfields, &bytes).*;
+
+ try expect(bitfields.f1 == 0x1111);
+ try expect(bitfields.f2 == 0x2222);
+ try expect(bitfields.f3 == 0x33);
+ try expect(bitfields.f4 == 0x44);
+ try expect(bitfields.f5 == 0x5);
+ try expect(bitfields.f6 == 0x6);
+ try expect(bitfields.f7 == 0x77);
+}
+
+test "align 1 field before self referential align 8 field as slice return type" {
+ const result = alloc(Expr);
+ try expect(result.len == 0);
+}
+
+const Expr = union(enum) {
+ Literal: u8,
+ Question: *Expr,
+};
+
+fn alloc(comptime T: type) []T {
+ return &[_]T{};
+}
+
+test "call method with mutable reference to struct with no fields" {
+ const S = struct {
+ fn doC(s: *const @This()) bool {
+ _ = s;
+ return true;
+ }
+ fn do(s: *@This()) bool {
+ _ = s;
+ return true;
+ }
+ };
+
+ var s = S{};
+ try expect(S.doC(&s));
+ try expect(s.doC());
+ try expect(S.do(&s));
+ try expect(s.do());
+}
+
+test "implicit cast packed struct field to const ptr" {
+ const LevelUpMove = packed struct {
+ move_id: u9,
+ level: u7,
+
+ fn toInt(value: u7) u7 {
+ return value;
+ }
+ };
+
+ var lup: LevelUpMove = undefined;
+ lup.level = 12;
+ const res = LevelUpMove.toInt(lup.level);
+ try expect(res == 12);
+}
+
+test "pointer to packed struct member in a stack variable" {
+ const S = packed struct {
+ a: u2,
+ b: u2,
+ };
+
+ var s = S{ .a = 2, .b = 0 };
+ var b_ptr = &s.b;
+ try expect(s.b == 0);
+ b_ptr.* = 2;
+ try expect(s.b == 2);
+}
+
+test "non-byte-aligned array inside packed struct" {
+ const Foo = packed struct {
+ a: bool,
+ b: [0x16]u8,
+ };
+ const S = struct {
+ fn bar(slice: []const u8) !void {
+ try expectEqualSlices(u8, slice, "abcdefghijklmnopqurstu");
+ }
+ fn doTheTest() !void {
+ var foo = Foo{
+ .a = true,
+ .b = "abcdefghijklmnopqurstu".*,
+ };
+ const value = foo.b;
+ try bar(&value);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "packed struct with u0 field access" {
+ const S = packed struct {
+ f0: u0,
+ };
+ var s = S{ .f0 = 0 };
+ comptime try expect(s.f0 == 0);
+}
+
+const S0 = struct {
+ bar: S1,
+
+ pub const S1 = struct {
+ value: u8,
+ };
+
+ fn init() @This() {
+ return S0{ .bar = S1{ .value = 123 } };
+ }
+};
+
+var g_foo: S0 = S0.init();
+
+test "access to global struct fields" {
+ g_foo.bar.value = 42;
+ try expect(g_foo.bar.value == 42);
+}
+
+test "packed struct with fp fields" {
+ const S = packed struct {
+ data: [3]f32,
+
+ pub fn frob(self: *@This()) void {
+ self.data[0] += self.data[1] + self.data[2];
+ self.data[1] += self.data[0] + self.data[2];
+ self.data[2] += self.data[0] + self.data[1];
+ }
+ };
+
+ var s: S = undefined;
+ s.data[0] = 1.0;
+ s.data[1] = 2.0;
+ s.data[2] = 3.0;
+ s.frob();
+ try expectEqual(@as(f32, 6.0), s.data[0]);
+ try expectEqual(@as(f32, 11.0), s.data[1]);
+ try expectEqual(@as(f32, 20.0), s.data[2]);
+}
+
+test "use within struct scope" {
+ const S = struct {
+ usingnamespace struct {
+ pub fn inner() i32 {
+ return 42;
+ }
+ };
+ };
+ try expectEqual(@as(i32, 42), S.inner());
+}
+
+test "default struct initialization fields" {
+ const S = struct {
+ a: i32 = 1234,
+ b: i32,
+ };
+ const x = S{
+ .b = 5,
+ };
+ var five: i32 = 5;
+ const y = S{
+ .b = five,
+ };
+ if (x.a + x.b != 1239) {
+ @compileError("it should be comptime known");
+ }
+ try expectEqual(y, x);
+ try expectEqual(1239, x.a + x.b);
+}
+
+test "fn with C calling convention returns struct by value" {
+ const S = struct {
+ fn entry() !void {
+ var x = makeBar(10);
+ try expectEqual(@as(i32, 10), x.handle);
+ }
+
+ const ExternBar = extern struct {
+ handle: i32,
+ };
+
+ fn makeBar(t: i32) callconv(.C) ExternBar {
+ return ExternBar{
+ .handle = t,
+ };
+ }
+ };
+ try S.entry();
+ comptime try S.entry();
+}
+
+test "for loop over pointers to struct, getting field from struct pointer" {
+ const S = struct {
+ const Foo = struct {
+ name: []const u8,
+ };
+
+ var ok = true;
+
+ fn eql(a: []const u8) bool {
+ _ = a;
+ return true;
+ }
+
+ const ArrayList = struct {
+ fn toSlice(self: *ArrayList) []*Foo {
+ _ = self;
+ return @as([*]*Foo, undefined)[0..0];
+ }
+ };
+
+ fn doTheTest() !void {
+ var objects: ArrayList = undefined;
+
+ for (objects.toSlice()) |obj| {
+ if (eql(obj.name)) {
+ ok = false;
+ }
+ }
+
+ try expect(ok);
+ }
+ };
+ try S.doTheTest();
+}
+
+test "zero-bit field in packed struct" {
+ const S = packed struct {
+ x: u10,
+ y: void,
+ };
+ var x: S = undefined;
+ _ = x;
+}
+
+test "struct field init with catch" {
+ const S = struct {
+ fn doTheTest() !void {
+ var x: anyerror!isize = 1;
+ var req = Foo{
+ .field = x catch undefined,
+ };
+ try expect(req.field == 1);
+ }
+
+ pub const Foo = extern struct {
+ field: isize,
+ };
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "packed struct with non-ABI-aligned field" {
+ const S = packed struct {
+ x: u9,
+ y: u183,
+ };
+ var s: S = undefined;
+ s.x = 1;
+ s.y = 42;
+ try expect(s.x == 1);
+ try expect(s.y == 42);
+}
+
+test "non-packed struct with u128 entry in union" {
+ const U = union(enum) {
+ Num: u128,
+ Void,
+ };
+
+ const S = struct {
+ f1: U,
+ f2: U,
+ };
+
+ var sx: S = undefined;
+ var s = &sx;
+ try std.testing.expect(@ptrToInt(&s.f2) - @ptrToInt(&s.f1) == @offsetOf(S, "f2"));
+ var v2 = U{ .Num = 123 };
+ s.f2 = v2;
+ try std.testing.expect(s.f2.Num == 123);
+}
+
+test "packed struct field passed to generic function" {
+ const S = struct {
+ const P = packed struct {
+ b: u5,
+ g: u5,
+ r: u5,
+ a: u1,
+ };
+
+ fn genericReadPackedField(ptr: anytype) u5 {
+ return ptr.*;
+ }
+ };
+
+ var p: S.P = undefined;
+ p.b = 29;
+ var loaded = S.genericReadPackedField(&p.b);
+ try expect(loaded == 29);
+}
+
+test "anonymous struct literal syntax" {
+ const S = struct {
+ const Point = struct {
+ x: i32,
+ y: i32,
+ };
+
+ fn doTheTest() !void {
+ var p: Point = .{
+ .x = 1,
+ .y = 2,
+ };
+ try expect(p.x == 1);
+ try expect(p.y == 2);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "fully anonymous struct" {
+ const S = struct {
+ fn doTheTest() !void {
+ try dump(.{
+ .int = @as(u32, 1234),
+ .float = @as(f64, 12.34),
+ .b = true,
+ .s = "hi",
+ });
+ }
+ fn dump(args: anytype) !void {
+ try expect(args.int == 1234);
+ try expect(args.float == 12.34);
+ try expect(args.b);
+ try expect(args.s[0] == 'h');
+ try expect(args.s[1] == 'i');
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "fully anonymous list literal" {
+ const S = struct {
+ fn doTheTest() !void {
+ try dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi" });
+ }
+ fn dump(args: anytype) !void {
+ try expect(args.@"0" == 1234);
+ try expect(args.@"1" == 12.34);
+ try expect(args.@"2");
+ try expect(args.@"3"[0] == 'h');
+ try expect(args.@"3"[1] == 'i');
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "anonymous struct literal assigned to variable" {
+ var vec = .{ @as(i32, 22), @as(i32, 55), @as(i32, 99) };
+ try expect(vec.@"0" == 22);
+ try expect(vec.@"1" == 55);
+ try expect(vec.@"2" == 99);
+}
+
+test "struct with var field" {
+ const Point = struct {
+ x: anytype,
+ y: anytype,
+ };
+ const pt = Point{
+ .x = 1,
+ .y = 2,
+ };
+ try expect(pt.x == 1);
+ try expect(pt.y == 2);
+}
+
+test "comptime struct field" {
+ const T = struct {
+ a: i32,
+ comptime b: i32 = 1234,
+ };
+
+ var foo: T = undefined;
+ comptime try expect(foo.b == 1234);
+}
+
+test "anon struct literal field value initialized with fn call" {
+ const S = struct {
+ fn doTheTest() !void {
+ var x = .{foo()};
+ try expectEqualSlices(u8, x[0], "hi");
+ }
+ fn foo() []const u8 {
+ return "hi";
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "self-referencing struct via array member" {
+ const T = struct {
+ children: [1]*@This(),
+ };
+ var x: T = undefined;
+ x = T{ .children = .{&x} };
+ try expect(x.children[0] == &x);
+}
+
+test "struct with union field" {
+ const Value = struct {
+ ref: u32 = 2,
+ kind: union(enum) {
+ None: usize,
+ Bool: bool,
+ },
+ };
+
+ var True = Value{
+ .kind = .{ .Bool = true },
+ };
+ try expectEqual(@as(u32, 2), True.ref);
+ try expectEqual(true, True.kind.Bool);
+}
+
+test "type coercion of anon struct literal to struct" {
+ const S = struct {
+ const S2 = struct {
+ A: u32,
+ B: []const u8,
+ C: void,
+ D: Foo = .{},
+ };
+
+ const Foo = struct {
+ field: i32 = 1234,
+ };
+
+ fn doTheTest() !void {
+ var y: u32 = 42;
+ const t0 = .{ .A = 123, .B = "foo", .C = {} };
+ const t1 = .{ .A = y, .B = "foo", .C = {} };
+ const y0: S2 = t0;
+ var y1: S2 = t1;
+ try expect(y0.A == 123);
+ try expect(std.mem.eql(u8, y0.B, "foo"));
+ try expect(y0.C == {});
+ try expect(y0.D.field == 1234);
+ try expect(y1.A == y);
+ try expect(std.mem.eql(u8, y1.B, "foo"));
+ try expect(y1.C == {});
+ try expect(y1.D.field == 1234);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "type coercion of pointer to anon struct literal to pointer to struct" {
+ const S = struct {
+ const S2 = struct {
+ A: u32,
+ B: []const u8,
+ C: void,
+ D: Foo = .{},
+ };
+
+ const Foo = struct {
+ field: i32 = 1234,
+ };
+
+ fn doTheTest() !void {
+ var y: u32 = 42;
+ const t0 = &.{ .A = 123, .B = "foo", .C = {} };
+ const t1 = &.{ .A = y, .B = "foo", .C = {} };
+ const y0: *const S2 = t0;
+ var y1: *const S2 = t1;
+ try expect(y0.A == 123);
+ try expect(std.mem.eql(u8, y0.B, "foo"));
+ try expect(y0.C == {});
+ try expect(y0.D.field == 1234);
+ try expect(y1.A == y);
+ try expect(std.mem.eql(u8, y1.B, "foo"));
+ try expect(y1.C == {});
+ try expect(y1.D.field == 1234);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "packed struct with undefined initializers" {
+ const S = struct {
+ const P = packed struct {
+ a: u3,
+ _a: u3 = undefined,
+ b: u3,
+ _b: u3 = undefined,
+ c: u3,
+ _c: u3 = undefined,
+ };
+
+ fn doTheTest() !void {
+ var p: P = undefined;
+ p = P{ .a = 2, .b = 4, .c = 6 };
+ // Make sure the compiler doesn't touch the unprefixed fields.
+ // Use expect since i386-linux doesn't like expectEqual
+ try expect(p.a == 2);
+ try expect(p.b == 4);
+ try expect(p.c == 6);
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index e512565b80..62afc74d83 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -65,18 +65,18 @@ fn nonConstSwitchOnEnum(fruit: Fruit) void {
}
test "switch statement" {
- try nonConstSwitch(SwitchStatmentFoo.C);
+ try nonConstSwitch(SwitchStatementFoo.C);
}
-fn nonConstSwitch(foo: SwitchStatmentFoo) !void {
+fn nonConstSwitch(foo: SwitchStatementFoo) !void {
const val = switch (foo) {
- SwitchStatmentFoo.A => @as(i32, 1),
- SwitchStatmentFoo.B => 2,
- SwitchStatmentFoo.C => 3,
- SwitchStatmentFoo.D => 4,
+ SwitchStatementFoo.A => @as(i32, 1),
+ SwitchStatementFoo.B => 2,
+ SwitchStatementFoo.C => 3,
+ SwitchStatementFoo.D => 4,
};
try expect(val == 3);
}
-const SwitchStatmentFoo = enum {
+const SwitchStatementFoo = enum {
A,
B,
C,
diff --git a/test/behavior/this.zig b/test/behavior/this.zig
index 086fe2814a..0fcfd5910c 100644
--- a/test/behavior/this.zig
+++ b/test/behavior/this.zig
@@ -24,11 +24,10 @@ test "this refer to module call private fn" {
}
test "this refer to container" {
- var pt = Point(i32){
- .x = 12,
- .y = 34,
- };
- pt.addOne();
+ var pt: Point(i32) = undefined;
+ pt.x = 12;
+ pt.y = 34;
+ Point(i32).addOne(&pt);
try expect(pt.x == 13);
try expect(pt.y == 35);
}
diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig
index 0daf4cec90..0baaf24283 100644
--- a/test/behavior/translate_c_macros.zig
+++ b/test/behavior/translate_c_macros.zig
@@ -3,28 +3,6 @@ const expectEqual = @import("std").testing.expectEqual;
const h = @cImport(@cInclude("behavior/translate_c_macros.h"));
-test "initializer list expression" {
- try expectEqual(h.Color{
- .r = 200,
- .g = 200,
- .b = 200,
- .a = 255,
- }, h.LIGHTGRAY);
-}
-
-test "sizeof in macros" {
- try expectEqual(@as(c_int, @sizeOf(u32)), h.MY_SIZEOF(u32));
- try expectEqual(@as(c_int, @sizeOf(u32)), h.MY_SIZEOF2(u32));
-}
-
-test "reference to a struct type" {
- try expectEqual(@sizeOf(h.struct_Foo), h.SIZE_OF_FOO);
-}
-
-test "cast negative integer to pointer" {
- try expectEqual(@intToPtr(?*c_void, @bitCast(usize, @as(isize, -1))), h.MAP_FAILED);
-}
-
test "casting to void with a macro" {
h.IGNORE_ME_1(42);
h.IGNORE_ME_2(42);
diff --git a/test/behavior/translate_c_macros_stage1.zig b/test/behavior/translate_c_macros_stage1.zig
new file mode 100644
index 0000000000..8de06ae8ea
--- /dev/null
+++ b/test/behavior/translate_c_macros_stage1.zig
@@ -0,0 +1,26 @@
+const expect = @import("std").testing.expect;
+const expectEqual = @import("std").testing.expectEqual;
+
+const h = @cImport(@cInclude("behavior/translate_c_macros.h"));
+
+test "initializer list expression" {
+ try expectEqual(h.Color{
+ .r = 200,
+ .g = 200,
+ .b = 200,
+ .a = 255,
+ }, h.LIGHTGRAY);
+}
+
+test "sizeof in macros" {
+ try expectEqual(@as(c_int, @sizeOf(u32)), h.MY_SIZEOF(u32));
+ try expectEqual(@as(c_int, @sizeOf(u32)), h.MY_SIZEOF2(u32));
+}
+
+test "reference to a struct type" {
+ try expectEqual(@sizeOf(h.struct_Foo), h.SIZE_OF_FOO);
+}
+
+test "cast negative integer to pointer" {
+ try expectEqual(@intToPtr(?*c_void, @bitCast(usize, @as(isize, -1))), h.MAP_FAILED);
+}
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index 3a56f2171f..cd5d2c3e06 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -137,6 +137,7 @@ test "@Type create slice with null sentinel" {
.is_volatile = false,
.is_allowzero = false,
.alignment = 8,
+ .address_space = .generic,
.child = *i32,
.sentinel = null,
},
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index 323dd18f4d..afefa7cf85 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -3,42 +3,6 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const Tag = std.meta.Tag;
-const Value = union(enum) {
- Int: u64,
- Array: [9]u8,
-};
-
-const Agg = struct {
- val1: Value,
- val2: Value,
-};
-
-const v1 = Value{ .Int = 1234 };
-const v2 = Value{ .Array = [_]u8{3} ** 9 };
-
-const err = @as(anyerror!Agg, Agg{
- .val1 = v1,
- .val2 = v2,
-});
-
-const array = [_]Value{
- v1,
- v2,
- v1,
- v2,
-};
-
-test "unions embedded in aggregate types" {
- switch (array[1]) {
- Value.Array => |arr| try expect(arr[4] == 3),
- else => unreachable,
- }
- switch ((err catch unreachable).val1) {
- Value.Int => |x| try expect(x == 1234),
- else => unreachable,
- }
-}
-
const Foo = union {
float: f64,
int: i32,
@@ -51,16 +15,6 @@ test "basic unions" {
try expect(foo.float == 12.34);
}
-test "comptime union field access" {
- comptime {
- var foo = Foo{ .int = 0 };
- try expect(foo.int == 0);
-
- foo = Foo{ .float = 42.42 };
- try expect(foo.float == 42.42);
- }
-}
-
test "init union with runtime value" {
var foo: Foo = undefined;
@@ -78,740 +32,3 @@ fn setFloat(foo: *Foo, x: f64) void {
fn setInt(foo: *Foo, x: i32) void {
foo.* = Foo{ .int = x };
}
-
-const FooExtern = extern union {
- float: f64,
- int: i32,
-};
-
-test "basic extern unions" {
- var foo = FooExtern{ .int = 1 };
- try expect(foo.int == 1);
- foo.float = 12.34;
- try expect(foo.float == 12.34);
-}
-
-const Letter = enum {
- A,
- B,
- C,
-};
-const Payload = union(Letter) {
- A: i32,
- B: f64,
- C: bool,
-};
-
-test "union with specified enum tag" {
- try doTest();
- comptime try doTest();
-}
-
-fn doTest() error{TestUnexpectedResult}!void {
- try expect((try bar(Payload{ .A = 1234 })) == -10);
-}
-
-fn bar(value: Payload) error{TestUnexpectedResult}!i32 {
- try expect(@as(Letter, value) == Letter.A);
- return switch (value) {
- Payload.A => |x| return x - 1244,
- Payload.B => |x| if (x == 12.34) @as(i32, 20) else 21,
- Payload.C => |x| if (x) @as(i32, 30) else 31,
- };
-}
-
-const MultipleChoice = union(enum(u32)) {
- A = 20,
- B = 40,
- C = 60,
- D = 1000,
-};
-test "simple union(enum(u32))" {
- var x = MultipleChoice.C;
- try expect(x == MultipleChoice.C);
- try expect(@enumToInt(@as(Tag(MultipleChoice), x)) == 60);
-}
-
-const MultipleChoice2 = union(enum(u32)) {
- Unspecified1: i32,
- A: f32 = 20,
- Unspecified2: void,
- B: bool = 40,
- Unspecified3: i32,
- C: i8 = 60,
- Unspecified4: void,
- D: void = 1000,
- Unspecified5: i32,
-};
-
-test "union(enum(u32)) with specified and unspecified tag values" {
- comptime try expect(Tag(Tag(MultipleChoice2)) == u32);
- try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
- comptime try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
-}
-
-fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) !void {
- try expect(@enumToInt(@as(Tag(MultipleChoice2), x)) == 60);
- try expect(1123 == switch (x) {
- MultipleChoice2.A => 1,
- MultipleChoice2.B => 2,
- MultipleChoice2.C => |v| @as(i32, 1000) + v,
- MultipleChoice2.D => 4,
- MultipleChoice2.Unspecified1 => 5,
- MultipleChoice2.Unspecified2 => 6,
- MultipleChoice2.Unspecified3 => 7,
- MultipleChoice2.Unspecified4 => 8,
- MultipleChoice2.Unspecified5 => 9,
- });
-}
-
-const ExternPtrOrInt = extern union {
- ptr: *u8,
- int: u64,
-};
-test "extern union size" {
- comptime try expect(@sizeOf(ExternPtrOrInt) == 8);
-}
-
-const PackedPtrOrInt = packed union {
- ptr: *u8,
- int: u64,
-};
-test "extern union size" {
- comptime try expect(@sizeOf(PackedPtrOrInt) == 8);
-}
-
-const ZeroBits = union {
- OnlyField: void,
-};
-test "union with only 1 field which is void should be zero bits" {
- comptime try expect(@sizeOf(ZeroBits) == 0);
-}
-
-const TheTag = enum {
- A,
- B,
- C,
-};
-const TheUnion = union(TheTag) {
- A: i32,
- B: i32,
- C: i32,
-};
-test "union field access gives the enum values" {
- try expect(TheUnion.A == TheTag.A);
- try expect(TheUnion.B == TheTag.B);
- try expect(TheUnion.C == TheTag.C);
-}
-
-test "cast union to tag type of union" {
- try testCastUnionToTag(TheUnion{ .B = 1234 });
- comptime try testCastUnionToTag(TheUnion{ .B = 1234 });
-}
-
-fn testCastUnionToTag(x: TheUnion) !void {
- try expect(@as(TheTag, x) == TheTag.B);
-}
-
-test "cast tag type of union to union" {
- var x: Value2 = Letter2.B;
- try expect(@as(Letter2, x) == Letter2.B);
-}
-const Letter2 = enum {
- A,
- B,
- C,
-};
-const Value2 = union(Letter2) {
- A: i32,
- B,
- C,
-};
-
-test "implicit cast union to its tag type" {
- var x: Value2 = Letter2.B;
- try expect(x == Letter2.B);
- try giveMeLetterB(x);
-}
-fn giveMeLetterB(x: Letter2) !void {
- try expect(x == Value2.B);
-}
-
-pub const PackThis = union(enum) {
- Invalid: bool,
- StringLiteral: u2,
-};
-
-test "constant packed union" {
- try testConstPackedUnion(&[_]PackThis{PackThis{ .StringLiteral = 1 }});
-}
-
-fn testConstPackedUnion(expected_tokens: []const PackThis) !void {
- try expect(expected_tokens[0].StringLiteral == 1);
-}
-
-test "switch on union with only 1 field" {
- var r: PartialInst = undefined;
- r = PartialInst.Compiled;
- switch (r) {
- PartialInst.Compiled => {
- var z: PartialInstWithPayload = undefined;
- z = PartialInstWithPayload{ .Compiled = 1234 };
- switch (z) {
- PartialInstWithPayload.Compiled => |x| {
- try expect(x == 1234);
- return;
- },
- }
- },
- }
- unreachable;
-}
-
-const PartialInst = union(enum) {
- Compiled,
-};
-
-const PartialInstWithPayload = union(enum) {
- Compiled: i32,
-};
-
-test "access a member of tagged union with conflicting enum tag name" {
- const Bar = union(enum) {
- A: A,
- B: B,
-
- const A = u8;
- const B = void;
- };
-
- comptime try expect(Bar.A == u8);
-}
-
-test "tagged union initialization with runtime void" {
- try expect(testTaggedUnionInit({}));
-}
-
-const TaggedUnionWithAVoid = union(enum) {
- A,
- B: i32,
-};
-
-fn testTaggedUnionInit(x: anytype) bool {
- const y = TaggedUnionWithAVoid{ .A = x };
- return @as(Tag(TaggedUnionWithAVoid), y) == TaggedUnionWithAVoid.A;
-}
-
-pub const UnionEnumNoPayloads = union(enum) {
- A,
- B,
-};
-
-test "tagged union with no payloads" {
- const a = UnionEnumNoPayloads{ .B = {} };
- switch (a) {
- Tag(UnionEnumNoPayloads).A => @panic("wrong"),
- Tag(UnionEnumNoPayloads).B => {},
- }
-}
-
-test "union with only 1 field casted to its enum type" {
- const Literal = union(enum) {
- Number: f64,
- Bool: bool,
- };
-
- const Expr = union(enum) {
- Literal: Literal,
- };
-
- var e = Expr{ .Literal = Literal{ .Bool = true } };
- const ExprTag = Tag(Expr);
- comptime try expect(Tag(ExprTag) == u0);
- var t = @as(ExprTag, e);
- try expect(t == Expr.Literal);
-}
-
-test "union with only 1 field casted to its enum type which has enum value specified" {
- const Literal = union(enum) {
- Number: f64,
- Bool: bool,
- };
-
- const ExprTag = enum(comptime_int) {
- Literal = 33,
- };
-
- const Expr = union(ExprTag) {
- Literal: Literal,
- };
-
- var e = Expr{ .Literal = Literal{ .Bool = true } };
- comptime try expect(Tag(ExprTag) == comptime_int);
- var t = @as(ExprTag, e);
- try expect(t == Expr.Literal);
- try expect(@enumToInt(t) == 33);
- comptime try expect(@enumToInt(t) == 33);
-}
-
-test "@enumToInt works on unions" {
- const Bar = union(enum) {
- A: bool,
- B: u8,
- C,
- };
-
- const a = Bar{ .A = true };
- var b = Bar{ .B = undefined };
- var c = Bar.C;
- try expect(@enumToInt(a) == 0);
- try expect(@enumToInt(b) == 1);
- try expect(@enumToInt(c) == 2);
-}
-
-const Attribute = union(enum) {
- A: bool,
- B: u8,
-};
-
-fn setAttribute(attr: Attribute) void {
- _ = attr;
-}
-
-fn Setter(attr: Attribute) type {
- return struct {
- fn set() void {
- setAttribute(attr);
- }
- };
-}
-
-test "comptime union field value equality" {
- const a0 = Setter(Attribute{ .A = false });
- const a1 = Setter(Attribute{ .A = true });
- const a2 = Setter(Attribute{ .A = false });
-
- const b0 = Setter(Attribute{ .B = 5 });
- const b1 = Setter(Attribute{ .B = 9 });
- const b2 = Setter(Attribute{ .B = 5 });
-
- try expect(a0 == a0);
- try expect(a1 == a1);
- try expect(a0 == a2);
-
- try expect(b0 == b0);
- try expect(b1 == b1);
- try expect(b0 == b2);
-
- try expect(a0 != b0);
- try expect(a0 != a1);
- try expect(b0 != b1);
-}
-
-test "return union init with void payload" {
- const S = struct {
- fn entry() !void {
- try expect(func().state == State.one);
- }
- const Outer = union(enum) {
- state: State,
- };
- const State = union(enum) {
- one: void,
- two: u32,
- };
- fn func() Outer {
- return Outer{ .state = State{ .one = {} } };
- }
- };
- try S.entry();
- comptime try S.entry();
-}
-
-test "@unionInit can modify a union type" {
- const UnionInitEnum = union(enum) {
- Boolean: bool,
- Byte: u8,
- };
-
- var value: UnionInitEnum = undefined;
-
- value = @unionInit(UnionInitEnum, "Boolean", true);
- try expect(value.Boolean == true);
- value.Boolean = false;
- try expect(value.Boolean == false);
-
- value = @unionInit(UnionInitEnum, "Byte", 2);
- try expect(value.Byte == 2);
- value.Byte = 3;
- try expect(value.Byte == 3);
-}
-
-test "@unionInit can modify a pointer value" {
- const UnionInitEnum = union(enum) {
- Boolean: bool,
- Byte: u8,
- };
-
- var value: UnionInitEnum = undefined;
- var value_ptr = &value;
-
- value_ptr.* = @unionInit(UnionInitEnum, "Boolean", true);
- try expect(value.Boolean == true);
-
- value_ptr.* = @unionInit(UnionInitEnum, "Byte", 2);
- try expect(value.Byte == 2);
-}
-
-test "union no tag with struct member" {
- const Struct = struct {};
- const Union = union {
- s: Struct,
- pub fn foo(self: *@This()) void {
- _ = self;
- }
- };
- var u = Union{ .s = Struct{} };
- u.foo();
-}
-
-fn testComparison() !void {
- var x = Payload{ .A = 42 };
- try expect(x == .A);
- try expect(x != .B);
- try expect(x != .C);
- try expect((x == .B) == false);
- try expect((x == .C) == false);
- try expect((x != .A) == false);
-}
-
-test "comparison between union and enum literal" {
- try testComparison();
- comptime try testComparison();
-}
-
-test "packed union generates correctly aligned LLVM type" {
- const U = packed union {
- f1: fn () error{TestUnexpectedResult}!void,
- f2: u32,
- };
- var foo = [_]U{
- U{ .f1 = doTest },
- U{ .f2 = 0 },
- };
- try foo[0].f1();
-}
-
-test "union with one member defaults to u0 tag type" {
- const U0 = union(enum) {
- X: u32,
- };
- comptime try expect(Tag(Tag(U0)) == u0);
-}
-
-test "union with comptime_int tag" {
- const Union = union(enum(comptime_int)) {
- X: u32,
- Y: u16,
- Z: u8,
- };
- comptime try expect(Tag(Tag(Union)) == comptime_int);
-}
-
-test "extern union doesn't trigger field check at comptime" {
- const U = extern union {
- x: u32,
- y: u8,
- };
-
- const x = U{ .x = 0x55AAAA55 };
- comptime try expect(x.y == 0x55);
-}
-
-const Foo1 = union(enum) {
- f: struct {
- x: usize,
- },
-};
-var glbl: Foo1 = undefined;
-
-test "global union with single field is correctly initialized" {
- glbl = Foo1{
- .f = @typeInfo(Foo1).Union.fields[0].field_type{ .x = 123 },
- };
- try expect(glbl.f.x == 123);
-}
-
-pub const FooUnion = union(enum) {
- U0: usize,
- U1: u8,
-};
-
-var glbl_array: [2]FooUnion = undefined;
-
-test "initialize global array of union" {
- glbl_array[1] = FooUnion{ .U1 = 2 };
- glbl_array[0] = FooUnion{ .U0 = 1 };
- try expect(glbl_array[0].U0 == 1);
- try expect(glbl_array[1].U1 == 2);
-}
-
-test "anonymous union literal syntax" {
- const S = struct {
- const Number = union {
- int: i32,
- float: f64,
- };
-
- fn doTheTest() !void {
- var i: Number = .{ .int = 42 };
- var f = makeNumber();
- try expect(i.int == 42);
- try expect(f.float == 12.34);
- }
-
- fn makeNumber() Number {
- return .{ .float = 12.34 };
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "update the tag value for zero-sized unions" {
- const S = union(enum) {
- U0: void,
- U1: void,
- };
- var x = S{ .U0 = {} };
- try expect(x == .U0);
- x = S{ .U1 = {} };
- try expect(x == .U1);
-}
-
-test "function call result coerces from tagged union to the tag" {
- const S = struct {
- const Arch = union(enum) {
- One,
- Two: usize,
- };
-
- const ArchTag = Tag(Arch);
-
- fn doTheTest() !void {
- var x: ArchTag = getArch1();
- try expect(x == .One);
-
- var y: ArchTag = getArch2();
- try expect(y == .Two);
- }
-
- pub fn getArch1() Arch {
- return .One;
- }
-
- pub fn getArch2() Arch {
- return .{ .Two = 99 };
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "0-sized extern union definition" {
- const U = extern union {
- a: void,
- const f = 1;
- };
-
- try expect(U.f == 1);
-}
-
-test "union initializer generates padding only if needed" {
- const U = union(enum) {
- A: u24,
- };
-
- var v = U{ .A = 532 };
- try expect(v.A == 532);
-}
-
-test "runtime tag name with single field" {
- const U = union(enum) {
- A: i32,
- };
-
- var v = U{ .A = 42 };
- try expect(std.mem.eql(u8, @tagName(v), "A"));
-}
-
-test "cast from anonymous struct to union" {
- const S = struct {
- const U = union(enum) {
- A: u32,
- B: []const u8,
- C: void,
- };
- fn doTheTest() !void {
- var y: u32 = 42;
- const t0 = .{ .A = 123 };
- const t1 = .{ .B = "foo" };
- const t2 = .{ .C = {} };
- const t3 = .{ .A = y };
- const x0: U = t0;
- var x1: U = t1;
- const x2: U = t2;
- var x3: U = t3;
- try expect(x0.A == 123);
- try expect(std.mem.eql(u8, x1.B, "foo"));
- try expect(x2 == .C);
- try expect(x3.A == y);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "cast from pointer to anonymous struct to pointer to union" {
- const S = struct {
- const U = union(enum) {
- A: u32,
- B: []const u8,
- C: void,
- };
- fn doTheTest() !void {
- var y: u32 = 42;
- const t0 = &.{ .A = 123 };
- const t1 = &.{ .B = "foo" };
- const t2 = &.{ .C = {} };
- const t3 = &.{ .A = y };
- const x0: *const U = t0;
- var x1: *const U = t1;
- const x2: *const U = t2;
- var x3: *const U = t3;
- try expect(x0.A == 123);
- try expect(std.mem.eql(u8, x1.B, "foo"));
- try expect(x2.* == .C);
- try expect(x3.A == y);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "method call on an empty union" {
- const S = struct {
- const MyUnion = union(MyUnionTag) {
- pub const MyUnionTag = enum { X1, X2 };
- X1: [0]u8,
- X2: [0]u8,
-
- pub fn useIt(self: *@This()) bool {
- _ = self;
- return true;
- }
- };
-
- fn doTheTest() !void {
- var u = MyUnion{ .X1 = [0]u8{} };
- try expect(u.useIt());
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "switching on non exhaustive union" {
- const S = struct {
- const E = enum(u8) {
- a,
- b,
- _,
- };
- const U = union(E) {
- a: i32,
- b: u32,
- };
- fn doTheTest() !void {
- var a = U{ .a = 2 };
- switch (a) {
- .a => |val| try expect(val == 2),
- .b => unreachable,
- }
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "containers with single-field enums" {
- const S = struct {
- const A = union(enum) { f1 };
- const B = union(enum) { f1: void };
- const C = struct { a: A };
- const D = struct { a: B };
-
- fn doTheTest() !void {
- var array1 = [1]A{A{ .f1 = {} }};
- var array2 = [1]B{B{ .f1 = {} }};
- try expect(array1[0] == .f1);
- try expect(array2[0] == .f1);
-
- var struct1 = C{ .a = A{ .f1 = {} } };
- var struct2 = D{ .a = B{ .f1 = {} } };
- try expect(struct1.a == .f1);
- try expect(struct2.a == .f1);
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "@unionInit on union w/ tag but no fields" {
- const S = struct {
- const Type = enum(u8) { no_op = 105 };
-
- const Data = union(Type) {
- no_op: void,
-
- pub fn decode(buf: []const u8) Data {
- _ = buf;
- return @unionInit(Data, "no_op", {});
- }
- };
-
- comptime {
- std.debug.assert(@sizeOf(Data) != 0);
- }
-
- fn doTheTest() !void {
- var data: Data = .{ .no_op = .{} };
- _ = data;
- var o = Data.decode(&[_]u8{});
- try expectEqual(Type.no_op, o);
- }
- };
-
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "union enum type gets a separate scope" {
- const S = struct {
- const U = union(enum) {
- a: u8,
- const foo = 1;
- };
-
- fn doTheTest() !void {
- try expect(!@hasDecl(Tag(U), "foo"));
- }
- };
-
- try S.doTheTest();
-}
-test "anytype union field: issue #9233" {
- const Baz = union(enum) { bar: anytype };
- _ = Baz;
-}
diff --git a/test/behavior/union_stage1.zig b/test/behavior/union_stage1.zig
new file mode 100644
index 0000000000..725d7bd028
--- /dev/null
+++ b/test/behavior/union_stage1.zig
@@ -0,0 +1,775 @@
+const std = @import("std");
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
+const Tag = std.meta.Tag;
+
+const Value = union(enum) {
+ Int: u64,
+ Array: [9]u8,
+};
+
+const Agg = struct {
+ val1: Value,
+ val2: Value,
+};
+
+const v1 = Value{ .Int = 1234 };
+const v2 = Value{ .Array = [_]u8{3} ** 9 };
+
+const err = @as(anyerror!Agg, Agg{
+ .val1 = v1,
+ .val2 = v2,
+});
+
+const array = [_]Value{ v1, v2, v1, v2 };
+
+test "unions embedded in aggregate types" {
+ switch (array[1]) {
+ Value.Array => |arr| try expect(arr[4] == 3),
+ else => unreachable,
+ }
+ switch ((err catch unreachable).val1) {
+ Value.Int => |x| try expect(x == 1234),
+ else => unreachable,
+ }
+}
+
+const Foo = union {
+ float: f64,
+ int: i32,
+};
+
+test "comptime union field access" {
+ comptime {
+ var foo = Foo{ .int = 0 };
+ try expect(foo.int == 0);
+
+ foo = Foo{ .float = 42.42 };
+ try expect(foo.float == 42.42);
+ }
+}
+
+const FooExtern = extern union {
+ float: f64,
+ int: i32,
+};
+
+test "basic extern unions" {
+ var foo = FooExtern{ .int = 1 };
+ try expect(foo.int == 1);
+ foo.float = 12.34;
+ try expect(foo.float == 12.34);
+}
+
+const Letter = enum { A, B, C };
+const Payload = union(Letter) {
+ A: i32,
+ B: f64,
+ C: bool,
+};
+
+test "union with specified enum tag" {
+ try doTest();
+ comptime try doTest();
+}
+
+fn doTest() error{TestUnexpectedResult}!void {
+ try expect((try bar(Payload{ .A = 1234 })) == -10);
+}
+
+fn bar(value: Payload) error{TestUnexpectedResult}!i32 {
+ try expect(@as(Letter, value) == Letter.A);
+ return switch (value) {
+ Payload.A => |x| return x - 1244,
+ Payload.B => |x| if (x == 12.34) @as(i32, 20) else 21,
+ Payload.C => |x| if (x) @as(i32, 30) else 31,
+ };
+}
+
+const MultipleChoice = union(enum(u32)) {
+ A = 20,
+ B = 40,
+ C = 60,
+ D = 1000,
+};
+test "simple union(enum(u32))" {
+ var x = MultipleChoice.C;
+ try expect(x == MultipleChoice.C);
+ try expect(@enumToInt(@as(Tag(MultipleChoice), x)) == 60);
+}
+
+const MultipleChoice2 = union(enum(u32)) {
+ Unspecified1: i32,
+ A: f32 = 20,
+ Unspecified2: void,
+ B: bool = 40,
+ Unspecified3: i32,
+ C: i8 = 60,
+ Unspecified4: void,
+ D: void = 1000,
+ Unspecified5: i32,
+};
+
+test "union(enum(u32)) with specified and unspecified tag values" {
+ comptime try expect(Tag(Tag(MultipleChoice2)) == u32);
+ try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
+ comptime try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
+}
+
+fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) !void {
+ try expect(@enumToInt(@as(Tag(MultipleChoice2), x)) == 60);
+ try expect(1123 == switch (x) {
+ MultipleChoice2.A => 1,
+ MultipleChoice2.B => 2,
+ MultipleChoice2.C => |v| @as(i32, 1000) + v,
+ MultipleChoice2.D => 4,
+ MultipleChoice2.Unspecified1 => 5,
+ MultipleChoice2.Unspecified2 => 6,
+ MultipleChoice2.Unspecified3 => 7,
+ MultipleChoice2.Unspecified4 => 8,
+ MultipleChoice2.Unspecified5 => 9,
+ });
+}
+
+const ExternPtrOrInt = extern union {
+ ptr: *u8,
+ int: u64,
+};
+test "extern union size" {
+ comptime try expect(@sizeOf(ExternPtrOrInt) == 8);
+}
+
+const PackedPtrOrInt = packed union {
+ ptr: *u8,
+ int: u64,
+};
+test "extern union size" {
+ comptime try expect(@sizeOf(PackedPtrOrInt) == 8);
+}
+
+const ZeroBits = union {
+ OnlyField: void,
+};
+test "union with only 1 field which is void should be zero bits" {
+ comptime try expect(@sizeOf(ZeroBits) == 0);
+}
+
+const TheTag = enum { A, B, C };
+const TheUnion = union(TheTag) {
+ A: i32,
+ B: i32,
+ C: i32,
+};
+test "union field access gives the enum values" {
+ try expect(TheUnion.A == TheTag.A);
+ try expect(TheUnion.B == TheTag.B);
+ try expect(TheUnion.C == TheTag.C);
+}
+
+test "cast union to tag type of union" {
+ try testCastUnionToTag();
+ comptime try testCastUnionToTag();
+}
+
+fn testCastUnionToTag() !void {
+ var u = TheUnion{ .B = 1234 };
+ try expect(@as(TheTag, u) == TheTag.B);
+}
+
+test "cast tag type of union to union" {
+ var x: Value2 = Letter2.B;
+ try expect(@as(Letter2, x) == Letter2.B);
+}
+const Letter2 = enum { A, B, C };
+const Value2 = union(Letter2) {
+ A: i32,
+ B,
+ C,
+};
+
+test "implicit cast union to its tag type" {
+ var x: Value2 = Letter2.B;
+ try expect(x == Letter2.B);
+ try giveMeLetterB(x);
+}
+fn giveMeLetterB(x: Letter2) !void {
+ try expect(x == Value2.B);
+}
+
+// TODO it looks like this test intended to test packed unions, but this is not a packed
+// union. go through git history and find out what happened.
+pub const PackThis = union(enum) {
+ Invalid: bool,
+ StringLiteral: u2,
+};
+
+test "constant packed union" {
+ try testConstPackedUnion(&[_]PackThis{PackThis{ .StringLiteral = 1 }});
+}
+
+fn testConstPackedUnion(expected_tokens: []const PackThis) !void {
+ try expect(expected_tokens[0].StringLiteral == 1);
+}
+
+test "switch on union with only 1 field" {
+ var r: PartialInst = undefined;
+ r = PartialInst.Compiled;
+ switch (r) {
+ PartialInst.Compiled => {
+ var z: PartialInstWithPayload = undefined;
+ z = PartialInstWithPayload{ .Compiled = 1234 };
+ switch (z) {
+ PartialInstWithPayload.Compiled => |x| {
+ try expect(x == 1234);
+ return;
+ },
+ }
+ },
+ }
+ unreachable;
+}
+
+const PartialInst = union(enum) {
+ Compiled,
+};
+
+const PartialInstWithPayload = union(enum) {
+ Compiled: i32,
+};
+
+test "access a member of tagged union with conflicting enum tag name" {
+ const Bar = union(enum) {
+ A: A,
+ B: B,
+
+ const A = u8;
+ const B = void;
+ };
+
+ comptime try expect(Bar.A == u8);
+}
+
+test "tagged union initialization with runtime void" {
+ try expect(testTaggedUnionInit({}));
+}
+
+const TaggedUnionWithAVoid = union(enum) {
+ A,
+ B: i32,
+};
+
+fn testTaggedUnionInit(x: anytype) bool {
+ const y = TaggedUnionWithAVoid{ .A = x };
+ return @as(Tag(TaggedUnionWithAVoid), y) == TaggedUnionWithAVoid.A;
+}
+
+pub const UnionEnumNoPayloads = union(enum) { A, B };
+
+test "tagged union with no payloads" {
+ const a = UnionEnumNoPayloads{ .B = {} };
+ switch (a) {
+ Tag(UnionEnumNoPayloads).A => @panic("wrong"),
+ Tag(UnionEnumNoPayloads).B => {},
+ }
+}
+
+test "union with only 1 field casted to its enum type" {
+ const Literal = union(enum) {
+ Number: f64,
+ Bool: bool,
+ };
+
+ const Expr = union(enum) {
+ Literal: Literal,
+ };
+
+ var e = Expr{ .Literal = Literal{ .Bool = true } };
+ const ExprTag = Tag(Expr);
+ comptime try expect(Tag(ExprTag) == u0);
+ var t = @as(ExprTag, e);
+ try expect(t == Expr.Literal);
+}
+
+test "union with only 1 field casted to its enum type which has enum value specified" {
+ const Literal = union(enum) {
+ Number: f64,
+ Bool: bool,
+ };
+
+ const ExprTag = enum(comptime_int) {
+ Literal = 33,
+ };
+
+ const Expr = union(ExprTag) {
+ Literal: Literal,
+ };
+
+ var e = Expr{ .Literal = Literal{ .Bool = true } };
+ comptime try expect(Tag(ExprTag) == comptime_int);
+ var t = @as(ExprTag, e);
+ try expect(t == Expr.Literal);
+ try expect(@enumToInt(t) == 33);
+ comptime try expect(@enumToInt(t) == 33);
+}
+
+test "@enumToInt works on unions" {
+ const Bar = union(enum) {
+ A: bool,
+ B: u8,
+ C,
+ };
+
+ const a = Bar{ .A = true };
+ var b = Bar{ .B = undefined };
+ var c = Bar.C;
+ try expect(@enumToInt(a) == 0);
+ try expect(@enumToInt(b) == 1);
+ try expect(@enumToInt(c) == 2);
+}
+
+const Attribute = union(enum) {
+ A: bool,
+ B: u8,
+};
+
+fn setAttribute(attr: Attribute) void {
+ _ = attr;
+}
+
+fn Setter(attr: Attribute) type {
+ return struct {
+ fn set() void {
+ setAttribute(attr);
+ }
+ };
+}
+
+test "comptime union field value equality" {
+ const a0 = Setter(Attribute{ .A = false });
+ const a1 = Setter(Attribute{ .A = true });
+ const a2 = Setter(Attribute{ .A = false });
+
+ const b0 = Setter(Attribute{ .B = 5 });
+ const b1 = Setter(Attribute{ .B = 9 });
+ const b2 = Setter(Attribute{ .B = 5 });
+
+ try expect(a0 == a0);
+ try expect(a1 == a1);
+ try expect(a0 == a2);
+
+ try expect(b0 == b0);
+ try expect(b1 == b1);
+ try expect(b0 == b2);
+
+ try expect(a0 != b0);
+ try expect(a0 != a1);
+ try expect(b0 != b1);
+}
+
+test "return union init with void payload" {
+ const S = struct {
+ fn entry() !void {
+ try expect(func().state == State.one);
+ }
+ const Outer = union(enum) {
+ state: State,
+ };
+ const State = union(enum) {
+ one: void,
+ two: u32,
+ };
+ fn func() Outer {
+ return Outer{ .state = State{ .one = {} } };
+ }
+ };
+ try S.entry();
+ comptime try S.entry();
+}
+
+test "@unionInit can modify a union type" {
+ const UnionInitEnum = union(enum) {
+ Boolean: bool,
+ Byte: u8,
+ };
+
+ var value: UnionInitEnum = undefined;
+
+ value = @unionInit(UnionInitEnum, "Boolean", true);
+ try expect(value.Boolean == true);
+ value.Boolean = false;
+ try expect(value.Boolean == false);
+
+ value = @unionInit(UnionInitEnum, "Byte", 2);
+ try expect(value.Byte == 2);
+ value.Byte = 3;
+ try expect(value.Byte == 3);
+}
+
+test "@unionInit can modify a pointer value" {
+ const UnionInitEnum = union(enum) {
+ Boolean: bool,
+ Byte: u8,
+ };
+
+ var value: UnionInitEnum = undefined;
+ var value_ptr = &value;
+
+ value_ptr.* = @unionInit(UnionInitEnum, "Boolean", true);
+ try expect(value.Boolean == true);
+
+ value_ptr.* = @unionInit(UnionInitEnum, "Byte", 2);
+ try expect(value.Byte == 2);
+}
+
+test "union no tag with struct member" {
+ const Struct = struct {};
+ const Union = union {
+ s: Struct,
+ pub fn foo(self: *@This()) void {
+ _ = self;
+ }
+ };
+ var u = Union{ .s = Struct{} };
+ u.foo();
+}
+
+fn testComparison() !void {
+ var x = Payload{ .A = 42 };
+ try expect(x == .A);
+ try expect(x != .B);
+ try expect(x != .C);
+ try expect((x == .B) == false);
+ try expect((x == .C) == false);
+ try expect((x != .A) == false);
+}
+
+test "comparison between union and enum literal" {
+ try testComparison();
+ comptime try testComparison();
+}
+
+test "packed union generates correctly aligned LLVM type" {
+ const U = packed union {
+ f1: fn () error{TestUnexpectedResult}!void,
+ f2: u32,
+ };
+ var foo = [_]U{
+ U{ .f1 = doTest },
+ U{ .f2 = 0 },
+ };
+ try foo[0].f1();
+}
+
+test "union with one member defaults to u0 tag type" {
+ const U0 = union(enum) {
+ X: u32,
+ };
+ comptime try expect(Tag(Tag(U0)) == u0);
+}
+
+test "union with comptime_int tag" {
+ const Union = union(enum(comptime_int)) {
+ X: u32,
+ Y: u16,
+ Z: u8,
+ };
+ comptime try expect(Tag(Tag(Union)) == comptime_int);
+}
+
+test "extern union doesn't trigger field check at comptime" {
+ const U = extern union {
+ x: u32,
+ y: u8,
+ };
+
+ const x = U{ .x = 0x55AAAA55 };
+ comptime try expect(x.y == 0x55);
+}
+
+const Foo1 = union(enum) {
+ f: struct {
+ x: usize,
+ },
+};
+var glbl: Foo1 = undefined;
+
+test "global union with single field is correctly initialized" {
+ glbl = Foo1{
+ .f = @typeInfo(Foo1).Union.fields[0].field_type{ .x = 123 },
+ };
+ try expect(glbl.f.x == 123);
+}
+
+pub const FooUnion = union(enum) {
+ U0: usize,
+ U1: u8,
+};
+
+var glbl_array: [2]FooUnion = undefined;
+
+test "initialize global array of union" {
+ glbl_array[1] = FooUnion{ .U1 = 2 };
+ glbl_array[0] = FooUnion{ .U0 = 1 };
+ try expect(glbl_array[0].U0 == 1);
+ try expect(glbl_array[1].U1 == 2);
+}
+
+test "anonymous union literal syntax" {
+ const S = struct {
+ const Number = union {
+ int: i32,
+ float: f64,
+ };
+
+ fn doTheTest() !void {
+ var i: Number = .{ .int = 42 };
+ var f = makeNumber();
+ try expect(i.int == 42);
+ try expect(f.float == 12.34);
+ }
+
+ fn makeNumber() Number {
+ return .{ .float = 12.34 };
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "update the tag value for zero-sized unions" {
+ const S = union(enum) {
+ U0: void,
+ U1: void,
+ };
+ var x = S{ .U0 = {} };
+ try expect(x == .U0);
+ x = S{ .U1 = {} };
+ try expect(x == .U1);
+}
+
+test "function call result coerces from tagged union to the tag" {
+ const S = struct {
+ const Arch = union(enum) {
+ One,
+ Two: usize,
+ };
+
+ const ArchTag = Tag(Arch);
+
+ fn doTheTest() !void {
+ var x: ArchTag = getArch1();
+ try expect(x == .One);
+
+ var y: ArchTag = getArch2();
+ try expect(y == .Two);
+ }
+
+ pub fn getArch1() Arch {
+ return .One;
+ }
+
+ pub fn getArch2() Arch {
+ return .{ .Two = 99 };
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "0-sized extern union definition" {
+ const U = extern union {
+ a: void,
+ const f = 1;
+ };
+
+ try expect(U.f == 1);
+}
+
+test "union initializer generates padding only if needed" {
+ const U = union(enum) {
+ A: u24,
+ };
+
+ var v = U{ .A = 532 };
+ try expect(v.A == 532);
+}
+
+test "runtime tag name with single field" {
+ const U = union(enum) {
+ A: i32,
+ };
+
+ var v = U{ .A = 42 };
+ try expect(std.mem.eql(u8, @tagName(v), "A"));
+}
+
+test "cast from anonymous struct to union" {
+ const S = struct {
+ const U = union(enum) {
+ A: u32,
+ B: []const u8,
+ C: void,
+ };
+ fn doTheTest() !void {
+ var y: u32 = 42;
+ const t0 = .{ .A = 123 };
+ const t1 = .{ .B = "foo" };
+ const t2 = .{ .C = {} };
+ const t3 = .{ .A = y };
+ const x0: U = t0;
+ var x1: U = t1;
+ const x2: U = t2;
+ var x3: U = t3;
+ try expect(x0.A == 123);
+ try expect(std.mem.eql(u8, x1.B, "foo"));
+ try expect(x2 == .C);
+ try expect(x3.A == y);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "cast from pointer to anonymous struct to pointer to union" {
+ const S = struct {
+ const U = union(enum) {
+ A: u32,
+ B: []const u8,
+ C: void,
+ };
+ fn doTheTest() !void {
+ var y: u32 = 42;
+ const t0 = &.{ .A = 123 };
+ const t1 = &.{ .B = "foo" };
+ const t2 = &.{ .C = {} };
+ const t3 = &.{ .A = y };
+ const x0: *const U = t0;
+ var x1: *const U = t1;
+ const x2: *const U = t2;
+ var x3: *const U = t3;
+ try expect(x0.A == 123);
+ try expect(std.mem.eql(u8, x1.B, "foo"));
+ try expect(x2.* == .C);
+ try expect(x3.A == y);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "method call on an empty union" {
+ const S = struct {
+ const MyUnion = union(MyUnionTag) {
+ pub const MyUnionTag = enum { X1, X2 };
+ X1: [0]u8,
+ X2: [0]u8,
+
+ pub fn useIt(self: *@This()) bool {
+ _ = self;
+ return true;
+ }
+ };
+
+ fn doTheTest() !void {
+ var u = MyUnion{ .X1 = [0]u8{} };
+ try expect(u.useIt());
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "switching on non exhaustive union" {
+ const S = struct {
+ const E = enum(u8) {
+ a,
+ b,
+ _,
+ };
+ const U = union(E) {
+ a: i32,
+ b: u32,
+ };
+ fn doTheTest() !void {
+ var a = U{ .a = 2 };
+ switch (a) {
+ .a => |val| try expect(val == 2),
+ .b => unreachable,
+ }
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "containers with single-field enums" {
+ const S = struct {
+ const A = union(enum) { f1 };
+ const B = union(enum) { f1: void };
+ const C = struct { a: A };
+ const D = struct { a: B };
+
+ fn doTheTest() !void {
+ var array1 = [1]A{A{ .f1 = {} }};
+ var array2 = [1]B{B{ .f1 = {} }};
+ try expect(array1[0] == .f1);
+ try expect(array2[0] == .f1);
+
+ var struct1 = C{ .a = A{ .f1 = {} } };
+ var struct2 = D{ .a = B{ .f1 = {} } };
+ try expect(struct1.a == .f1);
+ try expect(struct2.a == .f1);
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "@unionInit on union w/ tag but no fields" {
+ const S = struct {
+ const Type = enum(u8) { no_op = 105 };
+
+ const Data = union(Type) {
+ no_op: void,
+
+ pub fn decode(buf: []const u8) Data {
+ _ = buf;
+ return @unionInit(Data, "no_op", {});
+ }
+ };
+
+ comptime {
+ std.debug.assert(@sizeOf(Data) != 0);
+ }
+
+ fn doTheTest() !void {
+ var data: Data = .{ .no_op = .{} };
+ _ = data;
+ var o = Data.decode(&[_]u8{});
+ try expectEqual(Type.no_op, o);
+ }
+ };
+
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "union enum type gets a separate scope" {
+ const S = struct {
+ const U = union(enum) {
+ a: u8,
+ const foo = 1;
+ };
+
+ fn doTheTest() !void {
+ try expect(!@hasDecl(Tag(U), "foo"));
+ }
+ };
+
+ try S.doTheTest();
+}
+test "anytype union field: issue #9233" {
+ const Baz = union(enum) { bar: anytype };
+ _ = Baz;
+}
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 2c615b542b..b673542f8b 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -116,7 +116,7 @@ test "array to vector" {
_ = vec;
}
-test "vector casts of sizes not divisable by 8" {
+test "vector casts of sizes not divisible by 8" {
const S = struct {
fn doTheTest() !void {
{
diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig
index 19abf767b8..9c1694b368 100644
--- a/test/behavior/widening.zig
+++ b/test/behavior/widening.zig
@@ -19,6 +19,12 @@ test "implicit unsigned integer to signed integer" {
}
test "float widening" {
+ if (@import("builtin").zig_is_stage2) {
+ // This test is passing but it depends on compiler-rt symbols, which
+ // cannot yet be built with stage2 due to
+ // "TODO implement equality comparison between a union's tag value and an enum literal"
+ return error.SkipZigTest;
+ }
var a: f16 = 12.34;
var b: f32 = a;
var c: f64 = b;
@@ -29,9 +35,15 @@ test "float widening" {
}
test "float widening f16 to f128" {
+ if (@import("builtin").zig_is_stage2) {
+ // This test is passing but it depends on compiler-rt symbols, which
+ // cannot yet be built with stage2 due to
+ // "TODO implement equality comparison between a union's tag value and an enum literal"
+ return error.SkipZigTest;
+ }
// TODO https://github.com/ziglang/zig/issues/3282
- if (@import("builtin").target.cpu.arch == .aarch64) return error.SkipZigTest;
- if (@import("builtin").target.cpu.arch == .powerpc64le) return error.SkipZigTest;
+ if (@import("builtin").stage2_arch == .aarch64) return error.SkipZigTest;
+ if (@import("builtin").stage2_arch == .powerpc64le) return error.SkipZigTest;
var x: f16 = 12.34;
var y: f128 = x;
diff --git a/test/cases.zig b/test/cases.zig
index 64fe39e07b..3a8389f7d4 100644
--- a/test/cases.zig
+++ b/test/cases.zig
@@ -26,7 +26,7 @@ pub fn addCases(ctx: *TestContext) !void {
var case = ctx.exe("hello world with updates", linux_x64);
case.addError("", &[_][]const u8{
- ":90:9: error: struct 'tmp.tmp' has no member named 'main'",
+ ":97:9: error: struct 'tmp.tmp' has no member named 'main'",
});
// Incorrect return type
@@ -1807,4 +1807,16 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
, "");
}
+
+ {
+ var case = ctx.exe("setting an address space on a local variable", linux_x64);
+ case.addError(
+ \\export fn entry() i32 {
+ \\ var foo: i32 addrspace(".general") = 1234;
+ \\ return foo;
+ \\}
+ , &[_][]const u8{
+ ":2:28: error: cannot set address space of local variable 'foo'",
+ });
+ }
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 9fd125a775..4acd563da9 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -683,7 +683,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = u;
\\}
, &[_][]const u8{
- "tmp.zig:12:16: error: runtime cast to union 'U' from non-exhustive enum",
+ "tmp.zig:12:16: error: runtime cast to union 'U' from non-exhaustive enum",
"tmp.zig:17:16: error: no tag by value 15",
});
@@ -711,6 +711,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ .is_const = false,
\\ .is_volatile = false,
\\ .alignment = 1,
+ \\ .address_space = .generic,
\\ .child = u8,
\\ .is_allowzero = false,
\\ .sentinel = 0,
@@ -720,6 +721,23 @@ pub fn addCases(ctx: *TestContext) !void {
"tmp.zig:2:16: error: sentinels are only allowed on slices and unknown-length pointers",
});
+ ctx.objErrStage1("@Type(.Pointer) with invalid address space ",
+ \\export fn entry() void {
+ \\ _ = @Type(.{ .Pointer = .{
+ \\ .size = .One,
+ \\ .is_const = false,
+ \\ .is_volatile = false,
+ \\ .alignment = 1,
+ \\ .address_space = .gs,
+ \\ .child = u8,
+ \\ .is_allowzero = false,
+ \\ .sentinel = null,
+ \\ }});
+ \\}
+ , &[_][]const u8{
+ "tmp.zig:2:16: error: address space 'gs' not available in stage 1 compiler, must be .generic",
+ });
+
ctx.testErrStage1("helpful return type error message",
\\export fn foo() u32 {
\\ return error.Ohno;
@@ -6127,9 +6145,9 @@ pub fn addCases(ctx: *TestContext) !void {
});
ctx.objErrStage1("endless loop in function evaluation",
- \\const seventh_fib_number = fibbonaci(7);
- \\fn fibbonaci(x: i32) i32 {
- \\ return fibbonaci(x - 1) + fibbonaci(x - 2);
+ \\const seventh_fib_number = fibonacci(7);
+ \\fn fibonacci(x: i32) i32 {
+ \\ return fibonacci(x - 1) + fibonacci(x - 2);
\\}
\\
\\export fn entry() usize { return @sizeOf(@TypeOf(seventh_fib_number)); }
@@ -6757,7 +6775,7 @@ pub fn addCases(ctx: *TestContext) !void {
"tmp.zig:2:5: error: expression value is ignored",
});
- ctx.objErrStage1("ignored defered statement value",
+ ctx.objErrStage1("ignored deferred statement value",
\\export fn foo() void {
\\ defer {1;}
\\}
@@ -6765,7 +6783,7 @@ pub fn addCases(ctx: *TestContext) !void {
"tmp.zig:2:12: error: expression value is ignored",
});
- ctx.objErrStage1("ignored defered function call",
+ ctx.objErrStage1("ignored deferred function call",
\\export fn foo() void {
\\ defer bar();
\\}
@@ -8841,9 +8859,9 @@ pub fn addCases(ctx: *TestContext) !void {
"tmp.zig:3:12: note: crosses namespace boundary here",
});
- ctx.objErrStage1("Issue #9619: saturating arithmetic builtins should fail to compile when given floats",
+ ctx.objErrStage1("saturating arithmetic does not allow floats",
\\pub fn main() !void {
- \\ _ = @addWithSaturation(@as(f32, 1.0), @as(f32, 1.0));
+ \\ _ = @as(f32, 1.0) +| @as(f32, 1.0);
\\}
, &[_][]const u8{
"error: invalid operands to binary expression: 'f32' and 'f32'",
diff --git a/test/run_translated_c.zig b/test/run_translated_c.zig
index 28ba7aa704..c222a00eb7 100644
--- a/test/run_translated_c.zig
+++ b/test/run_translated_c.zig
@@ -24,7 +24,7 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
\\}
, "DEG2RAD is: 0.017453" ++ nl);
- cases.add("use global scope for record/enum/typedef type transalation if needed",
+ cases.add("use global scope for record/enum/typedef type translation if needed",
\\void bar(void);
\\void baz(void);
\\struct foo { int x; };
@@ -394,7 +394,7 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
\\}
, "");
- cases.add("ensure array casts outisde +=",
+ cases.add("ensure array casts outside +=",
\\#include
\\static int hash_binary(int k)
\\{
diff --git a/test/stage2/darwin.zig b/test/stage2/darwin.zig
index 86c2d313a0..84334828bd 100644
--- a/test/stage2/darwin.zig
+++ b/test/stage2/darwin.zig
@@ -12,9 +12,9 @@ pub fn addCases(ctx: *TestContext) !void {
.os_tag = .macos,
};
{
- var case = ctx.exe("hello world with updates", target);
+ var case = ctx.exe("darwin hello world with updates", target);
case.addError("", &[_][]const u8{
- ":90:9: error: struct 'tmp.tmp' has no member named 'main'",
+ ":97:9: error: struct 'tmp.tmp' has no member named 'main'",
});
// Incorrect return type
@@ -118,7 +118,7 @@ pub fn addCases(ctx: *TestContext) !void {
{
var case = ctx.exe("corner case - update existing, singular TextBlock", target);
- // This test case also covers an infrequent scenarion where the string table *may* be relocated
+ // This test case also covers an infrequent scenario where the string table *may* be relocated
// into the position preceeding the symbol table which results in a dyld error.
case.addCompareOutput(
\\extern fn exit(usize) noreturn;
diff --git a/test/stage2/llvm.zig b/test/stage2/llvm.zig
index 34f73b01c7..820768efe3 100644
--- a/test/stage2/llvm.zig
+++ b/test/stage2/llvm.zig
@@ -242,4 +242,184 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
, "");
}
+
+ {
+ var case = ctx.exeUsingLlvmBackend("invalid address space coercion", linux_x64);
+ case.addError(
+ \\fn entry(a: *addrspace(.gs) i32) *i32 {
+ \\ return a;
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ , &[_][]const u8{
+ ":2:12: error: expected *i32, found *addrspace(.gs) i32",
+ });
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("pointer keeps address space", linux_x64);
+ case.compiles(
+ \\fn entry(a: *addrspace(.gs) i32) *addrspace(.gs) i32 {
+ \\ return a;
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ );
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("pointer to explicit generic address space coerces to implicit pointer", linux_x64);
+ case.compiles(
+ \\fn entry(a: *addrspace(.generic) i32) *i32 {
+ \\ return a;
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ );
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("pointers with different address spaces", linux_x64);
+ case.addError(
+ \\fn entry(a: *addrspace(.gs) i32) *addrspace(.fs) i32 {
+ \\ return a;
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ , &[_][]const u8{
+ ":2:12: error: expected *addrspace(.fs) i32, found *addrspace(.gs) i32",
+ });
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("pointers with different address spaces", linux_x64);
+ case.addError(
+ \\fn entry(a: ?*addrspace(.gs) i32) *i32 {
+ \\ return a.?;
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ , &[_][]const u8{
+ ":2:13: error: expected *i32, found *addrspace(.gs) i32",
+ });
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("invalid pointer keeps address space when taking address of dereference", linux_x64);
+ case.addError(
+ \\fn entry(a: *addrspace(.gs) i32) *i32 {
+ \\ return &a.*;
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ , &[_][]const u8{
+ ":2:12: error: expected *i32, found *addrspace(.gs) i32",
+ });
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("pointer keeps address space when taking address of dereference", linux_x64);
+ case.compiles(
+ \\fn entry(a: *addrspace(.gs) i32) *addrspace(.gs) i32 {
+ \\ return &a.*;
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ );
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("address spaces pointer access chaining: array pointer", linux_x64);
+ case.compiles(
+ \\fn entry(a: *addrspace(.gs) [1]i32) *addrspace(.gs) i32 {
+ \\ return &a[0];
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ );
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("address spaces pointer access chaining: pointer to optional array", linux_x64);
+ case.compiles(
+ \\fn entry(a: *addrspace(.gs) ?[1]i32) *addrspace(.gs) i32 {
+ \\ return &a.*.?[0];
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ );
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("address spaces pointer access chaining: struct pointer", linux_x64);
+ case.compiles(
+ \\const A = struct{ a: i32 };
+ \\fn entry(a: *addrspace(.gs) A) *addrspace(.gs) i32 {
+ \\ return &a.a;
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ );
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("address spaces pointer access chaining: complex", linux_x64);
+ case.compiles(
+ \\const A = struct{ a: ?[1]i32 };
+ \\fn entry(a: *addrspace(.gs) [1]A) *addrspace(.gs) i32 {
+ \\ return &a[0].a.?[0];
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ );
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("dereferencing through multiple pointers with address spaces", linux_x64);
+ case.compiles(
+ \\fn entry(a: *addrspace(.fs) *addrspace(.gs) *i32) *i32 {
+ \\ return a.*.*;
+ \\}
+ \\pub export fn main() void { _ = entry; }
+ );
+ }
+
+ {
+ var case = ctx.exeUsingLlvmBackend("f segment address space reading and writing", linux_x64);
+ case.addCompareOutput(
+ \\fn assert(ok: bool) void {
+ \\ if (!ok) unreachable;
+ \\}
+ \\
+ \\fn setFs(value: c_ulong) void {
+ \\ asm volatile (
+ \\ \\syscall
+ \\ :
+ \\ : [number] "{rax}" (158),
+ \\ [code] "{rdi}" (0x1002),
+ \\ [val] "{rsi}" (value),
+ \\ : "rcx", "r11", "memory"
+ \\ );
+ \\}
+ \\
+ \\fn getFs() c_ulong {
+ \\ var result: c_ulong = undefined;
+ \\ asm volatile (
+ \\ \\syscall
+ \\ :
+ \\ : [number] "{rax}" (158),
+ \\ [code] "{rdi}" (0x1003),
+ \\ [ptr] "{rsi}" (@ptrToInt(&result)),
+ \\ : "rcx", "r11", "memory"
+ \\ );
+ \\ return result;
+ \\}
+ \\
+ \\var test_value: u64 = 12345;
+ \\
+ \\pub export fn main() c_int {
+ \\ const orig_fs = getFs();
+ \\
+ \\ setFs(@ptrToInt(&test_value));
+ \\ assert(getFs() == @ptrToInt(&test_value));
+ \\
+ \\ var test_ptr = @intToPtr(*allowzero addrspace(.fs) u64, 0);
+ \\ assert(test_ptr.* == 12345);
+ \\ test_ptr.* = 98765;
+ \\ assert(test_value == 98765);
+ \\
+ \\ setFs(orig_fs);
+ \\ return 0;
+ \\}
+ , "");
+ }
}
diff --git a/test/standalone.zig b/test/standalone.zig
index 6c074642dd..c5e12f1dcc 100644
--- a/test/standalone.zig
+++ b/test/standalone.zig
@@ -28,6 +28,7 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
cases.addBuildFile("test/standalone/empty_env/build.zig", .{});
cases.addBuildFile("test/standalone/issue_7030/build.zig", .{});
cases.addBuildFile("test/standalone/install_raw_hex/build.zig", .{});
+ cases.addBuildFile("test/standalone/issue_9812/build.zig", .{});
if (std.Target.current.os.tag != .wasi) {
cases.addBuildFile("test/standalone/load_dynamic_library/build.zig", .{});
}
@@ -36,6 +37,9 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
}
cases.addBuildFile("test/standalone/c_compiler/build.zig", .{ .build_modes = true, .cross_targets = true });
+ if (std.Target.current.os.tag == .windows) {
+ cases.addC("test/standalone/issue_9402/main.zig");
+ }
// Try to build and run a PIE executable.
if (std.Target.current.os.tag == .linux) {
cases.addBuildFile("test/standalone/pie/build.zig", .{});
diff --git a/test/standalone/issue_9402/main.zig b/test/standalone/issue_9402/main.zig
new file mode 100644
index 0000000000..eea6bbf4b5
--- /dev/null
+++ b/test/standalone/issue_9402/main.zig
@@ -0,0 +1,14 @@
+const FILE = extern struct {
+ dummy_field: u8,
+};
+
+extern fn _ftelli64([*c]FILE) i64;
+extern fn _fseeki64([*c]FILE, i64, c_int) c_int;
+
+pub export fn main(argc: c_int, argv: **u8) c_int {
+ _ = argv;
+ _ = argc;
+ _ = _ftelli64(null);
+ _ = _fseeki64(null, 123, 2);
+ return 0;
+}
diff --git a/test/standalone/issue_9812/build.zig b/test/standalone/issue_9812/build.zig
new file mode 100644
index 0000000000..de13ada8ec
--- /dev/null
+++ b/test/standalone/issue_9812/build.zig
@@ -0,0 +1,16 @@
+const std = @import("std");
+
+pub fn build(b: *std.build.Builder) !void {
+ const mode = b.standardReleaseOptions();
+ const zip_add = b.addTest("main.zig");
+ zip_add.setBuildMode(mode);
+ zip_add.addCSourceFile("vendor/kuba-zip/zip.c", &[_][]const u8{
+ "-std=c99",
+ "-fno-sanitize=undefined",
+ });
+ zip_add.addIncludeDir("vendor/kuba-zip");
+ zip_add.linkLibC();
+
+ const test_step = b.step("test", "Test it");
+ test_step.dependOn(&zip_add.step);
+}
diff --git a/test/standalone/issue_9812/main.zig b/test/standalone/issue_9812/main.zig
new file mode 100644
index 0000000000..70899c9326
--- /dev/null
+++ b/test/standalone/issue_9812/main.zig
@@ -0,0 +1,45 @@
+const std = @import("std");
+const testing = std.testing;
+
+const c = @cImport({
+ @cInclude("zip.h");
+});
+
+const Error = error{
+ FailedToWriteEntry,
+ FileNotFound,
+ FailedToCreateEntry,
+ Overflow,
+ OutOfMemory,
+ InvalidCmdLine,
+};
+
+test "" {
+ const allocator = std.heap.c_allocator;
+
+ const args = try std.process.argsAlloc(allocator);
+ if (args.len != 4) {
+ return;
+ }
+
+ const zip_file = args[1];
+ const src_file_name = args[2];
+ const dst_file_name = args[3];
+
+ errdefer |e| switch (@as(Error, e)) {
+ error.FailedToWriteEntry => std.log.err("could not find {s}", .{src_file_name}),
+ error.FileNotFound => std.log.err("could not open {s}", .{zip_file}),
+ error.FailedToCreateEntry => std.log.err("could not create {s}", .{dst_file_name}),
+ else => {},
+ };
+
+ const zip = c.zip_open(zip_file, c.ZIP_DEFAULT_COMPRESSION_LEVEL, 'a') orelse return error.FileNotFound;
+ defer c.zip_close(zip);
+
+ if (c.zip_entry_open(zip, dst_file_name) < 0)
+ return error.FailedToCreateEntry;
+ defer _ = c.zip_entry_close(zip);
+
+ if (c.zip_entry_fwrite(zip, src_file_name) < 0)
+ return error.FailedToWriteEntry;
+}
diff --git a/test/standalone/issue_9812/vendor/kuba-zip/miniz.h b/test/standalone/issue_9812/vendor/kuba-zip/miniz.h
new file mode 100644
index 0000000000..3bbe2d937b
--- /dev/null
+++ b/test/standalone/issue_9812/vendor/kuba-zip/miniz.h
@@ -0,0 +1,6875 @@
+/*
+ miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
+ reading/writing/appending, PNG writing See "unlicense" statement at the end
+ of this file. Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13,
+ 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
+ http://www.ietf.org/rfc/rfc1951.txt
+
+ Most API's defined in miniz.c are optional. For example, to disable the
+ archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of
+ all stdio usage define MINIZ_NO_STDIO (see the list below for more macros).
+
+ * Change History
+ 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
+ release with Zip64 support (almost there!):
+ - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
+ (thanks kahmyong.moon@hp.com) which could cause locate files to not find
+ files. This bug would only have occured in earlier versions if you explicitly
+ used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or
+ mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you
+ can't switch to v1.15 but want to fix this bug, just remove the uses of this
+ flag from both helper funcs (and of course don't use the flag).
+ - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
+ pUser_read_buf is not NULL and compressed size is > uncompressed size
+ - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
+ compressed data from directory entries, to account for weird zipfiles which
+ contain zero-size compressed data on dir entries. Hopefully this fix won't
+ cause any issues on weird zip archives, because it assumes the low 16-bits of
+ zip external attributes are DOS attributes (which I believe they always are
+ in practice).
+ - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
+ internal attributes, just the filename and external attributes
+ - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
+ - Added cmake support for Linux builds which builds all the examples,
+ tested with clang v3.3 and gcc v4.6.
+ - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
+ - Merged MZ_FORCEINLINE fix from hdeanclark
+ - Fix include before config #ifdef, thanks emil.brink
+ - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
+ (super useful for OpenGL apps), and explicit control over the compression
+ level (so you can set it to 1 for real-time compression).
+ - Merged in some compiler fixes from paulharris's github repro.
+ - Retested this build under Windows (VS 2010, including static analysis),
+ tcc 0.9.26, gcc v4.6 and clang v3.3.
+ - Added example6.c, which dumps an image of the mandelbrot set to a PNG
+ file.
+ - Modified example2 to help test the
+ MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
+ - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
+ possible src file fclose() leak if alignment bytes+local header file write
+ faiiled
+ - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the
+ wrong central dir header offset, appears harmless in this release, but it
+ became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1
+ compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect).
+ 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
+ mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
+ - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
+ re-ran a randomized regression test on ~500k files.
+ - Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
+ - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
+ (static analysis) option and fixed all warnings (except for the silly "Use of
+ the comma-operator in a tested expression.." analysis warning, which I
+ purposely use to work around a MSVC compiler warning).
+ - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
+ tested Linux executables. The codeblocks workspace is compatible with
+ Linux+Win32/x64.
+ - Added miniz_tester solution/project, which is a useful little app
+ derived from LZHAM's tester app that I use as part of the regression test.
+ - Ran miniz.c and tinfl.c through another series of regression testing on
+ ~500,000 files and archives.
+ - Modified example5.c so it purposely disables a bunch of high-level
+ functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
+ MINIZ_NO_STDIO bug report.)
+ - Fix ftell() usage in examples so they exit with an error on files which
+ are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12
+ - More comments, added low-level example5.c, fixed a couple minor
+ level_and_flags issues in the archive API's. level_and_flags can now be set
+ to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com>
+ for the feedback/bug report. 5/28/11 v1.11 - Added statement from
+ unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations:
+ - Level 1 is now ~4x faster than before. The L1 compressor's throughput
+ now varies between 70-110MB/sec. on a
+ - Core i7 (actual throughput varies depending on the type of data, and x64
+ vs. x86).
+ - Improved baseline L2-L9 compression perf. Also, greatly improved
+ compression perf. issues on some file types.
+ - Refactored the compression code for better readability and
+ maintainability.
+ - Added level 10 compression level (L10 has slightly better ratio than
+ level 9, but could have a potentially large drop in throughput on some
+ files). 5/15/11 v1.09 - Initial stable release.
+
+ * Low-level Deflate/Inflate implementation notes:
+
+ Compression: Use the "tdefl" API's. The compressor supports raw, static,
+ and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only,
+ and Huffman-only streams. It performs and compresses approximately as well as
+ zlib.
+
+ Decompression: Use the "tinfl" API's. The entire decompressor is
+ implemented as a single function coroutine: see tinfl_decompress(). It
+ supports decompression into a 32KB (or larger power of 2) wrapping buffer, or
+ into a memory block large enough to hold the entire file.
+
+ The low-level tdefl/tinfl API's do not make any use of dynamic memory
+ allocation.
+
+ * zlib-style API notes:
+
+ miniz.c implements a fairly large subset of zlib. There's enough
+ functionality present for it to be a drop-in zlib replacement in many apps:
+ The z_stream struct, optional memory allocation callbacks
+ deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
+ inflateInit/inflateInit2/inflate/inflateEnd
+ compress, compress2, compressBound, uncompress
+ CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
+ routines. Supports raw deflate streams or standard zlib streams with adler-32
+ checking.
+
+ Limitations:
+ The callback API's are not implemented yet. No support for gzip headers or
+ zlib static dictionaries. I've tried to closely emulate zlib's various
+ flavors of stream flushing and return status codes, but there are no
+ guarantees that miniz.c pulls this off perfectly.
+
+ * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
+ originally written by Alex Evans. Supports 1-4 bytes/pixel images.
+
+ * ZIP archive API notes:
+
+ The ZIP archive API's where designed with simplicity and efficiency in
+ mind, with just enough abstraction to get the job done with minimal fuss.
+ There are simple API's to retrieve file information, read files from existing
+ archives, create new archives, append new files to existing archives, or
+ clone archive data from one archive to another. It supports archives located
+ in memory or the heap, on disk (using stdio.h), or you can specify custom
+ file read/write callbacks.
+
+ - Archive reading: Just call this function to read a single file from a
+ disk archive:
+
+ void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
+ char *pArchive_name, size_t *pSize, mz_uint zip_flags);
+
+ For more complex cases, use the "mz_zip_reader" functions. Upon opening an
+ archive, the entire central directory is located and read as-is into memory,
+ and subsequent file access only occurs when reading individual files.
+
+ - Archives file scanning: The simple way is to use this function to scan a
+ loaded archive for a specific file:
+
+ int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
+ const char *pComment, mz_uint flags);
+
+ The locate operation can optionally check file comments too, which (as one
+ example) can be used to identify multiple versions of the same file in an
+ archive. This function uses a simple linear search through the central
+ directory, so it's not very fast.
+
+ Alternately, you can iterate through all the files in an archive (using
+ mz_zip_reader_get_num_files()) and retrieve detailed info on each file by
+ calling mz_zip_reader_file_stat().
+
+ - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
+ immediately writes compressed file data to disk and builds an exact image of
+ the central directory in memory. The central directory image is written all
+ at once at the end of the archive file when the archive is finalized.
+
+ The archive writer can optionally align each file's local header and file
+ data to any power of 2 alignment, which can be useful when the archive will
+ be read from optical media. Also, the writer supports placing arbitrary data
+ blobs at the very beginning of ZIP archives. Archives written using either
+ feature are still readable by any ZIP tool.
+
+ - Archive appending: The simple way to add a single file to an archive is
+ to call this function:
+
+ mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
+ const char *pArchive_name, const void *pBuf, size_t buf_size, const void
+ *pComment, mz_uint16 comment_size, mz_uint level_and_flags);
+
+ The archive will be created if it doesn't already exist, otherwise it'll be
+ appended to. Note the appending is done in-place and is not an atomic
+ operation, so if something goes wrong during the operation it's possible the
+ archive could be left without a central directory (although the local file
+ headers and file data will be fine, so the archive will be recoverable).
+
+ For more complex archive modification scenarios:
+ 1. The safest way is to use a mz_zip_reader to read the existing archive,
+ cloning only those bits you want to preserve into a new archive using using
+ the mz_zip_writer_add_from_zip_reader() function (which compiles the
+ compressed file data as-is). When you're done, delete the old archive and
+ rename the newly written archive, and you're done. This is safe but requires
+ a bunch of temporary disk space or heap memory.
+
+ 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
+ mz_zip_writer_init_from_reader(), append new files as needed, then finalize
+ the archive which will write an updated central directory to the original
+ archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place()
+ does.) There's a possibility that the archive's central directory could be
+ lost with this method if anything goes wrong, though.
+
+ - ZIP archive support limitations:
+ No zip64 or spanning support. Extraction functions can only handle
+ unencrypted, stored or deflated files. Requires streams capable of seeking.
+
+ * This is a header file library, like stb_image.c. To get only a header file,
+ either cut and paste the below header, or create miniz.h, #define
+ MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it.
+
+ * Important: For best perf. be sure to customize the below macros for your
+ target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define
+ MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1
+
+ * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
+ including miniz.c to ensure miniz uses the 64-bit variants: fopen64(),
+ stat64(), etc. Otherwise you won't be able to process large files (i.e.
+ 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
+*/
+
+#ifndef MINIZ_HEADER_INCLUDED
+#define MINIZ_HEADER_INCLUDED
+
+#include <stdlib.h>
+#include <string.h>
+
+// Defines to completely disable specific portions of miniz.c:
+// If all macros here are defined the only functionality remaining will be
+// CRC-32, adler-32, tinfl, and tdefl.
+
+// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
+// stdio for file I/O.
+//#define MINIZ_NO_STDIO
+
+// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
+// to get the current time, or get/set file times, and the C run-time funcs that
+// get/set times won't be called. The current downside is the times written to
+// your archives will be from 1979.
+//#define MINIZ_NO_TIME
+
+// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
+//#define MINIZ_NO_ARCHIVE_APIS
+
+// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive
+// API's.
+//#define MINIZ_NO_ARCHIVE_WRITING_APIS
+
+// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
+// API's.
+//#define MINIZ_NO_ZLIB_APIS
+
+// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
+// conflicts against stock zlib.
+//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
+
+// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
+// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
+// user alloc/free/realloc callbacks to the zlib and archive API's, and a few
+// stand-alone helper API's which don't provide custom user functions (such as
+// tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't work.
+//#define MINIZ_NO_MALLOC
+
+#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
+// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
+// on Linux
+#define MINIZ_NO_TIME
+#endif
+
+#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
+#include <time.h>
+#endif
+
+#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
+ defined(__i386) || defined(__i486__) || defined(__i486) || \
+ defined(i386) || defined(__ia64__) || defined(__x86_64__)
+// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
+#define MINIZ_X86_OR_X64_CPU 1
+#endif
+
+#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
+// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
+#define MINIZ_LITTLE_ENDIAN 1
+#endif
+
+/* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES only if not set */
+#if !defined(MINIZ_USE_UNALIGNED_LOADS_AND_STORES)
+#if MINIZ_X86_OR_X64_CPU
+/* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
+ * integer loads and stores from unaligned addresses. */
+#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
+#define MINIZ_UNALIGNED_USE_MEMCPY
+#else
+#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0
+#endif
+#endif
+
+#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
+ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
+ defined(__x86_64__)
+// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
+// reasonably fast (and don't involve compiler generated calls to helper
+// functions).
+#define MINIZ_HAS_64BIT_REGISTERS 1
+#endif
+
+#ifdef __APPLE__
+#define ftello64 ftello
+#define fseeko64 fseeko
+#define fopen64 fopen
+#define freopen64 freopen
+
+// Darwin OSX
+#define MZ_PLATFORM 19
+#endif
+
+#ifndef MZ_PLATFORM
+#if defined(_WIN64) || defined(_WIN32) || defined(__WIN32__)
+#define MZ_PLATFORM 0
+#else
+// UNIX
+#define MZ_PLATFORM 3
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// ------------------- zlib-style API Definitions.
+
+// For more compatibility with zlib, miniz.c uses unsigned long for some
+// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
+typedef unsigned long mz_ulong;
+
+// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
+// unless you've modified the MZ_MALLOC macro) to release a block allocated from
+// the heap.
+void mz_free(void *p);
+
+#define MZ_ADLER32_INIT (1)
+// mz_adler32() returns the initial adler-32 value to use when called with
+// ptr==NULL.
+mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
+
+#define MZ_CRC32_INIT (0)
+// mz_crc32() returns the initial CRC-32 value to use when called with
+// ptr==NULL.
+mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
+
+// Compression strategies.
+enum {
+ MZ_DEFAULT_STRATEGY = 0,
+ MZ_FILTERED = 1,
+ MZ_HUFFMAN_ONLY = 2,
+ MZ_RLE = 3,
+ MZ_FIXED = 4
+};
+
+/* miniz error codes. Be sure to update mz_zip_get_error_string() if you add or
+ * modify this enum. */
+typedef enum {
+ MZ_ZIP_NO_ERROR = 0,
+ MZ_ZIP_UNDEFINED_ERROR,
+ MZ_ZIP_TOO_MANY_FILES,
+ MZ_ZIP_FILE_TOO_LARGE,
+ MZ_ZIP_UNSUPPORTED_METHOD,
+ MZ_ZIP_UNSUPPORTED_ENCRYPTION,
+ MZ_ZIP_UNSUPPORTED_FEATURE,
+ MZ_ZIP_FAILED_FINDING_CENTRAL_DIR,
+ MZ_ZIP_NOT_AN_ARCHIVE,
+ MZ_ZIP_INVALID_HEADER_OR_CORRUPTED,
+ MZ_ZIP_UNSUPPORTED_MULTIDISK,
+ MZ_ZIP_DECOMPRESSION_FAILED,
+ MZ_ZIP_COMPRESSION_FAILED,
+ MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE,
+ MZ_ZIP_CRC_CHECK_FAILED,
+ MZ_ZIP_UNSUPPORTED_CDIR_SIZE,
+ MZ_ZIP_ALLOC_FAILED,
+ MZ_ZIP_FILE_OPEN_FAILED,
+ MZ_ZIP_FILE_CREATE_FAILED,
+ MZ_ZIP_FILE_WRITE_FAILED,
+ MZ_ZIP_FILE_READ_FAILED,
+ MZ_ZIP_FILE_CLOSE_FAILED,
+ MZ_ZIP_FILE_SEEK_FAILED,
+ MZ_ZIP_FILE_STAT_FAILED,
+ MZ_ZIP_INVALID_PARAMETER,
+ MZ_ZIP_INVALID_FILENAME,
+ MZ_ZIP_BUF_TOO_SMALL,
+ MZ_ZIP_INTERNAL_ERROR,
+ MZ_ZIP_FILE_NOT_FOUND,
+ MZ_ZIP_ARCHIVE_TOO_LARGE,
+ MZ_ZIP_VALIDATION_FAILED,
+ MZ_ZIP_WRITE_CALLBACK_FAILED,
+ MZ_ZIP_TOTAL_ERRORS
+} mz_zip_error;
+
+// Method
+#define MZ_DEFLATED 8
+
+#ifndef MINIZ_NO_ZLIB_APIS
+
+// Heap allocation callbacks.
+// Note that mz_alloc_func parameter types purposely differ from zlib's:
+// items/size is size_t, not unsigned long.
+typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
+typedef void (*mz_free_func)(void *opaque, void *address);
+typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
+ size_t size);
+
+#define MZ_VERSION "9.1.15"
+#define MZ_VERNUM 0x91F0
+#define MZ_VER_MAJOR 9
+#define MZ_VER_MINOR 1
+#define MZ_VER_REVISION 15
+#define MZ_VER_SUBREVISION 0
+
+// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
+// other values are for advanced use (refer to the zlib docs).
+enum {
+ MZ_NO_FLUSH = 0,
+ MZ_PARTIAL_FLUSH = 1,
+ MZ_SYNC_FLUSH = 2,
+ MZ_FULL_FLUSH = 3,
+ MZ_FINISH = 4,
+ MZ_BLOCK = 5
+};
+
+// Return status codes. MZ_PARAM_ERROR is non-standard.
+enum {
+ MZ_OK = 0,
+ MZ_STREAM_END = 1,
+ MZ_NEED_DICT = 2,
+ MZ_ERRNO = -1,
+ MZ_STREAM_ERROR = -2,
+ MZ_DATA_ERROR = -3,
+ MZ_MEM_ERROR = -4,
+ MZ_BUF_ERROR = -5,
+ MZ_VERSION_ERROR = -6,
+ MZ_PARAM_ERROR = -10000
+};
+
+// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
+// possible compression (not zlib compatible, and may be very slow),
+// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
+enum {
+ MZ_NO_COMPRESSION = 0,
+ MZ_BEST_SPEED = 1,
+ MZ_BEST_COMPRESSION = 9,
+ MZ_UBER_COMPRESSION = 10,
+ MZ_DEFAULT_LEVEL = 6,
+ MZ_DEFAULT_COMPRESSION = -1
+};
+
+// Window bits
+#define MZ_DEFAULT_WINDOW_BITS 15
+
+struct mz_internal_state;
+
+// Compression/decompression stream struct.
+typedef struct mz_stream_s {
+ const unsigned char *next_in; // pointer to next byte to read
+ unsigned int avail_in; // number of bytes available at next_in
+ mz_ulong total_in; // total number of bytes consumed so far
+
+ unsigned char *next_out; // pointer to next byte to write
+ unsigned int avail_out; // number of bytes that can be written to next_out
+ mz_ulong total_out; // total number of bytes produced so far
+
+ char *msg; // error msg (unused)
+ struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
+
+ mz_alloc_func
+ zalloc; // optional heap allocation function (defaults to malloc)
+ mz_free_func zfree; // optional heap free function (defaults to free)
+ void *opaque; // heap alloc function user pointer
+
+ int data_type; // data_type (unused)
+ mz_ulong adler; // adler32 of the source or uncompressed data
+ mz_ulong reserved; // not used
+} mz_stream;
+
+typedef mz_stream *mz_streamp;
+
+// Returns the version string of miniz.c.
+const char *mz_version(void);
+
+// mz_deflateInit() initializes a compressor with default options:
+// Parameters:
+// pStream must point to an initialized mz_stream struct.
+// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
+// level 1 enables a specially optimized compression function that's been
+// optimized purely for performance, not ratio. (This special func. is
+// currently only enabled when MINIZ_USE_UNALIGNED_LOADS_AND_STORES and
+// MINIZ_LITTLE_ENDIAN are defined.)
+// Return values:
+// MZ_OK on success.
+// MZ_STREAM_ERROR if the stream is bogus.
+// MZ_PARAM_ERROR if the input parameters are bogus.
+// MZ_MEM_ERROR on out of memory.
+int mz_deflateInit(mz_streamp pStream, int level);
+
+// mz_deflateInit2() is like mz_deflate(), except with more control:
+// Additional parameters:
+// method must be MZ_DEFLATED
+// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
+// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
+// header or footer) mem_level must be between [1, 9] (it's checked but
+// ignored by miniz.c)
+int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
+ int mem_level, int strategy);
+
+// Quickly resets a compressor without having to reallocate anything. Same as
+// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
+int mz_deflateReset(mz_streamp pStream);
+
+// mz_deflate() compresses the input to output, consuming as much of the input
+// and producing as much output as possible. Parameters:
+// pStream is the stream to read from and write to. You must initialize/update
+// the next_in, avail_in, next_out, and avail_out members. flush may be
+// MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or MZ_FINISH.
+// Return values:
+// MZ_OK on success (when flushing, or if more input is needed but not
+// available, and/or there's more output to be written but the output buffer
+// is full). MZ_STREAM_END if all input has been consumed and all output bytes
+// have been written. Don't call mz_deflate() on the stream anymore.
+// MZ_STREAM_ERROR if the stream is bogus.
+// MZ_PARAM_ERROR if one of the parameters is invalid.
+// MZ_BUF_ERROR if no forward progress is possible because the input and/or
+// output buffers are empty. (Fill up the input buffer or free up some output
+// space and try again.)
+int mz_deflate(mz_streamp pStream, int flush);
+
+// mz_deflateEnd() deinitializes a compressor:
+// Return values:
+// MZ_OK on success.
+// MZ_STREAM_ERROR if the stream is bogus.
+int mz_deflateEnd(mz_streamp pStream);
+
+// mz_deflateBound() returns a (very) conservative upper bound on the amount of
+// data that could be generated by deflate(), assuming flush is set to only
+// MZ_NO_FLUSH or MZ_FINISH.
+mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
+
+// Single-call compression functions mz_compress() and mz_compress2():
+// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
+// failure.
+int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
+ const unsigned char *pSource, mz_ulong source_len);
+int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
+ const unsigned char *pSource, mz_ulong source_len, int level);
+
+// mz_compressBound() returns a (very) conservative upper bound on the amount of
+// data that could be generated by calling mz_compress().
+mz_ulong mz_compressBound(mz_ulong source_len);
+
+// Initializes a decompressor.
+int mz_inflateInit(mz_streamp pStream);
+
+// mz_inflateInit2() is like mz_inflateInit() with an additional option that
+// controls the window size and whether or not the stream has been wrapped with
+// a zlib header/footer: window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse
+// zlib header/footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate).
+int mz_inflateInit2(mz_streamp pStream, int window_bits);
+
+// Decompresses the input stream to the output, consuming only as much of the
+// input as needed, and writing as much to the output as possible. Parameters:
+// pStream is the stream to read from and write to. You must initialize/update
+// the next_in, avail_in, next_out, and avail_out members. flush may be
+// MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. On the first call, if flush is
+// MZ_FINISH it's assumed the input and output buffers are both sized large
+// enough to decompress the entire stream in a single call (this is slightly
+// faster). MZ_FINISH implies that there are no more source bytes available
+// beside what's already in the input buffer, and that the output buffer is
+// large enough to hold the rest of the decompressed data.
+// Return values:
+// MZ_OK on success. Either more input is needed but not available, and/or
+// there's more output to be written but the output buffer is full.
+// MZ_STREAM_END if all needed input has been consumed and all output bytes
+// have been written. For zlib streams, the adler-32 of the decompressed data
+// has also been verified. MZ_STREAM_ERROR if the stream is bogus.
+// MZ_DATA_ERROR if the deflate stream is invalid.
+// MZ_PARAM_ERROR if one of the parameters is invalid.
+// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
+// empty but the inflater needs more input to continue, or if the output
+// buffer is not large enough. Call mz_inflate() again with more input data,
+// or with more room in the output buffer (except when using single call
+// decompression, described above).
+int mz_inflate(mz_streamp pStream, int flush);
+
+// Deinitializes a decompressor.
+int mz_inflateEnd(mz_streamp pStream);
+
+// Single-call decompression.
+// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
+// failure.
+int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
+ const unsigned char *pSource, mz_ulong source_len);
+
+// Returns a string description of the specified error code, or NULL if the
+// error code is invalid.
+const char *mz_error(int err);
+
+// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
+// as a drop-in replacement for the subset of zlib that miniz.c supports. Define
+// MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you use zlib
+// in the same project.
+#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
+typedef unsigned char Byte;
+typedef unsigned int uInt;
+typedef mz_ulong uLong;
+typedef Byte Bytef;
+typedef uInt uIntf;
+typedef char charf;
+typedef int intf;
+typedef void *voidpf;
+typedef uLong uLongf;
+typedef void *voidp;
+typedef void *const voidpc;
+#define Z_NULL 0
+#define Z_NO_FLUSH MZ_NO_FLUSH
+#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
+#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
+#define Z_FULL_FLUSH MZ_FULL_FLUSH
+#define Z_FINISH MZ_FINISH
+#define Z_BLOCK MZ_BLOCK
+#define Z_OK MZ_OK
+#define Z_STREAM_END MZ_STREAM_END
+#define Z_NEED_DICT MZ_NEED_DICT
+#define Z_ERRNO MZ_ERRNO
+#define Z_STREAM_ERROR MZ_STREAM_ERROR
+#define Z_DATA_ERROR MZ_DATA_ERROR
+#define Z_MEM_ERROR MZ_MEM_ERROR
+#define Z_BUF_ERROR MZ_BUF_ERROR
+#define Z_VERSION_ERROR MZ_VERSION_ERROR
+#define Z_PARAM_ERROR MZ_PARAM_ERROR
+#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
+#define Z_BEST_SPEED MZ_BEST_SPEED
+#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
+#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
+#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
+#define Z_FILTERED MZ_FILTERED
+#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
+#define Z_RLE MZ_RLE
+#define Z_FIXED MZ_FIXED
+#define Z_DEFLATED MZ_DEFLATED
+#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
+#define alloc_func mz_alloc_func
+#define free_func mz_free_func
+#define internal_state mz_internal_state
+#define z_stream mz_stream
+#define deflateInit mz_deflateInit
+#define deflateInit2 mz_deflateInit2
+#define deflateReset mz_deflateReset
+#define deflate mz_deflate
+#define deflateEnd mz_deflateEnd
+#define deflateBound mz_deflateBound
+#define compress mz_compress
+#define compress2 mz_compress2
+#define compressBound mz_compressBound
+#define inflateInit mz_inflateInit
+#define inflateInit2 mz_inflateInit2
+#define inflate mz_inflate
+#define inflateEnd mz_inflateEnd
+#define uncompress mz_uncompress
+#define crc32 mz_crc32
+#define adler32 mz_adler32
+#define MAX_WBITS 15
+#define MAX_MEM_LEVEL 9
+#define zError mz_error
+#define ZLIB_VERSION MZ_VERSION
+#define ZLIB_VERNUM MZ_VERNUM
+#define ZLIB_VER_MAJOR MZ_VER_MAJOR
+#define ZLIB_VER_MINOR MZ_VER_MINOR
+#define ZLIB_VER_REVISION MZ_VER_REVISION
+#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
+#define zlibVersion mz_version
+#define zlib_version mz_version()
+#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
+
+#endif // MINIZ_NO_ZLIB_APIS
+
+// ------------------- Types and macros
+
+typedef unsigned char mz_uint8;
+typedef signed short mz_int16;
+typedef unsigned short mz_uint16;
+typedef unsigned int mz_uint32;
+typedef unsigned int mz_uint;
+typedef long long mz_int64;
+typedef unsigned long long mz_uint64;
+typedef int mz_bool;
+
+#define MZ_FALSE (0)
+#define MZ_TRUE (1)
+
+// An attempt to work around MSVC's spammy "warning C4127: conditional
+// expression is constant" message.
+#ifdef _MSC_VER
+#define MZ_MACRO_END while (0, 0)
+#else
+#define MZ_MACRO_END while (0)
+#endif
+
+// ------------------- ZIP archive reading/writing
+
+#ifndef MINIZ_NO_ARCHIVE_APIS
+
+enum {
+ MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
+ MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
+ MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
+};
+
+typedef struct {
+ mz_uint32 m_file_index;
+ mz_uint32 m_central_dir_ofs;
+ mz_uint16 m_version_made_by;
+ mz_uint16 m_version_needed;
+ mz_uint16 m_bit_flag;
+ mz_uint16 m_method;
+#ifndef MINIZ_NO_TIME
+ time_t m_time;
+#endif
+ mz_uint32 m_crc32;
+ mz_uint64 m_comp_size;
+ mz_uint64 m_uncomp_size;
+ mz_uint16 m_internal_attr;
+ mz_uint32 m_external_attr;
+ mz_uint64 m_local_header_ofs;
+ mz_uint32 m_comment_size;
+ char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
+ char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
+} mz_zip_archive_file_stat;
+
+typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
+ void *pBuf, size_t n);
+typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
+ const void *pBuf, size_t n);
+typedef mz_bool (*mz_file_needs_keepalive)(void *pOpaque);
+
+struct mz_zip_internal_state_tag;
+typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
+
+typedef enum {
+ MZ_ZIP_MODE_INVALID = 0,
+ MZ_ZIP_MODE_READING = 1,
+ MZ_ZIP_MODE_WRITING = 2,
+ MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
+} mz_zip_mode;
+
+typedef enum {
+ MZ_ZIP_TYPE_INVALID = 0,
+ MZ_ZIP_TYPE_USER,
+ MZ_ZIP_TYPE_MEMORY,
+ MZ_ZIP_TYPE_HEAP,
+ MZ_ZIP_TYPE_FILE,
+ MZ_ZIP_TYPE_CFILE,
+ MZ_ZIP_TOTAL_TYPES
+} mz_zip_type;
+
+typedef struct {
+ mz_uint64 m_archive_size;
+ mz_uint64 m_central_directory_file_ofs;
+
+ /* We only support up to UINT32_MAX files in zip64 mode. */
+ mz_uint32 m_total_files;
+ mz_zip_mode m_zip_mode;
+ mz_zip_type m_zip_type;
+ mz_zip_error m_last_error;
+
+ mz_uint64 m_file_offset_alignment;
+
+ mz_alloc_func m_pAlloc;
+ mz_free_func m_pFree;
+ mz_realloc_func m_pRealloc;
+ void *m_pAlloc_opaque;
+
+ mz_file_read_func m_pRead;
+ mz_file_write_func m_pWrite;
+ mz_file_needs_keepalive m_pNeeds_keepalive;
+ void *m_pIO_opaque;
+
+ mz_zip_internal_state *m_pState;
+
+} mz_zip_archive;
+
+typedef enum {
+ MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
+ MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
+ MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
+ MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
+} mz_zip_flags;
+
+// ZIP archive reading
+
+// Inits a ZIP archive reader.
+// These functions read and validate the archive's central directory.
+mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
+ mz_uint32 flags);
+mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
+ size_t size, mz_uint32 flags);
+
+#ifndef MINIZ_NO_STDIO
+mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
+ mz_uint32 flags);
+#endif
+
+// Returns the total number of files in the archive.
+mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
+
+// Returns detailed information about an archive file entry.
+mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
+ mz_zip_archive_file_stat *pStat);
+
+// Determines if an archive file entry is a directory entry.
+mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
+ mz_uint file_index);
+mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
+ mz_uint file_index);
+
+// Retrieves the filename of an archive file entry.
+// Returns the number of bytes written to pFilename, or if filename_buf_size is
+// 0 this function returns the number of bytes needed to fully store the
+// filename.
+mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
+ char *pFilename, mz_uint filename_buf_size);
+
+// Attempts to locate a file in the archive's central directory.
+// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
+// Returns -1 if the file cannot be found.
+int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
+ const char *pComment, mz_uint flags);
+
+// Extracts an archive file to a memory buffer using no memory allocation.
+mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
+ mz_uint file_index, void *pBuf,
+ size_t buf_size, mz_uint flags,
+ void *pUser_read_buf,
+ size_t user_read_buf_size);
+mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
+ mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
+ mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
+
+// Extracts an archive file to a memory buffer.
+mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
+ void *pBuf, size_t buf_size,
+ mz_uint flags);
+mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
+ const char *pFilename, void *pBuf,
+ size_t buf_size, mz_uint flags);
+
+// Extracts an archive file to a dynamically allocated heap buffer.
+void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
+ size_t *pSize, mz_uint flags);
+void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
+ const char *pFilename, size_t *pSize,
+ mz_uint flags);
+
+// Extracts an archive file using a callback function to output the file's data.
+mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
+ mz_uint file_index,
+ mz_file_write_func pCallback,
+ void *pOpaque, mz_uint flags);
+mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
+ const char *pFilename,
+ mz_file_write_func pCallback,
+ void *pOpaque, mz_uint flags);
+
+#ifndef MINIZ_NO_STDIO
+// Extracts an archive file to a disk file and sets its last accessed and
+// modified times. This function only extracts files, not archive directory
+// records.
+mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
+ const char *pDst_filename, mz_uint flags);
+mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
+ const char *pArchive_filename,
+ const char *pDst_filename,
+ mz_uint flags);
+#endif
+
+// Ends archive reading, freeing all allocations, and closing the input archive
+// file if mz_zip_reader_init_file() was used.
+mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
+
+// ZIP archive writing
+
+#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
+
+// Inits a ZIP archive writer.
+mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
+mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
+ size_t size_to_reserve_at_beginning,
+ size_t initial_allocation_size);
+
+#ifndef MINIZ_NO_STDIO
+mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
+ mz_uint64 size_to_reserve_at_beginning);
+#endif
+
+// Converts a ZIP archive reader object into a writer object, to allow efficient
+// in-place file appends to occur on an existing archive. For archives opened
+// using mz_zip_reader_init_file, pFilename must be the archive's filename so it
+// can be reopened for writing. If the file can't be reopened,
+// mz_zip_reader_end() will be called. For archives opened using
+// mz_zip_reader_init_mem, the memory block must be growable using the realloc
+// callback (which defaults to realloc unless you've overridden it). Finally,
+// for archives opened using mz_zip_reader_init, the mz_zip_archive's user
+// provided m_pWrite function cannot be NULL. Note: In-place archive
+// modification is not recommended unless you know what you're doing, because if
+// execution stops or something goes wrong before the archive is finalized the
+// file's central directory will be hosed.
+mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
+ const char *pFilename);
+
+// Adds the contents of a memory buffer to an archive. These functions record
+// the current local time into the archive. To add a directory entry, call this
+// method with an archive name ending in a forward slash and an empty buffer.
+// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
+// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
+// just set to MZ_DEFAULT_COMPRESSION.
+mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
+ const void *pBuf, size_t buf_size,
+ mz_uint level_and_flags);
+mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
+ const char *pArchive_name, const void *pBuf,
+ size_t buf_size, const void *pComment,
+ mz_uint16 comment_size,
+ mz_uint level_and_flags, mz_uint64 uncomp_size,
+ mz_uint32 uncomp_crc32);
+
+#ifndef MINIZ_NO_STDIO
+// Adds the contents of a disk file to an archive. This function also records
+// the disk file's modified time into the archive. level_and_flags - compression
+// level (0-10, see MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc.) logically OR'd
+// with zero or more mz_zip_flags, or just set to MZ_DEFAULT_COMPRESSION.
+mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
+ const char *pSrc_filename, const void *pComment,
+ mz_uint16 comment_size, mz_uint level_and_flags,
+ mz_uint32 ext_attributes);
+#endif
+
+// Adds a file to an archive by fully cloning the data from another archive.
+// This function fully clones the source file's compressed data (no
+// recompression), along with its full filename, extra data, and comment fields.
+mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
+ mz_zip_archive *pSource_zip,
+ mz_uint file_index);
+
+// Finalizes the archive by writing the central directory records followed by
+// the end of central directory record. After an archive is finalized, the only
+// valid call on the mz_zip_archive struct is mz_zip_writer_end(). An archive
+// must be manually finalized by calling this function for it to be valid.
+mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
+mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
+ size_t *pSize);
+
+// Ends archive writing, freeing all allocations, and closing the output file if
+// mz_zip_writer_init_file() was used. Note for the archive to be valid, it must
+// have been finalized before ending.
+mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
+
+// Misc. high-level helper functions:
+
+// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
+// appends a memory blob to a ZIP archive. level_and_flags - compression level
+// (0-10, see MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc.) logically OR'd with zero
+// or more mz_zip_flags, or just set to MZ_DEFAULT_COMPRESSION.
+mz_bool mz_zip_add_mem_to_archive_file_in_place(
+ const char *pZip_filename, const char *pArchive_name, const void *pBuf,
+ size_t buf_size, const void *pComment, mz_uint16 comment_size,
+ mz_uint level_and_flags);
+
+// Reads a single file from an archive into a heap block.
+// Returns NULL on failure.
+void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
+ const char *pArchive_name,
+ size_t *pSize, mz_uint zip_flags);
+
+#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
+
+#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
+
+// ------------------- Low-level Decompression API Definitions
+
+// Decompression flags used by tinfl_decompress().
+// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
+// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
+// input is a raw deflate stream. TINFL_FLAG_HAS_MORE_INPUT: If set, there are
+// more input bytes available beyond the end of the supplied input buffer. If
+// clear, the input buffer contains all remaining input.
+// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
+// enough to hold the entire decompressed stream. If clear, the output buffer is
+// at least the size of the dictionary (typically 32KB).
+// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
+// decompressed bytes.
+enum {
+ TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
+ TINFL_FLAG_HAS_MORE_INPUT = 2,
+ TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
+ TINFL_FLAG_COMPUTE_ADLER32 = 8
+};
+
+// High level decompression functions:
+// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
+// allocated via malloc(). On entry:
+// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
+// to decompress.
+// On return:
+// Function returns a pointer to the decompressed data, or NULL on failure.
+// *pOut_len will be set to the decompressed data's size, which could be larger
+// than src_buf_len on uncompressible data. The caller must call mz_free() on
+// the returned block when it's no longer needed.
+void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
+ size_t *pOut_len, int flags);
+
+// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
+// in memory. Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the
+// number of bytes written on success.
+#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
+size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
+ const void *pSrc_buf, size_t src_buf_len,
+ int flags);
+
+// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
+// internal 32KB buffer, and a user provided callback function will be called to
+// flush the buffer. Returns 1 on success or 0 on failure.
+typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
+int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
+ tinfl_put_buf_func_ptr pPut_buf_func,
+ void *pPut_buf_user, int flags);
+
+struct tinfl_decompressor_tag;
+typedef struct tinfl_decompressor_tag tinfl_decompressor;
+
+// Max size of LZ dictionary.
+#define TINFL_LZ_DICT_SIZE 32768
+
+// Return status.
+// Return status of tinfl_decompress(). Negative values are failures;
+// DONE means the stream ended; NEEDS_MORE_INPUT / HAS_MORE_OUTPUT mean the
+// caller must supply more input or drain the output buffer and call again.
+typedef enum {
+ TINFL_STATUS_BAD_PARAM = -3,
+ TINFL_STATUS_ADLER32_MISMATCH = -2,
+ TINFL_STATUS_FAILED = -1,
+ TINFL_STATUS_DONE = 0,
+ TINFL_STATUS_NEEDS_MORE_INPUT = 1,
+ TINFL_STATUS_HAS_MORE_OUTPUT = 2
+} tinfl_status;
+
+// Initializes the decompressor to its initial state.
+#define tinfl_init(r) \
+ do { \
+ (r)->m_state = 0; \
+ } \
+ MZ_MACRO_END
+#define tinfl_get_adler32(r) (r)->m_check_adler32
+
+// Main low-level decompressor coroutine function. This is the only function
+// actually needed for decompression. All the other functions are just
+// high-level helpers for improved usability. This is a universal API, i.e. it
+// can be used as a building block to build any desired higher level
+// decompression API. In the limit case, it can be called once per every byte
+// input or output.
+tinfl_status tinfl_decompress(tinfl_decompressor *r,
+ const mz_uint8 *pIn_buf_next,
+ size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
+ mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
+ const mz_uint32 decomp_flags);
+
+// Internal/private bits follow.
+enum {
+ TINFL_MAX_HUFF_TABLES = 3,
+ TINFL_MAX_HUFF_SYMBOLS_0 = 288,
+ TINFL_MAX_HUFF_SYMBOLS_1 = 32,
+ TINFL_MAX_HUFF_SYMBOLS_2 = 19,
+ TINFL_FAST_LOOKUP_BITS = 10,
+ TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
+};
+
+// One Huffman table: per-symbol code lengths, a fast TINFL_FAST_LOOKUP_BITS
+// direct lookup table, and a tree used when the fast lookup misses.
+typedef struct {
+ mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
+ mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
+ m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
+} tinfl_huff_table;
+
+#if MINIZ_HAS_64BIT_REGISTERS
+#define TINFL_USE_64BIT_BITBUF 1
+#endif
+
+#if TINFL_USE_64BIT_BITBUF
+typedef mz_uint64 tinfl_bit_buf_t;
+#define TINFL_BITBUF_SIZE (64)
+#else
+typedef mz_uint32 tinfl_bit_buf_t;
+#define TINFL_BITBUF_SIZE (32)
+#endif
+
+// Full decompressor state. tinfl_decompress() is a coroutine: m_state stores
+// the resume point, so this struct persists all intermediate state between
+// calls. tinfl_init() resets it by clearing only m_state.
+struct tinfl_decompressor_tag {
+ mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
+ m_check_adler32, m_dist, m_counter, m_num_extra,
+ m_table_sizes[TINFL_MAX_HUFF_TABLES];
+ tinfl_bit_buf_t m_bit_buf;
+ size_t m_dist_from_out_buf_start;
+ tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
+ mz_uint8 m_raw_header[4],
+ m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
+};
+
+// ------------------- Low-level Compression API Definitions
+
+// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
+// slower, and raw/dynamic blocks will be output more frequently).
+#define TDEFL_LESS_MEMORY 0
+
+// tdefl_init() compression flags logically OR'd together (low 12 bits contain
+// the max. number of probes per dictionary search): TDEFL_DEFAULT_MAX_PROBES:
+// The compressor defaults to 128 dictionary probes per dictionary search.
+// 0=Huffman only, 1=Huffman+LZ (fastest/crap compression), 4095=Huffman+LZ
+// (slowest/best compression).
+enum {
+ TDEFL_HUFFMAN_ONLY = 0,
+ TDEFL_DEFAULT_MAX_PROBES = 128,
+ TDEFL_MAX_PROBES_MASK = 0xFFF
+};
+
+// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
+// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
+// you'll get raw deflate data. TDEFL_COMPUTE_ADLER32: Always compute the
+// adler-32 of the input data (even when not writing zlib headers).
+// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
+// efficient lazy parsing. TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to
+// decrease the compressor's initialization time to the minimum, but the output
+// may vary from run to run given the same input (depending on the contents of
+// memory). TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a
+// distance of 1) TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
+// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
+// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
+// The low 12 bits are reserved to control the max # of hash probes per
+// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
+enum {
+ TDEFL_WRITE_ZLIB_HEADER = 0x01000,
+ TDEFL_COMPUTE_ADLER32 = 0x02000,
+ TDEFL_GREEDY_PARSING_FLAG = 0x04000,
+ TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
+ TDEFL_RLE_MATCHES = 0x10000,
+ TDEFL_FILTER_MATCHES = 0x20000,
+ TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
+ TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
+};
+
+// High level compression functions:
+// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
+// allocated via malloc(). On entry:
+// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
+// flags: The max match finder probes (default is 128) logically OR'd against
+// the above flags. Higher probes are slower but improve compression.
+// On return:
+// Function returns a pointer to the compressed data, or NULL on failure.
+// *pOut_len will be set to the compressed data's size, which could be larger
+// than src_buf_len on uncompressible data. The caller must free() the returned
+// block when it's no longer needed.
+void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
+ size_t *pOut_len, int flags);
+
+// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
+// memory. Returns 0 on failure.
+size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
+ const void *pSrc_buf, size_t src_buf_len,
+ int flags);
+
+// Compresses an image to a compressed PNG file in memory.
+// On entry:
+// pImage, w, h, and num_chans describe the image to compress. num_chans may be
+// 1, 2, 3, or 4. The image pitch in bytes per scanline will be w*num_chans.
+// The leftmost pixel on the top scanline is stored first in memory. level may
+// range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
+// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL If flip is
+// true, the image will be flipped on the Y axis (useful for OpenGL apps).
+// On return:
+// Function returns a pointer to the compressed data, or NULL on failure.
+// *pLen_out will be set to the size of the PNG image file.
+// The caller must mz_free() the returned heap block (which will typically be
+// larger than *pLen_out) when it's no longer needed.
+void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
+ int h, int num_chans,
+ size_t *pLen_out,
+ mz_uint level, mz_bool flip);
+void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
+ int num_chans, size_t *pLen_out);
+
+// Output stream interface. The compressor uses this interface to write
+// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
+typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
+ void *pUser);
+
+// tdefl_compress_mem_to_output() compresses a block to an output stream. The
+// above helpers use this function internally.
+mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
+ tdefl_put_buf_func_ptr pPut_buf_func,
+ void *pPut_buf_user, int flags);
+
+enum {
+ TDEFL_MAX_HUFF_TABLES = 3,
+ TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
+ TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
+ TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
+ TDEFL_LZ_DICT_SIZE = 32768,
+ TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
+ TDEFL_MIN_MATCH_LEN = 3,
+ TDEFL_MAX_MATCH_LEN = 258
+};
+
+// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
+// output block (using static/fixed Huffman codes).
+#if TDEFL_LESS_MEMORY
+enum {
+ TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
+ TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
+ TDEFL_MAX_HUFF_SYMBOLS = 288,
+ TDEFL_LZ_HASH_BITS = 12,
+ TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
+ TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
+ TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
+};
+#else
+enum {
+ TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
+ TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
+ TDEFL_MAX_HUFF_SYMBOLS = 288,
+ TDEFL_LZ_HASH_BITS = 15,
+ TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
+ TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
+ TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
+};
+#endif
+
+// The low-level tdefl functions below may be used directly if the above helper
+// functions aren't flexible enough. The low-level functions don't make any heap
+// allocations, unlike the above helper functions.
+// Return status of the low-level tdefl functions. Negative values are
+// failures; OKAY means more work remains; DONE means compression finished.
+typedef enum {
+ TDEFL_STATUS_BAD_PARAM = -2,
+ TDEFL_STATUS_PUT_BUF_FAILED = -1,
+ TDEFL_STATUS_OKAY = 0,
+ TDEFL_STATUS_DONE = 1,
+} tdefl_status;
+
+// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
+typedef enum {
+ TDEFL_NO_FLUSH = 0,
+ TDEFL_SYNC_FLUSH = 2,
+ TDEFL_FULL_FLUSH = 3,
+ TDEFL_FINISH = 4
+} tdefl_flush;
+
+// tdefl's compression state structure.
+// Complete compressor state. Note this struct is large (hundreds of KB,
+// dominated by the dictionary, hash chains and output buffers below), which
+// is why mz_deflateInit2() heap-allocates it via the stream's zalloc.
+typedef struct {
+ tdefl_put_buf_func_ptr m_pPut_buf_func;
+ void *m_pPut_buf_user;
+ mz_uint m_flags, m_max_probes[2];
+ int m_greedy_parsing;
+ mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
+ mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
+ mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
+ m_bit_buffer;
+ mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
+ m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
+ m_wants_to_finish;
+ tdefl_status m_prev_return_status;
+ const void *m_pIn_buf;
+ void *m_pOut_buf;
+ size_t *m_pIn_buf_size, *m_pOut_buf_size;
+ tdefl_flush m_flush;
+ const mz_uint8 *m_pSrc;
+ size_t m_src_buf_left, m_out_buf_ofs;
+ mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
+ mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
+ mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
+ mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
+ mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
+ mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
+ mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
+ mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
+} tdefl_compressor;
+
+// Initializes the compressor.
+// There is no corresponding deinit() function because the tdefl API's do not
+// dynamically allocate memory. pPut_buf_func: If non-NULL, compressed output
+// is supplied to this callback and the user should call the
+// tdefl_compress_buffer() API for compression. If pPut_buf_func is NULL, the
+// user should always call the tdefl_compress() API. flags: See the above
+// enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, etc.)
+tdefl_status tdefl_init(tdefl_compressor *d,
+ tdefl_put_buf_func_ptr pPut_buf_func,
+ void *pPut_buf_user, int flags);
+
+// Compresses a block of data, consuming as much of the specified input buffer
+// as possible, and writing as much compressed data to the specified output
+// buffer as possible.
+tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
+ size_t *pIn_buf_size, void *pOut_buf,
+ size_t *pOut_buf_size, tdefl_flush flush);
+
+// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
+// non-NULL tdefl_put_buf_func_ptr. tdefl_compress_buffer() always consumes the
+// entire input buffer.
+tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
+ size_t in_buf_size, tdefl_flush flush);
+
+tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
+mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
+
+// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
+// defined, because it uses some of its macros.
+#ifndef MINIZ_NO_ZLIB_APIS
+// Create tdefl_compress() flags given zlib-style compression parameters.
+// level may range from [0,10] (where 10 is absolute max compression, but may be
+// much slower on some files) window_bits may be -15 (raw deflate) or 15 (zlib)
+// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
+// MZ_RLE, or MZ_FIXED
+mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
+ int strategy);
+#endif // #ifndef MINIZ_NO_ZLIB_APIS
+
+#define MZ_UINT16_MAX (0xFFFFU)
+#define MZ_UINT32_MAX (0xFFFFFFFFU)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MINIZ_HEADER_INCLUDED
+
+// ------------------- End of Header: Implementation follows. (If you only want
+// the header, define MINIZ_HEADER_FILE_ONLY.)
+
+#ifndef MINIZ_HEADER_FILE_ONLY
+
+typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
+typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
+typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
+
+#include <string.h>
+#include <assert.h>
+
+#define MZ_ASSERT(x) assert(x)
+
+#ifdef MINIZ_NO_MALLOC
+#define MZ_MALLOC(x) NULL
+#define MZ_FREE(x) (void)x, ((void)0)
+#define MZ_REALLOC(p, x) NULL
+#else
+#define MZ_MALLOC(x) malloc(x)
+#define MZ_FREE(x) free(x)
+#define MZ_REALLOC(p, x) realloc(p, x)
+#endif
+
+#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
+#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
+#else
+#define MZ_READ_LE16(p) \
+ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
+ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
+#define MZ_READ_LE32(p) \
+ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
+ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
+ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
+ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
+#endif
+
+#define MZ_READ_LE64(p) \
+ (((mz_uint64)MZ_READ_LE32(p)) | \
+ (((mz_uint64)MZ_READ_LE32((const mz_uint8 *)(p) + sizeof(mz_uint32))) \
+ << 32U))
+
+#ifdef _MSC_VER
+#define MZ_FORCEINLINE __forceinline
+#elif defined(__GNUC__)
+#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
+#else
+#define MZ_FORCEINLINE inline
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// ------------------- zlib-style API's
+
+// Adler-32 checksum (zlib-compatible). Pass MZ_ADLER32_INIT (or a previous
+// result) as 'adler' to start/continue; a NULL ptr returns the initial value.
+mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
+ mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
+ // Work in blocks of at most 5552 bytes so the s1/s2 accumulators cannot
+ // overflow 32 bits before the deferred modulo at the end of each block.
+ size_t block_len = buf_len % 5552;
+ if (!ptr)
+ return MZ_ADLER32_INIT;
+ while (buf_len) {
+ // 8x-unrolled inner loop; the loop after it handles the remainder.
+ for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
+ s1 += ptr[0], s2 += s1;
+ s1 += ptr[1], s2 += s1;
+ s1 += ptr[2], s2 += s1;
+ s1 += ptr[3], s2 += s1;
+ s1 += ptr[4], s2 += s1;
+ s1 += ptr[5], s2 += s1;
+ s1 += ptr[6], s2 += s1;
+ s1 += ptr[7], s2 += s1;
+ }
+ for (; i < block_len; ++i)
+ s1 += *ptr++, s2 += s1;
+ s1 %= 65521U, s2 %= 65521U;
+ buf_len -= block_len;
+ block_len = 5552;
+ }
+ return (s2 << 16) + s1;
+}
+
+// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
+// implementation that balances processor cache usage against speed":
+// http://www.geocities.com/malbrain/
+// CRC-32 (zlib polynomial) using a compact 16-entry table, processing one
+// nibble at a time. Pass MZ_CRC32_INIT (or a previous result) as 'crc' to
+// start/continue; a NULL ptr returns the initial CRC value.
+mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
+ static const mz_uint32 s_crc32[16] = {
+ 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
+ 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
+ 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
+ mz_uint32 crcu32 = (mz_uint32)crc;
+ if (!ptr)
+ return MZ_CRC32_INIT;
+ // Standard CRC-32 pre-conditioning; undone by the final ~ below.
+ crcu32 = ~crcu32;
+ while (buf_len--) {
+ mz_uint8 b = *ptr++;
+ crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
+ crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
+ }
+ return ~crcu32;
+}
+
+void mz_free(void *p) { MZ_FREE(p); }
+
+#ifndef MINIZ_NO_ZLIB_APIS
+
+// Default allocator callbacks installed when the user leaves zalloc/zfree
+// NULL in an mz_stream; they forward to the MZ_MALLOC/MZ_FREE/MZ_REALLOC
+// macros (plain malloc/free/realloc, or always-fail if MINIZ_NO_MALLOC).
+static void *def_alloc_func(void *opaque, size_t items, size_t size) {
+ (void)opaque, (void)items, (void)size;
+ return MZ_MALLOC(items * size);
+}
+static void def_free_func(void *opaque, void *address) {
+ (void)opaque, (void)address;
+ MZ_FREE(address);
+}
+static void *def_realloc_func(void *opaque, void *address, size_t items,
+ size_t size) {
+ (void)opaque, (void)address, (void)items, (void)size;
+ return MZ_REALLOC(address, items * size);
+}
+
+const char *mz_version(void) { return MZ_VERSION; }
+
+// zlib-style deflateInit(): convenience wrapper around mz_deflateInit2()
+// using the default window bits, a mem_level of 9 and the default strategy.
+int mz_deflateInit(mz_streamp pStream, int level) {
+ return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
+ MZ_DEFAULT_STRATEGY);
+}
+
+// zlib-style deflateInit2(). Validates parameters (method must be
+// MZ_DEFLATED, mem_level in [1,9], window_bits must be +/-
+// MZ_DEFAULT_WINDOW_BITS), installs default allocators if none were set,
+// then allocates and initializes a tdefl_compressor as the stream state.
+// Returns MZ_OK, MZ_STREAM_ERROR, MZ_PARAM_ERROR or MZ_MEM_ERROR.
+int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
+ int mem_level, int strategy) {
+ tdefl_compressor *pComp;
+ mz_uint comp_flags =
+ TDEFL_COMPUTE_ADLER32 |
+ tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
+
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+ // mem_level is only range-checked; tdefl's memory usage is fixed.
+ if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
+ ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
+ (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
+ return MZ_PARAM_ERROR;
+
+ pStream->data_type = 0;
+ pStream->adler = MZ_ADLER32_INIT;
+ pStream->msg = NULL;
+ pStream->reserved = 0;
+ pStream->total_in = 0;
+ pStream->total_out = 0;
+ if (!pStream->zalloc)
+ pStream->zalloc = def_alloc_func;
+ if (!pStream->zfree)
+ pStream->zfree = def_free_func;
+
+ pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
+ sizeof(tdefl_compressor));
+ if (!pComp)
+ return MZ_MEM_ERROR;
+
+ pStream->state = (struct mz_internal_state *)pComp;
+
+ if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
+ mz_deflateEnd(pStream);
+ return MZ_PARAM_ERROR;
+ }
+
+ return MZ_OK;
+}
+
+// zlib-style deflateReset(): resets the byte counters and re-initializes the
+// existing compressor state in place, preserving the flags it was created
+// with. Returns MZ_OK or MZ_STREAM_ERROR if the stream is not initialized.
+int mz_deflateReset(mz_streamp pStream) {
+ if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
+ (!pStream->zfree))
+ return MZ_STREAM_ERROR;
+ pStream->total_in = pStream->total_out = 0;
+ tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
+ ((tdefl_compressor *)pStream->state)->m_flags);
+ return MZ_OK;
+}
+
+// zlib-style deflate(). Consumes input from next_in and writes compressed
+// data to next_out, updating the avail_*/total_* counters and the running
+// adler32. Returns MZ_OK, MZ_STREAM_END when the stream is finished,
+// MZ_BUF_ERROR when no forward progress can be made, or MZ_STREAM_ERROR on
+// bad parameters/state.
+int mz_deflate(mz_streamp pStream, int flush) {
+ size_t in_bytes, out_bytes;
+ mz_ulong orig_total_in, orig_total_out;
+ int mz_status = MZ_OK;
+
+ if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
+ (!pStream->next_out))
+ return MZ_STREAM_ERROR;
+ if (!pStream->avail_out)
+ return MZ_BUF_ERROR;
+
+ // tdefl has no distinct partial flush; treat it as a sync flush.
+ if (flush == MZ_PARTIAL_FLUSH)
+ flush = MZ_SYNC_FLUSH;
+
+ // Once the compressor has reported DONE the stream is over: finishing
+ // again yields MZ_STREAM_END, anything else is a buffer error.
+ if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
+ TDEFL_STATUS_DONE)
+ return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
+
+ orig_total_in = pStream->total_in;
+ orig_total_out = pStream->total_out;
+ for (;;) {
+ tdefl_status defl_status;
+ in_bytes = pStream->avail_in;
+ out_bytes = pStream->avail_out;
+
+ defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
+ pStream->next_in, &in_bytes, pStream->next_out,
+ &out_bytes, (tdefl_flush)flush);
+ // tdefl_compress() rewrote in_bytes/out_bytes to the amounts actually
+ // consumed/produced; advance the stream bookkeeping accordingly.
+ pStream->next_in += (mz_uint)in_bytes;
+ pStream->avail_in -= (mz_uint)in_bytes;
+ pStream->total_in += (mz_uint)in_bytes;
+ pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
+
+ pStream->next_out += (mz_uint)out_bytes;
+ pStream->avail_out -= (mz_uint)out_bytes;
+ pStream->total_out += (mz_uint)out_bytes;
+
+ if (defl_status < 0) {
+ mz_status = MZ_STREAM_ERROR;
+ break;
+ } else if (defl_status == TDEFL_STATUS_DONE) {
+ mz_status = MZ_STREAM_END;
+ break;
+ } else if (!pStream->avail_out)
+ break;
+ else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
+ if ((flush) || (pStream->total_in != orig_total_in) ||
+ (pStream->total_out != orig_total_out))
+ break;
+ return MZ_BUF_ERROR; // Can't make forward progress without some input.
+ }
+ }
+ return mz_status;
+}
+
+// zlib-style deflateEnd(): frees the compressor state (if any) via the
+// stream's zfree callback. Safe to call on an already-ended stream.
+int mz_deflateEnd(mz_streamp pStream) {
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+ if (pStream->state) {
+ pStream->zfree(pStream->opaque, pStream->state);
+ pStream->state = NULL;
+ }
+ return MZ_OK;
+}
+
+// zlib-style deflateBound(): conservative upper bound on the compressed size
+// of source_len bytes. pStream is unused.
+mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
+ (void)pStream;
+ // This is really over conservative. (And lame, but it's actually pretty
+ // tricky to compute a true upper bound given the way tdefl's blocking works.)
+ return MZ_MAX(128 + (source_len * 110) / 100,
+ 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
+}
+
+// zlib-style compress2(): one-shot compression of a memory buffer.
+// On entry *pDest_len is the capacity of pDest; on success it is updated to
+// the compressed size. Returns MZ_OK or an MZ_* error code.
+int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
+ const unsigned char *pSource, mz_ulong source_len, int level) {
+ int status;
+ mz_stream stream;
+ memset(&stream, 0, sizeof(stream));
+
+ // In case mz_ulong is 64-bits (argh I hate longs).
+ if ((source_len | *pDest_len) > 0xFFFFFFFFU)
+ return MZ_PARAM_ERROR;
+
+ stream.next_in = pSource;
+ stream.avail_in = (mz_uint32)source_len;
+ stream.next_out = pDest;
+ stream.avail_out = (mz_uint32)*pDest_len;
+
+ status = mz_deflateInit(&stream, level);
+ if (status != MZ_OK)
+ return status;
+
+ // A single MZ_FINISH pass must end the stream; MZ_OK here means the
+ // destination buffer was too small.
+ status = mz_deflate(&stream, MZ_FINISH);
+ if (status != MZ_STREAM_END) {
+ mz_deflateEnd(&stream);
+ return (status == MZ_OK) ? MZ_BUF_ERROR : status;
+ }
+
+ *pDest_len = stream.total_out;
+ return mz_deflateEnd(&stream);
+}
+
+// zlib-style compress(): mz_compress2() at the default compression level.
+int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
+ const unsigned char *pSource, mz_ulong source_len) {
+ return mz_compress2(pDest, pDest_len, pSource, source_len,
+ MZ_DEFAULT_COMPRESSION);
+}
+
+// zlib-style compressBound(): worst-case compressed size for source_len
+// bytes (delegates to mz_deflateBound with no stream).
+mz_ulong mz_compressBound(mz_ulong source_len) {
+ return mz_deflateBound(NULL, source_len);
+}
+
+// Internal state backing a zlib-style inflate stream: the tinfl decompressor
+// plus a 32KB dictionary window and bookkeeping (m_dict_ofs/m_dict_avail)
+// for how much decompressed data is currently buffered in it.
+typedef struct {
+ tinfl_decompressor m_decomp;
+ mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
+ int m_window_bits;
+ mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
+ tinfl_status m_last_status;
+} inflate_state;
+
+// Initializes a stream for decompression. window_bits must be
+// +/-MZ_DEFAULT_WINDOW_BITS: positive means zlib-wrapped data (header and
+// adler-32 trailer), negative means a raw deflate stream. Installs default
+// allocation callbacks when the caller provided none. Returns MZ_OK,
+// MZ_STREAM_ERROR, MZ_PARAM_ERROR, or MZ_MEM_ERROR.
+int mz_inflateInit2(mz_streamp pStream, int window_bits) {
+  inflate_state *pDecomp;
+  if (!pStream)
+    return MZ_STREAM_ERROR;
+  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
+      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
+    return MZ_PARAM_ERROR;
+
+  pStream->data_type = 0;
+  pStream->adler = 0;
+  pStream->msg = NULL;
+  pStream->total_in = 0;
+  pStream->total_out = 0;
+  pStream->reserved = 0;
+  if (!pStream->zalloc)
+    pStream->zalloc = def_alloc_func;
+  if (!pStream->zfree)
+    pStream->zfree = def_free_func;
+
+  // One allocation holds the decompressor state plus its LZ dictionary.
+  pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
+                                             sizeof(inflate_state));
+  if (!pDecomp)
+    return MZ_MEM_ERROR;
+
+  pStream->state = (struct mz_internal_state *)pDecomp;
+
+  tinfl_init(&pDecomp->m_decomp);
+  pDecomp->m_dict_ofs = 0;
+  pDecomp->m_dict_avail = 0;
+  pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
+  pDecomp->m_first_call = 1;
+  pDecomp->m_has_flushed = 0;
+  pDecomp->m_window_bits = window_bits;
+
+  return MZ_OK;
+}
+
+// Initializes a stream for zlib-wrapped decompression with the default
+// window size.
+int mz_inflateInit(mz_streamp pStream) {
+  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
+}
+
+// Decompresses as much data as possible from the stream. flush may be 0 (no
+// flush), MZ_SYNC_FLUSH, or MZ_FINISH (MZ_PARTIAL_FLUSH is mapped to
+// MZ_SYNC_FLUSH). Returns MZ_STREAM_END when the stream is complete, MZ_OK
+// when more calls are needed, or a negative error code. Once MZ_FINISH has
+// been passed, subsequent calls must also use MZ_FINISH.
+int mz_inflate(mz_streamp pStream, int flush) {
+  inflate_state *pState;
+  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
+  size_t in_bytes, out_bytes, orig_avail_in;
+  tinfl_status status;
+
+  if ((!pStream) || (!pStream->state))
+    return MZ_STREAM_ERROR;
+  if (flush == MZ_PARTIAL_FLUSH)
+    flush = MZ_SYNC_FLUSH;
+  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
+    return MZ_STREAM_ERROR;
+
+  pState = (inflate_state *)pStream->state;
+  // Positive window_bits (set in mz_inflateInit2) selects zlib framing.
+  if (pState->m_window_bits > 0)
+    decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
+  orig_avail_in = pStream->avail_in;
+
+  first_call = pState->m_first_call;
+  pState->m_first_call = 0;
+  // Negative tinfl statuses are sticky failures.
+  if (pState->m_last_status < 0)
+    return MZ_DATA_ERROR;
+
+  if (pState->m_has_flushed && (flush != MZ_FINISH))
+    return MZ_STREAM_ERROR;
+  pState->m_has_flushed |= (flush == MZ_FINISH);
+
+  if ((flush == MZ_FINISH) && (first_call)) {
+    // MZ_FINISH on the first call implies that the input and output buffers are
+    // large enough to hold the entire compressed/decompressed file.
+    // Decompress directly into the caller's buffer, bypassing the dictionary.
+    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
+    in_bytes = pStream->avail_in;
+    out_bytes = pStream->avail_out;
+    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
+                              pStream->next_out, pStream->next_out, &out_bytes,
+                              decomp_flags);
+    pState->m_last_status = status;
+    pStream->next_in += (mz_uint)in_bytes;
+    pStream->avail_in -= (mz_uint)in_bytes;
+    pStream->total_in += (mz_uint)in_bytes;
+    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
+    pStream->next_out += (mz_uint)out_bytes;
+    pStream->avail_out -= (mz_uint)out_bytes;
+    pStream->total_out += (mz_uint)out_bytes;
+
+    if (status < 0)
+      return MZ_DATA_ERROR;
+    else if (status != TINFL_STATUS_DONE) {
+      // The one-shot promise was broken (buffers too small): fail hard so a
+      // later call can't resume.
+      pState->m_last_status = TINFL_STATUS_FAILED;
+      return MZ_BUF_ERROR;
+    }
+    return MZ_STREAM_END;
+  }
+  // flush != MZ_FINISH then we must assume there's more input.
+  if (flush != MZ_FINISH)
+    decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
+
+  // First drain any output still buffered in the dictionary from a prior call.
+  if (pState->m_dict_avail) {
+    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
+    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
+    pStream->next_out += n;
+    pStream->avail_out -= n;
+    pStream->total_out += n;
+    pState->m_dict_avail -= n;
+    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
+    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
+            (!pState->m_dict_avail))
+               ? MZ_STREAM_END
+               : MZ_OK;
+  }
+
+  // Decompress into the circular dictionary, then copy out to the caller.
+  for (;;) {
+    in_bytes = pStream->avail_in;
+    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
+
+    status = tinfl_decompress(
+        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
+        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
+    pState->m_last_status = status;
+
+    pStream->next_in += (mz_uint)in_bytes;
+    pStream->avail_in -= (mz_uint)in_bytes;
+    pStream->total_in += (mz_uint)in_bytes;
+    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
+
+    pState->m_dict_avail = (mz_uint)out_bytes;
+
+    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
+    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
+    pStream->next_out += n;
+    pStream->avail_out -= n;
+    pStream->total_out += n;
+    pState->m_dict_avail -= n;
+    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
+
+    if (status < 0)
+      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
+                            // uncompressed data left in the output dictionary -
+                            // oh well).
+    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
+      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
+                           // without supplying more input or by setting flush
+                           // to MZ_FINISH.
+    else if (flush == MZ_FINISH) {
+      // The output buffer MUST be large to hold the remaining uncompressed data
+      // when flush==MZ_FINISH.
+      if (status == TINFL_STATUS_DONE)
+        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
+      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
+      // at least 1 more byte on the way. If there's no more room left in the
+      // output buffer then something is wrong.
+      else if (!pStream->avail_out)
+        return MZ_BUF_ERROR;
+    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
+               (!pStream->avail_out) || (pState->m_dict_avail))
+      break;
+  }
+
+  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
+             ? MZ_STREAM_END
+             : MZ_OK;
+}
+
+// Releases the decompressor state allocated by mz_inflateInit2(), if any,
+// via the stream's zfree callback. Idempotent once the state is NULL.
+int mz_inflateEnd(mz_streamp pStream) {
+  if (pStream == NULL)
+    return MZ_STREAM_ERROR;
+  if (pStream->state != NULL) {
+    pStream->zfree(pStream->opaque, pStream->state);
+    pStream->state = NULL;
+  }
+  return MZ_OK;
+}
+
+// One-shot decompression of a complete zlib stream. On entry *pDest_len is
+// the capacity of pDest; on success it is updated to the number of bytes
+// written. A MZ_BUF_ERROR with fully-consumed input is reported as
+// MZ_DATA_ERROR (truncated stream). Sizes must fit in 32 bits.
+int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
+                  const unsigned char *pSource, mz_ulong source_len) {
+  mz_stream stream;
+  int status;
+  memset(&stream, 0, sizeof(stream));
+
+  // In case mz_ulong is 64-bits (argh I hate longs).
+  if ((source_len | *pDest_len) > 0xFFFFFFFFU)
+    return MZ_PARAM_ERROR;
+
+  stream.next_in = pSource;
+  stream.avail_in = (mz_uint32)source_len;
+  stream.next_out = pDest;
+  stream.avail_out = (mz_uint32)*pDest_len;
+
+  status = mz_inflateInit(&stream);
+  if (status != MZ_OK)
+    return status;
+
+  status = mz_inflate(&stream, MZ_FINISH);
+  if (status != MZ_STREAM_END) {
+    mz_inflateEnd(&stream);
+    // Ran out of input rather than output space => the stream is truncated.
+    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
+                                                            : status;
+  }
+  *pDest_len = stream.total_out;
+
+  return mz_inflateEnd(&stream);
+}
+
+// Maps a miniz/zlib status code to a short human-readable description.
+// Returns NULL for unrecognized codes (matching zlib's zError contract of
+// returning static strings).
+const char *mz_error(int err) {
+  switch (err) {
+  case MZ_OK:
+    return "";
+  case MZ_STREAM_END:
+    return "stream end";
+  case MZ_NEED_DICT:
+    return "need dictionary";
+  case MZ_ERRNO:
+    return "file error";
+  case MZ_STREAM_ERROR:
+    return "stream error";
+  case MZ_DATA_ERROR:
+    return "data error";
+  case MZ_MEM_ERROR:
+    return "out of memory";
+  case MZ_BUF_ERROR:
+    return "buf error";
+  case MZ_VERSION_ERROR:
+    return "version error";
+  case MZ_PARAM_ERROR:
+    return "parameter error";
+  default:
+    return NULL;
+  }
+}
+
+#endif // MINIZ_NO_ZLIB_APIS
+
+// ------------------- Low-level Decompression (completely independent from all
+// compression API's)
+
+#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
+#define TINFL_MEMSET(p, c, l) memset(p, c, l)
+
+// Coroutine macros: tinfl_decompress() is structured as one large switch on
+// r->m_state. TINFL_CR_RETURN stores state_index, jumps to common_exit to
+// return 'result', and drops a matching 'case' label so the next call
+// resumes at the exact statement where it suspended.
+#define TINFL_CR_BEGIN \
+  switch (r->m_state) { \
+    case 0:
+#define TINFL_CR_RETURN(state_index, result) \
+  do { \
+    status = result; \
+    r->m_state = state_index; \
+    goto common_exit; \
+    case state_index:; \
+  } \
+  MZ_MACRO_END
+// Like TINFL_CR_RETURN but re-returns the same result on every subsequent
+// call; used for terminal states (hard failures and TINFL_STATUS_DONE).
+#define TINFL_CR_RETURN_FOREVER(state_index, result) \
+  do { \
+    for (;;) { \
+      TINFL_CR_RETURN(state_index, result); \
+    } \
+  } \
+  MZ_MACRO_END
+// Closes the switch opened by TINFL_CR_BEGIN.
+#define TINFL_CR_FINISH }
+
+// TODO: If the caller has indicated that there's no more input, and we attempt
+// to read beyond the input buf, then something is wrong with the input because
+// the inflator never reads ahead more than it needs to. Currently
+// TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
+// NOTE(review): when HAS_MORE_INPUT is set, this suspends via TINFL_CR_RETURN
+// and re-checks for fresh input on resume; otherwise it yields a 0 byte and
+// presumably lets later block/adler checks reject a truncated stream.
+#define TINFL_GET_BYTE(state_index, c) \
+  do { \
+    if (pIn_buf_cur >= pIn_buf_end) { \
+      for (;;) { \
+        if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
+          TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
+          if (pIn_buf_cur < pIn_buf_end) { \
+            c = *pIn_buf_cur++; \
+            break; \
+          } \
+        } else { \
+          c = 0; \
+          break; \
+        } \
+      } \
+    } else \
+      c = *pIn_buf_cur++; \
+  } \
+  MZ_MACRO_END
+
+// Appends whole input bytes to bit_buf (LSB-first) until at least n bits are
+// available.
+#define TINFL_NEED_BITS(state_index, n) \
+  do { \
+    mz_uint c; \
+    TINFL_GET_BYTE(state_index, c); \
+    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
+    num_bits += 8; \
+  } while (num_bits < (mz_uint)(n))
+// Discards the next n bits from the bit buffer, refilling first if needed.
+#define TINFL_SKIP_BITS(state_index, n) \
+  do { \
+    if (num_bits < (mz_uint)(n)) { \
+      TINFL_NEED_BITS(state_index, n); \
+    } \
+    bit_buf >>= (n); \
+    num_bits -= (n); \
+  } \
+  MZ_MACRO_END
+// Consumes the next n bits and stores them (LSB-first) into b.
+#define TINFL_GET_BITS(state_index, b, n) \
+  do { \
+    if (num_bits < (mz_uint)(n)) { \
+      TINFL_NEED_BITS(state_index, n); \
+    } \
+    b = bit_buf & ((1 << (n)) - 1); \
+    bit_buf >>= (n); \
+    num_bits -= (n); \
+  } \
+  MZ_MACRO_END
+
+// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
+// remaining in the input buffer falls below 2. It reads just enough bytes from
+// the input stream that are needed to decode the next Huffman code (and
+// absolutely no more). It works by trying to fully decode a Huffman code by
+// using whatever bits are currently present in the bit buffer. If this fails,
+// it reads another byte, and tries again until it succeeds or until the bit
+// buffer contains >=15 bits (deflate's max. Huffman code size).
+// The 'break's below exit this macro's own do/while once a code is decodable;
+// note the expansion already ends with its own ';'.
+#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
+  do { \
+    temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
+    if (temp >= 0) { \
+      code_len = temp >> 9; \
+      if ((code_len) && (num_bits >= code_len)) \
+        break; \
+    } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
+      code_len = TINFL_FAST_LOOKUP_BITS; \
+      do { \
+        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
+      } while ((temp < 0) && (num_bits >= (code_len + 1))); \
+      if (temp >= 0) \
+        break; \
+    } \
+    TINFL_GET_BYTE(state_index, c); \
+    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
+    num_bits += 8; \
+  } while (num_bits < 15);
+
+// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
+// than you would initially expect because the zlib API expects the decompressor
+// to never read beyond the final byte of the deflate stream. (In other words,
+// when this macro wants to read another byte from the input, it REALLY needs
+// another byte in order to fully decode the next Huffman code.) Handling this
+// properly is particularly important on raw deflate (non-zlib) streams, which
+// aren't followed by a byte aligned adler-32. The slow path is only executed at
+// the very end of the input buffer.
+// Lookup-table entries pack the code length in bits 9+ and the symbol in the
+// low 9 bits; negative entries index into the binary tree used for codes
+// longer than TINFL_FAST_LOOKUP_BITS.
+#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
+  do { \
+    int temp; \
+    mz_uint code_len, c; \
+    if (num_bits < 15) { \
+      if ((pIn_buf_end - pIn_buf_cur) < 2) { \
+        TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
+      } else { \
+        bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
+                   (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
+        pIn_buf_cur += 2; \
+        num_bits += 16; \
+      } \
+    } \
+    if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
+        0) \
+      code_len = temp >> 9, temp &= 511; \
+    else { \
+      code_len = TINFL_FAST_LOOKUP_BITS; \
+      do { \
+        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
+      } while (temp < 0); \
+    } \
+    sym = temp; \
+    bit_buf >>= code_len; \
+    num_bits -= code_len; \
+  } \
+  MZ_MACRO_END
+
+// Core streaming DEFLATE decompressor, written as a resumable coroutine (see
+// the TINFL_CR_* macros). On entry *pIn_buf_size/*pOut_buf_size hold the
+// available input/output sizes; on return they hold the bytes consumed and
+// produced by this call. Unless TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF is
+// set, the output buffer is treated as a power-of-2-sized circular window
+// starting at pOut_buf_start.
+tinfl_status tinfl_decompress(tinfl_decompressor *r,
+                              const mz_uint8 *pIn_buf_next,
+                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
+                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
+                              const mz_uint32 decomp_flags) {
+  // Length/distance base-value and extra-bit tables (RFC 1951, section 3.2.5).
+  static const int s_length_base[31] = {
+      3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+      35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+  static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
+                                         1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
+                                         4, 4, 5, 5, 5, 5, 0, 0, 0};
+  static const int s_dist_base[32] = {
+      1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
+      49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
+      2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
+  static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+                                       4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+                                       9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
+  // Order in which code-length code sizes are transmitted (RFC 1951 3.2.7).
+  static const mz_uint8 s_length_dezigzag[19] = {
+      16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+  static const int s_min_table_sizes[3] = {257, 1, 4};
+
+  tinfl_status status = TINFL_STATUS_FAILED;
+  mz_uint32 num_bits, dist, counter, num_extra;
+  tinfl_bit_buf_t bit_buf;
+  const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
+                                                  pIn_buf_next + *pIn_buf_size;
+  mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
+                                              pOut_buf_next + *pOut_buf_size;
+  size_t out_buf_size_mask =
+             (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
+                 ? (size_t)-1
+                 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
+         dist_from_out_buf_start;
+
+  // Ensure the output buffer's size is a power of 2, unless the output buffer
+  // is large enough to hold the entire output file (in which case it doesn't
+  // matter).
+  if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
+      (pOut_buf_next < pOut_buf_start)) {
+    *pIn_buf_size = *pOut_buf_size = 0;
+    return TINFL_STATUS_BAD_PARAM;
+  }
+
+  // Restore the coroutine locals saved at the last suspension point.
+  num_bits = r->m_num_bits;
+  bit_buf = r->m_bit_buf;
+  dist = r->m_dist;
+  counter = r->m_counter;
+  num_extra = r->m_num_extra;
+  dist_from_out_buf_start = r->m_dist_from_out_buf_start;
+  TINFL_CR_BEGIN
+
+  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
+  r->m_z_adler32 = r->m_check_adler32 = 1;
+  // Optionally parse and validate the 2-byte zlib header: check bits must
+  // make the header a multiple of 31, no preset dictionary, method must be 8
+  // (deflate), and the declared window must fit the circular output buffer.
+  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
+    TINFL_GET_BYTE(1, r->m_zhdr0);
+    TINFL_GET_BYTE(2, r->m_zhdr1);
+    counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
+               (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
+    if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
+      counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
+                  ((out_buf_size_mask + 1) <
+                   (size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
+    if (counter) {
+      TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
+    }
+  }
+
+  // Block loop: 3 header bits = BFINAL flag (bit 0) + BTYPE (bits 1-2).
+  do {
+    TINFL_GET_BITS(3, r->m_final, 3);
+    r->m_type = r->m_final >> 1;
+    if (r->m_type == 0) {
+      // Stored (uncompressed) block: align to a byte boundary, read the
+      // 4-byte LEN/NLEN header, verify NLEN is LEN's complement, then copy
+      // LEN raw bytes to the output.
+      TINFL_SKIP_BITS(5, num_bits & 7);
+      for (counter = 0; counter < 4; ++counter) {
+        if (num_bits)
+          TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
+        else
+          TINFL_GET_BYTE(7, r->m_raw_header[counter]);
+      }
+      if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
+          (mz_uint)(0xFFFF ^
+                    (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
+        TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
+      }
+      // Drain any bytes still sitting in the bit buffer first.
+      while ((counter) && (num_bits)) {
+        TINFL_GET_BITS(51, dist, 8);
+        while (pOut_buf_cur >= pOut_buf_end) {
+          TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
+        }
+        *pOut_buf_cur++ = (mz_uint8)dist;
+        counter--;
+      }
+      // Then bulk-copy directly from the input buffer.
+      while (counter) {
+        size_t n;
+        while (pOut_buf_cur >= pOut_buf_end) {
+          TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
+        }
+        while (pIn_buf_cur >= pIn_buf_end) {
+          if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
+            TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
+          } else {
+            TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
+          }
+        }
+        n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
+                          (size_t)(pIn_buf_end - pIn_buf_cur)),
+                   counter);
+        TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
+        pIn_buf_cur += n;
+        pOut_buf_cur += n;
+        counter -= (mz_uint)n;
+      }
+    } else if (r->m_type == 3) {
+      // BTYPE 3 is reserved/invalid.
+      TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
+    } else {
+      if (r->m_type == 1) {
+        // Fixed Huffman block: hard-coded literal/length and distance code
+        // sizes from RFC 1951 3.2.6.
+        mz_uint8 *p = r->m_tables[0].m_code_size;
+        mz_uint i;
+        r->m_table_sizes[0] = 288;
+        r->m_table_sizes[1] = 32;
+        TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
+        for (i = 0; i <= 143; ++i)
+          *p++ = 8;
+        for (; i <= 255; ++i)
+          *p++ = 9;
+        for (; i <= 279; ++i)
+          *p++ = 7;
+        for (; i <= 287; ++i)
+          *p++ = 8;
+      } else {
+        // Dynamic Huffman block: read HLIT/HDIST/HCLEN counts, then the
+        // code-length code sizes in dezigzag order.
+        for (counter = 0; counter < 3; counter++) {
+          TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
+          r->m_table_sizes[counter] += s_min_table_sizes[counter];
+        }
+        MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
+        for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
+          mz_uint s;
+          TINFL_GET_BITS(14, s, 3);
+          r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
+        }
+        r->m_table_sizes[2] = 19;
+      }
+      // Build decoding structures (fast lookup table + overflow binary tree)
+      // for each Huffman table, from canonical code lengths. For dynamic
+      // blocks this processes table 2 (code-length codes) first, then uses it
+      // to decode the literal/length and distance code sizes below.
+      for (; (int)r->m_type >= 0; r->m_type--) {
+        int tree_next, tree_cur;
+        tinfl_huff_table *pTable;
+        mz_uint i, j, used_syms, total, sym_index, next_code[17],
+            total_syms[16];
+        pTable = &r->m_tables[r->m_type];
+        MZ_CLEAR_OBJ(total_syms);
+        MZ_CLEAR_OBJ(pTable->m_look_up);
+        MZ_CLEAR_OBJ(pTable->m_tree);
+        for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
+          total_syms[pTable->m_code_size[i]]++;
+        used_syms = 0, total = 0;
+        next_code[0] = next_code[1] = 0;
+        // Assign canonical starting codes per length; an over/under-subscribed
+        // code (total != 65536 with >1 symbol) is invalid.
+        for (i = 1; i <= 15; ++i) {
+          used_syms += total_syms[i];
+          next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
+        }
+        if ((65536 != total) && (used_syms > 1)) {
+          TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
+        }
+        for (tree_next = -1, sym_index = 0;
+             sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
+          mz_uint rev_code = 0, l, cur_code,
+                  code_size = pTable->m_code_size[sym_index];
+          if (!code_size)
+            continue;
+          cur_code = next_code[code_size]++;
+          // Huffman codes are bit-reversed relative to the bit buffer order.
+          for (l = code_size; l > 0; l--, cur_code >>= 1)
+            rev_code = (rev_code << 1) | (cur_code & 1);
+          if (code_size <= TINFL_FAST_LOOKUP_BITS) {
+            // Short code: replicate into every lookup slot it matches.
+            mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
+            while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
+              pTable->m_look_up[rev_code] = k;
+              rev_code += (1 << code_size);
+            }
+            continue;
+          }
+          // Long code: chain nodes into the binary tree (negative indices).
+          if (0 ==
+              (tree_cur = pTable->m_look_up[rev_code &
+                                            (TINFL_FAST_LOOKUP_SIZE - 1)])) {
+            pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
+                (mz_int16)tree_next;
+            tree_cur = tree_next;
+            tree_next -= 2;
+          }
+          rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
+          for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
+            tree_cur -= ((rev_code >>= 1) & 1);
+            if (!pTable->m_tree[-tree_cur - 1]) {
+              pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
+              tree_cur = tree_next;
+              tree_next -= 2;
+            } else
+              tree_cur = pTable->m_tree[-tree_cur - 1];
+          }
+          rev_code >>= 1;
+          tree_cur -= (rev_code & 1);
+          pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
+        }
+        if (r->m_type == 2) {
+          // Decode the literal/length + distance code sizes using the
+          // just-built code-length table; 16/17/18 are repeat codes.
+          for (counter = 0;
+               counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
+            mz_uint s;
+            TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
+            if (dist < 16) {
+              r->m_len_codes[counter++] = (mz_uint8)dist;
+              continue;
+            }
+            if ((dist == 16) && (!counter)) {
+              TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
+            }
+            num_extra = "\02\03\07"[dist - 16];
+            TINFL_GET_BITS(18, s, num_extra);
+            s += "\03\03\013"[dist - 16];
+            TINFL_MEMSET(r->m_len_codes + counter,
+                         (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
+            counter += s;
+          }
+          if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
+            TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
+          }
+          TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
+                       r->m_table_sizes[0]);
+          TINFL_MEMCPY(r->m_tables[1].m_code_size,
+                       r->m_len_codes + r->m_table_sizes[0],
+                       r->m_table_sizes[1]);
+        }
+      }
+      // Main decode loop: literals/lengths from table 0, distances from
+      // table 1, until the end-of-block symbol (256).
+      for (;;) {
+        mz_uint8 *pSrc;
+        for (;;) {
+          // Slow path near either buffer's end: decode one symbol at a time
+          // with full suspension support.
+          if (((pIn_buf_end - pIn_buf_cur) < 4) ||
+              ((pOut_buf_end - pOut_buf_cur) < 2)) {
+            TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
+            if (counter >= 256)
+              break;
+            while (pOut_buf_cur >= pOut_buf_end) {
+              TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
+            }
+            *pOut_buf_cur++ = (mz_uint8)counter;
+          } else {
+            // Fast path: refill the bit buffer in bulk and decode up to two
+            // literal symbols per iteration.
+            int sym2;
+            mz_uint code_len;
+#if TINFL_USE_64BIT_BITBUF
+            if (num_bits < 30) {
+              bit_buf |=
+                  (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
+              pIn_buf_cur += 4;
+              num_bits += 32;
+            }
+#else
+            if (num_bits < 15) {
+              bit_buf |=
+                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
+              pIn_buf_cur += 2;
+              num_bits += 16;
+            }
+#endif
+            if ((sym2 =
+                     r->m_tables[0]
+                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
+                0)
+              code_len = sym2 >> 9;
+            else {
+              code_len = TINFL_FAST_LOOKUP_BITS;
+              do {
+                sym2 = r->m_tables[0]
+                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
+              } while (sym2 < 0);
+            }
+            counter = sym2;
+            bit_buf >>= code_len;
+            num_bits -= code_len;
+            if (counter & 256)
+              break;
+
+#if !TINFL_USE_64BIT_BITBUF
+            if (num_bits < 15) {
+              bit_buf |=
+                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
+              pIn_buf_cur += 2;
+              num_bits += 16;
+            }
+#endif
+            if ((sym2 =
+                     r->m_tables[0]
+                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
+                0)
+              code_len = sym2 >> 9;
+            else {
+              code_len = TINFL_FAST_LOOKUP_BITS;
+              do {
+                sym2 = r->m_tables[0]
+                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
+              } while (sym2 < 0);
+            }
+            bit_buf >>= code_len;
+            num_bits -= code_len;
+
+            pOut_buf_cur[0] = (mz_uint8)counter;
+            if (sym2 & 256) {
+              // Second symbol was a length/end code: emit the first literal
+              // and fall through to the length/distance handling below.
+              pOut_buf_cur++;
+              counter = sym2;
+              break;
+            }
+            pOut_buf_cur[1] = (mz_uint8)sym2;
+            pOut_buf_cur += 2;
+          }
+        }
+        // Symbol 256 terminates the block; 257..285 are match lengths.
+        if ((counter &= 511) == 256)
+          break;
+
+        num_extra = s_length_extra[counter - 257];
+        counter = s_length_base[counter - 257];
+        if (num_extra) {
+          mz_uint extra_bits;
+          TINFL_GET_BITS(25, extra_bits, num_extra);
+          counter += extra_bits;
+        }
+
+        TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
+        num_extra = s_dist_extra[dist];
+        dist = s_dist_base[dist];
+        if (num_extra) {
+          mz_uint extra_bits;
+          TINFL_GET_BITS(27, extra_bits, num_extra);
+          dist += extra_bits;
+        }
+
+        // A distance reaching before the start of a non-wrapping output
+        // buffer means the stream is corrupt.
+        dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
+        if ((dist > dist_from_out_buf_start) &&
+            (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
+          TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
+        }
+
+        pSrc = pOut_buf_start +
+               ((dist_from_out_buf_start - dist) & out_buf_size_mask);
+
+        // Match copy. Slow byte-by-byte path when the copy could run past the
+        // output end (handles circular wrap and suspension).
+        if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
+          while (counter--) {
+            while (pOut_buf_cur >= pOut_buf_end) {
+              TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
+            }
+            *pOut_buf_cur++ =
+                pOut_buf_start[(dist_from_out_buf_start++ - dist) &
+                               out_buf_size_mask];
+          }
+          continue;
+        }
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
+        // Non-overlapping copy (counter <= dist): move 8 bytes at a time.
+        else if ((counter >= 9) && (counter <= dist)) {
+          const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
+          do {
+            ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
+            ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
+            pOut_buf_cur += 8;
+          } while ((pSrc += 8) < pSrc_end);
+          if ((counter &= 7) < 3) {
+            if (counter) {
+              pOut_buf_cur[0] = pSrc[0];
+              if (counter > 1)
+                pOut_buf_cur[1] = pSrc[1];
+              pOut_buf_cur += counter;
+            }
+            continue;
+          }
+        }
+#endif
+        // General path: copy 3 bytes per iteration (safe for overlapping
+        // matches since dist >= 1 and we re-read already-written bytes).
+        do {
+          pOut_buf_cur[0] = pSrc[0];
+          pOut_buf_cur[1] = pSrc[1];
+          pOut_buf_cur[2] = pSrc[2];
+          pOut_buf_cur += 3;
+          pSrc += 3;
+        } while ((int)(counter -= 3) > 2);
+        if ((int)counter > 0) {
+          pOut_buf_cur[0] = pSrc[0];
+          if ((int)counter > 1)
+            pOut_buf_cur[1] = pSrc[1];
+          pOut_buf_cur += counter;
+        }
+      }
+    }
+  } while (!(r->m_final & 1));
+  // After the final block, read the big-endian adler-32 trailer (zlib only).
+  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
+    TINFL_SKIP_BITS(32, num_bits & 7);
+    for (counter = 0; counter < 4; ++counter) {
+      mz_uint s;
+      if (num_bits)
+        TINFL_GET_BITS(41, s, 8);
+      else
+        TINFL_GET_BYTE(42, s);
+      r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
+    }
+  }
+  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
+  TINFL_CR_FINISH
+
+common_exit:
+  // Save coroutine locals, report consumed/produced sizes, and fold the
+  // output produced this call into the running adler-32 (unrolled x8).
+  r->m_num_bits = num_bits;
+  r->m_bit_buf = bit_buf;
+  r->m_dist = dist;
+  r->m_counter = counter;
+  r->m_num_extra = num_extra;
+  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
+  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
+  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
+  if ((decomp_flags &
+       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
+      (status >= 0)) {
+    const mz_uint8 *ptr = pOut_buf_next;
+    size_t buf_len = *pOut_buf_size;
+    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
+                 s2 = r->m_check_adler32 >> 16;
+    // 5552 is the largest n such that the sums cannot overflow 32 bits
+    // before the modulo (standard adler-32 blocking).
+    size_t block_len = buf_len % 5552;
+    while (buf_len) {
+      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
+        s1 += ptr[0], s2 += s1;
+        s1 += ptr[1], s2 += s1;
+        s1 += ptr[2], s2 += s1;
+        s1 += ptr[3], s2 += s1;
+        s1 += ptr[4], s2 += s1;
+        s1 += ptr[5], s2 += s1;
+        s1 += ptr[6], s2 += s1;
+        s1 += ptr[7], s2 += s1;
+      }
+      for (; i < block_len; ++i)
+        s1 += *ptr++, s2 += s1;
+      s1 %= 65521U, s2 %= 65521U;
+      buf_len -= block_len;
+      block_len = 5552;
+    }
+    r->m_check_adler32 = (s2 << 16) + s1;
+    if ((status == TINFL_STATUS_DONE) &&
+        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
+        (r->m_check_adler32 != r->m_z_adler32))
+      status = TINFL_STATUS_ADLER32_MISMATCH;
+  }
+  return status;
+}
+
+// Higher level helper functions.
+// Decompresses an entire in-memory stream into a heap buffer that grows by
+// doubling (minimum 128 bytes). On success returns the buffer (caller must
+// MZ_FREE it) and sets *pOut_len to the decompressed size; on failure
+// returns NULL with *pOut_len = 0.
+void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
+                                   size_t *pOut_len, int flags) {
+  tinfl_decompressor decomp;
+  void *pBuf = NULL, *pNew_buf;
+  size_t src_buf_ofs = 0, out_buf_capacity = 0;
+  *pOut_len = 0;
+  tinfl_init(&decomp);
+  for (;;) {
+    size_t src_buf_size = src_buf_len - src_buf_ofs,
+           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
+    // All input is present, so HAS_MORE_INPUT is forced off and the output
+    // buffer is treated as non-wrapping.
+    tinfl_status status = tinfl_decompress(
+        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
+        (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
+        &dst_buf_size,
+        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
+            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
+    // NEEDS_MORE_INPUT here means the stream is truncated: fail.
+    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
+      MZ_FREE(pBuf);
+      *pOut_len = 0;
+      return NULL;
+    }
+    src_buf_ofs += src_buf_size;
+    *pOut_len += dst_buf_size;
+    if (status == TINFL_STATUS_DONE)
+      break;
+    new_out_buf_capacity = out_buf_capacity * 2;
+    if (new_out_buf_capacity < 128)
+      new_out_buf_capacity = 128;
+    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
+    if (!pNew_buf) {
+      MZ_FREE(pBuf);
+      *pOut_len = 0;
+      return NULL;
+    }
+    pBuf = pNew_buf;
+    out_buf_capacity = new_out_buf_capacity;
+  }
+  return pBuf;
+}
+
+// Single-call decompression into a caller-provided buffer. Returns the
+// number of bytes written, or TINFL_DECOMPRESS_MEM_TO_MEM_FAILED if the
+// stream did not decode completely into out_buf_len bytes.
+size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
+                                   const void *pSrc_buf, size_t src_buf_len,
+                                   int flags) {
+  tinfl_decompressor decomp;
+  tinfl_status status;
+  tinfl_init(&decomp);
+  status =
+      tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
+                       (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
+                       (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
+                           TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
+  return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
+                                       : out_buf_len;
+}
+
+// Decompresses an in-memory stream, delivering output to pPut_buf_func in
+// dictionary-sized chunks via a heap-allocated circular window. Returns 1 on
+// success, 0 on failure (including the callback returning 0), and updates
+// *pIn_buf_size to the number of input bytes consumed.
+int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
+                                     tinfl_put_buf_func_ptr pPut_buf_func,
+                                     void *pPut_buf_user, int flags) {
+  int result = 0;
+  tinfl_decompressor decomp;
+  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
+  size_t in_buf_ofs = 0, dict_ofs = 0;
+  if (!pDict)
+    return TINFL_STATUS_FAILED;
+  tinfl_init(&decomp);
+  for (;;) {
+    size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
+           dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
+    tinfl_status status =
+        tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
+                         &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
+                         (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
+                                    TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
+    in_buf_ofs += in_buf_size;
+    // Hand the new output to the caller; a 0 return aborts with result = 0.
+    if ((dst_buf_size) &&
+        (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
+      break;
+    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
+      result = (status == TINFL_STATUS_DONE);
+      break;
+    }
+    dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
+  }
+  MZ_FREE(pDict);
+  *pIn_buf_size = in_buf_ofs;
+  return result;
+}
+
+// ------------------- Low-level Compression (independent from all decompression
+// API's)
+
+// Purposely making these tables static for faster init and thread safety.
+// 256-entry table mapping a match-length index to its deflate length symbol
+// (codes 257..285). NOTE(review): index presumably = match length minus the
+// 3-byte minimum, matching s_length_base in the decompressor — confirm
+// against tdefl's match emitter (not in this view).
+static const mz_uint16 s_tdefl_len_sym[256] = {
+    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
+    268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
+    272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
+    274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
+    276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+    277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+    278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
+    279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
+    280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
+    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
+    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
+    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
+    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
+    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
+    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
+    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
+    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
+    285};
+
+// Extra-bit counts paralleling s_tdefl_len_sym (same 256-entry indexing).
+static const mz_uint8 s_tdefl_len_extra[256] = {
+    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
+
+// 512-entry table: deflate distance symbol for each small distance index.
+static const mz_uint8 s_tdefl_small_dist_sym[512] = {
+    0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
+    8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+    11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+    12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
+    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+    14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+    15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+    16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
+
+// Extra-bit counts paralleling s_tdefl_small_dist_sym.
+static const mz_uint8 s_tdefl_small_dist_extra[512] = {
+    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
+
+static const mz_uint8 s_tdefl_large_dist_sym[128] = {
+ 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
+ 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
+
+static const mz_uint8 s_tdefl_large_dist_extra[128] = {
+ 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
+
+// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
+// values.
+typedef struct {
+ mz_uint16 m_key, m_sym_index;
+} tdefl_sym_freq;
+static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
+ tdefl_sym_freq *pSyms0,
+ tdefl_sym_freq *pSyms1) {
+ mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
+ tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
+ MZ_CLEAR_OBJ(hist);
+ for (i = 0; i < num_syms; i++) {
+ mz_uint freq = pSyms0[i].m_key;
+ hist[freq & 0xFF]++;
+ hist[256 + ((freq >> 8) & 0xFF)]++;
+ }
+ while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
+ total_passes--;
+ for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
+ const mz_uint32 *pHist = &hist[pass << 8];
+ mz_uint offsets[256], cur_ofs = 0;
+ for (i = 0; i < 256; i++) {
+ offsets[i] = cur_ofs;
+ cur_ofs += pHist[i];
+ }
+ for (i = 0; i < num_syms; i++)
+ pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
+ pCur_syms[i];
+ {
+ tdefl_sym_freq *t = pCur_syms;
+ pCur_syms = pNew_syms;
+ pNew_syms = t;
+ }
+ }
+ return pCur_syms;
+}
+
+// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
+// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
+static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
+ int root, leaf, next, avbl, used, dpth;
+ if (n == 0)
+ return;
+ else if (n == 1) {
+ A[0].m_key = 1;
+ return;
+ }
+ A[0].m_key += A[1].m_key;
+ root = 0;
+ leaf = 2;
+ for (next = 1; next < n - 1; next++) {
+ if (leaf >= n || A[root].m_key < A[leaf].m_key) {
+ A[next].m_key = A[root].m_key;
+ A[root++].m_key = (mz_uint16)next;
+ } else
+ A[next].m_key = A[leaf++].m_key;
+ if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
+ A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
+ A[root++].m_key = (mz_uint16)next;
+ } else
+ A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
+ }
+ A[n - 2].m_key = 0;
+ for (next = n - 3; next >= 0; next--)
+ A[next].m_key = A[A[next].m_key].m_key + 1;
+ avbl = 1;
+ used = dpth = 0;
+ root = n - 2;
+ next = n - 1;
+ while (avbl > 0) {
+ while (root >= 0 && (int)A[root].m_key == dpth) {
+ used++;
+ root--;
+ }
+ while (avbl > used) {
+ A[next--].m_key = (mz_uint16)(dpth);
+ avbl--;
+ }
+ avbl = 2 * used;
+ dpth++;
+ used = 0;
+ }
+}
+
+// Limits canonical Huffman code table's max code size.
+enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
+static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
+ int code_list_len,
+ int max_code_size) {
+ int i;
+ mz_uint32 total = 0;
+ if (code_list_len <= 1)
+ return;
+ for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
+ pNum_codes[max_code_size] += pNum_codes[i];
+ for (i = max_code_size; i > 0; i--)
+ total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
+ while (total != (1UL << max_code_size)) {
+ pNum_codes[max_code_size]--;
+ for (i = max_code_size - 1; i > 0; i--)
+ if (pNum_codes[i]) {
+ pNum_codes[i]--;
+ pNum_codes[i + 1] += 2;
+ break;
+ }
+ total--;
+ }
+}
+
+static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
+ int table_len, int code_size_limit,
+ int static_table) {
+ int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
+ mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
+ MZ_CLEAR_OBJ(num_codes);
+ if (static_table) {
+ for (i = 0; i < table_len; i++)
+ num_codes[d->m_huff_code_sizes[table_num][i]]++;
+ } else {
+ tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
+ *pSyms;
+ int num_used_syms = 0;
+ const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
+ for (i = 0; i < table_len; i++)
+ if (pSym_count[i]) {
+ syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
+ syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
+ }
+
+ pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
+ tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
+
+ for (i = 0; i < num_used_syms; i++)
+ num_codes[pSyms[i].m_key]++;
+
+ tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
+ code_size_limit);
+
+ MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
+ MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
+ for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
+ for (l = num_codes[i]; l > 0; l--)
+ d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
+ }
+
+ next_code[1] = 0;
+ for (j = 0, i = 2; i <= code_size_limit; i++)
+ next_code[i] = j = ((j + num_codes[i - 1]) << 1);
+
+ for (i = 0; i < table_len; i++) {
+ mz_uint rev_code = 0, code, code_size;
+ if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0)
+ continue;
+ code = next_code[code_size]++;
+ for (l = code_size; l > 0; l--, code >>= 1)
+ rev_code = (rev_code << 1) | (code & 1);
+ d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
+ }
+}
+
+#define TDEFL_PUT_BITS(b, l) \
+ do { \
+ mz_uint bits = b; \
+ mz_uint len = l; \
+ MZ_ASSERT(bits <= ((1U << len) - 1U)); \
+ d->m_bit_buffer |= (bits << d->m_bits_in); \
+ d->m_bits_in += len; \
+ while (d->m_bits_in >= 8) { \
+ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
+ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
+ d->m_bit_buffer >>= 8; \
+ d->m_bits_in -= 8; \
+ } \
+ } \
+ MZ_MACRO_END
+
+#define TDEFL_RLE_PREV_CODE_SIZE() \
+ { \
+ if (rle_repeat_count) { \
+ if (rle_repeat_count < 3) { \
+ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
+ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
+ while (rle_repeat_count--) \
+ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
+ } else { \
+ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
+ packed_code_sizes[num_packed_code_sizes++] = 16; \
+ packed_code_sizes[num_packed_code_sizes++] = \
+ (mz_uint8)(rle_repeat_count - 3); \
+ } \
+ rle_repeat_count = 0; \
+ } \
+ }
+
+#define TDEFL_RLE_ZERO_CODE_SIZE() \
+ { \
+ if (rle_z_count) { \
+ if (rle_z_count < 3) { \
+ d->m_huff_count[2][0] = \
+ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
+ while (rle_z_count--) \
+ packed_code_sizes[num_packed_code_sizes++] = 0; \
+ } else if (rle_z_count <= 10) { \
+ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
+ packed_code_sizes[num_packed_code_sizes++] = 17; \
+ packed_code_sizes[num_packed_code_sizes++] = \
+ (mz_uint8)(rle_z_count - 3); \
+ } else { \
+ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
+ packed_code_sizes[num_packed_code_sizes++] = 18; \
+ packed_code_sizes[num_packed_code_sizes++] = \
+ (mz_uint8)(rle_z_count - 11); \
+ } \
+ rle_z_count = 0; \
+ } \
+ }
+
+static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+static void tdefl_start_dynamic_block(tdefl_compressor *d) {
+ int num_lit_codes, num_dist_codes, num_bit_lengths;
+ mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
+ rle_repeat_count, packed_code_sizes_index;
+ mz_uint8
+ code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
+ packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
+ prev_code_size = 0xFF;
+
+ d->m_huff_count[0][256] = 1;
+
+ tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
+ tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
+
+ for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
+ if (d->m_huff_code_sizes[0][num_lit_codes - 1])
+ break;
+ for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
+ if (d->m_huff_code_sizes[1][num_dist_codes - 1])
+ break;
+
+ memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0],
+ sizeof(mz_uint8) * num_lit_codes);
+ memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
+ sizeof(mz_uint8) * num_dist_codes);
+ total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
+ num_packed_code_sizes = 0;
+ rle_z_count = 0;
+ rle_repeat_count = 0;
+
+ memset(&d->m_huff_count[2][0], 0,
+ sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
+ for (i = 0; i < total_code_sizes_to_pack; i++) {
+ mz_uint8 code_size = code_sizes_to_pack[i];
+ if (!code_size) {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ if (++rle_z_count == 138) {
+ TDEFL_RLE_ZERO_CODE_SIZE();
+ }
+ } else {
+ TDEFL_RLE_ZERO_CODE_SIZE();
+ if (code_size != prev_code_size) {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ d->m_huff_count[2][code_size] =
+ (mz_uint16)(d->m_huff_count[2][code_size] + 1);
+ packed_code_sizes[num_packed_code_sizes++] = code_size;
+ } else if (++rle_repeat_count == 6) {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ }
+ }
+ prev_code_size = code_size;
+ }
+ if (rle_repeat_count) {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ } else {
+ TDEFL_RLE_ZERO_CODE_SIZE();
+ }
+
+ tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
+
+ TDEFL_PUT_BITS(2, 2);
+
+ TDEFL_PUT_BITS(num_lit_codes - 257, 5);
+ TDEFL_PUT_BITS(num_dist_codes - 1, 5);
+
+ for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
+ if (d->m_huff_code_sizes
+ [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
+ break;
+ num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
+ TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
+ for (i = 0; (int)i < num_bit_lengths; i++)
+ TDEFL_PUT_BITS(
+ d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
+
+ for (packed_code_sizes_index = 0;
+ packed_code_sizes_index < num_packed_code_sizes;) {
+ mz_uint code = packed_code_sizes[packed_code_sizes_index++];
+ MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
+ TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
+ if (code >= 16)
+ TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
+ "\02\03\07"[code - 16]);
+ }
+}
+
+static void tdefl_start_static_block(tdefl_compressor *d) {
+ mz_uint i;
+ mz_uint8 *p = &d->m_huff_code_sizes[0][0];
+
+ for (i = 0; i <= 143; ++i)
+ *p++ = 8;
+ for (; i <= 255; ++i)
+ *p++ = 9;
+ for (; i <= 279; ++i)
+ *p++ = 7;
+ for (; i <= 287; ++i)
+ *p++ = 8;
+
+ memset(d->m_huff_code_sizes[1], 5, 32);
+
+ tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
+ tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
+
+ TDEFL_PUT_BITS(1, 2);
+}
+
+static const mz_uint mz_bitmasks[17] = {
+ 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
+ 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
+ MINIZ_HAS_64BIT_REGISTERS
+static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
+ mz_uint flags;
+ mz_uint8 *pLZ_codes;
+ mz_uint8 *pOutput_buf = d->m_pOutput_buf;
+ mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
+ mz_uint64 bit_buffer = d->m_bit_buffer;
+ mz_uint bits_in = d->m_bits_in;
+
+#define TDEFL_PUT_BITS_FAST(b, l) \
+ { \
+ bit_buffer |= (((mz_uint64)(b)) << bits_in); \
+ bits_in += (l); \
+ }
+
+ flags = 1;
+ for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
+ flags >>= 1) {
+ if (flags == 1)
+ flags = *pLZ_codes++ | 0x100;
+
+ if (flags & 1) {
+ mz_uint s0, s1, n0, n1, sym, num_extra_bits;
+ mz_uint match_len = pLZ_codes[0],
+ match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
+ pLZ_codes += 3;
+
+ MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
+ d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
+ s_tdefl_len_extra[match_len]);
+
+ // This sequence coaxes MSVC into using cmov's vs. jmp's.
+ s0 = s_tdefl_small_dist_sym[match_dist & 511];
+ n0 = s_tdefl_small_dist_extra[match_dist & 511];
+ s1 = s_tdefl_large_dist_sym[match_dist >> 8];
+ n1 = s_tdefl_large_dist_extra[match_dist >> 8];
+ sym = (match_dist < 512) ? s0 : s1;
+ num_extra_bits = (match_dist < 512) ? n0 : n1;
+
+ MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
+ d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
+ num_extra_bits);
+ } else {
+ mz_uint lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
+ d->m_huff_code_sizes[0][lit]);
+
+ if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
+ flags >>= 1;
+ lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
+ d->m_huff_code_sizes[0][lit]);
+
+ if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
+ flags >>= 1;
+ lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
+ d->m_huff_code_sizes[0][lit]);
+ }
+ }
+ }
+
+ if (pOutput_buf >= d->m_pOutput_buf_end)
+ return MZ_FALSE;
+
+ *(mz_uint64 *)pOutput_buf = bit_buffer;
+ pOutput_buf += (bits_in >> 3);
+ bit_buffer >>= (bits_in & ~7);
+ bits_in &= 7;
+ }
+
+#undef TDEFL_PUT_BITS_FAST
+
+ d->m_pOutput_buf = pOutput_buf;
+ d->m_bits_in = 0;
+ d->m_bit_buffer = 0;
+
+ while (bits_in) {
+ mz_uint32 n = MZ_MIN(bits_in, 16);
+ TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
+ bit_buffer >>= n;
+ bits_in -= n;
+ }
+
+ TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
+
+ return (d->m_pOutput_buf < d->m_pOutput_buf_end);
+}
+#else
+static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
+ mz_uint flags;
+ mz_uint8 *pLZ_codes;
+
+ flags = 1;
+ for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
+ flags >>= 1) {
+ if (flags == 1)
+ flags = *pLZ_codes++ | 0x100;
+ if (flags & 1) {
+ mz_uint sym, num_extra_bits;
+ mz_uint match_len = pLZ_codes[0],
+ match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
+ pLZ_codes += 3;
+
+ MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
+ d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
+ s_tdefl_len_extra[match_len]);
+
+ if (match_dist < 512) {
+ sym = s_tdefl_small_dist_sym[match_dist];
+ num_extra_bits = s_tdefl_small_dist_extra[match_dist];
+ } else {
+ sym = s_tdefl_large_dist_sym[match_dist >> 8];
+ num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
+ }
+ TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
+ } else {
+ mz_uint lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
+ }
+ }
+
+ TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
+
+ return (d->m_pOutput_buf < d->m_pOutput_buf_end);
+}
+#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
+ // MINIZ_HAS_64BIT_REGISTERS
+
+static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
+ if (static_block)
+ tdefl_start_static_block(d);
+ else
+ tdefl_start_dynamic_block(d);
+ return tdefl_compress_lz_codes(d);
+}
+
+static int tdefl_flush_block(tdefl_compressor *d, int flush) {
+ mz_uint saved_bit_buf, saved_bits_in;
+ mz_uint8 *pSaved_output_buf;
+ mz_bool comp_block_succeeded = MZ_FALSE;
+ int n, use_raw_block =
+ ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
+ (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
+ mz_uint8 *pOutput_buf_start =
+ ((d->m_pPut_buf_func == NULL) &&
+ ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
+ ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
+ : d->m_output_buf;
+
+ d->m_pOutput_buf = pOutput_buf_start;
+ d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
+
+ MZ_ASSERT(!d->m_output_flush_remaining);
+ d->m_output_flush_ofs = 0;
+ d->m_output_flush_remaining = 0;
+
+ *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
+ d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
+
+ if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
+ TDEFL_PUT_BITS(0x78, 8);
+ TDEFL_PUT_BITS(0x01, 8);
+ }
+
+ TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
+
+ pSaved_output_buf = d->m_pOutput_buf;
+ saved_bit_buf = d->m_bit_buffer;
+ saved_bits_in = d->m_bits_in;
+
+ if (!use_raw_block)
+ comp_block_succeeded =
+ tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
+ (d->m_total_lz_bytes < 48));
+
+ // If the block gets expanded, forget the current contents of the output
+ // buffer and send a raw block instead.
+ if (((use_raw_block) ||
+ ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
+ d->m_total_lz_bytes))) &&
+ ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
+ mz_uint i;
+ d->m_pOutput_buf = pSaved_output_buf;
+ d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
+ TDEFL_PUT_BITS(0, 2);
+ if (d->m_bits_in) {
+ TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
+ }
+ for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
+ TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
+ }
+ for (i = 0; i < d->m_total_lz_bytes; ++i) {
+ TDEFL_PUT_BITS(
+ d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
+ 8);
+ }
+ }
+ // Check for the extremely unlikely (if not impossible) case of the compressed
+ // block not fitting into the output buffer when using dynamic codes.
+ else if (!comp_block_succeeded) {
+ d->m_pOutput_buf = pSaved_output_buf;
+ d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
+ tdefl_compress_block(d, MZ_TRUE);
+ }
+
+ if (flush) {
+ if (flush == TDEFL_FINISH) {
+ if (d->m_bits_in) {
+ TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
+ }
+ if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
+ mz_uint i, a = d->m_adler32;
+ for (i = 0; i < 4; i++) {
+ TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
+ a <<= 8;
+ }
+ }
+ } else {
+ mz_uint i, z = 0;
+ TDEFL_PUT_BITS(0, 3);
+ if (d->m_bits_in) {
+ TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
+ }
+ for (i = 2; i; --i, z ^= 0xFFFF) {
+ TDEFL_PUT_BITS(z & 0xFFFF, 16);
+ }
+ }
+ }
+
+ MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
+
+ memset(&d->m_huff_count[0][0], 0,
+ sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
+ memset(&d->m_huff_count[1][0], 0,
+ sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
+
+ d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
+ d->m_pLZ_flags = d->m_lz_code_buf;
+ d->m_num_flags_left = 8;
+ d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
+ d->m_total_lz_bytes = 0;
+ d->m_block_index++;
+
+ if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
+ if (d->m_pPut_buf_func) {
+ *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
+ if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
+ return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
+ } else if (pOutput_buf_start == d->m_output_buf) {
+ int bytes_to_copy = (int)MZ_MIN(
+ (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
+ memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
+ bytes_to_copy);
+ d->m_out_buf_ofs += bytes_to_copy;
+ if ((n -= bytes_to_copy) != 0) {
+ d->m_output_flush_ofs = bytes_to_copy;
+ d->m_output_flush_remaining = n;
+ }
+ } else {
+ d->m_out_buf_ofs += n;
+ }
+ }
+
+ return d->m_output_flush_remaining;
+}
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
+#define TDEFL_READ_UNALIGNED_WORD(p) ((p)[0] | (p)[1] << 8)
+static MZ_FORCEINLINE void
+tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
+ mz_uint max_match_len, mz_uint *pMatch_dist,
+ mz_uint *pMatch_len) {
+ mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
+ match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
+ probe_len;
+ mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
+ const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
+ mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
+ s01 = *s;
+ MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
+ if (max_match_len <= match_len)
+ return;
+ for (;;) {
+ for (;;) {
+ if (--num_probes_left == 0)
+ return;
+#define TDEFL_PROBE \
+ next_probe_pos = d->m_next[probe_pos]; \
+ if ((!next_probe_pos) || \
+ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
+ return; \
+ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
+ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
+ break;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ }
+ if (!dist)
+ break;
+ q = (const mz_uint16 *)(d->m_dict + probe_pos);
+ if (*q != s01)
+ continue;
+ p = s;
+ probe_len = 32;
+ do {
+ } while ((*(++p) == *(++q)) && (*(++p) == *(++q)) && (*(++p) == *(++q)) &&
+ (*(++p) == *(++q)) && (--probe_len > 0));
+ if (!probe_len) {
+ *pMatch_dist = dist;
+ *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
+ break;
+ } else if ((probe_len = ((mz_uint)(p - s) * 2) +
+ (mz_uint)(*(const mz_uint8 *)p ==
+ *(const mz_uint8 *)q)) > match_len) {
+ *pMatch_dist = dist;
+ if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
+ max_match_len)
+ break;
+ c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
+ }
+ }
+}
+#else
+static MZ_FORCEINLINE void
+tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
+ mz_uint max_match_len, mz_uint *pMatch_dist,
+ mz_uint *pMatch_len) {
+ mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
+ match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
+ probe_len;
+ mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
+ const mz_uint8 *s = d->m_dict + pos, *p, *q;
+ mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
+ MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
+ if (max_match_len <= match_len)
+ return;
+ for (;;) {
+ for (;;) {
+ if (--num_probes_left == 0)
+ return;
+#define TDEFL_PROBE \
+ next_probe_pos = d->m_next[probe_pos]; \
+ if ((!next_probe_pos) || \
+ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
+ return; \
+ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
+ if ((d->m_dict[probe_pos + match_len] == c0) && \
+ (d->m_dict[probe_pos + match_len - 1] == c1)) \
+ break;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ }
+ if (!dist)
+ break;
+ p = s;
+ q = d->m_dict + probe_pos;
+ for (probe_len = 0; probe_len < max_match_len; probe_len++)
+ if (*p++ != *q++)
+ break;
+ if (probe_len > match_len) {
+ *pMatch_dist = dist;
+ if ((*pMatch_len = match_len = probe_len) == max_match_len)
+ return;
+ c0 = d->m_dict[pos + match_len];
+ c1 = d->m_dict[pos + match_len - 1];
+ }
+ }
+}
+#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
+ // Faster, minimally featured LZRW1-style match+parse loop with better
+ // register utilization. Intended for applications where raw throughput is
+ // valued more highly than ratio.
+ mz_uint lookahead_pos = d->m_lookahead_pos,
+ lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
+ total_lz_bytes = d->m_total_lz_bytes,
+ num_flags_left = d->m_num_flags_left;
+ mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
+ mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
+
+ while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
+ const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
+ mz_uint dst_pos =
+ (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
+ mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
+ d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
+ d->m_src_buf_left -= num_bytes_to_process;
+ lookahead_size += num_bytes_to_process;
+
+ while (num_bytes_to_process) {
+ mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
+ memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
+ if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
+ memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
+ MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
+ d->m_pSrc += n;
+ dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
+ num_bytes_to_process -= n;
+ }
+
+ dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
+ if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
+ break;
+
+ while (lookahead_size >= 4) {
+ mz_uint cur_match_dist, cur_match_len = 1;
+ mz_uint8 *pCur_dict = d->m_dict + cur_pos;
+ mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
+ mz_uint hash =
+ (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
+ TDEFL_LEVEL1_HASH_SIZE_MASK;
+ mz_uint probe_pos = d->m_hash[hash];
+ d->m_hash[hash] = (mz_uint16)lookahead_pos;
+
+ if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
+ dict_size) &&
+ ((mz_uint32)(
+ *(d->m_dict + (probe_pos & TDEFL_LZ_DICT_SIZE_MASK)) |
+ (*(d->m_dict + ((probe_pos & TDEFL_LZ_DICT_SIZE_MASK) + 1))
+ << 8) |
+ (*(d->m_dict + ((probe_pos & TDEFL_LZ_DICT_SIZE_MASK) + 2))
+ << 16)) == first_trigram)) {
+ const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
+ const mz_uint16 *q =
+ (const mz_uint16 *)(d->m_dict +
+ (probe_pos & TDEFL_LZ_DICT_SIZE_MASK));
+ mz_uint32 probe_len = 32;
+ do {
+ } while ((*(++p) == *(++q)) && (*(++p) == *(++q)) &&
+ (*(++p) == *(++q)) && (*(++p) == *(++q)) && (--probe_len > 0));
+ cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
+ (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
+ if (!probe_len)
+ cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
+
+ if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
+ ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
+ (cur_match_dist >= 8U * 1024U))) {
+ cur_match_len = 1;
+ *pLZ_code_buf++ = (mz_uint8)first_trigram;
+ *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
+ d->m_huff_count[0][(mz_uint8)first_trigram]++;
+ } else {
+ mz_uint32 s0, s1;
+ cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
+
+ MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
+ (cur_match_dist >= 1) &&
+ (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
+
+ cur_match_dist--;
+
+ pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
+ *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
+ pLZ_code_buf += 3;
+ *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
+
+ s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
+ s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
+ d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
+
+ d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
+ TDEFL_MIN_MATCH_LEN]]++;
+ }
+ } else {
+ *pLZ_code_buf++ = (mz_uint8)first_trigram;
+ *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
+ d->m_huff_count[0][(mz_uint8)first_trigram]++;
+ }
+
+ if (--num_flags_left == 0) {
+ num_flags_left = 8;
+ pLZ_flags = pLZ_code_buf++;
+ }
+
+ total_lz_bytes += cur_match_len;
+ lookahead_pos += cur_match_len;
+ dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
+ cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
+ MZ_ASSERT(lookahead_size >= cur_match_len);
+ lookahead_size -= cur_match_len;
+
+ if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
+ int n;
+ d->m_lookahead_pos = lookahead_pos;
+ d->m_lookahead_size = lookahead_size;
+ d->m_dict_size = dict_size;
+ d->m_total_lz_bytes = total_lz_bytes;
+ d->m_pLZ_code_buf = pLZ_code_buf;
+ d->m_pLZ_flags = pLZ_flags;
+ d->m_num_flags_left = num_flags_left;
+ if ((n = tdefl_flush_block(d, 0)) != 0)
+ return (n < 0) ? MZ_FALSE : MZ_TRUE;
+ total_lz_bytes = d->m_total_lz_bytes;
+ pLZ_code_buf = d->m_pLZ_code_buf;
+ pLZ_flags = d->m_pLZ_flags;
+ num_flags_left = d->m_num_flags_left;
+ }
+ }
+
+ while (lookahead_size) {
+ mz_uint8 lit = d->m_dict[cur_pos];
+
+ total_lz_bytes++;
+ *pLZ_code_buf++ = lit;
+ *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
+ if (--num_flags_left == 0) {
+ num_flags_left = 8;
+ pLZ_flags = pLZ_code_buf++;
+ }
+
+ d->m_huff_count[0][lit]++;
+
+ lookahead_pos++;
+ dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
+ cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
+ lookahead_size--;
+
+ if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
+ int n;
+ d->m_lookahead_pos = lookahead_pos;
+ d->m_lookahead_size = lookahead_size;
+ d->m_dict_size = dict_size;
+ d->m_total_lz_bytes = total_lz_bytes;
+ d->m_pLZ_code_buf = pLZ_code_buf;
+ d->m_pLZ_flags = pLZ_flags;
+ d->m_num_flags_left = num_flags_left;
+ if ((n = tdefl_flush_block(d, 0)) != 0)
+ return (n < 0) ? MZ_FALSE : MZ_TRUE;
+ total_lz_bytes = d->m_total_lz_bytes;
+ pLZ_code_buf = d->m_pLZ_code_buf;
+ pLZ_flags = d->m_pLZ_flags;
+ num_flags_left = d->m_num_flags_left;
+ }
+ }
+ }
+
+ d->m_lookahead_pos = lookahead_pos;
+ d->m_lookahead_size = lookahead_size;
+ d->m_dict_size = dict_size;
+ d->m_total_lz_bytes = total_lz_bytes;
+ d->m_pLZ_code_buf = pLZ_code_buf;
+ d->m_pLZ_flags = pLZ_flags;
+ d->m_num_flags_left = num_flags_left;
+ return MZ_TRUE;
+}
+#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+
+// Append one literal byte to the LZ code buffer: store the byte, shift a
+// 0 bit into the current flags byte (0 = literal, 1 = match), start a new
+// flags byte after every 8 symbols, and tally the literal's Huffman count.
+static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
+ mz_uint8 lit) {
+ d->m_total_lz_bytes++;
+ *d->m_pLZ_code_buf++ = lit;
+ *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
+ if (--d->m_num_flags_left == 0) {
+ d->m_num_flags_left = 8;
+ // Reserve the next byte of the code buffer as the new flags byte.
+ d->m_pLZ_flags = d->m_pLZ_code_buf++;
+ }
+ d->m_huff_count[0][lit]++;
+}
+
+// Append a (length, distance) match to the LZ code buffer as 3 bytes:
+// biased length, then the distance minus one in little-endian order.
+// Shifts a 1 bit into the flags byte and updates both Huffman count
+// tables (distance symbols and length symbols).
+static MZ_FORCEINLINE void
+tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) {
+ mz_uint32 s0, s1;
+
+ MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
+ (match_dist <= TDEFL_LZ_DICT_SIZE));
+
+ d->m_total_lz_bytes += match_len;
+
+ d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
+
+ match_dist -= 1;
+ d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
+ d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
+ d->m_pLZ_code_buf += 3;
+
+ // 0x80 shifted in from the top marks this symbol as a match.
+ *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
+ if (--d->m_num_flags_left == 0) {
+ d->m_num_flags_left = 8;
+ d->m_pLZ_flags = d->m_pLZ_code_buf++;
+ }
+
+ // Small-distance table covers dist < 512; large table indexes by the
+ // high byte of the (already decremented) distance.
+ s0 = s_tdefl_small_dist_sym[match_dist & 511];
+ s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
+ d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
+
+ // NOTE(review): this condition is always true given the MZ_ASSERT above;
+ // kept as-is to match upstream miniz.
+ if (match_len >= TDEFL_MIN_MATCH_LEN)
+ d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
+}
+
+// General-purpose (non-fast-path) compressor loop: maintains the sliding
+// dictionary and hash chains, finds matches, and performs simple lazy or
+// greedy parsing depending on d->m_greedy_parsing. Returns MZ_FALSE only
+// when tdefl_flush_block() reports a hard failure; MZ_TRUE means either
+// all input was consumed or the output buffer filled up.
+static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
+ const mz_uint8 *pSrc = d->m_pSrc;
+ size_t src_buf_left = d->m_src_buf_left;
+ tdefl_flush flush = d->m_flush;
+
+ while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
+ mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
+ // Update dictionary and hash chains. Keeps the lookahead size equal to
+ // TDEFL_MAX_MATCH_LEN.
+ if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
+ // Bulk path: enough history exists to roll the 2-byte hash forward
+ // one input byte at a time.
+ mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
+ TDEFL_LZ_DICT_SIZE_MASK,
+ ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
+ mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
+ << TDEFL_LZ_HASH_SHIFT) ^
+ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
+ mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
+ src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
+ const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
+ src_buf_left -= num_bytes_to_process;
+ d->m_lookahead_size += num_bytes_to_process;
+ while (pSrc != pSrc_end) {
+ mz_uint8 c = *pSrc++;
+ d->m_dict[dst_pos] = c;
+ // Mirror the first MAX_MATCH_LEN-1 bytes past the end of the
+ // dictionary so matches can read past the wrap point linearly.
+ if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
+ d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
+ hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
+ d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
+ d->m_hash[hash] = (mz_uint16)(ins_pos);
+ dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
+ ins_pos++;
+ }
+ } else {
+ // Startup path: fewer than MIN_MATCH_LEN-1 bytes seen so far, so the
+ // hash must be recomputed from scratch per byte.
+ while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
+ mz_uint8 c = *pSrc++;
+ mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
+ TDEFL_LZ_DICT_SIZE_MASK;
+ src_buf_left--;
+ d->m_dict[dst_pos] = c;
+ if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
+ d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
+ if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
+ mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
+ mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
+ << (TDEFL_LZ_HASH_SHIFT * 2)) ^
+ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
+ << TDEFL_LZ_HASH_SHIFT) ^
+ c) &
+ (TDEFL_LZ_HASH_SIZE - 1);
+ d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
+ d->m_hash[hash] = (mz_uint16)(ins_pos);
+ }
+ }
+ }
+ d->m_dict_size =
+ MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
+ // Unless flushing, wait for a full lookahead before emitting symbols.
+ if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
+ break;
+
+ // Simple lazy/greedy parsing state machine.
+ len_to_move = 1;
+ cur_match_dist = 0;
+ cur_match_len =
+ d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
+ cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
+ if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
+ // RLE mode: only look for a run of the previous byte.
+ if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
+ mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
+ cur_match_len = 0;
+ while (cur_match_len < d->m_lookahead_size) {
+ if (d->m_dict[cur_pos + cur_match_len] != c)
+ break;
+ cur_match_len++;
+ }
+ if (cur_match_len < TDEFL_MIN_MATCH_LEN)
+ cur_match_len = 0;
+ else
+ cur_match_dist = 1;
+ }
+ } else {
+ tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
+ d->m_lookahead_size, &cur_match_dist, &cur_match_len);
+ }
+ // Reject matches that cost more than they save (minimum-length match at
+ // a far distance), self-matches, and short matches in filter mode.
+ if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
+ (cur_match_dist >= 8U * 1024U)) ||
+ (cur_pos == cur_match_dist) ||
+ ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
+ cur_match_dist = cur_match_len = 0;
+ }
+ if (d->m_saved_match_len) {
+ // Lazy parsing: a match was deferred last iteration; keep whichever
+ // of (deferred match, current match) is longer.
+ if (cur_match_len > d->m_saved_match_len) {
+ tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
+ if (cur_match_len >= 128) {
+ tdefl_record_match(d, cur_match_len, cur_match_dist);
+ d->m_saved_match_len = 0;
+ len_to_move = cur_match_len;
+ } else {
+ d->m_saved_lit = d->m_dict[cur_pos];
+ d->m_saved_match_dist = cur_match_dist;
+ d->m_saved_match_len = cur_match_len;
+ }
+ } else {
+ tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
+ len_to_move = d->m_saved_match_len - 1;
+ d->m_saved_match_len = 0;
+ }
+ } else if (!cur_match_dist)
+ tdefl_record_literal(d,
+ d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
+ else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
+ (cur_match_len >= 128)) {
+ // Greedy: long matches (>= 128) are taken immediately even when lazy.
+ tdefl_record_match(d, cur_match_len, cur_match_dist);
+ len_to_move = cur_match_len;
+ } else {
+ // Lazy: defer this match one position to see if a longer one follows.
+ d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
+ d->m_saved_match_dist = cur_match_dist;
+ d->m_saved_match_len = cur_match_len;
+ }
+ // Move the lookahead forward by len_to_move bytes.
+ d->m_lookahead_pos += len_to_move;
+ MZ_ASSERT(d->m_lookahead_size >= len_to_move);
+ d->m_lookahead_size -= len_to_move;
+ d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, TDEFL_LZ_DICT_SIZE);
+ // Check if it's time to flush the current LZ codes to the internal output
+ // buffer.
+ if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
+ ((d->m_total_lz_bytes > 31 * 1024) &&
+ (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
+ d->m_total_lz_bytes) ||
+ (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
+ int n;
+ d->m_pSrc = pSrc;
+ d->m_src_buf_left = src_buf_left;
+ if ((n = tdefl_flush_block(d, 0)) != 0)
+ return (n < 0) ? MZ_FALSE : MZ_TRUE;
+ }
+ }
+
+ d->m_pSrc = pSrc;
+ d->m_src_buf_left = src_buf_left;
+ return MZ_TRUE;
+}
+
+// Copy as much pending compressed output as fits into the caller's output
+// buffer, updating *m_pIn_buf_size / *m_pOut_buf_size to report the bytes
+// consumed and produced. Returns TDEFL_STATUS_DONE only when the stream is
+// finished and fully drained.
+static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
+ if (d->m_pIn_buf_size) {
+ *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
+ }
+
+ if (d->m_pOut_buf_size) {
+ size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
+ d->m_output_flush_remaining);
+ memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
+ d->m_output_buf + d->m_output_flush_ofs, n);
+ d->m_output_flush_ofs += (mz_uint)n;
+ d->m_output_flush_remaining -= (mz_uint)n;
+ d->m_out_buf_ofs += n;
+
+ *d->m_pOut_buf_size = d->m_out_buf_ofs;
+ }
+
+ return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
+ : TDEFL_STATUS_OKAY;
+}
+
+// Main streaming compression entry point. On input, *pIn_buf_size and
+// *pOut_buf_size hold the buffer sizes; on return they hold the number of
+// bytes actually consumed/produced. Exactly one output mechanism must be in
+// use: either the put-buf callback set at init time, or pOut_buf here.
+tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
+ size_t *pIn_buf_size, void *pOut_buf,
+ size_t *pOut_buf_size, tdefl_flush flush) {
+ if (!d) {
+ if (pIn_buf_size)
+ *pIn_buf_size = 0;
+ if (pOut_buf_size)
+ *pOut_buf_size = 0;
+ return TDEFL_STATUS_BAD_PARAM;
+ }
+
+ d->m_pIn_buf = pIn_buf;
+ d->m_pIn_buf_size = pIn_buf_size;
+ d->m_pOut_buf = pOut_buf;
+ d->m_pOut_buf_size = pOut_buf_size;
+ d->m_pSrc = (const mz_uint8 *)(pIn_buf);
+ d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
+ d->m_out_buf_ofs = 0;
+ d->m_flush = flush;
+
+ // Parameter validation: reject callback+buffer combinations (must be one
+ // or the other), calls after a previous error, non-FINISH calls after a
+ // FINISH was requested, and non-NULL sizes with NULL buffers.
+ if (((d->m_pPut_buf_func != NULL) ==
+ ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
+ (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
+ (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
+ (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
+ (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
+ if (pIn_buf_size)
+ *pIn_buf_size = 0;
+ if (pOut_buf_size)
+ *pOut_buf_size = 0;
+ return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
+ }
+ d->m_wants_to_finish |= (flush == TDEFL_FINISH);
+
+ // Drain any output left over from a previous call before compressing more.
+ if ((d->m_output_flush_remaining) || (d->m_finished))
+ return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+ // The fast path only handles level-1 greedy parsing with no special flags.
+ if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
+ ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
+ ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
+ TDEFL_RLE_MATCHES)) == 0)) {
+ if (!tdefl_compress_fast(d))
+ return d->m_prev_return_status;
+ } else
+#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+ {
+ if (!tdefl_compress_normal(d))
+ return d->m_prev_return_status;
+ }
+
+ if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
+ (pIn_buf))
+ d->m_adler32 =
+ (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
+ d->m_pSrc - (const mz_uint8 *)pIn_buf);
+
+ // All input consumed and nothing pending: emit the flush block.
+ if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
+ (!d->m_output_flush_remaining)) {
+ if (tdefl_flush_block(d, flush) < 0)
+ return d->m_prev_return_status;
+ d->m_finished = (flush == TDEFL_FINISH);
+ if (flush == TDEFL_FULL_FLUSH) {
+ // Full flush resets the dictionary so decompression can restart here.
+ MZ_CLEAR_OBJ(d->m_hash);
+ MZ_CLEAR_OBJ(d->m_next);
+ d->m_dict_size = 0;
+ }
+ }
+
+ return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
+}
+
+// Convenience wrapper for callback-based output: compresses in_buf_size
+// bytes, delivering all output through the put-buf callback supplied to
+// tdefl_init(). Only valid when such a callback was configured.
+tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
+ size_t in_buf_size, tdefl_flush flush) {
+ MZ_ASSERT(d->m_pPut_buf_func);
+ return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
+}
+
+// (Re)initialize a compressor. pPut_buf_func may be NULL when the caller
+// intends to use tdefl_compress() with explicit output buffers. flags packs
+// the max probe count (low 12 bits) plus the TDEFL_* option bits.
+tdefl_status tdefl_init(tdefl_compressor *d,
+ tdefl_put_buf_func_ptr pPut_buf_func,
+ void *pPut_buf_user, int flags) {
+ d->m_pPut_buf_func = pPut_buf_func;
+ d->m_pPut_buf_user = pPut_buf_user;
+ d->m_flags = (mz_uint)(flags);
+ d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
+ d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
+ d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
+ // With nondeterministic parsing allowed, skipping this clear leaves stale
+ // hash entries (faster reinit; output may differ run to run).
+ if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
+ MZ_CLEAR_OBJ(d->m_hash);
+ d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
+ d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
+ d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
+ d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
+ // Byte 0 of the code buffer is reserved as the first flags byte.
+ d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
+ d->m_pLZ_flags = d->m_lz_code_buf;
+ d->m_num_flags_left = 8;
+ d->m_pOutput_buf = d->m_output_buf;
+ d->m_pOutput_buf_end = d->m_output_buf;
+ d->m_prev_return_status = TDEFL_STATUS_OKAY;
+ d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
+ // Adler-32 initial value per RFC 1950.
+ d->m_adler32 = 1;
+ d->m_pIn_buf = NULL;
+ d->m_pOut_buf = NULL;
+ d->m_pIn_buf_size = NULL;
+ d->m_pOut_buf_size = NULL;
+ d->m_flush = TDEFL_NO_FLUSH;
+ d->m_pSrc = NULL;
+ d->m_src_buf_left = 0;
+ d->m_out_buf_ofs = 0;
+ memset(&d->m_huff_count[0][0], 0,
+ sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
+ memset(&d->m_huff_count[1][0], 0,
+ sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
+ return TDEFL_STATUS_OKAY;
+}
+
+// Returns the status of the most recent tdefl_compress() call.
+tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
+ return d->m_prev_return_status;
+}
+
+// Returns the running Adler-32 of the source data consumed so far.
+mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
+
+// One-shot helper: compress buf_len bytes from pBuf, streaming all output
+// through pPut_buf_func. The compressor state is heap-allocated because
+// tdefl_compressor is too large for the stack on some targets.
+mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
+ tdefl_put_buf_func_ptr pPut_buf_func,
+ void *pPut_buf_user, int flags) {
+ tdefl_compressor *pComp;
+ mz_bool succeeded;
+ if (((buf_len) && (!pBuf)) || (!pPut_buf_func))
+ return MZ_FALSE;
+ pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
+ if (!pComp)
+ return MZ_FALSE;
+ succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
+ TDEFL_STATUS_OKAY);
+ succeeded =
+ succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
+ TDEFL_STATUS_DONE);
+ MZ_FREE(pComp);
+ return succeeded;
+}
+
+// Growable (or fixed) byte sink used as the put-buf callback target by the
+// mem-to-heap and mem-to-mem helpers below.
+typedef struct {
+ size_t m_size, m_capacity;
+ mz_uint8 *m_pBuf;
+ // When false, writes beyond m_capacity fail instead of reallocating.
+ mz_bool m_expandable;
+} tdefl_output_buffer;
+
+// tdefl_put_buf_func_ptr implementation: append len bytes to the
+// tdefl_output_buffer in pUser, doubling capacity (min 128) as needed.
+static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
+ void *pUser) {
+ tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
+ size_t new_size = p->m_size + len;
+ if (new_size > p->m_capacity) {
+ size_t new_capacity = p->m_capacity;
+ mz_uint8 *pNew_buf;
+ if (!p->m_expandable)
+ return MZ_FALSE;
+ do {
+ new_capacity = MZ_MAX(128U, new_capacity << 1U);
+ } while (new_size > new_capacity);
+ pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
+ if (!pNew_buf)
+ return MZ_FALSE;
+ p->m_pBuf = pNew_buf;
+ p->m_capacity = new_capacity;
+ }
+ memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
+ p->m_size = new_size;
+ return MZ_TRUE;
+}
+
+// Compress pSrc_buf into a freshly allocated buffer; caller frees the
+// returned pointer with MZ_FREE and receives its length in *pOut_len.
+// Returns NULL on failure (note: MZ_FALSE is 0, so it coerces to NULL).
+void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
+ size_t *pOut_len, int flags) {
+ tdefl_output_buffer out_buf;
+ MZ_CLEAR_OBJ(out_buf);
+ if (!pOut_len)
+ return MZ_FALSE;
+ else
+ *pOut_len = 0;
+ out_buf.m_expandable = MZ_TRUE;
+ if (!tdefl_compress_mem_to_output(
+ pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
+ return NULL;
+ *pOut_len = out_buf.m_size;
+ return out_buf.m_pBuf;
+}
+
+// Compress pSrc_buf into a caller-provided buffer of out_buf_len bytes.
+// Returns the compressed size, or 0 on failure (including insufficient
+// output space, since the output buffer is non-expandable here).
+size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
+ const void *pSrc_buf, size_t src_buf_len,
+ int flags) {
+ tdefl_output_buffer out_buf;
+ MZ_CLEAR_OBJ(out_buf);
+ if (!pOut_buf)
+ return 0;
+ out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
+ out_buf.m_capacity = out_buf_len;
+ if (!tdefl_compress_mem_to_output(
+ pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
+ return 0;
+ return out_buf.m_size;
+}
+
+#ifndef MINIZ_NO_ZLIB_APIS
+// Probe counts per compression level 0..10 (higher = more match-search
+// effort; 0 disables matching entirely).
+static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
+ 128, 256, 512, 768, 1500};
+
+// level may actually range from [0,10] (10 is a "hidden" max level, where we
+// want a bit more compression and it's fine if throughput to fall off a cliff
+// on some files).
+// Translate zlib-style (level, window_bits, strategy) into TDEFL_* flags.
+// window_bits > 0 selects a zlib header; levels <= 3 use greedy parsing.
+mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
+ int strategy) {
+ mz_uint comp_flags =
+ s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
+ ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
+ if (window_bits > 0)
+ comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
+
+ if (!level)
+ comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
+ else if (strategy == MZ_FILTERED)
+ comp_flags |= TDEFL_FILTER_MATCHES;
+ else if (strategy == MZ_HUFFMAN_ONLY)
+ comp_flags &= ~TDEFL_MAX_PROBES_MASK;
+ else if (strategy == MZ_FIXED)
+ comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
+ else if (strategy == MZ_RLE)
+ comp_flags |= TDEFL_RLE_MATCHES;
+
+ return comp_flags;
+}
+#endif // MINIZ_NO_ZLIB_APIS
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4204) // nonstandard extension used : non-constant
+ // aggregate initializer (also supported by GNU
+ // C and C99, so no big deal)
+#endif
+
+// Simple PNG writer function by Alex Evans, 2011. Released into the public
+// domain: https://gist.github.com/908299, more context at
+// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
+// This is actually a modification of Alex's original code so PNG files
+// generated by this function pass pngcheck.
+// Encode a raw image (w x h, num_chans bytes per pixel, rows tightly
+// packed) as an in-memory PNG. Caller frees the returned buffer with
+// MZ_FREE; *pLen_out receives the file size. Returns NULL on failure.
+// NOTE(review): w/h are written as 16-bit big-endian below, so dimensions
+// >= 65536 silently truncate; num_chans is used to index chans[5] without
+// a range check — callers must pass 1..4. Verify at call sites.
+void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
+ int h, int num_chans,
+ size_t *pLen_out,
+ mz_uint level, mz_bool flip) {
+ // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
+ // defined.
+ static const mz_uint s_tdefl_png_num_probes[11] = {
+ 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
+ tdefl_compressor *pComp =
+ (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
+ tdefl_output_buffer out_buf;
+ int i, bpl = w * num_chans, y, z;
+ mz_uint32 c;
+ *pLen_out = 0;
+ if (!pComp)
+ return NULL;
+ MZ_CLEAR_OBJ(out_buf);
+ out_buf.m_expandable = MZ_TRUE;
+ // 57 = 41-byte header+IHDR+IDAT prefix plus 16-byte footer.
+ out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
+ if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
+ MZ_FREE(pComp);
+ return NULL;
+ }
+ // write dummy header
+ for (z = 41; z; --z)
+ tdefl_output_buffer_putter(&z, 1, &out_buf);
+ // compress image data
+ tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf,
+ s_tdefl_png_num_probes[MZ_MIN(10, level)] |
+ TDEFL_WRITE_ZLIB_HEADER);
+ for (y = 0; y < h; ++y) {
+ // z is 0 here (loop above ran to zero): each row is prefixed with PNG
+ // filter type 0 (no filter).
+ tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
+ tdefl_compress_buffer(pComp,
+ (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
+ bpl, TDEFL_NO_FLUSH);
+ }
+ if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
+ TDEFL_STATUS_DONE) {
+ MZ_FREE(pComp);
+ MZ_FREE(out_buf.m_pBuf);
+ return NULL;
+ }
+ // write real header
+ *pLen_out = out_buf.m_size - 41;
+ {
+ // Color type per channel count: 0=gray, 2=RGB, 4=gray+alpha, 6=RGBA.
+ static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
+ mz_uint8 pnghdr[41] = {0x89,
+ 0x50,
+ 0x4e,
+ 0x47,
+ 0x0d,
+ 0x0a,
+ 0x1a,
+ 0x0a,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x0d,
+ 0x49,
+ 0x48,
+ 0x44,
+ 0x52,
+ 0,
+ 0,
+ (mz_uint8)(w >> 8),
+ (mz_uint8)w,
+ 0,
+ 0,
+ (mz_uint8)(h >> 8),
+ (mz_uint8)h,
+ 8,
+ chans[num_chans],
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ (mz_uint8)(*pLen_out >> 24),
+ (mz_uint8)(*pLen_out >> 16),
+ (mz_uint8)(*pLen_out >> 8),
+ (mz_uint8)*pLen_out,
+ 0x49,
+ 0x44,
+ 0x41,
+ 0x54};
+ // IHDR CRC covers chunk type + 13 data bytes (offsets 12..28).
+ c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
+ for (i = 0; i < 4; ++i, c <<= 8)
+ ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
+ memcpy(out_buf.m_pBuf, pnghdr, 41);
+ }
+ // write footer (IDAT CRC-32, followed by IEND chunk)
+ if (!tdefl_output_buffer_putter(
+ "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
+ *pLen_out = 0;
+ MZ_FREE(pComp);
+ MZ_FREE(out_buf.m_pBuf);
+ return NULL;
+ }
+ // IDAT CRC covers the chunk type (4 bytes before the data) + data.
+ c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
+ *pLen_out + 4);
+ for (i = 0; i < 4; ++i, c <<= 8)
+ (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
+ // compute final size of file, grab compressed data buffer and return
+ *pLen_out += 57;
+ MZ_FREE(pComp);
+ return out_buf.m_pBuf;
+}
+// Convenience wrapper: encode a PNG at compression level 6 without row
+// flipping. See tdefl_write_image_to_png_file_in_memory_ex for details.
+void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
+ int num_chans, size_t *pLen_out) {
+ // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
+ // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's
+ // where #defined out)
+ return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
+ pLen_out, 6, MZ_FALSE);
+}
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+// ------------------- .ZIP archive reading
+
+#ifndef MINIZ_NO_ARCHIVE_APIS
+
+#ifdef MINIZ_NO_STDIO
+#define MZ_FILE void *
+#else
+// FIX: the header names were stripped from these #include directives
+// (a bare "#include" is a compile error). Restore the headers this
+// section needs: stdio.h for FILE/fopen and sys/stat.h for the
+// MZ_FILE_STAT_STRUCT/MZ_FILE_STAT macros defined below.
+#include <stdio.h>
+#include <sys/stat.h>
+
+#if defined(_MSC_VER) || defined(__MINGW32__)
+
+// windows.h provides MultiByteToWideChar/CP_UTF8 used by str2wstr below.
+#include <windows.h>
+
+// Convert a UTF-8 string to a freshly malloc'd UTF-16 string for the
+// wide-character Win32 file APIs. Returns NULL on allocation failure;
+// caller frees with free().
+static wchar_t *str2wstr(const char *str) {
+ int len = strlen(str) + 1;
+ wchar_t *wstr = (wchar_t*)malloc(len * sizeof(wchar_t));
+ // FIX: malloc() was previously unchecked — on failure the NULL buffer
+ // was handed straight to MultiByteToWideChar.
+ if (wstr)
+ MultiByteToWideChar(CP_UTF8, 0, str, len * sizeof(char), wstr, len);
+ return wstr;
+}
+
+// fopen() replacement that accepts UTF-8 filenames on Windows by routing
+// through _wfopen(). Returns NULL on conversion or open failure.
+static FILE *mz_fopen(const char *pFilename, const char *pMode) {
+ wchar_t *wFilename = str2wstr(pFilename);
+ wchar_t *wMode = str2wstr(pMode);
+ // FIX: guard against str2wstr() returning NULL (out of memory) before
+ // passing the strings to _wfopen().
+ FILE *pFile = (wFilename && wMode) ? _wfopen(wFilename, wMode) : NULL;
+
+ free(wFilename);
+ free(wMode);
+
+ return pFile;
+}
+
+// freopen() replacement accepting UTF-8 path/mode on Windows.
+static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
+ wchar_t *wPath = str2wstr(pPath);
+ wchar_t *wMode = str2wstr(pMode);
+ // FIX: same NULL guard as mz_fopen().
+ FILE *pFile = (wPath && wMode) ? _wfreopen(wPath, wMode, pStream) : NULL;
+
+ free(wPath);
+ free(wMode);
+
+ return pFile;
+}
+
+#ifndef MINIZ_NO_TIME
+// FIX: restore the stripped include target (utime()/struct utimbuf on
+// MSVC/MinGW live in sys/utime.h).
+#include <sys/utime.h>
+#endif
+#define MZ_FILE FILE
+#define MZ_FOPEN mz_fopen
+#define MZ_FCLOSE fclose
+#define MZ_FREAD fread
+#define MZ_FWRITE fwrite
+// ZIG_ANDROID_MOD: Zig's mingw64 doesn't have _ftelli64/_fseeki64
+#if defined(__MINGW64__)
+#define MZ_FTELL64 ftello
+#define MZ_FSEEK64 fseeko
+#else
+#define MZ_FTELL64 _ftelli64
+#define MZ_FSEEK64 _fseeki64
+#endif
+#define MZ_FILE_STAT_STRUCT _stat
+#define MZ_FILE_STAT _stat
+#define MZ_FFLUSH fflush
+#define MZ_FREOPEN mz_freopen
+#define MZ_DELETE_FILE remove
+// NOTE(review): this __MINGW32__ branch is unreachable — __MINGW32__ is
+// already handled by the `defined(_MSC_VER) || defined(__MINGW32__)`
+// branch above. Kept for upstream parity, with two latent bugs fixed.
+#elif defined(__MINGW32__)
+#ifndef MINIZ_NO_TIME
+#include <sys/utime.h>
+#endif
+#define MZ_FILE FILE
+// FIX: these function-like macros expanded to the bare function name and
+// dropped their arguments (`MZ_FOPEN(f, m)` became just `mz_fopen`);
+// forward the arguments so the macros are callable.
+#define MZ_FOPEN(f, m) mz_fopen(f, m)
+#define MZ_FCLOSE fclose
+#define MZ_FREAD fread
+#define MZ_FWRITE fwrite
+#define MZ_FTELL64 ftell
+#define MZ_FSEEK64 fseek
+#define MZ_FILE_STAT_STRUCT _stat
+#define MZ_FILE_STAT _stat
+#define MZ_FFLUSH fflush
+#define MZ_FREOPEN(f, m, s) mz_freopen(f, m, s)
+#define MZ_DELETE_FILE remove
+#elif defined(__TINYC__)
+#ifndef MINIZ_NO_TIME
+// FIX: restored stripped include target (matches upstream miniz).
+#include <sys/utime.h>
+#endif
+#define MZ_FILE FILE
+#define MZ_FOPEN(f, m) fopen(f, m)
+#define MZ_FCLOSE fclose
+#define MZ_FREAD fread
+#define MZ_FWRITE fwrite
+#define MZ_FTELL64 ftell
+#define MZ_FSEEK64 fseek
+#define MZ_FILE_STAT_STRUCT stat
+#define MZ_FILE_STAT stat
+#define MZ_FFLUSH fflush
+#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
+#define MZ_DELETE_FILE remove
+#elif defined(__GNUC__) && _LARGEFILE64_SOURCE
+#ifndef MINIZ_NO_TIME
+// FIX: restored stripped include target (POSIX utime()).
+#include <utime.h>
+#endif
+#define MZ_FILE FILE
+#define MZ_FOPEN(f, m) fopen64(f, m)
+#define MZ_FCLOSE fclose
+#define MZ_FREAD fread
+#define MZ_FWRITE fwrite
+#define MZ_FTELL64 ftello64
+#define MZ_FSEEK64 fseeko64
+#define MZ_FILE_STAT_STRUCT stat64
+#define MZ_FILE_STAT stat64
+#define MZ_FFLUSH fflush
+#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
+#define MZ_DELETE_FILE remove
+#else
+#ifndef MINIZ_NO_TIME
+// FIX: restored stripped include target (POSIX utime()).
+#include <utime.h>
+#endif
+#define MZ_FILE FILE
+#define MZ_FOPEN(f, m) fopen(f, m)
+#define MZ_FCLOSE fclose
+#define MZ_FREAD fread
+#define MZ_FWRITE fwrite
+#if _FILE_OFFSET_BITS == 64 || _POSIX_C_SOURCE >= 200112L
+#define MZ_FTELL64 ftello
+#define MZ_FSEEK64 fseeko
+#else
+#define MZ_FTELL64 ftell
+#define MZ_FSEEK64 fseek
+#endif
+#define MZ_FILE_STAT_STRUCT stat
+#define MZ_FILE_STAT stat
+#define MZ_FFLUSH fflush
+#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
+#define MZ_DELETE_FILE remove
+#endif // #ifdef _MSC_VER
+#endif // #ifdef MINIZ_NO_STDIO
+
+// ASCII-only lowercase used for case-insensitive filename comparison.
+#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
+
+// Various ZIP archive enums. To completely avoid cross platform compiler
+// alignment and platform endian issues, miniz.c doesn't use structs for any of
+// this stuff.
+enum {
+ // ZIP archive identifiers and record sizes
+ MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
+ MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
+ MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
+ MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
+ MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
+
+ /* ZIP64 archive identifier and record sizes */
+ MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06064b50,
+ MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG = 0x07064b50,
+ MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE = 56,
+ MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE = 20,
+ MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID = 0x0001,
+ MZ_ZIP_DATA_DESCRIPTOR_ID = 0x08074b50,
+ MZ_ZIP_DATA_DESCRIPTER_SIZE64 = 24,
+ MZ_ZIP_DATA_DESCRIPTER_SIZE32 = 16,
+
+ // Central directory header record offsets
+ MZ_ZIP_CDH_SIG_OFS = 0,
+ MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
+ MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
+ MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
+ MZ_ZIP_CDH_METHOD_OFS = 10,
+ MZ_ZIP_CDH_FILE_TIME_OFS = 12,
+ MZ_ZIP_CDH_FILE_DATE_OFS = 14,
+ MZ_ZIP_CDH_CRC32_OFS = 16,
+ MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
+ MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
+ MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
+ MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
+ MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
+ MZ_ZIP_CDH_DISK_START_OFS = 34,
+ MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
+ MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
+ MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
+ // Local directory header offsets
+ MZ_ZIP_LDH_SIG_OFS = 0,
+ MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
+ MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
+ MZ_ZIP_LDH_METHOD_OFS = 8,
+ MZ_ZIP_LDH_FILE_TIME_OFS = 10,
+ MZ_ZIP_LDH_FILE_DATE_OFS = 12,
+ MZ_ZIP_LDH_CRC32_OFS = 14,
+ MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
+ MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
+ MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
+ MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
+ // End of central directory offsets
+ MZ_ZIP_ECDH_SIG_OFS = 0,
+ MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
+ MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
+ MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
+ MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
+ MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
+ MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
+ MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
+
+ /* ZIP64 End of central directory locator offsets */
+ MZ_ZIP64_ECDL_SIG_OFS = 0, /* 4 bytes */
+ MZ_ZIP64_ECDL_NUM_DISK_CDIR_OFS = 4, /* 4 bytes */
+ MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS = 8, /* 8 bytes */
+ MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS = 16, /* 4 bytes */
+
+ /* ZIP64 End of central directory header offsets */
+ MZ_ZIP64_ECDH_SIG_OFS = 0, /* 4 bytes */
+ MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS = 4, /* 8 bytes */
+ MZ_ZIP64_ECDH_VERSION_MADE_BY_OFS = 12, /* 2 bytes */
+ MZ_ZIP64_ECDH_VERSION_NEEDED_OFS = 14, /* 2 bytes */
+ MZ_ZIP64_ECDH_NUM_THIS_DISK_OFS = 16, /* 4 bytes */
+ MZ_ZIP64_ECDH_NUM_DISK_CDIR_OFS = 20, /* 4 bytes */
+ MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 24, /* 8 bytes */
+ MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS = 32, /* 8 bytes */
+ MZ_ZIP64_ECDH_CDIR_SIZE_OFS = 40, /* 8 bytes */
+ MZ_ZIP64_ECDH_CDIR_OFS_OFS = 48, /* 8 bytes */
+ MZ_ZIP_VERSION_MADE_BY_DOS_FILESYSTEM_ID = 0,
+ MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG = 0x10,
+ // General-purpose bit flag values from the ZIP APPNOTE spec.
+ MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED = 1,
+ MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG = 32,
+ MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION = 64,
+ MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_LOCAL_DIR_IS_MASKED = 8192,
+ MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8 = 1 << 11
+};
+
+// Minimal dynamic array: m_size/m_capacity are in elements, each
+// m_element_size bytes wide; m_p owns the storage.
+typedef struct {
+ void *m_p;
+ size_t m_size, m_capacity;
+ mz_uint m_element_size;
+} mz_zip_array;
+
+// Private per-archive state hung off mz_zip_archive.m_pState.
+struct mz_zip_internal_state_tag {
+ // Raw central directory bytes, per-entry byte offsets into it, and an
+ // optional filename-sorted index for fast lookups.
+ mz_zip_array m_central_dir;
+ mz_zip_array m_central_dir_offsets;
+ mz_zip_array m_sorted_central_dir_offsets;
+
+ /* The flags passed in when the archive is initially opened. */
+ uint32_t m_init_flags;
+
+ /* MZ_TRUE if the archive has a zip64 end of central directory headers, etc.
+ */
+ mz_bool m_zip64;
+
+ /* MZ_TRUE if we found zip64 extended info in the central directory (m_zip64
+ * will also be slammed to true too, even if we didn't find a zip64 end of
+ * central dir header, etc.) */
+ mz_bool m_zip64_has_extended_info_fields;
+
+ /* These fields are used by the file, FILE, memory, and memory/heap read/write
+ * helpers. */
+ MZ_FILE *m_pFile;
+ mz_uint64 m_file_archive_start_ofs;
+
+ void *m_pMem;
+ size_t m_mem_size;
+ size_t m_mem_capacity;
+};
+
+// Accessor macros for mz_zip_array; ELEMENT does an unchecked typed index.
+#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
+ (array_ptr)->m_element_size = element_size
+#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
+ ((element_type *)((array_ptr)->m_p))[index]
+
+// Free an array's storage (via the archive's allocator) and zero it.
+static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
+ mz_zip_array *pArray) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
+ memset(pArray, 0, sizeof(mz_zip_array));
+}
+
+// Grow the array's storage to at least min_new_capacity elements. When
+// `growing` is set, capacity doubles from its current value (amortized
+// growth); otherwise it is set exactly. Returns MZ_FALSE on realloc failure,
+// leaving the array unchanged.
+static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
+ mz_zip_array *pArray,
+ size_t min_new_capacity,
+ mz_uint growing) {
+ void *pNew_p;
+ size_t new_capacity = min_new_capacity;
+ MZ_ASSERT(pArray->m_element_size);
+ if (pArray->m_capacity >= min_new_capacity)
+ return MZ_TRUE;
+ if (growing) {
+ new_capacity = MZ_MAX(1, pArray->m_capacity);
+ while (new_capacity < min_new_capacity)
+ new_capacity *= 2;
+ }
+ if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
+ pArray->m_element_size, new_capacity)))
+ return MZ_FALSE;
+ pArray->m_p = pNew_p;
+ pArray->m_capacity = new_capacity;
+ return MZ_TRUE;
+}
+
+// Ensure capacity for new_capacity elements without changing m_size.
+static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
+ mz_zip_array *pArray,
+ size_t new_capacity,
+ mz_uint growing) {
+ if (new_capacity > pArray->m_capacity) {
+ if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
+ return MZ_FALSE;
+ }
+ return MZ_TRUE;
+}
+
+// Set m_size to new_size, growing storage as needed. Newly exposed
+// elements are NOT initialized.
+static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
+ mz_zip_array *pArray,
+ size_t new_size,
+ mz_uint growing) {
+ if (new_size > pArray->m_capacity) {
+ if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
+ return MZ_FALSE;
+ }
+ pArray->m_size = new_size;
+ return MZ_TRUE;
+}
+
+// Reserve room for n more elements past the current size.
+static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
+ mz_zip_array *pArray,
+ size_t n) {
+ return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
+}
+
+// Append n elements copied from pElements. Appending zero elements is a
+// no-op success; a NULL source with n > 0 is an error.
+static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
+ mz_zip_array *pArray,
+ const void *pElements,
+ size_t n) {
+ if (0 == n)
+ return MZ_TRUE;
+ if (!pElements)
+ return MZ_FALSE;
+
+ size_t orig_size = pArray->m_size;
+ if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
+ return MZ_FALSE;
+ memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
+ pElements, n * pArray->m_element_size);
+ return MZ_TRUE;
+}
+
+#ifndef MINIZ_NO_TIME
+// Convert MS-DOS packed time/date (as stored in ZIP headers) to time_t.
+// DOS dates are relative to 1980; DOS times have 2-second resolution.
+static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
+ struct tm tm;
+ memset(&tm, 0, sizeof(tm));
+ // Let mktime() determine whether DST applies.
+ tm.tm_isdst = -1;
+ tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
+ tm.tm_mon = ((dos_date >> 5) & 15) - 1;
+ tm.tm_mday = dos_date & 31;
+ tm.tm_hour = (dos_time >> 11) & 31;
+ tm.tm_min = (dos_time >> 5) & 63;
+ tm.tm_sec = (dos_time << 1) & 62;
+ return mktime(&tm);
+}
+
+#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
+// Convert a time_t to the MS-DOS packed time/date fields stored in ZIP
+// headers (2-second resolution; years relative to 1980). On conversion
+// failure both outputs are set to 0.
+static void mz_zip_time_t_to_dos_time(time_t time, mz_uint16 *pDOS_time,
+ mz_uint16 *pDOS_date) {
+#ifdef _MSC_VER
+ struct tm tm_struct;
+ struct tm *tm = &tm_struct;
+ errno_t err = localtime_s(tm, &time);
+ if (err) {
+ *pDOS_date = 0;
+ *pDOS_time = 0;
+ return;
+ }
+#else
+ struct tm *tm = localtime(&time);
+ // FIX: localtime() returns NULL for unrepresentable times; this was
+ // previously dereferenced unconditionally. Mirror the MSVC error path.
+ if (!tm) {
+ *pDOS_date = 0;
+ *pDOS_time = 0;
+ return;
+ }
+#endif /* #ifdef _MSC_VER */
+
+ *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
+ ((tm->tm_sec) >> 1));
+ *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
+ ((tm->tm_mon + 1) << 5) + tm->tm_mday);
+}
+#endif /* MINIZ_NO_ARCHIVE_WRITING_APIS */
+
+#ifndef MINIZ_NO_STDIO
+#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
+// Fetch a file's modification time via the platform MZ_FILE_STAT macro.
+// Returns MZ_FALSE if the file cannot be stat'ed.
+static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
+ time_t *pTime) {
+ struct MZ_FILE_STAT_STRUCT file_stat;
+
+ /* On Linux with x86 glibc, this call will fail on large files (I think >=
+ * 0x80000000 bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. */
+ if (MZ_FILE_STAT(pFilename, &file_stat) != 0)
+ return MZ_FALSE;
+
+ *pTime = file_stat.st_mtime;
+
+ return MZ_TRUE;
+}
+#endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS*/
+
+// Set a file's access and modification times via utime(). Returns MZ_TRUE
+// on success.
+static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
+ time_t modified_time) {
+ struct utimbuf t;
+
+ memset(&t, 0, sizeof(t));
+ t.actime = access_time;
+ t.modtime = modified_time;
+
+ return !utime(pFilename, &t);
+}
+#endif /* #ifndef MINIZ_NO_STDIO */
+#endif /* #ifndef MINIZ_NO_TIME */
+
+// Record err_num on the archive (if any) and return MZ_FALSE, so failure
+// sites can write `return mz_zip_set_error(pZip, MZ_ZIP_...);`.
+static MZ_FORCEINLINE mz_bool mz_zip_set_error(mz_zip_archive *pZip,
+ mz_zip_error err_num) {
+ if (pZip)
+ pZip->m_last_error = err_num;
+ return MZ_FALSE;
+}
+
+// Shared reader setup: installs default allocators if none were provided,
+// switches the archive into reading mode, and allocates the internal state
+// (central directory arrays). Fails if the archive is already initialized.
+static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
+ mz_uint32 flags) {
+ (void)flags;
+ if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
+ return MZ_FALSE;
+
+ if (!pZip->m_pAlloc)
+ pZip->m_pAlloc = def_alloc_func;
+ if (!pZip->m_pFree)
+ pZip->m_pFree = def_free_func;
+ if (!pZip->m_pRealloc)
+ pZip->m_pRealloc = def_realloc_func;
+
+ pZip->m_zip_mode = MZ_ZIP_MODE_READING;
+ pZip->m_archive_size = 0;
+ pZip->m_central_directory_file_ofs = 0;
+ pZip->m_total_files = 0;
+
+ if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
+ pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
+ return MZ_FALSE;
+ memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
+ MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
+ sizeof(mz_uint8));
+ MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
+ sizeof(mz_uint32));
+ MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
+ sizeof(mz_uint32));
+ return MZ_TRUE;
+}
+
+// Case-insensitive "less than" comparison of the filenames of two central
+// directory entries (identified by index into the offsets array). Used as
+// the ordering predicate for the sorted filename index.
+static MZ_FORCEINLINE mz_bool
+mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
+ const mz_zip_array *pCentral_dir_offsets,
+ mz_uint l_index, mz_uint r_index) {
+ const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
+ pCentral_dir_array, mz_uint8,
+ MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
+ l_index)),
+ *pE;
+ const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
+ pCentral_dir_array, mz_uint8,
+ MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
+ mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
+ r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
+ mz_uint8 l = 0, r = 0;
+ // Filenames immediately follow the fixed-size central dir header.
+ pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
+ pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
+ pE = pL + MZ_MIN(l_len, r_len);
+ while (pL < pE) {
+ if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
+ break;
+ pL++;
+ pR++;
+ }
+ // Equal prefix: the shorter name sorts first.
+ return (pL == pE) ? (l_len < r_len) : (l < r);
+}
+
+// Swap two mz_uint32 lvalues in place (used by the heapsort below).
+#define MZ_SWAP_UINT32(a, b) \
+ do { \
+ mz_uint32 t = a; \
+ a = b; \
+ b = t; \
+ } \
+ MZ_MACRO_END
+
+// Heap sort of lowercased filenames, used to help accelerate plain central
+// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
+// but it could allocate memory.)
+static void
+mz_zip_reader_sort_central_dir_offsets_by_filename(mz_zip_archive *pZip) {
+ mz_zip_internal_state *pState = pZip->m_pState;
+ const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
+ const mz_zip_array *pCentral_dir = &pState->m_central_dir;
+ // pIndices is the array being sorted; entries are file indices compared by
+ // their (lowercased) filenames via mz_zip_reader_filename_less().
+ mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
+ &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
+ const int size = pZip->m_total_files;
+ // Phase 1: heapify - sift down every internal node, starting from the last
+ // parent ((size - 2) >> 1), to build a max-heap over pIndices.
+ int start = (size - 2) >> 1, end;
+ while (start >= 0) {
+ int child, root = start;
+ for (;;) {
+ if ((child = (root << 1) + 1) >= size)
+ break;
+ // Pick the larger of the two children (when a right child exists).
+ child +=
+ (((child + 1) < size) &&
+ (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
+ pIndices[child], pIndices[child + 1])));
+ if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
+ pIndices[root], pIndices[child]))
+ break;
+ MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
+ root = child;
+ }
+ start--;
+ }
+
+ // Phase 2: repeatedly move the heap maximum to the end of the shrinking
+ // range and re-sift the new root, yielding ascending order.
+ end = size - 1;
+ while (end > 0) {
+ int child, root = 0;
+ MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
+ for (;;) {
+ if ((child = (root << 1) + 1) >= end)
+ break;
+ child +=
+ (((child + 1) < end) &&
+ mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
+ pIndices[child], pIndices[child + 1]));
+ if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
+ pIndices[root], pIndices[child]))
+ break;
+ MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
+ root = child;
+ }
+ end--;
+ }
+}
+
+// Scan backwards from the end of the archive for a record starting with the
+// 32-bit signature record_sig, reading in 4KB chunks. On success *pOfs
+// receives the absolute file offset of the signature. Gives up once roughly
+// 64KB + record_size has been searched (a zip comment is at most 64KB).
+static mz_bool mz_zip_reader_locate_header_sig(mz_zip_archive *pZip,
+ mz_uint32 record_sig,
+ mz_uint32 record_size,
+ mz_int64 *pOfs) {
+ mz_int64 cur_file_ofs;
+ mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
+ mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
+
+ /* Basic sanity checks - reject files which are too small */
+ if (pZip->m_archive_size < record_size)
+ return MZ_FALSE;
+
+ /* Find the record by scanning the file from the end towards the beginning. */
+ cur_file_ofs =
+ MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
+ for (;;) {
+ int i,
+ n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
+
+ if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
+ return MZ_FALSE;
+
+ // Search this chunk backwards for the signature; only accept a hit that
+ // leaves enough room after it for the full record.
+ for (i = n - 4; i >= 0; --i) {
+ mz_uint s = MZ_READ_LE32(pBuf + i);
+ if (s == record_sig) {
+ if ((pZip->m_archive_size - (cur_file_ofs + i)) >= record_size)
+ break;
+ }
+ }
+
+ if (i >= 0) {
+ cur_file_ofs += i;
+ break;
+ }
+
+ /* Give up if we've searched the entire file, or we've gone back "too far"
+ * (~64kb) */
+ if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
+ (MZ_UINT16_MAX + record_size)))
+ return MZ_FALSE;
+
+ // Step back one chunk, overlapping 3 bytes so a signature straddling a
+ // chunk boundary is still found.
+ cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
+ }
+
+ *pOfs = cur_file_ofs;
+ return MZ_TRUE;
+}
+
+// Locate and parse the archive's end-of-central-directory record (classic
+// and, when present, zip64), validate the directory geometry, read the whole
+// central directory into memory, build the per-file offset index, and
+// (optionally) the filename-sorted index. On any structural problem the
+// archive's last error is set and MZ_FALSE is returned.
+static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
+ mz_uint flags) {
+ mz_uint cdir_size = 0, cdir_entries_on_this_disk = 0, num_this_disk = 0,
+ cdir_disk_index = 0;
+ mz_uint64 cdir_ofs = 0;
+ mz_int64 cur_file_ofs = 0;
+ const mz_uint8 *p;
+
+ mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
+ mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
+ mz_bool sort_central_dir =
+ ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
+ // mz_uint32-backed buffers guarantee alignment for the LE read macros.
+ mz_uint32 zip64_end_of_central_dir_locator_u32
+ [(MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE + sizeof(mz_uint32) - 1) /
+ sizeof(mz_uint32)];
+ mz_uint8 *pZip64_locator = (mz_uint8 *)zip64_end_of_central_dir_locator_u32;
+
+ mz_uint32 zip64_end_of_central_dir_header_u32
+ [(MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
+ sizeof(mz_uint32)];
+ mz_uint8 *pZip64_end_of_central_dir =
+ (mz_uint8 *)zip64_end_of_central_dir_header_u32;
+
+ mz_uint64 zip64_end_of_central_dir_ofs = 0;
+
+ /* Basic sanity checks - reject files which are too small, and check the first
+ * 4 bytes of the file to make sure a local header is there. */
+ if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
+ return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);
+
+ // Scan backwards from the end of the file for the EOCD signature.
+ if (!mz_zip_reader_locate_header_sig(
+ pZip, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG,
+ MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE, &cur_file_ofs))
+ return mz_zip_set_error(pZip, MZ_ZIP_FAILED_FINDING_CENTRAL_DIR);
+
+ /* Read and verify the end of central directory record. */
+ if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
+ MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
+ MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
+ return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
+
+ if (MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
+ MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG)
+ return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);
+
+ // If there is room before the EOCD for a zip64 locator + zip64 EOCD, try
+ // to read them; a valid pair marks the archive as zip64. Failures here are
+ // non-fatal - the archive is then treated as a classic zip.
+ if (cur_file_ofs >= (MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE +
+ MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE)) {
+ if (pZip->m_pRead(pZip->m_pIO_opaque,
+ cur_file_ofs - MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE,
+ pZip64_locator,
+ MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) ==
+ MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) {
+ if (MZ_READ_LE32(pZip64_locator + MZ_ZIP64_ECDL_SIG_OFS) ==
+ MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG) {
+ zip64_end_of_central_dir_ofs = MZ_READ_LE64(
+ pZip64_locator + MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS);
+ if (zip64_end_of_central_dir_ofs >
+ (pZip->m_archive_size - MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE))
+ return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);
+
+ if (pZip->m_pRead(pZip->m_pIO_opaque, zip64_end_of_central_dir_ofs,
+ pZip64_end_of_central_dir,
+ MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) ==
+ MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) {
+ if (MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIG_OFS) ==
+ MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG) {
+ pZip->m_pState->m_zip64 = MZ_TRUE;
+ }
+ }
+ }
+ }
+ }
+
+ // Pull the directory geometry out of the classic EOCD record first; the
+ // zip64 record (if found above) overrides these values below.
+ pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS);
+ cdir_entries_on_this_disk =
+ MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS);
+ num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
+ cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
+ cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS);
+ cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
+
+ if (pZip->m_pState->m_zip64) {
+ mz_uint32 zip64_total_num_of_disks =
+ MZ_READ_LE32(pZip64_locator + MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS);
+ mz_uint64 zip64_cdir_total_entries = MZ_READ_LE64(
+ pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS);
+ mz_uint64 zip64_cdir_total_entries_on_this_disk = MZ_READ_LE64(
+ pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS);
+ mz_uint64 zip64_size_of_end_of_central_dir_record = MZ_READ_LE64(
+ pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS);
+ mz_uint64 zip64_size_of_central_directory =
+ MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_SIZE_OFS);
+
+ // The zip64 EOCD's "size of record" excludes its first 12 bytes.
+ if (zip64_size_of_end_of_central_dir_record <
+ (MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE - 12))
+ return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
+
+ if (zip64_total_num_of_disks != 1U)
+ return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);
+
+ /* Check for miniz's practical limits */
+ if (zip64_cdir_total_entries > MZ_UINT32_MAX)
+ return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
+
+ pZip->m_total_files = (mz_uint32)zip64_cdir_total_entries;
+
+ if (zip64_cdir_total_entries_on_this_disk > MZ_UINT32_MAX)
+ return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
+
+ cdir_entries_on_this_disk =
+ (mz_uint32)zip64_cdir_total_entries_on_this_disk;
+
+ /* Check for miniz's current practical limits (sorry, this should be enough
+ * for millions of files) */
+ if (zip64_size_of_central_directory > MZ_UINT32_MAX)
+ return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);
+
+ cdir_size = (mz_uint32)zip64_size_of_central_directory;
+
+ num_this_disk = MZ_READ_LE32(pZip64_end_of_central_dir +
+ MZ_ZIP64_ECDH_NUM_THIS_DISK_OFS);
+
+ cdir_disk_index = MZ_READ_LE32(pZip64_end_of_central_dir +
+ MZ_ZIP64_ECDH_NUM_DISK_CDIR_OFS);
+
+ cdir_ofs =
+ MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_OFS_OFS);
+ }
+
+ // Multi-disk (spanned) archives are unsupported; disk numbers must agree.
+ if (pZip->m_total_files != cdir_entries_on_this_disk)
+ return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);
+
+ if (((num_this_disk | cdir_disk_index) != 0) &&
+ ((num_this_disk != 1) || (cdir_disk_index != 1)))
+ return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);
+
+ // The directory must at least hold one fixed-size header per file and must
+ // lie entirely within the archive.
+ if (cdir_size < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
+ return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
+
+ if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
+ return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
+
+ pZip->m_central_directory_file_ofs = cdir_ofs;
+
+ if (pZip->m_total_files) {
+ mz_uint i, n;
+ /* Read the entire central directory into a heap block, and allocate another
+ * heap block to hold the unsorted central dir file record offsets, and
+ * possibly another to hold the sorted indices. */
+ if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
+ MZ_FALSE)) ||
+ (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
+ pZip->m_total_files, MZ_FALSE)))
+ return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
+
+ if (sort_central_dir) {
+ if (!mz_zip_array_resize(pZip,
+ &pZip->m_pState->m_sorted_central_dir_offsets,
+ pZip->m_total_files, MZ_FALSE))
+ return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
+ }
+
+ if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
+ pZip->m_pState->m_central_dir.m_p,
+ cdir_size) != cdir_size)
+ return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
+
+ /* Now create an index into the central directory file records, do some
+ * basic sanity checking on each record */
+ p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
+ for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
+ mz_uint total_header_size, disk_index, bit_flags, filename_size,
+ ext_data_size;
+ mz_uint64 comp_size, decomp_size, local_header_ofs;
+
+ if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
+ (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
+ return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
+
+ // Record this header's offset into the central-directory blob.
+ MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
+ i) =
+ (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
+
+ if (sort_central_dir)
+ MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
+ mz_uint32, i) = i;
+
+ comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
+ decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
+ local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
+ filename_size = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
+ ext_data_size = MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS);
+
+ // A 32-bit field of 0xFFFFFFFF means the real value is in a zip64
+ // extended-information extra field; go look for one.
+ if ((!pZip->m_pState->m_zip64_has_extended_info_fields) &&
+ (ext_data_size) &&
+ (MZ_MAX(MZ_MAX(comp_size, decomp_size), local_header_ofs) ==
+ MZ_UINT32_MAX)) {
+ /* Attempt to find zip64 extended information field in the entry's extra
+ * data */
+ mz_uint32 extra_size_remaining = ext_data_size;
+
+ if (extra_size_remaining) {
+ const mz_uint8 *pExtra_data;
+ void *buf = NULL;
+
+ // If the extra data extends past the bytes we have in memory for
+ // this record, re-read it from the archive into a temporary buffer.
+ if (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + ext_data_size >
+ n) {
+ buf = MZ_MALLOC(ext_data_size);
+ if (buf == NULL)
+ return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
+
+ if (pZip->m_pRead(pZip->m_pIO_opaque,
+ cdir_ofs + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
+ filename_size,
+ buf, ext_data_size) != ext_data_size) {
+ MZ_FREE(buf);
+ return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
+ }
+
+ pExtra_data = (mz_uint8 *)buf;
+ } else {
+ pExtra_data = p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size;
+ }
+
+ // Walk the extra-data blocks: each is a 16-bit id + 16-bit size.
+ do {
+ mz_uint32 field_id;
+ mz_uint32 field_data_size;
+
+ if (extra_size_remaining < (sizeof(mz_uint16) * 2)) {
+ MZ_FREE(buf);
+ return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
+ }
+
+ field_id = MZ_READ_LE16(pExtra_data);
+ field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16));
+
+ if ((field_data_size + sizeof(mz_uint16) * 2) >
+ extra_size_remaining) {
+ MZ_FREE(buf);
+ return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
+ }
+
+ if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID) {
+ /* Ok, the archive didn't have any zip64 headers but it uses a
+ * zip64 extended information field so mark it as zip64 anyway
+ * (this can occur with infozip's zip util when it reads
+ * compresses files from stdin). */
+ pZip->m_pState->m_zip64 = MZ_TRUE;
+ pZip->m_pState->m_zip64_has_extended_info_fields = MZ_TRUE;
+ break;
+ }
+
+ pExtra_data += sizeof(mz_uint16) * 2 + field_data_size;
+ extra_size_remaining =
+ extra_size_remaining - sizeof(mz_uint16) * 2 - field_data_size;
+ } while (extra_size_remaining);
+
+ MZ_FREE(buf);
+ }
+ }
+
+ /* I've seen archives that aren't marked as zip64 that uses zip64 ext
+ * data, argh */
+ if ((comp_size != MZ_UINT32_MAX) && (decomp_size != MZ_UINT32_MAX)) {
+ // Method 0 is "stored": compressed and decompressed sizes must match,
+ // and nonzero output can't come from zero input.
+ if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
+ (decomp_size != comp_size)) ||
+ (decomp_size && !comp_size))
+ return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
+ }
+
+ disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
+ if ((disk_index == MZ_UINT16_MAX) ||
+ ((disk_index != num_this_disk) && (disk_index != 1)))
+ return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);
+
+ if (comp_size != MZ_UINT32_MAX) {
+ if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
+ return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
+ }
+
+ bit_flags = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
+ if (bit_flags & MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_LOCAL_DIR_IS_MASKED)
+ return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);
+
+ // Advance past this record (fixed header + filename + extra + comment),
+ // checking it does not run off the end of the directory.
+ if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
+ MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
+ MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
+ MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
+ n)
+ return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
+
+ n -= total_header_size;
+ p += total_header_size;
+ }
+ }
+
+ if (sort_central_dir)
+ mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
+
+ return MZ_TRUE;
+}
+
+// Begin reading an archive of `size` bytes through the caller-supplied
+// m_pRead callback. On any failure the reader is torn down again and
+// MZ_FALSE is returned; on success the central directory has been parsed.
+mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
+ mz_uint32 flags) {
+ if ((pZip == NULL) || (pZip->m_pRead == NULL))
+ return MZ_FALSE;
+
+ if (!mz_zip_reader_init_internal(pZip, flags))
+ return MZ_FALSE;
+
+ pZip->m_archive_size = size;
+
+ // Parse the central directory; undo the init above if that fails.
+ if (mz_zip_reader_read_central_dir(pZip, flags))
+ return MZ_TRUE;
+
+ mz_zip_reader_end(pZip);
+ return MZ_FALSE;
+}
+
+// Read callback for in-memory archives: copies up to n bytes starting at
+// file_ofs out of the archive's backing buffer, clamped to the archive size.
+// Returns the number of bytes copied (0 when file_ofs is past the end).
+static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
+ void *pBuf, size_t n) {
+ mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
+ size_t bytes_to_copy;
+
+ if (file_ofs >= pZip->m_archive_size)
+ bytes_to_copy = 0;
+ else
+ bytes_to_copy = (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
+
+ memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs,
+ bytes_to_copy);
+ return bytes_to_copy;
+}
+
+// Initialize a zip reader over an archive held entirely in memory. The
+// memory is NOT copied: pMem/size must stay valid for the reader's lifetime.
+// Returns MZ_FALSE on failure (bad state or unparsable central directory).
+mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
+ size_t size, mz_uint32 flags) {
+ if (!mz_zip_reader_init_internal(pZip, flags))
+ return MZ_FALSE;
+ pZip->m_archive_size = size;
+ pZip->m_pRead = mz_zip_mem_read_func;
+ pZip->m_pIO_opaque = pZip;
+#ifdef __cplusplus
+ // Bugfix: const_cast requires an explicit target type in C++;
+ // "const_cast(pMem)" does not compile.
+ pZip->m_pState->m_pMem = const_cast<void *>(pMem);
+#else
+ pZip->m_pState->m_pMem = (void *)pMem;
+#endif
+ pZip->m_pState->m_mem_size = size;
+ if (!mz_zip_reader_read_central_dir(pZip, flags)) {
+ mz_zip_reader_end(pZip);
+ return MZ_FALSE;
+ }
+ return MZ_TRUE;
+}
+
+#ifndef MINIZ_NO_STDIO
+// Read callback for stdio-backed archives: seeks (only when the stream is
+// not already positioned there) to file_ofs and reads up to n bytes.
+// Returns 0 on a negative offset or a failed seek.
+static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
+ void *pBuf, size_t n) {
+ mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
+ mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
+
+ if ((mz_int64)file_ofs < 0)
+ return 0;
+
+ // Skip the seek when the stream is already at the requested offset.
+ if (cur_ofs != (mz_int64)file_ofs) {
+ if (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))
+ return 0;
+ }
+
+ return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
+}
+
+// Open pFilename for binary reading, determine its size, and initialize the
+// archive for reading from it. On success the FILE* is owned by the reader
+// (closed by mz_zip_reader_end()); on failure it is closed here.
+mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
+ mz_uint32 flags) {
+ mz_uint64 archive_size;
+ MZ_FILE *pFile;
+
+ if (NULL == (pFile = MZ_FOPEN(pFilename, "rb")))
+ return MZ_FALSE;
+
+ // Seek to the end to learn the archive's total size.
+ if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
+ MZ_FCLOSE(pFile);
+ return MZ_FALSE;
+ }
+ archive_size = MZ_FTELL64(pFile);
+
+ if (!mz_zip_reader_init_internal(pZip, flags)) {
+ MZ_FCLOSE(pFile);
+ return MZ_FALSE;
+ }
+
+ pZip->m_pRead = mz_zip_file_read_func;
+ pZip->m_pIO_opaque = pZip;
+ pZip->m_pState->m_pFile = pFile;
+ pZip->m_archive_size = archive_size;
+
+ if (!mz_zip_reader_read_central_dir(pZip, flags)) {
+ mz_zip_reader_end(pZip);
+ return MZ_FALSE;
+ }
+ return MZ_TRUE;
+}
+#endif // #ifndef MINIZ_NO_STDIO
+
+// Number of files recorded in the archive's central directory, or 0 when
+// pZip is NULL.
+mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
+ if (!pZip)
+ return 0;
+ return pZip->m_total_files;
+}
+
+// Return a pointer to file_index's central-directory header inside the
+// in-memory central-directory blob, or NULL when the archive, mode, or index
+// is invalid. All per-entry query functions below are built on this.
+static MZ_FORCEINLINE const mz_uint8 *
+mz_zip_reader_get_cdh(mz_zip_archive *pZip, mz_uint file_index) {
+ if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
+ (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
+ return NULL;
+ // m_central_dir_offsets[file_index] is the byte offset of the entry's
+ // header within the m_central_dir byte array.
+ return &MZ_ZIP_ARRAY_ELEMENT(
+ &pZip->m_pState->m_central_dir, mz_uint8,
+ MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
+ file_index));
+}
+
+// True when the entry's general-purpose bit 0 (traditional zip encryption)
+// is set; MZ_FALSE for a bad index.
+mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
+ mz_uint file_index) {
+ const mz_uint8 *pHeader = mz_zip_reader_get_cdh(pZip, file_index);
+ if (pHeader == NULL)
+ return MZ_FALSE;
+ return (MZ_READ_LE16(pHeader + MZ_ZIP_CDH_BIT_FLAG_OFS) & 1);
+}
+
+// Decide whether the entry is a directory: either its stored name ends in
+// '/', or the DOS directory bit (0x10) is set in its external attributes.
+mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
+ mz_uint file_index) {
+ mz_uint name_len;
+ const mz_uint8 *pHeader = mz_zip_reader_get_cdh(pZip, file_index);
+ if (pHeader == NULL)
+ return MZ_FALSE;
+
+ // A trailing '/' on the stored filename marks a directory entry.
+ name_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
+ if ((name_len) &&
+ (pHeader[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + name_len - 1] == '/'))
+ return MZ_TRUE;
+
+ // Otherwise fall back to the DOS directory attribute. Most/all zip writers
+ // (hopefully) put DOS file/directory attributes in the low 16 bits of the
+ // external attributes, so test the directory flag and ignore the source OS
+ // recorded in the created-by field. (The old internal-attribute check was
+ // removed upstream as incorrect.)
+ if (MZ_READ_LE32(pHeader + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS) & 0x10)
+ return MZ_TRUE;
+
+ return MZ_FALSE;
+}
+
+// Unpack the entry's central-directory record into *pStat: versions, flags,
+// method, timestamp, CRC, sizes, attributes, local-header offset, and
+// (truncated) filename/comment copies. Returns MZ_FALSE on a bad index.
+// NOTE(review): reads the raw 32-bit size/offset fields only - zip64 entries
+// store 0xFFFFFFFF here with the real values in extra data; confirm callers
+// handle that.
+mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
+ mz_zip_archive_file_stat *pStat) {
+ mz_uint n;
+ const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
+ if ((!p) || (!pStat))
+ return MZ_FALSE;
+
+ // Unpack the central directory record.
+ pStat->m_file_index = file_index;
+ pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
+ &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
+ pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
+ pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
+ pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
+ pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
+#ifndef MINIZ_NO_TIME
+ // Convert the DOS time/date pair to a time_t.
+ pStat->m_time =
+ mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
+ MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
+#endif
+ pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
+ pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
+ pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
+ pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
+ pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
+ pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
+
+ // Copy as much of the filename and comment as possible.
+ n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
+ n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
+ memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
+ pStat->m_filename[n] = '\0';
+
+ // The comment sits after the filename and extra data in the record.
+ n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
+ n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
+ pStat->m_comment_size = n;
+ memcpy(pStat->m_comment,
+ p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
+ MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
+ MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
+ n);
+ pStat->m_comment[n] = '\0';
+
+ return MZ_TRUE;
+}
+
+// Copy the entry's filename into pFilename, truncated and NUL-terminated to
+// fit filename_buf_size. Returns the copied length + 1 (i.e. including the
+// NUL; truncation is reflected in the return value), or 0 on a bad index.
+mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
+ char *pFilename, mz_uint filename_buf_size) {
+ mz_uint name_len;
+ const mz_uint8 *pHeader = mz_zip_reader_get_cdh(pZip, file_index);
+
+ if (pHeader == NULL) {
+ if (filename_buf_size)
+ pFilename[0] = '\0';
+ return 0;
+ }
+
+ name_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
+ if (filename_buf_size) {
+ // Clamp to the buffer, leaving room for the terminating NUL.
+ name_len = MZ_MIN(name_len, filename_buf_size - 1);
+ memcpy(pFilename, pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, name_len);
+ pFilename[name_len] = '\0';
+ }
+ return name_len + 1;
+}
+
+// Compare len bytes of pA and pB: bytewise when MZ_ZIP_FLAG_CASE_SENSITIVE
+// is set, otherwise ASCII case-insensitively.
+static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
+ const char *pB,
+ mz_uint len,
+ mz_uint flags) {
+ mz_uint pos;
+ if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
+ return (memcmp(pA, pB, len) == 0) ? MZ_TRUE : MZ_FALSE;
+ for (pos = 0; pos < len; ++pos) {
+ if (MZ_TOLOWER(pA[pos]) != MZ_TOLOWER(pB[pos]))
+ return MZ_FALSE;
+ }
+ return MZ_TRUE;
+}
+
+// Three-way, case-insensitive comparison of entry l_index's stored filename
+// against the caller's string pR (length r_len). Returns <0 / 0 / >0 like
+// memcmp; on an equal common prefix the length difference decides. Used by
+// the binary search over the sorted index.
+static MZ_FORCEINLINE int
+mz_zip_reader_filename_compare(const mz_zip_array *pCentral_dir_array,
+ const mz_zip_array *pCentral_dir_offsets,
+ mz_uint l_index, const char *pR, mz_uint r_len) {
+ // pL points at the entry's central-dir header; the name follows it.
+ const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
+ pCentral_dir_array, mz_uint8,
+ MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
+ l_index)),
+ *pE;
+ mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
+ mz_uint8 l = 0, r = 0;
+ pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
+ pE = pL + MZ_MIN(l_len, r_len);
+ // Scan the common prefix until the first differing lowercased byte.
+ while (pL < pE) {
+ if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
+ break;
+ pL++;
+ pR++;
+ }
+ return (pL == pE) ? (int)(l_len - r_len) : (l - r);
+}
+
+// Binary-search the filename-sorted index for pFilename (case-insensitive).
+// Returns the file index, or -1 when no entry matches. Only valid when the
+// sorted offsets array was built at init time.
+static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
+ const char *pFilename) {
+ mz_zip_internal_state *pState = pZip->m_pState;
+ const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
+ const mz_zip_array *pCentral_dir = &pState->m_central_dir;
+ mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
+ &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
+ const mz_uint filename_len = (mz_uint)strlen(pFilename);
+ int lo = 0, hi = (int)pZip->m_total_files - 1;
+
+ while (lo <= hi) {
+ int mid = (lo + hi) >> 1;
+ int file_index = pIndices[mid];
+ int cmp =
+ mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
+ file_index, pFilename, filename_len);
+ if (cmp == 0)
+ return file_index;
+ if (cmp < 0)
+ lo = mid + 1;
+ else
+ hi = mid - 1;
+ }
+ return -1;
+}
+
+// Find the index of the entry named pName, optionally also requiring its
+// comment to equal pComment. Honors MZ_ZIP_FLAG_CASE_SENSITIVE and
+// MZ_ZIP_FLAG_IGNORE_PATH. Uses the sorted-index binary search when the
+// default flags allow it, otherwise a linear scan. Returns -1 if not found.
+int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
+ const char *pComment, mz_uint flags) {
+ mz_uint file_index;
+ size_t name_len, comment_len;
+ if ((!pZip) || (!pZip->m_pState) || (!pName) ||
+ (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
+ return -1;
+ // Fast path: the sorted index is built case-insensitively over full paths,
+ // so it is only usable with default flags and no comment filter.
+ if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
+ (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
+ return mz_zip_reader_locate_file_binary_search(pZip, pName);
+ name_len = strlen(pName);
+ if (name_len > 0xFFFF)
+ return -1;
+ comment_len = pComment ? strlen(pComment) : 0;
+ if (comment_len > 0xFFFF)
+ return -1;
+ // Slow path: walk every central-directory record.
+ for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
+ const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
+ &pZip->m_pState->m_central_dir, mz_uint8,
+ MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
+ file_index));
+ mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
+ const char *pFilename =
+ (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
+ if (filename_len < name_len)
+ continue;
+ if (comment_len) {
+ // The comment follows the filename and extra data in the record.
+ mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
+ file_comment_len =
+ MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
+ const char *pFile_comment = pFilename + filename_len + file_extra_len;
+ if ((file_comment_len != comment_len) ||
+ (!mz_zip_reader_string_equal(pComment, pFile_comment,
+ file_comment_len, flags)))
+ continue;
+ }
+ // With IGNORE_PATH, match only the basename: strip everything up to and
+ // including the last '/', '\\', or ':'.
+ if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
+ int ofs = filename_len - 1;
+ do {
+ if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
+ (pFilename[ofs] == ':'))
+ break;
+ } while (--ofs >= 0);
+ ofs++;
+ pFilename += ofs;
+ filename_len -= ofs;
+ }
+ if ((filename_len == name_len) &&
+ (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
+ return file_index;
+ }
+ return -1;
+}
+
+// Extract the entry at file_index into pBuf, which must be large enough for
+// the whole decompressed size (or the compressed size when
+// MZ_ZIP_FLAG_COMPRESSED_DATA is set). For file-backed archives an optional
+// pUser_read_buf supplies scratch space; otherwise one is allocated with the
+// archive's allocator. Only stored and deflate entries are supported;
+// decompressed output is CRC32-verified. Returns MZ_TRUE on success.
+mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
+ mz_uint file_index, void *pBuf,
+ size_t buf_size, mz_uint flags,
+ void *pUser_read_buf,
+ size_t user_read_buf_size) {
+ int status = TINFL_STATUS_DONE;
+ mz_uint64 needed_size, cur_file_ofs, comp_remaining,
+ out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
+ mz_zip_archive_file_stat file_stat;
+ void *pRead_buf;
+ mz_uint32
+ local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
+ sizeof(mz_uint32)];
+ mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
+ tinfl_decompressor inflator;
+
+ if ((buf_size) && (!pBuf))
+ return MZ_FALSE;
+
+ if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
+ return MZ_FALSE;
+
+ // Empty file, or a directory (but not always a directory - I've seen odd zips
+ // with directories that have compressed data which inflates to 0 bytes)
+ if (!file_stat.m_comp_size)
+ return MZ_TRUE;
+
+ // Entry is a subdirectory (I've seen old zips with dir entries which have
+ // compressed deflate data which inflates to 0 bytes, but these entries claim
+ // to uncompress to 512 bytes in the headers). I'm torn how to handle this
+ // case - should it fail instead?
+ if (mz_zip_reader_is_file_a_directory(pZip, file_index))
+ return MZ_TRUE;
+
+ // Encryption and patch files are not supported.
+ if (file_stat.m_bit_flag & (1 | 32))
+ return MZ_FALSE;
+
+ // This function only supports stored and deflate.
+ if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
+ (file_stat.m_method != MZ_DEFLATED))
+ return MZ_FALSE;
+
+ // Ensure supplied output buffer is large enough.
+ needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
+ : file_stat.m_uncomp_size;
+ if (buf_size < needed_size)
+ return MZ_FALSE;
+
+ // Read and parse the local directory entry.
+ cur_file_ofs = file_stat.m_local_header_ofs;
+ if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
+ return MZ_FALSE;
+ if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
+ return MZ_FALSE;
+
+ // The file data starts after the local header's variable-length
+ // filename and extra fields; verify it fits inside the archive.
+ cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
+ MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
+ MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
+ if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
+ return MZ_FALSE;
+
+ if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
+ // The file is stored or the caller has requested the compressed data.
+ if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
+ (size_t)needed_size) != needed_size)
+ return MZ_FALSE;
+ // For stored data verify the CRC; raw compressed data is returned as-is.
+ return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
+ (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
+ (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
+ }
+
+ // Decompress the file either directly from memory or from a file input
+ // buffer.
+ tinfl_init(&inflator);
+
+ if (pZip->m_pState->m_pMem) {
+ // Read directly from the archive in memory.
+ pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
+ read_buf_size = read_buf_avail = file_stat.m_comp_size;
+ comp_remaining = 0;
+ } else if (pUser_read_buf) {
+ // Use a user provided read buffer.
+ if (!user_read_buf_size)
+ return MZ_FALSE;
+ pRead_buf = (mz_uint8 *)pUser_read_buf;
+ read_buf_size = user_read_buf_size;
+ read_buf_avail = 0;
+ comp_remaining = file_stat.m_comp_size;
+ } else {
+ // Temporarily allocate a read buffer.
+ read_buf_size = MZ_MIN(file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE);
+ // On 32-bit size_t targets, reject buffers that size_t cannot express.
+ if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
+ return MZ_FALSE;
+
+ if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
+ (size_t)read_buf_size)))
+ return MZ_FALSE;
+ read_buf_avail = 0;
+ comp_remaining = file_stat.m_comp_size;
+ }
+
+ // Pump the inflater: refill the read buffer as it drains (file-backed
+ // case) and decompress straight into the caller's output buffer.
+ do {
+ size_t in_buf_size,
+ out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
+ if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
+ read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
+ if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
+ (size_t)read_buf_avail) != read_buf_avail) {
+ status = TINFL_STATUS_FAILED;
+ break;
+ }
+ cur_file_ofs += read_buf_avail;
+ comp_remaining -= read_buf_avail;
+ read_buf_ofs = 0;
+ }
+ in_buf_size = (size_t)read_buf_avail;
+ status = tinfl_decompress(
+ &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
+ (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
+ TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
+ (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
+ read_buf_avail -= in_buf_size;
+ read_buf_ofs += in_buf_size;
+ out_buf_ofs += out_buf_size;
+ } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
+
+ if (status == TINFL_STATUS_DONE) {
+ // Make sure the entire file was decompressed, and check its CRC.
+ if ((out_buf_ofs != file_stat.m_uncomp_size) ||
+ (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
+ (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
+ status = TINFL_STATUS_FAILED;
+ }
+
+ // Free the read buffer only if this function allocated it.
+ if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
+
+ return status == TINFL_STATUS_DONE;
+}
+
+// Locate pFilename (no comment filter) and extract it with caller-provided
+// buffers; see mz_zip_reader_extract_to_mem_no_alloc() for the semantics.
+// Returns MZ_FALSE when the name is not found.
+mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
+ mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
+ mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
+ int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
+ if (file_index < 0)
+ return MZ_FALSE;
+ return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
+ flags, pUser_read_buf,
+ user_read_buf_size);
+}
+
+// Extract by index into pBuf, letting the implementation allocate its own
+// temporary read buffer (no user-supplied scratch space).
+mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
+ void *pBuf, size_t buf_size,
+ mz_uint flags) {
+ return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
+ flags, NULL, 0);
+}
+
+// Extract by name into pBuf, letting the implementation allocate its own
+// temporary read buffer (no user-supplied scratch space).
+mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
+ const char *pFilename, void *pBuf,
+ size_t buf_size, mz_uint flags) {
+ return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
+ buf_size, flags, NULL, 0);
+}
+
+// Extract the entry at file_index into a buffer allocated with the archive's
+// allocator (pZip->m_pAlloc); the caller owns the result. On success *pSize
+// receives the byte count; on failure returns NULL (with *pSize set to 0).
+void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
+ size_t *pSize, mz_uint flags) {
+ mz_uint64 comp_size, uncomp_size, alloc_size;
+ const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
+ void *pBuf;
+
+ if (pSize)
+ *pSize = 0;
+ if (!p)
+ return NULL;
+
+ comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
+ uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
+
+ // Size the buffer for raw compressed bytes or for decompressed output.
+ alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
+ // On 32-bit size_t targets, reject sizes that size_t cannot express.
+ if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
+ return NULL;
+ if (NULL ==
+ (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
+ return NULL;
+
+ if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
+ flags)) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
+ return NULL;
+ }
+
+ if (pSize)
+ *pSize = (size_t)alloc_size;
+ return pBuf;
+}
+
+void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
+ const char *pFilename, size_t *pSize,
+ mz_uint flags) {
+ int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
+ if (file_index < 0) {
+ if (pSize)
+ *pSize = 0;
+ return MZ_FALSE;
+ }
+ return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
+}
+
+mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
+ mz_uint file_index,
+ mz_file_write_func pCallback,
+ void *pOpaque, mz_uint flags) {
+ int status = TINFL_STATUS_DONE;
+ mz_uint file_crc32 = MZ_CRC32_INIT;
+ mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
+ out_buf_ofs = 0, cur_file_ofs;
+ mz_zip_archive_file_stat file_stat;
+ void *pRead_buf = NULL;
+ void *pWrite_buf = NULL;
+ mz_uint32
+ local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
+ sizeof(mz_uint32)];
+ mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
+
+ if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
+ return MZ_FALSE;
+
+ // Empty file, or a directory (but not always a directory - I've seen odd zips
+ // with directories that have compressed data which inflates to 0 bytes)
+ if (!file_stat.m_comp_size)
+ return MZ_TRUE;
+
+ // Entry is a subdirectory (I've seen old zips with dir entries which have
+ // compressed deflate data which inflates to 0 bytes, but these entries claim
+ // to uncompress to 512 bytes in the headers). I'm torn how to handle this
+ // case - should it fail instead?
+ if (mz_zip_reader_is_file_a_directory(pZip, file_index))
+ return MZ_TRUE;
+
+ // Encryption and patch files are not supported.
+ if (file_stat.m_bit_flag & (1 | 32))
+ return MZ_FALSE;
+
+ // This function only supports stored and deflate.
+ if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
+ (file_stat.m_method != MZ_DEFLATED))
+ return MZ_FALSE;
+
+ // Read and parse the local directory entry.
+ cur_file_ofs = file_stat.m_local_header_ofs;
+ if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
+ return MZ_FALSE;
+ if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
+ return MZ_FALSE;
+
+ cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
+ MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
+ MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
+ if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
+ return MZ_FALSE;
+
+ // Decompress the file either directly from memory or from a file input
+ // buffer.
+ if (pZip->m_pState->m_pMem) {
+ pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
+ read_buf_size = read_buf_avail = file_stat.m_comp_size;
+ comp_remaining = 0;
+ } else {
+ read_buf_size = MZ_MIN(file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE);
+ if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
+ (size_t)read_buf_size)))
+ return MZ_FALSE;
+ read_buf_avail = 0;
+ comp_remaining = file_stat.m_comp_size;
+ }
+
+ if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
+ // The file is stored or the caller has requested the compressed data.
+ if (pZip->m_pState->m_pMem) {
+ if (((sizeof(size_t) == sizeof(mz_uint32))) &&
+ (file_stat.m_comp_size > 0xFFFFFFFF))
+ return MZ_FALSE;
+
+ if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
+ (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
+ status = TINFL_STATUS_FAILED;
+ else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
+ file_crc32 =
+ (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
+ (size_t)file_stat.m_comp_size);
+ // cur_file_ofs += file_stat.m_comp_size;
+ out_buf_ofs += file_stat.m_comp_size;
+ // comp_remaining = 0;
+ } else {
+ while (comp_remaining) {
+ read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
+ if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
+ (size_t)read_buf_avail) != read_buf_avail) {
+ status = TINFL_STATUS_FAILED;
+ break;
+ }
+
+ if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
+ file_crc32 = (mz_uint32)mz_crc32(
+ file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
+
+ if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
+ (size_t)read_buf_avail) != read_buf_avail) {
+ status = TINFL_STATUS_FAILED;
+ break;
+ }
+ cur_file_ofs += read_buf_avail;
+ out_buf_ofs += read_buf_avail;
+ comp_remaining -= read_buf_avail;
+ }
+ }
+ } else {
+ tinfl_decompressor inflator;
+ tinfl_init(&inflator);
+
+ if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
+ TINFL_LZ_DICT_SIZE)))
+ status = TINFL_STATUS_FAILED;
+ else {
+ do {
+ mz_uint8 *pWrite_buf_cur =
+ (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
+ size_t in_buf_size,
+ out_buf_size =
+ TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
+ if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
+ read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
+ if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
+ (size_t)read_buf_avail) != read_buf_avail) {
+ status = TINFL_STATUS_FAILED;
+ break;
+ }
+ cur_file_ofs += read_buf_avail;
+ comp_remaining -= read_buf_avail;
+ read_buf_ofs = 0;
+ }
+
+ in_buf_size = (size_t)read_buf_avail;
+ status = tinfl_decompress(
+ &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
+ (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
+ comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
+ read_buf_avail -= in_buf_size;
+ read_buf_ofs += in_buf_size;
+
+ if (out_buf_size) {
+ if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
+ out_buf_size) {
+ status = TINFL_STATUS_FAILED;
+ break;
+ }
+ file_crc32 =
+ (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
+ if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
+ status = TINFL_STATUS_FAILED;
+ break;
+ }
+ }
+ } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
+ (status == TINFL_STATUS_HAS_MORE_OUTPUT));
+ }
+ }
+
+ if ((status == TINFL_STATUS_DONE) &&
+ (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
+ // Make sure the entire file was decompressed, and check its CRC.
+ if ((out_buf_ofs != file_stat.m_uncomp_size) ||
+ (file_crc32 != file_stat.m_crc32))
+ status = TINFL_STATUS_FAILED;
+ }
+
+ if (!pZip->m_pState->m_pMem)
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
+ if (pWrite_buf)
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
+
+ return status == TINFL_STATUS_DONE;
+}
+
+mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
+ const char *pFilename,
+ mz_file_write_func pCallback,
+ void *pOpaque, mz_uint flags) {
+ int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
+ if (file_index < 0)
+ return MZ_FALSE;
+ return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
+ flags);
+}
+
+#ifndef MINIZ_NO_STDIO
+static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
+ const void *pBuf, size_t n) {
+ (void)ofs;
+ return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
+}
+
+mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
+ const char *pDst_filename,
+ mz_uint flags) {
+ mz_bool status;
+ mz_zip_archive_file_stat file_stat;
+ MZ_FILE *pFile;
+ if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
+ return MZ_FALSE;
+
+ pFile = MZ_FOPEN(pDst_filename, "wb");
+ if (!pFile)
+ return MZ_FALSE;
+ status = mz_zip_reader_extract_to_callback(
+ pZip, file_index, mz_zip_file_write_callback, pFile, flags);
+ if (MZ_FCLOSE(pFile) == EOF)
+ return MZ_FALSE;
+#ifndef MINIZ_NO_TIME
+ if (status) {
+ mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
+ }
+#endif
+
+ return status;
+}
+#endif // #ifndef MINIZ_NO_STDIO
+
+mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
+ if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
+ (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
+ return MZ_FALSE;
+
+ mz_zip_internal_state *pState = pZip->m_pState;
+ pZip->m_pState = NULL;
+ mz_zip_array_clear(pZip, &pState->m_central_dir);
+ mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
+ mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
+
+#ifndef MINIZ_NO_STDIO
+ if (pState->m_pFile) {
+ MZ_FCLOSE(pState->m_pFile);
+ pState->m_pFile = NULL;
+ }
+#endif // #ifndef MINIZ_NO_STDIO
+
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
+
+ pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
+
+ return MZ_TRUE;
+}
+
+#ifndef MINIZ_NO_STDIO
+mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
+ const char *pArchive_filename,
+ const char *pDst_filename,
+ mz_uint flags) {
+ int file_index =
+ mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
+ if (file_index < 0)
+ return MZ_FALSE;
+ return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
+}
+#endif
+
+// ------------------- .ZIP archive writing
+
+#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
+
+static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
+ p[0] = (mz_uint8)v;
+ p[1] = (mz_uint8)(v >> 8);
+}
+static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
+ p[0] = (mz_uint8)v;
+ p[1] = (mz_uint8)(v >> 8);
+ p[2] = (mz_uint8)(v >> 16);
+ p[3] = (mz_uint8)(v >> 24);
+}
+#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
+#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
+
+mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
+ if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
+ (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
+ return MZ_FALSE;
+
+ if (pZip->m_file_offset_alignment) {
+ // Ensure user specified file offset alignment is a power of 2.
+ if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
+ return MZ_FALSE;
+ }
+
+ if (!pZip->m_pAlloc)
+ pZip->m_pAlloc = def_alloc_func;
+ if (!pZip->m_pFree)
+ pZip->m_pFree = def_free_func;
+ if (!pZip->m_pRealloc)
+ pZip->m_pRealloc = def_realloc_func;
+
+ pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
+ pZip->m_archive_size = existing_size;
+ pZip->m_central_directory_file_ofs = 0;
+ pZip->m_total_files = 0;
+
+ if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
+ pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
+ return MZ_FALSE;
+ memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
+ MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
+ sizeof(mz_uint8));
+ MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
+ sizeof(mz_uint32));
+ MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
+ sizeof(mz_uint32));
+ return MZ_TRUE;
+}
+
+static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
+ const void *pBuf, size_t n) {
+ mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
+ mz_zip_internal_state *pState = pZip->m_pState;
+ mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
+
+ if ((!n) ||
+ ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
+ return 0;
+
+ if (new_size > pState->m_mem_capacity) {
+ void *pNew_block;
+ size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
+ while (new_capacity < new_size)
+ new_capacity *= 2;
+ if (NULL == (pNew_block = pZip->m_pRealloc(
+ pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
+ return 0;
+ pState->m_pMem = pNew_block;
+ pState->m_mem_capacity = new_capacity;
+ }
+ memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
+ pState->m_mem_size = (size_t)new_size;
+ return n;
+}
+
+mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
+ size_t size_to_reserve_at_beginning,
+ size_t initial_allocation_size) {
+ pZip->m_pWrite = mz_zip_heap_write_func;
+ pZip->m_pIO_opaque = pZip;
+ if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
+ return MZ_FALSE;
+ if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
+ size_to_reserve_at_beginning))) {
+ if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
+ pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
+ mz_zip_writer_end(pZip);
+ return MZ_FALSE;
+ }
+ pZip->m_pState->m_mem_capacity = initial_allocation_size;
+ }
+ return MZ_TRUE;
+}
+
+#ifndef MINIZ_NO_STDIO
+static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
+ const void *pBuf, size_t n) {
+ mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
+ mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
+ if (((mz_int64)file_ofs < 0) ||
+ (((cur_ofs != (mz_int64)file_ofs)) &&
+ (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
+ return 0;
+ return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
+}
+
+mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
+ mz_uint64 size_to_reserve_at_beginning) {
+ MZ_FILE *pFile;
+ pZip->m_pWrite = mz_zip_file_write_func;
+ pZip->m_pIO_opaque = pZip;
+ if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
+ return MZ_FALSE;
+ if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
+ mz_zip_writer_end(pZip);
+ return MZ_FALSE;
+ }
+ pZip->m_pState->m_pFile = pFile;
+ if (size_to_reserve_at_beginning) {
+ mz_uint64 cur_ofs = 0;
+ char buf[4096];
+ MZ_CLEAR_OBJ(buf);
+ do {
+ size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
+ mz_zip_writer_end(pZip);
+ return MZ_FALSE;
+ }
+ cur_ofs += n;
+ size_to_reserve_at_beginning -= n;
+ } while (size_to_reserve_at_beginning);
+ }
+ return MZ_TRUE;
+}
+#endif // #ifndef MINIZ_NO_STDIO
+
+mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
+ const char *pFilename) {
+ mz_zip_internal_state *pState;
+ if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
+ return MZ_FALSE;
+ // No sense in trying to write to an archive that's already at the support max
+ // size
+ if ((pZip->m_total_files == 0xFFFF) ||
+ ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
+ return MZ_FALSE;
+
+ pState = pZip->m_pState;
+
+ if (pState->m_pFile) {
+#ifdef MINIZ_NO_STDIO
+ pFilename;
+ return MZ_FALSE;
+#else
+ // Archive is being read from stdio - try to reopen as writable.
+ if (pZip->m_pIO_opaque != pZip)
+ return MZ_FALSE;
+ if (!pFilename)
+ return MZ_FALSE;
+ pZip->m_pWrite = mz_zip_file_write_func;
+ if (NULL ==
+ (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
+ // The mz_zip_archive is now in a bogus state because pState->m_pFile is
+ // NULL, so just close it.
+ mz_zip_reader_end(pZip);
+ return MZ_FALSE;
+ }
+#endif // #ifdef MINIZ_NO_STDIO
+ } else if (pState->m_pMem) {
+ // Archive lives in a memory block. Assume it's from the heap that we can
+ // resize using the realloc callback.
+ if (pZip->m_pIO_opaque != pZip)
+ return MZ_FALSE;
+ pState->m_mem_capacity = pState->m_mem_size;
+ pZip->m_pWrite = mz_zip_heap_write_func;
+ }
+ // Archive is being read via a user provided read function - make sure the
+ // user has specified a write function too.
+ else if (!pZip->m_pWrite)
+ return MZ_FALSE;
+
+ // Start writing new files at the archive's current central directory
+ // location.
+ pZip->m_archive_size = pZip->m_central_directory_file_ofs;
+ pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
+ pZip->m_central_directory_file_ofs = 0;
+
+ return MZ_TRUE;
+}
+
+mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
+ const void *pBuf, size_t buf_size,
+ mz_uint level_and_flags) {
+ return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
+ level_and_flags, 0, 0);
+}
+
+typedef struct {
+ mz_zip_archive *m_pZip;
+ mz_uint64 m_cur_archive_file_ofs;
+ mz_uint64 m_comp_size;
+} mz_zip_writer_add_state;
+
+static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
+ void *pUser) {
+ mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
+ if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
+ pState->m_cur_archive_file_ofs, pBuf,
+ len) != len)
+ return MZ_FALSE;
+ pState->m_cur_archive_file_ofs += len;
+ pState->m_comp_size += len;
+ return MZ_TRUE;
+}
+
+static mz_bool mz_zip_writer_create_local_dir_header(
+ mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
+ mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
+ mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
+ mz_uint16 dos_time, mz_uint16 dos_date) {
+ (void)pZip;
+ memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
+ MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
+ MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
+ MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
+ MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
+ return MZ_TRUE;
+}
+
+static mz_bool mz_zip_writer_create_central_dir_header(
+ mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
+ mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
+ mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
+ mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
+ mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
+ (void)pZip;
+ mz_uint16 version_made_by = 10 * MZ_VER_MAJOR + MZ_VER_MINOR;
+ version_made_by |= (MZ_PLATFORM << 8);
+
+ memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
+ MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_MADE_BY_OFS, version_made_by);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
+ MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
+ MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
+ MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
+ MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
+ MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
+ MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
+ return MZ_TRUE;
+}
+
+static mz_bool mz_zip_writer_add_to_central_dir(
+ mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
+ const void *pExtra, mz_uint16 extra_size, const void *pComment,
+ mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
+ mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
+ mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
+ mz_uint32 ext_attributes) {
+ mz_zip_internal_state *pState = pZip->m_pState;
+ mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
+ size_t orig_central_dir_size = pState->m_central_dir.m_size;
+ mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
+
+ // No zip64 support yet
+ if ((local_header_ofs > 0xFFFFFFFF) ||
+ (((mz_uint64)pState->m_central_dir.m_size +
+ MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
+ comment_size) > 0xFFFFFFFF))
+ return MZ_FALSE;
+
+ if (!mz_zip_writer_create_central_dir_header(
+ pZip, central_dir_header, filename_size, extra_size, comment_size,
+ uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
+ dos_date, local_header_ofs, ext_attributes))
+ return MZ_FALSE;
+
+ if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
+ MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
+ (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
+ filename_size)) ||
+ (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
+ extra_size)) ||
+ (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
+ comment_size)) ||
+ (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
+                                 &central_dir_ofs, 1))) {
+ // Try to push the central directory array back into its original state.
+ mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
+ MZ_FALSE);
+ return MZ_FALSE;
+ }
+
+ return MZ_TRUE;
+}
+
+static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
+ // Basic ZIP archive filename validity checks: Valid filenames cannot start
+ // with a forward slash, cannot contain a drive letter, and cannot use
+ // DOS-style backward slashes.
+ if (*pArchive_name == '/')
+ return MZ_FALSE;
+ while (*pArchive_name) {
+ if ((*pArchive_name == '\\') || (*pArchive_name == ':'))
+ return MZ_FALSE;
+ pArchive_name++;
+ }
+ return MZ_TRUE;
+}
+
+static mz_uint
+mz_zip_writer_compute_padding_needed_for_file_alignment(mz_zip_archive *pZip) {
+ mz_uint32 n;
+ if (!pZip->m_file_offset_alignment)
+ return 0;
+ n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
+ return (pZip->m_file_offset_alignment - n) &
+ (pZip->m_file_offset_alignment - 1);
+}
+
+static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
+ mz_uint64 cur_file_ofs, mz_uint32 n) {
+ char buf[4096];
+ memset(buf, 0, MZ_MIN(sizeof(buf), n));
+ while (n) {
+ mz_uint32 s = MZ_MIN(sizeof(buf), n);
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
+ return MZ_FALSE;
+ cur_file_ofs += s;
+ n -= s;
+ }
+ return MZ_TRUE;
+}
+
+mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
+ const char *pArchive_name, const void *pBuf,
+ size_t buf_size, const void *pComment,
+ mz_uint16 comment_size,
+ mz_uint level_and_flags, mz_uint64 uncomp_size,
+ mz_uint32 uncomp_crc32) {
+ mz_uint32 ext_attributes = 0;
+ mz_uint16 method = 0, dos_time = 0, dos_date = 0;
+ mz_uint level, num_alignment_padding_bytes;
+ mz_uint64 local_dir_header_ofs, cur_archive_file_ofs, comp_size = 0;
+ size_t archive_name_size;
+ mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
+ tdefl_compressor *pComp = NULL;
+ mz_bool store_data_uncompressed;
+ mz_zip_internal_state *pState;
+
+ if ((int)level_and_flags < 0)
+ level_and_flags = MZ_DEFAULT_LEVEL;
+ level = level_and_flags & 0xF;
+ store_data_uncompressed =
+ ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
+
+ if ((!pZip) || (!pZip->m_pState) ||
+ (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
+ (!pArchive_name) || ((comment_size) && (!pComment)) ||
+ (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
+ return MZ_FALSE;
+
+ local_dir_header_ofs = cur_archive_file_ofs = pZip->m_archive_size;
+ pState = pZip->m_pState;
+
+ if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
+ return MZ_FALSE;
+ // No zip64 support yet
+ if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
+ return MZ_FALSE;
+ if (!mz_zip_writer_validate_archive_name(pArchive_name))
+ return MZ_FALSE;
+
+#ifndef MINIZ_NO_TIME
+ {
+ time_t cur_time;
+ time(&cur_time);
+ mz_zip_time_t_to_dos_time(cur_time, &dos_time, &dos_date);
+ }
+#endif // #ifndef MINIZ_NO_TIME
+
+ archive_name_size = strlen(pArchive_name);
+ if (archive_name_size > 0xFFFF)
+ return MZ_FALSE;
+
+ num_alignment_padding_bytes =
+ mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
+
+ // no zip64 support yet
+ if ((pZip->m_total_files == 0xFFFF) ||
+ ((pZip->m_archive_size + num_alignment_padding_bytes +
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
+ comment_size + archive_name_size) > 0xFFFFFFFF))
+ return MZ_FALSE;
+
+ if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
+ // Set DOS Subdirectory attribute bit.
+ ext_attributes |= 0x10;
+ // Subdirectories cannot contain data.
+ if ((buf_size) || (uncomp_size))
+ return MZ_FALSE;
+ }
+
+ // Try to do any allocations before writing to the archive, so if an
+ // allocation fails the file remains unmodified. (A good idea if we're doing
+ // an in-place modification.)
+ if ((!mz_zip_array_ensure_room(pZip, &pState->m_central_dir,
+ MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
+ archive_name_size + comment_size)) ||
+ (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
+ return MZ_FALSE;
+
+ if ((!store_data_uncompressed) && (buf_size)) {
+ if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
+ pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
+ return MZ_FALSE;
+ }
+
+ if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs,
+ num_alignment_padding_bytes +
+ sizeof(local_dir_header))) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
+ return MZ_FALSE;
+ }
+ local_dir_header_ofs += num_alignment_padding_bytes;
+ if (pZip->m_file_offset_alignment) {
+ MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
+ 0);
+ }
+ cur_archive_file_ofs +=
+ num_alignment_padding_bytes + sizeof(local_dir_header);
+
+ MZ_CLEAR_OBJ(local_dir_header);
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
+ archive_name_size) != archive_name_size) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
+ return MZ_FALSE;
+ }
+ cur_archive_file_ofs += archive_name_size;
+
+ if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
+ uncomp_crc32 =
+ (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
+ uncomp_size = buf_size;
+ if (uncomp_size <= 3) {
+ level = 0;
+ store_data_uncompressed = MZ_TRUE;
+ }
+ }
+
+ if (store_data_uncompressed) {
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
+ buf_size) != buf_size) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
+ return MZ_FALSE;
+ }
+
+ cur_archive_file_ofs += buf_size;
+ comp_size = buf_size;
+
+ if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
+ method = MZ_DEFLATED;
+ } else if (buf_size) {
+ mz_zip_writer_add_state state;
+
+ state.m_pZip = pZip;
+ state.m_cur_archive_file_ofs = cur_archive_file_ofs;
+ state.m_comp_size = 0;
+
+ if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
+ tdefl_create_comp_flags_from_zip_params(
+ level, -15, MZ_DEFAULT_STRATEGY)) !=
+ TDEFL_STATUS_OKAY) ||
+ (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
+ TDEFL_STATUS_DONE)) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
+ return MZ_FALSE;
+ }
+
+ comp_size = state.m_comp_size;
+ cur_archive_file_ofs = state.m_cur_archive_file_ofs;
+
+ method = MZ_DEFLATED;
+ }
+
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
+ pComp = NULL;
+
+ // no zip64 support yet
+ if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
+ return MZ_FALSE;
+
+ if (!mz_zip_writer_create_local_dir_header(
+ pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
+ comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
+ return MZ_FALSE;
+
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
+ sizeof(local_dir_header)) != sizeof(local_dir_header))
+ return MZ_FALSE;
+
+ if (!mz_zip_writer_add_to_central_dir(
+ pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
+ comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
+ dos_time, dos_date, local_dir_header_ofs, ext_attributes))
+ return MZ_FALSE;
+
+ pZip->m_total_files++;
+ pZip->m_archive_size = cur_archive_file_ofs;
+
+ return MZ_TRUE;
+}
+
+#ifndef MINIZ_NO_STDIO
+mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
+ const char *pSrc_filename, const void *pComment,
+ mz_uint16 comment_size, mz_uint level_and_flags,
+ mz_uint32 ext_attributes) {
+ mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
+ mz_uint16 method = 0, dos_time = 0, dos_date = 0;
+#ifndef MINIZ_NO_TIME
+ time_t file_modified_time;
+#endif
+
+ mz_uint64 local_dir_header_ofs, cur_archive_file_ofs, uncomp_size = 0,
+ comp_size = 0;
+ size_t archive_name_size;
+ mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
+ MZ_FILE *pSrc_file = NULL;
+
+ if ((int)level_and_flags < 0)
+ level_and_flags = MZ_DEFAULT_LEVEL;
+ level = level_and_flags & 0xF;
+
+ if ((!pZip) || (!pZip->m_pState) ||
+ (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
+ ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
+ return MZ_FALSE;
+
+ local_dir_header_ofs = cur_archive_file_ofs = pZip->m_archive_size;
+
+ if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
+ return MZ_FALSE;
+ if (!mz_zip_writer_validate_archive_name(pArchive_name))
+ return MZ_FALSE;
+
+ archive_name_size = strlen(pArchive_name);
+ if (archive_name_size > 0xFFFF)
+ return MZ_FALSE;
+
+ num_alignment_padding_bytes =
+ mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
+
+ // no zip64 support yet
+ if ((pZip->m_total_files == 0xFFFF) ||
+ ((pZip->m_archive_size + num_alignment_padding_bytes +
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
+ comment_size + archive_name_size) > 0xFFFFFFFF))
+ return MZ_FALSE;
+
+#ifndef MINIZ_NO_TIME
+ memset(&file_modified_time, 0, sizeof(file_modified_time));
+ if (!mz_zip_get_file_modified_time(pSrc_filename, &file_modified_time))
+ return MZ_FALSE;
+ mz_zip_time_t_to_dos_time(file_modified_time, &dos_time, &dos_date);
+#endif
+
+ pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
+ if (!pSrc_file)
+ return MZ_FALSE;
+ MZ_FSEEK64(pSrc_file, 0, SEEK_END);
+ uncomp_size = MZ_FTELL64(pSrc_file);
+ MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
+
+ if (uncomp_size > 0xFFFFFFFF) {
+ // No zip64 support yet
+ MZ_FCLOSE(pSrc_file);
+ return MZ_FALSE;
+ }
+ if (uncomp_size <= 3)
+ level = 0;
+
+ if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs,
+ num_alignment_padding_bytes +
+ sizeof(local_dir_header))) {
+ MZ_FCLOSE(pSrc_file);
+ return MZ_FALSE;
+ }
+ local_dir_header_ofs += num_alignment_padding_bytes;
+ if (pZip->m_file_offset_alignment) {
+ MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
+ 0);
+ }
+ cur_archive_file_ofs +=
+ num_alignment_padding_bytes + sizeof(local_dir_header);
+
+ MZ_CLEAR_OBJ(local_dir_header);
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
+ archive_name_size) != archive_name_size) {
+ MZ_FCLOSE(pSrc_file);
+ return MZ_FALSE;
+ }
+ cur_archive_file_ofs += archive_name_size;
+
+ if (uncomp_size) {
+ mz_uint64 uncomp_remaining = uncomp_size;
+ void *pRead_buf =
+ pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
+ if (!pRead_buf) {
+ MZ_FCLOSE(pSrc_file);
+ return MZ_FALSE;
+ }
+
+ if (!level) {
+ while (uncomp_remaining) {
+ mz_uint n = (mz_uint)MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
+ if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
+ (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
+ n) != n)) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
+ MZ_FCLOSE(pSrc_file);
+ return MZ_FALSE;
+ }
+ uncomp_crc32 =
+ (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
+ uncomp_remaining -= n;
+ cur_archive_file_ofs += n;
+ }
+ comp_size = uncomp_size;
+ } else {
+ mz_bool result = MZ_FALSE;
+ mz_zip_writer_add_state state;
+ tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
+ pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
+ if (!pComp) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
+ MZ_FCLOSE(pSrc_file);
+ return MZ_FALSE;
+ }
+
+ state.m_pZip = pZip;
+ state.m_cur_archive_file_ofs = cur_archive_file_ofs;
+ state.m_comp_size = 0;
+
+ if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
+ tdefl_create_comp_flags_from_zip_params(
+ level, -15, MZ_DEFAULT_STRATEGY)) !=
+ TDEFL_STATUS_OKAY) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
+ MZ_FCLOSE(pSrc_file);
+ return MZ_FALSE;
+ }
+
+ for (;;) {
+ size_t in_buf_size =
+ (mz_uint32)MZ_MIN(uncomp_remaining, MZ_ZIP_MAX_IO_BUF_SIZE);
+ tdefl_status status;
+
+ if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
+ break;
+
+ uncomp_crc32 = (mz_uint32)mz_crc32(
+ uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
+ uncomp_remaining -= in_buf_size;
+
+ status = tdefl_compress_buffer(pComp, pRead_buf, in_buf_size,
+ uncomp_remaining ? TDEFL_NO_FLUSH
+ : TDEFL_FINISH);
+ if (status == TDEFL_STATUS_DONE) {
+ result = MZ_TRUE;
+ break;
+ } else if (status != TDEFL_STATUS_OKAY)
+ break;
+ }
+
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
+
+ if (!result) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
+ MZ_FCLOSE(pSrc_file);
+ return MZ_FALSE;
+ }
+
+ comp_size = state.m_comp_size;
+ cur_archive_file_ofs = state.m_cur_archive_file_ofs;
+
+ method = MZ_DEFLATED;
+ }
+
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
+ }
+
+ MZ_FCLOSE(pSrc_file);
+ pSrc_file = NULL;
+
+ // no zip64 support yet
+ if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
+ return MZ_FALSE;
+
+ if (!mz_zip_writer_create_local_dir_header(
+ pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
+ comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
+ return MZ_FALSE;
+
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
+ sizeof(local_dir_header)) != sizeof(local_dir_header))
+ return MZ_FALSE;
+
+ if (!mz_zip_writer_add_to_central_dir(
+ pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
+ comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
+ dos_time, dos_date, local_dir_header_ofs, ext_attributes))
+ return MZ_FALSE;
+
+ pZip->m_total_files++;
+ pZip->m_archive_size = cur_archive_file_ofs;
+
+ return MZ_TRUE;
+}
+#endif // #ifndef MINIZ_NO_STDIO
+
+// Copies entry `file_index` from pSource_zip into pZip WITHOUT recompressing:
+// the raw local header, filename/extra bytes, compressed payload, optional
+// data descriptor, and the central-directory record are transferred as-is.
+// Only the local-header offset in the new central-directory record is
+// rewritten for the destination archive. Returns MZ_FALSE on any validation,
+// I/O, or allocation failure. No zip64 support: entry count and offsets are
+// capped at 16/32 bits.
+mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
+ mz_zip_archive *pSource_zip,
+ mz_uint file_index) {
+ mz_uint n, bit_flags, num_alignment_padding_bytes;
+ mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
+ mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
+ mz_uint32
+ local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
+ sizeof(mz_uint32)];
+ mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
+ mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
+ size_t orig_central_dir_size;
+ mz_zip_internal_state *pState;
+ void *pBuf;
+ const mz_uint8 *pSrc_central_header;
+
+ if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
+ return MZ_FALSE;
+ if (NULL ==
+ (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
+ return MZ_FALSE;
+ pState = pZip->m_pState;
+
+ num_alignment_padding_bytes =
+ mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
+
+ // no zip64 support yet
+ if ((pZip->m_total_files == 0xFFFF) ||
+ ((pZip->m_archive_size + num_alignment_padding_bytes +
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
+ 0xFFFFFFFF))
+ return MZ_FALSE;
+
+ cur_src_file_ofs =
+ MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
+ cur_dst_file_ofs = pZip->m_archive_size;
+
+ // Read and validate the source entry's local header before copying.
+ if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
+ pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
+ return MZ_FALSE;
+ if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
+ return MZ_FALSE;
+ cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
+
+ if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
+ num_alignment_padding_bytes))
+ return MZ_FALSE;
+ cur_dst_file_ofs += num_alignment_padding_bytes;
+ local_dir_header_ofs = cur_dst_file_ofs;
+ if (pZip->m_file_offset_alignment) {
+ MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
+ 0);
+ }
+
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
+ MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
+ return MZ_FALSE;
+ cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
+
+ // Filename + extra-field bytes immediately follow the local header; copy
+ // them along with the compressed payload in one streaming pass.
+ n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
+ MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
+ comp_bytes_remaining =
+ n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
+
+ // Buffer must hold at least 4 mz_uint32's so a data descriptor fits below.
+ if (NULL ==
+ (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
+ (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
+ MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE,
+ comp_bytes_remaining)))))
+ return MZ_FALSE;
+
+ while (comp_bytes_remaining) {
+ n = (mz_uint)MZ_MIN(MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
+ if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
+ n) != n) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
+ return MZ_FALSE;
+ }
+ cur_src_file_ofs += n;
+
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
+ return MZ_FALSE;
+ }
+ cur_dst_file_ofs += n;
+
+ comp_bytes_remaining -= n;
+ }
+
+ // Bit 3 of the general-purpose flags means a data descriptor trails the
+ // compressed data; copy it too (3 or 4 dwords depending on the optional
+ // 0x08074b50 signature).
+ bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
+ if (bit_flags & 8) {
+ // Copy data descriptor
+ if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
+ sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
+ return MZ_FALSE;
+ }
+
+ n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
+ return MZ_FALSE;
+ }
+
+ // cur_src_file_ofs += n;
+ cur_dst_file_ofs += n;
+ }
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
+
+ // no zip64 support yet
+ if (cur_dst_file_ofs > 0xFFFFFFFF)
+ return MZ_FALSE;
+
+ orig_central_dir_size = pState->m_central_dir.m_size;
+
+ // Append the source's central-directory record, patching only the
+ // local-header offset to point into the destination archive.
+ memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
+ MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
+ local_dir_header_ofs);
+ // NOTE(review): if a later push fails, this first push is not rolled back
+ // here (the rollback below only covers the variable-length tail).
+ if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
+ MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
+ return MZ_FALSE;
+
+ n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
+ MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
+ MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
+ if (!mz_zip_array_push_back(
+ pZip, &pState->m_central_dir,
+ pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
+ mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
+ MZ_FALSE);
+ return MZ_FALSE;
+ }
+
+ if (pState->m_central_dir.m_size > 0xFFFFFFFF)
+ return MZ_FALSE;
+ n = (mz_uint32)orig_central_dir_size;
+ if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
+ mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
+ MZ_FALSE);
+ return MZ_FALSE;
+ }
+
+ pZip->m_total_files++;
+ pZip->m_archive_size = cur_dst_file_ofs;
+
+ return MZ_TRUE;
+}
+
+// Finalizes a writer: appends the accumulated central directory followed by
+// the end-of-central-directory record, flushes the stdio file (if any), and
+// moves the writer into MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED. No zip64:
+// entry count must fit in 16 bits and the total size in 32 bits.
+mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
+ mz_zip_internal_state *pState;
+ mz_uint64 central_dir_ofs, central_dir_size;
+ mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
+
+ if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
+ return MZ_FALSE;
+
+ pState = pZip->m_pState;
+
+ // no zip64 support yet
+ if ((pZip->m_total_files > 0xFFFF) ||
+ ((pZip->m_archive_size + pState->m_central_dir.m_size +
+ MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
+ return MZ_FALSE;
+
+ central_dir_ofs = 0;
+ central_dir_size = 0;
+ if (pZip->m_total_files) {
+ // Write central directory
+ central_dir_ofs = pZip->m_archive_size;
+ central_dir_size = pState->m_central_dir.m_size;
+ pZip->m_central_directory_file_ofs = central_dir_ofs;
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
+ pState->m_central_dir.m_p,
+ (size_t)central_dir_size) != central_dir_size)
+ return MZ_FALSE;
+ pZip->m_archive_size += central_dir_size;
+ }
+
+ // Write end of central directory record
+ MZ_CLEAR_OBJ(hdr);
+ MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
+ MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
+ MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
+ pZip->m_total_files);
+ MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
+ MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
+ MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
+
+ if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
+ sizeof(hdr)) != sizeof(hdr))
+ return MZ_FALSE;
+#ifndef MINIZ_NO_STDIO
+ if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF))
+ return MZ_FALSE;
+#endif // #ifndef MINIZ_NO_STDIO
+
+ pZip->m_archive_size += sizeof(hdr);
+
+ pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
+ return MZ_TRUE;
+}
+
+// Finalizes a heap-backed writer and hands the archive's memory buffer to
+// the caller via *pBuf/*pSize; the writer's internal pointer is cleared so
+// mz_zip_writer_end() will not free it. Only valid for archives created
+// with the heap write callback. Caller owns the returned buffer.
+mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
+ size_t *pSize) {
+ if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize))
+ return MZ_FALSE;
+ if (pZip->m_pWrite != mz_zip_heap_write_func)
+ return MZ_FALSE;
+ if (!mz_zip_writer_finalize_archive(pZip))
+ return MZ_FALSE;
+
+ // Transfer ownership of the in-memory archive to the caller.
+ *pBuf = pZip->m_pState->m_pMem;
+ *pSize = pZip->m_pState->m_mem_size;
+ pZip->m_pState->m_pMem = NULL;
+ pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
+ return MZ_TRUE;
+}
+
+// Tears down a writer (finalized or not): frees the central-directory
+// arrays, closes the stdio file if one is open, frees any heap archive
+// buffer still owned by the state, then frees the state itself and marks
+// the archive invalid. Deallocation cannot fail; always returns MZ_TRUE
+// once the initial validation passes.
+mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
+ mz_zip_internal_state *pState;
+ mz_bool status = MZ_TRUE;
+ if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
+ ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
+ (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
+ return MZ_FALSE;
+
+ pState = pZip->m_pState;
+ pZip->m_pState = NULL;
+ mz_zip_array_clear(pZip, &pState->m_central_dir);
+ mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
+ mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
+
+#ifndef MINIZ_NO_STDIO
+ if (pState->m_pFile) {
+ MZ_FCLOSE(pState->m_pFile);
+ pState->m_pFile = NULL;
+ }
+#endif // #ifndef MINIZ_NO_STDIO
+
+ // Heap archives whose buffer was never handed to the caller (via
+ // mz_zip_writer_finalize_heap_archive) are freed here.
+ if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
+ pState->m_pMem = NULL;
+ }
+
+ pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
+ pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
+ return status;
+}
+
+#ifndef MINIZ_NO_STDIO
+// One-shot helper: adds a memory buffer as a new entry to the ZIP file at
+// pZip_filename, creating the archive if it does not exist or appending in
+// place if it does. The archive is always finalized and closed before
+// returning; if anything failed AND the archive was newly created, the file
+// is deleted so no broken archive is left behind.
+mz_bool mz_zip_add_mem_to_archive_file_in_place(
+ const char *pZip_filename, const char *pArchive_name, const void *pBuf,
+ size_t buf_size, const void *pComment, mz_uint16 comment_size,
+ mz_uint level_and_flags) {
+ mz_bool status, created_new_archive = MZ_FALSE;
+ mz_zip_archive zip_archive;
+ struct MZ_FILE_STAT_STRUCT file_stat;
+ MZ_CLEAR_OBJ(zip_archive);
+ if ((int)level_and_flags < 0)
+ level_and_flags = MZ_DEFAULT_LEVEL;
+ if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
+ ((comment_size) && (!pComment)) ||
+ ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
+ return MZ_FALSE;
+ if (!mz_zip_writer_validate_archive_name(pArchive_name))
+ return MZ_FALSE;
+ // stat() failing is how "archive does not exist yet" is detected.
+ if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
+ // Create a new archive.
+ if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
+ return MZ_FALSE;
+ created_new_archive = MZ_TRUE;
+ } else {
+ // Append to an existing archive.
+ if (!mz_zip_reader_init_file(&zip_archive, pZip_filename,
+ level_and_flags |
+ MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
+ return MZ_FALSE;
+ if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
+ mz_zip_reader_end(&zip_archive);
+ return MZ_FALSE;
+ }
+ }
+ status =
+ mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
+ pComment, comment_size, level_and_flags, 0, 0);
+ // Always finalize, even if adding failed for some reason, so we have a valid
+ // central directory. (This may not always succeed, but we can try.)
+ if (!mz_zip_writer_finalize_archive(&zip_archive))
+ status = MZ_FALSE;
+ if (!mz_zip_writer_end(&zip_archive))
+ status = MZ_FALSE;
+ if ((!status) && (created_new_archive)) {
+ // It's a new archive and something went wrong, so just delete it.
+ int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
+ (void)ignoredStatus;
+ }
+ return status;
+}
+
+// One-shot helper: opens the ZIP at pZip_filename, locates pArchive_name,
+// and extracts it into a freshly allocated buffer. Returns NULL on any
+// failure (bad args, open/locate/extract error); on success the caller owns
+// the returned buffer and *pSize holds its length.
+void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
+ const char *pArchive_name,
+ size_t *pSize, mz_uint flags) {
+ int file_index;
+ mz_zip_archive zip_archive;
+ void *p = NULL;
+
+ if (pSize)
+ *pSize = 0;
+
+ if ((!pZip_filename) || (!pArchive_name))
+ return NULL;
+
+ MZ_CLEAR_OBJ(zip_archive);
+ if (!mz_zip_reader_init_file(&zip_archive, pZip_filename,
+ flags |
+ MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
+ return NULL;
+
+ if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
+ flags)) >= 0)
+ p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
+
+ // Close the archive whether or not extraction succeeded.
+ mz_zip_reader_end(&zip_archive);
+ return p;
+}
+
+#endif // #ifndef MINIZ_NO_STDIO
+
+#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
+
+#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MINIZ_HEADER_FILE_ONLY
+
+/*
+ This is free and unencumbered software released into the public domain.
+
+ Anyone is free to copy, modify, publish, use, compile, sell, or
+ distribute this software, either in source code form or as a compiled
+ binary, for any purpose, commercial or non-commercial, and by any
+ means.
+
+ In jurisdictions that recognize copyright laws, the author or authors
+ of this software dedicate any and all copyright interest in the
+ software to the public domain. We make this dedication for the benefit
+ of the public at large and to the detriment of our heirs and
+ successors. We intend this dedication to be an overt act of
+ relinquishment in perpetuity of all present and future rights to this
+ software under copyright law.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ For more information, please refer to <https://unlicense.org>
+*/
diff --git a/test/standalone/issue_9812/vendor/kuba-zip/zip.c b/test/standalone/issue_9812/vendor/kuba-zip/zip.c
new file mode 100644
index 0000000000..3e648995c1
--- /dev/null
+++ b/test/standalone/issue_9812/vendor/kuba-zip/zip.c
@@ -0,0 +1,1622 @@
+/*
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define __STDC_WANT_LIB_EXT1__ 1
+
+#include <errno.h>
+#include <sys/stat.h>
+#include <time.h>
+
+#if defined(_WIN32) || defined(__WIN32__) || defined(_MSC_VER) || \
+ defined(__MINGW32__)
+/* Win32, DOS, MSVC, MSVS */
+#include <direct.h>
+
+#define MKDIR(DIRNAME) _mkdir(DIRNAME)
+#define STRCLONE(STR) ((STR) ? _strdup(STR) : NULL)
+#define HAS_DEVICE(P) \
+ ((((P)[0] >= 'A' && (P)[0] <= 'Z') || ((P)[0] >= 'a' && (P)[0] <= 'z')) && \
+ (P)[1] == ':')
+#define FILESYSTEM_PREFIX_LEN(P) (HAS_DEVICE(P) ? 2 : 0)
+
+#else
+
+#include <unistd.h> // needed for symlink()
+
+#define MKDIR(DIRNAME) mkdir(DIRNAME, 0755)
+#define STRCLONE(STR) ((STR) ? strdup(STR) : NULL)
+
+#endif
+
+#ifdef __MINGW32__
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#include "miniz.h"
+#include "zip.h"
+
+#ifdef _MSC_VER
+#include <io.h>
+
+#define ftruncate(fd, sz) (-(_chsize_s((fd), (sz)) != 0))
+#define fileno _fileno
+#endif
+
+#ifndef HAS_DEVICE
+#define HAS_DEVICE(P) 0
+#endif
+
+#ifndef FILESYSTEM_PREFIX_LEN
+#define FILESYSTEM_PREFIX_LEN(P) 0
+#endif
+
+#ifndef ISSLASH
+#define ISSLASH(C) ((C) == '/' || (C) == '\\')
+#endif
+
+// Frees `ptr` if non-NULL and resets it to NULL; the (void *) cast lets it
+// be used on const-qualified pointers without a warning.
+#define CLEANUP(ptr) \
+ do { \
+ if (ptr) { \
+ free((void *)ptr); \
+ ptr = NULL; \
+ } \
+ } while (0)
+
+// State of the single currently-open entry of a zip_t handle (one entry can
+// be open at a time; see `struct zip_t` below, which embeds exactly one).
+struct zip_entry_t {
+ int index;
+ char *name;
+ mz_uint64 uncomp_size;
+ mz_uint64 comp_size;
+ mz_uint32 uncomp_crc32;
+ mz_uint64 offset;
+ // Raw local-directory header bytes for this entry.
+ mz_uint8 header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
+ mz_uint64 header_offset;
+ mz_uint16 method;
+ // Streaming-write state and the embedded tdefl compressor used while the
+ // entry is being written.
+ mz_zip_writer_add_state state;
+ tdefl_compressor comp;
+ mz_uint32 external_attr;
+ time_t m_time;
+};
+
+// Opaque archive handle returned by zip_open(): the underlying miniz
+// archive, the compression level chosen at open time, and the one
+// currently-open entry.
+struct zip_t {
+ mz_zip_archive archive;
+ mz_uint level;
+ struct zip_entry_t entry;
+};
+
+// Per-entry action computed during entry deletion: keep in place, delete,
+// or move (shift down to fill the hole left by deleted entries).
+enum zip_modify_t {
+ MZ_KEEP = 0,
+ MZ_DELETE = 1,
+ MZ_MOVE = 2,
+};
+
+// Bookkeeping record for one archive entry while rewriting the archive
+// during deletion: its action, its local-header offset, its position in
+// offset-sorted order (file_index), and the byte length of its local-file
+// region (lf_length).
+struct zip_entry_mark_t {
+ int file_index;
+ enum zip_modify_t type;
+ mz_uint64 m_local_header_ofs;
+ mz_uint64 lf_length;
+};
+
+// Human-readable messages indexed by negated ZIP_E* error code (see
+// zip_strerror). Index 0 is unused (success has no message).
+static const char *const zip_errlist[30] = {
+ NULL,
+ "not initialized\0",
+ "invalid entry name\0",
+ "entry not found\0",
+ "invalid zip mode\0",
+ "invalid compression level\0",
+ "no zip 64 support\0",
+ "memset error\0",
+ "cannot write data to entry\0",
+ "cannot initialize tdefl compressor\0",
+ "invalid index\0",
+ "header not found\0",
+ "cannot flush tdefl buffer\0",
+ "cannot write entry header\0",
+ "cannot create entry header\0",
+ "cannot write to central dir\0",
+ "cannot open file\0",
+ "invalid entry type\0",
+ "extracting data using no memory allocation\0",
+ "file not found\0",
+ "no permission\0",
+ "out of memory\0",
+ "invalid zip archive name\0",
+ "make dir error\0",
+ "symlink error\0",
+ "close archive error\0",
+ "capacity size too small\0",
+ "fseek error\0",
+ "fread error\0",
+ "fwrite error\0",
+};
+
+// Maps a (negative) zip error code to its message, or NULL when errnum is
+// zero or out of the table's range.
+const char *zip_strerror(int errnum) {
+ // Error codes are negative; the table is indexed by their magnitude.
+ errnum = -errnum;
+ if (errnum <= 0 || errnum >= 30) {
+ return NULL;
+ }
+
+ return zip_errlist[errnum];
+}
+
+// Returns a pointer into `name` at the final path component, skipping any
+// drive prefix ("C:") on Windows. A name consisting only of slashes yields
+// a pointer to its last slash so "/" is returned rather than "".
+static const char *zip_basename(const char *name) {
+ char const *p;
+ char const *base = name += FILESYSTEM_PREFIX_LEN(name);
+ int all_slashes = 1;
+
+ for (p = name; *p; p++) {
+ if (ISSLASH(*p))
+ base = p + 1;
+ else
+ all_slashes = 0;
+ }
+
+ /* If NAME is all slashes, arrange to return `/'. */
+ if (*base == '\0' && ISSLASH(*name) && all_slashes)
+ --base;
+
+ return base;
+}
+
+// Creates every intermediate directory of `path` (mkdir -p style) by
+// rebuilding the path one character at a time into `npath` and calling
+// MKDIR at each slash. An already-existing directory (EEXIST) is not an
+// error. Returns 0 on success or ZIP_EMKDIR. The final component itself is
+// NOT created (the loop only fires on separators).
+static int zip_mkpath(char *path) {
+ char *p;
+ char npath[MAX_PATH + 1];
+ int len = 0;
+ int has_device = HAS_DEVICE(path);
+
+ memset(npath, 0, MAX_PATH + 1);
+ if (has_device) {
+ // only on windows
+ npath[0] = path[0];
+ npath[1] = path[1];
+ len = 2;
+ }
+ for (p = path + len; *p && len < MAX_PATH; p++) {
+ if (ISSLASH(*p) && ((!has_device && len > 0) || (has_device && len > 2))) {
+#if defined(_WIN32) || defined(__WIN32__) || defined(_MSC_VER) || \
+ defined(__MINGW32__)
+#else
+ // On POSIX, normalize backslash separators in-place as we go.
+ if ('\\' == *p) {
+ *p = '/';
+ }
+#endif
+
+ if (MKDIR(npath) == -1) {
+ if (errno != EEXIST) {
+ return ZIP_EMKDIR;
+ }
+ }
+ }
+ npath[len++] = *p;
+ }
+
+ return 0;
+}
+
+// Returns a freshly calloc'd copy of at most `n` characters of `str` with
+// every `oldchar` replaced by `newchar`. Stops early at NUL. Returns NULL
+// on allocation failure; otherwise caller frees the result.
+static char *zip_strrpl(const char *str, size_t n, char oldchar, char newchar) {
+ char c;
+ size_t i;
+ // +1 guarantees NUL termination (calloc zero-fills).
+ char *rpl = (char *)calloc((1 + n), sizeof(char));
+ char *begin = rpl;
+ if (!rpl) {
+ return NULL;
+ }
+
+ for (i = 0; (i < n) && (c = *str++); ++i) {
+ if (c == oldchar) {
+ c = newchar;
+ }
+ *rpl++ = c;
+ }
+
+ return begin;
+}
+
+// Normalizes an archive entry name into `nname` (which may alias `name`):
+// strips leading slashes and drops "." and ".." path components so an
+// extracted entry cannot escape the destination directory. Returns `nname`
+// or NULL on bad arguments.
+static char *zip_name_normalize(char *name, char *const nname, size_t len) {
+ size_t offn = 0;
+ size_t offnn = 0, ncpy = 0;
+
+ if (name == NULL || nname == NULL || len <= 0) {
+ return NULL;
+ }
+ // skip leading '/'
+ while (ISSLASH(*name))
+ name++;
+
+ for (; offn < len; offn++) {
+ if (ISSLASH(name[offn])) {
+ // Commit the accumulated component only if it is non-empty and not
+ // "." or "..".
+ if (ncpy > 0 && strcmp(&nname[offnn], ".\0") &&
+ strcmp(&nname[offnn], "..\0")) {
+ offnn += ncpy;
+ nname[offnn++] = name[offn]; // append '/'
+ }
+ ncpy = 0;
+ } else {
+ nname[offnn + ncpy] = name[offn];
+ ncpy++;
+ }
+ }
+
+ // at the end, extra check what we've already copied
+ if (ncpy == 0 || !strcmp(&nname[offnn], ".\0") ||
+ !strcmp(&nname[offnn], "..\0")) {
+ nname[offnn] = 0;
+ }
+ return nname;
+}
+
+// Compares two entry names for equality after converting name2's
+// backslashes to forward slashes (name1 is assumed already normalized).
+// Returns MZ_FALSE on allocation failure as well as on mismatch.
+static mz_bool zip_name_match(const char *name1, const char *name2) {
+ int len2 = strlen(name2);
+ char *nname2 = zip_strrpl(name2, len2, '\\', '/');
+ if (!nname2) {
+ return MZ_FALSE;
+ }
+
+ mz_bool res = (strcmp(name1, nname2) == 0) ? MZ_TRUE : MZ_FALSE;
+ CLEANUP(nname2);
+ return res;
+}
+
+// Truncates the archive's backing file to m_archive_size after
+// finalization, discarding stale bytes left past the end (e.g. after entry
+// deletion). No-ops (returns 0) for heap-backed archives and for archives
+// not yet finalized.
+static int zip_archive_truncate(mz_zip_archive *pzip) {
+ mz_zip_internal_state *pState = pzip->m_pState;
+ mz_uint64 file_size = pzip->m_archive_size;
+ if ((pzip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
+ return 0;
+ }
+ if (pzip->m_zip_mode == MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED) {
+ if (pState->m_pFile) {
+ int fd = fileno(pState->m_pFile);
+ return ftruncate(fd, file_size);
+ }
+ }
+ return 0;
+}
+
+// Extracts every entry of `zip_archive` under directory `dir`: normalizes
+// each entry name, creates intermediate directories, recreates symlinks on
+// POSIX (for Unix/macOS-made archives), writes regular files, and restores
+// POSIX permission bits from the external attributes. The optional
+// on_extract callback is invoked per extracted path; a negative return
+// aborts the loop. The reader is always closed before returning. Returns 0
+// or a negative ZIP_E* code.
+static int zip_archive_extract(mz_zip_archive *zip_archive, const char *dir,
+ int (*on_extract)(const char *filename,
+ void *arg),
+ void *arg) {
+ int err = 0;
+ mz_uint i, n;
+ char path[MAX_PATH + 1];
+ char symlink_to[MAX_PATH + 1];
+ mz_zip_archive_file_stat info;
+ size_t dirlen = 0;
+ mz_uint32 xattr = 0;
+
+ memset(path, 0, sizeof(path));
+ memset(symlink_to, 0, sizeof(symlink_to));
+
+ dirlen = strlen(dir);
+ if (dirlen + 1 > MAX_PATH) {
+ return ZIP_EINVENTNAME;
+ }
+
+ memset((void *)&info, 0, sizeof(mz_zip_archive_file_stat));
+
+#if defined(_MSC_VER)
+ strcpy_s(path, MAX_PATH, dir);
+#else
+ strcpy(path, dir);
+#endif
+
+ // Ensure the destination prefix ends with a separator; entry names are
+ // appended after it below.
+ if (!ISSLASH(path[dirlen - 1])) {
+#if defined(_WIN32) || defined(__WIN32__)
+ path[dirlen] = '\\';
+#else
+ path[dirlen] = '/';
+#endif
+ ++dirlen;
+ }
+
+ // Get and print information about each file in the archive.
+ n = mz_zip_reader_get_num_files(zip_archive);
+ for (i = 0; i < n; ++i) {
+ if (!mz_zip_reader_file_stat(zip_archive, i, &info)) {
+ // Cannot get information about zip archive;
+ err = ZIP_ENOENT;
+ goto out;
+ }
+
+ // Sanitize the entry name in place (strips leading '/', "." and "..")
+ // so extraction cannot escape `dir`.
+ if (!zip_name_normalize(info.m_filename, info.m_filename,
+ strlen(info.m_filename))) {
+ // Cannot normalize file name;
+ err = ZIP_EINVENTNAME;
+ goto out;
+ }
+#if defined(_MSC_VER)
+ strncpy_s(&path[dirlen], MAX_PATH - dirlen, info.m_filename,
+ MAX_PATH - dirlen);
+#else
+ strncpy(&path[dirlen], info.m_filename, MAX_PATH - dirlen);
+#endif
+ err = zip_mkpath(path);
+ if (err < 0) {
+ // Cannot make a path
+ goto out;
+ }
+
+ if ((((info.m_version_made_by >> 8) == 3) ||
+ ((info.m_version_made_by >> 8) ==
+ 19)) // if zip is produced on Unix or macOS (3 and 19 from
+ // section 4.4.2.2 of zip standard)
+ && info.m_external_attr &
+ (0x20 << 24)) { // and has sym link attribute (0x80 is file, 0x40
+ // is directory)
+#if defined(_WIN32) || defined(__WIN32__) || defined(_MSC_VER) || \
+ defined(__MINGW32__)
+#else
+ // Symlink target is stored as the entry's (uncompressed) data.
+ if (info.m_uncomp_size > MAX_PATH ||
+ !mz_zip_reader_extract_to_mem_no_alloc(zip_archive, i, symlink_to,
+ MAX_PATH, 0, NULL, 0)) {
+ err = ZIP_EMEMNOALLOC;
+ goto out;
+ }
+ symlink_to[info.m_uncomp_size] = '\0';
+ if (symlink(symlink_to, path) != 0) {
+ err = ZIP_ESYMLINK;
+ goto out;
+ }
+#endif
+ } else {
+ if (!mz_zip_reader_is_file_a_directory(zip_archive, i)) {
+ if (!mz_zip_reader_extract_to_file(zip_archive, i, path, 0)) {
+ // Cannot extract zip archive to file
+ err = ZIP_ENOFILE;
+ goto out;
+ }
+ }
+
+#if defined(_MSC_VER)
+ (void)xattr; // unused
+#else
+ // Upper 16 bits of external_attr carry the POSIX mode bits.
+ xattr = (info.m_external_attr >> 16) & 0xFFFF;
+ if (xattr > 0) {
+ if (chmod(path, (mode_t)xattr) < 0) {
+ err = ZIP_ENOPERM;
+ goto out;
+ }
+ }
+#endif
+ }
+
+ if (on_extract) {
+ if (on_extract(path, arg) < 0) {
+ goto out;
+ }
+ }
+ }
+
+out:
+ // Close the archive, freeing any resources it was using
+ if (!mz_zip_reader_end(zip_archive)) {
+ // Cannot end zip reader
+ err = ZIP_ECLSZIP;
+ }
+ return err;
+}
+
+// Finalizes the archive then truncates the backing file to the final size;
+// errors from either step are ignored here.
+static inline void zip_archive_finalize(mz_zip_archive *pzip) {
+ mz_zip_writer_finalize_archive(pzip);
+ zip_archive_truncate(pzip);
+}
+
+// First pass of entry deletion: classifies each of the archive's n entries
+// as MZ_DELETE (its name matches one of `entries`), MZ_KEEP, or MZ_MOVE
+// (kept entry located after the first deleted one, so its data must be
+// shifted down). Records each entry's local-header offset. Returns 0 or a
+// negative ZIP_E* code.
+static int zip_entry_mark(struct zip_t *zip,
+ struct zip_entry_mark_t *entry_mark, int n,
+ char *const entries[], const size_t len) {
+ int err = 0;
+ if (!zip || !entry_mark || !entries) {
+ return ZIP_ENOINIT;
+ }
+
+ mz_zip_archive_file_stat file_stat;
+ // d_pos tracks the smallest local-header offset among deleted entries
+ // (~0 = none found yet).
+ mz_uint64 d_pos = ~0;
+ for (int i = 0; i < n; ++i) {
+
+ if ((err = zip_entry_openbyindex(zip, i))) {
+ return err;
+ }
+
+ mz_bool name_matches = MZ_FALSE;
+ for (int j = 0; j < (const int)len; ++j) {
+ if (zip_name_match(zip->entry.name, entries[j])) {
+ name_matches = MZ_TRUE;
+ break;
+ }
+ }
+ if (name_matches) {
+ entry_mark[i].type = MZ_DELETE;
+ } else {
+ entry_mark[i].type = MZ_KEEP;
+ }
+
+ if (!mz_zip_reader_file_stat(&zip->archive, i, &file_stat)) {
+ return ZIP_ENOENT;
+ }
+
+ zip_entry_close(zip);
+
+ entry_mark[i].m_local_header_ofs = file_stat.m_local_header_ofs;
+ entry_mark[i].file_index = -1;
+ entry_mark[i].lf_length = 0;
+ if ((entry_mark[i].type) == MZ_DELETE &&
+ (d_pos > entry_mark[i].m_local_header_ofs)) {
+ d_pos = entry_mark[i].m_local_header_ofs;
+ }
+ }
+ // Any kept entry located past the first deletion point must move.
+ for (int i = 0; i < n; ++i) {
+ if ((entry_mark[i].m_local_header_ofs > d_pos) &&
+ (entry_mark[i].type != MZ_DELETE)) {
+ entry_mark[i].type = MZ_MOVE;
+ }
+ }
+ return err;
+}
+
+// Insertion-sort helper: returns the position where element `cur_index`
+// belongs within the already-sorted prefix [0, cur_index) of the offsets
+// array (0 if it is smaller than all of them).
+static int zip_index_next(mz_uint64 *local_header_ofs_array, int cur_index) {
+ int new_index = 0;
+ for (int i = cur_index - 1; i >= 0; --i) {
+ if (local_header_ofs_array[cur_index] > local_header_ofs_array[i]) {
+ new_index = i + 1;
+ return new_index;
+ }
+ }
+ return new_index;
+}
+
+// One insertion-sort step: moves element `cur_index` into its sorted slot
+// within [0, cur_index], shifting the intervening elements up. Returns the
+// slot the element ended up in.
+static int zip_sort(mz_uint64 *local_header_ofs_array, int cur_index) {
+ int nxt_index = zip_index_next(local_header_ofs_array, cur_index);
+
+ if (nxt_index != cur_index) {
+ mz_uint64 temp = local_header_ofs_array[cur_index];
+ for (int i = cur_index; i > nxt_index; i--) {
+ local_header_ofs_array[i] = local_header_ofs_array[i - 1];
+ }
+ local_header_ofs_array[nxt_index] = temp;
+ }
+ return nxt_index;
+}
+
+// After an insertion at sorted slot `nxt_index`, bumps every previously
+// assigned file_index >= nxt_index by one and records `last_index` as the
+// entry now occupying that slot. Always returns 0.
+static int zip_index_update(struct zip_entry_mark_t *entry_mark, int last_index,
+ int nxt_index) {
+ for (int j = 0; j < last_index; j++) {
+ if (entry_mark[j].file_index >= nxt_index) {
+ entry_mark[j].file_index += 1;
+ }
+ }
+ entry_mark[nxt_index].file_index = last_index;
+ return 0;
+}
+
+// Second pass of entry deletion: sorts the entries' local-header offsets
+// (tracking each entry's position in sorted order via file_index) and then
+// derives each entry's local-file region length as the gap to the next
+// offset (the last one extends to m_archive_size). Returns 0 or ZIP_EOOMEM.
+static int zip_entry_finalize(struct zip_t *zip,
+ struct zip_entry_mark_t *entry_mark,
+ const int n) {
+
+ mz_uint64 *local_header_ofs_array = (mz_uint64 *)calloc(n, sizeof(mz_uint64));
+ if (!local_header_ofs_array) {
+ return ZIP_EOOMEM;
+ }
+
+ for (int i = 0; i < n; ++i) {
+ local_header_ofs_array[i] = entry_mark[i].m_local_header_ofs;
+ int index = zip_sort(local_header_ofs_array, i);
+
+ if (index != i) {
+ zip_index_update(entry_mark, i, index);
+ }
+ entry_mark[i].file_index = index;
+ }
+
+ // length[k] = size of the k-th (in file order) local-file region.
+ mz_uint64 *length = (mz_uint64 *)calloc(n, sizeof(mz_uint64));
+ if (!length) {
+ CLEANUP(local_header_ofs_array);
+ return ZIP_EOOMEM;
+ }
+ for (int i = 0; i < n - 1; i++) {
+ length[i] = local_header_ofs_array[i + 1] - local_header_ofs_array[i];
+ }
+ length[n - 1] = zip->archive.m_archive_size - local_header_ofs_array[n - 1];
+
+ for (int i = 0; i < n; i++) {
+ entry_mark[i].lf_length = length[entry_mark[i].file_index];
+ }
+
+ CLEANUP(length);
+ CLEANUP(local_header_ofs_array);
+ return 0;
+}
+
+// Runs both preparation passes for entry deletion (mark types/offsets, then
+// compute sorted order and region lengths). Returns 0 or the first negative
+// ZIP_E* error.
+static int zip_entry_set(struct zip_t *zip, struct zip_entry_mark_t *entry_mark,
+ int n, char *const entries[], const size_t len) {
+ int err = 0;
+
+ if ((err = zip_entry_mark(zip, entry_mark, n, entries, len)) < 0) {
+ return err;
+ }
+ if ((err = zip_entry_finalize(zip, entry_mark, n)) < 0) {
+ return err;
+ }
+ return 0;
+}
+
+// Moves `length` bytes within the open file from offset `from` to offset
+// `to` using the caller-supplied buffer (length must fit capacity_size).
+// Returns the number of bytes moved or a negative ZIP_E* code. NOTE: on
+// any seek/read/write failure the FILE* is closed here, even though the
+// caller still holds it.
+static mz_int64 zip_file_move(MZ_FILE *m_pFile, const mz_uint64 to,
+ const mz_uint64 from, const mz_uint64 length,
+ mz_uint8 *move_buf,
+ const mz_int64 capacity_size) {
+ if ((mz_int64)length > capacity_size) {
+ return ZIP_ECAPSIZE;
+ }
+ if (MZ_FSEEK64(m_pFile, from, SEEK_SET)) {
+ MZ_FCLOSE(m_pFile);
+ return ZIP_EFSEEK;
+ }
+
+ if (fread(move_buf, 1, length, m_pFile) != length) {
+ MZ_FCLOSE(m_pFile);
+ return ZIP_EFREAD;
+ }
+ if (MZ_FSEEK64(m_pFile, to, SEEK_SET)) {
+ MZ_FCLOSE(m_pFile);
+ return ZIP_EFSEEK;
+ }
+ if (fwrite(move_buf, 1, length, m_pFile) != length) {
+ MZ_FCLOSE(m_pFile);
+ return ZIP_EFWRITE;
+ }
+ return (mz_int64)length;
+}
+
+// Moves `length` bytes within the file from offset read_num down to offset
+// writen_num, in 4 KiB chunks (offsets only move forward, so chunked
+// forward copying is safe for the overlapping down-shift). Returns the
+// total bytes moved (possibly short on a partial move) or a negative
+// ZIP_E* code from zip_file_move.
+static mz_int64 zip_files_move(MZ_FILE *m_pFile, mz_uint64 writen_num,
+ mz_uint64 read_num, mz_uint64 length) {
+ int n = 0;
+ const mz_int64 page_size = 1 << 12; // 4K
+ mz_uint8 *move_buf = (mz_uint8 *)calloc(1, page_size);
+ if (move_buf == NULL) {
+ return ZIP_EOOMEM;
+ }
+
+ mz_int64 moved_length = 0;
+ mz_int64 move_count = 0;
+ while ((mz_int64)length > 0) {
+ move_count = ((mz_int64)length >= page_size) ? page_size : (mz_int64)length;
+ n = zip_file_move(m_pFile, writen_num, read_num, move_count, move_buf,
+ page_size);
+ if (n < 0) {
+ // Propagate the ZIP_E* error code from the failed chunk.
+ moved_length = n;
+ goto cleanup;
+ }
+
+ if (n != move_count) {
+ goto cleanup;
+ }
+
+ writen_num += move_count;
+ read_num += move_count;
+ length -= move_count;
+ moved_length += move_count;
+ }
+
+cleanup:
+ CLEANUP(move_buf);
+ return moved_length;
+}
+
+// Removes the central-directory records for entries [begin, end) from the
+// in-memory central directory by sliding the records after `end` down over
+// the deleted span, and adjusts the stored per-entry offsets accordingly.
+// `begin == entry_num` means nothing to delete. Always returns 0.
+static int zip_central_dir_move(mz_zip_internal_state *pState, int begin,
+ int end, int entry_num) {
+ if (begin == entry_num) {
+ return 0;
+ }
+
+ // l_size: bytes before the deleted span; r_size: bytes after it;
+ // d_size: bytes of the deleted span itself.
+ mz_uint64 l_size = 0;
+ mz_uint64 r_size = 0;
+ mz_uint64 d_size = 0;
+ mz_uint8 *next = NULL;
+ mz_uint8 *deleted = &MZ_ZIP_ARRAY_ELEMENT(
+ &pState->m_central_dir, mz_uint8,
+ MZ_ZIP_ARRAY_ELEMENT(&pState->m_central_dir_offsets, mz_uint32, begin));
+ l_size = (mz_uint32)(deleted - (mz_uint8 *)(pState->m_central_dir.m_p));
+ if (end == entry_num) {
+ r_size = 0;
+ } else {
+ next = &MZ_ZIP_ARRAY_ELEMENT(
+ &pState->m_central_dir, mz_uint8,
+ MZ_ZIP_ARRAY_ELEMENT(&pState->m_central_dir_offsets, mz_uint32, end));
+ r_size = pState->m_central_dir.m_size -
+ (mz_uint32)(next - (mz_uint8 *)(pState->m_central_dir.m_p));
+ d_size = next - deleted;
+ }
+
+ // Deleted span starts at the very beginning: shrink the buffer itself.
+ if (l_size == 0) {
+ memmove(pState->m_central_dir.m_p, next, r_size);
+ pState->m_central_dir.m_p = MZ_REALLOC(pState->m_central_dir.m_p, r_size);
+ for (int i = end; i < entry_num; i++) {
+ MZ_ZIP_ARRAY_ELEMENT(&pState->m_central_dir_offsets, mz_uint32, i) -=
+ d_size;
+ }
+ }
+
+ // Deleted span is in the middle: slide the tail down in place.
+ if (l_size * r_size != 0) {
+ memmove(deleted, next, r_size);
+ for (int i = end; i < entry_num; i++) {
+ MZ_ZIP_ARRAY_ELEMENT(&pState->m_central_dir_offsets, mz_uint32, i) -=
+ d_size;
+ }
+ }
+
+ pState->m_central_dir.m_size = l_size + r_size;
+ return 0;
+}
+
+// Removes all marked entries from the in-memory central directory. First
+// pass: for each contiguous run of deleted entries, slide the record bytes
+// down (zip_central_dir_move). Second pass: compact the per-entry offset
+// table over the deleted slots and shrink its size. Always returns 0.
+static int zip_central_dir_delete(mz_zip_internal_state *pState,
+ int *deleted_entry_index_array,
+ int entry_num) {
+ int i = 0;
+ int begin = 0;
+ int end = 0;
+ int d_num = 0;
+ while (i < entry_num) {
+ while ((!deleted_entry_index_array[i]) && (i < entry_num)) {
+ i++;
+ }
+ begin = i;
+
+ while ((deleted_entry_index_array[i]) && (i < entry_num)) {
+ i++;
+ }
+ end = i;
+ zip_central_dir_move(pState, begin, end, entry_num);
+ }
+
+ i = 0;
+ while (i < entry_num) {
+ while ((!deleted_entry_index_array[i]) && (i < entry_num)) {
+ i++;
+ }
+ begin = i;
+ if (begin == entry_num) {
+ break;
+ }
+ while ((deleted_entry_index_array[i]) && (i < entry_num)) {
+ i++;
+ }
+ end = i;
+ // Shift surviving offsets left into the [begin, ...) slots.
+ int k = 0;
+ for (int j = end; j < entry_num; j++) {
+ MZ_ZIP_ARRAY_ELEMENT(&pState->m_central_dir_offsets, mz_uint32,
+ begin + k) =
+ (mz_uint32)MZ_ZIP_ARRAY_ELEMENT(&pState->m_central_dir_offsets,
+ mz_uint32, j);
+ k++;
+ }
+ d_num += end - begin;
+ }
+
+ pState->m_central_dir_offsets.m_size =
+ sizeof(mz_uint32) * (entry_num - d_num);
+ return 0;
+}
+
+// Applies a previously computed delete/keep/move plan to the archive file:
+// walks the entries in file order, skipping kept data, accounting deleted
+// regions, patching the central-dir local-header offsets of moved entries,
+// and physically shifting their local-file data down over the deleted
+// bytes. Finally updates archive size / file count and compacts the
+// in-memory central directory. Returns the number of deleted entries, or a
+// negative ZIP_E* code.
+static int zip_entries_delete_mark(struct zip_t *zip,
+ struct zip_entry_mark_t *entry_mark,
+ int entry_num) {
+ mz_uint64 writen_num = 0;
+ mz_uint64 read_num = 0;
+ mz_uint64 deleted_length = 0;
+ mz_uint64 move_length = 0;
+ int i = 0;
+ int deleted_entry_num = 0;
+ int n = 0;
+
+ mz_bool *deleted_entry_flag_array =
+ (mz_bool *)calloc(entry_num, sizeof(mz_bool));
+ if (deleted_entry_flag_array == NULL) {
+ return ZIP_EOOMEM;
+ }
+
+ mz_zip_internal_state *pState = zip->archive.m_pState;
+ zip->archive.m_zip_mode = MZ_ZIP_MODE_WRITING;
+
+ if (MZ_FSEEK64(pState->m_pFile, 0, SEEK_SET)) {
+ CLEANUP(deleted_entry_flag_array);
+ return ZIP_ENOENT;
+ }
+
+ while (i < entry_num) {
+ // Kept entries before any deletion: data stays in place.
+ while ((entry_mark[i].type == MZ_KEEP) && (i < entry_num)) {
+ writen_num += entry_mark[i].lf_length;
+ read_num = writen_num;
+ i++;
+ }
+
+ // Deleted run: advance the read cursor past it and flag the entries.
+ while ((entry_mark[i].type == MZ_DELETE) && (i < entry_num)) {
+ deleted_entry_flag_array[i] = MZ_TRUE;
+ read_num += entry_mark[i].lf_length;
+ deleted_length += entry_mark[i].lf_length;
+ i++;
+ deleted_entry_num++;
+ }
+
+ // Moved run: patch each entry's local-header offset in the central dir
+ // by the total bytes deleted so far, and tally how much data to shift.
+ while ((entry_mark[i].type == MZ_MOVE) && (i < entry_num)) {
+ move_length += entry_mark[i].lf_length;
+ mz_uint8 *p = &MZ_ZIP_ARRAY_ELEMENT(
+ &pState->m_central_dir, mz_uint8,
+ MZ_ZIP_ARRAY_ELEMENT(&pState->m_central_dir_offsets, mz_uint32, i));
+ if (!p) {
+ CLEANUP(deleted_entry_flag_array);
+ return ZIP_ENOENT;
+ }
+ mz_uint32 offset = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
+ offset -= (mz_uint32)deleted_length;
+ MZ_WRITE_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS, offset);
+ i++;
+ }
+
+ n = zip_files_move(pState->m_pFile, writen_num, read_num, move_length);
+ if (n != (mz_int64)move_length) {
+ CLEANUP(deleted_entry_flag_array);
+ return n;
+ }
+ writen_num += move_length;
+ read_num += move_length;
+ }
+
+ zip->archive.m_archive_size -= deleted_length;
+ zip->archive.m_total_files = entry_num - deleted_entry_num;
+
+ zip_central_dir_delete(pState, deleted_entry_flag_array, entry_num);
+ CLEANUP(deleted_entry_flag_array);
+
+ return deleted_entry_num;
+}
+
+// Opens (or creates) the archive at zipname with the given compression level.
+// mode 'w' creates a fresh archive; 'r' opens read-only; 'a' and 'd'
+// additionally set up the writer from the reader for append/delete.
+// Returns a heap-allocated handle (release with zip_close) or NULL on error.
+struct zip_t *zip_open(const char *zipname, int level, char mode) {
+  struct zip_t *zip = NULL;
+
+  if (!zipname || strlen(zipname) < 1) {
+    // zip_t archive name is empty or NULL
+    goto cleanup;
+  }
+
+  if (level < 0)
+    level = MZ_DEFAULT_LEVEL;
+  if ((level & 0xF) > MZ_UBER_COMPRESSION) {
+    // Wrong compression level
+    goto cleanup;
+  }
+
+  zip = (struct zip_t *)calloc((size_t)1, sizeof(struct zip_t));
+  if (!zip)
+    goto cleanup;
+
+  zip->level = (mz_uint)level;
+  switch (mode) {
+  case 'w':
+    // Create a new archive.
+    if (!mz_zip_writer_init_file(&(zip->archive), zipname, 0)) {
+      // Cannot initialize zip_archive writer
+      goto cleanup;
+    }
+    break;
+
+  case 'r':
+  case 'a':
+  case 'd':
+    if (!mz_zip_reader_init_file(
+            &(zip->archive), zipname,
+            zip->level | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) {
+      // An archive file does not exist or cannot initialize
+      // zip_archive reader
+      goto cleanup;
+    }
+    // 'a'/'d' need write access too; the writer is primed from the reader.
+    if ((mode == 'a' || mode == 'd') &&
+        !mz_zip_writer_init_from_reader(&(zip->archive), zipname)) {
+      mz_zip_reader_end(&(zip->archive));
+      goto cleanup;
+    }
+    break;
+
+  default:
+    goto cleanup;
+  }
+
+  return zip;
+
+cleanup:
+  CLEANUP(zip);
+  return NULL;
+}
+
+// Finalizes and closes the archive, releasing the handle.
+// NOTE(review): writer finalize/end run even for archives opened read-only;
+// presumably miniz rejects those calls harmlessly in reader mode — confirm.
+void zip_close(struct zip_t *zip) {
+  if (zip) {
+    // Always finalize, even if adding failed for some reason, so we have a
+    // valid central directory.
+    mz_zip_writer_finalize_archive(&(zip->archive));
+    zip_archive_truncate(&(zip->archive));
+    mz_zip_writer_end(&(zip->archive));
+    mz_zip_reader_end(&(zip->archive));
+
+    CLEANUP(zip);
+  }
+}
+
+// Reports whether the archive uses zip64 end-of-central-directory headers:
+// 1 (true), 0 (false), or ZIP_ENOINIT when the handle or its internal
+// state is missing.
+int zip_is64(struct zip_t *zip) {
+  return (zip && zip->archive.m_pState) ? (int)zip->archive.m_pState->m_zip64
+                                        : ZIP_ENOINIT;
+}
+
+// Opens an entry for reading or appending, depending on the archive mode.
+// Reading mode: locates entryname in the central directory and caches its
+// stats on zip->entry. Writing mode: starts a new entry at the end of the
+// archive (alignment padding + zeroed local-header placeholder + name) and,
+// for level > 0, primes the streaming tdefl compressor.
+// Returns 0 on success or a negative ZIP_E* code.
+int zip_entry_open(struct zip_t *zip, const char *entryname) {
+  size_t entrylen = 0;
+  mz_zip_archive *pzip = NULL;
+  mz_uint num_alignment_padding_bytes, level;
+  mz_zip_archive_file_stat stats;
+  int err = 0;
+
+  if (!zip) {
+    return ZIP_ENOINIT;
+  }
+
+  if (!entryname) {
+    return ZIP_EINVENTNAME;
+  }
+
+  entrylen = strlen(entryname);
+  if (entrylen == 0) {
+    return ZIP_EINVENTNAME;
+  }
+
+  /*
+    .ZIP File Format Specification Version: 6.3.3
+
+    4.4.17.1 The name of the file, with optional relative path.
+    The path stored MUST not contain a drive or
+    device letter, or a leading slash. All slashes
+    MUST be forward slashes '/' as opposed to
+    backwards slashes '\' for compatibility with Amiga
+    and UNIX file systems etc. If input came from standard
+    input, there is no file name field.
+  */
+  if (zip->entry.name) {
+    CLEANUP(zip->entry.name);
+  }
+  // Normalize backslashes to forward slashes per the spec excerpt above.
+  zip->entry.name = zip_strrpl(entryname, entrylen, '\\', '/');
+  if (!zip->entry.name) {
+    // Cannot parse zip entry name
+    return ZIP_EINVENTNAME;
+  }
+
+  pzip = &(zip->archive);
+  if (pzip->m_zip_mode == MZ_ZIP_MODE_READING) {
+    zip->entry.index =
+        mz_zip_reader_locate_file(pzip, zip->entry.name, NULL, 0);
+    if (zip->entry.index < 0) {
+      err = ZIP_ENOENT;
+      goto cleanup;
+    }
+
+    if (!mz_zip_reader_file_stat(pzip, (mz_uint)zip->entry.index, &stats)) {
+      err = ZIP_ENOENT;
+      goto cleanup;
+    }
+
+    // Cache the located entry's stats for the accessor functions.
+    zip->entry.comp_size = stats.m_comp_size;
+    zip->entry.uncomp_size = stats.m_uncomp_size;
+    zip->entry.uncomp_crc32 = stats.m_crc32;
+    zip->entry.offset = stats.m_central_dir_ofs;
+    zip->entry.header_offset = stats.m_local_header_ofs;
+    zip->entry.method = stats.m_method;
+    zip->entry.external_attr = stats.m_external_attr;
+#ifndef MINIZ_NO_TIME
+    zip->entry.m_time = stats.m_time;
+#endif
+
+    return 0;
+  }
+
+  // Writing mode: the new entry starts at the current end of the archive.
+  zip->entry.index = (int)zip->archive.m_total_files;
+  zip->entry.comp_size = 0;
+  zip->entry.uncomp_size = 0;
+  zip->entry.uncomp_crc32 = MZ_CRC32_INIT;
+  zip->entry.offset = zip->archive.m_archive_size;
+  zip->entry.header_offset = zip->archive.m_archive_size;
+  memset(zip->entry.header, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE * sizeof(mz_uint8));
+  zip->entry.method = 0;
+
+  // UNIX or APPLE
+#if MZ_PLATFORM == 3 || MZ_PLATFORM == 19
+  // regular file with rw-r--r-- permissions
+  zip->entry.external_attr = (mz_uint32)(0100644) << 16;
+#else
+  zip->entry.external_attr = 0;
+#endif
+
+  num_alignment_padding_bytes =
+      mz_zip_writer_compute_padding_needed_for_file_alignment(pzip);
+
+  if (!pzip->m_pState || (pzip->m_zip_mode != MZ_ZIP_MODE_WRITING)) {
+    // Invalid zip mode
+    err = ZIP_EINVMODE;
+    goto cleanup;
+  }
+  if (zip->level & MZ_ZIP_FLAG_COMPRESSED_DATA) {
+    // Invalid zip compression level
+    err = ZIP_EINVLVL;
+    goto cleanup;
+  }
+  // no zip64 support yet
+  if ((pzip->m_total_files == 0xFFFF) ||
+      ((pzip->m_archive_size + num_alignment_padding_bytes +
+        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
+        entrylen) > 0xFFFFFFFF)) {
+    // No zip64 support yet
+    err = ZIP_ENOSUP64;
+    goto cleanup;
+  }
+  // Reserve space for padding plus a placeholder local header; the real
+  // header is written over it by zip_entry_close once sizes are known.
+  if (!mz_zip_writer_write_zeros(pzip, zip->entry.offset,
+                                 num_alignment_padding_bytes +
+                                     sizeof(zip->entry.header))) {
+    // Cannot memset zip entry header
+    err = ZIP_EMEMSET;
+    goto cleanup;
+  }
+
+  zip->entry.header_offset += num_alignment_padding_bytes;
+  if (pzip->m_file_offset_alignment) {
+    MZ_ASSERT(
+        (zip->entry.header_offset & (pzip->m_file_offset_alignment - 1)) == 0);
+  }
+  zip->entry.offset += num_alignment_padding_bytes + sizeof(zip->entry.header);
+
+  if (pzip->m_pWrite(pzip->m_pIO_opaque, zip->entry.offset, zip->entry.name,
+                     entrylen) != entrylen) {
+    // Cannot write data to zip entry
+    err = ZIP_EWRTENT;
+    goto cleanup;
+  }
+
+  zip->entry.offset += entrylen;
+  level = zip->level & 0xF;
+  if (level) {
+    zip->entry.state.m_pZip = pzip;
+    zip->entry.state.m_cur_archive_file_ofs = zip->entry.offset;
+    zip->entry.state.m_comp_size = 0;
+
+    if (tdefl_init(&(zip->entry.comp), mz_zip_writer_add_put_buf_callback,
+                   &(zip->entry.state),
+                   (int)tdefl_create_comp_flags_from_zip_params(
+                       (int)level, -15, MZ_DEFAULT_STRATEGY)) !=
+        TDEFL_STATUS_OKAY) {
+      // Cannot initialize the zip compressor
+      err = ZIP_ETDEFLINIT;
+      goto cleanup;
+    }
+  }
+
+  zip->entry.m_time = time(NULL);
+
+  return 0;
+
+cleanup:
+  CLEANUP(zip->entry.name);
+  return err;
+}
+
+// Opens the entry at the given central-directory index (readonly mode only)
+// and caches its stats on zip->entry. Returns 0 or a negative ZIP_E* code.
+int zip_entry_openbyindex(struct zip_t *zip, int index) {
+  mz_zip_archive *pZip = NULL;
+  mz_zip_archive_file_stat stats;
+  mz_uint namelen;
+  const mz_uint8 *pHeader;
+  const char *pFilename;
+
+  if (!zip) {
+    // zip_t handler is not initialized
+    return ZIP_ENOINIT;
+  }
+
+  pZip = &(zip->archive);
+  if (pZip->m_zip_mode != MZ_ZIP_MODE_READING) {
+    // open by index requires readonly mode
+    return ZIP_EINVMODE;
+  }
+
+  if (index < 0 || (mz_uint)index >= pZip->m_total_files) {
+    // index out of range
+    return ZIP_EINVIDX;
+  }
+
+  if (!(pHeader = &MZ_ZIP_ARRAY_ELEMENT(
+            &pZip->m_pState->m_central_dir, mz_uint8,
+            MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets,
+                                 mz_uint32, index)))) {
+    // cannot find header in central directory
+    return ZIP_ENOHDR;
+  }
+
+  // The entry name sits immediately after the fixed-size header record.
+  namelen = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
+  pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
+
+  /*
+    .ZIP File Format Specification Version: 6.3.3
+
+    4.4.17.1 The name of the file, with optional relative path.
+    The path stored MUST not contain a drive or
+    device letter, or a leading slash. All slashes
+    MUST be forward slashes '/' as opposed to
+    backwards slashes '\' for compatibility with Amiga
+    and UNIX file systems etc. If input came from standard
+    input, there is no file name field.
+  */
+  if (zip->entry.name) {
+    CLEANUP(zip->entry.name);
+  }
+  // Normalize backslashes to forward slashes per the spec excerpt above.
+  zip->entry.name = zip_strrpl(pFilename, namelen, '\\', '/');
+  if (!zip->entry.name) {
+    // local entry name is NULL
+    return ZIP_EINVENTNAME;
+  }
+
+  if (!mz_zip_reader_file_stat(pZip, (mz_uint)index, &stats)) {
+    return ZIP_ENOENT;
+  }
+
+  // Cache the entry's stats for the accessor functions.
+  zip->entry.index = index;
+  zip->entry.comp_size = stats.m_comp_size;
+  zip->entry.uncomp_size = stats.m_uncomp_size;
+  zip->entry.uncomp_crc32 = stats.m_crc32;
+  zip->entry.offset = stats.m_central_dir_ofs;
+  zip->entry.header_offset = stats.m_local_header_ofs;
+  zip->entry.method = stats.m_method;
+  zip->entry.external_attr = stats.m_external_attr;
+#ifndef MINIZ_NO_TIME
+  zip->entry.m_time = stats.m_time;
+#endif
+
+  return 0;
+}
+
+// Finishes the current entry: flushes the compressor (if any), writes the
+// real local header over the zeroed placeholder, and appends the entry's
+// central-directory record. Readonly mode is a successful no-op.
+// Always frees the cached entry name. Returns 0 or a negative ZIP_E* code.
+int zip_entry_close(struct zip_t *zip) {
+  mz_zip_archive *pzip = NULL;
+  mz_uint level;
+  tdefl_status done;
+  mz_uint16 entrylen;
+  mz_uint16 dos_time = 0, dos_date = 0;
+  int err = 0;
+
+  if (!zip) {
+    // zip_t handler is not initialized
+    err = ZIP_ENOINIT;
+    goto cleanup;
+  }
+
+  pzip = &(zip->archive);
+  if (pzip->m_zip_mode == MZ_ZIP_MODE_READING) {
+    goto cleanup;
+  }
+
+  level = zip->level & 0xF;
+  if (level) {
+    // Flush whatever the streaming compressor still holds.
+    done = tdefl_compress_buffer(&(zip->entry.comp), "", 0, TDEFL_FINISH);
+    if (done != TDEFL_STATUS_DONE && done != TDEFL_STATUS_OKAY) {
+      // Cannot flush compressed buffer
+      err = ZIP_ETDEFLBUF;
+      goto cleanup;
+    }
+    zip->entry.comp_size = zip->entry.state.m_comp_size;
+    zip->entry.offset = zip->entry.state.m_cur_archive_file_ofs;
+    zip->entry.method = MZ_DEFLATED;
+  }
+
+  entrylen = (mz_uint16)strlen(zip->entry.name);
+  if ((zip->entry.comp_size > 0xFFFFFFFF) || (zip->entry.offset > 0xFFFFFFFF)) {
+    // No zip64 support, yet
+    err = ZIP_ENOSUP64;
+    goto cleanup;
+  }
+
+#ifndef MINIZ_NO_TIME
+  mz_zip_time_t_to_dos_time(zip->entry.m_time, &dos_time, &dos_date);
+#endif
+
+  // Now that sizes and CRC are final, build the real local header...
+  if (!mz_zip_writer_create_local_dir_header(
+          pzip, zip->entry.header, entrylen, 0, zip->entry.uncomp_size,
+          zip->entry.comp_size, zip->entry.uncomp_crc32, zip->entry.method, 0,
+          dos_time, dos_date)) {
+    // Cannot create zip entry header
+    err = ZIP_ECRTHDR;
+    goto cleanup;
+  }
+
+  // ...and overwrite the zeroed placeholder written by zip_entry_open.
+  if (pzip->m_pWrite(pzip->m_pIO_opaque, zip->entry.header_offset,
+                     zip->entry.header,
+                     sizeof(zip->entry.header)) != sizeof(zip->entry.header)) {
+    // Cannot write zip entry header
+    err = ZIP_EWRTHDR;
+    goto cleanup;
+  }
+
+  if (!mz_zip_writer_add_to_central_dir(
+          pzip, zip->entry.name, entrylen, NULL, 0, "", 0,
+          zip->entry.uncomp_size, zip->entry.comp_size, zip->entry.uncomp_crc32,
+          zip->entry.method, 0, dos_time, dos_date, zip->entry.header_offset,
+          zip->entry.external_attr)) {
+    // Cannot write to zip central dir
+    err = ZIP_EWRTDIR;
+    goto cleanup;
+  }
+
+  pzip->m_total_files++;
+  pzip->m_archive_size = zip->entry.offset;
+
+cleanup:
+  if (zip) {
+    zip->entry.m_time = 0;
+    CLEANUP(zip->entry.name);
+  }
+  return err;
+}
+
+// Returns the normalized name of the currently open entry, or NULL when
+// the archive handle is not initialized.
+const char *zip_entry_name(struct zip_t *zip) {
+  return zip ? zip->entry.name : NULL;
+}
+
+// Returns the index of the currently open entry, or ZIP_ENOINIT when the
+// archive handle is not initialized.
+int zip_entry_index(struct zip_t *zip) {
+  return zip ? zip->entry.index : ZIP_ENOINIT;
+}
+
+// Reports whether the currently open entry is a directory: 1 (true),
+// 0 (false), ZIP_ENOINIT without a handle, ZIP_EINVIDX without an open
+// entry.
+int zip_entry_isdir(struct zip_t *zip) {
+  if (!zip) {
+    // no archive handle
+    return ZIP_ENOINIT;
+  }
+  return (zip->entry.index < 0)
+             ? ZIP_EINVIDX // no entry is currently open
+             : (int)mz_zip_reader_is_file_a_directory(
+                   &zip->archive, (mz_uint)zip->entry.index);
+}
+
+// Uncompressed size in bytes of the current entry; 0 without a handle.
+unsigned long long zip_entry_size(struct zip_t *zip) {
+  if (!zip) {
+    return 0;
+  }
+  return zip->entry.uncomp_size;
+}
+
+// CRC-32 checksum of the current entry; 0 without a handle.
+unsigned int zip_entry_crc32(struct zip_t *zip) {
+  if (!zip) {
+    return 0;
+  }
+  return zip->entry.uncomp_crc32;
+}
+
+// Appends bufsize bytes from buf to the current (write-mode) entry: stored
+// verbatim at level 0, otherwise fed to the streaming tdefl compressor.
+// CRC-32 and the uncompressed size are updated in both cases. A NULL buf or
+// zero bufsize is a successful no-op. Returns 0 or a negative ZIP_E* code.
+int zip_entry_write(struct zip_t *zip, const void *buf, size_t bufsize) {
+  mz_uint level;
+  mz_zip_archive *pzip = NULL;
+  tdefl_status status;
+
+  if (!zip) {
+    // zip_t handler is not initialized
+    return ZIP_ENOINIT;
+  }
+
+  pzip = &(zip->archive);
+  if (buf && bufsize > 0) {
+    zip->entry.uncomp_size += bufsize;
+    zip->entry.uncomp_crc32 = (mz_uint32)mz_crc32(
+        zip->entry.uncomp_crc32, (const mz_uint8 *)buf, bufsize);
+
+    level = zip->level & 0xF;
+    if (!level) {
+      // Level 0: store the bytes uncompressed at the current write offset.
+      if ((pzip->m_pWrite(pzip->m_pIO_opaque, zip->entry.offset, buf,
+                          bufsize) != bufsize)) {
+        // Cannot write buffer
+        return ZIP_EWRTENT;
+      }
+      zip->entry.offset += bufsize;
+      zip->entry.comp_size += bufsize;
+    } else {
+      // Compressed output is emitted through the put-buf callback set up
+      // in zip_entry_open, which tracks offset/comp_size in entry.state.
+      status = tdefl_compress_buffer(&(zip->entry.comp), buf, bufsize,
+                                     TDEFL_NO_FLUSH);
+      if (status != TDEFL_STATUS_DONE && status != TDEFL_STATUS_OKAY) {
+        // Cannot compress buffer
+        return ZIP_ETDEFLBUF;
+      }
+    }
+  }
+
+  return 0;
+}
+
+// Adds the contents of filename to the currently open (write-mode) entry,
+// carrying over the source file's permission bits, MS-DOS read-only flag
+// and modification time. Returns 0 or a negative ZIP_E* code.
+int zip_entry_fwrite(struct zip_t *zip, const char *filename) {
+  int err = 0;
+  size_t n = 0;
+  FILE *stream = NULL;
+  mz_uint8 buf[MZ_ZIP_MAX_IO_BUF_SIZE];
+  struct MZ_FILE_STAT_STRUCT file_stat;
+
+  if (!zip) {
+    // zip_t handler is not initialized
+    return ZIP_ENOINIT;
+  }
+
+  memset(buf, 0, MZ_ZIP_MAX_IO_BUF_SIZE);
+  memset((void *)&file_stat, 0, sizeof(struct MZ_FILE_STAT_STRUCT));
+  if (MZ_FILE_STAT(filename, &file_stat) != 0) {
+    // problem getting information - check errno
+    return ZIP_ENOENT;
+  }
+
+  if ((file_stat.st_mode & 0200) == 0) {
+    // MS-DOS read-only attribute
+    zip->entry.external_attr |= 0x01;
+  }
+  // High 16 bits of the external attributes carry the host mode bits.
+  zip->entry.external_attr |= (mz_uint32)((file_stat.st_mode & 0xFFFF) << 16);
+  zip->entry.m_time = file_stat.st_mtime;
+
+#if defined(_MSC_VER)
+  if (fopen_s(&stream, filename, "rb"))
+#else
+  if (!(stream = fopen(filename, "rb")))
+#endif
+  {
+    // Cannot open filename
+    return ZIP_EOPNFILE;
+  }
+
+  // Stream the file through zip_entry_write in fixed-size chunks.
+  while ((n = fread(buf, sizeof(mz_uint8), MZ_ZIP_MAX_IO_BUF_SIZE, stream)) >
+         0) {
+    if (zip_entry_write(zip, buf, n) < 0) {
+      err = ZIP_EWRTENT;
+      break;
+    }
+  }
+  fclose(stream);
+
+  return err;
+}
+
+// Extracts the current entry into a buffer allocated by miniz.
+// On success *buf (caller frees) holds the data and *bufsize, when non-NULL,
+// receives the size. Directory entries are rejected. Returns the number of
+// bytes read (0 when extraction fails) or a negative ZIP_E* code.
+ssize_t zip_entry_read(struct zip_t *zip, void **buf, size_t *bufsize) {
+  mz_zip_archive *pzip = NULL;
+  mz_uint idx;
+  size_t size = 0;
+
+  if (!zip) {
+    // zip_t handler is not initialized
+    return (ssize_t)ZIP_ENOINIT;
+  }
+
+  pzip = &(zip->archive);
+  if (pzip->m_zip_mode != MZ_ZIP_MODE_READING || zip->entry.index < 0) {
+    // the entry is not found or we do not have read access
+    return (ssize_t)ZIP_ENOENT;
+  }
+
+  idx = (mz_uint)zip->entry.index;
+  if (mz_zip_reader_is_file_a_directory(pzip, idx)) {
+    // the entry is a directory
+    return (ssize_t)ZIP_EINVENTTYPE;
+  }
+
+  *buf = mz_zip_reader_extract_to_heap(pzip, idx, &size, 0);
+  if (*buf && bufsize) {
+    *bufsize = size;
+  }
+  return (ssize_t)size;
+}
+
+// Extracts the current entry into a caller-provided buffer without heap
+// allocation; bufsize must be at least the entry's uncompressed size.
+// Returns the uncompressed size on success or a negative ZIP_E* code.
+ssize_t zip_entry_noallocread(struct zip_t *zip, void *buf, size_t bufsize) {
+  mz_zip_archive *pzip = NULL;
+
+  if (!zip) {
+    // zip_t handler is not initialized
+    return (ssize_t)ZIP_ENOINIT;
+  }
+
+  pzip = &(zip->archive);
+  if (pzip->m_zip_mode != MZ_ZIP_MODE_READING || zip->entry.index < 0) {
+    // the entry is not found or we do not have read access
+    return (ssize_t)ZIP_ENOENT;
+  }
+
+  if (!mz_zip_reader_extract_to_mem_no_alloc(pzip, (mz_uint)zip->entry.index,
+                                             buf, bufsize, 0, NULL, 0)) {
+    return (ssize_t)ZIP_EMEMNOALLOC;
+  }
+
+  return (ssize_t)zip->entry.uncomp_size;
+}
+
+// Extracts the current entry into the file at filename. On non-MSVC builds
+// the permission bits stored in the entry's external attributes are
+// re-applied with chmod. Returns 0 or a negative ZIP_E* code.
+int zip_entry_fread(struct zip_t *zip, const char *filename) {
+  mz_zip_archive *pzip = NULL;
+  mz_uint idx;
+  mz_uint32 xattr = 0;
+  mz_zip_archive_file_stat info;
+
+  if (!zip) {
+    // zip_t handler is not initialized
+    return ZIP_ENOINIT;
+  }
+
+  memset((void *)&info, 0, sizeof(mz_zip_archive_file_stat));
+  pzip = &(zip->archive);
+  if (pzip->m_zip_mode != MZ_ZIP_MODE_READING || zip->entry.index < 0) {
+    // the entry is not found or we do not have read access
+    return ZIP_ENOENT;
+  }
+
+  idx = (mz_uint)zip->entry.index;
+  if (mz_zip_reader_is_file_a_directory(pzip, idx)) {
+    // the entry is a directory
+    return ZIP_EINVENTTYPE;
+  }
+
+  if (!mz_zip_reader_extract_to_file(pzip, idx, filename, 0)) {
+    return ZIP_ENOFILE;
+  }
+
+#if defined(_MSC_VER)
+  (void)xattr; // unused
+#else
+  if (!mz_zip_reader_file_stat(pzip, idx, &info)) {
+    // Cannot get information about zip archive;
+    return ZIP_ENOFILE;
+  }
+
+  // Restore the mode bits kept in the high 16 bits of the external attrs.
+  xattr = (info.m_external_attr >> 16) & 0xFFFF;
+  if (xattr > 0) {
+    if (chmod(filename, (mode_t)xattr) < 0) {
+      return ZIP_ENOPERM;
+    }
+  }
+#endif
+
+  return 0;
+}
+
+// Extracts the current entry by repeatedly invoking on_extract with chunks
+// of decompressed data; arg is passed through to the callback untouched.
+// Returns 0 on success or a negative ZIP_E* code.
+int zip_entry_extract(struct zip_t *zip,
+                      size_t (*on_extract)(void *arg, unsigned long long offset,
+                                           const void *buf, size_t bufsize),
+                      void *arg) {
+  mz_zip_archive *pzip = NULL;
+  mz_uint idx;
+
+  if (!zip) {
+    // zip_t handler is not initialized
+    return ZIP_ENOINIT;
+  }
+
+  pzip = &(zip->archive);
+  if (pzip->m_zip_mode != MZ_ZIP_MODE_READING || zip->entry.index < 0) {
+    // the entry is not found or we do not have read access
+    return ZIP_ENOENT;
+  }
+
+  idx = (mz_uint)zip->entry.index;
+  return (mz_zip_reader_extract_to_callback(pzip, idx, on_extract, arg, 0))
+             ? 0
+             : ZIP_EINVIDX;
+}
+
+// Number of entries (files and directories) in the archive, or ZIP_ENOINIT
+// when the handle is not initialized.
+int zip_entries_total(struct zip_t *zip) {
+  return zip ? (int)zip->archive.m_total_files : ZIP_ENOINIT;
+}
+
+// Deletes the named entries from the archive (handle opened in 'd' mode).
+// Builds per-entry keep/delete/move marks, then rewrites the file and
+// compacts the central directory. Returns the number of deleted entries
+// or a negative ZIP_E* code; deleting an empty list is a successful no-op.
+int zip_entries_delete(struct zip_t *zip, char *const entries[],
+                       const size_t len) {
+  int n = 0;
+  int err = 0;
+  struct zip_entry_mark_t *entry_mark = NULL;
+
+  if (zip == NULL || (entries == NULL && len != 0)) {
+    return ZIP_ENOINIT;
+  }
+
+  if (entries == NULL && len == 0) {
+    return 0;
+  }
+
+  n = zip_entries_total(zip);
+
+  entry_mark =
+      (struct zip_entry_mark_t *)calloc(n, sizeof(struct zip_entry_mark_t));
+  if (!entry_mark) {
+    return ZIP_EOOMEM;
+  }
+
+  // Marking walks the central directory, so force reading mode first.
+  zip->archive.m_zip_mode = MZ_ZIP_MODE_READING;
+
+  err = zip_entry_set(zip, entry_mark, n, entries, len);
+  if (err < 0) {
+    CLEANUP(entry_mark);
+    return err;
+  }
+
+  err = zip_entries_delete_mark(zip, entry_mark, n);
+  CLEANUP(entry_mark);
+  return err;
+}
+
+// Extracts an in-memory zip archive into dir, invoking on_extract (may be
+// NULL) after each extracted entry. Returns 0 or a negative ZIP_E* code.
+int zip_stream_extract(const char *stream, size_t size, const char *dir,
+                       int (*on_extract)(const char *filename, void *arg),
+                       void *arg) {
+  mz_zip_archive zip_archive;
+  if (!stream || !dir) {
+    // Cannot parse zip archive stream
+    return ZIP_ENOINIT;
+  }
+  if (!memset(&zip_archive, 0, sizeof(mz_zip_archive))) {
+    // Cannot memset zip archive
+    return ZIP_EMEMSET;
+  }
+  if (!mz_zip_reader_init_mem(&zip_archive, stream, size, 0)) {
+    // Cannot initialize zip_archive reader
+    return ZIP_ENOINIT;
+  }
+
+  // zip_archive_extract ends the reader when it is done.
+  return zip_archive_extract(&zip_archive, dir, on_extract, arg);
+}
+
+// Opens an in-memory archive: mode 'r' reads from stream/size, mode 'w'
+// (with stream == NULL and size == 0) starts a fresh heap-backed archive.
+// Returns a handle (release with zip_stream_close) or NULL on error.
+struct zip_t *zip_stream_open(const char *stream, size_t size, int level,
+                              char mode) {
+  struct zip_t *zip = (struct zip_t *)calloc((size_t)1, sizeof(struct zip_t));
+  if (!zip) {
+    return NULL;
+  }
+
+  if (level < 0) {
+    level = MZ_DEFAULT_LEVEL;
+  }
+  if ((level & 0xF) > MZ_UBER_COMPRESSION) {
+    // Wrong compression level
+    goto cleanup;
+  }
+  zip->level = (mz_uint)level;
+
+  // The argument combination must match the mode exactly.
+  if ((stream != NULL) && (size > 0) && (mode == 'r')) {
+    if (!mz_zip_reader_init_mem(&(zip->archive), stream, size, 0)) {
+      goto cleanup;
+    }
+  } else if ((stream == NULL) && (size == 0) && (mode == 'w')) {
+    // Create a new archive.
+    if (!mz_zip_writer_init_heap(&(zip->archive), 0, 1024)) {
+      // Cannot initialize zip_archive writer
+      goto cleanup;
+    }
+  } else {
+    goto cleanup;
+  }
+  return zip;
+
+cleanup:
+  CLEANUP(zip);
+  return NULL;
+}
+
+// Copies the finalized in-memory archive into a newly allocated buffer.
+//
+// The archive is finalized first so the central directory is present.
+// On success *buf (caller frees) holds the archive bytes and *bufsize,
+// when non-NULL, receives the size. Returns the copied size in bytes or a
+// negative ZIP_E* code.
+ssize_t zip_stream_copy(struct zip_t *zip, void **buf, size_t *bufsize) {
+  if (!zip) {
+    return (ssize_t)ZIP_ENOINIT;
+  }
+
+  zip_archive_finalize(&(zip->archive));
+
+  if (bufsize != NULL) {
+    *bufsize = (size_t)zip->archive.m_archive_size;
+  }
+  *buf = calloc(sizeof(unsigned char), zip->archive.m_archive_size);
+  if (*buf == NULL) {
+    // Previously the calloc result went straight into memcpy, so an
+    // allocation failure dereferenced NULL.
+    return (ssize_t)ZIP_EOOMEM;
+  }
+  memcpy(*buf, zip->archive.m_pState->m_pMem, zip->archive.m_archive_size);
+
+  return (ssize_t)zip->archive.m_archive_size;
+}
+
+// Shuts down both the writer and the reader halves of a stream archive and
+// frees the handle. NULL is tolerated.
+void zip_stream_close(struct zip_t *zip) {
+  if (!zip) {
+    return;
+  }
+  mz_zip_writer_end(&(zip->archive));
+  mz_zip_reader_end(&(zip->archive));
+  CLEANUP(zip);
+}
+
+// Creates a new archive at zipname and adds each file in filenames to it.
+//
+// Each entry is stored under its basename with the default compression
+// level; the source file's mode bits (and the MS-DOS read-only bit) are
+// carried into the entry's external attributes. Stops at the first failing
+// file but still finalizes the archive. Returns 0 on success or a negative
+// ZIP_E* code.
+int zip_create(const char *zipname, const char *filenames[], size_t len) {
+  int err = 0;
+  size_t i;
+  mz_zip_archive zip_archive;
+  struct MZ_FILE_STAT_STRUCT file_stat;
+  mz_uint32 ext_attributes = 0;
+
+  if (!zipname || strlen(zipname) < 1) {
+    // zip_t archive name is empty or NULL
+    return ZIP_EINVZIPNAME;
+  }
+
+  // Create a new archive.
+  if (!memset(&(zip_archive), 0, sizeof(zip_archive))) {
+    // Cannot memset zip archive
+    return ZIP_EMEMSET;
+  }
+
+  if (!mz_zip_writer_init_file(&zip_archive, zipname, 0)) {
+    // Cannot initialize zip_archive writer
+    return ZIP_ENOINIT;
+  }
+
+  if (!memset((void *)&file_stat, 0, sizeof(struct MZ_FILE_STAT_STRUCT))) {
+    return ZIP_EMEMSET;
+  }
+
+  for (i = 0; i < len; ++i) {
+    const char *name = filenames[i];
+    if (!name) {
+      err = ZIP_EINVENTNAME;
+      break;
+    }
+
+    if (MZ_FILE_STAT(name, &file_stat) != 0) {
+      // problem getting information - check errno
+      err = ZIP_ENOFILE;
+      break;
+    }
+
+    // Reset per file: without this, attribute bits (e.g. read-only) from
+    // earlier files leaked into every subsequent entry.
+    ext_attributes = 0;
+    if ((file_stat.st_mode & 0200) == 0) {
+      // MS-DOS read-only attribute
+      ext_attributes |= 0x01;
+    }
+    ext_attributes |= (mz_uint32)((file_stat.st_mode & 0xFFFF) << 16);
+
+    if (!mz_zip_writer_add_file(&zip_archive, zip_basename(name), name, "", 0,
+                                ZIP_DEFAULT_COMPRESSION_LEVEL,
+                                ext_attributes)) {
+      // Cannot add file to zip_archive
+      err = ZIP_ENOFILE;
+      break;
+    }
+  }
+
+  mz_zip_writer_finalize_archive(&zip_archive);
+  mz_zip_writer_end(&zip_archive);
+  return err;
+}
+
+// Extracts the archive file zipname into dir, invoking on_extract (may be
+// NULL) after each extracted entry. Returns 0 or a negative ZIP_E* code.
+int zip_extract(const char *zipname, const char *dir,
+                int (*on_extract)(const char *filename, void *arg), void *arg) {
+  mz_zip_archive zip_archive;
+
+  if (!zipname || !dir) {
+    // Cannot parse zip archive name
+    return ZIP_EINVZIPNAME;
+  }
+
+  if (!memset(&zip_archive, 0, sizeof(mz_zip_archive))) {
+    // Cannot memset zip archive
+    return ZIP_EMEMSET;
+  }
+
+  // Now try to open the archive.
+  if (!mz_zip_reader_init_file(&zip_archive, zipname, 0)) {
+    // Cannot initialize zip_archive reader
+    return ZIP_ENOINIT;
+  }
+
+  // zip_archive_extract ends the reader when it is done.
+  return zip_archive_extract(&zip_archive, dir, on_extract, arg);
+}
diff --git a/test/standalone/issue_9812/vendor/kuba-zip/zip.h b/test/standalone/issue_9812/vendor/kuba-zip/zip.h
new file mode 100644
index 0000000000..54b412d8bd
--- /dev/null
+++ b/test/standalone/issue_9812/vendor/kuba-zip/zip.h
@@ -0,0 +1,433 @@
+/*
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#ifndef ZIP_H
+#define ZIP_H
+
+#include <string.h>
+#include <sys/types.h>
+
+#ifndef ZIP_SHARED
+# define ZIP_EXPORT
+#else
+# ifdef _WIN32
+# ifdef ZIP_BUILD_SHARED
+# define ZIP_EXPORT __declspec(dllexport)
+# else
+# define ZIP_EXPORT __declspec(dllimport)
+# endif
+# else
+# define ZIP_EXPORT __attribute__ ((visibility ("default")))
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined(_POSIX_C_SOURCE) && defined(_MSC_VER)
+// 64-bit Windows is the only mainstream platform
+// where sizeof(long) != sizeof(void*)
+#ifdef _WIN64
+typedef long long ssize_t; /* byte count or error */
+#else
+typedef long ssize_t; /* byte count or error */
+#endif
+#endif
+
+#ifndef MAX_PATH
+#define MAX_PATH 32767 /* # chars in a path name including NULL */
+#endif
+
+/**
+ * @mainpage
+ *
+ * Documentation for @ref zip.
+ */
+
+/**
+ * @addtogroup zip
+ * @{
+ */
+
+/**
+ * Default zip compression level.
+ */
+#define ZIP_DEFAULT_COMPRESSION_LEVEL 6
+
+/**
+ * Error codes
+ */
+#define ZIP_ENOINIT -1 // not initialized
+#define ZIP_EINVENTNAME -2 // invalid entry name
+#define ZIP_ENOENT -3 // entry not found
+#define ZIP_EINVMODE -4 // invalid zip mode
+#define ZIP_EINVLVL -5 // invalid compression level
+#define ZIP_ENOSUP64 -6 // no zip 64 support
+#define ZIP_EMEMSET -7 // memset error
+#define ZIP_EWRTENT -8 // cannot write data to entry
+#define ZIP_ETDEFLINIT -9 // cannot initialize tdefl compressor
+#define ZIP_EINVIDX -10 // invalid index
+#define ZIP_ENOHDR -11 // header not found
+#define ZIP_ETDEFLBUF -12 // cannot flush tdefl buffer
+#define ZIP_ECRTHDR -13 // cannot create entry header
+#define ZIP_EWRTHDR -14 // cannot write entry header
+#define ZIP_EWRTDIR -15 // cannot write to central dir
+#define ZIP_EOPNFILE -16 // cannot open file
+#define ZIP_EINVENTTYPE -17 // invalid entry type
+#define ZIP_EMEMNOALLOC -18 // extracting data using no memory allocation
+#define ZIP_ENOFILE -19 // file not found
+#define ZIP_ENOPERM -20 // no permission
+#define ZIP_EOOMEM -21 // out of memory
+#define ZIP_EINVZIPNAME -22 // invalid zip archive name
+#define ZIP_EMKDIR -23 // make dir error
+#define ZIP_ESYMLINK -24 // symlink error
+#define ZIP_ECLSZIP -25 // close archive error
+#define ZIP_ECAPSIZE -26 // capacity size too small
+#define ZIP_EFSEEK -27 // fseek error
+#define ZIP_EFREAD -28 // fread error
+#define ZIP_EFWRITE -29 // fwrite error
+
+/**
+ * Looks up the error message string corresponding to an error number.
+ * @param errnum error number
+ * @return error message string corresponding to errnum or NULL if error is not
+ * found.
+ */
+extern ZIP_EXPORT const char *zip_strerror(int errnum);
+
+/**
+ * @struct zip_t
+ *
+ * This data structure is used throughout the library to represent zip archive -
+ * forward declaration.
+ */
+struct zip_t;
+
+/**
+ * Opens zip archive with compression level using the given mode.
+ *
+ * @param zipname zip archive file name.
+ * @param level compression level (0-9 are the standard zlib-style levels).
+ * @param mode file access mode.
+ * - 'r': opens a file for reading/extracting (the file must exist).
+ * - 'w': creates an empty file for writing.
+ * - 'a': appends to an existing archive.
+ *
+ * @return the zip archive handler or NULL on error
+ */
+extern ZIP_EXPORT struct zip_t *zip_open(const char *zipname, int level,
+ char mode);
+
+/**
+ * Closes the zip archive, releases resources - always finalize.
+ *
+ * @param zip zip archive handler.
+ */
+extern ZIP_EXPORT void zip_close(struct zip_t *zip);
+
+/**
+ * Determines if the archive has a zip64 end of central directory headers.
+ *
+ * @param zip zip archive handler.
+ *
+ * @return the return code - 1 (true), 0 (false), negative number (< 0) on
+ * error.
+ */
+extern ZIP_EXPORT int zip_is64(struct zip_t *zip);
+
+/**
+ * Opens an entry by name in the zip archive.
+ *
+ * For zip archive opened in 'w' or 'a' mode the function will append
+ * a new entry. In readonly mode the function tries to locate the entry
+ * in global dictionary.
+ *
+ * @param zip zip archive handler.
+ * @param entryname an entry name in local dictionary.
+ *
+ * @return the return code - 0 on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_entry_open(struct zip_t *zip, const char *entryname);
+
+/**
+ * Opens a new entry by index in the zip archive.
+ *
+ * This function is only valid if zip archive was opened in 'r' (readonly) mode.
+ *
+ * @param zip zip archive handler.
+ * @param index index in local dictionary.
+ *
+ * @return the return code - 0 on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_entry_openbyindex(struct zip_t *zip, int index);
+
+/**
+ * Closes a zip entry, flushes buffer and releases resources.
+ *
+ * @param zip zip archive handler.
+ *
+ * @return the return code - 0 on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_entry_close(struct zip_t *zip);
+
+/**
+ * Returns a local name of the current zip entry.
+ *
+ * The main difference between user's entry name and local entry name
+ * is optional relative path.
+ * Following .ZIP File Format Specification - the path stored MUST not contain
+ * a drive or device letter, or a leading slash.
+ * All slashes MUST be forward slashes '/' as opposed to backwards slashes '\'
+ * for compatibility with Amiga and UNIX file systems etc.
+ *
+ * @param zip: zip archive handler.
+ *
+ * @return the pointer to the current zip entry name, or NULL on error.
+ */
+extern ZIP_EXPORT const char *zip_entry_name(struct zip_t *zip);
+
+/**
+ * Returns an index of the current zip entry.
+ *
+ * @param zip zip archive handler.
+ *
+ * @return the index on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_entry_index(struct zip_t *zip);
+
+/**
+ * Determines if the current zip entry is a directory entry.
+ *
+ * @param zip zip archive handler.
+ *
+ * @return the return code - 1 (true), 0 (false), negative number (< 0) on
+ * error.
+ */
+extern ZIP_EXPORT int zip_entry_isdir(struct zip_t *zip);
+
+/**
+ * Returns an uncompressed size of the current zip entry.
+ *
+ * @param zip zip archive handler.
+ *
+ * @return the uncompressed size in bytes.
+ */
+extern ZIP_EXPORT unsigned long long zip_entry_size(struct zip_t *zip);
+
+/**
+ * Returns CRC-32 checksum of the current zip entry.
+ *
+ * @param zip zip archive handler.
+ *
+ * @return the CRC-32 checksum.
+ */
+extern ZIP_EXPORT unsigned int zip_entry_crc32(struct zip_t *zip);
+
+/**
+ * Compresses an input buffer for the current zip entry.
+ *
+ * @param zip zip archive handler.
+ * @param buf input buffer.
+ * @param bufsize input buffer size (in bytes).
+ *
+ * @return the return code - 0 on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_entry_write(struct zip_t *zip, const void *buf,
+ size_t bufsize);
+
+/**
+ * Compresses a file for the current zip entry.
+ *
+ * @param zip zip archive handler.
+ * @param filename input file.
+ *
+ * @return the return code - 0 on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_entry_fwrite(struct zip_t *zip, const char *filename);
+
+/**
+ * Extracts the current zip entry into output buffer.
+ *
+ * The function allocates sufficient memory for an output buffer.
+ *
+ * @param zip zip archive handler.
+ * @param buf output buffer.
+ * @param bufsize output buffer size (in bytes).
+ *
+ * @note remember to release memory allocated for an output buffer.
+ * for large entries, please take a look at zip_entry_extract function.
+ *
+ * @return the return code - the number of bytes actually read on success.
+ * Otherwise a negative number (< 0) on error.
+ */
+extern ZIP_EXPORT ssize_t zip_entry_read(struct zip_t *zip, void **buf,
+ size_t *bufsize);
+
+/**
+ * Extracts the current zip entry into a memory buffer using no memory
+ * allocation.
+ *
+ * @param zip zip archive handler.
+ * @param buf preallocated output buffer.
+ * @param bufsize output buffer size (in bytes).
+ *
+ * @note ensure supplied output buffer is large enough.
+ * zip_entry_size function (returns uncompressed size for the current
+ * entry) can be handy to estimate how big buffer is needed.
+ * For large entries, please take a look at zip_entry_extract function.
+ *
+ * @return the return code - the number of bytes actually read on success.
+ * Otherwise a negative number (< 0) on error (e.g. bufsize is not large enough).
+ */
+extern ZIP_EXPORT ssize_t zip_entry_noallocread(struct zip_t *zip, void *buf,
+ size_t bufsize);
+
+/**
+ * Extracts the current zip entry into output file.
+ *
+ * @param zip zip archive handler.
+ * @param filename output file.
+ *
+ * @return the return code - 0 on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_entry_fread(struct zip_t *zip, const char *filename);
+
+/**
+ * Extracts the current zip entry using a callback function (on_extract).
+ *
+ * @param zip zip archive handler.
+ * @param on_extract callback function.
+ * @param arg opaque pointer (optional argument, which you can pass to the
+ * on_extract callback)
+ *
+ * @return the return code - 0 on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int
+zip_entry_extract(struct zip_t *zip,
+ size_t (*on_extract)(void *arg, unsigned long long offset,
+ const void *data, size_t size),
+ void *arg);
+
+/**
+ * Returns the number of all entries (files and directories) in the zip archive.
+ *
+ * @param zip zip archive handler.
+ *
+ * @return the return code - the number of entries on success, negative number
+ * (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_entries_total(struct zip_t *zip);
+
+/**
+ * Deletes zip archive entries.
+ *
+ * @param zip zip archive handler.
+ * @param entries array of zip archive entries to be deleted.
+ * @param len the number of entries to be deleted.
+ * @return the number of deleted entries, or negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_entries_delete(struct zip_t *zip,
+ char *const entries[], size_t len);
+
+/**
+ * Extracts a zip archive stream into directory.
+ *
+ * If on_extract is not NULL, the callback will be called after
+ * successfully extracted each zip entry.
+ * Returning a negative value from the callback will cause abort and return an
+ * error. The last argument (void *arg) is optional, which you can use to pass
+ * data to the on_extract callback.
+ *
+ * @param stream zip archive stream.
+ * @param size stream size.
+ * @param dir output directory.
+ * @param on_extract on extract callback.
+ * @param arg opaque pointer.
+ *
+ * @return the return code - 0 on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int
+zip_stream_extract(const char *stream, size_t size, const char *dir,
+ int (*on_extract)(const char *filename, void *arg),
+ void *arg);
+
+/**
+ * Opens zip archive stream into memory.
+ *
+ * @param stream zip archive stream.
+ * @param size stream size.
+ *
+ * @return the zip archive handler or NULL on error
+ */
+extern ZIP_EXPORT struct zip_t *zip_stream_open(const char *stream, size_t size,
+ int level, char mode);
+
+/**
+ * Copy zip archive stream output buffer.
+ *
+ * @param zip zip archive handler.
+ * @param buf output buffer. User should free buf.
+ * @param bufsize output buffer size (in bytes).
+ *
+ * @return copy size
+ */
+extern ZIP_EXPORT ssize_t zip_stream_copy(struct zip_t *zip, void **buf,
+ size_t *bufsize);
+
+/**
+ * Close zip archive releases resources.
+ *
+ * @param zip zip archive handler.
+ *
+ * @return
+ */
+extern ZIP_EXPORT void zip_stream_close(struct zip_t *zip);
+
+/**
+ * Creates a new archive and puts files into a single zip archive.
+ *
+ * @param zipname zip archive file.
+ * @param filenames input files.
+ * @param len: number of input files.
+ *
+ * @return the return code - 0 on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_create(const char *zipname, const char *filenames[],
+ size_t len);
+
+/**
+ * Extracts a zip archive file into directory.
+ *
+ * If on_extract_entry is not NULL, the callback will be called after
+ * successfully extracted each zip entry.
+ * Returning a negative value from the callback will cause abort and return an
+ * error. The last argument (void *arg) is optional, which you can use to pass
+ * data to the on_extract_entry callback.
+ *
+ * @param zipname zip archive file.
+ * @param dir output directory.
+ * @param on_extract_entry on extract callback.
+ * @param arg opaque pointer.
+ *
+ * @return the return code - 0 on success, negative number (< 0) on error.
+ */
+extern ZIP_EXPORT int zip_extract(const char *zipname, const char *dir,
+ int (*on_extract_entry)(const char *filename,
+ void *arg),
+ void *arg);
+
+/** @} */
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/test/tests.zig b/test/tests.zig
index a90531c600..0577092845 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -455,7 +455,7 @@ pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.St
const cases = b.allocator.create(TranslateCContext) catch unreachable;
cases.* = TranslateCContext{
.b = b,
- .step = b.step("test-translate-c", "Run the C transation tests"),
+ .step = b.step("test-translate-c", "Run the C translation tests"),
.test_index = 0,
.test_filter = test_filter,
};
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 5ff6c22ead..db665124ba 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -3188,7 +3188,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
});
- cases.add("macro comparisions",
+ cases.add("macro comparisons",
\\#define MIN(a, b) ((b) < (a) ? (b) : (a))
\\#define MAX(a, b) ((b) > (a) ? (b) : (a))
, &[_][]const u8{
@@ -3443,7 +3443,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
});
}
- cases.add("unnamed fields have predictabile names",
+ cases.add("unnamed fields have predictable names",
\\struct a {
\\ struct {};
\\};
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index 407e813ef3..2a0da66578 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -376,6 +376,10 @@ const known_options = [_]KnownOpt{
.name = "mexec-model",
.ident = "exec_model",
},
+ .{
+ .name = "emit-llvm",
+ .ident = "emit_llvm",
+ },
};
const blacklisted_options = [_][]const u8{};