diff --git a/.gitattributes b/.gitattributes
index f86311554f..e4a9c93776 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1 +1,2 @@
*.zig text eol=lf
+langref.html.in text eol=lf
diff --git a/.gitignore b/.gitignore
index 20b208975a..4b7bff11a6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,15 @@
+# This file is for zig-specific build artifacts.
+# If you have OS-specific or editor-specific files to ignore,
+# such as *.swp or .DS_Store, put those in your global
+# ~/.gitignore and put this in your ~/.gitconfig:
+#
+# [core]
+# excludesfile = ~/.gitignore
+#
+# Cheers!
+# -andrewrk
+
zig-cache/
build/
build-*/
+docgen_tmp/
diff --git a/.travis.yml b/.travis.yml
index c5299e914e..731202f5f7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,18 +1,22 @@
sudo: required
services:
- - docker
+- docker
os:
- - linux
- - osx
+- linux
+- osx
dist: trusty
osx_image: xcode8.3
language: cpp
before_install:
- - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ci/travis_linux_before_install; fi
- - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then ci/travis_osx_before_install; fi
+- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ci/travis_linux_before_install; fi
+- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then ci/travis_osx_before_install; fi
install:
- - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ci/travis_linux_install; fi
- - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then ci/travis_osx_install; fi
+- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ci/travis_linux_install; fi
+- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then ci/travis_osx_install; fi
script:
- - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ci/travis_linux_script; fi
- - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then ci/travis_osx_script; fi
+- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then ci/travis_linux_script; fi
+- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then ci/travis_osx_script; fi
+env:
+ global:
+ - secure: QmJ+eLOxj3Irl5SHxt6lQvrj7++1AIz8bYri6RScAQGHQPIztkmbpBjAkpFgYaWPkZ04ROtamFXdS7oHtJHSECesgPoqM/CHIychQkgpDq30+TsFyYbBpDGHY+N6r2WnQTvg+9EuAp6P365us6qFS0D5zQ3P40c56uMbazFu3J4W1HZP+pLWlLjEXaN88ePhHWqNZyvwGMkLpYl3ghcrE9H4vGZQ7jenRW4UmskLEkuhUPJbQiow3Td8arJiRmLVISzWqneqNraLUpGyUVr4F3Rbjzacfoo3r9ZZynhY0mFsEye82x6TMGgH2xsNGkd91zpQuckWUT+pQv/G6FXpnEnjIJSO2Z5WAxXrx6xB1k2HZ17/4NWLF3fJVhdQJm3mS6odeGzUjgGrl1A42evxU+7VbcofEJq1aMiLgU1jUT2pt+pefCwmKJYLpEsSzuyrVxgvskQz0QpC053TAYSNf2Jj6Qhg9YDWyOeemYmDgffTqErF7AYhc6NKH0s0XKkIiNFSxorkEsfG/Ck1o+15slHNmWZXlmXToxDqFkLDoPvfGKg7koU5YTGvci/F9ZKb1juhGLxZbwap/18zN40BqA+Ip2yDBJAKxsIiwSjSIguy6g/Z1I50s0xNGOr36urfRRQX5H+rqr/xCZ63B6WSe6qBcZboWAQMDn8HLS9Xiwc=
+ - secure: dnb7r5guUeMOX9e7XlPUSZzmga8VW3G9Q1aa7LxEKiTjSnWhu5KpPDe8o1X3Rj6nc5iXDqmBH/C/7eNXPDyXJJWPvpE2YRpGymyUkRaakul0QBKJEaMvwy2SuAfS69CWC+TSzfGRvtSYkdpBhhLvs0h5S819S5jYbCNSCmOKfFucaP5NsHNIZ/I19oIeTPTa0/UnVm7DLFZXZjvbS+czkdyH1DhbT85sLj+XqNTzLePImE68efrjaHnlSy/CzBVJzj55UgD5i9fxNCQWzGWim/SD5xZ0zKtLycSOf6wQN2lCo0lkjw9rDlYz69mM5L9ikfYL9oHDPZnh84oXKglQ5miOHCgqs/qs4439I05lIu8i/EfbFA55YG4NyO3rL9YVOOt5gwiwvJYhDcnkVVzSl0o5bsoZgQfYvPWaIQKNkl3C53zfDQjgqS54CeDzlZpFrQTDQ1RrH8oeVC1gfYAeMabMDadox5rfZmLIN5JTf/F8iD/QdxGcoUvkEENcQgfP9PnubExtexgHGsEmqbm6ORSZ1MkEh2m3fo0f8KE6TbN1UigmcQ8nTkWBHsSmfHnB8HwJQp8mwQmDamXA+Hl3e3w4LOdYkJVlNW1/TTyJJOOvjMQCjF8SJmPHuh+QpqKbSaT9XM/vBhxbIZEufH8kawJKCBBcCNspGMNjhXfNjM0=
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 021fd43cf0..867f2684db 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -22,7 +22,7 @@ set(ZIG_VERSION "${ZIG_VERSION_MAJOR}.${ZIG_VERSION_MINOR}.${ZIG_VERSION_PATCH}"
find_program(GIT_EXE NAMES git)
if(GIT_EXE)
execute_process(
- COMMAND ${GIT_EXE} name-rev HEAD --tags --name-only --no-undefined --always
+ COMMAND ${GIT_EXE} -C ${CMAKE_SOURCE_DIR} name-rev HEAD --tags --name-only --no-undefined --always
OUTPUT_VARIABLE ZIG_GIT_REV
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(ZIG_GIT_REV MATCHES "\\^0$")
@@ -30,7 +30,7 @@ if(GIT_EXE)
message("WARNING: Tag does not match configured Zig version")
endif()
else()
- set(ZIG_VERSION "${ZIG_VERSION_MAJOR}.${ZIG_VERSION_MINOR}.${ZIG_VERSION_PATCH}.${ZIG_GIT_REV}")
+ set(ZIG_VERSION "${ZIG_VERSION}+${ZIG_GIT_REV}")
endif()
endif()
message("Configuring zig version ${ZIG_VERSION}")
@@ -196,7 +196,7 @@ else()
if(MSVC)
set(ZIG_LLD_COMPILE_FLAGS "-std=c++11 -D_CRT_SECURE_NO_WARNINGS /w")
else()
- set(ZIG_LLD_COMPILE_FLAGS "-std=c++11 -fno-exceptions -fno-rtti -Wno-comment")
+ set(ZIG_LLD_COMPILE_FLAGS "-std=c++11 -fno-exceptions -fno-rtti -Wno-comment -Wno-class-memaccess -Wno-unknown-warning-option")
endif()
set_target_properties(embedded_lld_lib PROPERTIES
COMPILE_FLAGS ${ZIG_LLD_COMPILE_FLAGS}
@@ -261,12 +261,15 @@ endif()
set(EMBEDDED_SOFTFLOAT_SOURCES
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/f128M_isSignalingNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF128M.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF16UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF32UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF64UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f128MToCommonNaN.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f16UIToCommonNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f32UIToCommonNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f64UIToCommonNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNF128M.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNF16UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/softfloat_raiseFlags.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_add.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_div.c"
@@ -293,8 +296,20 @@ set(EMBEDDED_SOFTFLOAT_SOURCES
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui32_r_minMag.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui64.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui64_r_minMag.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_add.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_div.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_eq.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_lt.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_mul.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_rem.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_roundToInt.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_sqrt.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_sub.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_f128M.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_f64.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f32_to_f128M.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_f128M.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_f16.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_add256M.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addCarryM.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addComplCarryM.c"
@@ -411,10 +426,15 @@ set(ZIG_SOURCES
)
set(ZIG_CPP_SOURCES
"${CMAKE_SOURCE_DIR}/src/zig_llvm.cpp"
+ "${CMAKE_SOURCE_DIR}/src/windows_sdk.cpp"
)
set(ZIG_STD_FILES
"array_list.zig"
+ "atomic/index.zig"
+ "atomic/int.zig"
+ "atomic/queue.zig"
+ "atomic/stack.zig"
"base64.zig"
"buf_map.zig"
"buf_set.zig"
@@ -424,33 +444,43 @@ set(ZIG_STD_FILES
"c/index.zig"
"c/linux.zig"
"c/windows.zig"
+ "crypto/blake2.zig"
+ "crypto/hmac.zig"
"crypto/index.zig"
"crypto/md5.zig"
"crypto/sha1.zig"
"crypto/sha2.zig"
"crypto/sha3.zig"
- "crypto/blake2.zig"
- "crypto/hmac.zig"
"cstr.zig"
"debug/failing_allocator.zig"
"debug/index.zig"
"dwarf.zig"
+ "dynamic_library.zig"
"elf.zig"
"empty.zig"
"event.zig"
+ "event/channel.zig"
+ "event/future.zig"
+ "event/group.zig"
+ "event/lock.zig"
+ "event/locked.zig"
+ "event/loop.zig"
+ "event/tcp.zig"
"fmt/errol/enum3.zig"
"fmt/errol/index.zig"
"fmt/errol/lookup.zig"
"fmt/index.zig"
- "hash_map.zig"
- "hash/index.zig"
"hash/adler.zig"
"hash/crc.zig"
"hash/fnv.zig"
+ "hash/index.zig"
"hash/siphash.zig"
+ "hash_map.zig"
"heap.zig"
"index.zig"
"io.zig"
+ "json.zig"
+ "lazy_init.zig"
"linked_list.zig"
"macho.zig"
"math/acos.zig"
@@ -460,8 +490,32 @@ set(ZIG_STD_FILES
"math/atan.zig"
"math/atan2.zig"
"math/atanh.zig"
+ "math/big/index.zig"
+ "math/big/int.zig"
"math/cbrt.zig"
"math/ceil.zig"
+ "math/complex/abs.zig"
+ "math/complex/acos.zig"
+ "math/complex/acosh.zig"
+ "math/complex/arg.zig"
+ "math/complex/asin.zig"
+ "math/complex/asinh.zig"
+ "math/complex/atan.zig"
+ "math/complex/atanh.zig"
+ "math/complex/conj.zig"
+ "math/complex/cos.zig"
+ "math/complex/cosh.zig"
+ "math/complex/exp.zig"
+ "math/complex/index.zig"
+ "math/complex/ldexp.zig"
+ "math/complex/log.zig"
+ "math/complex/pow.zig"
+ "math/complex/proj.zig"
+ "math/complex/sin.zig"
+ "math/complex/sinh.zig"
+ "math/complex/sqrt.zig"
+ "math/complex/tan.zig"
+ "math/complex/tanh.zig"
"math/copysign.zig"
"math/cos.zig"
"math/cosh.zig"
@@ -498,24 +552,35 @@ set(ZIG_STD_FILES
"math/tan.zig"
"math/tanh.zig"
"math/trunc.zig"
- "math/x86_64/sqrt.zig"
"mem.zig"
"net.zig"
"os/child_process.zig"
"os/darwin.zig"
- "os/darwin_errno.zig"
+ "os/darwin/errno.zig"
+ "os/epoch.zig"
"os/file.zig"
+ "os/get_app_data_dir.zig"
"os/get_user_id.zig"
"os/index.zig"
"os/linux/errno.zig"
"os/linux/index.zig"
+ "os/linux/vdso.zig"
"os/linux/x86_64.zig"
"os/path.zig"
+ "os/time.zig"
+ "os/windows/advapi32.zig"
"os/windows/error.zig"
"os/windows/index.zig"
+ "os/windows/kernel32.zig"
+ "os/windows/ole32.zig"
+ "os/windows/shell32.zig"
+ "os/windows/shlwapi.zig"
+ "os/windows/user32.zig"
"os/windows/util.zig"
"os/zen.zig"
"rand/index.zig"
+ "rand/ziggurat.zig"
+ "segmented_list.zig"
"sort.zig"
"special/bootstrap.zig"
"special/bootstrap_lib.zig"
@@ -525,6 +590,8 @@ set(ZIG_STD_FILES
"special/compiler_rt/aulldiv.zig"
"special/compiler_rt/aullrem.zig"
"special/compiler_rt/comparetf2.zig"
+ "special/compiler_rt/divti3.zig"
+ "special/compiler_rt/extendXfYf2.zig"
"special/compiler_rt/fixuint.zig"
"special/compiler_rt/fixunsdfdi.zig"
"special/compiler_rt/fixunsdfsi.zig"
@@ -535,7 +602,17 @@ set(ZIG_STD_FILES
"special/compiler_rt/fixunstfdi.zig"
"special/compiler_rt/fixunstfsi.zig"
"special/compiler_rt/fixunstfti.zig"
+ "special/compiler_rt/floatunditf.zig"
+ "special/compiler_rt/floatunsitf.zig"
+ "special/compiler_rt/floatuntidf.zig"
+ "special/compiler_rt/floatuntisf.zig"
+ "special/compiler_rt/floatuntitf.zig"
+ "special/compiler_rt/floattidf.zig"
+ "special/compiler_rt/floattisf.zig"
+ "special/compiler_rt/floattitf.zig"
+ "special/compiler_rt/muloti4.zig"
"special/compiler_rt/index.zig"
+ "special/compiler_rt/truncXfYf2.zig"
"special/compiler_rt/udivmod.zig"
"special/compiler_rt/udivmoddi4.zig"
"special/compiler_rt/udivmodti4.zig"
@@ -546,7 +623,9 @@ set(ZIG_STD_FILES
"unicode.zig"
"zig/ast.zig"
"zig/index.zig"
- "zig/parser.zig"
+ "zig/parse.zig"
+ "zig/parse_string_literal.zig"
+ "zig/render.zig"
"zig/tokenizer.zig"
)
diff --git a/README.md b/README.md
index 1f23e133f8..2ee3f178ce 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
-
+
A programming language designed for robustness, optimality, and
clarity.
-[ziglang.org](http://ziglang.org)
+[ziglang.org](https://ziglang.org)
## Feature Highlights
@@ -21,19 +21,19 @@ clarity.
* Compatible with C libraries with no wrapper necessary. Directly include
C .h files and get access to the functions and symbols therein.
* Provides standard library which competes with the C standard library and is
- always compiled against statically in source form. Compile units do not
+ always compiled against statically in source form. Zig binaries do not
depend on libc unless explicitly linked.
- * Nullable type instead of null pointers.
+ * Optional type instead of null pointers.
* Safe unions, tagged unions, and C ABI compatible unions.
* Generics so that one can write efficient data structures that work for any
data type.
* No header files required. Top level declarations are entirely
order-independent.
* Compile-time code execution. Compile-time reflection.
- * Partial compile-time function evaluation with eliminates the need for
+ * Partial compile-time function evaluation which eliminates the need for
a preprocessor or macros.
* The binaries produced by Zig have complete debugging information so you can,
- for example, use GDB or MSVC to debug your software.
+ for example, use GDB, MSVC, or LLDB to debug your software.
* Built-in unit tests with `zig test`.
* Friendly toward package maintainers. Reproducible build, bootstrapping
process carefully documented. Issues filed by package maintainers are
@@ -55,66 +55,28 @@ that counts as "freestanding" for the purposes of this table.
|i386 | OK | planned | OK | planned | planned |
|x86_64 | OK | OK | OK | OK | planned |
|arm | OK | planned | planned | N/A | planned |
-|aarch64 | OK | planned | planned | planned | planned |
-|bpf | OK | planned | planned | N/A | planned |
-|hexagon | OK | planned | planned | N/A | planned |
-|mips | OK | planned | planned | N/A | planned |
-|powerpc | OK | planned | planned | N/A | planned |
-|r600 | OK | planned | planned | N/A | planned |
-|amdgcn | OK | planned | planned | N/A | planned |
-|sparc | OK | planned | planned | N/A | planned |
-|s390x | OK | planned | planned | N/A | planned |
-|thumb | OK | planned | planned | N/A | planned |
-|spir | OK | planned | planned | N/A | planned |
-|lanai | OK | planned | planned | N/A | planned |
+|aarch64 | OK | planned | N/A | planned | planned |
+|bpf | OK | planned | N/A | N/A | planned |
+|hexagon | OK | planned | N/A | N/A | planned |
+|mips | OK | planned | N/A | N/A | planned |
+|powerpc | OK | planned | N/A | N/A | planned |
+|r600 | OK | planned | N/A | N/A | planned |
+|amdgcn | OK | planned | N/A | N/A | planned |
+|sparc | OK | planned | N/A | N/A | planned |
+|s390x | OK | planned | N/A | N/A | planned |
+|thumb | OK | planned | N/A | N/A | planned |
+|spir | OK | planned | N/A | N/A | planned |
+|lanai | OK | planned | N/A | N/A | planned |
## Community
- * IRC: `#zig` on Freenode.
+ * IRC: `#zig` on Freenode ([Channel Logs](https://irclog.whitequark.org/zig/)).
* Reddit: [/r/zig](https://www.reddit.com/r/zig)
* Email list: [ziglang@googlegroups.com](https://groups.google.com/forum/#!forum/ziglang)
-### Wanted: Windows Developers
-
-Flesh out the standard library for Windows, streamline Zig installation and
-distribution for Windows. Work with LLVM and LLD teams to improve
-PDB/CodeView/MSVC debugging. Implement stack traces for Windows in the MinGW
-environment and the MSVC environment.
-
-### Wanted: MacOS and iOS Developers
-
-Flesh out the standard library for MacOS. Improve the MACH-O linker. Implement
-stack traces for MacOS. Streamline the process of using Zig to build for
-iOS.
-
-### Wanted: Android Developers
-
-Flesh out the standard library for Android. Streamline the process of using
-Zig to build for Android and for depending on Zig code on Android.
-
-### Wanted: Web Developers
-
-Figure out what are the use cases for compiling Zig to WebAssembly. Create demo
-projects with it and streamline experience for users trying to output
-WebAssembly. Work on the documentation generator outputting useful searchable html
-documentation. Create Zig modules for common web tasks such as WebSockets and gzip.
-
-### Wanted: Embedded Developers
-
-Flesh out the standard library for uncommon CPU architectures and OS targets.
-Drive issue discussion for cross compiling and using Zig in constrained
-or unusual environments.
-
-### Wanted: Game Developers
-
-Create cross platform Zig modules to compete with SDL and GLFW. Create an
-OpenGL library that does not depend on libc. Drive the usability of Zig
-for video games. Create a general purpose allocator that does not depend on
-libc. Create demo games using Zig.
-
## Building
-[](https://travis-ci.org/zig-lang/zig)
+[](https://travis-ci.org/ziglang/zig)
[](https://ci.appveyor.com/project/andrewrk/zig-d3l86/branch/master)
### Stage 1: Build Zig from C++ Source Code
@@ -161,7 +123,7 @@ bin/zig build --build-file ../build.zig test
##### Windows
-See https://github.com/zig-lang/zig/wiki/Building-Zig-on-Windows
+See https://github.com/ziglang/zig/wiki/Building-Zig-on-Windows
### Stage 2: Build Self-Hosted Zig from Zig Source Code
@@ -182,6 +144,9 @@ binary.
This is the actual compiler binary that we will install to the system.
+*Note: the Stage 2 compiler is not yet able to build itself, so building Stage 3
+is not yet supported.*
+
#### Debug / Development Build
```
diff --git a/build.zig b/build.zig
index b72641a2ef..dd939365a2 100644
--- a/build.zig
+++ b/build.zig
@@ -10,13 +10,13 @@ const ArrayList = std.ArrayList;
const Buffer = std.Buffer;
const io = std.io;
-pub fn build(b: &Builder) !void {
+pub fn build(b: *Builder) !void {
const mode = b.standardReleaseOptions();
var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
const rel_zig_exe = try os.path.relative(b.allocator, b.build_root, b.zig_exe);
- var docgen_cmd = b.addCommand(null, b.env_map, [][]const u8 {
+ var docgen_cmd = b.addCommand(null, b.env_map, [][]const u8{
docgen_exe.getOutputPath(),
rel_zig_exe,
"doc/langref.html.in",
@@ -30,109 +30,79 @@ pub fn build(b: &Builder) !void {
const test_step = b.step("test", "Run all the tests");
// find the stage0 build artifacts because we're going to re-use config.h and zig_cpp library
- const build_info = try b.exec([][]const u8{b.zig_exe, "BUILD_INFO"});
+ const build_info = try b.exec([][]const u8{
+ b.zig_exe,
+ "BUILD_INFO",
+ });
var index: usize = 0;
- const cmake_binary_dir = nextValue(&index, build_info);
- const cxx_compiler = nextValue(&index, build_info);
- const llvm_config_exe = nextValue(&index, build_info);
- const lld_include_dir = nextValue(&index, build_info);
- const lld_libraries = nextValue(&index, build_info);
- const std_files = nextValue(&index, build_info);
- const c_header_files = nextValue(&index, build_info);
- const dia_guids_lib = nextValue(&index, build_info);
+ var ctx = Context{
+ .cmake_binary_dir = nextValue(&index, build_info),
+ .cxx_compiler = nextValue(&index, build_info),
+ .llvm_config_exe = nextValue(&index, build_info),
+ .lld_include_dir = nextValue(&index, build_info),
+ .lld_libraries = nextValue(&index, build_info),
+ .std_files = nextValue(&index, build_info),
+ .c_header_files = nextValue(&index, build_info),
+ .dia_guids_lib = nextValue(&index, build_info),
+ .llvm = undefined,
+ .no_rosegment = b.option(bool, "no-rosegment", "Workaround to enable valgrind builds") orelse false,
+ };
+ ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
- const llvm = findLLVM(b, llvm_config_exe) catch unreachable;
+ var test_stage2 = b.addTest("src-self-hosted/test.zig");
+ test_stage2.setBuildMode(builtin.Mode.Debug);
var exe = b.addExecutable("zig", "src-self-hosted/main.zig");
exe.setBuildMode(mode);
- // This is for finding /lib/libz.a on alpine linux.
- // TODO turn this into -Dextra-lib-path=/lib option
- exe.addLibPath("/lib");
-
- exe.addIncludeDir("src");
- exe.addIncludeDir(cmake_binary_dir);
- addCppLib(b, exe, cmake_binary_dir, "zig_cpp");
- if (lld_include_dir.len != 0) {
- exe.addIncludeDir(lld_include_dir);
- var it = mem.split(lld_libraries, ";");
- while (it.next()) |lib| {
- exe.addObjectFile(lib);
- }
- } else {
- addCppLib(b, exe, cmake_binary_dir, "embedded_lld_elf");
- addCppLib(b, exe, cmake_binary_dir, "embedded_lld_coff");
- addCppLib(b, exe, cmake_binary_dir, "embedded_lld_lib");
- }
- dependOnLib(exe, llvm);
-
- if (exe.target.getOs() == builtin.Os.linux) {
- const libstdcxx_path_padded = try b.exec([][]const u8{cxx_compiler, "-print-file-name=libstdc++.a"});
- const libstdcxx_path = ??mem.split(libstdcxx_path_padded, "\r\n").next();
- if (mem.eql(u8, libstdcxx_path, "libstdc++.a")) {
- warn(
- \\Unable to determine path to libstdc++.a
- \\On Fedora, install libstdc++-static and try again.
- \\
- );
- return error.RequiredLibraryNotFound;
- }
- exe.addObjectFile(libstdcxx_path);
-
- exe.linkSystemLibrary("pthread");
- } else if (exe.target.isDarwin()) {
- exe.linkSystemLibrary("c++");
- }
-
- if (dia_guids_lib.len != 0) {
- exe.addObjectFile(dia_guids_lib);
- }
-
- if (exe.target.getOs() != builtin.Os.windows) {
- exe.linkSystemLibrary("xml2");
- }
- exe.linkSystemLibrary("c");
+ try configureStage2(b, test_stage2, ctx);
+ try configureStage2(b, exe, ctx);
b.default_step.dependOn(&exe.step);
- const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") ?? false;
+ const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
+ const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") orelse false;
if (!skip_self_hosted) {
test_step.dependOn(&exe.step);
}
- const verbose_link_exe = b.option(bool, "verbose-link", "Print link command for self hosted compiler") ?? false;
+ const verbose_link_exe = b.option(bool, "verbose-link", "Print link command for self hosted compiler") orelse false;
exe.setVerboseLink(verbose_link_exe);
b.installArtifact(exe);
- installStdLib(b, std_files);
- installCHeaders(b, c_header_files);
+ installStdLib(b, ctx.std_files);
+ installCHeaders(b, ctx.c_header_files);
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
- const with_lldb = b.option(bool, "with-lldb", "Run tests in LLDB to get a backtrace if one fails") ?? false;
- test_step.dependOn(docs_step);
+ const test_stage2_step = b.step("test-stage2", "Run the stage2 compiler tests");
+ test_stage2_step.dependOn(&test_stage2.step);
+ test_step.dependOn(test_stage2_step);
- test_step.dependOn(tests.addPkgTests(b, test_filter,
- "test/behavior.zig", "behavior", "Run the behavior tests",
- with_lldb));
+ const all_modes = []builtin.Mode{
+ builtin.Mode.Debug,
+ builtin.Mode.ReleaseSafe,
+ builtin.Mode.ReleaseFast,
+ builtin.Mode.ReleaseSmall,
+ };
+ const modes = if (skip_release) []builtin.Mode{builtin.Mode.Debug} else all_modes;
- test_step.dependOn(tests.addPkgTests(b, test_filter,
- "std/index.zig", "std", "Run the standard library tests",
- with_lldb));
+ test_step.dependOn(tests.addPkgTests(b, test_filter, "test/behavior.zig", "behavior", "Run the behavior tests", modes));
- test_step.dependOn(tests.addPkgTests(b, test_filter,
- "std/special/compiler_rt/index.zig", "compiler-rt", "Run the compiler_rt tests",
- with_lldb));
+ test_step.dependOn(tests.addPkgTests(b, test_filter, "std/index.zig", "std", "Run the standard library tests", modes));
- test_step.dependOn(tests.addCompareOutputTests(b, test_filter));
- test_step.dependOn(tests.addBuildExampleTests(b, test_filter));
- test_step.dependOn(tests.addCompileErrorTests(b, test_filter));
- test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter));
- test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter));
+ test_step.dependOn(tests.addPkgTests(b, test_filter, "std/special/compiler_rt/index.zig", "compiler-rt", "Run the compiler_rt tests", modes));
+
+ test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
+ test_step.dependOn(tests.addBuildExampleTests(b, test_filter, modes));
+ test_step.dependOn(tests.addCompileErrorTests(b, test_filter, modes));
+ test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
+ test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes));
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
test_step.dependOn(tests.addGenHTests(b, test_filter));
+ test_step.dependOn(docs_step);
}
-fn dependOnLib(lib_exe_obj: &std.build.LibExeObjStep, dep: &const LibraryDep) void {
+fn dependOnLib(lib_exe_obj: var, dep: *const LibraryDep) void {
for (dep.libdirs.toSliceConst()) |lib_dir| {
lib_exe_obj.addLibPath(lib_dir);
}
@@ -147,10 +117,9 @@ fn dependOnLib(lib_exe_obj: &std.build.LibExeObjStep, dep: &const LibraryDep) vo
}
}
-fn addCppLib(b: &Builder, lib_exe_obj: &std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
+fn addCppLib(b: *Builder, lib_exe_obj: var, cmake_binary_dir: []const u8, lib_name: []const u8) void {
const lib_prefix = if (lib_exe_obj.target.isWindows()) "" else "lib";
- lib_exe_obj.addObjectFile(os.path.join(b.allocator, cmake_binary_dir, "zig_cpp",
- b.fmt("{}{}{}", lib_prefix, lib_name, lib_exe_obj.target.libFileExt())) catch unreachable);
+ lib_exe_obj.addObjectFile(os.path.join(b.allocator, cmake_binary_dir, "zig_cpp", b.fmt("{}{}{}", lib_prefix, lib_name, lib_exe_obj.target.libFileExt())) catch unreachable);
}
const LibraryDep = struct {
@@ -160,12 +129,22 @@ const LibraryDep = struct {
includes: ArrayList([]const u8),
};
-fn findLLVM(b: &Builder, llvm_config_exe: []const u8) !LibraryDep {
- const libs_output = try b.exec([][]const u8{llvm_config_exe, "--libs", "--system-libs"});
- const includes_output = try b.exec([][]const u8{llvm_config_exe, "--includedir"});
- const libdir_output = try b.exec([][]const u8{llvm_config_exe, "--libdir"});
+fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep {
+ const libs_output = try b.exec([][]const u8{
+ llvm_config_exe,
+ "--libs",
+ "--system-libs",
+ });
+ const includes_output = try b.exec([][]const u8{
+ llvm_config_exe,
+ "--includedir",
+ });
+ const libdir_output = try b.exec([][]const u8{
+ llvm_config_exe,
+ "--libdir",
+ });
- var result = LibraryDep {
+ var result = LibraryDep{
.libs = ArrayList([]const u8).init(b.allocator),
.system_libs = ArrayList([]const u8).init(b.allocator),
.includes = ArrayList([]const u8).init(b.allocator),
@@ -208,7 +187,7 @@ fn findLLVM(b: &Builder, llvm_config_exe: []const u8) !LibraryDep {
return result;
}
-pub fn installStdLib(b: &Builder, stdlib_files: []const u8) void {
+pub fn installStdLib(b: *Builder, stdlib_files: []const u8) void {
var it = mem.split(stdlib_files, ";");
while (it.next()) |stdlib_file| {
const src_path = os.path.join(b.allocator, "std", stdlib_file) catch unreachable;
@@ -217,7 +196,7 @@ pub fn installStdLib(b: &Builder, stdlib_files: []const u8) void {
}
}
-pub fn installCHeaders(b: &Builder, c_header_files: []const u8) void {
+pub fn installCHeaders(b: *Builder, c_header_files: []const u8) void {
var it = mem.split(c_header_files, ";");
while (it.next()) |c_header_file| {
const src_path = os.path.join(b.allocator, "c_headers", c_header_file) catch unreachable;
@@ -226,21 +205,89 @@ pub fn installCHeaders(b: &Builder, c_header_files: []const u8) void {
}
}
-fn nextValue(index: &usize, build_info: []const u8) []const u8 {
- const start = *index;
- while (true) : (*index += 1) {
- switch (build_info[*index]) {
+fn nextValue(index: *usize, build_info: []const u8) []const u8 {
+ const start = index.*;
+ while (true) : (index.* += 1) {
+ switch (build_info[index.*]) {
'\n' => {
- const result = build_info[start..*index];
- *index += 1;
+ const result = build_info[start..index.*];
+ index.* += 1;
return result;
},
'\r' => {
- const result = build_info[start..*index];
- *index += 2;
+ const result = build_info[start..index.*];
+ index.* += 2;
return result;
},
else => continue,
}
}
}
+
+fn configureStage2(b: *Builder, exe: var, ctx: Context) !void {
+ // This is for finding /lib/libz.a on alpine linux.
+ // TODO turn this into -Dextra-lib-path=/lib option
+ exe.addLibPath("/lib");
+
+ exe.setNoRoSegment(ctx.no_rosegment);
+
+ exe.addIncludeDir("src");
+ exe.addIncludeDir(ctx.cmake_binary_dir);
+ addCppLib(b, exe, ctx.cmake_binary_dir, "zig_cpp");
+ if (ctx.lld_include_dir.len != 0) {
+ exe.addIncludeDir(ctx.lld_include_dir);
+ var it = mem.split(ctx.lld_libraries, ";");
+ while (it.next()) |lib| {
+ exe.addObjectFile(lib);
+ }
+ } else {
+ addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_wasm");
+ addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_elf");
+ addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_coff");
+ addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_lib");
+ }
+ dependOnLib(exe, ctx.llvm);
+
+ if (exe.target.getOs() == builtin.Os.linux) {
+ const libstdcxx_path_padded = try b.exec([][]const u8{
+ ctx.cxx_compiler,
+ "-print-file-name=libstdc++.a",
+ });
+ const libstdcxx_path = mem.split(libstdcxx_path_padded, "\r\n").next().?;
+ if (mem.eql(u8, libstdcxx_path, "libstdc++.a")) {
+ warn(
+ \\Unable to determine path to libstdc++.a
+ \\On Fedora, install libstdc++-static and try again.
+ \\
+ );
+ return error.RequiredLibraryNotFound;
+ }
+ exe.addObjectFile(libstdcxx_path);
+
+ exe.linkSystemLibrary("pthread");
+ } else if (exe.target.isDarwin()) {
+ exe.linkSystemLibrary("c++");
+ }
+
+ if (ctx.dia_guids_lib.len != 0) {
+ exe.addObjectFile(ctx.dia_guids_lib);
+ }
+
+ if (exe.target.getOs() != builtin.Os.windows) {
+ exe.linkSystemLibrary("xml2");
+ }
+ exe.linkSystemLibrary("c");
+}
+
+const Context = struct {
+ cmake_binary_dir: []const u8,
+ cxx_compiler: []const u8,
+ llvm_config_exe: []const u8,
+ lld_include_dir: []const u8,
+ lld_libraries: []const u8,
+ std_files: []const u8,
+ c_header_files: []const u8,
+ dia_guids_lib: []const u8,
+ llvm: LibraryDep,
+ no_rosegment: bool,
+};
diff --git a/ci/appveyor/appveyor.yml b/ci/appveyor/appveyor.yml
index 2122153494..5a2ad9bca1 100644
--- a/ci/appveyor/appveyor.yml
+++ b/ci/appveyor/appveyor.yml
@@ -6,5 +6,4 @@ build_script:
after_build:
- '%APPVEYOR_BUILD_FOLDER%\ci\appveyor\after_build.bat'
cache:
- - 'llvm+clang-5.0.1-win64-msvc-release.tar.xz'
- 'llvm+clang-6.0.0-win64-msvc-release.tar.xz'
diff --git a/deps/lld/COFF/Driver.cpp b/deps/lld/COFF/Driver.cpp
index 0f3d8fb0b4..34b968fe5e 100644
--- a/deps/lld/COFF/Driver.cpp
+++ b/deps/lld/COFF/Driver.cpp
@@ -72,6 +72,9 @@ bool link(ArrayRef
", builtin_code);
+ },
Node.HeaderOpen => |info| {
try out.print("{}$ zig build-exe {}.zig", code.name);
switch (code.mode) {
@@ -749,6 +764,10 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
try build_args.append("--release-fast");
try out.print(" --release-fast");
},
+ builtin.Mode.ReleaseSmall => {
+ try build_args.append("--release-small");
+ try out.print(" --release-small");
+ },
}
for (code.link_objects) |link_object| {
const name_with_ext = try std.fmt.allocPrint(allocator, "{}{}", link_object, obj_ext);
@@ -762,18 +781,20 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
try build_args.append("c");
try out.print(" --library c");
}
- _ = exec(allocator, build_args.toSliceConst()) catch return parseError(
- tokenizer, code.source_token, "example failed to compile");
+ _ = exec(allocator, &env_map, build_args.toSliceConst()) catch return parseError(tokenizer, code.source_token, "example failed to compile");
- const run_args = [][]const u8 {tmp_bin_file_name};
+ const run_args = [][]const u8{tmp_bin_file_name};
const result = if (expected_outcome == ExpectedOutcome.Fail) blk: {
- const result = try os.ChildProcess.exec(allocator, run_args, null, null, max_doc_file_size);
+ const result = try os.ChildProcess.exec(allocator, run_args, null, &env_map, max_doc_file_size);
switch (result.term) {
os.ChildProcess.Term.Exited => |exit_code| {
if (exit_code == 0) {
warn("{}\nThe following command incorrectly succeeded:\n", result.stderr);
- for (run_args) |arg| warn("{} ", arg) else warn("\n");
+ for (run_args) |arg|
+ warn("{} ", arg)
+ else
+ warn("\n");
return parseError(tokenizer, code.source_token, "example incorrectly compiled");
}
},
@@ -781,11 +802,9 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
}
break :blk result;
} else blk: {
- break :blk exec(allocator, run_args) catch return parseError(
- tokenizer, code.source_token, "example crashed");
+ break :blk exec(allocator, &env_map, run_args) catch return parseError(tokenizer, code.source_token, "example crashed");
};
-
const escaped_stderr = try escapeHtml(allocator, result.stderr);
const escaped_stdout = try escapeHtml(allocator, result.stdout);
@@ -798,7 +817,11 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
var test_args = std.ArrayList([]const u8).init(allocator);
defer test_args.deinit();
- try test_args.appendSlice([][]const u8 {zig_exe, "test", tmp_source_file_name});
+ try test_args.appendSlice([][]const u8{
+ zig_exe,
+ "test",
+ tmp_source_file_name,
+ });
try out.print("
\n", escaped_stderr, escaped_stdout);
@@ -828,7 +857,13 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
var test_args = std.ArrayList([]const u8).init(allocator);
defer test_args.deinit();
- try test_args.appendSlice([][]const u8 {zig_exe, "test", "--color", "on", tmp_source_file_name});
+ try test_args.appendSlice([][]const u8{
+ zig_exe,
+ "test",
+ "--color",
+ "on",
+ tmp_source_file_name,
+ });
try out.print("$ zig test {}.zig", code.name);
switch (code.mode) {
builtin.Mode.Debug => {},
@@ -810,16 +833,22 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
try test_args.append("--release-fast");
try out.print(" --release-fast");
},
+ builtin.Mode.ReleaseSmall => {
+ try test_args.append("--release-small");
+ try out.print(" --release-small");
+ },
}
if (code.target_windows) {
try test_args.appendSlice([][]const u8{
- "--target-os", "windows",
- "--target-arch", "x86_64",
- "--target-environ", "msvc",
+ "--target-os",
+ "windows",
+ "--target-arch",
+ "x86_64",
+ "--target-environ",
+ "msvc",
});
}
- const result = exec(allocator, test_args.toSliceConst()) catch return parseError(
- tokenizer, code.source_token, "test failed");
+ const result = exec(allocator, &env_map, test_args.toSliceConst()) catch return parseError(tokenizer, code.source_token, "test failed");
const escaped_stderr = try escapeHtml(allocator, result.stderr);
const escaped_stdout = try escapeHtml(allocator, result.stdout);
try out.print("\n{}{}
\n");
@@ -968,24 +1046,37 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
},
}
}
-
}
-fn exec(allocator: &mem.Allocator, args: []const []const u8) !os.ChildProcess.ExecResult {
- const result = try os.ChildProcess.exec(allocator, args, null, null, max_doc_file_size);
+fn exec(allocator: *mem.Allocator, env_map: *std.BufMap, args: []const []const u8) !os.ChildProcess.ExecResult {
+ const result = try os.ChildProcess.exec(allocator, args, null, env_map, max_doc_file_size);
switch (result.term) {
os.ChildProcess.Term.Exited => |exit_code| {
if (exit_code != 0) {
warn("{}\nThe following command exited with code {}:\n", result.stderr, exit_code);
- for (args) |arg| warn("{} ", arg) else warn("\n");
+ for (args) |arg|
+ warn("{} ", arg)
+ else
+ warn("\n");
return error.ChildExitError;
}
},
else => {
warn("{}\nThe following command crashed:\n", result.stderr);
- for (args) |arg| warn("{} ", arg) else warn("\n");
+ for (args) |arg|
+ warn("{} ", arg)
+ else
+ warn("\n");
return error.ChildCrashed;
},
}
return result;
}
+
+fn getBuiltinCode(allocator: *mem.Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
+ const result = try exec(allocator, env_map, []const []const u8{
+ zig_exe,
+ "builtin",
+ });
+ return result.stdout;
+}
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 856d62f142..54677bc5b5 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -96,7 +96,7 @@
$ zig test {}.zig", code.name);
switch (code.mode) {
builtin.Mode.Debug => {},
@@ -840,19 +875,29 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
try test_args.append("--release-fast");
try out.print(" --release-fast");
},
+ builtin.Mode.ReleaseSmall => {
+ try test_args.append("--release-small");
+ try out.print(" --release-small");
+ },
}
- const result = try os.ChildProcess.exec(allocator, test_args.toSliceConst(), null, null, max_doc_file_size);
+ const result = try os.ChildProcess.exec(allocator, test_args.toSliceConst(), null, &env_map, max_doc_file_size);
switch (result.term) {
os.ChildProcess.Term.Exited => |exit_code| {
if (exit_code == 0) {
warn("{}\nThe following command incorrectly succeeded:\n", result.stderr);
- for (test_args.toSliceConst()) |arg| warn("{} ", arg) else warn("\n");
+ for (test_args.toSliceConst()) |arg|
+ warn("{} ", arg)
+ else
+ warn("\n");
return parseError(tokenizer, code.source_token, "example incorrectly compiled");
}
},
else => {
warn("{}\nThe following command crashed:\n", result.stderr);
- for (test_args.toSliceConst()) |arg| warn("{} ", arg) else warn("\n");
+ for (test_args.toSliceConst()) |arg|
+ warn("{} ", arg)
+ else
+ warn("\n");
return parseError(tokenizer, code.source_token, "example compile crashed");
},
}
@@ -869,25 +914,36 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
var test_args = std.ArrayList([]const u8).init(allocator);
defer test_args.deinit();
- try test_args.appendSlice([][]const u8 {zig_exe, "test", tmp_source_file_name});
+ try test_args.appendSlice([][]const u8{
+ zig_exe,
+ "test",
+ tmp_source_file_name,
+ });
switch (code.mode) {
builtin.Mode.Debug => {},
builtin.Mode.ReleaseSafe => try test_args.append("--release-safe"),
builtin.Mode.ReleaseFast => try test_args.append("--release-fast"),
+ builtin.Mode.ReleaseSmall => try test_args.append("--release-small"),
}
- const result = try os.ChildProcess.exec(allocator, test_args.toSliceConst(), null, null, max_doc_file_size);
+ const result = try os.ChildProcess.exec(allocator, test_args.toSliceConst(), null, &env_map, max_doc_file_size);
switch (result.term) {
os.ChildProcess.Term.Exited => |exit_code| {
if (exit_code == 0) {
warn("{}\nThe following command incorrectly succeeded:\n", result.stderr);
- for (test_args.toSliceConst()) |arg| warn("{} ", arg) else warn("\n");
+ for (test_args.toSliceConst()) |arg|
+ warn("{} ", arg)
+ else
+ warn("\n");
return parseError(tokenizer, code.source_token, "example test incorrectly succeeded");
}
},
else => {
warn("{}\nThe following command crashed:\n", result.stderr);
- for (test_args.toSliceConst()) |arg| warn("{} ", arg) else warn("\n");
+ for (test_args.toSliceConst()) |arg|
+ warn("{} ", arg)
+ else
+ warn("\n");
return parseError(tokenizer, code.source_token, "example compile crashed");
},
}
@@ -905,9 +961,20 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
var build_args = std.ArrayList([]const u8).init(allocator);
defer build_args.deinit();
- try build_args.appendSlice([][]const u8 {zig_exe, "build-obj", tmp_source_file_name,
- "--color", "on",
- "--output", tmp_obj_file_name});
+ const name_plus_h_ext = try std.fmt.allocPrint(allocator, "{}.h", code.name);
+ const output_h_file_name = try os.path.join(allocator, tmp_dir_name, name_plus_h_ext);
+
+ try build_args.appendSlice([][]const u8{
+ zig_exe,
+ "build-obj",
+ tmp_source_file_name,
+ "--color",
+ "on",
+ "--output",
+ tmp_obj_file_name,
+ "--output-h",
+ output_h_file_name,
+ });
if (!code.is_inline) {
try out.print("
\n");
}
} else {
- _ = exec(allocator, build_args.toSliceConst()) catch return parseError(
- tokenizer, code.source_token, "example failed to compile");
+ _ = exec(allocator, &env_map, build_args.toSliceConst()) catch return parseError(tokenizer, code.source_token, "example failed to compile");
}
if (!code.is_inline) {
try out.print("$ zig build-obj {}.zig", code.name);
@@ -927,21 +994,33 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
try out.print(" --release-fast");
}
},
+ builtin.Mode.ReleaseSmall => {
+ try build_args.append("--release-small");
+ if (!code.is_inline) {
+ try out.print(" --release-small");
+ }
+ },
}
if (maybe_error_match) |error_match| {
- const result = try os.ChildProcess.exec(allocator, build_args.toSliceConst(), null, null, max_doc_file_size);
+ const result = try os.ChildProcess.exec(allocator, build_args.toSliceConst(), null, &env_map, max_doc_file_size);
switch (result.term) {
os.ChildProcess.Term.Exited => |exit_code| {
if (exit_code == 0) {
warn("{}\nThe following command incorrectly succeeded:\n", result.stderr);
- for (build_args.toSliceConst()) |arg| warn("{} ", arg) else warn("\n");
+ for (build_args.toSliceConst()) |arg|
+ warn("{} ", arg)
+ else
+ warn("\n");
return parseError(tokenizer, code.source_token, "example build incorrectly succeeded");
}
},
else => {
warn("{}\nThe following command crashed:\n", result.stderr);
- for (build_args.toSliceConst()) |arg| warn("{} ", arg) else warn("\n");
+ for (build_args.toSliceConst()) |arg|
+ warn("{} ", arg)
+ else
+ warn("\n");
return parseError(tokenizer, code.source_token, "example compile crashed");
},
}
@@ -956,8 +1035,7 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
try out.print("
If you search for something specific in this documentation and do not find it,
- please file an issue or say something on IRC.
+ please file an issue or say something on IRC.
The code samples in this document are compiled and tested as part of the main test suite of Zig.
@@ -134,6 +134,58 @@ pub fn main() void {
{#see_also|Values|@import|Errors|Root Source File#}
{#header_close#}
+ {#header_open|Comments#}
+ {#code_begin|test|comments#}
+const assert = @import("std").debug.assert;
+
+test "comments" {
+    // Comments in Zig start with "//" and end at the next LF byte (end of line).
+    // The below line is a comment, and won't be executed.
+
+    //assert(false);
+
+    const x = true; // another comment
+    assert(x);
+}
+ {#code_end#}
+
+ There are no multiline comments in Zig (e.g. like /* */
+ comments in C). This helps allow Zig to have the property that each line
+ of code can be tokenized out of context.
+
+ A doc comment is one that begins with exactly three slashes (i.e.
+ /// but not ////);
+ multiple doc comments in a row are merged together to form a multiline
+ doc comment. The doc comment documents whatever immediately follows it.
+
+ Doc comments are only allowed in certain places; eventually, it will
+ become a compile error to have a doc comment in an unexpected place, such as
+ in the middle of an expression, or just before a non-doc comment.
+
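As a quick illustration of the doc-comment form described above, here is a minimal sketch (an editor's addition, not part of this changeset; addOne is an invented name):

const assert = @import("std").debug.assert;

/// Adds one to the given integer.
/// Consecutive /// lines are merged into a single doc comment
/// attached to the declaration that follows.
fn addOne(x: i32) i32 {
    return x + 1;
}

test "doc comments attach to the following declaration" {
    assert(addOne(1) == 2);
}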
+ {#header_close#} + {#header_close#} {#header_open|Values#} {#code_begin|exe|values#} const std = @import("std"); @@ -156,18 +208,18 @@ pub fn main() void { true or false, !true); - // nullable - var nullable_value: ?[]const u8 = null; - assert(nullable_value == null); + // optional + var optional_value: ?[]const u8 = null; + assert(optional_value == null); - warn("\nnullable 1\ntype: {}\nvalue: {}\n", - @typeName(@typeOf(nullable_value)), nullable_value); + warn("\noptional 1\ntype: {}\nvalue: {}\n", + @typeName(@typeOf(optional_value)), optional_value); - nullable_value = "hi"; - assert(nullable_value != null); + optional_value = "hi"; + assert(optional_value != null); - warn("\nnullable 2\ntype: {}\nvalue: {}\n", - @typeName(@typeOf(nullable_value)), nullable_value); + warn("\noptional 2\ntype: {}\nvalue: {}\n", + @typeName(@typeOf(optional_value)), optional_value); // error union var number_or_error: error!i32 = error.ArgNotFound; @@ -367,20 +419,25 @@ pub fn main() void {f16floatf32floatf64doublef128boolcomptime_intcomptime_floatnullnullnullundefined
- In this example the variable c_string_literal has type &const char and
+ In this example the variable c_string_literal has type [*]const char and
has a terminating null byte.
Use undefined to leave variables uninitialized:
+ undefined can be {#link|implicitly cast|Implicit Casts#} to any type.
+ Once this happens, it is no longer possible to detect that the value is undefined.
+ undefined means the value could be anything, even something that is nonsense
+ according to the type. Translated into English, undefined means "Not a meaningful
+ value. Using this value would be a bug. The value will be unused, or overwritten before being used."
+
+ In {#link|Debug#} mode, Zig writes 0xaa bytes to undefined memory. This is to catch
+ bugs early, and to help detect use of undefined memory in a debugger.
+
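A small sketch of the rule above (editor's illustration, not part of the diff): an undefined variable must be written before it is read.

const assert = @import("std").debug.assert;

test "assign an undefined variable before reading it" {
    var x: i32 = undefined; // holds no meaningful value yet; 0xaa bytes in Debug mode
    x = 1;                  // write it before any read
    assert(x == 1);
}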
Zig has the following floating point types:
+f16 - IEEE-754-2008 binary16
f32 - IEEE-754-2008 binary32
f64 - IEEE-754-2008 binary64
f128 - IEEE-754-2008 binary128
c_longdouble - matches long double for the target C ABI
+ Float literals have type comptime_float which is guaranteed to hold at least all possible values
+ that the largest other floating point type can hold. Float literals {#link|implicitly cast|Implicit Casts#} to any other type.
+
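For example, the same float literal can initialize any runtime float type; a minimal sketch (editor's addition) in the syntax used by this changeset:

const assert = @import("std").debug.assert;

test "float literals implicitly cast to runtime float types" {
    const pi = 3.14159;   // comptime_float
    var single: f32 = pi; // implicit cast to f32
    var double: f64 = pi; // the same literal also fits f64
    assert(single > 3.0 and double > 3.0);
}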
a ?? ba orelse ba is null,
@@ -998,24 +1090,24 @@ a ^= bconst value: ?u32 = null;
-const unwrapped = value ?? 1234;
+const unwrapped = value orelse 1234;
unwrapped == 1234
??aa.?a ?? unreachable
+ a orelse unreachable
const value: ?u32 = 5678;
-??value == 5678
+value.? == 5678
a is false, returns false
- without evaluating b. Otherwise, retuns b.
+ without evaluating b. Otherwise, returns b.
false and true == false
@@ -1062,7 +1154,7 @@ unwrapped == 1234
a is true, returns true
- without evaluating b. Otherwise, retuns b.
+ without evaluating b. Otherwise, returns b.
false or true == true
@@ -1103,7 +1195,7 @@ unwrapped == 1234
a == null*aa.*const x: u32 = 1234;
const ptr = &x;
-*x == 1234
+x.* == 1234
const x: u32 = 1234;
const ptr = &x;
-*x == 1234
+x.* == 1234
+ a || bconst A = error{One};
+const B = error{Two};
+(A || B) == error{One, Two}
x() x[] x.y
a!b
-!x -x -%x ~x *x &x ?x ??x
-x{}
-! * / % ** *%
+!x -x -%x ~x &x ?x
+x{} x.* x.?
+! * / % ** *% ||
+ - ++ +% -%
<< >>
&
@@ -1278,7 +1386,7 @@ x{}
== != < > <= >=
and
or
-?? catch
+orelse catch
= *= /= %= += -= <<= >>= &= ^= |=
{#header_close#}
{#header_close#}
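The table above replaces the old ?? forms; a short sketch (editor's illustration, not part of this changeset) of the new orelse and .? spellings:

const assert = @import("std").debug.assert;

test "orelse and .? replace the old ?? operators" {
    const absent: ?u32 = null;
    assert((absent orelse 1234) == 1234); // default when null

    const present: ?u32 = 5678;
    assert(present.? == 5678); // unwrap, asserting non-null
}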
@@ -1288,7 +1396,7 @@ const assert = @import("std").debug.assert;
const mem = @import("std").mem;
// array literal
-const message = []u8{'h', 'e', 'l', 'l', 'o'};
+const message = []u8{ 'h', 'e', 'l', 'l', 'o' };
// get the size of an array
comptime {
@@ -1316,7 +1424,7 @@ var some_integers: [100]i32 = undefined;
test "modify an array" {
for (some_integers) |*item, i| {
- *item = i32(i);
+ item.* = @intCast(i32, i);
}
assert(some_integers[10] == 10);
assert(some_integers[99] == 99);
@@ -1324,11 +1432,11 @@ test "modify an array" {
// array concatenation works if the values are known
// at compile time
-const part_one = []i32{1, 2, 3, 4};
-const part_two = []i32{5, 6, 7, 8};
+const part_one = []i32{ 1, 2, 3, 4 };
+const part_two = []i32{ 5, 6, 7, 8 };
const all_of_it = part_one ++ part_two;
comptime {
- assert(mem.eql(i32, all_of_it, []i32{1,2,3,4,5,6,7,8}));
+ assert(mem.eql(i32, all_of_it, []i32{ 1, 2, 3, 4, 5, 6, 7, 8 }));
}
// remember that string literals are arrays
@@ -1357,9 +1465,9 @@ comptime {
var fancy_array = init: {
var initial_value: [10]Point = undefined;
for (initial_value) |*pt, i| {
- *pt = Point {
- .x = i32(i),
- .y = i32(i) * 2,
+ pt.* = Point{
+ .x = @intCast(i32, i),
+ .y = @intCast(i32, i) * 2,
};
}
break :init initial_value;
@@ -1377,7 +1485,7 @@ test "compile-time array initalization" {
// call a function to initialize an array
var more_points = []Point{makePoint(3)} ** 10;
fn makePoint(x: i32) Point {
- return Point {
+ return Point{
.x = x,
.y = x * 2,
};
@@ -1400,39 +1508,37 @@ test "address of syntax" {
const x_ptr = &x;
// Deference a pointer:
- assert(*x_ptr == 1234);
+ assert(x_ptr.* == 1234);
// When you get the address of a const variable, you get a const pointer.
- assert(@typeOf(x_ptr) == &const i32);
+ assert(@typeOf(x_ptr) == *const i32);
// If you want to mutate the value, you'd need an address of a mutable variable:
var y: i32 = 5678;
const y_ptr = &y;
- assert(@typeOf(y_ptr) == &i32);
- *y_ptr += 1;
- assert(*y_ptr == 5679);
+ assert(@typeOf(y_ptr) == *i32);
+ y_ptr.* += 1;
+ assert(y_ptr.* == 5679);
}
test "pointer array access" {
- // Pointers do not support pointer arithmetic. If you
- // need such a thing, use array index syntax:
-
- var array = []u8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
- const ptr = &array[1];
+ // Taking an address of an individual element gives a
+ // pointer to a single item. This kind of pointer
+ // does not support pointer arithmetic.
+ var array = []u8{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+ const ptr = &array[2];
+ assert(@typeOf(ptr) == *u8);
assert(array[2] == 3);
- ptr[1] += 1;
+ ptr.* += 1;
assert(array[2] == 4);
}
test "pointer slicing" {
- // In Zig, we prefer using slices over null-terminated pointers.
- // You can turn a pointer into a slice using slice syntax:
- var array = []u8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
- const ptr = &array[1];
- const slice = ptr[1..3];
-
- assert(slice.ptr == &ptr[1]);
+ // In Zig, we prefer slices over pointers to null-terminated arrays.
+ // You can turn an array into a slice using slice syntax:
+ var array = []u8{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+ const slice = array[2..4];
assert(slice.len == 2);
// Slices have bounds checking and are therefore protected
@@ -1448,14 +1554,14 @@ comptime {
// @ptrCast.
var x: i32 = 1;
const ptr = &x;
- *ptr += 1;
+ ptr.* += 1;
x += 1;
- assert(*ptr == 3);
+ assert(ptr.* == 3);
}
test "@ptrToInt and @intToPtr" {
// To convert an integer address into a pointer, use @intToPtr:
- const ptr = @intToPtr(&i32, 0xdeadbeef);
+ const ptr = @intToPtr(*i32, 0xdeadbeef);
// To convert a pointer to an integer, use @ptrToInt:
const addr = @ptrToInt(ptr);
@@ -1467,7 +1573,7 @@ test "@ptrToInt and @intToPtr" {
comptime {
// Zig is able to do this at compile-time, as long as
// ptr is never dereferenced.
- const ptr = @intToPtr(&i32, 0xdeadbeef);
+ const ptr = @intToPtr(*i32, 0xdeadbeef);
const addr = @ptrToInt(ptr);
assert(@typeOf(addr) == usize);
assert(addr == 0xdeadbeef);
@@ -1477,39 +1583,39 @@ test "volatile" {
// In Zig, loads and stores are assumed to not have side effects.
// If a given load or store should have side effects, such as
// Memory Mapped Input/Output (MMIO), use `volatile`:
- const mmio_ptr = @intToPtr(&volatile u8, 0x12345678);
+ const mmio_ptr = @intToPtr(*volatile u8, 0x12345678);
// Now loads and stores with mmio_ptr are guaranteed to all happen
// and in the same order as in source code.
- assert(@typeOf(mmio_ptr) == &volatile u8);
+ assert(@typeOf(mmio_ptr) == *volatile u8);
}
-test "nullable pointers" {
- // Pointers cannot be null. If you want a null pointer, use the nullable
- // prefix `?` to make the pointer type nullable.
- var ptr: ?&i32 = null;
+test "optional pointers" {
+ // Pointers cannot be null. If you want a null pointer, use the optional
+ // prefix `?` to make the pointer type optional.
+ var ptr: ?*i32 = null;
var x: i32 = 1;
ptr = &x;
- assert(*??ptr == 1);
+ assert(ptr.?.* == 1);
- // Nullable pointers are the same size as normal pointers, because pointer
+ // Optional pointers are the same size as normal pointers, because pointer
// value 0 is used as the null value.
- assert(@sizeOf(?&i32) == @sizeOf(&i32));
+ assert(@sizeOf(?*i32) == @sizeOf(*i32));
}
test "pointer casting" {
// To convert one pointer type to another, use @ptrCast. This is an unsafe
// operation that Zig cannot protect you against. Use @ptrCast only when other
// conversions are not possible.
- const bytes align(@alignOf(u32)) = []u8{0x12, 0x12, 0x12, 0x12};
- const u32_ptr = @ptrCast(&const u32, &bytes[0]);
- assert(*u32_ptr == 0x12121212);
+ const bytes align(@alignOf(u32)) = []u8{ 0x12, 0x12, 0x12, 0x12 };
+ const u32_ptr = @ptrCast(*const u32, &bytes[0]);
+ assert(u32_ptr.* == 0x12121212);
// Even this example is contrived - there are better ways to do the above than
// pointer casting. For example, using a slice narrowing cast:
- const u32_value = ([]const u32)(bytes[0..])[0];
+ const u32_value = @bytesToSlice(u32, bytes[0..])[0];
assert(u32_value == 0x12121212);
// And even another way, the most straightforward way to do it:
@@ -1518,7 +1624,7 @@ test "pointer casting" {
test "pointer child type" {
// pointer types have a `child` field which tells you the type they point to.
- assert((&u32).Child == u32);
+ assert((*u32).Child == u32);
}
{#code_end#}
{#header_open|Alignment#}
@@ -1543,15 +1649,15 @@ const builtin = @import("builtin");
test "variable alignment" {
var x: i32 = 1234;
const align_of_i32 = @alignOf(@typeOf(x));
- assert(@typeOf(&x) == &i32);
- assert(&i32 == &align(align_of_i32) i32);
+ assert(@typeOf(&x) == *i32);
+ assert(*i32 == *align(align_of_i32) i32);
if (builtin.arch == builtin.Arch.x86_64) {
- assert((&i32).alignment == 4);
+ assert((*i32).alignment == 4);
}
}
{#code_end#}
- In the same way that a &i32 can be implicitly cast to a
- &const i32, a pointer with a larger alignment can be implicitly
+
In the same way that a *i32 can be {#link|implicitly cast|Implicit Casts#} to a
+ *const i32, a pointer with a larger alignment can be implicitly
cast to a pointer with a smaller alignment, but not vice versa.
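A minimal sketch of that one-way coercion (editor's addition; over_aligned is an invented name), assuming the *align(N) T syntax introduced in this changeset:

const assert = @import("std").debug.assert;

var over_aligned: u32 align(8) = 1234;

test "larger alignment coerces to smaller alignment" {
    const p8: *align(8) u32 = &over_aligned;
    const p4: *align(4) u32 = p8; // allowed: alignment 8 satisfies alignment 4
    // const p8_again: *align(8) u32 = p4; // the reverse would be a compile error
    assert(p4.* == 1234);
}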
@@ -1565,8 +1671,8 @@ var foo: u8 align(4) = 100;
test "global variable alignment" {
assert(@typeOf(&foo).alignment == 4);
- assert(@typeOf(&foo) == &align(4) u8);
- const slice = (&foo)[0..1];
+ assert(@typeOf(&foo) == *align(4) u8);
+ const slice = (*[1]u8)(&foo)[0..];
assert(@typeOf(slice) == []align(4) u8);
}
@@ -1592,13 +1698,13 @@ test "function alignment" {
const assert = @import("std").debug.assert;
test "pointer alignment safety" {
- var array align(4) = []u32{0x11111111, 0x11111111};
- const bytes = ([]u8)(array[0..]);
+ var array align(4) = []u32{ 0x11111111, 0x11111111 };
+ const bytes = @sliceToBytes(array[0..]);
assert(foo(bytes) == 0x11111111);
}
fn foo(bytes: []u8) u32 {
const slice4 = bytes[1..5];
- const int_slice = ([]u32)(@alignCast(4, slice4));
+ const int_slice = @bytesToSlice(u32, @alignCast(4, slice4));
return int_slice[0];
}
{#code_end#}
@@ -1610,10 +1716,10 @@ fn foo(bytes: []u8) u32 {
u8 can alias any memory.
As an example, this code produces undefined behavior:
-*@ptrCast(&u32, f32(12.34))
+ @ptrCast(*u32, f32(12.34)).*
Instead, use {#link|@bitCast#}:
@bitCast(u32, f32(12.34))
- As an added benefit, the @bitcast version works at compile-time.
As an added benefit, the @bitCast version works at compile-time.
This is one reason we prefer slices to pointers.
@@ -1663,7 +1778,7 @@ test "using slices for strings" { test "slice pointer" { var array: [10]u8 = undefined; - const ptr = &array[0]; + const ptr = &array; // You can use slicing syntax to convert a pointer into a slice: const slice = ptr[0..5]; @@ -1681,8 +1796,8 @@ test "slice pointer" { test "slice widening" { // Zig supports slice widening and slice narrowing. Cast a slice of u8 // to a slice of anything else, and Zig will perform the length conversion. - const array align(@alignOf(u32)) = []u8{0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13}; - const slice = ([]const u32)(array[0..]); + const array align(@alignOf(u32)) = []u8{ 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13 }; + const slice = @bytesToSlice(u32, array[0..]); assert(slice.len == 2); assert(slice[0] == 0x12121212); assert(slice[1] == 0x13131313); @@ -1736,7 +1851,7 @@ const Vec3 = struct { }; } - pub fn dot(self: &const Vec3, other: &const Vec3) f32 { + pub fn dot(self: *const Vec3, other: *const Vec3) f32 { return self.x * other.x + self.y * other.y + self.z * other.z; } }; @@ -1768,7 +1883,7 @@ test "struct namespaced variable" { // struct field order is determined by the compiler for optimal performance. // however, you can still calculate a struct base pointer given a field pointer: -fn setYBasedOnX(x: &f32, y: f32) void { +fn setYBasedOnX(x: *f32, y: f32) void { const point = @fieldParentPtr(Point, "x", x); point.y = y; } @@ -1786,13 +1901,13 @@ test "field parent pointer" { fn LinkedList(comptime T: type) type { return struct { pub const Node = struct { - prev: ?&Node, - next: ?&Node, + prev: ?*Node, + next: ?*Node, data: T, }; - first: ?&Node, - last: ?&Node, + first: ?*Node, + last: ?*Node, len: usize, }; } @@ -1824,7 +1939,7 @@ test "linked list" { .last = &node, .len = 1, }; - assert((??list2.first).data == 1234); + assert(list2.first.?.data == 1234); } {#code_end#} {#see_also|comptime|@fieldParentPtr#} @@ -1854,9 +1969,9 @@ const Value = enum(u2) { // Now you can cast between u2 and Value. // The ordinal value starts from 0, counting up for each member. test "enum ordinal value" { - assert(u2(Value.Zero) == 0); - assert(u2(Value.One) == 1); - assert(u2(Value.Two) == 2); + assert(@enumToInt(Value.Zero) == 0); + assert(@enumToInt(Value.One) == 1); + assert(@enumToInt(Value.Two) == 2); } // You can override the ordinal value for an enum. @@ -1866,9 +1981,9 @@ const Value2 = enum(u32) { Million = 1000000, }; test "set enum ordinal value" { - assert(u32(Value2.Hundred) == 100); - assert(u32(Value2.Thousand) == 1000); - assert(u32(Value2.Million) == 1000000); + assert(@enumToInt(Value2.Hundred) == 100); + assert(@enumToInt(Value2.Thousand) == 1000); + assert(@enumToInt(Value2.Million) == 1000000); } // Enums can have methods, the same as structs and unions. @@ -2039,8 +2154,8 @@ const Variant = union(enum) { Int: i32, Bool: bool, - fn truthy(self: &const Variant) bool { - return switch (*self) { + fn truthy(self: *const Variant) bool { + return switch (self.*) { Variant.Int => |x_int| x_int != 0, Variant.Bool => |x_bool| x_bool, }; @@ -2151,7 +2266,7 @@ test "switch enum" { // A reference to the matched value can be obtained using `*` syntax. Item.C => |*item| blk: { - (*item).x += 1; + item.*.x += 1; break :blk 6; }, @@ -2176,7 +2291,7 @@ test "switch inside function" { // On an OS other than fuchsia, block is not even analyzed, // so this compile error is not triggered. // On fuchsia this compile error would be triggered. 
- @compileError("windows not supported"); + @compileError("fuchsia not supported"); }, else => {}, } @@ -2185,21 +2300,28 @@ test "switch inside function" { {#see_also|comptime|enum|@compileError|Compile Variables#} {#header_close#} {#header_open|while#} ++ A while loop is used to repeatedly execute an expression until + some condition is no longer true. +
{#code_begin|test|while#} const assert = @import("std").debug.assert; test "while basic" { - // A while loop is used to repeatedly execute an expression until - // some condition is no longer true. var i: usize = 0; while (i < 10) { i += 1; } assert(i == 10); } + {#code_end#} +
+ Use break to exit a while loop early.
+
+ Use continue to jump back to the beginning of the loop.
+
+ While loops support a continue expression which is executed when the loop
+ is continued. The continue keyword respects this expression.
+
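+ For example, a loop using break, continue, and a continue expression might look like this:
+ {#code_begin|syntax#}
+const assert = @import("std").debug.assert;
+
+test "while with continue expression" {
+    var sum: usize = 0;
+    var i: usize = 0;
+    // The (i += 1) continue expression runs every time the loop continues.
+    while (i <= 10) : (i += 1) {
+        if (i == 2) continue; // skip 2; the continue expression still runs
+        if (i == 5) break; // exit the loop early
+        sum += i;
+    }
+    // 0 + 1 + 3 + 4
+    assert(sum == 8);
+}
+ {#code_end#}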
+ While loops are expressions. The result of the expression is the
+ result of the else clause of a while loop, which is executed when
+ the condition of the while loop is tested as false.
+
+ break, like return, accepts a value
+ parameter. This is the result of the while expression.
+ When you break from a while loop, the else branch is not
+ evaluated.
+
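+ A sketch of a while expression whose result comes from a break value or from the else clause:
+ {#code_begin|syntax#}
+fn rangeHasNumber(begin: usize, end: usize, number: usize) bool {
+    var i = begin;
+    // The break value becomes the result; the else value is used
+    // only when the condition becomes false.
+    const found = while (i < end) : (i += 1) {
+        if (i == number) {
+            break true;
+        }
+    } else false;
+    return found;
+}
+ {#code_end#}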
+ Just like {#link|if#} expressions, while loops can take an optional as the + condition and capture the payload. When {#link|null#} is encountered the loop + exits. +
+
+ When the |x| syntax is present on a while expression,
+ the while condition must have an {#link|Optional Type#}.
+
+ The else branch is allowed on optional iteration. In this case, it will
+ be executed on the first null value encountered.
+
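+ For instance, looping until a function returns null might look like this:
+ {#code_begin|syntax#}
+var numbers_left: u32 = 4;
+
+fn eventuallyNullSequence() ?u32 {
+    if (numbers_left == 0) {
+        return null;
+    }
+    numbers_left -= 1;
+    return numbers_left;
+}
+
+test "while with optional condition" {
+    var sum: u32 = 0;
+    // The loop body sees the unwrapped payload; the loop exits on null.
+    while (eventuallyNullSequence()) |value| {
+        sum += value;
+    }
+}
+ {#code_end#}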
+ Just like {#link|if#} expressions, while loops can take an error union as + the condition and capture the payload or the error code. When the + condition results in an error code the else branch is evaluated and + the loop is finished. +
+
+ When the else |x| syntax is present on a while expression,
+ the while condition must have an {#link|Error Union Type#}.
+
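+ For instance, looping until a function returns an error code might look like this:
+ {#code_begin|syntax#}
+const assert = @import("std").debug.assert;
+
+var numbers_left: u32 = 4;
+
+fn eventuallyErrorSequence() error!u32 {
+    if (numbers_left == 0) {
+        return error.ReachedZero;
+    }
+    numbers_left -= 1;
+    return numbers_left;
+}
+
+test "while with error union condition" {
+    var sum: u32 = 0;
+    while (eventuallyErrorSequence()) |value| {
+        sum += value;
+    } else |err| {
+        // The else branch runs once the condition produces an error code.
+        assert(err == error.ReachedZero);
+    }
+}
+ {#code_end#}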
+ While loops can be inlined. This causes the loop to be unrolled, which + allows the code to do some things which only work at compile time, + such as use types as first class values. +
+ {#code_begin|test#} +const assert = @import("std").debug.assert; test "inline while loop" { - // While loops can be inlined. This causes the loop to be unrolled, which - // allows the code to do some things which only work at compile time, - // such as use types as first class values. comptime var i = 0; var sum: usize = 0; inline while (i < 3) : (i += 1) { @@ -2332,7 +2506,17 @@ fn typeNameLength(comptime T: type) usize { return @typeName(T).len; } {#code_end#} - {#see_also|if|Nullables|Errors|comptime|unreachable#} +
+ It is recommended to use inline loops only for one of these reasons:
+
+ For loops can be inlined. This causes the loop to be unrolled, which + allows the code to do some things which only work at compile time, + such as use types as first class values. + The capture value and iterator value of inlined for loops are + compile-time known. +
+ {#code_begin|test#} +const assert = @import("std").debug.assert; test "inline for loop" { const nums = []i32{2, 4, 6}; - // For loops can be inlined. This causes the loop to be unrolled, which - // allows the code to do some things which only work at compile time, - // such as use types as first class values. - // The capture value and iterator value of inlined for loops are - // compile-time known. var sum: usize = 0; inline for (nums) |i| { const T = switch (i) { @@ -2425,6 +2614,16 @@ fn typeNameLength(comptime T: type) usize { return @typeName(T).len; } {#code_end#} +
+ It is recommended to use inline loops only for one of these reasons:
+
- In Zig, structs, unions, and enums with payloads cannot be passed by value - to a function. -
- {#code_begin|test_err|not copyable; cannot pass by value#} -const Foo = struct { - x: i32, -}; - -fn bar(foo: Foo) void {} - -test "pass aggregate type by value to function" { - bar(Foo {.x = 12,}); -} - {#code_end#} -
- Instead, one must use &const. Zig allows implicitly casting something
- to a const pointer to it:
+ In Zig, structs, unions, and enums with payloads can be passed directly to a function:
- However, - the C ABI does allow passing structs and unions by value. So functions which - use the C calling convention may pass structs and unions by value. + In this case, the value may be passed by reference, or by value, whichever way + Zig decides will be faster. +
++ For extern functions, Zig follows the C ABI for passing structs and unions by value.
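+ For example, a struct argument can simply be passed directly; how it is passed is up to the compiler:
+ {#code_begin|syntax#}
+const assert = @import("std").debug.assert;
+
+const Foo = struct {
+    x: i32,
+};
+
+fn bar(foo: Foo) i32 {
+    return foo.x;
+}
+
+test "pass struct to function" {
+    assert(bar(Foo{ .x = 12 }) == 12);
+}
+ {#code_end#}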
{#header_close#} {#header_open|Function Reflection#} @@ -2827,10 +3017,10 @@ test "fn reflection" {
The number of unique error values across the entire compilation should determine the size of the error set type.
- However right now it is hard coded to be a u16. See #768.
+ However right now it is hard coded to be a u16. See #768.
- You can implicitly cast an error from a subset to its superset: + You can {#link|implicitly cast|Implicit Casts#} an error from a subset to its superset:
{#code_begin|test#} const std = @import("std"); @@ -2963,7 +3153,7 @@ test "parse u64" {
Within the function definition, you can see some return statements that return
an error, and at the bottom a return statement that returns a u64.
- Both types implicitly cast to error!u64.
+ Both types {#link|implicitly cast|Implicit Casts#} to error!u64.
What it looks like to use this function varies depending on what you're @@ -2975,6 +3165,7 @@ test "parse u64" {
If you want to provide a default value, you can use the catch binary operator:
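+ For instance, using the parseU64 function from above, one might write:
+ {#code_begin|syntax#}
+fn doAThing(str: []u8) void {
+    // number is the successfully parsed value, or 13 if parsing failed
+    const number = parseU64(str, 10) catch 13;
+    // ...
+}
+ {#code_end#}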
catch operator must
match the unwrapped error union type, or be of type noreturn.
+ {#header_close#}
+ {#header_open|try#}
Let's say you wanted to return the error if you got one, otherwise continue with the function logic:
{#code_begin|syntax#} @@ -3009,6 +3202,7 @@ fn doAThing(str: []u8) !void { from the current function with the same error. Otherwise, the expression results in the unwrapped value. + {#header_close#}Maybe you know with complete certainty that an expression will never be an error. In this case you can do this: @@ -3023,7 +3217,7 @@ fn doAThing(str: []u8) !void {
Finally, you may want to take a different action for every situation. For that, we combine
- the if and switch expression:
+ the {#link|if#} and {#link|switch#} expression:
The other component to error handling is defer statements.
- In addition to an unconditional defer, Zig has errdefer,
+ In addition to an unconditional {#link|defer#}, Zig has errdefer,
which evaluates the deferred expression on block exit path if and only if
the function returned with an error from the block.
A couple of other tidbits about error handling:
@@ -3109,33 +3305,284 @@ test "error union" { comptime assert(@typeOf(foo).ErrorSet == error); } {#code_end#} -TODO the || operator for error sets
+ Use the || operator to merge two error sets together. The resulting
+ error set contains the errors of both error sets. Doc comments from the left-hand
+ side override doc comments from the right-hand side. In this example, the doc
+ comment for C.PathNotFound is A doc comment.
+
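+ A sketch of such a merge:
+ {#code_begin|syntax#}
+const A = error {
+    NotDir,
+
+    /// A doc comment
+    PathNotFound,
+};
+
+const B = error {
+    OutOfMemory,
+
+    /// B doc comment
+    PathNotFound,
+};
+
+// C contains NotDir, OutOfMemory, and PathNotFound.
+// The doc comment for C.PathNotFound comes from A, the left-hand side.
+const C = A || B;
+ {#code_end#}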
+ This is especially useful for functions which return different error sets depending
+ on {#link|comptime#} branches. For example, the Zig standard library uses
+ LinuxFileOpenError || WindowsFileOpenError for the error set of opening
+ files.
+
TODO
++ Because many functions in Zig return a possible error, Zig supports inferring the error set. + To infer the error set for a function, use this syntax: +
+{#code_begin|test#} +// With an inferred error set +pub fn add_inferred(comptime T: type, a: T, b: T) !T { + var answer: T = undefined; + return if (@addWithOverflow(T, a, b, &answer)) error.Overflow else answer; +} + +// With an explicit error set +pub fn add_explicit(comptime T: type, a: T, b: T) Error!T { + var answer: T = undefined; + return if (@addWithOverflow(T, a, b, &answer)) error.Overflow else answer; +} + +const Error = error { + Overflow, +}; + +const std = @import("std"); + +test "inferred error set" { + if (add_inferred(u8, 255, 1)) |_| unreachable else |err| switch (err) { + error.Overflow => {}, // ok + } +} +{#code_end#} ++ When a function has an inferred error set, that function becomes generic and thus it becomes + trickier to do certain things with it, such as obtain a function pointer, or have an error + set that is consistent across different build targets. Additionally, inferred error sets + are incompatible with recursion. +
++ In these situations, it is recommended to use an explicit error set. You can generally start + with an empty error set and let compile errors guide you toward completing the set. +
++ These limitations may be overcome in a future version of Zig. +
{#header_close#} {#header_close#} {#header_open|Error Return Traces#} -TODO
- {#header_close#} - {#header_close#} - {#header_open|Nullables#}- One area that Zig provides safety without compromising efficiency or - readability is with the nullable type. + Error Return Traces show all the points in the code that an error was returned to the calling function. This makes it practical to use {#link|try#} everywhere and then still be able to know what happened if an error ends up bubbling all the way out of your application. +
+ {#code_begin|exe_err#} +pub fn main() !void { + try foo(12); +} + +fn foo(x: i32) !void { + if (x >= 5) { + try bar(); + } else { + try bang2(); + } +} + +fn bar() !void { + if (baz()) { + try quux(); + } else |err| switch (err) { + error.FileNotFound => try hello(), + else => try another(), + } +} + +fn baz() !void { + try bang1(); +} + +fn quux() !void { + try bang2(); +} + +fn hello() !void { + try bang2(); +} + +fn another() !void { + try bang1(); +} + +fn bang1() !void { + return error.FileNotFound; +} + +fn bang2() !void { + return error.PermissionDenied; +} + {#code_end#} ++ Look closely at this example. This is no stack trace.
- The question mark symbolizes the nullable type. You can convert a type to a nullable
+ You can see that the final error bubbled up was PermissionDenied,
+ but the original error that started this whole thing was FileNotFound. In the bar function, the code handles the original error code,
+ and then returns another one, from the switch statement. Error Return Traces make this clear, whereas a stack trace would look like this:
+
+ Here, the stack trace does not explain how the control
+ flow in bar got to the hello() call.
+ One would have to open a debugger or further instrument the application
+ in order to find out. The error return trace, on the other hand,
+ shows exactly how the error bubbled up.
+
+ This debugging feature makes it easier to iterate quickly on code that + robustly handles all error conditions. This means that Zig developers + will naturally find themselves writing correct, robust code in order + to increase their development pace. +
++ Error Return Traces are enabled by default in {#link|Debug#} and {#link|ReleaseSafe#} builds and disabled by default in {#link|ReleaseFast#} and {#link|ReleaseSmall#} builds. +
++ There are a few ways to activate this error return tracing feature: +
+catch unreachable and you have not overridden the default panic handlerstd.debug.dumpStackTrace to print it. This function returns comptime-known {#link|null#} when building without error return tracing support.+ To analyze performance cost, there are two cases: +
+
+ For the case when no errors are returned, the cost is a single memory write operation, only in the first non-failable function in the call graph that calls a failable function, i.e. when a function returning void calls a function returning error.
+ This is to initialize this struct in the stack memory:
+
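+ Roughly, the initialization looks like this, where StackTrace is builtin.StackTrace, holding an index and a buffer of return addresses:
+ {#code_begin|syntax#}
+var addresses: [N]usize = undefined;
+var err_ret_trace = StackTrace{
+    .index = 0,
+    .instruction_addresses = addresses[0..],
+};
+ {#code_end#}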
+ Here, N is the maximum function call depth as determined by call graph analysis. Recursion is ignored and counts for 2. +
+
+ A pointer to StackTrace is passed as a secret parameter to every function that can return an error, but it's always the first parameter, so it can likely sit in a register and stay there.
+
+ That's it for the path when no errors occur. It's practically free in terms of performance. +
+
+ When generating the code for a function that returns an error, just before the return statement (only for the return statements that return errors), Zig generates a call to this function:
+
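+ The helper behaves roughly like this (the generated code may differ in detail):
+ {#code_begin|syntax#}
+// (sketch) records the return address and advances the trace index
+fn __zig_return_error(stack_trace: *StackTrace) void {
+    stack_trace.instruction_addresses[stack_trace.index] = @returnAddress();
+    stack_trace.index = (stack_trace.index + 1) % N;
+}
+ {#code_end#}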
+ The cost is 2 math operations plus some memory reads and writes. The memory accessed is constrained and should remain cached for the duration of the error return bubbling. +
+
+ As for code size cost, 1 function call before a return statement is no big deal. Even so,
+ I have a plan to make the call to
+ __zig_return_error a tail call, which brings the code size cost down to actually zero. What is a return statement in code without error return tracing can become a jump instruction in code with error return tracing.
+
+ One area that Zig provides safety without compromising efficiency or + readability is with the optional type. +
++ The question mark symbolizes the optional type. You can convert a type to an optional type by putting a question mark in front of it, like this:
{#code_begin|syntax#} // normal integer const normal_int: i32 = 1234; -// nullable integer -const nullable_int: ?i32 = 5678; +// optional integer +const optional_int: ?i32 = 5678; {#code_end#}
- Now the variable nullable_int could be an i32, or null.
+ Now the variable optional_int could be an i32, or null.
Instead of integers, let's talk about pointers. Null references are the source of many runtime @@ -3144,8 +3591,8 @@ const nullable_int: ?i32 = 5678;
Zig does not have them.
- Instead, you can use a nullable pointer. This secretly compiles down to a normal pointer, - since we know we can use 0 as the null value for the nullable type. But the compiler + Instead, you can use an optional pointer. This secretly compiles down to a normal pointer, + since we know we can use 0 as the null value for the optional type. But the compiler can check your work and make sure you don't assign null to something that can't be null.
@@ -3167,17 +3614,17 @@ struct Foo *do_a_thing(void) {
Zig code
{#code_begin|syntax#} // malloc prototype included for reference -extern fn malloc(size: size_t) ?&u8; +extern fn malloc(size: size_t) ?*u8; -fn doAThing() ?&Foo { - const ptr = malloc(1234) ?? return null; +fn doAThing() ?*Foo { + const ptr = malloc(1234) orelse return null; // ... } {#code_end#}
Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr"
- is &u8 not ?&u8. The ?? operator
- unwrapped the nullable type and therefore ptr is guaranteed to be non-null everywhere
+ is *u8 not ?*u8. The orelse keyword
+ unwrapped the optional type and therefore ptr is guaranteed to be non-null everywhere
it is used in the function.
@@ -3196,10 +3643,10 @@ fn doAThing() ?&Foo { In Zig you can accomplish the same thing:
{#code_begin|syntax#} -fn doAThing(nullable_foo: ?&Foo) void { +fn doAThing(optional_foo: ?*Foo) void { // do some stuff - if (nullable_foo) |foo| { + if (optional_foo) |foo| { doSomethingWithFoo(foo); } @@ -3208,7 +3655,7 @@ fn doAThing(nullable_foo: ?&Foo) void { {#code_end#}
Once again, the notable thing here is that inside the if block,
- foo is no longer a nullable pointer, it is a pointer, which
+ foo is no longer an optional pointer, it is a pointer, which
cannot be null.
@@ -3218,42 +3665,306 @@ fn doAThing(nullable_foo: ?&Foo) void { The optimizer can sometimes make better decisions knowing that pointer arguments cannot be null.
- {#header_open|Nullable Type#} -A nullable is created by putting ? in front of a type. You can use compile-time
- reflection to access the child type of a nullable:
An optional is created by putting ? in front of a type. You can use compile-time
+ reflection to access the child type of an optional:
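+ For example, assuming the optional type exposes its child type as Child:
+ {#code_begin|syntax#}
+const assert = @import("std").debug.assert;
+
+test "optional type" {
+    // Declare an optional and coerce from null:
+    var foo: ?i32 = null;
+
+    // Coerce from the child type of an optional:
+    foo = 1234;
+
+    // Use compile-time reflection to access the child type of the optional:
+    comptime assert(@typeOf(foo).Child == i32);
+}
+ {#code_end#}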
+ Just like {#link|undefined#}, null has its own type, and the only way to use it is to
+ cast it to a different type:
+
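+ For example:
+ {#code_begin|syntax#}
+// null is cast (implicitly) to ?i32 here:
+const optional_value: ?i32 = null;
+ {#code_end#}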
TODO: explain implicit vs explicit casting
-TODO: resolve peer types builtin
-TODO: truncate builtin
-TODO: bitcast builtin
-TODO: int to ptr builtin
-TODO: ptr to int builtin
-TODO: ptrcast builtin
-TODO: explain number literals vs concrete types
++ A type cast converts a value of one type to another. + Zig has {#link|Implicit Casts#} for conversions that are known to be completely safe and unambiguous, + and {#link|Explicit Casts#} for conversions that one would not want to happen on accident. + There is also a third kind of type conversion called {#link|Peer Type Resolution#} for + the case when a result type must be decided given multiple operand types. +
+ {#header_open|Implicit Casts#} ++ An implicit cast occurs when one type is expected, but different type is provided: +
+ {#code_begin|test#} +test "implicit cast - variable declaration" { + var a: u8 = 1; + var b: u16 = a; +} + +test "implicit cast - function call" { + var a: u8 = 1; + foo(a); +} + +fn foo(b: u16) void {} + +test "implicit cast - invoke a type as a function" { + var a: u8 = 1; + var b = u16(a); +} + {#code_end#} ++ Implicit casts are only allowed when it is completely unambiguous how to get from one type to another, + and the transformation is guaranteed to be safe. +
+ {#header_open|Implicit Cast: Stricter Qualification#} ++ Values which have the same representation at runtime can be cast to increase the strictness + of the qualifiers, no matter how nested the qualifiers are: +
+const - non-const to const is allowedvolatile - non-volatile to volatile is allowedalign - bigger to smaller alignment is allowed + These casts are no-ops at runtime since the value representation does not change. +
+ {#code_begin|test#} +test "implicit cast - const qualification" { + var a: i32 = 1; + var b: *i32 = &a; + foo(b); +} + +fn foo(a: *const i32) void {} + {#code_end#} ++ In addition, pointers implicitly cast to const optional pointers: +
+ {#code_begin|test#} +const std = @import("std"); +const assert = std.debug.assert; +const mem = std.mem; + +test "cast *[1][*]const u8 to [*]const ?[*]const u8" { + const window_name = [1][*]const u8{c"window name"}; + const x: [*]const ?[*]const u8 = &window_name; + assert(mem.eql(u8, std.cstr.toSliceConst(x[0].?), "window name")); +} + {#code_end#} {#header_close#} + {#header_open|Implicit Cast: Integer and Float Widening#} ++ {#link|Integers#} implicitly cast to integer types which can represent every value of the old type, and likewise + {#link|Floats#} implicitly cast to float types which can represent every value of the old type. +
+ {#code_begin|test#} +const std = @import("std"); +const assert = std.debug.assert; +const mem = std.mem; + +test "integer widening" { + var a: u8 = 250; + var b: u16 = a; + var c: u32 = b; + var d: u64 = c; + var e: u64 = d; + var f: u128 = e; + assert(f == a); +} + +test "implicit unsigned integer to signed integer" { + var a: u8 = 250; + var b: i16 = a; + assert(b == 250); +} + +test "float widening" { + var a: f16 = 12.34; + var b: f32 = a; + var c: f64 = b; + var d: f128 = c; + assert(d == a); +} + {#code_end#} + {#header_close#} + {#header_open|Implicit Cast: Arrays#} +TODO: [N]T to []const T
+TODO: *const [N]T to []const T
+TODO: [N]T to *const []const T
+TODO: [N]T to ?[]const T
+TODO: *[N]T to []T
+TODO: *[N]T to [*]T
+TODO: *T to *[1]T
+TODO: [N]T to E![]const T
+ {#header_close#} + {#header_open|Implicit Cast: Optionals#} +TODO: T to ?T
+TODO: T to E!?T
+TODO: null to ?T
+ {#header_close#} + {#header_open|Implicit Cast: T to E!T#} +TODO
+ {#header_close#} + {#header_open|Implicit Cast: E to E!T#} +TODO
+ {#header_close#} + {#header_open|Implicit Cast: comptime_int to *const integer#} +TODO
+ {#header_close#} + {#header_open|Implicit Cast: comptime_float to *const float#} +TODO
+ {#header_close#} + {#header_open|Implicit Cast: compile-time known numbers#} +TODO
+ {#header_close#} + {#header_open|Implicit Cast: union to enum#} +TODO
+ {#header_close#} + {#header_open|Implicit Cast: enum to union#} +TODO
+ {#header_close#} + {#header_open|Implicit Cast: T to *T when @sizeOf(T) == 0#} +TODO
+ {#header_close#} + {#header_open|Implicit Cast: undefined#} +TODO
+ {#header_close#} + {#header_open|Implicit Cast: T to *const T#} +TODO
+ {#header_close#} + {#header_close#} + + {#header_open|Explicit Casts#} ++ Explicit casts are performed via {#link|Builtin Functions#}. + Some explicit casts are safe; some are not. + Some explicit casts perform language-level assertions; some do not. + Some explicit casts are no-ops at runtime; some are not. +
+TODO
+ {#header_close#} + {#header_close#} + {#header_open|void#} -TODO: assigning void has no codegen
-TODO: hashmap with void becomes a set
-TODO: difference between c_void and void
-TODO: void is the default return value of functions
-TODO: functions require assigning the return value
+
+ void represents a type that has no value. Code that makes use of void values is
+ not included in the final generated code:
+
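+ For example, an entry function that only shuffles void values around:
+ {#code_begin|syntax#}
+export fn entry() void {
+    var x: void = {};
+    var y: void = {};
+    x = y;
+}
+ {#code_end#}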
When this turns into LLVM IR, there is no code generated in the body of entry,
+ even in debug mode. For example, on x86_64:
0000000000000010 <entry>:
+ 10: 55 push %rbp
+ 11: 48 89 e5 mov %rsp,%rbp
+ 14: 5d pop %rbp
+ 15: c3 retq
+ These assembly instructions do not have any code associated with the void values - + they only perform the function call prologue and epilog.
+
+ void can be useful for instantiating generic types. For example, given a
+ Map(Key, Value), one can pass void for the Value
+ type to make it into a Set:
+
Note that this is different than using a dummy value for the hash map value.
+ By using void as the type of the value, the hash map entry type has no value field, and
+ thus the hash map takes up less space. Further, all the code that deals with storing and loading the
+ value is deleted, as seen above.
+
+ void is distinct from c_void, which is defined like this:
+ pub const c_void = @OpaqueType();.
+ void has a known size of 0 bytes, and c_void has an unknown, but non-zero, size.
+
+ Expressions of type void are the only ones whose value can be ignored. For example:
+
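+ Ignoring a non-void value is a compile error, as sketched here:
+ {#code_begin|syntax#}
+test "ignoring an expression value" {
+    foo(); // error: expression value is ignored
+}
+
+fn foo() i32 {
+    return 1234;
+}
+ {#code_end#}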
However, if the expression has type void:
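+ For instance, this is allowed:
+ {#code_begin|syntax#}
+test "ignoring a void value" {
+    returnsVoid(); // no error: the expression has type void
+}
+
+fn returnsVoid() void {}
+ {#code_end#}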
TODO: example of this referring to Self struct
TODO: example of this referring to recursion function
@@ -3672,7 +4383,7 @@ fn List(comptime T: type) type { {#code_begin|syntax#} const Node = struct { - next: &Node, + next: *Node, name: []u8, }; {#code_end#} @@ -3704,7 +4415,7 @@ pub fn main() void { {#code_begin|syntax#} /// Calls print and then flushes the buffer. -pub fn printf(self: &OutStream, comptime format: []const u8, args: ...) error!void { +pub fn printf(self: *OutStream, comptime format: []const u8, args: ...) error!void { const State = enum { Start, OpenBrace, @@ -3776,7 +4487,7 @@ pub fn printf(self: &OutStream, comptime format: []const u8, args: ...) error!vo and emits a function that actually looks like this: {#code_begin|syntax#} -pub fn printf(self: &OutStream, arg0: i32, arg1: []const u8) !void { +pub fn printf(self: *OutStream, arg0: i32, arg1: []const u8) !void { try self.write("here is a string: '"); try self.printValue(arg0); try self.write("' here is a number: "); @@ -3790,15 +4501,12 @@ pub fn printf(self: &OutStream, arg0: i32, arg1: []const u8) !void { on the type: {#code_begin|syntax#} -pub fn printValue(self: &OutStream, value: var) !void { +pub fn printValue(self: *OutStream, value: var) !void { const T = @typeOf(value); if (@isInteger(T)) { return self.printInt(T, value); } else if (@isFloat(T)) { return self.printFloat(T, value); - } else if (@canImplicitCast([]const u8, value)) { - const casted_value = ([]const u8)(value); - return self.write(casted_value); } else { @compileError("Unable to print type '" ++ @typeName(T) ++ "'"); } @@ -3844,13 +4552,8 @@ pub fn main() void { task in userland. It does so without introducing another language on top of Zig, such as a macro language or a preprocessor language. It's Zig all the way down. -TODO: suggestion to not use inline unless necessary
{#header_close#} - {#header_close#} - {#header_open|inline#} -TODO: inline while
-TODO: inline for
-TODO: suggestion to not use inline unless necessary
+ {#see_also|inline while|inline for#} {#header_close#} {#header_open|Assembly#}TODO: example of inline assembly
@@ -3862,6 +4565,277 @@ pub fn main() void {TODO: @fence()
TODO: @atomic rmw
TODO: builtin atomic memory ordering enum
+ {#header_close#} + {#header_open|Coroutines#} ++ A coroutine is a generalization of a function. +
++ When you call a function, it creates a stack frame, + and then the function runs until it reaches a return + statement, and then the stack frame is destroyed. + At the callsite, the next line of code does not run + until the function returns. +
++ A coroutine is like a function, but it can be suspended + and resumed any number of times, and then it must be + explicitly destroyed. When a coroutine suspends, it + returns to the resumer. +
+ {#header_open|Minimal Coroutine Example#} +
+ Declare a coroutine with the async keyword.
+ The expression in angle brackets must evaluate to a struct
+ which has these fields:
+
allocFn: fn (self: *Allocator, byte_count: usize, alignment: u29) Error![]u8 - where Error can be any error set.freeFn: fn (self: *Allocator, old_mem: []u8) void
+ You may notice that this corresponds to the std.mem.Allocator interface.
+ This makes it convenient to integrate with existing allocators. Note, however,
+ that the language feature does not depend on the standard library, and any struct which
+ has these fields is allowed.
+
+ Omitting the angle bracket expression when defining an async function makes + the function generic. Zig will infer the allocator type when the async function is called. +
+
+ Call a coroutine with the async keyword. Here, the expression in angle brackets
+ is a pointer to the allocator struct that the coroutine expects.
+
+ The result of an async function call is a promise->T type, where T
+ is the return type of the async function. Once a promise has been created, it must be
+ consumed, either with cancel or await:
+
+ Async functions start executing when created, so in the following example, the entire + async function completes before it is canceled: +
+ {#code_begin|test#} +const std = @import("std"); +const assert = std.debug.assert; + +var x: i32 = 1; + +test "create a coroutine and cancel it" { + const p = try async+ At any point, an async function may suspend itself. This causes control flow to + return to the caller or resumer. The following code demonstrates where control flow + goes: +
+ {#code_begin|test#} +const std = @import("std"); +const assert = std.debug.assert; + +test "coroutine suspend, resume, cancel" { + seq('a'); + const p = try async+ When an async function suspends itself, it must be sure that it will be + resumed or canceled somehow, for example by registering its promise handle + in an event loop. Use a suspend capture block to gain access to the + promise: +
+ {#code_begin|test#} +const std = @import("std"); +const assert = std.debug.assert; + +test "coroutine suspend with block" { + const p = try async
+ Every suspend point in an async function represents a point at which the coroutine
+ could be destroyed. If that happens, defer expressions that are in
+ scope are run, as well as errdefer expressions.
+
+ {#link|Await#} counts as a suspend point. +
+ {#header_open|Breaking from Suspend Blocks#} ++ Suspend blocks support labeled break, just like {#link|while#} and {#link|for#}. +
+
+ Upon entering a suspend block, the coroutine is already considered
+ suspended, and can be resumed. For example, if you started another kernel thread,
+ and had that thread call resume on the promise handle provided by the
+ suspend block, the new thread would begin executing after the suspend
+ block, while the old thread continued executing the suspend block.
+
+ However, the coroutine can be directly resumed from the suspend block, in which case it + never returns to its resumer and continues executing. +
+ {#code_begin|test#} +const std = @import("std"); +const assert = std.debug.assert; + +test "resume from suspend" { + var buf: [500]u8 = undefined; + var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator; + var my_result: i32 = 1; + const p = try async testResumeFromSuspend(&my_result); + cancel p; + std.debug.assert(my_result == 2); +} +async fn testResumeFromSuspend(my_result: *i32) void { + suspend { + resume @handle(); + } + my_result.* += 1; + suspend; + my_result.* += 1; +} + {#code_end#} + {#header_close#} + {#header_close#} + {#header_open|Await#} +
+ The await keyword is used to coordinate with an async function's
+ return statement.
+
+ await is valid only in an async function, and it takes
+ as an operand a promise handle.
+ If the async function associated with the promise handle has already returned,
+ then await destroys the target async function, and gives the return value.
+ Otherwise, await suspends the current async function, registering its
+ promise handle with the target coroutine. It becomes the target coroutine's responsibility
+ to have ensured that it will be resumed or destroyed. When the target coroutine reaches
+ its return statement, it gives the return value to the awaiter, destroys itself, and then
+ resumes the awaiter.
+
+ A promise handle must be consumed exactly once after it is created, either by cancel or await.
+
+ await counts as a suspend point, and therefore at every await,
+ a coroutine can be potentially destroyed, which would run defer and errdefer expressions.
+
+ In general, suspend is lower level than await. Most application
+ code will use only async and await, but event loop
+ implementations will make use of suspend internally.
+
+ There are a few issues with coroutines that are considered unresolved. Best be aware of them, + as the situation is likely to change before 1.0.0: +
+@@ -3870,18 +4844,46 @@ pub fn main() void { at compile time.
{#header_open|@addWithOverflow#} -@addWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
+ @addWithOverflow(comptime T: type, a: T, b: T, result: *T) bool
- Performs *result = a + b. If overflow or underflow occurs,
+ Performs result.* = a + b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
If no overflow or underflow occurs, returns false.
TODO
+@ArgType(comptime T: type, comptime n: usize) type
+
+ This builtin function takes a function type and returns the type of the parameter at index n.
+
+ T must be a function type.
+
+ Note: This function is deprecated. Use {#link|@typeInfo#} instead. +
+ {#header_close#} + {#header_open|@atomicLoad#} +@atomicLoad(comptime T: type, ptr: *const T, comptime ordering: builtin.AtomicOrder) T
+ + This builtin function atomically dereferences a pointer and returns the value. +
+
+ T must be a pointer type, a bool,
+ or an integer whose bit count meets these requirements:
+
+ TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe + we can remove this restriction +
{#header_close#} {#header_open|@atomicRmw#} -@atomicRmw(comptime T: type, ptr: &T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) -> T
+ @atomicRmw(comptime T: type, ptr: *T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) T
This builtin function atomically modifies memory and then returns the previous value.
@@ -3900,7 +4902,7 @@ pub fn main() void { {#header_close#} {#header_open|@bitCast#} -@bitCast(comptime DestType: type, value: var) -> DestType
+ @bitCast(comptime DestType: type, value: var) DestType
Converts a value of one type to another type.
@@ -3933,9 +4935,9 @@ pub fn main() void { {#header_close#} {#header_open|@alignCast#} -@alignCast(comptime alignment: u29, ptr: var) -> var
+ @alignCast(comptime alignment: u29, ptr: var) var
- ptr can be &T, fn(), ?&T,
+ ptr can be *T, fn(), ?*T,
?fn(), or []T. It returns the same type as ptr
except with the alignment adjusted to the new value.
@alignOf(comptime T: type) -> (number literal)
+ @alignOf(comptime T: type) (number literal)
This function returns the number of bytes that this type should be aligned to for the current target to match the C ABI. When the child type of a pointer has @@ -3952,7 +4954,7 @@ pub fn main() void {
const assert = @import("std").debug.assert;
comptime {
- assert(&u32 == &align(@alignOf(u32)) u32);
+ assert(*u32 == *align(@alignOf(u32)) u32);
}
The result is a target-specific compile time constant. It is guaranteed to be @@ -3960,6 +4962,31 @@ comptime {
{#see_also|Alignment#} {#header_close#} + + {#header_open|@boolToInt#} +@boolToInt(value: bool) u1
+
+ Converts true to u1(1) and false to
+ u1(0).
+
+ If the value is known at compile-time, the return type is comptime_int
+ instead of u1.
+
@bytesToSlice(comptime Element: type, bytes: []u8) []Element
+
+ Converts a slice of bytes or array of bytes into a slice of Element.
+ The resulting slice has the same {#link|pointer|Pointers#} properties as the parameter.
+
+ Attempting to convert a number of bytes with a length that does not evenly divide into a slice of + elements results in safety-protected {#link|Undefined Behavior#}. +
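+ For example, turning four bytes into a single u32 might look like this:
+ {#code_begin|syntax#}
+const assert = @import("std").debug.assert;
+
+test "@bytesToSlice" {
+    const bytes align(@alignOf(u32)) = []u8{ 0x12, 0x12, 0x12, 0x12 };
+    const slice = @bytesToSlice(u32, bytes[0..]);
+    assert(slice.len == 1);
+    assert(slice[0] == 0x12121212);
+}
+ {#code_end#}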
+ {#header_close#} + {#header_open|@cDefine#}@cDefine(comptime name: []u8, value)
@@ -3980,7 +5007,7 @@ comptime { {#see_also|Import from C Header File|@cInclude|@cImport|@cUndef|void#} {#header_close#} {#header_open|@cImport#} -
@cImport(expression) -> (namespace)
+ @cImport(expression) (namespace)
This function parses C code and imports the functions, types, variables, and compatible macro definitions into the result namespace. @@ -4025,14 +5052,8 @@ comptime {
{#see_also|Import from C Header File|@cImport|@cDefine|@cInclude#} {#header_close#} - {#header_open|@canImplicitCast#} -@canImplicitCast(comptime T: type, value) -> bool
- - Returns whether a value can be implicitly casted to a given type. -
- {#header_close#} {#header_open|@clz#} -@clz(x: T) -> U
+ @clz(x: T) U
This function counts the number of leading zeroes in x which is an integer
type T.
@@ -4044,18 +5065,62 @@ comptime {
If x is zero, @clz returns T.bit_count.
@cmpxchg(ptr: &T, cmp: T, new: T, success_order: AtomicOrder, fail_order: AtomicOrder) -> bool
+ {#header_open|@cmpxchgStrong#}
+ @cmpxchgStrong(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T
- This function performs an atomic compare exchange operation. + This function performs a strong atomic compare exchange operation. It's the equivalent of this code, + except atomic: +
+ {#code_begin|syntax#} +fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T { + const old_value = ptr.*; + if (old_value == expected_value) { + ptr.* = new_value; + return null; + } else { + return old_value; + } +} + {#code_end#} ++ If you are using cmpxchg in a loop, {#link|@cmpxchgWeak#} is the better choice, because it can be implemented + more efficiently in machine instructions.
AtomicOrder can be found with @import("builtin").AtomicOrder.
@typeOf(ptr).alignment must be >= @sizeOf(T).
@cmpxchgWeak(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T
+ + This function performs a weak atomic compare exchange operation. It's the equivalent of this code, + except atomic: +
+ {#code_begin|syntax#} +fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T { + const old_value = ptr.*; + if (old_value == expected_value and usuallyTrueButSometimesFalse()) { + ptr.* = new_value; + return null; + } else { + return old_value; + } +} + {#code_end#} +
+ If you are using cmpxchg in a loop, the sporadic failure will be no problem, and cmpxchgWeak
+ is the better choice, because it can be implemented more efficiently in machine instructions.
+ However if you need a stronger guarantee, use {#link|@cmpxchgStrong#}.
+
+ AtomicOrder can be found with @import("builtin").AtomicOrder.
+
@typeOf(ptr).alignment must be >= @sizeOf(T).
@compileError(comptime msg: []u8)
@@ -4124,7 +5189,7 @@ test "main" {
{#code_end#}
{#header_close#}
{#header_open|@ctz#}
- @ctz(x: T) -> U
+ @ctz(x: T) U
This function counts the number of trailing zeroes in x which is an integer
type T.
@@ -4136,9 +5201,10 @@ test "main" {
If x is zero, @ctz returns T.bit_count.
@divExact(numerator: T, denominator: T) -> T
+ @divExact(numerator: T, denominator: T) T
Exact division. Caller guarantees denominator != 0 and
@divTrunc(numerator, denominator) * denominator == numerator.
@@ -4151,7 +5217,7 @@ test "main" {
{#see_also|@divTrunc|@divFloor#}
{#header_close#}
{#header_open|@divFloor#}
-
@divFloor(numerator: T, denominator: T) -> T
+ @divFloor(numerator: T, denominator: T) T
Floored division. Rounds toward negative infinity. For unsigned integers it is
the same as numerator / denominator. Caller guarantees denominator != 0 and
@@ -4165,7 +5231,7 @@ test "main" {
{#see_also|@divTrunc|@divExact#}
{#header_close#}
{#header_open|@divTrunc#}
-
@divTrunc(numerator: T, denominator: T) -> T
+ @divTrunc(numerator: T, denominator: T) T
Truncated division. Rounds toward zero. For unsigned integers it is
the same as numerator / denominator. Caller guarantees denominator != 0 and
@@ -4179,7 +5245,7 @@ test "main" {
{#see_also|@divFloor|@divExact#}
{#header_close#}
{#header_open|@embedFile#}
-
@embedFile(comptime path: []const u8) -> [X]u8
+ @embedFile(comptime path: []const u8) [X]u8
This function returns a compile time constant fixed-size array with length
equal to the byte count of the file given by path. The contents of the array
@@ -4190,29 +5256,25 @@ test "main" {
@export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) -> []const u8
+
+ {#header_open|@enumToInt#}
+ @enumToInt(enum_value: var) var
- Creates a symbol in the output object file. -
- {#header_close#} - {#header_open|@tagName#} -@tagName(value: var) -> []const u8
- - Converts an enum value or union value to a slice of bytes representing the name. -
- {#header_close#} - {#header_open|@TagType#} -@TagType(T: type) -> type
- - For an enum, returns the integer type that is used to store the enumeration value. -
-- For a union, returns the enum type that is used to store the tag value. + Converts an enumeration value into its integer tag type. +
+ {#see_also|@intToEnum#} + {#header_close#} + + {#header_open|@errSetCast#} +@errSetCast(comptime T: DestType, value: var) DestType
+ + Converts an error value from one error set to another error set. Attempting to convert an error + which is not in the destination error set results in safety-protected {#link|Undefined Behavior#}.
{#header_close#} + {#header_open|@errorName#} -@errorName(err: error) -> []u8
+ @errorName(err: error) []u8
This function returns the string representation of an error. If an error declaration is: @@ -4227,14 +5289,42 @@ test "main" { error name table will be generated.
{#header_close#} + {#header_open|@errorReturnTrace#} -@errorReturnTrace() -> ?&builtin.StackTrace
+ @errorReturnTrace() ?*builtin.StackTrace
If the binary is built with error return tracing, and this function is invoked in a function that calls a function with an error or error union return type, returns a stack trace object. Otherwise returns `null`.
{#header_close#} + + {#header_open|@errorToInt#} +@errorToInt(err: var) @IntType(false, @sizeOf(error) * 8)
+ + Supports the following types: +
+E!void+ Converts an error to the integer representation of an error. +
++ It is generally recommended to avoid this + cast, as the integer representation of an error is not stable across source code changes. +
+ {#see_also|@intToError#} + {#header_close#} + + {#header_open|@export#} +@export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) []const u8
+ + Creates a symbol in the output object file. +
+ {#header_close#} + {#header_open|@fence#}@fence(order: AtomicOrder)
@@ -4245,13 +5335,40 @@ test "main" {
{#see_also|Compile Variables#} {#header_close#} + + {#header_open|@field#} +@field(lhs: var, comptime field_name: []const u8) (field)
+ Preforms field access equivalent to lhs.->field_name-<.
@fieldParentPtr(comptime ParentType: type, comptime field_name: []const u8,
- field_ptr: &T) -> &ParentType
+ field_ptr: *T) *ParentType
Given a pointer to a field, returns the base pointer of a struct.
{#header_close#} + + {#header_open|@floatCast#} +@floatCast(comptime DestType: type, value: var) DestType
+ + Convert from one float type to another. This cast is safe, but may cause the + numeric value to lose precision. +
+ {#header_close#} + + {#header_open|@floatToInt#} +@floatToInt(comptime DestType: type, float: var) DestType
+ + Converts the integer part of a floating point number to the destination type. +
++ If the integer part of the floating point number cannot fit in the destination type, + it invokes safety-checked {#link|Undefined Behavior#}. +
+ {#see_also|@intToFloat#} + {#header_close#} + {#header_open|@frameAddress#}@frameAddress()
@@ -4266,8 +5383,18 @@ test "main" { This function is only valid within function scope.
{#header_close#} + {#header_open|@handle#} +@handle()
+
+ This function returns a promise->T type, where T
+ is the return type of the async function in scope.
+
+ This function is only valid within an async function scope. +
+ {#header_close#} {#header_open|@import#} -@import(comptime path: []u8) -> (namespace)
+ @import(comptime path: []u8) (namespace)
This function finds a zig file corresponding to path and imports all the
public top level declarations into the resulting namespace.
@@ -4287,7 +5414,7 @@ test "main" {
{#see_also|Compile Variables|@embedFile#}
{#header_close#}
{#header_open|@inlineCall#}
-
@inlineCall(function: X, args: ...) -> Y
+ @inlineCall(function: X, args: ...) Y
This calls a function, in the same way that invoking an expression with parentheses does:
@@ -4306,20 +5433,66 @@ fn add(a: i32, b: i32) i32 { return a + b; } {#see_also|@noInlineCall#} {#header_close#} + + {#header_open|@intCast#} +@intCast(comptime DestType: type, int: var) DestType
+ + Converts an integer to another integer while keeping the same numerical value. + Attempting to convert a number which is out of range of the destination type results in + safety-protected {#link|Undefined Behavior#}. +
+ {#header_close#} + + {#header_open|@intToEnum#} +@intToEnum(comptime DestType: type, int_value: @TagType(DestType)) DestType
+ + Converts an integer into an {#link|enum#} value. +
++ Attempting to convert an integer which represents no value in the chosen enum type invokes + safety-checked {#link|Undefined Behavior#}. +
+ {#see_also|@enumToInt#} + {#header_close#} + + {#header_open|@intToError#} +@intToError(value: @IntType(false, @sizeOf(error) * 8)) error
+ + Converts from the integer representation of an error into the global error set type. +
++ It is generally recommended to avoid this + cast, as the integer representation of an error is not stable across source code changes. +
++ Attempting to convert an integer that does not correspond to any error results in + safety-protected {#link|Undefined Behavior#}. +
+ {#see_also|@errorToInt#} + {#header_close#} + + {#header_open|@intToFloat#} +@intToFloat(comptime DestType: type, int: var) DestType
+ + Converts an integer to the closest floating point representation. To convert the other way, use {#link|@floatToInt#}. This cast is always safe. +
+ {#header_close#} + {#header_open|@intToPtr#} -@intToPtr(comptime DestType: type, int: usize) -> DestType
+ @intToPtr(comptime DestType: type, int: usize) DestType
Converts an integer to a pointer. To convert the other way, use {#link|@ptrToInt#}.
{#header_close#} + {#header_open|@IntType#} -@IntType(comptime is_signed: bool, comptime bit_count: u8) -> type
+ @IntType(comptime is_signed: bool, comptime bit_count: u32) type
This function returns an integer type with the given signness and bit count.
{#header_close#} {#header_open|@maxValue#} -@maxValue(comptime T: type) -> (number literal)
+ @maxValue(comptime T: type) (number literal)
This function returns the maximum value of the integer type T.
@memberCount(comptime T: type) -> (number literal)
+ @memberCount(comptime T: type) (number literal)
This function returns the number of members in a struct, enum, or union type.
@@ -4340,7 +5513,7 @@ fn add(a: i32, b: i32) i32 { return a + b; } {#header_close#} {#header_open|@memberName#} -@memberName(comptime T: type, comptime index: usize) -> [N]u8
+ @memberName(comptime T: type, comptime index: usize) [N]u8
Returns the field name of a struct, union, or enum.
The result is a compile time constant. @@ -4350,11 +5523,11 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#header_close#} {#header_open|@memberType#} -@memberType(comptime T: type, comptime index: usize) -> type
+ @memberType(comptime T: type, comptime index: usize) type
Returns the field type of a struct or union.
{#header_close#} {#header_open|@memcpy#} -@memcpy(noalias dest: &u8, noalias source: &const u8, byte_count: usize)
+ @memcpy(noalias dest: *u8, noalias source: *const u8, byte_count: usize)
This function copies bytes from one region of memory to another. dest and
source are both pointers and must not overlap.
@@ -4372,7 +5545,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
mem.copy(u8, dest[0...byte_count], source[0...byte_count]);
{#header_close#}
{#header_open|@memset#}
-
@memset(dest: &u8, c: u8, byte_count: usize)
+ @memset(dest: *u8, c: u8, byte_count: usize)
This function sets a region of memory to c. dest is a pointer.
for (dest[0...byte_count]) |*b| *b = c;
+ for (dest[0...byte_count]) |*b| b.* = c;
The optimizer is intelligent enough to turn the above snippet into a memset.
@@ -4389,7 +5562,7 @@ mem.copy(u8, dest[0...byte_count], source[0...byte_count]); mem.set(u8, dest, c); {#header_close#} {#header_open|@minValue#} -@minValue(comptime T: type) -> (number literal)
+ @minValue(comptime T: type) (number literal)
This function returns the minimum value of the integer type T.
@@ -4398,7 +5571,7 @@ mem.set(u8, dest, c); {#header_close#} {#header_open|@mod#} -@mod(numerator: T, denominator: T) -> T
+ @mod(numerator: T, denominator: T) T
Modulus division. For unsigned integers this is the same as
numerator % denominator. Caller guarantees denominator > 0.
@@ -4411,24 +5584,65 @@ mem.set(u8, dest, c);
{#see_also|@rem#}
{#header_close#}
{#header_open|@mulWithOverflow#}
-
@mulWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
+ @mulWithOverflow(comptime T: type, a: T, b: T, result: *T) bool
- Performs *result = a * b. If overflow or underflow occurs,
+ Performs result.* = a * b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
If no overflow or underflow occurs, returns false.
@newStackCall(new_stack: []u8, function: var, args: ...) var
+
+ This calls a function, in the same way that invoking an expression with parentheses does. However,
+ instead of using the same stack as the caller, the function uses the stack provided in the new_stack
+ parameter.
+
@noInlineCall(function: var, args: ...) -> var
+ @noInlineCall(function: var, args: ...) var
This calls a function, in the same way that invoking an expression with parentheses does:
-const assert = @import("std").debug.assert;
+ {#code_begin|test#}
+const assert = @import("std").debug.assert;
+
test "noinline function call" {
assert(@noInlineCall(add, 3, 9) == 12);
}
-fn add(a: i32, b: i32) -> i32 { a + b }
+fn add(a: i32, b: i32) i32 {
+ return a + b;
+}
+ {#code_end#}
Unlike a normal function call, however, @noInlineCall guarantees that the call
will not be inlined. If the call must be inlined, a compile error is emitted.
@@ -4436,13 +5650,13 @@ fn add(a: i32, b: i32) -> i32 { a + b }
{#see_also|@inlineCall#}
{#header_close#}
{#header_open|@offsetOf#}
-
@offsetOf(comptime T: type, comptime field_name: [] const u8) -> (number literal)
+ @offsetOf(comptime T: type, comptime field_name: [] const u8) (number literal)
This function returns the byte offset of a field relative to its containing struct.
{#header_close#} {#header_open|@OpaqueType#} -@OpaqueType() -> type
+ @OpaqueType() type
Creates a new type with an unknown size and alignment.
@@ -4450,12 +5664,12 @@ fn add(a: i32, b: i32) -> i32 { a + b } This is typically used for type safety when interacting with C code that does not expose struct details. Example: - {#code_begin|test_err|expected type '&Derp', found '&Wat'#} + {#code_begin|test_err|expected type '*Derp', found '*Wat'#} const Derp = @OpaqueType(); const Wat = @OpaqueType(); -extern fn bar(d: &Derp) void; -export fn foo(w: &Wat) void { +extern fn bar(d: *Derp) void; +export fn foo(w: *Wat) void { bar(w); } @@ -4465,7 +5679,7 @@ test "call foo" { {#code_end#} {#header_close#} {#header_open|@panic#} -@panic(message: []const u8) -> noreturn
+ @panic(message: []const u8) noreturn
Invokes the panic handler function. By default the panic handler function
calls the public panic function exposed in the root source file, or
@@ -4480,20 +5694,30 @@ test "call foo" {
{#see_also|Root Source File#}
{#header_close#}
+ {#header_open|@popCount#}
+
@popCount(integer: var) var
+ Counts the number of bits set in an integer.
+
+ If integer is known at {#link|comptime#}, the return type is comptime_int.
+ Otherwise, the return type is an unsigned integer with the minimum number
+ of bits that can represent the bit count of the integer type.
+
@ptrCast(comptime DestType: type, value: var) -> DestType
+ @ptrCast(comptime DestType: type, value: var) DestType
Converts a pointer of one type to a pointer of another type.
{#header_close#} {#header_open|@ptrToInt#} -@ptrToInt(value: var) -> usize
+ @ptrToInt(value: var) usize
Converts value to a usize which is the address of the pointer. value can be one of these types:
&T?&T*T?*Tfn()?fn()@rem(numerator: T, denominator: T) -> T
+ @rem(numerator: T, denominator: T) T
Remainder division. For unsigned integers this is the same as
numerator % denominator. Caller guarantees denominator > 0.
@@ -4617,14 +5841,8 @@ pub const FloatMode = enum {
@setGlobalSection(global_variable_name, comptime section_name: []const u8) -> bool
- - Puts the global variable in the specified section. -
- {#header_close#} {#header_open|@shlExact#} -@shlExact(value: T, shift_amt: Log2T) -> T
+ @shlExact(value: T, shift_amt: Log2T) T
Performs the left shift operation (<<). Caller guarantees
that the shift will not shift any 1 bits out.
@@ -4636,9 +5854,9 @@ pub const FloatMode = enum {
{#see_also|@shrExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@shlWithOverflow#}
-
@shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: &T) -> bool
+ @shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: *T) bool
- Performs *result = a << b. If overflow or underflow occurs,
+ Performs result.* = a << b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
If no overflow or underflow occurs, returns false.
@shrExact(value: T, shift_amt: Log2T) -> T
+ @shrExact(value: T, shift_amt: Log2T) T
Performs the right shift operation (>>). Caller guarantees
that the shift will not shift any 1 bits out.
@@ -4660,8 +5878,9 @@ pub const FloatMode = enum {
@sizeOf(comptime T: type) -> (number literal)
+ @sizeOf(comptime T: type) comptime_int
This function returns the number of bytes it takes to store T in memory.
@subWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
+
+ {#header_open|@sliceToBytes#}
+ @sliceToBytes(value: var) []u8
- Performs *result = a - b. If overflow or underflow occurs,
+ Converts a slice or array to a slice of u8. The resulting slice has the same
+ {#link|pointer|Pointers#} properties as the parameter.
+
@sqrt(comptime T: type, value: T) T
+ + Performs the square root of a floating point number. Uses a dedicated hardware instruction + when available. Currently only supports f32 and f64 at runtime. f128 at runtime is TODO. +
+
+ This is a low-level intrinsic. Most code can use std.math.sqrt instead.
+
@subWithOverflow(comptime T: type, a: T, b: T, result: *T) bool
+
+ Performs result.* = a - b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
If no overflow or underflow occurs, returns false.
@tagName(value: var) []const u8
+ + Converts an enum value or union value to a slice of bytes representing the name. +
+ {#header_close#} + {#header_open|@TagType#} +@TagType(T: type) type
+ + For an enum, returns the integer type that is used to store the enumeration value. +
++ For a union, returns the enum type that is used to store the tag value. +
+ {#header_close#} {#header_open|@truncate#} -@truncate(comptime T: type, integer) -> T
+ @truncate(comptime T: type, integer) T
This function truncates bits from an integer type, resulting in a smaller integer type. @@ -4702,7 +5955,7 @@ const b: u8 = @truncate(u8, a); {#header_close#} {#header_open|@typeId#} -
@typeId(comptime T: type) -> @import("builtin").TypeId
+ @typeId(comptime T: type) @import("builtin").TypeId
Returns which kind of type something is. Possible values:
@@ -4717,11 +5970,11 @@ pub const TypeId = enum { Pointer, Array, Struct, - FloatLiteral, - IntLiteral, - UndefinedLiteral, - NullLiteral, - Nullable, + ComptimeFloat, + ComptimeInt, + Undefined, + Null, + Optional, ErrorUnion, Error, Enum, @@ -4732,18 +5985,201 @@ pub const TypeId = enum { BoundFn, ArgTuple, Opaque, +}; + {#code_end#} + {#header_close#} + {#header_open|@typeInfo#} +@typeInfo(comptime T: type) @import("builtin").TypeInfo
+ + Returns information on the type. Returns a value of the following union: +
+ {#code_begin|syntax#} +pub const TypeInfo = union(TypeId) { + Type: void, + Void: void, + Bool: void, + NoReturn: void, + Int: Int, + Float: Float, + Pointer: Pointer, + Array: Array, + Struct: Struct, + ComptimeFloat: void, + ComptimeInt: void, + Undefined: void, + Null: void, + Optional: Optional, + ErrorUnion: ErrorUnion, + ErrorSet: ErrorSet, + Enum: Enum, + Union: Union, + Fn: Fn, + Namespace: void, + Block: void, + BoundFn: Fn, + ArgTuple: void, + Opaque: void, + Promise: Promise, + + + pub const Int = struct { + is_signed: bool, + bits: u8, + }; + + pub const Float = struct { + bits: u8, + }; + + pub const Pointer = struct { + size: Size, + is_const: bool, + is_volatile: bool, + alignment: u32, + child: type, + + pub const Size = enum { + One, + Many, + Slice, + }; + }; + + pub const Array = struct { + len: usize, + child: type, + }; + + pub const ContainerLayout = enum { + Auto, + Extern, + Packed, + }; + + pub const StructField = struct { + name: []const u8, + offset: ?usize, + field_type: type, + }; + + pub const Struct = struct { + layout: ContainerLayout, + fields: []StructField, + defs: []Definition, + }; + + pub const Optional = struct { + child: type, + }; + + pub const ErrorUnion = struct { + error_set: type, + payload: type, + }; + + pub const Error = struct { + name: []const u8, + value: usize, + }; + + pub const ErrorSet = struct { + errors: []Error, + }; + + pub const EnumField = struct { + name: []const u8, + value: usize, + }; + + pub const Enum = struct { + layout: ContainerLayout, + tag_type: type, + fields: []EnumField, + defs: []Definition, + }; + + pub const UnionField = struct { + name: []const u8, + enum_field: ?EnumField, + field_type: type, + }; + + pub const Union = struct { + layout: ContainerLayout, + tag_type: ?type, + fields: []UnionField, + defs: []Definition, + }; + + pub const CallingConvention = enum { + Unspecified, + C, + Cold, + Naked, + Stdcall, + Async, + }; + + pub const FnArg = struct { + is_generic: bool, + is_noalias: bool, + arg_type: ?type, + }; + + pub const Fn = struct { + calling_convention: CallingConvention, + is_generic: bool, + is_var_args: bool, + return_type: ?type, + async_allocator_type: ?type, + args: []FnArg, + }; + + pub const Promise = struct { + child: ?type, + }; + + pub const Definition = struct { + name: []const u8, + is_pub: bool, + data: Data, + + pub const Data = union(enum) { + Type: type, + Var: type, + Fn: FnDef, + + pub const FnDef = struct { + fn_type: type, + inline_type: Inline, + calling_convention: CallingConvention, + is_var_args: bool, + is_extern: bool, + is_export: bool, + lib_name: ?[]const u8, + return_type: type, + arg_names: [][] const u8, + + pub const Inline = enum { + Auto, + Always, + Never, + }; + }; + }; + }; }; {#code_end#} {#header_close#} {#header_open|@typeName#} -@typeName(T: type) -> []u8
+ @typeName(T: type) []u8
This function returns the string representation of a type.
{#header_close#} {#header_open|@typeOf#} -@typeOf(expression) -> type
+ @typeOf(expression) type
This function returns a compile-time constant, which is the type of the expression passed as an argument. The expression is evaluated. @@ -4753,12 +6189,13 @@ pub const TypeId = enum { {#header_close#} {#header_open|Build Mode#}
- Zig has three build modes: + Zig has four build modes:
To add standard build options to a build.zig file:
@@ -4766,7 +6203,7 @@ pub const TypeId = enum {
{#code_begin|syntax#}
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const exe = b.addExecutable("example", "example.zig");
exe.setBuildMode(b.standardReleaseOptions());
b.default_step.dependOn(&exe.step);
@@ -4775,14 +6212,16 @@ pub fn build(b: &Builder) void {
This causes these options to be available:
- -Drelease-safe=(bool) optimizations on and safety on
- -Drelease-fast=(bool) optimizations on and safety off
+ -Drelease-safe=[bool] optimizations on and safety on
+ -Drelease-fast=[bool] optimizations on and safety off
+ -Drelease-small=[bool] size optimizations on and safety off
{#header_open|Debug#}
$ zig build-exe example.zig
$ zig build-exe example.zig --release-small
+
Zig has many instances of undefined behavior. If undefined behavior is
- detected at compile-time, Zig emits an error. Most undefined behavior that
- cannot be detected at compile-time can be detected at runtime. In these cases,
- Zig has safety checks. Safety checks can be disabled on a per-block basis
- with @setRuntimeSafety. The {#link|ReleaseFast#}
+ detected at compile-time, Zig emits a compile error and refuses to continue.
+ Most undefined behavior that cannot be detected at compile-time can be detected
+ at runtime. In these cases, Zig has safety checks. Safety checks can be disabled
+ on a per-block basis with {#link|setRuntimeSafety#}. The {#link|ReleaseFast#}
build mode disables all safety checks in order to facilitate optimizations.
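      As a minimal editorial sketch (not part of the original patch), {#link|setRuntimeSafety#} applies to the block it is called in:
      {#code_begin|test#}
const assert = @import("std").debug.assert;

test "per-block safety override" {
    // Safety checks are disabled only for this scope; the rest of the program
    // keeps whatever the build mode selected.
    @setRuntimeSafety(false);
    var x: u8 = 255;
    // Wrapping addition is defined behavior in every build mode; it is used
    // here only so the test passes regardless of the safety setting.
    x +%= 1;
    assert(x == 0);
}
      {#code_end#}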
@@ -4830,7 +6280,14 @@ fn assert(ok: bool) void {
    if (!ok) unreachable; // assertion failure
}
      {#code_end#}
-     At runtime crashes with the message reached unreachable code and a stack trace.
      At runtime:
+      {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+    std.debug.assert(false);
+}
+      {#code_end#}
      {#header_close#}
      {#header_open|Index out of Bounds#}
      At compile-time:
@@ -4840,20 +6297,37 @@ comptime {
    const garbage = array[5];
}
      {#code_end#}
-     At runtime crashes with the message index out of bounds and a stack trace.
      At runtime:
+      {#code_begin|exe_err#}
+pub fn main() void {
+    var x = foo("hello");
+}
+
+fn foo(x: []const u8) u8 {
+    return x[5];
+}
+      {#code_end#}
      {#header_close#}
      {#header_open|Cast Negative Number to Unsigned Integer#}
      At compile-time:
      {#code_begin|test_err|attempt to cast negative value to unsigned integer#}
comptime {
    const value: i32 = -1;
-    const unsigned = u32(value);
+    const unsigned = @intCast(u32, value);
+}
+      {#code_end#}
+      At runtime:
+      {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+    var value: i32 = -1;
+    var unsigned = @intCast(u32, value);
+    std.debug.warn("value: {}\n", unsigned);
+}
+      {#code_end#}
-     At runtime crashes with the message attempt to cast negative value to unsigned integer and a stack trace.
- If you are trying to obtain the maximum value of an unsigned integer, use @maxValue(T),
- where T is the integer type, such as u32.
+ To obtain the maximum value of an unsigned integer, use {#link|@maxValue#}.
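      For example (an editorial sketch, not part of the original patch):
      {#code_begin|test#}
const assert = @import("std").debug.assert;

test "maxValue" {
    // @maxValue is evaluated at compile time.
    assert(@maxValue(u8) == 255);
    assert(@maxValue(u32) == 4294967295);
}
      {#code_end#}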
      At runtime:
+      {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+    var spartan_count: u16 = 300;
+    const byte = @intCast(u8, spartan_count);
+    std.debug.warn("value: {}\n", byte);
}
      {#code_end#}
-     At runtime crashes with the message integer cast truncated bits and a stack trace.
- If you are trying to truncate bits, use @truncate(T, value),
- where T is the integer type, such as u32, and value
- is the value you want to truncate.
+ To truncate bits, use {#link|@truncate#}.
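      For example (an editorial sketch, not part of the original patch):
      {#code_begin|test#}
const assert = @import("std").debug.assert;

test "truncate keeps only the low bits" {
    const spartan_count: u16 = 300;
    // 300 is 0b100101100; the low 8 bits are 0b00101100, which is 44.
    const byte = @truncate(u8, spartan_count);
    assert(byte == 44);
}
      {#code_end#}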
      - (negation)
      * (multiplication)
      / (division)
      @divTrunc (division)
      @divFloor (division)
      @divExact (division)
      Example with addition at compile-time:
      {#code_begin|test_err|operation caused overflow#}
@@ -4891,7 +6372,16 @@ comptime {
    byte += 1;
}
      {#code_end#}
-     At runtime crashes with the message integer overflow and a stack trace.
      At runtime:
+      {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+    var byte: u8 = 255;
+    byte += 1;
+    std.debug.warn("value: {}\n", byte);
+}
+      {#code_end#}
      {#header_close#}
      {#header_open|Standard Library Math Functions#}
      These functions provided by the standard library return possible errors.
@@ -4926,13 +6416,13 @@ pub fn main() !void { occurred, as well as returning the overflowed bits:@addWithOverflow@subWithOverflow@mulWithOverflow@shlWithOverflow
- Example of @addWithOverflow:
+ Example of {#link|@addWithOverflow#}:
At runtime crashes with the message left shift overflowed bits and a stack trace.
At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); + +pub fn main() void { + var x: u8 = 0b01010101; + var y = @shlExact(x, 2); + std.debug.warn("value: {}\n", y); +} + {#code_end#} {#header_close#} {#header_open|Exact Right Shift Overflow#}At compile-time:
@@ -4987,7 +6486,16 @@ comptime { const x = @shrExact(u8(0b10101010), 2); } {#code_end#} -At runtime crashes with the message right shift overflowed bits and a stack trace.
At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); + +pub fn main() void { + var x: u8 = 0b10101010; + var y = @shrExact(x, 2); + std.debug.warn("value: {}\n", y); +} + {#code_end#} {#header_close#} {#header_open|Division by Zero#}At compile-time:
@@ -4998,8 +6506,17 @@ comptime { const c = a / b; } {#code_end#} -At runtime crashes with the message division by zero and a stack trace.
At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); +pub fn main() void { + var a: u32 = 1; + var b: u32 = 0; + var c = a / b; + std.debug.warn("value: {}\n", c); +} + {#code_end#} {#header_close#} {#header_open|Remainder Division by Zero#}At compile-time:
@@ -5010,38 +6527,91 @@ comptime { const c = a % b; } {#code_end#} -At runtime crashes with the message remainder division by zero and a stack trace.
At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); +pub fn main() void { + var a: u32 = 10; + var b: u32 = 0; + var c = a % b; + std.debug.warn("value: {}\n", c); +} + {#code_end#} {#header_close#} {#header_open|Exact Division Remainder#} -TODO
+At compile-time:
+ {#code_begin|test_err|exact division had a remainder#} +comptime { + const a: u32 = 10; + const b: u32 = 3; + const c = @divExact(a, b); +} + {#code_end#} +At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); + +pub fn main() void { + var a: u32 = 10; + var b: u32 = 3; + var c = @divExact(a, b); + std.debug.warn("value: {}\n", c); +} + {#code_end#} {#header_close#} {#header_open|Slice Widen Remainder#} -TODO
+At compile-time:
+ {#code_begin|test_err|unable to convert#} +comptime { + var bytes = [5]u8{ 1, 2, 3, 4, 5 }; + var slice = @bytesToSlice(u32, bytes); +} + {#code_end#} +At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); + +pub fn main() void { + var bytes = [5]u8{ 1, 2, 3, 4, 5 }; + var slice = @bytesToSlice(u32, bytes[0..]); + std.debug.warn("value: {}\n", slice[0]); +} + {#code_end#} {#header_close#} {#header_open|Attempt to Unwrap Null#}At compile-time:
{#code_begin|test_err|unable to unwrap null#} comptime { - const nullable_number: ?i32 = null; - const number = ??nullable_number; + const optional_number: ?i32 = null; + const number = optional_number.?; +} + {#code_end#} +At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); + +pub fn main() void { + var optional_number: ?i32 = null; + var number = optional_number.?; + std.debug.warn("value: {}\n", number); } {#code_end#} -At runtime crashes with the message attempt to unwrap null and a stack trace.
One way to avoid this crash is to test for null instead of assuming non-null, with
the if expression:
At compile-time:
@@ -5054,7 +6624,19 @@ fn getNumberOrFail() !i32 { return error.UnableToReturnNumber; } {#code_end#} -At runtime crashes with the message attempt to unwrap error: ErrorCode and a stack trace.
At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); + +pub fn main() void { + const number = getNumberOrFail() catch unreachable; + std.debug.warn("value: {}\n", number); +} + +fn getNumberOrFail() !i32 { + return error.UnableToReturnNumber; +} + {#code_end#}One way to avoid this crash is to test for an error instead of assuming a successful result, with
the if expression:
At compile-time:
{#code_begin|test_err|integer value 11 represents no error#} comptime { const err = error.AnError; - const number = u32(err) + 10; - const invalid_err = error(number); + const number = @errorToInt(err) + 10; + const invalid_err = @intToError(number); +} + {#code_end#} +At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); + +pub fn main() void { + var err = error.AnError; + var number = @errorToInt(err) + 500; + var invalid_err = @intToError(number); + std.debug.warn("value: {}\n", number); } {#code_end#} -At runtime crashes with the message invalid error code and a stack trace.
TODO
+At compile-time:
+ {#code_begin|test_err|has no tag matching integer value 3#} +const Foo = enum { + A, + B, + C, +}; +comptime { + const a: u2 = 3; + const b = @intToEnum(Foo, a); +} + {#code_end#} +At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); +const Foo = enum { + A, + B, + C, +}; + +pub fn main() void { + var a: u2 = 3; + var b = @intToEnum(Foo, a); + std.debug.warn("value: {}\n", @tagName(b)); +} + {#code_end#} {#header_close#} - {#header_open|Incorrect Pointer Alignment#} -TODO
+ {#header_open|Invalid Error Set Cast#} +At compile-time:
+ {#code_begin|test_err|error.B not a member of error set 'Set2'#} +const Set1 = error{ + A, + B, +}; +const Set2 = error{ + A, + C, +}; +comptime { + _ = @errSetCast(Set2, Set1.B); +} + {#code_end#} +At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); + +const Set1 = error{ + A, + B, +}; +const Set2 = error{ + A, + C, +}; +pub fn main() void { + foo(Set1.B); +} +fn foo(set1: Set1) void { + const x = @errSetCast(Set2, set1); + std.debug.warn("value: {}\n", x); +} + {#code_end#} + {#header_close#} + + {#header_open|Incorrect Pointer Alignment#} +At compile-time:
+ {#code_begin|test_err|pointer address 0x1 is not aligned to 4 bytes#} +comptime { + const ptr = @intToPtr(*i32, 0x1); + const aligned = @alignCast(4, ptr); +} + {#code_end#} +At runtime:
+ {#code_begin|exe_err#} +pub fn main() !void { + var array align(4) = []u32{ 0x11111111, 0x11111111 }; + const bytes = @sliceToBytes(array[0..]); + if (foo(bytes) != 0x11111111) return error.Wrong; +} +fn foo(bytes: []u8) u32 { + const slice4 = bytes[1..5]; + const int_slice = @bytesToSlice(u32, @alignCast(4, slice4)); + return int_slice[0]; +} + {#code_end#} {#header_close#} {#header_open|Wrong Union Field Access#} -TODO
+At compile-time:
+ {#code_begin|test_err|accessing union field 'float' while field 'int' is set#} +comptime { + var f = Foo{ .int = 42 }; + f.float = 12.34; +} +const Foo = union { + float: f32, + int: u32, +}; + {#code_end#} +At runtime:
+ {#code_begin|exe_err#} +const std = @import("std"); + +const Foo = union { + float: f32, + int: u32, +}; + +pub fn main() void { + var f = Foo{ .int = 42 }; + bar(&f); +} + +fn bar(f: *Foo) void { + f.float = 12.34; + std.debug.warn("value: {}\n", f.float); +} + {#code_end#} +
+ This safety is not available for extern or packed unions.
+
+ To change the active field of a union, assign the entire union, like this: +
+ {#code_begin|exe#} +const std = @import("std"); + +const Foo = union { + float: f32, + int: u32, +}; + +pub fn main() void { + var f = Foo{ .int = 42 }; + bar(&f); +} + +fn bar(f: *Foo) void { + f.* = Foo{ .float = 12.34 }; + std.debug.warn("value: {}\n", f.float); +} + {#code_end#} ++ To change the active field of a union when a meaningful value for the field is not known, + use {#link|undefined#}, like this: +
+ {#code_begin|exe#} +const std = @import("std"); + +const Foo = union { + float: f32, + int: u32, +}; + +pub fn main() void { + var f = Foo{ .int = 42 }; + f = Foo{ .float = undefined }; + bar(&f); + std.debug.warn("value: {}\n", f.float); +} + +fn bar(f: *Foo) void { + f.float = 12.34; +} + {#code_end#} {#header_close#} + + {#header_open|Out of Bounds Float To Integer Cast#} +TODO
+ {#header_close#} + {#header_close#} {#header_open|Memory#}TODO: explain no default allocator in zig
@@ -5122,218 +6878,7 @@ const separator = if (builtin.os == builtin.Os.windows) '\\' else '/';
Example of what is imported with @import("builtin"):
terminal
$ zig build
$ ./test
all your base are belong to us
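      As an editorial aside (not part of the original patch), the separator expression shown above can be exercised in a test, since the values in @import("builtin") are compile-time known:
      {#code_begin|test#}
const builtin = @import("builtin");
const assert = @import("std").debug.assert;

test "builtin values are comptime-known" {
    // The branch is resolved at compile time based on the target OS.
    const separator = if (builtin.os == builtin.Os.windows) '\\' else '/';
    assert(separator == '/' or separator == '\\');
}
      {#code_end#}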
@@ -5633,10 +7180,16 @@ Environments:
opencl
The Zig Standard Library (@import("std")) has architecture, environment, and operating system
- abstractions, and thus takes additional work to support more platforms. It currently supports
- Linux x86_64. Not all standard library code requires operating system abstractions, however,
+ abstractions, and thus takes additional work to support more platforms.
+ Not all standard library code requires operating system abstractions, however,
so things such as generic data structures work on all of the above platforms.
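      For example (an editorial sketch, not part of the original patch), a generic container can be exercised with nothing more than an allocator:
      {#code_begin|test#}
const std = @import("std");

test "generic containers need no OS abstractions" {
    // std.debug.global_allocator is used only to keep the sketch short.
    var list = std.ArrayList(i32).init(std.debug.global_allocator);
    defer list.deinit();
    list.append(42) catch unreachable;
    std.debug.assert(list.toSliceConst()[0] == 42);
}
      {#code_end#}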
The current list of targets supported by the Zig Standard Library is:
+@@ -5750,7 +7303,7 @@ fn readU32Be() u32 {}
The codepoint U+000a (LF) (which is encoded as the single-byte value 0x0a) is the line terminator character. This character always terminates a line of zig source code (except possibly the last line of the file).
-For some discussion on the rationale behind these design decisions, see issue #663
+For some discussion on the rationale behind these design decisions, see issue #663
{#header_close#} {#header_open|Grammar#}Root = many(TopLevelItem) EOF
@@ -5805,9 +7358,9 @@ AsmInputItem = "[" Symbol "]" String "(" Expression ")"
AsmClobbers= ":" list(String, ",")
-UnwrapExpression = BoolOrExpression (UnwrapNullable | UnwrapError) | BoolOrExpression
+UnwrapExpression = BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression
-UnwrapNullable = "??" Expression
+UnwrapOptional = "orelse" Expression
UnwrapError = "catch" option("|" Symbol "|") Expression
@@ -5845,7 +7398,7 @@ Defer(body) = ("defer" | "deferror") body
IfExpression(body) = "if" "(" Expression ")" body option("else" BlockExpression(body))
-SuspendExpression(body) = "suspend" option(("|" Symbol "|" body))
+SuspendExpression(body) = "suspend" option( body )
IfErrorExpression(body) = "if" "(" Expression ")" option("|" option("*") Symbol "|") body "else" "|" Symbol "|" BlockExpression(body)
@@ -5881,7 +7434,7 @@ MultiplyOperator = "||" | "*" | "/" | "%" | "**" | "*%"
PrefixOpExpression = PrefixOp TypeExpr | SuffixOpExpression
-SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression)
+SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | ".*" | ".?")
FieldAccessExpression = "." Symbol
@@ -5897,7 +7450,7 @@ ContainerInitBody = list(StructLiteralField, ",") | list(Expression, ",")
StructLiteralField = "." Symbol "=" Expression
-PrefixOp = "!" | "-" | "~" | "*" | ("&" option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" | "await"
+PrefixOp = "!" | "-" | "~" | (("*" | "[*]") option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "-%" | "try" | "await"
PrimaryExpression = Integer | Float | String | CharLiteral | KeywordLiteral | GroupedExpression | BlockExpression(BlockOrExpression) | Symbol | ("@" Symbol FnCallExpression) | ArrayType | FnProto | AsmExpression | ContainerDecl | ("continue" option(":" Symbol)) | ErrorSetDecl | PromiseType
@@ -5990,8 +7543,8 @@ hljs.registerLanguage("zig", function(t) {
},
a = t.IR + "\\s*\\(",
c = {
- keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong",
- built_in: "breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic canImplicitCast ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchg fence divExact truncate atomicRmw",
+ keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume cancel await async orelse",
+ built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bytesToSlice sliceToBytes errSetCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz popCount import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall errorToInt intToError enumToInt intToEnum",
literal: "true false null undefined"
},
n = [e, t.CLCM, t.CBCM, s, r];
diff --git a/doc/semantic_analysis.md b/doc/semantic_analysis.md
deleted file mode 100644
index 6e860aac42..0000000000
--- a/doc/semantic_analysis.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# How Semantic Analysis Works
-
-We start with a set of files. Typically the user only has one entry point file,
-which imports the other files they want to use. However, the compiler may
-choose to add more files to the compilation, for example bootstrap.zig which
-contains the code that calls main.
-
-Our goal now is to treat everything that is marked with the `export` keyword
-as a root node, and then parse and semantically analyze as little as possible
-in order to fulfill these exports.
-
-So, some parts of the code very well may have uncaught semantic errors, but as
-long as the code is not referenced in any way, the compiler will not complain
-because the code may as well not exist. This is similar to the fact that code
-excluded from compilation with an `#ifdef` in C is not analyzed. Avoiding
-analyzing unused code will save compilation time - one of Zig's goals.
-
-So, for each file, we iterate over the top level declarations. The set of top
-level declarations are:
-
- * Function Definition
- * Global Variable Declaration
- * Container Declaration (struct or enum)
- * Error Value Declaration
- * Use Declaration
-
-Each of these can have `export` attached to them except for error value
-declarations and use declarations.
-
-When we see a top level declaration during this iteration, we determine its
-unique name identifier within the file. For example, for a function definition,
-the unique name identifier is simply its name. Using this name we add the top
-level declaration to a map.
-
-If the top level declaration is exported, we add it to a set of exported top
-level identifiers.
-
-If the top level declaration is a use declaration, we add it to a set of use
-declarations.
-
-If the top level declaration is an error value declaration, we assign it a value
-and increment the count of error values.
-
-After this preliminary iteration over the top level declarations, we iterate
-over the use declarations and resolve them. To resolve a use declaration, we
-analyze the associated expression, verify that its type is the namespace type,
-and then add all the items from the namespace into the top level declaration
-map for the current file.
-
-To analyze an expression, we recurse the abstract syntax tree of the
-expression. Whenever we must look up a symbol, if the symbol exists already,
-we can use it. Otherwise, we look it up in the top level declaration map.
-If it exists, we can use it. Otherwise, we interrupt resolving this use
-declaration to resolve the next one. If a dependency loop is detected, emit
-an error. If all use declarations are resolved yet the symbol we need still
-does not exist, emit an error.
-
-To analyze an `@import` expression, find the referenced file, parse it, and
-add it to the set of files to perform semantic analysis on.
-
-Proceed through the rest of the use declarations the same way.
-
-If we make it through the use declarations without an error, then we have a
-complete map of all globals that exist in the current file.
-
-Next we iterate over the set of exported top level declarations.
-
-If it's a function definition, add it to the set of exported function
-definitions and resolve the function prototype only. Otherwise, resolve the
-top level declaration completely. This may involve recursively resolving other
-top level declarations that expressions depend on.
-
-Finally, iterate over the set of exported function definitions and analyze the
-bodies.
diff --git a/example/cat/main.zig b/example/cat/main.zig
index de0d323bed..27690d2695 100644
--- a/example/cat/main.zig
+++ b/example/cat/main.zig
@@ -7,7 +7,7 @@ const allocator = std.debug.global_allocator;
pub fn main() !void {
var args_it = os.args();
- const exe = try unwrapArg(??args_it.next(allocator));
+ const exe = try unwrapArg(args_it.next(allocator).?);
var catted_anything = false;
var stdout_file = try io.getStdOut();
@@ -41,7 +41,7 @@ fn usage(exe: []const u8) !void {
return error.Invalid;
}
-fn cat_file(stdout: &os.File, file: &os.File) !void {
+fn cat_file(stdout: *os.File, file: *os.File) !void {
var buf: [1024 * 4]u8 = undefined;
while (true) {
diff --git a/example/guess_number/main.zig b/example/guess_number/main.zig
index 7178c5274a..bed132b25c 100644
--- a/example/guess_number/main.zig
+++ b/example/guess_number/main.zig
@@ -23,7 +23,7 @@ pub fn main() !void {
while (true) {
try stdout.print("\nGuess a number between 1 and 100: ");
- var line_buf : [20]u8 = undefined;
+ var line_buf: [20]u8 = undefined;
const line_len = io.readLine(line_buf[0..]) catch |err| switch (err) {
error.InputTooLong => {
diff --git a/example/hello_world/hello_libc.zig b/example/hello_world/hello_libc.zig
index 60123c6fd8..60a1f76871 100644
--- a/example/hello_world/hello_libc.zig
+++ b/example/hello_world/hello_libc.zig
@@ -1,5 +1,5 @@
const c = @cImport({
- // See https://github.com/zig-lang/zig/issues/515
+ // See https://github.com/ziglang/zig/issues/515
@cDefine("_NO_CRT_STDIO_INLINE", "1");
@cInclude("stdio.h");
@cInclude("string.h");
@@ -7,9 +7,8 @@ const c = @cImport({
const msg = c"Hello, world!\n";
-export fn main(argc: c_int, argv: &&u8) c_int {
- if (c.printf(msg) != c_int(c.strlen(msg)))
- return -1;
+export fn main(argc: c_int, argv: **u8) c_int {
+ if (c.printf(msg) != @intCast(c_int, c.strlen(msg))) return -1;
return 0;
}
diff --git a/example/mix_o_files/base64.zig b/example/mix_o_files/base64.zig
index e682a97055..7ded9824a0 100644
--- a/example/mix_o_files/base64.zig
+++ b/example/mix_o_files/base64.zig
@@ -1,6 +1,6 @@
const base64 = @import("std").base64;
-export fn decode_base_64(dest_ptr: &u8, dest_len: usize, source_ptr: &const u8, source_len: usize) usize {
+export fn decode_base_64(dest_ptr: [*]u8, dest_len: usize, source_ptr: [*]const u8, source_len: usize) usize {
const src = source_ptr[0..source_len];
const dest = dest_ptr[0..dest_len];
const base64_decoder = base64.standard_decoder_unsafe;
diff --git a/example/mix_o_files/build.zig b/example/mix_o_files/build.zig
index 4380486867..a4e7fbbf8f 100644
--- a/example/mix_o_files/build.zig
+++ b/example/mix_o_files/build.zig
@@ -1,12 +1,10 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const obj = b.addObject("base64", "base64.zig");
const exe = b.addCExecutable("test");
- exe.addCompileFlags([][]const u8 {
- "-std=c99",
- });
+ exe.addCompileFlags([][]const u8{"-std=c99"});
exe.addSourceFile("test.c");
exe.addObject(obj);
diff --git a/example/shared_library/build.zig b/example/shared_library/build.zig
index 2b5a178b35..05648cf9eb 100644
--- a/example/shared_library/build.zig
+++ b/example/shared_library/build.zig
@@ -1,12 +1,10 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0));
const exe = b.addCExecutable("test");
- exe.addCompileFlags([][]const u8 {
- "-std=c99",
- });
+ exe.addCompileFlags([][]const u8{"-std=c99"});
exe.addSourceFile("test.c");
exe.linkLibrary(lib);
diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig
index 707f208287..2ab44e5fdf 100644
--- a/src-self-hosted/arg.zig
+++ b/src-self-hosted/arg.zig
@@ -30,24 +30,22 @@ fn argInAllowedSet(maybe_set: ?[]const []const u8, arg: []const u8) bool {
}
// Modifies the current argument index during iteration
-fn readFlagArguments(allocator: &Allocator, args: []const []const u8, required: usize,
- allowed_set: ?[]const []const u8, index: &usize) !FlagArg {
-
+fn readFlagArguments(allocator: *Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: *usize) !FlagArg {
switch (required) {
- 0 => return FlagArg { .None = undefined }, // TODO: Required to force non-tag but value?
+ 0 => return FlagArg{ .None = undefined }, // TODO: Required to force non-tag but value?
1 => {
- if (*index + 1 >= args.len) {
+ if (index.* + 1 >= args.len) {
return error.MissingFlagArguments;
}
- *index += 1;
- const arg = args[*index];
+ index.* += 1;
+ const arg = args[index.*];
if (!argInAllowedSet(allowed_set, arg)) {
return error.ArgumentNotInAllowedSet;
}
- return FlagArg { .Single = arg };
+ return FlagArg{ .Single = arg };
},
else => |needed| {
var extra = ArrayList([]const u8).init(allocator);
@@ -55,12 +53,12 @@ fn readFlagArguments(allocator: &Allocator, args: []const []const u8, required:
var j: usize = 0;
while (j < needed) : (j += 1) {
- if (*index + 1 >= args.len) {
+ if (index.* + 1 >= args.len) {
return error.MissingFlagArguments;
}
- *index += 1;
- const arg = args[*index];
+ index.* += 1;
+ const arg = args[index.*];
if (!argInAllowedSet(allowed_set, arg)) {
return error.ArgumentNotInAllowedSet;
@@ -69,7 +67,7 @@ fn readFlagArguments(allocator: &Allocator, args: []const []const u8, required:
try extra.append(arg);
}
- return FlagArg { .Many = extra };
+ return FlagArg{ .Many = extra };
},
}
}
@@ -81,8 +79,8 @@ pub const Args = struct {
flags: HashMapFlags,
positionals: ArrayList([]const u8),
- pub fn parse(allocator: &Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
- var parsed = Args {
+ pub fn parse(allocator: *Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
+ var parsed = Args{
.flags = HashMapFlags.init(allocator),
.positionals = ArrayList([]const u8).init(allocator),
};
@@ -101,7 +99,7 @@ pub const Args = struct {
error.ArgumentNotInAllowedSet => {
std.debug.warn("argument '{}' is invalid for flag '{}'\n", args[i], arg);
std.debug.warn("allowed options are ");
- for (??flag.allowed_set) |possible| {
+ for (flag.allowed_set.?) |possible| {
std.debug.warn("'{}' ", possible);
}
std.debug.warn("\n");
@@ -116,11 +114,7 @@ pub const Args = struct {
};
if (flag.mergable) {
- var prev =
- if (parsed.flags.get(flag_name_trimmed)) |entry|
- entry.value.Many
- else
- ArrayList([]const u8).init(allocator);
+ var prev = if (parsed.flags.get(flag_name_trimmed)) |entry| entry.value.Many else ArrayList([]const u8).init(allocator);
// MergeN creation disallows 0 length flag entry (doesn't make sense)
switch (flag_args) {
@@ -129,7 +123,7 @@ pub const Args = struct {
FlagArg.Many => |inner| try prev.appendSlice(inner.toSliceConst()),
}
- _ = try parsed.flags.put(flag_name_trimmed, FlagArg { .Many = prev });
+ _ = try parsed.flags.put(flag_name_trimmed, FlagArg{ .Many = prev });
} else {
_ = try parsed.flags.put(flag_name_trimmed, flag_args);
}
@@ -149,21 +143,23 @@ pub const Args = struct {
return parsed;
}
- pub fn deinit(self: &Args) void {
+ pub fn deinit(self: *Args) void {
self.flags.deinit();
self.positionals.deinit();
}
// e.g. --help
- pub fn present(self: &Args, name: []const u8) bool {
+ pub fn present(self: *Args, name: []const u8) bool {
return self.flags.contains(name);
}
// e.g. --name value
- pub fn single(self: &Args, name: []const u8) ?[]const u8 {
+ pub fn single(self: *Args, name: []const u8) ?[]const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
- FlagArg.Single => |inner| { return inner; },
+ FlagArg.Single => |inner| {
+ return inner;
+ },
else => @panic("attempted to retrieve flag with wrong type"),
}
} else {
@@ -172,14 +168,16 @@ pub const Args = struct {
}
// e.g. --names value1 value2 value3
- pub fn many(self: &Args, name: []const u8) ?[]const []const u8 {
+ pub fn many(self: *Args, name: []const u8) []const []const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
- FlagArg.Many => |inner| { return inner.toSliceConst(); },
+ FlagArg.Many => |inner| {
+ return inner.toSliceConst();
+ },
else => @panic("attempted to retrieve flag with wrong type"),
}
} else {
- return null;
+ return []const []const u8{};
}
}
};
@@ -207,7 +205,7 @@ pub const Flag = struct {
}
pub fn ArgN(comptime name: []const u8, comptime n: usize) Flag {
- return Flag {
+ return Flag{
.name = name,
.required = n,
.mergable = false,
@@ -220,7 +218,7 @@ pub const Flag = struct {
@compileError("n must be greater than 0");
}
- return Flag {
+ return Flag{
.name = name,
.required = n,
.mergable = true,
@@ -229,7 +227,7 @@ pub const Flag = struct {
}
pub fn Option(comptime name: []const u8, comptime set: []const []const u8) Flag {
- return Flag {
+ return Flag{
.name = name,
.required = 1,
.mergable = false,
@@ -239,26 +237,36 @@ pub const Flag = struct {
};
test "parse arguments" {
- const spec1 = comptime []const Flag {
+ const spec1 = comptime []const Flag{
Flag.Bool("--help"),
Flag.Bool("--init"),
Flag.Arg1("--build-file"),
- Flag.Option("--color", []const []const u8 { "on", "off", "auto" }),
+ Flag.Option("--color", []const []const u8{
+ "on",
+ "off",
+ "auto",
+ }),
Flag.ArgN("--pkg-begin", 2),
Flag.ArgMergeN("--object", 1),
Flag.ArgN("--library", 1),
};
- const cliargs = []const []const u8 {
+ const cliargs = []const []const u8{
"build",
"--help",
"pos1",
- "--build-file", "build.zig",
- "--object", "obj1",
- "--object", "obj2",
- "--library", "lib1",
- "--library", "lib2",
- "--color", "on",
+ "--build-file",
+ "build.zig",
+ "--object",
+ "obj1",
+ "--object",
+ "obj2",
+ "--library",
+ "lib1",
+ "--library",
+ "lib2",
+ "--color",
+ "on",
"pos2",
};
@@ -268,14 +276,14 @@ test "parse arguments" {
debug.assert(!args.present("help2"));
debug.assert(!args.present("init"));
- debug.assert(mem.eql(u8, ??args.single("build-file"), "build.zig"));
- debug.assert(mem.eql(u8, ??args.single("color"), "on"));
+ debug.assert(mem.eql(u8, args.single("build-file").?, "build.zig"));
+ debug.assert(mem.eql(u8, args.single("color").?, "on"));
- const objects = ??args.many("object");
+ const objects = args.many("object").?;
debug.assert(mem.eql(u8, objects[0], "obj1"));
debug.assert(mem.eql(u8, objects[1], "obj2"));
- debug.assert(mem.eql(u8, ??args.single("library"), "lib2"));
+ debug.assert(mem.eql(u8, args.single("library").?, "lib2"));
const pos = args.positionals.toSliceConst();
debug.assert(mem.eql(u8, pos[0], "build"));
diff --git a/src-self-hosted/c.zig b/src-self-hosted/c.zig
index 08060fbe3a..778d851240 100644
--- a/src-self-hosted/c.zig
+++ b/src-self-hosted/c.zig
@@ -1,5 +1,8 @@
pub use @cImport({
+ @cDefine("__STDC_CONSTANT_MACROS", "");
+ @cDefine("__STDC_LIMIT_MACROS", "");
@cInclude("inttypes.h");
@cInclude("config.h");
@cInclude("zig_llvm.h");
+ @cInclude("windows_sdk.h");
});
diff --git a/src-self-hosted/c_int.zig b/src-self-hosted/c_int.zig
new file mode 100644
index 0000000000..10ce54da05
--- /dev/null
+++ b/src-self-hosted/c_int.zig
@@ -0,0 +1,68 @@
+pub const CInt = struct {
+ id: Id,
+ zig_name: []const u8,
+ c_name: []const u8,
+ is_signed: bool,
+
+ pub const Id = enum {
+ Short,
+ UShort,
+ Int,
+ UInt,
+ Long,
+ ULong,
+ LongLong,
+ ULongLong,
+ };
+
+ pub const list = []CInt{
+ CInt{
+ .id = Id.Short,
+ .zig_name = "c_short",
+ .c_name = "short",
+ .is_signed = true,
+ },
+ CInt{
+ .id = Id.UShort,
+ .zig_name = "c_ushort",
+ .c_name = "unsigned short",
+ .is_signed = false,
+ },
+ CInt{
+ .id = Id.Int,
+ .zig_name = "c_int",
+ .c_name = "int",
+ .is_signed = true,
+ },
+ CInt{
+ .id = Id.UInt,
+ .zig_name = "c_uint",
+ .c_name = "unsigned int",
+ .is_signed = false,
+ },
+ CInt{
+ .id = Id.Long,
+ .zig_name = "c_long",
+ .c_name = "long",
+ .is_signed = true,
+ },
+ CInt{
+ .id = Id.ULong,
+ .zig_name = "c_ulong",
+ .c_name = "unsigned long",
+ .is_signed = false,
+ },
+ CInt{
+ .id = Id.LongLong,
+ .zig_name = "c_longlong",
+ .c_name = "long long",
+ .is_signed = true,
+ },
+ CInt{
+ .id = Id.ULongLong,
+ .zig_name = "c_ulonglong",
+ .c_name = "unsigned long long",
+ .is_signed = false,
+ },
+ };
+};
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
new file mode 100644
index 0000000000..5ca01ca7e7
--- /dev/null
+++ b/src-self-hosted/codegen.zig
@@ -0,0 +1,450 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Compilation = @import("compilation.zig").Compilation;
+const llvm = @import("llvm.zig");
+const c = @import("c.zig");
+const ir = @import("ir.zig");
+const Value = @import("value.zig").Value;
+const Type = @import("type.zig").Type;
+const Scope = @import("scope.zig").Scope;
+const event = std.event;
+const assert = std.debug.assert;
+const DW = std.dwarf;
+
+pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) !void {
+ fn_val.base.ref();
+ defer fn_val.base.deref(comp);
+ defer code.destroy(comp.gpa());
+
+ var output_path = try await (async comp.createRandomOutputPath(comp.target.objFileExt()) catch unreachable);
+ errdefer output_path.deinit();
+
+ const llvm_handle = try comp.event_loop_local.getAnyLlvmContext();
+ defer llvm_handle.release(comp.event_loop_local);
+
+ const context = llvm_handle.node.data;
+
+ const module = llvm.ModuleCreateWithNameInContext(comp.name.ptr(), context) orelse return error.OutOfMemory;
+ defer llvm.DisposeModule(module);
+
+ llvm.SetTarget(module, comp.llvm_triple.ptr());
+ llvm.SetDataLayout(module, comp.target_layout_str);
+
+ if (comp.target.getObjectFormat() == builtin.ObjectFormat.coff) {
+ llvm.AddModuleCodeViewFlag(module);
+ } else {
+ llvm.AddModuleDebugInfoFlag(module);
+ }
+
+ const builder = llvm.CreateBuilderInContext(context) orelse return error.OutOfMemory;
+ defer llvm.DisposeBuilder(builder);
+
+ const dibuilder = llvm.CreateDIBuilder(module, true) orelse return error.OutOfMemory;
+ defer llvm.DisposeDIBuilder(dibuilder);
+
+ // Don't use ZIG_VERSION_STRING here. LLVM misparses it when it includes
+ // the git revision.
+ const producer = try std.Buffer.allocPrint(
+ &code.arena.allocator,
+ "zig {}.{}.{}",
+ u32(c.ZIG_VERSION_MAJOR),
+ u32(c.ZIG_VERSION_MINOR),
+ u32(c.ZIG_VERSION_PATCH),
+ );
+ const flags = c"";
+ const runtime_version = 0;
+ const compile_unit_file = llvm.CreateFile(
+ dibuilder,
+ comp.name.ptr(),
+ comp.root_package.root_src_dir.ptr(),
+ ) orelse return error.OutOfMemory;
+ const is_optimized = comp.build_mode != builtin.Mode.Debug;
+ const compile_unit = llvm.CreateCompileUnit(
+ dibuilder,
+ DW.LANG_C99,
+ compile_unit_file,
+ producer.ptr(),
+ is_optimized,
+ flags,
+ runtime_version,
+ c"",
+ 0,
+ !comp.strip,
+ ) orelse return error.OutOfMemory;
+
+ var ofile = ObjectFile{
+ .comp = comp,
+ .module = module,
+ .builder = builder,
+ .dibuilder = dibuilder,
+ .context = context,
+ .lock = event.Lock.init(comp.loop),
+ .arena = &code.arena.allocator,
+ };
+
+ try renderToLlvmModule(&ofile, fn_val, code);
+
+ // TODO module level assembly
+ //if (buf_len(&g->global_asm) != 0) {
+ // LLVMSetModuleInlineAsm(g->module, buf_ptr(&g->global_asm));
+ //}
+
+ llvm.DIBuilderFinalize(dibuilder);
+
+ if (comp.verbose_llvm_ir) {
+ std.debug.warn("raw module:\n");
+ llvm.DumpModule(ofile.module);
+ }
+
+ // verify the llvm module when safety is on
+ if (std.debug.runtime_safety) {
+ var error_ptr: ?[*]u8 = null;
+ _ = llvm.VerifyModule(ofile.module, llvm.AbortProcessAction, &error_ptr);
+ }
+
+ assert(comp.emit_file_type == Compilation.Emit.Binary); // TODO support other types
+
+ const is_small = comp.build_mode == builtin.Mode.ReleaseSmall;
+ const is_debug = comp.build_mode == builtin.Mode.Debug;
+
+ var err_msg: [*]u8 = undefined;
+ // TODO integrate this with evented I/O
+ if (llvm.TargetMachineEmitToFile(
+ comp.target_machine,
+ module,
+ output_path.ptr(),
+ llvm.EmitBinary,
+ &err_msg,
+ is_debug,
+ is_small,
+ )) {
+ if (std.debug.runtime_safety) {
+ std.debug.panic("unable to write object file {}: {s}\n", output_path.toSliceConst(), err_msg);
+ }
+ return error.WritingObjectFileFailed;
+ }
+ //validate_inline_fns(g); TODO
+ fn_val.containing_object = output_path;
+ if (comp.verbose_llvm_ir) {
+ std.debug.warn("optimized module:\n");
+ llvm.DumpModule(ofile.module);
+ }
+ if (comp.verbose_link) {
+ std.debug.warn("created {}\n", output_path.toSliceConst());
+ }
+}
+
+pub const ObjectFile = struct {
+ comp: *Compilation,
+ module: llvm.ModuleRef,
+ builder: llvm.BuilderRef,
+ dibuilder: *llvm.DIBuilder,
+ context: llvm.ContextRef,
+ lock: event.Lock,
+ arena: *std.mem.Allocator,
+
+ fn gpa(self: *ObjectFile) *std.mem.Allocator {
+ return self.comp.gpa();
+ }
+};
+
+pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code) !void {
+ // TODO audit more of codegen.cpp:fn_llvm_value and port more logic
+ const llvm_fn_type = try fn_val.base.typ.getLlvmType(ofile.arena, ofile.context);
+ const llvm_fn = llvm.AddFunction(
+ ofile.module,
+ fn_val.symbol_name.ptr(),
+ llvm_fn_type,
+ ) orelse return error.OutOfMemory;
+
+ const want_fn_safety = fn_val.block_scope.?.safety.get(ofile.comp);
+ if (want_fn_safety and ofile.comp.haveLibC()) {
+ try addLLVMFnAttr(ofile, llvm_fn, "sspstrong");
+ try addLLVMFnAttrStr(ofile, llvm_fn, "stack-protector-buffer-size", "4");
+ }
+
+ // TODO
+ //if (fn_val.align_stack) |align_stack| {
+ // try addLLVMFnAttrInt(ofile, llvm_fn, "alignstack", align_stack);
+ //}
+
+ const fn_type = fn_val.base.typ.cast(Type.Fn).?;
+ const fn_type_normal = &fn_type.key.data.Normal;
+
+ try addLLVMFnAttr(ofile, llvm_fn, "nounwind");
+ //add_uwtable_attr(g, fn_table_entry->llvm_value);
+ try addLLVMFnAttr(ofile, llvm_fn, "nobuiltin");
+
+ //if (g->build_mode == BuildModeDebug && fn_table_entry->fn_inline != FnInlineAlways) {
+ // ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim", "true");
+ // ZigLLVMAddFunctionAttr(fn_table_entry->llvm_value, "no-frame-pointer-elim-non-leaf", nullptr);
+ //}
+
+ //if (fn_table_entry->section_name) {
+ // LLVMSetSection(fn_table_entry->llvm_value, buf_ptr(fn_table_entry->section_name));
+ //}
+ //if (fn_table_entry->align_bytes > 0) {
+ // LLVMSetAlignment(fn_table_entry->llvm_value, (unsigned)fn_table_entry->align_bytes);
+ //} else {
+ // // We'd like to set the best alignment for the function here, but on Darwin LLVM gives
+ // // "Cannot getTypeInfo() on a type that is unsized!" assertion failure when calling
+ // // any of the functions for getting alignment. Not specifying the alignment should
+ // // use the ABI alignment, which is fine.
+ //}
+
+ //if (!type_has_bits(return_type)) {
+ // // nothing to do
+ //} else if (type_is_codegen_pointer(return_type)) {
+ // addLLVMAttr(fn_table_entry->llvm_value, 0, "nonnull");
+ //} else if (handle_is_ptr(return_type) &&
+ // calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc))
+ //{
+ // addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret");
+ // addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull");
+ //}
+
+ // TODO set parameter attributes
+
+ // TODO
+ //uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry);
+ //if (err_ret_trace_arg_index != UINT32_MAX) {
+ // addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull");
+ //}
+
+ const cur_ret_ptr = if (fn_type_normal.return_type.handleIsPtr()) llvm.GetParam(llvm_fn, 0) else null;
+
+ // build all basic blocks
+ for (code.basic_block_list.toSlice()) |bb| {
+ bb.llvm_block = llvm.AppendBasicBlockInContext(
+ ofile.context,
+ llvm_fn,
+ bb.name_hint,
+ ) orelse return error.OutOfMemory;
+ }
+ const entry_bb = code.basic_block_list.at(0);
+ llvm.PositionBuilderAtEnd(ofile.builder, entry_bb.llvm_block);
+
+ llvm.ClearCurrentDebugLocation(ofile.builder);
+
+ // TODO set up error return tracing
+ // TODO allocate temporary stack values
+
+ const var_list = fn_type.non_key.Normal.variable_list.toSliceConst();
+ // create debug variable declarations for variables and allocate all local variables
+ for (var_list) |var_scope, i| {
+ const var_type = switch (var_scope.data) {
+ Scope.Var.Data.Const => unreachable,
+ Scope.Var.Data.Param => |param| param.typ,
+ };
+ // if (!type_has_bits(var->value->type)) {
+ // continue;
+ // }
+ // if (ir_get_var_is_comptime(var))
+ // continue;
+ // if (type_requires_comptime(var->value->type))
+ // continue;
+ // if (var->src_arg_index == SIZE_MAX) {
+ // var->value_ref = build_alloca(g, var->value->type, buf_ptr(&var->name), var->align_bytes);
+
+ // var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
+ // buf_ptr(&var->name), import->di_file, (unsigned)(var->decl_node->line + 1),
+ // var->value->type->di_type, !g->strip_debug_symbols, 0);
+
+ // } else {
+ // it's a parameter
+ // assert(var->gen_arg_index != SIZE_MAX);
+ // TypeTableEntry *gen_type;
+ // FnGenParamInfo *gen_info = &fn_table_entry->type_entry->data.fn.gen_param_info[var->src_arg_index];
+
+ if (var_type.handleIsPtr()) {
+ // if (gen_info->is_byval) {
+ // gen_type = var->value->type;
+ // } else {
+ // gen_type = gen_info->type;
+ // }
+ var_scope.data.Param.llvm_value = llvm.GetParam(llvm_fn, @intCast(c_uint, i));
+ } else {
+ // gen_type = var->value->type;
+ var_scope.data.Param.llvm_value = try renderAlloca(ofile, var_type, var_scope.name, Type.Pointer.Align.Abi);
+ }
+ // if (var->decl_node) {
+ // var->di_loc_var = ZigLLVMCreateParameterVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
+ // buf_ptr(&var->name), import->di_file,
+ // (unsigned)(var->decl_node->line + 1),
+ // gen_type->di_type, !g->strip_debug_symbols, 0, (unsigned)(var->gen_arg_index + 1));
+ // }
+
+ // }
+ }
+
+ // TODO finishing error return trace setup. we have to do this after all the allocas.
+
+ // create debug variable declarations for parameters
+ // rely on the first variables in the variable_list being parameters.
+ //size_t next_var_i = 0;
+ for (fn_type.key.data.Normal.params) |param, i| {
+ //FnGenParamInfo *info = &fn_table_entry->type_entry->data.fn.gen_param_info[param_i];
+ //if (info->gen_index == SIZE_MAX)
+ // continue;
+ const scope_var = var_list[i];
+ //assert(variable->src_arg_index != SIZE_MAX);
+ //next_var_i += 1;
+ //assert(variable);
+ //assert(variable->value_ref);
+
+ if (!param.typ.handleIsPtr()) {
+ //clear_debug_source_node(g);
+ const llvm_param = llvm.GetParam(llvm_fn, @intCast(c_uint, i));
+ _ = renderStoreUntyped(
+ ofile,
+ llvm_param,
+ scope_var.data.Param.llvm_value,
+ Type.Pointer.Align.Abi,
+ Type.Pointer.Vol.Non,
+ );
+ }
+
+ //if (variable->decl_node) {
+ // gen_var_debug_decl(g, variable);
+ //}
+ }
+
+ for (code.basic_block_list.toSlice()) |current_block| {
+ llvm.PositionBuilderAtEnd(ofile.builder, current_block.llvm_block);
+ for (current_block.instruction_list.toSlice()) |instruction| {
+ if (instruction.ref_count == 0 and !instruction.hasSideEffects()) continue;
+
+ instruction.llvm_value = try instruction.render(ofile, fn_val);
+ }
+ current_block.llvm_exit_block = llvm.GetInsertBlock(ofile.builder);
+ }
+}
+
+fn addLLVMAttr(
+ ofile: *ObjectFile,
+ val: llvm.ValueRef,
+ attr_index: llvm.AttributeIndex,
+ attr_name: []const u8,
+) !void {
+ const kind_id = llvm.GetEnumAttributeKindForName(attr_name.ptr, attr_name.len);
+ assert(kind_id != 0);
+ const llvm_attr = llvm.CreateEnumAttribute(ofile.context, kind_id, 0) orelse return error.OutOfMemory;
+ llvm.AddAttributeAtIndex(val, attr_index, llvm_attr);
+}
+
+fn addLLVMAttrStr(
+ ofile: *ObjectFile,
+ val: llvm.ValueRef,
+ attr_index: llvm.AttributeIndex,
+ attr_name: []const u8,
+ attr_val: []const u8,
+) !void {
+ const llvm_attr = llvm.CreateStringAttribute(
+ ofile.context,
+ attr_name.ptr,
+ @intCast(c_uint, attr_name.len),
+ attr_val.ptr,
+ @intCast(c_uint, attr_val.len),
+ ) orelse return error.OutOfMemory;
+ llvm.AddAttributeAtIndex(val, attr_index, llvm_attr);
+}
+
+fn addLLVMAttrInt(
+ val: llvm.ValueRef,
+ attr_index: llvm.AttributeIndex,
+ attr_name: []const u8,
+ attr_val: u64,
+) !void {
+ const kind_id = llvm.GetEnumAttributeKindForName(attr_name.ptr, attr_name.len);
+ assert(kind_id != 0);
+ const llvm_attr = llvm.CreateEnumAttribute(ofile.context, kind_id, attr_val) orelse return error.OutOfMemory;
+ llvm.AddAttributeAtIndex(val, attr_index, llvm_attr);
+}
+
+fn addLLVMFnAttr(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8) !void {
+ return addLLVMAttr(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name);
+}
+
+fn addLLVMFnAttrStr(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8, attr_val: []const u8) !void {
+ return addLLVMAttrStr(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name, attr_val);
+}
+
+fn addLLVMFnAttrInt(ofile: *ObjectFile, fn_val: llvm.ValueRef, attr_name: []const u8, attr_val: u64) !void {
+ return addLLVMAttrInt(ofile, fn_val, @maxValue(llvm.AttributeIndex), attr_name, attr_val);
+}
+
+fn renderLoadUntyped(
+ ofile: *ObjectFile,
+ ptr: llvm.ValueRef,
+ alignment: Type.Pointer.Align,
+ vol: Type.Pointer.Vol,
+ name: [*]const u8,
+) !llvm.ValueRef {
+ const result = llvm.BuildLoad(ofile.builder, ptr, name) orelse return error.OutOfMemory;
+ switch (vol) {
+ Type.Pointer.Vol.Non => {},
+ Type.Pointer.Vol.Volatile => llvm.SetVolatile(result, 1),
+ }
+ llvm.SetAlignment(result, resolveAlign(ofile, alignment, llvm.GetElementType(llvm.TypeOf(ptr))));
+ return result;
+}
+
+fn renderLoad(ofile: *ObjectFile, ptr: llvm.ValueRef, ptr_type: *Type.Pointer, name: [*]const u8) !llvm.ValueRef {
+ return renderLoadUntyped(ofile, ptr, ptr_type.key.alignment, ptr_type.key.vol, name);
+}
+
+pub fn getHandleValue(ofile: *ObjectFile, ptr: llvm.ValueRef, ptr_type: *Type.Pointer) !?llvm.ValueRef {
+ const child_type = ptr_type.key.child_type;
+ if (!child_type.hasBits()) {
+ return null;
+ }
+ if (child_type.handleIsPtr()) {
+ return ptr;
+ }
+ return try renderLoad(ofile, ptr, ptr_type, c"");
+}
+
+pub fn renderStoreUntyped(
+ ofile: *ObjectFile,
+ value: llvm.ValueRef,
+ ptr: llvm.ValueRef,
+ alignment: Type.Pointer.Align,
+ vol: Type.Pointer.Vol,
+) !llvm.ValueRef {
+ const result = llvm.BuildStore(ofile.builder, value, ptr) orelse return error.OutOfMemory;
+ switch (vol) {
+ Type.Pointer.Vol.Non => {},
+ Type.Pointer.Vol.Volatile => llvm.SetVolatile(result, 1),
+ }
+ llvm.SetAlignment(result, resolveAlign(ofile, alignment, llvm.TypeOf(value)));
+ return result;
+}
+
+pub fn renderStore(
+ ofile: *ObjectFile,
+ value: llvm.ValueRef,
+ ptr: llvm.ValueRef,
+ ptr_type: *Type.Pointer,
+) !llvm.ValueRef {
+ return renderStoreUntyped(ofile, value, ptr, ptr_type.key.alignment, ptr_type.key.vol);
+}
+
+pub fn renderAlloca(
+ ofile: *ObjectFile,
+ var_type: *Type,
+ name: []const u8,
+ alignment: Type.Pointer.Align,
+) !llvm.ValueRef {
+ const llvm_var_type = try var_type.getLlvmType(ofile.arena, ofile.context);
+ const name_with_null = try std.cstr.addNullByte(ofile.arena, name);
+ const result = llvm.BuildAlloca(ofile.builder, llvm_var_type, name_with_null.ptr) orelse return error.OutOfMemory;
+ llvm.SetAlignment(result, resolveAlign(ofile, alignment, llvm_var_type));
+ return result;
+}
+
+pub fn resolveAlign(ofile: *ObjectFile, alignment: Type.Pointer.Align, llvm_type: llvm.TypeRef) u32 {
+ return switch (alignment) {
+ Type.Pointer.Align.Abi => return llvm.ABIAlignmentOfType(ofile.comp.target_data_ref, llvm_type),
+ Type.Pointer.Align.Override => |a| a,
+ };
+}
diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig
new file mode 100644
index 0000000000..5ff8b1a858
--- /dev/null
+++ b/src-self-hosted/compilation.zig
@@ -0,0 +1,1303 @@
+const std = @import("std");
+const os = std.os;
+const io = std.io;
+const mem = std.mem;
+const Allocator = mem.Allocator;
+const Buffer = std.Buffer;
+const llvm = @import("llvm.zig");
+const c = @import("c.zig");
+const builtin = @import("builtin");
+const Target = @import("target.zig").Target;
+const warn = std.debug.warn;
+const Token = std.zig.Token;
+const ArrayList = std.ArrayList;
+const errmsg = @import("errmsg.zig");
+const ast = std.zig.ast;
+const event = std.event;
+const assert = std.debug.assert;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const Scope = @import("scope.zig").Scope;
+const Decl = @import("decl.zig").Decl;
+const ir = @import("ir.zig");
+const Visib = @import("visib.zig").Visib;
+const Value = @import("value.zig").Value;
+const Type = Value.Type;
+const Span = errmsg.Span;
+const Msg = errmsg.Msg;
+const codegen = @import("codegen.zig");
+const Package = @import("package.zig").Package;
+const link = @import("link.zig").link;
+const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
+const CInt = @import("c_int.zig").CInt;
+
+/// Data that is local to the event loop.
+pub const EventLoopLocal = struct {
+ loop: *event.Loop,
+ llvm_handle_pool: std.atomic.Stack(llvm.ContextRef),
+ lld_lock: event.Lock,
+
+ /// TODO pool these so that it doesn't have to lock
+ prng: event.Locked(std.rand.DefaultPrng),
+
+ native_libc: event.Future(LibCInstallation),
+
+ var lazy_init_targets = std.lazyInit(void);
+
+ fn init(loop: *event.Loop) !EventLoopLocal {
+ lazy_init_targets.get() orelse {
+ Target.initializeAll();
+ lazy_init_targets.resolve();
+ };
+
+ var seed_bytes: [@sizeOf(u64)]u8 = undefined;
+ try std.os.getRandomBytes(seed_bytes[0..]);
+ const seed = std.mem.readInt(seed_bytes, u64, builtin.Endian.Big);
+
+ return EventLoopLocal{
+ .loop = loop,
+ .lld_lock = event.Lock.init(loop),
+ .llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(),
+ .prng = event.Locked(std.rand.DefaultPrng).init(loop, std.rand.DefaultPrng.init(seed)),
+ .native_libc = event.Future(LibCInstallation).init(loop),
+ };
+ }
+
+ /// Must be called only after EventLoop.run completes.
+ fn deinit(self: *EventLoopLocal) void {
+ self.lld_lock.deinit();
+ while (self.llvm_handle_pool.pop()) |node| {
+ c.LLVMContextDispose(node.data);
+ self.loop.allocator.destroy(node);
+ }
+ }
+
+ /// Gets an exclusive handle on any LlvmContext.
+ /// Caller must release the handle when done.
+ pub fn getAnyLlvmContext(self: *EventLoopLocal) !LlvmHandle {
+ if (self.llvm_handle_pool.pop()) |node| return LlvmHandle{ .node = node };
+
+ const context_ref = c.LLVMContextCreate() orelse return error.OutOfMemory;
+ errdefer c.LLVMContextDispose(context_ref);
+
+ const node = try self.loop.allocator.create(std.atomic.Stack(llvm.ContextRef).Node{
+ .next = undefined,
+ .data = context_ref,
+ });
+ errdefer self.loop.allocator.destroy(node);
+
+ return LlvmHandle{ .node = node };
+ }
+
+ pub async fn getNativeLibC(self: *EventLoopLocal) !*LibCInstallation {
+ if (await (async self.native_libc.start() catch unreachable)) |ptr| return ptr;
+ try await (async self.native_libc.data.findNative(self.loop) catch unreachable);
+ self.native_libc.resolve();
+ return &self.native_libc.data;
+ }
+};
+
+pub const LlvmHandle = struct {
+ node: *std.atomic.Stack(llvm.ContextRef).Node,
+
+ pub fn release(self: LlvmHandle, event_loop_local: *EventLoopLocal) void {
+ event_loop_local.llvm_handle_pool.push(self.node);
+ }
+};
+
+pub const Compilation = struct {
+ event_loop_local: *EventLoopLocal,
+ loop: *event.Loop,
+ name: Buffer,
+ llvm_triple: Buffer,
+ root_src_path: ?[]const u8,
+ target: Target,
+ llvm_target: llvm.TargetRef,
+ build_mode: builtin.Mode,
+ zig_lib_dir: []const u8,
+ zig_std_dir: []const u8,
+
+ /// lazily created when we need it
+ tmp_dir: event.Future(BuildError![]u8),
+
+ version_major: u32,
+ version_minor: u32,
+ version_patch: u32,
+
+ linker_script: ?[]const u8,
+ out_h_path: ?[]const u8,
+
+ is_test: bool,
+ each_lib_rpath: bool,
+ strip: bool,
+ is_static: bool,
+ linker_rdynamic: bool,
+
+ clang_argv: []const []const u8,
+ llvm_argv: []const []const u8,
+ lib_dirs: []const []const u8,
+ rpath_list: []const []const u8,
+ assembly_files: []const []const u8,
+
+ /// paths that are explicitly provided by the user to link against
+ link_objects: []const []const u8,
+
+ /// functions that have their own objects that we need to link
+ /// it uses an optional pointer so that tombstone removals are possible
+ fn_link_set: event.Locked(FnLinkSet),
+
+ pub const FnLinkSet = std.LinkedList(?*Value.Fn);
+
+ windows_subsystem_windows: bool,
+ windows_subsystem_console: bool,
+
+ link_libs_list: ArrayList(*LinkLib),
+ libc_link_lib: ?*LinkLib,
+
+ err_color: errmsg.Color,
+
+ verbose_tokenize: bool,
+ verbose_ast_tree: bool,
+ verbose_ast_fmt: bool,
+ verbose_cimport: bool,
+ verbose_ir: bool,
+ verbose_llvm_ir: bool,
+ verbose_link: bool,
+
+ darwin_frameworks: []const []const u8,
+ darwin_version_min: DarwinVersionMin,
+
+ test_filters: []const []const u8,
+ test_name_prefix: ?[]const u8,
+
+ emit_file_type: Emit,
+
+ kind: Kind,
+
+ link_out_file: ?[]const u8,
+ events: *event.Channel(Event),
+
+ exported_symbol_names: event.Locked(Decl.Table),
+
+ /// Before code generation starts, must wait on this group to make sure
+ /// the build is complete.
+ prelink_group: event.Group(BuildError!void),
+
+ compile_errors: event.Locked(CompileErrList),
+
+ meta_type: *Type.MetaType,
+ void_type: *Type.Void,
+ bool_type: *Type.Bool,
+ noreturn_type: *Type.NoReturn,
+ comptime_int_type: *Type.ComptimeInt,
+ u8_type: *Type.Int,
+
+ void_value: *Value.Void,
+ true_value: *Value.Bool,
+ false_value: *Value.Bool,
+ noreturn_value: *Value.NoReturn,
+
+ target_machine: llvm.TargetMachineRef,
+ target_data_ref: llvm.TargetDataRef,
+ target_layout_str: [*]u8,
+ target_ptr_bits: u32,
+
+ /// for allocating things which have the same lifetime as this Compilation
+ arena_allocator: std.heap.ArenaAllocator,
+
+ root_package: *Package,
+ std_package: *Package,
+
+ override_libc: ?*LibCInstallation,
+
+ /// need to wait on this group before deinitializing
+ deinit_group: event.Group(void),
+
+ destroy_handle: promise,
+
+ have_err_ret_tracing: bool,
+
+ /// not locked because it is read-only
+ primitive_type_table: TypeTable,
+
+ int_type_table: event.Locked(IntTypeTable),
+ array_type_table: event.Locked(ArrayTypeTable),
+ ptr_type_table: event.Locked(PtrTypeTable),
+ fn_type_table: event.Locked(FnTypeTable),
+
+ c_int_types: [CInt.list.len]*Type.Int,
+
+ const IntTypeTable = std.HashMap(*const Type.Int.Key, *Type.Int, Type.Int.Key.hash, Type.Int.Key.eql);
+ const ArrayTypeTable = std.HashMap(*const Type.Array.Key, *Type.Array, Type.Array.Key.hash, Type.Array.Key.eql);
+ const PtrTypeTable = std.HashMap(*const Type.Pointer.Key, *Type.Pointer, Type.Pointer.Key.hash, Type.Pointer.Key.eql);
+ const FnTypeTable = std.HashMap(*const Type.Fn.Key, *Type.Fn, Type.Fn.Key.hash, Type.Fn.Key.eql);
+ const TypeTable = std.HashMap([]const u8, *Type, mem.hash_slice_u8, mem.eql_slice_u8);
+
+ const CompileErrList = std.ArrayList(*Msg);
+
+ // TODO handle some of these earlier and report them in a way other than error codes
+ pub const BuildError = error{
+ OutOfMemory,
+ EndOfStream,
+ BadFd,
+ Io,
+ IsDir,
+ Unexpected,
+ SystemResources,
+ SharingViolation,
+ PathAlreadyExists,
+ FileNotFound,
+ AccessDenied,
+ PipeBusy,
+ FileTooBig,
+ SymLinkLoop,
+ ProcessFdQuotaExceeded,
+ NameTooLong,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ PathNotFound,
+ NoSpaceLeft,
+ NotDir,
+ FileSystem,
+ OperationAborted,
+ IoPending,
+ BrokenPipe,
+ WouldBlock,
+ FileClosed,
+ DestinationAddressRequired,
+ DiskQuota,
+ InputOutput,
+ NoStdHandles,
+ Overflow,
+ NotSupported,
+ BufferTooSmall,
+ Unimplemented, // TODO remove this one
+ SemanticAnalysisFailed, // TODO remove this one
+ ReadOnlyFileSystem,
+ LinkQuotaExceeded,
+ EnvironmentVariableNotFound,
+ AppDataDirUnavailable,
+ LinkFailed,
+ LibCRequiredButNotProvidedOrFound,
+ LibCMissingDynamicLinker,
+ InvalidDarwinVersionString,
+ UnsupportedLinkArchitecture,
+ };
+
+ pub const Event = union(enum) {
+ Ok,
+ Error: BuildError,
+ Fail: []*Msg,
+ };
+
+ pub const DarwinVersionMin = union(enum) {
+ None,
+ MacOS: []const u8,
+ Ios: []const u8,
+ };
+
+ pub const Kind = enum {
+ Exe,
+ Lib,
+ Obj,
+ };
+
+ pub const LinkLib = struct {
+ name: []const u8,
+ path: ?[]const u8,
+
+ /// the list of symbols we depend on from this lib
+ symbols: ArrayList([]u8),
+ provided_explicitly: bool,
+ };
+
+ pub const Emit = enum {
+ Binary,
+ Assembly,
+ LlvmIr,
+ };
+
+ pub fn create(
+ event_loop_local: *EventLoopLocal,
+ name: []const u8,
+ root_src_path: ?[]const u8,
+ target: Target,
+ kind: Kind,
+ build_mode: builtin.Mode,
+ is_static: bool,
+ zig_lib_dir: []const u8,
+ ) !*Compilation {
+ const loop = event_loop_local.loop;
+ const comp = try event_loop_local.loop.allocator.create(Compilation{
+ .loop = loop,
+ .arena_allocator = std.heap.ArenaAllocator.init(loop.allocator),
+ .event_loop_local = event_loop_local,
+ .events = undefined,
+ .root_src_path = root_src_path,
+ .target = target,
+ .llvm_target = undefined,
+ .kind = kind,
+ .build_mode = build_mode,
+ .zig_lib_dir = zig_lib_dir,
+ .zig_std_dir = undefined,
+ .tmp_dir = event.Future(BuildError![]u8).init(loop),
+
+ .name = undefined,
+ .llvm_triple = undefined,
+
+ .version_major = 0,
+ .version_minor = 0,
+ .version_patch = 0,
+
+ .verbose_tokenize = false,
+ .verbose_ast_tree = false,
+ .verbose_ast_fmt = false,
+ .verbose_cimport = false,
+ .verbose_ir = false,
+ .verbose_llvm_ir = false,
+ .verbose_link = false,
+
+ .linker_script = null,
+ .out_h_path = null,
+ .is_test = false,
+ .each_lib_rpath = false,
+ .strip = false,
+ .is_static = is_static,
+ .linker_rdynamic = false,
+ .clang_argv = [][]const u8{},
+ .llvm_argv = [][]const u8{},
+ .lib_dirs = [][]const u8{},
+ .rpath_list = [][]const u8{},
+ .assembly_files = [][]const u8{},
+ .link_objects = [][]const u8{},
+ .fn_link_set = event.Locked(FnLinkSet).init(loop, FnLinkSet.init()),
+ .windows_subsystem_windows = false,
+ .windows_subsystem_console = false,
+ .link_libs_list = undefined,
+ .libc_link_lib = null,
+ .err_color = errmsg.Color.Auto,
+ .darwin_frameworks = [][]const u8{},
+ .darwin_version_min = DarwinVersionMin.None,
+ .test_filters = [][]const u8{},
+ .test_name_prefix = null,
+ .emit_file_type = Emit.Binary,
+ .link_out_file = null,
+ .exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)),
+ .prelink_group = event.Group(BuildError!void).init(loop),
+ .deinit_group = event.Group(void).init(loop),
+ .compile_errors = event.Locked(CompileErrList).init(loop, CompileErrList.init(loop.allocator)),
+ .int_type_table = event.Locked(IntTypeTable).init(loop, IntTypeTable.init(loop.allocator)),
+ .array_type_table = event.Locked(ArrayTypeTable).init(loop, ArrayTypeTable.init(loop.allocator)),
+ .ptr_type_table = event.Locked(PtrTypeTable).init(loop, PtrTypeTable.init(loop.allocator)),
+ .fn_type_table = event.Locked(FnTypeTable).init(loop, FnTypeTable.init(loop.allocator)),
+ .c_int_types = undefined,
+
+ .meta_type = undefined,
+ .void_type = undefined,
+ .void_value = undefined,
+ .bool_type = undefined,
+ .true_value = undefined,
+ .false_value = undefined,
+ .noreturn_type = undefined,
+ .noreturn_value = undefined,
+ .comptime_int_type = undefined,
+ .u8_type = undefined,
+
+ .target_machine = undefined,
+ .target_data_ref = undefined,
+ .target_layout_str = undefined,
+ .target_ptr_bits = target.getArchPtrBitWidth(),
+
+ .root_package = undefined,
+ .std_package = undefined,
+
+ .override_libc = null,
+ .destroy_handle = undefined,
+ .have_err_ret_tracing = false,
+ .primitive_type_table = undefined,
+ });
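+ // If any of the remaining setup fails, free the partially initialized Compilation.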
+ errdefer {
+ comp.int_type_table.private_data.deinit();
+ comp.array_type_table.private_data.deinit();
+ comp.ptr_type_table.private_data.deinit();
+ comp.fn_type_table.private_data.deinit();
+ comp.arena_allocator.deinit();
+ comp.loop.allocator.destroy(comp);
+ }
+
+ comp.name = try Buffer.init(comp.arena(), name);
+ comp.llvm_triple = try target.getTriple(comp.arena());
+ comp.llvm_target = try Target.llvmTargetFromTriple(comp.llvm_triple);
+ comp.link_libs_list = ArrayList(*LinkLib).init(comp.arena());
+ comp.zig_std_dir = try std.os.path.join(comp.arena(), zig_lib_dir, "std");
+ comp.primitive_type_table = TypeTable.init(comp.arena());
+
+ const opt_level = switch (build_mode) {
+ builtin.Mode.Debug => llvm.CodeGenLevelNone,
+ else => llvm.CodeGenLevelAggressive,
+ };
+
+ const reloc_mode = if (is_static) llvm.RelocStatic else llvm.RelocPIC;
+
+ // LLVM creates invalid binaries on Windows sometimes.
+ // See https://github.com/ziglang/zig/issues/508
+ // As a workaround we do not use target native features on Windows.
+ var target_specific_cpu_args: ?[*]u8 = null;
+ var target_specific_cpu_features: ?[*]u8 = null;
+ errdefer llvm.DisposeMessage(target_specific_cpu_args);
+ errdefer llvm.DisposeMessage(target_specific_cpu_features);
+ if (target == Target.Native and !target.isWindows()) {
+ target_specific_cpu_args = llvm.GetHostCPUName() orelse return error.OutOfMemory;
+ target_specific_cpu_features = llvm.GetNativeFeatures() orelse return error.OutOfMemory;
+ }
+
+ comp.target_machine = llvm.CreateTargetMachine(
+ comp.llvm_target,
+ comp.llvm_triple.ptr(),
+ target_specific_cpu_args orelse c"",
+ target_specific_cpu_features orelse c"",
+ opt_level,
+ reloc_mode,
+ llvm.CodeModelDefault,
+ ) orelse return error.OutOfMemory;
+ errdefer llvm.DisposeTargetMachine(comp.target_machine);
+
+ comp.target_data_ref = llvm.CreateTargetDataLayout(comp.target_machine) orelse return error.OutOfMemory;
+ errdefer llvm.DisposeTargetData(comp.target_data_ref);
+
+ comp.target_layout_str = llvm.CopyStringRepOfTargetData(comp.target_data_ref) orelse return error.OutOfMemory;
+ errdefer llvm.DisposeMessage(comp.target_layout_str);
+
+ comp.events = try event.Channel(Event).create(comp.loop, 0);
+ errdefer comp.events.destroy();
+
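+ // Set up the package graph: the root package for this compilation and, when a root
+ // source file is given, the standard library exposed as the "std" package.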
+ if (root_src_path) |root_src| {
+ const dirname = std.os.path.dirname(root_src) orelse ".";
+ const basename = std.os.path.basename(root_src);
+
+ comp.root_package = try Package.create(comp.arena(), dirname, basename);
+ comp.std_package = try Package.create(comp.arena(), comp.zig_std_dir, "index.zig");
+ try comp.root_package.add("std", comp.std_package);
+ } else {
+ comp.root_package = try Package.create(comp.arena(), ".", "");
+ }
+
+ try comp.initTypes();
+
+ comp.destroy_handle = try async comp.internalDeinit();
+
+ return comp;
+ }
+
+ /// The result is ref()'d, and the caller must deref it, because it could be an arbitrary-width integer type.
+ pub async fn getPrimitiveType(comp: *Compilation, name: []const u8) !?*Type {
+ if (name.len >= 2) {
+ switch (name[0]) {
+ 'i', 'u' => blk: {
+ for (name[1..]) |byte|
+ switch (byte) {
+ '0'...'9' => {},
+ else => break :blk,
+ };
+ const is_signed = name[0] == 'i';
+ const bit_count = std.fmt.parseUnsigned(u32, name[1..], 10) catch |err| switch (err) {
+ error.Overflow => return error.Overflow,
+ error.InvalidCharacter => unreachable, // we just checked the characters above
+ };
+ const int_type = try await (async Type.Int.get(comp, Type.Int.Key{
+ .bit_count = bit_count,
+ .is_signed = is_signed,
+ }) catch unreachable);
+ errdefer int_type.base.base.deref();
+ return &int_type.base;
+ },
+ else => {},
+ }
+ }
+
+ if (comp.primitive_type_table.get(name)) |entry| {
+ entry.value.base.ref();
+ return entry.value;
+ }
+
+ return null;
+ }
+
+ fn initTypes(comp: *Compilation) !void {
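+ // Construct the built-in primitive types and values, registering each type in primitive_type_table.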
+ comp.meta_type = try comp.arena().create(Type.MetaType{
+ .base = Type{
+ .name = "type",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = undefined,
+ .ref_count = std.atomic.Int(usize).init(3), // 3 = the initial reference plus the two self-references assigned below
+ },
+ .id = builtin.TypeId.Type,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ .value = undefined,
+ });
+ comp.meta_type.value = &comp.meta_type.base;
+ comp.meta_type.base.base.typ = &comp.meta_type.base;
+ assert((try comp.primitive_type_table.put(comp.meta_type.base.name, &comp.meta_type.base)) == null);
+
+ comp.void_type = try comp.arena().create(Type.Void{
+ .base = Type{
+ .name = "void",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.Void,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ });
+ assert((try comp.primitive_type_table.put(comp.void_type.base.name, &comp.void_type.base)) == null);
+
+ comp.noreturn_type = try comp.arena().create(Type.NoReturn{
+ .base = Type{
+ .name = "noreturn",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.NoReturn,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ });
+ assert((try comp.primitive_type_table.put(comp.noreturn_type.base.name, &comp.noreturn_type.base)) == null);
+
+ comp.comptime_int_type = try comp.arena().create(Type.ComptimeInt{
+ .base = Type{
+ .name = "comptime_int",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.ComptimeInt,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ });
+ assert((try comp.primitive_type_table.put(comp.comptime_int_type.base.name, &comp.comptime_int_type.base)) == null);
+
+ comp.bool_type = try comp.arena().create(Type.Bool{
+ .base = Type{
+ .name = "bool",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.Bool,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ });
+ assert((try comp.primitive_type_table.put(comp.bool_type.base.name, &comp.bool_type.base)) == null);
+
+ comp.void_value = try comp.arena().create(Value.Void{
+ .base = Value{
+ .id = Value.Id.Void,
+ .typ = &Type.Void.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ });
+
+ comp.true_value = try comp.arena().create(Value.Bool{
+ .base = Value{
+ .id = Value.Id.Bool,
+ .typ = &Type.Bool.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .x = true,
+ });
+
+ comp.false_value = try comp.arena().create(Value.Bool{
+ .base = Value{
+ .id = Value.Id.Bool,
+ .typ = &Type.Bool.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .x = false,
+ });
+
+ comp.noreturn_value = try comp.arena().create(Value.NoReturn{
+ .base = Value{
+ .id = Value.Id.NoReturn,
+ .typ = &Type.NoReturn.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ });
+
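+ // One Zig integer type per C integer type, with the bit width taken from the target.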
+ for (CInt.list) |cint, i| {
+ const c_int_type = try comp.arena().create(Type.Int{
+ .base = Type{
+ .name = cint.zig_name,
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.Int,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ .key = Type.Int.Key{
+ .is_signed = cint.is_signed,
+ .bit_count = comp.target.cIntTypeSizeInBits(cint.id),
+ },
+ .garbage_node = undefined,
+ });
+ comp.c_int_types[i] = c_int_type;
+ assert((try comp.primitive_type_table.put(cint.zig_name, &c_int_type.base)) == null);
+ }
+ comp.u8_type = try comp.arena().create(Type.Int{
+ .base = Type{
+ .name = "u8",
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &Type.MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = builtin.TypeId.Int,
+ .abi_alignment = Type.AbiAlignment.init(comp.loop),
+ },
+ .key = Type.Int.Key{
+ .is_signed = false,
+ .bit_count = 8,
+ },
+ .garbage_node = undefined,
+ });
+ assert((try comp.primitive_type_table.put(comp.u8_type.base.name, &comp.u8_type.base)) == null);
+ }
+
+ /// This function can safely use async/await, because it manages Compilation's lifetime,
+ /// and EventLoopLocal.deinit will not be called until the event.Loop.run() completes.
+ async fn internalDeinit(self: *Compilation) void {
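+ // Suspend until destroy() resumes destroy_handle, then wait for outstanding work and free everything.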
+ suspend;
+
+ await (async self.deinit_group.wait() catch unreachable);
+ if (self.tmp_dir.getOrNull()) |tmp_dir_result| if (tmp_dir_result.*) |tmp_dir| {
+ // TODO evented I/O?
+ os.deleteTree(self.arena(), tmp_dir) catch {};
+ } else |_| {};
+
+ self.events.destroy();
+
+ llvm.DisposeMessage(self.target_layout_str);
+ llvm.DisposeTargetData(self.target_data_ref);
+ llvm.DisposeTargetMachine(self.target_machine);
+
+ self.primitive_type_table.deinit();
+
+ self.arena_allocator.deinit();
+ self.gpa().destroy(self);
+ }
+
+ pub fn destroy(self: *Compilation) void {
+ resume self.destroy_handle;
+ }
+
+ pub fn build(self: *Compilation) !void {
+ if (self.llvm_argv.len != 0) {
+ var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.arena(), [][]const []const u8{
+ [][]const u8{"zig (LLVM option parsing)"},
+ self.llvm_argv,
+ });
+ defer c_compatible_args.deinit();
+ // TODO this sets global state
+ c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
+ }
+
+ _ = try async self.buildAsync();
+ }
+
+ async fn buildAsync(self: *Compilation) void {
+ while (true) {
+ // TODO directly awaiting async should guarantee memory allocation elision
+ const build_result = await (async self.compileAndLink() catch unreachable);
+
+ // this makes a handy error return trace and stack trace in debug mode
+ if (std.debug.runtime_safety) {
+ build_result catch unreachable;
+ }
+
+ const compile_errors = blk: {
+ const held = await (async self.compile_errors.acquire() catch unreachable);
+ defer held.release();
+ break :blk held.value.toOwnedSlice();
+ };
+
+ if (build_result) |_| {
+ if (compile_errors.len == 0) {
+ await (async self.events.put(Event.Ok) catch unreachable);
+ } else {
+ await (async self.events.put(Event{ .Fail = compile_errors }) catch unreachable);
+ }
+ } else |err| {
+ // if there's an error then the compile errors have dangling references
+ self.gpa().free(compile_errors);
+
+ await (async self.events.put(Event{ .Error = err }) catch unreachable);
+ }
+
+ // For now we stop after one build iteration.
+ return;
+ }
+ }
+
+ async fn compileAndLink(self: *Compilation) !void {
+ if (self.root_src_path) |root_src_path| {
+ // TODO async/await os.path.real
+ const root_src_real_path = os.path.real(self.gpa(), root_src_path) catch |err| {
+ try printError("unable to get real path '{}': {}", root_src_path, err);
+ return err;
+ };
+ const root_scope = blk: {
+ errdefer self.gpa().free(root_src_real_path);
+
+ // TODO async/await readFileAlloc()
+ const source_code = io.readFileAlloc(self.gpa(), root_src_real_path) catch |err| {
+ try printError("unable to open '{}': {}", root_src_real_path, err);
+ return err;
+ };
+ errdefer self.gpa().free(source_code);
+
+ const tree = try self.gpa().createOne(ast.Tree);
+ tree.* = try std.zig.parse(self.gpa(), source_code);
+ errdefer {
+ tree.deinit();
+ self.gpa().destroy(tree);
+ }
+
+ break :blk try Scope.Root.create(self, tree, root_src_real_path);
+ };
+ defer root_scope.base.deref(self);
+ const tree = root_scope.tree;
+
+ var error_it = tree.errors.iterator(0);
+ while (error_it.next()) |parse_error| {
+ const msg = try Msg.createFromParseErrorAndScope(self, root_scope, parse_error);
+ errdefer msg.destroy();
+
+ try await (async self.addCompileErrorAsync(msg) catch unreachable);
+ }
+ if (tree.errors.len != 0) {
+ return;
+ }
+
+ const decls = try Scope.Decls.create(self, &root_scope.base);
+ defer decls.base.deref(self);
+
+ var decl_group = event.Group(BuildError!void).init(self.loop);
+ var decl_group_consumed = false;
+ errdefer if (!decl_group_consumed) decl_group.cancelAll();
+
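+ // Walk the root file's top-level declarations and queue analysis work for each one.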
+ var it = tree.root_node.decls.iterator(0);
+ while (it.next()) |decl_ptr| {
+ const decl = decl_ptr.*;
+ switch (decl.id) {
+ ast.Node.Id.Comptime => {
+ const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", decl);
+
+ try self.prelink_group.call(addCompTimeBlock, self, &decls.base, comptime_node);
+ },
+ ast.Node.Id.VarDecl => @panic("TODO"),
+ ast.Node.Id.FnProto => {
+ const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
+
+ const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
+ try self.addCompileError(root_scope, Span{
+ .first = fn_proto.fn_token,
+ .last = fn_proto.fn_token + 1,
+ }, "missing function name");
+ continue;
+ };
+
+ const fn_decl = try self.gpa().create(Decl.Fn{
+ .base = Decl{
+ .id = Decl.Id.Fn,
+ .name = name,
+ .visib = parseVisibToken(tree, fn_proto.visib_token),
+ .resolution = event.Future(BuildError!void).init(self.loop),
+ .parent_scope = &decls.base,
+ },
+ .value = Decl.Fn.Val{ .Unresolved = {} },
+ .fn_proto = fn_proto,
+ });
+ errdefer self.gpa().destroy(fn_decl);
+
+ try decl_group.call(addTopLevelDecl, self, decls, &fn_decl.base);
+ },
+ ast.Node.Id.TestDecl => @panic("TODO"),
+ else => unreachable,
+ }
+ }
+ decl_group_consumed = true;
+ try await (async decl_group.wait() catch unreachable);
+
+ // Now other code can rely on the decls scope having a complete list of names.
+ decls.name_future.resolve();
+ }
+
+ (await (async self.prelink_group.wait() catch unreachable)) catch |err| switch (err) {
+ error.SemanticAnalysisFailed => {},
+ else => return err,
+ };
+
+ const any_prelink_errors = blk: {
+ const compile_errors = await (async self.compile_errors.acquire() catch unreachable);
+ defer compile_errors.release();
+
+ break :blk compile_errors.value.len != 0;
+ };
+
+ if (!any_prelink_errors) {
+ try await (async link(self) catch unreachable);
+ }
+ }
+
+ /// caller takes ownership of resulting Code
+ async fn genAndAnalyzeCode(
+ comp: *Compilation,
+ scope: *Scope,
+ node: *ast.Node,
+ expected_type: ?*Type,
+ ) !*ir.Code {
+ const unanalyzed_code = try await (async ir.gen(
+ comp,
+ node,
+ scope,
+ ) catch unreachable);
+ defer unanalyzed_code.destroy(comp.gpa());
+
+ if (comp.verbose_ir) {
+ std.debug.warn("unanalyzed:\n");
+ unanalyzed_code.dump();
+ }
+
+ const analyzed_code = try await (async ir.analyze(
+ comp,
+ unanalyzed_code,
+ expected_type,
+ ) catch unreachable);
+ errdefer analyzed_code.destroy(comp.gpa());
+
+ if (comp.verbose_ir) {
+ std.debug.warn("analyzed:\n");
+ analyzed_code.dump();
+ }
+
+ return analyzed_code;
+ }
+
+ async fn addCompTimeBlock(
+ comp: *Compilation,
+ scope: *Scope,
+ comptime_node: *ast.Node.Comptime,
+ ) !void {
+ const void_type = Type.Void.get(comp);
+ defer void_type.base.base.deref(comp);
+
+ const analyzed_code = (await (async genAndAnalyzeCode(
+ comp,
+ scope,
+ comptime_node.expr,
+ &void_type.base,
+ ) catch unreachable)) catch |err| switch (err) {
+ // This poison value should not cause the errdefers to run. It simply means
+ // that comp.compile_errors is populated.
+ error.SemanticAnalysisFailed => return {},
+ else => return err,
+ };
+ analyzed_code.destroy(comp.gpa());
+ }
+
+ async fn addTopLevelDecl(self: *Compilation, decls: *Scope.Decls, decl: *Decl) !void {
+ const tree = decl.findRootScope().tree;
+ const is_export = decl.isExported(tree);
+
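+ // Start adding the decl to the scope's table; if queueing the export jobs below fails, cancel that work.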
+ var add_to_table_resolved = false;
+ const add_to_table = async self.addDeclToTable(decls, decl) catch unreachable;
+ errdefer if (!add_to_table_resolved) cancel add_to_table; // TODO https://github.com/ziglang/zig/issues/1261
+
+ if (is_export) {
+ try self.prelink_group.call(verifyUniqueSymbol, self, decl);
+ try self.prelink_group.call(resolveDecl, self, decl);
+ }
+
+ add_to_table_resolved = true;
+ try await add_to_table;
+ }
+
+ async fn addDeclToTable(self: *Compilation, decls: *Scope.Decls, decl: *Decl) !void {
+ const held = await (async decls.table.acquire() catch unreachable);
+ defer held.release();
+
+ if (try held.value.put(decl.name, decl)) |other_decl| {
+ try self.addCompileError(decls.base.findRoot(), decl.getSpan(), "redefinition of '{}'", decl.name);
+ // TODO note: other definition here
+ }
+ }
+
+ fn addCompileError(self: *Compilation, root: *Scope.Root, span: Span, comptime fmt: []const u8, args: ...) !void {
+ const text = try std.fmt.allocPrint(self.gpa(), fmt, args);
+ errdefer self.gpa().free(text);
+
+ const msg = try Msg.createFromScope(self, root, span, text);
+ errdefer msg.destroy();
+
+ try self.prelink_group.call(addCompileErrorAsync, self, msg);
+ }
+
+ async fn addCompileErrorAsync(
+ self: *Compilation,
+ msg: *Msg,
+ ) !void {
+ errdefer msg.destroy();
+
+ const compile_errors = await (async self.compile_errors.acquire() catch unreachable);
+ defer compile_errors.release();
+
+ try compile_errors.value.append(msg);
+ }
+
+ async fn verifyUniqueSymbol(self: *Compilation, decl: *Decl) !void {
+ const exported_symbol_names = await (async self.exported_symbol_names.acquire() catch unreachable);
+ defer exported_symbol_names.release();
+
+ if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
+ try self.addCompileError(
+ decl.findRootScope(),
+ decl.getSpan(),
+ "exported symbol collision: '{}'",
+ decl.name,
+ );
+ // TODO add error note showing location of other symbol
+ }
+ }
+
+ pub fn haveLibC(self: *Compilation) bool {
+ return self.libc_link_lib != null;
+ }
+
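+ /// Returns the existing LinkLib if `name` is already registered; otherwise creates one
+ /// and, for libc, kicks off detection of the native libc.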
+ pub fn addLinkLib(self: *Compilation, name: []const u8, provided_explicitly: bool) !*LinkLib {
+ const is_libc = mem.eql(u8, name, "c");
+
+ if (is_libc) {
+ if (self.libc_link_lib) |libc_link_lib| {
+ return libc_link_lib;
+ }
+ }
+
+ for (self.link_libs_list.toSliceConst()) |existing_lib| {
+ if (mem.eql(u8, name, existing_lib.name)) {
+ return existing_lib;
+ }
+ }
+
+ const link_lib = try self.gpa().create(LinkLib{
+ .name = name,
+ .path = null,
+ .provided_explicitly = provided_explicitly,
+ .symbols = ArrayList([]u8).init(self.gpa()),
+ });
+ try self.link_libs_list.append(link_lib);
+ if (is_libc) {
+ self.libc_link_lib = link_lib;
+
+ // get a head start on looking for the native libc
+ if (self.target == Target.Native and self.override_libc == null) {
+ try self.deinit_group.call(startFindingNativeLibC, self);
+ }
+ }
+ return link_lib;
+ }
+
+ /// Cancels itself, so there is no need to await or cancel the promise.
+ async fn startFindingNativeLibC(self: *Compilation) void {
+ await (async self.loop.yield() catch unreachable);
+ // We don't care if it fails; we're just trying to kick off the future resolution.
+ _ = (await (async self.event_loop_local.getNativeLibC() catch unreachable)) catch return;
+ }
+
+ /// General purpose allocator. Memory allocated with it must be freed when done.
+ fn gpa(self: Compilation) *mem.Allocator {
+ return self.loop.allocator;
+ }
+
+ /// Arena Allocator. Automatically freed when the Compilation is destroyed.
+ fn arena(self: *Compilation) *mem.Allocator {
+ return &self.arena_allocator.allocator;
+ }
+
+ /// If the temporary directory for this compilation has not been created, creates it.
+ /// Then generates a random file name in that directory and returns the full path.
+ pub async fn createRandomOutputPath(self: *Compilation, suffix: []const u8) !Buffer {
+ const tmp_dir = try await (async self.getTmpDir() catch unreachable);
+ const file_prefix = await (async self.getRandomFileName() catch unreachable);
+
+ const file_name = try std.fmt.allocPrint(self.gpa(), "{}{}", file_prefix[0..], suffix);
+ defer self.gpa().free(file_name);
+
+ const full_path = try os.path.join(self.gpa(), tmp_dir, file_name[0..]);
+ errdefer self.gpa().free(full_path);
+
+ return Buffer.fromOwnedSlice(self.gpa(), full_path);
+ }
+
+ /// If the temporary directory for this Compilation has not been created, creates it.
+ /// Then returns it. The directory is unique to this Compilation and cleaned up when
+ /// the Compilation deinitializes.
+ async fn getTmpDir(self: *Compilation) ![]const u8 {
+ if (await (async self.tmp_dir.start() catch unreachable)) |ptr| return ptr.*;
+ self.tmp_dir.data = await (async self.getTmpDirImpl() catch unreachable);
+ self.tmp_dir.resolve();
+ return self.tmp_dir.data;
+ }
+
+ async fn getTmpDirImpl(self: *Compilation) ![]u8 {
+ const comp_dir_name = await (async self.getRandomFileName() catch unreachable);
+ const zig_dir_path = try getZigDir(self.gpa());
+ defer self.gpa().free(zig_dir_path);
+
+ const tmp_dir = try os.path.join(self.arena(), zig_dir_path, comp_dir_name[0..]);
+ try os.makePath(self.gpa(), tmp_dir);
+ return tmp_dir;
+ }
+
+ async fn getRandomFileName(self: *Compilation) [12]u8 {
+ // Here we replace the standard +/ characters with -_ so that the result can be used in a file name.
+ const b64_fs_encoder = std.base64.Base64Encoder.init(
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_",
+ std.base64.standard_pad_char,
+ );
+
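+ // 9 random bytes encode to exactly 12 base64 characters, so no padding is produced.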
+ var rand_bytes: [9]u8 = undefined;
+
+ {
+ const held = await (async self.event_loop_local.prng.acquire() catch unreachable);
+ defer held.release();
+
+ held.value.random.bytes(rand_bytes[0..]);
+ }
+
+ var result: [12]u8 = undefined;
+ b64_fs_encoder.encode(result[0..], rand_bytes);
+ return result;
+ }
+
+ fn registerGarbage(comp: *Compilation, comptime T: type, node: *std.atomic.Stack(*T).Node) void {
+ // TODO put the garbage somewhere
+ }
+
+ /// Returns a value which has been ref()'d once
+ async fn analyzeConstValue(comp: *Compilation, scope: *Scope, node: *ast.Node, expected_type: *Type) !*Value {
+ const analyzed_code = try await (async comp.genAndAnalyzeCode(scope, node, expected_type) catch unreachable);
+ defer analyzed_code.destroy(comp.gpa());
+
+ return analyzed_code.getCompTimeResult(comp);
+ }
+
+ async fn analyzeTypeExpr(comp: *Compilation, scope: *Scope, node: *ast.Node) !*Type {
+ const meta_type = &Type.MetaType.get(comp).base;
+ defer meta_type.base.deref(comp);
+
+ const result_val = try await (async comp.analyzeConstValue(scope, node, meta_type) catch unreachable);
+ errdefer result_val.base.deref(comp);
+
+ return result_val.cast(Type).?;
+ }
+
+ /// This declaration has been blessed as going into the final code generation.
+ pub async fn resolveDecl(comp: *Compilation, decl: *Decl) !void {
+ if (await (async decl.resolution.start() catch unreachable)) |ptr| return ptr.*;
+
+ decl.resolution.data = try await (async generateDecl(comp, decl) catch unreachable);
+ decl.resolution.resolve();
+ return decl.resolution.data;
+ }
+};
+
+fn printError(comptime format: []const u8, args: ...) !void {
+ var stderr_file = try std.io.getStdErr();
+ var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
+ const out_stream = &stderr_file_out_stream.stream;
+ try out_stream.print(format, args);
+}
+
+fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
+ if (optional_token_index) |token_index| {
+ const token = tree.tokens.at(token_index);
+ assert(token.id == Token.Id.Keyword_pub);
+ return Visib.Pub;
+ } else {
+ return Visib.Private;
+ }
+}
+
+/// The function that actually does the generation.
+async fn generateDecl(comp: *Compilation, decl: *Decl) !void {
+ switch (decl.id) {
+ Decl.Id.Var => @panic("TODO"),
+ Decl.Id.Fn => {
+ const fn_decl = @fieldParentPtr(Decl.Fn, "base", decl);
+ return await (async generateDeclFn(comp, fn_decl) catch unreachable);
+ },
+ Decl.Id.CompTime => @panic("TODO"),
+ }
+}
+
+async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
+ const body_node = fn_decl.fn_proto.body_node orelse return await (async generateDeclFnProto(comp, fn_decl) catch unreachable);
+
+ const fndef_scope = try Scope.FnDef.create(comp, fn_decl.base.parent_scope);
+ defer fndef_scope.base.deref(comp);
+
+ const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
+ defer fn_type.base.base.deref(comp);
+
+ var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
+ var symbol_name_consumed = false;
+ errdefer if (!symbol_name_consumed) symbol_name.deinit();
+
+ // The Decl.Fn owns the initial 1 reference count
+ const fn_val = try Value.Fn.create(comp, fn_type, fndef_scope, symbol_name);
+ fn_decl.value = Decl.Fn.Val{ .Fn = fn_val };
+ symbol_name_consumed = true;
+
+ // Define local parameter variables
+ const root_scope = fn_decl.base.findRootScope();
+ for (fn_type.key.data.Normal.params) |param, i| {
+ //AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i);
+ const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", fn_decl.fn_proto.params.at(i).*);
+ const name_token = param_decl.name_token orelse {
+ try comp.addCompileError(root_scope, Span{
+ .first = param_decl.firstToken(),
+ .last = param_decl.type_node.firstToken(),
+ }, "missing parameter name");
+ return error.SemanticAnalysisFailed;
+ };
+ const param_name = root_scope.tree.tokenSlice(name_token);
+
+ // if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) {
+ // add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter"));
+ // }
+
+ // TODO check for shadowing
+
+ const var_scope = try Scope.Var.createParam(
+ comp,
+ fn_val.child_scope,
+ param_name,
+ ¶m_decl.base,
+ i,
+ param.typ,
+ );
+ fn_val.child_scope = &var_scope.base;
+
+ try fn_type.non_key.Normal.variable_list.append(var_scope);
+ }
+
+ const analyzed_code = try await (async comp.genAndAnalyzeCode(
+ fn_val.child_scope,
+ body_node,
+ fn_type.key.data.Normal.return_type,
+ ) catch unreachable);
+ errdefer analyzed_code.destroy(comp.gpa());
+
+ assert(fn_val.block_scope != null);
+
+ // Kick off rendering to LLVM module, but it doesn't block the fn decl
+ // analysis from being complete.
+ try comp.prelink_group.call(codegen.renderToLlvm, comp, fn_val, analyzed_code);
+ try comp.prelink_group.call(addFnToLinkSet, comp, fn_val);
+}
+
+async fn addFnToLinkSet(comp: *Compilation, fn_val: *Value.Fn) void {
+ fn_val.base.ref();
+ defer fn_val.base.deref(comp);
+
+ fn_val.link_set_node.data = fn_val;
+
+ const held = await (async comp.fn_link_set.acquire() catch unreachable);
+ defer held.release();
+
+ held.value.append(fn_val.link_set_node);
+}
+
+fn getZigDir(allocator: *mem.Allocator) ![]u8 {
+ return os.getAppDataDir(allocator, "zig");
+}
+
+async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.FnProto) !*Type.Fn {
+ const return_type_node = switch (fn_proto.return_type) {
+ ast.Node.FnProto.ReturnType.Explicit => |n| n,
+ ast.Node.FnProto.ReturnType.InferErrorSet => |n| n,
+ };
+ const return_type = try await (async comp.analyzeTypeExpr(scope, return_type_node) catch unreachable);
+ return_type.base.deref(comp);
+
+ var params = ArrayList(Type.Fn.Param).init(comp.gpa());
+ var params_consumed = false;
+ defer if (!params_consumed) {
+ for (params.toSliceConst()) |param| {
+ param.typ.base.deref(comp);
+ }
+ params.deinit();
+ };
+
+ {
+ var it = fn_proto.params.iterator(0);
+ while (it.next()) |param_node_ptr| {
+ const param_node = param_node_ptr.*.cast(ast.Node.ParamDecl).?;
+ const param_type = try await (async comp.analyzeTypeExpr(scope, param_node.type_node) catch unreachable);
+ errdefer param_type.base.deref(comp);
+ try params.append(Type.Fn.Param{
+ .typ = param_type,
+ .is_noalias = param_node.noalias_token != null,
+ });
+ }
+ }
+
+ const key = Type.Fn.Key{
+ .alignment = null,
+ .data = Type.Fn.Key.Data{
+ .Normal = Type.Fn.Key.Normal{
+ .return_type = return_type,
+ .params = params.toOwnedSlice(),
+ .is_var_args = false, // TODO
+ .cc = Type.Fn.CallingConvention.Auto, // TODO
+ },
+ },
+ };
+ params_consumed = true;
+ var key_consumed = false;
+ defer if (!key_consumed) {
+ for (key.data.Normal.params) |param| {
+ param.typ.base.deref(comp);
+ }
+ comp.gpa().free(key.data.Normal.params);
+ };
+
+ const fn_type = try await (async Type.Fn.get(comp, key) catch unreachable);
+ key_consumed = true;
+ errdefer fn_type.base.base.deref(comp);
+
+ return fn_type;
+}
+
+async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
+ const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
+ defer fn_type.base.base.deref(comp);
+
+ var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
+ var symbol_name_consumed = false;
+ defer if (!symbol_name_consumed) symbol_name.deinit();
+
+ // The Decl.Fn owns the initial 1 reference count
+ const fn_proto_val = try Value.FnProto.create(comp, fn_type, symbol_name);
+ fn_decl.value = Decl.Fn.Val{ .FnProto = fn_proto_val };
+ symbol_name_consumed = true;
+}
diff --git a/src-self-hosted/decl.zig b/src-self-hosted/decl.zig
new file mode 100644
index 0000000000..6e80243038
--- /dev/null
+++ b/src-self-hosted/decl.zig
@@ -0,0 +1,98 @@
+const std = @import("std");
+const Allocator = mem.Allocator;
+const mem = std.mem;
+const ast = std.zig.ast;
+const Visib = @import("visib.zig").Visib;
+const event = std.event;
+const Value = @import("value.zig").Value;
+const Token = std.zig.Token;
+const errmsg = @import("errmsg.zig");
+const Scope = @import("scope.zig").Scope;
+const Compilation = @import("compilation.zig").Compilation;
+
+pub const Decl = struct {
+ id: Id,
+ name: []const u8,
+ visib: Visib,
+ resolution: event.Future(Compilation.BuildError!void),
+ parent_scope: *Scope,
+
+ pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);
+
+ pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
+ switch (base.id) {
+ Id.Fn => {
+ const fn_decl = @fieldParentPtr(Fn, "base", base);
+ return fn_decl.isExported(tree);
+ },
+ else => return false,
+ }
+ }
+
+ pub fn getSpan(base: *const Decl) errmsg.Span {
+ switch (base.id) {
+ Id.Fn => {
+ const fn_decl = @fieldParentPtr(Fn, "base", base);
+ const fn_proto = fn_decl.fn_proto;
+ const start = fn_proto.fn_token;
+ const end = fn_proto.name_token orelse start;
+ return errmsg.Span{
+ .first = start,
+ .last = end + 1,
+ };
+ },
+ else => @panic("TODO"),
+ }
+ }
+
+ pub fn findRootScope(base: *const Decl) *Scope.Root {
+ return base.parent_scope.findRoot();
+ }
+
+ pub const Id = enum {
+ Var,
+ Fn,
+ CompTime,
+ };
+
+ pub const Var = struct {
+ base: Decl,
+ };
+
+ pub const Fn = struct {
+ base: Decl,
+ value: Val,
+ fn_proto: *ast.Node.FnProto,
+
+ // TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous
+ pub const Val = union(enum) {
+ Unresolved: void,
+ Fn: *Value.Fn,
+ FnProto: *Value.FnProto,
+ };
+
+ pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 {
+ return if (self.fn_proto.extern_export_inline_token) |tok_index| x: {
+ const token = tree.tokens.at(tok_index);
+ break :x switch (token.id) {
+ Token.Id.Extern => tree.tokenSlicePtr(token),
+ else => null,
+ };
+ } else null;
+ }
+
+ pub fn isExported(self: Fn, tree: *ast.Tree) bool {
+ if (self.fn_proto.extern_export_inline_token) |tok_index| {
+ const token = tree.tokens.at(tok_index);
+ return token.id == Token.Id.Keyword_export;
+ } else {
+ return false;
+ }
+ }
+ };
+
+ pub const CompTime = struct {
+ base: Decl,
+ };
+};
+
diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig
new file mode 100644
index 0000000000..51e135686a
--- /dev/null
+++ b/src-self-hosted/errmsg.zig
@@ -0,0 +1,237 @@
+const std = @import("std");
+const mem = std.mem;
+const os = std.os;
+const Token = std.zig.Token;
+const ast = std.zig.ast;
+const TokenIndex = std.zig.ast.TokenIndex;
+const Compilation = @import("compilation.zig").Compilation;
+const Scope = @import("scope.zig").Scope;
+
+pub const Color = enum {
+ Auto,
+ Off,
+ On,
+};
+
+pub const Span = struct {
+ first: ast.TokenIndex,
+ last: ast.TokenIndex,
+
+ pub fn token(i: TokenIndex) Span {
+ return Span{
+ .first = i,
+ .last = i,
+ };
+ }
+
+ pub fn node(n: *ast.Node) Span {
+ return Span{
+ .first = n.firstToken(),
+ .last = n.lastToken(),
+ };
+ }
+};
+
+pub const Msg = struct {
+ span: Span,
+ text: []u8,
+ data: Data,
+
+ const Data = union(enum) {
+ PathAndTree: PathAndTree,
+ ScopeAndComp: ScopeAndComp,
+ };
+
+ const PathAndTree = struct {
+ realpath: []const u8,
+ tree: *ast.Tree,
+ allocator: *mem.Allocator,
+ };
+
+ const ScopeAndComp = struct {
+ root_scope: *Scope.Root,
+ compilation: *Compilation,
+ };
+
+ pub fn destroy(self: *Msg) void {
+ switch (self.data) {
+ Data.PathAndTree => |path_and_tree| {
+ path_and_tree.allocator.free(self.text);
+ path_and_tree.allocator.destroy(self);
+ },
+ Data.ScopeAndComp => |scope_and_comp| {
+ scope_and_comp.root_scope.base.deref(scope_and_comp.compilation);
+ scope_and_comp.compilation.gpa().free(self.text);
+ scope_and_comp.compilation.gpa().destroy(self);
+ },
+ }
+ }
+
+ fn getAllocator(self: *const Msg) *mem.Allocator {
+ switch (self.data) {
+ Data.PathAndTree => |path_and_tree| {
+ return path_and_tree.allocator;
+ },
+ Data.ScopeAndComp => |scope_and_comp| {
+ return scope_and_comp.compilation.gpa();
+ },
+ }
+ }
+
+ pub fn getRealPath(self: *const Msg) []const u8 {
+ switch (self.data) {
+ Data.PathAndTree => |path_and_tree| {
+ return path_and_tree.realpath;
+ },
+ Data.ScopeAndComp => |scope_and_comp| {
+ return scope_and_comp.root_scope.realpath;
+ },
+ }
+ }
+
+ pub fn getTree(self: *const Msg) *ast.Tree {
+ switch (self.data) {
+ Data.PathAndTree => |path_and_tree| {
+ return path_and_tree.tree;
+ },
+ Data.ScopeAndComp => |scope_and_comp| {
+ return scope_and_comp.root_scope.tree;
+ },
+ }
+ }
+
+ /// Takes ownership of text
+ /// References root_scope, and derefs when the msg is freed
+ pub fn createFromScope(comp: *Compilation, root_scope: *Scope.Root, span: Span, text: []u8) !*Msg {
+ const msg = try comp.gpa().create(Msg{
+ .text = text,
+ .span = span,
+ .data = Data{
+ .ScopeAndComp = ScopeAndComp{
+ .root_scope = root_scope,
+ .compilation = comp,
+ },
+ },
+ });
+ root_scope.base.ref();
+ return msg;
+ }
+
+ pub fn createFromParseErrorAndScope(
+ comp: *Compilation,
+ root_scope: *Scope.Root,
+ parse_error: *const ast.Error,
+ ) !*Msg {
+ const loc_token = parse_error.loc();
+ var text_buf = try std.Buffer.initSize(comp.gpa(), 0);
+ defer text_buf.deinit();
+
+ var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
+ try parse_error.render(&root_scope.tree.tokens, out_stream);
+
+ const msg = try comp.gpa().create(Msg{
+ .text = undefined,
+ .span = Span{
+ .first = loc_token,
+ .last = loc_token,
+ },
+ .data = Data{
+ .ScopeAndComp = ScopeAndComp{
+ .root_scope = root_scope,
+ .compilation = comp,
+ },
+ },
+ });
+ root_scope.base.ref();
+ msg.text = text_buf.toOwnedSlice();
+ return msg;
+ }
+
+ /// `realpath` must outlive the returned Msg
+ /// `tree` must outlive the returned Msg
+ /// Caller owns returned Msg and must free with `allocator`
+ /// allocator will additionally be used for printing messages later.
+ pub fn createFromParseError(
+ allocator: *mem.Allocator,
+ parse_error: *const ast.Error,
+ tree: *ast.Tree,
+ realpath: []const u8,
+ ) !*Msg {
+ const loc_token = parse_error.loc();
+ var text_buf = try std.Buffer.initSize(allocator, 0);
+ defer text_buf.deinit();
+
+ var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
+ try parse_error.render(&tree.tokens, out_stream);
+
+ const msg = try allocator.create(Msg{
+ .text = undefined,
+ .data = Data{
+ .PathAndTree = PathAndTree{
+ .allocator = allocator,
+ .realpath = realpath,
+ .tree = tree,
+ },
+ },
+ .span = Span{
+ .first = loc_token,
+ .last = loc_token,
+ },
+ });
+ msg.text = text_buf.toOwnedSlice();
+ errdefer allocator.destroy(msg);
+
+ return msg;
+ }
+
+ pub fn printToStream(msg: *const Msg, stream: var, color_on: bool) !void {
+ const allocator = msg.getAllocator();
+ const realpath = msg.getRealPath();
+ const tree = msg.getTree();
+
+ const cwd = try os.getCwd(allocator);
+ defer allocator.free(cwd);
+
+ const relpath = try os.path.relative(allocator, cwd, realpath);
+ defer allocator.free(relpath);
+
+ const path = if (relpath.len < realpath.len) relpath else realpath;
+
+ const first_token = tree.tokens.at(msg.span.first);
+ const last_token = tree.tokens.at(msg.span.last);
+ const start_loc = tree.tokenLocationPtr(0, first_token);
+ const end_loc = tree.tokenLocationPtr(first_token.end, last_token);
+ if (!color_on) {
+ try stream.print(
+ "{}:{}:{}: error: {}\n",
+ path,
+ start_loc.line + 1,
+ start_loc.column + 1,
+ msg.text,
+ );
+ return;
+ }
+
+ try stream.print(
+ "{}:{}:{}: error: {}\n{}\n",
+ path,
+ start_loc.line + 1,
+ start_loc.column + 1,
+ msg.text,
+ tree.source[start_loc.line_start..start_loc.line_end],
+ );
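+ // Point at the error: indent to the start column, then underline the offending tokens with '~'.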
+ try stream.writeByteNTimes(' ', start_loc.column);
+ try stream.writeByteNTimes('~', last_token.end - first_token.start);
+ try stream.write("\n");
+ }
+
+ pub fn printToFile(msg: *const Msg, file: *os.File, color: Color) !void {
+ const color_on = switch (color) {
+ Color.Auto => file.isTty(),
+ Color.On => true,
+ Color.Off => false,
+ };
+ var stream = &std.io.FileOutStream.init(file).stream;
+ return msg.printToStream(stream, color_on);
+ }
+};
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index 3f1fefdd5a..ecd04c4467 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -7,7 +7,7 @@ const os = std.os;
const warn = std.debug.warn;
/// Caller must free result
-pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![]u8 {
+pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![]u8 {
const test_zig_dir = try os.path.join(allocator, test_path, "lib", "zig");
errdefer allocator.free(test_zig_dir);
@@ -21,13 +21,13 @@ pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![
}
/// Caller must free result
-pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 {
+pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
const self_exe_path = try os.selfExeDirPath(allocator);
defer allocator.free(self_exe_path);
var cur_path: []const u8 = self_exe_path;
while (true) {
- const test_dir = os.path.dirname(cur_path);
+ const test_dir = os.path.dirname(cur_path) orelse ".";
if (mem.eql(u8, test_dir, cur_path)) {
break;
@@ -42,16 +42,19 @@ pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 {
return error.FileNotFound;
}
-pub fn resolveZigLibDir(allocator: &mem.Allocator) ![]u8 {
+pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
return findZigLibDir(allocator) catch |err| {
warn(
\\Unable to find zig lib directory: {}.
\\Reinstall Zig or use --zig-install-prefix.
\\
- ,
- @errorName(err)
- );
+ , @errorName(err));
return error.ZigLibDirNotFound;
};
}
+
+/// Caller must free result
+pub fn resolveZigCacheDir(allocator: *mem.Allocator) ![]u8 {
+ return std.mem.dupe(allocator, u8, "zig-cache");
+}
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index b66a0abdee..619cd4f330 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -1,112 +1,2598 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Compilation = @import("compilation.zig").Compilation;
const Scope = @import("scope.zig").Scope;
+const ast = std.zig.ast;
+const Allocator = std.mem.Allocator;
+const Value = @import("value.zig").Value;
+const Type = Value.Type;
+const assert = std.debug.assert;
+const Token = std.zig.Token;
+const Span = @import("errmsg.zig").Span;
+const llvm = @import("llvm.zig");
+const codegen = @import("codegen.zig");
+const ObjectFile = codegen.ObjectFile;
+const Decl = @import("decl.zig").Decl;
+const mem = std.mem;
-pub const Instruction = struct {
- id: Id,
- scope: &Scope,
+pub const LVal = enum {
+ None,
+ Ptr,
+};
- pub const Id = enum {
- Br,
- CondBr,
- SwitchBr,
- SwitchVar,
- SwitchTarget,
- Phi,
- UnOp,
- BinOp,
- DeclVar,
- LoadPtr,
- StorePtr,
- FieldPtr,
- StructFieldPtr,
- UnionFieldPtr,
- ElemPtr,
- VarPtr,
- Call,
- Const,
- Return,
- Cast,
- ContainerInitList,
- ContainerInitFields,
- StructInit,
- UnionInit,
- Unreachable,
- TypeOf,
- ToPtrType,
- PtrTypeChild,
- SetRuntimeSafety,
- SetFloatMode,
- ArrayType,
- SliceType,
- Asm,
- SizeOf,
- TestNonNull,
- UnwrapMaybe,
- MaybeWrap,
- UnionTag,
- Clz,
- Ctz,
- Import,
- CImport,
- CInclude,
- CDefine,
- CUndef,
- ArrayLen,
- Ref,
- MinValue,
- MaxValue,
- CompileErr,
- CompileLog,
- ErrName,
- EmbedFile,
- Cmpxchg,
- Fence,
- Truncate,
- IntType,
- BoolNot,
- Memset,
- Memcpy,
- Slice,
- MemberCount,
- MemberType,
- MemberName,
- Breakpoint,
- ReturnAddress,
- FrameAddress,
- AlignOf,
- OverflowOp,
- TestErr,
- UnwrapErrCode,
- UnwrapErrPayload,
- ErrWrapCode,
- ErrWrapPayload,
- FnProto,
- TestComptime,
- PtrCast,
- BitCast,
- WidenOrShorten,
- IntToPtr,
- PtrToInt,
- IntToEnum,
- IntToErr,
- ErrToInt,
- CheckSwitchProngs,
- CheckStatementIsVoid,
- TypeName,
- CanImplicitCast,
- DeclRef,
- Panic,
- TagName,
- TagType,
- FieldParentPtr,
- OffsetOf,
- TypeId,
- SetEvalBranchQuota,
- PtrTypeOf,
- AlignCast,
- OpaqueType,
- SetAlignStack,
- ArgType,
- Export,
+pub const IrVal = union(enum) {
+ Unknown,
+ KnownType: *Type,
+ KnownValue: *Value,
+
+ const Init = enum {
+ Unknown,
+ NoReturn,
+ Void,
};
+ pub fn dump(self: IrVal) void {
+ switch (self) {
+ IrVal.Unknown => std.debug.warn("Unknown"),
+ IrVal.KnownType => |typ| {
+ std.debug.warn("KnownType(");
+ typ.dump();
+ std.debug.warn(")");
+ },
+ IrVal.KnownValue => |value| {
+ std.debug.warn("KnownValue(");
+ value.dump();
+ std.debug.warn(")");
+ },
+ }
+ }
};
+
+pub const Inst = struct {
+ id: Id,
+ scope: *Scope,
+ debug_id: usize,
+ val: IrVal,
+ ref_count: usize,
+ span: Span,
+ owner_bb: *BasicBlock,
+
+ /// true if this instruction was generated by the compiler rather than from user code
+ is_generated: bool,
+
+ /// the instruction that is derived from this one in analysis
+ child: ?*Inst,
+
+ /// the instruction that this one derives from in analysis
+ parent: ?*Inst,
+
+ /// populated during codegen
+ llvm_value: ?llvm.ValueRef,
+
+ pub fn cast(base: *Inst, comptime T: type) ?*T {
+ if (base.id == comptime typeToId(T)) {
+ return @fieldParentPtr(T, "base", base);
+ }
+ return null;
+ }
+
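+ /// Maps an instruction struct type (e.g. Inst.Return) to its Id tag by matching member names at comptime.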
+ pub fn typeToId(comptime T: type) Id {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (T == @field(Inst, @memberName(Id, i))) {
+ return @field(Id, @memberName(Id, i));
+ }
+ }
+ unreachable;
+ }
+
+ pub fn dump(base: *const Inst) void {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (base.id == @field(Id, @memberName(Id, i))) {
+ const T = @field(Inst, @memberName(Id, i));
+ std.debug.warn("#{} = {}(", base.debug_id, @tagName(base.id));
+ @fieldParentPtr(T, "base", base).dump();
+ std.debug.warn(")");
+ return;
+ }
+ }
+ unreachable;
+ }
+
+ pub fn hasSideEffects(base: *const Inst) bool {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (base.id == @field(Id, @memberName(Id, i))) {
+ const T = @field(Inst, @memberName(Id, i));
+ return @fieldParentPtr(T, "base", base).hasSideEffects();
+ }
+ }
+ unreachable;
+ }
+
+ pub async fn analyze(base: *Inst, ira: *Analyze) Analyze.Error!*Inst {
+ switch (base.id) {
+ Id.Return => return @fieldParentPtr(Return, "base", base).analyze(ira),
+ Id.Const => return @fieldParentPtr(Const, "base", base).analyze(ira),
+ Id.Call => return @fieldParentPtr(Call, "base", base).analyze(ira),
+ Id.DeclRef => return await (async @fieldParentPtr(DeclRef, "base", base).analyze(ira) catch unreachable),
+ Id.Ref => return await (async @fieldParentPtr(Ref, "base", base).analyze(ira) catch unreachable),
+ Id.DeclVar => return @fieldParentPtr(DeclVar, "base", base).analyze(ira),
+ Id.CheckVoidStmt => return @fieldParentPtr(CheckVoidStmt, "base", base).analyze(ira),
+ Id.Phi => return @fieldParentPtr(Phi, "base", base).analyze(ira),
+ Id.Br => return @fieldParentPtr(Br, "base", base).analyze(ira),
+ Id.AddImplicitReturnType => return @fieldParentPtr(AddImplicitReturnType, "base", base).analyze(ira),
+ Id.PtrType => return await (async @fieldParentPtr(PtrType, "base", base).analyze(ira) catch unreachable),
+ Id.VarPtr => return await (async @fieldParentPtr(VarPtr, "base", base).analyze(ira) catch unreachable),
+ Id.LoadPtr => return await (async @fieldParentPtr(LoadPtr, "base", base).analyze(ira) catch unreachable),
+ }
+ }
+
+ pub fn render(base: *Inst, ofile: *ObjectFile, fn_val: *Value.Fn) (error{OutOfMemory}!?llvm.ValueRef) {
+ switch (base.id) {
+ Id.Return => return @fieldParentPtr(Return, "base", base).render(ofile, fn_val),
+ Id.Const => return @fieldParentPtr(Const, "base", base).render(ofile, fn_val),
+ Id.Call => return @fieldParentPtr(Call, "base", base).render(ofile, fn_val),
+ Id.VarPtr => return @fieldParentPtr(VarPtr, "base", base).render(ofile, fn_val),
+ Id.LoadPtr => return @fieldParentPtr(LoadPtr, "base", base).render(ofile, fn_val),
+ Id.DeclRef => unreachable,
+ Id.PtrType => unreachable,
+ Id.Ref => @panic("TODO"),
+ Id.DeclVar => @panic("TODO"),
+ Id.CheckVoidStmt => @panic("TODO"),
+ Id.Phi => @panic("TODO"),
+ Id.Br => @panic("TODO"),
+ Id.AddImplicitReturnType => @panic("TODO"),
+ }
+ }
+
+ fn ref(base: *Inst, builder: *Builder) void {
+ base.ref_count += 1;
+ if (base.owner_bb != builder.current_basic_block and !base.isCompTime()) {
+ base.owner_bb.ref(builder);
+ }
+ }
+
+ fn copyVal(base: *Inst, comp: *Compilation) !*Value {
+ if (base.parent.?.ref_count == 0) {
+ return base.val.KnownValue.derefAndCopy(comp);
+ }
+ return base.val.KnownValue.copy(comp);
+ }
+
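+ // Drops one reference from the parameter and returns its analyzed counterpart, or an error if analysis failed.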
+ fn getAsParam(param: *Inst) !*Inst {
+ param.ref_count -= 1;
+ const child = param.child orelse return error.SemanticAnalysisFailed;
+ switch (child.val) {
+ IrVal.Unknown => return error.SemanticAnalysisFailed,
+ else => return child,
+ }
+ }
+
+ fn getConstVal(self: *Inst, ira: *Analyze) !*Value {
+ if (self.isCompTime()) {
+ return self.val.KnownValue;
+ } else {
+ try ira.addCompileError(self.span, "unable to evaluate constant expression");
+ return error.SemanticAnalysisFailed;
+ }
+ }
+
+ fn getAsConstType(param: *Inst, ira: *Analyze) !*Type {
+ const meta_type = Type.MetaType.get(ira.irb.comp);
+ meta_type.base.base.deref(ira.irb.comp);
+
+ const inst = try param.getAsParam();
+ const casted = try ira.implicitCast(inst, &meta_type.base);
+ const val = try casted.getConstVal(ira);
+ return val.cast(Value.Type).?;
+ }
+
+ fn getAsConstAlign(param: *Inst, ira: *Analyze) !u32 {
+ return error.Unimplemented;
+ //const align_type = Type.Int.get_align(ira.irb.comp);
+ //align_type.base.base.deref(ira.irb.comp);
+
+ //const inst = try param.getAsParam();
+ //const casted = try ira.implicitCast(inst, align_type);
+ //const val = try casted.getConstVal(ira);
+
+ //uint32_t align_bytes = bigint_as_unsigned(&const_val->data.x_bigint);
+ //if (align_bytes == 0) {
+ // ir_add_error(ira, value, buf_sprintf("alignment must be >= 1"));
+ // return false;
+ //}
+
+ //if (!is_power_of_2(align_bytes)) {
+ // ir_add_error(ira, value, buf_sprintf("alignment value %" PRIu32 " is not a power of 2", align_bytes));
+ // return false;
+ //}
+ }
+
+ /// asserts that the type is known
+ fn getKnownType(self: *Inst) *Type {
+ switch (self.val) {
+ IrVal.KnownType => |typ| return typ,
+ IrVal.KnownValue => |value| return value.typ,
+ IrVal.Unknown => unreachable,
+ }
+ }
+
+ pub fn setGenerated(base: *Inst) void {
+ base.is_generated = true;
+ }
+
+ pub fn isNoReturn(base: *const Inst) bool {
+ switch (base.val) {
+ IrVal.Unknown => return false,
+ IrVal.KnownValue => |x| return x.typ.id == Type.Id.NoReturn,
+ IrVal.KnownType => |typ| return typ.id == Type.Id.NoReturn,
+ }
+ }
+
+ pub fn isCompTime(base: *const Inst) bool {
+ return base.val == IrVal.KnownValue;
+ }
+
+ pub fn linkToParent(self: *Inst, parent: *Inst) void {
+ assert(self.parent == null);
+ assert(parent.child == null);
+ self.parent = parent;
+ parent.child = self;
+ }
+
+ pub const Id = enum {
+ Return,
+ Const,
+ Ref,
+ DeclVar,
+ CheckVoidStmt,
+ Phi,
+ Br,
+ AddImplicitReturnType,
+ Call,
+ DeclRef,
+ PtrType,
+ VarPtr,
+ LoadPtr,
+ };
+
+ pub const Call = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ fn_ref: *Inst,
+ args: []*Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(self: *const Call) void {
+ std.debug.warn("#{}(", self.params.fn_ref.debug_id);
+ for (self.params.args) |arg| {
+ std.debug.warn("#{},", arg.debug_id);
+ }
+ std.debug.warn(")");
+ }
+
+ pub fn hasSideEffects(self: *const Call) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const Call, ira: *Analyze) !*Inst {
+ const fn_ref = try self.params.fn_ref.getAsParam();
+ const fn_ref_type = fn_ref.getKnownType();
+ const fn_type = fn_ref_type.cast(Type.Fn) orelse {
+ try ira.addCompileError(fn_ref.span, "type '{}' not a function", fn_ref_type.name);
+ return error.SemanticAnalysisFailed;
+ };
+
+ const fn_type_param_count = fn_type.paramCount();
+
+ if (fn_type_param_count != self.params.args.len) {
+ try ira.addCompileError(
+ self.base.span,
+ "expected {} arguments, found {}",
+ fn_type_param_count,
+ self.params.args.len,
+ );
+ return error.SemanticAnalysisFailed;
+ }
+
+ const args = try ira.irb.arena().alloc(*Inst, self.params.args.len);
+ for (self.params.args) |arg, i| {
+ args[i] = try arg.getAsParam();
+ }
+ const new_inst = try ira.irb.build(Call, self.base.scope, self.base.span, Params{
+ .fn_ref = fn_ref,
+ .args = args,
+ });
+ new_inst.val = IrVal{ .KnownType = fn_type.key.data.Normal.return_type };
+ return new_inst;
+ }
+
+ pub fn render(self: *Call, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef {
+ const fn_ref = self.params.fn_ref.llvm_value.?;
+
+ const args = try ofile.arena.alloc(llvm.ValueRef, self.params.args.len);
+ for (self.params.args) |arg, i| {
+ args[i] = arg.llvm_value.?;
+ }
+
+ const llvm_cc = llvm.CCallConv;
+ const fn_inline = llvm.FnInline.Auto;
+
+ return llvm.BuildCall(
+ ofile.builder,
+ fn_ref,
+ args.ptr,
+ @intCast(c_uint, args.len),
+ llvm_cc,
+ fn_inline,
+ c"",
+ ) orelse error.OutOfMemory;
+ }
+ };
+
+ pub const Const = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {};
+
+ // Use Builder.buildConst* methods, or, after building a Const instruction,
+ // manually set the ir_val field.
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(self: *const Const) void {
+ self.base.val.KnownValue.dump();
+ }
+
+ pub fn hasSideEffects(self: *const Const) bool {
+ return false;
+ }
+
+ pub fn analyze(self: *const Const, ira: *Analyze) !*Inst {
+ const new_inst = try ira.irb.build(Const, self.base.scope, self.base.span, Params{});
+ new_inst.val = IrVal{ .KnownValue = self.base.val.KnownValue.getRef() };
+ return new_inst;
+ }
+
+ pub fn render(self: *Const, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef {
+ return self.base.val.KnownValue.getLlvmConst(ofile);
+ }
+ };
+
+ pub const Return = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ return_value: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.NoReturn;
+
+ pub fn dump(self: *const Return) void {
+ std.debug.warn("#{}", self.params.return_value.debug_id);
+ }
+
+ pub fn hasSideEffects(self: *const Return) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const Return, ira: *Analyze) !*Inst {
+ const value = try self.params.return_value.getAsParam();
+ const casted_value = try ira.implicitCast(value, ira.explicit_return_type);
+
+ // TODO detect returning local variable address
+
+ return ira.irb.build(Return, self.base.scope, self.base.span, Params{ .return_value = casted_value });
+ }
+
+ pub fn render(self: *Return, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef {
+ const value = self.params.return_value.llvm_value;
+ const return_type = self.params.return_value.getKnownType();
+
+ if (return_type.handleIsPtr()) {
+ @panic("TODO");
+ } else {
+ _ = llvm.BuildRet(ofile.builder, value) orelse return error.OutOfMemory;
+ }
+ return null;
+ }
+ };
+
+ pub const Ref = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ target: *Inst,
+ mut: Type.Pointer.Mut,
+ volatility: Type.Pointer.Vol,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const Ref) void {}
+
+ pub fn hasSideEffects(inst: *const Ref) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const Ref, ira: *Analyze) !*Inst {
+ const target = try self.params.target.getAsParam();
+
+ if (ira.getCompTimeValOrNullUndefOk(target)) |val| {
+ return ira.getCompTimeRef(
+ val,
+ Value.Ptr.Mut.CompTimeConst,
+ self.params.mut,
+ self.params.volatility,
+ );
+ }
+
+ const new_inst = try ira.irb.build(Ref, self.base.scope, self.base.span, Params{
+ .target = target,
+ .mut = self.params.mut,
+ .volatility = self.params.volatility,
+ });
+ const elem_type = target.getKnownType();
+ const ptr_type = try await (async Type.Pointer.get(ira.irb.comp, Type.Pointer.Key{
+ .child_type = elem_type,
+ .mut = self.params.mut,
+ .vol = self.params.volatility,
+ .size = Type.Pointer.Size.One,
+ .alignment = Type.Pointer.Align.Abi,
+ }) catch unreachable);
+ // TODO: potentially set the hint that this is a stack pointer. But it might not be - this
+ // could be a ref of a global, for example
+ new_inst.val = IrVal{ .KnownType = &ptr_type.base };
+ // TODO potentially add an alloca entry here
+ return new_inst;
+ }
+ };
+
+ pub const DeclRef = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ decl: *Decl,
+ lval: LVal,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const DeclRef) void {}
+
+ pub fn hasSideEffects(inst: *const DeclRef) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const DeclRef, ira: *Analyze) !*Inst {
+ (await (async ira.irb.comp.resolveDecl(self.params.decl) catch unreachable)) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => return error.SemanticAnalysisFailed,
+ };
+ switch (self.params.decl.id) {
+ Decl.Id.CompTime => unreachable,
+ Decl.Id.Var => return error.Unimplemented,
+ Decl.Id.Fn => {
+ const fn_decl = @fieldParentPtr(Decl.Fn, "base", self.params.decl);
+ const decl_val = switch (fn_decl.value) {
+ Decl.Fn.Val.Unresolved => unreachable,
+ Decl.Fn.Val.Fn => |fn_val| &fn_val.base,
+ Decl.Fn.Val.FnProto => |fn_proto| &fn_proto.base,
+ };
+ switch (self.params.lval) {
+ LVal.None => {
+ return ira.irb.buildConstValue(self.base.scope, self.base.span, decl_val);
+ },
+ LVal.Ptr => return error.Unimplemented,
+ }
+ },
+ }
+ }
+ };
+
+ pub const VarPtr = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ var_scope: *Scope.Var,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const VarPtr) void {
+ std.debug.warn("{}", inst.params.var_scope.name);
+ }
+
+ pub fn hasSideEffects(inst: *const VarPtr) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const VarPtr, ira: *Analyze) !*Inst {
+ switch (self.params.var_scope.data) {
+ Scope.Var.Data.Const => @panic("TODO"),
+ Scope.Var.Data.Param => |param| {
+ const new_inst = try ira.irb.build(
+ Inst.VarPtr,
+ self.base.scope,
+ self.base.span,
+ Inst.VarPtr.Params{ .var_scope = self.params.var_scope },
+ );
+ const ptr_type = try await (async Type.Pointer.get(ira.irb.comp, Type.Pointer.Key{
+ .child_type = param.typ,
+ .mut = Type.Pointer.Mut.Const,
+ .vol = Type.Pointer.Vol.Non,
+ .size = Type.Pointer.Size.One,
+ .alignment = Type.Pointer.Align.Abi,
+ }) catch unreachable);
+ new_inst.val = IrVal{ .KnownType = &ptr_type.base };
+ return new_inst;
+ },
+ }
+ }
+
+ pub fn render(self: *VarPtr, ofile: *ObjectFile, fn_val: *Value.Fn) llvm.ValueRef {
+ switch (self.params.var_scope.data) {
+ Scope.Var.Data.Const => unreachable, // turned into Inst.Const in analyze pass
+ Scope.Var.Data.Param => |param| return param.llvm_value,
+ }
+ }
+ };
+
+ pub const LoadPtr = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ target: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const LoadPtr) void {}
+
+ pub fn hasSideEffects(inst: *const LoadPtr) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const LoadPtr, ira: *Analyze) !*Inst {
+ const target = try self.params.target.getAsParam();
+ const target_type = target.getKnownType();
+ if (target_type.id != Type.Id.Pointer) {
+                try ira.addCompileError(self.base.span, "dereference of non-pointer type '{}'", target_type.name);
+ return error.SemanticAnalysisFailed;
+ }
+ const ptr_type = @fieldParentPtr(Type.Pointer, "base", target_type);
+ // if (instr_is_comptime(ptr)) {
+ // if (ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst ||
+ // ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar)
+ // {
+ // ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &ptr->value);
+ // if (pointee->special != ConstValSpecialRuntime) {
+ // IrInstruction *result = ir_create_const(&ira->new_irb, source_instruction->scope,
+ // source_instruction->source_node, child_type);
+ // copy_const_val(&result->value, pointee, ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst);
+ // result->value.type = child_type;
+ // return result;
+ // }
+ // }
+ // }
+ const new_inst = try ira.irb.build(
+ Inst.LoadPtr,
+ self.base.scope,
+ self.base.span,
+ Inst.LoadPtr.Params{ .target = target },
+ );
+ new_inst.val = IrVal{ .KnownType = ptr_type.key.child_type };
+ return new_inst;
+ }
+
+ pub fn render(self: *LoadPtr, ofile: *ObjectFile, fn_val: *Value.Fn) !?llvm.ValueRef {
+ const child_type = self.base.getKnownType();
+ if (!child_type.hasBits()) {
+ return null;
+ }
+ const ptr = self.params.target.llvm_value.?;
+ const ptr_type = self.params.target.getKnownType().cast(Type.Pointer).?;
+
+ return try codegen.getHandleValue(ofile, ptr, ptr_type);
+
+ //uint32_t unaligned_bit_count = ptr_type->data.pointer.unaligned_bit_count;
+ //if (unaligned_bit_count == 0)
+ // return get_handle_value(g, ptr, child_type, ptr_type);
+
+ //bool big_endian = g->is_big_endian;
+
+ //assert(!handle_is_ptr(child_type));
+ //LLVMValueRef containing_int = gen_load(g, ptr, ptr_type, "");
+
+ //uint32_t bit_offset = ptr_type->data.pointer.bit_offset;
+ //uint32_t host_bit_count = LLVMGetIntTypeWidth(LLVMTypeOf(containing_int));
+ //uint32_t shift_amt = big_endian ? host_bit_count - bit_offset - unaligned_bit_count : bit_offset;
+
+ //LLVMValueRef shift_amt_val = LLVMConstInt(LLVMTypeOf(containing_int), shift_amt, false);
+ //LLVMValueRef shifted_value = LLVMBuildLShr(g->builder, containing_int, shift_amt_val, "");
+
+ //return LLVMBuildTrunc(g->builder, shifted_value, child_type->type_ref, "");
+ }
+ };
+
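+    /// Constructs a pointer type from a child type operand plus mutability,
+    /// volatility, size, and optional alignment; analysis resolves these and
+    /// yields the resulting pointer type as a constant value.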
+ pub const PtrType = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ child_type: *Inst,
+ mut: Type.Pointer.Mut,
+ vol: Type.Pointer.Vol,
+ size: Type.Pointer.Size,
+ alignment: ?*Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const PtrType) void {}
+
+ pub fn hasSideEffects(inst: *const PtrType) bool {
+ return false;
+ }
+
+ pub async fn analyze(self: *const PtrType, ira: *Analyze) !*Inst {
+ const child_type = try self.params.child_type.getAsConstType(ira);
+ // if (child_type->id == TypeTableEntryIdUnreachable) {
+ // ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed"));
+ // return ira->codegen->builtin_types.entry_invalid;
+ // } else if (child_type->id == TypeTableEntryIdOpaque && instruction->ptr_len == PtrLenUnknown) {
+ // ir_add_error(ira, &instruction->base, buf_sprintf("unknown-length pointer to opaque"));
+ // return ira->codegen->builtin_types.entry_invalid;
+ // }
+ const alignment = if (self.params.alignment) |align_inst| blk: {
+ const amt = try align_inst.getAsConstAlign(ira);
+ break :blk Type.Pointer.Align{ .Override = amt };
+ } else blk: {
+ break :blk Type.Pointer.Align{ .Abi = {} };
+ };
+ const ptr_type = try await (async Type.Pointer.get(ira.irb.comp, Type.Pointer.Key{
+ .child_type = child_type,
+ .mut = self.params.mut,
+ .vol = self.params.vol,
+ .size = self.params.size,
+ .alignment = alignment,
+ }) catch unreachable);
+ ptr_type.base.base.deref(ira.irb.comp);
+
+ return ira.irb.buildConstValue(self.base.scope, self.base.span, &ptr_type.base.base);
+ }
+ };
+
+ pub const DeclVar = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ variable: *Variable,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const DeclVar) void {}
+
+ pub fn hasSideEffects(inst: *const DeclVar) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const DeclVar, ira: *Analyze) !*Inst {
+ return error.Unimplemented; // TODO
+ }
+ };
+
+ pub const CheckVoidStmt = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ target: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(self: *const CheckVoidStmt) void {
+ std.debug.warn("#{}", self.params.target.debug_id);
+ }
+
+ pub fn hasSideEffects(inst: *const CheckVoidStmt) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const CheckVoidStmt, ira: *Analyze) !*Inst {
+ const target = try self.params.target.getAsParam();
+ if (target.getKnownType().id != Type.Id.Void) {
+ try ira.addCompileError(self.base.span, "expression value is ignored");
+ return error.SemanticAnalysisFailed;
+ }
+ return ira.irb.buildConstVoid(self.base.scope, self.base.span, true);
+ }
+ };
+
+ pub const Phi = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ incoming_blocks: []*BasicBlock,
+ incoming_values: []*Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const Phi) void {}
+
+ pub fn hasSideEffects(inst: *const Phi) bool {
+ return false;
+ }
+
+ pub fn analyze(self: *const Phi, ira: *Analyze) !*Inst {
+ return error.Unimplemented; // TODO
+ }
+ };
+
+ pub const Br = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ dest_block: *BasicBlock,
+ is_comptime: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.NoReturn;
+
+ pub fn dump(inst: *const Br) void {}
+
+ pub fn hasSideEffects(inst: *const Br) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const Br, ira: *Analyze) !*Inst {
+ return error.Unimplemented; // TODO
+ }
+ };
+
+ pub const CondBr = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {
+ condition: *Inst,
+ then_block: *BasicBlock,
+ else_block: *BasicBlock,
+ is_comptime: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.NoReturn;
+
+ pub fn dump(inst: *const CondBr) void {}
+
+ pub fn hasSideEffects(inst: *const CondBr) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const CondBr, ira: *Analyze) !*Inst {
+ return error.Unimplemented; // TODO
+ }
+ };
+
+ pub const AddImplicitReturnType = struct {
+ base: Inst,
+ params: Params,
+
+ pub const Params = struct {
+ target: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const AddImplicitReturnType) void {
+ std.debug.warn("#{}", inst.params.target.debug_id);
+ }
+
+ pub fn hasSideEffects(inst: *const AddImplicitReturnType) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const AddImplicitReturnType, ira: *Analyze) !*Inst {
+ const target = try self.params.target.getAsParam();
+ try ira.src_implicit_return_type_list.append(target);
+ return ira.irb.buildConstVoid(self.base.scope, self.base.span, true);
+ }
+ };
+
+ pub const TestErr = struct {
+ base: Inst,
+ params: Params,
+
+ pub const Params = struct {
+ target: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const TestErr) void {
+ std.debug.warn("#{}", inst.params.target.debug_id);
+ }
+
+ pub fn hasSideEffects(inst: *const TestErr) bool {
+ return false;
+ }
+
+ pub fn analyze(self: *const TestErr, ira: *Analyze) !*Inst {
+ const target = try self.params.target.getAsParam();
+ const target_type = target.getKnownType();
+ switch (target_type.id) {
+ Type.Id.ErrorUnion => {
+ return error.Unimplemented;
+ // if (instr_is_comptime(value)) {
+ // ConstExprValue *err_union_val = ir_resolve_const(ira, value, UndefBad);
+ // if (!err_union_val)
+ // return ira->codegen->builtin_types.entry_invalid;
+
+ // if (err_union_val->special != ConstValSpecialRuntime) {
+ // ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+ // out_val->data.x_bool = (err_union_val->data.x_err_union.err != nullptr);
+ // return ira->codegen->builtin_types.entry_bool;
+ // }
+ // }
+
+ // TypeTableEntry *err_set_type = type_entry->data.error_union.err_set_type;
+ // if (!resolve_inferred_error_set(ira->codegen, err_set_type, instruction->base.source_node)) {
+ // return ira->codegen->builtin_types.entry_invalid;
+ // }
+ // if (!type_is_global_error_set(err_set_type) &&
+ // err_set_type->data.error_set.err_count == 0)
+ // {
+ // assert(err_set_type->data.error_set.infer_fn == nullptr);
+ // ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+ // out_val->data.x_bool = false;
+ // return ira->codegen->builtin_types.entry_bool;
+ // }
+
+ // ir_build_test_err_from(&ira->new_irb, &instruction->base, value);
+ // return ira->codegen->builtin_types.entry_bool;
+ },
+ Type.Id.ErrorSet => {
+ return ira.irb.buildConstBool(self.base.scope, self.base.span, true);
+ },
+ else => {
+ return ira.irb.buildConstBool(self.base.scope, self.base.span, false);
+ },
+ }
+ }
+ };
+
+ pub const TestCompTime = struct {
+ base: Inst,
+ params: Params,
+
+ pub const Params = struct {
+ target: *Inst,
+ };
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const TestCompTime) void {
+ std.debug.warn("#{}", inst.params.target.debug_id);
+ }
+
+ pub fn hasSideEffects(inst: *const TestCompTime) bool {
+ return false;
+ }
+
+ pub fn analyze(self: *const TestCompTime, ira: *Analyze) !*Inst {
+ const target = try self.params.target.getAsParam();
+ return ira.irb.buildConstBool(self.base.scope, self.base.span, target.isCompTime());
+ }
+ };
+
+ pub const SaveErrRetAddr = struct {
+ base: Inst,
+ params: Params,
+
+ const Params = struct {};
+
+ const ir_val_init = IrVal.Init.Unknown;
+
+ pub fn dump(inst: *const SaveErrRetAddr) void {}
+
+ pub fn hasSideEffects(inst: *const SaveErrRetAddr) bool {
+ return true;
+ }
+
+ pub fn analyze(self: *const SaveErrRetAddr, ira: *Analyze) !*Inst {
+ return ira.irb.build(Inst.SaveErrRetAddr, self.base.scope, self.base.span, Params{});
+ }
+ };
+};
+
+pub const Variable = struct {
+ child_scope: *Scope,
+};
+
+pub const BasicBlock = struct {
+ ref_count: usize,
+ name_hint: [*]const u8, // must be a C string literal
+ debug_id: usize,
+ scope: *Scope,
+ instruction_list: std.ArrayList(*Inst),
+ ref_instruction: ?*Inst,
+
+ /// for codegen
+ llvm_block: llvm.BasicBlockRef,
+ llvm_exit_block: llvm.BasicBlockRef,
+
+ /// the basic block that is derived from this one in analysis
+ child: ?*BasicBlock,
+
+ /// the basic block that this one derives from in analysis
+ parent: ?*BasicBlock,
+
+ pub fn ref(self: *BasicBlock, builder: *Builder) void {
+ self.ref_count += 1;
+ }
+
+ pub fn linkToParent(self: *BasicBlock, parent: *BasicBlock) void {
+ assert(self.parent == null);
+ assert(parent.child == null);
+ self.parent = parent;
+ parent.child = self;
+ }
+};
+
+/// State that survives longer than the Builder.
+pub const Code = struct {
+ basic_block_list: std.ArrayList(*BasicBlock),
+ arena: std.heap.ArenaAllocator,
+ return_type: ?*Type,
+
+ /// allocator is comp.gpa()
+ pub fn destroy(self: *Code, allocator: *Allocator) void {
+ self.arena.deinit();
+ allocator.destroy(self);
+ }
+
+ pub fn dump(self: *Code) void {
+ var bb_i: usize = 0;
+ for (self.basic_block_list.toSliceConst()) |bb| {
+ std.debug.warn("{s}_{}:\n", bb.name_hint, bb.debug_id);
+ for (bb.instruction_list.toSliceConst()) |instr| {
+ std.debug.warn(" ");
+ instr.dump();
+ std.debug.warn("\n");
+ }
+ }
+ }
+
+ /// returns a ref-incremented value, or adds a compile error
+ pub fn getCompTimeResult(self: *Code, comp: *Compilation) !*Value {
+ const bb = self.basic_block_list.at(0);
+ for (bb.instruction_list.toSliceConst()) |inst| {
+ if (inst.cast(Inst.Return)) |ret_inst| {
+ const ret_value = ret_inst.params.return_value;
+ if (ret_value.isCompTime()) {
+ return ret_value.val.KnownValue.getRef();
+ }
+ try comp.addCompileError(
+ ret_value.scope.findRoot(),
+ ret_value.span,
+ "unable to evaluate constant expression",
+ );
+ return error.SemanticAnalysisFailed;
+ } else if (inst.hasSideEffects()) {
+ try comp.addCompileError(
+ inst.scope.findRoot(),
+ inst.span,
+ "unable to evaluate constant expression",
+ );
+ return error.SemanticAnalysisFailed;
+ }
+ }
+ unreachable;
+ }
+};
+
+pub const Builder = struct {
+ comp: *Compilation,
+ code: *Code,
+ current_basic_block: *BasicBlock,
+ next_debug_id: usize,
+ root_scope: *Scope.Root,
+ is_comptime: bool,
+ is_async: bool,
+ begin_scope: ?*Scope,
+
+ pub const Error = Analyze.Error;
+
+ pub fn init(comp: *Compilation, root_scope: *Scope.Root, begin_scope: ?*Scope) !Builder {
+ const code = try comp.gpa().create(Code{
+ .basic_block_list = undefined,
+ .arena = std.heap.ArenaAllocator.init(comp.gpa()),
+ .return_type = null,
+ });
+ code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator);
+ errdefer code.destroy(comp.gpa());
+
+ return Builder{
+ .comp = comp,
+ .root_scope = root_scope,
+ .current_basic_block = undefined,
+ .code = code,
+ .next_debug_id = 0,
+ .is_comptime = false,
+ .is_async = false,
+ .begin_scope = begin_scope,
+ };
+ }
+
+ pub fn abort(self: *Builder) void {
+ self.code.destroy(self.comp.gpa());
+ }
+
+ /// Call code.destroy() when done
+ pub fn finish(self: *Builder) *Code {
+ return self.code;
+ }
+
+ /// No need to clean up resources thanks to the arena allocator.
+ pub fn createBasicBlock(self: *Builder, scope: *Scope, name_hint: [*]const u8) !*BasicBlock {
+ const basic_block = try self.arena().create(BasicBlock{
+ .ref_count = 0,
+ .name_hint = name_hint,
+ .debug_id = self.next_debug_id,
+ .scope = scope,
+ .instruction_list = std.ArrayList(*Inst).init(self.arena()),
+ .child = null,
+ .parent = null,
+ .ref_instruction = null,
+ .llvm_block = undefined,
+ .llvm_exit_block = undefined,
+ });
+ self.next_debug_id += 1;
+ return basic_block;
+ }
+
+ pub fn setCursorAtEndAndAppendBlock(self: *Builder, basic_block: *BasicBlock) !void {
+ try self.code.basic_block_list.append(basic_block);
+ self.setCursorAtEnd(basic_block);
+ }
+
+ pub fn setCursorAtEnd(self: *Builder, basic_block: *BasicBlock) void {
+ self.current_basic_block = basic_block;
+ }
+
+ pub async fn genNode(irb: *Builder, node: *ast.Node, scope: *Scope, lval: LVal) Error!*Inst {
+ switch (node.id) {
+ ast.Node.Id.Root => unreachable,
+ ast.Node.Id.Use => unreachable,
+ ast.Node.Id.TestDecl => unreachable,
+ ast.Node.Id.VarDecl => return error.Unimplemented,
+ ast.Node.Id.Defer => return error.Unimplemented,
+ ast.Node.Id.InfixOp => return error.Unimplemented,
+ ast.Node.Id.PrefixOp => {
+ const prefix_op = @fieldParentPtr(ast.Node.PrefixOp, "base", node);
+ switch (prefix_op.op) {
+ ast.Node.PrefixOp.Op.AddressOf => return error.Unimplemented,
+ ast.Node.PrefixOp.Op.ArrayType => |n| return error.Unimplemented,
+ ast.Node.PrefixOp.Op.Await => return error.Unimplemented,
+ ast.Node.PrefixOp.Op.BitNot => return error.Unimplemented,
+ ast.Node.PrefixOp.Op.BoolNot => return error.Unimplemented,
+ ast.Node.PrefixOp.Op.Cancel => return error.Unimplemented,
+ ast.Node.PrefixOp.Op.OptionalType => return error.Unimplemented,
+ ast.Node.PrefixOp.Op.Negation => return error.Unimplemented,
+ ast.Node.PrefixOp.Op.NegationWrap => return error.Unimplemented,
+ ast.Node.PrefixOp.Op.Resume => return error.Unimplemented,
+ ast.Node.PrefixOp.Op.PtrType => |ptr_info| {
+ const inst = try await (async irb.genPtrType(prefix_op, ptr_info, scope) catch unreachable);
+ return irb.lvalWrap(scope, inst, lval);
+ },
+ ast.Node.PrefixOp.Op.SliceType => |ptr_info| return error.Unimplemented,
+ ast.Node.PrefixOp.Op.Try => return error.Unimplemented,
+ }
+ },
+ ast.Node.Id.SuffixOp => {
+ const suffix_op = @fieldParentPtr(ast.Node.SuffixOp, "base", node);
+ switch (suffix_op.op) {
+ @TagType(ast.Node.SuffixOp.Op).Call => |*call| {
+ const inst = try await (async irb.genCall(suffix_op, call, scope) catch unreachable);
+ return irb.lvalWrap(scope, inst, lval);
+ },
+ @TagType(ast.Node.SuffixOp.Op).ArrayAccess => |n| return error.Unimplemented,
+ @TagType(ast.Node.SuffixOp.Op).Slice => |slice| return error.Unimplemented,
+ @TagType(ast.Node.SuffixOp.Op).ArrayInitializer => |init_list| return error.Unimplemented,
+ @TagType(ast.Node.SuffixOp.Op).StructInitializer => |init_list| return error.Unimplemented,
+ @TagType(ast.Node.SuffixOp.Op).Deref => return error.Unimplemented,
+ @TagType(ast.Node.SuffixOp.Op).UnwrapOptional => return error.Unimplemented,
+ }
+ },
+ ast.Node.Id.Switch => return error.Unimplemented,
+ ast.Node.Id.While => return error.Unimplemented,
+ ast.Node.Id.For => return error.Unimplemented,
+ ast.Node.Id.If => return error.Unimplemented,
+ ast.Node.Id.ControlFlowExpression => {
+ const control_flow_expr = @fieldParentPtr(ast.Node.ControlFlowExpression, "base", node);
+ return await (async irb.genControlFlowExpr(control_flow_expr, scope, lval) catch unreachable);
+ },
+ ast.Node.Id.Suspend => return error.Unimplemented,
+ ast.Node.Id.VarType => return error.Unimplemented,
+ ast.Node.Id.ErrorType => return error.Unimplemented,
+ ast.Node.Id.FnProto => return error.Unimplemented,
+ ast.Node.Id.PromiseType => return error.Unimplemented,
+ ast.Node.Id.IntegerLiteral => {
+ const int_lit = @fieldParentPtr(ast.Node.IntegerLiteral, "base", node);
+ return irb.lvalWrap(scope, try irb.genIntLit(int_lit, scope), lval);
+ },
+ ast.Node.Id.FloatLiteral => return error.Unimplemented,
+ ast.Node.Id.StringLiteral => {
+ const str_lit = @fieldParentPtr(ast.Node.StringLiteral, "base", node);
+ const inst = try await (async irb.genStrLit(str_lit, scope) catch unreachable);
+ return irb.lvalWrap(scope, inst, lval);
+ },
+ ast.Node.Id.MultilineStringLiteral => return error.Unimplemented,
+ ast.Node.Id.CharLiteral => return error.Unimplemented,
+ ast.Node.Id.BoolLiteral => return error.Unimplemented,
+ ast.Node.Id.NullLiteral => return error.Unimplemented,
+ ast.Node.Id.UndefinedLiteral => return error.Unimplemented,
+ ast.Node.Id.ThisLiteral => return error.Unimplemented,
+ ast.Node.Id.Unreachable => return error.Unimplemented,
+ ast.Node.Id.Identifier => {
+ const identifier = @fieldParentPtr(ast.Node.Identifier, "base", node);
+ return await (async irb.genIdentifier(identifier, scope, lval) catch unreachable);
+ },
+ ast.Node.Id.GroupedExpression => {
+ const grouped_expr = @fieldParentPtr(ast.Node.GroupedExpression, "base", node);
+ return await (async irb.genNode(grouped_expr.expr, scope, lval) catch unreachable);
+ },
+ ast.Node.Id.BuiltinCall => return error.Unimplemented,
+ ast.Node.Id.ErrorSetDecl => return error.Unimplemented,
+ ast.Node.Id.ContainerDecl => return error.Unimplemented,
+ ast.Node.Id.Asm => return error.Unimplemented,
+ ast.Node.Id.Comptime => return error.Unimplemented,
+ ast.Node.Id.Block => {
+ const block = @fieldParentPtr(ast.Node.Block, "base", node);
+ const inst = try await (async irb.genBlock(block, scope) catch unreachable);
+ return irb.lvalWrap(scope, inst, lval);
+ },
+ ast.Node.Id.DocComment => return error.Unimplemented,
+ ast.Node.Id.SwitchCase => return error.Unimplemented,
+ ast.Node.Id.SwitchElse => return error.Unimplemented,
+ ast.Node.Id.Else => return error.Unimplemented,
+ ast.Node.Id.Payload => return error.Unimplemented,
+ ast.Node.Id.PointerPayload => return error.Unimplemented,
+ ast.Node.Id.PointerIndexPayload => return error.Unimplemented,
+ ast.Node.Id.StructField => return error.Unimplemented,
+ ast.Node.Id.UnionTag => return error.Unimplemented,
+ ast.Node.Id.EnumTag => return error.Unimplemented,
+ ast.Node.Id.ErrorTag => return error.Unimplemented,
+ ast.Node.Id.AsmInput => return error.Unimplemented,
+ ast.Node.Id.AsmOutput => return error.Unimplemented,
+ ast.Node.Id.AsyncAttribute => return error.Unimplemented,
+ ast.Node.Id.ParamDecl => return error.Unimplemented,
+ ast.Node.Id.FieldInitializer => return error.Unimplemented,
+ }
+ }
+
+ async fn genCall(irb: *Builder, suffix_op: *ast.Node.SuffixOp, call: *ast.Node.SuffixOp.Op.Call, scope: *Scope) !*Inst {
+ const fn_ref = try await (async irb.genNode(suffix_op.lhs, scope, LVal.None) catch unreachable);
+
+ const args = try irb.arena().alloc(*Inst, call.params.len);
+ var it = call.params.iterator(0);
+ var i: usize = 0;
+ while (it.next()) |arg_node_ptr| : (i += 1) {
+ args[i] = try await (async irb.genNode(arg_node_ptr.*, scope, LVal.None) catch unreachable);
+ }
+
+ //bool is_async = node->data.fn_call_expr.is_async;
+ //IrInstruction *async_allocator = nullptr;
+ //if (is_async) {
+ // if (node->data.fn_call_expr.async_allocator) {
+ // async_allocator = ir_gen_node(irb, node->data.fn_call_expr.async_allocator, scope);
+ // if (async_allocator == irb->codegen->invalid_instruction)
+ // return async_allocator;
+ // }
+ //}
+
+ return irb.build(Inst.Call, scope, Span.token(suffix_op.rtoken), Inst.Call.Params{
+ .fn_ref = fn_ref,
+ .args = args,
+ });
+ //IrInstruction *fn_call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator, nullptr);
+ //return ir_lval_wrap(irb, scope, fn_call, lval);
+ }
+
+ async fn genPtrType(
+ irb: *Builder,
+ prefix_op: *ast.Node.PrefixOp,
+ ptr_info: ast.Node.PrefixOp.PtrInfo,
+ scope: *Scope,
+ ) !*Inst {
+ // TODO port more logic
+
+ //assert(node->type == NodeTypePointerType);
+ //PtrLen ptr_len = (node->data.pointer_type.star_token->id == TokenIdStar ||
+ // node->data.pointer_type.star_token->id == TokenIdStarStar) ? PtrLenSingle : PtrLenUnknown;
+ //bool is_const = node->data.pointer_type.is_const;
+ //bool is_volatile = node->data.pointer_type.is_volatile;
+ //AstNode *expr_node = node->data.pointer_type.op_expr;
+ //AstNode *align_expr = node->data.pointer_type.align_expr;
+
+ //IrInstruction *align_value;
+ //if (align_expr != nullptr) {
+ // align_value = ir_gen_node(irb, align_expr, scope);
+ // if (align_value == irb->codegen->invalid_instruction)
+ // return align_value;
+ //} else {
+ // align_value = nullptr;
+ //}
+ const child_type = try await (async irb.genNode(prefix_op.rhs, scope, LVal.None) catch unreachable);
+
+ //uint32_t bit_offset_start = 0;
+ //if (node->data.pointer_type.bit_offset_start != nullptr) {
+ // if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_start, 32, false)) {
+ // Buf *val_buf = buf_alloc();
+ // bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_start, 10);
+ // exec_add_error_node(irb->codegen, irb->exec, node,
+ // buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
+ // return irb->codegen->invalid_instruction;
+ // }
+ // bit_offset_start = bigint_as_unsigned(node->data.pointer_type.bit_offset_start);
+ //}
+
+ //uint32_t bit_offset_end = 0;
+ //if (node->data.pointer_type.bit_offset_end != nullptr) {
+ // if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_end, 32, false)) {
+ // Buf *val_buf = buf_alloc();
+ // bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_end, 10);
+ // exec_add_error_node(irb->codegen, irb->exec, node,
+ // buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
+ // return irb->codegen->invalid_instruction;
+ // }
+ // bit_offset_end = bigint_as_unsigned(node->data.pointer_type.bit_offset_end);
+ //}
+
+ //if ((bit_offset_start != 0 || bit_offset_end != 0) && bit_offset_start >= bit_offset_end) {
+ // exec_add_error_node(irb->codegen, irb->exec, node,
+ // buf_sprintf("bit offset start must be less than bit offset end"));
+ // return irb->codegen->invalid_instruction;
+ //}
+
+ return irb.build(Inst.PtrType, scope, Span.node(&prefix_op.base), Inst.PtrType.Params{
+ .child_type = child_type,
+ .mut = Type.Pointer.Mut.Mut,
+ .vol = Type.Pointer.Vol.Non,
+ .size = Type.Pointer.Size.Many,
+ .alignment = null,
+ });
+ }
+
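+    /// Reports whether code in target_scope executes at compile time: true if the
+    /// builder itself is comptime or an enclosing comptime scope is found before
+    /// the function definition scope.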
+ fn isCompTime(irb: *Builder, target_scope: *Scope) bool {
+ if (irb.is_comptime)
+ return true;
+
+ var scope = target_scope;
+ while (true) {
+ switch (scope.id) {
+ Scope.Id.CompTime => return true,
+ Scope.Id.FnDef => return false,
+ Scope.Id.Decls => unreachable,
+ Scope.Id.Root => unreachable,
+ Scope.Id.Block,
+ Scope.Id.Defer,
+ Scope.Id.DeferExpr,
+ Scope.Id.Var,
+ => scope = scope.parent.?,
+ }
+ }
+ }
+
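+    /// Builds a Const instruction of type comptime_int from an integer literal
+    /// token, handling the 0b, 0o, and 0x radix prefixes.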
+ pub fn genIntLit(irb: *Builder, int_lit: *ast.Node.IntegerLiteral, scope: *Scope) !*Inst {
+ const int_token = irb.root_scope.tree.tokenSlice(int_lit.token);
+
+ var base: u8 = undefined;
+ var rest: []const u8 = undefined;
+ if (int_token.len >= 3 and int_token[0] == '0') {
+ base = switch (int_token[1]) {
+ 'b' => u8(2),
+ 'o' => u8(8),
+ 'x' => u8(16),
+ else => unreachable,
+ };
+ rest = int_token[2..];
+ } else {
+ base = 10;
+ rest = int_token;
+ }
+
+ const comptime_int_type = Type.ComptimeInt.get(irb.comp);
+ defer comptime_int_type.base.base.deref(irb.comp);
+
+ const int_val = Value.Int.createFromString(
+ irb.comp,
+ &comptime_int_type.base,
+ base,
+ rest,
+ ) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.InvalidBase => unreachable,
+ error.InvalidCharForDigit => unreachable,
+ error.DigitTooLargeForBase => unreachable,
+ };
+ errdefer int_val.base.deref(irb.comp);
+
+ const inst = try irb.build(Inst.Const, scope, Span.token(int_lit.token), Inst.Const.Params{});
+ inst.val = IrVal{ .KnownValue = &int_val.base };
+ return inst;
+ }
+
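+    /// Builds a constant from a string literal. For a c"..." literal a null
+    /// terminator is appended and the result is a many-item const pointer to the
+    /// first element of the array value; otherwise the array value itself is used.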
+ pub async fn genStrLit(irb: *Builder, str_lit: *ast.Node.StringLiteral, scope: *Scope) !*Inst {
+ const str_token = irb.root_scope.tree.tokenSlice(str_lit.token);
+ const src_span = Span.token(str_lit.token);
+
+ var bad_index: usize = undefined;
+ var buf = std.zig.parseStringLiteral(irb.comp.gpa(), str_token, &bad_index) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.InvalidCharacter => {
+ try irb.comp.addCompileError(
+ irb.root_scope,
+ src_span,
+ "invalid character in string literal: '{c}'",
+ str_token[bad_index],
+ );
+ return error.SemanticAnalysisFailed;
+ },
+ };
+ var buf_cleaned = false;
+ errdefer if (!buf_cleaned) irb.comp.gpa().free(buf);
+
+ if (str_token[0] == 'c') {
+            // first we add a null terminator
+ buf = try irb.comp.gpa().realloc(u8, buf, buf.len + 1);
+ buf[buf.len - 1] = 0;
+
+ // next make an array value
+ const array_val = try await (async Value.Array.createOwnedBuffer(irb.comp, buf) catch unreachable);
+ buf_cleaned = true;
+ defer array_val.base.deref(irb.comp);
+
+ // then make a pointer value pointing at the first element
+ const ptr_val = try await (async Value.Ptr.createArrayElemPtr(
+ irb.comp,
+ array_val,
+ Type.Pointer.Mut.Const,
+ Type.Pointer.Size.Many,
+ 0,
+ ) catch unreachable);
+ defer ptr_val.base.deref(irb.comp);
+
+ return irb.buildConstValue(scope, src_span, &ptr_val.base);
+ } else {
+ const array_val = try await (async Value.Array.createOwnedBuffer(irb.comp, buf) catch unreachable);
+ buf_cleaned = true;
+ defer array_val.base.deref(irb.comp);
+
+ return irb.buildConstValue(scope, src_span, &array_val.base);
+ }
+ }
+
+ pub async fn genBlock(irb: *Builder, block: *ast.Node.Block, parent_scope: *Scope) !*Inst {
+ const block_scope = try Scope.Block.create(irb.comp, parent_scope);
+
+ const outer_block_scope = &block_scope.base;
+ var child_scope = outer_block_scope;
+
+ if (parent_scope.findFnDef()) |fndef_scope| {
+ if (fndef_scope.fn_val.?.block_scope == null) {
+ fndef_scope.fn_val.?.block_scope = block_scope;
+ }
+ }
+
+ if (block.statements.len == 0) {
+ // {}
+ return irb.buildConstVoid(child_scope, Span.token(block.lbrace), false);
+ }
+
+ if (block.label) |label| {
+ block_scope.incoming_values = std.ArrayList(*Inst).init(irb.arena());
+ block_scope.incoming_blocks = std.ArrayList(*BasicBlock).init(irb.arena());
+ block_scope.end_block = try irb.createBasicBlock(parent_scope, c"BlockEnd");
+ block_scope.is_comptime = try irb.buildConstBool(
+ parent_scope,
+ Span.token(block.lbrace),
+ irb.isCompTime(parent_scope),
+ );
+ }
+
+ var is_continuation_unreachable = false;
+ var noreturn_return_value: ?*Inst = null;
+
+ var stmt_it = block.statements.iterator(0);
+ while (stmt_it.next()) |statement_node_ptr| {
+ const statement_node = statement_node_ptr.*;
+
+ if (statement_node.cast(ast.Node.Defer)) |defer_node| {
+ // defer starts a new scope
+ const defer_token = irb.root_scope.tree.tokens.at(defer_node.defer_token);
+ const kind = switch (defer_token.id) {
+ Token.Id.Keyword_defer => Scope.Defer.Kind.ScopeExit,
+ Token.Id.Keyword_errdefer => Scope.Defer.Kind.ErrorExit,
+ else => unreachable,
+ };
+ const defer_expr_scope = try Scope.DeferExpr.create(irb.comp, parent_scope, defer_node.expr);
+ const defer_child_scope = try Scope.Defer.create(irb.comp, parent_scope, kind, defer_expr_scope);
+ child_scope = &defer_child_scope.base;
+ continue;
+ }
+ const statement_value = try await (async irb.genNode(statement_node, child_scope, LVal.None) catch unreachable);
+
+ is_continuation_unreachable = statement_value.isNoReturn();
+ if (is_continuation_unreachable) {
+ // keep the last noreturn statement value around in case we need to return it
+ noreturn_return_value = statement_value;
+ }
+
+ if (statement_value.cast(Inst.DeclVar)) |decl_var| {
+ // variable declarations start a new scope
+ child_scope = decl_var.params.variable.child_scope;
+ } else if (!is_continuation_unreachable) {
+ // this statement's value must be void
+                _ = try irb.build(
+ Inst.CheckVoidStmt,
+ child_scope,
+ Span{
+ .first = statement_node.firstToken(),
+ .last = statement_node.lastToken(),
+ },
+ Inst.CheckVoidStmt.Params{ .target = statement_value },
+ );
+ }
+ }
+
+ if (is_continuation_unreachable) {
+ assert(noreturn_return_value != null);
+ if (block.label == null or block_scope.incoming_blocks.len == 0) {
+ return noreturn_return_value.?;
+ }
+
+ try irb.setCursorAtEndAndAppendBlock(block_scope.end_block);
+ return irb.build(Inst.Phi, parent_scope, Span.token(block.rbrace), Inst.Phi.Params{
+ .incoming_blocks = block_scope.incoming_blocks.toOwnedSlice(),
+ .incoming_values = block_scope.incoming_values.toOwnedSlice(),
+ });
+ }
+
+ if (block.label) |label| {
+ try block_scope.incoming_blocks.append(irb.current_basic_block);
+ try block_scope.incoming_values.append(
+ try irb.buildConstVoid(parent_scope, Span.token(block.rbrace), true),
+ );
+ _ = try await (async irb.genDefersForBlock(child_scope, outer_block_scope, Scope.Defer.Kind.ScopeExit) catch unreachable);
+
+ _ = try irb.buildGen(Inst.Br, parent_scope, Span.token(block.rbrace), Inst.Br.Params{
+ .dest_block = block_scope.end_block,
+ .is_comptime = block_scope.is_comptime,
+ });
+
+ try irb.setCursorAtEndAndAppendBlock(block_scope.end_block);
+
+ return irb.build(Inst.Phi, parent_scope, Span.token(block.rbrace), Inst.Phi.Params{
+ .incoming_blocks = block_scope.incoming_blocks.toOwnedSlice(),
+ .incoming_values = block_scope.incoming_values.toOwnedSlice(),
+ });
+ }
+
+ _ = try await (async irb.genDefersForBlock(child_scope, outer_block_scope, Scope.Defer.Kind.ScopeExit) catch unreachable);
+ return irb.buildConstVoid(child_scope, Span.token(block.rbrace), true);
+ }
+
+ pub async fn genControlFlowExpr(
+ irb: *Builder,
+ control_flow_expr: *ast.Node.ControlFlowExpression,
+ scope: *Scope,
+ lval: LVal,
+ ) !*Inst {
+ switch (control_flow_expr.kind) {
+ ast.Node.ControlFlowExpression.Kind.Break => |arg| return error.Unimplemented,
+ ast.Node.ControlFlowExpression.Kind.Continue => |arg| return error.Unimplemented,
+ ast.Node.ControlFlowExpression.Kind.Return => {
+ const src_span = Span.token(control_flow_expr.ltoken);
+ if (scope.findFnDef() == null) {
+ try irb.comp.addCompileError(
+ irb.root_scope,
+ src_span,
+ "return expression outside function definition",
+ );
+ return error.SemanticAnalysisFailed;
+ }
+
+ if (scope.findDeferExpr()) |scope_defer_expr| {
+ if (!scope_defer_expr.reported_err) {
+ try irb.comp.addCompileError(
+ irb.root_scope,
+ src_span,
+ "cannot return from defer expression",
+ );
+ scope_defer_expr.reported_err = true;
+ }
+ return error.SemanticAnalysisFailed;
+ }
+
+ const outer_scope = irb.begin_scope.?;
+ const return_value = if (control_flow_expr.rhs) |rhs| blk: {
+ break :blk try await (async irb.genNode(rhs, scope, LVal.None) catch unreachable);
+ } else blk: {
+ break :blk try irb.buildConstVoid(scope, src_span, true);
+ };
+
+ const defer_counts = irb.countDefers(scope, outer_scope);
+ const have_err_defers = defer_counts.error_exit != 0;
+ if (have_err_defers or irb.comp.have_err_ret_tracing) {
+ const err_block = try irb.createBasicBlock(scope, c"ErrRetErr");
+ const ok_block = try irb.createBasicBlock(scope, c"ErrRetOk");
+ if (!have_err_defers) {
+ _ = try await (async irb.genDefersForBlock(scope, outer_scope, Scope.Defer.Kind.ScopeExit) catch unreachable);
+ }
+
+ const is_err = try irb.build(
+ Inst.TestErr,
+ scope,
+ src_span,
+ Inst.TestErr.Params{ .target = return_value },
+ );
+
+ const err_is_comptime = try irb.buildTestCompTime(scope, src_span, is_err);
+
+ _ = try irb.buildGen(Inst.CondBr, scope, src_span, Inst.CondBr.Params{
+ .condition = is_err,
+ .then_block = err_block,
+ .else_block = ok_block,
+ .is_comptime = err_is_comptime,
+ });
+
+ const ret_stmt_block = try irb.createBasicBlock(scope, c"RetStmt");
+
+ try irb.setCursorAtEndAndAppendBlock(err_block);
+ if (have_err_defers) {
+ _ = try await (async irb.genDefersForBlock(scope, outer_scope, Scope.Defer.Kind.ErrorExit) catch unreachable);
+ }
+ if (irb.comp.have_err_ret_tracing and !irb.isCompTime(scope)) {
+ _ = try irb.build(Inst.SaveErrRetAddr, scope, src_span, Inst.SaveErrRetAddr.Params{});
+ }
+ _ = try irb.build(Inst.Br, scope, src_span, Inst.Br.Params{
+ .dest_block = ret_stmt_block,
+ .is_comptime = err_is_comptime,
+ });
+
+ try irb.setCursorAtEndAndAppendBlock(ok_block);
+ if (have_err_defers) {
+ _ = try await (async irb.genDefersForBlock(scope, outer_scope, Scope.Defer.Kind.ScopeExit) catch unreachable);
+ }
+ _ = try irb.build(Inst.Br, scope, src_span, Inst.Br.Params{
+ .dest_block = ret_stmt_block,
+ .is_comptime = err_is_comptime,
+ });
+
+ try irb.setCursorAtEndAndAppendBlock(ret_stmt_block);
+ return irb.genAsyncReturn(scope, src_span, return_value, false);
+ } else {
+ _ = try await (async irb.genDefersForBlock(scope, outer_scope, Scope.Defer.Kind.ScopeExit) catch unreachable);
+ return irb.genAsyncReturn(scope, src_span, return_value, false);
+ }
+ },
+ }
+ }
+
+ pub async fn genIdentifier(irb: *Builder, identifier: *ast.Node.Identifier, scope: *Scope, lval: LVal) !*Inst {
+ const src_span = Span.token(identifier.token);
+ const name = irb.root_scope.tree.tokenSlice(identifier.token);
+
+ //if (buf_eql_str(variable_name, "_") && lval == LValPtr) {
+ // IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, node);
+ // const_instruction->base.value.type = get_pointer_to_type(irb->codegen,
+ // irb->codegen->builtin_types.entry_void, false);
+ // const_instruction->base.value.special = ConstValSpecialStatic;
+ // const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialDiscard;
+ // return &const_instruction->base;
+ //}
+
+ if (await (async irb.comp.getPrimitiveType(name) catch unreachable)) |result| {
+ if (result) |primitive_type| {
+ defer primitive_type.base.deref(irb.comp);
+ switch (lval) {
+ // if (lval == LValPtr) {
+ // return ir_build_ref(irb, scope, node, value, false, false);
+ LVal.Ptr => return error.Unimplemented,
+ LVal.None => return irb.buildConstValue(scope, src_span, &primitive_type.base),
+ }
+ }
+ } else |err| switch (err) {
+ error.Overflow => {
+ try irb.comp.addCompileError(irb.root_scope, src_span, "integer too large");
+ return error.SemanticAnalysisFailed;
+ },
+ error.OutOfMemory => return error.OutOfMemory,
+ }
+
+ switch (await (async irb.findIdent(scope, name) catch unreachable)) {
+ Ident.Decl => |decl| {
+ return irb.build(Inst.DeclRef, scope, src_span, Inst.DeclRef.Params{
+ .decl = decl,
+ .lval = lval,
+ });
+ },
+ Ident.VarScope => |var_scope| {
+ const var_ptr = try irb.build(Inst.VarPtr, scope, src_span, Inst.VarPtr.Params{ .var_scope = var_scope });
+ switch (lval) {
+ LVal.Ptr => return var_ptr,
+ LVal.None => {
+ return irb.build(Inst.LoadPtr, scope, src_span, Inst.LoadPtr.Params{ .target = var_ptr });
+ },
+ }
+ },
+ Ident.NotFound => {},
+ }
+
+ //if (node->owner->any_imports_failed) {
+ // // skip the error message since we had a failing import in this file
+ // // if an import breaks we don't need redundant undeclared identifier errors
+ // return irb->codegen->invalid_instruction;
+ //}
+
+ // TODO put a variable of same name with invalid type in global scope
+ // so that future references to this same name will find a variable with an invalid type
+
+ try irb.comp.addCompileError(irb.root_scope, src_span, "unknown identifier '{}'", name);
+ return error.SemanticAnalysisFailed;
+ }
+
+ const DeferCounts = struct {
+ scope_exit: usize,
+ error_exit: usize,
+ };
+
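+    /// Counts the defer and errdefer scopes between inner_scope and outer_scope,
+    /// stopping early at an enclosing function definition scope.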
+ fn countDefers(irb: *Builder, inner_scope: *Scope, outer_scope: *Scope) DeferCounts {
+ var result = DeferCounts{ .scope_exit = 0, .error_exit = 0 };
+
+ var scope = inner_scope;
+ while (scope != outer_scope) {
+ switch (scope.id) {
+ Scope.Id.Defer => {
+ const defer_scope = @fieldParentPtr(Scope.Defer, "base", scope);
+ switch (defer_scope.kind) {
+ Scope.Defer.Kind.ScopeExit => result.scope_exit += 1,
+ Scope.Defer.Kind.ErrorExit => result.error_exit += 1,
+ }
+ scope = scope.parent orelse break;
+ },
+ Scope.Id.FnDef => break,
+
+ Scope.Id.CompTime,
+ Scope.Id.Block,
+ Scope.Id.Decls,
+ Scope.Id.Root,
+ Scope.Id.Var,
+ => scope = scope.parent orelse break,
+
+ Scope.Id.DeferExpr => unreachable,
+ }
+ }
+ return result;
+ }
+
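+    /// Generates the defer expressions recorded between inner_scope and outer_scope
+    /// that apply to the given exit kind (errdefer expressions run only on error
+    /// exit). Returns true if any generated defer expression was noreturn.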
+ async fn genDefersForBlock(
+ irb: *Builder,
+ inner_scope: *Scope,
+ outer_scope: *Scope,
+ gen_kind: Scope.Defer.Kind,
+ ) !bool {
+ var scope = inner_scope;
+ var is_noreturn = false;
+        while (scope != outer_scope) {
+ switch (scope.id) {
+ Scope.Id.Defer => {
+ const defer_scope = @fieldParentPtr(Scope.Defer, "base", scope);
+ const generate = switch (defer_scope.kind) {
+ Scope.Defer.Kind.ScopeExit => true,
+ Scope.Defer.Kind.ErrorExit => gen_kind == Scope.Defer.Kind.ErrorExit,
+ };
+ if (generate) {
+ const defer_expr_scope = defer_scope.defer_expr_scope;
+ const instruction = try await (async irb.genNode(
+ defer_expr_scope.expr_node,
+ &defer_expr_scope.base,
+ LVal.None,
+ ) catch unreachable);
+ if (instruction.isNoReturn()) {
+ is_noreturn = true;
+ } else {
+ _ = try irb.build(
+ Inst.CheckVoidStmt,
+ &defer_expr_scope.base,
+ Span.token(defer_expr_scope.expr_node.lastToken()),
+ Inst.CheckVoidStmt.Params{ .target = instruction },
+ );
+ }
+ }
+                    scope = scope.parent orelse return is_noreturn;
+                },
+ Scope.Id.FnDef,
+ Scope.Id.Decls,
+ Scope.Id.Root,
+ => return is_noreturn,
+
+ Scope.Id.CompTime,
+ Scope.Id.Block,
+ Scope.Id.Var,
+ => scope = scope.parent orelse return is_noreturn,
+
+ Scope.Id.DeferExpr => unreachable,
+ }
+        }
+        return is_noreturn;
+    }
+
+ pub fn lvalWrap(irb: *Builder, scope: *Scope, instruction: *Inst, lval: LVal) !*Inst {
+ switch (lval) {
+ LVal.None => return instruction,
+ LVal.Ptr => {
+                // We needed a pointer to a value, but we got a value. So we build
+                // a Ref instruction that takes a const pointer to it.
+ return irb.build(Inst.Ref, scope, instruction.span, Inst.Ref.Params{
+ .target = instruction,
+ .mut = Type.Pointer.Mut.Const,
+ .volatility = Type.Pointer.Vol.Non,
+ });
+ },
+ }
+ }
+
+ fn arena(self: *Builder) *Allocator {
+ return &self.code.arena.allocator;
+ }
+
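+    /// Allocates an instruction of type I in the code arena, initializes its base
+    /// (debug id, scope, span, and the val declared by I.ir_val_init), uses comptime
+    /// reflection over I.Params to ref() any instruction or basic block operands,
+    /// and appends the result to the current basic block.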
+ fn buildExtra(
+ self: *Builder,
+ comptime I: type,
+ scope: *Scope,
+ span: Span,
+ params: I.Params,
+ is_generated: bool,
+ ) !*Inst {
+ const inst = try self.arena().create(I{
+ .base = Inst{
+ .id = Inst.typeToId(I),
+ .is_generated = is_generated,
+ .scope = scope,
+ .debug_id = self.next_debug_id,
+ .val = switch (I.ir_val_init) {
+ IrVal.Init.Unknown => IrVal.Unknown,
+ IrVal.Init.NoReturn => IrVal{ .KnownValue = &Value.NoReturn.get(self.comp).base },
+ IrVal.Init.Void => IrVal{ .KnownValue = &Value.Void.get(self.comp).base },
+ },
+ .ref_count = 0,
+ .span = span,
+ .child = null,
+ .parent = null,
+ .llvm_value = undefined,
+ .owner_bb = self.current_basic_block,
+ },
+ .params = params,
+ });
+
+ // Look at the params and ref() other instructions
+ comptime var i = 0;
+ inline while (i < @memberCount(I.Params)) : (i += 1) {
+ const FieldType = comptime @typeOf(@field(I.Params(undefined), @memberName(I.Params, i)));
+ switch (FieldType) {
+ *Inst => @field(inst.params, @memberName(I.Params, i)).ref(self),
+ *BasicBlock => @field(inst.params, @memberName(I.Params, i)).ref(self),
+ ?*Inst => if (@field(inst.params, @memberName(I.Params, i))) |other| other.ref(self),
+ []*Inst => {
+ // TODO https://github.com/ziglang/zig/issues/1269
+ for (@field(inst.params, @memberName(I.Params, i))) |other|
+ other.ref(self);
+ },
+ []*BasicBlock => {
+ // TODO https://github.com/ziglang/zig/issues/1269
+ for (@field(inst.params, @memberName(I.Params, i))) |other|
+ other.ref(self);
+ },
+ Type.Pointer.Mut,
+ Type.Pointer.Vol,
+ Type.Pointer.Size,
+ LVal,
+ *Decl,
+ *Scope.Var,
+ => {},
+ // it's ok to add more types here, just make sure that
+ // any instructions and basic blocks are ref'd appropriately
+ else => @compileError("unrecognized type in Params: " ++ @typeName(FieldType)),
+ }
+ }
+
+ self.next_debug_id += 1;
+ try self.current_basic_block.instruction_list.append(&inst.base);
+ return &inst.base;
+ }
+
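+    /// Builds a source-level instruction (is_generated = false). Compiler-generated
+    /// instructions should go through buildGen so they are exempt from the
+    /// unreachable-code check in Analyze.finishBasicBlock.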
+ fn build(
+ self: *Builder,
+ comptime I: type,
+ scope: *Scope,
+ span: Span,
+ params: I.Params,
+ ) !*Inst {
+ return self.buildExtra(I, scope, span, params, false);
+ }
+
+ fn buildGen(
+ self: *Builder,
+ comptime I: type,
+ scope: *Scope,
+ span: Span,
+ params: I.Params,
+ ) !*Inst {
+ return self.buildExtra(I, scope, span, params, true);
+ }
+
+ fn buildConstBool(self: *Builder, scope: *Scope, span: Span, x: bool) !*Inst {
+ const inst = try self.build(Inst.Const, scope, span, Inst.Const.Params{});
+ inst.val = IrVal{ .KnownValue = &Value.Bool.get(self.comp, x).base };
+ return inst;
+ }
+
+ fn buildConstVoid(self: *Builder, scope: *Scope, span: Span, is_generated: bool) !*Inst {
+ const inst = try self.buildExtra(Inst.Const, scope, span, Inst.Const.Params{}, is_generated);
+ inst.val = IrVal{ .KnownValue = &Value.Void.get(self.comp).base };
+ return inst;
+ }
+
+ fn buildConstValue(self: *Builder, scope: *Scope, span: Span, v: *Value) !*Inst {
+ const inst = try self.build(Inst.Const, scope, span, Inst.Const.Params{});
+ inst.val = IrVal{ .KnownValue = v.getRef() };
+ return inst;
+ }
+
+ /// If the code is explicitly set to be comptime, then builds a const bool,
+ /// otherwise builds a TestCompTime instruction.
+ fn buildTestCompTime(self: *Builder, scope: *Scope, span: Span, target: *Inst) !*Inst {
+ if (self.isCompTime(scope)) {
+ return self.buildConstBool(scope, span, true);
+ } else {
+ return self.build(
+ Inst.TestCompTime,
+ scope,
+ span,
+ Inst.TestCompTime.Params{ .target = target },
+ );
+ }
+ }
+
+ fn genAsyncReturn(irb: *Builder, scope: *Scope, span: Span, result: *Inst, is_gen: bool) !*Inst {
+        _ = try irb.buildGen(
+ Inst.AddImplicitReturnType,
+ scope,
+ span,
+ Inst.AddImplicitReturnType.Params{ .target = result },
+ );
+
+ if (!irb.is_async) {
+ return irb.buildExtra(
+ Inst.Return,
+ scope,
+ span,
+ Inst.Return.Params{ .return_value = result },
+ is_gen,
+ );
+ }
+ return error.Unimplemented;
+
+ //ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
+ //IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node,
+ // get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
+ //// TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig
+ //IrInstruction *replacement_value = irb->exec->coro_handle;
+ //IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node,
+ // promise_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr,
+ // AtomicRmwOp_xchg, AtomicOrderSeqCst);
+ //ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, maybe_await_handle);
+ //IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle);
+ //IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
+ //return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final,
+ // is_comptime);
+ //// the above blocks are rendered by ir_gen after the rest of codegen
+ }
+
+ const Ident = union(enum) {
+ NotFound,
+ Decl: *Decl,
+ VarScope: *Scope.Var,
+ };
+
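+    /// Resolves a name by walking up the scope chain: Decls scopes are searched via
+    /// their declaration tables, Var scopes by name comparison; reaching the Root
+    /// scope means the identifier was not found.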
+ async fn findIdent(irb: *Builder, scope: *Scope, name: []const u8) Ident {
+ var s = scope;
+ while (true) {
+ switch (s.id) {
+ Scope.Id.Root => return Ident.NotFound,
+ Scope.Id.Decls => {
+ const decls = @fieldParentPtr(Scope.Decls, "base", s);
+ const table = await (async decls.getTableReadOnly() catch unreachable);
+ if (table.get(name)) |entry| {
+ return Ident{ .Decl = entry.value };
+ }
+ },
+ Scope.Id.Var => {
+ const var_scope = @fieldParentPtr(Scope.Var, "base", s);
+ if (mem.eql(u8, var_scope.name, name)) {
+ return Ident{ .VarScope = var_scope };
+ }
+ },
+ else => {},
+ }
+ s = s.parent.?;
+ }
+ }
+};
+
+const Analyze = struct {
+ irb: Builder,
+ old_bb_index: usize,
+ const_predecessor_bb: ?*BasicBlock,
+ parent_basic_block: *BasicBlock,
+ instruction_index: usize,
+ src_implicit_return_type_list: std.ArrayList(*Inst),
+ explicit_return_type: ?*Type,
+
+ pub const Error = error{
+ /// This is only for when we have already reported a compile error. It is the poison value.
+ SemanticAnalysisFailed,
+
+        /// This is a placeholder - it is useful instead of panicking, but once the
+        /// compiler is done this error code will be removed.
+ Unimplemented,
+
+ OutOfMemory,
+ };
+
+ pub fn init(comp: *Compilation, root_scope: *Scope.Root, explicit_return_type: ?*Type) !Analyze {
+ var irb = try Builder.init(comp, root_scope, null);
+ errdefer irb.abort();
+
+ return Analyze{
+ .irb = irb,
+ .old_bb_index = 0,
+ .const_predecessor_bb = null,
+ .parent_basic_block = undefined, // initialized with startBasicBlock
+ .instruction_index = undefined, // initialized with startBasicBlock
+ .src_implicit_return_type_list = std.ArrayList(*Inst).init(irb.arena()),
+ .explicit_return_type = explicit_return_type,
+ };
+ }
+
+ pub fn abort(self: *Analyze) void {
+ self.irb.abort();
+ }
+
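+    /// Returns the analyzed counterpart of old_bb. An existing child block is
+    /// reused unless it was already created for this same referencing instruction,
+    /// in which case a fresh block is created and linked to old_bb.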
+ pub fn getNewBasicBlock(self: *Analyze, old_bb: *BasicBlock, ref_old_instruction: ?*Inst) !*BasicBlock {
+ if (old_bb.child) |child| {
+ if (ref_old_instruction == null or child.ref_instruction != ref_old_instruction)
+ return child;
+ }
+
+ const new_bb = try self.irb.createBasicBlock(old_bb.scope, old_bb.name_hint);
+ new_bb.linkToParent(old_bb);
+ new_bb.ref_instruction = ref_old_instruction;
+ return new_bb;
+ }
+
+ pub fn startBasicBlock(self: *Analyze, old_bb: *BasicBlock, const_predecessor_bb: ?*BasicBlock) void {
+ self.instruction_index = 0;
+ self.parent_basic_block = old_bb;
+ self.const_predecessor_bb = const_predecessor_bb;
+ }
+
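+    /// Appends the just-analyzed block to the new code, reports any non-generated
+    /// instructions that follow a noreturn instruction as unreachable code, then
+    /// scans the old basic block list (wrapping around once) for the next block
+    /// whose child exists but has not yet been filled with instructions.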
+ pub fn finishBasicBlock(ira: *Analyze, old_code: *Code) !void {
+ try ira.irb.code.basic_block_list.append(ira.irb.current_basic_block);
+ ira.instruction_index += 1;
+
+ while (ira.instruction_index < ira.parent_basic_block.instruction_list.len) {
+ const next_instruction = ira.parent_basic_block.instruction_list.at(ira.instruction_index);
+
+ if (!next_instruction.is_generated) {
+ try ira.addCompileError(next_instruction.span, "unreachable code");
+ break;
+ }
+ ira.instruction_index += 1;
+ }
+
+ ira.old_bb_index += 1;
+
+ var need_repeat = true;
+ while (true) {
+ while (ira.old_bb_index < old_code.basic_block_list.len) {
+ const old_bb = old_code.basic_block_list.at(ira.old_bb_index);
+ const new_bb = old_bb.child orelse {
+ ira.old_bb_index += 1;
+ continue;
+ };
+ if (new_bb.instruction_list.len != 0) {
+ ira.old_bb_index += 1;
+ continue;
+ }
+ ira.irb.current_basic_block = new_bb;
+
+ ira.startBasicBlock(old_bb, null);
+ return;
+ }
+ if (!need_repeat)
+ return;
+ need_repeat = false;
+ ira.old_bb_index = 0;
+ continue;
+ }
+ }
+
+ fn addCompileError(self: *Analyze, span: Span, comptime fmt: []const u8, args: ...) !void {
+ return self.irb.comp.addCompileError(self.irb.root_scope, span, fmt, args);
+ }
+
+ fn resolvePeerTypes(self: *Analyze, expected_type: ?*Type, peers: []const *Inst) Analyze.Error!*Type {
+ // TODO actual implementation
+ return &Type.Void.get(self.irb.comp).base;
+ }
+
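+    /// Implicitly casts target to the destination type if one is given; the value
+    /// is returned unchanged when the types already match or the source type is
+    /// noreturn.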
+ fn implicitCast(self: *Analyze, target: *Inst, optional_dest_type: ?*Type) Analyze.Error!*Inst {
+ const dest_type = optional_dest_type orelse return target;
+ const from_type = target.getKnownType();
+ if (from_type == dest_type or from_type.id == Type.Id.NoReturn) return target;
+ return self.analyzeCast(target, target, dest_type);
+ }
+
+ fn analyzeCast(ira: *Analyze, source_instr: *Inst, target: *Inst, dest_type: *Type) !*Inst {
+ const from_type = target.getKnownType();
+
+ //if (type_is_invalid(wanted_type) || type_is_invalid(actual_type)) {
+ // return ira->codegen->invalid_instruction;
+ //}
+
+ //// perfect match or non-const to const
+ //ConstCastOnly const_cast_result = types_match_const_cast_only(ira, wanted_type, actual_type,
+ // source_node, false);
+ //if (const_cast_result.id == ConstCastResultIdOk) {
+ // return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop, false);
+ //}
+
+ //// widening conversion
+ //if (wanted_type->id == TypeTableEntryIdInt &&
+ // actual_type->id == TypeTableEntryIdInt &&
+ // wanted_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
+ // wanted_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
+ //{
+ // return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
+ //}
+
+ //// small enough unsigned ints can get casted to large enough signed ints
+ //if (wanted_type->id == TypeTableEntryIdInt && wanted_type->data.integral.is_signed &&
+ // actual_type->id == TypeTableEntryIdInt && !actual_type->data.integral.is_signed &&
+ // wanted_type->data.integral.bit_count > actual_type->data.integral.bit_count)
+ //{
+ // return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
+ //}
+
+ //// float widening conversion
+ //if (wanted_type->id == TypeTableEntryIdFloat &&
+ // actual_type->id == TypeTableEntryIdFloat &&
+ // wanted_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
+ //{
+ // return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
+ //}
+
+ //// cast from [N]T to []const T
+ //if (is_slice(wanted_type) && actual_type->id == TypeTableEntryIdArray) {
+ // TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(ptr_type->id == TypeTableEntryIdPointer);
+ // if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
+ // types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// cast from *const [N]T to []const T
+ //if (is_slice(wanted_type) &&
+ // actual_type->id == TypeTableEntryIdPointer &&
+ // actual_type->data.pointer.is_const &&
+ // actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
+ //{
+ // TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(ptr_type->id == TypeTableEntryIdPointer);
+
+ // TypeTableEntry *array_type = actual_type->data.pointer.child_type;
+
+ // if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
+ // types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// cast from [N]T to *const []const T
+ //if (wanted_type->id == TypeTableEntryIdPointer &&
+ // wanted_type->data.pointer.is_const &&
+ // is_slice(wanted_type->data.pointer.child_type) &&
+ // actual_type->id == TypeTableEntryIdArray)
+ //{
+ // TypeTableEntry *ptr_type =
+ // wanted_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(ptr_type->id == TypeTableEntryIdPointer);
+ // if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
+ // types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.pointer.child_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ //}
+
+ //// cast from [N]T to ?[]const T
+ //if (wanted_type->id == TypeTableEntryIdOptional &&
+ // is_slice(wanted_type->data.maybe.child_type) &&
+ // actual_type->id == TypeTableEntryIdArray)
+ //{
+ // TypeTableEntry *ptr_type =
+ // wanted_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(ptr_type->id == TypeTableEntryIdPointer);
+ // if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
+ // types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.maybe.child_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ //}
+
+ //// *[N]T to [*]T
+ //if (wanted_type->id == TypeTableEntryIdPointer &&
+ // wanted_type->data.pointer.ptr_len == PtrLenUnknown &&
+ // actual_type->id == TypeTableEntryIdPointer &&
+ // actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ // actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
+ // actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment &&
+ // types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ // actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ // !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ //{
+ // return ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_type);
+ //}
+
+ //// *[N]T to []T
+ //if (is_slice(wanted_type) &&
+ // actual_type->id == TypeTableEntryIdPointer &&
+ // actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ // actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
+ //{
+ // TypeTableEntry *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(slice_ptr_type->id == TypeTableEntryIdPointer);
+ // if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
+ // actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ // !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ // {
+ // return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// cast from T to ?T
+ //// note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism
+ //if (wanted_type->id == TypeTableEntryIdOptional) {
+ // TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type;
+ // if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node,
+ // false).id == ConstCastResultIdOk)
+ // {
+ // return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type);
+ // } else if (actual_type->id == TypeTableEntryIdComptimeInt ||
+ // actual_type->id == TypeTableEntryIdComptimeFloat)
+ // {
+ // if (ir_num_lit_fits_in_other_type(ira, value, wanted_child_type, true)) {
+ // return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type);
+ // } else {
+ // return ira->codegen->invalid_instruction;
+ // }
+ // } else if (wanted_child_type->id == TypeTableEntryIdPointer &&
+ // wanted_child_type->data.pointer.is_const &&
+ // (actual_type->id == TypeTableEntryIdPointer || is_container(actual_type)))
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_child_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ //}
+
+ //// cast from null literal to maybe type
+ //if (wanted_type->id == TypeTableEntryIdOptional &&
+ // actual_type->id == TypeTableEntryIdNull)
+ //{
+ // return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type);
+ //}
+
+ //// cast from child type of error type to error type
+ //if (wanted_type->id == TypeTableEntryIdErrorUnion) {
+ // if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type);
+ // } else if (actual_type->id == TypeTableEntryIdComptimeInt ||
+ // actual_type->id == TypeTableEntryIdComptimeFloat)
+ // {
+ // if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.error_union.payload_type, true)) {
+ // return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type);
+ // } else {
+ // return ira->codegen->invalid_instruction;
+ // }
+ // }
+ //}
+
+ //// cast from [N]T to E![]const T
+ //if (wanted_type->id == TypeTableEntryIdErrorUnion &&
+ // is_slice(wanted_type->data.error_union.payload_type) &&
+ // actual_type->id == TypeTableEntryIdArray)
+ //{
+ // TypeTableEntry *ptr_type =
+ // wanted_type->data.error_union.payload_type->data.structure.fields[slice_ptr_index].type_entry;
+ // assert(ptr_type->id == TypeTableEntryIdPointer);
+ // if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
+ // types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ // source_node, false).id == ConstCastResultIdOk)
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ //}
+
+ //// cast from error set to error union type
+ //if (wanted_type->id == TypeTableEntryIdErrorUnion &&
+ // actual_type->id == TypeTableEntryIdErrorSet)
+ //{
+ // return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type);
+ //}
+
+ //// cast from T to E!?T
+ //if (wanted_type->id == TypeTableEntryIdErrorUnion &&
+ // wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
+ // actual_type->id != TypeTableEntryIdOptional)
+ //{
+ // TypeTableEntry *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type;
+ // if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node, false).id == ConstCastResultIdOk ||
+ // actual_type->id == TypeTableEntryIdNull ||
+ // actual_type->id == TypeTableEntryIdComptimeInt ||
+ // actual_type->id == TypeTableEntryIdComptimeFloat)
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ //}
+
+ // cast from comptime-known integer to another integer where the value fits
+ if (target.isCompTime() and (from_type.id == Type.Id.Int or from_type.id == Type.Id.ComptimeInt)) cast: {
+ const target_val = target.val.KnownValue;
+ const from_int = &target_val.cast(Value.Int).?.big_int;
+ const fits = fits: {
+ if (dest_type.cast(Type.ComptimeInt)) |ctint| {
+ break :fits true;
+ }
+ if (dest_type.cast(Type.Int)) |int| {
+ break :fits from_int.fitsInTwosComp(int.key.is_signed, int.key.bit_count);
+ }
+ break :cast;
+ };
+ if (!fits) {
+ try ira.addCompileError(
+ source_instr.span,
+ "integer value '{}' cannot be stored in type '{}'",
+ from_int,
+ dest_type.name,
+ );
+ return error.SemanticAnalysisFailed;
+ }
+
+ const new_val = try target.copyVal(ira.irb.comp);
+ new_val.setType(dest_type, ira.irb.comp);
+ return ira.irb.buildConstValue(source_instr.scope, source_instr.span, new_val);
+ }
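+
+ // Illustrative (hypothetical) user code that exercises the check above:
+ //
+ // const ok: u8 = 255; // fits, so the comptime value is simply re-typed to u8
+ // const bad: u8 = 300; // rejected: integer value '300' cannot be stored in type 'u8'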
+
+ // cast from number literal to another type
+ // cast from number literal to *const integer
+ //if (actual_type->id == TypeTableEntryIdComptimeFloat ||
+ // actual_type->id == TypeTableEntryIdComptimeInt)
+ //{
+ // ensure_complete_type(ira->codegen, wanted_type);
+ // if (type_is_invalid(wanted_type))
+ // return ira->codegen->invalid_instruction;
+ // if (wanted_type->id == TypeTableEntryIdEnum) {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.enumeration.tag_int_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // } else if (wanted_type->id == TypeTableEntryIdPointer &&
+ // wanted_type->data.pointer.is_const)
+ // {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.pointer.child_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // } else if (ir_num_lit_fits_in_other_type(ira, value, wanted_type, true)) {
+ // CastOp op;
+ // if ((actual_type->id == TypeTableEntryIdComptimeFloat &&
+ // wanted_type->id == TypeTableEntryIdFloat) ||
+ // (actual_type->id == TypeTableEntryIdComptimeInt &&
+ // wanted_type->id == TypeTableEntryIdInt))
+ // {
+ // op = CastOpNumLitToConcrete;
+ // } else if (wanted_type->id == TypeTableEntryIdInt) {
+ // op = CastOpFloatToInt;
+ // } else if (wanted_type->id == TypeTableEntryIdFloat) {
+ // op = CastOpIntToFloat;
+ // } else {
+ // zig_unreachable();
+ // }
+ // return ir_resolve_cast(ira, source_instr, value, wanted_type, op, false);
+ // } else {
+ // return ira->codegen->invalid_instruction;
+ // }
+ //}
+
+ //// cast from typed number to integer or float literal.
+ //// works when the number is known at compile time
+ //if (instr_is_comptime(value) &&
+ // ((actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdComptimeInt) ||
+ // (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdComptimeFloat)))
+ //{
+ // return ir_analyze_number_to_literal(ira, source_instr, value, wanted_type);
+ //}
+
+ //// cast from union to the enum type of the union
+ //if (actual_type->id == TypeTableEntryIdUnion && wanted_type->id == TypeTableEntryIdEnum) {
+ // type_ensure_zero_bits_known(ira->codegen, actual_type);
+ // if (type_is_invalid(actual_type))
+ // return ira->codegen->invalid_instruction;
+
+ // if (actual_type->data.unionation.tag_type == wanted_type) {
+ // return ir_analyze_union_to_tag(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// enum to union which has the enum as the tag type
+ //if (wanted_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum &&
+ // (wanted_type->data.unionation.decl_node->data.container_decl.auto_enum ||
+ // wanted_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
+ //{
+ // type_ensure_zero_bits_known(ira->codegen, wanted_type);
+ // if (wanted_type->data.unionation.tag_type == actual_type) {
+ // return ir_analyze_enum_to_union(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// enum to &const union which has the enum as the tag type
+ //if (actual_type->id == TypeTableEntryIdEnum && wanted_type->id == TypeTableEntryIdPointer) {
+ // TypeTableEntry *union_type = wanted_type->data.pointer.child_type;
+ // if (union_type->data.unionation.decl_node->data.container_decl.auto_enum ||
+ // union_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)
+ // {
+ // type_ensure_zero_bits_known(ira->codegen, union_type);
+ // if (union_type->data.unionation.tag_type == actual_type) {
+ // IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, union_type, value);
+ // if (type_is_invalid(cast1->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1);
+ // if (type_is_invalid(cast2->value.type))
+ // return ira->codegen->invalid_instruction;
+
+ // return cast2;
+ // }
+ // }
+ //}
+
+ //// cast from *T to *[1]T
+ //if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ // actual_type->id == TypeTableEntryIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle)
+ //{
+ // TypeTableEntry *array_type = wanted_type->data.pointer.child_type;
+ // if (array_type->id == TypeTableEntryIdArray && array_type->data.array.len == 1 &&
+ // types_match_const_cast_only(ira, array_type->data.array.child_type,
+ // actual_type->data.pointer.child_type, source_node,
+ // !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ // {
+ // if (wanted_type->data.pointer.alignment > actual_type->data.pointer.alignment) {
+ // ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment"));
+ // add_error_note(ira->codegen, msg, value->source_node,
+ // buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&actual_type->name),
+ // actual_type->data.pointer.alignment));
+ // add_error_note(ira->codegen, msg, source_instr->source_node,
+ // buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&wanted_type->name),
+ // wanted_type->data.pointer.alignment));
+ // return ira->codegen->invalid_instruction;
+ // }
+ // return ir_analyze_ptr_to_array(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ //// cast from T to *T where T is zero bits
+ //if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ // types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ // actual_type, source_node, !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ //{
+ // type_ensure_zero_bits_known(ira->codegen, actual_type);
+ // if (type_is_invalid(actual_type)) {
+ // return ira->codegen->invalid_instruction;
+ // }
+ // if (!type_has_bits(actual_type)) {
+ // return ir_get_ref(ira, source_instr, value, false, false);
+ // }
+ //}
+
+ //// cast from undefined to anything
+ //if (actual_type->id == TypeTableEntryIdUndefined) {
+ // return ir_analyze_undefined_to_anything(ira, source_instr, value, wanted_type);
+ //}
+
+ //// cast from something to const pointer of it
+ //if (!type_requires_comptime(actual_type)) {
+ // TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
+ // if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node, false).id == ConstCastResultIdOk) {
+ // return ir_analyze_cast_ref(ira, source_instr, value, wanted_type);
+ // }
+ //}
+
+ try ira.addCompileError(
+ source_instr.span,
+ "expected type '{}', found '{}'",
+ dest_type.name,
+ from_type.name,
+ );
+ //ErrorMsg *parent_msg = ir_add_error_node(ira, source_instr->source_node,
+ // buf_sprintf("expected type '%s', found '%s'",
+ // buf_ptr(&wanted_type->name),
+ // buf_ptr(&actual_type->name)));
+ //report_recursive_error(ira, source_instr->source_node, &const_cast_result, parent_msg);
+ return error.SemanticAnalysisFailed;
+ }
+
+ fn getCompTimeValOrNullUndefOk(self: *Analyze, target: *Inst) ?*Value {
+ @panic("TODO");
+ }
+
+ fn getCompTimeRef(
+ self: *Analyze,
+ value: *Value,
+ ptr_mut: Value.Ptr.Mut,
+ mut: Type.Pointer.Mut,
+ volatility: Type.Pointer.Vol,
+ ) Analyze.Error!*Inst {
+ return error.Unimplemented;
+ }
+};
+
+pub async fn gen(
+ comp: *Compilation,
+ body_node: *ast.Node,
+ scope: *Scope,
+) !*Code {
+ var irb = try Builder.init(comp, scope.findRoot(), scope);
+ errdefer irb.abort();
+
+ const entry_block = try irb.createBasicBlock(scope, c"Entry");
+ entry_block.ref(&irb); // Entry block gets a reference because we enter it to begin.
+ try irb.setCursorAtEndAndAppendBlock(entry_block);
+
+ const result = try await (async irb.genNode(body_node, scope, LVal.None) catch unreachable);
+ if (!result.isNoReturn()) {
+ // no need for save_err_ret_addr because this cannot return error
+ _ = try irb.genAsyncReturn(scope, Span.token(body_node.lastToken()), result, true);
+ }
+
+ return irb.finish();
+}
+
+pub async fn analyze(comp: *Compilation, old_code: *Code, expected_type: ?*Type) !*Code {
+ const old_entry_bb = old_code.basic_block_list.at(0);
+ const root_scope = old_entry_bb.scope.findRoot();
+
+ var ira = try Analyze.init(comp, root_scope, expected_type);
+ errdefer ira.abort();
+
+ const new_entry_bb = try ira.getNewBasicBlock(old_entry_bb, null);
+ new_entry_bb.ref(&ira.irb);
+
+ ira.irb.current_basic_block = new_entry_bb;
+
+ ira.startBasicBlock(old_entry_bb, null);
+
+ while (ira.old_bb_index < old_code.basic_block_list.len) {
+ const old_instruction = ira.parent_basic_block.instruction_list.at(ira.instruction_index);
+
+ if (old_instruction.ref_count == 0 and !old_instruction.hasSideEffects()) {
+ ira.instruction_index += 1;
+ continue;
+ }
+
+ const return_inst = try await (async old_instruction.analyze(&ira) catch unreachable);
+ assert(return_inst.val != IrVal.Unknown); // at least the type should be known at this point
+ return_inst.linkToParent(old_instruction);
+ // Note: if we ever modify the above to handle error.CompileError by continuing analysis,
+ // then here we want to check if ira.isCompTime() and return early if true
+
+ if (return_inst.isNoReturn()) {
+ try ira.finishBasicBlock(old_code);
+ continue;
+ }
+
+ ira.instruction_index += 1;
+ }
+
+ if (ira.src_implicit_return_type_list.len == 0) {
+ ira.irb.code.return_type = &Type.NoReturn.get(comp).base;
+ return ira.irb.finish();
+ }
+
+ ira.irb.code.return_type = try ira.resolvePeerTypes(expected_type, ira.src_implicit_return_type_list.toSliceConst());
+ return ira.irb.finish();
+}
diff --git a/src-self-hosted/libc_installation.zig b/src-self-hosted/libc_installation.zig
new file mode 100644
index 0000000000..3938c0d90c
--- /dev/null
+++ b/src-self-hosted/libc_installation.zig
@@ -0,0 +1,462 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const event = std.event;
+const Target = @import("target.zig").Target;
+const c = @import("c.zig");
+
+/// See the render function implementation for documentation of the fields.
+pub const LibCInstallation = struct {
+ include_dir: []const u8,
+ lib_dir: ?[]const u8,
+ static_lib_dir: ?[]const u8,
+ msvc_lib_dir: ?[]const u8,
+ kernel32_lib_dir: ?[]const u8,
+ dynamic_linker_path: ?[]const u8,
+
+ pub const FindError = error{
+ OutOfMemory,
+ FileSystem,
+ UnableToSpawnCCompiler,
+ CCompilerExitCode,
+ CCompilerCrashed,
+ CCompilerCannotFindHeaders,
+ LibCRuntimeNotFound,
+ LibCStdLibHeaderNotFound,
+ LibCKernel32LibNotFound,
+ UnsupportedArchitecture,
+ };
+
+ pub fn parse(
+ self: *LibCInstallation,
+ allocator: *std.mem.Allocator,
+ libc_file: []const u8,
+ stderr: *std.io.OutStream(std.io.FileOutStream.Error),
+ ) !void {
+ self.initEmpty();
+
+ const keys = []const []const u8{
+ "include_dir",
+ "lib_dir",
+ "static_lib_dir",
+ "msvc_lib_dir",
+ "kernel32_lib_dir",
+ "dynamic_linker_path",
+ };
+ const FoundKey = struct {
+ found: bool,
+ allocated: ?[]u8,
+ };
+ var found_keys = [1]FoundKey{FoundKey{ .found = false, .allocated = null }} ** keys.len;
+ errdefer {
+ self.initEmpty();
+ for (found_keys) |found_key| {
+ if (found_key.allocated) |s| allocator.free(s);
+ }
+ }
+
+ const contents = try std.io.readFileAlloc(allocator, libc_file);
+ defer allocator.free(contents);
+
+ var it = std.mem.split(contents, "\n");
+ while (it.next()) |line| {
+ if (line.len == 0 or line[0] == '#') continue;
+ var line_it = std.mem.split(line, "=");
+ const name = line_it.next() orelse {
+ try stderr.print("missing equal sign after field name\n");
+ return error.ParseError;
+ };
+ const value = line_it.rest();
+ inline for (keys) |key, i| {
+ if (std.mem.eql(u8, name, key)) {
+ found_keys[i].found = true;
+ switch (@typeInfo(@typeOf(@field(self, key)))) {
+ builtin.TypeId.Optional => {
+ if (value.len == 0) {
+ @field(self, key) = null;
+ } else {
+ found_keys[i].allocated = try std.mem.dupe(allocator, u8, value);
+ @field(self, key) = found_keys[i].allocated;
+ }
+ },
+ else => {
+ if (value.len == 0) {
+ try stderr.print("field cannot be empty: {}\n", key);
+ return error.ParseError;
+ }
+ const dupe = try std.mem.dupe(allocator, u8, value);
+ found_keys[i].allocated = dupe;
+ @field(self, key) = dupe;
+ },
+ }
+ break;
+ }
+ }
+ }
+ for (found_keys) |found_key, i| {
+ if (!found_key.found) {
+ try stderr.print("missing field: {}\n", keys[i]);
+ return error.ParseError;
+ }
+ }
+ }
+
+ pub fn render(self: *const LibCInstallation, out: *std.io.OutStream(std.io.FileOutStream.Error)) !void {
+ @setEvalBranchQuota(4000);
+ try out.print(
+ \\# The directory that contains `stdlib.h`.
+ \\# On Linux, can be found with: `cc -E -Wp,-v -xc /dev/null`
+ \\include_dir={}
+ \\
+ \\# The directory that contains `crt1.o`.
+ \\# On Linux, can be found with `cc -print-file-name=crt1.o`.
+ \\# Not needed when targeting MacOS.
+ \\lib_dir={}
+ \\
+ \\# The directory that contains `crtbegin.o`.
+ \\# On Linux, can be found with `cc -print-file-name=crtbegin.o`.
+ \\# Not needed when targeting MacOS or Windows.
+ \\static_lib_dir={}
+ \\
+ \\# The directory that contains `vcruntime.lib`.
+ \\# Only needed when targeting Windows.
+ \\msvc_lib_dir={}
+ \\
+ \\# The directory that contains `kernel32.lib`.
+ \\# Only needed when targeting Windows.
+ \\kernel32_lib_dir={}
+ \\
+ \\# The full path to the dynamic linker, on the target system.
+ \\# Only needed when targeting Linux.
+ \\dynamic_linker_path={}
+ \\
+ ,
+ self.include_dir,
+ self.lib_dir orelse "",
+ self.static_lib_dir orelse "",
+ self.msvc_lib_dir orelse "",
+ self.kernel32_lib_dir orelse "",
+ self.dynamic_linker_path orelse Target(Target.Native).getDynamicLinkerPath(),
+ );
+ }
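+
+ // For reference, a rendered file (comment lines omitted, paths purely illustrative)
+ // might look like:
+ //
+ // include_dir=/usr/include
+ // lib_dir=/usr/lib/x86_64-linux-gnu
+ // static_lib_dir=/usr/lib/gcc/x86_64-linux-gnu/8
+ // msvc_lib_dir=
+ // kernel32_lib_dir=
+ // dynamic_linker_path=/lib64/ld-linux-x86-64.so.2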
+
+ /// Finds the default, native libc.
+ pub async fn findNative(self: *LibCInstallation, loop: *event.Loop) !void {
+ self.initEmpty();
+ var group = event.Group(FindError!void).init(loop);
+ errdefer group.cancelAll();
+ var windows_sdk: ?*c.ZigWindowsSDK = null;
+ errdefer if (windows_sdk) |sdk| c.zig_free_windows_sdk(@ptrCast(?[*]c.ZigWindowsSDK, sdk));
+
+ switch (builtin.os) {
+ builtin.Os.windows => {
+ var sdk: *c.ZigWindowsSDK = undefined;
+ switch (c.zig_find_windows_sdk(@ptrCast(?[*]?[*]c.ZigWindowsSDK, &sdk))) {
+ c.ZigFindWindowsSdkError.None => {
+ windows_sdk = sdk;
+
+ if (sdk.msvc_lib_dir_ptr) |ptr| {
+ self.msvc_lib_dir = try std.mem.dupe(loop.allocator, u8, ptr[0..sdk.msvc_lib_dir_len]);
+ }
+ try group.call(findNativeKernel32LibDir, self, loop, sdk);
+ try group.call(findNativeIncludeDirWindows, self, loop, sdk);
+ try group.call(findNativeLibDirWindows, self, loop, sdk);
+ },
+ c.ZigFindWindowsSdkError.OutOfMemory => return error.OutOfMemory,
+ c.ZigFindWindowsSdkError.NotFound => return error.NotFound,
+ c.ZigFindWindowsSdkError.PathTooLong => return error.NotFound,
+ }
+ },
+ builtin.Os.linux => {
+ try group.call(findNativeIncludeDirLinux, self, loop);
+ try group.call(findNativeLibDirLinux, self, loop);
+ try group.call(findNativeStaticLibDir, self, loop);
+ try group.call(findNativeDynamicLinker, self, loop);
+ },
+ builtin.Os.macosx => {
+ self.include_dir = try std.mem.dupe(loop.allocator, u8, "/usr/include");
+ },
+ else => @compileError("unimplemented: find libc for this OS"),
+ }
+ return await (async group.wait() catch unreachable);
+ }
+
+ async fn findNativeIncludeDirLinux(self: *LibCInstallation, loop: *event.Loop) !void {
+ const cc_exe = std.os.getEnvPosix("CC") orelse "cc";
+ const argv = []const []const u8{
+ cc_exe,
+ "-E",
+ "-Wp,-v",
+ "-xc",
+ "/dev/null",
+ };
+ // TODO make this use event loop
+ const errorable_result = std.os.ChildProcess.exec(loop.allocator, argv, null, null, 1024 * 1024);
+ const exec_result = if (std.debug.runtime_safety) blk: {
+ break :blk errorable_result catch unreachable;
+ } else blk: {
+ break :blk errorable_result catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => return error.UnableToSpawnCCompiler,
+ };
+ };
+ defer {
+ loop.allocator.free(exec_result.stdout);
+ loop.allocator.free(exec_result.stderr);
+ }
+
+ switch (exec_result.term) {
+ std.os.ChildProcess.Term.Exited => |code| {
+ if (code != 0) return error.CCompilerExitCode;
+ },
+ else => {
+ return error.CCompilerCrashed;
+ },
+ }
+
+ var it = std.mem.split(exec_result.stderr, "\n\r");
+ var search_paths = std.ArrayList([]const u8).init(loop.allocator);
+ defer search_paths.deinit();
+ while (it.next()) |line| {
+ if (line.len != 0 and line[0] == ' ') {
+ try search_paths.append(line);
+ }
+ }
+ if (search_paths.len == 0) {
+ return error.CCompilerCannotFindHeaders;
+ }
+
+ // search in reverse order
+ var path_i: usize = 0;
+ while (path_i < search_paths.len) : (path_i += 1) {
+ const search_path_untrimmed = search_paths.at(search_paths.len - path_i - 1);
+ const search_path = std.mem.trimLeft(u8, search_path_untrimmed, " ");
+ const stdlib_path = try std.os.path.join(loop.allocator, search_path, "stdlib.h");
+ defer loop.allocator.free(stdlib_path);
+
+ if (try fileExists(loop.allocator, stdlib_path)) {
+ self.include_dir = try std.mem.dupe(loop.allocator, u8, search_path);
+ return;
+ }
+ }
+
+ return error.LibCStdLibHeaderNotFound;
+ }
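+
+ // For reference, the stderr parsed above looks roughly like this (paths are
+ // illustrative and vary by system):
+ //
+ // #include <...> search starts here:
+ //  /usr/lib/gcc/x86_64-linux-gnu/8/include
+ //  /usr/local/include
+ //  /usr/include
+ // End of search list.
+ //
+ // Each search path line begins with a single space, which is what the
+ // line[0] == ' ' check above relies on.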
+
+ async fn findNativeIncludeDirWindows(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) !void {
+ var search_buf: [2]Search = undefined;
+ const searches = fillSearch(&search_buf, sdk);
+
+ var result_buf = try std.Buffer.initSize(loop.allocator, 0);
+ defer result_buf.deinit();
+
+ for (searches) |search| {
+ result_buf.shrink(0);
+ const stream = &std.io.BufferOutStream.init(&result_buf).stream;
+ try stream.print("{}\\Include\\{}\\ucrt", search.path, search.version);
+
+ const stdlib_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "stdlib.h");
+ defer loop.allocator.free(stdlib_path);
+
+ if (try fileExists(loop.allocator, stdlib_path)) {
+ self.include_dir = result_buf.toOwnedSlice();
+ return;
+ }
+ }
+
+ return error.LibCStdLibHeaderNotFound;
+ }
+
+ async fn findNativeLibDirWindows(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) FindError!void {
+ var search_buf: [2]Search = undefined;
+ const searches = fillSearch(&search_buf, sdk);
+
+ var result_buf = try std.Buffer.initSize(loop.allocator, 0);
+ defer result_buf.deinit();
+
+ for (searches) |search| {
+ result_buf.shrink(0);
+ const stream = &std.io.BufferOutStream.init(&result_buf).stream;
+ try stream.print("{}\\Lib\\{}\\ucrt\\", search.path, search.version);
+ switch (builtin.arch) {
+ builtin.Arch.i386 => try stream.write("x86"),
+ builtin.Arch.x86_64 => try stream.write("x64"),
+ builtin.Arch.aarch64 => try stream.write("arm"),
+ else => return error.UnsupportedArchitecture,
+ }
+ const ucrt_lib_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "ucrt.lib");
+ defer loop.allocator.free(ucrt_lib_path);
+ if (try fileExists(loop.allocator, ucrt_lib_path)) {
+ self.lib_dir = result_buf.toOwnedSlice();
+ return;
+ }
+ }
+ return error.LibCRuntimeNotFound;
+ }
+
+ async fn findNativeLibDirLinux(self: *LibCInstallation, loop: *event.Loop) FindError!void {
+ self.lib_dir = try await (async ccPrintFileName(loop, "crt1.o", true) catch unreachable);
+ }
+
+ async fn findNativeStaticLibDir(self: *LibCInstallation, loop: *event.Loop) FindError!void {
+ self.static_lib_dir = try await (async ccPrintFileName(loop, "crtbegin.o", true) catch unreachable);
+ }
+
+ async fn findNativeDynamicLinker(self: *LibCInstallation, loop: *event.Loop) FindError!void {
+ var dyn_tests = []DynTest{
+ DynTest{
+ .name = "ld-linux-x86-64.so.2",
+ .result = null,
+ },
+ DynTest{
+ .name = "ld-musl-x86_64.so.1",
+ .result = null,
+ },
+ };
+ var group = event.Group(FindError!void).init(loop);
+ errdefer group.cancelAll();
+ for (dyn_tests) |*dyn_test| {
+ try group.call(testNativeDynamicLinker, self, loop, dyn_test);
+ }
+ try await (async group.wait() catch unreachable);
+ for (dyn_tests) |*dyn_test| {
+ if (dyn_test.result) |result| {
+ self.dynamic_linker_path = result;
+ return;
+ }
+ }
+ }
+
+ const DynTest = struct {
+ name: []const u8,
+ result: ?[]const u8,
+ };
+
+ async fn testNativeDynamicLinker(self: *LibCInstallation, loop: *event.Loop, dyn_test: *DynTest) FindError!void {
+ if (await (async ccPrintFileName(loop, dyn_test.name, false) catch unreachable)) |result| {
+ dyn_test.result = result;
+ return;
+ } else |err| switch (err) {
+ error.LibCRuntimeNotFound => return,
+ else => return err,
+ }
+ }
+
+ async fn findNativeKernel32LibDir(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) FindError!void {
+ var search_buf: [2]Search = undefined;
+ const searches = fillSearch(&search_buf, sdk);
+
+ var result_buf = try std.Buffer.initSize(loop.allocator, 0);
+ defer result_buf.deinit();
+
+ for (searches) |search| {
+ result_buf.shrink(0);
+ const stream = &std.io.BufferOutStream.init(&result_buf).stream;
+ try stream.print("{}\\Lib\\{}\\um\\", search.path, search.version);
+ switch (builtin.arch) {
+ builtin.Arch.i386 => try stream.write("x86\\"),
+ builtin.Arch.x86_64 => try stream.write("x64\\"),
+ builtin.Arch.aarch64 => try stream.write("arm\\"),
+ else => return error.UnsupportedArchitecture,
+ }
+ const kernel32_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "kernel32.lib");
+ defer loop.allocator.free(kernel32_path);
+ if (try fileExists(loop.allocator, kernel32_path)) {
+ self.kernel32_lib_dir = result_buf.toOwnedSlice();
+ return;
+ }
+ }
+ return error.LibCKernel32LibNotFound;
+ }
+
+ fn initEmpty(self: *LibCInstallation) void {
+ self.* = LibCInstallation{
+ .include_dir = ([*]const u8)(undefined)[0..0],
+ .lib_dir = null,
+ .static_lib_dir = null,
+ .msvc_lib_dir = null,
+ .kernel32_lib_dir = null,
+ .dynamic_linker_path = null,
+ };
+ }
+};
+
+/// Caller owns the returned memory.
+async fn ccPrintFileName(loop: *event.Loop, o_file: []const u8, want_dirname: bool) ![]u8 {
+ const cc_exe = std.os.getEnvPosix("CC") orelse "cc";
+ const arg1 = try std.fmt.allocPrint(loop.allocator, "-print-file-name={}", o_file);
+ defer loop.allocator.free(arg1);
+ const argv = []const []const u8{ cc_exe, arg1 };
+
+ // TODO This simulates evented I/O for the child process exec
+ await (async loop.yield() catch unreachable);
+ const errorable_result = std.os.ChildProcess.exec(loop.allocator, argv, null, null, 1024 * 1024);
+ const exec_result = if (std.debug.runtime_safety) blk: {
+ break :blk errorable_result catch unreachable;
+ } else blk: {
+ break :blk errorable_result catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => return error.UnableToSpawnCCompiler,
+ };
+ };
+ defer {
+ loop.allocator.free(exec_result.stdout);
+ loop.allocator.free(exec_result.stderr);
+ }
+ switch (exec_result.term) {
+ std.os.ChildProcess.Term.Exited => |code| {
+ if (code != 0) return error.CCompilerExitCode;
+ },
+ else => {
+ return error.CCompilerCrashed;
+ },
+ }
+ var it = std.mem.split(exec_result.stdout, "\n\r");
+ const line = it.next() orelse return error.LibCRuntimeNotFound;
+ const dirname = std.os.path.dirname(line) orelse return error.LibCRuntimeNotFound;
+
+ if (want_dirname) {
+ return std.mem.dupe(loop.allocator, u8, dirname);
+ } else {
+ return std.mem.dupe(loop.allocator, u8, line);
+ }
+}
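+
+// For reference (illustrative): `cc -print-file-name=crt1.o` prints an absolute path
+// such as /usr/lib/x86_64-linux-gnu/crt1.o when the file is found, and just the bare
+// name when it is not; in the latter case std.os.path.dirname returns null and we
+// report error.LibCRuntimeNotFound above.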
+
+const Search = struct {
+ path: []const u8,
+ version: []const u8,
+};
+
+fn fillSearch(search_buf: *[2]Search, sdk: *c.ZigWindowsSDK) []Search {
+ var search_end: usize = 0;
+ if (sdk.path10_ptr) |path10_ptr| {
+ if (sdk.version10_ptr) |ver10_ptr| {
+ search_buf[search_end] = Search{
+ .path = path10_ptr[0..sdk.path10_len],
+ .version = ver10_ptr[0..sdk.version10_len],
+ };
+ search_end += 1;
+ }
+ }
+ if (sdk.path81_ptr) |path81_ptr| {
+ if (sdk.version81_ptr) |ver81_ptr| {
+ search_buf[search_end] = Search{
+ .path = path81_ptr[0..sdk.path81_len],
+ .version = ver81_ptr[0..sdk.version81_len],
+ };
+ search_end += 1;
+ }
+ }
+ return search_buf[0..search_end];
+}
+
+fn fileExists(allocator: *std.mem.Allocator, path: []const u8) !bool {
+ if (std.os.File.access(allocator, path)) |_| {
+ return true;
+ } else |err| switch (err) {
+ error.NotFound, error.PermissionDenied => return false,
+ error.OutOfMemory => return error.OutOfMemory,
+ else => return error.FileSystem,
+ }
+}
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
new file mode 100644
index 0000000000..3b79c5b891
--- /dev/null
+++ b/src-self-hosted/link.zig
@@ -0,0 +1,737 @@
+const std = @import("std");
+const mem = std.mem;
+const c = @import("c.zig");
+const builtin = @import("builtin");
+const ObjectFormat = builtin.ObjectFormat;
+const Compilation = @import("compilation.zig").Compilation;
+const Target = @import("target.zig").Target;
+const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
+const assert = std.debug.assert;
+
+const Context = struct {
+ comp: *Compilation,
+ arena: std.heap.ArenaAllocator,
+ args: std.ArrayList([*]const u8),
+ link_in_crt: bool,
+
+ link_err: error{OutOfMemory}!void,
+ link_msg: std.Buffer,
+
+ libc: *LibCInstallation,
+ out_file_path: std.Buffer,
+};
+
+pub async fn link(comp: *Compilation) !void {
+ var ctx = Context{
+ .comp = comp,
+ .arena = std.heap.ArenaAllocator.init(comp.gpa()),
+ .args = undefined,
+ .link_in_crt = comp.haveLibC() and comp.kind == Compilation.Kind.Exe,
+ .link_err = {},
+ .link_msg = undefined,
+ .libc = undefined,
+ .out_file_path = undefined,
+ };
+ defer ctx.arena.deinit();
+ ctx.args = std.ArrayList([*]const u8).init(&ctx.arena.allocator);
+ ctx.link_msg = std.Buffer.initNull(&ctx.arena.allocator);
+
+ if (comp.link_out_file) |out_file| {
+ ctx.out_file_path = try std.Buffer.init(&ctx.arena.allocator, out_file);
+ } else {
+ ctx.out_file_path = try std.Buffer.init(&ctx.arena.allocator, comp.name.toSliceConst());
+ switch (comp.kind) {
+ Compilation.Kind.Exe => {
+ try ctx.out_file_path.append(comp.target.exeFileExt());
+ },
+ Compilation.Kind.Lib => {
+ try ctx.out_file_path.append(comp.target.libFileExt(comp.is_static));
+ },
+ Compilation.Kind.Obj => {
+ try ctx.out_file_path.append(comp.target.objFileExt());
+ },
+ }
+ }
+
+ // Even though we call LLD as a library, it expects the first
+ // argument to be its own executable name.
+ try ctx.args.append(c"lld");
+
+ if (comp.haveLibC()) {
+ ctx.libc = ctx.comp.override_libc orelse blk: {
+ switch (comp.target) {
+ Target.Native => {
+ break :blk (await (async comp.event_loop_local.getNativeLibC() catch unreachable)) catch return error.LibCRequiredButNotProvidedOrFound;
+ },
+ else => return error.LibCRequiredButNotProvidedOrFound,
+ }
+ };
+ }
+
+ try constructLinkerArgs(&ctx);
+
+ if (comp.verbose_link) {
+ for (ctx.args.toSliceConst()) |arg, i| {
+ const space = if (i == 0) "" else " ";
+ std.debug.warn("{}{s}", space, arg);
+ }
+ std.debug.warn("\n");
+ }
+
+ const extern_ofmt = toExternObjectFormatType(comp.target.getObjectFormat());
+ const args_slice = ctx.args.toSlice();
+
+ {
+ // LLD is not thread-safe, so we grab a global lock.
+ const held = await (async comp.event_loop_local.lld_lock.acquire() catch unreachable);
+ defer held.release();
+
+ // Not evented I/O. LLD does its own multithreading internally.
+ if (!ZigLLDLink(extern_ofmt, args_slice.ptr, args_slice.len, linkDiagCallback, @ptrCast(*c_void, &ctx))) {
+ if (!ctx.link_msg.isNull()) {
+ // TODO capture these messages and pass them through the system, reporting them through the
+ // event system instead of printing them directly here.
+ // perhaps try to parse and understand them.
+ std.debug.warn("{}\n", ctx.link_msg.toSliceConst());
+ }
+ return error.LinkFailed;
+ }
+ }
+}
+
+extern fn ZigLLDLink(
+ oformat: c.ZigLLVM_ObjectFormatType,
+ args: [*]const [*]const u8,
+ arg_count: usize,
+ append_diagnostic: extern fn (*c_void, [*]const u8, usize) void,
+ context: *c_void,
+) bool;
+
+extern fn linkDiagCallback(context: *c_void, ptr: [*]const u8, len: usize) void {
+ const ctx = @ptrCast(*Context, @alignCast(@alignOf(Context), context));
+ ctx.link_err = linkDiagCallbackErrorable(ctx, ptr[0..len]);
+}
+
+fn linkDiagCallbackErrorable(ctx: *Context, msg: []const u8) !void {
+ if (ctx.link_msg.isNull()) {
+ try ctx.link_msg.resize(0);
+ }
+ try ctx.link_msg.append(msg);
+}
+
+fn toExternObjectFormatType(ofmt: ObjectFormat) c.ZigLLVM_ObjectFormatType {
+ return switch (ofmt) {
+ ObjectFormat.unknown => c.ZigLLVM_UnknownObjectFormat,
+ ObjectFormat.coff => c.ZigLLVM_COFF,
+ ObjectFormat.elf => c.ZigLLVM_ELF,
+ ObjectFormat.macho => c.ZigLLVM_MachO,
+ ObjectFormat.wasm => c.ZigLLVM_Wasm,
+ };
+}
+
+fn constructLinkerArgs(ctx: *Context) !void {
+ switch (ctx.comp.target.getObjectFormat()) {
+ ObjectFormat.unknown => unreachable,
+ ObjectFormat.coff => return constructLinkerArgsCoff(ctx),
+ ObjectFormat.elf => return constructLinkerArgsElf(ctx),
+ ObjectFormat.macho => return constructLinkerArgsMachO(ctx),
+ ObjectFormat.wasm => return constructLinkerArgsWasm(ctx),
+ }
+}
+
+fn constructLinkerArgsElf(ctx: *Context) !void {
+ // TODO commented out code in this function
+ //if (g->linker_script) {
+ // lj->args.append("-T");
+ // lj->args.append(g->linker_script);
+ //}
+
+ //if (g->no_rosegment_workaround) {
+ // lj->args.append("--no-rosegment");
+ //}
+ try ctx.args.append(c"--gc-sections");
+
+ //lj->args.append("-m");
+ //lj->args.append(getLDMOption(&g->zig_target));
+
+ //bool is_lib = g->out_type == OutTypeLib;
+ //bool shared = !g->is_static && is_lib;
+ //Buf *soname = nullptr;
+ if (ctx.comp.is_static) {
+ if (ctx.comp.target.isArmOrThumb()) {
+ try ctx.args.append(c"-Bstatic");
+ } else {
+ try ctx.args.append(c"-static");
+ }
+ }
+ //} else if (shared) {
+ // lj->args.append("-shared");
+
+ // if (buf_len(&lj->out_file) == 0) {
+ // buf_appendf(&lj->out_file, "lib%s.so.%" ZIG_PRI_usize ".%" ZIG_PRI_usize ".%" ZIG_PRI_usize "",
+ // buf_ptr(g->root_out_name), g->version_major, g->version_minor, g->version_patch);
+ // }
+ // soname = buf_sprintf("lib%s.so.%" ZIG_PRI_usize "", buf_ptr(g->root_out_name), g->version_major);
+ //}
+
+ try ctx.args.append(c"-o");
+ try ctx.args.append(ctx.out_file_path.ptr());
+
+ if (ctx.link_in_crt) {
+ const crt1o = if (ctx.comp.is_static) "crt1.o" else "Scrt1.o";
+ const crtbegino = if (ctx.comp.is_static) "crtbeginT.o" else "crtbegin.o";
+ try addPathJoin(ctx, ctx.libc.lib_dir.?, crt1o);
+ try addPathJoin(ctx, ctx.libc.lib_dir.?, "crti.o");
+ try addPathJoin(ctx, ctx.libc.static_lib_dir.?, crtbegino);
+ }
+
+ //for (size_t i = 0; i < g->rpath_list.length; i += 1) {
+ // Buf *rpath = g->rpath_list.at(i);
+ // add_rpath(lj, rpath);
+ //}
+ //if (g->each_lib_rpath) {
+ // for (size_t i = 0; i < g->lib_dirs.length; i += 1) {
+ // const char *lib_dir = g->lib_dirs.at(i);
+ // for (size_t i = 0; i < g->link_libs_list.length; i += 1) {
+ // LinkLib *link_lib = g->link_libs_list.at(i);
+ // if (buf_eql_str(link_lib->name, "c")) {
+ // continue;
+ // }
+ // bool does_exist;
+ // Buf *test_path = buf_sprintf("%s/lib%s.so", lib_dir, buf_ptr(link_lib->name));
+ // if (os_file_exists(test_path, &does_exist) != ErrorNone) {
+ // zig_panic("link: unable to check if file exists: %s", buf_ptr(test_path));
+ // }
+ // if (does_exist) {
+ // add_rpath(lj, buf_create_from_str(lib_dir));
+ // break;
+ // }
+ // }
+ // }
+ //}
+
+ //for (size_t i = 0; i < g->lib_dirs.length; i += 1) {
+ // const char *lib_dir = g->lib_dirs.at(i);
+ // lj->args.append("-L");
+ // lj->args.append(lib_dir);
+ //}
+
+ if (ctx.comp.haveLibC()) {
+ try ctx.args.append(c"-L");
+ try ctx.args.append((try std.cstr.addNullByte(&ctx.arena.allocator, ctx.libc.lib_dir.?)).ptr);
+
+ try ctx.args.append(c"-L");
+ try ctx.args.append((try std.cstr.addNullByte(&ctx.arena.allocator, ctx.libc.static_lib_dir.?)).ptr);
+
+ if (!ctx.comp.is_static) {
+ const dl = blk: {
+ if (ctx.libc.dynamic_linker_path) |dl| break :blk dl;
+ if (ctx.comp.target.getDynamicLinkerPath()) |dl| break :blk dl;
+ return error.LibCMissingDynamicLinker;
+ };
+ try ctx.args.append(c"-dynamic-linker");
+ try ctx.args.append((try std.cstr.addNullByte(&ctx.arena.allocator, dl)).ptr);
+ }
+ }
+
+ //if (shared) {
+ // lj->args.append("-soname");
+ // lj->args.append(buf_ptr(soname));
+ //}
+
+ // .o files
+ for (ctx.comp.link_objects) |link_object| {
+ const link_obj_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, link_object);
+ try ctx.args.append(link_obj_with_null.ptr);
+ }
+ try addFnObjects(ctx);
+
+ //if (g->out_type == OutTypeExe || g->out_type == OutTypeLib) {
+ // if (g->libc_link_lib == nullptr) {
+ // Buf *builtin_o_path = build_o(g, "builtin");
+ // lj->args.append(buf_ptr(builtin_o_path));
+ // }
+
+ // // sometimes libgcc is missing stuff, so we still build compiler_rt and rely on weak linkage
+ // Buf *compiler_rt_o_path = build_compiler_rt(g);
+ // lj->args.append(buf_ptr(compiler_rt_o_path));
+ //}
+
+ //for (size_t i = 0; i < g->link_libs_list.length; i += 1) {
+ // LinkLib *link_lib = g->link_libs_list.at(i);
+ // if (buf_eql_str(link_lib->name, "c")) {
+ // continue;
+ // }
+ // Buf *arg;
+ // if (buf_starts_with_str(link_lib->name, "/") || buf_ends_with_str(link_lib->name, ".a") ||
+ // buf_ends_with_str(link_lib->name, ".so"))
+ // {
+ // arg = link_lib->name;
+ // } else {
+ // arg = buf_sprintf("-l%s", buf_ptr(link_lib->name));
+ // }
+ // lj->args.append(buf_ptr(arg));
+ //}
+
+ // libc dep
+ if (ctx.comp.haveLibC()) {
+ if (ctx.comp.is_static) {
+ try ctx.args.append(c"--start-group");
+ try ctx.args.append(c"-lgcc");
+ try ctx.args.append(c"-lgcc_eh");
+ try ctx.args.append(c"-lc");
+ try ctx.args.append(c"-lm");
+ try ctx.args.append(c"--end-group");
+ } else {
+ try ctx.args.append(c"-lgcc");
+ try ctx.args.append(c"--as-needed");
+ try ctx.args.append(c"-lgcc_s");
+ try ctx.args.append(c"--no-as-needed");
+ try ctx.args.append(c"-lc");
+ try ctx.args.append(c"-lm");
+ try ctx.args.append(c"-lgcc");
+ try ctx.args.append(c"--as-needed");
+ try ctx.args.append(c"-lgcc_s");
+ try ctx.args.append(c"--no-as-needed");
+ }
+ }
+
+ // crt end
+ if (ctx.link_in_crt) {
+ try addPathJoin(ctx, ctx.libc.static_lib_dir.?, "crtend.o");
+ try addPathJoin(ctx, ctx.libc.lib_dir.?, "crtn.o");
+ }
+
+ if (ctx.comp.target != Target.Native) {
+ try ctx.args.append(c"--allow-shlib-undefined");
+ }
+
+ if (ctx.comp.target.getOs() == builtin.Os.zen) {
+ try ctx.args.append(c"-e");
+ try ctx.args.append(c"_start");
+
+ try ctx.args.append(c"--image-base=0x10000000");
+ }
+}
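+
+// For a dynamically linked native executable that links libc, the arguments appended
+// above come out roughly as follows (paths purely illustrative):
+//
+//   --gc-sections -o hello <lib_dir>/Scrt1.o <lib_dir>/crti.o <static_lib_dir>/crtbegin.o
+//   -L <lib_dir> -L <static_lib_dir> -dynamic-linker /lib64/ld-linux-x86-64.so.2
+//   hello.o -lgcc --as-needed -lgcc_s --no-as-needed -lc -lm -lgcc --as-needed
+//   -lgcc_s --no-as-needed <static_lib_dir>/crtend.o <lib_dir>/crtn.o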
+
+fn addPathJoin(ctx: *Context, dirname: []const u8, basename: []const u8) !void {
+ const full_path = try std.os.path.join(&ctx.arena.allocator, dirname, basename);
+ const full_path_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, full_path);
+ try ctx.args.append(full_path_with_null.ptr);
+}
+
+fn constructLinkerArgsCoff(ctx: *Context) !void {
+ try ctx.args.append(c"-NOLOGO");
+
+ if (!ctx.comp.strip) {
+ try ctx.args.append(c"-DEBUG");
+ }
+
+ switch (ctx.comp.target.getArch()) {
+ builtin.Arch.i386 => try ctx.args.append(c"-MACHINE:X86"),
+ builtin.Arch.x86_64 => try ctx.args.append(c"-MACHINE:X64"),
+ builtin.Arch.aarch64 => try ctx.args.append(c"-MACHINE:ARM"),
+ else => return error.UnsupportedLinkArchitecture,
+ }
+
+ if (ctx.comp.windows_subsystem_windows) {
+ try ctx.args.append(c"/SUBSYSTEM:windows");
+ } else if (ctx.comp.windows_subsystem_console) {
+ try ctx.args.append(c"/SUBSYSTEM:console");
+ }
+
+ const is_library = ctx.comp.kind == Compilation.Kind.Lib;
+
+ const out_arg = try std.fmt.allocPrint(&ctx.arena.allocator, "-OUT:{}\x00", ctx.out_file_path.toSliceConst());
+ try ctx.args.append(out_arg.ptr);
+
+ if (ctx.comp.haveLibC()) {
+ try ctx.args.append((try std.fmt.allocPrint(&ctx.arena.allocator, "-LIBPATH:{}\x00", ctx.libc.msvc_lib_dir.?)).ptr);
+ try ctx.args.append((try std.fmt.allocPrint(&ctx.arena.allocator, "-LIBPATH:{}\x00", ctx.libc.kernel32_lib_dir.?)).ptr);
+ try ctx.args.append((try std.fmt.allocPrint(&ctx.arena.allocator, "-LIBPATH:{}\x00", ctx.libc.lib_dir.?)).ptr);
+ }
+
+ if (ctx.link_in_crt) {
+ const lib_str = if (ctx.comp.is_static) "lib" else "";
+ const d_str = if (ctx.comp.build_mode == builtin.Mode.Debug) "d" else "";
+
+ if (ctx.comp.is_static) {
+ const cmt_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "libcmt{}.lib\x00", d_str);
+ try ctx.args.append(cmt_lib_name.ptr);
+ } else {
+ const msvcrt_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "msvcrt{}.lib\x00", d_str);
+ try ctx.args.append(msvcrt_lib_name.ptr);
+ }
+
+ const vcruntime_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "{}vcruntime{}.lib\x00", lib_str, d_str);
+ try ctx.args.append(vcruntime_lib_name.ptr);
+
+ const crt_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "{}ucrt{}.lib\x00", lib_str, d_str);
+ try ctx.args.append(crt_lib_name.ptr);
+
+ // Visual C++ 2015 Conformance Changes
+ // https://msdn.microsoft.com/en-us/library/bb531344.aspx
+ try ctx.args.append(c"legacy_stdio_definitions.lib");
+
+ // msvcrt depends on kernel32
+ try ctx.args.append(c"kernel32.lib");
+ } else {
+ try ctx.args.append(c"-NODEFAULTLIB");
+ if (!is_library) {
+ try ctx.args.append(c"-ENTRY:WinMainCRTStartup");
+ // TODO
+ //if (g->have_winmain) {
+ // lj->args.append("-ENTRY:WinMain");
+ //} else {
+ // lj->args.append("-ENTRY:WinMainCRTStartup");
+ //}
+ }
+ }
+
+ if (is_library and !ctx.comp.is_static) {
+ try ctx.args.append(c"-DLL");
+ }
+
+ //for (size_t i = 0; i < g->lib_dirs.length; i += 1) {
+ // const char *lib_dir = g->lib_dirs.at(i);
+ // lj->args.append(buf_ptr(buf_sprintf("-LIBPATH:%s", lib_dir)));
+ //}
+
+ for (ctx.comp.link_objects) |link_object| {
+ const link_obj_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, link_object);
+ try ctx.args.append(link_obj_with_null.ptr);
+ }
+ try addFnObjects(ctx);
+
+ switch (ctx.comp.kind) {
+ Compilation.Kind.Exe, Compilation.Kind.Lib => {
+ if (!ctx.comp.haveLibC()) {
+ @panic("TODO");
+ //Buf *builtin_o_path = build_o(g, "builtin");
+ //lj->args.append(buf_ptr(builtin_o_path));
+ }
+
+ // msvc compiler_rt is missing some stuff, so we still build it and rely on weak linkage
+ // TODO
+ //Buf *compiler_rt_o_path = build_compiler_rt(g);
+ //lj->args.append(buf_ptr(compiler_rt_o_path));
+ },
+ Compilation.Kind.Obj => {},
+ }
+
+ //Buf *def_contents = buf_alloc();
+ //ZigList gen_lib_args = {0};
+ //for (size_t lib_i = 0; lib_i < g->link_libs_list.length; lib_i += 1) {
+ // LinkLib *link_lib = g->link_libs_list.at(lib_i);
+ // if (buf_eql_str(link_lib->name, "c")) {
+ // continue;
+ // }
+ // if (link_lib->provided_explicitly) {
+ // if (lj->codegen->zig_target.env_type == ZigLLVM_GNU) {
+ // Buf *arg = buf_sprintf("-l%s", buf_ptr(link_lib->name));
+ // lj->args.append(buf_ptr(arg));
+ // }
+ // else {
+ // lj->args.append(buf_ptr(link_lib->name));
+ // }
+ // } else {
+ // buf_resize(def_contents, 0);
+ // buf_appendf(def_contents, "LIBRARY %s\nEXPORTS\n", buf_ptr(link_lib->name));
+ // for (size_t exp_i = 0; exp_i < link_lib->symbols.length; exp_i += 1) {
+ // Buf *symbol_name = link_lib->symbols.at(exp_i);
+ // buf_appendf(def_contents, "%s\n", buf_ptr(symbol_name));
+ // }
+ // buf_appendf(def_contents, "\n");
+
+ // Buf *def_path = buf_alloc();
+ // os_path_join(g->cache_dir, buf_sprintf("%s.def", buf_ptr(link_lib->name)), def_path);
+ // os_write_file(def_path, def_contents);
+
+ // Buf *generated_lib_path = buf_alloc();
+ // os_path_join(g->cache_dir, buf_sprintf("%s.lib", buf_ptr(link_lib->name)), generated_lib_path);
+
+ // gen_lib_args.resize(0);
+ // gen_lib_args.append("link");
+
+ // coff_append_machine_arg(g, &gen_lib_args);
+ // gen_lib_args.append(buf_ptr(buf_sprintf("-DEF:%s", buf_ptr(def_path))));
+ // gen_lib_args.append(buf_ptr(buf_sprintf("-OUT:%s", buf_ptr(generated_lib_path))));
+ // Buf diag = BUF_INIT;
+ // if (!zig_lld_link(g->zig_target.oformat, gen_lib_args.items, gen_lib_args.length, &diag)) {
+ // fprintf(stderr, "%s\n", buf_ptr(&diag));
+ // exit(1);
+ // }
+ // lj->args.append(buf_ptr(generated_lib_path));
+ // }
+ //}
+}
+
+fn constructLinkerArgsMachO(ctx: *Context) !void {
+ try ctx.args.append(c"-demangle");
+
+ if (ctx.comp.linker_rdynamic) {
+ try ctx.args.append(c"-export_dynamic");
+ }
+
+ const is_lib = ctx.comp.kind == Compilation.Kind.Lib;
+ const shared = !ctx.comp.is_static and is_lib;
+ if (ctx.comp.is_static) {
+ try ctx.args.append(c"-static");
+ } else {
+ try ctx.args.append(c"-dynamic");
+ }
+
+ //if (is_lib) {
+ // if (!g->is_static) {
+ // lj->args.append("-dylib");
+
+ // Buf *compat_vers = buf_sprintf("%" ZIG_PRI_usize ".0.0", g->version_major);
+ // lj->args.append("-compatibility_version");
+ // lj->args.append(buf_ptr(compat_vers));
+
+ // Buf *cur_vers = buf_sprintf("%" ZIG_PRI_usize ".%" ZIG_PRI_usize ".%" ZIG_PRI_usize,
+ // g->version_major, g->version_minor, g->version_patch);
+ // lj->args.append("-current_version");
+ // lj->args.append(buf_ptr(cur_vers));
+
+ // // TODO getting an error when running an executable when doing this rpath thing
+ // //Buf *dylib_install_name = buf_sprintf("@rpath/lib%s.%" ZIG_PRI_usize ".dylib",
+ // // buf_ptr(g->root_out_name), g->version_major);
+ // //lj->args.append("-install_name");
+ // //lj->args.append(buf_ptr(dylib_install_name));
+
+ // if (buf_len(&lj->out_file) == 0) {
+ // buf_appendf(&lj->out_file, "lib%s.%" ZIG_PRI_usize ".%" ZIG_PRI_usize ".%" ZIG_PRI_usize ".dylib",
+ // buf_ptr(g->root_out_name), g->version_major, g->version_minor, g->version_patch);
+ // }
+ // }
+ //}
+
+ try ctx.args.append(c"-arch");
+ const darwin_arch_str = try std.cstr.addNullByte(
+ &ctx.arena.allocator,
+ ctx.comp.target.getDarwinArchString(),
+ );
+ try ctx.args.append(darwin_arch_str.ptr);
+
+ const platform = try DarwinPlatform.get(ctx.comp);
+ switch (platform.kind) {
+ DarwinPlatform.Kind.MacOS => try ctx.args.append(c"-macosx_version_min"),
+ DarwinPlatform.Kind.IPhoneOS => try ctx.args.append(c"-iphoneos_version_min"),
+ DarwinPlatform.Kind.IPhoneOSSimulator => try ctx.args.append(c"-ios_simulator_version_min"),
+ }
+ const ver_str = try std.fmt.allocPrint(&ctx.arena.allocator, "{}.{}.{}\x00", platform.major, platform.minor, platform.micro);
+ try ctx.args.append(ver_str.ptr);
+
+ if (ctx.comp.kind == Compilation.Kind.Exe) {
+ if (ctx.comp.is_static) {
+ try ctx.args.append(c"-no_pie");
+ } else {
+ try ctx.args.append(c"-pie");
+ }
+ }
+
+ try ctx.args.append(c"-o");
+ try ctx.args.append(ctx.out_file_path.ptr());
+
+ //for (size_t i = 0; i < g->rpath_list.length; i += 1) {
+ // Buf *rpath = g->rpath_list.at(i);
+ // add_rpath(lj, rpath);
+ //}
+ //add_rpath(lj, &lj->out_file);
+
+ if (shared) {
+ try ctx.args.append(c"-headerpad_max_install_names");
+ } else if (ctx.comp.is_static) {
+ try ctx.args.append(c"-lcrt0.o");
+ } else {
+ switch (platform.kind) {
+ DarwinPlatform.Kind.MacOS => {
+ if (platform.versionLessThan(10, 5)) {
+ try ctx.args.append(c"-lcrt1.o");
+ } else if (platform.versionLessThan(10, 6)) {
+ try ctx.args.append(c"-lcrt1.10.5.o");
+ } else if (platform.versionLessThan(10, 8)) {
+ try ctx.args.append(c"-lcrt1.10.6.o");
+ }
+ },
+ DarwinPlatform.Kind.IPhoneOS => {
+ if (ctx.comp.target.getArch() == builtin.Arch.aarch64) {
+ // iOS does not need any crt1 files for arm64
+ } else if (platform.versionLessThan(3, 1)) {
+ try ctx.args.append(c"-lcrt1.o");
+ } else if (platform.versionLessThan(6, 0)) {
+ try ctx.args.append(c"-lcrt1.3.1.o");
+ }
+ },
+ DarwinPlatform.Kind.IPhoneOSSimulator => {}, // no crt1.o needed
+ }
+ }
+
+ //for (size_t i = 0; i < g->lib_dirs.length; i += 1) {
+ // const char *lib_dir = g->lib_dirs.at(i);
+ // lj->args.append("-L");
+ // lj->args.append(lib_dir);
+ //}
+
+ for (ctx.comp.link_objects) |link_object| {
+ const link_obj_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, link_object);
+ try ctx.args.append(link_obj_with_null.ptr);
+ }
+ try addFnObjects(ctx);
+
+ //// compiler_rt on darwin is missing some stuff, so we still build it and rely on LinkOnce
+ //if (g->out_type == OutTypeExe || g->out_type == OutTypeLib) {
+ // Buf *compiler_rt_o_path = build_compiler_rt(g);
+ // lj->args.append(buf_ptr(compiler_rt_o_path));
+ //}
+
+ if (ctx.comp.target == Target.Native) {
+ for (ctx.comp.link_libs_list.toSliceConst()) |lib| {
+ if (mem.eql(u8, lib.name, "c")) {
+ // On Darwin, libSystem includes libc, and it must also be used to
+ // make syscalls, because the syscall numbers are undocumented and
+ // change between versions. So we always link against libSystem.
+ try ctx.args.append(c"-lSystem");
+ } else {
+ if (mem.indexOfScalar(u8, lib.name, '/') == null) {
+ const arg = try std.fmt.allocPrint(&ctx.arena.allocator, "-l{}\x00", lib.name);
+ try ctx.args.append(arg.ptr);
+ } else {
+ const arg = try std.cstr.addNullByte(&ctx.arena.allocator, lib.name);
+ try ctx.args.append(arg.ptr);
+ }
+ }
+ }
+ } else {
+ try ctx.args.append(c"-undefined");
+ try ctx.args.append(c"dynamic_lookup");
+ }
+
+ if (platform.kind == DarwinPlatform.Kind.MacOS) {
+ if (platform.versionLessThan(10, 5)) {
+ try ctx.args.append(c"-lgcc_s.10.4");
+ } else if (platform.versionLessThan(10, 6)) {
+ try ctx.args.append(c"-lgcc_s.10.5");
+ }
+ } else {
+ @panic("TODO");
+ }
+
+ //for (size_t i = 0; i < g->darwin_frameworks.length; i += 1) {
+ // lj->args.append("-framework");
+ // lj->args.append(buf_ptr(g->darwin_frameworks.at(i)));
+ //}
+}
+
+fn constructLinkerArgsWasm(ctx: *Context) void {
+ @panic("TODO");
+}
+
+fn addFnObjects(ctx: *Context) !void {
+ // at this point it's guaranteed nobody else has this lock, so we circumvent it
+ // and avoid having to be a coroutine
+ const fn_link_set = &ctx.comp.fn_link_set.private_data;
+
+ var it = fn_link_set.first;
+ while (it) |node| {
+ const fn_val = node.data orelse {
+ // handle the tombstone. See Value.Fn.destroy.
+ it = node.next;
+ fn_link_set.remove(node);
+ ctx.comp.gpa().destroy(node);
+ continue;
+ };
+ try ctx.args.append(fn_val.containing_object.ptr());
+ it = node.next;
+ }
+}
+
+const DarwinPlatform = struct {
+ kind: Kind,
+ major: u32,
+ minor: u32,
+ micro: u32,
+
+ const Kind = enum {
+ MacOS,
+ IPhoneOS,
+ IPhoneOSSimulator,
+ };
+
+ fn get(comp: *Compilation) !DarwinPlatform {
+ var result: DarwinPlatform = undefined;
+ const ver_str = switch (comp.darwin_version_min) {
+ Compilation.DarwinVersionMin.MacOS => |ver| blk: {
+ result.kind = Kind.MacOS;
+ break :blk ver;
+ },
+ Compilation.DarwinVersionMin.Ios => |ver| blk: {
+ result.kind = Kind.IPhoneOS;
+ break :blk ver;
+ },
+ Compilation.DarwinVersionMin.None => blk: {
+ assert(comp.target.getOs() == builtin.Os.macosx);
+ result.kind = Kind.MacOS;
+ break :blk "10.10";
+ },
+ };
+
+ var had_extra: bool = undefined;
+ try darwinGetReleaseVersion(
+ ver_str,
+ &result.major,
+ &result.minor,
+ &result.micro,
+ &had_extra,
+ );
+ if (had_extra or result.major != 10 or result.minor >= 100 or result.micro >= 100) {
+ return error.InvalidDarwinVersionString;
+ }
+
+ if (result.kind == Kind.IPhoneOS) {
+ switch (comp.target.getArch()) {
+ builtin.Arch.i386,
+ builtin.Arch.x86_64,
+ => result.kind = Kind.IPhoneOSSimulator,
+ else => {},
+ }
+ }
+ return result;
+ }
+
+ fn versionLessThan(self: DarwinPlatform, major: u32, minor: u32) bool {
+ if (self.major < major)
+ return true;
+ if (self.major > major)
+ return false;
+ if (self.minor < minor)
+ return true;
+ return false;
+ }
+};
+
+/// Parse a version string of the form ([0-9]+)(.([0-9]+)(.([0-9]+))?)? and store the
+/// grouped values as integers. Numbers which are not provided are set to 0.
+/// Returns error.InvalidDarwinVersionString if a component cannot be parsed as an
+/// integer, and sets had_extra to true when components remain after the micro version.
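+///
+/// Illustrative traces (not exhaustive) of how this routine behaves:
+///   "10"        -> major=10, minor=0,  micro=0, had_extra=false
+///   "10.13.2"   -> major=10, minor=13, micro=2, had_extra=false
+///   "10.13.2.1" -> major=10, minor=13, micro=2, had_extra=true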
+fn darwinGetReleaseVersion(str: []const u8, major: *u32, minor: *u32, micro: *u32, had_extra: *bool) !void {
+ major.* = 0;
+ minor.* = 0;
+ micro.* = 0;
+ had_extra.* = false;
+
+ if (str.len == 0)
+ return error.InvalidDarwinVersionString;
+
+ var start_pos: usize = 0;
+ for ([]*u32{ major, minor, micro }) |v| {
+ const dot_pos = mem.indexOfScalarPos(u8, str, start_pos, '.');
+ const end_pos = dot_pos orelse str.len;
+ v.* = std.fmt.parseUnsigned(u32, str[start_pos..end_pos], 10) catch return error.InvalidDarwinVersionString;
+ start_pos = (dot_pos orelse return) + 1;
+ if (start_pos == str.len) return;
+ }
+ had_extra.* = true;
+}
diff --git a/src-self-hosted/llvm.zig b/src-self-hosted/llvm.zig
index 16c359adcf..778d3fae07 100644
--- a/src-self-hosted/llvm.zig
+++ b/src-self-hosted/llvm.zig
@@ -2,12 +2,213 @@ const builtin = @import("builtin");
const c = @import("c.zig");
const assert = @import("std").debug.assert;
-pub const ValueRef = removeNullability(c.LLVMValueRef);
-pub const ModuleRef = removeNullability(c.LLVMModuleRef);
-pub const ContextRef = removeNullability(c.LLVMContextRef);
+// we wrap the c module for 3 reasons:
+// 1. to avoid accidentally calling the non-thread-safe functions
+// 2. patch up some of the types to remove nullability
+// 3. some functions have been augmented by zig_llvm.cpp to be more powerful,
+// such as ZigLLVMTargetMachineEmitToFile
+
+pub const AttributeIndex = c_uint;
+pub const Bool = c_int;
+
pub const BuilderRef = removeNullability(c.LLVMBuilderRef);
+pub const ContextRef = removeNullability(c.LLVMContextRef);
+pub const ModuleRef = removeNullability(c.LLVMModuleRef);
+pub const ValueRef = removeNullability(c.LLVMValueRef);
+pub const TypeRef = removeNullability(c.LLVMTypeRef);
+pub const BasicBlockRef = removeNullability(c.LLVMBasicBlockRef);
+pub const AttributeRef = removeNullability(c.LLVMAttributeRef);
+pub const TargetRef = removeNullability(c.LLVMTargetRef);
+pub const TargetMachineRef = removeNullability(c.LLVMTargetMachineRef);
+pub const TargetDataRef = removeNullability(c.LLVMTargetDataRef);
+pub const DIBuilder = c.ZigLLVMDIBuilder;
+
+pub const ABIAlignmentOfType = c.LLVMABIAlignmentOfType;
+pub const AddAttributeAtIndex = c.LLVMAddAttributeAtIndex;
+pub const AddFunction = c.LLVMAddFunction;
+pub const AddGlobal = c.LLVMAddGlobal;
+pub const AddModuleCodeViewFlag = c.ZigLLVMAddModuleCodeViewFlag;
+pub const AddModuleDebugInfoFlag = c.ZigLLVMAddModuleDebugInfoFlag;
+pub const ArrayType = c.LLVMArrayType;
+pub const BuildLoad = c.LLVMBuildLoad;
+pub const ClearCurrentDebugLocation = c.ZigLLVMClearCurrentDebugLocation;
+pub const ConstAllOnes = c.LLVMConstAllOnes;
+pub const ConstArray = c.LLVMConstArray;
+pub const ConstBitCast = c.LLVMConstBitCast;
+pub const ConstInt = c.LLVMConstInt;
+pub const ConstIntOfArbitraryPrecision = c.LLVMConstIntOfArbitraryPrecision;
+pub const ConstNeg = c.LLVMConstNeg;
+pub const ConstNull = c.LLVMConstNull;
+pub const ConstStringInContext = c.LLVMConstStringInContext;
+pub const ConstStructInContext = c.LLVMConstStructInContext;
+pub const CopyStringRepOfTargetData = c.LLVMCopyStringRepOfTargetData;
+pub const CreateBuilderInContext = c.LLVMCreateBuilderInContext;
+pub const CreateCompileUnit = c.ZigLLVMCreateCompileUnit;
+pub const CreateDIBuilder = c.ZigLLVMCreateDIBuilder;
+pub const CreateEnumAttribute = c.LLVMCreateEnumAttribute;
+pub const CreateFile = c.ZigLLVMCreateFile;
+pub const CreateStringAttribute = c.LLVMCreateStringAttribute;
+pub const CreateTargetDataLayout = c.LLVMCreateTargetDataLayout;
+pub const CreateTargetMachine = c.LLVMCreateTargetMachine;
+pub const DIBuilderFinalize = c.ZigLLVMDIBuilderFinalize;
+pub const DisposeBuilder = c.LLVMDisposeBuilder;
+pub const DisposeDIBuilder = c.ZigLLVMDisposeDIBuilder;
+pub const DisposeMessage = c.LLVMDisposeMessage;
+pub const DisposeModule = c.LLVMDisposeModule;
+pub const DisposeTargetData = c.LLVMDisposeTargetData;
+pub const DisposeTargetMachine = c.LLVMDisposeTargetMachine;
+pub const DoubleTypeInContext = c.LLVMDoubleTypeInContext;
+pub const DumpModule = c.LLVMDumpModule;
+pub const FP128TypeInContext = c.LLVMFP128TypeInContext;
+pub const FloatTypeInContext = c.LLVMFloatTypeInContext;
+pub const GetEnumAttributeKindForName = c.LLVMGetEnumAttributeKindForName;
+pub const GetHostCPUName = c.ZigLLVMGetHostCPUName;
+pub const GetMDKindIDInContext = c.LLVMGetMDKindIDInContext;
+pub const GetNativeFeatures = c.ZigLLVMGetNativeFeatures;
+pub const GetUndef = c.LLVMGetUndef;
+pub const HalfTypeInContext = c.LLVMHalfTypeInContext;
+pub const InitializeAllAsmParsers = c.LLVMInitializeAllAsmParsers;
+pub const InitializeAllAsmPrinters = c.LLVMInitializeAllAsmPrinters;
+pub const InitializeAllTargetInfos = c.LLVMInitializeAllTargetInfos;
+pub const InitializeAllTargetMCs = c.LLVMInitializeAllTargetMCs;
+pub const InitializeAllTargets = c.LLVMInitializeAllTargets;
+pub const InsertBasicBlockInContext = c.LLVMInsertBasicBlockInContext;
+pub const Int128TypeInContext = c.LLVMInt128TypeInContext;
+pub const Int16TypeInContext = c.LLVMInt16TypeInContext;
+pub const Int1TypeInContext = c.LLVMInt1TypeInContext;
+pub const Int32TypeInContext = c.LLVMInt32TypeInContext;
+pub const Int64TypeInContext = c.LLVMInt64TypeInContext;
+pub const Int8TypeInContext = c.LLVMInt8TypeInContext;
+pub const IntPtrTypeForASInContext = c.LLVMIntPtrTypeForASInContext;
+pub const IntPtrTypeInContext = c.LLVMIntPtrTypeInContext;
+pub const IntTypeInContext = c.LLVMIntTypeInContext;
+pub const LabelTypeInContext = c.LLVMLabelTypeInContext;
+pub const MDNodeInContext = c.LLVMMDNodeInContext;
+pub const MDStringInContext = c.LLVMMDStringInContext;
+pub const MetadataTypeInContext = c.LLVMMetadataTypeInContext;
+pub const ModuleCreateWithNameInContext = c.LLVMModuleCreateWithNameInContext;
+pub const PPCFP128TypeInContext = c.LLVMPPCFP128TypeInContext;
+pub const PointerType = c.LLVMPointerType;
+pub const SetAlignment = c.LLVMSetAlignment;
+pub const SetDataLayout = c.LLVMSetDataLayout;
+pub const SetGlobalConstant = c.LLVMSetGlobalConstant;
+pub const SetInitializer = c.LLVMSetInitializer;
+pub const SetLinkage = c.LLVMSetLinkage;
+pub const SetTarget = c.LLVMSetTarget;
+pub const SetUnnamedAddr = c.LLVMSetUnnamedAddr;
+pub const SetVolatile = c.LLVMSetVolatile;
+pub const StructTypeInContext = c.LLVMStructTypeInContext;
+pub const TokenTypeInContext = c.LLVMTokenTypeInContext;
+pub const VoidTypeInContext = c.LLVMVoidTypeInContext;
+pub const X86FP80TypeInContext = c.LLVMX86FP80TypeInContext;
+pub const X86MMXTypeInContext = c.LLVMX86MMXTypeInContext;
+
+pub const GetElementType = LLVMGetElementType;
+extern fn LLVMGetElementType(Ty: TypeRef) TypeRef;
+
+pub const TypeOf = LLVMTypeOf;
+extern fn LLVMTypeOf(Val: ValueRef) TypeRef;
+
+pub const BuildStore = LLVMBuildStore;
+extern fn LLVMBuildStore(arg0: BuilderRef, Val: ValueRef, Ptr: ValueRef) ?ValueRef;
+
+pub const BuildAlloca = LLVMBuildAlloca;
+extern fn LLVMBuildAlloca(arg0: BuilderRef, Ty: TypeRef, Name: ?[*]const u8) ?ValueRef;
+
+pub const ConstInBoundsGEP = LLVMConstInBoundsGEP;
+pub extern fn LLVMConstInBoundsGEP(ConstantVal: ValueRef, ConstantIndices: [*]ValueRef, NumIndices: c_uint) ?ValueRef;
+
+pub const GetTargetFromTriple = LLVMGetTargetFromTriple;
+extern fn LLVMGetTargetFromTriple(Triple: [*]const u8, T: *TargetRef, ErrorMessage: ?*[*]u8) Bool;
+
+pub const VerifyModule = LLVMVerifyModule;
+extern fn LLVMVerifyModule(M: ModuleRef, Action: VerifierFailureAction, OutMessage: *?[*]u8) Bool;
+
+pub const GetInsertBlock = LLVMGetInsertBlock;
+extern fn LLVMGetInsertBlock(Builder: BuilderRef) BasicBlockRef;
+
+pub const FunctionType = LLVMFunctionType;
+extern fn LLVMFunctionType(
+ ReturnType: TypeRef,
+ ParamTypes: [*]TypeRef,
+ ParamCount: c_uint,
+ IsVarArg: Bool,
+) ?TypeRef;
+
+pub const GetParam = LLVMGetParam;
+extern fn LLVMGetParam(Fn: ValueRef, Index: c_uint) ValueRef;
+
+pub const AppendBasicBlockInContext = LLVMAppendBasicBlockInContext;
+extern fn LLVMAppendBasicBlockInContext(C: ContextRef, Fn: ValueRef, Name: [*]const u8) ?BasicBlockRef;
+
+pub const PositionBuilderAtEnd = LLVMPositionBuilderAtEnd;
+extern fn LLVMPositionBuilderAtEnd(Builder: BuilderRef, Block: BasicBlockRef) void;
+
+pub const AbortProcessAction = VerifierFailureAction.LLVMAbortProcessAction;
+pub const PrintMessageAction = VerifierFailureAction.LLVMPrintMessageAction;
+pub const ReturnStatusAction = VerifierFailureAction.LLVMReturnStatusAction;
+pub const VerifierFailureAction = c.LLVMVerifierFailureAction;
+
+pub const CodeGenLevelNone = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelNone;
+pub const CodeGenLevelLess = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelLess;
+pub const CodeGenLevelDefault = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelDefault;
+pub const CodeGenLevelAggressive = c.LLVMCodeGenOptLevel.LLVMCodeGenLevelAggressive;
+pub const CodeGenOptLevel = c.LLVMCodeGenOptLevel;
+
+pub const RelocDefault = c.LLVMRelocMode.LLVMRelocDefault;
+pub const RelocStatic = c.LLVMRelocMode.LLVMRelocStatic;
+pub const RelocPIC = c.LLVMRelocMode.LLVMRelocPIC;
+pub const RelocDynamicNoPic = c.LLVMRelocMode.LLVMRelocDynamicNoPic;
+pub const RelocMode = c.LLVMRelocMode;
+
+pub const CodeModelDefault = c.LLVMCodeModel.LLVMCodeModelDefault;
+pub const CodeModelJITDefault = c.LLVMCodeModel.LLVMCodeModelJITDefault;
+pub const CodeModelSmall = c.LLVMCodeModel.LLVMCodeModelSmall;
+pub const CodeModelKernel = c.LLVMCodeModel.LLVMCodeModelKernel;
+pub const CodeModelMedium = c.LLVMCodeModel.LLVMCodeModelMedium;
+pub const CodeModelLarge = c.LLVMCodeModel.LLVMCodeModelLarge;
+pub const CodeModel = c.LLVMCodeModel;
+
+pub const EmitAssembly = EmitOutputType.ZigLLVM_EmitAssembly;
+pub const EmitBinary = EmitOutputType.ZigLLVM_EmitBinary;
+pub const EmitLLVMIr = EmitOutputType.ZigLLVM_EmitLLVMIr;
+pub const EmitOutputType = c.ZigLLVM_EmitOutputType;
+
+pub const CCallConv = c.LLVMCCallConv;
+pub const FastCallConv = c.LLVMFastCallConv;
+pub const ColdCallConv = c.LLVMColdCallConv;
+pub const WebKitJSCallConv = c.LLVMWebKitJSCallConv;
+pub const AnyRegCallConv = c.LLVMAnyRegCallConv;
+pub const X86StdcallCallConv = c.LLVMX86StdcallCallConv;
+pub const X86FastcallCallConv = c.LLVMX86FastcallCallConv;
+pub const CallConv = c.LLVMCallConv;
+
+pub const FnInline = extern enum {
+ Auto,
+ Always,
+ Never,
+};
fn removeNullability(comptime T: type) type {
- comptime assert(@typeId(T) == builtin.TypeId.Nullable);
+ comptime assert(@typeId(T) == builtin.TypeId.Optional);
return T.Child;
}
+
+pub const BuildRet = LLVMBuildRet;
+extern fn LLVMBuildRet(arg0: BuilderRef, V: ?ValueRef) ?ValueRef;
+
+pub const TargetMachineEmitToFile = ZigLLVMTargetMachineEmitToFile;
+extern fn ZigLLVMTargetMachineEmitToFile(
+ targ_machine_ref: TargetMachineRef,
+ module_ref: ModuleRef,
+ filename: [*]const u8,
+ output_type: EmitOutputType,
+ error_message: *[*]u8,
+ is_debug: bool,
+ is_small: bool,
+) bool;
+
+pub const BuildCall = ZigLLVMBuildCall;
+extern fn ZigLLVMBuildCall(B: BuilderRef, Fn: ValueRef, Args: [*]ValueRef, NumArgs: c_uint, CC: c_uint, fn_inline: FnInline, Name: [*]const u8) ?ValueRef;
+
+pub const PrivateLinkage = c.LLVMLinkage.LLVMPrivateLinkage;
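
A minimal sketch of the nullability patching described at the top of this file, using a hypothetical createBuilder helper rather than actual codegen code: callers import these wrapped declarations instead of c.zig and convert the optional return values themselves.

    const llvm = @import("llvm.zig");

    fn createBuilder(context: llvm.ContextRef) !llvm.BuilderRef {
        // LLVMCreateBuilderInContext can return null on allocation failure,
        // so the optional is turned into a Zig error here.
        return llvm.CreateBuilderInContext(context) orelse error.OutOfMemory;
    }
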
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index c1a6bbe99a..37bb435c1b 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -1,6 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
+const event = std.event;
const os = std.os;
const io = std.io;
const mem = std.mem;
@@ -13,247 +14,118 @@ const c = @import("c.zig");
const introspect = @import("introspect.zig");
const Args = arg.Args;
const Flag = arg.Flag;
-const Module = @import("module.zig").Module;
+const EventLoopLocal = @import("compilation.zig").EventLoopLocal;
+const Compilation = @import("compilation.zig").Compilation;
const Target = @import("target.zig").Target;
+const errmsg = @import("errmsg.zig");
+const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
-var stderr: &io.OutStream(io.FileOutStream.Error) = undefined;
-var stdout: &io.OutStream(io.FileOutStream.Error) = undefined;
+var stderr_file: os.File = undefined;
+var stderr: *io.OutStream(io.FileOutStream.Error) = undefined;
+var stdout: *io.OutStream(io.FileOutStream.Error) = undefined;
const usage =
\\usage: zig [command] [options]
\\
\\Commands:
\\
- \\ build Build project from build.zig
- \\ build-exe [source] Create executable from source or object files
- \\ build-lib [source] Create library from source or object files
- \\ build-obj [source] Create object from source or assembly
- \\ fmt [source] Parse file and render in canonical zig format
- \\ run [source] Create executable and run immediately
- \\ targets List available compilation targets
- \\ test [source] Create and run a test build
- \\ translate-c [source] Convert c code to zig code
- \\ version Print version number and exit
- \\ zen Print zen of zig and exit
+ \\ build-exe [source] Create executable from source or object files
+ \\ build-lib [source] Create library from source or object files
+ \\ build-obj [source] Create object from source or assembly
+ \\ fmt [source] Parse file and render in canonical zig format
+ \\ libc [paths_file] Display native libc paths file or validate one
+ \\ targets List available compilation targets
+ \\ version Print version number and exit
+ \\ zen Print zen of zig and exit
\\
\\
- ;
+;
const Command = struct {
name: []const u8,
- exec: fn(&Allocator, []const []const u8) error!void,
+ exec: fn (*Allocator, []const []const u8) error!void,
};
pub fn main() !void {
- var allocator = std.heap.c_allocator;
+    // This allocator needs to be thread-safe because we use it for the event.Loop,
+    // which multiplexes coroutines onto kernel threads.
+    // The libc allocator is guaranteed to have this property.
+ const allocator = std.heap.c_allocator;
var stdout_file = try std.io.getStdOut();
var stdout_out_stream = std.io.FileOutStream.init(&stdout_file);
stdout = &stdout_out_stream.stream;
- var stderr_file = try std.io.getStdErr();
+ stderr_file = try std.io.getStdErr();
var stderr_out_stream = std.io.FileOutStream.init(&stderr_file);
stderr = &stderr_out_stream.stream;
const args = try os.argsAlloc(allocator);
- defer os.argsFree(allocator, args);
+ // TODO I'm getting unreachable code here, which shouldn't happen
+ //defer os.argsFree(allocator, args);
if (args.len <= 1) {
+ try stderr.write("expected command argument\n\n");
try stderr.write(usage);
os.exit(1);
}
- const commands = []Command {
- Command { .name = "build", .exec = cmdBuild },
- Command { .name = "build-exe", .exec = cmdBuildExe },
- Command { .name = "build-lib", .exec = cmdBuildLib },
- Command { .name = "build-obj", .exec = cmdBuildObj },
- Command { .name = "fmt", .exec = cmdFmt },
- Command { .name = "run", .exec = cmdRun },
- Command { .name = "targets", .exec = cmdTargets },
- Command { .name = "test", .exec = cmdTest },
- Command { .name = "translate-c", .exec = cmdTranslateC },
- Command { .name = "version", .exec = cmdVersion },
- Command { .name = "zen", .exec = cmdZen },
+ const commands = []Command{
+ Command{
+ .name = "build-exe",
+ .exec = cmdBuildExe,
+ },
+ Command{
+ .name = "build-lib",
+ .exec = cmdBuildLib,
+ },
+ Command{
+ .name = "build-obj",
+ .exec = cmdBuildObj,
+ },
+ Command{
+ .name = "fmt",
+ .exec = cmdFmt,
+ },
+ Command{
+ .name = "libc",
+ .exec = cmdLibC,
+ },
+ Command{
+ .name = "targets",
+ .exec = cmdTargets,
+ },
+ Command{
+ .name = "version",
+ .exec = cmdVersion,
+ },
+ Command{
+ .name = "zen",
+ .exec = cmdZen,
+ },
// undocumented commands
- Command { .name = "help", .exec = cmdHelp },
- Command { .name = "internal", .exec = cmdInternal },
+ Command{
+ .name = "help",
+ .exec = cmdHelp,
+ },
+ Command{
+ .name = "internal",
+ .exec = cmdInternal,
+ },
};
for (commands) |command| {
if (mem.eql(u8, command.name, args[1])) {
- try command.exec(allocator, args[2..]);
- return;
+ return command.exec(allocator, args[2..]);
}
}
try stderr.print("unknown command: {}\n\n", args[1]);
try stderr.write(usage);
+ os.exit(1);
}
-// cmd:build ///////////////////////////////////////////////////////////////////////////////////////
-
-const usage_build =
- \\usage: zig build
- \\
- \\General Options:
- \\ --help Print this help and exit
- \\ --init Generate a build.zig template
- \\ --build-file [file] Override path to build.zig
- \\ --cache-dir [path] Override path to cache directory
- \\ --verbose Print commands before executing them
- \\ --prefix [path] Override default install prefix
- \\
- \\Project-Specific Options:
- \\
- \\ Project-specific options become available when the build file is found.
- \\
- \\Advanced Options:
- \\ --build-file [file] Override path to build.zig
- \\ --cache-dir [path] Override path to cache directory
- \\ --verbose-tokenize Enable compiler debug output for tokenization
- \\ --verbose-ast Enable compiler debug output for parsing into an AST
- \\ --verbose-link Enable compiler debug output for linking
- \\ --verbose-ir Enable compiler debug output for Zig IR
- \\ --verbose-llvm-ir Enable compiler debug output for LLVM IR
- \\ --verbose-cimport Enable compiler debug output for C imports
- \\
- \\
- ;
-
-const args_build_spec = []Flag {
- Flag.Bool("--help"),
- Flag.Bool("--init"),
- Flag.Arg1("--build-file"),
- Flag.Arg1("--cache-dir"),
- Flag.Bool("--verbose"),
- Flag.Arg1("--prefix"),
-
- Flag.Arg1("--build-file"),
- Flag.Arg1("--cache-dir"),
- Flag.Bool("--verbose-tokenize"),
- Flag.Bool("--verbose-ast"),
- Flag.Bool("--verbose-link"),
- Flag.Bool("--verbose-ir"),
- Flag.Bool("--verbose-llvm-ir"),
- Flag.Bool("--verbose-cimport"),
-};
-
-const missing_build_file =
- \\No 'build.zig' file found.
- \\
- \\Initialize a 'build.zig' template file with `zig build --init`,
- \\or build an executable directly with `zig build-exe $FILENAME.zig`.
- \\
- \\See: `zig build --help` or `zig help` for more options.
- \\
- ;
-
-fn cmdBuild(allocator: &Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_build_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_build);
- os.exit(0);
- }
-
- const zig_lib_dir = try introspect.resolveZigLibDir(allocator);
- defer allocator.free(zig_lib_dir);
-
- const zig_std_dir = try os.path.join(allocator, zig_lib_dir, "std");
- defer allocator.free(zig_std_dir);
-
- const special_dir = try os.path.join(allocator, zig_std_dir, "special");
- defer allocator.free(special_dir);
-
- const build_runner_path = try os.path.join(allocator, special_dir, "build_runner.zig");
- defer allocator.free(build_runner_path);
-
- const build_file = flags.single("build-file") ?? "build.zig";
- const build_file_abs = try os.path.resolve(allocator, ".", build_file);
- defer allocator.free(build_file_abs);
-
- const build_file_exists = os.File.access(allocator, build_file_abs, os.default_file_mode) catch false;
-
- if (flags.present("init")) {
- if (build_file_exists) {
- try stderr.print("build.zig already exists\n");
- os.exit(1);
- }
-
- // need a new scope for proper defer scope finalization on exit
- {
- const build_template_path = try os.path.join(allocator, special_dir, "build_file_template.zig");
- defer allocator.free(build_template_path);
-
- try os.copyFile(allocator, build_template_path, build_file_abs);
- try stderr.print("wrote build.zig template\n");
- }
-
- os.exit(0);
- }
-
- if (!build_file_exists) {
- try stderr.write(missing_build_file);
- os.exit(1);
- }
-
- // TODO: Invoke build.zig entrypoint directly?
- var zig_exe_path = try os.selfExePath(allocator);
- defer allocator.free(zig_exe_path);
-
- var build_args = ArrayList([]const u8).init(allocator);
- defer build_args.deinit();
-
- const build_file_basename = os.path.basename(build_file_abs);
- const build_file_dirname = os.path.dirname(build_file_abs);
-
- var full_cache_dir: []u8 = undefined;
- if (flags.single("cache-dir")) |cache_dir| {
- full_cache_dir = try os.path.resolve(allocator, ".", cache_dir, full_cache_dir);
- } else {
- full_cache_dir = try os.path.join(allocator, build_file_dirname, "zig-cache");
- }
- defer allocator.free(full_cache_dir);
-
- const path_to_build_exe = try os.path.join(allocator, full_cache_dir, "build");
- defer allocator.free(path_to_build_exe);
-
- try build_args.append(path_to_build_exe);
- try build_args.append(zig_exe_path);
- try build_args.append(build_file_dirname);
- try build_args.append(full_cache_dir);
-
- var proc = try os.ChildProcess.init(build_args.toSliceConst(), allocator);
- defer proc.deinit();
-
- var term = try proc.spawnAndWait();
- switch (term) {
- os.ChildProcess.Term.Exited => |status| {
- if (status != 0) {
- try stderr.print("{} exited with status {}\n", build_args.at(0), status);
- os.exit(1);
- }
- },
- os.ChildProcess.Term.Signal => |signal| {
- try stderr.print("{} killed by signal {}\n", build_args.at(0), signal);
- os.exit(1);
- },
- os.ChildProcess.Term.Stopped => |signal| {
- try stderr.print("{} stopped by signal {}\n", build_args.at(0), signal);
- os.exit(1);
- },
- os.ChildProcess.Term.Unknown => |status| {
- try stderr.print("{} encountered unknown failure {}\n", build_args.at(0), status);
- os.exit(1);
- },
- }
-}
-
-// cmd:build-exe ///////////////////////////////////////////////////////////////////////////////////
-
const usage_build_generic =
\\usage: zig build-exe [file]
\\ zig build-lib [file]
@@ -264,18 +136,20 @@ const usage_build_generic =
\\ --color [auto|off|on] Enable or disable colored error messages
\\
\\Compile Options:
+ \\ --libc [file] Provide a file which specifies libc paths
\\ --assembly [source] Add assembly file to build
- \\ --cache-dir [path] Override the cache directory
\\ --emit [filetype] Emit a specific file format as compilation output
\\ --enable-timing-info Print timing diagnostics
- \\ --libc-include-dir [path] Directory where libc stdlib.h resides
\\ --name [name] Override output name
\\ --output [file] Override destination path
\\ --output-h [file] Override generated header file path
\\ --pkg-begin [name] [path] Make package available to import and push current pkg
\\ --pkg-end Pop current pkg
- \\ --release-fast Build with optimizations on and safety off
- \\ --release-safe Build with optimizations on and safety on
+ \\ --mode [mode] Set the build mode
+ \\ debug (default) optimizations off, safety on
+ \\ release-fast optimizations on, safety off
+ \\ release-safe optimizations on, safety on
+ \\ release-small optimize for small binary, safety off
\\ --static Output will be statically linked
\\ --strip Exclude debug symbols
\\ --target-arch [name] Specify target architecture
@@ -294,12 +168,7 @@ const usage_build_generic =
\\
\\Link Options:
\\ --ar-path [path] Set the path to ar
- \\ --dynamic-linker [path] Set the path to ld.so
\\ --each-lib-rpath Add rpath for each used dynamic library
- \\ --libc-lib-dir [path] Directory where libc crt1.o resides
- \\ --libc-static-lib-dir [path] Directory where libc crtbegin.o resides
- \\ --msvc-lib-dir [path] (windows) directory where vcruntime.lib resides
- \\ --kernel32-lib-dir [path] (windows) directory where kernel32.lib resides
\\ --library [lib] Link against lib
\\ --forbid-library [lib] Make it an error to link against lib
\\ --library-path [dir] Add a directory to the library search path
@@ -317,25 +186,36 @@ const usage_build_generic =
\\ --ver-patch [ver] Dynamic library semver patch version
\\
\\
- ;
+;
-const args_build_generic = []Flag {
+const args_build_generic = []Flag{
Flag.Bool("--help"),
- Flag.Option("--color", []const []const u8 { "auto", "off", "on" }),
+ Flag.Option("--color", []const []const u8{
+ "auto",
+ "off",
+ "on",
+ }),
+ Flag.Option("--mode", []const []const u8{
+ "debug",
+ "release-fast",
+ "release-safe",
+ "release-small",
+ }),
Flag.ArgMergeN("--assembly", 1),
- Flag.Arg1("--cache-dir"),
- Flag.Option("--emit", []const []const u8 { "asm", "bin", "llvm-ir" }),
+ Flag.Option("--emit", []const []const u8{
+ "asm",
+ "bin",
+ "llvm-ir",
+ }),
Flag.Bool("--enable-timing-info"),
- Flag.Arg1("--libc-include-dir"),
+ Flag.Arg1("--libc"),
Flag.Arg1("--name"),
Flag.Arg1("--output"),
Flag.Arg1("--output-h"),
// NOTE: Parsed manually after initial check
Flag.ArgN("--pkg-begin", 2),
Flag.Bool("--pkg-end"),
- Flag.Bool("--release-fast"),
- Flag.Bool("--release-safe"),
Flag.Bool("--static"),
Flag.Bool("--strip"),
Flag.Arg1("--target-arch"),
@@ -353,12 +233,7 @@ const args_build_generic = []Flag {
Flag.Arg1("-mllvm"),
Flag.Arg1("--ar-path"),
- Flag.Arg1("--dynamic-linker"),
Flag.Bool("--each-lib-rpath"),
- Flag.Arg1("--libc-lib-dir"),
- Flag.Arg1("--libc-static-lib-dir"),
- Flag.Arg1("--msvc-lib-dir"),
- Flag.Arg1("--kernel32-lib-dir"),
Flag.ArgMergeN("--library", 1),
Flag.ArgMergeN("--forbid-library", 1),
Flag.ArgMergeN("--library-path", 1),
@@ -377,49 +252,60 @@ const args_build_generic = []Flag {
Flag.Arg1("--ver-patch"),
};
-fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Module.Kind) !void {
+fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Compilation.Kind) !void {
var flags = try Args.parse(allocator, args_build_generic, args);
defer flags.deinit();
if (flags.present("help")) {
- try stderr.write(usage_build_generic);
+ try stdout.write(usage_build_generic);
os.exit(0);
}
- var build_mode = builtin.Mode.Debug;
- if (flags.present("release-fast")) {
- build_mode = builtin.Mode.ReleaseFast;
- } else if (flags.present("release-safe")) {
- build_mode = builtin.Mode.ReleaseSafe;
- }
-
- var color = Module.ErrColor.Auto;
- if (flags.single("color")) |color_flag| {
- if (mem.eql(u8, color_flag, "auto")) {
- color = Module.ErrColor.Auto;
- } else if (mem.eql(u8, color_flag, "on")) {
- color = Module.ErrColor.On;
- } else if (mem.eql(u8, color_flag, "off")) {
- color = Module.ErrColor.Off;
+ const build_mode = blk: {
+ if (flags.single("mode")) |mode_flag| {
+ if (mem.eql(u8, mode_flag, "debug")) {
+ break :blk builtin.Mode.Debug;
+ } else if (mem.eql(u8, mode_flag, "release-fast")) {
+ break :blk builtin.Mode.ReleaseFast;
+ } else if (mem.eql(u8, mode_flag, "release-safe")) {
+ break :blk builtin.Mode.ReleaseSafe;
+ } else if (mem.eql(u8, mode_flag, "release-small")) {
+ break :blk builtin.Mode.ReleaseSmall;
+ } else unreachable;
} else {
- unreachable;
+ break :blk builtin.Mode.Debug;
}
- }
+ };
- var emit_type = Module.Emit.Binary;
- if (flags.single("emit")) |emit_flag| {
- if (mem.eql(u8, emit_flag, "asm")) {
- emit_type = Module.Emit.Assembly;
- } else if (mem.eql(u8, emit_flag, "bin")) {
- emit_type = Module.Emit.Binary;
- } else if (mem.eql(u8, emit_flag, "llvm-ir")) {
- emit_type = Module.Emit.LlvmIr;
+ const color = blk: {
+ if (flags.single("color")) |color_flag| {
+ if (mem.eql(u8, color_flag, "auto")) {
+ break :blk errmsg.Color.Auto;
+ } else if (mem.eql(u8, color_flag, "on")) {
+ break :blk errmsg.Color.On;
+ } else if (mem.eql(u8, color_flag, "off")) {
+ break :blk errmsg.Color.Off;
+ } else unreachable;
} else {
- unreachable;
+ break :blk errmsg.Color.Auto;
}
- }
+ };
- var cur_pkg = try Module.CliPkg.init(allocator, "", "", null); // TODO: Need a path, name?
+ const emit_type = blk: {
+ if (flags.single("emit")) |emit_flag| {
+ if (mem.eql(u8, emit_flag, "asm")) {
+ break :blk Compilation.Emit.Assembly;
+ } else if (mem.eql(u8, emit_flag, "bin")) {
+ break :blk Compilation.Emit.Binary;
+ } else if (mem.eql(u8, emit_flag, "llvm-ir")) {
+ break :blk Compilation.Emit.LlvmIr;
+ } else unreachable;
+ } else {
+ break :blk Compilation.Emit.Binary;
+ }
+ };
+
+ var cur_pkg = try CliPkg.init(allocator, "", "", null);
defer cur_pkg.deinit();
var i: usize = 0;
@@ -432,15 +318,16 @@ fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Mo
i += 1;
const new_pkg_path = args[i];
- var new_cur_pkg = try Module.CliPkg.init(allocator, new_pkg_name, new_pkg_path, cur_pkg);
+ var new_cur_pkg = try CliPkg.init(allocator, new_pkg_name, new_pkg_path, cur_pkg);
try cur_pkg.children.append(new_cur_pkg);
cur_pkg = new_cur_pkg;
} else if (mem.eql(u8, "--pkg-end", arg_name)) {
- if (cur_pkg.parent == null) {
+ if (cur_pkg.parent) |parent| {
+ cur_pkg = parent;
+ } else {
try stderr.print("encountered --pkg-end with no matching --pkg-begin\n");
os.exit(1);
}
- cur_pkg = ??cur_pkg.parent;
}
}
@@ -449,138 +336,117 @@ fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Mo
os.exit(1);
}
- var in_file: ?[]const u8 = undefined;
- switch (flags.positionals.len) {
- 0 => {
- try stderr.write("--name [name] not provided and unable to infer\n");
- os.exit(1);
- },
- 1 => {
- in_file = flags.positionals.at(0);
- },
+ const provided_name = flags.single("name");
+ const root_source_file = switch (flags.positionals.len) {
+ 0 => null,
+ 1 => flags.positionals.at(0),
else => {
- try stderr.write("only one zig input file is accepted during build\n");
+ try stderr.print("unexpected extra parameter: {}\n", flags.positionals.at(1));
os.exit(1);
},
- }
-
- const basename = os.path.basename(??in_file);
- var it = mem.split(basename, ".");
- const root_name = it.next() ?? {
- try stderr.write("file name cannot be empty\n");
- os.exit(1);
};
- const asm_a= flags.many("assembly");
- const obj_a = flags.many("object");
- if (in_file == null and (obj_a == null or (??obj_a).len == 0) and (asm_a == null or (??asm_a).len == 0)) {
+ const root_name = if (provided_name) |n| n else blk: {
+ if (root_source_file) |file| {
+ const basename = os.path.basename(file);
+ var it = mem.split(basename, ".");
+ break :blk it.next() orelse basename;
+ } else {
+ try stderr.write("--name [name] not provided and unable to infer\n");
+ os.exit(1);
+ }
+ };
+
+ const is_static = flags.present("static");
+
+ const assembly_files = flags.many("assembly");
+ const link_objects = flags.many("object");
+ if (root_source_file == null and link_objects.len == 0 and assembly_files.len == 0) {
try stderr.write("Expected source file argument or at least one --object or --assembly argument\n");
os.exit(1);
}
- if (out_type == Module.Kind.Obj and (obj_a != null and (??obj_a).len != 0)) {
+ if (out_type == Compilation.Kind.Obj and link_objects.len != 0) {
try stderr.write("When building an object file, --object arguments are invalid\n");
os.exit(1);
}
- const zig_root_source_file = in_file;
-
- const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") ?? "zig-cache"[0..]) catch {
- os.exit(1);
- };
- defer allocator.free(full_cache_dir);
-
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1);
defer allocator.free(zig_lib_dir);
- var module =
- try Module.create(
- allocator,
- root_name,
- zig_root_source_file,
- Target.Native,
- out_type,
- build_mode,
- zig_lib_dir,
- full_cache_dir
- );
- defer module.destroy();
+ var override_libc: LibCInstallation = undefined;
- module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") ?? "0", 10);
- module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") ?? "0", 10);
- module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") ?? "0", 10);
+ var loop: event.Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
- module.is_test = false;
+ var event_loop_local = try EventLoopLocal.init(&loop);
+ defer event_loop_local.deinit();
- if (flags.single("linker-script")) |linker_script| {
- module.linker_script = linker_script;
+ var comp = try Compilation.create(
+ &event_loop_local,
+ root_name,
+ root_source_file,
+ Target.Native,
+ out_type,
+ build_mode,
+ is_static,
+ zig_lib_dir,
+ );
+ defer comp.destroy();
+
+ if (flags.single("libc")) |libc_path| {
+ parseLibcPaths(loop.allocator, &override_libc, libc_path);
+ comp.override_libc = &override_libc;
}
- module.each_lib_rpath = flags.present("each-lib-rpath");
+ for (flags.many("library")) |lib| {
+ _ = try comp.addLinkLib(lib, true);
+ }
+
+ comp.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") orelse "0", 10);
+ comp.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") orelse "0", 10);
+ comp.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") orelse "0", 10);
+
+ comp.is_test = false;
+
+ comp.linker_script = flags.single("linker-script");
+ comp.each_lib_rpath = flags.present("each-lib-rpath");
var clang_argv_buf = ArrayList([]const u8).init(allocator);
defer clang_argv_buf.deinit();
- if (flags.many("mllvm")) |mllvm_flags| {
- for (mllvm_flags) |mllvm| {
- try clang_argv_buf.append("-mllvm");
- try clang_argv_buf.append(mllvm);
- }
- module.llvm_argv = mllvm_flags;
- module.clang_argv = clang_argv_buf.toSliceConst();
+ const mllvm_flags = flags.many("mllvm");
+ for (mllvm_flags) |mllvm| {
+ try clang_argv_buf.append("-mllvm");
+ try clang_argv_buf.append(mllvm);
}
- module.strip = flags.present("strip");
- module.is_static = flags.present("static");
+ comp.llvm_argv = mllvm_flags;
+ comp.clang_argv = clang_argv_buf.toSliceConst();
- if (flags.single("libc-lib-dir")) |libc_lib_dir| {
- module.libc_lib_dir = libc_lib_dir;
- }
- if (flags.single("libc-static-lib-dir")) |libc_static_lib_dir| {
- module.libc_static_lib_dir = libc_static_lib_dir;
- }
- if (flags.single("libc-include-dir")) |libc_include_dir| {
- module.libc_include_dir = libc_include_dir;
- }
- if (flags.single("msvc-lib-dir")) |msvc_lib_dir| {
- module.msvc_lib_dir = msvc_lib_dir;
- }
- if (flags.single("kernel32-lib-dir")) |kernel32_lib_dir| {
- module.kernel32_lib_dir = kernel32_lib_dir;
- }
- if (flags.single("dynamic-linker")) |dynamic_linker| {
- module.dynamic_linker = dynamic_linker;
- }
+ comp.strip = flags.present("strip");
- module.verbose_tokenize = flags.present("verbose-tokenize");
- module.verbose_ast_tree = flags.present("verbose-ast-tree");
- module.verbose_ast_fmt = flags.present("verbose-ast-fmt");
- module.verbose_link = flags.present("verbose-link");
- module.verbose_ir = flags.present("verbose-ir");
- module.verbose_llvm_ir = flags.present("verbose-llvm-ir");
- module.verbose_cimport = flags.present("verbose-cimport");
+ comp.verbose_tokenize = flags.present("verbose-tokenize");
+ comp.verbose_ast_tree = flags.present("verbose-ast-tree");
+ comp.verbose_ast_fmt = flags.present("verbose-ast-fmt");
+ comp.verbose_link = flags.present("verbose-link");
+ comp.verbose_ir = flags.present("verbose-ir");
+ comp.verbose_llvm_ir = flags.present("verbose-llvm-ir");
+ comp.verbose_cimport = flags.present("verbose-cimport");
- module.err_color = color;
-
- if (flags.many("library-path")) |lib_dirs| {
- module.lib_dirs = lib_dirs;
- }
-
- if (flags.many("framework")) |frameworks| {
- module.darwin_frameworks = frameworks;
- }
-
- if (flags.many("rpath")) |rpath_list| {
- module.rpath_list = rpath_list;
- }
+ comp.err_color = color;
+ comp.lib_dirs = flags.many("library-path");
+ comp.darwin_frameworks = flags.many("framework");
+ comp.rpath_list = flags.many("rpath");
if (flags.single("output-h")) |output_h| {
- module.out_h_path = output_h;
+ comp.out_h_path = output_h;
}
- module.windows_subsystem_windows = flags.present("mwindows");
- module.windows_subsystem_console = flags.present("mconsole");
- module.linker_rdynamic = flags.present("rdynamic");
+ comp.windows_subsystem_windows = flags.present("mwindows");
+ comp.windows_subsystem_console = flags.present("mconsole");
+ comp.linker_rdynamic = flags.present("rdynamic");
if (flags.single("mmacosx-version-min") != null and flags.single("mios-version-min") != null) {
try stderr.write("-mmacosx-version-min and -mios-version-min options not allowed together\n");
@@ -588,48 +454,56 @@ fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Mo
}
if (flags.single("mmacosx-version-min")) |ver| {
- module.darwin_version_min = Module.DarwinVersionMin { .MacOS = ver };
+ comp.darwin_version_min = Compilation.DarwinVersionMin{ .MacOS = ver };
}
if (flags.single("mios-version-min")) |ver| {
- module.darwin_version_min = Module.DarwinVersionMin { .Ios = ver };
+ comp.darwin_version_min = Compilation.DarwinVersionMin{ .Ios = ver };
}
- module.emit_file_type = emit_type;
- if (flags.many("object")) |objects| {
- module.link_objects = objects;
+ comp.emit_file_type = emit_type;
+ comp.assembly_files = assembly_files;
+ comp.link_out_file = flags.single("output");
+ comp.link_objects = link_objects;
+
+ try comp.build();
+ const process_build_events_handle = try async processBuildEvents(comp, color);
+ defer cancel process_build_events_handle;
+ loop.run();
+}
+
+async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
+ // TODO directly awaiting async should guarantee memory allocation elision
+ const build_event = await (async comp.events.get() catch unreachable);
+
+ switch (build_event) {
+ Compilation.Event.Ok => {
+ return;
+ },
+ Compilation.Event.Error => |err| {
+ std.debug.warn("build failed: {}\n", @errorName(err));
+ os.exit(1);
+ },
+ Compilation.Event.Fail => |msgs| {
+ for (msgs) |msg| {
+ defer msg.destroy();
+ msg.printToFile(&stderr_file, color) catch os.exit(1);
+ }
+ },
}
- if (flags.many("assembly")) |assembly_files| {
- module.assembly_files = assembly_files;
- }
-
- try module.build();
- try module.link(flags.single("out-file") ?? null);
-
- if (flags.present("print-timing-info")) {
- // codegen_print_timing_info(g, stderr);
- }
-
- try stderr.print("building {}: {}\n", @tagName(out_type), in_file);
}
-fn cmdBuildExe(allocator: &Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Exe);
+fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
+ return buildOutputType(allocator, args, Compilation.Kind.Exe);
}
-// cmd:build-lib ///////////////////////////////////////////////////////////////////////////////////
-
-fn cmdBuildLib(allocator: &Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Lib);
+fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
+ return buildOutputType(allocator, args, Compilation.Kind.Lib);
}
-// cmd:build-obj ///////////////////////////////////////////////////////////////////////////////////
-
-fn cmdBuildObj(allocator: &Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Obj);
+fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
+ return buildOutputType(allocator, args, Compilation.Kind.Obj);
}
-// cmd:fmt /////////////////////////////////////////////////////////////////////////////////////////
-
const usage_fmt =
\\usage: zig fmt [file]...
\\
@@ -637,82 +511,233 @@ const usage_fmt =
\\
\\Options:
\\ --help Print this help and exit
- \\ --keep-backups Retain backup entries for every file
+ \\ --color [auto|off|on] Enable or disable colored error messages
+ \\ --stdin Format code from stdin
\\
\\
- ;
+;
-const args_fmt_spec = []Flag {
+const args_fmt_spec = []Flag{
Flag.Bool("--help"),
- Flag.Bool("--keep-backups"),
+ Flag.Option("--color", []const []const u8{
+ "auto",
+ "off",
+ "on",
+ }),
+ Flag.Bool("--stdin"),
};
-fn cmdFmt(allocator: &Allocator, args: []const []const u8) !void {
+const Fmt = struct {
+ seen: std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8),
+ queue: std.LinkedList([]const u8),
+ any_error: bool,
+
+ // file_path must outlive Fmt
+ fn addToQueue(self: *Fmt, file_path: []const u8) !void {
+ const new_node = try self.seen.allocator.create(std.LinkedList([]const u8).Node{
+ .prev = undefined,
+ .next = undefined,
+ .data = file_path,
+ });
+
+ if (try self.seen.put(file_path, {})) |_| return;
+
+ self.queue.append(new_node);
+ }
+
+ fn addDirToQueue(self: *Fmt, file_path: []const u8) !void {
+ var dir = try std.os.Dir.open(self.seen.allocator, file_path);
+ defer dir.close();
+ while (try dir.next()) |entry| {
+ if (entry.kind == std.os.Dir.Entry.Kind.Directory or mem.endsWith(u8, entry.name, ".zig")) {
+ const full_path = try os.path.join(self.seen.allocator, file_path, entry.name);
+ try self.addToQueue(full_path);
+ }
+ }
+ }
+};
+
+fn parseLibcPaths(allocator: *Allocator, libc: *LibCInstallation, libc_paths_file: []const u8) void {
+ libc.parse(allocator, libc_paths_file, stderr) catch |err| {
+ stderr.print(
+ "Unable to parse libc path file '{}': {}.\n" ++
+ "Try running `zig libc` to see an example for the native target.\n",
+ libc_paths_file,
+ @errorName(err),
+ ) catch os.exit(1);
+ os.exit(1);
+ };
+}
+
+fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
+ switch (args.len) {
+ 0 => {},
+ 1 => {
+ var libc_installation: LibCInstallation = undefined;
+ parseLibcPaths(allocator, &libc_installation, args[0]);
+ return;
+ },
+ else => {
+ try stderr.print("unexpected extra parameter: {}\n", args[1]);
+ os.exit(1);
+ },
+ }
+
+ var loop: event.Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ var event_loop_local = try EventLoopLocal.init(&loop);
+ defer event_loop_local.deinit();
+
+ const handle = try async findLibCAsync(&event_loop_local);
+ defer cancel handle;
+
+ loop.run();
+}
+
+async fn findLibCAsync(event_loop_local: *EventLoopLocal) void {
+ const libc = (await (async event_loop_local.getNativeLibC() catch unreachable)) catch |err| {
+ stderr.print("unable to find libc: {}\n", @errorName(err)) catch os.exit(1);
+ os.exit(1);
+ };
+ libc.render(stdout) catch os.exit(1);
+}
+
+fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_fmt_spec, args);
defer flags.deinit();
if (flags.present("help")) {
- try stderr.write(usage_fmt);
+ try stdout.write(usage_fmt);
os.exit(0);
}
+ const color = blk: {
+ if (flags.single("color")) |color_flag| {
+ if (mem.eql(u8, color_flag, "auto")) {
+ break :blk errmsg.Color.Auto;
+ } else if (mem.eql(u8, color_flag, "on")) {
+ break :blk errmsg.Color.On;
+ } else if (mem.eql(u8, color_flag, "off")) {
+ break :blk errmsg.Color.Off;
+ } else unreachable;
+ } else {
+ break :blk errmsg.Color.Auto;
+ }
+ };
+
+ if (flags.present("stdin")) {
+ if (flags.positionals.len != 0) {
+ try stderr.write("cannot use --stdin with positional arguments\n");
+ os.exit(1);
+ }
+
+ var stdin_file = try io.getStdIn();
+ var stdin = io.FileInStream.init(&stdin_file);
+
+ const source_code = try stdin.stream.readAllAlloc(allocator, @maxValue(usize));
+ defer allocator.free(source_code);
+
+ var tree = std.zig.parse(allocator, source_code) catch |err| {
+ try stderr.print("error parsing stdin: {}\n", err);
+ os.exit(1);
+ };
+ defer tree.deinit();
+
+ var error_it = tree.errors.iterator(0);
+ while (error_it.next()) |parse_error| {
+ const msg = try errmsg.Msg.createFromParseError(allocator, parse_error, &tree, "");
+ defer msg.destroy();
+
+ try msg.printToFile(&stderr_file, color);
+ }
+ if (tree.errors.len != 0) {
+ os.exit(1);
+ }
+
+ _ = try std.zig.render(allocator, stdout, &tree);
+ return;
+ }
+
if (flags.positionals.len == 0) {
try stderr.write("expected at least one source file argument\n");
os.exit(1);
}
+ var fmt = Fmt{
+ .seen = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator),
+ .queue = std.LinkedList([]const u8).init(),
+ .any_error = false,
+ };
+
for (flags.positionals.toSliceConst()) |file_path| {
+ try fmt.addToQueue(file_path);
+ }
+
+ while (fmt.queue.popFirst()) |node| {
+ const file_path = node.data;
+
var file = try os.File.openRead(allocator, file_path);
defer file.close();
- const source_code = io.readFileAlloc(allocator, file_path) catch |err| {
- try stderr.print("unable to open '{}': {}", file_path, err);
- continue;
+ const source_code = io.readFileAlloc(allocator, file_path) catch |err| switch (err) {
+ error.IsDir => {
+ try fmt.addDirToQueue(file_path);
+ continue;
+ },
+ else => {
+ try stderr.print("unable to open '{}': {}\n", file_path, err);
+ fmt.any_error = true;
+ continue;
+ },
};
defer allocator.free(source_code);
- var tokenizer = std.zig.Tokenizer.init(source_code);
- var parser = std.zig.Parser.init(&tokenizer, allocator, file_path);
- defer parser.deinit();
-
- var tree = parser.parse() catch |err| {
+ var tree = std.zig.parse(allocator, source_code) catch |err| {
try stderr.print("error parsing file '{}': {}\n", file_path, err);
+ fmt.any_error = true;
continue;
};
defer tree.deinit();
- var original_file_backup = try Buffer.init(allocator, file_path);
- defer original_file_backup.deinit();
- try original_file_backup.append(".backup");
+ var error_it = tree.errors.iterator(0);
+ while (error_it.next()) |parse_error| {
+ const msg = try errmsg.Msg.createFromParseError(allocator, parse_error, &tree, file_path);
+ defer msg.destroy();
- try os.rename(allocator, file_path, original_file_backup.toSliceConst());
-
- try stderr.print("{}\n", file_path);
-
- // TODO: BufferedAtomicFile has some access problems.
- var out_file = try os.File.openWrite(allocator, file_path);
- defer out_file.close();
-
- var out_file_stream = io.FileOutStream.init(&out_file);
- try parser.renderSource(out_file_stream.stream, tree.root_node);
-
- if (!flags.present("keep-backups")) {
- try os.deleteFile(allocator, original_file_backup.toSliceConst());
+ try msg.printToFile(&stderr_file, color);
}
+ if (tree.errors.len != 0) {
+ fmt.any_error = true;
+ continue;
+ }
+
+ const baf = try io.BufferedAtomicFile.create(allocator, file_path);
+ defer baf.destroy();
+
+ const anything_changed = try std.zig.render(allocator, baf.stream(), &tree);
+ if (anything_changed) {
+ try stderr.print("{}\n", file_path);
+ try baf.finish();
+ }
+ }
+
+ if (fmt.any_error) {
+ os.exit(1);
}
}
// cmd:targets /////////////////////////////////////////////////////////////////////////////////////
-fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write("Architectures:\n");
{
comptime var i: usize = 0;
inline while (i < @memberCount(builtin.Arch)) : (i += 1) {
comptime const arch_tag = @memberName(builtin.Arch, i);
// NOTE: Cannot use empty string, see #918.
- comptime const native_str =
- if (comptime mem.eql(u8, arch_tag, @tagName(builtin.arch))) " (native)\n" else "\n";
+ comptime const native_str = if (comptime mem.eql(u8, arch_tag, @tagName(builtin.arch))) " (native)\n" else "\n";
try stdout.print(" {}{}", arch_tag, native_str);
}
@@ -725,8 +750,7 @@ fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
inline while (i < @memberCount(builtin.Os)) : (i += 1) {
comptime const os_tag = @memberName(builtin.Os, i);
// NOTE: Cannot use empty string, see #918.
- comptime const native_str =
- if (comptime mem.eql(u8, os_tag, @tagName(builtin.os))) " (native)\n" else "\n";
+ comptime const native_str = if (comptime mem.eql(u8, os_tag, @tagName(builtin.os))) " (native)\n" else "\n";
try stdout.print(" {}{}", os_tag, native_str);
}
@@ -739,176 +763,23 @@ fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
inline while (i < @memberCount(builtin.Environ)) : (i += 1) {
comptime const environ_tag = @memberName(builtin.Environ, i);
// NOTE: Cannot use empty string, see #918.
- comptime const native_str =
- if (comptime mem.eql(u8, environ_tag, @tagName(builtin.environ))) " (native)\n" else "\n";
+ comptime const native_str = if (comptime mem.eql(u8, environ_tag, @tagName(builtin.environ))) " (native)\n" else "\n";
try stdout.print(" {}{}", environ_tag, native_str);
}
}
}
-// cmd:version /////////////////////////////////////////////////////////////////////////////////////
-
-fn cmdVersion(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdVersion(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print("{}\n", std.cstr.toSliceConst(c.ZIG_VERSION_STRING));
}
-// cmd:test ////////////////////////////////////////////////////////////////////////////////////////
+const args_test_spec = []Flag{Flag.Bool("--help")};
-const usage_test =
- \\usage: zig test [file]...
- \\
- \\Options:
- \\ --help Print this help and exit
- \\
- \\
- ;
-
-const args_test_spec = []Flag {
- Flag.Bool("--help"),
-};
-
-
-fn cmdTest(allocator: &Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_build_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_test);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one zig source file\n");
- os.exit(1);
- }
-
- // compile the test program into the cache and run
-
- // NOTE: May be overlap with buildOutput, take the shared part out.
- try stderr.print("testing file {}\n", flags.positionals.at(0));
+fn cmdHelp(allocator: *Allocator, args: []const []const u8) !void {
+ try stdout.write(usage);
}
-// cmd:run /////////////////////////////////////////////////////////////////////////////////////////
-
-// Run should be simple and not expose the full set of arguments provided by build-exe. If specific
-// build requirements are need, the user should `build-exe` then `run` manually.
-const usage_run =
- \\usage: zig run [file] --
- \\
- \\Options:
- \\ --help Print this help and exit
- \\
- \\
- ;
-
-const args_run_spec = []Flag {
- Flag.Bool("--help"),
-};
-
-
-fn cmdRun(allocator: &Allocator, args: []const []const u8) !void {
- var compile_args = args;
- var runtime_args: []const []const u8 = []const []const u8 {};
-
- for (args) |argv, i| {
- if (mem.eql(u8, argv, "--")) {
- compile_args = args[0..i];
- runtime_args = args[i+1..];
- break;
- }
- }
- var flags = try Args.parse(allocator, args_run_spec, compile_args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_run);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one zig source file\n");
- os.exit(1);
- }
-
- try stderr.print("runtime args:\n");
- for (runtime_args) |cargs| {
- try stderr.print("{}\n", cargs);
- }
-}
-
-// cmd:translate-c /////////////////////////////////////////////////////////////////////////////////
-
-const usage_translate_c =
- \\usage: zig translate-c [file]
- \\
- \\Options:
- \\ --help Print this help and exit
- \\ --enable-timing-info Print timing diagnostics
- \\ --output [path] Output file to write generated zig file (default: stdout)
- \\
- \\
- ;
-
-const args_translate_c_spec = []Flag {
- Flag.Bool("--help"),
- Flag.Bool("--enable-timing-info"),
- Flag.Arg1("--libc-include-dir"),
- Flag.Arg1("--output"),
-};
-
-fn cmdTranslateC(allocator: &Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_translate_c_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_translate_c);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one c source file\n");
- os.exit(1);
- }
-
- // set up codegen
-
- const zig_root_source_file = null;
-
- // NOTE: translate-c shouldn't require setting up the full codegen instance as it does in
- // the C++ compiler.
-
- // codegen_create(g);
- // codegen_set_out_name(g, null);
- // codegen_translate_c(g, flags.positional.at(0))
-
- var output_stream = stdout;
- if (flags.single("output")) |output_file| {
- var file = try os.File.openWrite(allocator, output_file);
- defer file.close();
-
- var file_stream = io.FileOutStream.init(&file);
- // TODO: Not being set correctly, still stdout
- output_stream = &file_stream.stream;
- }
-
- // ast_render(g, output_stream, g->root_import->root, 4);
- try output_stream.write("pub const example = 10;\n");
-
- if (flags.present("enable-timing-info")) {
- // codegen_print_timing_info(g, stdout);
- try stderr.write("printing timing info for translate-c\n");
- }
-}
-
-// cmd:help ////////////////////////////////////////////////////////////////////////////////////////
-
-fn cmdHelp(allocator: &Allocator, args: []const []const u8) !void {
- try stderr.write(usage);
-}
-
-// cmd:zen /////////////////////////////////////////////////////////////////////////////////////////
-
const info_zen =
\\
\\ * Communicate intent precisely.
@@ -924,14 +795,12 @@ const info_zen =
\\ * Together we serve end users.
\\
\\
- ;
+;
-fn cmdZen(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdZen(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write(info_zen);
}
-// cmd:internal ////////////////////////////////////////////////////////////////////////////////////
-
const usage_internal =
\\usage: zig internal [subcommand]
\\
@@ -939,17 +808,18 @@ const usage_internal =
\\ build-info Print static compiler build-info
\\
\\
- ;
+;
-fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdInternal(allocator: *Allocator, args: []const []const u8) !void {
if (args.len == 0) {
try stderr.write(usage_internal);
os.exit(1);
}
- const sub_commands = []Command {
- Command { .name = "build-info", .exec = cmdInternalBuildInfo },
- };
+ const sub_commands = []Command{Command{
+ .name = "build-info",
+ .exec = cmdInternalBuildInfo,
+ }};
for (sub_commands) |sub_command| {
if (mem.eql(u8, sub_command.name, args[0])) {
@@ -962,7 +832,7 @@ fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void {
try stderr.write(usage_internal);
}
-fn cmdInternalBuildInfo(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print(
\\ZIG_CMAKE_BINARY_DIR {}
\\ZIG_CXX_COMPILER {}
@@ -973,7 +843,7 @@ fn cmdInternalBuildInfo(allocator: &Allocator, args: []const []const u8) !void {
\\ZIG_C_HEADER_FILES {}
\\ZIG_DIA_GUIDS_LIB {}
\\
- ,
+ ,
std.cstr.toSliceConst(c.ZIG_CMAKE_BINARY_DIR),
std.cstr.toSliceConst(c.ZIG_CXX_COMPILER),
std.cstr.toSliceConst(c.ZIG_LLVM_CONFIG_EXE),
@@ -984,3 +854,27 @@ fn cmdInternalBuildInfo(allocator: &Allocator, args: []const []const u8) !void {
std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB),
);
}
+
+const CliPkg = struct {
+ name: []const u8,
+ path: []const u8,
+ children: ArrayList(*CliPkg),
+ parent: ?*CliPkg,
+
+ pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
+ var pkg = try allocator.create(CliPkg{
+ .name = name,
+ .path = path,
+ .children = ArrayList(*CliPkg).init(allocator),
+ .parent = parent,
+ });
+ return pkg;
+ }
+
+ pub fn deinit(self: *CliPkg) void {
+ for (self.children.toSliceConst()) |child| {
+ child.deinit();
+ }
+ self.children.deinit();
+ }
+};
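
A minimal sketch of the CliPkg tree that `--pkg-begin foo libs/foo.zig --pkg-end` produces, assuming an allocator in scope; the hypothetical helper below mirrors the --pkg-begin handling in buildOutputType:

    fn examplePkgTree(allocator: *mem.Allocator) !void {
        // Equivalent of: --pkg-begin foo libs/foo.zig --pkg-end
        var root = try CliPkg.init(allocator, "", "", null);
        defer root.deinit();

        const foo = try CliPkg.init(allocator, "foo", "libs/foo.zig", root);
        try root.children.append(foo);
        // foo.parent points back at root, so --pkg-end can pop to it.
    }
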
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
deleted file mode 100644
index eec30749e2..0000000000
--- a/src-self-hosted/module.zig
+++ /dev/null
@@ -1,326 +0,0 @@
-const std = @import("std");
-const os = std.os;
-const io = std.io;
-const mem = std.mem;
-const Buffer = std.Buffer;
-const llvm = @import("llvm.zig");
-const c = @import("c.zig");
-const builtin = @import("builtin");
-const Target = @import("target.zig").Target;
-const warn = std.debug.warn;
-const Tokenizer = std.zig.Tokenizer;
-const Token = std.zig.Token;
-const Parser = std.zig.Parser;
-const ArrayList = std.ArrayList;
-
-pub const Module = struct {
- allocator: &mem.Allocator,
- name: Buffer,
- root_src_path: ?[]const u8,
- module: llvm.ModuleRef,
- context: llvm.ContextRef,
- builder: llvm.BuilderRef,
- target: Target,
- build_mode: builtin.Mode,
- zig_lib_dir: []const u8,
-
- version_major: u32,
- version_minor: u32,
- version_patch: u32,
-
- linker_script: ?[]const u8,
- cache_dir: []const u8,
- libc_lib_dir: ?[]const u8,
- libc_static_lib_dir: ?[]const u8,
- libc_include_dir: ?[]const u8,
- msvc_lib_dir: ?[]const u8,
- kernel32_lib_dir: ?[]const u8,
- dynamic_linker: ?[]const u8,
- out_h_path: ?[]const u8,
-
- is_test: bool,
- each_lib_rpath: bool,
- strip: bool,
- is_static: bool,
- linker_rdynamic: bool,
-
- clang_argv: []const []const u8,
- llvm_argv: []const []const u8,
- lib_dirs: []const []const u8,
- rpath_list: []const []const u8,
- assembly_files: []const []const u8,
- link_objects: []const []const u8,
-
- windows_subsystem_windows: bool,
- windows_subsystem_console: bool,
-
- link_libs_list: ArrayList(&LinkLib),
- libc_link_lib: ?&LinkLib,
-
- err_color: ErrColor,
-
- verbose_tokenize: bool,
- verbose_ast_tree: bool,
- verbose_ast_fmt: bool,
- verbose_cimport: bool,
- verbose_ir: bool,
- verbose_llvm_ir: bool,
- verbose_link: bool,
-
- darwin_frameworks: []const []const u8,
- darwin_version_min: DarwinVersionMin,
-
- test_filters: []const []const u8,
- test_name_prefix: ?[]const u8,
-
- emit_file_type: Emit,
-
- kind: Kind,
-
- pub const DarwinVersionMin = union(enum) {
- None,
- MacOS: []const u8,
- Ios: []const u8,
- };
-
- pub const Kind = enum {
- Exe,
- Lib,
- Obj,
- };
-
- pub const ErrColor = enum {
- Auto,
- Off,
- On,
- };
-
- pub const LinkLib = struct {
- name: []const u8,
- path: ?[]const u8,
- /// the list of symbols we depend on from this lib
- symbols: ArrayList([]u8),
- provided_explicitly: bool,
- };
-
- pub const Emit = enum {
- Binary,
- Assembly,
- LlvmIr,
- };
-
- pub const CliPkg = struct {
- name: []const u8,
- path: []const u8,
- children: ArrayList(&CliPkg),
- parent: ?&CliPkg,
-
- pub fn init(allocator: &mem.Allocator, name: []const u8, path: []const u8, parent: ?&CliPkg) !&CliPkg {
- var pkg = try allocator.create(CliPkg);
- pkg.name = name;
- pkg.path = path;
- pkg.children = ArrayList(&CliPkg).init(allocator);
- pkg.parent = parent;
- return pkg;
- }
-
- pub fn deinit(self: &CliPkg) void {
- for (self.children.toSliceConst()) |child| {
- child.deinit();
- }
- self.children.deinit();
- }
- };
-
- pub fn create(allocator: &mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: &const Target,
- kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !&Module
- {
- var name_buffer = try Buffer.init(allocator, name);
- errdefer name_buffer.deinit();
-
- const context = c.LLVMContextCreate() ?? return error.OutOfMemory;
- errdefer c.LLVMContextDispose(context);
-
- const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) ?? return error.OutOfMemory;
- errdefer c.LLVMDisposeModule(module);
-
- const builder = c.LLVMCreateBuilderInContext(context) ?? return error.OutOfMemory;
- errdefer c.LLVMDisposeBuilder(builder);
-
- const module_ptr = try allocator.create(Module);
- errdefer allocator.destroy(module_ptr);
-
- *module_ptr = Module {
- .allocator = allocator,
- .name = name_buffer,
- .root_src_path = root_src_path,
- .module = module,
- .context = context,
- .builder = builder,
- .target = *target,
- .kind = kind,
- .build_mode = build_mode,
- .zig_lib_dir = zig_lib_dir,
- .cache_dir = cache_dir,
-
- .version_major = 0,
- .version_minor = 0,
- .version_patch = 0,
-
- .verbose_tokenize = false,
- .verbose_ast_tree = false,
- .verbose_ast_fmt = false,
- .verbose_cimport = false,
- .verbose_ir = false,
- .verbose_llvm_ir = false,
- .verbose_link = false,
-
- .linker_script = null,
- .libc_lib_dir = null,
- .libc_static_lib_dir = null,
- .libc_include_dir = null,
- .msvc_lib_dir = null,
- .kernel32_lib_dir = null,
- .dynamic_linker = null,
- .out_h_path = null,
- .is_test = false,
- .each_lib_rpath = false,
- .strip = false,
- .is_static = false,
- .linker_rdynamic = false,
- .clang_argv = [][]const u8{},
- .llvm_argv = [][]const u8{},
- .lib_dirs = [][]const u8{},
- .rpath_list = [][]const u8{},
- .assembly_files = [][]const u8{},
- .link_objects = [][]const u8{},
- .windows_subsystem_windows = false,
- .windows_subsystem_console = false,
- .link_libs_list = ArrayList(&LinkLib).init(allocator),
- .libc_link_lib = null,
- .err_color = ErrColor.Auto,
- .darwin_frameworks = [][]const u8{},
- .darwin_version_min = DarwinVersionMin.None,
- .test_filters = [][]const u8{},
- .test_name_prefix = null,
- .emit_file_type = Emit.Binary,
- };
- return module_ptr;
- }
-
- fn dump(self: &Module) void {
- c.LLVMDumpModule(self.module);
- }
-
- pub fn destroy(self: &Module) void {
- c.LLVMDisposeBuilder(self.builder);
- c.LLVMDisposeModule(self.module);
- c.LLVMContextDispose(self.context);
- self.name.deinit();
-
- self.allocator.destroy(self);
- }
-
- pub fn build(self: &Module) !void {
- if (self.llvm_argv.len != 0) {
- var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.allocator,
- [][]const []const u8 { [][]const u8{"zig (LLVM option parsing)"}, self.llvm_argv, });
- defer c_compatible_args.deinit();
- c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
- }
-
- const root_src_path = self.root_src_path ?? @panic("TODO handle null root src path");
- const root_src_real_path = os.path.real(self.allocator, root_src_path) catch |err| {
- try printError("unable to get real path '{}': {}", root_src_path, err);
- return err;
- };
- errdefer self.allocator.free(root_src_real_path);
-
- const source_code = io.readFileAlloc(self.allocator, root_src_real_path) catch |err| {
- try printError("unable to open '{}': {}", root_src_real_path, err);
- return err;
- };
- errdefer self.allocator.free(source_code);
-
- warn("====input:====\n");
-
- warn("{}", source_code);
-
- warn("====tokenization:====\n");
- {
- var tokenizer = Tokenizer.init(source_code);
- while (true) {
- const token = tokenizer.next();
- tokenizer.dump(token);
- if (token.id == Token.Id.Eof) {
- break;
- }
- }
- }
-
- warn("====parse:====\n");
-
- var tokenizer = Tokenizer.init(source_code);
- var parser = Parser.init(&tokenizer, self.allocator, root_src_real_path);
- defer parser.deinit();
-
- var tree = try parser.parse();
- defer tree.deinit();
-
- var stderr_file = try std.io.getStdErr();
- var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
- const out_stream = &stderr_file_out_stream.stream;
- try parser.renderAst(out_stream, tree.root_node);
-
- warn("====fmt:====\n");
- try parser.renderSource(out_stream, tree.root_node);
-
- warn("====ir:====\n");
- warn("TODO\n\n");
-
- warn("====llvm ir:====\n");
- self.dump();
-
- }
-
- pub fn link(self: &Module, out_file: ?[]const u8) !void {
- warn("TODO link");
- return error.Todo;
- }
-
- pub fn addLinkLib(self: &Module, name: []const u8, provided_explicitly: bool) !&LinkLib {
- const is_libc = mem.eql(u8, name, "c");
-
- if (is_libc) {
- if (self.libc_link_lib) |libc_link_lib| {
- return libc_link_lib;
- }
- }
-
- for (self.link_libs_list.toSliceConst()) |existing_lib| {
- if (mem.eql(u8, name, existing_lib.name)) {
- return existing_lib;
- }
- }
-
- const link_lib = try self.allocator.create(LinkLib);
- *link_lib = LinkLib {
- .name = name,
- .path = null,
- .provided_explicitly = provided_explicitly,
- .symbols = ArrayList([]u8).init(self.allocator),
- };
- try self.link_libs_list.append(link_lib);
- if (is_libc) {
- self.libc_link_lib = link_lib;
- }
- return link_lib;
- }
-};
-
-fn printError(comptime format: []const u8, args: ...) !void {
- var stderr_file = try std.io.getStdErr();
- var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
- const out_stream = &stderr_file_out_stream.stream;
- try out_stream.print(format, args);
-}
diff --git a/src-self-hosted/package.zig b/src-self-hosted/package.zig
new file mode 100644
index 0000000000..720b279651
--- /dev/null
+++ b/src-self-hosted/package.zig
@@ -0,0 +1,29 @@
+const std = @import("std");
+const mem = std.mem;
+const assert = std.debug.assert;
+const Buffer = std.Buffer;
+
+pub const Package = struct {
+    root_src_dir: Buffer,
+    /// relative to root_src_dir
+    root_src_path: Buffer,
+
+    table: Table,
+
+ pub const Table = std.HashMap([]const u8, *Package, mem.hash_slice_u8, mem.eql_slice_u8);
+
+ /// makes internal copies of root_src_dir and root_src_path
+ /// allocator should be an arena allocator because Package never frees anything
+ pub fn create(allocator: *mem.Allocator, root_src_dir: []const u8, root_src_path: []const u8) !*Package {
+ return allocator.create(Package{
+ .root_src_dir = try Buffer.init(allocator, root_src_dir),
+ .root_src_path = try Buffer.init(allocator, root_src_path),
+ .table = Table.init(allocator),
+ });
+ }
+
+ pub fn add(self: *Package, name: []const u8, package: *Package) !void {
+ const entry = try self.table.put(try mem.dupe(self.table.allocator, u8, name), package);
+ assert(entry == null);
+ }
+};
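+
+// Illustrative usage sketch (the `arena` allocator and the paths below are
+// hypothetical, not part of this change): a root package registers its
+// dependencies by name.
+//
+//     const root = try Package.create(arena, "src", "main.zig");
+//     const foo = try Package.create(arena, "deps/foo", "foo.zig");
+//     try root.add("foo", foo);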
diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig
index 05e586daae..a38e765c6e 100644
--- a/src-self-hosted/scope.zig
+++ b/src-self-hosted/scope.zig
@@ -1,16 +1,396 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Allocator = mem.Allocator;
+const Decl = @import("decl.zig").Decl;
+const Compilation = @import("compilation.zig").Compilation;
+const mem = std.mem;
+const ast = std.zig.ast;
+const Value = @import("value.zig").Value;
+const Type = @import("type.zig").Type;
+const ir = @import("ir.zig");
+const Span = @import("errmsg.zig").Span;
+const assert = std.debug.assert;
+const event = std.event;
+const llvm = @import("llvm.zig");
+
pub const Scope = struct {
id: Id,
- parent: &Scope,
+ parent: ?*Scope,
+ ref_count: std.atomic.Int(usize),
+
+ /// Thread-safe
+ pub fn ref(base: *Scope) void {
+ _ = base.ref_count.incr();
+ }
+
+ /// Thread-safe
+ pub fn deref(base: *Scope, comp: *Compilation) void {
+ if (base.ref_count.decr() == 1) {
+ if (base.parent) |parent| parent.deref(comp);
+ switch (base.id) {
+ Id.Root => @fieldParentPtr(Root, "base", base).destroy(comp),
+ Id.Decls => @fieldParentPtr(Decls, "base", base).destroy(comp),
+ Id.Block => @fieldParentPtr(Block, "base", base).destroy(comp),
+ Id.FnDef => @fieldParentPtr(FnDef, "base", base).destroy(comp),
+ Id.CompTime => @fieldParentPtr(CompTime, "base", base).destroy(comp),
+ Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp),
+ Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp),
+ Id.Var => @fieldParentPtr(Var, "base", base).destroy(comp),
+ }
+ }
+ }
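+
+    // Every concrete scope kind (Root, Decls, Block, ...) embeds `base: Scope`, so
+    // @fieldParentPtr can recover the outer struct from the base pointer and dispatch
+    // destruction without a vtable.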
+
+ pub fn findRoot(base: *Scope) *Root {
+ var scope = base;
+ while (scope.parent) |parent| {
+ scope = parent;
+ }
+ assert(scope.id == Id.Root);
+ return @fieldParentPtr(Root, "base", scope);
+ }
+
+ pub fn findFnDef(base: *Scope) ?*FnDef {
+ var scope = base;
+ while (true) {
+ switch (scope.id) {
+ Id.FnDef => return @fieldParentPtr(FnDef, "base", scope),
+ Id.Root, Id.Decls => return null,
+
+ Id.Block,
+ Id.Defer,
+ Id.DeferExpr,
+ Id.CompTime,
+ Id.Var,
+ => scope = scope.parent.?,
+ }
+ }
+ }
+
+ pub fn findDeferExpr(base: *Scope) ?*DeferExpr {
+ var scope = base;
+ while (true) {
+ switch (scope.id) {
+ Id.DeferExpr => return @fieldParentPtr(DeferExpr, "base", scope),
+
+ Id.FnDef,
+ Id.Decls,
+ => return null,
+
+ Id.Block,
+ Id.Defer,
+ Id.CompTime,
+ Id.Root,
+ Id.Var,
+ => scope = scope.parent orelse return null,
+ }
+ }
+ }
+
+ fn init(base: *Scope, id: Id, parent: *Scope) void {
+ base.* = Scope{
+ .id = id,
+ .parent = parent,
+ .ref_count = std.atomic.Int(usize).init(1),
+ };
+ parent.ref();
+ }
pub const Id = enum {
+ Root,
Decls,
Block,
- Defer,
- DeferExpr,
- VarDecl,
- CImport,
- Loop,
FnDef,
CompTime,
+ Defer,
+ DeferExpr,
+ Var,
+ };
+
+ pub const Root = struct {
+ base: Scope,
+ tree: *ast.Tree,
+ realpath: []const u8,
+
+ /// Creates a Root scope with 1 reference
+ /// Takes ownership of realpath
+ /// Takes ownership of tree, will deinit and destroy when done.
+ pub fn create(comp: *Compilation, tree: *ast.Tree, realpath: []u8) !*Root {
+ const self = try comp.gpa().createOne(Root);
+ self.* = Root{
+ .base = Scope{
+ .id = Id.Root,
+ .parent = null,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .tree = tree,
+ .realpath = realpath,
+ };
+
+ return self;
+ }
+
+ pub fn destroy(self: *Root, comp: *Compilation) void {
+ comp.gpa().free(self.tree.source);
+ self.tree.deinit();
+ comp.gpa().destroy(self.tree);
+ comp.gpa().free(self.realpath);
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Decls = struct {
+ base: Scope,
+
+        /// The lock must be respected for writing. However, once name_future resolves,
+        /// readers can freely access it.
+ table: event.Locked(Decl.Table),
+
+ /// Once this future is resolved, the table is complete and available for unlocked
+ /// read-only access. It does not mean all the decls are resolved; it means only that
+ /// the table has all the names. Each decl in the table has its own resolution state.
+ name_future: event.Future(void),
+
+ /// Creates a Decls scope with 1 reference
+ pub fn create(comp: *Compilation, parent: *Scope) !*Decls {
+ const self = try comp.gpa().createOne(Decls);
+ self.* = Decls{
+ .base = undefined,
+ .table = event.Locked(Decl.Table).init(comp.loop, Decl.Table.init(comp.gpa())),
+ .name_future = event.Future(void).init(comp.loop),
+ };
+ self.base.init(Id.Decls, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *Decls, comp: *Compilation) void {
+ self.table.deinit();
+ comp.gpa().destroy(self);
+ }
+
+ pub async fn getTableReadOnly(self: *Decls) *Decl.Table {
+ _ = await (async self.name_future.get() catch unreachable);
+ return &self.table.private_data;
+ }
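+
+        // Intended access pattern (sketch): writers must hold the lock on `table`
+        // while mutating it; once `name_future` resolves, readers may call
+        // getTableReadOnly and use the returned table without locking.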
+ };
+
+ pub const Block = struct {
+ base: Scope,
+ incoming_values: std.ArrayList(*ir.Inst),
+ incoming_blocks: std.ArrayList(*ir.BasicBlock),
+ end_block: *ir.BasicBlock,
+ is_comptime: *ir.Inst,
+
+ safety: Safety,
+
+ const Safety = union(enum) {
+ Auto,
+ Manual: Manual,
+
+ const Manual = struct {
+ /// the source span that disabled the safety value
+ span: Span,
+
+ /// whether safety is enabled
+ enabled: bool,
+ };
+
+ fn get(self: Safety, comp: *Compilation) bool {
+ return switch (self) {
+ Safety.Auto => switch (comp.build_mode) {
+ builtin.Mode.Debug,
+ builtin.Mode.ReleaseSafe,
+ => true,
+ builtin.Mode.ReleaseFast,
+ builtin.Mode.ReleaseSmall,
+ => false,
+ },
+ @TagType(Safety).Manual => |man| man.enabled,
+ };
+ }
+ };
+
+ /// Creates a Block scope with 1 reference
+ pub fn create(comp: *Compilation, parent: *Scope) !*Block {
+ const self = try comp.gpa().createOne(Block);
+ self.* = Block{
+ .base = undefined,
+ .incoming_values = undefined,
+ .incoming_blocks = undefined,
+ .end_block = undefined,
+ .is_comptime = undefined,
+ .safety = Safety.Auto,
+ };
+ self.base.init(Id.Block, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *Block, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const FnDef = struct {
+ base: Scope,
+
+ /// This reference is not counted so that the scope can get destroyed with the function
+ fn_val: ?*Value.Fn,
+
+ /// Creates a FnDef scope with 1 reference
+ /// Must set the fn_val later
+ pub fn create(comp: *Compilation, parent: *Scope) !*FnDef {
+ const self = try comp.gpa().createOne(FnDef);
+ self.* = FnDef{
+ .base = undefined,
+ .fn_val = null,
+ };
+ self.base.init(Id.FnDef, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *FnDef, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const CompTime = struct {
+ base: Scope,
+
+ /// Creates a CompTime scope with 1 reference
+ pub fn create(comp: *Compilation, parent: *Scope) !*CompTime {
+ const self = try comp.gpa().createOne(CompTime);
+ self.* = CompTime{ .base = undefined };
+ self.base.init(Id.CompTime, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *CompTime, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Defer = struct {
+ base: Scope,
+ defer_expr_scope: *DeferExpr,
+ kind: Kind,
+
+ pub const Kind = enum {
+ ScopeExit,
+ ErrorExit,
+ };
+
+ /// Creates a Defer scope with 1 reference
+ pub fn create(
+ comp: *Compilation,
+ parent: *Scope,
+ kind: Kind,
+ defer_expr_scope: *DeferExpr,
+ ) !*Defer {
+ const self = try comp.gpa().createOne(Defer);
+ self.* = Defer{
+ .base = undefined,
+ .defer_expr_scope = defer_expr_scope,
+ .kind = kind,
+ };
+ self.base.init(Id.Defer, parent);
+ defer_expr_scope.base.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *Defer, comp: *Compilation) void {
+ self.defer_expr_scope.base.deref(comp);
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const DeferExpr = struct {
+ base: Scope,
+ expr_node: *ast.Node,
+ reported_err: bool,
+
+ /// Creates a DeferExpr scope with 1 reference
+ pub fn create(comp: *Compilation, parent: *Scope, expr_node: *ast.Node) !*DeferExpr {
+ const self = try comp.gpa().createOne(DeferExpr);
+ self.* = DeferExpr{
+ .base = undefined,
+ .expr_node = expr_node,
+ .reported_err = false,
+ };
+ self.base.init(Id.DeferExpr, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *DeferExpr, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Var = struct {
+ base: Scope,
+ name: []const u8,
+ src_node: *ast.Node,
+ data: Data,
+
+ pub const Data = union(enum) {
+ Param: Param,
+ Const: *Value,
+ };
+
+ pub const Param = struct {
+ index: usize,
+ typ: *Type,
+ llvm_value: llvm.ValueRef,
+ };
+
+ pub fn createParam(
+ comp: *Compilation,
+ parent: *Scope,
+ name: []const u8,
+ src_node: *ast.Node,
+ param_index: usize,
+ param_type: *Type,
+ ) !*Var {
+ const self = try create(comp, parent, name, src_node);
+ self.data = Data{
+ .Param = Param{
+ .index = param_index,
+ .typ = param_type,
+ .llvm_value = undefined,
+ },
+ };
+ return self;
+ }
+
+ pub fn createConst(
+ comp: *Compilation,
+ parent: *Scope,
+ name: []const u8,
+ src_node: *ast.Node,
+ value: *Value,
+ ) !*Var {
+ const self = try create(comp, parent, name, src_node);
+ self.data = Data{ .Const = value };
+ value.ref();
+ return self;
+ }
+
+ fn create(comp: *Compilation, parent: *Scope, name: []const u8, src_node: *ast.Node) !*Var {
+ const self = try comp.gpa().createOne(Var);
+ self.* = Var{
+ .base = undefined,
+ .name = name,
+ .src_node = src_node,
+ .data = undefined,
+ };
+ self.base.init(Id.Var, parent);
+ return self;
+ }
+
+ pub fn destroy(self: *Var, comp: *Compilation) void {
+ switch (self.data) {
+ Data.Param => {},
+ Data.Const => |value| value.deref(comp),
+ }
+ comp.gpa().destroy(self);
+ }
};
};
diff --git a/src-self-hosted/target.zig b/src-self-hosted/target.zig
index 375b48f10d..0cc8d02a62 100644
--- a/src-self-hosted/target.zig
+++ b/src-self-hosted/target.zig
@@ -1,60 +1,562 @@
+const std = @import("std");
const builtin = @import("builtin");
-const c = @import("c.zig");
+const llvm = @import("llvm.zig");
+const CInt = @import("c_int.zig").CInt;
-pub const CrossTarget = struct {
- arch: builtin.Arch,
- os: builtin.Os,
- environ: builtin.Environ,
+pub const FloatAbi = enum {
+ Hard,
+ Soft,
+ SoftFp,
};
pub const Target = union(enum) {
Native,
- Cross: CrossTarget,
+ Cross: Cross,
- pub fn oFileExt(self: &const Target) []const u8 {
- const environ = switch (*self) {
- Target.Native => builtin.environ,
- Target.Cross => |t| t.environ,
- };
- return switch (environ) {
- builtin.Environ.msvc => ".obj",
+ pub const Cross = struct {
+ arch: builtin.Arch,
+ os: builtin.Os,
+ environ: builtin.Environ,
+ object_format: builtin.ObjectFormat,
+ };
+
+ pub fn objFileExt(self: Target) []const u8 {
+ return switch (self.getObjectFormat()) {
+ builtin.ObjectFormat.coff => ".obj",
else => ".o",
};
}
- pub fn exeFileExt(self: &const Target) []const u8 {
+ pub fn exeFileExt(self: Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".exe",
else => "",
};
}
- pub fn getOs(self: &const Target) builtin.Os {
- return switch (*self) {
- Target.Native => builtin.os,
- Target.Cross => |t| t.os,
+ pub fn libFileExt(self: Target, is_static: bool) []const u8 {
+ return switch (self.getOs()) {
+ builtin.Os.windows => if (is_static) ".lib" else ".dll",
+ else => if (is_static) ".a" else ".so",
};
}
- pub fn isDarwin(self: &const Target) bool {
+ pub fn getOs(self: Target) builtin.Os {
+ return switch (self) {
+ Target.Native => builtin.os,
+ @TagType(Target).Cross => |t| t.os,
+ };
+ }
+
+ pub fn getArch(self: Target) builtin.Arch {
+ return switch (self) {
+ Target.Native => builtin.arch,
+ @TagType(Target).Cross => |t| t.arch,
+ };
+ }
+
+ pub fn getEnviron(self: Target) builtin.Environ {
+ return switch (self) {
+ Target.Native => builtin.environ,
+ @TagType(Target).Cross => |t| t.environ,
+ };
+ }
+
+ pub fn getObjectFormat(self: Target) builtin.ObjectFormat {
+ return switch (self) {
+ Target.Native => builtin.object_format,
+ @TagType(Target).Cross => |t| t.object_format,
+ };
+ }
+
+ pub fn isWasm(self: Target) bool {
+ return switch (self.getArch()) {
+ builtin.Arch.wasm32, builtin.Arch.wasm64 => true,
+ else => false,
+ };
+ }
+
+ pub fn isDarwin(self: Target) bool {
return switch (self.getOs()) {
builtin.Os.ios, builtin.Os.macosx => true,
else => false,
};
}
- pub fn isWindows(self: &const Target) bool {
+ pub fn isWindows(self: Target) bool {
return switch (self.getOs()) {
builtin.Os.windows => true,
else => false,
};
}
-};
-pub fn initializeAll() void {
- c.LLVMInitializeAllTargets();
- c.LLVMInitializeAllTargetInfos();
- c.LLVMInitializeAllTargetMCs();
- c.LLVMInitializeAllAsmPrinters();
- c.LLVMInitializeAllAsmParsers();
-}
+ /// TODO expose the arch and subarch separately
+ pub fn isArmOrThumb(self: Target) bool {
+ return switch (self.getArch()) {
+ builtin.Arch.armv8_3a,
+ builtin.Arch.armv8_2a,
+ builtin.Arch.armv8_1a,
+ builtin.Arch.armv8,
+ builtin.Arch.armv8r,
+ builtin.Arch.armv8m_baseline,
+ builtin.Arch.armv8m_mainline,
+ builtin.Arch.armv7,
+ builtin.Arch.armv7em,
+ builtin.Arch.armv7m,
+ builtin.Arch.armv7s,
+ builtin.Arch.armv7k,
+ builtin.Arch.armv7ve,
+ builtin.Arch.armv6,
+ builtin.Arch.armv6m,
+ builtin.Arch.armv6k,
+ builtin.Arch.armv6t2,
+ builtin.Arch.armv5,
+ builtin.Arch.armv5te,
+ builtin.Arch.armv4t,
+ builtin.Arch.armebv8_3a,
+ builtin.Arch.armebv8_2a,
+ builtin.Arch.armebv8_1a,
+ builtin.Arch.armebv8,
+ builtin.Arch.armebv8r,
+ builtin.Arch.armebv8m_baseline,
+ builtin.Arch.armebv8m_mainline,
+ builtin.Arch.armebv7,
+ builtin.Arch.armebv7em,
+ builtin.Arch.armebv7m,
+ builtin.Arch.armebv7s,
+ builtin.Arch.armebv7k,
+ builtin.Arch.armebv7ve,
+ builtin.Arch.armebv6,
+ builtin.Arch.armebv6m,
+ builtin.Arch.armebv6k,
+ builtin.Arch.armebv6t2,
+ builtin.Arch.armebv5,
+ builtin.Arch.armebv5te,
+ builtin.Arch.armebv4t,
+ builtin.Arch.thumb,
+ builtin.Arch.thumbeb,
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn initializeAll() void {
+ llvm.InitializeAllTargets();
+ llvm.InitializeAllTargetInfos();
+ llvm.InitializeAllTargetMCs();
+ llvm.InitializeAllAsmPrinters();
+ llvm.InitializeAllAsmParsers();
+ }
+
+ pub fn getTriple(self: Target, allocator: *std.mem.Allocator) !std.Buffer {
+ var result = try std.Buffer.initSize(allocator, 0);
+ errdefer result.deinit();
+
+ // LLVM WebAssembly output support requires the target to be activated at
+        // build time with -DCMAKE_LLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly.
+ //
+ // LLVM determines the output format based on the environment suffix,
+        // defaulting to an object format based on the architecture. The default in
+        // LLVM 6 incorrectly sets the wasm arch output format to ELF, so we need to
+        // set it explicitly ourselves in order for it to work.
+ //
+ // This is fixed in LLVM 7 and you will be able to get wasm output by
+ // using the target triple `wasm32-unknown-unknown-unknown`.
+ const env_name = if (self.isWasm()) "wasm" else @tagName(self.getEnviron());
+
+ var out = &std.io.BufferOutStream.init(&result).stream;
+ try out.print("{}-unknown-{}-{}", @tagName(self.getArch()), @tagName(self.getOs()), env_name);
+
+ return result;
+ }
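+
+    // For example, a native x86_64 Linux/GNU target renders as
+    // "x86_64-unknown-linux-gnu"; any wasm arch forces the "wasm" environment
+    // suffix as described above.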
+
+ pub fn is64bit(self: Target) bool {
+ return self.getArchPtrBitWidth() == 64;
+ }
+
+ pub fn getArchPtrBitWidth(self: Target) u32 {
+ switch (self.getArch()) {
+ builtin.Arch.avr,
+ builtin.Arch.msp430,
+ => return 16,
+
+ builtin.Arch.arc,
+ builtin.Arch.armv8_3a,
+ builtin.Arch.armv8_2a,
+ builtin.Arch.armv8_1a,
+ builtin.Arch.armv8,
+ builtin.Arch.armv8r,
+ builtin.Arch.armv8m_baseline,
+ builtin.Arch.armv8m_mainline,
+ builtin.Arch.armv7,
+ builtin.Arch.armv7em,
+ builtin.Arch.armv7m,
+ builtin.Arch.armv7s,
+ builtin.Arch.armv7k,
+ builtin.Arch.armv7ve,
+ builtin.Arch.armv6,
+ builtin.Arch.armv6m,
+ builtin.Arch.armv6k,
+ builtin.Arch.armv6t2,
+ builtin.Arch.armv5,
+ builtin.Arch.armv5te,
+ builtin.Arch.armv4t,
+ builtin.Arch.armebv8_3a,
+ builtin.Arch.armebv8_2a,
+ builtin.Arch.armebv8_1a,
+ builtin.Arch.armebv8,
+ builtin.Arch.armebv8r,
+ builtin.Arch.armebv8m_baseline,
+ builtin.Arch.armebv8m_mainline,
+ builtin.Arch.armebv7,
+ builtin.Arch.armebv7em,
+ builtin.Arch.armebv7m,
+ builtin.Arch.armebv7s,
+ builtin.Arch.armebv7k,
+ builtin.Arch.armebv7ve,
+ builtin.Arch.armebv6,
+ builtin.Arch.armebv6m,
+ builtin.Arch.armebv6k,
+ builtin.Arch.armebv6t2,
+ builtin.Arch.armebv5,
+ builtin.Arch.armebv5te,
+ builtin.Arch.armebv4t,
+ builtin.Arch.hexagon,
+ builtin.Arch.le32,
+ builtin.Arch.mips,
+ builtin.Arch.mipsel,
+ builtin.Arch.nios2,
+ builtin.Arch.powerpc,
+ builtin.Arch.r600,
+ builtin.Arch.riscv32,
+ builtin.Arch.sparc,
+ builtin.Arch.sparcel,
+ builtin.Arch.tce,
+ builtin.Arch.tcele,
+ builtin.Arch.thumb,
+ builtin.Arch.thumbeb,
+ builtin.Arch.i386,
+ builtin.Arch.xcore,
+ builtin.Arch.nvptx,
+ builtin.Arch.amdil,
+ builtin.Arch.hsail,
+ builtin.Arch.spir,
+ builtin.Arch.kalimbav3,
+ builtin.Arch.kalimbav4,
+ builtin.Arch.kalimbav5,
+ builtin.Arch.shave,
+ builtin.Arch.lanai,
+ builtin.Arch.wasm32,
+ builtin.Arch.renderscript32,
+ => return 32,
+
+ builtin.Arch.aarch64,
+ builtin.Arch.aarch64_be,
+ builtin.Arch.mips64,
+ builtin.Arch.mips64el,
+ builtin.Arch.powerpc64,
+ builtin.Arch.powerpc64le,
+ builtin.Arch.riscv64,
+ builtin.Arch.x86_64,
+ builtin.Arch.nvptx64,
+ builtin.Arch.le64,
+ builtin.Arch.amdil64,
+ builtin.Arch.hsail64,
+ builtin.Arch.spir64,
+ builtin.Arch.wasm64,
+ builtin.Arch.renderscript64,
+ builtin.Arch.amdgcn,
+ builtin.Arch.bpfel,
+ builtin.Arch.bpfeb,
+ builtin.Arch.sparcv9,
+ builtin.Arch.s390x,
+ => return 64,
+ }
+ }
+
+ pub fn getFloatAbi(self: Target) FloatAbi {
+ return switch (self.getEnviron()) {
+ builtin.Environ.gnueabihf,
+ builtin.Environ.eabihf,
+ builtin.Environ.musleabihf,
+ => FloatAbi.Hard,
+ else => FloatAbi.Soft,
+ };
+ }
+
+ pub fn getDynamicLinkerPath(self: Target) ?[]const u8 {
+ const env = self.getEnviron();
+ const arch = self.getArch();
+ switch (env) {
+ builtin.Environ.android => {
+ if (self.is64bit()) {
+ return "/system/bin/linker64";
+ } else {
+ return "/system/bin/linker";
+ }
+ },
+ builtin.Environ.gnux32 => {
+ if (arch == builtin.Arch.x86_64) {
+ return "/libx32/ld-linux-x32.so.2";
+ }
+ },
+ builtin.Environ.musl,
+ builtin.Environ.musleabi,
+ builtin.Environ.musleabihf,
+ => {
+ if (arch == builtin.Arch.x86_64) {
+ return "/lib/ld-musl-x86_64.so.1";
+ }
+ },
+ else => {},
+ }
+ switch (arch) {
+ builtin.Arch.i386,
+ builtin.Arch.sparc,
+ builtin.Arch.sparcel,
+ => return "/lib/ld-linux.so.2",
+
+ builtin.Arch.aarch64 => return "/lib/ld-linux-aarch64.so.1",
+ builtin.Arch.aarch64_be => return "/lib/ld-linux-aarch64_be.so.1",
+
+ builtin.Arch.armv8_3a,
+ builtin.Arch.armv8_2a,
+ builtin.Arch.armv8_1a,
+ builtin.Arch.armv8,
+ builtin.Arch.armv8r,
+ builtin.Arch.armv8m_baseline,
+ builtin.Arch.armv8m_mainline,
+ builtin.Arch.armv7,
+ builtin.Arch.armv7em,
+ builtin.Arch.armv7m,
+ builtin.Arch.armv7s,
+ builtin.Arch.armv7k,
+ builtin.Arch.armv7ve,
+ builtin.Arch.armv6,
+ builtin.Arch.armv6m,
+ builtin.Arch.armv6k,
+ builtin.Arch.armv6t2,
+ builtin.Arch.armv5,
+ builtin.Arch.armv5te,
+ builtin.Arch.armv4t,
+ builtin.Arch.thumb,
+ => return switch (self.getFloatAbi()) {
+ FloatAbi.Hard => return "/lib/ld-linux-armhf.so.3",
+ else => return "/lib/ld-linux.so.3",
+ },
+
+ builtin.Arch.armebv8_3a,
+ builtin.Arch.armebv8_2a,
+ builtin.Arch.armebv8_1a,
+ builtin.Arch.armebv8,
+ builtin.Arch.armebv8r,
+ builtin.Arch.armebv8m_baseline,
+ builtin.Arch.armebv8m_mainline,
+ builtin.Arch.armebv7,
+ builtin.Arch.armebv7em,
+ builtin.Arch.armebv7m,
+ builtin.Arch.armebv7s,
+ builtin.Arch.armebv7k,
+ builtin.Arch.armebv7ve,
+ builtin.Arch.armebv6,
+ builtin.Arch.armebv6m,
+ builtin.Arch.armebv6k,
+ builtin.Arch.armebv6t2,
+ builtin.Arch.armebv5,
+ builtin.Arch.armebv5te,
+ builtin.Arch.armebv4t,
+ builtin.Arch.thumbeb,
+ => return switch (self.getFloatAbi()) {
+ FloatAbi.Hard => return "/lib/ld-linux-armhf.so.3",
+ else => return "/lib/ld-linux.so.3",
+ },
+
+ builtin.Arch.mips,
+ builtin.Arch.mipsel,
+ builtin.Arch.mips64,
+ builtin.Arch.mips64el,
+ => return null,
+
+ builtin.Arch.powerpc => return "/lib/ld.so.1",
+ builtin.Arch.powerpc64 => return "/lib64/ld64.so.2",
+ builtin.Arch.powerpc64le => return "/lib64/ld64.so.2",
+ builtin.Arch.s390x => return "/lib64/ld64.so.1",
+ builtin.Arch.sparcv9 => return "/lib64/ld-linux.so.2",
+ builtin.Arch.x86_64 => return "/lib64/ld-linux-x86-64.so.2",
+
+ builtin.Arch.arc,
+ builtin.Arch.avr,
+ builtin.Arch.bpfel,
+ builtin.Arch.bpfeb,
+ builtin.Arch.hexagon,
+ builtin.Arch.msp430,
+ builtin.Arch.nios2,
+ builtin.Arch.r600,
+ builtin.Arch.amdgcn,
+ builtin.Arch.riscv32,
+ builtin.Arch.riscv64,
+ builtin.Arch.tce,
+ builtin.Arch.tcele,
+ builtin.Arch.xcore,
+ builtin.Arch.nvptx,
+ builtin.Arch.nvptx64,
+ builtin.Arch.le32,
+ builtin.Arch.le64,
+ builtin.Arch.amdil,
+ builtin.Arch.amdil64,
+ builtin.Arch.hsail,
+ builtin.Arch.hsail64,
+ builtin.Arch.spir,
+ builtin.Arch.spir64,
+ builtin.Arch.kalimbav3,
+ builtin.Arch.kalimbav4,
+ builtin.Arch.kalimbav5,
+ builtin.Arch.shave,
+ builtin.Arch.lanai,
+ builtin.Arch.wasm32,
+ builtin.Arch.wasm64,
+ builtin.Arch.renderscript32,
+ builtin.Arch.renderscript64,
+ => return null,
+ }
+ }
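+
+    // For example, x86_64 with a glibc environment resolves to
+    // "/lib64/ld-linux-x86-64.so.2", x86_64 musl resolves to
+    // "/lib/ld-musl-x86_64.so.1", and targets with no conventional dynamic
+    // linker (e.g. wasm) return null.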
+
+ pub fn llvmTargetFromTriple(triple: std.Buffer) !llvm.TargetRef {
+ var result: llvm.TargetRef = undefined;
+ var err_msg: [*]u8 = undefined;
+ if (llvm.GetTargetFromTriple(triple.ptr(), &result, &err_msg) != 0) {
+ std.debug.warn("triple: {s} error: {s}\n", triple.ptr(), err_msg);
+ return error.UnsupportedTarget;
+ }
+ return result;
+ }
+
+ pub fn cIntTypeSizeInBits(self: Target, id: CInt.Id) u32 {
+ const arch = self.getArch();
+ switch (self.getOs()) {
+ builtin.Os.freestanding => switch (self.getArch()) {
+ builtin.Arch.msp430 => switch (id) {
+ CInt.Id.Short,
+ CInt.Id.UShort,
+ CInt.Id.Int,
+ CInt.Id.UInt,
+ => return 16,
+ CInt.Id.Long,
+ CInt.Id.ULong,
+ => return 32,
+ CInt.Id.LongLong,
+ CInt.Id.ULongLong,
+ => return 64,
+ },
+ else => switch (id) {
+ CInt.Id.Short,
+ CInt.Id.UShort,
+ => return 16,
+ CInt.Id.Int,
+ CInt.Id.UInt,
+ => return 32,
+ CInt.Id.Long,
+ CInt.Id.ULong,
+ => return self.getArchPtrBitWidth(),
+ CInt.Id.LongLong,
+ CInt.Id.ULongLong,
+ => return 64,
+ },
+ },
+
+ builtin.Os.linux,
+ builtin.Os.macosx,
+ builtin.Os.openbsd,
+ builtin.Os.zen,
+ => switch (id) {
+ CInt.Id.Short,
+ CInt.Id.UShort,
+ => return 16,
+ CInt.Id.Int,
+ CInt.Id.UInt,
+ => return 32,
+ CInt.Id.Long,
+ CInt.Id.ULong,
+ => return self.getArchPtrBitWidth(),
+ CInt.Id.LongLong,
+ CInt.Id.ULongLong,
+ => return 64,
+ },
+
+ builtin.Os.windows => switch (id) {
+ CInt.Id.Short,
+ CInt.Id.UShort,
+ => return 16,
+ CInt.Id.Int,
+ CInt.Id.UInt,
+ => return 32,
+                // Windows uses the LLP64 model: c_long stays 32 bits even on 64-bit targets.
+                CInt.Id.Long,
+                CInt.Id.ULong,
+                => return 32,
+                CInt.Id.LongLong,
+                CInt.Id.ULongLong,
+                => return 64,
+ },
+
+ builtin.Os.ananas,
+ builtin.Os.cloudabi,
+ builtin.Os.dragonfly,
+ builtin.Os.freebsd,
+ builtin.Os.fuchsia,
+ builtin.Os.ios,
+ builtin.Os.kfreebsd,
+ builtin.Os.lv2,
+ builtin.Os.netbsd,
+ builtin.Os.solaris,
+ builtin.Os.haiku,
+ builtin.Os.minix,
+ builtin.Os.rtems,
+ builtin.Os.nacl,
+ builtin.Os.cnk,
+ builtin.Os.aix,
+ builtin.Os.cuda,
+ builtin.Os.nvcl,
+ builtin.Os.amdhsa,
+ builtin.Os.ps4,
+ builtin.Os.elfiamcu,
+ builtin.Os.tvos,
+ builtin.Os.watchos,
+ builtin.Os.mesa3d,
+ builtin.Os.contiki,
+ builtin.Os.amdpal,
+ => @panic("TODO specify the C integer type sizes for this OS"),
+ }
+ }
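+
+    // For example, on x86_64 Linux c_int is 32 bits while c_long is pointer-sized
+    // (64 bits); Windows keeps c_long at 32 bits per the LLP64 model.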
+
+ pub fn getDarwinArchString(self: Target) []const u8 {
+ const arch = self.getArch();
+ switch (arch) {
+ builtin.Arch.aarch64 => return "arm64",
+ builtin.Arch.thumb,
+ builtin.Arch.armv8_3a,
+ builtin.Arch.armv8_2a,
+ builtin.Arch.armv8_1a,
+ builtin.Arch.armv8,
+ builtin.Arch.armv8r,
+ builtin.Arch.armv8m_baseline,
+ builtin.Arch.armv8m_mainline,
+ builtin.Arch.armv7,
+ builtin.Arch.armv7em,
+ builtin.Arch.armv7m,
+ builtin.Arch.armv7s,
+ builtin.Arch.armv7k,
+ builtin.Arch.armv7ve,
+ builtin.Arch.armv6,
+ builtin.Arch.armv6m,
+ builtin.Arch.armv6k,
+ builtin.Arch.armv6t2,
+ builtin.Arch.armv5,
+ builtin.Arch.armv5te,
+ builtin.Arch.armv4t,
+ => return "arm",
+ builtin.Arch.powerpc => return "ppc",
+ builtin.Arch.powerpc64 => return "ppc64",
+ builtin.Arch.powerpc64le => return "ppc64le",
+ else => return @tagName(arch),
+ }
+ }
+};
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
new file mode 100644
index 0000000000..47e45d1bb0
--- /dev/null
+++ b/src-self-hosted/test.zig
@@ -0,0 +1,243 @@
+const std = @import("std");
+const mem = std.mem;
+const builtin = @import("builtin");
+const Target = @import("target.zig").Target;
+const Compilation = @import("compilation.zig").Compilation;
+const introspect = @import("introspect.zig");
+const assertOrPanic = std.debug.assertOrPanic;
+const errmsg = @import("errmsg.zig");
+const EventLoopLocal = @import("compilation.zig").EventLoopLocal;
+
+var ctx: TestContext = undefined;
+
+test "stage2" {
+ try ctx.init();
+ defer ctx.deinit();
+
+ try @import("../test/stage2/compile_errors.zig").addCases(&ctx);
+ try @import("../test/stage2/compare_output.zig").addCases(&ctx);
+
+ try ctx.run();
+}
+
+const file1 = "1.zig";
+const allocator = std.heap.c_allocator;
+
+pub const TestContext = struct {
+ loop: std.event.Loop,
+ event_loop_local: EventLoopLocal,
+ zig_lib_dir: []u8,
+ file_index: std.atomic.Int(usize),
+ group: std.event.Group(error!void),
+ any_err: error!void,
+
+ const tmp_dir_name = "stage2_test_tmp";
+
+ fn init(self: *TestContext) !void {
+ self.* = TestContext{
+ .any_err = {},
+ .loop = undefined,
+ .event_loop_local = undefined,
+ .zig_lib_dir = undefined,
+ .group = undefined,
+ .file_index = std.atomic.Int(usize).init(0),
+ };
+
+ try self.loop.initMultiThreaded(allocator);
+ errdefer self.loop.deinit();
+
+ self.event_loop_local = try EventLoopLocal.init(&self.loop);
+ errdefer self.event_loop_local.deinit();
+
+ self.group = std.event.Group(error!void).init(&self.loop);
+ errdefer self.group.cancelAll();
+
+ self.zig_lib_dir = try introspect.resolveZigLibDir(allocator);
+ errdefer allocator.free(self.zig_lib_dir);
+
+ try std.os.makePath(allocator, tmp_dir_name);
+ errdefer std.os.deleteTree(allocator, tmp_dir_name) catch {};
+ }
+
+ fn deinit(self: *TestContext) void {
+ std.os.deleteTree(allocator, tmp_dir_name) catch {};
+ allocator.free(self.zig_lib_dir);
+ self.event_loop_local.deinit();
+ self.loop.deinit();
+ }
+
+ fn run(self: *TestContext) !void {
+ const handle = try self.loop.call(waitForGroup, self);
+ defer cancel handle;
+ self.loop.run();
+ return self.any_err;
+ }
+
+ async fn waitForGroup(self: *TestContext) void {
+ self.any_err = await (async self.group.wait() catch unreachable);
+ }
+
+ fn testCompileError(
+ self: *TestContext,
+ source: []const u8,
+ path: []const u8,
+ line: usize,
+ column: usize,
+ msg: []const u8,
+ ) !void {
+ var file_index_buf: [20]u8 = undefined;
+ const file_index = try std.fmt.bufPrint(file_index_buf[0..], "{}", self.file_index.incr());
+ const file1_path = try std.os.path.join(allocator, tmp_dir_name, file_index, file1);
+
+ if (std.os.path.dirname(file1_path)) |dirname| {
+ try std.os.makePath(allocator, dirname);
+ }
+
+ // TODO async I/O
+ try std.io.writeFile(allocator, file1_path, source);
+
+ var comp = try Compilation.create(
+ &self.event_loop_local,
+ "test",
+ file1_path,
+ Target.Native,
+ Compilation.Kind.Obj,
+ builtin.Mode.Debug,
+ true, // is_static
+ self.zig_lib_dir,
+ );
+ errdefer comp.destroy();
+
+ try comp.build();
+
+ try self.group.call(getModuleEvent, comp, source, path, line, column, msg);
+ }
+
+ fn testCompareOutputLibC(
+ self: *TestContext,
+ source: []const u8,
+ expected_output: []const u8,
+ ) !void {
+ var file_index_buf: [20]u8 = undefined;
+ const file_index = try std.fmt.bufPrint(file_index_buf[0..], "{}", self.file_index.incr());
+ const file1_path = try std.os.path.join(allocator, tmp_dir_name, file_index, file1);
+
+ const output_file = try std.fmt.allocPrint(allocator, "{}-out{}", file1_path, Target(Target.Native).exeFileExt());
+ if (std.os.path.dirname(file1_path)) |dirname| {
+ try std.os.makePath(allocator, dirname);
+ }
+
+ // TODO async I/O
+ try std.io.writeFile(allocator, file1_path, source);
+
+ var comp = try Compilation.create(
+ &self.event_loop_local,
+ "test",
+ file1_path,
+ Target.Native,
+ Compilation.Kind.Exe,
+ builtin.Mode.Debug,
+            false, // is_static
+ self.zig_lib_dir,
+ );
+ errdefer comp.destroy();
+
+ _ = try comp.addLinkLib("c", true);
+ comp.link_out_file = output_file;
+ try comp.build();
+
+ try self.group.call(getModuleEventSuccess, comp, output_file, expected_output);
+ }
+
+ async fn getModuleEventSuccess(
+ comp: *Compilation,
+ exe_file: []const u8,
+ expected_output: []const u8,
+ ) !void {
+ // TODO this should not be necessary
+ const exe_file_2 = try std.mem.dupe(allocator, u8, exe_file);
+
+ defer comp.destroy();
+ const build_event = await (async comp.events.get() catch unreachable);
+
+ switch (build_event) {
+ Compilation.Event.Ok => {
+ const argv = []const []const u8{exe_file_2};
+ // TODO use event loop
+ const child = try std.os.ChildProcess.exec(allocator, argv, null, null, 1024 * 1024);
+ switch (child.term) {
+ std.os.ChildProcess.Term.Exited => |code| {
+ if (code != 0) {
+ return error.BadReturnCode;
+ }
+ },
+ else => {
+ return error.Crashed;
+ },
+ }
+ if (!mem.eql(u8, child.stdout, expected_output)) {
+ return error.OutputMismatch;
+ }
+ },
+ Compilation.Event.Error => |err| return err,
+ Compilation.Event.Fail => |msgs| {
+ var stderr = try std.io.getStdErr();
+ try stderr.write("build incorrectly failed:\n");
+ for (msgs) |msg| {
+ defer msg.destroy();
+ try msg.printToFile(&stderr, errmsg.Color.Auto);
+ }
+ },
+ }
+ }
+
+ async fn getModuleEvent(
+ comp: *Compilation,
+ source: []const u8,
+ path: []const u8,
+ line: usize,
+ column: usize,
+ text: []const u8,
+ ) !void {
+ defer comp.destroy();
+ const build_event = await (async comp.events.get() catch unreachable);
+
+ switch (build_event) {
+ Compilation.Event.Ok => {
+ @panic("build incorrectly succeeded");
+ },
+ Compilation.Event.Error => |err| {
+ @panic("build incorrectly failed");
+ },
+ Compilation.Event.Fail => |msgs| {
+ assertOrPanic(msgs.len != 0);
+ for (msgs) |msg| {
+ if (mem.endsWith(u8, msg.getRealPath(), path) and mem.eql(u8, msg.text, text)) {
+ const first_token = msg.getTree().tokens.at(msg.span.first);
+                    const last_token = msg.getTree().tokens.at(msg.span.last);
+ const start_loc = msg.getTree().tokenLocationPtr(0, first_token);
+ if (start_loc.line + 1 == line and start_loc.column + 1 == column) {
+ return;
+ }
+ }
+ }
+ std.debug.warn(
+ "\n=====source:=======\n{}\n====expected:========\n{}:{}:{}: error: {}\n",
+ source,
+ path,
+ line,
+ column,
+ text,
+ );
+ std.debug.warn("\n====found:========\n");
+ var stderr = try std.io.getStdErr();
+ for (msgs) |msg| {
+ defer msg.destroy();
+ try msg.printToFile(&stderr, errmsg.Color.Auto);
+ }
+ std.debug.warn("============\n");
+ return error.TestFailed;
+ },
+ }
+ }
+};
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
new file mode 100644
index 0000000000..6783130fc7
--- /dev/null
+++ b/src-self-hosted/type.zig
@@ -0,0 +1,1101 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Scope = @import("scope.zig").Scope;
+const Compilation = @import("compilation.zig").Compilation;
+const Value = @import("value.zig").Value;
+const llvm = @import("llvm.zig");
+const event = std.event;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+
+pub const Type = struct {
+ base: Value,
+ id: Id,
+ name: []const u8,
+ abi_alignment: AbiAlignment,
+
+ pub const AbiAlignment = event.Future(error{OutOfMemory}!u32);
+
+ pub const Id = builtin.TypeId;
+
+ pub fn destroy(base: *Type, comp: *Compilation) void {
+ switch (base.id) {
+ Id.Struct => @fieldParentPtr(Struct, "base", base).destroy(comp),
+ Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(comp),
+ Id.Type => @fieldParentPtr(MetaType, "base", base).destroy(comp),
+ Id.Void => @fieldParentPtr(Void, "base", base).destroy(comp),
+ Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(comp),
+ Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(comp),
+ Id.Int => @fieldParentPtr(Int, "base", base).destroy(comp),
+ Id.Float => @fieldParentPtr(Float, "base", base).destroy(comp),
+ Id.Pointer => @fieldParentPtr(Pointer, "base", base).destroy(comp),
+ Id.Array => @fieldParentPtr(Array, "base", base).destroy(comp),
+ Id.ComptimeFloat => @fieldParentPtr(ComptimeFloat, "base", base).destroy(comp),
+ Id.ComptimeInt => @fieldParentPtr(ComptimeInt, "base", base).destroy(comp),
+ Id.Undefined => @fieldParentPtr(Undefined, "base", base).destroy(comp),
+ Id.Null => @fieldParentPtr(Null, "base", base).destroy(comp),
+ Id.Optional => @fieldParentPtr(Optional, "base", base).destroy(comp),
+ Id.ErrorUnion => @fieldParentPtr(ErrorUnion, "base", base).destroy(comp),
+ Id.ErrorSet => @fieldParentPtr(ErrorSet, "base", base).destroy(comp),
+ Id.Enum => @fieldParentPtr(Enum, "base", base).destroy(comp),
+ Id.Union => @fieldParentPtr(Union, "base", base).destroy(comp),
+ Id.Namespace => @fieldParentPtr(Namespace, "base", base).destroy(comp),
+ Id.Block => @fieldParentPtr(Block, "base", base).destroy(comp),
+ Id.BoundFn => @fieldParentPtr(BoundFn, "base", base).destroy(comp),
+ Id.ArgTuple => @fieldParentPtr(ArgTuple, "base", base).destroy(comp),
+ Id.Opaque => @fieldParentPtr(Opaque, "base", base).destroy(comp),
+ Id.Promise => @fieldParentPtr(Promise, "base", base).destroy(comp),
+ }
+ }
+
+ pub fn getLlvmType(
+ base: *Type,
+ allocator: *Allocator,
+ llvm_context: llvm.ContextRef,
+ ) (error{OutOfMemory}!llvm.TypeRef) {
+ switch (base.id) {
+ Id.Struct => return @fieldParentPtr(Struct, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Fn => return @fieldParentPtr(Fn, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Type => unreachable,
+ Id.Void => unreachable,
+ Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmType(allocator, llvm_context),
+ Id.NoReturn => unreachable,
+ Id.Int => return @fieldParentPtr(Int, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Float => return @fieldParentPtr(Float, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Pointer => return @fieldParentPtr(Pointer, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Array => return @fieldParentPtr(Array, "base", base).getLlvmType(allocator, llvm_context),
+ Id.ComptimeFloat => unreachable,
+ Id.ComptimeInt => unreachable,
+ Id.Undefined => unreachable,
+ Id.Null => unreachable,
+ Id.Optional => return @fieldParentPtr(Optional, "base", base).getLlvmType(allocator, llvm_context),
+ Id.ErrorUnion => return @fieldParentPtr(ErrorUnion, "base", base).getLlvmType(allocator, llvm_context),
+ Id.ErrorSet => return @fieldParentPtr(ErrorSet, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Enum => return @fieldParentPtr(Enum, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Union => return @fieldParentPtr(Union, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Namespace => unreachable,
+ Id.Block => unreachable,
+ Id.BoundFn => return @fieldParentPtr(BoundFn, "base", base).getLlvmType(allocator, llvm_context),
+ Id.ArgTuple => unreachable,
+ Id.Opaque => return @fieldParentPtr(Opaque, "base", base).getLlvmType(allocator, llvm_context),
+ Id.Promise => return @fieldParentPtr(Promise, "base", base).getLlvmType(allocator, llvm_context),
+ }
+ }
+
+ pub fn handleIsPtr(base: *Type) bool {
+ switch (base.id) {
+ Id.Type,
+ Id.ComptimeFloat,
+ Id.ComptimeInt,
+ Id.Undefined,
+ Id.Null,
+ Id.Namespace,
+ Id.Block,
+ Id.BoundFn,
+ Id.ArgTuple,
+ Id.Opaque,
+ => unreachable,
+
+ Id.NoReturn,
+ Id.Void,
+ Id.Bool,
+ Id.Int,
+ Id.Float,
+ Id.Pointer,
+ Id.ErrorSet,
+ Id.Enum,
+ Id.Fn,
+ Id.Promise,
+ => return false,
+
+ Id.Struct => @panic("TODO"),
+ Id.Array => @panic("TODO"),
+ Id.Optional => @panic("TODO"),
+ Id.ErrorUnion => @panic("TODO"),
+ Id.Union => @panic("TODO"),
+ }
+ }
+
+ pub fn hasBits(base: *Type) bool {
+ switch (base.id) {
+ Id.Type,
+ Id.ComptimeFloat,
+ Id.ComptimeInt,
+ Id.Undefined,
+ Id.Null,
+ Id.Namespace,
+ Id.Block,
+ Id.BoundFn,
+ Id.ArgTuple,
+ Id.Opaque,
+ => unreachable,
+
+ Id.Void,
+ Id.NoReturn,
+ => return false,
+
+ Id.Bool,
+ Id.Int,
+ Id.Float,
+ Id.Fn,
+ Id.Promise,
+ => return true,
+
+ Id.Pointer => {
+ const ptr_type = @fieldParentPtr(Pointer, "base", base);
+ return ptr_type.key.child_type.hasBits();
+ },
+
+ Id.ErrorSet => @panic("TODO"),
+ Id.Enum => @panic("TODO"),
+ Id.Struct => @panic("TODO"),
+ Id.Array => @panic("TODO"),
+ Id.Optional => @panic("TODO"),
+ Id.ErrorUnion => @panic("TODO"),
+ Id.Union => @panic("TODO"),
+ }
+ }
+
+ pub fn cast(base: *Type, comptime T: type) ?*T {
+ if (base.id != @field(Id, @typeName(T))) return null;
+ return @fieldParentPtr(T, "base", base);
+ }
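+
+    // Illustrative usage (hypothetical variable name): downcast a generic *Type to a
+    // concrete variant, getting null when the id does not match.
+    //
+    //     if (some_type.cast(Int)) |int_type| {
+    //         std.debug.warn("{} bits\n", int_type.key.bit_count);
+    //     }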
+
+ pub fn dump(base: *const Type) void {
+ std.debug.warn("{}", @tagName(base.id));
+ }
+
+ fn init(base: *Type, comp: *Compilation, id: Id, name: []const u8) void {
+ base.* = Type{
+ .base = Value{
+ .id = Value.Id.Type,
+ .typ = &MetaType.get(comp).base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .id = id,
+ .name = name,
+ .abi_alignment = AbiAlignment.init(comp.loop),
+ };
+ }
+
+ /// If you happen to have an llvm context handy, use getAbiAlignmentInContext instead.
+ /// Otherwise, this one will grab one from the pool and then release it.
+ pub async fn getAbiAlignment(base: *Type, comp: *Compilation) !u32 {
+ if (await (async base.abi_alignment.start() catch unreachable)) |ptr| return ptr.*;
+
+ {
+ const held = try comp.event_loop_local.getAnyLlvmContext();
+ defer held.release(comp.event_loop_local);
+
+ const llvm_context = held.node.data;
+
+ base.abi_alignment.data = await (async base.resolveAbiAlignment(comp, llvm_context) catch unreachable);
+ }
+ base.abi_alignment.resolve();
+ return base.abi_alignment.data;
+ }
+
+    /// If you have an llvm context handy, you can use it here.
+ pub async fn getAbiAlignmentInContext(base: *Type, comp: *Compilation, llvm_context: llvm.ContextRef) !u32 {
+ if (await (async base.abi_alignment.start() catch unreachable)) |ptr| return ptr.*;
+
+ base.abi_alignment.data = await (async base.resolveAbiAlignment(comp, llvm_context) catch unreachable);
+ base.abi_alignment.resolve();
+ return base.abi_alignment.data;
+ }
+
+ /// Lower level function that does the work. See getAbiAlignment.
+ async fn resolveAbiAlignment(base: *Type, comp: *Compilation, llvm_context: llvm.ContextRef) !u32 {
+ const llvm_type = try base.getLlvmType(comp.gpa(), llvm_context);
+ return @intCast(u32, llvm.ABIAlignmentOfType(comp.target_data_ref, llvm_type));
+ }
+
+ pub const Struct = struct {
+ base: Type,
+ decls: *Scope.Decls,
+
+ pub fn destroy(self: *Struct, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Struct, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const Fn = struct {
+ base: Type,
+ key: Key,
+ non_key: NonKey,
+ garbage_node: std.atomic.Stack(*Fn).Node,
+
+ pub const Kind = enum {
+ Normal,
+ Generic,
+ };
+
+ pub const NonKey = union {
+ Normal: Normal,
+ Generic: void,
+
+ pub const Normal = struct {
+ variable_list: std.ArrayList(*Scope.Var),
+ };
+ };
+
+ pub const Key = struct {
+ data: Data,
+ alignment: ?u32,
+
+ pub const Data = union(Kind) {
+ Generic: Generic,
+ Normal: Normal,
+ };
+
+ pub const Normal = struct {
+ params: []Param,
+ return_type: *Type,
+ is_var_args: bool,
+ cc: CallingConvention,
+ };
+
+ pub const Generic = struct {
+ param_count: usize,
+ cc: CC,
+
+ pub const CC = union(CallingConvention) {
+ Auto,
+ C,
+ Cold,
+ Naked,
+ Stdcall,
+ Async: *Type, // allocator type
+ };
+ };
+
+ pub fn hash(self: *const Key) u32 {
+ var result: u32 = 0;
+ result +%= hashAny(self.alignment, 0);
+ switch (self.data) {
+ Kind.Generic => |generic| {
+ result +%= hashAny(generic.param_count, 1);
+ switch (generic.cc) {
+ CallingConvention.Async => |allocator_type| result +%= hashAny(allocator_type, 2),
+ else => result +%= hashAny(CallingConvention(generic.cc), 3),
+ }
+ },
+ Kind.Normal => |normal| {
+ result +%= hashAny(normal.return_type, 4);
+ result +%= hashAny(normal.is_var_args, 5);
+ result +%= hashAny(normal.cc, 6);
+ for (normal.params) |param| {
+ result +%= hashAny(param.is_noalias, 7);
+ result +%= hashAny(param.typ, 8);
+ }
+ },
+ }
+ return result;
+ }
+
+ pub fn eql(self: *const Key, other: *const Key) bool {
+ if ((self.alignment == null) != (other.alignment == null)) return false;
+ if (self.alignment) |self_align| {
+ if (self_align != other.alignment.?) return false;
+ }
+ if (@TagType(Data)(self.data) != @TagType(Data)(other.data)) return false;
+ switch (self.data) {
+ Kind.Generic => |*self_generic| {
+ const other_generic = &other.data.Generic;
+ if (self_generic.param_count != other_generic.param_count) return false;
+ if (CallingConvention(self_generic.cc) != CallingConvention(other_generic.cc)) return false;
+ switch (self_generic.cc) {
+ CallingConvention.Async => |self_allocator_type| {
+ const other_allocator_type = other_generic.cc.Async;
+ if (self_allocator_type != other_allocator_type) return false;
+ },
+ else => {},
+ }
+ },
+ Kind.Normal => |*self_normal| {
+ const other_normal = &other.data.Normal;
+ if (self_normal.cc != other_normal.cc) return false;
+ if (self_normal.is_var_args != other_normal.is_var_args) return false;
+ if (self_normal.return_type != other_normal.return_type) return false;
+ for (self_normal.params) |*self_param, i| {
+ const other_param = &other_normal.params[i];
+ if (self_param.is_noalias != other_param.is_noalias) return false;
+ if (self_param.typ != other_param.typ) return false;
+ }
+ },
+ }
+ return true;
+ }
+
+ pub fn deref(key: Key, comp: *Compilation) void {
+ switch (key.data) {
+ Kind.Generic => |generic| {
+ switch (generic.cc) {
+ CallingConvention.Async => |allocator_type| allocator_type.base.deref(comp),
+ else => {},
+ }
+ },
+ Kind.Normal => |normal| {
+ normal.return_type.base.deref(comp);
+ for (normal.params) |param| {
+ param.typ.base.deref(comp);
+ }
+ },
+ }
+ }
+
+ pub fn ref(key: Key) void {
+ switch (key.data) {
+ Kind.Generic => |generic| {
+ switch (generic.cc) {
+ CallingConvention.Async => |allocator_type| allocator_type.base.ref(),
+ else => {},
+ }
+ },
+ Kind.Normal => |normal| {
+ normal.return_type.base.ref();
+ for (normal.params) |param| {
+ param.typ.base.ref();
+ }
+ },
+ }
+ }
+ };
+
+ pub const CallingConvention = enum {
+ Auto,
+ C,
+ Cold,
+ Naked,
+ Stdcall,
+ Async,
+ };
+
+ pub const Param = struct {
+ is_noalias: bool,
+ typ: *Type,
+ };
+
+ fn ccFnTypeStr(cc: CallingConvention) []const u8 {
+ return switch (cc) {
+ CallingConvention.Auto => "",
+ CallingConvention.C => "extern ",
+ CallingConvention.Cold => "coldcc ",
+ CallingConvention.Naked => "nakedcc ",
+ CallingConvention.Stdcall => "stdcallcc ",
+ CallingConvention.Async => unreachable,
+ };
+ }
+
+ pub fn paramCount(self: *Fn) usize {
+ return switch (self.key.data) {
+ Kind.Generic => |generic| generic.param_count,
+ Kind.Normal => |normal| normal.params.len,
+ };
+ }
+
+ /// takes ownership of key.Normal.params on success
+ pub async fn get(comp: *Compilation, key: Key) !*Fn {
+ {
+ const held = await (async comp.fn_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ if (held.value.get(&key)) |entry| {
+ entry.value.base.base.ref();
+ return entry.value;
+ }
+ }
+
+ key.ref();
+ errdefer key.deref(comp);
+
+ const self = try comp.gpa().createOne(Fn);
+ self.* = Fn{
+ .base = undefined,
+ .key = key,
+ .non_key = undefined,
+ .garbage_node = undefined,
+ };
+ errdefer comp.gpa().destroy(self);
+
+ var name_buf = try std.Buffer.initSize(comp.gpa(), 0);
+ defer name_buf.deinit();
+
+ const name_stream = &std.io.BufferOutStream.init(&name_buf).stream;
+
+ switch (key.data) {
+ Kind.Generic => |generic| {
+ self.non_key = NonKey{ .Generic = {} };
+ switch (generic.cc) {
+ CallingConvention.Async => |async_allocator_type| {
+ try name_stream.print("async<{}> ", async_allocator_type.name);
+ },
+ else => {
+ const cc_str = ccFnTypeStr(generic.cc);
+ try name_stream.write(cc_str);
+ },
+ }
+ try name_stream.write("fn(");
+ var param_i: usize = 0;
+ while (param_i < generic.param_count) : (param_i += 1) {
+ const arg = if (param_i == 0) "var" else ", var";
+ try name_stream.write(arg);
+ }
+ try name_stream.write(")");
+ if (key.alignment) |alignment| {
+ try name_stream.print(" align<{}>", alignment);
+ }
+ try name_stream.write(" var");
+ },
+ Kind.Normal => |normal| {
+ self.non_key = NonKey{
+ .Normal = NonKey.Normal{ .variable_list = std.ArrayList(*Scope.Var).init(comp.gpa()) },
+ };
+ const cc_str = ccFnTypeStr(normal.cc);
+ try name_stream.print("{}fn(", cc_str);
+ for (normal.params) |param, i| {
+ if (i != 0) try name_stream.write(", ");
+ if (param.is_noalias) try name_stream.write("noalias ");
+ try name_stream.write(param.typ.name);
+ }
+ if (normal.is_var_args) {
+ if (normal.params.len != 0) try name_stream.write(", ");
+ try name_stream.write("...");
+ }
+ try name_stream.write(")");
+ if (key.alignment) |alignment| {
+ try name_stream.print(" align<{}>", alignment);
+ }
+ try name_stream.print(" {}", normal.return_type.name);
+ },
+ }
+
+ self.base.init(comp, Id.Fn, name_buf.toOwnedSlice());
+
+ {
+ const held = await (async comp.fn_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = try held.value.put(&self.key, self);
+ }
+ return self;
+ }
+
+ pub fn destroy(self: *Fn, comp: *Compilation) void {
+ self.key.deref(comp);
+ switch (self.key.data) {
+ Kind.Generic => {},
+ Kind.Normal => {
+ self.non_key.Normal.variable_list.deinit();
+ },
+ }
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Fn, allocator: *Allocator, llvm_context: llvm.ContextRef) !llvm.TypeRef {
+ const normal = &self.key.data.Normal;
+ const llvm_return_type = switch (normal.return_type.id) {
+ Type.Id.Void => llvm.VoidTypeInContext(llvm_context) orelse return error.OutOfMemory,
+ else => try normal.return_type.getLlvmType(allocator, llvm_context),
+ };
+ const llvm_param_types = try allocator.alloc(llvm.TypeRef, normal.params.len);
+ defer allocator.free(llvm_param_types);
+ for (llvm_param_types) |*llvm_param_type, i| {
+ llvm_param_type.* = try normal.params[i].typ.getLlvmType(allocator, llvm_context);
+ }
+
+ return llvm.FunctionType(
+ llvm_return_type,
+ llvm_param_types.ptr,
+ @intCast(c_uint, llvm_param_types.len),
+ @boolToInt(normal.is_var_args),
+ ) orelse error.OutOfMemory;
+ }
+ };
+
+ pub const MetaType = struct {
+ base: Type,
+ value: *Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(comp: *Compilation) *MetaType {
+ comp.meta_type.base.base.ref();
+ return comp.meta_type;
+ }
+
+ pub fn destroy(self: *MetaType, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Void = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(comp: *Compilation) *Void {
+ comp.void_type.base.base.ref();
+ return comp.void_type;
+ }
+
+ pub fn destroy(self: *Void, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Bool = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(comp: *Compilation) *Bool {
+ comp.bool_type.base.base.ref();
+ return comp.bool_type;
+ }
+
+ pub fn destroy(self: *Bool, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Bool, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const NoReturn = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(comp: *Compilation) *NoReturn {
+ comp.noreturn_type.base.base.ref();
+ return comp.noreturn_type;
+ }
+
+ pub fn destroy(self: *NoReturn, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Int = struct {
+ base: Type,
+ key: Key,
+ garbage_node: std.atomic.Stack(*Int).Node,
+
+ pub const Key = struct {
+ bit_count: u32,
+ is_signed: bool,
+
+ pub fn hash(self: *const Key) u32 {
+ var result: u32 = 0;
+ result +%= hashAny(self.is_signed, 0);
+ result +%= hashAny(self.bit_count, 1);
+ return result;
+ }
+
+ pub fn eql(self: *const Key, other: *const Key) bool {
+ return self.bit_count == other.bit_count and self.is_signed == other.is_signed;
+ }
+ };
+
+ pub fn get_u8(comp: *Compilation) *Int {
+ comp.u8_type.base.base.ref();
+ return comp.u8_type;
+ }
+
+ pub async fn get(comp: *Compilation, key: Key) !*Int {
+ {
+ const held = await (async comp.int_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ if (held.value.get(&key)) |entry| {
+ entry.value.base.base.ref();
+ return entry.value;
+ }
+ }
+
+ const self = try comp.gpa().create(Int{
+ .base = undefined,
+ .key = key,
+ .garbage_node = undefined,
+ });
+ errdefer comp.gpa().destroy(self);
+
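+        // Picks 'u' (index 0) for unsigned or 'i' (index 1) for signed, e.g. "u32" vs "i32".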
+ const u_or_i = "ui"[@boolToInt(key.is_signed)];
+ const name = try std.fmt.allocPrint(comp.gpa(), "{c}{}", u_or_i, key.bit_count);
+ errdefer comp.gpa().free(name);
+
+ self.base.init(comp, Id.Int, name);
+
+ {
+ const held = await (async comp.int_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = try held.value.put(&self.key, self);
+ }
+ return self;
+ }
+
+ pub fn destroy(self: *Int, comp: *Compilation) void {
+ self.garbage_node = std.atomic.Stack(*Int).Node{
+ .data = self,
+ .next = undefined,
+ };
+ comp.registerGarbage(Int, &self.garbage_node);
+ }
+
+ pub async fn gcDestroy(self: *Int, comp: *Compilation) void {
+ {
+ const held = await (async comp.int_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = held.value.remove(&self.key).?;
+ }
+ // we allocated the name
+ comp.gpa().free(self.base.name);
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Int, allocator: *Allocator, llvm_context: llvm.ContextRef) !llvm.TypeRef {
+ return llvm.IntTypeInContext(llvm_context, self.key.bit_count) orelse return error.OutOfMemory;
+ }
+ };
+
+ pub const Float = struct {
+ base: Type,
+
+ pub fn destroy(self: *Float, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Float, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+ pub const Pointer = struct {
+ base: Type,
+ key: Key,
+ garbage_node: std.atomic.Stack(*Pointer).Node,
+
+ pub const Key = struct {
+ child_type: *Type,
+ mut: Mut,
+ vol: Vol,
+ size: Size,
+ alignment: Align,
+
+ pub fn hash(self: *const Key) u32 {
+ var result: u32 = 0;
+ result +%= switch (self.alignment) {
+ Align.Abi => 0xf201c090,
+ Align.Override => |x| hashAny(x, 0),
+ };
+ result +%= hashAny(self.child_type, 1);
+ result +%= hashAny(self.mut, 2);
+ result +%= hashAny(self.vol, 3);
+ result +%= hashAny(self.size, 4);
+ return result;
+ }
+
+ pub fn eql(self: *const Key, other: *const Key) bool {
+ if (self.child_type != other.child_type or
+ self.mut != other.mut or
+ self.vol != other.vol or
+ self.size != other.size or
+ @TagType(Align)(self.alignment) != @TagType(Align)(other.alignment))
+ {
+ return false;
+ }
+ switch (self.alignment) {
+ Align.Abi => return true,
+ Align.Override => |x| return x == other.alignment.Override,
+ }
+ }
+ };
+
+ pub const Mut = enum {
+ Mut,
+ Const,
+ };
+
+ pub const Vol = enum {
+ Non,
+ Volatile,
+ };
+
+ pub const Align = union(enum) {
+ Abi,
+ Override: u32,
+ };
+
+ pub const Size = builtin.TypeInfo.Pointer.Size;
+
+ pub fn destroy(self: *Pointer, comp: *Compilation) void {
+ self.garbage_node = std.atomic.Stack(*Pointer).Node{
+ .data = self,
+ .next = undefined,
+ };
+ comp.registerGarbage(Pointer, &self.garbage_node);
+ }
+
+ pub async fn gcDestroy(self: *Pointer, comp: *Compilation) void {
+ {
+ const held = await (async comp.ptr_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = held.value.remove(&self.key).?;
+ }
+ self.key.child_type.base.deref(comp);
+ comp.gpa().destroy(self);
+ }
+
+ pub async fn getAlignAsInt(self: *Pointer, comp: *Compilation) u32 {
+ switch (self.key.alignment) {
+ Align.Abi => return await (async self.key.child_type.getAbiAlignment(comp) catch unreachable),
+ Align.Override => |alignment| return alignment,
+ }
+ }
+
+ pub async fn get(
+ comp: *Compilation,
+ key: Key,
+ ) !*Pointer {
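+ // Normalize an explicit alignment that equals the child type's ABI alignment down to Align.Abi, so equivalent pointer types intern to the same entry.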
+ var normal_key = key;
+ switch (key.alignment) {
+ Align.Abi => {},
+ Align.Override => |alignment| {
+ const abi_align = try await (async key.child_type.getAbiAlignment(comp) catch unreachable);
+ if (abi_align == alignment) {
+ normal_key.alignment = Align.Abi;
+ }
+ },
+ }
+ {
+ const held = await (async comp.ptr_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ if (held.value.get(&normal_key)) |entry| {
+ entry.value.base.base.ref();
+ return entry.value;
+ }
+ }
+
+ const self = try comp.gpa().create(Pointer{
+ .base = undefined,
+ .key = normal_key,
+ .garbage_node = undefined,
+ });
+ errdefer comp.gpa().destroy(self);
+
+ const size_str = switch (self.key.size) {
+ Size.One => "*",
+ Size.Many => "[*]",
+ Size.Slice => "[]",
+ };
+ const mut_str = switch (self.key.mut) {
+ Mut.Const => "const ",
+ Mut.Mut => "",
+ };
+ const vol_str = switch (self.key.vol) {
+ Vol.Volatile => "volatile ",
+ Vol.Non => "",
+ };
+ const name = switch (self.key.alignment) {
+ Align.Abi => try std.fmt.allocPrint(
+ comp.gpa(),
+ "{}{}{}{}",
+ size_str,
+ mut_str,
+ vol_str,
+ self.key.child_type.name,
+ ),
+ Align.Override => |alignment| try std.fmt.allocPrint(
+ comp.gpa(),
+ "{}align<{}> {}{}{}",
+ size_str,
+ alignment,
+ mut_str,
+ vol_str,
+ self.key.child_type.name,
+ ),
+ };
+ errdefer comp.gpa().free(name);
+
+ self.base.init(comp, Id.Pointer, name);
+
+ {
+ const held = await (async comp.ptr_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = try held.value.put(&self.key, self);
+ }
+ return self;
+ }
+
+ pub fn getLlvmType(self: *Pointer, allocator: *Allocator, llvm_context: llvm.ContextRef) !llvm.TypeRef {
+ const elem_llvm_type = try self.key.child_type.getLlvmType(allocator, llvm_context);
+ return llvm.PointerType(elem_llvm_type, 0) orelse return error.OutOfMemory;
+ }
+ };
+
+ pub const Array = struct {
+ base: Type,
+ key: Key,
+ garbage_node: std.atomic.Stack(*Array).Node,
+
+ pub const Key = struct {
+ elem_type: *Type,
+ len: usize,
+
+ pub fn hash(self: *const Key) u32 {
+ var result: u32 = 0;
+ result +%= hashAny(self.elem_type, 0);
+ result +%= hashAny(self.len, 1);
+ return result;
+ }
+
+ pub fn eql(self: *const Key, other: *const Key) bool {
+ return self.elem_type == other.elem_type and self.len == other.len;
+ }
+ };
+
+ pub fn destroy(self: *Array, comp: *Compilation) void {
+ self.key.elem_type.base.deref(comp);
+ comp.gpa().destroy(self);
+ }
+
+ pub async fn get(comp: *Compilation, key: Key) !*Array {
+ key.elem_type.base.ref();
+ errdefer key.elem_type.base.deref(comp);
+
+ {
+ const held = await (async comp.array_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ if (held.value.get(&key)) |entry| {
+ entry.value.base.base.ref();
+ return entry.value;
+ }
+ }
+
+ const self = try comp.gpa().create(Array{
+ .base = undefined,
+ .key = key,
+ .garbage_node = undefined,
+ });
+ errdefer comp.gpa().destroy(self);
+
+ const name = try std.fmt.allocPrint(comp.gpa(), "[{}]{}", key.len, key.elem_type.name);
+ errdefer comp.gpa().free(name);
+
+ self.base.init(comp, Id.Array, name);
+
+ {
+ const held = await (async comp.array_type_table.acquire() catch unreachable);
+ defer held.release();
+
+ _ = try held.value.put(&self.key, self);
+ }
+ return self;
+ }
+
+ pub fn getLlvmType(self: *Array, allocator: *Allocator, llvm_context: llvm.ContextRef) !llvm.TypeRef {
+ const elem_llvm_type = try self.key.elem_type.getLlvmType(allocator, llvm_context);
+ return llvm.ArrayType(elem_llvm_type, @intCast(c_uint, self.key.len)) orelse return error.OutOfMemory;
+ }
+ };
+
+ pub const ComptimeFloat = struct {
+ base: Type,
+
+ pub fn destroy(self: *ComptimeFloat, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const ComptimeInt = struct {
+ base: Type,
+
+ /// Adds 1 reference to the resulting type
+ pub fn get(comp: *Compilation) *ComptimeInt {
+ comp.comptime_int_type.base.base.ref();
+ return comp.comptime_int_type;
+ }
+
+ pub fn destroy(self: *ComptimeInt, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Undefined = struct {
+ base: Type,
+
+ pub fn destroy(self: *Undefined, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Null = struct {
+ base: Type,
+
+ pub fn destroy(self: *Null, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Optional = struct {
+ base: Type,
+
+ pub fn destroy(self: *Optional, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Optional, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const ErrorUnion = struct {
+ base: Type,
+
+ pub fn destroy(self: *ErrorUnion, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *ErrorUnion, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const ErrorSet = struct {
+ base: Type,
+
+ pub fn destroy(self: *ErrorSet, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *ErrorSet, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const Enum = struct {
+ base: Type,
+
+ pub fn destroy(self: *Enum, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Enum, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const Union = struct {
+ base: Type,
+
+ pub fn destroy(self: *Union, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Union, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const Namespace = struct {
+ base: Type,
+
+ pub fn destroy(self: *Namespace, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Block = struct {
+ base: Type,
+
+ pub fn destroy(self: *Block, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const BoundFn = struct {
+ base: Type,
+
+ pub fn destroy(self: *BoundFn, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *BoundFn, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const ArgTuple = struct {
+ base: Type,
+
+ pub fn destroy(self: *ArgTuple, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Opaque = struct {
+ base: Type,
+
+ pub fn destroy(self: *Opaque, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Opaque, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+
+ pub const Promise = struct {
+ base: Type,
+
+ pub fn destroy(self: *Promise, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmType(self: *Promise, allocator: *Allocator, llvm_context: llvm.ContextRef) llvm.TypeRef {
+ @panic("TODO");
+ }
+ };
+};
+
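+// Mixes x into a u32 hash using comptime pseudo-random constants derived from seed;
+// callers pass a distinct seed per field so the summed per-field hashes stay independent.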
+fn hashAny(x: var, comptime seed: u64) u32 {
+ switch (@typeInfo(@typeOf(x))) {
+ builtin.TypeId.Int => |info| {
+ comptime var rng = comptime std.rand.DefaultPrng.init(seed);
+ const unsigned_x = @bitCast(@IntType(false, info.bits), x);
+ if (info.bits <= 32) {
+ return u32(unsigned_x) *% comptime rng.random.scalar(u32);
+ } else {
+ return @truncate(u32, unsigned_x *% comptime rng.random.scalar(@typeOf(unsigned_x)));
+ }
+ },
+ builtin.TypeId.Pointer => |info| {
+ switch (info.size) {
+ builtin.TypeInfo.Pointer.Size.One => return hashAny(@ptrToInt(x), seed),
+ builtin.TypeInfo.Pointer.Size.Many => @compileError("implement hash function"),
+ builtin.TypeInfo.Pointer.Size.Slice => @compileError("implement hash function"),
+ }
+ },
+ builtin.TypeId.Enum => return hashAny(@enumToInt(x), seed),
+ builtin.TypeId.Bool => {
+ comptime var rng = comptime std.rand.DefaultPrng.init(seed);
+ const vals = comptime [2]u32{ rng.random.scalar(u32), rng.random.scalar(u32) };
+ return vals[@boolToInt(x)];
+ },
+ builtin.TypeId.Optional => {
+ if (x) |non_opt| {
+ return hashAny(non_opt, seed);
+ } else {
+ return hashAny(u32(1), seed);
+ }
+ },
+ else => @compileError("implement hash function for " ++ @typeName(@typeOf(x))),
+ }
+}
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
new file mode 100644
index 0000000000..e6dca4eff7
--- /dev/null
+++ b/src-self-hosted/value.zig
@@ -0,0 +1,581 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Scope = @import("scope.zig").Scope;
+const Compilation = @import("compilation.zig").Compilation;
+const ObjectFile = @import("codegen.zig").ObjectFile;
+const llvm = @import("llvm.zig");
+const Buffer = std.Buffer;
+const assert = std.debug.assert;
+
+/// Values are ref-counted, heap-allocated, and copy-on-write.
+/// If there is only 1 reference, a write need not copy.
+pub const Value = struct {
+ id: Id,
+ typ: *Type,
+ ref_count: std.atomic.Int(usize),
+
+ /// Thread-safe
+ pub fn ref(base: *Value) void {
+ _ = base.ref_count.incr();
+ }
+
+ /// Thread-safe
+ pub fn deref(base: *Value, comp: *Compilation) void {
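+ // decr() returns the previous count, so a result of 1 means this was the last reference.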
+ if (base.ref_count.decr() == 1) {
+ base.typ.base.deref(comp);
+ switch (base.id) {
+ Id.Type => @fieldParentPtr(Type, "base", base).destroy(comp),
+ Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(comp),
+ Id.FnProto => @fieldParentPtr(FnProto, "base", base).destroy(comp),
+ Id.Void => @fieldParentPtr(Void, "base", base).destroy(comp),
+ Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(comp),
+ Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(comp),
+ Id.Ptr => @fieldParentPtr(Ptr, "base", base).destroy(comp),
+ Id.Int => @fieldParentPtr(Int, "base", base).destroy(comp),
+ Id.Array => @fieldParentPtr(Array, "base", base).destroy(comp),
+ }
+ }
+ }
+
+ pub fn setType(base: *Value, new_type: *Type, comp: *Compilation) void {
+ base.typ.base.deref(comp);
+ new_type.base.ref();
+ base.typ = new_type;
+ }
+
+ pub fn getRef(base: *Value) *Value {
+ base.ref();
+ return base;
+ }
+
+ pub fn cast(base: *Value, comptime T: type) ?*T {
+ if (base.id != @field(Id, @typeName(T))) return null;
+ return @fieldParentPtr(T, "base", base);
+ }
+
+ pub fn dump(base: *const Value) void {
+ std.debug.warn("{}", @tagName(base.id));
+ }
+
+ pub fn getLlvmConst(base: *Value, ofile: *ObjectFile) (error{OutOfMemory}!?llvm.ValueRef) {
+ switch (base.id) {
+ Id.Type => unreachable,
+ Id.Fn => return @fieldParentPtr(Fn, "base", base).getLlvmConst(ofile),
+ Id.FnProto => return @fieldParentPtr(FnProto, "base", base).getLlvmConst(ofile),
+ Id.Void => return null,
+ Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmConst(ofile),
+ Id.NoReturn => unreachable,
+ Id.Ptr => return @fieldParentPtr(Ptr, "base", base).getLlvmConst(ofile),
+ Id.Int => return @fieldParentPtr(Int, "base", base).getLlvmConst(ofile),
+ Id.Array => return @fieldParentPtr(Array, "base", base).getLlvmConst(ofile),
+ }
+ }
+
+ pub fn derefAndCopy(self: *Value, comp: *Compilation) (error{OutOfMemory}!*Value) {
+ if (self.ref_count.get() == 1) {
+ // ( ͡° ͜ʖ ͡°)
+ return self;
+ }
+
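+ // More than one reference exists, so this decrement cannot drop the count to zero; hand the caller a fresh copy instead.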
+ assert(self.ref_count.decr() != 1);
+ return self.copy(comp);
+ }
+
+ pub fn copy(base: *Value, comp: *Compilation) (error{OutOfMemory}!*Value) {
+ switch (base.id) {
+ Id.Type => unreachable,
+ Id.Fn => unreachable,
+ Id.FnProto => unreachable,
+ Id.Void => unreachable,
+ Id.Bool => unreachable,
+ Id.NoReturn => unreachable,
+ Id.Ptr => unreachable,
+ Id.Array => unreachable,
+ Id.Int => return &(try @fieldParentPtr(Int, "base", base).copy(comp)).base,
+ }
+ }
+
+ pub const Parent = union(enum) {
+ None,
+ BaseStruct: BaseStruct,
+ BaseArray: BaseArray,
+ BaseUnion: *Value,
+ BaseScalar: *Value,
+
+ pub const BaseStruct = struct {
+ val: *Value,
+ field_index: usize,
+ };
+
+ pub const BaseArray = struct {
+ val: *Value,
+ elem_index: usize,
+ };
+ };
+
+ pub const Id = enum {
+ Type,
+ Fn,
+ Void,
+ Bool,
+ NoReturn,
+ Array,
+ Ptr,
+ Int,
+ FnProto,
+ };
+
+ pub const Type = @import("type.zig").Type;
+
+ pub const FnProto = struct {
+ base: Value,
+
+ /// The main external name that is used in the .o file.
+ /// TODO https://github.com/ziglang/zig/issues/265
+ symbol_name: Buffer,
+
+ pub fn create(comp: *Compilation, fn_type: *Type.Fn, symbol_name: Buffer) !*FnProto {
+ const self = try comp.gpa().create(FnProto{
+ .base = Value{
+ .id = Value.Id.FnProto,
+ .typ = &fn_type.base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .symbol_name = symbol_name,
+ });
+ fn_type.base.base.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *FnProto, comp: *Compilation) void {
+ self.symbol_name.deinit();
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmConst(self: *FnProto, ofile: *ObjectFile) !?llvm.ValueRef {
+ const llvm_fn_type = try self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ const llvm_fn = llvm.AddFunction(
+ ofile.module,
+ self.symbol_name.ptr(),
+ llvm_fn_type,
+ ) orelse return error.OutOfMemory;
+
+ // TODO port more logic from codegen.cpp:fn_llvm_value
+
+ return llvm_fn;
+ }
+ };
+
+ pub const Fn = struct {
+ base: Value,
+
+ /// The main external name that is used in the .o file.
+ /// TODO https://github.com/ziglang/zig/issues/265
+ symbol_name: Buffer,
+
+ /// parent should be the top level decls or container decls
+ fndef_scope: *Scope.FnDef,
+
+ /// parent is scope for last parameter
+ child_scope: *Scope,
+
+ /// parent is child_scope
+ block_scope: ?*Scope.Block,
+
+ /// Path to the object file that contains this function
+ containing_object: Buffer,
+
+ link_set_node: *std.LinkedList(?*Value.Fn).Node,
+
+ /// Creates a Fn value with 1 ref
+ /// Takes ownership of symbol_name
+ pub fn create(comp: *Compilation, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef, symbol_name: Buffer) !*Fn {
+ const link_set_node = try comp.gpa().create(Compilation.FnLinkSet.Node{
+ .data = null,
+ .next = undefined,
+ .prev = undefined,
+ });
+ errdefer comp.gpa().destroy(link_set_node);
+
+ const self = try comp.gpa().create(Fn{
+ .base = Value{
+ .id = Value.Id.Fn,
+ .typ = &fn_type.base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .fndef_scope = fndef_scope,
+ .child_scope = &fndef_scope.base,
+ .block_scope = null,
+ .symbol_name = symbol_name,
+ .containing_object = Buffer.initNull(comp.gpa()),
+ .link_set_node = link_set_node,
+ });
+ fn_type.base.base.ref();
+ fndef_scope.fn_val = self;
+ fndef_scope.base.ref();
+ return self;
+ }
+
+ pub fn destroy(self: *Fn, comp: *Compilation) void {
+ // remove with a tombstone so that we do not have to grab a lock
+ if (self.link_set_node.data != null) {
+ // it's now the job of the link step to find this tombstone and
+ // deallocate it.
+ self.link_set_node.data = null;
+ } else {
+ comp.gpa().destroy(self.link_set_node);
+ }
+
+ self.containing_object.deinit();
+ self.fndef_scope.base.deref(comp);
+ self.symbol_name.deinit();
+ comp.gpa().destroy(self);
+ }
+
+ /// We know that the function definition will end up in an .o file somewhere.
+ /// Here, all we have to do is generate a global prototype.
+ /// TODO cache the prototype per ObjectFile
+ pub fn getLlvmConst(self: *Fn, ofile: *ObjectFile) !?llvm.ValueRef {
+ const llvm_fn_type = try self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ const llvm_fn = llvm.AddFunction(
+ ofile.module,
+ self.symbol_name.ptr(),
+ llvm_fn_type,
+ ) orelse return error.OutOfMemory;
+
+ // TODO port more logic from codegen.cpp:fn_llvm_value
+
+ return llvm_fn;
+ }
+ };
+
+ pub const Void = struct {
+ base: Value,
+
+ pub fn get(comp: *Compilation) *Void {
+ comp.void_value.base.ref();
+ return comp.void_value;
+ }
+
+ pub fn destroy(self: *Void, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Bool = struct {
+ base: Value,
+ x: bool,
+
+ pub fn get(comp: *Compilation, x: bool) *Bool {
+ if (x) {
+ comp.true_value.base.ref();
+ return comp.true_value;
+ } else {
+ comp.false_value.base.ref();
+ return comp.false_value;
+ }
+ }
+
+ pub fn destroy(self: *Bool, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmConst(self: *Bool, ofile: *ObjectFile) ?llvm.ValueRef {
+ const llvm_type = llvm.Int1TypeInContext(ofile.context);
+ if (self.x) {
+ return llvm.ConstAllOnes(llvm_type);
+ } else {
+ return llvm.ConstNull(llvm_type);
+ }
+ }
+ };
+
+ pub const NoReturn = struct {
+ base: Value,
+
+ pub fn get(comp: *Compilation) *NoReturn {
+ comp.noreturn_value.base.ref();
+ return comp.noreturn_value;
+ }
+
+ pub fn destroy(self: *NoReturn, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const Ptr = struct {
+ base: Value,
+ special: Special,
+ mut: Mut,
+
+ pub const Mut = enum {
+ CompTimeConst,
+ CompTimeVar,
+ RunTime,
+ };
+
+ pub const Special = union(enum) {
+ Scalar: *Value,
+ BaseArray: BaseArray,
+ BaseStruct: BaseStruct,
+ HardCodedAddr: u64,
+ Discard,
+ };
+
+ pub const BaseArray = struct {
+ val: *Value,
+ elem_index: usize,
+ };
+
+ pub const BaseStruct = struct {
+ val: *Value,
+ field_index: usize,
+ };
+
+ pub async fn createArrayElemPtr(
+ comp: *Compilation,
+ array_val: *Array,
+ mut: Type.Pointer.Mut,
+ size: Type.Pointer.Size,
+ elem_index: usize,
+ ) !*Ptr {
+ array_val.base.ref();
+ errdefer array_val.base.deref(comp);
+
+ const elem_type = array_val.base.typ.cast(Type.Array).?.key.elem_type;
+ const ptr_type = try await (async Type.Pointer.get(comp, Type.Pointer.Key{
+ .child_type = elem_type,
+ .mut = mut,
+ .vol = Type.Pointer.Vol.Non,
+ .size = size,
+ .alignment = Type.Pointer.Align.Abi,
+ }) catch unreachable);
+ var ptr_type_consumed = false;
+ errdefer if (!ptr_type_consumed) ptr_type.base.base.deref(comp);
+
+ const self = try comp.gpa().create(Value.Ptr{
+ .base = Value{
+ .id = Value.Id.Ptr,
+ .typ = &ptr_type.base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .special = Special{
+ .BaseArray = BaseArray{
+ .val = &array_val.base,
+ .elem_index = 0,
+ },
+ },
+ .mut = Mut.CompTimeConst,
+ });
+ ptr_type_consumed = true;
+ errdefer comp.gpa().destroy(self);
+
+ return self;
+ }
+
+ pub fn destroy(self: *Ptr, comp: *Compilation) void {
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmConst(self: *Ptr, ofile: *ObjectFile) !?llvm.ValueRef {
+ const llvm_type = self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ // TODO carefully port the logic from codegen.cpp:gen_const_val_ptr
+ switch (self.special) {
+ Special.Scalar => |scalar| @panic("TODO"),
+ Special.BaseArray => |base_array| {
+ // TODO put this in one .o file only, and after that, generate extern references to it
+ const array_llvm_value = (try base_array.val.getLlvmConst(ofile)).?;
+ const ptr_bit_count = ofile.comp.target_ptr_bits;
+ const usize_llvm_type = llvm.IntTypeInContext(ofile.context, ptr_bit_count) orelse return error.OutOfMemory;
+ const indices = []llvm.ValueRef{
+ llvm.ConstNull(usize_llvm_type) orelse return error.OutOfMemory,
+ llvm.ConstInt(usize_llvm_type, base_array.elem_index, 0) orelse return error.OutOfMemory,
+ };
+ return llvm.ConstInBoundsGEP(
+ array_llvm_value,
+ &indices,
+ @intCast(c_uint, indices.len),
+ ) orelse return error.OutOfMemory;
+ },
+ Special.BaseStruct => |base_struct| @panic("TODO"),
+ Special.HardCodedAddr => |addr| @panic("TODO"),
+ Special.Discard => unreachable,
+ }
+ }
+ };
+
+ pub const Array = struct {
+ base: Value,
+ special: Special,
+
+ pub const Special = union(enum) {
+ Undefined,
+ OwnedBuffer: []u8,
+ Explicit: Data,
+ };
+
+ pub const Data = struct {
+ parent: Parent,
+ elements: []*Value,
+ };
+
+ /// Takes ownership of buffer
+ pub async fn createOwnedBuffer(comp: *Compilation, buffer: []u8) !*Array {
+ const u8_type = Type.Int.get_u8(comp);
+ defer u8_type.base.base.deref(comp);
+
+ const array_type = try await (async Type.Array.get(comp, Type.Array.Key{
+ .elem_type = &u8_type.base,
+ .len = buffer.len,
+ }) catch unreachable);
+ errdefer array_type.base.base.deref(comp);
+
+ const self = try comp.gpa().create(Value.Array{
+ .base = Value{
+ .id = Value.Id.Array,
+ .typ = &array_type.base,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .special = Special{ .OwnedBuffer = buffer },
+ });
+ errdefer comp.gpa().destroy(self);
+
+ return self;
+ }
+
+ pub fn destroy(self: *Array, comp: *Compilation) void {
+ switch (self.special) {
+ Special.Undefined => {},
+ Special.OwnedBuffer => |buf| {
+ comp.gpa().free(buf);
+ },
+ Special.Explicit => {},
+ }
+ comp.gpa().destroy(self);
+ }
+
+ pub fn getLlvmConst(self: *Array, ofile: *ObjectFile) !?llvm.ValueRef {
+ switch (self.special) {
+ Special.Undefined => {
+ const llvm_type = try self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ return llvm.GetUndef(llvm_type);
+ },
+ Special.OwnedBuffer => |buf| {
+ const dont_null_terminate = 1;
+ const llvm_str_init = llvm.ConstStringInContext(
+ ofile.context,
+ buf.ptr,
+ @intCast(c_uint, buf.len),
+ dont_null_terminate,
+ ) orelse return error.OutOfMemory;
+ const str_init_type = llvm.TypeOf(llvm_str_init);
+ const global = llvm.AddGlobal(ofile.module, str_init_type, c"") orelse return error.OutOfMemory;
+ llvm.SetInitializer(global, llvm_str_init);
+ llvm.SetLinkage(global, llvm.PrivateLinkage);
+ llvm.SetGlobalConstant(global, 1);
+ llvm.SetUnnamedAddr(global, 1);
+ llvm.SetAlignment(global, llvm.ABIAlignmentOfType(ofile.comp.target_data_ref, str_init_type));
+ return global;
+ },
+ Special.Explicit => @panic("TODO"),
+ }
+
+ //{
+ // uint64_t len = type_entry->data.array.len;
+ // if (const_val->data.x_array.special == ConstArraySpecialUndef) {
+ // return LLVMGetUndef(type_entry->type_ref);
+ // }
+
+ // LLVMValueRef *values = allocate<LLVMValueRef>(len);
+ // LLVMTypeRef element_type_ref = type_entry->data.array.child_type->type_ref;
+ // bool make_unnamed_struct = false;
+ // for (uint64_t i = 0; i < len; i += 1) {
+ // ConstExprValue *elem_value = &const_val->data.x_array.s_none.elements[i];
+ // LLVMValueRef val = gen_const_val(g, elem_value, "");
+ // values[i] = val;
+ // make_unnamed_struct = make_unnamed_struct || is_llvm_value_unnamed_type(elem_value->type, val);
+ // }
+ // if (make_unnamed_struct) {
+ // return LLVMConstStruct(values, len, true);
+ // } else {
+ // return LLVMConstArray(element_type_ref, values, (unsigned)len);
+ // }
+ //}
+ }
+ };
+
+ pub const Int = struct {
+ base: Value,
+ big_int: std.math.big.Int,
+
+ pub fn createFromString(comp: *Compilation, typ: *Type, base: u8, value: []const u8) !*Int {
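+ // Parses value in the given numeric base into a heap-allocated big integer, returning an Int value with 1 reference.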
+ const self = try comp.gpa().create(Value.Int{
+ .base = Value{
+ .id = Value.Id.Int,
+ .typ = typ,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .big_int = undefined,
+ });
+ typ.base.ref();
+ errdefer comp.gpa().destroy(self);
+
+ self.big_int = try std.math.big.Int.init(comp.gpa());
+ errdefer self.big_int.deinit();
+
+ try self.big_int.setString(base, value);
+
+ return self;
+ }
+
+ pub fn getLlvmConst(self: *Int, ofile: *ObjectFile) !?llvm.ValueRef {
+ switch (self.base.typ.id) {
+ Type.Id.Int => {
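+ // Lower the big integer: zero limbs is the constant 0, one limb fits ConstInt, more limbs need arbitrary precision; negate afterwards if the value is negative.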
+ const type_ref = try self.base.typ.getLlvmType(ofile.arena, ofile.context);
+ if (self.big_int.len == 0) {
+ return llvm.ConstNull(type_ref);
+ }
+ const unsigned_val = if (self.big_int.len == 1) blk: {
+ break :blk llvm.ConstInt(type_ref, self.big_int.limbs[0], @boolToInt(false));
+ } else if (@sizeOf(std.math.big.Limb) == @sizeOf(u64)) blk: {
+ break :blk llvm.ConstIntOfArbitraryPrecision(
+ type_ref,
+ @intCast(c_uint, self.big_int.len),
+ @ptrCast([*]u64, self.big_int.limbs.ptr),
+ );
+ } else {
+ @compileError("std.math.Big.Int.Limb size does not match LLVM");
+ };
+ return if (self.big_int.positive) unsigned_val else llvm.ConstNeg(unsigned_val);
+ },
+ Type.Id.ComptimeInt => unreachable,
+ else => unreachable,
+ }
+ }
+
+ pub fn copy(old: *Int, comp: *Compilation) !*Int {
+ old.base.typ.base.ref();
+ errdefer old.base.typ.base.deref(comp);
+
+ const new = try comp.gpa().create(Value.Int{
+ .base = Value{
+ .id = Value.Id.Int,
+ .typ = old.base.typ,
+ .ref_count = std.atomic.Int(usize).init(1),
+ },
+ .big_int = undefined,
+ });
+ errdefer comp.gpa().destroy(new);
+
+ new.big_int = try old.big_int.clone();
+ errdefer new.big_int.deinit();
+
+ return new;
+ }
+
+ pub fn destroy(self: *Int, comp: *Compilation) void {
+ self.big_int.deinit();
+ comp.gpa().destroy(self);
+ }
+ };
+};
diff --git a/src-self-hosted/visib.zig b/src-self-hosted/visib.zig
new file mode 100644
index 0000000000..3704600cca
--- /dev/null
+++ b/src-self-hosted/visib.zig
@@ -0,0 +1,4 @@
+pub const Visib = enum {
+ Private,
+ Pub,
+};
diff --git a/src/all_types.hpp b/src/all_types.hpp
index d27a5c7a1c..b1e8a3746d 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -60,7 +60,7 @@ struct IrExecutable {
ZigList<Tld *> tld_list;
IrInstruction *coro_handle;
- IrInstruction *coro_awaiter_field_ptr; // this one is shared and in the promise
+ IrInstruction *atomic_state_field_ptr; // this one is shared and in the promise
IrInstruction *coro_result_ptr_field_ptr;
IrInstruction *coro_result_field_ptr;
IrInstruction *await_handle_var_ptr; // this one is where we put the one we extracted from the promise
@@ -83,6 +83,7 @@ enum ConstParentId {
ConstParentIdStruct,
ConstParentIdArray,
ConstParentIdUnion,
+ ConstParentIdScalar,
};
struct ConstParent {
@@ -100,6 +101,9 @@ struct ConstParent {
struct {
ConstExprValue *union_val;
} p_union;
+ struct {
+ ConstExprValue *scalar_val;
+ } p_scalar;
} data;
};
@@ -140,6 +144,9 @@ enum ConstPtrSpecial {
// understand the value of pointee at compile time. However, we will still
// emit a binary with a compile time known address.
// In this case index is the numeric address value.
+ // We also use this for null pointer. We need the data layout for ConstCastOnly == true
+ // types to be the same, so all optionals of pointer types use x_ptr
+ // instead of x_optional
ConstPtrSpecialHardCodedAddr,
// This means that the pointer represents memory of assigning to _.
// That is, storing discards the data, and loading is invalid.
@@ -215,10 +222,10 @@ enum RuntimeHintErrorUnion {
RuntimeHintErrorUnionNonError,
};
-enum RuntimeHintMaybe {
- RuntimeHintMaybeUnknown,
- RuntimeHintMaybeNull, // TODO is this value even possible? if this is the case it might mean the const value is compile time known.
- RuntimeHintMaybeNonNull,
+enum RuntimeHintOptional {
+ RuntimeHintOptionalUnknown,
+ RuntimeHintOptionalNull, // TODO is this value even possible? if this is the case it might mean the const value is compile time known.
+ RuntimeHintOptionalNonNull,
};
enum RuntimeHintPtr {
@@ -227,6 +234,16 @@ enum RuntimeHintPtr {
RuntimeHintPtrNonStack,
};
+enum RuntimeHintSliceId {
+ RuntimeHintSliceIdUnknown,
+ RuntimeHintSliceIdLen,
+};
+
+struct RuntimeHintSlice {
+ enum RuntimeHintSliceId id;
+ uint64_t len;
+};
+
struct ConstGlobalRefs {
LLVMValueRef llvm_value;
LLVMValueRef llvm_global;
@@ -241,13 +258,14 @@ struct ConstExprValue {
// populated if special == ConstValSpecialStatic
BigInt x_bigint;
BigFloat x_bigfloat;
+ float16_t x_f16;
float x_f32;
double x_f64;
float128_t x_f128;
bool x_bool;
ConstBoundFnValue x_bound_fn;
TypeTableEntry *x_type;
- ConstExprValue *x_maybe;
+ ConstExprValue *x_optional;
ConstErrValue x_err_union;
ErrorTableEntry *x_err_set;
BigInt x_enum_tag;
@@ -261,8 +279,9 @@ struct ConstExprValue {
// populated if special == ConstValSpecialRuntime
RuntimeHintErrorUnion rh_error_union;
- RuntimeHintMaybe rh_maybe;
+ RuntimeHintOptional rh_maybe;
RuntimeHintPtr rh_ptr;
+ RuntimeHintSlice rh_slice;
} data;
};
@@ -374,11 +393,13 @@ enum NodeType {
NodeTypeCharLiteral,
NodeTypeSymbol,
NodeTypePrefixOpExpr,
- NodeTypeAddrOfExpr,
+ NodeTypePointerType,
NodeTypeFnCallExpr,
NodeTypeArrayAccessExpr,
NodeTypeSliceExpr,
NodeTypeFieldAccessExpr,
+ NodeTypePtrDeref,
+ NodeTypeUnwrapOptional,
NodeTypeUse,
NodeTypeBoolLiteral,
NodeTypeNullLiteral,
@@ -548,7 +569,7 @@ enum BinOpType {
BinOpTypeMultWrap,
BinOpTypeDiv,
BinOpTypeMod,
- BinOpTypeUnwrapMaybe,
+ BinOpTypeUnwrapOptional,
BinOpTypeArrayCat,
BinOpTypeArrayMult,
BinOpTypeErrorUnion,
@@ -567,6 +588,10 @@ struct AstNodeCatchExpr {
AstNode *op2;
};
+struct AstNodeUnwrapOptional {
+ AstNode *expr;
+};
+
enum CastOp {
CastOpNoCast, // signifies the function call expression is not a cast
CastOpNoop, // fn call expr is a cast, but does nothing
@@ -577,6 +602,8 @@ enum CastOp {
CastOpBytesToSlice,
CastOpNumLitToConcrete,
CastOpErrSet,
+ CastOpBitCast,
+ CastOpPtrOfArrayToSlice,
};
struct AstNodeFnCallExpr {
@@ -603,15 +630,18 @@ struct AstNodeFieldAccessExpr {
Buf *field_name;
};
+struct AstNodePtrDerefExpr {
+ AstNode *target;
+};
+
enum PrefixOp {
PrefixOpInvalid,
PrefixOpBoolNot,
PrefixOpBinNot,
PrefixOpNegation,
PrefixOpNegationWrap,
- PrefixOpDereference,
- PrefixOpMaybe,
- PrefixOpUnwrapMaybe,
+ PrefixOpOptional,
+ PrefixOpAddrOf,
};
struct AstNodePrefixOpExpr {
@@ -619,7 +649,8 @@ struct AstNodePrefixOpExpr {
AstNode *primary_expr;
};
-struct AstNodeAddrOfExpr {
+struct AstNodePointerType {
+ Token *star_token;
AstNode *align_expr;
BigInt *bit_offset_start;
BigInt *bit_offset_end;
@@ -868,7 +899,6 @@ struct AstNodeAwaitExpr {
struct AstNodeSuspend {
AstNode *block;
- AstNode *promise_symbol;
};
struct AstNodePromiseType {
@@ -893,8 +923,9 @@ struct AstNode {
AstNodeTestDecl test_decl;
AstNodeBinOpExpr bin_op_expr;
AstNodeCatchExpr unwrap_err_expr;
+ AstNodeUnwrapOptional unwrap_optional;
AstNodePrefixOpExpr prefix_op_expr;
- AstNodeAddrOfExpr addr_of_expr;
+ AstNodePointerType pointer_type;
AstNodeFnCallExpr fn_call_expr;
AstNodeArrayAccessExpr array_access_expr;
AstNodeSliceExpr slice_expr;
@@ -910,6 +941,7 @@ struct AstNode {
AstNodeCompTime comptime_expr;
AstNodeAsmExpr asm_expr;
AstNodeFieldAccessExpr field_access_expr;
+ AstNodePtrDerefExpr ptr_deref_expr;
AstNodeContainerDecl container_decl;
AstNodeStructField struct_field;
AstNodeStringLiteral string_literal;
@@ -966,8 +998,14 @@ struct FnTypeId {
uint32_t fn_type_id_hash(FnTypeId*);
bool fn_type_id_eql(FnTypeId *a, FnTypeId *b);
+enum PtrLen {
+ PtrLenUnknown,
+ PtrLenSingle,
+};
+
struct TypeTableEntryPointer {
TypeTableEntry *child_type;
+ PtrLen ptr_len;
bool is_const;
bool is_volatile;
uint32_t alignment;
@@ -1018,6 +1056,10 @@ struct TypeTableEntryStruct {
// whether we've finished resolving it
bool complete;
+ // whether any of the fields require comptime
+ // the value is not valid until zero_bits_known == true
+ bool requires_comptime;
+
bool zero_bits_loop_flag;
bool zero_bits_known;
uint32_t abi_alignment; // also figured out with zero_bits pass
@@ -1025,7 +1067,7 @@ struct TypeTableEntryStruct {
HashMap<Buf *, TypeStructField *, buf_hash, buf_eql> fields_by_name;
};
-struct TypeTableEntryMaybe {
+struct TypeTableEntryOptional {
TypeTableEntry *child_type;
};
@@ -1059,8 +1101,7 @@ struct TypeTableEntryEnum {
bool zero_bits_loop_flag;
bool zero_bits_known;
- bool generate_name_table;
- LLVMValueRef name_table;
+ LLVMValueRef name_function;
HashMap<Buf *, TypeEnumField *, buf_hash, buf_eql> fields_by_name;
};
@@ -1086,6 +1127,10 @@ struct TypeTableEntryUnion {
// whether we've finished resolving it
bool complete;
+ // whether any of the fields require comptime
+ // the value is not valid until zero_bits_known == true
+ bool requires_comptime;
+
bool zero_bits_loop_flag;
bool zero_bits_known;
uint32_t abi_alignment; // also figured out with zero_bits pass
@@ -1140,11 +1185,11 @@ enum TypeTableEntryId {
TypeTableEntryIdPointer,
TypeTableEntryIdArray,
TypeTableEntryIdStruct,
- TypeTableEntryIdNumLitFloat,
- TypeTableEntryIdNumLitInt,
- TypeTableEntryIdUndefLit,
- TypeTableEntryIdNullLit,
- TypeTableEntryIdMaybe,
+ TypeTableEntryIdComptimeFloat,
+ TypeTableEntryIdComptimeInt,
+ TypeTableEntryIdUndefined,
+ TypeTableEntryIdNull,
+ TypeTableEntryIdOptional,
TypeTableEntryIdErrorUnion,
TypeTableEntryIdErrorSet,
TypeTableEntryIdEnum,
@@ -1175,7 +1220,7 @@ struct TypeTableEntry {
TypeTableEntryFloat floating;
TypeTableEntryArray array;
TypeTableEntryStruct structure;
- TypeTableEntryMaybe maybe;
+ TypeTableEntryOptional maybe;
TypeTableEntryErrorUnion error_union;
TypeTableEntryErrorSet error_set;
TypeTableEntryEnum enumeration;
@@ -1187,7 +1232,7 @@ struct TypeTableEntry {
// use these fields to make sure we don't duplicate type table entries for the same type
TypeTableEntry *pointer_parent[2]; // [0 - mut, 1 - const]
- TypeTableEntry *maybe_parent;
+ TypeTableEntry *optional_parent;
TypeTableEntry *promise_parent;
TypeTableEntry *promise_frame_parent;
// If we generate a constant name value for this type, we memoize it here.
@@ -1291,6 +1336,8 @@ enum BuiltinFnId {
BuiltinFnIdMemberCount,
BuiltinFnIdMemberType,
BuiltinFnIdMemberName,
+ BuiltinFnIdField,
+ BuiltinFnIdTypeInfo,
BuiltinFnIdTypeof,
BuiltinFnIdAddWithOverflow,
BuiltinFnIdSubWithOverflow,
@@ -1303,27 +1350,42 @@ enum BuiltinFnId {
BuiltinFnIdCompileLog,
BuiltinFnIdCtz,
BuiltinFnIdClz,
+ BuiltinFnIdPopCount,
BuiltinFnIdImport,
BuiltinFnIdCImport,
BuiltinFnIdErrName,
BuiltinFnIdBreakpoint,
BuiltinFnIdReturnAddress,
BuiltinFnIdFrameAddress,
+ BuiltinFnIdHandle,
BuiltinFnIdEmbedFile,
- BuiltinFnIdCmpExchange,
+ BuiltinFnIdCmpxchgWeak,
+ BuiltinFnIdCmpxchgStrong,
BuiltinFnIdFence,
BuiltinFnIdDivExact,
BuiltinFnIdDivTrunc,
BuiltinFnIdDivFloor,
BuiltinFnIdRem,
BuiltinFnIdMod,
+ BuiltinFnIdSqrt,
BuiltinFnIdTruncate,
+ BuiltinFnIdIntCast,
+ BuiltinFnIdFloatCast,
+ BuiltinFnIdErrSetCast,
+ BuiltinFnIdToBytes,
+ BuiltinFnIdFromBytes,
+ BuiltinFnIdIntToFloat,
+ BuiltinFnIdFloatToInt,
+ BuiltinFnIdBoolToInt,
+ BuiltinFnIdErrToInt,
+ BuiltinFnIdIntToErr,
+ BuiltinFnIdEnumToInt,
+ BuiltinFnIdIntToEnum,
BuiltinFnIdIntType,
BuiltinFnIdSetCold,
BuiltinFnIdSetRuntimeSafety,
BuiltinFnIdSetFloatMode,
BuiltinFnIdTypeName,
- BuiltinFnIdCanImplicitCast,
BuiltinFnIdPanic,
BuiltinFnIdPtrCast,
BuiltinFnIdBitCast,
@@ -1335,6 +1397,7 @@ enum BuiltinFnId {
BuiltinFnIdOffsetOf,
BuiltinFnIdInlineCall,
BuiltinFnIdNoInlineCall,
+ BuiltinFnIdNewStackCall,
BuiltinFnIdTypeId,
BuiltinFnIdShlExact,
BuiltinFnIdShrExact,
@@ -1346,6 +1409,7 @@ enum BuiltinFnId {
BuiltinFnIdExport,
BuiltinFnIdErrorReturnTrace,
BuiltinFnIdAtomicRmw,
+ BuiltinFnIdAtomicLoad,
};
struct BuiltinFnEntry {
@@ -1366,10 +1430,12 @@ enum PanicMsgId {
PanicMsgIdRemainderDivisionByZero,
PanicMsgIdExactDivisionRemainder,
PanicMsgIdSliceWidenRemainder,
- PanicMsgIdUnwrapMaybeFail,
+ PanicMsgIdUnwrapOptionalFail,
PanicMsgIdInvalidErrorCode,
PanicMsgIdIncorrectAlignment,
PanicMsgIdBadUnionField,
+ PanicMsgIdBadEnumValue,
+ PanicMsgIdFloatToInt,
PanicMsgIdCount,
};
@@ -1383,6 +1449,7 @@ struct TypeId {
union {
struct {
TypeTableEntry *child_type;
+ PtrLen ptr_len;
bool is_const;
bool is_volatile;
uint32_t alignment;
@@ -1410,9 +1477,11 @@ bool type_id_eql(TypeId a, TypeId b);
enum ZigLLVMFnId {
ZigLLVMFnIdCtz,
ZigLLVMFnIdClz,
+ ZigLLVMFnIdPopCount,
ZigLLVMFnIdOverflowArithmetic,
ZigLLVMFnIdFloor,
ZigLLVMFnIdCeil,
+ ZigLLVMFnIdSqrt,
};
enum AddSubMul {
@@ -1433,7 +1502,10 @@ struct ZigLLVMFnKey {
} clz;
struct {
uint32_t bit_count;
- } floor_ceil;
+ } pop_count;
+ struct {
+ uint32_t bit_count;
+ } floating;
struct {
AddSubMul add_sub_mul;
uint32_t bit_count;
@@ -1454,6 +1526,7 @@ enum BuildMode {
BuildModeDebug,
BuildModeFastRelease,
BuildModeSafeRelease,
+ BuildModeSmallRelease,
};
enum EmitFileType {
@@ -1499,6 +1572,7 @@ struct CodeGen {
HashMap exported_symbol_names;
HashMap external_prototypes;
HashMap string_literals_table;
+ HashMap type_info_cache;
ZigList<ImportTableEntry *> import_queue;
@@ -1512,7 +1586,6 @@ struct CodeGen {
struct {
TypeTableEntry *entry_bool;
- TypeTableEntry *entry_int[2][12]; // [signed,unsigned][2,3,4,5,6,7,8,16,29,32,64,128]
TypeTableEntry *entry_c_int[CIntTypeCount];
TypeTableEntry *entry_c_longdouble;
TypeTableEntry *entry_c_void;
@@ -1521,14 +1594,12 @@ struct CodeGen {
TypeTableEntry *entry_u32;
TypeTableEntry *entry_u29;
TypeTableEntry *entry_u64;
- TypeTableEntry *entry_u128;
TypeTableEntry *entry_i8;
- TypeTableEntry *entry_i16;
TypeTableEntry *entry_i32;
TypeTableEntry *entry_i64;
- TypeTableEntry *entry_i128;
TypeTableEntry *entry_isize;
TypeTableEntry *entry_usize;
+ TypeTableEntry *entry_f16;
TypeTableEntry *entry_f32;
TypeTableEntry *entry_f64;
TypeTableEntry *entry_f128;
@@ -1645,10 +1716,16 @@ struct CodeGen {
LLVMValueRef coro_save_fn_val;
LLVMValueRef coro_promise_fn_val;
LLVMValueRef coro_alloc_helper_fn_val;
+ LLVMValueRef coro_frame_fn_val;
LLVMValueRef merge_err_ret_traces_fn_val;
LLVMValueRef add_error_return_trace_addr_fn_val;
+ LLVMValueRef stacksave_fn_val;
+ LLVMValueRef stackrestore_fn_val;
+ LLVMValueRef write_register_fn_val;
bool error_during_imports;
+ LLVMValueRef sp_md_node;
+
const char **clang_argv;
size_t clang_argv_len;
ZigList<const char *> lib_dirs;
@@ -1680,8 +1757,6 @@ struct CodeGen {
ZigList link_objects;
ZigList assembly_files;
- ZigList<TypeTableEntry *> name_table_enums;
-
Buf *test_filter;
Buf *test_name_prefix;
@@ -1700,6 +1775,8 @@ struct CodeGen {
ZigList error_di_types;
ZigList forbidden_libs;
+
+ bool no_rosegment_workaround;
};
enum VarLinkage {
@@ -1750,6 +1827,7 @@ enum ScopeId {
ScopeIdVarDecl,
ScopeIdCImport,
ScopeIdLoop,
+ ScopeIdSuspend,
ScopeIdFnDef,
ScopeIdCompTime,
ScopeIdCoroPrelude,
@@ -1845,6 +1923,16 @@ struct ScopeLoop {
ZigList<IrBasicBlock *> *incoming_blocks;
};
+// This scope is created for a suspend block in order to have labeled
+// suspend for breaking out of a suspend and for detecting if a suspend
+// block is inside a suspend block.
+struct ScopeSuspend {
+ Scope base;
+
+ IrBasicBlock *resume_block;
+ bool reported_err;
+};
+
// This scope is created for a comptime expression.
// NodeTypeCompTime, NodeTypeSwitchExpr
struct ScopeCompTime {
@@ -1912,12 +2000,6 @@ struct IrBasicBlock {
IrInstruction *must_be_comptime_source_instr;
};
-struct LVal {
- bool is_ptr;
- bool is_const;
- bool is_volatile;
-};
-
enum IrInstructionId {
IrInstructionIdInvalid,
IrInstructionIdBr,
@@ -1957,11 +2039,12 @@ enum IrInstructionId {
IrInstructionIdAsm,
IrInstructionIdSizeOf,
IrInstructionIdTestNonNull,
- IrInstructionIdUnwrapMaybe,
- IrInstructionIdMaybeWrap,
+ IrInstructionIdUnwrapOptional,
+ IrInstructionIdOptionalWrap,
IrInstructionIdUnionTag,
IrInstructionIdClz,
IrInstructionIdCtz,
+ IrInstructionIdPopCount,
IrInstructionIdImport,
IrInstructionIdCImport,
IrInstructionIdCInclude,
@@ -1978,6 +2061,11 @@ enum IrInstructionId {
IrInstructionIdCmpxchg,
IrInstructionIdFence,
IrInstructionIdTruncate,
+ IrInstructionIdIntCast,
+ IrInstructionIdFloatCast,
+ IrInstructionIdIntToFloat,
+ IrInstructionIdFloatToInt,
+ IrInstructionIdBoolToInt,
IrInstructionIdIntType,
IrInstructionIdBoolNot,
IrInstructionIdMemset,
@@ -1989,6 +2077,7 @@ enum IrInstructionId {
IrInstructionIdBreakpoint,
IrInstructionIdReturnAddress,
IrInstructionIdFrameAddress,
+ IrInstructionIdHandle,
IrInstructionIdAlignOf,
IrInstructionIdOverflowOp,
IrInstructionIdTestErr,
@@ -2004,21 +2093,22 @@ enum IrInstructionId {
IrInstructionIdIntToPtr,
IrInstructionIdPtrToInt,
IrInstructionIdIntToEnum,
+ IrInstructionIdEnumToInt,
IrInstructionIdIntToErr,
IrInstructionIdErrToInt,
IrInstructionIdCheckSwitchProngs,
IrInstructionIdCheckStatementIsVoid,
IrInstructionIdTypeName,
- IrInstructionIdCanImplicitCast,
IrInstructionIdDeclRef,
IrInstructionIdPanic,
IrInstructionIdTagName,
IrInstructionIdTagType,
IrInstructionIdFieldParentPtr,
IrInstructionIdOffsetOf,
+ IrInstructionIdTypeInfo,
IrInstructionIdTypeId,
IrInstructionIdSetEvalBranchQuota,
- IrInstructionIdPtrTypeOf,
+ IrInstructionIdPtrType,
IrInstructionIdAlignCast,
IrInstructionIdOpaqueType,
IrInstructionIdSetAlignStack,
@@ -2041,12 +2131,17 @@ enum IrInstructionId {
IrInstructionIdCoroPromise,
IrInstructionIdCoroAllocHelper,
IrInstructionIdAtomicRmw,
+ IrInstructionIdAtomicLoad,
IrInstructionIdPromiseResultType,
IrInstructionIdAwaitBookkeeping,
IrInstructionIdSaveErrRetAddr,
IrInstructionIdAddImplicitReturnType,
IrInstructionIdMergeErrRetTraces,
IrInstructionIdMarkErrRetTracePtr,
+ IrInstructionIdSqrt,
+ IrInstructionIdErrSetCast,
+ IrInstructionIdToBytes,
+ IrInstructionIdFromBytes,
};
struct IrInstruction {
@@ -2094,6 +2189,7 @@ struct IrInstructionSwitchBr {
size_t case_count;
IrInstructionSwitchBrCase *cases;
IrInstruction *is_comptime;
+ IrInstruction *switch_prongs_void;
};
struct IrInstructionSwitchVar {
@@ -2123,7 +2219,7 @@ enum IrUnOp {
IrUnOpNegation,
IrUnOpNegationWrap,
IrUnOpDereference,
- IrUnOpMaybe,
+ IrUnOpOptional,
};
struct IrInstructionUnOp {
@@ -2203,7 +2299,8 @@ struct IrInstructionFieldPtr {
IrInstruction base;
IrInstruction *container_ptr;
- Buf *field_name;
+ Buf *field_name_buffer;
+ IrInstruction *field_name_expr;
bool is_const;
};
@@ -2228,6 +2325,7 @@ struct IrInstructionElemPtr {
IrInstruction *array_ptr;
IrInstruction *elem_index;
+ PtrLen ptr_len;
bool is_const;
bool safety_check_on;
};
@@ -2236,8 +2334,6 @@ struct IrInstructionVarPtr {
IrInstruction base;
VariableTableEntry *var;
- bool is_const;
- bool is_volatile;
};
struct IrInstructionCall {
@@ -2253,6 +2349,7 @@ struct IrInstructionCall {
bool is_async;
IrInstruction *async_allocator;
+ IrInstruction *new_stack;
};
struct IrInstructionConst {
@@ -2373,6 +2470,18 @@ struct IrInstructionArrayType {
IrInstruction *child_type;
};
+struct IrInstructionPtrType {
+ IrInstruction base;
+
+ IrInstruction *align_value;
+ IrInstruction *child_type;
+ uint32_t bit_offset_start;
+ uint32_t bit_offset_end;
+ PtrLen ptr_len;
+ bool is_const;
+ bool is_volatile;
+};
+
struct IrInstructionPromiseType {
IrInstruction base;
@@ -2413,7 +2522,7 @@ struct IrInstructionTestNonNull {
IrInstruction *value;
};
-struct IrInstructionUnwrapMaybe {
+struct IrInstructionUnwrapOptional {
IrInstruction base;
IrInstruction *value;
@@ -2432,6 +2541,12 @@ struct IrInstructionClz {
IrInstruction *value;
};
+struct IrInstructionPopCount {
+ IrInstruction base;
+
+ IrInstruction *value;
+};
+
struct IrInstructionUnionTag {
IrInstruction base;
@@ -2522,6 +2637,7 @@ struct IrInstructionEmbedFile {
struct IrInstructionCmpxchg {
IrInstruction base;
+ IrInstruction *type_value;
IrInstruction *ptr;
IrInstruction *cmp_value;
IrInstruction *new_value;
@@ -2529,8 +2645,13 @@ struct IrInstructionCmpxchg {
IrInstruction *failure_order_value;
// if this instruction gets to runtime then we know these values:
+ TypeTableEntry *type;
AtomicOrder success_order;
AtomicOrder failure_order;
+
+ bool is_weak;
+
+ LLVMValueRef tmp_ptr;
};
struct IrInstructionFence {
@@ -2549,6 +2670,60 @@ struct IrInstructionTruncate {
IrInstruction *target;
};
+struct IrInstructionIntCast {
+ IrInstruction base;
+
+ IrInstruction *dest_type;
+ IrInstruction *target;
+};
+
+struct IrInstructionFloatCast {
+ IrInstruction base;
+
+ IrInstruction *dest_type;
+ IrInstruction *target;
+};
+
+struct IrInstructionErrSetCast {
+ IrInstruction base;
+
+ IrInstruction *dest_type;
+ IrInstruction *target;
+};
+
+struct IrInstructionToBytes {
+ IrInstruction base;
+
+ IrInstruction *target;
+};
+
+struct IrInstructionFromBytes {
+ IrInstruction base;
+
+ IrInstruction *dest_child_type;
+ IrInstruction *target;
+};
+
+struct IrInstructionIntToFloat {
+ IrInstruction base;
+
+ IrInstruction *dest_type;
+ IrInstruction *target;
+};
+
+struct IrInstructionFloatToInt {
+ IrInstruction base;
+
+ IrInstruction *dest_type;
+ IrInstruction *target;
+};
+
+struct IrInstructionBoolToInt {
+ IrInstruction base;
+
+ IrInstruction *target;
+};
+
struct IrInstructionIntType {
IrInstruction base;
@@ -2620,6 +2795,10 @@ struct IrInstructionFrameAddress {
IrInstruction base;
};
+struct IrInstructionHandle {
+ IrInstruction base;
+};
+
enum IrOverflowOp {
IrOverflowOpAdd,
IrOverflowOpSub,
@@ -2665,7 +2844,7 @@ struct IrInstructionUnwrapErrPayload {
bool safety_check_on;
};
-struct IrInstructionMaybeWrap {
+struct IrInstructionOptionalWrap {
IrInstruction base;
IrInstruction *value;
@@ -2739,6 +2918,13 @@ struct IrInstructionIntToPtr {
struct IrInstructionIntToEnum {
IrInstruction base;
+ IrInstruction *dest_type;
+ IrInstruction *target;
+};
+
+struct IrInstructionEnumToInt {
+ IrInstruction base;
+
IrInstruction *target;
};
@@ -2780,11 +2966,9 @@ struct IrInstructionTypeName {
IrInstruction *type_value;
};
-struct IrInstructionCanImplicitCast {
- IrInstruction base;
-
- IrInstruction *type_value;
- IrInstruction *target_value;
+enum LVal {
+ LValNone,
+ LValPtr,
};
struct IrInstructionDeclRef {
@@ -2828,6 +3012,12 @@ struct IrInstructionOffsetOf {
IrInstruction *field_name;
};
+struct IrInstructionTypeInfo {
+ IrInstruction base;
+
+ IrInstruction *type_value;
+};
+
struct IrInstructionTypeId {
IrInstruction base;
@@ -2840,17 +3030,6 @@ struct IrInstructionSetEvalBranchQuota {
IrInstruction *new_quota;
};
-struct IrInstructionPtrTypeOf {
- IrInstruction base;
-
- IrInstruction *align_value;
- IrInstruction *child_type;
- uint32_t bit_offset_start;
- uint32_t bit_offset_end;
- bool is_const;
- bool is_volatile;
-};
-
struct IrInstructionAlignCast {
IrInstruction base;
@@ -2886,10 +3065,10 @@ struct IrInstructionExport {
struct IrInstructionErrorReturnTrace {
IrInstruction base;
- enum Nullable {
+ enum Optional {
Null,
NonNull,
- } nullable;
+ } optional;
};
struct IrInstructionErrorUnion {
@@ -3000,6 +3179,15 @@ struct IrInstructionAtomicRmw {
AtomicOrder resolved_ordering;
};
+struct IrInstructionAtomicLoad {
+ IrInstruction base;
+
+ IrInstruction *operand_type;
+ IrInstruction *ptr;
+ IrInstruction *ordering;
+ AtomicOrder resolved_ordering;
+};
+
struct IrInstructionPromiseResultType {
IrInstruction base;
@@ -3036,6 +3224,13 @@ struct IrInstructionMarkErrRetTracePtr {
IrInstruction *err_ret_trace_ptr;
};
+struct IrInstructionSqrt {
+ IrInstruction base;
+
+ IrInstruction *type;
+ IrInstruction *op;
+};
+
static const size_t slice_ptr_index = 0;
static const size_t slice_len_index = 1;
@@ -3054,7 +3249,7 @@ static const size_t stack_trace_ptr_count = 30;
#define RESULT_FIELD_NAME "result"
#define ASYNC_ALLOC_FIELD_NAME "allocFn"
#define ASYNC_FREE_FIELD_NAME "freeFn"
-#define AWAITER_HANDLE_FIELD_NAME "awaiter_handle"
+#define ATOMIC_STATE_FIELD_NAME "atomic_state"
// these point to data belonging to the awaiter
#define ERR_RET_TRACE_PTR_FIELD_NAME "err_ret_trace_ptr"
#define RESULT_PTR_FIELD_NAME "result_ptr"
diff --git a/src/analyze.cpp b/src/analyze.cpp
index c73e6b39e3..03cfa5b67b 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -25,6 +25,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type);
static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type);
static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type);
static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type);
+static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry);
ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) {
if (node->owner->c_import_node != nullptr) {
@@ -156,6 +157,13 @@ ScopeLoop *create_loop_scope(AstNode *node, Scope *parent) {
return scope;
}
+ScopeSuspend *create_suspend_scope(AstNode *node, Scope *parent) {
+ assert(node->type == NodeTypeSuspend);
+ ScopeSuspend *scope = allocate<ScopeSuspend>(1);
+ init_scope(&scope->base, ScopeIdSuspend, node, parent);
+ return scope;
+}
+
ScopeFnDef *create_fndef_scope(AstNode *node, Scope *parent, FnTableEntry *fn_entry) {
ScopeFnDef *scope = allocate<ScopeFnDef>(1);
init_scope(&scope->base, ScopeIdFnDef, node, parent);
@@ -203,6 +211,43 @@ static uint8_t bits_needed_for_unsigned(uint64_t x) {
return (upper >= x) ? base : (base + 1);
}
+AstNode *type_decl_node(TypeTableEntry *type_entry) {
+ switch (type_entry->id) {
+ case TypeTableEntryIdInvalid:
+ zig_unreachable();
+ case TypeTableEntryIdStruct:
+ return type_entry->data.structure.decl_node;
+ case TypeTableEntryIdEnum:
+ return type_entry->data.enumeration.decl_node;
+ case TypeTableEntryIdUnion:
+ return type_entry->data.unionation.decl_node;
+ case TypeTableEntryIdOpaque:
+ case TypeTableEntryIdMetaType:
+ case TypeTableEntryIdVoid:
+ case TypeTableEntryIdBool:
+ case TypeTableEntryIdUnreachable:
+ case TypeTableEntryIdInt:
+ case TypeTableEntryIdFloat:
+ case TypeTableEntryIdPointer:
+ case TypeTableEntryIdArray:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
+ case TypeTableEntryIdErrorUnion:
+ case TypeTableEntryIdErrorSet:
+ case TypeTableEntryIdFn:
+ case TypeTableEntryIdNamespace:
+ case TypeTableEntryIdBlock:
+ case TypeTableEntryIdBoundFn:
+ case TypeTableEntryIdArgTuple:
+ case TypeTableEntryIdPromise:
+ return nullptr;
+ }
+ zig_unreachable();
+}
+
bool type_is_complete(TypeTableEntry *type_entry) {
switch (type_entry->id) {
case TypeTableEntryIdInvalid:
@@ -223,11 +268,11 @@ bool type_is_complete(TypeTableEntry *type_entry) {
case TypeTableEntryIdFloat:
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -259,11 +304,11 @@ bool type_has_zero_bits_known(TypeTableEntry *type_entry) {
case TypeTableEntryIdFloat:
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -372,14 +417,15 @@ TypeTableEntry *get_promise_type(CodeGen *g, TypeTableEntry *result_type) {
}
TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type, bool is_const,
- bool is_volatile, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count)
+ bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count)
{
assert(!type_is_invalid(child_type));
+ assert(ptr_len == PtrLenSingle || child_type->id != TypeTableEntryIdOpaque);
TypeId type_id = {};
TypeTableEntry **parent_pointer = nullptr;
uint32_t abi_alignment = get_abi_alignment(g, child_type);
- if (unaligned_bit_count != 0 || is_volatile || byte_alignment != abi_alignment) {
+ if (unaligned_bit_count != 0 || is_volatile || byte_alignment != abi_alignment || ptr_len != PtrLenSingle) {
type_id.id = TypeTableEntryIdPointer;
type_id.data.pointer.child_type = child_type;
type_id.data.pointer.is_const = is_const;
@@ -387,6 +433,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
type_id.data.pointer.alignment = byte_alignment;
type_id.data.pointer.bit_offset = bit_offset;
type_id.data.pointer.unaligned_bit_count = unaligned_bit_count;
+ type_id.data.pointer.ptr_len = ptr_len;
auto existing_entry = g->type_table.maybe_get(type_id);
if (existing_entry)
@@ -405,16 +452,17 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdPointer);
entry->is_copyable = true;
+ const char *star_str = ptr_len == PtrLenSingle ? "*" : "[*]";
const char *const_str = is_const ? "const " : "";
const char *volatile_str = is_volatile ? "volatile " : "";
buf_resize(&entry->name, 0);
if (unaligned_bit_count == 0 && byte_alignment == abi_alignment) {
- buf_appendf(&entry->name, "&%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name));
+ buf_appendf(&entry->name, "%s%s%s%s", star_str, const_str, volatile_str, buf_ptr(&child_type->name));
} else if (unaligned_bit_count == 0) {
- buf_appendf(&entry->name, "&align(%" PRIu32 ") %s%s%s", byte_alignment,
+ buf_appendf(&entry->name, "%salign(%" PRIu32 ") %s%s%s", star_str, byte_alignment,
const_str, volatile_str, buf_ptr(&child_type->name));
} else {
- buf_appendf(&entry->name, "&align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment,
+ buf_appendf(&entry->name, "%salign(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", star_str, byte_alignment,
bit_offset, bit_offset + unaligned_bit_count, const_str, volatile_str, buf_ptr(&child_type->name));
}
@@ -424,7 +472,9 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
if (!entry->zero_bits) {
assert(byte_alignment > 0);
- if (is_const || is_volatile || unaligned_bit_count != 0 || byte_alignment != abi_alignment) {
+ if (is_const || is_volatile || unaligned_bit_count != 0 || byte_alignment != abi_alignment ||
+ ptr_len != PtrLenSingle)
+ {
TypeTableEntry *peer_type = get_pointer_to_type(g, child_type, false);
entry->type_ref = peer_type->type_ref;
entry->di_type = peer_type->di_type;
@@ -442,6 +492,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
entry->di_type = g->builtin_types.entry_void->di_type;
}
+ entry->data.pointer.ptr_len = ptr_len;
entry->data.pointer.child_type = child_type;
entry->data.pointer.is_const = is_const;
entry->data.pointer.is_volatile = is_volatile;
@@ -458,7 +509,8 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
}
TypeTableEntry *get_pointer_to_type(CodeGen *g, TypeTableEntry *child_type, bool is_const) {
- return get_pointer_to_type_extra(g, child_type, is_const, false, get_abi_alignment(g, child_type), 0, 0);
+ return get_pointer_to_type_extra(g, child_type, is_const, false, PtrLenSingle,
+ get_abi_alignment(g, child_type), 0, 0);
}
TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) {
@@ -466,11 +518,11 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type)
return return_type->promise_frame_parent;
}
- TypeTableEntry *awaiter_handle_type = get_maybe_type(g, g->builtin_types.entry_promise);
+ TypeTableEntry *atomic_state_type = g->builtin_types.entry_usize;
TypeTableEntry *result_ptr_type = get_pointer_to_type(g, return_type, false);
ZigList<const char *> field_names = {};
- field_names.append(AWAITER_HANDLE_FIELD_NAME);
+ field_names.append(ATOMIC_STATE_FIELD_NAME);
field_names.append(RESULT_FIELD_NAME);
field_names.append(RESULT_PTR_FIELD_NAME);
if (g->have_err_ret_tracing) {
@@ -480,7 +532,7 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type)
}
ZigList<TypeTableEntry *> field_types = {};
- field_types.append(awaiter_handle_type);
+ field_types.append(atomic_state_type);
field_types.append(return_type);
field_types.append(result_ptr_type);
if (g->have_err_ret_tracing) {
@@ -497,16 +549,15 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type)
return entry;
}
-TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
- if (child_type->maybe_parent) {
- TypeTableEntry *entry = child_type->maybe_parent;
+TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type) {
+ if (child_type->optional_parent) {
+ TypeTableEntry *entry = child_type->optional_parent;
return entry;
} else {
ensure_complete_type(g, child_type);
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdMaybe);
+ TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdOptional);
assert(child_type->type_ref || child_type->zero_bits);
- assert(child_type->di_type);
entry->is_copyable = type_is_copyable(g, child_type);
buf_resize(&entry->name, 0);
@@ -516,12 +567,14 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
entry->type_ref = LLVMInt1Type();
entry->di_type = g->builtin_types.entry_bool->di_type;
} else if (type_is_codegen_pointer(child_type)) {
+ assert(child_type->di_type);
// this is an optimization but also is necessary for calling C
// functions where all pointers are maybe pointers
// function types are technically pointers
entry->type_ref = child_type->type_ref;
entry->di_type = child_type->di_type;
} else {
+ assert(child_type->di_type);
// create a struct with a boolean whether this is the null value
LLVMTypeRef elem_types[] = {
child_type->type_ref,
@@ -575,7 +628,7 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
entry->data.maybe.child_type = child_type;
- child_type->maybe_parent = entry;
+ child_type->optional_parent = entry;
return entry;
}
}
@@ -748,6 +801,7 @@ static void slice_type_common_init(CodeGen *g, TypeTableEntry *pointer_type, Typ
TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type) {
assert(ptr_type->id == TypeTableEntryIdPointer);
+ assert(ptr_type->data.pointer.ptr_len == PtrLenUnknown);
TypeTableEntry **parent_pointer = &ptr_type->data.pointer.slice_parent;
if (*parent_pointer) {
@@ -759,14 +813,16 @@ TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type) {
// replace the & with [] to go from a ptr type name to a slice type name
buf_resize(&entry->name, 0);
- buf_appendf(&entry->name, "[]%s", buf_ptr(&ptr_type->name) + 1);
+ size_t name_offset = (ptr_type->data.pointer.ptr_len == PtrLenSingle) ? 1 : 3;
+ buf_appendf(&entry->name, "[]%s", buf_ptr(&ptr_type->name) + name_offset);
TypeTableEntry *child_type = ptr_type->data.pointer.child_type;
- uint32_t abi_alignment;
+ uint32_t abi_alignment = get_abi_alignment(g, child_type);
if (ptr_type->data.pointer.is_const || ptr_type->data.pointer.is_volatile ||
- ptr_type->data.pointer.alignment != (abi_alignment = get_abi_alignment(g, child_type)))
+ ptr_type->data.pointer.alignment != abi_alignment)
{
- TypeTableEntry *peer_ptr_type = get_pointer_to_type(g, child_type, false);
+ TypeTableEntry *peer_ptr_type = get_pointer_to_type_extra(g, child_type, false, false,
+ PtrLenUnknown, abi_alignment, 0, 0);
TypeTableEntry *peer_slice_type = get_slice_type(g, peer_ptr_type);
slice_type_common_init(g, ptr_type, entry);
@@ -790,9 +846,11 @@ TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type) {
if (child_ptr_type->data.pointer.is_const || child_ptr_type->data.pointer.is_volatile ||
child_ptr_type->data.pointer.alignment != get_abi_alignment(g, grand_child_type))
{
- TypeTableEntry *bland_child_ptr_type = get_pointer_to_type(g, grand_child_type, false);
+ TypeTableEntry *bland_child_ptr_type = get_pointer_to_type_extra(g, grand_child_type, false, false,
+ PtrLenUnknown, get_abi_alignment(g, grand_child_type), 0, 0);
TypeTableEntry *bland_child_slice = get_slice_type(g, bland_child_ptr_type);
- TypeTableEntry *peer_ptr_type = get_pointer_to_type(g, bland_child_slice, false);
+ TypeTableEntry *peer_ptr_type = get_pointer_to_type_extra(g, bland_child_slice, false, false,
+ PtrLenUnknown, get_abi_alignment(g, bland_child_slice), 0, 0);
TypeTableEntry *peer_slice_type = get_slice_type(g, peer_ptr_type);
entry->type_ref = peer_slice_type->type_ref;
@@ -998,8 +1056,11 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
}
if (fn_type_id->return_type != nullptr) {
ensure_complete_type(g, fn_type_id->return_type);
+ if (type_is_invalid(fn_type_id->return_type))
+ return g->builtin_types.entry_invalid;
+ assert(fn_type_id->return_type->id != TypeTableEntryIdOpaque);
} else {
- zig_panic("TODO implement inferred return types https://github.com/zig-lang/zig/issues/447");
+ zig_panic("TODO implement inferred return types https://github.com/ziglang/zig/issues/447");
}
TypeTableEntry *fn_type = new_type_table_entry(TypeTableEntryIdFn);
@@ -1111,7 +1172,10 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
gen_param_info->src_index = i;
gen_param_info->gen_index = SIZE_MAX;
- type_ensure_zero_bits_known(g, type_entry);
+ ensure_complete_type(g, type_entry);
+ if (type_is_invalid(type_entry))
+ return g->builtin_types.entry_invalid;
+
if (type_has_bits(type_entry)) {
TypeTableEntry *gen_type;
if (handle_is_ptr(type_entry)) {
@@ -1250,7 +1314,7 @@ void init_fn_type_id(FnTypeId *fn_type_id, AstNode *proto_node, size_t param_cou
}
fn_type_id->param_count = fn_proto->params.length;
- fn_type_id->param_info = allocate_nonzero<FnTypeParamInfo>(param_count_alloc);
+ fn_type_id->param_info = allocate<FnTypeParamInfo>(param_count_alloc);
fn_type_id->next_param_index = 0;
fn_type_id->is_var_args = fn_proto->is_var_args;
}
@@ -1275,7 +1339,8 @@ static bool analyze_const_align(CodeGen *g, Scope *scope, AstNode *node, uint32_
}
static bool analyze_const_string(CodeGen *g, Scope *scope, AstNode *node, Buf **out_buffer) {
- TypeTableEntry *ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true);
+ TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
TypeTableEntry *str_type = get_slice_type(g, ptr_type);
IrInstruction *instr = analyze_const_value(g, scope, node, str_type, nullptr);
if (type_is_invalid(instr->value.type))
@@ -1312,10 +1377,10 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) {
zig_unreachable();
case TypeTableEntryIdMetaType:
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdNamespace:
@@ -1337,7 +1402,7 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) {
return type_entry->data.structure.layout == ContainerLayoutPacked;
case TypeTableEntryIdUnion:
return type_entry->data.unionation.layout == ContainerLayoutPacked;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
return type_is_codegen_pointer(child_type);
@@ -1353,10 +1418,10 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) {
case TypeTableEntryIdInvalid:
zig_unreachable();
case TypeTableEntryIdMetaType:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdNamespace:
@@ -1364,10 +1429,10 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) {
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdArgTuple:
case TypeTableEntryIdPromise:
+ case TypeTableEntryIdVoid:
return false;
case TypeTableEntryIdOpaque:
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdVoid:
case TypeTableEntryIdBool:
return true;
case TypeTableEntryIdInt:
@@ -1388,13 +1453,18 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) {
case TypeTableEntryIdFn:
return type_entry->data.fn.fn_type_id.cc == CallingConventionC;
case TypeTableEntryIdPointer:
- return type_allowed_in_extern(g, type_entry->data.pointer.child_type);
+ if (type_size(g, type_entry) == 0)
+ return false;
+ return true;
case TypeTableEntryIdStruct:
return type_entry->data.structure.layout == ContainerLayoutExtern || type_entry->data.structure.layout == ContainerLayoutPacked;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
- return child_type->id == TypeTableEntryIdPointer || child_type->id == TypeTableEntryIdFn;
+ if (child_type->id != TypeTableEntryIdPointer && child_type->id != TypeTableEntryIdFn) {
+ return false;
+ }
+ return type_allowed_in_extern(g, child_type);
}
case TypeTableEntryIdEnum:
return type_entry->data.enumeration.layout == ContainerLayoutExtern || type_entry->data.enumeration.layout == ContainerLayoutPacked;
@@ -1441,6 +1511,17 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
calling_convention_name(fn_type_id.cc)));
return g->builtin_types.entry_invalid;
}
+ if (param_node->data.param_decl.type != nullptr) {
+ TypeTableEntry *type_entry = analyze_type_expr(g, child_scope, param_node->data.param_decl.type);
+ if (type_is_invalid(type_entry)) {
+ return g->builtin_types.entry_invalid;
+ }
+ FnTypeParamInfo *param_info = &fn_type_id.param_info[fn_type_id.next_param_index];
+ param_info->type = type_entry;
+ param_info->is_noalias = param_node->data.param_decl.is_noalias;
+ fn_type_id.next_param_index += 1;
+ }
+
return get_generic_fn_type(g, &fn_type_id);
} else if (param_is_var_args) {
if (fn_type_id.cc == CallingConventionC) {
@@ -1490,23 +1571,19 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdInvalid:
return g->builtin_types.entry_invalid;
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdArgTuple:
case TypeTableEntryIdOpaque:
add_node_error(g, param_node->data.param_decl.type,
buf_sprintf("parameter of type '%s' not allowed", buf_ptr(&type_entry->name)));
return g->builtin_types.entry_invalid;
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdMetaType:
- add_node_error(g, param_node->data.param_decl.type,
- buf_sprintf("parameter of type '%s' must be declared comptime",
- buf_ptr(&type_entry->name)));
- return g->builtin_types.entry_invalid;
case TypeTableEntryIdVoid:
case TypeTableEntryIdBool:
case TypeTableEntryIdInt:
@@ -1514,17 +1591,18 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
case TypeTableEntryIdUnion:
case TypeTableEntryIdFn:
case TypeTableEntryIdPromise:
- ensure_complete_type(g, type_entry);
- if (calling_convention_allows_zig_types(fn_type_id.cc) && !type_is_copyable(g, type_entry)) {
+ type_ensure_zero_bits_known(g, type_entry);
+ if (type_requires_comptime(type_entry)) {
add_node_error(g, param_node->data.param_decl.type,
- buf_sprintf("type '%s' is not copyable; cannot pass by value", buf_ptr(&type_entry->name)));
+ buf_sprintf("parameter of type '%s' must be declared comptime",
+ buf_ptr(&type_entry->name)));
return g->builtin_types.entry_invalid;
}
break;
@@ -1548,7 +1626,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
return g->builtin_types.entry_invalid;
}
add_node_error(g, proto_node,
- buf_sprintf("TODO implement inferred return types https://github.com/zig-lang/zig/issues/447"));
+ buf_sprintf("TODO implement inferred return types https://github.com/ziglang/zig/issues/447"));
return g->builtin_types.entry_invalid;
//return get_generic_fn_type(g, &fn_type_id);
}
@@ -1566,7 +1644,10 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
fn_type_id.return_type = specified_return_type;
}
- if (!calling_convention_allows_zig_types(fn_type_id.cc) && !type_allowed_in_extern(g, fn_type_id.return_type)) {
+ if (!calling_convention_allows_zig_types(fn_type_id.cc) &&
+ fn_type_id.return_type->id != TypeTableEntryIdVoid &&
+ !type_allowed_in_extern(g, fn_type_id.return_type))
+ {
add_node_error(g, fn_proto->return_type,
buf_sprintf("return type '%s' not allowed in function with calling convention '%s'",
buf_ptr(&fn_type_id.return_type->name),
@@ -1578,16 +1659,16 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdInvalid:
zig_unreachable();
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdArgTuple:
case TypeTableEntryIdOpaque:
add_node_error(g, fn_proto->return_type,
buf_sprintf("return type '%s' not allowed", buf_ptr(&fn_type_id.return_type->name)));
return g->builtin_types.entry_invalid;
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
@@ -1608,7 +1689,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -1839,7 +1920,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
}
assert(!struct_type->data.structure.zero_bits_loop_flag);
- assert(struct_type->data.structure.fields);
+ assert(struct_type->data.structure.fields || struct_type->data.structure.src_field_count == 0);
assert(decl_node->type == NodeTypeContainerDecl);
size_t field_count = struct_type->data.structure.src_field_count;
@@ -1868,6 +1949,17 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
break;
}
+ if (struct_type->data.structure.layout == ContainerLayoutExtern) {
+ if (!type_allowed_in_extern(g, field_type)) {
+ AstNode *field_source_node = decl_node->data.container_decl.fields.at(i);
+ add_node_error(g, field_source_node,
+ buf_sprintf("extern structs cannot contain fields of type '%s'",
+ buf_ptr(&field_type->name)));
+ struct_type->data.structure.is_invalid = true;
+ break;
+ }
+ }
+
if (!type_has_bits(field_type))
continue;
@@ -2284,8 +2376,9 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
return;
if (enum_type->data.enumeration.zero_bits_loop_flag) {
- enum_type->data.enumeration.zero_bits_known = true;
- enum_type->data.enumeration.zero_bits_loop_flag = false;
+ add_node_error(g, enum_type->data.enumeration.decl_node,
+ buf_sprintf("'%s' depends on itself", buf_ptr(&enum_type->name)));
+ enum_type->data.enumeration.is_invalid = true;
return;
}
@@ -2317,8 +2410,14 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
HashMap<BigInt, AstNode *, bigint_hash, bigint_eql> occupied_tag_values = {};
occupied_tag_values.init(field_count);
- TypeTableEntry *tag_int_type = get_smallest_unsigned_int_type(g, field_count - 1);
+ TypeTableEntry *tag_int_type;
+ if (enum_type->data.enumeration.layout == ContainerLayoutExtern) {
+ tag_int_type = get_c_int_type(g, CIntTypeInt);
+ } else {
+ tag_int_type = get_smallest_unsigned_int_type(g, field_count - 1);
+ }
+ // TODO: Are extern enums allowed to have an init_arg_expr?
if (decl_node->data.container_decl.init_arg_expr != nullptr) {
TypeTableEntry *wanted_tag_int_type = analyze_type_expr(g, scope, decl_node->data.container_decl.init_arg_expr);
if (type_is_invalid(wanted_tag_int_type)) {
@@ -2503,6 +2602,10 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
continue;
}
+ if (type_requires_comptime(field_type)) {
+ struct_type->data.structure.requires_comptime = true;
+ }
+
if (!type_has_bits(field_type))
continue;
@@ -2650,8 +2753,8 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
return;
}
tag_type = enum_type;
+ abi_alignment_so_far = get_abi_alignment(g, enum_type); // this populates src_field_count
covered_enum_fields = allocate<bool>(enum_type->data.enumeration.src_field_count);
- abi_alignment_so_far = get_abi_alignment(g, enum_type);
} else {
tag_type = nullptr;
abi_alignment_so_far = 0;
@@ -2694,6 +2797,11 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
}
union_field->type_entry = field_type;
+ if (type_requires_comptime(field_type)) {
+ union_type->data.unionation.requires_comptime = true;
+ }
+
+
if (field_node->data.struct_field.value != nullptr && !decl_node->data.container_decl.auto_enum) {
ErrorMsg *msg = add_node_error(g, field_node->data.struct_field.value,
buf_sprintf("non-enum union field assignment"));
@@ -2939,14 +3047,15 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) {
if (fn_type_id->param_count != 2) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
- TypeTableEntry *const_u8_ptr = get_pointer_to_type(g, g->builtin_types.entry_u8, true);
+ TypeTableEntry *const_u8_ptr = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
TypeTableEntry *const_u8_slice = get_slice_type(g, const_u8_ptr);
if (fn_type_id->param_info[0].type != const_u8_slice) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
- TypeTableEntry *nullable_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g));
- if (fn_type_id->param_info[1].type != nullable_ptr_to_stack_trace_type) {
+ TypeTableEntry *optional_ptr_to_stack_trace_type = get_optional_type(g, get_ptr_to_stack_trace_type(g));
+ if (fn_type_id->param_info[1].type != optional_ptr_to_stack_trace_type) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
@@ -3122,9 +3231,8 @@ static void add_top_level_decl(CodeGen *g, ScopeDecls *decls_scope, Tld *tld) {
}
{
- auto entry = g->primitive_type_table.maybe_get(tld->name);
- if (entry) {
- TypeTableEntry *type = entry->value;
+ TypeTableEntry *type = get_primitive_type(g, tld->name);
+ if (type != nullptr) {
add_node_error(g, tld->source_node,
buf_sprintf("declaration shadows type '%s'", buf_ptr(&type->name)));
}
@@ -3255,7 +3363,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeThisLiteral:
case NodeTypeSymbol:
case NodeTypePrefixOpExpr:
- case NodeTypeAddrOfExpr:
+ case NodeTypePointerType:
case NodeTypeIfBoolExpr:
case NodeTypeWhileExpr:
case NodeTypeForExpr:
@@ -3267,6 +3375,8 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeUnreachable:
case NodeTypeAsmExpr:
case NodeTypeFieldAccessExpr:
+ case NodeTypePtrDeref:
+ case NodeTypeUnwrapOptional:
case NodeTypeStructField:
case NodeTypeContainerInitExpr:
case NodeTypeStructValueField:
@@ -3308,16 +3418,16 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt
case TypeTableEntryIdInvalid:
return g->builtin_types.entry_invalid;
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdBlock:
case TypeTableEntryIdArgTuple:
case TypeTableEntryIdOpaque:
add_node_error(g, source_node, buf_sprintf("variable of type '%s' not allowed",
buf_ptr(&type_entry->name)));
return g->builtin_types.entry_invalid;
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdMetaType:
case TypeTableEntryIdVoid:
@@ -3327,7 +3437,7 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -3367,9 +3477,8 @@ VariableTableEntry *add_variable(CodeGen *g, AstNode *source_node, Scope *parent
add_error_note(g, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
variable_entry->value->type = g->builtin_types.entry_invalid;
} else {
- auto primitive_table_entry = g->primitive_type_table.maybe_get(name);
- if (primitive_table_entry) {
- TypeTableEntry *type = primitive_table_entry->value;
+ TypeTableEntry *type = get_primitive_type(g, name);
+ if (type != nullptr) {
add_node_error(g, source_node,
buf_sprintf("variable shadows type '%s'", buf_ptr(&type->name)));
variable_entry->value->type = g->builtin_types.entry_invalid;
@@ -3451,12 +3560,12 @@ static void resolve_decl_var(CodeGen *g, TldVar *tld_var) {
add_node_error(g, source_node, buf_sprintf("variable initialization is unreachable"));
implicit_type = g->builtin_types.entry_invalid;
} else if ((!is_const || linkage == VarLinkageExternal) &&
- (implicit_type->id == TypeTableEntryIdNumLitFloat ||
- implicit_type->id == TypeTableEntryIdNumLitInt))
+ (implicit_type->id == TypeTableEntryIdComptimeFloat ||
+ implicit_type->id == TypeTableEntryIdComptimeInt))
{
add_node_error(g, source_node, buf_sprintf("unable to infer variable type"));
implicit_type = g->builtin_types.entry_invalid;
- } else if (implicit_type->id == TypeTableEntryIdNullLit) {
+ } else if (implicit_type->id == TypeTableEntryIdNull) {
add_node_error(g, source_node, buf_sprintf("unable to infer variable type"));
implicit_type = g->builtin_types.entry_invalid;
} else if (implicit_type->id == TypeTableEntryIdMetaType && !is_const) {
@@ -3616,6 +3725,7 @@ FnTableEntry *scope_get_fn_if_root(Scope *scope) {
case ScopeIdVarDecl:
case ScopeIdCImport:
case ScopeIdLoop:
+ case ScopeIdSuspend:
case ScopeIdCompTime:
case ScopeIdCoroPrelude:
scope = scope->parent;
@@ -3674,6 +3784,7 @@ TypeUnionField *find_union_field_by_tag(TypeTableEntry *type_entry, const BigInt
}
TypeEnumField *find_enum_field_by_tag(TypeTableEntry *enum_type, const BigInt *tag) {
+ assert(enum_type->data.enumeration.zero_bits_known);
for (uint32_t i = 0; i < enum_type->data.enumeration.src_field_count; i += 1) {
TypeEnumField *field = &enum_type->data.enumeration.fields[i];
if (bigint_cmp(&field->value, tag) == CmpEQ) {
@@ -3700,11 +3811,11 @@ static bool is_container(TypeTableEntry *type_entry) {
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
case TypeTableEntryIdArray:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -3719,14 +3830,24 @@ static bool is_container(TypeTableEntry *type_entry) {
zig_unreachable();
}
+bool is_ref(TypeTableEntry *type_entry) {
+ return type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle;
+}
+
+bool is_array_ref(TypeTableEntry *type_entry) {
+ TypeTableEntry *array = is_ref(type_entry) ?
+ type_entry->data.pointer.child_type : type_entry;
+ return array->id == TypeTableEntryIdArray;
+}
+
bool is_container_ref(TypeTableEntry *type_entry) {
- return (type_entry->id == TypeTableEntryIdPointer) ?
+ return is_ref(type_entry) ?
is_container(type_entry->data.pointer.child_type) : is_container(type_entry);
}
TypeTableEntry *container_ref_type(TypeTableEntry *type_entry) {
assert(is_container_ref(type_entry));
- return (type_entry->id == TypeTableEntryIdPointer) ?
+ return is_ref(type_entry) ?
type_entry->data.pointer.child_type : type_entry;
}
@@ -3749,11 +3870,11 @@ void resolve_container_type(CodeGen *g, TypeTableEntry *type_entry) {
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
case TypeTableEntryIdArray:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -3772,7 +3893,7 @@ TypeTableEntry *get_codegen_ptr_type(TypeTableEntry *type) {
if (type->id == TypeTableEntryIdPointer) return type;
if (type->id == TypeTableEntryIdFn) return type;
if (type->id == TypeTableEntryIdPromise) return type;
- if (type->id == TypeTableEntryIdMaybe) {
+ if (type->id == TypeTableEntryIdOptional) {
if (type->data.maybe.child_type->id == TypeTableEntryIdPointer) return type->data.maybe.child_type;
if (type->data.maybe.child_type->id == TypeTableEntryIdFn) return type->data.maybe.child_type;
if (type->data.maybe.child_type->id == TypeTableEntryIdPromise) return type->data.maybe.child_type;
@@ -3819,7 +3940,7 @@ AstNode *get_param_decl_node(FnTableEntry *fn_entry, size_t index) {
return nullptr;
}
-static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry, VariableTableEntry **arg_vars) {
+static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry) {
TypeTableEntry *fn_type = fn_table_entry->type_entry;
assert(!fn_type->data.fn.is_generic);
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
@@ -3857,14 +3978,10 @@ static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entr
if (fn_type->data.fn.gen_param_info) {
var->gen_arg_index = fn_type->data.fn.gen_param_info[i].gen_index;
}
-
- if (arg_vars) {
- arg_vars[i] = var;
- }
}
}
-static bool analyze_resolve_inferred_error_set(CodeGen *g, TypeTableEntry *err_set_type, AstNode *source_node) {
+bool resolve_inferred_error_set(CodeGen *g, TypeTableEntry *err_set_type, AstNode *source_node) {
FnTableEntry *infer_fn = err_set_type->data.error_set.infer_fn;
if (infer_fn != nullptr) {
if (infer_fn->anal_state == FnAnalStateInvalid) {
@@ -3916,7 +4033,7 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ
}
if (inferred_err_set_type->data.error_set.infer_fn != nullptr) {
- if (!analyze_resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) {
+ if (!resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) {
fn_table_entry->anal_state = FnAnalStateInvalid;
return;
}
@@ -3938,7 +4055,7 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ
}
if (g->verbose_ir) {
- fprintf(stderr, "{ // (analyzed)\n");
+ fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn_table_entry->symbol_name));
ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4);
fprintf(stderr, "}\n");
}
@@ -3946,7 +4063,7 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ
fn_table_entry->anal_state = FnAnalStateComplete;
}
-void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry) {
+static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry) {
assert(fn_table_entry->anal_state != FnAnalStateProbing);
if (fn_table_entry->anal_state != FnAnalStateReady)
return;
@@ -3960,7 +4077,7 @@ void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry) {
if (!fn_table_entry->child_scope)
fn_table_entry->child_scope = &fn_table_entry->fndef_scope->base;
- define_local_param_variables(g, fn_table_entry, nullptr);
+ define_local_param_variables(g, fn_table_entry);
TypeTableEntry *fn_type = fn_table_entry->type_entry;
assert(!fn_type->data.fn.is_generic);
@@ -4188,43 +4305,7 @@ void semantic_analyze(CodeGen *g) {
}
}
-TypeTableEntry **get_int_type_ptr(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
- size_t index;
- if (size_in_bits == 2) {
- index = 0;
- } else if (size_in_bits == 3) {
- index = 1;
- } else if (size_in_bits == 4) {
- index = 2;
- } else if (size_in_bits == 5) {
- index = 3;
- } else if (size_in_bits == 6) {
- index = 4;
- } else if (size_in_bits == 7) {
- index = 5;
- } else if (size_in_bits == 8) {
- index = 6;
- } else if (size_in_bits == 16) {
- index = 7;
- } else if (size_in_bits == 29) {
- index = 8;
- } else if (size_in_bits == 32) {
- index = 9;
- } else if (size_in_bits == 64) {
- index = 10;
- } else if (size_in_bits == 128) {
- index = 11;
- } else {
- return nullptr;
- }
- return &g->builtin_types.entry_int[is_signed ? 0 : 1][index];
-}
-
TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
- TypeTableEntry **common_entry = get_int_type_ptr(g, is_signed, size_in_bits);
- if (common_entry)
- return *common_entry;
-
TypeId type_id = {};
type_id.id = TypeTableEntryIdInt;
type_id.data.integer.is_signed = is_signed;
@@ -4253,10 +4334,10 @@ bool handle_is_ptr(TypeTableEntry *type_entry) {
switch (type_entry->id) {
case TypeTableEntryIdInvalid:
case TypeTableEntryIdMetaType:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
@@ -4279,7 +4360,7 @@ bool handle_is_ptr(TypeTableEntry *type_entry) {
return type_has_bits(type_entry);
case TypeTableEntryIdErrorUnion:
return type_has_bits(type_entry->data.error_union.payload_type);
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
return type_has_bits(type_entry->data.maybe.child_type) &&
!type_is_codegen_pointer(type_entry->data.maybe.child_type);
case TypeTableEntryIdUnion:
@@ -4296,8 +4377,9 @@ bool handle_is_ptr(TypeTableEntry *type_entry) {
static ZigWindowsSDK *get_windows_sdk(CodeGen *g) {
if (g->win_sdk == nullptr) {
- if (os_find_windows_sdk(&g->win_sdk)) {
- zig_panic("Unable to determine Windows SDK path.");
+ if (zig_find_windows_sdk(&g->win_sdk)) {
+ fprintf(stderr, "unable to determine windows sdk path\n");
+ exit(1);
}
}
assert(g->win_sdk != nullptr);
@@ -4351,22 +4433,14 @@ Buf *get_linux_libc_include_path(void) {
}
char *prev_newline = buf_ptr(out_stderr);
ZigList<const char *> search_paths = {};
- bool found_search_paths = false;
for (;;) {
char *newline = strchr(prev_newline, '\n');
if (newline == nullptr) {
- zig_panic("unable to determine libc include path: bad output from C compiler command");
+ break;
}
*newline = 0;
- if (found_search_paths) {
- if (strcmp(prev_newline, "End of search list.") == 0) {
- break;
- }
+ if (prev_newline[0] == ' ') {
search_paths.append(prev_newline);
- } else {
- if (strcmp(prev_newline, "#include <...> search starts here:") == 0) {
- found_search_paths = true;
- }
}
prev_newline = newline + 1;
}
@@ -4399,7 +4473,8 @@ void find_libc_include_path(CodeGen *g) {
ZigWindowsSDK *sdk = get_windows_sdk(g);
g->libc_include_dir = buf_alloc();
if (os_get_win32_ucrt_include_path(sdk, g->libc_include_dir)) {
- zig_panic("Unable to determine libc include path.");
+ fprintf(stderr, "Unable to determine libc include path. --libc-include-dir");
+ exit(1);
}
} else if (g->zig_target.os == OsLinux) {
g->libc_include_dir = get_linux_libc_include_path();
@@ -4421,24 +4496,32 @@ void find_libc_lib_path(CodeGen *g) {
if (g->zig_target.os == OsWindows) {
ZigWindowsSDK *sdk = get_windows_sdk(g);
- Buf* vc_lib_dir = buf_alloc();
- if (os_get_win32_vcruntime_path(vc_lib_dir, g->zig_target.arch.arch)) {
- zig_panic("Unable to determine vcruntime path.");
+ if (g->msvc_lib_dir == nullptr) {
+ if (sdk->msvc_lib_dir_ptr == nullptr) {
+ fprintf(stderr, "Unable to determine vcruntime path. --msvc-lib-dir");
+ exit(1);
+ }
+ g->msvc_lib_dir = buf_create_from_mem(sdk->msvc_lib_dir_ptr, sdk->msvc_lib_dir_len);
}
- Buf* ucrt_lib_path = buf_alloc();
- if (os_get_win32_ucrt_lib_path(sdk, ucrt_lib_path, g->zig_target.arch.arch)) {
- zig_panic("Unable to determine ucrt path.");
+ if (g->libc_lib_dir == nullptr) {
+ Buf* ucrt_lib_path = buf_alloc();
+ if (os_get_win32_ucrt_lib_path(sdk, ucrt_lib_path, g->zig_target.arch.arch)) {
+ fprintf(stderr, "Unable to determine ucrt path. --libc-lib-dir");
+ exit(1);
+ }
+ g->libc_lib_dir = ucrt_lib_path;
}
- Buf* kern_lib_path = buf_alloc();
- if (os_get_win32_kern32_path(sdk, kern_lib_path, g->zig_target.arch.arch)) {
- zig_panic("Unable to determine kernel32 path.");
+ if (g->kernel32_lib_dir == nullptr) {
+ Buf* kern_lib_path = buf_alloc();
+ if (os_get_win32_kern32_path(sdk, kern_lib_path, g->zig_target.arch.arch)) {
+ fprintf(stderr, "Unable to determine kernel32 path. --kernel32-lib-dir");
+ exit(1);
+ }
+ g->kernel32_lib_dir = kern_lib_path;
}
- g->msvc_lib_dir = vc_lib_dir;
- g->libc_lib_dir = ucrt_lib_path;
- g->kernel32_lib_dir = kern_lib_path;
} else if (g->zig_target.os == OsLinux) {
g->libc_lib_dir = get_linux_libc_lib_path("crt1.o");
} else {
@@ -4515,6 +4598,52 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) {
return true;
}
+static uint32_t hash_const_val_ptr(ConstExprValue *const_val) {
+ uint32_t hash_val = 0;
+ switch (const_val->data.x_ptr.mut) {
+ case ConstPtrMutRuntimeVar:
+ hash_val += (uint32_t)3500721036;
+ break;
+ case ConstPtrMutComptimeConst:
+ hash_val += (uint32_t)4214318515;
+ break;
+ case ConstPtrMutComptimeVar:
+ hash_val += (uint32_t)1103195694;
+ break;
+ }
+ switch (const_val->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ hash_val += (uint32_t)2478261866;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee);
+ return hash_val;
+ case ConstPtrSpecialBaseArray:
+ hash_val += (uint32_t)1764906839;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val);
+ hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index);
+ hash_val += const_val->data.x_ptr.data.base_array.is_cstr ? 1297263887 : 200363492;
+ return hash_val;
+ case ConstPtrSpecialBaseStruct:
+ hash_val += (uint32_t)3518317043;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val);
+ hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index);
+ return hash_val;
+ case ConstPtrSpecialHardCodedAddr:
+ hash_val += (uint32_t)4048518294;
+ hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr);
+ return hash_val;
+ case ConstPtrSpecialDiscard:
+ hash_val += 2010123162;
+ return hash_val;
+ case ConstPtrSpecialFunction:
+ hash_val += (uint32_t)2590901619;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
+ return hash_val;
+ }
+ zig_unreachable();
+}
+
static uint32_t hash_const_val(ConstExprValue *const_val) {
assert(const_val->special == ConstValSpecialStatic);
switch (const_val->type->id) {
@@ -4527,7 +4656,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
case TypeTableEntryIdVoid:
return (uint32_t)4149439618;
case TypeTableEntryIdInt:
- case TypeTableEntryIdNumLitInt:
+ case TypeTableEntryIdComptimeInt:
{
uint32_t result = 1331471175;
for (size_t i = 0; i < const_val->data.x_bigint.digit_count; i += 1) {
@@ -4547,6 +4676,13 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
}
case TypeTableEntryIdFloat:
switch (const_val->type->data.floating.bit_count) {
+ case 16:
+ {
+ uint16_t result;
+ static_assert(sizeof(result) == sizeof(const_val->data.x_f16), "");
+ memcpy(&result, &const_val->data.x_f16, sizeof(result));
+ return result * 65537u;
+ }
case 32:
{
uint32_t result;
@@ -4568,7 +4704,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
default:
zig_unreachable();
}
- case TypeTableEntryIdNumLitFloat:
+ case TypeTableEntryIdComptimeFloat:
{
float128_t f128 = bigfloat_to_f128(&const_val->data.x_bigfloat);
uint32_t ints[4];
@@ -4583,57 +4719,13 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
assert(const_val->data.x_ptr.special == ConstPtrSpecialFunction);
return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
case TypeTableEntryIdPointer:
- {
- uint32_t hash_val = 0;
- switch (const_val->data.x_ptr.mut) {
- case ConstPtrMutRuntimeVar:
- hash_val += (uint32_t)3500721036;
- break;
- case ConstPtrMutComptimeConst:
- hash_val += (uint32_t)4214318515;
- break;
- case ConstPtrMutComptimeVar:
- hash_val += (uint32_t)1103195694;
- break;
- }
- switch (const_val->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- zig_unreachable();
- case ConstPtrSpecialRef:
- hash_val += (uint32_t)2478261866;
- hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee);
- return hash_val;
- case ConstPtrSpecialBaseArray:
- hash_val += (uint32_t)1764906839;
- hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val);
- hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index);
- hash_val += const_val->data.x_ptr.data.base_array.is_cstr ? 1297263887 : 200363492;
- return hash_val;
- case ConstPtrSpecialBaseStruct:
- hash_val += (uint32_t)3518317043;
- hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val);
- hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index);
- return hash_val;
- case ConstPtrSpecialHardCodedAddr:
- hash_val += (uint32_t)4048518294;
- hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr);
- return hash_val;
- case ConstPtrSpecialDiscard:
- hash_val += 2010123162;
- return hash_val;
- case ConstPtrSpecialFunction:
- hash_val += (uint32_t)2590901619;
- hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
- return hash_val;
- }
- zig_unreachable();
- }
+ return hash_const_val_ptr(const_val);
case TypeTableEntryIdPromise:
// TODO better hashing algorithm
return 223048345;
- case TypeTableEntryIdUndefLit:
+ case TypeTableEntryIdUndefined:
return 162837799;
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdNull:
return 844854567;
case TypeTableEntryIdArray:
// TODO better hashing algorithm
@@ -4644,11 +4736,15 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
case TypeTableEntryIdUnion:
// TODO better hashing algorithm
return 2709806591;
- case TypeTableEntryIdMaybe:
- if (const_val->data.x_maybe) {
- return hash_const_val(const_val->data.x_maybe) * 1992916303;
+ case TypeTableEntryIdOptional:
+ if (get_codegen_ptr_type(const_val->type) != nullptr) {
+ return hash_const_val(const_val) * 1992916303;
} else {
- return 4016830364;
+ if (const_val->data.x_optional) {
+ return hash_const_val(const_val->data.x_optional) * 1992916303;
+ } else {
+ return 4016830364;
+ }
}
case TypeTableEntryIdErrorUnion:
// TODO better hashing algorithm
@@ -4713,10 +4809,10 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdFn:
@@ -4748,10 +4844,12 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
}
return false;
- case TypeTableEntryIdMaybe:
- if (value->data.x_maybe == nullptr)
+ case TypeTableEntryIdOptional:
+ if (get_codegen_ptr_type(value->type) != nullptr)
+ return value->data.x_ptr.mut == ConstPtrMutComptimeVar;
+ if (value->data.x_optional == nullptr)
return false;
- return can_mutate_comptime_var_state(value->data.x_maybe);
+ return can_mutate_comptime_var_state(value->data.x_optional);
case TypeTableEntryIdErrorUnion:
if (value->data.x_err_union.err != nullptr)
@@ -4778,10 +4876,10 @@ static bool return_type_is_cacheable(TypeTableEntry *return_type) {
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdFn:
@@ -4798,7 +4896,7 @@ static bool return_type_is_cacheable(TypeTableEntry *return_type) {
case TypeTableEntryIdUnion:
return false;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
return return_type_is_cacheable(return_type->data.maybe.child_type);
case TypeTableEntryIdErrorUnion:
@@ -4816,6 +4914,8 @@ bool fn_eval_cacheable(Scope *scope, TypeTableEntry *return_type) {
while (scope) {
if (scope->id == ScopeIdVarDecl) {
ScopeVarDecl *var_scope = (ScopeVarDecl *)scope;
+ if (type_is_invalid(var_scope->var->value->type))
+ return false;
if (can_mutate_comptime_var_state(var_scope->var->value))
return false;
} else if (scope->id == ScopeIdFnDef) {
@@ -4889,10 +4989,10 @@ bool type_requires_comptime(TypeTableEntry *type_entry) {
case TypeTableEntryIdInvalid:
case TypeTableEntryIdOpaque:
zig_unreachable();
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdMetaType:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
@@ -4900,17 +5000,30 @@ bool type_requires_comptime(TypeTableEntry *type_entry) {
case TypeTableEntryIdArgTuple:
return true;
case TypeTableEntryIdArray:
+ return type_requires_comptime(type_entry->data.array.child_type);
case TypeTableEntryIdStruct:
+ assert(type_has_zero_bits_known(type_entry));
+ return type_entry->data.structure.requires_comptime;
case TypeTableEntryIdUnion:
- case TypeTableEntryIdMaybe:
+ assert(type_has_zero_bits_known(type_entry));
+ return type_entry->data.unionation.requires_comptime;
+ case TypeTableEntryIdOptional:
+ return type_requires_comptime(type_entry->data.maybe.child_type);
case TypeTableEntryIdErrorUnion:
+ return type_requires_comptime(type_entry->data.error_union.payload_type);
+ case TypeTableEntryIdPointer:
+ if (type_entry->data.pointer.child_type->id == TypeTableEntryIdOpaque) {
+ return false;
+ } else {
+ return type_requires_comptime(type_entry->data.pointer.child_type);
+ }
+ case TypeTableEntryIdFn:
+ return type_entry->data.fn.is_generic;
case TypeTableEntryIdEnum:
case TypeTableEntryIdErrorSet:
- case TypeTableEntryIdFn:
case TypeTableEntryIdBool:
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
- case TypeTableEntryIdPointer:
case TypeTableEntryIdVoid:
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdPromise:
@@ -4966,7 +5079,9 @@ void init_const_c_str_lit(CodeGen *g, ConstExprValue *const_val, Buf *str) {
// then make the pointer point to it
const_val->special = ConstValSpecialStatic;
- const_val->type = get_pointer_to_type(g, g->builtin_types.entry_u8, true);
+ // TODO make this `[*]null u8` instead of `[*]u8`
+ const_val->type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
const_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
const_val->data.x_ptr.data.base_array.array_val = array_val;
const_val->data.x_ptr.data.base_array.elem_index = 0;
@@ -5027,10 +5142,13 @@ ConstExprValue *create_const_signed(TypeTableEntry *type, int64_t x) {
void init_const_float(ConstExprValue *const_val, TypeTableEntry *type, double value) {
const_val->special = ConstValSpecialStatic;
const_val->type = type;
- if (type->id == TypeTableEntryIdNumLitFloat) {
+ if (type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_init_64(&const_val->data.x_bigfloat, value);
} else if (type->id == TypeTableEntryIdFloat) {
switch (type->data.floating.bit_count) {
+ case 16:
+ const_val->data.x_f16 = zig_double_to_f16(value);
+ break;
case 32:
const_val->data.x_f32 = value;
break;
@@ -5107,13 +5225,16 @@ void init_const_slice(CodeGen *g, ConstExprValue *const_val, ConstExprValue *arr
{
assert(array_val->type->id == TypeTableEntryIdArray);
- TypeTableEntry *ptr_type = get_pointer_to_type(g, array_val->type->data.array.child_type, is_const);
+ TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, array_val->type->data.array.child_type,
+ is_const, false, PtrLenUnknown, get_abi_alignment(g, array_val->type->data.array.child_type),
+ 0, 0);
const_val->special = ConstValSpecialStatic;
const_val->type = get_slice_type(g, ptr_type);
const_val->data.x_struct.fields = create_const_vals(2);
- init_const_ptr_array(g, &const_val->data.x_struct.fields[slice_ptr_index], array_val, start, is_const);
+ init_const_ptr_array(g, &const_val->data.x_struct.fields[slice_ptr_index], array_val, start, is_const,
+ PtrLenUnknown);
init_const_usize(g, &const_val->data.x_struct.fields[slice_len_index], len);
}
@@ -5124,21 +5245,24 @@ ConstExprValue *create_const_slice(CodeGen *g, ConstExprValue *array_val, size_t
}
void init_const_ptr_array(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val,
- size_t elem_index, bool is_const)
+ size_t elem_index, bool is_const, PtrLen ptr_len)
{
assert(array_val->type->id == TypeTableEntryIdArray);
TypeTableEntry *child_type = array_val->type->data.array.child_type;
const_val->special = ConstValSpecialStatic;
- const_val->type = get_pointer_to_type(g, child_type, is_const);
+ const_val->type = get_pointer_to_type_extra(g, child_type, is_const, false,
+ ptr_len, get_abi_alignment(g, child_type), 0, 0);
const_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
const_val->data.x_ptr.data.base_array.array_val = array_val;
const_val->data.x_ptr.data.base_array.elem_index = elem_index;
}
-ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const) {
+ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const,
+ PtrLen ptr_len)
+{
ConstExprValue *const_val = create_const_vals(1);
- init_const_ptr_array(g, const_val, array_val, elem_index, is_const);
+ init_const_ptr_array(g, const_val, array_val, elem_index, is_const, ptr_len);
return const_val;
}
@@ -5257,6 +5381,52 @@ bool ir_get_var_is_comptime(VariableTableEntry *var) {
return var->is_comptime->value.data.x_bool;
}
+bool const_values_equal_ptr(ConstExprValue *a, ConstExprValue *b) {
+ if (a->data.x_ptr.special != b->data.x_ptr.special)
+ return false;
+ if (a->data.x_ptr.mut != b->data.x_ptr.mut)
+ return false;
+ switch (a->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ if (a->data.x_ptr.data.ref.pointee != b->data.x_ptr.data.ref.pointee)
+ return false;
+ return true;
+ case ConstPtrSpecialBaseArray:
+ if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val &&
+ a->data.x_ptr.data.base_array.array_val->global_refs !=
+ b->data.x_ptr.data.base_array.array_val->global_refs)
+ {
+ return false;
+ }
+ if (a->data.x_ptr.data.base_array.elem_index != b->data.x_ptr.data.base_array.elem_index)
+ return false;
+ if (a->data.x_ptr.data.base_array.is_cstr != b->data.x_ptr.data.base_array.is_cstr)
+ return false;
+ return true;
+ case ConstPtrSpecialBaseStruct:
+ if (a->data.x_ptr.data.base_struct.struct_val != b->data.x_ptr.data.base_struct.struct_val &&
+ a->data.x_ptr.data.base_struct.struct_val->global_refs !=
+ b->data.x_ptr.data.base_struct.struct_val->global_refs)
+ {
+ return false;
+ }
+ if (a->data.x_ptr.data.base_struct.field_index != b->data.x_ptr.data.base_struct.field_index)
+ return false;
+ return true;
+ case ConstPtrSpecialHardCodedAddr:
+ if (a->data.x_ptr.data.hard_coded_addr.addr != b->data.x_ptr.data.hard_coded_addr.addr)
+ return false;
+ return true;
+ case ConstPtrSpecialDiscard:
+ return true;
+ case ConstPtrSpecialFunction:
+ return a->data.x_ptr.data.fn.fn_entry == b->data.x_ptr.data.fn.fn_entry;
+ }
+ zig_unreachable();
+}
+
bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
assert(a->type->id == b->type->id);
assert(a->special == ConstValSpecialStatic);
@@ -5292,6 +5462,8 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
case TypeTableEntryIdFloat:
assert(a->type->data.floating.bit_count == b->type->data.floating.bit_count);
switch (a->type->data.floating.bit_count) {
+ case 16:
+ return f16_eq(a->data.x_f16, b->data.x_f16);
case 32:
return a->data.x_f32 == b->data.x_f32;
case 64:
@@ -5301,58 +5473,30 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
default:
zig_unreachable();
}
- case TypeTableEntryIdNumLitFloat:
+ case TypeTableEntryIdComptimeFloat:
return bigfloat_cmp(&a->data.x_bigfloat, &b->data.x_bigfloat) == CmpEQ;
case TypeTableEntryIdInt:
- case TypeTableEntryIdNumLitInt:
+ case TypeTableEntryIdComptimeInt:
return bigint_cmp(&a->data.x_bigint, &b->data.x_bigint) == CmpEQ;
case TypeTableEntryIdPointer:
case TypeTableEntryIdFn:
- if (a->data.x_ptr.special != b->data.x_ptr.special)
- return false;
- if (a->data.x_ptr.mut != b->data.x_ptr.mut)
- return false;
- switch (a->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- zig_unreachable();
- case ConstPtrSpecialRef:
- if (a->data.x_ptr.data.ref.pointee != b->data.x_ptr.data.ref.pointee)
- return false;
- return true;
- case ConstPtrSpecialBaseArray:
- if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val &&
- a->data.x_ptr.data.base_array.array_val->global_refs !=
- b->data.x_ptr.data.base_array.array_val->global_refs)
- {
- return false;
- }
- if (a->data.x_ptr.data.base_array.elem_index != b->data.x_ptr.data.base_array.elem_index)
- return false;
- if (a->data.x_ptr.data.base_array.is_cstr != b->data.x_ptr.data.base_array.is_cstr)
- return false;
- return true;
- case ConstPtrSpecialBaseStruct:
- if (a->data.x_ptr.data.base_struct.struct_val != b->data.x_ptr.data.base_struct.struct_val &&
- a->data.x_ptr.data.base_struct.struct_val->global_refs !=
- b->data.x_ptr.data.base_struct.struct_val->global_refs)
- {
- return false;
- }
- if (a->data.x_ptr.data.base_struct.field_index != b->data.x_ptr.data.base_struct.field_index)
- return false;
- return true;
- case ConstPtrSpecialHardCodedAddr:
- if (a->data.x_ptr.data.hard_coded_addr.addr != b->data.x_ptr.data.hard_coded_addr.addr)
- return false;
- return true;
- case ConstPtrSpecialDiscard:
- return true;
- case ConstPtrSpecialFunction:
- return a->data.x_ptr.data.fn.fn_entry == b->data.x_ptr.data.fn.fn_entry;
+ return const_values_equal_ptr(a, b);
+ case TypeTableEntryIdArray: {
+ assert(a->type->data.array.len == b->type->data.array.len);
+ assert(a->data.x_array.special != ConstArraySpecialUndef);
+ assert(b->data.x_array.special != ConstArraySpecialUndef);
+
+ size_t len = a->type->data.array.len;
+ ConstExprValue *a_elems = a->data.x_array.s_none.elements;
+ ConstExprValue *b_elems = b->data.x_array.s_none.elements;
+
+ for (size_t i = 0; i < len; ++i) {
+ if (!const_values_equal(&a_elems[i], &b_elems[i]))
+ return false;
}
- zig_unreachable();
- case TypeTableEntryIdArray:
- zig_panic("TODO");
+
+ return true;
+ }
case TypeTableEntryIdStruct:
for (size_t i = 0; i < a->type->data.structure.src_field_count; i += 1) {
ConstExprValue *field_a = &a->data.x_struct.fields[i];
@@ -5361,15 +5505,17 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
return false;
}
return true;
- case TypeTableEntryIdUndefLit:
+ case TypeTableEntryIdUndefined:
zig_panic("TODO");
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdNull:
zig_panic("TODO");
- case TypeTableEntryIdMaybe:
- if (a->data.x_maybe == nullptr || b->data.x_maybe == nullptr) {
- return (a->data.x_maybe == nullptr && b->data.x_maybe == nullptr);
+ case TypeTableEntryIdOptional:
+ if (get_codegen_ptr_type(a->type) != nullptr)
+ return const_values_equal_ptr(a, b);
+ if (a->data.x_optional == nullptr || b->data.x_optional == nullptr) {
+ return (a->data.x_optional == nullptr && b->data.x_optional == nullptr);
} else {
- return const_values_equal(a->data.x_maybe, b->data.x_maybe);
+ return const_values_equal(a->data.x_optional, b->data.x_optional);
}
case TypeTableEntryIdErrorUnion:
zig_panic("TODO");
@@ -5442,6 +5588,41 @@ void eval_min_max_value(CodeGen *g, TypeTableEntry *type_entry, ConstExprValue *
}
}
+void render_const_val_ptr(CodeGen *g, Buf *buf, ConstExprValue *const_val, TypeTableEntry *type_entry) {
+ switch (const_val->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ case ConstPtrSpecialBaseStruct:
+ buf_appendf(buf, "*");
+ render_const_value(g, buf, const_ptr_pointee(g, const_val));
+ return;
+ case ConstPtrSpecialBaseArray:
+ if (const_val->data.x_ptr.data.base_array.is_cstr) {
+ buf_appendf(buf, "*(c str lit)");
+ return;
+ } else {
+ buf_appendf(buf, "*");
+ render_const_value(g, buf, const_ptr_pointee(g, const_val));
+ return;
+ }
+ case ConstPtrSpecialHardCodedAddr:
+ buf_appendf(buf, "(%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->name),
+ const_val->data.x_ptr.data.hard_coded_addr.addr);
+ return;
+ case ConstPtrSpecialDiscard:
+ buf_append_str(buf, "*_");
+ return;
+ case ConstPtrSpecialFunction:
+ {
+ FnTableEntry *fn_entry = const_val->data.x_ptr.data.fn.fn_entry;
+ buf_appendf(buf, "@ptrCast(%s, %s)", buf_ptr(&const_val->type->name), buf_ptr(&fn_entry->symbol_name));
+ return;
+ }
+ }
+ zig_unreachable();
+}
+
void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
switch (const_val->special) {
case ConstValSpecialRuntime:
@@ -5465,11 +5646,14 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
case TypeTableEntryIdVoid:
buf_appendf(buf, "{}");
return;
- case TypeTableEntryIdNumLitFloat:
+ case TypeTableEntryIdComptimeFloat:
bigfloat_append_buf(buf, &const_val->data.x_bigfloat);
return;
case TypeTableEntryIdFloat:
switch (type_entry->data.floating.bit_count) {
+ case 16:
+ buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16));
+ return;
case 32:
buf_appendf(buf, "%f", const_val->data.x_f32);
return;
@@ -5493,7 +5677,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
default:
zig_unreachable();
}
- case TypeTableEntryIdNumLitInt:
+ case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdInt:
bigint_append_buf(buf, &const_val->data.x_bigint, 10);
return;
@@ -5518,38 +5702,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
return;
}
case TypeTableEntryIdPointer:
- switch (const_val->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- zig_unreachable();
- case ConstPtrSpecialRef:
- case ConstPtrSpecialBaseStruct:
- buf_appendf(buf, "&");
- render_const_value(g, buf, const_ptr_pointee(g, const_val));
- return;
- case ConstPtrSpecialBaseArray:
- if (const_val->data.x_ptr.data.base_array.is_cstr) {
- buf_appendf(buf, "&(c str lit)");
- return;
- } else {
- buf_appendf(buf, "&");
- render_const_value(g, buf, const_ptr_pointee(g, const_val));
- return;
- }
- case ConstPtrSpecialHardCodedAddr:
- buf_appendf(buf, "(&%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name),
- const_val->data.x_ptr.data.hard_coded_addr.addr);
- return;
- case ConstPtrSpecialDiscard:
- buf_append_str(buf, "&_");
- return;
- case ConstPtrSpecialFunction:
- {
- FnTableEntry *fn_entry = const_val->data.x_ptr.data.fn.fn_entry;
- buf_appendf(buf, "@ptrCast(%s, %s)", buf_ptr(&const_val->type->name), buf_ptr(&fn_entry->symbol_name));
- return;
- }
- }
- zig_unreachable();
+ return render_const_val_ptr(g, buf, const_val, type_entry);
case TypeTableEntryIdBlock:
{
AstNode *node = const_val->data.x_block->source_node;
@@ -5597,20 +5750,22 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
buf_appendf(buf, "}");
return;
}
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdNull:
{
buf_appendf(buf, "null");
return;
}
- case TypeTableEntryIdUndefLit:
+ case TypeTableEntryIdUndefined:
{
buf_appendf(buf, "undefined");
return;
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
- if (const_val->data.x_maybe) {
- render_const_value(g, buf, const_val->data.x_maybe);
+ if (get_codegen_ptr_type(const_val->type) != nullptr)
+ return render_const_val_ptr(g, buf, const_val, type_entry->data.maybe.child_type);
+ if (const_val->data.x_optional) {
+ render_const_value(g, buf, const_val->data.x_optional);
} else {
buf_appendf(buf, "null");
}
@@ -5712,11 +5867,11 @@ uint32_t type_id_hash(TypeId x) {
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdFloat:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
case TypeTableEntryIdUnion:
@@ -5731,6 +5886,7 @@ uint32_t type_id_hash(TypeId x) {
return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type);
case TypeTableEntryIdPointer:
return hash_ptr(x.data.pointer.child_type) +
+ ((x.data.pointer.ptr_len == PtrLenSingle) ? (uint32_t)1120226602 : (uint32_t)3200913342) +
(x.data.pointer.is_const ? (uint32_t)2749109194 : (uint32_t)4047371087) +
(x.data.pointer.is_volatile ? (uint32_t)536730450 : (uint32_t)1685612214) +
(((uint32_t)x.data.pointer.alignment) ^ (uint32_t)0x777fbe0e) +
@@ -5757,11 +5913,11 @@ bool type_id_eql(TypeId a, TypeId b) {
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdFloat:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdPromise:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -5779,6 +5935,7 @@ bool type_id_eql(TypeId a, TypeId b) {
case TypeTableEntryIdPointer:
return a.data.pointer.child_type == b.data.pointer.child_type &&
+ a.data.pointer.ptr_len == b.data.pointer.ptr_len &&
a.data.pointer.is_const == b.data.pointer.is_const &&
a.data.pointer.is_volatile == b.data.pointer.is_volatile &&
a.data.pointer.alignment == b.data.pointer.alignment &&
@@ -5800,10 +5957,14 @@ uint32_t zig_llvm_fn_key_hash(ZigLLVMFnKey x) {
return (uint32_t)(x.data.ctz.bit_count) * (uint32_t)810453934;
case ZigLLVMFnIdClz:
return (uint32_t)(x.data.clz.bit_count) * (uint32_t)2428952817;
+ case ZigLLVMFnIdPopCount:
+ return (uint32_t)(x.data.pop_count.bit_count) * (uint32_t)101195049;
case ZigLLVMFnIdFloor:
- return (uint32_t)(x.data.floor_ceil.bit_count) * (uint32_t)1899859168;
+ return (uint32_t)(x.data.floating.bit_count) * (uint32_t)1899859168;
case ZigLLVMFnIdCeil:
- return (uint32_t)(x.data.floor_ceil.bit_count) * (uint32_t)1953839089;
+ return (uint32_t)(x.data.floating.bit_count) * (uint32_t)1953839089;
+ case ZigLLVMFnIdSqrt:
+ return (uint32_t)(x.data.floating.bit_count) * (uint32_t)2225366385;
case ZigLLVMFnIdOverflowArithmetic:
return ((uint32_t)(x.data.overflow_arithmetic.bit_count) * 87135777) +
((uint32_t)(x.data.overflow_arithmetic.add_sub_mul) * 31640542) +
@@ -5820,9 +5981,12 @@ bool zig_llvm_fn_key_eql(ZigLLVMFnKey a, ZigLLVMFnKey b) {
return a.data.ctz.bit_count == b.data.ctz.bit_count;
case ZigLLVMFnIdClz:
return a.data.clz.bit_count == b.data.clz.bit_count;
+ case ZigLLVMFnIdPopCount:
+ return a.data.pop_count.bit_count == b.data.pop_count.bit_count;
case ZigLLVMFnIdFloor:
case ZigLLVMFnIdCeil:
- return a.data.floor_ceil.bit_count == b.data.floor_ceil.bit_count;
+ case ZigLLVMFnIdSqrt:
+ return a.data.floating.bit_count == b.data.floating.bit_count;
case ZigLLVMFnIdOverflowArithmetic:
return (a.data.overflow_arithmetic.bit_count == b.data.overflow_arithmetic.bit_count) &&
(a.data.overflow_arithmetic.add_sub_mul == b.data.overflow_arithmetic.add_sub_mul) &&
@@ -5875,11 +6039,11 @@ static const TypeTableEntryId all_type_ids[] = {
TypeTableEntryIdPointer,
TypeTableEntryIdArray,
TypeTableEntryIdStruct,
- TypeTableEntryIdNumLitFloat,
- TypeTableEntryIdNumLitInt,
- TypeTableEntryIdUndefLit,
- TypeTableEntryIdNullLit,
- TypeTableEntryIdMaybe,
+ TypeTableEntryIdComptimeFloat,
+ TypeTableEntryIdComptimeInt,
+ TypeTableEntryIdUndefined,
+ TypeTableEntryIdNull,
+ TypeTableEntryIdOptional,
TypeTableEntryIdErrorUnion,
TypeTableEntryIdErrorSet,
TypeTableEntryIdEnum,
@@ -5902,8 +6066,8 @@ size_t type_id_len() {
return array_length(all_type_ids);
}
-size_t type_id_index(TypeTableEntryId id) {
- switch (id) {
+size_t type_id_index(TypeTableEntry *entry) {
+ switch (entry->id) {
case TypeTableEntryIdInvalid:
zig_unreachable();
case TypeTableEntryIdMetaType:
@@ -5923,16 +6087,18 @@ size_t type_id_index(TypeTableEntryId id) {
case TypeTableEntryIdArray:
return 7;
case TypeTableEntryIdStruct:
+ if (entry->data.structure.is_slice)
+ return 6;
return 8;
- case TypeTableEntryIdNumLitFloat:
+ case TypeTableEntryIdComptimeFloat:
return 9;
- case TypeTableEntryIdNumLitInt:
+ case TypeTableEntryIdComptimeInt:
return 10;
- case TypeTableEntryIdUndefLit:
+ case TypeTableEntryIdUndefined:
return 11;
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdNull:
return 12;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
return 13;
case TypeTableEntryIdErrorUnion:
return 14;
@@ -5982,16 +6148,16 @@ const char *type_id_name(TypeTableEntryId id) {
return "Array";
case TypeTableEntryIdStruct:
return "Struct";
- case TypeTableEntryIdNumLitFloat:
- return "FloatLiteral";
- case TypeTableEntryIdNumLitInt:
- return "IntLiteral";
- case TypeTableEntryIdUndefLit:
- return "UndefinedLiteral";
- case TypeTableEntryIdNullLit:
- return "NullLiteral";
- case TypeTableEntryIdMaybe:
- return "Nullable";
+ case TypeTableEntryIdComptimeFloat:
+ return "ComptimeFloat";
+ case TypeTableEntryIdComptimeInt:
+ return "ComptimeInt";
+ case TypeTableEntryIdUndefined:
+ return "Undefined";
+ case TypeTableEntryIdNull:
+ return "Null";
+ case TypeTableEntryIdOptional:
+ return "Optional";
case TypeTableEntryIdErrorUnion:
return "ErrorUnion";
case TypeTableEntryIdErrorSet:
@@ -6061,7 +6227,12 @@ uint32_t get_abi_alignment(CodeGen *g, TypeTableEntry *type_entry) {
} else if (type_entry->id == TypeTableEntryIdOpaque) {
return 1;
} else {
- return LLVMABIAlignmentOfType(g->target_data_ref, type_entry->type_ref);
+ uint32_t llvm_alignment = LLVMABIAlignmentOfType(g->target_data_ref, type_entry->type_ref);
+ // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw
+ if (type_entry->id == TypeTableEntryIdPromise && llvm_alignment < 8) {
+ return 8;
+ }
+ return llvm_alignment;
}
}
@@ -6109,3 +6280,27 @@ bool fn_type_can_fail(FnTypeId *fn_type_id) {
return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync;
}
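+// Recognize integer type names of the form iN / uN (e.g. "i32", "u7") and construct
+// the integer type directly; any other name is looked up in the primitive type table.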
+TypeTableEntry *get_primitive_type(CodeGen *g, Buf *name) {
+ if (buf_len(name) >= 2) {
+ uint8_t first_c = buf_ptr(name)[0];
+ if (first_c == 'i' || first_c == 'u') {
+ for (size_t i = 1; i < buf_len(name); i += 1) {
+ uint8_t c = buf_ptr(name)[i];
+ if (c < '0' || c > '9') {
+ goto not_integer;
+ }
+ }
+ bool is_signed = (first_c == 'i');
+ uint32_t bit_count = atoi(buf_ptr(name) + 1);
+ return get_int_type(g, is_signed, bit_count);
+ }
+ }
+
+not_integer:
+
+ auto primitive_table_entry = g->primitive_type_table.maybe_get(name);
+ if (primitive_table_entry != nullptr) {
+ return primitive_table_entry->value;
+ }
+ return nullptr;
+}
diff --git a/src/analyze.hpp b/src/analyze.hpp
index aa4557666b..e4dfae4ecb 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -16,15 +16,14 @@ ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *m
TypeTableEntry *new_type_table_entry(TypeTableEntryId id);
TypeTableEntry *get_pointer_to_type(CodeGen *g, TypeTableEntry *child_type, bool is_const);
TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type, bool is_const,
- bool is_volatile, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count);
+ bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count);
uint64_t type_size(CodeGen *g, TypeTableEntry *type_entry);
uint64_t type_size_bits(CodeGen *g, TypeTableEntry *type_entry);
-TypeTableEntry **get_int_type_ptr(CodeGen *g, bool is_signed, uint32_t size_in_bits);
TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits);
TypeTableEntry **get_c_int_type_ptr(CodeGen *g, CIntType c_int_type);
TypeTableEntry *get_c_int_type(CodeGen *g, CIntType c_int_type);
TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id);
-TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type);
+TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type);
TypeTableEntry *get_array_type(CodeGen *g, TypeTableEntry *child_type, uint64_t array_size);
TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type);
TypeTableEntry *get_partial_container_type(CodeGen *g, Scope *scope, ContainerKind kind,
@@ -70,6 +69,8 @@ TypeUnionField *find_union_type_field(TypeTableEntry *type_entry, Buf *name);
TypeEnumField *find_enum_field_by_tag(TypeTableEntry *enum_type, const BigInt *tag);
TypeUnionField *find_union_field_by_tag(TypeTableEntry *type_entry, const BigInt *tag);
+bool is_ref(TypeTableEntry *type_entry);
+bool is_array_ref(TypeTableEntry *type_entry);
bool is_container_ref(TypeTableEntry *type_entry);
void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node);
void scan_import(CodeGen *g, ImportTableEntry *import);
@@ -104,6 +105,7 @@ ScopeDeferExpr *create_defer_expr_scope(AstNode *node, Scope *parent);
Scope *create_var_scope(AstNode *node, Scope *parent, VariableTableEntry *var);
ScopeCImport *create_cimport_scope(AstNode *node, Scope *parent);
ScopeLoop *create_loop_scope(AstNode *node, Scope *parent);
+ScopeSuspend *create_suspend_scope(AstNode *node, Scope *parent);
ScopeFnDef *create_fndef_scope(AstNode *node, Scope *parent, FnTableEntry *fn_entry);
ScopeDecls *create_decls_scope(AstNode *node, Scope *parent, TypeTableEntry *container_type, ImportTableEntry *import);
Scope *create_comptime_scope(AstNode *node, Scope *parent);
@@ -151,8 +153,9 @@ ConstExprValue *create_const_ptr_hard_coded_addr(CodeGen *g, TypeTableEntry *poi
size_t addr, bool is_const);
void init_const_ptr_array(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val,
- size_t elem_index, bool is_const);
-ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const);
+ size_t elem_index, bool is_const, PtrLen ptr_len);
+ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index,
+ bool is_const, PtrLen ptr_len);
void init_const_slice(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val,
size_t start, size_t len, bool is_const);
@@ -173,7 +176,7 @@ void update_compile_var(CodeGen *g, Buf *name, ConstExprValue *value);
const char *type_id_name(TypeTableEntryId id);
TypeTableEntryId type_id_at_index(size_t index);
size_t type_id_len();
-size_t type_id_index(TypeTableEntryId id);
+size_t type_id_index(TypeTableEntry *entry);
TypeTableEntry *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id);
bool type_is_copyable(CodeGen *g, TypeTableEntry *type_entry);
LinkLib *create_link_lib(Buf *name);
@@ -190,7 +193,7 @@ void add_fn_export(CodeGen *g, FnTableEntry *fn_table_entry, Buf *symbol_name, G
ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name);
TypeTableEntry *get_ptr_to_stack_trace_type(CodeGen *g);
-void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry);
+bool resolve_inferred_error_set(CodeGen *g, TypeTableEntry *err_set_type, AstNode *source_node);
TypeTableEntry *get_auto_err_set_type(CodeGen *g, FnTableEntry *fn_entry);
@@ -198,5 +201,8 @@ uint32_t get_coro_frame_align_bytes(CodeGen *g);
bool fn_type_can_fail(FnTypeId *fn_type_id);
bool type_can_fail(TypeTableEntry *type_entry);
bool fn_eval_cacheable(Scope *scope, TypeTableEntry *return_type);
+AstNode *type_decl_node(TypeTableEntry *type_entry);
+
+TypeTableEntry *get_primitive_type(CodeGen *g, Buf *name);
#endif
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index 2c3e1fc873..984b4230b1 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -50,7 +50,7 @@ static const char *bin_op_str(BinOpType bin_op) {
case BinOpTypeAssignBitXor: return "^=";
case BinOpTypeAssignBitOr: return "|=";
case BinOpTypeAssignMergeErrorSets: return "||=";
- case BinOpTypeUnwrapMaybe: return "??";
+ case BinOpTypeUnwrapOptional: return "orelse";
case BinOpTypeArrayCat: return "++";
case BinOpTypeArrayMult: return "**";
case BinOpTypeErrorUnion: return "!";
@@ -66,9 +66,8 @@ static const char *prefix_op_str(PrefixOp prefix_op) {
case PrefixOpNegationWrap: return "-%";
case PrefixOpBoolNot: return "!";
case PrefixOpBinNot: return "~";
- case PrefixOpDereference: return "*";
- case PrefixOpMaybe: return "?";
- case PrefixOpUnwrapMaybe: return "??";
+ case PrefixOpOptional: return "?";
+ case PrefixOpAddrOf: return "&";
}
zig_unreachable();
}
@@ -186,8 +185,6 @@ static const char *node_type_str(NodeType node_type) {
return "Symbol";
case NodeTypePrefixOpExpr:
return "PrefixOpExpr";
- case NodeTypeAddrOfExpr:
- return "AddrOfExpr";
case NodeTypeUse:
return "Use";
case NodeTypeBoolLiteral:
@@ -222,6 +219,10 @@ static const char *node_type_str(NodeType node_type) {
return "AsmExpr";
case NodeTypeFieldAccessExpr:
return "FieldAccessExpr";
+ case NodeTypePtrDeref:
+ return "PtrDerefExpr";
+ case NodeTypeUnwrapOptional:
+ return "UnwrapOptional";
case NodeTypeContainerDecl:
return "ContainerDecl";
case NodeTypeStructField:
@@ -250,6 +251,8 @@ static const char *node_type_str(NodeType node_type) {
return "Suspend";
case NodeTypePromiseType:
return "PromiseType";
+ case NodeTypePointerType:
+ return "PointerType";
}
zig_unreachable();
}
@@ -615,41 +618,47 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "%s", prefix_op_str(op));
AstNode *child_node = node->data.prefix_op_expr.primary_expr;
- bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypeAddrOfExpr;
+ bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypePointerType;
render_node_extra(ar, child_node, new_grouped);
if (!grouped) fprintf(ar->f, ")");
break;
}
- case NodeTypeAddrOfExpr:
+ case NodeTypePointerType:
{
if (!grouped) fprintf(ar->f, "(");
- fprintf(ar->f, "&");
- if (node->data.addr_of_expr.align_expr != nullptr) {
+ const char *star = "[*]";
+ if (node->data.pointer_type.star_token != nullptr &&
+ (node->data.pointer_type.star_token->id == TokenIdStar || node->data.pointer_type.star_token->id == TokenIdStarStar))
+ {
+ star = "*";
+ }
+ fprintf(ar->f, "%s", star);
+ if (node->data.pointer_type.align_expr != nullptr) {
fprintf(ar->f, "align(");
- render_node_grouped(ar, node->data.addr_of_expr.align_expr);
- if (node->data.addr_of_expr.bit_offset_start != nullptr) {
- assert(node->data.addr_of_expr.bit_offset_end != nullptr);
+ render_node_grouped(ar, node->data.pointer_type.align_expr);
+ if (node->data.pointer_type.bit_offset_start != nullptr) {
+ assert(node->data.pointer_type.bit_offset_end != nullptr);
Buf offset_start_buf = BUF_INIT;
buf_resize(&offset_start_buf, 0);
- bigint_append_buf(&offset_start_buf, node->data.addr_of_expr.bit_offset_start, 10);
+ bigint_append_buf(&offset_start_buf, node->data.pointer_type.bit_offset_start, 10);
Buf offset_end_buf = BUF_INIT;
buf_resize(&offset_end_buf, 0);
- bigint_append_buf(&offset_end_buf, node->data.addr_of_expr.bit_offset_end, 10);
+ bigint_append_buf(&offset_end_buf, node->data.pointer_type.bit_offset_end, 10);
fprintf(ar->f, ":%s:%s ", buf_ptr(&offset_start_buf), buf_ptr(&offset_end_buf));
}
fprintf(ar->f, ") ");
}
- if (node->data.addr_of_expr.is_const) {
+ if (node->data.pointer_type.is_const) {
fprintf(ar->f, "const ");
}
- if (node->data.addr_of_expr.is_volatile) {
+ if (node->data.pointer_type.is_volatile) {
fprintf(ar->f, "volatile ");
}
- render_node_ungrouped(ar, node->data.addr_of_expr.op_expr);
+ render_node_ungrouped(ar, node->data.pointer_type.op_expr);
if (!grouped) fprintf(ar->f, ")");
break;
}
@@ -668,7 +677,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, " ");
}
AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
- bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypeAddrOfExpr);
+ bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType);
render_node_extra(ar, fn_ref_node, grouped);
fprintf(ar->f, "(");
for (size_t i = 0; i < node->data.fn_call_expr.params.length; i += 1) {
@@ -696,6 +705,20 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
print_symbol(ar, rhs);
break;
}
+ case NodeTypePtrDeref:
+ {
+ AstNode *lhs = node->data.ptr_deref_expr.target;
+ render_node_ungrouped(ar, lhs);
+ fprintf(ar->f, ".*");
+ break;
+ }
+ case NodeTypeUnwrapOptional:
+ {
+ AstNode *lhs = node->data.unwrap_optional.expr;
+ render_node_ungrouped(ar, lhs);
+ fprintf(ar->f, ".?");
+ break;
+ }
case NodeTypeUndefinedLiteral:
fprintf(ar->f, "undefined");
break;
@@ -728,7 +751,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
render_node_grouped(ar, field_node->data.struct_field.type);
}
if (field_node->data.struct_field.value != nullptr) {
- fprintf(ar->f, "= ");
+ fprintf(ar->f, " = ");
render_node_grouped(ar, field_node->data.struct_field.value);
}
fprintf(ar->f, ",\n");
@@ -1089,9 +1112,6 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
{
fprintf(ar->f, "suspend");
if (node->data.suspend.block != nullptr) {
- fprintf(ar->f, " |");
- render_node_grouped(ar, node->data.suspend.promise_symbol);
- fprintf(ar->f, "| ");
render_node_grouped(ar, node->data.suspend.block);
}
break;
diff --git a/src/bigfloat.cpp b/src/bigfloat.cpp
index 2cab9658e8..cc442fa3b7 100644
--- a/src/bigfloat.cpp
+++ b/src/bigfloat.cpp
@@ -18,6 +18,10 @@ void bigfloat_init_128(BigFloat *dest, float128_t x) {
dest->value = x;
}
+void bigfloat_init_16(BigFloat *dest, float16_t x) {
+ f16_to_f128M(x, &dest->value);
+}
+
void bigfloat_init_32(BigFloat *dest, float x) {
float32_t f32_val;
memcpy(&f32_val, &x, sizeof(float));
@@ -146,6 +150,10 @@ Cmp bigfloat_cmp(const BigFloat *op1, const BigFloat *op2) {
}
}
+float16_t bigfloat_to_f16(const BigFloat *bigfloat) {
+ return f128M_to_f16(&bigfloat->value);
+}
+
float bigfloat_to_f32(const BigFloat *bigfloat) {
float32_t f32_value = f128M_to_f32(&bigfloat->value);
float result;
@@ -181,3 +189,7 @@ bool bigfloat_has_fraction(const BigFloat *bigfloat) {
f128M_roundToInt(&bigfloat->value, softfloat_round_minMag, false, &floored);
return !f128M_eq(&floored, &bigfloat->value);
}
+
+void bigfloat_sqrt(BigFloat *dest, const BigFloat *op) {
+ f128M_sqrt(&op->value, &dest->value);
+}
diff --git a/src/bigfloat.hpp b/src/bigfloat.hpp
index 894b252c3a..c6ae567945 100644
--- a/src/bigfloat.hpp
+++ b/src/bigfloat.hpp
@@ -22,6 +22,7 @@ struct BigFloat {
struct Buf;
+void bigfloat_init_16(BigFloat *dest, float16_t x);
void bigfloat_init_32(BigFloat *dest, float x);
void bigfloat_init_64(BigFloat *dest, double x);
void bigfloat_init_128(BigFloat *dest, float128_t x);
@@ -29,6 +30,7 @@ void bigfloat_init_bigfloat(BigFloat *dest, const BigFloat *x);
void bigfloat_init_bigint(BigFloat *dest, const BigInt *op);
int bigfloat_init_buf_base10(BigFloat *dest, const uint8_t *buf_ptr, size_t buf_len);
+float16_t bigfloat_to_f16(const BigFloat *bigfloat);
float bigfloat_to_f32(const BigFloat *bigfloat);
double bigfloat_to_f64(const BigFloat *bigfloat);
float128_t bigfloat_to_f128(const BigFloat *bigfloat);
@@ -42,6 +44,7 @@ void bigfloat_div_trunc(BigFloat *dest, const BigFloat *op1, const BigFloat *op2
void bigfloat_div_floor(BigFloat *dest, const BigFloat *op1, const BigFloat *op2);
void bigfloat_rem(BigFloat *dest, const BigFloat *op1, const BigFloat *op2);
void bigfloat_mod(BigFloat *dest, const BigFloat *op1, const BigFloat *op2);
+void bigfloat_sqrt(BigFloat *dest, const BigFloat *op);
void bigfloat_append_buf(Buf *buf, const BigFloat *op);
Cmp bigfloat_cmp(const BigFloat *op1, const BigFloat *op2);
diff --git a/src/bigint.cpp b/src/bigint.cpp
index 85e5dad4ad..bf18b9a1bf 100644
--- a/src/bigint.cpp
+++ b/src/bigint.cpp
@@ -86,6 +86,11 @@ static void to_twos_complement(BigInt *dest, const BigInt *op, size_t bit_count)
size_t digits_to_copy = bit_count / 64;
size_t leftover_bits = bit_count % 64;
dest->digit_count = digits_to_copy + ((leftover_bits == 0) ? 0 : 1);
+ if (dest->digit_count == 1 && leftover_bits == 0) {
+ dest->data.digit = op_digits[0];
+ if (dest->data.digit == 0) dest->digit_count = 0;
+ return;
+ }
dest->data.digits = allocate_nonzero(dest->digit_count);
for (size_t i = 0; i < digits_to_copy; i += 1) {
uint64_t digit = (i < op->digit_count) ? op_digits[i] : 0;
@@ -1254,12 +1259,11 @@ void bigint_and(BigInt *dest, const BigInt *op1, const BigInt *op2) {
bigint_normalize(dest);
return;
}
- // TODO this code path is untested
- uint64_t first_digit = dest->data.digit;
+
dest->digit_count = max(op1->digit_count, op2->digit_count);
dest->data.digits = allocate_nonzero(dest->digit_count);
- dest->data.digits[0] = first_digit;
- size_t i = 1;
+
+ size_t i = 0;
for (; i < op1->digit_count && i < op2->digit_count; i += 1) {
dest->data.digits[i] = op1_digits[i] & op2_digits[i];
}
@@ -1407,7 +1411,6 @@ void bigint_shr(BigInt *dest, const BigInt *op1, const BigInt *op2) {
return;
}
- // TODO this code path is untested
size_t digit_shift_count = shift_amt / 64;
size_t leftover_shift_count = shift_amt % 64;
@@ -1422,7 +1425,7 @@ void bigint_shr(BigInt *dest, const BigInt *op1, const BigInt *op2) {
uint64_t digit = op1_digits[op_digit_index];
size_t dest_digit_index = op_digit_index - digit_shift_count;
dest->data.digits[dest_digit_index] = carry | (digit >> leftover_shift_count);
- carry = (0xffffffffffffffffULL << leftover_shift_count) & digit;
+ carry = digit << (64 - leftover_shift_count);
if (dest_digit_index == 0) { break; }
op_digit_index -= 1;
@@ -1590,6 +1593,37 @@ void bigint_append_buf(Buf *buf, const BigInt *op, uint64_t base) {
}
}
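+// Count the set bits in the magnitude of a non-negative BigInt.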
+size_t bigint_popcount_unsigned(const BigInt *bi) {
+ assert(!bi->is_negative);
+ if (bi->digit_count == 0)
+ return 0;
+
+ size_t count = 0;
+ size_t bit_count = bi->digit_count * 64;
+ for (size_t i = 0; i < bit_count; i += 1) {
+ if (bit_at_index(bi, i))
+ count += 1;
+ }
+ return count;
+}
+
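+// Count the set bits of the value's two's complement representation,
+// truncated (or sign-extended) to bit_count bits.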
+size_t bigint_popcount_signed(const BigInt *bi, size_t bit_count) {
+ if (bit_count == 0)
+ return 0;
+ if (bi->digit_count == 0)
+ return 0;
+
+ BigInt twos_comp = {0};
+ to_twos_complement(&twos_comp, bi, bit_count);
+
+ size_t count = 0;
+ for (size_t i = 0; i < bit_count; i += 1) {
+ if (bit_at_index(&twos_comp, i))
+ count += 1;
+ }
+ return count;
+}
+
size_t bigint_ctz(const BigInt *bi, size_t bit_count) {
if (bit_count == 0)
return 0;
@@ -1680,10 +1714,15 @@ void bigint_incr(BigInt *x) {
bigint_init_unsigned(x, 1);
return;
}
-
- if (x->digit_count == 1 && x->data.digit != UINT64_MAX) {
- x->data.digit += 1;
- return;
+
+ if (x->digit_count == 1) {
+ if (x->is_negative && x->data.digit != 0) {
+ x->data.digit -= 1;
+ return;
+ } else if (!x->is_negative && x->data.digit != UINT64_MAX) {
+ x->data.digit += 1;
+ return;
+ }
}
BigInt copy;
diff --git a/src/bigint.hpp b/src/bigint.hpp
index 9f044c8722..48b222a227 100644
--- a/src/bigint.hpp
+++ b/src/bigint.hpp
@@ -81,6 +81,8 @@ void bigint_append_buf(Buf *buf, const BigInt *op, uint64_t base);
size_t bigint_ctz(const BigInt *bi, size_t bit_count);
size_t bigint_clz(const BigInt *bi, size_t bit_count);
+size_t bigint_popcount_signed(const BigInt *bi, size_t bit_count);
+size_t bigint_popcount_unsigned(const BigInt *bi);
size_t bigint_bits_needed(const BigInt *op);
diff --git a/src/codegen.cpp b/src/codegen.cpp
index a58832f983..539356ef2f 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -17,6 +17,7 @@
#include "os.hpp"
#include "translate_c.hpp"
#include "target.hpp"
+#include "util.hpp"
#include "zig_llvm.h"
#include
@@ -59,6 +60,33 @@ PackageTableEntry *new_anonymous_package(void) {
return new_package("", "");
}
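+// Names that LLVM's own lowering may emit calls to; codegen_create adds them to
+// external_prototypes so that is_symbol_available() rejects these names.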
+static const char *symbols_that_llvm_depends_on[] = {
+ "memcpy",
+ "memset",
+ "sqrt",
+ "powi",
+ "sin",
+ "cos",
+ "pow",
+ "exp",
+ "exp2",
+ "log",
+ "log10",
+ "log2",
+ "fma",
+ "fabs",
+ "minnum",
+ "maxnum",
+ "copysign",
+ "floor",
+ "ceil",
+ "trunc",
+ "rint",
+ "nearbyint",
+ "round",
+ // TODO probably all of compiler-rt needs to go here
+};
+
CodeGen *codegen_create(Buf *root_src_path, const ZigTarget *target, OutType out_type, BuildMode build_mode,
Buf *zig_lib_dir)
{
@@ -88,10 +116,15 @@ CodeGen *codegen_create(Buf *root_src_path, const ZigTarget *target, OutType out
g->exported_symbol_names.init(8);
g->external_prototypes.init(8);
g->string_literals_table.init(16);
+ g->type_info_cache.init(32);
g->is_test_build = false;
g->want_h_file = (out_type == OutTypeObj || out_type == OutTypeLib);
buf_resize(&g->global_asm, 0);
+ for (size_t i = 0; i < array_length(symbols_that_llvm_depends_on); i += 1) {
+ g->external_prototypes.put(buf_create_from_str(symbols_that_llvm_depends_on[i]), nullptr);
+ }
+
if (root_src_path) {
Buf *src_basename = buf_alloc();
Buf *src_dir = buf_alloc();
@@ -325,13 +358,6 @@ static void addLLVMArgAttr(LLVMValueRef arg_val, unsigned param_index, const cha
return addLLVMAttr(arg_val, param_index + 1, attr_name);
}
-static void addLLVMCallsiteAttr(LLVMValueRef call_instr, unsigned param_index, const char *attr_name) {
- unsigned kind_id = LLVMGetEnumAttributeKindForName(attr_name, strlen(attr_name));
- assert(kind_id != 0);
- LLVMAttributeRef llvm_attr = LLVMCreateEnumAttribute(LLVMGetGlobalContext(), kind_id, 0);
- LLVMAddCallSiteAttribute(call_instr, param_index + 1, llvm_attr);
-}
-
static bool is_symbol_available(CodeGen *g, Buf *name) {
return g->exported_symbol_names.maybe_get(name) == nullptr && g->external_prototypes.maybe_get(name) == nullptr;
}
@@ -512,7 +538,9 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) {
}
if (fn_table_entry->body_node != nullptr) {
- bool want_fn_safety = g->build_mode != BuildModeFastRelease && !fn_table_entry->def_scope->safety_off;
+ bool want_fn_safety = g->build_mode != BuildModeFastRelease &&
+ g->build_mode != BuildModeSmallRelease &&
+ !fn_table_entry->def_scope->safety_off;
if (want_fn_safety) {
if (g->libc_link_lib != nullptr) {
addLLVMFnAttr(fn_table_entry->llvm_value, "sspstrong");
@@ -578,11 +606,6 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) {
if (param_type->id == TypeTableEntryIdPointer) {
addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)gen_index, "nonnull");
}
- // Note: byval is disabled on windows due to an LLVM bug:
- // https://github.com/zig-lang/zig/issues/536
- if (is_byval && g->zig_target.os != OsWindows) {
- addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)gen_index, "byval");
- }
}
uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry);
@@ -652,6 +675,7 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) {
}
case ScopeIdDeferExpr:
case ScopeIdLoop:
+ case ScopeIdSuspend:
case ScopeIdCompTime:
case ScopeIdCoroPrelude:
return get_di_scope(g, scope->parent);
@@ -717,12 +741,12 @@ static LLVMValueRef get_int_overflow_fn(CodeGen *g, TypeTableEntry *type_entry,
return fn_val;
}
-static LLVMValueRef get_floor_ceil_fn(CodeGen *g, TypeTableEntry *type_entry, ZigLLVMFnId fn_id) {
+static LLVMValueRef get_float_fn(CodeGen *g, TypeTableEntry *type_entry, ZigLLVMFnId fn_id) {
assert(type_entry->id == TypeTableEntryIdFloat);
ZigLLVMFnKey key = {};
key.id = fn_id;
- key.data.floor_ceil.bit_count = (uint32_t)type_entry->data.floating.bit_count;
+ key.data.floating.bit_count = (uint32_t)type_entry->data.floating.bit_count;
auto existing_entry = g->llvm_fn_table.maybe_get(key);
if (existing_entry)
@@ -733,6 +757,8 @@ static LLVMValueRef get_floor_ceil_fn(CodeGen *g, TypeTableEntry *type_entry, Zi
name = "floor";
} else if (fn_id == ZigLLVMFnIdCeil) {
name = "ceil";
+ } else if (fn_id == ZigLLVMFnIdSqrt) {
+ name = "sqrt";
} else {
zig_unreachable();
}
@@ -815,7 +841,7 @@ static bool ir_want_fast_math(CodeGen *g, IrInstruction *instruction) {
}
static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
- if (g->build_mode == BuildModeFastRelease)
+ if (g->build_mode == BuildModeFastRelease || g->build_mode == BuildModeSmallRelease)
return false;
// TODO memoize
@@ -859,7 +885,7 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("exact division produced remainder");
case PanicMsgIdSliceWidenRemainder:
return buf_create_from_str("slice widening size mismatch");
- case PanicMsgIdUnwrapMaybeFail:
+ case PanicMsgIdUnwrapOptionalFail:
return buf_create_from_str("attempt to unwrap null");
case PanicMsgIdUnreachable:
return buf_create_from_str("reached unreachable code");
@@ -869,6 +895,10 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("incorrect alignment");
case PanicMsgIdBadUnionField:
return buf_create_from_str("access of inactive union field");
+ case PanicMsgIdBadEnumValue:
+ return buf_create_from_str("invalid enum value");
+ case PanicMsgIdFloatToInt:
+ return buf_create_from_str("integer part of floating point value out of bounds");
}
zig_unreachable();
}
@@ -887,7 +917,8 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) {
assert(val->global_refs->llvm_global);
}
- TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true);
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type);
return LLVMConstBitCast(val->global_refs->llvm_global, LLVMPointerType(str_type->type_ref, 0));
}
@@ -932,6 +963,53 @@ static LLVMValueRef get_memcpy_fn_val(CodeGen *g) {
return g->memcpy_fn_val;
}
+static LLVMValueRef get_stacksave_fn_val(CodeGen *g) {
+ if (g->stacksave_fn_val)
+ return g->stacksave_fn_val;
+
+ // declare i8* @llvm.stacksave()
+
+ LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), nullptr, 0, false);
+ g->stacksave_fn_val = LLVMAddFunction(g->module, "llvm.stacksave", fn_type);
+ assert(LLVMGetIntrinsicID(g->stacksave_fn_val));
+
+ return g->stacksave_fn_val;
+}
+
+static LLVMValueRef get_stackrestore_fn_val(CodeGen *g) {
+ if (g->stackrestore_fn_val)
+ return g->stackrestore_fn_val;
+
+ // declare void @llvm.stackrestore(i8* %ptr)
+
+ LLVMTypeRef param_type = LLVMPointerType(LLVMInt8Type(), 0);
+ LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), &param_type, 1, false);
+ g->stackrestore_fn_val = LLVMAddFunction(g->module, "llvm.stackrestore", fn_type);
+ assert(LLVMGetIntrinsicID(g->stackrestore_fn_val));
+
+ return g->stackrestore_fn_val;
+}
+
+static LLVMValueRef get_write_register_fn_val(CodeGen *g) {
+ if (g->write_register_fn_val)
+ return g->write_register_fn_val;
+
+ // declare void @llvm.write_register.i64(metadata, i64 @value)
+ // !0 = !{!"sp\00"}
+
+ LLVMTypeRef param_types[] = {
+ LLVMMetadataTypeInContext(LLVMGetGlobalContext()),
+ LLVMIntType(g->pointer_size_bytes * 8),
+ };
+
+ LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 2, false);
+ Buf *name = buf_sprintf("llvm.write_register.i%d", g->pointer_size_bytes * 8);
+ g->write_register_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
+ assert(LLVMGetIntrinsicID(g->write_register_fn_val));
+
+ return g->write_register_fn_val;
+}
+
static LLVMValueRef get_coro_destroy_fn_val(CodeGen *g) {
if (g->coro_destroy_fn_val)
return g->coro_destroy_fn_val;
@@ -1408,7 +1486,8 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) {
LLVMValueRef full_buf_ptr = LLVMConstInBoundsGEP(global_array, full_buf_ptr_indices, 2);
- TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true);
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type);
LLVMValueRef global_slice_fields[] = {
full_buf_ptr,
@@ -1626,7 +1705,7 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_runtime_safety, T
return trunc_val;
}
LLVMValueRef orig_val;
- if (actual_type->data.integral.is_signed) {
+ if (wanted_type->data.integral.is_signed) {
orig_val = LLVMBuildSExt(g->builder, trunc_val, actual_type->type_ref, "");
} else {
orig_val = LLVMBuildZExt(g->builder, trunc_val, actual_type->type_ref, "");
@@ -1900,7 +1979,7 @@ static LLVMValueRef gen_floor(CodeGen *g, LLVMValueRef val, TypeTableEntry *type
if (type_entry->id == TypeTableEntryIdInt)
return val;
- LLVMValueRef floor_fn = get_floor_ceil_fn(g, type_entry, ZigLLVMFnIdFloor);
+ LLVMValueRef floor_fn = get_float_fn(g, type_entry, ZigLLVMFnIdFloor);
return LLVMBuildCall(g->builder, floor_fn, &val, 1, "");
}
@@ -1908,7 +1987,7 @@ static LLVMValueRef gen_ceil(CodeGen *g, LLVMValueRef val, TypeTableEntry *type_
if (type_entry->id == TypeTableEntryIdInt)
return val;
- LLVMValueRef ceil_fn = get_floor_ceil_fn(g, type_entry, ZigLLVMFnIdCeil);
+ LLVMValueRef ceil_fn = get_float_fn(g, type_entry, ZigLLVMFnIdCeil);
return LLVMBuildCall(g->builder, ceil_fn, &val, 1, "");
}
@@ -2159,9 +2238,13 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
IrInstruction *op2 = bin_op_instruction->op2;
assert(op1->value.type == op2->value.type || op_id == IrBinOpBitShiftLeftLossy ||
- op_id == IrBinOpBitShiftLeftExact || op_id == IrBinOpBitShiftRightLossy ||
- op_id == IrBinOpBitShiftRightExact ||
- (op1->value.type->id == TypeTableEntryIdErrorSet && op2->value.type->id == TypeTableEntryIdErrorSet));
+ op_id == IrBinOpBitShiftLeftExact || op_id == IrBinOpBitShiftRightLossy ||
+ op_id == IrBinOpBitShiftRightExact ||
+ (op1->value.type->id == TypeTableEntryIdErrorSet && op2->value.type->id == TypeTableEntryIdErrorSet) ||
+ (op1->value.type->id == TypeTableEntryIdPointer &&
+ (op_id == IrBinOpAdd || op_id == IrBinOpSub) &&
+ op1->value.type->data.pointer.ptr_len == PtrLenUnknown)
+ );
TypeTableEntry *type_entry = op1->value.type;
bool want_runtime_safety = bin_op_instruction->safety_check_on &&
@@ -2169,6 +2252,8 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
LLVMValueRef op1_value = ir_llvm_value(g, op1);
LLVMValueRef op2_value = ir_llvm_value(g, op2);
+
+
switch (op_id) {
case IrBinOpInvalid:
case IrBinOpArrayCat:
@@ -2193,12 +2278,10 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
} else if (type_entry->id == TypeTableEntryIdInt) {
LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, type_entry->data.integral.is_signed);
return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
- } else if (type_entry->id == TypeTableEntryIdEnum) {
- LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, false);
- return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
- } else if (type_entry->id == TypeTableEntryIdErrorSet ||
- type_entry->id == TypeTableEntryIdPointer ||
- type_entry->id == TypeTableEntryIdBool)
+ } else if (type_entry->id == TypeTableEntryIdEnum ||
+ type_entry->id == TypeTableEntryIdErrorSet ||
+ type_entry->id == TypeTableEntryIdBool ||
+ get_codegen_ptr_type(type_entry) != nullptr)
{
LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, false);
return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
@@ -2207,7 +2290,11 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
}
case IrBinOpAdd:
case IrBinOpAddWrap:
- if (type_entry->id == TypeTableEntryIdFloat) {
+ if (type_entry->id == TypeTableEntryIdPointer) {
+ assert(type_entry->data.pointer.ptr_len == PtrLenUnknown);
+ // TODO runtime safety
+ return LLVMBuildInBoundsGEP(g->builder, op1_value, &op2_value, 1, "");
+ } else if (type_entry->id == TypeTableEntryIdFloat) {
ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &bin_op_instruction->base));
return LLVMBuildFAdd(g->builder, op1_value, op2_value, "");
} else if (type_entry->id == TypeTableEntryIdInt) {
@@ -2270,7 +2357,12 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
}
case IrBinOpSub:
case IrBinOpSubWrap:
- if (type_entry->id == TypeTableEntryIdFloat) {
+ if (type_entry->id == TypeTableEntryIdPointer) {
+ assert(type_entry->data.pointer.ptr_len == PtrLenUnknown);
+ // TODO runtime safety
+ LLVMValueRef subscript_value = LLVMBuildNeg(g->builder, op2_value, "");
+ return LLVMBuildInBoundsGEP(g->builder, op1_value, &subscript_value, 1, "");
+ } else if (type_entry->id == TypeTableEntryIdFloat) {
ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &bin_op_instruction->base));
return LLVMBuildFSub(g->builder, op1_value, op2_value, "");
} else if (type_entry->id == TypeTableEntryIdInt) {
@@ -2460,7 +2552,7 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
assert(wanted_type->data.structure.is_slice);
assert(actual_type->id == TypeTableEntryIdArray);
- TypeTableEntry *wanted_pointer_type = wanted_type->data.structure.fields[0].type_entry;
+ TypeTableEntry *wanted_pointer_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
TypeTableEntry *wanted_child_type = wanted_pointer_type->data.pointer.child_type;
@@ -2486,15 +2578,41 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
} else {
return LLVMBuildUIToFP(g->builder, expr_val, wanted_type->type_ref, "");
}
- case CastOpFloatToInt:
+ case CastOpFloatToInt: {
assert(wanted_type->id == TypeTableEntryIdInt);
ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &cast_instruction->base));
+
+ bool want_safety = ir_want_runtime_safety(g, &cast_instruction->base);
+
+ LLVMValueRef result;
if (wanted_type->data.integral.is_signed) {
- return LLVMBuildFPToSI(g->builder, expr_val, wanted_type->type_ref, "");
+ result = LLVMBuildFPToSI(g->builder, expr_val, wanted_type->type_ref, "");
} else {
- return LLVMBuildFPToUI(g->builder, expr_val, wanted_type->type_ref, "");
+ result = LLVMBuildFPToUI(g->builder, expr_val, wanted_type->type_ref, "");
}
+ if (want_safety) {
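+ // Round-trip the integer result back to float; a difference of 1.0 or more
+ // means the integer part was out of range, so trap with PanicMsgIdFloatToInt.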
+ LLVMValueRef back_to_float;
+ if (wanted_type->data.integral.is_signed) {
+ back_to_float = LLVMBuildSIToFP(g->builder, result, LLVMTypeOf(expr_val), "");
+ } else {
+ back_to_float = LLVMBuildUIToFP(g->builder, result, LLVMTypeOf(expr_val), "");
+ }
+ LLVMValueRef difference = LLVMBuildFSub(g->builder, expr_val, back_to_float, "");
+ LLVMValueRef one_pos = LLVMConstReal(LLVMTypeOf(expr_val), 1.0f);
+ LLVMValueRef one_neg = LLVMConstReal(LLVMTypeOf(expr_val), -1.0f);
+ LLVMValueRef ok_bit_pos = LLVMBuildFCmp(g->builder, LLVMRealOLT, difference, one_pos, "");
+ LLVMValueRef ok_bit_neg = LLVMBuildFCmp(g->builder, LLVMRealOGT, difference, one_neg, "");
+ LLVMValueRef ok_bit = LLVMBuildAnd(g->builder, ok_bit_pos, ok_bit_neg, "");
+ LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FloatCheckOk");
+ LLVMBasicBlockRef bad_block = LLVMAppendBasicBlock(g->cur_fn_val, "FloatCheckFail");
+ LLVMBuildCondBr(g->builder, ok_bit, ok_block, bad_block);
+ LLVMPositionBuilderAtEnd(g->builder, bad_block);
+ gen_safety_crash(g, PanicMsgIdFloatToInt);
+ LLVMPositionBuilderAtEnd(g->builder, ok_block);
+ }
+ return result;
+ }
case CastOpBoolToInt:
assert(wanted_type->id == TypeTableEntryIdInt);
assert(actual_type->id == TypeTableEntryIdBool);
@@ -2504,6 +2622,31 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
add_error_range_check(g, wanted_type, g->err_tag_type, expr_val);
}
return expr_val;
+ case CastOpBitCast:
+ return LLVMBuildBitCast(g->builder, expr_val, wanted_type->type_ref, "");
+ case CastOpPtrOfArrayToSlice: {
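+ // Build a slice out of a pointer-to-array: the ptr field points at element 0
+ // and the len field is the array length.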
+ assert(cast_instruction->tmp_ptr);
+ assert(actual_type->id == TypeTableEntryIdPointer);
+ TypeTableEntry *array_type = actual_type->data.pointer.child_type;
+ assert(array_type->id == TypeTableEntryIdArray);
+
+ LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, cast_instruction->tmp_ptr,
+ slice_ptr_index, "");
+ LLVMValueRef indices[] = {
+ LLVMConstNull(g->builtin_types.entry_usize->type_ref),
+ LLVMConstInt(g->builtin_types.entry_usize->type_ref, 0, false),
+ };
+ LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, expr_val, indices, 2, "");
+ gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false);
+
+ LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, cast_instruction->tmp_ptr,
+ slice_len_index, "");
+ LLVMValueRef len_value = LLVMConstInt(g->builtin_types.entry_usize->type_ref,
+ array_type->data.array.len, false);
+ gen_store_untyped(g, len_value, len_field_ptr, 0, false);
+
+ return cast_instruction->tmp_ptr;
+ }
}
zig_unreachable();
}
@@ -2559,8 +2702,25 @@ static LLVMValueRef ir_render_int_to_enum(CodeGen *g, IrExecutable *executable,
TypeTableEntry *tag_int_type = wanted_type->data.enumeration.tag_int_type;
LLVMValueRef target_val = ir_llvm_value(g, instruction->target);
- return gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
+ LLVMValueRef tag_int_value = gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
instruction->target->value.type, tag_int_type, target_val);
+
+ if (ir_want_runtime_safety(g, &instruction->base)) {
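+ // Verify the integer maps to a declared tag: switch over every valid tag value,
+ // with any other value falling through to a panic block.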
+ LLVMBasicBlockRef bad_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadValue");
+ LLVMBasicBlockRef ok_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkValue");
+ size_t field_count = wanted_type->data.enumeration.src_field_count;
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, tag_int_value, bad_value_block, field_count);
+ for (size_t field_i = 0; field_i < field_count; field_i += 1) {
+ LLVMValueRef this_tag_int_value = bigint_to_llvm_const(tag_int_type->type_ref,
+ &wanted_type->data.enumeration.fields[field_i].value);
+ LLVMAddCase(switch_instr, this_tag_int_value, ok_value_block);
+ }
+ LLVMPositionBuilderAtEnd(g->builder, bad_value_block);
+ gen_safety_crash(g, PanicMsgIdBadEnumValue);
+
+ LLVMPositionBuilderAtEnd(g->builder, ok_value_block);
+ }
+ return tag_int_value;
}
static LLVMValueRef ir_render_int_to_err(CodeGen *g, IrExecutable *executable, IrInstructionIntToErr *instruction) {
@@ -2639,7 +2799,7 @@ static LLVMValueRef ir_render_un_op(CodeGen *g, IrExecutable *executable, IrInst
switch (op_id) {
case IrUnOpInvalid:
- case IrUnOpMaybe:
+ case IrUnOpOptional:
case IrUnOpDereference:
zig_unreachable();
case IrUnOpNegation:
@@ -2717,7 +2877,7 @@ static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable,
if (have_init_expr) {
assert(var->value->type == init_value->value.type);
TypeTableEntry *var_ptr_type = get_pointer_to_type_extra(g, var->value->type, false, false,
- var->align_bytes, 0, 0);
+ PtrLenSingle, var->align_bytes, 0, 0);
gen_assign_raw(g, var->value_ref, var_ptr_type, ir_llvm_value(g, init_value));
} else {
bool want_safe = ir_want_runtime_safety(g, &decl_var_instruction->base);
@@ -2814,7 +2974,13 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI
bool safety_check_on = ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on;
- if (array_type->id == TypeTableEntryIdArray) {
+ if (array_type->id == TypeTableEntryIdArray ||
+ (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle))
+ {
+ if (array_type->id == TypeTableEntryIdPointer) {
+ assert(array_type->data.pointer.child_type->id == TypeTableEntryIdArray);
+ array_type = array_type->data.pointer.child_type;
+ }
if (safety_check_on) {
LLVMValueRef end = LLVMConstInt(g->builtin_types.entry_usize->type_ref,
array_type->data.array.len, false);
@@ -2855,18 +3021,26 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI
return LLVMBuildInBoundsGEP(g->builder, array_ptr, indices, 1, "");
} else if (array_type->id == TypeTableEntryIdStruct) {
assert(array_type->data.structure.is_slice);
+ if (!type_has_bits(instruction->base.value.type)) {
+ if (safety_check_on) {
+ assert(LLVMGetTypeKind(LLVMTypeOf(array_ptr)) == LLVMIntegerTypeKind);
+ add_bounds_check(g, subscript_value, LLVMIntEQ, nullptr, LLVMIntULT, array_ptr);
+ }
+ return nullptr;
+ }
+
assert(LLVMGetTypeKind(LLVMTypeOf(array_ptr)) == LLVMPointerTypeKind);
assert(LLVMGetTypeKind(LLVMGetElementType(LLVMTypeOf(array_ptr))) == LLVMStructTypeKind);
if (safety_check_on) {
- size_t len_index = array_type->data.structure.fields[1].gen_index;
+ size_t len_index = array_type->data.structure.fields[slice_len_index].gen_index;
assert(len_index != SIZE_MAX);
LLVMValueRef len_ptr = LLVMBuildStructGEP(g->builder, array_ptr, (unsigned)len_index, "");
LLVMValueRef len = gen_load_untyped(g, len_ptr, 0, false, "");
add_bounds_check(g, subscript_value, LLVMIntEQ, nullptr, LLVMIntULT, len);
}
- size_t ptr_index = array_type->data.structure.fields[0].gen_index;
+ size_t ptr_index = array_type->data.structure.fields[slice_ptr_index].gen_index;
assert(ptr_index != SIZE_MAX);
LLVMValueRef ptr_ptr = LLVMBuildStructGEP(g->builder, array_ptr, (unsigned)ptr_index, "");
LLVMValueRef ptr = gen_load_untyped(g, ptr_ptr, 0, false, "");
@@ -2895,6 +3069,38 @@ static size_t get_async_err_code_arg_index(CodeGen *g, FnTypeId *fn_type_id) {
return 1 + get_async_allocator_arg_index(g, fn_type_id);
}
+
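+// Compute the highest usize-aligned address within the given stack slice; this is
+// used as the starting stack pointer when a call is made on a caller-provided stack.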
+static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) {
+ LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_ptr_index, "");
+ LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_len_index, "");
+
+ LLVMValueRef ptr_value = gen_load_untyped(g, ptr_field_ptr, 0, false, "");
+ LLVMValueRef len_value = gen_load_untyped(g, len_field_ptr, 0, false, "");
+
+ LLVMValueRef ptr_addr = LLVMBuildPtrToInt(g->builder, ptr_value, LLVMTypeOf(len_value), "");
+ LLVMValueRef end_addr = LLVMBuildNUWAdd(g->builder, ptr_addr, len_value, "");
+ LLVMValueRef align_amt = LLVMConstInt(LLVMTypeOf(end_addr), get_abi_alignment(g, g->builtin_types.entry_usize), false);
+ LLVMValueRef align_adj = LLVMBuildURem(g->builder, end_addr, align_amt, "");
+ return LLVMBuildNUWSub(g->builder, end_addr, align_adj, "");
+}
+
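+// Write the given address into the target's stack pointer register via the
+// llvm.write_register intrinsic.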
+static void gen_set_stack_pointer(CodeGen *g, LLVMValueRef aligned_end_addr) {
+ LLVMValueRef write_register_fn_val = get_write_register_fn_val(g);
+
+ if (g->sp_md_node == nullptr) {
+ Buf *sp_reg_name = buf_create_from_str(arch_stack_pointer_register_name(&g->zig_target.arch));
+ LLVMValueRef str_node = LLVMMDString(buf_ptr(sp_reg_name), buf_len(sp_reg_name) + 1);
+ g->sp_md_node = LLVMMDNode(&str_node, 1);
+ }
+
+ LLVMValueRef params[] = {
+ g->sp_md_node,
+ aligned_end_addr,
+ };
+
+ LLVMBuildCall(g->builder, write_register_fn_val, params, 2, "");
+}
+
static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCall *instruction) {
LLVMValueRef fn_val;
TypeTableEntry *fn_type;
@@ -2961,18 +3167,24 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
}
LLVMCallConv llvm_cc = get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc);
- LLVMValueRef result = ZigLLVMBuildCall(g->builder, fn_val,
- gen_param_values, (unsigned)gen_param_index, llvm_cc, fn_inline, "");
+ LLVMValueRef result;
+
+ if (instruction->new_stack == nullptr) {
+ result = ZigLLVMBuildCall(g->builder, fn_val,
+ gen_param_values, (unsigned)gen_param_index, llvm_cc, fn_inline, "");
+ } else {
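+ // Run the call on a separate stack: save the current stack pointer, point it at
+ // the aligned end of the new stack, make the call, then restore the old pointer.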
+ LLVMValueRef stacksave_fn_val = get_stacksave_fn_val(g);
+ LLVMValueRef stackrestore_fn_val = get_stackrestore_fn_val(g);
- for (size_t param_i = 0; param_i < fn_type_id->param_count; param_i += 1) {
- FnGenParamInfo *gen_info = &fn_type->data.fn.gen_param_info[param_i];
- // Note: byval is disabled on windows due to an LLVM bug:
- // https://github.com/zig-lang/zig/issues/536
- if (gen_info->is_byval && g->zig_target.os != OsWindows) {
- addLLVMCallsiteAttr(result, (unsigned)gen_info->gen_index, "byval");
- }
+ LLVMValueRef new_stack_addr = get_new_stack_addr(g, ir_llvm_value(g, instruction->new_stack));
+ LLVMValueRef old_stack_ref = LLVMBuildCall(g->builder, stacksave_fn_val, nullptr, 0, "");
+ gen_set_stack_pointer(g, new_stack_addr);
+ result = ZigLLVMBuildCall(g->builder, fn_val,
+ gen_param_values, (unsigned)gen_param_index, llvm_cc, fn_inline, "");
+ LLVMBuildCall(g->builder, stackrestore_fn_val, &old_stack_ref, 1, "");
}
+
if (instruction->is_async) {
LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_payload_index, "");
LLVMBuildStore(g->builder, result, payload_ptr);
@@ -2985,6 +3197,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
return nullptr;
} else if (first_arg_ret) {
return instruction->tmp_ptr;
+ } else if (handle_is_ptr(src_return_type)) {
+ auto store_instr = LLVMBuildStore(g->builder, result, instruction->tmp_ptr);
+ LLVMSetAlignment(store_instr, LLVMGetAlignment(instruction->tmp_ptr));
+ return instruction->tmp_ptr;
} else {
return result;
}
@@ -3185,7 +3401,7 @@ static LLVMValueRef ir_render_asm(CodeGen *g, IrExecutable *executable, IrInstru
}
static LLVMValueRef gen_non_null_bit(CodeGen *g, TypeTableEntry *maybe_type, LLVMValueRef maybe_handle) {
- assert(maybe_type->id == TypeTableEntryIdMaybe);
+ assert(maybe_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = maybe_type->data.maybe.child_type;
if (child_type->zero_bits) {
return maybe_handle;
@@ -3207,23 +3423,23 @@ static LLVMValueRef ir_render_test_non_null(CodeGen *g, IrExecutable *executable
}
static LLVMValueRef ir_render_unwrap_maybe(CodeGen *g, IrExecutable *executable,
- IrInstructionUnwrapMaybe *instruction)
+ IrInstructionUnwrapOptional *instruction)
{
TypeTableEntry *ptr_type = instruction->value->value.type;
assert(ptr_type->id == TypeTableEntryIdPointer);
TypeTableEntry *maybe_type = ptr_type->data.pointer.child_type;
- assert(maybe_type->id == TypeTableEntryIdMaybe);
+ assert(maybe_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = maybe_type->data.maybe.child_type;
LLVMValueRef maybe_ptr = ir_llvm_value(g, instruction->value);
LLVMValueRef maybe_handle = get_handle_value(g, maybe_ptr, maybe_type, ptr_type);
if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on) {
LLVMValueRef non_null_bit = gen_non_null_bit(g, maybe_type, maybe_handle);
- LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeOk");
- LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeFail");
+ LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalOk");
+ LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalFail");
LLVMBuildCondBr(g->builder, non_null_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
- gen_safety_crash(g, PanicMsgIdUnwrapMaybeFail);
+ gen_safety_crash(g, PanicMsgIdUnwrapOptionalFail);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@@ -3243,14 +3459,24 @@ static LLVMValueRef ir_render_unwrap_maybe(CodeGen *g, IrExecutable *executable,
static LLVMValueRef get_int_builtin_fn(CodeGen *g, TypeTableEntry *int_type, BuiltinFnId fn_id) {
ZigLLVMFnKey key = {};
const char *fn_name;
+ uint32_t n_args;
if (fn_id == BuiltinFnIdCtz) {
fn_name = "cttz";
+ n_args = 2;
key.id = ZigLLVMFnIdCtz;
key.data.ctz.bit_count = (uint32_t)int_type->data.integral.bit_count;
- } else {
+ } else if (fn_id == BuiltinFnIdClz) {
fn_name = "ctlz";
+ n_args = 2;
key.id = ZigLLVMFnIdClz;
key.data.clz.bit_count = (uint32_t)int_type->data.integral.bit_count;
+ } else if (fn_id == BuiltinFnIdPopCount) {
+ fn_name = "ctpop";
+ n_args = 1;
+ key.id = ZigLLVMFnIdPopCount;
+ key.data.pop_count.bit_count = (uint32_t)int_type->data.integral.bit_count;
+ } else {
+ zig_unreachable();
}
auto existing_entry = g->llvm_fn_table.maybe_get(key);
@@ -3263,7 +3489,7 @@ static LLVMValueRef get_int_builtin_fn(CodeGen *g, TypeTableEntry *int_type, Bui
int_type->type_ref,
LLVMInt1Type(),
};
- LLVMTypeRef fn_type = LLVMFunctionType(int_type->type_ref, param_types, 2, false);
+ LLVMTypeRef fn_type = LLVMFunctionType(int_type->type_ref, param_types, n_args, false);
LLVMValueRef fn_val = LLVMAddFunction(g->module, llvm_name, fn_type);
assert(LLVMGetIntrinsicID(fn_val));
@@ -3296,6 +3522,14 @@ static LLVMValueRef ir_render_ctz(CodeGen *g, IrExecutable *executable, IrInstru
return gen_widen_or_shorten(g, false, int_type, instruction->base.value.type, wrong_size_int);
}
+static LLVMValueRef ir_render_pop_count(CodeGen *g, IrExecutable *executable, IrInstructionPopCount *instruction) {
+ TypeTableEntry *int_type = instruction->value->value.type;
+ LLVMValueRef fn_val = get_int_builtin_fn(g, int_type, BuiltinFnIdPopCount);
+ LLVMValueRef operand = ir_llvm_value(g, instruction->value);
+ LLVMValueRef wrong_size_int = LLVMBuildCall(g->builder, fn_val, &operand, 1, "");
+ return gen_widen_or_shorten(g, false, int_type, instruction->base.value.type, wrong_size_int);
+}
+
static LLVMValueRef ir_render_switch_br(CodeGen *g, IrExecutable *executable, IrInstructionSwitchBr *instruction) {
LLVMValueRef target_value = ir_llvm_value(g, instruction->target_value);
LLVMBasicBlockRef else_block = instruction->else_block->llvm_block;
@@ -3366,34 +3600,112 @@ static LLVMValueRef ir_render_err_name(CodeGen *g, IrExecutable *executable, IrI
return LLVMBuildInBoundsGEP(g->builder, g->err_name_table, indices, 2, "");
}
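+// Lazily build (and cache on the enum type) a helper function mapping a tag value
+// to its field name as a []u8 slice; this backs the @tagName instruction at runtime.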
+static LLVMValueRef get_enum_tag_name_function(CodeGen *g, TypeTableEntry *enum_type) {
+ assert(enum_type->id == TypeTableEntryIdEnum);
+ if (enum_type->data.enumeration.name_function)
+ return enum_type->data.enumeration.name_function;
+
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
+ TypeTableEntry *u8_slice_type = get_slice_type(g, u8_ptr_type);
+ TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type;
+
+ LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(u8_slice_type->type_ref, 0),
+ &tag_int_type->type_ref, 1, false);
+
+ Buf *fn_name = get_mangled_name(g, buf_sprintf("__zig_tag_name_%s", buf_ptr(&enum_type->name)), false);
+ LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
+ LLVMSetLinkage(fn_val, LLVMInternalLinkage);
+ LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
+ addLLVMFnAttr(fn_val, "nounwind");
+ add_uwtable_attr(g, fn_val);
+ if (g->build_mode == BuildModeDebug) {
+ ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
+ ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
+ }
+
+ LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
+ LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
+ FnTableEntry *prev_cur_fn = g->cur_fn;
+ LLVMValueRef prev_cur_fn_val = g->cur_fn_val;
+
+ LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
+ LLVMPositionBuilderAtEnd(g->builder, entry_block);
+ ZigLLVMClearCurrentDebugLocation(g->builder);
+ g->cur_fn = nullptr;
+ g->cur_fn_val = fn_val;
+
+ size_t field_count = enum_type->data.enumeration.src_field_count;
+ LLVMBasicBlockRef bad_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadValue");
+ LLVMValueRef tag_int_value = LLVMGetParam(fn_val, 0);
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, tag_int_value, bad_value_block, field_count);
+
+
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ LLVMValueRef array_ptr_indices[] = {
+ LLVMConstNull(usize->type_ref),
+ LLVMConstNull(usize->type_ref),
+ };
+
+ for (size_t field_i = 0; field_i < field_count; field_i += 1) {
+ Buf *name = enum_type->data.enumeration.fields[field_i].name;
+ LLVMValueRef str_init = LLVMConstString(buf_ptr(name), (unsigned)buf_len(name), true);
+ LLVMValueRef str_global = LLVMAddGlobal(g->module, LLVMTypeOf(str_init), "");
+ LLVMSetInitializer(str_global, str_init);
+ LLVMSetLinkage(str_global, LLVMPrivateLinkage);
+ LLVMSetGlobalConstant(str_global, true);
+ LLVMSetUnnamedAddr(str_global, true);
+ LLVMSetAlignment(str_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(str_init)));
+
+ LLVMValueRef fields[] = {
+ LLVMConstGEP(str_global, array_ptr_indices, 2),
+ LLVMConstInt(g->builtin_types.entry_usize->type_ref, buf_len(name), false),
+ };
+ LLVMValueRef slice_init_value = LLVMConstNamedStruct(u8_slice_type->type_ref, fields, 2);
+
+ LLVMValueRef slice_global = LLVMAddGlobal(g->module, LLVMTypeOf(slice_init_value), "");
+ LLVMSetInitializer(slice_global, slice_init_value);
+ LLVMSetLinkage(slice_global, LLVMPrivateLinkage);
+ LLVMSetGlobalConstant(slice_global, true);
+ LLVMSetUnnamedAddr(slice_global, true);
+ LLVMSetAlignment(slice_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(slice_init_value)));
+
+ LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(g->cur_fn_val, "Name");
+ LLVMValueRef this_tag_int_value = bigint_to_llvm_const(tag_int_type->type_ref,
+ &enum_type->data.enumeration.fields[field_i].value);
+ LLVMAddCase(switch_instr, this_tag_int_value, return_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, return_block);
+ LLVMBuildRet(g->builder, slice_global);
+ }
+
+ LLVMPositionBuilderAtEnd(g->builder, bad_value_block);
+ if (g->build_mode == BuildModeDebug || g->build_mode == BuildModeSafeRelease) {
+ gen_safety_crash(g, PanicMsgIdBadEnumValue);
+ } else {
+ LLVMBuildUnreachable(g->builder);
+ }
+
+ g->cur_fn = prev_cur_fn;
+ g->cur_fn_val = prev_cur_fn_val;
+ LLVMPositionBuilderAtEnd(g->builder, prev_block);
+ LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
+
+ enum_type->data.enumeration.name_function = fn_val;
+ return fn_val;
+}
+
static LLVMValueRef ir_render_enum_tag_name(CodeGen *g, IrExecutable *executable,
IrInstructionTagName *instruction)
{
TypeTableEntry *enum_type = instruction->target->value.type;
assert(enum_type->id == TypeTableEntryIdEnum);
- assert(enum_type->data.enumeration.generate_name_table);
- TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type;
+ LLVMValueRef enum_name_function = get_enum_tag_name_function(g, enum_type);
+
LLVMValueRef enum_tag_value = ir_llvm_value(g, instruction->target);
- if (ir_want_runtime_safety(g, &instruction->base)) {
- size_t field_count = enum_type->data.enumeration.src_field_count;
-
- // if the field_count can't fit in the bits of the enum_type, then it can't possibly
- // be the wrong value
- BigInt field_bi;
- bigint_init_unsigned(&field_bi, field_count);
- if (bigint_fits_in_bits(&field_bi, tag_int_type->data.integral.bit_count, false)) {
- LLVMValueRef end_val = LLVMConstInt(LLVMTypeOf(enum_tag_value), field_count, false);
- add_bounds_check(g, enum_tag_value, LLVMIntEQ, nullptr, LLVMIntULT, end_val);
- }
- }
-
- LLVMValueRef indices[] = {
- LLVMConstNull(g->builtin_types.entry_usize->type_ref),
- gen_widen_or_shorten(g, false, tag_int_type,
- g->builtin_types.entry_usize, enum_tag_value),
- };
- return LLVMBuildInBoundsGEP(g->builder, enum_type->data.enumeration.name_table, indices, 2, "");
+ return ZigLLVMBuildCall(g->builder, enum_name_function, &enum_tag_value, 1,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
}
static LLVMValueRef ir_render_field_parent_ptr(CodeGen *g, IrExecutable *executable,
@@ -3443,17 +3755,17 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I
} else if (target_type->id == TypeTableEntryIdFn) {
align_bytes = target_type->data.fn.fn_type_id.alignment;
ptr_val = target_val;
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdPointer)
{
align_bytes = target_type->data.maybe.child_type->data.pointer.alignment;
ptr_val = target_val;
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdFn)
{
align_bytes = target_type->data.maybe.child_type->data.fn.fn_type_id.alignment;
ptr_val = target_val;
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdPromise)
{
zig_panic("TODO audit this function");
@@ -3552,9 +3864,30 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutable *executable, IrIn
LLVMAtomicOrdering failure_order = to_LLVMAtomicOrdering(instruction->failure_order);
LLVMValueRef result_val = ZigLLVMBuildCmpXchg(g->builder, ptr_val, cmp_val, new_val,
- success_order, failure_order);
+ success_order, failure_order, instruction->is_weak);
- return LLVMBuildExtractValue(g->builder, result_val, 1, "");
+ TypeTableEntry *maybe_type = instruction->base.value.type;
+ assert(maybe_type->id == TypeTableEntryIdOptional);
+ TypeTableEntry *child_type = maybe_type->data.maybe.child_type;
+
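+ // cmpxchg returns ?T: null on success, the previous value on failure.
+ // Pointer-like payloads represent null as a null pointer, so a select suffices.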
+ if (type_is_codegen_pointer(child_type)) {
+ LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, "");
+ LLVMValueRef success_bit = LLVMBuildExtractValue(g->builder, result_val, 1, "");
+ return LLVMBuildSelect(g->builder, success_bit, LLVMConstNull(child_type->type_ref), payload_val, "");
+ }
+
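+ // Non-pointer payloads use the optional's in-memory layout: store the previous
+ // value in the child field and set the non-null bit only when the exchange failed.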
+ assert(instruction->tmp_ptr != nullptr);
+ assert(type_has_bits(instruction->type));
+
+ LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, "");
+ LLVMValueRef val_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, maybe_child_index, "");
+ gen_assign_raw(g, val_ptr, get_pointer_to_type(g, instruction->type, false), payload_val);
+
+ LLVMValueRef success_bit = LLVMBuildExtractValue(g->builder, result_val, 1, "");
+ LLVMValueRef nonnull_bit = LLVMBuildNot(g->builder, success_bit, "");
+ LLVMValueRef maybe_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, maybe_null_index, "");
+ gen_store_untyped(g, nonnull_bit, maybe_ptr, 0, false);
+ return instruction->tmp_ptr;
}
static LLVMValueRef ir_render_fence(CodeGen *g, IrExecutable *executable, IrInstructionFence *instruction) {
@@ -3654,7 +3987,12 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
bool want_runtime_safety = instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base);
- if (array_type->id == TypeTableEntryIdArray) {
+ if (array_type->id == TypeTableEntryIdArray ||
+ (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle))
+ {
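+ // Slicing a single-item pointer to an array behaves like slicing the array
+ // itself, so swap in the child type and reuse the array path below.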
+ if (array_type->id == TypeTableEntryIdPointer) {
+ array_type = array_type->data.pointer.child_type;
+ }
LLVMValueRef start_val = ir_llvm_value(g, instruction->start);
LLVMValueRef end_val;
if (instruction->end) {
@@ -3662,7 +4000,6 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
} else {
end_val = LLVMConstInt(g->builtin_types.entry_usize->type_ref, array_type->data.array.len, false);
}
-
if (want_runtime_safety) {
add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val);
if (instruction->end) {
@@ -3695,6 +4032,7 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
return tmp_struct_ptr;
} else if (array_type->id == TypeTableEntryIdPointer) {
+ assert(array_type->data.pointer.ptr_len == PtrLenUnknown);
LLVMValueRef start_val = ir_llvm_value(g, instruction->start);
LLVMValueRef end_val = ir_llvm_value(g, instruction->end);
@@ -3702,11 +4040,15 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val);
}
- LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, slice_ptr_index, "");
- LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, array_ptr, &start_val, 1, "");
- gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false);
+ if (type_has_bits(array_type)) {
+ size_t gen_ptr_index = instruction->base.value.type->data.structure.fields[slice_ptr_index].gen_index;
+ LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, gen_ptr_index, "");
+ LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, array_ptr, &start_val, 1, "");
+ gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false);
+ }
- LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, slice_len_index, "");
+ size_t gen_len_index = instruction->base.value.type->data.structure.fields[slice_len_index].gen_index;
+ LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, tmp_struct_ptr, gen_len_index, "");
LLVMValueRef len_value = LLVMBuildNSWSub(g->builder, end_val, start_val, "");
gen_store_untyped(g, len_value, len_field_ptr, 0, false);
@@ -3804,6 +4146,26 @@ static LLVMValueRef ir_render_frame_address(CodeGen *g, IrExecutable *executable
return LLVMBuildCall(g->builder, get_frame_address_fn_val(g), &zero, 1, "");
}
+static LLVMValueRef get_handle_fn_val(CodeGen *g) {
+ if (g->coro_frame_fn_val)
+ return g->coro_frame_fn_val;
+
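+ // llvm.coro.frame takes no arguments and returns an i8* to the current coroutine frame.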
+ LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), nullptr, 0, false);
+ Buf *name = buf_sprintf("llvm.coro.frame");
+ g->coro_frame_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type);
+ assert(LLVMGetIntrinsicID(g->coro_frame_fn_val));
+
+ return g->coro_frame_fn_val;
+}
+
+static LLVMValueRef ir_render_handle(CodeGen *g, IrExecutable *executable,
+ IrInstructionHandle *instruction)
+{
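+ // The call passes zero arguments; `zero` presumably only gives LLVMBuildCall a valid args pointer.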
+ LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_promise->type_ref);
+ return LLVMBuildCall(g->builder, get_handle_fn_val(g), &zero, 0, "");
+}
+
static LLVMValueRef render_shl_with_overflow(CodeGen *g, IrInstructionOverflowOp *instruction) {
TypeTableEntry *int_type = instruction->result_ptr_type;
assert(int_type->id == TypeTableEntryIdInt);
@@ -3939,10 +4301,10 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu
}
}
-static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionMaybeWrap *instruction) {
+static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionOptionalWrap *instruction) {
TypeTableEntry *wanted_type = instruction->base.value.type;
- assert(wanted_type->id == TypeTableEntryIdMaybe);
+ assert(wanted_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = wanted_type->data.maybe.child_type;
@@ -4049,7 +4411,7 @@ static LLVMValueRef ir_render_struct_init(CodeGen *g, IrExecutable *executable,
uint32_t field_align_bytes = get_abi_alignment(g, type_struct_field->type_entry);
TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, type_struct_field->type_entry,
- false, false, field_align_bytes,
+ false, false, PtrLenSingle, field_align_bytes,
(uint32_t)type_struct_field->packed_bits_offset, (uint32_t)type_struct_field->unaligned_bit_count);
gen_assign_raw(g, field_ptr, ptr_type, value);
@@ -4065,7 +4427,7 @@ static LLVMValueRef ir_render_union_init(CodeGen *g, IrExecutable *executable, I
uint32_t field_align_bytes = get_abi_alignment(g, type_union_field->type_entry);
TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, type_union_field->type_entry,
- false, false, field_align_bytes,
+ false, false, PtrLenSingle, field_align_bytes,
0, 0);
LLVMValueRef uncasted_union_ptr;
@@ -4312,7 +4674,8 @@ static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_f
LLVMPositionBuilderAtEnd(g->builder, ok_block);
LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_payload_index, "");
- TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false);
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
TypeTableEntry *slice_type = get_slice_type(g, u8_ptr_type);
size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index;
LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, payload_ptr, ptr_field_index, "");
@@ -4381,6 +4744,16 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable,
return LLVMBuildIntToPtr(g->builder, uncasted_result, operand_type->type_ref, "");
}
+static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable,
+ IrInstructionAtomicLoad *instruction)
+{
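+ // An atomic load is an ordinary load with the requested ordering attached to it.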
+ LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->resolved_ordering);
+ LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
+ LLVMValueRef load_inst = gen_load(g, ptr, instruction->ptr->value.type, "");
+ LLVMSetOrdering(load_inst, ordering);
+ return load_inst;
+}
+
static LLVMValueRef ir_render_merge_err_ret_traces(CodeGen *g, IrExecutable *executable,
IrInstructionMergeErrRetTraces *instruction)
{
@@ -4402,6 +4775,13 @@ static LLVMValueRef ir_render_mark_err_ret_trace_ptr(CodeGen *g, IrExecutable *e
return nullptr;
}
+static LLVMValueRef ir_render_sqrt(CodeGen *g, IrExecutable *executable, IrInstructionSqrt *instruction) {
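+ // @sqrt lowers to a call of the per-width float sqrt function (the LLVM sqrt intrinsic).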
+ LLVMValueRef op = ir_llvm_value(g, instruction->op);
+ assert(instruction->base.value.type->id == TypeTableEntryIdFloat);
+ LLVMValueRef fn_val = get_float_fn(g, instruction->base.value.type, ZigLLVMFnIdSqrt);
+ return LLVMBuildCall(g->builder, fn_val, &op, 1, "");
+}
+
static void set_debug_location(CodeGen *g, IrInstruction *instruction) {
AstNode *source_node = instruction->source_node;
Scope *scope = instruction->scope;
@@ -4453,13 +4833,13 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdCheckSwitchProngs:
case IrInstructionIdCheckStatementIsVoid:
case IrInstructionIdTypeName:
- case IrInstructionIdCanImplicitCast:
case IrInstructionIdDeclRef:
case IrInstructionIdSwitchVar:
case IrInstructionIdOffsetOf:
+ case IrInstructionIdTypeInfo:
case IrInstructionIdTypeId:
case IrInstructionIdSetEvalBranchQuota:
- case IrInstructionIdPtrTypeOf:
+ case IrInstructionIdPtrType:
case IrInstructionIdOpaqueType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdArgType:
@@ -4469,6 +4849,15 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdPromiseResultType:
case IrInstructionIdAwaitBookkeeping:
case IrInstructionIdAddImplicitReturnType:
+ case IrInstructionIdIntCast:
+ case IrInstructionIdFloatCast:
+ case IrInstructionIdIntToFloat:
+ case IrInstructionIdFloatToInt:
+ case IrInstructionIdBoolToInt:
+ case IrInstructionIdErrSetCast:
+ case IrInstructionIdFromBytes:
+ case IrInstructionIdToBytes:
+ case IrInstructionIdEnumToInt:
zig_unreachable();
case IrInstructionIdReturn:
@@ -4505,12 +4894,14 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_asm(g, executable, (IrInstructionAsm *)instruction);
case IrInstructionIdTestNonNull:
return ir_render_test_non_null(g, executable, (IrInstructionTestNonNull *)instruction);
- case IrInstructionIdUnwrapMaybe:
- return ir_render_unwrap_maybe(g, executable, (IrInstructionUnwrapMaybe *)instruction);
+ case IrInstructionIdUnwrapOptional:
+ return ir_render_unwrap_maybe(g, executable, (IrInstructionUnwrapOptional *)instruction);
case IrInstructionIdClz:
return ir_render_clz(g, executable, (IrInstructionClz *)instruction);
case IrInstructionIdCtz:
return ir_render_ctz(g, executable, (IrInstructionCtz *)instruction);
+ case IrInstructionIdPopCount:
+ return ir_render_pop_count(g, executable, (IrInstructionPopCount *)instruction);
case IrInstructionIdSwitchBr:
return ir_render_switch_br(g, executable, (IrInstructionSwitchBr *)instruction);
case IrInstructionIdPhi:
@@ -4539,6 +4930,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_return_address(g, executable, (IrInstructionReturnAddress *)instruction);
case IrInstructionIdFrameAddress:
return ir_render_frame_address(g, executable, (IrInstructionFrameAddress *)instruction);
+ case IrInstructionIdHandle:
+ return ir_render_handle(g, executable, (IrInstructionHandle *)instruction);
case IrInstructionIdOverflowOp:
return ir_render_overflow_op(g, executable, (IrInstructionOverflowOp *)instruction);
case IrInstructionIdTestErr:
@@ -4547,8 +4940,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_unwrap_err_code(g, executable, (IrInstructionUnwrapErrCode *)instruction);
case IrInstructionIdUnwrapErrPayload:
return ir_render_unwrap_err_payload(g, executable, (IrInstructionUnwrapErrPayload *)instruction);
- case IrInstructionIdMaybeWrap:
- return ir_render_maybe_wrap(g, executable, (IrInstructionMaybeWrap *)instruction);
+ case IrInstructionIdOptionalWrap:
+ return ir_render_maybe_wrap(g, executable, (IrInstructionOptionalWrap *)instruction);
case IrInstructionIdErrWrapCode:
return ir_render_err_wrap_code(g, executable, (IrInstructionErrWrapCode *)instruction);
case IrInstructionIdErrWrapPayload:
@@ -4617,12 +5010,16 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_coro_alloc_helper(g, executable, (IrInstructionCoroAllocHelper *)instruction);
case IrInstructionIdAtomicRmw:
return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
+ case IrInstructionIdAtomicLoad:
+ return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction);
case IrInstructionIdSaveErrRetAddr:
return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction);
case IrInstructionIdMergeErrRetTraces:
return ir_render_merge_err_ret_traces(g, executable, (IrInstructionMergeErrRetTraces *)instruction);
case IrInstructionIdMarkErrRetTracePtr:
return ir_render_mark_err_ret_trace_ptr(g, executable, (IrInstructionMarkErrRetTracePtr *)instruction);
+ case IrInstructionIdSqrt:
+ return ir_render_sqrt(g, executable, (IrInstructionSqrt *)instruction);
}
zig_unreachable();
}
@@ -4649,7 +5046,7 @@ static void ir_render(CodeGen *g, FnTableEntry *fn_entry) {
static LLVMValueRef gen_const_ptr_struct_recursive(CodeGen *g, ConstExprValue *struct_const_val, size_t field_index);
static LLVMValueRef gen_const_ptr_array_recursive(CodeGen *g, ConstExprValue *array_const_val, size_t index);
-static LLVMValueRef gen_const_ptr_union_recursive(CodeGen *g, ConstExprValue *array_const_val);
+static LLVMValueRef gen_const_ptr_union_recursive(CodeGen *g, ConstExprValue *union_const_val);
static LLVMValueRef gen_parent_ptr(CodeGen *g, ConstExprValue *val, ConstParent *parent) {
switch (parent->id) {
@@ -4665,6 +5062,10 @@ static LLVMValueRef gen_parent_ptr(CodeGen *g, ConstExprValue *val, ConstParent
parent->data.p_array.elem_index);
case ConstParentIdUnion:
return gen_const_ptr_union_recursive(g, parent->data.p_union.union_val);
+ case ConstParentIdScalar:
+ render_const_val(g, parent->data.p_scalar.scalar_val, "");
+ render_const_val_global(g, parent->data.p_scalar.scalar_val, "");
+ return parent->data.p_scalar.scalar_val->global_refs->llvm_global;
}
zig_unreachable();
}
@@ -4690,7 +5091,8 @@ static LLVMValueRef gen_const_ptr_array_recursive(CodeGen *g, ConstExprValue *ar
};
return LLVMConstInBoundsGEP(base_ptr, indices, 2);
} else {
- zig_unreachable();
+ assert(parent->id == ConstParentIdScalar);
+ return base_ptr;
}
}
@@ -4734,10 +5136,10 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
case TypeTableEntryIdInvalid:
case TypeTableEntryIdMetaType:
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdNamespace:
@@ -4769,7 +5171,7 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
}
case TypeTableEntryIdPointer:
case TypeTableEntryIdFn:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdPromise:
{
LLVMValueRef ptr_val = gen_const_val(g, const_val, "");
@@ -4817,6 +5219,79 @@ static bool is_llvm_value_unnamed_type(TypeTableEntry *type_entry, LLVMValueRef
return LLVMTypeOf(val) != type_entry->type_ref;
}
+static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, const char *name) {
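+ // Constant pointers are emitted by materializing a global for the pointee (or its
+ // parent aggregate) and bitcasting its address; zero-bit targets and hard-coded
+ // addresses become inttoptr constants instead.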
+ render_const_val_global(g, const_val, name);
+ switch (const_val->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ case ConstPtrSpecialDiscard:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ {
+ ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee;
+ render_const_val(g, pointee, "");
+ render_const_val_global(g, pointee, "");
+ ConstExprValue *other_val = pointee;
+ const_val->global_refs->llvm_value = LLVMConstBitCast(other_val->global_refs->llvm_global, const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ case ConstPtrSpecialBaseArray:
+ {
+ ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val;
+ size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
+ assert(array_const_val->type->id == TypeTableEntryIdArray);
+ if (array_const_val->type->zero_bits) {
+ // make this a null pointer
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
+ const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ LLVMValueRef uncasted_ptr_val = gen_const_ptr_array_recursive(g, array_const_val,
+ elem_index);
+ LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
+ const_val->global_refs->llvm_value = ptr_val;
+ render_const_val_global(g, const_val, "");
+ return ptr_val;
+ }
+ case ConstPtrSpecialBaseStruct:
+ {
+ ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val;
+ assert(struct_const_val->type->id == TypeTableEntryIdStruct);
+ if (struct_const_val->type->zero_bits) {
+ // make this a null pointer
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
+ const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ size_t src_field_index = const_val->data.x_ptr.data.base_struct.field_index;
+ size_t gen_field_index =
+ struct_const_val->type->data.structure.fields[src_field_index].gen_index;
+ LLVMValueRef uncasted_ptr_val = gen_const_ptr_struct_recursive(g, struct_const_val,
+ gen_field_index);
+ LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
+ const_val->global_refs->llvm_value = ptr_val;
+ render_const_val_global(g, const_val, "");
+ return ptr_val;
+ }
+ case ConstPtrSpecialHardCodedAddr:
+ {
+ uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr;
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false),
+ const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ case ConstPtrSpecialFunction:
+ return LLVMConstBitCast(fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry), const_val->type->type_ref);
+ }
+ zig_unreachable();
+}
+
static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const char *name) {
TypeTableEntry *type_entry = const_val->type;
assert(!type_entry->zero_bits);
@@ -4839,6 +5314,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
const_val->data.x_err_set->value, false);
case TypeTableEntryIdFloat:
switch (type_entry->data.floating.bit_count) {
+ case 16:
+ return LLVMConstReal(type_entry->type_ref, zig_f16_to_double(const_val->data.x_f16));
case 32:
return LLVMConstReal(type_entry->type_ref, const_val->data.x_f32);
case 64:
@@ -4861,23 +5338,19 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
} else {
return LLVMConstNull(LLVMInt1Type());
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
if (child_type->zero_bits) {
- return LLVMConstInt(LLVMInt1Type(), const_val->data.x_maybe ? 1 : 0, false);
+ return LLVMConstInt(LLVMInt1Type(), const_val->data.x_optional ? 1 : 0, false);
} else if (type_is_codegen_pointer(child_type)) {
- if (const_val->data.x_maybe) {
- return gen_const_val(g, const_val->data.x_maybe, "");
- } else {
- return LLVMConstNull(child_type->type_ref);
- }
+ return gen_const_val_ptr(g, const_val, name);
} else {
LLVMValueRef child_val;
LLVMValueRef maybe_val;
bool make_unnamed_struct;
- if (const_val->data.x_maybe) {
- child_val = gen_const_val(g, const_val->data.x_maybe, "");
+ if (const_val->data.x_optional) {
+ child_val = gen_const_val(g, const_val->data.x_optional, "");
maybe_val = LLVMConstAllOnes(LLVMInt1Type());
make_unnamed_struct = is_llvm_value_unnamed_type(const_val->type, child_val);
@@ -5067,78 +5540,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
assert(const_val->data.x_ptr.mut == ConstPtrMutComptimeConst);
return fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry);
case TypeTableEntryIdPointer:
- {
- render_const_val_global(g, const_val, name);
- switch (const_val->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- case ConstPtrSpecialDiscard:
- zig_unreachable();
- case ConstPtrSpecialRef:
- {
- ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee;
- render_const_val(g, pointee, "");
- render_const_val_global(g, pointee, "");
- ConstExprValue *other_val = pointee;
- const_val->global_refs->llvm_value = LLVMConstBitCast(other_val->global_refs->llvm_global, const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- case ConstPtrSpecialBaseArray:
- {
- ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val;
- size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
- assert(array_const_val->type->id == TypeTableEntryIdArray);
- if (array_const_val->type->zero_bits) {
- // make this a null pointer
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
- const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- LLVMValueRef uncasted_ptr_val = gen_const_ptr_array_recursive(g, array_const_val,
- elem_index);
- LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
- const_val->global_refs->llvm_value = ptr_val;
- render_const_val_global(g, const_val, "");
- return ptr_val;
- }
- case ConstPtrSpecialBaseStruct:
- {
- ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val;
- assert(struct_const_val->type->id == TypeTableEntryIdStruct);
- if (struct_const_val->type->zero_bits) {
- // make this a null pointer
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
- const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- size_t src_field_index = const_val->data.x_ptr.data.base_struct.field_index;
- size_t gen_field_index =
- struct_const_val->type->data.structure.fields[src_field_index].gen_index;
- LLVMValueRef uncasted_ptr_val = gen_const_ptr_struct_recursive(g, struct_const_val,
- gen_field_index);
- LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
- const_val->global_refs->llvm_value = ptr_val;
- render_const_val_global(g, const_val, "");
- return ptr_val;
- }
- case ConstPtrSpecialHardCodedAddr:
- {
- uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr;
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false),
- const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- case ConstPtrSpecialFunction:
- return LLVMConstBitCast(fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry), const_val->type->type_ref);
- }
- }
- zig_unreachable();
+ return gen_const_val_ptr(g, const_val, name);
case TypeTableEntryIdErrorUnion:
{
TypeTableEntry *payload_type = type_entry->data.error_union.payload_type;
@@ -5180,10 +5582,10 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
case TypeTableEntryIdInvalid:
case TypeTableEntryIdMetaType:
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
@@ -5232,7 +5634,8 @@ static void generate_error_name_table(CodeGen *g) {
assert(g->errors_by_index.length > 0);
- TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true);
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type);
LLVMValueRef *values = allocate<LLVMValueRef>(g->errors_by_index.length);
@@ -5269,54 +5672,6 @@ static void generate_error_name_table(CodeGen *g) {
LLVMSetAlignment(g->err_name_table, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(err_name_table_init)));
}
-static void generate_enum_name_tables(CodeGen *g) {
- TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true);
- TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type);
-
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- LLVMValueRef array_ptr_indices[] = {
- LLVMConstNull(usize->type_ref),
- LLVMConstNull(usize->type_ref),
- };
-
-
- for (size_t enum_i = 0; enum_i < g->name_table_enums.length; enum_i += 1) {
- TypeTableEntry *enum_type = g->name_table_enums.at(enum_i);
- assert(enum_type->id == TypeTableEntryIdEnum);
-
- size_t field_count = enum_type->data.enumeration.src_field_count;
- LLVMValueRef *values = allocate<LLVMValueRef>(field_count);
- for (size_t field_i = 0; field_i < field_count; field_i += 1) {
- Buf *name = enum_type->data.enumeration.fields[field_i].name;
-
- LLVMValueRef str_init = LLVMConstString(buf_ptr(name), (unsigned)buf_len(name), true);
- LLVMValueRef str_global = LLVMAddGlobal(g->module, LLVMTypeOf(str_init), "");
- LLVMSetInitializer(str_global, str_init);
- LLVMSetLinkage(str_global, LLVMPrivateLinkage);
- LLVMSetGlobalConstant(str_global, true);
- LLVMSetUnnamedAddr(str_global, true);
- LLVMSetAlignment(str_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(str_init)));
-
- LLVMValueRef fields[] = {
- LLVMConstGEP(str_global, array_ptr_indices, 2),
- LLVMConstInt(g->builtin_types.entry_usize->type_ref, buf_len(name), false),
- };
- values[field_i] = LLVMConstNamedStruct(str_type->type_ref, fields, 2);
- }
-
- LLVMValueRef name_table_init = LLVMConstArray(str_type->type_ref, values, (unsigned)field_count);
-
- Buf *table_name = get_mangled_name(g, buf_sprintf("%s_name_table", buf_ptr(&enum_type->name)), false);
- LLVMValueRef name_table = LLVMAddGlobal(g->module, LLVMTypeOf(name_table_init), buf_ptr(table_name));
- LLVMSetInitializer(name_table, name_table_init);
- LLVMSetLinkage(name_table, LLVMPrivateLinkage);
- LLVMSetGlobalConstant(name_table, true);
- LLVMSetUnnamedAddr(name_table, true);
- LLVMSetAlignment(name_table, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(name_table_init)));
- enum_type->data.enumeration.name_table = name_table;
- }
-}
-
static void build_all_basic_blocks(CodeGen *g, FnTableEntry *fn) {
IrExecutable *executable = &fn->analyzed_executable;
assert(executable->basic_block_list.length > 0);
@@ -5413,14 +5768,13 @@ static void do_code_gen(CodeGen *g) {
}
generate_error_name_table(g);
- generate_enum_name_tables(g);
// Generate module level variables
for (size_t i = 0; i < g->global_vars.length; i += 1) {
TldVar *tld_var = g->global_vars.at(i);
VariableTableEntry *var = tld_var->var;
- if (var->value->type->id == TypeTableEntryIdNumLitFloat) {
+ if (var->value->type->id == TypeTableEntryIdComptimeFloat) {
// Generate debug info for it but that's it.
ConstExprValue *const_val = var->value;
assert(const_val->special != ConstValSpecialRuntime);
@@ -5434,7 +5788,7 @@ static void do_code_gen(CodeGen *g) {
continue;
}
- if (var->value->type->id == TypeTableEntryIdNumLitInt) {
+ if (var->value->type->id == TypeTableEntryIdComptimeInt) {
// Generate debug info for it but that's it.
ConstExprValue *const_val = var->value;
assert(const_val->special != ConstValSpecialRuntime);
@@ -5552,8 +5906,8 @@ static void do_code_gen(CodeGen *g) {
} else if (instruction->id == IrInstructionIdSlice) {
IrInstructionSlice *slice_instruction = (IrInstructionSlice *)instruction;
slot = &slice_instruction->tmp_ptr;
- } else if (instruction->id == IrInstructionIdMaybeWrap) {
- IrInstructionMaybeWrap *maybe_wrap_instruction = (IrInstructionMaybeWrap *)instruction;
+ } else if (instruction->id == IrInstructionIdOptionalWrap) {
+ IrInstructionOptionalWrap *maybe_wrap_instruction = (IrInstructionOptionalWrap *)instruction;
slot = &maybe_wrap_instruction->tmp_ptr;
} else if (instruction->id == IrInstructionIdErrWrapPayload) {
IrInstructionErrWrapPayload *err_wrap_payload_instruction = (IrInstructionErrWrapPayload *)instruction;
@@ -5561,6 +5915,9 @@ static void do_code_gen(CodeGen *g) {
} else if (instruction->id == IrInstructionIdErrWrapCode) {
IrInstructionErrWrapCode *err_wrap_code_instruction = (IrInstructionErrWrapCode *)instruction;
slot = &err_wrap_code_instruction->tmp_ptr;
+ } else if (instruction->id == IrInstructionIdCmpxchg) {
+ IrInstructionCmpxchg *cmpxchg_instruction = (IrInstructionCmpxchg *)instruction;
+ slot = &cmpxchg_instruction->tmp_ptr;
} else {
zig_unreachable();
}
@@ -5670,6 +6027,7 @@ static void do_code_gen(CodeGen *g) {
ir_render(g, fn_table_entry);
}
+
assert(!g->errors.length);
if (buf_len(&g->global_asm) != 0) {
@@ -5722,10 +6080,12 @@ static void do_code_gen(CodeGen *g) {
os_path_join(g->cache_dir, o_basename, output_path);
ensure_cache_dir(g);
+ bool is_small = g->build_mode == BuildModeSmallRelease;
+
switch (g->emit_file_type) {
case EmitFileTypeBinary:
if (ZigLLVMTargetMachineEmitToFile(g->target_machine, g->module, buf_ptr(output_path),
- ZigLLVM_EmitBinary, &err_msg, g->build_mode == BuildModeDebug))
+ ZigLLVM_EmitBinary, &err_msg, g->build_mode == BuildModeDebug, is_small))
{
zig_panic("unable to write object file %s: %s", buf_ptr(output_path), err_msg);
}
@@ -5735,7 +6095,7 @@ static void do_code_gen(CodeGen *g) {
case EmitFileTypeAssembly:
if (ZigLLVMTargetMachineEmitToFile(g->target_machine, g->module, buf_ptr(output_path),
- ZigLLVM_EmitAssembly, &err_msg, g->build_mode == BuildModeDebug))
+ ZigLLVM_EmitAssembly, &err_msg, g->build_mode == BuildModeDebug, is_small))
{
zig_panic("unable to write assembly file %s: %s", buf_ptr(output_path), err_msg);
}
@@ -5744,7 +6104,7 @@ static void do_code_gen(CodeGen *g) {
case EmitFileTypeLLVMIr:
if (ZigLLVMTargetMachineEmitToFile(g->target_machine, g->module, buf_ptr(output_path),
- ZigLLVM_EmitLLVMIr, &err_msg, g->build_mode == BuildModeDebug))
+ ZigLLVM_EmitLLVMIr, &err_msg, g->build_mode == BuildModeDebug, is_small))
{
zig_panic("unable to write llvm-ir file %s: %s", buf_ptr(output_path), err_msg);
}
@@ -5756,21 +6116,6 @@ static void do_code_gen(CodeGen *g) {
}
}
-static const uint8_t int_sizes_in_bits[] = {
- 2,
- 3,
- 4,
- 5,
- 6,
- 7,
- 8,
- 16,
- 29,
- 32,
- 64,
- 128,
-};
-
struct CIntTypeInfo {
CIntType id;
const char *name;
@@ -5823,25 +6168,27 @@ static void define_builtin_types(CodeGen *g) {
g->builtin_types.entry_block = entry;
}
{
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNumLitFloat);
- buf_init_from_str(&entry->name, "(float literal)");
+ TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdComptimeFloat);
+ buf_init_from_str(&entry->name, "comptime_float");
entry->zero_bits = true;
g->builtin_types.entry_num_lit_float = entry;
+ g->primitive_type_table.put(&entry->name, entry);
}
{
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNumLitInt);
- buf_init_from_str(&entry->name, "(integer literal)");
+ TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdComptimeInt);
+ buf_init_from_str(&entry->name, "comptime_int");
entry->zero_bits = true;
g->builtin_types.entry_num_lit_int = entry;
+ g->primitive_type_table.put(&entry->name, entry);
}
{
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdUndefLit);
+ TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdUndefined);
buf_init_from_str(&entry->name, "(undefined)");
entry->zero_bits = true;
g->builtin_types.entry_undef = entry;
}
{
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNullLit);
+ TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNull);
buf_init_from_str(&entry->name, "(null)");
entry->zero_bits = true;
g->builtin_types.entry_null = entry;
@@ -5853,16 +6200,6 @@ static void define_builtin_types(CodeGen *g) {
g->builtin_types.entry_arg_tuple = entry;
}
- for (size_t int_size_i = 0; int_size_i < array_length(int_sizes_in_bits); int_size_i += 1) {
- uint8_t size_in_bits = int_sizes_in_bits[int_size_i];
- for (size_t is_sign_i = 0; is_sign_i < array_length(is_signed_list); is_sign_i += 1) {
- bool is_signed = is_signed_list[is_sign_i];
- TypeTableEntry *entry = make_int_type(g, is_signed, size_in_bits);
- g->primitive_type_table.put(&entry->name, entry);
- get_int_type_ptr(g, is_signed, size_in_bits)[0] = entry;
- }
- }
-
for (size_t i = 0; i < array_length(c_int_type_infos); i += 1) {
const CIntTypeInfo *info = &c_int_type_infos[i];
uint32_t size_in_bits = target_c_type_size_in_bits(&g->zig_target, info->id);
@@ -5921,58 +6258,30 @@ static void define_builtin_types(CodeGen *g) {
g->builtin_types.entry_usize = entry;
}
}
- {
+
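+ // Registers one floating point primitive: creates the type entry and its debug
+ // info, stores it in the given builtin field, and adds it to the primitive type table.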
+ auto add_fp_entry = [] (CodeGen *g,
+ const char *name,
+ uint32_t bit_count,
+ LLVMTypeRef type_ref,
+ TypeTableEntry **field) {
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
- entry->type_ref = LLVMFloatType();
- buf_init_from_str(&entry->name, "f32");
- entry->data.floating.bit_count = 32;
+ entry->type_ref = type_ref;
+ buf_init_from_str(&entry->name, name);
+ entry->data.floating.bit_count = bit_count;
uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
debug_size_in_bits,
ZigLLVMEncoding_DW_ATE_float());
- g->builtin_types.entry_f32 = entry;
+ *field = entry;
g->primitive_type_table.put(&entry->name, entry);
- }
- {
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
- entry->type_ref = LLVMDoubleType();
- buf_init_from_str(&entry->name, "f64");
- entry->data.floating.bit_count = 64;
+ };
+ add_fp_entry(g, "f16", 16, LLVMHalfType(), &g->builtin_types.entry_f16);
+ add_fp_entry(g, "f32", 32, LLVMFloatType(), &g->builtin_types.entry_f32);
+ add_fp_entry(g, "f64", 64, LLVMDoubleType(), &g->builtin_types.entry_f64);
+ add_fp_entry(g, "f128", 128, LLVMFP128Type(), &g->builtin_types.entry_f128);
+ add_fp_entry(g, "c_longdouble", 80, LLVMX86FP80Type(), &g->builtin_types.entry_c_longdouble);
- uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
- entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
- debug_size_in_bits,
- ZigLLVMEncoding_DW_ATE_float());
- g->builtin_types.entry_f64 = entry;
- g->primitive_type_table.put(&entry->name, entry);
- }
- {
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
- entry->type_ref = LLVMFP128Type();
- buf_init_from_str(&entry->name, "f128");
- entry->data.floating.bit_count = 128;
-
- uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
- entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
- debug_size_in_bits,
- ZigLLVMEncoding_DW_ATE_float());
- g->builtin_types.entry_f128 = entry;
- g->primitive_type_table.put(&entry->name, entry);
- }
- {
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
- entry->type_ref = LLVMX86FP80Type();
- buf_init_from_str(&entry->name, "c_longdouble");
- entry->data.floating.bit_count = 80;
-
- uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
- entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
- debug_size_in_bits,
- ZigLLVMEncoding_DW_ATE_float());
- g->builtin_types.entry_c_longdouble = entry;
- g->primitive_type_table.put(&entry->name, entry);
- }
{
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdVoid);
entry->type_ref = LLVMVoidType();
@@ -6006,12 +6315,9 @@ static void define_builtin_types(CodeGen *g) {
g->builtin_types.entry_u29 = get_int_type(g, false, 29);
g->builtin_types.entry_u32 = get_int_type(g, false, 32);
g->builtin_types.entry_u64 = get_int_type(g, false, 64);
- g->builtin_types.entry_u128 = get_int_type(g, false, 128);
g->builtin_types.entry_i8 = get_int_type(g, true, 8);
- g->builtin_types.entry_i16 = get_int_type(g, true, 16);
g->builtin_types.entry_i32 = get_int_type(g, true, 32);
g->builtin_types.entry_i64 = get_int_type(g, true, 64);
- g->builtin_types.entry_i128 = get_int_type(g, true, 128);
{
g->builtin_types.entry_c_void = get_opaque_type(g, nullptr, nullptr, "c_void");
@@ -6061,6 +6367,7 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdBreakpoint, "breakpoint", 0);
create_builtin_fn(g, BuiltinFnIdReturnAddress, "returnAddress", 0);
create_builtin_fn(g, BuiltinFnIdFrameAddress, "frameAddress", 0);
+ create_builtin_fn(g, BuiltinFnIdHandle, "handle", 0);
create_builtin_fn(g, BuiltinFnIdMemcpy, "memcpy", 3);
create_builtin_fn(g, BuiltinFnIdMemset, "memset", 3);
create_builtin_fn(g, BuiltinFnIdSizeof, "sizeOf", 1);
@@ -6070,6 +6377,8 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdMemberCount, "memberCount", 1);
create_builtin_fn(g, BuiltinFnIdMemberType, "memberType", 2);
create_builtin_fn(g, BuiltinFnIdMemberName, "memberName", 2);
+ create_builtin_fn(g, BuiltinFnIdField, "field", 2);
+ create_builtin_fn(g, BuiltinFnIdTypeInfo, "typeInfo", 1);
create_builtin_fn(g, BuiltinFnIdTypeof, "typeOf", 1); // TODO rename to TypeOf
create_builtin_fn(g, BuiltinFnIdAddWithOverflow, "addWithOverflow", 4);
create_builtin_fn(g, BuiltinFnIdSubWithOverflow, "subWithOverflow", 4);
@@ -6080,15 +6389,25 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdCUndef, "cUndef", 1);
create_builtin_fn(g, BuiltinFnIdCtz, "ctz", 1);
create_builtin_fn(g, BuiltinFnIdClz, "clz", 1);
+ create_builtin_fn(g, BuiltinFnIdPopCount, "popCount", 1);
create_builtin_fn(g, BuiltinFnIdImport, "import", 1);
create_builtin_fn(g, BuiltinFnIdCImport, "cImport", 1);
create_builtin_fn(g, BuiltinFnIdErrName, "errorName", 1);
create_builtin_fn(g, BuiltinFnIdTypeName, "typeName", 1);
- create_builtin_fn(g, BuiltinFnIdCanImplicitCast, "canImplicitCast", 2);
create_builtin_fn(g, BuiltinFnIdEmbedFile, "embedFile", 1);
- create_builtin_fn(g, BuiltinFnIdCmpExchange, "cmpxchg", 5);
+ create_builtin_fn(g, BuiltinFnIdCmpxchgWeak, "cmpxchgWeak", 6);
+ create_builtin_fn(g, BuiltinFnIdCmpxchgStrong, "cmpxchgStrong", 6);
create_builtin_fn(g, BuiltinFnIdFence, "fence", 1);
create_builtin_fn(g, BuiltinFnIdTruncate, "truncate", 2);
+ create_builtin_fn(g, BuiltinFnIdIntCast, "intCast", 2);
+ create_builtin_fn(g, BuiltinFnIdFloatCast, "floatCast", 2);
+ create_builtin_fn(g, BuiltinFnIdIntToFloat, "intToFloat", 2);
+ create_builtin_fn(g, BuiltinFnIdFloatToInt, "floatToInt", 2);
+ create_builtin_fn(g, BuiltinFnIdBoolToInt, "boolToInt", 1);
+ create_builtin_fn(g, BuiltinFnIdErrToInt, "errorToInt", 1);
+ create_builtin_fn(g, BuiltinFnIdIntToErr, "intToError", 1);
+ create_builtin_fn(g, BuiltinFnIdEnumToInt, "enumToInt", 1);
+ create_builtin_fn(g, BuiltinFnIdIntToEnum, "intToEnum", 2);
create_builtin_fn(g, BuiltinFnIdCompileErr, "compileError", 1);
create_builtin_fn(g, BuiltinFnIdCompileLog, "compileLog", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdIntType, "IntType", 2); // TODO rename to Int
@@ -6109,8 +6428,10 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdDivFloor, "divFloor", 2);
create_builtin_fn(g, BuiltinFnIdRem, "rem", 2);
create_builtin_fn(g, BuiltinFnIdMod, "mod", 2);
+ create_builtin_fn(g, BuiltinFnIdSqrt, "sqrt", 2);
create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX);
+ create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1);
create_builtin_fn(g, BuiltinFnIdShlExact, "shlExact", 2);
create_builtin_fn(g, BuiltinFnIdShrExact, "shrExact", 2);
@@ -6122,6 +6443,10 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdExport, "export", 3);
create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0);
create_builtin_fn(g, BuiltinFnIdAtomicRmw, "atomicRmw", 5);
+ create_builtin_fn(g, BuiltinFnIdAtomicLoad, "atomicLoad", 3);
+ create_builtin_fn(g, BuiltinFnIdErrSetCast, "errSetCast", 2);
+ create_builtin_fn(g, BuiltinFnIdToBytes, "sliceToBytes", 1);
+ create_builtin_fn(g, BuiltinFnIdFromBytes, "bytesToSlice", 2);
}
static const char *bool_to_str(bool b) {
@@ -6133,17 +6458,12 @@ static const char *build_mode_to_str(BuildMode build_mode) {
case BuildModeDebug: return "Mode.Debug";
case BuildModeSafeRelease: return "Mode.ReleaseSafe";
case BuildModeFastRelease: return "Mode.ReleaseFast";
+ case BuildModeSmallRelease: return "Mode.ReleaseSmall";
}
zig_unreachable();
}
-static void define_builtin_compile_vars(CodeGen *g) {
- if (g->std_package == nullptr)
- return;
-
- const char *builtin_zig_basename = "builtin.zig";
- Buf *builtin_zig_path = buf_alloc();
- os_path_join(g->cache_dir, buf_create_from_str(builtin_zig_basename), builtin_zig_path);
+Buf *codegen_generate_builtin_source(CodeGen *g) {
Buf *contents = buf_alloc();
// Modifications to this struct must be coordinated with code that does anything with
@@ -6273,6 +6593,7 @@ static void define_builtin_compile_vars(CodeGen *g) {
" Debug,\n"
" ReleaseSafe,\n"
" ReleaseFast,\n"
+ " ReleaseSmall,\n"
"};\n\n");
}
{
@@ -6284,6 +6605,197 @@ static void define_builtin_compile_vars(CodeGen *g) {
}
buf_appendf(contents, "};\n\n");
}
+ {
+ buf_appendf(contents,
+ "pub const TypeInfo = union(TypeId) {\n"
+ " Type: void,\n"
+ " Void: void,\n"
+ " Bool: void,\n"
+ " NoReturn: void,\n"
+ " Int: Int,\n"
+ " Float: Float,\n"
+ " Pointer: Pointer,\n"
+ " Array: Array,\n"
+ " Struct: Struct,\n"
+ " ComptimeFloat: void,\n"
+ " ComptimeInt: void,\n"
+ " Undefined: void,\n"
+ " Null: void,\n"
+ " Optional: Optional,\n"
+ " ErrorUnion: ErrorUnion,\n"
+ " ErrorSet: ErrorSet,\n"
+ " Enum: Enum,\n"
+ " Union: Union,\n"
+ " Fn: Fn,\n"
+ " Namespace: void,\n"
+ " Block: void,\n"
+ " BoundFn: Fn,\n"
+ " ArgTuple: void,\n"
+ " Opaque: void,\n"
+ " Promise: Promise,\n"
+ "\n\n"
+ " pub const Int = struct {\n"
+ " is_signed: bool,\n"
+ " bits: u8,\n"
+ " };\n"
+ "\n"
+ " pub const Float = struct {\n"
+ " bits: u8,\n"
+ " };\n"
+ "\n"
+ " pub const Pointer = struct {\n"
+ " size: Size,\n"
+ " is_const: bool,\n"
+ " is_volatile: bool,\n"
+ " alignment: u32,\n"
+ " child: type,\n"
+ "\n"
+ " pub const Size = enum {\n"
+ " One,\n"
+ " Many,\n"
+ " Slice,\n"
+ " };\n"
+ " };\n"
+ "\n"
+ " pub const Array = struct {\n"
+ " len: usize,\n"
+ " child: type,\n"
+ " };\n"
+ "\n"
+ " pub const ContainerLayout = enum {\n"
+ " Auto,\n"
+ " Extern,\n"
+ " Packed,\n"
+ " };\n"
+ "\n"
+ " pub const StructField = struct {\n"
+ " name: []const u8,\n"
+ " offset: ?usize,\n"
+ " field_type: type,\n"
+ " };\n"
+ "\n"
+ " pub const Struct = struct {\n"
+ " layout: ContainerLayout,\n"
+ " fields: []StructField,\n"
+ " defs: []Definition,\n"
+ " };\n"
+ "\n"
+ " pub const Optional = struct {\n"
+ " child: type,\n"
+ " };\n"
+ "\n"
+ " pub const ErrorUnion = struct {\n"
+ " error_set: type,\n"
+ " payload: type,\n"
+ " };\n"
+ "\n"
+ " pub const Error = struct {\n"
+ " name: []const u8,\n"
+ " value: usize,\n"
+ " };\n"
+ "\n"
+ " pub const ErrorSet = struct {\n"
+ " errors: []Error,\n"
+ " };\n"
+ "\n"
+ " pub const EnumField = struct {\n"
+ " name: []const u8,\n"
+ " value: usize,\n"
+ " };\n"
+ "\n"
+ " pub const Enum = struct {\n"
+ " layout: ContainerLayout,\n"
+ " tag_type: type,\n"
+ " fields: []EnumField,\n"
+ " defs: []Definition,\n"
+ " };\n"
+ "\n"
+ " pub const UnionField = struct {\n"
+ " name: []const u8,\n"
+ " enum_field: ?EnumField,\n"
+ " field_type: type,\n"
+ " };\n"
+ "\n"
+ " pub const Union = struct {\n"
+ " layout: ContainerLayout,\n"
+ " tag_type: ?type,\n"
+ " fields: []UnionField,\n"
+ " defs: []Definition,\n"
+ " };\n"
+ "\n"
+ " pub const CallingConvention = enum {\n"
+ " Unspecified,\n"
+ " C,\n"
+ " Cold,\n"
+ " Naked,\n"
+ " Stdcall,\n"
+ " Async,\n"
+ " };\n"
+ "\n"
+ " pub const FnArg = struct {\n"
+ " is_generic: bool,\n"
+ " is_noalias: bool,\n"
+ " arg_type: ?type,\n"
+ " };\n"
+ "\n"
+ " pub const Fn = struct {\n"
+ " calling_convention: CallingConvention,\n"
+ " is_generic: bool,\n"
+ " is_var_args: bool,\n"
+ " return_type: ?type,\n"
+ " async_allocator_type: ?type,\n"
+ " args: []FnArg,\n"
+ " };\n"
+ "\n"
+ " pub const Promise = struct {\n"
+ " child: ?type,\n"
+ " };\n"
+ "\n"
+ " pub const Definition = struct {\n"
+ " name: []const u8,\n"
+ " is_pub: bool,\n"
+ " data: Data,\n"
+ "\n"
+ " pub const Data = union(enum) {\n"
+ " Type: type,\n"
+ " Var: type,\n"
+ " Fn: FnDef,\n"
+ "\n"
+ " pub const FnDef = struct {\n"
+ " fn_type: type,\n"
+ " inline_type: Inline,\n"
+ " calling_convention: CallingConvention,\n"
+ " is_var_args: bool,\n"
+ " is_extern: bool,\n"
+ " is_export: bool,\n"
+ " lib_name: ?[]const u8,\n"
+ " return_type: type,\n"
+ " arg_names: [][] const u8,\n"
+ "\n"
+ " pub const Inline = enum {\n"
+ " Auto,\n"
+ " Always,\n"
+ " Never,\n"
+ " };\n"
+ " };\n"
+ " };\n"
+ " };\n"
+ "};\n\n");
+ assert(ContainerLayoutAuto == 0);
+ assert(ContainerLayoutExtern == 1);
+ assert(ContainerLayoutPacked == 2);
+
+ assert(CallingConventionUnspecified == 0);
+ assert(CallingConventionC == 1);
+ assert(CallingConventionCold == 2);
+ assert(CallingConventionNaked == 3);
+ assert(CallingConventionStdcall == 4);
+ assert(CallingConventionAsync == 5);
+
+ assert(FnInlineAuto == 0);
+ assert(FnInlineAlways == 1);
+ assert(FnInlineNever == 2);
+ }
{
buf_appendf(contents,
"pub const FloatMode = enum {\n"
@@ -6317,13 +6829,27 @@ static void define_builtin_compile_vars(CodeGen *g) {
buf_appendf(contents, "pub const __zig_test_fn_slice = {}; // overwritten later\n");
+
+ return contents;
+}
+
+static void define_builtin_compile_vars(CodeGen *g) {
+ if (g->std_package == nullptr)
+ return;
+
+ const char *builtin_zig_basename = "builtin.zig";
+ Buf *builtin_zig_path = buf_alloc();
+ os_path_join(g->cache_dir, buf_create_from_str(builtin_zig_basename), builtin_zig_path);
+
+ Buf *contents = codegen_generate_builtin_source(g);
ensure_cache_dir(g);
os_write_file(builtin_zig_path, contents);
int err;
Buf *abs_full_path = buf_alloc();
if ((err = os_path_real(builtin_zig_path, abs_full_path))) {
- zig_panic("unable to open '%s': %s", buf_ptr(builtin_zig_path), err_str(err));
+ fprintf(stderr, "unable to open '%s': %s\n", buf_ptr(builtin_zig_path), err_str(err));
+ exit(1);
}
assert(g->root_package);
@@ -6383,7 +6909,7 @@ static void init(CodeGen *g) {
const char *target_specific_features;
if (g->is_native_target) {
// LLVM creates invalid binaries on Windows sometimes.
- // See https://github.com/zig-lang/zig/issues/508
+ // See https://github.com/ziglang/zig/issues/508
// As a workaround we do not use target native features on Windows.
if (g->zig_target.os == OsWindows) {
target_specific_cpu_args = "";
@@ -6443,7 +6969,7 @@ static void init(CodeGen *g) {
}
}
- g->have_err_ret_tracing = g->build_mode != BuildModeFastRelease;
+ g->have_err_ret_tracing = g->build_mode != BuildModeFastRelease && g->build_mode != BuildModeSmallRelease;
define_builtin_fns(g);
define_builtin_compile_vars(g);
@@ -6490,11 +7016,11 @@ static ImportTableEntry *add_special_code(CodeGen *g, PackageTableEntry *package
Buf *abs_full_path = buf_alloc();
int err;
if ((err = os_path_real(&path_to_code_src, abs_full_path))) {
- zig_panic("unable to open '%s': %s", buf_ptr(&path_to_code_src), err_str(err));
+ zig_panic("unable to open '%s': %s\n", buf_ptr(&path_to_code_src), err_str(err));
}
Buf *import_code = buf_alloc();
if ((err = os_fetch_file_path(abs_full_path, import_code, false))) {
- zig_panic("unable to open '%s': %s", buf_ptr(&path_to_code_src), err_str(err));
+ zig_panic("unable to open '%s': %s\n", buf_ptr(&path_to_code_src), err_str(err));
}
return add_source_file(g, package, abs_full_path, import_code);
@@ -6522,7 +7048,8 @@ static void create_test_compile_var_and_add_test_runner(CodeGen *g) {
exit(0);
}
- TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true);
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type);
TypeTableEntry *fn_type = get_test_fn_type(g);
@@ -6577,12 +7104,14 @@ static void gen_root_source(CodeGen *g) {
Buf *abs_full_path = buf_alloc();
int err;
if ((err = os_path_real(rel_full_path, abs_full_path))) {
- zig_panic("unable to open '%s': %s", buf_ptr(rel_full_path), err_str(err));
+ fprintf(stderr, "unable to open '%s': %s\n", buf_ptr(rel_full_path), err_str(err));
+ exit(1);
}
Buf *source_code = buf_alloc();
if ((err = os_fetch_file_path(rel_full_path, source_code, true))) {
- zig_panic("unable to open '%s': %s", buf_ptr(rel_full_path), err_str(err));
+ fprintf(stderr, "unable to open '%s': %s\n", buf_ptr(rel_full_path), err_str(err));
+ exit(1);
}
g->root_import = add_source_file(g, g->root_package, abs_full_path, source_code);
@@ -6681,10 +7210,10 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry
switch (type_entry->id) {
case TypeTableEntryIdInvalid:
case TypeTableEntryIdMetaType:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
@@ -6726,7 +7255,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry
case TypeTableEntryIdArray:
prepend_c_type_to_decl_list(g, gen_h, type_entry->data.array.child_type);
return;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
prepend_c_type_to_decl_list(g, gen_h, type_entry->data.maybe.child_type);
return;
case TypeTableEntryIdFn:
@@ -6815,7 +7344,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf
buf_appendf(out_buf, "%s%s *", const_str, buf_ptr(&child_buf));
break;
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
if (child_type->zero_bits) {
@@ -6866,10 +7395,10 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdArgTuple:
case TypeTableEntryIdPromise:
zig_unreachable();
@@ -6925,7 +7454,7 @@ static void gen_h_file(CodeGen *g) {
FILE *out_h = fopen(buf_ptr(g->out_h_path), "wb");
if (!out_h)
- zig_panic("unable to open %s: %s", buf_ptr(g->out_h_path), strerror(errno));
+ zig_panic("unable to open %s: %s\n", buf_ptr(g->out_h_path), strerror(errno));
Buf *export_macro = preprocessor_mangle(buf_sprintf("%s_EXPORT", buf_ptr(g->root_out_name)));
buf_upcase(export_macro);
@@ -7018,67 +7547,76 @@ static void gen_h_file(CodeGen *g) {
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
case TypeTableEntryIdPointer:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdArray:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdArgTuple:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdFn:
case TypeTableEntryIdPromise:
zig_unreachable();
case TypeTableEntryIdEnum:
- assert(type_entry->data.enumeration.layout == ContainerLayoutExtern);
- fprintf(out_h, "enum %s {\n", buf_ptr(&type_entry->name));
- for (uint32_t field_i = 0; field_i < type_entry->data.enumeration.src_field_count; field_i += 1) {
- TypeEnumField *enum_field = &type_entry->data.enumeration.fields[field_i];
- Buf *value_buf = buf_alloc();
- bigint_append_buf(value_buf, &enum_field->value, 10);
- fprintf(out_h, " %s = %s", buf_ptr(enum_field->name), buf_ptr(value_buf));
- if (field_i != type_entry->data.enumeration.src_field_count - 1) {
- fprintf(out_h, ",");
+ if (type_entry->data.enumeration.layout == ContainerLayoutExtern) {
+ fprintf(out_h, "enum %s {\n", buf_ptr(&type_entry->name));
+ for (uint32_t field_i = 0; field_i < type_entry->data.enumeration.src_field_count; field_i += 1) {
+ TypeEnumField *enum_field = &type_entry->data.enumeration.fields[field_i];
+ Buf *value_buf = buf_alloc();
+ bigint_append_buf(value_buf, &enum_field->value, 10);
+ fprintf(out_h, " %s = %s", buf_ptr(enum_field->name), buf_ptr(value_buf));
+ if (field_i != type_entry->data.enumeration.src_field_count - 1) {
+ fprintf(out_h, ",");
+ }
+ fprintf(out_h, "\n");
}
- fprintf(out_h, "\n");
+ fprintf(out_h, "};\n\n");
+ } else {
+ fprintf(out_h, "enum %s;\n", buf_ptr(&type_entry->name));
}
- fprintf(out_h, "};\n\n");
break;
case TypeTableEntryIdStruct:
- assert(type_entry->data.structure.layout == ContainerLayoutExtern);
- fprintf(out_h, "struct %s {\n", buf_ptr(&type_entry->name));
- for (uint32_t field_i = 0; field_i < type_entry->data.structure.src_field_count; field_i += 1) {
- TypeStructField *struct_field = &type_entry->data.structure.fields[field_i];
+ if (type_entry->data.structure.layout == ContainerLayoutExtern) {
+ fprintf(out_h, "struct %s {\n", buf_ptr(&type_entry->name));
+ for (uint32_t field_i = 0; field_i < type_entry->data.structure.src_field_count; field_i += 1) {
+ TypeStructField *struct_field = &type_entry->data.structure.fields[field_i];
- Buf *type_name_buf = buf_alloc();
- get_c_type(g, gen_h, struct_field->type_entry, type_name_buf);
+ Buf *type_name_buf = buf_alloc();
+ get_c_type(g, gen_h, struct_field->type_entry, type_name_buf);
+
+ if (struct_field->type_entry->id == TypeTableEntryIdArray) {
+ fprintf(out_h, " %s %s[%" ZIG_PRI_u64 "];\n", buf_ptr(type_name_buf),
+ buf_ptr(struct_field->name),
+ struct_field->type_entry->data.array.len);
+ } else {
+ fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(struct_field->name));
+ }
- if (struct_field->type_entry->id == TypeTableEntryIdArray) {
- fprintf(out_h, " %s %s[%" ZIG_PRI_u64 "];\n", buf_ptr(type_name_buf),
- buf_ptr(struct_field->name),
- struct_field->type_entry->data.array.len);
- } else {
- fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(struct_field->name));
}
-
+ fprintf(out_h, "};\n\n");
+ } else {
+ fprintf(out_h, "struct %s;\n", buf_ptr(&type_entry->name));
}
- fprintf(out_h, "};\n\n");
break;
case TypeTableEntryIdUnion:
- assert(type_entry->data.unionation.layout == ContainerLayoutExtern);
- fprintf(out_h, "union %s {\n", buf_ptr(&type_entry->name));
- for (uint32_t field_i = 0; field_i < type_entry->data.unionation.src_field_count; field_i += 1) {
- TypeUnionField *union_field = &type_entry->data.unionation.fields[field_i];
+ if (type_entry->data.unionation.layout == ContainerLayoutExtern) {
+ fprintf(out_h, "union %s {\n", buf_ptr(&type_entry->name));
+ for (uint32_t field_i = 0; field_i < type_entry->data.unionation.src_field_count; field_i += 1) {
+ TypeUnionField *union_field = &type_entry->data.unionation.fields[field_i];
- Buf *type_name_buf = buf_alloc();
- get_c_type(g, gen_h, union_field->type_entry, type_name_buf);
- fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(union_field->name));
+ Buf *type_name_buf = buf_alloc();
+ get_c_type(g, gen_h, union_field->type_entry, type_name_buf);
+ fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(union_field->name));
+ }
+ fprintf(out_h, "};\n\n");
+ } else {
+ fprintf(out_h, "union %s;\n", buf_ptr(&type_entry->name));
}
- fprintf(out_h, "};\n\n");
break;
case TypeTableEntryIdOpaque:
fprintf(out_h, "struct %s;\n\n", buf_ptr(&type_entry->name));
@@ -7135,4 +7673,3 @@ PackageTableEntry *codegen_create_package(CodeGen *g, const char *root_src_dir,
}
return pkg;
}
-
diff --git a/src/codegen.hpp b/src/codegen.hpp
index a7a4b748c4..b5f3374ec4 100644
--- a/src/codegen.hpp
+++ b/src/codegen.hpp
@@ -59,5 +59,7 @@ void codegen_add_object(CodeGen *g, Buf *object_path);
void codegen_translate_c(CodeGen *g, Buf *path);
+Buf *codegen_generate_builtin_source(CodeGen *g);
+
#endif
diff --git a/src/ir.cpp b/src/ir.cpp
index 3ba58a09bd..3e423487aa 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -11,9 +11,10 @@
#include "ir.hpp"
#include "ir_print.hpp"
#include "os.hpp"
-#include "translate_c.hpp"
#include "range_set.hpp"
#include "softfloat.hpp"
+#include "translate_c.hpp"
+#include "util.hpp"
struct IrExecContext {
ConstExprValue *mem_slot_list;
@@ -38,20 +39,13 @@ struct IrAnalyze {
IrBasicBlock *const_predecessor_bb;
};
-static const LVal LVAL_NONE = { false, false, false };
-static const LVal LVAL_PTR = { true, false, false };
-
-static LVal make_lval_addr(bool is_const, bool is_volatile) {
- return { true, is_const, is_volatile };
-}
-
enum ConstCastResultId {
ConstCastResultIdOk,
ConstCastResultIdErrSet,
ConstCastResultIdErrSetGlobal,
ConstCastResultIdPointerChild,
ConstCastResultIdSliceChild,
- ConstCastResultIdNullableChild,
+ ConstCastResultIdOptionalChild,
ConstCastResultIdErrorUnionPayload,
ConstCastResultIdErrorUnionErrorSet,
ConstCastResultIdFnAlign,
@@ -66,14 +60,10 @@ enum ConstCastResultId {
ConstCastResultIdType,
ConstCastResultIdUnresolvedInferredErrSet,
ConstCastResultIdAsyncAllocatorType,
-};
-
-struct ConstCastErrSetMismatch {
- ZigList<ErrorTableEntry *> missing_errors;
+ ConstCastResultIdNullWrapPtr,
};
struct ConstCastOnly;
-
struct ConstCastArg {
size_t arg_index;
ConstCastOnly *child;
@@ -83,22 +73,70 @@ struct ConstCastArgNoAlias {
size_t arg_index;
};
+struct ConstCastOptionalMismatch;
+struct ConstCastPointerMismatch;
+struct ConstCastSliceMismatch;
+struct ConstCastErrUnionErrSetMismatch;
+struct ConstCastErrUnionPayloadMismatch;
+struct ConstCastErrSetMismatch;
+struct ConstCastTypeMismatch;
+
struct ConstCastOnly {
ConstCastResultId id;
union {
- ConstCastErrSetMismatch error_set;
- ConstCastOnly *pointer_child;
- ConstCastOnly *slice_child;
- ConstCastOnly *nullable_child;
- ConstCastOnly *error_union_payload;
- ConstCastOnly *error_union_error_set;
+ ConstCastErrSetMismatch *error_set_mismatch;
+ ConstCastPointerMismatch *pointer_mismatch;
+ ConstCastSliceMismatch *slice_mismatch;
+ ConstCastOptionalMismatch *optional;
+ ConstCastErrUnionPayloadMismatch *error_union_payload;
+ ConstCastErrUnionErrSetMismatch *error_union_error_set;
+ ConstCastTypeMismatch *type_mismatch;
ConstCastOnly *return_type;
ConstCastOnly *async_allocator_type;
+ ConstCastOnly *null_wrap_ptr_child;
ConstCastArg fn_arg;
ConstCastArgNoAlias arg_no_alias;
} data;
};
+struct ConstCastTypeMismatch {
+ TypeTableEntry *wanted_type;
+ TypeTableEntry *actual_type;
+};
+
+struct ConstCastOptionalMismatch {
+ ConstCastOnly child;
+ TypeTableEntry *wanted_child;
+ TypeTableEntry *actual_child;
+};
+
+struct ConstCastPointerMismatch {
+ ConstCastOnly child;
+ TypeTableEntry *wanted_child;
+ TypeTableEntry *actual_child;
+};
+
+struct ConstCastSliceMismatch {
+ ConstCastOnly child;
+ TypeTableEntry *wanted_child;
+ TypeTableEntry *actual_child;
+};
+
+struct ConstCastErrUnionErrSetMismatch {
+ ConstCastOnly child;
+ TypeTableEntry *wanted_err_set;
+ TypeTableEntry *actual_err_set;
+};
+
+struct ConstCastErrUnionPayloadMismatch {
+ ConstCastOnly child;
+ TypeTableEntry *wanted_payload;
+ TypeTableEntry *actual_payload;
+};
+
+struct ConstCastErrSetMismatch {
+ ZigList<ErrorTableEntry *> missing_errors;
+};
static IrInstruction *ir_gen_node(IrBuilder *irb, AstNode *node, Scope *scope);
static IrInstruction *ir_gen_node_extra(IrBuilder *irb, AstNode *node, Scope *scope, LVal lval);
@@ -108,11 +146,14 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg);
static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name,
IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type);
-static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
- VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr);
+static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, VariableTableEntry *var);
+static TypeTableEntry *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op);
+static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval);
+static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, uint32_t new_align);
+static TypeTableEntry *adjust_slice_align(CodeGen *g, TypeTableEntry *slice_type, uint32_t new_align);
ConstExprValue *const_ptr_pointee(CodeGen *g, ConstExprValue *const_val) {
- assert(const_val->type->id == TypeTableEntryIdPointer);
+ assert(get_codegen_ptr_type(const_val->type) != nullptr);
assert(const_val->special == ConstValSpecialStatic);
switch (const_val->data.x_ptr.special) {
case ConstPtrSpecialInvalid:
@@ -143,6 +184,8 @@ static bool ir_should_inline(IrExecutable *exec, Scope *scope) {
while (scope != nullptr) {
if (scope->id == ScopeIdCompTime)
return true;
+ if (scope->id == ScopeIdFnDef)
+ break;
scope = scope->parent;
}
return false;
@@ -369,8 +412,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTestNonNull *) {
return IrInstructionIdTestNonNull;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapMaybe *) {
- return IrInstructionIdUnwrapMaybe;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapOptional *) {
+ return IrInstructionIdUnwrapOptional;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionClz *) {
@@ -381,6 +424,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCtz *) {
return IrInstructionIdCtz;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionPopCount *) {
+ return IrInstructionIdPopCount;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionUnionTag *) {
return IrInstructionIdUnionTag;
}
@@ -457,6 +504,38 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTruncate *) {
return IrInstructionIdTruncate;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionIntCast *) {
+ return IrInstructionIdIntCast;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFloatCast *) {
+ return IrInstructionIdFloatCast;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionErrSetCast *) {
+ return IrInstructionIdErrSetCast;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionToBytes *) {
+ return IrInstructionIdToBytes;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFromBytes *) {
+ return IrInstructionIdFromBytes;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionIntToFloat *) {
+ return IrInstructionIdIntToFloat;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFloatToInt *) {
+ return IrInstructionIdFloatToInt;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionBoolToInt *) {
+ return IrInstructionIdBoolToInt;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionIntType *) {
return IrInstructionIdIntType;
}
@@ -501,6 +580,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionFrameAddress *)
return IrInstructionIdFrameAddress;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionHandle *) {
+ return IrInstructionIdHandle;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignOf *) {
return IrInstructionIdAlignOf;
}
@@ -521,8 +604,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapErrPayload
return IrInstructionIdUnwrapErrPayload;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionMaybeWrap *) {
- return IrInstructionIdMaybeWrap;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionOptionalWrap *) {
+ return IrInstructionIdOptionalWrap;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionErrWrapPayload *) {
@@ -565,6 +648,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionIntToEnum *) {
return IrInstructionIdIntToEnum;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionEnumToInt *) {
+ return IrInstructionIdEnumToInt;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionIntToErr *) {
return IrInstructionIdIntToErr;
}
@@ -585,10 +672,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTypeName *) {
return IrInstructionIdTypeName;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCanImplicitCast *) {
- return IrInstructionIdCanImplicitCast;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionDeclRef *) {
return IrInstructionIdDeclRef;
}
@@ -613,6 +696,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionOffsetOf *) {
return IrInstructionIdOffsetOf;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionTypeInfo *) {
+ return IrInstructionIdTypeInfo;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionTypeId *) {
return IrInstructionIdTypeId;
}
@@ -621,8 +708,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSetEvalBranchQuo
return IrInstructionIdSetEvalBranchQuota;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrTypeOf *) {
- return IrInstructionIdPtrTypeOf;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrType *) {
+ return IrInstructionIdPtrType;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignCast *) {
@@ -709,6 +796,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) {
return IrInstructionIdAtomicRmw;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicLoad *) {
+ return IrInstructionIdAtomicLoad;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionPromiseResultType *) {
return IrInstructionIdPromiseResultType;
}
@@ -733,6 +824,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionMarkErrRetTraceP
return IrInstructionIdMarkErrRetTracePtr;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionSqrt *) {
+ return IrInstructionIdSqrt;
+}
+
template<typename T>
static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) {
T *special_instruction = allocate<T>(1);
@@ -988,13 +1083,9 @@ static IrInstruction *ir_build_bin_op_from(IrBuilder *irb, IrInstruction *old_in
return new_instruction;
}
-static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node,
- VariableTableEntry *var, bool is_const, bool is_volatile)
-{
+static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, VariableTableEntry *var) {
IrInstructionVarPtr *instruction = ir_build_instruction<IrInstructionVarPtr>(irb, scope, source_node);
instruction->var = var;
- instruction->is_const = is_const;
- instruction->is_volatile = is_volatile;
ir_ref_var(var);
@@ -1002,12 +1093,13 @@ static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *so
}
static IrInstruction *ir_build_elem_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *array_ptr,
- IrInstruction *elem_index, bool safety_check_on)
+ IrInstruction *elem_index, bool safety_check_on, PtrLen ptr_len)
{
IrInstructionElemPtr *instruction = ir_build_instruction<IrInstructionElemPtr>(irb, scope, source_node);
instruction->array_ptr = array_ptr;
instruction->elem_index = elem_index;
instruction->safety_check_on = safety_check_on;
+ instruction->ptr_len = ptr_len;
ir_ref_instruction(array_ptr, irb->current_basic_block);
ir_ref_instruction(elem_index, irb->current_basic_block);
@@ -1015,13 +1107,18 @@ static IrInstruction *ir_build_elem_ptr(IrBuilder *irb, Scope *scope, AstNode *s
return &instruction->base;
}
-static IrInstruction *ir_build_elem_ptr_from(IrBuilder *irb, IrInstruction *old_instruction,
- IrInstruction *array_ptr, IrInstruction *elem_index, bool safety_check_on)
+static IrInstruction *ir_build_field_ptr_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *container_ptr, IrInstruction *field_name_expr)
{
- IrInstruction *new_instruction = ir_build_elem_ptr(irb, old_instruction->scope,
- old_instruction->source_node, array_ptr, elem_index, safety_check_on);
- ir_link_new_instruction(new_instruction, old_instruction);
- return new_instruction;
+ IrInstructionFieldPtr *instruction = ir_build_instruction<IrInstructionFieldPtr>(irb, scope, source_node);
+ instruction->container_ptr = container_ptr;
+ instruction->field_name_buffer = nullptr;
+ instruction->field_name_expr = field_name_expr;
+
+ ir_ref_instruction(container_ptr, irb->current_basic_block);
+ ir_ref_instruction(field_name_expr, irb->current_basic_block);
+
+ return &instruction->base;
}
static IrInstruction *ir_build_field_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node,
@@ -1029,7 +1126,8 @@ static IrInstruction *ir_build_field_ptr(IrBuilder *irb, Scope *scope, AstNode *
{
IrInstructionFieldPtr *instruction = ir_build_instruction<IrInstructionFieldPtr>(irb, scope, source_node);
instruction->container_ptr = container_ptr;
- instruction->field_name = field_name;
+ instruction->field_name_buffer = field_name;
+ instruction->field_name_expr = nullptr;
ir_ref_instruction(container_ptr, irb->current_basic_block);
@@ -1071,7 +1169,8 @@ static IrInstruction *ir_build_union_field_ptr_from(IrBuilder *irb, IrInstructio
static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *source_node,
FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator)
+ bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator,
+ IrInstruction *new_stack)
{
IrInstructionCall *call_instruction = ir_build_instruction<IrInstructionCall>(irb, scope, source_node);
call_instruction->fn_entry = fn_entry;
@@ -1082,6 +1181,7 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc
call_instruction->arg_count = arg_count;
call_instruction->is_async = is_async;
call_instruction->async_allocator = async_allocator;
+ call_instruction->new_stack = new_stack;
if (fn_ref)
ir_ref_instruction(fn_ref, irb->current_basic_block);
@@ -1089,16 +1189,19 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc
ir_ref_instruction(args[i], irb->current_basic_block);
if (async_allocator)
ir_ref_instruction(async_allocator, irb->current_basic_block);
+ if (new_stack != nullptr)
+ ir_ref_instruction(new_stack, irb->current_basic_block);
return &call_instruction->base;
}
static IrInstruction *ir_build_call_from(IrBuilder *irb, IrInstruction *old_instruction,
FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
- bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator)
+ bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator,
+ IrInstruction *new_stack)
{
IrInstruction *new_instruction = ir_build_call(irb, old_instruction->scope,
- old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator);
+ old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator, new_stack);
ir_link_new_instruction(new_instruction, old_instruction);
return new_instruction;
}
@@ -1160,15 +1263,16 @@ static IrInstruction *ir_build_br_from(IrBuilder *irb, IrInstruction *old_instru
return new_instruction;
}
-static IrInstruction *ir_build_ptr_type_of(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value,
- uint32_t bit_offset_start, uint32_t bit_offset_end)
+static IrInstruction *ir_build_ptr_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *child_type, bool is_const, bool is_volatile, PtrLen ptr_len,
+ IrInstruction *align_value, uint32_t bit_offset_start, uint32_t bit_offset_end)
{
- IrInstructionPtrTypeOf *ptr_type_of_instruction = ir_build_instruction<IrInstructionPtrTypeOf>(irb, scope, source_node);
+ IrInstructionPtrType *ptr_type_of_instruction = ir_build_instruction<IrInstructionPtrType>(irb, scope, source_node);
ptr_type_of_instruction->align_value = align_value;
ptr_type_of_instruction->child_type = child_type;
ptr_type_of_instruction->is_const = is_const;
ptr_type_of_instruction->is_volatile = is_volatile;
+ ptr_type_of_instruction->ptr_len = ptr_len;
ptr_type_of_instruction->bit_offset_start = bit_offset_start;
ptr_type_of_instruction->bit_offset_end = bit_offset_end;
@@ -1551,7 +1655,7 @@ static IrInstruction *ir_build_test_nonnull_from(IrBuilder *irb, IrInstruction *
static IrInstruction *ir_build_unwrap_maybe(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value,
bool safety_check_on)
{
- IrInstructionUnwrapMaybe *instruction = ir_build_instruction<IrInstructionUnwrapMaybe>(irb, scope, source_node);
+ IrInstructionUnwrapOptional *instruction = ir_build_instruction<IrInstructionUnwrapOptional>(irb, scope, source_node);
instruction->value = value;
instruction->safety_check_on = safety_check_on;
@@ -1570,7 +1674,7 @@ static IrInstruction *ir_build_unwrap_maybe_from(IrBuilder *irb, IrInstruction *
}
static IrInstruction *ir_build_maybe_wrap(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) {
- IrInstructionMaybeWrap *instruction = ir_build_instruction<IrInstructionMaybeWrap>(irb, scope, source_node);
+ IrInstructionOptionalWrap *instruction = ir_build_instruction<IrInstructionOptionalWrap>(irb, scope, source_node);
instruction->value = value;
ir_ref_instruction(value, irb->current_basic_block);
@@ -1626,8 +1730,18 @@ static IrInstruction *ir_build_ctz_from(IrBuilder *irb, IrInstruction *old_instr
return new_instruction;
}
+static IrInstruction *ir_build_pop_count(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) {
+ IrInstructionPopCount *instruction = ir_build_instruction<IrInstructionPopCount>(irb, scope, source_node);
+ instruction->value = value;
+
+ ir_ref_instruction(value, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static IrInstruction *ir_build_switch_br(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *target_value,
- IrBasicBlock *else_block, size_t case_count, IrInstructionSwitchBrCase *cases, IrInstruction *is_comptime)
+ IrBasicBlock *else_block, size_t case_count, IrInstructionSwitchBrCase *cases, IrInstruction *is_comptime,
+ IrInstruction *switch_prongs_void)
{
IrInstructionSwitchBr *instruction = ir_build_instruction<IrInstructionSwitchBr>(irb, scope, source_node);
instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable;
@@ -1637,10 +1751,12 @@ static IrInstruction *ir_build_switch_br(IrBuilder *irb, Scope *scope, AstNode *
instruction->case_count = case_count;
instruction->cases = cases;
instruction->is_comptime = is_comptime;
+ instruction->switch_prongs_void = switch_prongs_void;
ir_ref_instruction(target_value, irb->current_basic_block);
if (is_comptime) ir_ref_instruction(is_comptime, irb->current_basic_block);
ir_ref_bb(else_block);
+ if (switch_prongs_void) ir_ref_instruction(switch_prongs_void, irb->current_basic_block);
for (size_t i = 0; i < case_count; i += 1) {
ir_ref_instruction(cases[i].value, irb->current_basic_block);
@@ -1652,10 +1768,10 @@ static IrInstruction *ir_build_switch_br(IrBuilder *irb, Scope *scope, AstNode *
static IrInstruction *ir_build_switch_br_from(IrBuilder *irb, IrInstruction *old_instruction,
IrInstruction *target_value, IrBasicBlock *else_block, size_t case_count,
- IrInstructionSwitchBrCase *cases, IrInstruction *is_comptime)
+ IrInstructionSwitchBrCase *cases, IrInstruction *is_comptime, IrInstruction *switch_prongs_void)
{
IrInstruction *new_instruction = ir_build_switch_br(irb, old_instruction->scope, old_instruction->source_node,
- target_value, else_block, case_count, cases, is_comptime);
+ target_value, else_block, case_count, cases, is_comptime, switch_prongs_void);
ir_link_new_instruction(new_instruction, old_instruction);
return new_instruction;
}
@@ -1824,38 +1940,34 @@ static IrInstruction *ir_build_embed_file(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
-static IrInstruction *ir_build_cmpxchg(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *ptr,
- IrInstruction *cmp_value, IrInstruction *new_value, IrInstruction *success_order_value, IrInstruction *failure_order_value,
- AtomicOrder success_order, AtomicOrder failure_order)
+static IrInstruction *ir_build_cmpxchg(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *type_value,
+ IrInstruction *ptr, IrInstruction *cmp_value, IrInstruction *new_value,
+ IrInstruction *success_order_value, IrInstruction *failure_order_value,
+ bool is_weak,
+ TypeTableEntry *type, AtomicOrder success_order, AtomicOrder failure_order)
{
IrInstructionCmpxchg *instruction = ir_build_instruction<IrInstructionCmpxchg>(irb, scope, source_node);
+ instruction->type_value = type_value;
instruction->ptr = ptr;
instruction->cmp_value = cmp_value;
instruction->new_value = new_value;
instruction->success_order_value = success_order_value;
instruction->failure_order_value = failure_order_value;
+ instruction->is_weak = is_weak;
+ instruction->type = type;
instruction->success_order = success_order;
instruction->failure_order = failure_order;
+ if (type_value != nullptr) ir_ref_instruction(type_value, irb->current_basic_block);
ir_ref_instruction(ptr, irb->current_basic_block);
ir_ref_instruction(cmp_value, irb->current_basic_block);
ir_ref_instruction(new_value, irb->current_basic_block);
- ir_ref_instruction(success_order_value, irb->current_basic_block);
- ir_ref_instruction(failure_order_value, irb->current_basic_block);
+ if (type_value != nullptr) ir_ref_instruction(success_order_value, irb->current_basic_block);
+ if (type_value != nullptr) ir_ref_instruction(failure_order_value, irb->current_basic_block);
return &instruction->base;
}
-static IrInstruction *ir_build_cmpxchg_from(IrBuilder *irb, IrInstruction *old_instruction, IrInstruction *ptr,
- IrInstruction *cmp_value, IrInstruction *new_value, IrInstruction *success_order_value, IrInstruction *failure_order_value,
- AtomicOrder success_order, AtomicOrder failure_order)
-{
- IrInstruction *new_instruction = ir_build_cmpxchg(irb, old_instruction->scope, old_instruction->source_node,
- ptr, cmp_value, new_value, success_order_value, failure_order_value, success_order, failure_order);
- ir_link_new_instruction(new_instruction, old_instruction);
- return new_instruction;
-}
-
static IrInstruction *ir_build_fence(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *order_value, AtomicOrder order) {
IrInstructionFence *instruction = ir_build_instruction<IrInstructionFence>(irb, scope, source_node);
instruction->order_value = order_value;
@@ -1883,10 +1995,88 @@ static IrInstruction *ir_build_truncate(IrBuilder *irb, Scope *scope, AstNode *s
return &instruction->base;
}
-static IrInstruction *ir_build_truncate_from(IrBuilder *irb, IrInstruction *old_instruction, IrInstruction *dest_type, IrInstruction *target) {
- IrInstruction *new_instruction = ir_build_truncate(irb, old_instruction->scope, old_instruction->source_node, dest_type, target);
- ir_link_new_instruction(new_instruction, old_instruction);
- return new_instruction;
+static IrInstruction *ir_build_int_cast(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) {
+ IrInstructionIntCast *instruction = ir_build_instruction<IrInstructionIntCast>(irb, scope, source_node);
+ instruction->dest_type = dest_type;
+ instruction->target = target;
+
+ ir_ref_instruction(dest_type, irb->current_basic_block);
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_float_cast(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) {
+ IrInstructionFloatCast *instruction = ir_build_instruction<IrInstructionFloatCast>(irb, scope, source_node);
+ instruction->dest_type = dest_type;
+ instruction->target = target;
+
+ ir_ref_instruction(dest_type, irb->current_basic_block);
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_err_set_cast(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) {
+ IrInstructionErrSetCast *instruction = ir_build_instruction<IrInstructionErrSetCast>(irb, scope, source_node);
+ instruction->dest_type = dest_type;
+ instruction->target = target;
+
+ ir_ref_instruction(dest_type, irb->current_basic_block);
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_to_bytes(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *target) {
+ IrInstructionToBytes *instruction = ir_build_instruction<IrInstructionToBytes>(irb, scope, source_node);
+ instruction->target = target;
+
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_from_bytes(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_child_type, IrInstruction *target) {
+ IrInstructionFromBytes *instruction = ir_build_instruction<IrInstructionFromBytes>(irb, scope, source_node);
+ instruction->dest_child_type = dest_child_type;
+ instruction->target = target;
+
+ ir_ref_instruction(dest_child_type, irb->current_basic_block);
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_int_to_float(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) {
+ IrInstructionIntToFloat *instruction = ir_build_instruction<IrInstructionIntToFloat>(irb, scope, source_node);
+ instruction->dest_type = dest_type;
+ instruction->target = target;
+
+ ir_ref_instruction(dest_type, irb->current_basic_block);
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_float_to_int(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) {
+ IrInstructionFloatToInt *instruction = ir_build_instruction<IrInstructionFloatToInt>(irb, scope, source_node);
+ instruction->dest_type = dest_type;
+ instruction->target = target;
+
+ ir_ref_instruction(dest_type, irb->current_basic_block);
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_bool_to_int(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *target) {
+ IrInstructionBoolToInt *instruction = ir_build_instruction<IrInstructionBoolToInt>(irb, scope, source_node);
+ instruction->target = target;
+
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
}
static IrInstruction *ir_build_int_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *is_signed, IrInstruction *bit_count) {
@@ -2054,6 +2244,17 @@ static IrInstruction *ir_build_frame_address_from(IrBuilder *irb, IrInstruction
return new_instruction;
}
+static IrInstruction *ir_build_handle(IrBuilder *irb, Scope *scope, AstNode *source_node) {
+ IrInstructionHandle *instruction = ir_build_instruction<IrInstructionHandle>(irb, scope, source_node);
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_handle_from(IrBuilder *irb, IrInstruction *old_instruction) {
+ IrInstruction *new_instruction = ir_build_handle(irb, old_instruction->scope, old_instruction->source_node);
+ ir_link_new_instruction(new_instruction, old_instruction);
+ return new_instruction;
+}
+
static IrInstruction *ir_build_overflow_op(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrOverflowOp op, IrInstruction *type_value, IrInstruction *op1, IrInstruction *op2,
IrInstruction *result_ptr, TypeTableEntry *result_ptr_type)
@@ -2252,10 +2453,26 @@ static IrInstruction *ir_build_ptr_to_int(IrBuilder *irb, Scope *scope, AstNode
}
static IrInstruction *ir_build_int_to_enum(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *target)
+ IrInstruction *dest_type, IrInstruction *target)
{
IrInstructionIntToEnum *instruction = ir_build_instruction<IrInstructionIntToEnum>(
irb, scope, source_node);
+ instruction->dest_type = dest_type;
+ instruction->target = target;
+
+ if (dest_type) ir_ref_instruction(dest_type, irb->current_basic_block);
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+
+
+static IrInstruction *ir_build_enum_to_int(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *target)
+{
+ IrInstructionEnumToInt *instruction = ir_build_instruction<IrInstructionEnumToInt>(
+ irb, scope, source_node);
instruction->target = target;
ir_ref_instruction(target, irb->current_basic_block);
@@ -2331,20 +2548,6 @@ static IrInstruction *ir_build_type_name(IrBuilder *irb, Scope *scope, AstNode *
return &instruction->base;
}
-static IrInstruction *ir_build_can_implicit_cast(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *type_value, IrInstruction *target_value)
-{
- IrInstructionCanImplicitCast *instruction = ir_build_instruction<IrInstructionCanImplicitCast>(
- irb, scope, source_node);
- instruction->type_value = type_value;
- instruction->target_value = target_value;
-
- ir_ref_instruction(type_value, irb->current_basic_block);
- ir_ref_instruction(target_value, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_decl_ref(IrBuilder *irb, Scope *scope, AstNode *source_node,
Tld *tld, LVal lval)
{
@@ -2419,6 +2622,16 @@ static IrInstruction *ir_build_offset_of(IrBuilder *irb, Scope *scope, AstNode *
return &instruction->base;
}
+static IrInstruction *ir_build_type_info(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *type_value) {
+ IrInstructionTypeInfo *instruction = ir_build_instruction<IrInstructionTypeInfo>(irb, scope, source_node);
+ instruction->type_value = type_value;
+
+ ir_ref_instruction(type_value, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static IrInstruction *ir_build_type_id(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *type_value)
{
@@ -2484,9 +2697,9 @@ static IrInstruction *ir_build_arg_type(IrBuilder *irb, Scope *scope, AstNode *s
return &instruction->base;
}
-static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstructionErrorReturnTrace::Nullable nullable) {
+static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstructionErrorReturnTrace::Optional optional) {
IrInstructionErrorReturnTrace *instruction = ir_build_instruction<IrInstructionErrorReturnTrace>(irb, scope, source_node);
- instruction->nullable = nullable;
+ instruction->optional = optional;
return &instruction->base;
}
@@ -2669,6 +2882,23 @@ static IrInstruction *ir_build_atomic_rmw(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
+static IrInstruction *ir_build_atomic_load(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *operand_type, IrInstruction *ptr,
+ IrInstruction *ordering, AtomicOrder resolved_ordering)
+{
+ IrInstructionAtomicLoad *instruction = ir_build_instruction<IrInstructionAtomicLoad>(irb, scope, source_node);
+ instruction->operand_type = operand_type;
+ instruction->ptr = ptr;
+ instruction->ordering = ordering;
+ instruction->resolved_ordering = resolved_ordering;
+
+ if (operand_type != nullptr) ir_ref_instruction(operand_type, irb->current_basic_block);
+ ir_ref_instruction(ptr, irb->current_basic_block);
+ if (ordering != nullptr) ir_ref_instruction(ordering, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static IrInstruction *ir_build_promise_result_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *promise_type)
{
@@ -2731,20 +2961,49 @@ static IrInstruction *ir_build_mark_err_ret_trace_ptr(IrBuilder *irb, Scope *sco
return &instruction->base;
}
+static IrInstruction *ir_build_sqrt(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *type, IrInstruction *op) {
+ IrInstructionSqrt *instruction = ir_build_instruction<IrInstructionSqrt>(irb, scope, source_node);
+ instruction->type = type;
+ instruction->op = op;
+
+ if (type != nullptr) ir_ref_instruction(type, irb->current_basic_block);
+ ir_ref_instruction(op, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) {
results[ReturnKindUnconditional] = 0;
results[ReturnKindError] = 0;
- while (inner_scope != outer_scope) {
- assert(inner_scope);
- if (inner_scope->id == ScopeIdDefer) {
- AstNode *defer_node = inner_scope->source_node;
- assert(defer_node->type == NodeTypeDefer);
- ReturnKind defer_kind = defer_node->data.defer.kind;
- results[defer_kind] += 1;
+ Scope *scope = inner_scope;
+ while (scope != outer_scope) {
+ assert(scope);
+ switch (scope->id) {
+ case ScopeIdDefer: {
+ AstNode *defer_node = scope->source_node;
+ assert(defer_node->type == NodeTypeDefer);
+ ReturnKind defer_kind = defer_node->data.defer.kind;
+ results[defer_kind] += 1;
+ scope = scope->parent;
+ continue;
+ }
+ case ScopeIdDecls:
+ case ScopeIdFnDef:
+ return;
+ case ScopeIdBlock:
+ case ScopeIdVarDecl:
+ case ScopeIdLoop:
+ case ScopeIdSuspend:
+ case ScopeIdCompTime:
+ scope = scope->parent;
+ continue;
+ case ScopeIdDeferExpr:
+ case ScopeIdCImport:
+ case ScopeIdCoroPrelude:
+ zig_unreachable();
}
- inner_scope = inner_scope->parent;
}
}
@@ -2760,27 +3019,43 @@ static bool ir_gen_defers_for_block(IrBuilder *irb, Scope *inner_scope, Scope *o
if (!scope)
return is_noreturn;
- if (scope->id == ScopeIdDefer) {
- AstNode *defer_node = scope->source_node;
- assert(defer_node->type == NodeTypeDefer);
- ReturnKind defer_kind = defer_node->data.defer.kind;
- if (defer_kind == ReturnKindUnconditional ||
- (gen_error_defers && defer_kind == ReturnKindError))
- {
- AstNode *defer_expr_node = defer_node->data.defer.expr;
- Scope *defer_expr_scope = defer_node->data.defer.expr_scope;
- IrInstruction *defer_expr_value = ir_gen_node(irb, defer_expr_node, defer_expr_scope);
- if (defer_expr_value != irb->codegen->invalid_instruction) {
- if (defer_expr_value->value.type != nullptr && defer_expr_value->value.type->id == TypeTableEntryIdUnreachable) {
- is_noreturn = true;
- } else {
- ir_mark_gen(ir_build_check_statement_is_void(irb, defer_expr_scope, defer_expr_node, defer_expr_value));
+ switch (scope->id) {
+ case ScopeIdDefer: {
+ AstNode *defer_node = scope->source_node;
+ assert(defer_node->type == NodeTypeDefer);
+ ReturnKind defer_kind = defer_node->data.defer.kind;
+ if (defer_kind == ReturnKindUnconditional ||
+ (gen_error_defers && defer_kind == ReturnKindError))
+ {
+ AstNode *defer_expr_node = defer_node->data.defer.expr;
+ Scope *defer_expr_scope = defer_node->data.defer.expr_scope;
+ IrInstruction *defer_expr_value = ir_gen_node(irb, defer_expr_node, defer_expr_scope);
+ if (defer_expr_value != irb->codegen->invalid_instruction) {
+ if (defer_expr_value->value.type != nullptr && defer_expr_value->value.type->id == TypeTableEntryIdUnreachable) {
+ is_noreturn = true;
+ } else {
+ ir_mark_gen(ir_build_check_statement_is_void(irb, defer_expr_scope, defer_expr_node, defer_expr_value));
+ }
}
}
+ scope = scope->parent;
+ continue;
}
-
+ case ScopeIdDecls:
+ case ScopeIdFnDef:
+ return is_noreturn;
+ case ScopeIdBlock:
+ case ScopeIdVarDecl:
+ case ScopeIdLoop:
+ case ScopeIdSuspend:
+ case ScopeIdCompTime:
+ scope = scope->parent;
+ continue;
+ case ScopeIdDeferExpr:
+ case ScopeIdCImport:
+ case ScopeIdCoroPrelude:
+ zig_unreachable();
}
- scope = scope->parent;
}
return is_noreturn;
}
@@ -2796,6 +3071,18 @@ static void ir_set_cursor_at_end_and_append_block(IrBuilder *irb, IrBasicBlock *
ir_set_cursor_at_end(irb, basic_block);
}
+static ScopeSuspend *get_scope_suspend(Scope *scope) {
+ while (scope) {
+ if (scope->id == ScopeIdSuspend)
+ return (ScopeSuspend *)scope;
+ if (scope->id == ScopeIdFnDef)
+ return nullptr;
+
+ scope = scope->parent;
+ }
+ return nullptr;
+}
+
static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) {
while (scope) {
if (scope->id == ScopeIdDeferExpr)
@@ -2825,20 +3112,47 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode
return return_inst;
}
- ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
- IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node,
- get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
- // TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig
- IrInstruction *replacement_value = irb->exec->coro_handle;
- IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node,
- promise_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr,
- AtomicRmwOp_xchg, AtomicOrderSeqCst);
- ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, maybe_await_handle);
- IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle);
+ IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "Suspended");
+ IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "NotSuspended");
+ IrBasicBlock *store_awaiter_block = ir_create_basic_block(irb, scope, "StoreAwaiter");
+ IrBasicBlock *check_canceled_block = ir_create_basic_block(irb, scope, "CheckCanceled");
+
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
+ IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+ IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
+ IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
- return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final,
- is_comptime);
- // the above blocks are rendered by ir_gen after the rest of codegen
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
+
+ ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, ptr_mask, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+
+ IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
+ IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, suspended_block);
+ ir_build_unreachable(irb, scope, node);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
+ IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
+ // if we ever add null checking safety to the ptrtoint instruction, it needs to be disabled here
+ IrInstruction *have_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
+ ir_build_cond_br(irb, scope, node, have_await_handle, store_awaiter_block, check_canceled_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, store_awaiter_block);
+ IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr);
+ ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, await_handle);
+ ir_build_br(irb, scope, node, irb->exec->coro_normal_final, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, check_canceled_block);
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ return ir_build_cond_br(irb, scope, node, is_canceled_bool, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, is_comptime);
}
static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) {
@@ -2923,7 +3237,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
case ReturnKindError:
{
assert(expr_node);
- IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
+ IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr);
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
IrInstruction *err_union_val = ir_build_load_ptr(irb, scope, node, err_union_ptr);
@@ -2951,7 +3265,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
ir_set_cursor_at_end_and_append_block(irb, continue_block);
IrInstruction *unwrapped_ptr = ir_build_unwrap_err_payload(irb, scope, node, err_union_ptr, false);
- if (lval.is_ptr)
+ if (lval == LValPtr)
return unwrapped_ptr;
else
return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
@@ -2981,9 +3295,8 @@ static VariableTableEntry *create_local_var(CodeGen *codegen, AstNode *node, Sco
add_error_note(codegen, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
variable_entry->value->type = codegen->builtin_types.entry_invalid;
} else {
- auto primitive_table_entry = codegen->primitive_type_table.maybe_get(name);
- if (primitive_table_entry) {
- TypeTableEntry *type = primitive_table_entry->value;
+ TypeTableEntry *type = get_primitive_type(codegen, name);
+ if (type != nullptr) {
add_node_error(codegen, node,
buf_sprintf("variable shadows type '%s'", buf_ptr(&type->name)));
variable_entry->value->type = codegen->builtin_types.entry_invalid;
@@ -3019,7 +3332,15 @@ static VariableTableEntry *create_local_var(CodeGen *codegen, AstNode *node, Sco
static VariableTableEntry *ir_create_var(IrBuilder *irb, AstNode *node, Scope *scope, Buf *name,
bool src_is_const, bool gen_is_const, bool is_shadowable, IrInstruction *is_comptime)
{
- VariableTableEntry *var = create_local_var(irb->codegen, node, scope, name, src_is_const, gen_is_const, is_shadowable, is_comptime);
+ bool is_underscored = name ? buf_eql_str(name, "_") : false;
+ VariableTableEntry *var = create_local_var( irb->codegen
+ , node
+ , scope
+ , (is_underscored ? nullptr : name)
+ , src_is_const
+ , gen_is_const
+ , (is_underscored ? true : is_shadowable)
+ , is_comptime );
if (is_comptime != nullptr || gen_is_const) {
var->mem_slot_index = exec_next_mem_slot(irb->exec);
var->owner_exec = irb->exec;
@@ -3086,6 +3407,9 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode
if (block_node->data.block.name == nullptr || incoming_blocks.length == 0) {
return noreturn_return_value;
}
+
+ ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block);
+ return ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length, incoming_blocks.items, incoming_values.items);
} else {
incoming_blocks.append(irb->current_basic_block);
incoming_values.append(ir_mark_gen(ir_build_const_void(irb, parent_scope, block_node)));
@@ -3113,7 +3437,7 @@ static IrInstruction *ir_gen_bin_op_id(IrBuilder *irb, Scope *scope, AstNode *no
}
static IrInstruction *ir_gen_assign(IrBuilder *irb, Scope *scope, AstNode *node) {
- IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LVAL_PTR);
+ IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr);
IrInstruction *rvalue = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
if (lvalue == irb->codegen->invalid_instruction || rvalue == irb->codegen->invalid_instruction)
@@ -3124,7 +3448,7 @@ static IrInstruction *ir_gen_assign(IrBuilder *irb, Scope *scope, AstNode *node)
}
static IrInstruction *ir_gen_assign_op(IrBuilder *irb, Scope *scope, AstNode *node, IrBinOp op_id) {
- IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LVAL_PTR);
+ IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr);
if (lvalue == irb->codegen->invalid_instruction)
return lvalue;
IrInstruction *op1 = ir_build_load_ptr(irb, scope, node->data.bin_op_expr.op1, lvalue);
@@ -3226,7 +3550,7 @@ static IrInstruction *ir_gen_maybe_ok_or(IrBuilder *irb, Scope *parent_scope, As
AstNode *op1_node = node->data.bin_op_expr.op1;
AstNode *op2_node = node->data.bin_op_expr.op2;
- IrInstruction *maybe_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LVAL_PTR);
+ IrInstruction *maybe_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr);
if (maybe_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -3240,9 +3564,9 @@ static IrInstruction *ir_gen_maybe_ok_or(IrBuilder *irb, Scope *parent_scope, As
is_comptime = ir_build_test_comptime(irb, parent_scope, node, is_non_null);
}
- IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "MaybeNonNull");
- IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "MaybeNull");
- IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "MaybeEnd");
+ IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "OptionalNonNull");
+ IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "OptionalNull");
+ IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "OptionalEnd");
ir_build_cond_br(irb, parent_scope, node, is_non_null, ok_block, null_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, null_block);
@@ -3371,7 +3695,7 @@ static IrInstruction *ir_gen_bin_op(IrBuilder *irb, Scope *scope, AstNode *node)
return ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayMult);
case BinOpTypeMergeErrorSets:
return ir_gen_bin_op_id(irb, scope, node, IrBinOpMergeErrorSets);
- case BinOpTypeUnwrapMaybe:
+ case BinOpTypeUnwrapOptional:
return ir_gen_maybe_ok_or(irb, scope, node);
case BinOpTypeErrorUnion:
return ir_gen_error_union(irb, scope, node);
@@ -3413,7 +3737,7 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node,
Buf *variable_name = node->data.symbol_expr.symbol;
- if (buf_eql_str(variable_name, "_") && lval.is_ptr) {
+ if (buf_eql_str(variable_name, "_") && lval == LValPtr) {
IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, node);
const_instruction->base.value.type = get_pointer_to_type(irb->codegen,
irb->codegen->builtin_types.entry_void, false);
@@ -3422,11 +3746,11 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node,
return &const_instruction->base;
}
- auto primitive_table_entry = irb->codegen->primitive_type_table.maybe_get(variable_name);
- if (primitive_table_entry) {
- IrInstruction *value = ir_build_const_type(irb, scope, node, primitive_table_entry->value);
- if (lval.is_ptr) {
- return ir_build_ref(irb, scope, node, value, lval.is_const, lval.is_volatile);
+ TypeTableEntry *primitive_type = get_primitive_type(irb->codegen, variable_name);
+ if (primitive_type != nullptr) {
+ IrInstruction *value = ir_build_const_type(irb, scope, node, primitive_type);
+ if (lval == LValPtr) {
+ return ir_build_ref(irb, scope, node, value, false, false);
} else {
return value;
}
@@ -3434,9 +3758,8 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node,
VariableTableEntry *var = find_variable(irb->codegen, scope, variable_name);
if (var) {
- IrInstruction *var_ptr = ir_build_var_ptr(irb, scope, node, var,
- !lval.is_ptr || lval.is_const, lval.is_ptr && lval.is_volatile);
- if (lval.is_ptr)
+ IrInstruction *var_ptr = ir_build_var_ptr(irb, scope, node, var);
+ if (lval == LValPtr)
return var_ptr;
else
return ir_build_load_ptr(irb, scope, node, var_ptr);
@@ -3462,7 +3785,7 @@ static IrInstruction *ir_gen_array_access(IrBuilder *irb, Scope *scope, AstNode
assert(node->type == NodeTypeArrayAccessExpr);
AstNode *array_ref_node = node->data.array_access_expr.array_ref_expr;
- IrInstruction *array_ref_instruction = ir_gen_node_extra(irb, array_ref_node, scope, LVAL_PTR);
+ IrInstruction *array_ref_instruction = ir_gen_node_extra(irb, array_ref_node, scope, LValPtr);
if (array_ref_instruction == irb->codegen->invalid_instruction)
return array_ref_instruction;
@@ -3472,28 +3795,24 @@ static IrInstruction *ir_gen_array_access(IrBuilder *irb, Scope *scope, AstNode
return subscript_instruction;
IrInstruction *ptr_instruction = ir_build_elem_ptr(irb, scope, node, array_ref_instruction,
- subscript_instruction, true);
- if (lval.is_ptr)
+ subscript_instruction, true, PtrLenSingle);
+ if (lval == LValPtr)
return ptr_instruction;
return ir_build_load_ptr(irb, scope, node, ptr_instruction);
}
-static IrInstruction *ir_gen_field_access(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) {
+static IrInstruction *ir_gen_field_access(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeFieldAccessExpr);
AstNode *container_ref_node = node->data.field_access_expr.struct_expr;
Buf *field_name = node->data.field_access_expr.field_name;
- IrInstruction *container_ref_instruction = ir_gen_node_extra(irb, container_ref_node, scope, LVAL_PTR);
+ IrInstruction *container_ref_instruction = ir_gen_node_extra(irb, container_ref_node, scope, LValPtr);
if (container_ref_instruction == irb->codegen->invalid_instruction)
return container_ref_instruction;
- IrInstruction *ptr_instruction = ir_build_field_ptr(irb, scope, node, container_ref_instruction, field_name);
- if (lval.is_ptr)
- return ptr_instruction;
-
- return ir_build_load_ptr(irb, scope, node, ptr_instruction);
+ return ir_build_field_ptr(irb, scope, node, container_ref_instruction, field_name);
}
static IrInstruction *ir_gen_overflow_op(IrBuilder *irb, Scope *scope, AstNode *node, IrOverflowOp op) {
@@ -3524,7 +3843,7 @@ static IrInstruction *ir_gen_overflow_op(IrBuilder *irb, Scope *scope, AstNode *
return ir_build_overflow_op(irb, scope, node, op, type_value, op1, op2, result_ptr, nullptr);
}
-static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNode *node) {
+static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) {
assert(node->type == NodeTypeFnCallExpr);
AstNode *fn_ref_expr = node->data.fn_call_expr.fn_ref_expr;
@@ -3547,6 +3866,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return irb->codegen->invalid_instruction;
}
+ bool is_async = exec_is_async(irb->exec);
+
switch (builtin_fn->id) {
case BuiltinFnIdInvalid:
zig_unreachable();
@@ -3556,7 +3877,9 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
IrInstruction *arg = ir_gen_node(irb, arg_node, scope);
if (arg == irb->codegen->invalid_instruction)
return arg;
- return ir_build_typeof(irb, scope, node, arg);
+
+ IrInstruction *type_of = ir_build_typeof(irb, scope, node, arg);
+ return ir_lval_wrap(irb, scope, type_of, lval);
}
case BuiltinFnIdSetCold:
{
@@ -3565,7 +3888,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_set_cold(irb, scope, node, arg0_value);
+ IrInstruction *set_cold = ir_build_set_cold(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, set_cold, lval);
}
case BuiltinFnIdSetRuntimeSafety:
{
@@ -3574,7 +3898,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_set_runtime_safety(irb, scope, node, arg0_value);
+ IrInstruction *set_safety = ir_build_set_runtime_safety(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, set_safety, lval);
}
case BuiltinFnIdSetFloatMode:
{
@@ -3588,7 +3913,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_set_float_mode(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *set_float_mode = ir_build_set_float_mode(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, set_float_mode, lval);
}
case BuiltinFnIdSizeof:
{
@@ -3597,7 +3923,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_size_of(irb, scope, node, arg0_value);
+ IrInstruction *size_of = ir_build_size_of(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, size_of, lval);
}
case BuiltinFnIdCtz:
{
@@ -3606,7 +3933,18 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_ctz(irb, scope, node, arg0_value);
+ IrInstruction *ctz = ir_build_ctz(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, ctz, lval);
+ }
+ case BuiltinFnIdPopCount:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *instr = ir_build_pop_count(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, instr, lval);
}
case BuiltinFnIdClz:
{
@@ -3615,7 +3953,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_clz(irb, scope, node, arg0_value);
+ IrInstruction *clz = ir_build_clz(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, clz, lval);
}
case BuiltinFnIdImport:
{
@@ -3624,11 +3963,13 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_import(irb, scope, node, arg0_value);
+ IrInstruction *import = ir_build_import(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, import, lval);
}
case BuiltinFnIdCImport:
{
- return ir_build_c_import(irb, scope, node);
+ IrInstruction *c_import = ir_build_c_import(irb, scope, node);
+ return ir_lval_wrap(irb, scope, c_import, lval);
}
case BuiltinFnIdCInclude:
{
@@ -3642,7 +3983,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return irb->codegen->invalid_instruction;
}
- return ir_build_c_include(irb, scope, node, arg0_value);
+ IrInstruction *c_include = ir_build_c_include(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, c_include, lval);
}
case BuiltinFnIdCDefine:
{
@@ -3661,7 +4003,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return irb->codegen->invalid_instruction;
}
- return ir_build_c_define(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *c_define = ir_build_c_define(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, c_define, lval);
}
case BuiltinFnIdCUndef:
{
@@ -3675,7 +4018,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return irb->codegen->invalid_instruction;
}
- return ir_build_c_undef(irb, scope, node, arg0_value);
+ IrInstruction *c_undef = ir_build_c_undef(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, c_undef, lval);
}
case BuiltinFnIdMaxValue:
{
@@ -3684,7 +4028,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_max_value(irb, scope, node, arg0_value);
+ IrInstruction *max_value = ir_build_max_value(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, max_value, lval);
}
case BuiltinFnIdMinValue:
{
@@ -3693,7 +4038,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_min_value(irb, scope, node, arg0_value);
+ IrInstruction *min_value = ir_build_min_value(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, min_value, lval);
}
case BuiltinFnIdCompileErr:
{
@@ -3702,7 +4048,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_compile_err(irb, scope, node, arg0_value);
+ IrInstruction *compile_err = ir_build_compile_err(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, compile_err, lval);
}
case BuiltinFnIdCompileLog:
{
@@ -3715,7 +4062,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return irb->codegen->invalid_instruction;
}
- return ir_build_compile_log(irb, scope, node, actual_param_count, args);
+ IrInstruction *compile_log = ir_build_compile_log(irb, scope, node, actual_param_count, args);
+ return ir_lval_wrap(irb, scope, compile_log, lval);
}
case BuiltinFnIdErrName:
{
@@ -3724,7 +4072,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_err_name(irb, scope, node, arg0_value);
+ IrInstruction *err_name = ir_build_err_name(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, err_name, lval);
}
case BuiltinFnIdEmbedFile:
{
@@ -3733,9 +4082,11 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_embed_file(irb, scope, node, arg0_value);
+ IrInstruction *embed_file = ir_build_embed_file(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, embed_file, lval);
}
- case BuiltinFnIdCmpExchange:
+ case BuiltinFnIdCmpxchgWeak:
+ case BuiltinFnIdCmpxchgStrong:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
@@ -3762,9 +4113,15 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg4_value == irb->codegen->invalid_instruction)
return arg4_value;
- return ir_build_cmpxchg(irb, scope, node, arg0_value, arg1_value,
- arg2_value, arg3_value, arg4_value,
- AtomicOrderUnordered, AtomicOrderUnordered);
+ AstNode *arg5_node = node->data.fn_call_expr.params.at(5);
+ IrInstruction *arg5_value = ir_gen_node(irb, arg5_node, scope);
+ if (arg5_value == irb->codegen->invalid_instruction)
+ return arg5_value;
+
+ IrInstruction *cmpxchg = ir_build_cmpxchg(irb, scope, node, arg0_value, arg1_value,
+ arg2_value, arg3_value, arg4_value, arg5_value, (builtin_fn->id == BuiltinFnIdCmpxchgWeak),
+ nullptr, AtomicOrderUnordered, AtomicOrderUnordered);
+ return ir_lval_wrap(irb, scope, cmpxchg, lval);
}
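Note the @cmpxchg split here: the single BuiltinFnIdCmpExchange case becomes BuiltinFnIdCmpxchgWeak/BuiltinFnIdCmpxchgStrong, a sixth operand is IR-generated, and the weak flag is derived from the builtin id. As a rough analogy only (not part of this patch), the weak/strong distinction and the explicit success/failure orderings mirror C++'s std::atomic compare-exchange family:

#include <atomic>
// Illustrative sketch only: success and failure orderings are passed explicitly,
// much like the extra ordering operand threaded through ir_build_cmpxchg above.
static bool cas_strong(std::atomic<size_t> &v, size_t expected, size_t desired) {
    return v.compare_exchange_strong(expected, desired,
                                     std::memory_order_seq_cst,   // success ordering
                                     std::memory_order_relaxed);  // failure ordering
}
static bool cas_weak(std::atomic<size_t> &v, size_t expected, size_t desired) {
    // may fail spuriously even when v == expected; callers retry in a loop
    return v.compare_exchange_weak(expected, desired,
                                   std::memory_order_seq_cst,
                                   std::memory_order_relaxed);
}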
case BuiltinFnIdFence:
{
@@ -3773,7 +4130,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_fence(irb, scope, node, arg0_value, AtomicOrderUnordered);
+ IrInstruction *fence = ir_build_fence(irb, scope, node, arg0_value, AtomicOrderUnordered);
+ return ir_lval_wrap(irb, scope, fence, lval);
}
case BuiltinFnIdDivExact:
{
@@ -3787,7 +4145,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_bin_op(irb, scope, node, IrBinOpDivExact, arg0_value, arg1_value, true);
+ IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivExact, arg0_value, arg1_value, true);
+ return ir_lval_wrap(irb, scope, bin_op, lval);
}
case BuiltinFnIdDivTrunc:
{
@@ -3801,7 +4160,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_bin_op(irb, scope, node, IrBinOpDivTrunc, arg0_value, arg1_value, true);
+ IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivTrunc, arg0_value, arg1_value, true);
+ return ir_lval_wrap(irb, scope, bin_op, lval);
}
case BuiltinFnIdDivFloor:
{
@@ -3815,7 +4175,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_bin_op(irb, scope, node, IrBinOpDivFloor, arg0_value, arg1_value, true);
+ IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivFloor, arg0_value, arg1_value, true);
+ return ir_lval_wrap(irb, scope, bin_op, lval);
}
case BuiltinFnIdRem:
{
@@ -3829,7 +4190,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_bin_op(irb, scope, node, IrBinOpRemRem, arg0_value, arg1_value, true);
+ IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpRemRem, arg0_value, arg1_value, true);
+ return ir_lval_wrap(irb, scope, bin_op, lval);
}
case BuiltinFnIdMod:
{
@@ -3843,7 +4205,23 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_bin_op(irb, scope, node, IrBinOpRemMod, arg0_value, arg1_value, true);
+ IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpRemMod, arg0_value, arg1_value, true);
+ return ir_lval_wrap(irb, scope, bin_op, lval);
+ }
+ case BuiltinFnIdSqrt:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *ir_sqrt = ir_build_sqrt(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, ir_sqrt, lval);
}
case BuiltinFnIdTruncate:
{
@@ -3857,7 +4235,138 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_truncate(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *truncate = ir_build_truncate(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, truncate, lval);
+ }
+ case BuiltinFnIdIntCast:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *result = ir_build_int_cast(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdFloatCast:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *result = ir_build_float_cast(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdErrSetCast:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *result = ir_build_err_set_cast(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdFromBytes:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *result = ir_build_from_bytes(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdToBytes:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *result = ir_build_to_bytes(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdIntToFloat:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *result = ir_build_int_to_float(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdFloatToInt:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *result = ir_build_float_to_int(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdErrToInt:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *result = ir_build_err_to_int(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdIntToErr:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *result = ir_build_int_to_err(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdBoolToInt:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *result = ir_build_bool_to_int(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, result, lval);
}
case BuiltinFnIdIntType:
{
@@ -3871,7 +4380,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_int_type(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *int_type = ir_build_int_type(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, int_type, lval);
}
case BuiltinFnIdMemcpy:
{
@@ -3890,7 +4400,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg2_value == irb->codegen->invalid_instruction)
return arg2_value;
- return ir_build_memcpy(irb, scope, node, arg0_value, arg1_value, arg2_value);
+ IrInstruction *ir_memcpy = ir_build_memcpy(irb, scope, node, arg0_value, arg1_value, arg2_value);
+ return ir_lval_wrap(irb, scope, ir_memcpy, lval);
}
case BuiltinFnIdMemset:
{
@@ -3909,7 +4420,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg2_value == irb->codegen->invalid_instruction)
return arg2_value;
- return ir_build_memset(irb, scope, node, arg0_value, arg1_value, arg2_value);
+ IrInstruction *ir_memset = ir_build_memset(irb, scope, node, arg0_value, arg1_value, arg2_value);
+ return ir_lval_wrap(irb, scope, ir_memset, lval);
}
case BuiltinFnIdMemberCount:
{
@@ -3918,7 +4430,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_member_count(irb, scope, node, arg0_value);
+ IrInstruction *member_count = ir_build_member_count(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, member_count, lval);
}
case BuiltinFnIdMemberType:
{
@@ -3933,7 +4446,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return arg1_value;
- return ir_build_member_type(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *member_type = ir_build_member_type(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, member_type, lval);
}
case BuiltinFnIdMemberName:
{
@@ -3948,44 +4462,13 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return arg1_value;
- return ir_build_member_name(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *member_name = ir_build_member_name(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, member_name, lval);
}
- case BuiltinFnIdBreakpoint:
- return ir_build_breakpoint(irb, scope, node);
- case BuiltinFnIdReturnAddress:
- return ir_build_return_address(irb, scope, node);
- case BuiltinFnIdFrameAddress:
- return ir_build_frame_address(irb, scope, node);
- case BuiltinFnIdAlignOf:
+ case BuiltinFnIdField:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
- IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
- if (arg0_value == irb->codegen->invalid_instruction)
- return arg0_value;
-
- return ir_build_align_of(irb, scope, node, arg0_value);
- }
- case BuiltinFnIdAddWithOverflow:
- return ir_gen_overflow_op(irb, scope, node, IrOverflowOpAdd);
- case BuiltinFnIdSubWithOverflow:
- return ir_gen_overflow_op(irb, scope, node, IrOverflowOpSub);
- case BuiltinFnIdMulWithOverflow:
- return ir_gen_overflow_op(irb, scope, node, IrOverflowOpMul);
- case BuiltinFnIdShlWithOverflow:
- return ir_gen_overflow_op(irb, scope, node, IrOverflowOpShl);
- case BuiltinFnIdTypeName:
- {
- AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
- IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
- if (arg0_value == irb->codegen->invalid_instruction)
- return arg0_value;
-
- return ir_build_type_name(irb, scope, node, arg0_value);
- }
- case BuiltinFnIdCanImplicitCast:
- {
- AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
- IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ IrInstruction *arg0_value = ir_gen_node_extra(irb, arg0_node, scope, LValPtr);
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
@@ -3994,7 +4477,66 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_can_implicit_cast(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *ptr_instruction = ir_build_field_ptr_instruction(irb, scope, node, arg0_value, arg1_value);
+
+ if (lval == LValPtr)
+ return ptr_instruction;
+
+ return ir_build_load_ptr(irb, scope, node, ptr_instruction);
+ }
+ case BuiltinFnIdTypeInfo:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *type_info = ir_build_type_info(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, type_info, lval);
+ }
+ case BuiltinFnIdBreakpoint:
+ return ir_lval_wrap(irb, scope, ir_build_breakpoint(irb, scope, node), lval);
+ case BuiltinFnIdReturnAddress:
+ return ir_lval_wrap(irb, scope, ir_build_return_address(irb, scope, node), lval);
+ case BuiltinFnIdFrameAddress:
+ return ir_lval_wrap(irb, scope, ir_build_frame_address(irb, scope, node), lval);
+ case BuiltinFnIdHandle:
+ if (!irb->exec->fn_entry) {
+ add_node_error(irb->codegen, node, buf_sprintf("@handle() called outside of function definition"));
+ return irb->codegen->invalid_instruction;
+ }
+ if (!is_async) {
+ add_node_error(irb->codegen, node, buf_sprintf("@handle() in non-async function"));
+ return irb->codegen->invalid_instruction;
+ }
+ return ir_lval_wrap(irb, scope, ir_build_handle(irb, scope, node), lval);
+ case BuiltinFnIdAlignOf:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *align_of = ir_build_align_of(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, align_of, lval);
+ }
+ case BuiltinFnIdAddWithOverflow:
+ return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpAdd), lval);
+ case BuiltinFnIdSubWithOverflow:
+ return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpSub), lval);
+ case BuiltinFnIdMulWithOverflow:
+ return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpMul), lval);
+ case BuiltinFnIdShlWithOverflow:
+ return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpShl), lval);
+ case BuiltinFnIdTypeName:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *type_name = ir_build_type_name(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, type_name, lval);
}
case BuiltinFnIdPanic:
{
@@ -4003,7 +4545,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_panic(irb, scope, node, arg0_value);
+ IrInstruction *panic = ir_build_panic(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, panic, lval);
}
case BuiltinFnIdPtrCast:
{
@@ -4017,7 +4560,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_ptr_cast(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *ptr_cast = ir_build_ptr_cast(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, ptr_cast, lval);
}
case BuiltinFnIdBitCast:
{
@@ -4031,7 +4575,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_bit_cast(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *bit_cast = ir_build_bit_cast(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, bit_cast, lval);
}
case BuiltinFnIdIntToPtr:
{
@@ -4045,7 +4590,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_int_to_ptr(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *int_to_ptr = ir_build_int_to_ptr(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, int_to_ptr, lval);
}
case BuiltinFnIdPtrToInt:
{
@@ -4054,7 +4600,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_ptr_to_int(irb, scope, node, arg0_value);
+ IrInstruction *ptr_to_int = ir_build_ptr_to_int(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, ptr_to_int, lval);
}
case BuiltinFnIdTagName:
{
@@ -4064,7 +4611,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
return arg0_value;
IrInstruction *actual_tag = ir_build_union_tag(irb, scope, node, arg0_value);
- return ir_build_tag_name(irb, scope, node, actual_tag);
+ IrInstruction *tag_name = ir_build_tag_name(irb, scope, node, actual_tag);
+ return ir_lval_wrap(irb, scope, tag_name, lval);
}
case BuiltinFnIdTagType:
{
@@ -4073,7 +4621,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_tag_type(irb, scope, node, arg0_value);
+ IrInstruction *tag_type = ir_build_tag_type(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, tag_type, lval);
}
case BuiltinFnIdFieldParentPtr:
{
@@ -4092,7 +4641,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg2_value == irb->codegen->invalid_instruction)
return arg2_value;
- return ir_build_field_parent_ptr(irb, scope, node, arg0_value, arg1_value, arg2_value, nullptr);
+ IrInstruction *field_parent_ptr = ir_build_field_parent_ptr(irb, scope, node, arg0_value, arg1_value, arg2_value, nullptr);
+ return ir_lval_wrap(irb, scope, field_parent_ptr, lval);
}
case BuiltinFnIdOffsetOf:
{
@@ -4106,7 +4656,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_offset_of(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *offset_of = ir_build_offset_of(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, offset_of, lval);
}
case BuiltinFnIdInlineCall:
case BuiltinFnIdNoInlineCall:
@@ -4132,7 +4683,38 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
}
FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever;
- return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr);
+ IrInstruction *call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr, nullptr);
+ return ir_lval_wrap(irb, scope, call, lval);
+ }
+ case BuiltinFnIdNewStackCall:
+ {
+ if (node->data.fn_call_expr.params.length == 0) {
+ add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0"));
+ return irb->codegen->invalid_instruction;
+ }
+
+ AstNode *new_stack_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *new_stack = ir_gen_node(irb, new_stack_node, scope);
+ if (new_stack == irb->codegen->invalid_instruction)
+ return new_stack;
+
+ AstNode *fn_ref_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
+ if (fn_ref == irb->codegen->invalid_instruction)
+ return fn_ref;
+
+ size_t arg_count = node->data.fn_call_expr.params.length - 2;
+
+ IrInstruction **args = allocate<IrInstruction *>(arg_count);
+ for (size_t i = 0; i < arg_count; i += 1) {
+ AstNode *arg_node = node->data.fn_call_expr.params.at(i + 2);
+ args[i] = ir_gen_node(irb, arg_node, scope);
+ if (args[i] == irb->codegen->invalid_instruction)
+ return args[i];
+ }
+
+ IrInstruction *call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, false, nullptr, new_stack);
+ return ir_lval_wrap(irb, scope, call, lval);
}
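The new @newStackCall case splits its parameters as: params.at(0) is the new stack, params.at(1) is the callee, and everything from index 2 on is forwarded as call arguments; the stack then travels through ir_build_call's new trailing new_stack parameter (passed as nullptr by every other call site in this diff). A minimal sketch of that split, using a hypothetical helper name and a stand-in element type:

#include <cassert>
#include <vector>
struct Param {};  // stand-in for AstNode *
// split_new_stack_call is an illustration, not a function in ir.cpp.
static void split_new_stack_call(const std::vector<Param> &params,
                                 Param *new_stack, Param *callee,
                                 std::vector<Param> *call_args) {
    assert(params.size() >= 2);  // the ir_gen code above only rejects an empty list explicitly
    *new_stack = params[0];
    *callee = params[1];
    call_args->assign(params.begin() + 2, params.end());
}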
case BuiltinFnIdTypeId:
{
@@ -4141,7 +4723,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_type_id(irb, scope, node, arg0_value);
+ IrInstruction *type_id = ir_build_type_id(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, type_id, lval);
}
case BuiltinFnIdShlExact:
{
@@ -4155,7 +4738,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_bin_op(irb, scope, node, IrBinOpBitShiftLeftExact, arg0_value, arg1_value, true);
+ IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpBitShiftLeftExact, arg0_value, arg1_value, true);
+ return ir_lval_wrap(irb, scope, bin_op, lval);
}
case BuiltinFnIdShrExact:
{
@@ -4169,7 +4753,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_bin_op(irb, scope, node, IrBinOpBitShiftRightExact, arg0_value, arg1_value, true);
+ IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpBitShiftRightExact, arg0_value, arg1_value, true);
+ return ir_lval_wrap(irb, scope, bin_op, lval);
}
case BuiltinFnIdSetEvalBranchQuota:
{
@@ -4178,7 +4763,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_set_eval_branch_quota(irb, scope, node, arg0_value);
+ IrInstruction *set_eval_branch_quota = ir_build_set_eval_branch_quota(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, set_eval_branch_quota, lval);
}
case BuiltinFnIdAlignCast:
{
@@ -4192,10 +4778,14 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_align_cast(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *align_cast = ir_build_align_cast(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, align_cast, lval);
}
case BuiltinFnIdOpaqueType:
- return ir_build_opaque_type(irb, scope, node);
+ {
+ IrInstruction *opaque_type = ir_build_opaque_type(irb, scope, node);
+ return ir_lval_wrap(irb, scope, opaque_type, lval);
+ }
case BuiltinFnIdSetAlignStack:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -4203,7 +4793,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
- return ir_build_set_align_stack(irb, scope, node, arg0_value);
+ IrInstruction *set_align_stack = ir_build_set_align_stack(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, set_align_stack, lval);
}
case BuiltinFnIdArgType:
{
@@ -4217,7 +4808,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg1_value == irb->codegen->invalid_instruction)
return arg1_value;
- return ir_build_arg_type(irb, scope, node, arg0_value, arg1_value);
+ IrInstruction *arg_type = ir_build_arg_type(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, arg_type, lval);
}
case BuiltinFnIdExport:
{
@@ -4236,11 +4828,13 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
if (arg2_value == irb->codegen->invalid_instruction)
return arg2_value;
- return ir_build_export(irb, scope, node, arg0_value, arg1_value, arg2_value);
+ IrInstruction *ir_export = ir_build_export(irb, scope, node, arg0_value, arg1_value, arg2_value);
+ return ir_lval_wrap(irb, scope, ir_export, lval);
}
case BuiltinFnIdErrorReturnTrace:
{
- return ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::Null);
+ IrInstruction *error_return_trace = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::Null);
+ return ir_lval_wrap(irb, scope, error_return_trace, lval);
}
case BuiltinFnIdAtomicRmw:
{
@@ -4274,15 +4868,61 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
// these 2 values don't mean anything since we passed non-null values for other args
AtomicRmwOp_xchg, AtomicOrderMonotonic);
}
+ case BuiltinFnIdAtomicLoad:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
+ IrInstruction *arg2_value = ir_gen_node(irb, arg2_node, scope);
+ if (arg2_value == irb->codegen->invalid_instruction)
+ return arg2_value;
+
+ return ir_build_atomic_load(irb, scope, node, arg0_value, arg1_value, arg2_value,
+ // this value does not mean anything since we passed non-null values for the other args
+ AtomicOrderMonotonic);
+ }
+ case BuiltinFnIdIntToEnum:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *result = ir_build_int_to_enum(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdEnumToInt:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *result = ir_build_enum_to_int(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
}
zig_unreachable();
}
-static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node) {
+static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) {
assert(node->type == NodeTypeFnCallExpr);
if (node->data.fn_call_expr.is_builtin)
- return ir_gen_builtin_fn_call(irb, scope, node);
+ return ir_gen_builtin_fn_call(irb, scope, node, lval);
AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
@@ -4308,7 +4948,8 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
}
}
- return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator);
+ IrInstruction *fn_call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator, nullptr);
+ return ir_lval_wrap(irb, scope, fn_call, lval);
}
static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -4379,30 +5020,28 @@ static IrInstruction *ir_gen_prefix_op_id_lval(IrBuilder *irb, Scope *scope, Ast
}
static IrInstruction *ir_gen_prefix_op_id(IrBuilder *irb, Scope *scope, AstNode *node, IrUnOp op_id) {
- return ir_gen_prefix_op_id_lval(irb, scope, node, op_id, LVAL_NONE);
+ return ir_gen_prefix_op_id_lval(irb, scope, node, op_id, LValNone);
}
static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval) {
- if (!lval.is_ptr)
+ if (lval != LValPtr)
return value;
if (value == irb->codegen->invalid_instruction)
return value;
// We needed a pointer to a value, but we got a value. So we create
// an instruction which just makes a const pointer of it.
- return ir_build_ref(irb, scope, value->source_node, value, lval.is_const, lval.is_volatile);
+ return ir_build_ref(irb, scope, value->source_node, value, false, false);
}
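With LVal reduced to a plain enum (LValNone/LValPtr), ir_lval_wrap is the single pattern every builtin case above funnels through: build the value instruction, then materialize a const pointer to it only when the caller asked for an lvalue. The shared shape, restated as illustrative pseudocode (not a function in this file):

//     IrInstruction *result = ir_build_<builtin>(irb, scope, node, ...);
//     return ir_lval_wrap(irb, scope, result, lval);
//
// ir_lval_wrap is a no-op for LValNone; for LValPtr it emits ir_build_ref so an
// expression that needs a pointer to the builtin's result gets a const pointer
// to the temporary, per the comment in the function body above.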
-static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypeAddrOfExpr);
- bool is_const = node->data.addr_of_expr.is_const;
- bool is_volatile = node->data.addr_of_expr.is_volatile;
- AstNode *expr_node = node->data.addr_of_expr.op_expr;
- AstNode *align_expr = node->data.addr_of_expr.align_expr;
-
- if (align_expr == nullptr && !is_const && !is_volatile) {
- return ir_gen_node_extra(irb, expr_node, scope, make_lval_addr(is_const, is_volatile));
- }
+static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) {
+ assert(node->type == NodeTypePointerType);
+ PtrLen ptr_len = (node->data.pointer_type.star_token->id == TokenIdStar ||
+ node->data.pointer_type.star_token->id == TokenIdStarStar) ? PtrLenSingle : PtrLenUnknown;
+ bool is_const = node->data.pointer_type.is_const;
+ bool is_volatile = node->data.pointer_type.is_volatile;
+ AstNode *expr_node = node->data.pointer_type.op_expr;
+ AstNode *align_expr = node->data.pointer_type.align_expr;
IrInstruction *align_value;
if (align_expr != nullptr) {
@@ -4418,27 +5057,27 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n
return child_type;
uint32_t bit_offset_start = 0;
- if (node->data.addr_of_expr.bit_offset_start != nullptr) {
- if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_start, 32, false)) {
+ if (node->data.pointer_type.bit_offset_start != nullptr) {
+ if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_start, 32, false)) {
Buf *val_buf = buf_alloc();
- bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_start, 10);
+ bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_start, 10);
exec_add_error_node(irb->codegen, irb->exec, node,
buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
return irb->codegen->invalid_instruction;
}
- bit_offset_start = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_start);
+ bit_offset_start = bigint_as_unsigned(node->data.pointer_type.bit_offset_start);
}
uint32_t bit_offset_end = 0;
- if (node->data.addr_of_expr.bit_offset_end != nullptr) {
- if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_end, 32, false)) {
+ if (node->data.pointer_type.bit_offset_end != nullptr) {
+ if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_end, 32, false)) {
Buf *val_buf = buf_alloc();
- bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_end, 10);
+ bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_end, 10);
exec_add_error_node(irb->codegen, irb->exec, node,
buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
return irb->codegen->invalid_instruction;
}
- bit_offset_end = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_end);
+ bit_offset_end = bigint_as_unsigned(node->data.pointer_type.bit_offset_end);
}
if ((bit_offset_start != 0 || bit_offset_end != 0) && bit_offset_start >= bit_offset_end) {
@@ -4447,14 +5086,14 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n
return irb->codegen->invalid_instruction;
}
- return ir_build_ptr_type_of(irb, scope, node, child_type, is_const, is_volatile,
- align_value, bit_offset_start, bit_offset_end);
+ return ir_build_ptr_type(irb, scope, node, child_type, is_const, is_volatile,
+ ptr_len, align_value, bit_offset_start, bit_offset_end);
}
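ir_gen_address_of becomes ir_gen_pointer_type here: the address-of special case moves to PrefixOpAddrOf (see the prefix-op hunk further down), and pointer-type nodes now carry a PtrLen derived from the star token. The mapping used above, restated as a tiny helper (hypothetical name, not in ir.cpp):

// PtrLenSingle for `*`/`**` star tokens, PtrLenUnknown otherwise, matching the
// conditional at the top of ir_gen_pointer_type.
static PtrLen ptr_len_from_star_token(TokenId id) {
    return (id == TokenIdStar || id == TokenIdStarStar) ? PtrLenSingle : PtrLenUnknown;
}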
static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode *source_node, AstNode *expr_node,
LVal lval)
{
- IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
+ IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr);
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -4462,27 +5101,12 @@ static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode
if (payload_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- if (lval.is_ptr)
+ if (lval == LValPtr)
return payload_ptr;
return ir_build_load_ptr(irb, scope, source_node, payload_ptr);
}
-static IrInstruction *ir_gen_maybe_assert_ok(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) {
- assert(node->type == NodeTypePrefixOpExpr);
- AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
-
- IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
- if (maybe_ptr == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
- IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true);
- if (lval.is_ptr)
- return unwrapped_ptr;
-
- return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
-}
-
static IrInstruction *ir_gen_bool_not(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypePrefixOpExpr);
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
@@ -4510,12 +5134,12 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegation), lval);
case PrefixOpNegationWrap:
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval);
- case PrefixOpDereference:
- return ir_gen_prefix_op_id_lval(irb, scope, node, IrUnOpDereference, lval);
- case PrefixOpMaybe:
- return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpMaybe), lval);
- case PrefixOpUnwrapMaybe:
- return ir_gen_maybe_assert_ok(irb, scope, node, lval);
+ case PrefixOpOptional:
+ return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval);
+ case PrefixOpAddrOf: {
+ AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
+ return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LValPtr), lval);
+ }
}
zig_unreachable();
}
@@ -4570,6 +5194,11 @@ static IrInstruction *ir_gen_var_decl(IrBuilder *irb, Scope *scope, AstNode *nod
AstNodeVariableDeclaration *variable_declaration = &node->data.variable_declaration;
+ if (buf_eql_str(variable_declaration->symbol, "_")) {
+ add_node_error(irb->codegen, node, buf_sprintf("`_` is not a declarable symbol"));
+ return irb->codegen->invalid_instruction;
+ }
+
IrInstruction *type_instruction;
if (variable_declaration->type != nullptr) {
type_instruction = ir_gen_node(irb, variable_declaration->type, scope);
@@ -4582,6 +5211,7 @@ static IrInstruction *ir_gen_var_decl(IrBuilder *irb, Scope *scope, AstNode *nod
bool is_shadowable = false;
bool is_const = variable_declaration->is_const;
bool is_extern = variable_declaration->is_extern;
+
IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node,
ir_should_inline(irb->exec, scope) || variable_declaration->is_comptime);
VariableTableEntry *var = ir_create_var(irb, node, scope, variable_declaration->symbol,
@@ -4654,7 +5284,7 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
} else {
payload_scope = scope;
}
- IrInstruction *err_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LVAL_PTR);
+ IrInstruction *err_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LValPtr);
if (err_val_ptr == irb->codegen->invalid_instruction)
return err_val_ptr;
IrInstruction *err_val = ir_build_load_ptr(irb, scope, node->data.while_expr.condition, err_val_ptr);
@@ -4689,8 +5319,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
if (body_result == irb->codegen->invalid_instruction)
return body_result;
- if (!instr_is_unreachable(body_result))
+ if (!instr_is_unreachable(body_result)) {
+ ir_mark_gen(ir_build_check_statement_is_void(irb, payload_scope, node->data.while_expr.body, body_result));
ir_mark_gen(ir_build_br(irb, payload_scope, node, continue_block, is_comptime));
+ }
if (continue_expr_node) {
ir_set_cursor_at_end_and_append_block(irb, continue_block);
@@ -4737,7 +5369,7 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
VariableTableEntry *payload_var = ir_create_var(irb, symbol_node, scope, var_symbol,
true, false, false, is_comptime);
Scope *child_scope = payload_var->child_scope;
- IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LVAL_PTR);
+ IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LValPtr);
if (maybe_val_ptr == irb->codegen->invalid_instruction)
return maybe_val_ptr;
IrInstruction *maybe_val = ir_build_load_ptr(irb, scope, node->data.while_expr.condition, maybe_val_ptr);
@@ -4769,8 +5401,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
if (body_result == irb->codegen->invalid_instruction)
return body_result;
- if (!instr_is_unreachable(body_result))
+ if (!instr_is_unreachable(body_result)) {
+ ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.while_expr.body, body_result));
ir_mark_gen(ir_build_br(irb, child_scope, node, continue_block, is_comptime));
+ }
if (continue_expr_node) {
ir_set_cursor_at_end_and_append_block(irb, continue_block);
@@ -4830,8 +5464,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
if (body_result == irb->codegen->invalid_instruction)
return body_result;
- if (!instr_is_unreachable(body_result))
+ if (!instr_is_unreachable(body_result)) {
+ ir_mark_gen(ir_build_check_statement_is_void(irb, scope, node->data.while_expr.body, body_result));
ir_mark_gen(ir_build_br(irb, scope, node, continue_block, is_comptime));
+ }
if (continue_expr_node) {
ir_set_cursor_at_end_and_append_block(irb, continue_block);
@@ -4881,7 +5517,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo
}
assert(elem_node->type == NodeTypeSymbol);
- IrInstruction *array_val_ptr = ir_gen_node_extra(irb, array_node, parent_scope, LVAL_PTR);
+ IrInstruction *array_val_ptr = ir_gen_node_extra(irb, array_node, parent_scope, LValPtr);
if (array_val_ptr == irb->codegen->invalid_instruction)
return array_val_ptr;
@@ -4905,7 +5541,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo
IrInstruction *undefined_value = ir_build_const_undefined(irb, child_scope, elem_node);
ir_build_var_decl(irb, child_scope, elem_node, elem_var, elem_var_type, nullptr, undefined_value);
- IrInstruction *elem_var_ptr = ir_build_var_ptr(irb, child_scope, node, elem_var, false, false);
+ IrInstruction *elem_var_ptr = ir_build_var_ptr(irb, child_scope, node, elem_var);
AstNode *index_var_source_node;
VariableTableEntry *index_var;
@@ -4923,7 +5559,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo
IrInstruction *zero = ir_build_const_usize(irb, child_scope, node, 0);
IrInstruction *one = ir_build_const_usize(irb, child_scope, node, 1);
ir_build_var_decl(irb, child_scope, index_var_source_node, index_var, usize, nullptr, zero);
- IrInstruction *index_ptr = ir_build_var_ptr(irb, child_scope, node, index_var, false, false);
+ IrInstruction *index_ptr = ir_build_var_ptr(irb, child_scope, node, index_var);
IrBasicBlock *cond_block = ir_create_basic_block(irb, child_scope, "ForCond");
@@ -4943,7 +5579,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo
ir_mark_gen(ir_build_cond_br(irb, child_scope, node, cond, body_block, else_block, is_comptime));
ir_set_cursor_at_end_and_append_block(irb, body_block);
- IrInstruction *elem_ptr = ir_build_elem_ptr(irb, child_scope, node, array_val_ptr, index_val, false);
+ IrInstruction *elem_ptr = ir_build_elem_ptr(irb, child_scope, node, array_val_ptr, index_val, false, PtrLenSingle);
IrInstruction *elem_val;
if (node->data.for_expr.elem_is_ptr) {
elem_val = elem_ptr;
@@ -5168,16 +5804,16 @@ static IrInstruction *ir_gen_test_expr(IrBuilder *irb, Scope *scope, AstNode *no
AstNode *else_node = node->data.test_expr.else_node;
bool var_is_ptr = node->data.test_expr.var_is_ptr;
- IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
+ IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr);
if (maybe_val_ptr == irb->codegen->invalid_instruction)
return maybe_val_ptr;
IrInstruction *maybe_val = ir_build_load_ptr(irb, scope, node, maybe_val_ptr);
IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_val);
- IrBasicBlock *then_block = ir_create_basic_block(irb, scope, "MaybeThen");
- IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "MaybeElse");
- IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "MaybeEndIf");
+ IrBasicBlock *then_block = ir_create_basic_block(irb, scope, "OptionalThen");
+ IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "OptionalElse");
+ IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "OptionalEndIf");
IrInstruction *is_comptime;
if (ir_should_inline(irb->exec, scope)) {
@@ -5246,7 +5882,7 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode *
Buf *var_symbol = node->data.if_err_expr.var_symbol;
Buf *err_symbol = node->data.if_err_expr.err_symbol;
- IrInstruction *err_val_ptr = ir_gen_node_extra(irb, target_node, scope, LVAL_PTR);
+ IrInstruction *err_val_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr);
if (err_val_ptr == irb->codegen->invalid_instruction)
return err_val_ptr;
@@ -5372,7 +6008,7 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode *
assert(node->type == NodeTypeSwitchExpr);
AstNode *target_node = node->data.switch_expr.expr;
- IrInstruction *target_value_ptr = ir_gen_node_extra(irb, target_node, scope, LVAL_PTR);
+ IrInstruction *target_value_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr);
if (target_value_ptr == irb->codegen->invalid_instruction)
return target_value_ptr;
IrInstruction *target_value = ir_build_switch_target(irb, scope, node, target_value_ptr);
@@ -5536,13 +6172,13 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode *
}
- ir_build_check_switch_prongs(irb, scope, node, target_value, check_ranges.items, check_ranges.length,
+ IrInstruction *switch_prongs_void = ir_build_check_switch_prongs(irb, scope, node, target_value, check_ranges.items, check_ranges.length,
else_prong != nullptr);
if (cases.length == 0) {
ir_build_br(irb, scope, node, else_block, is_comptime);
} else {
- ir_build_switch_br(irb, scope, node, target_value, else_block, cases.length, cases.items, is_comptime);
+ ir_build_switch_br(irb, scope, node, target_value, else_block, cases.length, cases.items, is_comptime, switch_prongs_void);
}
if (!else_prong) {
@@ -5630,6 +6266,9 @@ static IrInstruction *ir_gen_break(IrBuilder *irb, Scope *break_scope, AstNode *
assert(this_block_scope->end_block != nullptr);
return ir_gen_return_from_block(irb, break_scope, node, this_block_scope);
}
+ } else if (search_scope->id == ScopeIdSuspend) {
+ add_node_error(irb->codegen, node, buf_sprintf("cannot break out of suspend block"));
+ return irb->codegen->invalid_instruction;
}
search_scope = search_scope->parent;
}
@@ -5729,7 +6368,7 @@ static IrInstruction *ir_gen_slice(IrBuilder *irb, Scope *scope, AstNode *node)
AstNode *start_node = slice_expr->start;
AstNode *end_node = slice_expr->end;
- IrInstruction *ptr_value = ir_gen_node_extra(irb, array_node, scope, LVAL_PTR);
+ IrInstruction *ptr_value = ir_gen_node_extra(irb, array_node, scope, LValPtr);
if (ptr_value == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -5763,11 +6402,11 @@ static IrInstruction *ir_gen_err_ok_or(IrBuilder *irb, Scope *parent_scope, AstN
add_node_error(irb->codegen, var_node, buf_sprintf("unused variable: '%s'", buf_ptr(var_name)));
return irb->codegen->invalid_instruction;
}
- return ir_gen_err_assert_ok(irb, parent_scope, node, op1_node, LVAL_NONE);
+ return ir_gen_err_assert_ok(irb, parent_scope, node, op1_node, LValNone);
}
- IrInstruction *err_union_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LVAL_PTR);
+ IrInstruction *err_union_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr);
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -5970,16 +6609,10 @@ static IrInstruction *ir_gen_err_set_decl(IrBuilder *irb, Scope *parent_scope, A
buf_init_from_buf(&err_set_type->name, type_name);
err_set_type->is_copyable = true;
err_set_type->data.error_set.err_count = err_count;
-
- if (err_count == 0) {
- err_set_type->zero_bits = true;
- err_set_type->di_type = irb->codegen->builtin_types.entry_void->di_type;
- } else {
- err_set_type->type_ref = irb->codegen->builtin_types.entry_global_error_set->type_ref;
- err_set_type->di_type = irb->codegen->builtin_types.entry_global_error_set->di_type;
- irb->codegen->error_di_types.append(&err_set_type->di_type);
- err_set_type->data.error_set.errors = allocate<ErrorTableEntry *>(err_count);
- }
+ err_set_type->type_ref = irb->codegen->builtin_types.entry_global_error_set->type_ref;
+ err_set_type->di_type = irb->codegen->builtin_types.entry_global_error_set->di_type;
+ irb->codegen->error_di_types.append(&err_set_type->di_type);
+ err_set_type->data.error_set.errors = allocate<ErrorTableEntry *>(err_count);
ErrorTableEntry **errors = allocate<ErrorTableEntry *>(irb->codegen->errors_by_index.length + err_count);
@@ -6071,30 +6704,150 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo
async_allocator_type_value, is_var_args);
}
-static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
+static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode *node,
+ IrInstruction *target_inst, bool cancel_non_suspended, bool cancel_awaited)
+{
+ IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "CancelDone");
+ IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
+ IrBasicBlock *pre_return_block = ir_create_basic_block(irb, scope, "PreReturn");
+ IrBasicBlock *post_return_block = ir_create_basic_block(irb, scope, "PostReturn");
+ IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
+
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+ IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
+ get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
+ IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+ IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
+ IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
+
+ // TODO relies on Zig not re-ordering fields
+ IrInstruction *casted_target_inst = ir_build_ptr_cast(irb, scope, node, promise_T_type_val, target_inst);
+ IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst);
+ Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
+ IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
+ atomic_state_field_name);
+
+ // set the is_canceled bit
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, atomic_state_ptr, nullptr, is_canceled_mask, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
+ IrInstruction *awaiter_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
+ IrInstruction *is_returned_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, awaiter_addr, ptr_mask, false);
+ ir_build_cond_br(irb, scope, node, is_returned_bool, post_return_block, pre_return_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, post_return_block);
+ if (cancel_awaited) {
+ ir_build_br(irb, scope, node, do_cancel_block, is_comptime);
+ } else {
+ IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
+ IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_awaited_bool, done_block, do_cancel_block, is_comptime);
+ }
+
+ ir_set_cursor_at_end_and_append_block(irb, pre_return_block);
+ if (cancel_awaited) {
+ if (cancel_non_suspended) {
+ ir_build_br(irb, scope, node, do_cancel_block, is_comptime);
+ } else {
+ IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
+ IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_suspended_bool, do_cancel_block, done_block, is_comptime);
+ }
+ } else {
+ ir_build_br(irb, scope, node, done_block, is_comptime);
+ }
+
+ ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
+ ir_build_cancel(irb, scope, node, target_inst);
+ ir_build_br(irb, scope, node, done_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, done_block);
+ return ir_build_const_void(irb, scope, node);
+}
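The cancel lowering above reads and updates a packed atomic state word on the coroutine promise (field ATOMIC_STATE_FIELD_NAME). The layout below is an assumption read off the masks used in this function, not documented elsewhere in this diff:

// Assumed layout of the promise's atomic_state word, per the masks above:
//   bit 0 (0x1) : is_canceled   (set here with an atomic OR)
//   bit 1 (0x2) : is_suspended
//   bit 2 (0x4) : is_awaited
//   bits 3..    : awaiter handle, with the low 3 bits assumed free due to
//                 alignment; all-ones in these bits (awaiter_addr == ptr_mask)
//                 is treated as "the target already returned"
static const size_t ASSUMED_MASK_IS_CANCELED  = 0x1;
static const size_t ASSUMED_MASK_IS_SUSPENDED = 0x2;
static const size_t ASSUMED_MASK_IS_AWAITED   = 0x4;
static const size_t ASSUMED_MASK_AWAITER_PTR  = ~(size_t)0x7;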
+
+static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeCancel);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, parent_scope);
+ IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, scope);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- return ir_build_cancel(irb, parent_scope, node, target_inst);
+ return ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
}
-static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
+static IrInstruction *ir_gen_resume_target(IrBuilder *irb, Scope *scope, AstNode *node,
+ IrInstruction *target_inst)
+{
+ IrBasicBlock *done_block = ir_create_basic_block(irb, scope, "ResumeDone");
+ IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
+ IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "IsSuspended");
+ IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "IsNotSuspended");
+
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+ IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
+ IrInstruction *and_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, is_suspended_mask);
+ IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *promise_T_type_val = ir_build_const_type(irb, scope, node,
+ get_promise_type(irb->codegen, irb->codegen->builtin_types.entry_void));
+
+ // TODO relies on Zig not re-ordering fields
+ IrInstruction *casted_target_inst = ir_build_ptr_cast(irb, scope, node, promise_T_type_val, target_inst);
+ IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst);
+ Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
+ IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
+ atomic_state_field_name);
+
+ // clear the is_suspended bit
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, atomic_state_ptr, nullptr, and_mask, nullptr,
+ AtomicRmwOp_and, AtomicOrderSeqCst);
+
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_canceled_bool, done_block, not_canceled_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
+ IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
+ IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
+ ir_build_unreachable(irb, scope, node);
+
+ ir_set_cursor_at_end_and_append_block(irb, suspended_block);
+ ir_build_coro_resume(irb, scope, node, target_inst);
+ ir_build_br(irb, scope, node, done_block, is_comptime);
+
+ ir_set_cursor_at_end_and_append_block(irb, done_block);
+ return ir_build_const_void(irb, scope, node);
+}
+
+static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeResume);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, parent_scope);
+ IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, scope);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
- return ir_build_coro_resume(irb, parent_scope, node, target_inst);
+ return ir_gen_resume_target(irb, scope, node, target_inst);
}
-static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
+static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeAwaitExpr);
- IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, parent_scope);
+ IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, scope);
if (target_inst == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -6108,7 +6861,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast
return irb->codegen->invalid_instruction;
}
- ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope);
+ ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(scope);
if (scope_defer_expr) {
if (!scope_defer_expr->reported_err) {
add_node_error(irb->codegen, node, buf_sprintf("cannot await inside defer expression"));
@@ -6119,85 +6872,157 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast
Scope *outer_scope = irb->exec->begin_scope;
- IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, parent_scope, node, target_inst);
+ IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, target_inst);
Buf *result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
- IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, result_ptr_field_name);
+ IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name);
if (irb->codegen->have_err_ret_tracing) {
- IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, parent_scope, node, IrInstructionErrorReturnTrace::NonNull);
+ IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull);
Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME);
- IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name);
- ir_build_store_ptr(irb, parent_scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr);
+ IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name);
+ ir_build_store_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr);
}
- Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME);
- IrInstruction *awaiter_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr,
- awaiter_handle_field_name);
+ IrBasicBlock *already_awaited_block = ir_create_basic_block(irb, scope, "AlreadyAwaited");
+ IrBasicBlock *not_awaited_block = ir_create_basic_block(irb, scope, "NotAwaited");
+ IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled");
+ IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, scope, "YesSuspend");
+ IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, scope, "NoSuspend");
+ IrBasicBlock *merge_block = ir_create_basic_block(irb, scope, "MergeSuspend");
+ IrBasicBlock *cleanup_block = ir_create_basic_block(irb, scope, "SuspendCleanup");
+ IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "SuspendResume");
+ IrBasicBlock *cancel_target_block = ir_create_basic_block(irb, scope, "CancelTarget");
+ IrBasicBlock *do_cancel_block = ir_create_basic_block(irb, scope, "DoCancel");
+ IrBasicBlock *do_defers_block = ir_create_basic_block(irb, scope, "DoDefers");
+ IrBasicBlock *destroy_block = ir_create_basic_block(irb, scope, "DestroyBlock");
+ IrBasicBlock *my_suspended_block = ir_create_basic_block(irb, scope, "AlreadySuspended");
+ IrBasicBlock *my_not_suspended_block = ir_create_basic_block(irb, scope, "NotAlreadySuspended");
+ IrBasicBlock *do_suspend_block = ir_create_basic_block(irb, scope, "DoSuspend");
- IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
- VariableTableEntry *result_var = ir_create_var(irb, node, parent_scope, nullptr,
+ Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
+ IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
+ atomic_state_field_name);
+
+ IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise);
+ IrInstruction *const_bool_false = ir_build_const_bool(irb, scope, node, false);
+ IrInstruction *undefined_value = ir_build_const_undefined(irb, scope, node);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
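+ // Bit layout of the atomic state word: the awaiter's handle lives in the pointer-aligned high bits,
+ // and the low bits are flags: 0b001 = canceled, 0b010 = suspended, 0b100 = awaited.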
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111
+ IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+ IrInstruction *await_mask = ir_build_const_usize(irb, scope, node, 0x4); // 0b100
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001
+ IrInstruction *is_suspended_mask = ir_build_const_usize(irb, scope, node, 0x2); // 0b010
+
+ VariableTableEntry *result_var = ir_create_var(irb, node, scope, nullptr,
false, false, true, const_bool_false);
- IrInstruction *undefined_value = ir_build_const_undefined(irb, parent_scope, node);
- IrInstruction *target_promise_type = ir_build_typeof(irb, parent_scope, node, target_inst);
- IrInstruction *promise_result_type = ir_build_promise_result_type(irb, parent_scope, node, target_promise_type);
- ir_build_await_bookkeeping(irb, parent_scope, node, promise_result_type);
- ir_build_var_decl(irb, parent_scope, node, result_var, promise_result_type, nullptr, undefined_value);
- IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, parent_scope, node, result_var, false, false);
- ir_build_store_ptr(irb, parent_scope, node, result_ptr_field_ptr, my_result_var_ptr);
- IrInstruction *save_token = ir_build_coro_save(irb, parent_scope, node, irb->exec->coro_handle);
- IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node,
- get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
- IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, parent_scope, node,
- promise_type_val, awaiter_field_ptr, nullptr, irb->exec->coro_handle, nullptr,
- AtomicRmwOp_xchg, AtomicOrderSeqCst);
- IrInstruction *is_non_null = ir_build_test_nonnull(irb, parent_scope, node, maybe_await_handle);
- IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, parent_scope, "YesSuspend");
- IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, parent_scope, "NoSuspend");
- IrBasicBlock *merge_block = ir_create_basic_block(irb, parent_scope, "MergeSuspend");
- ir_build_cond_br(irb, parent_scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false);
+ IrInstruction *target_promise_type = ir_build_typeof(irb, scope, node, target_inst);
+ IrInstruction *promise_result_type = ir_build_promise_result_type(irb, scope, node, target_promise_type);
+ ir_build_await_bookkeeping(irb, scope, node, promise_result_type);
+ ir_build_var_decl(irb, scope, node, result_var, promise_result_type, nullptr, undefined_value);
+ IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, scope, node, result_var);
+ ir_build_store_ptr(irb, scope, node, result_ptr_field_ptr, my_result_var_ptr);
+ IrInstruction *save_token = ir_build_coro_save(irb, scope, node, irb->exec->coro_handle);
+
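+ // Register ourselves as the awaiter: atomically OR our handle address plus the awaited bit into the
+ // target's atomic state word, then inspect the previous value to decide whether to suspend.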
+ IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, scope, node, irb->exec->coro_handle);
+ IrInstruction *mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, coro_handle_addr, await_mask, false);
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, atomic_state_ptr, nullptr, mask_bits, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+
+ IrInstruction *is_awaited_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, await_mask, false);
+ IrInstruction *is_awaited_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_awaited_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_awaited_bool, already_awaited_block, not_awaited_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, already_awaited_block);
+ ir_build_unreachable(irb, scope, node);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_awaited_block);
+ IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
+ IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ ir_build_cond_br(irb, scope, node, is_canceled_bool, cancel_target_block, not_canceled_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
+ ir_build_cond_br(irb, scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, cancel_target_block);
+ ir_build_cancel(irb, scope, node, target_inst);
+ ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
ir_set_cursor_at_end_and_append_block(irb, no_suspend_block);
if (irb->codegen->have_err_ret_tracing) {
Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME);
- IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, err_ret_trace_field_name);
- IrInstruction *dest_err_ret_trace_ptr = ir_build_error_return_trace(irb, parent_scope, node, IrInstructionErrorReturnTrace::NonNull);
- ir_build_merge_err_ret_traces(irb, parent_scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr);
+ IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name);
+ IrInstruction *dest_err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull);
+ ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr);
}
Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
- IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, result_field_name);
- IrInstruction *no_suspend_result = ir_build_load_ptr(irb, parent_scope, node, promise_result_ptr);
- ir_build_cancel(irb, parent_scope, node, target_inst);
- ir_build_br(irb, parent_scope, node, merge_block, const_bool_false);
+ IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name);
+ // If handle_is_ptr is true for the result type, this load does not actually copy the value. But we need
+ // a copy, because we're about to destroy the coroutine frame, so we store it into our result variable.
+ IrInstruction *no_suspend_result = ir_build_load_ptr(irb, scope, node, promise_result_ptr);
+ ir_build_store_ptr(irb, scope, node, my_result_var_ptr, no_suspend_result);
+ ir_build_cancel(irb, scope, node, target_inst);
+ ir_build_br(irb, scope, node, merge_block, const_bool_false);
+
ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block);
- IrInstruction *suspend_code = ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false);
- IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup");
- IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
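+ // Set the suspended bit on our own frame; finding it already set would mean a double suspend.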
+ IrInstruction *my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+ IrInstruction *my_is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_suspended_mask, false);
+ IrInstruction *my_is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_suspended_value, zero, false);
+ ir_build_cond_br(irb, scope, node, my_is_suspended_bool, my_suspended_block, my_not_suspended_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, my_suspended_block);
+ ir_build_unreachable(irb, scope, node);
+
+ ir_set_cursor_at_end_and_append_block(irb, my_not_suspended_block);
+ IrInstruction *my_is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, my_prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *my_is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, my_is_canceled_value, zero, false);
+ ir_build_cond_br(irb, scope, node, my_is_canceled_bool, cleanup_block, do_suspend_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, do_suspend_block);
+ IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, save_token, const_bool_false);
IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
- cases[0].value = ir_build_const_u8(irb, parent_scope, node, 0);
+ cases[0].value = ir_build_const_u8(irb, scope, node, 0);
cases[0].block = resume_block;
- cases[1].value = ir_build_const_u8(irb, parent_scope, node, 1);
- cases[1].block = cleanup_block;
- ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block,
- 2, cases, const_bool_false);
+ cases[1].value = ir_build_const_u8(irb, scope, node, 1);
+ cases[1].block = destroy_block;
+ ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block,
+ 2, cases, const_bool_false, nullptr);
+
+ ir_set_cursor_at_end_and_append_block(irb, destroy_block);
+ ir_gen_cancel_target(irb, scope, node, target_inst, false, true);
+ ir_mark_gen(ir_build_br(irb, scope, node, cleanup_block, const_bool_false));
ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
- ir_gen_defers_for_block(irb, parent_scope, outer_scope, true);
- ir_mark_gen(ir_build_br(irb, parent_scope, node, irb->exec->coro_final_cleanup_block, const_bool_false));
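+ // Atomically mark ourselves canceled (and saturate the pointer bits); if an awaiter handle was already
+ // stored there, that awaiter must be canceled too before the defers run.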
+ IrInstruction *my_mask_bits = ir_build_bin_op(irb, scope, node, IrBinOpBinOr, ptr_mask, is_canceled_mask, false);
+ IrInstruction *b_my_prev_atomic_value = ir_build_atomic_rmw(irb, scope, node,
+ usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, my_mask_bits, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+ IrInstruction *my_await_handle_addr = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, b_my_prev_atomic_value, ptr_mask, false);
+ IrInstruction *dont_have_my_await_handle = ir_build_bin_op(irb, scope, node, IrBinOpCmpEq, my_await_handle_addr, zero, false);
+ IrInstruction *dont_destroy_ourselves = ir_build_bin_op(irb, scope, node, IrBinOpBoolAnd, dont_have_my_await_handle, is_canceled_bool, false);
+ ir_build_cond_br(irb, scope, node, dont_have_my_await_handle, do_defers_block, do_cancel_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, do_cancel_block);
+ IrInstruction *my_await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, my_await_handle_addr);
+ ir_gen_cancel_target(irb, scope, node, my_await_handle, true, false);
+ ir_mark_gen(ir_build_br(irb, scope, node, do_defers_block, const_bool_false));
+
+ ir_set_cursor_at_end_and_append_block(irb, do_defers_block);
+ ir_gen_defers_for_block(irb, scope, outer_scope, true);
+ ir_mark_gen(ir_build_cond_br(irb, scope, node, dont_destroy_ourselves, irb->exec->coro_early_final, irb->exec->coro_final_cleanup_block, const_bool_false));
ir_set_cursor_at_end_and_append_block(irb, resume_block);
- IrInstruction *yes_suspend_result = ir_build_load_ptr(irb, parent_scope, node, my_result_var_ptr);
- ir_build_br(irb, parent_scope, node, merge_block, const_bool_false);
+ ir_build_br(irb, scope, node, merge_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, merge_block);
- IrBasicBlock **incoming_blocks = allocate<IrBasicBlock *>(2);
- IrInstruction **incoming_values = allocate<IrInstruction *>(2);
- incoming_blocks[0] = resume_block;
- incoming_values[0] = yes_suspend_result;
- incoming_blocks[1] = no_suspend_block;
- incoming_values[1] = no_suspend_result;
- return ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values);
+ return ir_build_load_ptr(irb, scope, node, my_result_var_ptr);
}
static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) {
@@ -6216,54 +7041,105 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod
ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope);
if (scope_defer_expr) {
if (!scope_defer_expr->reported_err) {
- add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression"));
+ ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression"));
+ add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, buf_sprintf("defer here"));
scope_defer_expr->reported_err = true;
}
return irb->codegen->invalid_instruction;
}
+ ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope);
+ if (existing_suspend_scope) {
+ if (!existing_suspend_scope->reported_err) {
+ ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside suspend block"));
+ add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("other suspend block here"));
+ existing_suspend_scope->reported_err = true;
+ }
+ return irb->codegen->invalid_instruction;
+ }
Scope *outer_scope = irb->exec->begin_scope;
+ IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup");
+ IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
+ IrBasicBlock *suspended_block = ir_create_basic_block(irb, parent_scope, "AlreadySuspended");
+ IrBasicBlock *canceled_block = ir_create_basic_block(irb, parent_scope, "IsCanceled");
+ IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, parent_scope, "NotCanceled");
+ IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, parent_scope, "NotAlreadySuspended");
+ IrBasicBlock *cancel_awaiter_block = ir_create_basic_block(irb, parent_scope, "CancelAwaiter");
- IrInstruction *suspend_code;
+ IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_promise);
+ IrInstruction *const_bool_true = ir_build_const_bool(irb, parent_scope, node, true);
IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false);
+ IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize);
+ IrInstruction *is_canceled_mask = ir_build_const_usize(irb, parent_scope, node, 0x1); // 0b001
+ IrInstruction *is_suspended_mask = ir_build_const_usize(irb, parent_scope, node, 0x2); // 0b010
+ IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0);
+ IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, parent_scope, node, 0x7); // 0b111
+ IrInstruction *ptr_mask = ir_build_un_op(irb, parent_scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000
+
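+ // Mark this coroutine as suspended; the previous value tells us whether a cancel already happened.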
+ IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node,
+ usize_type_val, irb->exec->atomic_state_field_ptr, nullptr, is_suspended_mask, nullptr,
+ AtomicRmwOp_or, AtomicOrderSeqCst);
+
+ IrInstruction *is_canceled_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false);
+ IrInstruction *is_canceled_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false);
+ ir_build_cond_br(irb, parent_scope, node, is_canceled_bool, canceled_block, not_canceled_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, canceled_block);
+ IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false);
+ IrInstruction *have_await_handle = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false);
+ IrBasicBlock *post_canceled_block = irb->current_basic_block;
+ ir_build_cond_br(irb, parent_scope, node, have_await_handle, cancel_awaiter_block, cleanup_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, cancel_awaiter_block);
+ IrInstruction *await_handle = ir_build_int_to_ptr(irb, parent_scope, node, promise_type_val, await_handle_addr);
+ ir_gen_cancel_target(irb, parent_scope, node, await_handle, true, false);
+ IrBasicBlock *post_cancel_awaiter_block = irb->current_basic_block;
+ ir_build_br(irb, parent_scope, node, cleanup_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_canceled_block);
+ IrInstruction *is_suspended_value = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, is_suspended_mask, false);
+ IrInstruction *is_suspended_bool = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false);
+ ir_build_cond_br(irb, parent_scope, node, is_suspended_bool, suspended_block, not_suspended_block, const_bool_false);
+
+ ir_set_cursor_at_end_and_append_block(irb, suspended_block);
+ ir_build_unreachable(irb, parent_scope, node);
+
+ ir_set_cursor_at_end_and_append_block(irb, not_suspended_block);
+ IrInstruction *suspend_code;
if (node->data.suspend.block == nullptr) {
suspend_code = ir_build_coro_suspend(irb, parent_scope, node, nullptr, const_bool_false);
} else {
- assert(node->data.suspend.promise_symbol != nullptr);
- assert(node->data.suspend.promise_symbol->type == NodeTypeSymbol);
- Buf *promise_symbol_name = node->data.suspend.promise_symbol->data.symbol_expr.symbol;
Scope *child_scope;
- if (!buf_eql_str(promise_symbol_name, "_")) {
- VariableTableEntry *promise_var = ir_create_var(irb, node, parent_scope, promise_symbol_name,
- true, true, false, const_bool_false);
- ir_build_var_decl(irb, parent_scope, node, promise_var, nullptr, nullptr, irb->exec->coro_handle);
- child_scope = promise_var->child_scope;
- } else {
- child_scope = parent_scope;
- }
+ ScopeSuspend *suspend_scope = create_suspend_scope(node, parent_scope);
+ suspend_scope->resume_block = resume_block;
+ child_scope = &suspend_scope->base;
IrInstruction *save_token = ir_build_coro_save(irb, child_scope, node, irb->exec->coro_handle);
ir_gen_node(irb, node->data.suspend.block, child_scope);
- suspend_code = ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false);
+ suspend_code = ir_mark_gen(ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false));
}
- IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup");
- IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume");
-
IrInstructionSwitchBrCase *cases = allocate<IrInstructionSwitchBrCase>(2);
- cases[0].value = ir_build_const_u8(irb, parent_scope, node, 0);
+ cases[0].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 0));
cases[0].block = resume_block;
- cases[1].value = ir_build_const_u8(irb, parent_scope, node, 1);
- cases[1].block = cleanup_block;
- ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block,
- 2, cases, const_bool_false);
+ cases[1].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 1));
+ cases[1].block = canceled_block;
+ ir_mark_gen(ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block,
+ 2, cases, const_bool_false, nullptr));
ir_set_cursor_at_end_and_append_block(irb, cleanup_block);
+ IrBasicBlock **incoming_blocks = allocate<IrBasicBlock *>(2);
+ IrInstruction **incoming_values = allocate<IrInstruction *>(2);
+ incoming_blocks[0] = post_canceled_block;
+ incoming_values[0] = const_bool_true;
+ incoming_blocks[1] = post_cancel_awaiter_block;
+ incoming_values[1] = const_bool_false;
+ IrInstruction *destroy_ourselves = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values);
ir_gen_defers_for_block(irb, parent_scope, outer_scope, true);
- ir_mark_gen(ir_build_br(irb, parent_scope, node, irb->exec->coro_final_cleanup_block, const_bool_false));
+ ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, destroy_ourselves, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, const_bool_false));
ir_set_cursor_at_end_and_append_block(irb, resume_block);
- return ir_build_const_void(irb, parent_scope, node);
+ return ir_mark_gen(ir_build_const_void(irb, parent_scope, node));
}
static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope,
@@ -6296,13 +7172,11 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
case NodeTypeSymbol:
return ir_gen_symbol(irb, scope, node, lval);
case NodeTypeFnCallExpr:
- return ir_lval_wrap(irb, scope, ir_gen_fn_call(irb, scope, node), lval);
+ return ir_gen_fn_call(irb, scope, node, lval);
case NodeTypeIfBoolExpr:
return ir_lval_wrap(irb, scope, ir_gen_if_bool_expr(irb, scope, node), lval);
case NodeTypePrefixOpExpr:
return ir_gen_prefix_op_expr(irb, scope, node, lval);
- case NodeTypeAddrOfExpr:
- return ir_lval_wrap(irb, scope, ir_gen_address_of(irb, scope, node), lval);
case NodeTypeContainerInitExpr:
return ir_lval_wrap(irb, scope, ir_gen_container_init_expr(irb, scope, node), lval);
case NodeTypeVariableDeclaration:
@@ -6316,13 +7190,44 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
case NodeTypeReturnExpr:
return ir_gen_return(irb, scope, node, lval);
case NodeTypeFieldAccessExpr:
- return ir_gen_field_access(irb, scope, node, lval);
+ {
+ IrInstruction *ptr_instruction = ir_gen_field_access(irb, scope, node);
+ if (ptr_instruction == irb->codegen->invalid_instruction)
+ return ptr_instruction;
+ if (lval == LValPtr)
+ return ptr_instruction;
+
+ return ir_build_load_ptr(irb, scope, node, ptr_instruction);
+ }
+ case NodeTypePtrDeref: {
+ AstNode *expr_node = node->data.ptr_deref_expr.target;
+ IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval);
+ if (value == irb->codegen->invalid_instruction)
+ return value;
+
+ return ir_build_un_op(irb, scope, node, IrUnOpDereference, value);
+ }
+ case NodeTypeUnwrapOptional: {
+ AstNode *expr_node = node->data.unwrap_optional.expr;
+
+ IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr);
+ if (maybe_ptr == irb->codegen->invalid_instruction)
+ return irb->codegen->invalid_instruction;
+
+ IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true);
+ if (lval == LValPtr)
+ return unwrapped_ptr;
+
+ return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
+ }
case NodeTypeThisLiteral:
return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval);
case NodeTypeBoolLiteral:
return ir_lval_wrap(irb, scope, ir_gen_bool_literal(irb, scope, node), lval);
case NodeTypeArrayType:
return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval);
+ case NodeTypePointerType:
+ return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval);
case NodeTypePromiseType:
return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval);
case NodeTypeStringLiteral:
@@ -6380,7 +7285,7 @@ static IrInstruction *ir_gen_node_extra(IrBuilder *irb, AstNode *node, Scope *sc
}
static IrInstruction *ir_gen_node(IrBuilder *irb, AstNode *node, Scope *scope) {
- return ir_gen_node_extra(irb, node, scope, LVAL_NONE);
+ return ir_gen_node_extra(irb, node, scope, LValNone);
}
static void invalidate_exec(IrExecutable *exec) {
@@ -6434,15 +7339,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
IrInstruction *coro_frame_type_value = ir_build_const_type(irb, coro_scope, node, coro_frame_type);
// TODO mark this var decl as "no safety" e.g. disable initializing the undef value to 0xaa
ir_build_var_decl(irb, coro_scope, node, promise_var, coro_frame_type_value, nullptr, undef);
- coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var, false, false);
+ coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var);
VariableTableEntry *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node);
IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node,
- get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
+ get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
ir_build_var_decl(irb, coro_scope, node, await_handle_var, await_handle_type_val, nullptr, null_value);
- irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node,
- await_handle_var, false, false);
+ irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var);
u8_ptr_type = ir_build_const_type(irb, coro_scope, node,
get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false));
@@ -6473,10 +7377,11 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, coro_scope, node, u8_ptr_type, maybe_coro_mem_ptr);
irb->exec->coro_handle = ir_build_coro_begin(irb, coro_scope, node, coro_id, coro_mem_ptr);
- Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME);
- irb->exec->coro_awaiter_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
- awaiter_handle_field_name);
- ir_build_store_ptr(irb, scope, node, irb->exec->coro_awaiter_field_ptr, null_value);
+ Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME);
+ irb->exec->atomic_state_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr,
+ atomic_state_field_name);
+ IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
+ ir_build_store_ptr(irb, scope, node, irb->exec->atomic_state_field_ptr, zero);
Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME);
irb->exec->coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name);
result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME);
@@ -6494,7 +7399,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
// coordinate with builtin.zig
Buf *index_name = buf_create_from_str("index");
IrInstruction *index_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, index_name);
- IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
ir_build_store_ptr(irb, scope, node, index_ptr, zero);
Buf *instruction_addresses_name = buf_create_from_str("instruction_addresses");
@@ -6511,7 +7415,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
irb->exec->coro_final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup");
}
- IrInstruction *result = ir_gen_node_extra(irb, node, scope, LVAL_NONE);
+ IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone);
assert(result);
if (irb->exec->invalid)
return false;
@@ -6533,7 +7437,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
cases[0].block = invalid_resume_block;
cases[1].value = ir_build_const_u8(irb, scope, node, 1);
cases[1].block = irb->exec->coro_final_cleanup_block;
- ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, 2, cases, const_bool_false);
+ ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, 2, cases, const_bool_false, nullptr);
ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_suspend_block);
ir_build_coro_end(irb, scope, node);
@@ -6544,9 +7448,13 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_normal_final);
if (type_has_bits(return_type)) {
+ IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node,
+ get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8,
+ false, false, PtrLenUnknown, get_abi_alignment(irb->codegen, irb->codegen->builtin_types.entry_u8),
+ 0, 0));
IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr);
- IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, result_ptr);
- IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type,
+ IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type_unknown_len, result_ptr);
+ IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type_unknown_len,
irb->exec->coro_result_field_ptr);
IrInstruction *return_type_inst = ir_build_const_type(irb, scope, node,
fn_entry->type_entry->data.fn.fn_type_id.return_type);
@@ -6559,6 +7467,12 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
IrInstruction *dest_err_ret_trace_ptr = ir_build_load_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr);
ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr, dest_err_ret_trace_ptr);
}
+ // Before we destroy the coroutine frame, we need to load the target promise into
+ // a register or local variable which does not get spilled into the frame,
+ // otherwise LLVM tries to access memory inside the destroyed frame.
+ IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node,
+ irb->exec->await_handle_var_ptr, false);
+ IrInstruction *await_handle_in_block = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr);
ir_build_br(irb, scope, node, check_free_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_final_cleanup_block);
@@ -6573,6 +7487,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
incoming_values[1] = const_bool_true;
IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values);
+ IrBasicBlock **merge_incoming_blocks = allocate<IrBasicBlock *>(2);
+ IrInstruction **merge_incoming_values = allocate<IrInstruction *>(2);
+ merge_incoming_blocks[0] = irb->exec->coro_final_cleanup_block;
+ merge_incoming_values[0] = ir_build_const_undefined(irb, scope, node);
+ merge_incoming_blocks[1] = irb->exec->coro_normal_final;
+ merge_incoming_values[1] = await_handle_in_block;
+ IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values);
+
Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME);
IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node,
ImplicitAllocatorIdLocalVar);
@@ -6580,25 +7502,26 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
IrInstruction *free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr);
IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle);
- IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_mem_ptr_maybe);
+ IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node,
+ get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8,
+ false, false, PtrLenUnknown, get_abi_alignment(irb->codegen, irb->codegen->builtin_types.entry_u8),
+ 0, 0));
+ IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type_unknown_len, coro_mem_ptr_maybe);
IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false);
- IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var, true, false);
+ IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var);
IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr);
IrInstruction *mem_slice = ir_build_slice(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false);
size_t arg_count = 2;
IrInstruction **args = allocate<IrInstruction *>(arg_count);
args[0] = implicit_allocator_ptr; // self
args[1] = mem_slice; // old_mem
- ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr);
+ ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr, nullptr);
IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume");
ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, resume_block);
- IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node,
- irb->exec->await_handle_var_ptr, false);
- IrInstruction *awaiter_handle = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr);
- ir_build_coro_resume(irb, scope, node, awaiter_handle);
+ ir_gen_resume_target(irb, scope, node, awaiter_handle);
ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false);
}
@@ -6671,17 +7594,22 @@ static bool ir_emit_global_runtime_side_effect(IrAnalyze *ira, IrInstruction *so
}
static bool const_val_fits_in_num_lit(ConstExprValue *const_val, TypeTableEntry *num_lit_type) {
- return ((num_lit_type->id == TypeTableEntryIdNumLitFloat &&
- (const_val->type->id == TypeTableEntryIdFloat || const_val->type->id == TypeTableEntryIdNumLitFloat)) ||
- (num_lit_type->id == TypeTableEntryIdNumLitInt &&
- (const_val->type->id == TypeTableEntryIdInt || const_val->type->id == TypeTableEntryIdNumLitInt)));
+ return ((num_lit_type->id == TypeTableEntryIdComptimeFloat &&
+ (const_val->type->id == TypeTableEntryIdFloat || const_val->type->id == TypeTableEntryIdComptimeFloat)) ||
+ (num_lit_type->id == TypeTableEntryIdComptimeInt &&
+ (const_val->type->id == TypeTableEntryIdInt || const_val->type->id == TypeTableEntryIdComptimeInt)));
}
static bool float_has_fraction(ConstExprValue *const_val) {
- if (const_val->type->id == TypeTableEntryIdNumLitFloat) {
+ if (const_val->type->id == TypeTableEntryIdComptimeFloat) {
return bigfloat_has_fraction(&const_val->data.x_bigfloat);
} else if (const_val->type->id == TypeTableEntryIdFloat) {
switch (const_val->type->data.floating.bit_count) {
+ case 16:
+ {
+ float16_t floored = f16_roundToInt(const_val->data.x_f16, softfloat_round_minMag, false);
+ return !f16_eq(floored, const_val->data.x_f16);
+ }
case 32:
return floorf(const_val->data.x_f32) != const_val->data.x_f32;
case 64:
@@ -6701,10 +7629,13 @@ static bool float_has_fraction(ConstExprValue *const_val) {
}
static void float_append_buf(Buf *buf, ConstExprValue *const_val) {
- if (const_val->type->id == TypeTableEntryIdNumLitFloat) {
+ if (const_val->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_append_buf(buf, &const_val->data.x_bigfloat);
} else if (const_val->type->id == TypeTableEntryIdFloat) {
switch (const_val->type->data.floating.bit_count) {
+ case 16:
+ buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16));
+ break;
case 32:
buf_appendf(buf, "%f", const_val->data.x_f32);
break;
@@ -6736,10 +7667,21 @@ static void float_append_buf(Buf *buf, ConstExprValue *const_val) {
}
static void float_init_bigint(BigInt *bigint, ConstExprValue *const_val) {
- if (const_val->type->id == TypeTableEntryIdNumLitFloat) {
+ if (const_val->type->id == TypeTableEntryIdComptimeFloat) {
bigint_init_bigfloat(bigint, &const_val->data.x_bigfloat);
} else if (const_val->type->id == TypeTableEntryIdFloat) {
switch (const_val->type->data.floating.bit_count) {
+ case 16:
+ {
+ double x = zig_f16_to_double(const_val->data.x_f16);
+ if (x >= 0) {
+ bigint_init_unsigned(bigint, (uint64_t)x);
+ } else {
+ bigint_init_unsigned(bigint, (uint64_t)-x);
+ bigint->is_negative = true;
+ }
+ break;
+ }
case 32:
if (const_val->data.x_f32 >= 0) {
bigint_init_unsigned(bigint, (uint64_t)(const_val->data.x_f32));
@@ -6772,10 +7714,13 @@ static void float_init_bigint(BigInt *bigint, ConstExprValue *const_val) {
}
static void float_init_bigfloat(ConstExprValue *dest_val, BigFloat *bigfloat) {
- if (dest_val->type->id == TypeTableEntryIdNumLitFloat) {
+ if (dest_val->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_init_bigfloat(&dest_val->data.x_bigfloat, bigfloat);
} else if (dest_val->type->id == TypeTableEntryIdFloat) {
switch (dest_val->type->data.floating.bit_count) {
+ case 16:
+ dest_val->data.x_f16 = bigfloat_to_f16(bigfloat);
+ break;
case 32:
dest_val->data.x_f32 = bigfloat_to_f32(bigfloat);
break;
@@ -6793,11 +7738,39 @@ static void float_init_bigfloat(ConstExprValue *dest_val, BigFloat *bigfloat) {
}
}
+static void float_init_f16(ConstExprValue *dest_val, float16_t x) {
+ if (dest_val->type->id == TypeTableEntryIdComptimeFloat) {
+ bigfloat_init_16(&dest_val->data.x_bigfloat, x);
+ } else if (dest_val->type->id == TypeTableEntryIdFloat) {
+ switch (dest_val->type->data.floating.bit_count) {
+ case 16:
+ dest_val->data.x_f16 = x;
+ break;
+ case 32:
+ dest_val->data.x_f32 = zig_f16_to_double(x);
+ break;
+ case 64:
+ dest_val->data.x_f64 = zig_f16_to_double(x);
+ break;
+ case 128:
+ f16_to_f128M(x, &dest_val->data.x_f128);
+ break;
+ default:
+ zig_unreachable();
+ }
+ } else {
+ zig_unreachable();
+ }
+}
+
static void float_init_f32(ConstExprValue *dest_val, float x) {
- if (dest_val->type->id == TypeTableEntryIdNumLitFloat) {
+ if (dest_val->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_init_32(&dest_val->data.x_bigfloat, x);
} else if (dest_val->type->id == TypeTableEntryIdFloat) {
switch (dest_val->type->data.floating.bit_count) {
+ case 16:
+ dest_val->data.x_f16 = zig_double_to_f16(x);
+ break;
case 32:
dest_val->data.x_f32 = x;
break;
@@ -6820,10 +7793,13 @@ static void float_init_f32(ConstExprValue *dest_val, float x) {
}
static void float_init_f64(ConstExprValue *dest_val, double x) {
- if (dest_val->type->id == TypeTableEntryIdNumLitFloat) {
+ if (dest_val->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_init_64(&dest_val->data.x_bigfloat, x);
} else if (dest_val->type->id == TypeTableEntryIdFloat) {
switch (dest_val->type->data.floating.bit_count) {
+ case 16:
+ dest_val->data.x_f16 = zig_double_to_f16(x);
+ break;
case 32:
dest_val->data.x_f32 = x;
break;
@@ -6846,10 +7822,13 @@ static void float_init_f64(ConstExprValue *dest_val, double x) {
}
static void float_init_f128(ConstExprValue *dest_val, float128_t x) {
- if (dest_val->type->id == TypeTableEntryIdNumLitFloat) {
+ if (dest_val->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_init_128(&dest_val->data.x_bigfloat, x);
} else if (dest_val->type->id == TypeTableEntryIdFloat) {
switch (dest_val->type->data.floating.bit_count) {
+ case 16:
+ dest_val->data.x_f16 = f128M_to_f16(&x);
+ break;
case 32:
{
float32_t f32_val = f128M_to_f32(&x);
@@ -6876,10 +7855,13 @@ static void float_init_f128(ConstExprValue *dest_val, float128_t x) {
}
static void float_init_float(ConstExprValue *dest_val, ConstExprValue *src_val) {
- if (src_val->type->id == TypeTableEntryIdNumLitFloat) {
+ if (src_val->type->id == TypeTableEntryIdComptimeFloat) {
float_init_bigfloat(dest_val, &src_val->data.x_bigfloat);
} else if (src_val->type->id == TypeTableEntryIdFloat) {
switch (src_val->type->data.floating.bit_count) {
+ case 16:
+ float_init_f16(dest_val, src_val->data.x_f16);
+ break;
case 32:
float_init_f32(dest_val, src_val->data.x_f32);
break;
@@ -6899,10 +7881,18 @@ static void float_init_float(ConstExprValue *dest_val, ConstExprValue *src_val)
static Cmp float_cmp(ConstExprValue *op1, ConstExprValue *op2) {
assert(op1->type == op2->type);
- if (op1->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op1->type->id == TypeTableEntryIdComptimeFloat) {
return bigfloat_cmp(&op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ if (f16_lt(op1->data.x_f16, op2->data.x_f16)) {
+ return CmpLT;
+ } else if (f16_lt(op2->data.x_f16, op1->data.x_f16)) {
+ return CmpGT;
+ } else {
+ return CmpEQ;
+ }
case 32:
if (op1->data.x_f32 > op2->data.x_f32) {
return CmpGT;
@@ -6936,10 +7926,21 @@ static Cmp float_cmp(ConstExprValue *op1, ConstExprValue *op2) {
}
static Cmp float_cmp_zero(ConstExprValue *op) {
- if (op->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op->type->id == TypeTableEntryIdComptimeFloat) {
return bigfloat_cmp_zero(&op->data.x_bigfloat);
} else if (op->type->id == TypeTableEntryIdFloat) {
switch (op->type->data.floating.bit_count) {
+ case 16:
+ {
+ const float16_t zero = zig_double_to_f16(0);
+ if (f16_lt(op->data.x_f16, zero)) {
+ return CmpLT;
+ } else if (f16_lt(zero, op->data.x_f16)) {
+ return CmpGT;
+ } else {
+ return CmpEQ;
+ }
+ }
case 32:
if (op->data.x_f32 < 0.0) {
return CmpLT;
@@ -6977,10 +7978,13 @@ static Cmp float_cmp_zero(ConstExprValue *op) {
static void float_add(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
- if (op1->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op1->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_add(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_add(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = op1->data.x_f32 + op2->data.x_f32;
return;
@@ -7001,10 +8005,13 @@ static void float_add(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
static void float_sub(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
- if (op1->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op1->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_sub(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_sub(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = op1->data.x_f32 - op2->data.x_f32;
return;
@@ -7025,10 +8032,13 @@ static void float_sub(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
static void float_mul(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
- if (op1->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op1->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_mul(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_mul(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = op1->data.x_f32 * op2->data.x_f32;
return;
@@ -7049,10 +8059,13 @@ static void float_mul(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
static void float_div(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
- if (op1->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op1->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_div(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = op1->data.x_f32 / op2->data.x_f32;
return;
@@ -7073,25 +8086,19 @@ static void float_div(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
static void float_div_trunc(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
- if (op1->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op1->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_div_trunc(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
+ out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_minMag, false);
+ return;
case 32:
- out_val->data.x_f32 = op1->data.x_f32 / op2->data.x_f32;
- if (out_val->data.x_f32 >= 0.0) {
- out_val->data.x_f32 = floorf(out_val->data.x_f32);
- } else {
- out_val->data.x_f32 = ceilf(out_val->data.x_f32);
- }
+ out_val->data.x_f32 = truncf(op1->data.x_f32 / op2->data.x_f32);
return;
case 64:
- out_val->data.x_f64 = op1->data.x_f64 / op2->data.x_f64;
- if (out_val->data.x_f64 >= 0.0) {
- out_val->data.x_f64 = floor(out_val->data.x_f64);
- } else {
- out_val->data.x_f64 = ceil(out_val->data.x_f64);
- }
+ out_val->data.x_f64 = trunc(op1->data.x_f64 / op2->data.x_f64);
return;
case 128:
f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
@@ -7108,10 +8115,14 @@ static void float_div_trunc(ConstExprValue *out_val, ConstExprValue *op1, ConstE
static void float_div_floor(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
- if (op1->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op1->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_div_floor(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
+ out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_min, false);
+ return;
case 32:
out_val->data.x_f32 = floorf(op1->data.x_f32 / op2->data.x_f32);
return;
@@ -7133,10 +8144,13 @@ static void float_div_floor(ConstExprValue *out_val, ConstExprValue *op1, ConstE
static void float_rem(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
- if (op1->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op1->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_rem(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_rem(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = fmodf(op1->data.x_f32, op2->data.x_f32);
return;
@@ -7154,13 +8168,34 @@ static void float_rem(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
}
}
+// c = a - b * floor(a / b)
+static float16_t zig_f16_mod(float16_t a, float16_t b) {
+ float16_t c;
+ c = f16_div(a, b);
+ c = f16_roundToInt(c, softfloat_round_min, true);
+ c = f16_mul(b, c);
+ c = f16_sub(a, c);
+ return c;
+}
+
+// c = a - b * floor(a / b)
+static void zig_f128M_mod(const float128_t* a, const float128_t* b, float128_t* c) {
+ f128M_div(a, b, c);
+ f128M_roundToInt(c, softfloat_round_min, true, c);
+ f128M_mul(b, c, c);
+ f128M_sub(a, c, c);
+}
+
static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
- if (op1->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op1->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_mod(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = zig_f16_mod(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = fmodf(fmodf(op1->data.x_f32, op2->data.x_f32) + op2->data.x_f32, op2->data.x_f32);
return;
@@ -7168,9 +8203,7 @@ static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
out_val->data.x_f64 = fmod(fmod(op1->data.x_f64, op2->data.x_f64) + op2->data.x_f64, op2->data.x_f64);
return;
case 128:
- f128M_rem(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
- f128M_add(&out_val->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
- f128M_rem(&out_val->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
+ zig_f128M_mod(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
return;
default:
zig_unreachable();
@@ -7182,10 +8215,16 @@ static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
static void float_negate(ConstExprValue *out_val, ConstExprValue *op) {
out_val->type = op->type;
- if (op->type->id == TypeTableEntryIdNumLitFloat) {
+ if (op->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_negate(&out_val->data.x_bigfloat, &op->data.x_bigfloat);
} else if (op->type->id == TypeTableEntryIdFloat) {
switch (op->type->data.floating.bit_count) {
+ case 16:
+ {
+ const float16_t zero = zig_double_to_f16(0);
+ out_val->data.x_f16 = f16_sub(zero, op->data.x_f16);
+ return;
+ }
case 32:
out_val->data.x_f32 = -op->data.x_f32;
return;
@@ -7208,6 +8247,9 @@ static void float_negate(ConstExprValue *out_val, ConstExprValue *op) {
void float_write_ieee597(ConstExprValue *op, uint8_t *buf, bool is_big_endian) {
if (op->type->id == TypeTableEntryIdFloat) {
switch (op->type->data.floating.bit_count) {
+ case 16:
+ memcpy(buf, &op->data.x_f16, 2); // TODO wrong when compiler is big endian
+ return;
case 32:
memcpy(buf, &op->data.x_f32, 4); // TODO wrong when compiler is big endian
return;
@@ -7228,6 +8270,9 @@ void float_write_ieee597(ConstExprValue *op, uint8_t *buf, bool is_big_endian) {
void float_read_ieee597(ConstExprValue *val, uint8_t *buf, bool is_big_endian) {
if (val->type->id == TypeTableEntryIdFloat) {
switch (val->type->data.floating.bit_count) {
+ case 16:
+ memcpy(&val->data.x_f16, buf, 2); // TODO wrong when compiler is big endian
+ return;
case 32:
memcpy(&val->data.x_f32, buf, 4); // TODO wrong when compiler is big endian
return;
@@ -7256,9 +8301,9 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc
assert(const_val->special != ConstValSpecialRuntime);
bool const_val_is_int = (const_val->type->id == TypeTableEntryIdInt ||
- const_val->type->id == TypeTableEntryIdNumLitInt);
+ const_val->type->id == TypeTableEntryIdComptimeInt);
bool const_val_is_float = (const_val->type->id == TypeTableEntryIdFloat ||
- const_val->type->id == TypeTableEntryIdNumLitFloat);
+ const_val->type->id == TypeTableEntryIdComptimeFloat);
if (other_type->id == TypeTableEntryIdFloat) {
return true;
} else if (other_type->id == TypeTableEntryIdInt && const_val_is_int) {
@@ -7278,7 +8323,7 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc
}
} else if (const_val_fits_in_num_lit(const_val, other_type)) {
return true;
- } else if (other_type->id == TypeTableEntryIdMaybe) {
+ } else if (other_type->id == TypeTableEntryIdOptional) {
TypeTableEntry *child_type = other_type->data.maybe.child_type;
if (const_val_fits_in_num_lit(const_val, child_type)) {
return true;
@@ -7302,7 +8347,7 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc
return true;
}
}
- if (explicit_cast && (other_type->id == TypeTableEntryIdInt || other_type->id == TypeTableEntryIdNumLitInt) &&
+ if (explicit_cast && (other_type->id == TypeTableEntryIdInt || other_type->id == TypeTableEntryIdComptimeInt) &&
const_val_is_float)
{
if (float_has_fraction(const_val)) {
@@ -7315,7 +8360,7 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc
buf_ptr(&other_type->name)));
return false;
} else {
- if (other_type->id == TypeTableEntryIdNumLitInt) {
+ if (other_type->id == TypeTableEntryIdComptimeInt) {
return true;
} else {
BigInt bigint;
@@ -7356,38 +8401,16 @@ static bool slice_is_const(TypeTableEntry *type) {
return type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const;
}
-static bool resolve_inferred_error_set(IrAnalyze *ira, TypeTableEntry *err_set_type, AstNode *source_node) {
- assert(err_set_type->id == TypeTableEntryIdErrorSet);
- FnTableEntry *infer_fn = err_set_type->data.error_set.infer_fn;
- if (infer_fn != nullptr) {
- if (infer_fn->anal_state == FnAnalStateInvalid) {
- return false;
- } else if (infer_fn->anal_state == FnAnalStateReady) {
- analyze_fn_body(ira->codegen, infer_fn);
- if (err_set_type->data.error_set.infer_fn != nullptr) {
- assert(ira->codegen->errors.length != 0);
- return false;
- }
- } else {
- ir_add_error_node(ira, source_node,
- buf_sprintf("cannot resolve inferred error set '%s': function '%s' not fully analyzed yet",
- buf_ptr(&err_set_type->name), buf_ptr(&err_set_type->data.error_set.infer_fn->symbol_name)));
- return false;
- }
- }
- return true;
-}
-
static TypeTableEntry *get_error_set_intersection(IrAnalyze *ira, TypeTableEntry *set1, TypeTableEntry *set2,
AstNode *source_node)
{
assert(set1->id == TypeTableEntryIdErrorSet);
assert(set2->id == TypeTableEntryIdErrorSet);
- if (!resolve_inferred_error_set(ira, set1, source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, set1, source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
- if (!resolve_inferred_error_set(ira, set2, source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, set2, source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
if (type_is_global_error_set(set1)) {
@@ -7433,89 +8456,122 @@ static TypeTableEntry *get_error_set_intersection(IrAnalyze *ira, TypeTableEntry
}
-static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry *expected_type,
- TypeTableEntry *actual_type, AstNode *source_node)
+static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry *wanted_type,
+ TypeTableEntry *actual_type, AstNode *source_node, bool wanted_is_mutable)
{
CodeGen *g = ira->codegen;
ConstCastOnly result = {};
result.id = ConstCastResultIdOk;
- if (expected_type == actual_type)
+ if (wanted_type == actual_type)
return result;
- // pointer const
- if (expected_type->id == TypeTableEntryIdPointer &&
- actual_type->id == TypeTableEntryIdPointer &&
- (!actual_type->data.pointer.is_const || expected_type->data.pointer.is_const) &&
- (!actual_type->data.pointer.is_volatile || expected_type->data.pointer.is_volatile) &&
- actual_type->data.pointer.bit_offset == expected_type->data.pointer.bit_offset &&
- actual_type->data.pointer.unaligned_bit_count == expected_type->data.pointer.unaligned_bit_count &&
- actual_type->data.pointer.alignment >= expected_type->data.pointer.alignment)
+ // * and [*] can do a const-cast-only to ?* and ?[*], respectively
+ // but not if there is a mutable parent pointer
+ // and not if the pointer is zero bits
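+ // (rationale: if this were allowed behind a mutable pointer, code could store
+ // null through the ?* view and the original non-optional pointer would then
+ // observe a null value, breaking its non-null guarantee)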
+ if (!wanted_is_mutable && wanted_type->id == TypeTableEntryIdOptional &&
+ wanted_type->data.maybe.child_type->id == TypeTableEntryIdPointer &&
+ actual_type->id == TypeTableEntryIdPointer && type_has_bits(actual_type))
{
- ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.pointer.child_type, actual_type->data.pointer.child_type, source_node);
+ ConstCastOnly child = types_match_const_cast_only(ira,
+ wanted_type->data.maybe.child_type, actual_type, source_node, wanted_is_mutable);
if (child.id != ConstCastResultIdOk) {
- result.id = ConstCastResultIdPointerChild;
- result.data.pointer_child = allocate_nonzero<ConstCastOnly>(1);
- *result.data.pointer_child = child;
+ result.id = ConstCastResultIdNullWrapPtr;
+ result.data.null_wrap_ptr_child = allocate_nonzero<ConstCastOnly>(1);
+ *result.data.null_wrap_ptr_child = child;
}
return result;
}
- // slice const
- if (is_slice(expected_type) && is_slice(actual_type)) {
- TypeTableEntry *actual_ptr_type = actual_type->data.structure.fields[slice_ptr_index].type_entry;
- TypeTableEntry *expected_ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
- if ((!actual_ptr_type->data.pointer.is_const || expected_ptr_type->data.pointer.is_const) &&
- (!actual_ptr_type->data.pointer.is_volatile || expected_ptr_type->data.pointer.is_volatile) &&
- actual_ptr_type->data.pointer.bit_offset == expected_ptr_type->data.pointer.bit_offset &&
- actual_ptr_type->data.pointer.unaligned_bit_count == expected_ptr_type->data.pointer.unaligned_bit_count &&
- actual_ptr_type->data.pointer.alignment >= expected_ptr_type->data.pointer.alignment)
+ // pointer const
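+ // (const and volatile may be added but never removed, ptr_len must match, and
+ // the actual alignment must be at least the wanted alignment; e.g. *i32
+ // const-casts to *const i32 but not the other way around)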
+ if (wanted_type->id == TypeTableEntryIdPointer && actual_type->id == TypeTableEntryIdPointer) {
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type, source_node, !wanted_type->data.pointer.is_const);
+ if (child.id != ConstCastResultIdOk) {
+ result.id = ConstCastResultIdPointerChild;
+ result.data.pointer_mismatch = allocate_nonzero<ConstCastPointerMismatch>(1);
+ result.data.pointer_mismatch->child = child;
+ result.data.pointer_mismatch->wanted_child = wanted_type->data.pointer.child_type;
+ result.data.pointer_mismatch->actual_child = actual_type->data.pointer.child_type;
+ return result;
+ }
+ if ((actual_type->data.pointer.ptr_len == wanted_type->data.pointer.ptr_len) &&
+ (!actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const) &&
+ (!actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile) &&
+ actual_type->data.pointer.bit_offset == wanted_type->data.pointer.bit_offset &&
+ actual_type->data.pointer.unaligned_bit_count == wanted_type->data.pointer.unaligned_bit_count &&
+ actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment)
{
- ConstCastOnly child = types_match_const_cast_only(ira, expected_ptr_type->data.pointer.child_type,
- actual_ptr_type->data.pointer.child_type, source_node);
+ return result;
+ }
+ }
+
+ // slice const
+ if (is_slice(wanted_type) && is_slice(actual_type)) {
+ TypeTableEntry *actual_ptr_type = actual_type->data.structure.fields[slice_ptr_index].type_entry;
+ TypeTableEntry *wanted_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ if ((!actual_ptr_type->data.pointer.is_const || wanted_ptr_type->data.pointer.is_const) &&
+ (!actual_ptr_type->data.pointer.is_volatile || wanted_ptr_type->data.pointer.is_volatile) &&
+ actual_ptr_type->data.pointer.bit_offset == wanted_ptr_type->data.pointer.bit_offset &&
+ actual_ptr_type->data.pointer.unaligned_bit_count == wanted_ptr_type->data.pointer.unaligned_bit_count &&
+ actual_ptr_type->data.pointer.alignment >= wanted_ptr_type->data.pointer.alignment)
+ {
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_ptr_type->data.pointer.child_type,
+ actual_ptr_type->data.pointer.child_type, source_node, !wanted_ptr_type->data.pointer.is_const);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdSliceChild;
- result.data.slice_child = allocate_nonzero<ConstCastOnly>(1);
- *result.data.slice_child = child;
+ result.data.slice_mismatch = allocate_nonzero<ConstCastSliceMismatch>(1);
+ result.data.slice_mismatch->child = child;
+ result.data.slice_mismatch->actual_child = actual_ptr_type->data.pointer.child_type;
+ result.data.slice_mismatch->wanted_child = wanted_ptr_type->data.pointer.child_type;
}
return result;
}
}
// maybe
- if (expected_type->id == TypeTableEntryIdMaybe && actual_type->id == TypeTableEntryIdMaybe) {
- ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.maybe.child_type, actual_type->data.maybe.child_type, source_node);
+ if (wanted_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) {
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.maybe.child_type,
+ actual_type->data.maybe.child_type, source_node, wanted_is_mutable);
if (child.id != ConstCastResultIdOk) {
- result.id = ConstCastResultIdNullableChild;
- result.data.nullable_child = allocate_nonzero<ConstCastOnly>(1);
- *result.data.nullable_child = child;
+ result.id = ConstCastResultIdOptionalChild;
+ result.data.optional = allocate_nonzero<ConstCastOptionalMismatch>(1);
+ result.data.optional->child = child;
+ result.data.optional->wanted_child = wanted_type->data.maybe.child_type;
+ result.data.optional->actual_child = actual_type->data.maybe.child_type;
}
return result;
}
// error union
- if (expected_type->id == TypeTableEntryIdErrorUnion && actual_type->id == TypeTableEntryIdErrorUnion) {
- ConstCastOnly payload_child = types_match_const_cast_only(ira, expected_type->data.error_union.payload_type, actual_type->data.error_union.payload_type, source_node);
+ if (wanted_type->id == TypeTableEntryIdErrorUnion && actual_type->id == TypeTableEntryIdErrorUnion) {
+ ConstCastOnly payload_child = types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type,
+ actual_type->data.error_union.payload_type, source_node, wanted_is_mutable);
if (payload_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdErrorUnionPayload;
- result.data.error_union_payload = allocate_nonzero<ConstCastOnly>(1);
- *result.data.error_union_payload = payload_child;
+ result.data.error_union_payload = allocate_nonzero<ConstCastErrUnionPayloadMismatch>(1);
+ result.data.error_union_payload->child = payload_child;
+ result.data.error_union_payload->wanted_payload = wanted_type->data.error_union.payload_type;
+ result.data.error_union_payload->actual_payload = actual_type->data.error_union.payload_type;
return result;
}
- ConstCastOnly error_set_child = types_match_const_cast_only(ira, expected_type->data.error_union.err_set_type, actual_type->data.error_union.err_set_type, source_node);
+ ConstCastOnly error_set_child = types_match_const_cast_only(ira, wanted_type->data.error_union.err_set_type,
+ actual_type->data.error_union.err_set_type, source_node, wanted_is_mutable);
if (error_set_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdErrorUnionErrorSet;
- result.data.error_union_error_set = allocate_nonzero<ConstCastOnly>(1);
- *result.data.error_union_error_set = error_set_child;
+ result.data.error_union_error_set = allocate_nonzero<ConstCastErrUnionErrSetMismatch>(1);
+ result.data.error_union_error_set->child = error_set_child;
+ result.data.error_union_error_set->wanted_err_set = wanted_type->data.error_union.err_set_type;
+ result.data.error_union_error_set->actual_err_set = actual_type->data.error_union.err_set_type;
return result;
}
return result;
}
// error set
- if (expected_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdErrorSet) {
+ if (wanted_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdErrorSet) {
TypeTableEntry *contained_set = actual_type;
- TypeTableEntry *container_set = expected_type;
+ TypeTableEntry *container_set = wanted_type;
// if the container set is inferred, then this will always work.
if (container_set->data.error_set.infer_fn != nullptr) {
@@ -7526,7 +8582,7 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
}
- if (!resolve_inferred_error_set(ira, contained_set, source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, contained_set, source_node)) {
result.id = ConstCastResultIdUnresolvedInferredErrSet;
return result;
}
@@ -7548,44 +8604,46 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
if (error_entry == nullptr) {
if (result.id == ConstCastResultIdOk) {
result.id = ConstCastResultIdErrSet;
+ result.data.error_set_mismatch = allocate<ConstCastErrSetMismatch>(1);
}
- result.data.error_set.missing_errors.append(contained_error_entry);
+ result.data.error_set_mismatch->missing_errors.append(contained_error_entry);
}
}
free(errors);
return result;
}
- if (expected_type == ira->codegen->builtin_types.entry_promise &&
+ if (wanted_type == ira->codegen->builtin_types.entry_promise &&
actual_type->id == TypeTableEntryIdPromise)
{
return result;
}
// fn
- if (expected_type->id == TypeTableEntryIdFn &&
+ if (wanted_type->id == TypeTableEntryIdFn &&
actual_type->id == TypeTableEntryIdFn)
{
- if (expected_type->data.fn.fn_type_id.alignment > actual_type->data.fn.fn_type_id.alignment) {
+ if (wanted_type->data.fn.fn_type_id.alignment > actual_type->data.fn.fn_type_id.alignment) {
result.id = ConstCastResultIdFnAlign;
return result;
}
- if (expected_type->data.fn.fn_type_id.cc != actual_type->data.fn.fn_type_id.cc) {
+ if (wanted_type->data.fn.fn_type_id.cc != actual_type->data.fn.fn_type_id.cc) {
result.id = ConstCastResultIdFnCC;
return result;
}
- if (expected_type->data.fn.fn_type_id.is_var_args != actual_type->data.fn.fn_type_id.is_var_args) {
+ if (wanted_type->data.fn.fn_type_id.is_var_args != actual_type->data.fn.fn_type_id.is_var_args) {
result.id = ConstCastResultIdFnVarArgs;
return result;
}
- if (expected_type->data.fn.is_generic != actual_type->data.fn.is_generic) {
+ if (wanted_type->data.fn.is_generic != actual_type->data.fn.is_generic) {
result.id = ConstCastResultIdFnIsGeneric;
return result;
}
- if (!expected_type->data.fn.is_generic &&
+ if (!wanted_type->data.fn.is_generic &&
actual_type->data.fn.fn_type_id.return_type->id != TypeTableEntryIdUnreachable)
{
- ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.fn.fn_type_id.return_type, actual_type->data.fn.fn_type_id.return_type, source_node);
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.fn.fn_type_id.return_type,
+ actual_type->data.fn.fn_type_id.return_type, source_node, false);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdFnReturnType;
result.data.return_type = allocate_nonzero<ConstCastOnly>(1);
@@ -7593,9 +8651,11 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
}
}
- if (!expected_type->data.fn.is_generic && expected_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- ConstCastOnly child = types_match_const_cast_only(ira, actual_type->data.fn.fn_type_id.async_allocator_type,
- expected_type->data.fn.fn_type_id.async_allocator_type, source_node);
+ if (!wanted_type->data.fn.is_generic && wanted_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
+ ConstCastOnly child = types_match_const_cast_only(ira,
+ actual_type->data.fn.fn_type_id.async_allocator_type,
+ wanted_type->data.fn.fn_type_id.async_allocator_type,
+ source_node, false);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdAsyncAllocatorType;
result.data.async_allocator_type = allocate_nonzero<ConstCastOnly>(1);
@@ -7603,22 +8663,23 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
}
}
- if (expected_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
+ if (wanted_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
result.id = ConstCastResultIdFnArgCount;
return result;
}
- if (expected_type->data.fn.fn_type_id.next_param_index != actual_type->data.fn.fn_type_id.next_param_index) {
+ if (wanted_type->data.fn.fn_type_id.next_param_index != actual_type->data.fn.fn_type_id.next_param_index) {
result.id = ConstCastResultIdFnGenericArgCount;
return result;
}
- assert(expected_type->data.fn.is_generic ||
- expected_type->data.fn.fn_type_id.next_param_index == expected_type->data.fn.fn_type_id.param_count);
- for (size_t i = 0; i < expected_type->data.fn.fn_type_id.next_param_index; i += 1) {
+ assert(wanted_type->data.fn.is_generic ||
+ wanted_type->data.fn.fn_type_id.next_param_index == wanted_type->data.fn.fn_type_id.param_count);
+ for (size_t i = 0; i < wanted_type->data.fn.fn_type_id.next_param_index; i += 1) {
// note it's reversed for parameters
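+ // (a fn accepting *const T can stand in for a wanted fn type accepting *T:
+ // every argument the wanted signature permits still const-casts to the
+ // parameter type the actual fn accepts)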
FnTypeParamInfo *actual_param_info = &actual_type->data.fn.fn_type_id.param_info[i];
- FnTypeParamInfo *expected_param_info = &expected_type->data.fn.fn_type_id.param_info[i];
+ FnTypeParamInfo *expected_param_info = &wanted_type->data.fn.fn_type_id.param_info[i];
- ConstCastOnly arg_child = types_match_const_cast_only(ira, actual_param_info->type, expected_param_info->type, source_node);
+ ConstCastOnly arg_child = types_match_const_cast_only(ira, actual_param_info->type,
+ expected_param_info->type, source_node, false);
if (arg_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdFnArg;
result.data.fn_arg.arg_index = i;
@@ -7637,271 +8698,19 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
}
result.id = ConstCastResultIdType;
+ result.data.type_mismatch = allocate_nonzero<ConstCastTypeMismatch>(1);
+ result.data.type_mismatch->wanted_type = wanted_type;
+ result.data.type_mismatch->actual_type = actual_type;
return result;
}
-enum ImplicitCastMatchResult {
- ImplicitCastMatchResultNo,
- ImplicitCastMatchResultYes,
- ImplicitCastMatchResultReportedError,
-};
-
-static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, TypeTableEntry *expected_type,
- TypeTableEntry *actual_type, IrInstruction *value)
-{
- AstNode *source_node = value->source_node;
- ConstCastOnly const_cast_result = types_match_const_cast_only(ira, expected_type, actual_type, source_node);
- if (const_cast_result.id == ConstCastResultIdOk) {
- return ImplicitCastMatchResultYes;
- }
-
- // if we got here with error sets, make an error showing the incompatibilities
- ZigList<ErrorTableEntry *> *missing_errors = nullptr;
- if (const_cast_result.id == ConstCastResultIdErrSet) {
- missing_errors = &const_cast_result.data.error_set.missing_errors;
- }
- if (const_cast_result.id == ConstCastResultIdErrorUnionErrorSet) {
- if (const_cast_result.data.error_union_error_set->id == ConstCastResultIdErrSet) {
- missing_errors = &const_cast_result.data.error_union_error_set->data.error_set.missing_errors;
- } else if (const_cast_result.data.error_union_error_set->id == ConstCastResultIdErrSetGlobal) {
- ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name)));
- add_error_note(ira->codegen, msg, value->source_node,
- buf_sprintf("unable to cast global error set into smaller set"));
- return ImplicitCastMatchResultReportedError;
- }
- } else if (const_cast_result.id == ConstCastResultIdErrSetGlobal) {
- ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name)));
- add_error_note(ira->codegen, msg, value->source_node,
- buf_sprintf("unable to cast global error set into smaller set"));
- return ImplicitCastMatchResultReportedError;
- }
- if (missing_errors != nullptr) {
- ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name)));
- for (size_t i = 0; i < missing_errors->length; i += 1) {
- ErrorTableEntry *error_entry = missing_errors->at(i);
- add_error_note(ira->codegen, msg, error_entry->decl_node,
- buf_sprintf("'error.%s' not a member of destination error set", buf_ptr(&error_entry->name)));
- }
-
- return ImplicitCastMatchResultReportedError;
- }
-
- // implicit conversion from non maybe type to maybe type
- if (expected_type->id == TypeTableEntryIdMaybe &&
- ir_types_match_with_implicit_cast(ira, expected_type->data.maybe.child_type, actual_type, value))
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit conversion from null literal to maybe type
- if (expected_type->id == TypeTableEntryIdMaybe &&
- actual_type->id == TypeTableEntryIdNullLit)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit T to U!T
- if (expected_type->id == TypeTableEntryIdErrorUnion &&
- ir_types_match_with_implicit_cast(ira, expected_type->data.error_union.payload_type, actual_type, value))
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit conversion from error set to error union type
- if (expected_type->id == TypeTableEntryIdErrorUnion &&
- actual_type->id == TypeTableEntryIdErrorSet)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit conversion from T to U!?T
- if (expected_type->id == TypeTableEntryIdErrorUnion &&
- expected_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe &&
- ir_types_match_with_implicit_cast(ira,
- expected_type->data.error_union.payload_type->data.maybe.child_type,
- actual_type, value))
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit widening conversion
- if (expected_type->id == TypeTableEntryIdInt &&
- actual_type->id == TypeTableEntryIdInt &&
- expected_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
- expected_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // small enough unsigned ints can get casted to large enough signed ints
- if (expected_type->id == TypeTableEntryIdInt && expected_type->data.integral.is_signed &&
- actual_type->id == TypeTableEntryIdInt && !actual_type->data.integral.is_signed &&
- expected_type->data.integral.bit_count > actual_type->data.integral.bit_count)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit float widening conversion
- if (expected_type->id == TypeTableEntryIdFloat &&
- actual_type->id == TypeTableEntryIdFloat &&
- expected_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit [N]T to []const T
- if (is_slice(expected_type) && actual_type->id == TypeTableEntryIdArray) {
- TypeTableEntry *ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
- assert(ptr_type->id == TypeTableEntryIdPointer);
-
- if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit &const [N]T to []const T
- if (is_slice(expected_type) &&
- actual_type->id == TypeTableEntryIdPointer &&
- actual_type->data.pointer.is_const &&
- actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
- {
- TypeTableEntry *ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
- assert(ptr_type->id == TypeTableEntryIdPointer);
-
- TypeTableEntry *array_type = actual_type->data.pointer.child_type;
-
- if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit [N]T to &const []const T
- if (expected_type->id == TypeTableEntryIdPointer &&
- expected_type->data.pointer.is_const &&
- is_slice(expected_type->data.pointer.child_type) &&
- actual_type->id == TypeTableEntryIdArray)
- {
- TypeTableEntry *ptr_type =
- expected_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
- assert(ptr_type->id == TypeTableEntryIdPointer);
- if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit [N]T to ?[]const T
- if (expected_type->id == TypeTableEntryIdMaybe &&
- is_slice(expected_type->data.maybe.child_type) &&
- actual_type->id == TypeTableEntryIdArray)
- {
- TypeTableEntry *ptr_type =
- expected_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
- assert(ptr_type->id == TypeTableEntryIdPointer);
- if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
- }
-
-
- // implicit number literal to typed number
- // implicit number literal to &const integer
- if (actual_type->id == TypeTableEntryIdNumLitFloat ||
- actual_type->id == TypeTableEntryIdNumLitInt)
- {
- if (expected_type->id == TypeTableEntryIdPointer &&
- expected_type->data.pointer.is_const)
- {
- if (ir_num_lit_fits_in_other_type(ira, value, expected_type->data.pointer.child_type, false)) {
- return ImplicitCastMatchResultYes;
- } else {
- return ImplicitCastMatchResultReportedError;
- }
- } else if (ir_num_lit_fits_in_other_type(ira, value, expected_type, false)) {
- return ImplicitCastMatchResultYes;
- } else {
- return ImplicitCastMatchResultReportedError;
- }
- }
-
- // implicit typed number to integer or float literal.
- // works when the number is known
- if (value->value.special == ConstValSpecialStatic) {
- if (actual_type->id == TypeTableEntryIdInt && expected_type->id == TypeTableEntryIdNumLitInt) {
- return ImplicitCastMatchResultYes;
- } else if (actual_type->id == TypeTableEntryIdFloat && expected_type->id == TypeTableEntryIdNumLitFloat) {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit union to its enum tag type
- if (expected_type->id == TypeTableEntryIdEnum && actual_type->id == TypeTableEntryIdUnion &&
- (actual_type->data.unionation.decl_node->data.container_decl.auto_enum ||
- actual_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
- {
- type_ensure_zero_bits_known(ira->codegen, actual_type);
- if (actual_type->data.unionation.tag_type == expected_type) {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit enum to union which has the enum as the tag type
- if (expected_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum &&
- (expected_type->data.unionation.decl_node->data.container_decl.auto_enum ||
- expected_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
- {
- type_ensure_zero_bits_known(ira->codegen, expected_type);
- if (expected_type->data.unionation.tag_type == actual_type) {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit enum to &const union which has the enum as the tag type
- if (actual_type->id == TypeTableEntryIdEnum && expected_type->id == TypeTableEntryIdPointer) {
- TypeTableEntry *union_type = expected_type->data.pointer.child_type;
- if (union_type->data.unionation.decl_node->data.container_decl.auto_enum ||
- union_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)
- {
- type_ensure_zero_bits_known(ira->codegen, union_type);
- if (union_type->data.unionation.tag_type == actual_type) {
- return ImplicitCastMatchResultYes;
- }
- }
- }
-
- // implicit undefined literal to anything
- if (actual_type->id == TypeTableEntryIdUndefLit) {
- return ImplicitCastMatchResultYes;
- }
-
- // implicitly take a const pointer to something
- if (!type_requires_comptime(actual_type)) {
- TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
- if (types_match_const_cast_only(ira, expected_type, const_ptr_actual, source_node).id == ConstCastResultIdOk) {
- return ImplicitCastMatchResultYes;
- }
- }
-
- return ImplicitCastMatchResultNo;
-}
-
static void update_errors_helper(CodeGen *g, ErrorTableEntry ***errors, size_t *errors_count) {
size_t old_errors_count = *errors_count;
*errors_count = g->errors_by_index.length;
*errors = reallocate<ErrorTableEntry *>(*errors, old_errors_count, *errors_count);
}
-static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, IrInstruction **instructions, size_t instruction_count) {
+static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, TypeTableEntry *expected_type, IrInstruction **instructions, size_t instruction_count) {
assert(instruction_count >= 1);
IrInstruction *prev_inst = instructions[0];
if (type_is_invalid(prev_inst->value.type)) {
@@ -7915,7 +8724,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
err_set_type = ira->codegen->builtin_types.entry_global_error_set;
} else {
err_set_type = prev_inst->value.type;
- if (!resolve_inferred_error_set(ira, err_set_type, prev_inst->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, err_set_type, prev_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
update_errors_helper(ira->codegen, &errors, &errors_count);
@@ -7928,7 +8737,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
}
- bool any_are_null = (prev_inst->value.type->id == TypeTableEntryIdNullLit);
+ bool any_are_null = (prev_inst->value.type->id == TypeTableEntryIdNull);
bool convert_to_const_slice = false;
for (size_t i = 1; i < instruction_count; i += 1) {
IrInstruction *cur_inst = instructions[i];
@@ -7948,23 +8757,13 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
continue;
}
- if (prev_type->id == TypeTableEntryIdNullLit) {
- prev_inst = cur_inst;
- continue;
- }
-
- if (cur_type->id == TypeTableEntryIdNullLit) {
- any_are_null = true;
- continue;
- }
-
if (prev_type->id == TypeTableEntryIdErrorSet) {
assert(err_set_type != nullptr);
if (cur_type->id == TypeTableEntryIdErrorSet) {
if (type_is_global_error_set(err_set_type)) {
continue;
}
- if (!resolve_inferred_error_set(ira, cur_type, cur_inst->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, cur_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
if (type_is_global_error_set(cur_type)) {
@@ -8030,7 +8829,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
continue;
}
TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type;
- if (!resolve_inferred_error_set(ira, cur_err_set_type, cur_inst->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
if (type_is_global_error_set(cur_err_set_type)) {
@@ -8093,7 +8892,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (err_set_type != nullptr && type_is_global_error_set(err_set_type)) {
continue;
}
- if (!resolve_inferred_error_set(ira, cur_type, cur_inst->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, cur_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
@@ -8138,9 +8937,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
TypeTableEntry *cur_payload_type = cur_type->data.error_union.payload_type;
bool const_cast_prev = types_match_const_cast_only(ira, prev_payload_type, cur_payload_type,
- source_node).id == ConstCastResultIdOk;
+ source_node, false).id == ConstCastResultIdOk;
bool const_cast_cur = types_match_const_cast_only(ira, cur_payload_type, prev_payload_type,
- source_node).id == ConstCastResultIdOk;
+ source_node, false).id == ConstCastResultIdOk;
if (const_cast_prev || const_cast_cur) {
if (const_cast_cur) {
@@ -8150,11 +8949,11 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
TypeTableEntry *prev_err_set_type = (err_set_type == nullptr) ? prev_type->data.error_union.err_set_type : err_set_type;
TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type;
- if (!resolve_inferred_error_set(ira, prev_err_set_type, cur_inst->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, prev_err_set_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
- if (!resolve_inferred_error_set(ira, cur_err_set_type, cur_inst->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
@@ -8217,11 +9016,21 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
}
- if (types_match_const_cast_only(ira, prev_type, cur_type, source_node).id == ConstCastResultIdOk) {
+ if (prev_type->id == TypeTableEntryIdNull) {
+ prev_inst = cur_inst;
continue;
}
- if (types_match_const_cast_only(ira, cur_type, prev_type, source_node).id == ConstCastResultIdOk) {
+ if (cur_type->id == TypeTableEntryIdNull) {
+ any_are_null = true;
+ continue;
+ }
+
+ if (types_match_const_cast_only(ira, prev_type, cur_type, source_node, false).id == ConstCastResultIdOk) {
+ continue;
+ }
+
+ if (types_match_const_cast_only(ira, cur_type, prev_type, source_node, false).id == ConstCastResultIdOk) {
prev_inst = cur_inst;
continue;
}
@@ -8244,17 +9053,19 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
if (prev_type->id == TypeTableEntryIdErrorUnion &&
- types_match_const_cast_only(ira, prev_type->data.error_union.payload_type, cur_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, prev_type->data.error_union.payload_type, cur_type,
+ source_node, false).id == ConstCastResultIdOk)
{
continue;
}
if (cur_type->id == TypeTableEntryIdErrorUnion &&
- types_match_const_cast_only(ira, cur_type->data.error_union.payload_type, prev_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, cur_type->data.error_union.payload_type, prev_type,
+ source_node, false).id == ConstCastResultIdOk)
{
if (err_set_type != nullptr) {
TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type;
- if (!resolve_inferred_error_set(ira, cur_err_set_type, cur_inst->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
if (type_is_global_error_set(cur_err_set_type) || type_is_global_error_set(err_set_type)) {
@@ -8271,30 +9082,32 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
continue;
}
- if (prev_type->id == TypeTableEntryIdMaybe &&
- types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type, source_node).id == ConstCastResultIdOk)
+ if (prev_type->id == TypeTableEntryIdOptional &&
+ types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type,
+ source_node, false).id == ConstCastResultIdOk)
{
continue;
}
- if (cur_type->id == TypeTableEntryIdMaybe &&
- types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type, source_node).id == ConstCastResultIdOk)
+ if (cur_type->id == TypeTableEntryIdOptional &&
+ types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type,
+ source_node, false).id == ConstCastResultIdOk)
{
prev_inst = cur_inst;
continue;
}
- if (cur_type->id == TypeTableEntryIdUndefLit) {
+ if (cur_type->id == TypeTableEntryIdUndefined) {
continue;
}
- if (prev_type->id == TypeTableEntryIdUndefLit) {
+ if (prev_type->id == TypeTableEntryIdUndefined) {
prev_inst = cur_inst;
continue;
}
- if (prev_type->id == TypeTableEntryIdNumLitInt ||
- prev_type->id == TypeTableEntryIdNumLitFloat)
+ if (prev_type->id == TypeTableEntryIdComptimeInt ||
+ prev_type->id == TypeTableEntryIdComptimeFloat)
{
if (ir_num_lit_fits_in_other_type(ira, prev_inst, cur_type, false)) {
prev_inst = cur_inst;
@@ -8304,8 +9117,8 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
}
- if (cur_type->id == TypeTableEntryIdNumLitInt ||
- cur_type->id == TypeTableEntryIdNumLitFloat)
+ if (cur_type->id == TypeTableEntryIdComptimeInt ||
+ cur_type->id == TypeTableEntryIdComptimeFloat)
{
if (ir_num_lit_fits_in_other_type(ira, cur_inst, prev_type, false)) {
continue;
@@ -8315,8 +9128,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
if (cur_type->id == TypeTableEntryIdArray && prev_type->id == TypeTableEntryIdArray &&
- cur_type->data.array.len != prev_type->data.array.len &&
- types_match_const_cast_only(ira, cur_type->data.array.child_type, prev_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ cur_type->data.array.len != prev_type->data.array.len &&
+ types_match_const_cast_only(ira, cur_type->data.array.child_type, prev_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
convert_to_const_slice = true;
prev_inst = cur_inst;
@@ -8324,8 +9138,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
if (cur_type->id == TypeTableEntryIdArray && prev_type->id == TypeTableEntryIdArray &&
- cur_type->data.array.len != prev_type->data.array.len &&
- types_match_const_cast_only(ira, prev_type->data.array.child_type, cur_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ cur_type->data.array.len != prev_type->data.array.len &&
+ types_match_const_cast_only(ira, prev_type->data.array.child_type, cur_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
convert_to_const_slice = true;
continue;
@@ -8334,8 +9149,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (cur_type->id == TypeTableEntryIdArray && is_slice(prev_type) &&
(prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const ||
cur_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
- cur_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira,
+ prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
+ cur_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
{
convert_to_const_slice = false;
continue;
@@ -8344,8 +9160,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (prev_type->id == TypeTableEntryIdArray && is_slice(cur_type) &&
(cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const ||
prev_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
- prev_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira,
+ cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
+ prev_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
{
prev_inst = cur_inst;
convert_to_const_slice = false;
@@ -8390,7 +9207,11 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (convert_to_const_slice) {
assert(prev_inst->value.type->id == TypeTableEntryIdArray);
- TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, prev_inst->value.type->data.array.child_type, true);
+ TypeTableEntry *ptr_type = get_pointer_to_type_extra(
+ ira->codegen, prev_inst->value.type->data.array.child_type,
+ true, false, PtrLenUnknown,
+ get_abi_alignment(ira->codegen, prev_inst->value.type->data.array.child_type),
+ 0, 0);
TypeTableEntry *slice_type = get_slice_type(ira->codegen, ptr_type);
if (err_set_type != nullptr) {
return get_error_union_type(ira->codegen, err_set_type, slice_type);
@@ -8400,34 +9221,36 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
} else if (err_set_type != nullptr) {
if (prev_inst->value.type->id == TypeTableEntryIdErrorSet) {
return err_set_type;
+ } else if (prev_inst->value.type->id == TypeTableEntryIdErrorUnion) {
+ return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type->data.error_union.payload_type);
+ } else if (expected_type != nullptr && expected_type->id == TypeTableEntryIdErrorUnion) {
+ return get_error_union_type(ira->codegen, err_set_type, expected_type->data.error_union.payload_type);
} else {
- if (prev_inst->value.type->id == TypeTableEntryIdNumLitInt ||
- prev_inst->value.type->id == TypeTableEntryIdNumLitFloat)
+ if (prev_inst->value.type->id == TypeTableEntryIdComptimeInt ||
+ prev_inst->value.type->id == TypeTableEntryIdComptimeFloat)
{
ir_add_error_node(ira, source_node,
buf_sprintf("unable to make error union out of number literal"));
return ira->codegen->builtin_types.entry_invalid;
- } else if (prev_inst->value.type->id == TypeTableEntryIdNullLit) {
+ } else if (prev_inst->value.type->id == TypeTableEntryIdNull) {
ir_add_error_node(ira, source_node,
buf_sprintf("unable to make error union out of null literal"));
return ira->codegen->builtin_types.entry_invalid;
- } else if (prev_inst->value.type->id == TypeTableEntryIdErrorUnion) {
- return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type->data.error_union.payload_type);
} else {
return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type);
}
}
- } else if (any_are_null && prev_inst->value.type->id != TypeTableEntryIdNullLit) {
- if (prev_inst->value.type->id == TypeTableEntryIdNumLitInt ||
- prev_inst->value.type->id == TypeTableEntryIdNumLitFloat)
+ } else if (any_are_null && prev_inst->value.type->id != TypeTableEntryIdNull) {
+ if (prev_inst->value.type->id == TypeTableEntryIdComptimeInt ||
+ prev_inst->value.type->id == TypeTableEntryIdComptimeFloat)
{
ir_add_error_node(ira, source_node,
buf_sprintf("unable to make maybe out of number literal"));
return ira->codegen->builtin_types.entry_invalid;
- } else if (prev_inst->value.type->id == TypeTableEntryIdMaybe) {
+ } else if (prev_inst->value.type->id == TypeTableEntryIdOptional) {
return prev_inst->value.type;
} else {
- return get_maybe_type(ira->codegen, prev_inst->value.type);
+ return get_optional_type(ira->codegen, prev_inst->value.type);
}
} else {
return prev_inst->value.type;
@@ -8448,10 +9271,15 @@ static void copy_const_val(ConstExprValue *dest, ConstExprValue *src, bool same_
*dest = *src;
if (!same_global_refs) {
dest->global_refs = global_refs;
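+ // `*dest = *src` above is a shallow copy, so a struct's field array would
+ // still be shared with the source; give the copy its own field storage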
+ if (dest->type->id == TypeTableEntryIdStruct) {
+ dest->data.x_struct.fields = allocate_nonzero<ConstExprValue>(dest->type->data.structure.src_field_count);
+ memcpy(dest->data.x_struct.fields, src->data.x_struct.fields, sizeof(ConstExprValue) * dest->type->data.structure.src_field_count);
+ }
}
}
-static void eval_const_expr_implicit_cast(CastOp cast_op,
+static bool eval_const_expr_implicit_cast(IrAnalyze *ira, IrInstruction *source_instr,
+ CastOp cast_op,
ConstExprValue *other_val, TypeTableEntry *other_type,
ConstExprValue *const_val, TypeTableEntry *new_type)
{
@@ -8462,17 +9290,23 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
case CastOpNoCast:
zig_unreachable();
case CastOpErrSet:
+ case CastOpBitCast:
+ case CastOpPtrOfArrayToSlice:
zig_panic("TODO");
case CastOpNoop:
{
- copy_const_val(const_val, other_val, other_val->special == ConstValSpecialStatic);
+ bool same_global_refs = other_val->special == ConstValSpecialStatic;
+ copy_const_val(const_val, other_val, same_global_refs);
const_val->type = new_type;
break;
}
case CastOpNumLitToConcrete:
- if (other_val->type->id == TypeTableEntryIdNumLitFloat) {
+ if (other_val->type->id == TypeTableEntryIdComptimeFloat) {
assert(new_type->id == TypeTableEntryIdFloat);
switch (new_type->data.floating.bit_count) {
+ case 16:
+ const_val->data.x_f16 = bigfloat_to_f16(&other_val->data.x_bigfloat);
+ break;
case 32:
const_val->data.x_f32 = bigfloat_to_f32(&other_val->data.x_bigfloat);
break;
@@ -8485,7 +9319,7 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
default:
zig_unreachable();
}
- } else if (other_val->type->id == TypeTableEntryIdNumLitInt) {
+ } else if (other_val->type->id == TypeTableEntryIdComptimeInt) {
bigint_init_bigint(&const_val->data.x_bigint, &other_val->data.x_bigint);
} else {
zig_unreachable();
@@ -8503,6 +9337,9 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
BigFloat bigfloat;
bigfloat_init_bigint(&bigfloat, &other_val->data.x_bigint);
switch (new_type->data.floating.bit_count) {
+ case 16:
+ const_val->data.x_f16 = bigfloat_to_f16(&bigfloat);
+ break;
case 32:
const_val->data.x_f32 = bigfloat_to_f32(&bigfloat);
break;
@@ -8520,6 +9357,20 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
}
case CastOpFloatToInt:
float_init_bigint(&const_val->data.x_bigint, other_val);
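+ // reject comptime float-to-int casts whose integral value does not fit the
+ // destination type, e.g. 300.0 cast to u8 reports
+ // "integer value '300' cannot be stored in type 'u8'"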
+ if (new_type->id == TypeTableEntryIdInt) {
+ if (!bigint_fits_in_bits(&const_val->data.x_bigint, new_type->data.integral.bit_count,
+ new_type->data.integral.is_signed))
+ {
+ Buf *int_buf = buf_alloc();
+ bigint_append_buf(int_buf, &const_val->data.x_bigint, 10);
+
+ ir_add_error(ira, source_instr,
+ buf_sprintf("integer value '%s' cannot be stored in type '%s'",
+ buf_ptr(int_buf), buf_ptr(&new_type->name)));
+ return false;
+ }
+ }
+
const_val->special = ConstValSpecialStatic;
break;
case CastOpBoolToInt:
@@ -8527,17 +9378,21 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
const_val->special = ConstValSpecialStatic;
break;
}
+ return true;
}
static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value,
TypeTableEntry *wanted_type, CastOp cast_op, bool need_alloca)
{
- if (value->value.special != ConstValSpecialRuntime &&
+ if ((instr_is_comptime(value) || !type_has_bits(wanted_type)) &&
cast_op != CastOpResizeSlice && cast_op != CastOpBytesToSlice)
{
IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
source_instr->source_node, wanted_type);
- eval_const_expr_implicit_cast(cast_op, &value->value, value->value.type,
- &result->value, wanted_type);
+ if (!eval_const_expr_implicit_cast(ira, source_instr, cast_op, &value->value, value->value.type,
+ &result->value, wanted_type))
+ {
+ return ira->codegen->invalid_instruction;
+ }
return result;
} else {
IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node, wanted_type, value, cast_op);
@@ -8549,17 +9404,69 @@ static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
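+// Casts a single-item pointer to an array into an unknown-length pointer ([*])
+// to its first element. At comptime this builds a const pointer into the array
+// (elem_index 0); at runtime the representation is the same, so it lowers to a
+// bit-cast. The result keeps the alignment of the operand pointer.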
+static IrInstruction *ir_resolve_ptr_of_array_to_unknown_len_ptr(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, TypeTableEntry *wanted_type)
+{
+ assert(value->value.type->id == TypeTableEntryIdPointer);
+ wanted_type = adjust_ptr_align(ira->codegen, wanted_type, value->value.type->data.pointer.alignment);
+
+ if (instr_is_comptime(value)) {
+ ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value);
+ if (pointee->special != ConstValSpecialRuntime) {
+ IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
+ source_instr->source_node, wanted_type);
+ result->value.type = wanted_type;
+ result->value.data.x_ptr.special = ConstPtrSpecialBaseArray;
+ result->value.data.x_ptr.mut = value->value.data.x_ptr.mut;
+ result->value.data.x_ptr.data.base_array.array_val = pointee;
+ result->value.data.x_ptr.data.base_array.elem_index = 0;
+ result->value.data.x_ptr.data.base_array.is_cstr = false;
+ return result;
+ }
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpBitCast);
+ result->value.type = wanted_type;
+ return result;
+}
+
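+// Casts a single-item pointer to an array into a slice over the whole array.
+// At comptime this builds a constant slice (pointer into the array, len equal
+// to the array length); at runtime it emits CastOpPtrOfArrayToSlice and
+// reserves a stack slot for the resulting slice value.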
+static IrInstruction *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, TypeTableEntry *wanted_type)
+{
+ wanted_type = adjust_slice_align(ira->codegen, wanted_type, value->value.type->data.pointer.alignment);
+
+ if (instr_is_comptime(value)) {
+ ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value);
+ if (pointee->special != ConstValSpecialRuntime) {
+ assert(value->value.type->id == TypeTableEntryIdPointer);
+ TypeTableEntry *array_type = value->value.type->data.pointer.child_type;
+ assert(is_slice(wanted_type));
+ bool is_const = wanted_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const;
+
+ IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
+ source_instr->source_node, wanted_type);
+ init_const_slice(ira->codegen, &result->value, pointee, 0, array_type->data.array.len, is_const);
+ result->value.data.x_struct.fields[slice_ptr_index].data.x_ptr.mut =
+ value->value.data.x_ptr.mut;
+ result->value.type = wanted_type;
+ return result;
+ }
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpPtrOfArrayToSlice);
+ result->value.type = wanted_type;
+ ir_add_alloca(ira, result, wanted_type);
+ return result;
+}
+
static bool is_container(TypeTableEntry *type) {
return type->id == TypeTableEntryIdStruct ||
type->id == TypeTableEntryIdEnum ||
type->id == TypeTableEntryIdUnion;
}
-static bool is_u8(TypeTableEntry *type) {
- return type->id == TypeTableEntryIdInt &&
- !type->data.integral.is_signed && type->data.integral.bit_count == 8;
-}
-
static IrBasicBlock *ir_get_new_bb(IrAnalyze *ira, IrBasicBlock *old_bb, IrInstruction *ref_old_instruction) {
assert(old_bb);
@@ -8661,26 +9568,9 @@ static TypeTableEntry *ir_finish_anal(IrAnalyze *ira, TypeTableEntry *result_typ
}
static IrInstruction *ir_get_const(IrAnalyze *ira, IrInstruction *old_instruction) {
- IrInstruction *new_instruction;
- if (old_instruction->id == IrInstructionIdVarPtr) {
- IrInstructionVarPtr *old_var_ptr_instruction = (IrInstructionVarPtr *)old_instruction;
- IrInstructionVarPtr *var_ptr_instruction = ir_create_instruction<IrInstructionVarPtr>(&ira->new_irb,
- old_instruction->scope, old_instruction->source_node);
- var_ptr_instruction->var = old_var_ptr_instruction->var;
- new_instruction = &var_ptr_instruction->base;
- } else if (old_instruction->id == IrInstructionIdFieldPtr) {
- IrInstructionFieldPtr *field_ptr_instruction = ir_create_instruction<IrInstructionFieldPtr>(&ira->new_irb,
- old_instruction->scope, old_instruction->source_node);
- new_instruction = &field_ptr_instruction->base;
- } else if (old_instruction->id == IrInstructionIdElemPtr) {
- IrInstructionElemPtr *elem_ptr_instruction = ir_create_instruction<IrInstructionElemPtr>(&ira->new_irb,
- old_instruction->scope, old_instruction->source_node);
- new_instruction = &elem_ptr_instruction->base;
- } else {
- IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb,
- old_instruction->scope, old_instruction->source_node);
- new_instruction = &const_instruction->base;
- }
+ IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb,
+ old_instruction->scope, old_instruction->source_node);
+ IrInstruction *new_instruction = &const_instruction->base;
new_instruction->value.special = ConstValSpecialStatic;
return new_instruction;
}
@@ -8700,34 +9590,15 @@ static IrInstruction *ir_get_const_ptr(IrAnalyze *ira, IrInstruction *instructio
ConstExprValue *pointee, TypeTableEntry *pointee_type,
ConstPtrMut ptr_mut, bool ptr_is_const, bool ptr_is_volatile, uint32_t ptr_align)
{
- if (pointee_type->id == TypeTableEntryIdMetaType) {
- TypeTableEntry *type_entry = pointee->data.x_type;
- if (type_entry->id == TypeTableEntryIdUnreachable) {
- ir_add_error(ira, instruction, buf_sprintf("pointer to noreturn not allowed"));
- return ira->codegen->invalid_instruction;
- }
-
- IrInstruction *const_instr = ir_get_const(ira, instruction);
- ConstExprValue *const_val = &const_instr->value;
- const_val->type = pointee_type;
- type_ensure_zero_bits_known(ira->codegen, type_entry);
- if (type_is_invalid(type_entry)) {
- return ira->codegen->invalid_instruction;
- }
- const_val->data.x_type = get_pointer_to_type_extra(ira->codegen, type_entry,
- ptr_is_const, ptr_is_volatile, get_abi_alignment(ira->codegen, type_entry), 0, 0);
- return const_instr;
- } else {
- TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type,
- ptr_is_const, ptr_is_volatile, ptr_align, 0, 0);
- IrInstruction *const_instr = ir_get_const(ira, instruction);
- ConstExprValue *const_val = &const_instr->value;
- const_val->type = ptr_type;
- const_val->data.x_ptr.special = ConstPtrSpecialRef;
- const_val->data.x_ptr.mut = ptr_mut;
- const_val->data.x_ptr.data.ref.pointee = pointee;
- return const_instr;
- }
+ TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type,
+ ptr_is_const, ptr_is_volatile, PtrLenSingle, ptr_align, 0, 0);
+ IrInstruction *const_instr = ir_get_const(ira, instruction);
+ ConstExprValue *const_val = &const_instr->value;
+ const_val->type = ptr_type;
+ const_val->data.x_ptr.special = ConstPtrSpecialRef;
+ const_val->data.x_ptr.mut = ptr_mut;
+ const_val->data.x_ptr.data.ref.pointee = pointee;
+ return const_instr;
}
static TypeTableEntry *ir_analyze_const_ptr(IrAnalyze *ira, IrInstruction *instruction,
@@ -8757,6 +9628,9 @@ static ConstExprValue *ir_resolve_const(IrAnalyze *ira, IrInstruction *value, Un
case ConstValSpecialStatic:
return &value->value;
case ConstValSpecialRuntime:
+ if (!type_has_bits(value->value.type)) {
+ return &value->value;
+ }
ir_add_error(ira, value, buf_sprintf("unable to evaluate constant expression"));
return nullptr;
case ConstValSpecialUndef:
@@ -8861,7 +9735,7 @@ static FnTableEntry *ir_resolve_fn(IrAnalyze *ira, IrInstruction *fn_value) {
}
static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, TypeTableEntry *wanted_type) {
- assert(wanted_type->id == TypeTableEntryIdMaybe);
+ assert(wanted_type->id == TypeTableEntryIdOptional);
if (instr_is_comptime(value)) {
TypeTableEntry *payload_type = wanted_type->data.maybe.child_type;
@@ -8869,21 +9743,25 @@ static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *sourc
if (type_is_invalid(casted_payload->value.type))
return ira->codegen->invalid_instruction;
- ConstExprValue *val = ir_resolve_const(ira, casted_payload, UndefBad);
+ ConstExprValue *val = ir_resolve_const(ira, casted_payload, UndefOk);
if (!val)
return ira->codegen->invalid_instruction;
IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb,
source_instr->scope, source_instr->source_node);
- const_instruction->base.value.type = wanted_type;
const_instruction->base.value.special = ConstValSpecialStatic;
- const_instruction->base.value.data.x_maybe = val;
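+ // pointer-like optionals are represented as the pointer value itself rather
+ // than through x_optional; the null case becomes hard-coded address 0 (see
+ // ir_analyze_null_to_maybe below)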
+ if (get_codegen_ptr_type(wanted_type) != nullptr) {
+ copy_const_val(&const_instruction->base.value, val, val->data.x_ptr.mut == ConstPtrMutComptimeConst);
+ } else {
+ const_instruction->base.value.data.x_optional = val;
+ }
+ const_instruction->base.value.type = wanted_type;
return &const_instruction->base;
}
IrInstruction *result = ir_build_maybe_wrap(&ira->new_irb, source_instr->scope, source_instr->source_node, value);
result->value.type = wanted_type;
- result->value.data.rh_maybe = RuntimeHintMaybeNonNull;
+ result->value.data.rh_maybe = RuntimeHintOptionalNonNull;
ir_add_alloca(ira, result, wanted_type);
return result;
}
@@ -8930,7 +9808,7 @@ static IrInstruction *ir_analyze_err_set_cast(IrAnalyze *ira, IrInstruction *sou
if (!val)
return ira->codegen->invalid_instruction;
- if (!resolve_inferred_error_set(ira, wanted_type, source_instr->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, wanted_type, source_instr->source_node)) {
return ira->codegen->invalid_instruction;
}
if (!type_is_global_error_set(wanted_type)) {
@@ -9025,16 +9903,21 @@ static IrInstruction *ir_analyze_cast_ref(IrAnalyze *ira, IrInstruction *source_
}
static IrInstruction *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, TypeTableEntry *wanted_type) {
- assert(wanted_type->id == TypeTableEntryIdMaybe);
+ assert(wanted_type->id == TypeTableEntryIdOptional);
assert(instr_is_comptime(value));
ConstExprValue *val = ir_resolve_const(ira, value, UndefBad);
assert(val);
IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb, source_instr->scope, source_instr->source_node);
- const_instruction->base.value.type = wanted_type;
const_instruction->base.value.special = ConstValSpecialStatic;
- const_instruction->base.value.data.x_maybe = nullptr;
+ if (get_codegen_ptr_type(wanted_type) != nullptr) {
+ const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
+ const_instruction->base.value.data.x_ptr.data.hard_coded_addr.addr = 0;
+ } else {
+ const_instruction->base.value.data.x_optional = nullptr;
+ }
+ const_instruction->base.value.type = wanted_type;
return &const_instruction->base;
}
@@ -9044,25 +9927,17 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi
if (type_is_invalid(value->value.type))
return ira->codegen->invalid_instruction;
- if (value->id == IrInstructionIdLoadPtr) {
- IrInstructionLoadPtr *load_ptr_inst = (IrInstructionLoadPtr *) value;
- if (load_ptr_inst->ptr->value.type->data.pointer.is_const) {
- return load_ptr_inst->ptr;
- }
- }
-
if (instr_is_comptime(value)) {
ConstExprValue *val = ir_resolve_const(ira, value, UndefOk);
if (!val)
return ira->codegen->invalid_instruction;
- bool final_is_const = (value->value.type->id == TypeTableEntryIdMetaType) ? is_const : true;
return ir_get_const_ptr(ira, source_instruction, val, value->value.type,
- ConstPtrMutComptimeConst, final_is_const, is_volatile,
+ ConstPtrMutComptimeConst, is_const, is_volatile,
get_abi_alignment(ira->codegen, value->value.type));
}
TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, value->value.type,
- is_const, is_volatile, get_abi_alignment(ira->codegen, value->value.type), 0, 0);
+ is_const, is_volatile, PtrLenSingle, get_abi_alignment(ira->codegen, value->value.type), 0, 0);
IrInstruction *new_instruction = ir_build_ref(&ira->new_irb, source_instruction->scope,
source_instruction->source_node, value, is_const, is_volatile);
new_instruction->value.type = ptr_type;
@@ -9114,6 +9989,8 @@ static IrInstruction *ir_analyze_array_to_slice(IrAnalyze *ira, IrInstruction *s
IrInstruction *result = ir_build_slice(&ira->new_irb, source_instr->scope,
source_instr->source_node, array_ptr, start, end, false);
result->value.type = wanted_type;
+ result->value.data.rh_slice.id = RuntimeHintSliceIdLen;
+ result->value.data.rh_slice.len = array_type->data.array.len;
ir_add_alloca(ira, result, result->value.type);
return result;
@@ -9202,6 +10079,8 @@ static IrInstruction *ir_analyze_enum_to_union(IrAnalyze *ira, IrInstruction *so
TypeUnionField *union_field = find_union_field_by_tag(wanted_type, &val->data.x_enum_tag);
assert(union_field != nullptr);
type_ensure_zero_bits_known(ira->codegen, union_field->type_entry);
+ if (type_is_invalid(union_field->type_entry))
+ return ira->codegen->invalid_instruction;
if (!union_field->type_entry->zero_bits) {
AstNode *field_node = wanted_type->data.unionation.decl_node->data.container_decl.fields.at(
union_field->enum_field->decl_index);
@@ -9332,7 +10211,7 @@ static IrInstruction *ir_analyze_int_to_enum(IrAnalyze *ira, IrInstruction *sour
}
IrInstruction *result = ir_build_int_to_enum(&ira->new_irb, source_instr->scope,
- source_instr->source_node, target);
+ source_instr->source_node, nullptr, target);
result->value.type = wanted_type;
return result;
}
@@ -9346,9 +10225,9 @@ static IrInstruction *ir_analyze_number_to_literal(IrAnalyze *ira, IrInstruction
IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
source_instr->source_node, wanted_type);
- if (wanted_type->id == TypeTableEntryIdNumLitFloat) {
+ if (wanted_type->id == TypeTableEntryIdComptimeFloat) {
float_init_float(&result->value, val);
- } else if (wanted_type->id == TypeTableEntryIdNumLitInt) {
+ } else if (wanted_type->id == TypeTableEntryIdComptimeInt) {
bigint_init_bigint(&result->value.data.x_bigint, &val->data.x_bigint);
} else {
zig_unreachable();
@@ -9371,7 +10250,7 @@ static IrInstruction *ir_analyze_int_to_err(IrAnalyze *ira, IrInstruction *sourc
IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
source_instr->source_node, wanted_type);
- if (!resolve_inferred_error_set(ira, wanted_type, source_instr->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, wanted_type, source_instr->source_node)) {
return ira->codegen->invalid_instruction;
}
@@ -9469,7 +10348,7 @@ static IrInstruction *ir_analyze_err_to_int(IrAnalyze *ira, IrInstruction *sourc
zig_unreachable();
}
if (!type_is_global_error_set(err_set_type)) {
- if (!resolve_inferred_error_set(ira, err_set_type, source_instr->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, err_set_type, source_instr->source_node)) {
return ira->codegen->invalid_instruction;
}
if (err_set_type->data.error_set.err_count == 0) {
@@ -9501,6 +10380,140 @@ static IrInstruction *ir_analyze_err_to_int(IrAnalyze *ira, IrInstruction *sourc
return result;
}
+static IrInstruction *ir_analyze_ptr_to_array(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *target,
+ TypeTableEntry *wanted_type)
+{
+ assert(wanted_type->id == TypeTableEntryIdPointer);
+ wanted_type = adjust_ptr_align(ira->codegen, wanted_type, target->value.type->data.pointer.alignment);
+ TypeTableEntry *array_type = wanted_type->data.pointer.child_type;
+ assert(array_type->id == TypeTableEntryIdArray);
+ assert(array_type->data.array.len == 1);
+
+ if (instr_is_comptime(target)) {
+ ConstExprValue *val = ir_resolve_const(ira, target, UndefBad);
+ if (!val)
+ return ira->codegen->invalid_instruction;
+
+ assert(val->type->id == TypeTableEntryIdPointer);
+ ConstExprValue *pointee = const_ptr_pointee(ira->codegen, val);
+ if (pointee->special != ConstValSpecialRuntime) {
+ ConstExprValue *array_val = create_const_vals(1);
+ array_val->special = ConstValSpecialStatic;
+ array_val->type = array_type;
+ array_val->data.x_array.special = ConstArraySpecialNone;
+ array_val->data.x_array.s_none.elements = pointee;
+ array_val->data.x_array.s_none.parent.id = ConstParentIdScalar;
+ array_val->data.x_array.s_none.parent.data.p_scalar.scalar_val = pointee;
+
+ IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb,
+ source_instr->scope, source_instr->source_node);
+ const_instruction->base.value.type = wanted_type;
+ const_instruction->base.value.special = ConstValSpecialStatic;
+ const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialRef;
+ const_instruction->base.value.data.x_ptr.data.ref.pointee = array_val;
+ const_instruction->base.value.data.x_ptr.mut = val->data.x_ptr.mut;
+ return &const_instruction->base;
+ }
+ }
+
+ // pointer to array and pointer to single item are represented the same way at runtime
+ IrInstruction *result = ir_build_cast(&ira->new_irb, target->scope, target->source_node,
+ wanted_type, target, CastOpBitCast);
+ result->value.type = wanted_type;
+ return result;
+}
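
Illustrative sketch (not part of the patch): the new ir_analyze_ptr_to_array path is what lets a single-item pointer coerce to a pointer to a one-element array, since both have the same runtime representation. A minimal Zig example, assuming the post-pointer-reform syntax of this era (the test name and values are made up):

    const assert = @import("std").debug.assert;

    test "coerce *T to *[1]T" {
        var x: i32 = 1234;
        var p: *i32 = &x;
        var q: *[1]i32 = p; // same representation at runtime, so this is a bit-cast
        assert(q[0] == 1234);
    }
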
+
+static void report_recursive_error(IrAnalyze *ira, AstNode *source_node, ConstCastOnly *cast_result,
+ ErrorMsg *parent_msg)
+{
+ switch (cast_result->id) {
+ case ConstCastResultIdOk:
+ zig_unreachable();
+ case ConstCastResultIdOptionalChild: {
+ ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("optional type child '%s' cannot cast into optional type child '%s'",
+ buf_ptr(&cast_result->data.optional->actual_child->name),
+ buf_ptr(&cast_result->data.optional->wanted_child->name)));
+ report_recursive_error(ira, source_node, &cast_result->data.optional->child, msg);
+ break;
+ }
+ case ConstCastResultIdErrorUnionErrorSet: {
+ ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("error set '%s' cannot cast into error set '%s'",
+ buf_ptr(&cast_result->data.error_union_error_set->actual_err_set->name),
+ buf_ptr(&cast_result->data.error_union_error_set->wanted_err_set->name)));
+ report_recursive_error(ira, source_node, &cast_result->data.error_union_error_set->child, msg);
+ break;
+ }
+ case ConstCastResultIdErrSet: {
+ ZigList<ErrorTableEntry *> *missing_errors = &cast_result->data.error_set_mismatch->missing_errors;
+ for (size_t i = 0; i < missing_errors->length; i += 1) {
+ ErrorTableEntry *error_entry = missing_errors->at(i);
+ add_error_note(ira->codegen, parent_msg, error_entry->decl_node,
+ buf_sprintf("'error.%s' not a member of destination error set", buf_ptr(&error_entry->name)));
+ }
+ break;
+ }
+ case ConstCastResultIdErrSetGlobal: {
+ add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("cannot cast global error set into smaller set"));
+ break;
+ }
+ case ConstCastResultIdPointerChild: {
+ ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("pointer type child '%s' cannot cast into pointer type child '%s'",
+ buf_ptr(&cast_result->data.pointer_mismatch->actual_child->name),
+ buf_ptr(&cast_result->data.pointer_mismatch->wanted_child->name)));
+ report_recursive_error(ira, source_node, &cast_result->data.pointer_mismatch->child, msg);
+ break;
+ }
+ case ConstCastResultIdSliceChild: {
+ ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("slice type child '%s' cannot cast into slice type child '%s'",
+ buf_ptr(&cast_result->data.slice_mismatch->actual_child->name),
+ buf_ptr(&cast_result->data.slice_mismatch->wanted_child->name)));
+ report_recursive_error(ira, source_node, &cast_result->data.slice_mismatch->child, msg);
+ break;
+ }
+ case ConstCastResultIdErrorUnionPayload: {
+ ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("error union payload '%s' cannot cast into error union payload '%s'",
+ buf_ptr(&cast_result->data.error_union_payload->actual_payload->name),
+ buf_ptr(&cast_result->data.error_union_payload->wanted_payload->name)));
+ report_recursive_error(ira, source_node, &cast_result->data.error_union_payload->child, msg);
+ break;
+ }
+ case ConstCastResultIdType: {
+ AstNode *wanted_decl_node = type_decl_node(cast_result->data.type_mismatch->wanted_type);
+ AstNode *actual_decl_node = type_decl_node(cast_result->data.type_mismatch->actual_type);
+ if (wanted_decl_node != nullptr) {
+ add_error_note(ira->codegen, parent_msg, wanted_decl_node,
+ buf_sprintf("%s declared here",
+ buf_ptr(&cast_result->data.type_mismatch->wanted_type->name)));
+ }
+ if (actual_decl_node != nullptr) {
+ add_error_note(ira->codegen, parent_msg, actual_decl_node,
+ buf_sprintf("%s declared here",
+ buf_ptr(&cast_result->data.type_mismatch->actual_type->name)));
+ }
+ break;
+ }
+ case ConstCastResultIdFnAlign: // TODO
+ case ConstCastResultIdFnCC: // TODO
+ case ConstCastResultIdFnVarArgs: // TODO
+ case ConstCastResultIdFnIsGeneric: // TODO
+ case ConstCastResultIdFnReturnType: // TODO
+ case ConstCastResultIdFnArgCount: // TODO
+ case ConstCastResultIdFnGenericArgCount: // TODO
+ case ConstCastResultIdFnArg: // TODO
+ case ConstCastResultIdFnArgNoAlias: // TODO
+ case ConstCastResultIdUnresolvedInferredErrSet: // TODO
+ case ConstCastResultIdAsyncAllocatorType: // TODO
+ case ConstCastResultIdNullWrapPtr: // TODO
+ break;
+ }
+}
+
static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_instr,
TypeTableEntry *wanted_type, IrInstruction *value)
{
@@ -9511,60 +10524,52 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
return ira->codegen->invalid_instruction;
}
- // explicit match or non-const to const
- if (types_match_const_cast_only(ira, wanted_type, actual_type, source_node).id == ConstCastResultIdOk) {
+ // perfect match or non-const to const
+ ConstCastOnly const_cast_result = types_match_const_cast_only(ira, wanted_type, actual_type,
+ source_node, false);
+ if (const_cast_result.id == ConstCastResultIdOk) {
return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop, false);
}
- // explicit cast from bool to int
+ // widening conversion
if (wanted_type->id == TypeTableEntryIdInt &&
- actual_type->id == TypeTableEntryIdBool)
- {
- return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpBoolToInt, false);
- }
-
- // explicit widening or shortening cast
- if ((wanted_type->id == TypeTableEntryIdInt &&
- actual_type->id == TypeTableEntryIdInt) ||
- (wanted_type->id == TypeTableEntryIdFloat &&
- actual_type->id == TypeTableEntryIdFloat))
+ actual_type->id == TypeTableEntryIdInt &&
+ wanted_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
+ wanted_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
{
return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
}
- // explicit error set cast
- if (wanted_type->id == TypeTableEntryIdErrorSet &&
- actual_type->id == TypeTableEntryIdErrorSet)
+ // small enough unsigned ints can get casted to large enough signed ints
+ if (wanted_type->id == TypeTableEntryIdInt && wanted_type->data.integral.is_signed &&
+ actual_type->id == TypeTableEntryIdInt && !actual_type->data.integral.is_signed &&
+ wanted_type->data.integral.bit_count > actual_type->data.integral.bit_count)
{
- return ir_analyze_err_set_cast(ira, source_instr, value, wanted_type);
+ return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
}
- // explicit cast from int to float
+ // float widening conversion
if (wanted_type->id == TypeTableEntryIdFloat &&
- actual_type->id == TypeTableEntryIdInt)
+ actual_type->id == TypeTableEntryIdFloat &&
+ wanted_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
{
- return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpIntToFloat, false);
+ return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
}
- // explicit cast from float to int
- if (wanted_type->id == TypeTableEntryIdInt &&
- actual_type->id == TypeTableEntryIdFloat)
- {
- return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpFloatToInt, false);
- }
- // explicit cast from [N]T to []const T
+ // cast from [N]T to []const T
if (is_slice(wanted_type) && actual_type->id == TypeTableEntryIdArray) {
TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type);
}
}
- // explicit cast from &const [N]T to []const T
+ // cast from *const [N]T to []const T
if (is_slice(wanted_type) &&
actual_type->id == TypeTableEntryIdPointer &&
actual_type->data.pointer.is_const &&
@@ -9576,13 +10581,14 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
TypeTableEntry *array_type = actual_type->data.pointer.child_type;
if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type);
}
}
- // explicit cast from [N]T to &const []const N
+ // cast from [N]T to *const []const T
if (wanted_type->id == TypeTableEntryIdPointer &&
wanted_type->data.pointer.is_const &&
is_slice(wanted_type->data.pointer.child_type) &&
@@ -9592,7 +10598,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
wanted_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.pointer.child_type, value);
if (type_is_invalid(cast1->value.type))
@@ -9606,8 +10613,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from [N]T to ?[]const N
- if (wanted_type->id == TypeTableEntryIdMaybe &&
+ // cast from [N]T to ?[]const T
+ if (wanted_type->id == TypeTableEntryIdOptional &&
is_slice(wanted_type->data.maybe.child_type) &&
actual_type->id == TypeTableEntryIdArray)
{
@@ -9615,7 +10622,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
wanted_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.maybe.child_type, value);
if (type_is_invalid(cast1->value.type))
@@ -9629,59 +10637,47 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from []T to []u8 or []u8 to []T
- if (is_slice(wanted_type) && is_slice(actual_type)) {
- TypeTableEntry *wanted_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
- TypeTableEntry *actual_ptr_type = actual_type->data.structure.fields[slice_ptr_index].type_entry;
- if ((is_u8(wanted_ptr_type->data.pointer.child_type) || is_u8(actual_ptr_type->data.pointer.child_type)) &&
- (wanted_ptr_type->data.pointer.is_const || !actual_ptr_type->data.pointer.is_const))
- {
- uint32_t src_align_bytes = get_ptr_align(actual_ptr_type);
- uint32_t dest_align_bytes = get_ptr_align(wanted_ptr_type);
-
- if (dest_align_bytes > src_align_bytes) {
- ErrorMsg *msg = ir_add_error(ira, source_instr,
- buf_sprintf("cast increases pointer alignment"));
- add_error_note(ira->codegen, msg, source_instr->source_node,
- buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&actual_type->name), src_align_bytes));
- add_error_note(ira->codegen, msg, source_instr->source_node,
- buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&wanted_type->name), dest_align_bytes));
- return ira->codegen->invalid_instruction;
- }
-
- if (!ir_emit_global_runtime_side_effect(ira, source_instr))
- return ira->codegen->invalid_instruction;
- return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpResizeSlice, true);
- }
- }
-
- // explicit cast from [N]u8 to []const T
- if (is_slice(wanted_type) &&
- wanted_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const &&
- actual_type->id == TypeTableEntryIdArray &&
- is_u8(actual_type->data.array.child_type))
+ // *[N]T to [*]T
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenUnknown &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
+ actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment &&
+ types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
{
- if (!ir_emit_global_runtime_side_effect(ira, source_instr))
- return ira->codegen->invalid_instruction;
- uint64_t child_type_size = type_size(ira->codegen,
- wanted_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type);
- if (actual_type->data.array.len % child_type_size == 0) {
- return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpBytesToSlice, true);
- } else {
- ir_add_error_node(ira, source_instr->source_node,
- buf_sprintf("unable to convert %s to %s: size mismatch",
- buf_ptr(&actual_type->name), buf_ptr(&wanted_type->name)));
- return ira->codegen->invalid_instruction;
+ return ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_type);
+ }
+
+ // *[N]T to []T
+ if (is_slice(wanted_type) &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
+ {
+ TypeTableEntry *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ assert(slice_ptr_type->id == TypeTableEntryIdPointer);
+ if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ {
+ return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, wanted_type);
}
}
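
Illustrative sketch (not part of the patch): the two branches above are what allow a pointer to an array to coerce to an unknown-length pointer or to a slice when the element types and constness line up. A minimal Zig example, assuming era-appropriate syntax:

    const assert = @import("std").debug.assert;

    test "coerce *[N]T to [*]T and to []T" {
        var array = []u8{ 1, 2, 3 };      // later Zig spells this [_]u8{ ... }
        const many: [*]const u8 = &array; // *[3]u8 -> [*]const u8
        const slice: []const u8 = &array; // *[3]u8 -> []const u8
        assert(slice.len == 3 and many[2] == 3);
    }
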
- // explicit cast from child type of maybe type to maybe type
- if (wanted_type->id == TypeTableEntryIdMaybe) {
+
+ // cast from T to ?T
+ // note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism
+ if (wanted_type->id == TypeTableEntryIdOptional) {
TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type;
- if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node,
+ false).id == ConstCastResultIdOk)
+ {
return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type);
- } else if (actual_type->id == TypeTableEntryIdNumLitInt ||
- actual_type->id == TypeTableEntryIdNumLitFloat)
+ } else if (actual_type->id == TypeTableEntryIdComptimeInt ||
+ actual_type->id == TypeTableEntryIdComptimeFloat)
{
if (ir_num_lit_fits_in_other_type(ira, value, wanted_child_type, true)) {
return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type);
@@ -9704,19 +10700,21 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from null literal to maybe type
- if (wanted_type->id == TypeTableEntryIdMaybe &&
- actual_type->id == TypeTableEntryIdNullLit)
+ // cast from null literal to maybe type
+ if (wanted_type->id == TypeTableEntryIdOptional &&
+ actual_type->id == TypeTableEntryIdNull)
{
return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type);
}
- // explicit cast from child type of error type to error type
+ // cast from child type of error type to error type
if (wanted_type->id == TypeTableEntryIdErrorUnion) {
- if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type,
+ source_node, false).id == ConstCastResultIdOk)
+ {
return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type);
- } else if (actual_type->id == TypeTableEntryIdNumLitInt ||
- actual_type->id == TypeTableEntryIdNumLitFloat)
+ } else if (actual_type->id == TypeTableEntryIdComptimeInt ||
+ actual_type->id == TypeTableEntryIdComptimeFloat)
{
if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.error_union.payload_type, true)) {
return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type);
@@ -9726,7 +10724,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from [N]T to %[]const T
+ // cast from [N]T to E![]const T
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
is_slice(wanted_type->data.error_union.payload_type) &&
actual_type->id == TypeTableEntryIdArray)
@@ -9735,7 +10733,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
wanted_type->data.error_union.payload_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value);
if (type_is_invalid(cast1->value.type))
@@ -9749,23 +10748,23 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from error set to error union type
+ // cast from error set to error union type
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
actual_type->id == TypeTableEntryIdErrorSet)
{
return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type);
}
- // explicit cast from T to %?T
+ // cast from T to E!?T
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
- wanted_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe &&
- actual_type->id != TypeTableEntryIdMaybe)
+ wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
+ actual_type->id != TypeTableEntryIdOptional)
{
TypeTableEntry *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type;
- if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk ||
- actual_type->id == TypeTableEntryIdNullLit ||
- actual_type->id == TypeTableEntryIdNumLitInt ||
- actual_type->id == TypeTableEntryIdNumLitFloat)
+ if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node, false).id == ConstCastResultIdOk ||
+ actual_type->id == TypeTableEntryIdNull ||
+ actual_type->id == TypeTableEntryIdComptimeInt ||
+ actual_type->id == TypeTableEntryIdComptimeFloat)
{
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value);
if (type_is_invalid(cast1->value.type))
@@ -9779,11 +10778,14 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from number literal to another type
- // explicit cast from number literal to &const integer
- if (actual_type->id == TypeTableEntryIdNumLitFloat ||
- actual_type->id == TypeTableEntryIdNumLitInt)
+ // cast from number literal to another type
+ // cast from number literal to *const integer
+ if (actual_type->id == TypeTableEntryIdComptimeFloat ||
+ actual_type->id == TypeTableEntryIdComptimeInt)
{
+ ensure_complete_type(ira->codegen, wanted_type);
+ if (type_is_invalid(wanted_type))
+ return ira->codegen->invalid_instruction;
if (wanted_type->id == TypeTableEntryIdEnum) {
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.enumeration.tag_int_type, value);
if (type_is_invalid(cast1->value.type))
@@ -9808,9 +10810,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
return cast2;
} else if (ir_num_lit_fits_in_other_type(ira, value, wanted_type, true)) {
CastOp op;
- if ((actual_type->id == TypeTableEntryIdNumLitFloat &&
+ if ((actual_type->id == TypeTableEntryIdComptimeFloat &&
wanted_type->id == TypeTableEntryIdFloat) ||
- (actual_type->id == TypeTableEntryIdNumLitInt &&
+ (actual_type->id == TypeTableEntryIdComptimeInt &&
wanted_type->id == TypeTableEntryIdInt))
{
op = CastOpNumLitToConcrete;
@@ -9827,41 +10829,16 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from typed number to integer or float literal.
+ // cast from typed number to integer or float literal.
// works when the number is known at compile time
if (instr_is_comptime(value) &&
- ((actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdNumLitInt) ||
- (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdNumLitFloat)))
+ ((actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdComptimeInt) ||
+ (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdComptimeFloat)))
{
return ir_analyze_number_to_literal(ira, source_instr, value, wanted_type);
}
- // explicit cast from T!void to integer type which can fit it
- bool actual_type_is_void_err = actual_type->id == TypeTableEntryIdErrorUnion &&
- !type_has_bits(actual_type->data.error_union.payload_type);
- bool actual_type_is_err_set = actual_type->id == TypeTableEntryIdErrorSet;
- if ((actual_type_is_void_err || actual_type_is_err_set) && wanted_type->id == TypeTableEntryIdInt) {
- return ir_analyze_err_to_int(ira, source_instr, value, wanted_type);
- }
-
- // explicit cast from integer to error set
- if (wanted_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdInt &&
- !actual_type->data.integral.is_signed)
- {
- return ir_analyze_int_to_err(ira, source_instr, value, wanted_type);
- }
-
- // explicit cast from integer to enum type with no payload
- if (actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdEnum) {
- return ir_analyze_int_to_enum(ira, source_instr, value, wanted_type);
- }
-
- // explicit cast from enum type with no payload to integer
- if (wanted_type->id == TypeTableEntryIdInt && actual_type->id == TypeTableEntryIdEnum) {
- return ir_analyze_enum_to_int(ira, source_instr, value, wanted_type);
- }
-
- // explicit cast from union to the enum type of the union
+ // cast from union to the enum type of the union
if (actual_type->id == TypeTableEntryIdUnion && wanted_type->id == TypeTableEntryIdEnum) {
type_ensure_zero_bits_known(ira->codegen, actual_type);
if (type_is_invalid(actual_type))
@@ -9872,7 +10849,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit enum to union which has the enum as the tag type
+ // enum to union which has the enum as the tag type
if (wanted_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum &&
(wanted_type->data.unionation.decl_node->data.container_decl.auto_enum ||
wanted_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
@@ -9883,7 +10860,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit enum to &const union which has the enum as the tag type
+ // enum to &const union which has the enum as the tag type
if (actual_type->id == TypeTableEntryIdEnum && wanted_type->id == TypeTableEntryIdPointer) {
TypeTableEntry *union_type = wanted_type->data.pointer.child_type;
if (union_type->data.unionation.decl_node->data.container_decl.auto_enum ||
@@ -9904,23 +10881,63 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from undefined to anything
- if (actual_type->id == TypeTableEntryIdUndefLit) {
+ // cast from *T to *[1]T
+ if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->id == TypeTableEntryIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle)
+ {
+ TypeTableEntry *array_type = wanted_type->data.pointer.child_type;
+ if (array_type->id == TypeTableEntryIdArray && array_type->data.array.len == 1 &&
+ types_match_const_cast_only(ira, array_type->data.array.child_type,
+ actual_type->data.pointer.child_type, source_node,
+ !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ {
+ if (wanted_type->data.pointer.alignment > actual_type->data.pointer.alignment) {
+ ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment"));
+ add_error_note(ira->codegen, msg, value->source_node,
+ buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&actual_type->name),
+ actual_type->data.pointer.alignment));
+ add_error_note(ira->codegen, msg, source_instr->source_node,
+ buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&wanted_type->name),
+ wanted_type->data.pointer.alignment));
+ return ira->codegen->invalid_instruction;
+ }
+ return ir_analyze_ptr_to_array(ira, source_instr, value, wanted_type);
+ }
+ }
+
+ // cast from T to *T where T is zero bits
+ if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type, source_node, !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ {
+ type_ensure_zero_bits_known(ira->codegen, actual_type);
+ if (type_is_invalid(actual_type)) {
+ return ira->codegen->invalid_instruction;
+ }
+ if (!type_has_bits(actual_type)) {
+ return ir_get_ref(ira, source_instr, value, false, false);
+ }
+ }
+
+
+ // cast from undefined to anything
+ if (actual_type->id == TypeTableEntryIdUndefined) {
return ir_analyze_undefined_to_anything(ira, source_instr, value, wanted_type);
}
- // explicit cast from something to const pointer of it
+ // cast from something to const pointer of it
if (!type_requires_comptime(actual_type)) {
TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
- if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node, false).id == ConstCastResultIdOk) {
return ir_analyze_cast_ref(ira, source_instr, value, wanted_type);
}
}
- ir_add_error_node(ira, source_instr->source_node,
- buf_sprintf("invalid cast from type '%s' to '%s'",
- buf_ptr(&actual_type->name),
- buf_ptr(&wanted_type->name)));
+ ErrorMsg *parent_msg = ir_add_error_node(ira, source_instr->source_node,
+ buf_sprintf("expected type '%s', found '%s'",
+ buf_ptr(&wanted_type->name),
+ buf_ptr(&actual_type->name)));
+ report_recursive_error(ira, source_instr->source_node, &const_cast_result, parent_msg);
return ira->codegen->invalid_instruction;
}
@@ -9937,29 +10954,7 @@ static IrInstruction *ir_implicit_cast(IrAnalyze *ira, IrInstruction *value, Typ
if (value->value.type->id == TypeTableEntryIdUnreachable)
return value;
- ImplicitCastMatchResult result = ir_types_match_with_implicit_cast(ira, expected_type, value->value.type, value);
- switch (result) {
- case ImplicitCastMatchResultNo:
- ir_add_error(ira, value,
- buf_sprintf("expected type '%s', found '%s'",
- buf_ptr(&expected_type->name),
- buf_ptr(&value->value.type->name)));
- return ira->codegen->invalid_instruction;
-
- case ImplicitCastMatchResultYes:
- return ir_analyze_cast(ira, value, expected_type, value);
- case ImplicitCastMatchResultReportedError:
- return ira->codegen->invalid_instruction;
- }
-
- zig_unreachable();
-}
-
-static IrInstruction *ir_implicit_byval_const_ref_cast(IrAnalyze *ira, IrInstruction *inst) {
- if (type_is_copyable(ira->codegen, inst->value.type))
- return inst;
- TypeTableEntry *const_ref_type = get_pointer_to_type(ira->codegen, inst->value.type, true);
- return ir_implicit_cast(ira, inst, const_ref_type);
+ return ir_analyze_cast(ira, value, expected_type, value);
}
static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *ptr) {
@@ -9977,6 +10972,7 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
IrInstruction *result = ir_create_const(&ira->new_irb, source_instruction->scope,
source_instruction->source_node, child_type);
copy_const_val(&result->value, pointee, ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst);
+ result->value.type = child_type;
return result;
}
}
@@ -9986,21 +10982,6 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
source_instruction->source_node, ptr);
load_ptr_instruction->value.type = child_type;
return load_ptr_instruction;
- } else if (type_entry->id == TypeTableEntryIdMetaType) {
- ConstExprValue *ptr_val = ir_resolve_const(ira, ptr, UndefBad);
- if (!ptr_val)
- return ira->codegen->invalid_instruction;
-
- TypeTableEntry *ptr_type = ptr_val->data.x_type;
- if (ptr_type->id == TypeTableEntryIdPointer) {
- TypeTableEntry *child_type = ptr_type->data.pointer.child_type;
- return ir_create_const_type(&ira->new_irb, source_instruction->scope,
- source_instruction->source_node, child_type);
- } else {
- ir_add_error(ira, source_instruction,
- buf_sprintf("attempt to dereference non pointer type '%s'", buf_ptr(&ptr_type->name)));
- return ira->codegen->invalid_instruction;
- }
} else {
ir_add_error_node(ira, source_instruction->source_node,
buf_sprintf("attempt to dereference non pointer type '%s'",
@@ -10044,11 +11025,11 @@ static bool ir_resolve_align(IrAnalyze *ira, IrInstruction *value, uint32_t *out
return true;
}
-static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out) {
+static bool ir_resolve_unsigned(IrAnalyze *ira, IrInstruction *value, TypeTableEntry *int_type, uint64_t *out) {
if (type_is_invalid(value->value.type))
return false;
- IrInstruction *casted_value = ir_implicit_cast(ira, value, ira->codegen->builtin_types.entry_usize);
+ IrInstruction *casted_value = ir_implicit_cast(ira, value, int_type);
if (type_is_invalid(casted_value->value.type))
return false;
@@ -10060,6 +11041,10 @@ static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out
return true;
}
+static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out) {
+ return ir_resolve_unsigned(ira, value, ira->codegen->builtin_types.entry_usize, out);
+}
+
static bool ir_resolve_bool(IrAnalyze *ira, IrInstruction *value, bool *out) {
if (type_is_invalid(value->value.type))
return false;
@@ -10169,7 +11154,9 @@ static Buf *ir_resolve_str(IrAnalyze *ira, IrInstruction *value) {
if (type_is_invalid(value->value.type))
return nullptr;
- TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true);
+ TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
+ true, false, PtrLenUnknown,
+ get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0);
TypeTableEntry *str_type = get_slice_type(ira->codegen, ptr_type);
IrInstruction *casted_value = ir_implicit_cast(ira, value, str_type);
if (type_is_invalid(casted_value->value.type))
@@ -10267,10 +11254,15 @@ static TypeTableEntry *ir_analyze_bin_op_bool(IrAnalyze *ira, IrInstructionBinOp
if (casted_op2 == ira->codegen->invalid_instruction)
return ira->codegen->builtin_types.entry_invalid;
- ConstExprValue *op1_val = &casted_op1->value;
- ConstExprValue *op2_val = &casted_op2->value;
- if (op1_val->special != ConstValSpecialRuntime && op2_val->special != ConstValSpecialRuntime) {
+ if (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2)) {
ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base);
+ ConstExprValue *op1_val = ir_resolve_const(ira, casted_op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
assert(casted_op1->value.type->id == TypeTableEntryIdBool);
assert(casted_op2->value.type->id == TypeTableEntryIdBool);
@@ -10307,6 +11299,16 @@ static bool resolve_cmp_op_id(IrBinOp op_id, Cmp cmp) {
}
}
+static bool optional_value_is_null(ConstExprValue *val) {
+ assert(val->special == ConstValSpecialStatic);
+ if (get_codegen_ptr_type(val->type) != nullptr) {
+ return val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
+ val->data.x_ptr.data.hard_coded_addr.addr == 0;
+ } else {
+ return val->data.x_optional == nullptr;
+ }
+}
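
Illustrative sketch (not part of the patch): optional_value_is_null encodes the new comptime representation of optionals — pointer-like optionals store the pointer itself and use address 0 for null, while other optionals keep a nullable payload pointer in x_optional. In Zig terms, roughly:

    const assert = @import("std").debug.assert;

    test "optional pointer compared against null" {
        var x: i32 = 42;
        var p: ?*i32 = &x;  // same size as *i32; null would be the all-zero address
        var q: ?*i32 = null;
        assert(p != null);
        assert(q == null);
    }
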
+
static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) {
IrInstruction *op1 = bin_op_instruction->op1->other;
IrInstruction *op2 = bin_op_instruction->op2->other;
@@ -10315,19 +11317,19 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
IrBinOp op_id = bin_op_instruction->op_id;
bool is_equality_cmp = (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);
if (is_equality_cmp &&
- ((op1->value.type->id == TypeTableEntryIdNullLit && op2->value.type->id == TypeTableEntryIdMaybe) ||
- (op2->value.type->id == TypeTableEntryIdNullLit && op1->value.type->id == TypeTableEntryIdMaybe) ||
- (op1->value.type->id == TypeTableEntryIdNullLit && op2->value.type->id == TypeTableEntryIdNullLit)))
+ ((op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdOptional) ||
+ (op2->value.type->id == TypeTableEntryIdNull && op1->value.type->id == TypeTableEntryIdOptional) ||
+ (op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull)))
{
- if (op1->value.type->id == TypeTableEntryIdNullLit && op2->value.type->id == TypeTableEntryIdNullLit) {
+ if (op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull) {
ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base);
out_val->data.x_bool = (op_id == IrBinOpCmpEq);
return ira->codegen->builtin_types.entry_bool;
}
IrInstruction *maybe_op;
- if (op1->value.type->id == TypeTableEntryIdNullLit) {
+ if (op1->value.type->id == TypeTableEntryIdNull) {
maybe_op = op2;
- } else if (op2->value.type->id == TypeTableEntryIdNullLit) {
+ } else if (op2->value.type->id == TypeTableEntryIdNull) {
maybe_op = op1;
} else {
zig_unreachable();
@@ -10336,7 +11338,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
ConstExprValue *maybe_val = ir_resolve_const(ira, maybe_op, UndefBad);
if (!maybe_val)
return ira->codegen->builtin_types.entry_invalid;
- bool is_null = (maybe_val->data.x_maybe == nullptr);
+ bool is_null = optional_value_is_null(maybe_val);
ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base);
out_val->data.x_bool = (op_id == IrBinOpCmpEq) ? is_null : !is_null;
return ira->codegen->builtin_types.entry_bool;
@@ -10364,7 +11366,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
return ira->codegen->builtin_types.entry_invalid;
}
- if (!resolve_inferred_error_set(ira, intersect_type, source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, intersect_type, source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
@@ -10410,9 +11412,14 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
}
}
- ConstExprValue *op1_val = &op1->value;
- ConstExprValue *op2_val = &op2->value;
- if (value_is_comptime(op1_val) && value_is_comptime(op2_val)) {
+ if (instr_is_comptime(op1) && instr_is_comptime(op2)) {
+ ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+ ConstExprValue *op2_val = ir_resolve_const(ira, op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
bool answer;
bool are_equal = op1_val->data.x_err_set->value == op2_val->data.x_err_set->value;
if (op_id == IrBinOpCmpEq) {
@@ -10435,22 +11442,23 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
}
IrInstruction *instructions[] = {op1, op2};
- TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, source_node, instructions, 2);
+ TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, source_node, nullptr, instructions, 2);
if (type_is_invalid(resolved_type))
return resolved_type;
type_ensure_zero_bits_known(ira->codegen, resolved_type);
if (type_is_invalid(resolved_type))
return resolved_type;
-
+ bool operator_allowed;
switch (resolved_type->id) {
case TypeTableEntryIdInvalid:
zig_unreachable(); // handled above
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
+ operator_allowed = true;
break;
case TypeTableEntryIdBool:
@@ -10465,32 +11473,27 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdArgTuple:
case TypeTableEntryIdPromise:
- if (!is_equality_cmp) {
- ir_add_error_node(ira, source_node,
- buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name)));
- return ira->codegen->builtin_types.entry_invalid;
- }
- break;
-
case TypeTableEntryIdEnum:
- if (!is_equality_cmp) {
- ir_add_error_node(ira, source_node,
- buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name)));
- return ira->codegen->builtin_types.entry_invalid;
- }
+ operator_allowed = is_equality_cmp;
break;
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdUnion:
- ir_add_error_node(ira, source_node,
- buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name)));
- return ira->codegen->builtin_types.entry_invalid;
+ operator_allowed = false;
+ break;
+ case TypeTableEntryIdOptional:
+ operator_allowed = is_equality_cmp && get_codegen_ptr_type(resolved_type) != nullptr;
+ break;
+ }
+ if (!operator_allowed) {
+ ir_add_error_node(ira, source_node,
+ buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
}
IrInstruction *casted_op1 = ir_implicit_cast(ira, op1, resolved_type);
@@ -10501,15 +11504,20 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
if (casted_op2 == ira->codegen->invalid_instruction)
return ira->codegen->builtin_types.entry_invalid;
- ConstExprValue *op1_val = &casted_op1->value;
- ConstExprValue *op2_val = &casted_op2->value;
bool one_possible_value = !type_requires_comptime(resolved_type) && !type_has_bits(resolved_type);
- if (one_possible_value || (value_is_comptime(op1_val) && value_is_comptime(op2_val))) {
+ if (one_possible_value || (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2))) {
+ ConstExprValue *op1_val = one_possible_value ? &casted_op1->value : ir_resolve_const(ira, casted_op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+ ConstExprValue *op2_val = one_possible_value ? &casted_op2->value : ir_resolve_const(ira, casted_op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
bool answer;
- if (resolved_type->id == TypeTableEntryIdNumLitFloat || resolved_type->id == TypeTableEntryIdFloat) {
+ if (resolved_type->id == TypeTableEntryIdComptimeFloat || resolved_type->id == TypeTableEntryIdFloat) {
Cmp cmp_result = float_cmp(op1_val, op2_val);
answer = resolve_cmp_op_id(op_id, cmp_result);
- } else if (resolved_type->id == TypeTableEntryIdNumLitInt || resolved_type->id == TypeTableEntryIdInt) {
+ } else if (resolved_type->id == TypeTableEntryIdComptimeInt || resolved_type->id == TypeTableEntryIdInt) {
Cmp cmp_result = bigint_cmp(&op1_val->data.x_bigint, &op2_val->data.x_bigint);
answer = resolve_cmp_op_id(op_id, cmp_result);
} else {
@@ -10532,11 +11540,17 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
if (resolved_type->id == TypeTableEntryIdInt && !resolved_type->data.integral.is_signed) {
ConstExprValue *known_left_val;
IrBinOp flipped_op_id;
- if (value_is_comptime(op1_val)) {
- known_left_val = op1_val;
+ if (instr_is_comptime(casted_op1)) {
+ known_left_val = ir_resolve_const(ira, casted_op1, UndefBad);
+ if (known_left_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
flipped_op_id = op_id;
- } else if (value_is_comptime(op2_val)) {
- known_left_val = op2_val;
+ } else if (instr_is_comptime(casted_op2)) {
+ known_left_val = ir_resolve_const(ira, casted_op2, UndefBad);
+ if (known_left_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
if (op_id == IrBinOpCmpLessThan) {
flipped_op_id = IrBinOpCmpGreaterThan;
} else if (op_id == IrBinOpCmpGreaterThan) {
@@ -10573,12 +11587,12 @@ static int ir_eval_math_op(TypeTableEntry *type_entry, ConstExprValue *op1_val,
bool is_int;
bool is_float;
Cmp op2_zcmp;
- if (type_entry->id == TypeTableEntryIdInt || type_entry->id == TypeTableEntryIdNumLitInt) {
+ if (type_entry->id == TypeTableEntryIdInt || type_entry->id == TypeTableEntryIdComptimeInt) {
is_int = true;
is_float = false;
op2_zcmp = bigint_cmp_zero(&op2_val->data.x_bigint);
} else if (type_entry->id == TypeTableEntryIdFloat ||
- type_entry->id == TypeTableEntryIdNumLitFloat)
+ type_entry->id == TypeTableEntryIdComptimeFloat)
{
is_int = false;
is_float = true;
@@ -10752,7 +11766,7 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp *
if (type_is_invalid(op1->value.type))
return ira->codegen->builtin_types.entry_invalid;
- if (op1->value.type->id != TypeTableEntryIdInt && op1->value.type->id != TypeTableEntryIdNumLitInt) {
+ if (op1->value.type->id != TypeTableEntryIdInt && op1->value.type->id != TypeTableEntryIdComptimeInt) {
ir_add_error(ira, &bin_op_instruction->base,
buf_sprintf("bit shifting operation expected integer type, found '%s'",
buf_ptr(&op1->value.type->name)));
@@ -10765,7 +11779,7 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp *
IrInstruction *casted_op2;
IrBinOp op_id = bin_op_instruction->op_id;
- if (op1->value.type->id == TypeTableEntryIdNumLitInt) {
+ if (op1->value.type->id == TypeTableEntryIdComptimeInt) {
casted_op2 = op2;
if (op_id == IrBinOpBitShiftLeftLossy) {
@@ -10781,6 +11795,26 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp *
} else {
TypeTableEntry *shift_amt_type = get_smallest_unsigned_int_type(ira->codegen,
op1->value.type->data.integral.bit_count - 1);
+ if (bin_op_instruction->op_id == IrBinOpBitShiftLeftLossy &&
+ op2->value.type->id == TypeTableEntryIdComptimeInt) {
+ if (!bigint_fits_in_bits(&op2->value.data.x_bigint,
+ shift_amt_type->data.integral.bit_count,
+ op2->value.data.x_bigint.is_negative)) {
+ Buf *val_buf = buf_alloc();
+ bigint_append_buf(val_buf, &op2->value.data.x_bigint, 10);
+ ErrorMsg* msg = ir_add_error(ira,
+ &bin_op_instruction->base,
+ buf_sprintf("RHS of shift is too large for LHS type"));
+ add_error_note(
+ ira->codegen,
+ msg,
+ op2->source_node,
+ buf_sprintf("value %s cannot fit into type %s",
+ buf_ptr(val_buf),
+ buf_ptr(&shift_amt_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+ }
casted_op2 = ir_implicit_cast(ira, op2, shift_amt_type);
if (casted_op2 == ira->codegen->invalid_instruction)
@@ -10788,8 +11822,14 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp *
}
if (instr_is_comptime(op1) && instr_is_comptime(casted_op2)) {
- ConstExprValue *op1_val = &op1->value;
- ConstExprValue *op2_val = &casted_op2->value;
+ ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *result_instruction = ir_get_const(ira, &bin_op_instruction->base);
ir_link_new_instruction(result_instruction, &bin_op_instruction->base);
ConstExprValue *out_val = &result_instruction->value;
@@ -10810,7 +11850,7 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp *
ir_num_lit_fits_in_other_type(ira, result_instruction, op1->value.type, false);
return op1->value.type;
- } else if (op1->value.type->id == TypeTableEntryIdNumLitInt) {
+ } else if (op1->value.type->id == TypeTableEntryIdComptimeInt) {
ir_add_error(ira, &bin_op_instruction->base,
buf_sprintf("LHS of shift must be an integer type, or RHS must be compile-time known"));
return ira->codegen->builtin_types.entry_invalid;
@@ -10823,22 +11863,44 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp *
static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) {
IrInstruction *op1 = bin_op_instruction->op1->other;
+ if (type_is_invalid(op1->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *op2 = bin_op_instruction->op2->other;
- IrInstruction *instructions[] = {op1, op2};
- TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, bin_op_instruction->base.source_node, instructions, 2);
- if (type_is_invalid(resolved_type))
- return resolved_type;
+ if (type_is_invalid(op2->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrBinOp op_id = bin_op_instruction->op_id;
- bool is_int = resolved_type->id == TypeTableEntryIdInt || resolved_type->id == TypeTableEntryIdNumLitInt;
- bool is_float = resolved_type->id == TypeTableEntryIdFloat || resolved_type->id == TypeTableEntryIdNumLitFloat;
+ // look for pointer math
+ if (op1->value.type->id == TypeTableEntryIdPointer && op1->value.type->data.pointer.ptr_len == PtrLenUnknown &&
+ (op_id == IrBinOpAdd || op_id == IrBinOpSub))
+ {
+ IrInstruction *casted_op2 = ir_implicit_cast(ira, op2, ira->codegen->builtin_types.entry_usize);
+ if (casted_op2 == ira->codegen->invalid_instruction)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *result = ir_build_bin_op(&ira->new_irb, bin_op_instruction->base.scope,
+ bin_op_instruction->base.source_node, op_id, op1, casted_op2, true);
+ result->value.type = op1->value.type;
+ ir_link_new_instruction(result, &bin_op_instruction->base);
+ return result->value.type;
+ }
+
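
Illustrative sketch (not part of the patch): the branch above introduces pointer arithmetic, restricted to unknown-length pointers and to add/sub with a right-hand side coerced to usize. A minimal Zig example, assuming era-appropriate syntax:

    const assert = @import("std").debug.assert;

    test "pointer arithmetic on [*]T" {
        var array = []i32{ 10, 20, 30 };
        var many: [*]i32 = &array;  // *[3]i32 -> [*]i32
        const shifted = many + 2;   // offset is in elements, not bytes
        assert(shifted[0] == 30);
    }
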
+ IrInstruction *instructions[] = {op1, op2};
+ TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, bin_op_instruction->base.source_node, nullptr, instructions, 2);
+ if (type_is_invalid(resolved_type))
+ return resolved_type;
+
+ bool is_int = resolved_type->id == TypeTableEntryIdInt || resolved_type->id == TypeTableEntryIdComptimeInt;
+ bool is_float = resolved_type->id == TypeTableEntryIdFloat || resolved_type->id == TypeTableEntryIdComptimeFloat;
bool is_signed_div = (
(resolved_type->id == TypeTableEntryIdInt && resolved_type->data.integral.is_signed) ||
resolved_type->id == TypeTableEntryIdFloat ||
- (resolved_type->id == TypeTableEntryIdNumLitFloat &&
+ (resolved_type->id == TypeTableEntryIdComptimeFloat &&
((bigfloat_cmp_zero(&op1->value.data.x_bigfloat) != CmpGT) !=
(bigfloat_cmp_zero(&op2->value.data.x_bigfloat) != CmpGT))) ||
- (resolved_type->id == TypeTableEntryIdNumLitInt &&
+ (resolved_type->id == TypeTableEntryIdComptimeInt &&
((bigint_cmp_zero(&op1->value.data.x_bigint) != CmpGT) !=
(bigint_cmp_zero(&op2->value.data.x_bigint) != CmpGT)))
);
@@ -10846,7 +11908,15 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
if (is_signed_div) {
bool ok = false;
if (instr_is_comptime(op1) && instr_is_comptime(op2)) {
- if (bigint_cmp_zero(&op2->value.data.x_bigint) == CmpEQ) {
+ ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *op2_val = ir_resolve_const(ira, op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (bigint_cmp_zero(&op2_val->data.x_bigint) == CmpEQ) {
// the division by zero error will be caught later, but we don't have a
// division function ambiguity problem.
op_id = IrBinOpDivTrunc;
@@ -10854,8 +11924,8 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
} else {
BigInt trunc_result;
BigInt floor_result;
- bigint_div_trunc(&trunc_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint);
- bigint_div_floor(&floor_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint);
+ bigint_div_trunc(&trunc_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
+ bigint_div_floor(&floor_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
if (bigint_cmp(&trunc_result, &floor_result) == CmpEQ) {
ok = true;
op_id = IrBinOpDivTrunc;
@@ -10876,7 +11946,15 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
if (is_signed_div && (is_int || is_float)) {
bool ok = false;
if (instr_is_comptime(op1) && instr_is_comptime(op2)) {
+ ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
if (is_int) {
+ ConstExprValue *op2_val = ir_resolve_const(ira, op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
if (bigint_cmp_zero(&op2->value.data.x_bigint) == CmpEQ) {
// the division by zero error will be caught later, but we don't
// have a remainder function ambiguity problem
@@ -10884,14 +11962,19 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
} else {
BigInt rem_result;
BigInt mod_result;
- bigint_rem(&rem_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint);
- bigint_mod(&mod_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint);
+ bigint_rem(&rem_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
+ bigint_mod(&mod_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
ok = bigint_cmp(&rem_result, &mod_result) == CmpEQ;
}
} else {
IrInstruction *casted_op2 = ir_implicit_cast(ira, op2, resolved_type);
if (casted_op2 == ira->codegen->invalid_instruction)
return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
if (float_cmp_zero(&casted_op2->value) == CmpEQ) {
// the division by zero error will be caught later, but we don't
// have a remainder function ambiguity problem
@@ -10899,8 +11982,8 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
} else {
ConstExprValue rem_result;
ConstExprValue mod_result;
- float_rem(&rem_result, &op1->value, &casted_op2->value);
- float_mod(&mod_result, &op1->value, &casted_op2->value);
+ float_rem(&rem_result, op1_val, op2_val);
+ float_mod(&mod_result, op1_val, op2_val);
ok = float_cmp(&rem_result, &mod_result) == CmpEQ;
}
}
@@ -10939,7 +12022,7 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
return ira->codegen->builtin_types.entry_invalid;
}
- if (resolved_type->id == TypeTableEntryIdNumLitInt) {
+ if (resolved_type->id == TypeTableEntryIdComptimeInt) {
if (op_id == IrBinOpAddWrap) {
op_id = IrBinOpAdd;
} else if (op_id == IrBinOpSubWrap) {
@@ -10958,8 +12041,13 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
return ira->codegen->builtin_types.entry_invalid;
if (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2)) {
- ConstExprValue *op1_val = &casted_op1->value;
- ConstExprValue *op2_val = &casted_op2->value;
+ ConstExprValue *op1_val = ir_resolve_const(ira, casted_op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+ ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *result_instruction = ir_get_const(ira, &bin_op_instruction->base);
ir_link_new_instruction(result_instruction, &bin_op_instruction->base);
ConstExprValue *out_val = &result_instruction->value;
@@ -11101,7 +12189,8 @@ static TypeTableEntry *ir_analyze_array_cat(IrAnalyze *ira, IrInstructionBinOp *
out_array_val = out_val;
} else if (is_slice(op1_type) || is_slice(op2_type)) {
- TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, child_type, true);
+ TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, child_type,
+ true, false, PtrLenUnknown, get_abi_alignment(ira->codegen, child_type), 0, 0);
result_type = get_slice_type(ira->codegen, ptr_type);
out_array_val = create_const_vals(1);
out_array_val->special = ConstValSpecialStatic;
@@ -11121,7 +12210,9 @@ static TypeTableEntry *ir_analyze_array_cat(IrAnalyze *ira, IrInstructionBinOp *
} else {
new_len += 1; // null byte
- result_type = get_pointer_to_type(ira->codegen, child_type, true);
+ // TODO make this `[*]null T` instead of `[*]T`
+ result_type = get_pointer_to_type_extra(ira->codegen, child_type, true, false,
+ PtrLenUnknown, get_abi_alignment(ira->codegen, child_type), 0, 0);
out_array_val = create_const_vals(1);
out_array_val->special = ConstValSpecialStatic;
@@ -11131,9 +12222,16 @@ static TypeTableEntry *ir_analyze_array_cat(IrAnalyze *ira, IrInstructionBinOp *
out_val->data.x_ptr.data.base_array.array_val = out_array_val;
out_val->data.x_ptr.data.base_array.elem_index = 0;
}
- out_array_val->data.x_array.s_none.elements = create_const_vals(new_len);
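+    // If both source arrays are fully undefined, mark the concatenation result undefined and skip element copying.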
+ if (op1_array_val->data.x_array.special == ConstArraySpecialUndef &&
+ op2_array_val->data.x_array.special == ConstArraySpecialUndef) {
+ out_array_val->data.x_array.special = ConstArraySpecialUndef;
+ return result_type;
+ }
+
+ out_array_val->data.x_array.s_none.elements = create_const_vals(new_len);
expand_undef_array(ira->codegen, op1_array_val);
+ expand_undef_array(ira->codegen, op2_array_val);
size_t next_index = 0;
for (size_t i = op1_array_index; i < op1_array_end; i += 1, next_index += 1) {
@@ -11185,11 +12283,15 @@ static TypeTableEntry *ir_analyze_array_mult(IrAnalyze *ira, IrInstructionBinOp
}
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
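+    // Repeating an undefined array just yields an undefined array of the new length; no elements are created.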
+ if (array_val->data.x_array.special == ConstArraySpecialUndef) {
+ out_val->data.x_array.special = ConstArraySpecialUndef;
+
+ TypeTableEntry *child_type = array_type->data.array.child_type;
+ return get_array_type(ira->codegen, child_type, new_array_len);
+ }
out_val->data.x_array.s_none.elements = create_const_vals(new_array_len);
- expand_undef_array(ira->codegen, array_val);
-
uint64_t i = 0;
for (uint64_t x = 0; x < mult_amt; x += 1) {
for (uint64_t y = 0; y < old_array_len; y += 1) {
@@ -11220,11 +12322,11 @@ static TypeTableEntry *ir_analyze_merge_error_sets(IrAnalyze *ira, IrInstruction
return ira->codegen->builtin_types.entry_type;
}
- if (!resolve_inferred_error_set(ira, op1_type, instruction->op1->other->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, op1_type, instruction->op1->other->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
- if (!resolve_inferred_error_set(ira, op2_type, instruction->op2->other->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, op2_type, instruction->op2->other->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
@@ -11290,61 +12392,6 @@ static TypeTableEntry *ir_analyze_instruction_bin_op(IrAnalyze *ira, IrInstructi
zig_unreachable();
}
-enum VarClassRequired {
- VarClassRequiredAny,
- VarClassRequiredConst,
- VarClassRequiredIllegal,
-};
-
-static VarClassRequired get_var_class_required(TypeTableEntry *type_entry) {
- switch (type_entry->id) {
- case TypeTableEntryIdInvalid:
- zig_unreachable();
- case TypeTableEntryIdUnreachable:
- return VarClassRequiredIllegal;
- case TypeTableEntryIdBool:
- case TypeTableEntryIdInt:
- case TypeTableEntryIdFloat:
- case TypeTableEntryIdVoid:
- case TypeTableEntryIdErrorSet:
- case TypeTableEntryIdFn:
- case TypeTableEntryIdPromise:
- return VarClassRequiredAny;
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdBlock:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdOpaque:
- case TypeTableEntryIdMetaType:
- case TypeTableEntryIdNamespace:
- case TypeTableEntryIdBoundFn:
- case TypeTableEntryIdArgTuple:
- return VarClassRequiredConst;
-
- case TypeTableEntryIdPointer:
- if (type_entry->data.pointer.child_type->id == TypeTableEntryIdOpaque) {
- return VarClassRequiredAny;
- } else {
- return get_var_class_required(type_entry->data.pointer.child_type);
- }
- case TypeTableEntryIdArray:
- return get_var_class_required(type_entry->data.array.child_type);
- case TypeTableEntryIdMaybe:
- return get_var_class_required(type_entry->data.maybe.child_type);
- case TypeTableEntryIdErrorUnion:
- return get_var_class_required(type_entry->data.error_union.payload_type);
-
- case TypeTableEntryIdStruct:
- case TypeTableEntryIdEnum:
- case TypeTableEntryIdUnion:
- // TODO check the fields of these things and make sure that they don't recursively
- // contain any of the other variable classes
- return VarClassRequiredAny;
- }
- zig_unreachable();
-}
-
static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstructionDeclVar *decl_var_instruction) {
VariableTableEntry *var = decl_var_instruction->var;
@@ -11379,36 +12426,41 @@ static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstruc
if (type_is_invalid(result_type)) {
result_type = ira->codegen->builtin_types.entry_invalid;
} else {
- switch (get_var_class_required(result_type)) {
- case VarClassRequiredIllegal:
+ type_ensure_zero_bits_known(ira->codegen, result_type);
+ if (type_is_invalid(result_type)) {
+ result_type = ira->codegen->builtin_types.entry_invalid;
+ }
+ }
+
+ if (!type_is_invalid(result_type)) {
+ if (result_type->id == TypeTableEntryIdUnreachable ||
+ result_type->id == TypeTableEntryIdOpaque)
+ {
+ ir_add_error_node(ira, source_node,
+ buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name)));
+ result_type = ira->codegen->builtin_types.entry_invalid;
+ } else if (type_requires_comptime(result_type)) {
+ var_class_requires_const = true;
+ if (!var->gen_is_const && !is_comptime_var) {
ir_add_error_node(ira, source_node,
- buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name)));
+ buf_sprintf("variable of type '%s' must be const or comptime",
+ buf_ptr(&result_type->name)));
result_type = ira->codegen->builtin_types.entry_invalid;
- break;
- case VarClassRequiredConst:
+ }
+ } else {
+ if (casted_init_value->value.special == ConstValSpecialStatic &&
+ casted_init_value->value.type->id == TypeTableEntryIdFn &&
+ casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways)
+ {
var_class_requires_const = true;
if (!var->src_is_const && !is_comptime_var) {
- ir_add_error_node(ira, source_node,
- buf_sprintf("variable of type '%s' must be const or comptime",
- buf_ptr(&result_type->name)));
+ ErrorMsg *msg = ir_add_error_node(ira, source_node,
+ buf_sprintf("functions marked inline must be stored in const or comptime var"));
+ AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node;
+ add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here"));
result_type = ira->codegen->builtin_types.entry_invalid;
}
- break;
- case VarClassRequiredAny:
- if (casted_init_value->value.special == ConstValSpecialStatic &&
- casted_init_value->value.type->id == TypeTableEntryIdFn &&
- casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways)
- {
- var_class_requires_const = true;
- if (!var->src_is_const && !is_comptime_var) {
- ErrorMsg *msg = ir_add_error_node(ira, source_node,
- buf_sprintf("functions marked inline must be stored in const or comptime var"));
- AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node;
- add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here"));
- result_type = ira->codegen->builtin_types.entry_invalid;
- }
- }
- break;
+ }
}
}
@@ -11432,7 +12484,8 @@ static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstruc
if (var->mem_slot_index != SIZE_MAX) {
assert(var->mem_slot_index < ira->exec_context.mem_slot_count);
ConstExprValue *mem_slot = &ira->exec_context.mem_slot_list[var->mem_slot_index];
- *mem_slot = casted_init_value->value;
+ copy_const_val(mem_slot, &casted_init_value->value,
+ !is_comptime_var || var->gen_is_const);
if (is_comptime_var || (var_class_requires_const && var->gen_is_const)) {
ir_build_const_from(ira, &decl_var_instruction->base);
@@ -11578,11 +12631,11 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi
case TypeTableEntryIdMetaType:
case TypeTableEntryIdVoid:
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdNamespace:
@@ -11602,11 +12655,11 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi
case TypeTableEntryIdFloat:
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
zig_panic("TODO export const value of type %s", buf_ptr(&target->value.type->name));
@@ -11633,22 +12686,24 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) {
static TypeTableEntry *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
IrInstructionErrorReturnTrace *instruction)
{
- if (instruction->nullable == IrInstructionErrorReturnTrace::Null) {
+ if (instruction->optional == IrInstructionErrorReturnTrace::Null) {
TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen);
- TypeTableEntry *nullable_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type);
+ TypeTableEntry *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type);
if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) {
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_maybe = nullptr;
- return nullable_type;
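+            // Optional pointer types are represented as a bare pointer; a hard-coded address of 0 stands for null.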
+ assert(get_codegen_ptr_type(optional_type) != nullptr);
+ out_val->data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
+ out_val->data.x_ptr.data.hard_coded_addr.addr = 0;
+ return optional_type;
}
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, instruction->nullable);
+ instruction->base.source_node, instruction->optional);
ir_link_new_instruction(new_instruction, &instruction->base);
- return nullable_type;
+ return optional_type;
} else {
assert(ira->codegen->have_err_ret_tracing);
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, instruction->nullable);
+ instruction->base.source_node, instruction->optional);
ir_link_new_instruction(new_instruction, &instruction->base);
return get_ptr_to_stack_trace_type(ira->codegen);
}
@@ -11706,7 +12761,7 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i
{
VariableTableEntry *coro_allocator_var = ira->old_irb.exec->coro_allocator_var;
assert(coro_allocator_var != nullptr);
- IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var, true, false);
+ IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var);
IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst);
assert(result->value.type != nullptr);
return result;
@@ -11749,7 +12804,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *c
TypeTableEntry *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type);
IrInstruction *result = ir_build_call(&ira->new_irb, call_instruction->base.scope, call_instruction->base.source_node,
- fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst);
+ fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst, nullptr);
result->value.type = async_return_type;
return result;
}
@@ -11799,7 +12854,7 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod
IrInstruction *casted_arg;
if (is_var_args) {
arg_part_of_generic_id = true;
- casted_arg = ir_implicit_byval_const_ref_cast(ira, arg);
+ casted_arg = arg;
} else {
if (param_decl_node->data.param_decl.var_token == nullptr) {
AstNode *param_type_node = param_decl_node->data.param_decl.type;
@@ -11812,12 +12867,12 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod
return false;
} else {
arg_part_of_generic_id = true;
- casted_arg = ir_implicit_byval_const_ref_cast(ira, arg);
+ casted_arg = arg;
}
}
bool comptime_arg = param_decl_node->data.param_decl.is_inline ||
- casted_arg->value.type->id == TypeTableEntryIdNumLitInt || casted_arg->value.type->id == TypeTableEntryIdNumLitFloat;
+ casted_arg->value.type->id == TypeTableEntryIdComptimeInt || casted_arg->value.type->id == TypeTableEntryIdComptimeFloat;
ConstExprValue *arg_val;
@@ -11835,6 +12890,7 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod
}
Buf *param_name = param_decl_node->data.param_decl.name;
+ if (!param_name) return false;
if (!is_var_args) {
VariableTableEntry *var = add_variable(ira->codegen, param_decl_node,
*child_scope, param_name, true, arg_val, nullptr);
@@ -11842,11 +12898,11 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod
var->shadowable = !comptime_arg;
*next_proto_i += 1;
- } else if (casted_arg->value.type->id == TypeTableEntryIdNumLitInt ||
- casted_arg->value.type->id == TypeTableEntryIdNumLitFloat)
+ } else if (casted_arg->value.type->id == TypeTableEntryIdComptimeInt ||
+ casted_arg->value.type->id == TypeTableEntryIdComptimeFloat)
{
ir_add_error(ira, casted_arg,
- buf_sprintf("compiler bug: integer and float literals in var args function must be casted. https://github.com/zig-lang/zig/issues/557"));
+ buf_sprintf("compiler bug: integer and float literals in var args function must be casted. https://github.com/ziglang/zig/issues/557"));
return false;
}
@@ -11887,7 +12943,7 @@ static VariableTableEntry *get_fn_var_by_index(FnTableEntry *fn_entry, size_t in
}
static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
- VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr)
+ VariableTableEntry *var)
{
if (var->mem_slot_index != SIZE_MAX && var->owner_exec->analysis == nullptr) {
assert(ira->codegen->errors.length != 0);
@@ -11913,8 +12969,8 @@ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
}
}
- bool is_const = (var->value->type->id == TypeTableEntryIdMetaType) ? is_const_ptr : var->src_is_const;
- bool is_volatile = (var->value->type->id == TypeTableEntryIdMetaType) ? is_volatile_ptr : false;
+ bool is_const = var->src_is_const;
+ bool is_volatile = false;
if (mem_slot != nullptr) {
switch (mem_slot->special) {
case ConstValSpecialRuntime:
@@ -11940,9 +12996,9 @@ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
no_mem_slot:
IrInstruction *var_ptr_instruction = ir_build_var_ptr(&ira->new_irb,
- instruction->scope, instruction->source_node, var, is_const, is_volatile);
+ instruction->scope, instruction->source_node, var);
var_ptr_instruction->value.type = get_pointer_to_type_extra(ira->codegen, var->value->type,
- var->src_is_const, is_volatile, var->align_bytes, 0, 0);
+ var->src_is_const, is_volatile, PtrLenSingle, var->align_bytes, 0, 0);
type_ensure_zero_bits_known(ira->codegen, var->value->type);
bool in_fn_scope = (scope_fn_entry(var->parent_scope) != nullptr);
@@ -11961,14 +13017,22 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
// for extern functions, the var args argument is not counted.
// for zig functions, it is.
size_t var_args_1_or_0;
- if (fn_type_id->cc == CallingConventionUnspecified) {
- var_args_1_or_0 = fn_type_id->is_var_args ? 1 : 0;
- } else {
+ if (fn_type_id->cc == CallingConventionC) {
var_args_1_or_0 = 0;
+ } else {
+ var_args_1_or_0 = fn_type_id->is_var_args ? 1 : 0;
}
size_t src_param_count = fn_type_id->param_count - var_args_1_or_0;
size_t call_param_count = call_instruction->arg_count + first_arg_1_or_0;
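+    // An argument of type ArgTuple (forwarded var args) expands into its individual entries,
+    // so adjust the effective parameter count of the call accordingly.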
+ for (size_t i = 0; i < call_instruction->arg_count; i += 1) {
+ ConstExprValue *arg_tuple_value = &call_instruction->args[i]->other->value;
+ if (arg_tuple_value->type->id == TypeTableEntryIdArgTuple) {
+ call_param_count -= 1;
+ call_param_count += arg_tuple_value->data.x_arg_tuple.end_index -
+ arg_tuple_value->data.x_arg_tuple.start_index;
+ }
+ }
AstNode *source_node = call_instruction->base.source_node;
AstNode *fn_proto_node = fn_entry ? fn_entry->proto_node : nullptr;;
@@ -12031,9 +13095,18 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
size_t next_proto_i = 0;
if (first_arg_ptr) {
- IrInstruction *first_arg;
assert(first_arg_ptr->value.type->id == TypeTableEntryIdPointer);
- if (handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) {
+
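+        // If the callee's first parameter type is already known and is not a pointer,
+        // the bound first argument must be dereferenced and passed by value.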
+ bool first_arg_known_bare = false;
+ if (fn_type_id->next_param_index >= 1) {
+ TypeTableEntry *param_type = fn_type_id->param_info[next_proto_i].type;
+ if (type_is_invalid(param_type))
+ return ira->codegen->builtin_types.entry_invalid;
+ first_arg_known_bare = param_type->id != TypeTableEntryIdPointer;
+ }
+
+ IrInstruction *first_arg;
+ if (!first_arg_known_bare && handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) {
first_arg = first_arg_ptr;
} else {
first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr);
@@ -12047,7 +13120,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
if (fn_proto_node->data.fn_proto.is_var_args) {
ir_add_error(ira, &call_instruction->base,
- buf_sprintf("compiler bug: unable to call var args function at compile time. https://github.com/zig-lang/zig/issues/313"));
+ buf_sprintf("compiler bug: unable to call var args function at compile time. https://github.com/ziglang/zig/issues/313"));
return ira->codegen->builtin_types.entry_invalid;
}
@@ -12119,17 +13192,27 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
return ir_finish_anal(ira, return_type);
}
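+    // When the call specifies a new stack, implicitly cast it to a slice of u8 before building the call.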
+ IrInstruction *casted_new_stack = nullptr;
+ if (call_instruction->new_stack != nullptr) {
+ TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
+ false, false, PtrLenUnknown,
+ get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0);
+ TypeTableEntry *u8_slice = get_slice_type(ira->codegen, u8_ptr);
+ IrInstruction *new_stack = call_instruction->new_stack->other;
+ if (type_is_invalid(new_stack->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ casted_new_stack = ir_implicit_cast(ira, new_stack, u8_slice);
+ if (type_is_invalid(casted_new_stack->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
if (fn_type->data.fn.is_generic) {
if (!fn_entry) {
ir_add_error(ira, call_instruction->fn_ref,
buf_sprintf("calling a generic function requires compile-time known function value"));
return ira->codegen->builtin_types.entry_invalid;
}
- if (call_instruction->is_async && fn_type_id->is_var_args) {
- ir_add_error(ira, call_instruction->fn_ref,
- buf_sprintf("compiler bug: TODO: implement var args async functions. https://github.com/zig-lang/zig/issues/557"));
- return ira->codegen->builtin_types.entry_invalid;
- }
// Count the arguments of the function type id we are creating
size_t new_fn_arg_count = first_arg_1_or_0;
@@ -12168,9 +13251,18 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
size_t next_proto_i = 0;
if (first_arg_ptr) {
- IrInstruction *first_arg;
assert(first_arg_ptr->value.type->id == TypeTableEntryIdPointer);
- if (handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) {
+
+ bool first_arg_known_bare = false;
+ if (fn_type_id->next_param_index >= 1) {
+ TypeTableEntry *param_type = fn_type_id->param_info[next_proto_i].type;
+ if (type_is_invalid(param_type))
+ return ira->codegen->builtin_types.entry_invalid;
+ first_arg_known_bare = param_type->id != TypeTableEntryIdPointer;
+ }
+
+ IrInstruction *first_arg;
+ if (!first_arg_known_bare && handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) {
first_arg = first_arg_ptr;
} else {
first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr);
@@ -12195,25 +13287,25 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
if (type_is_invalid(arg->value.type))
return ira->codegen->builtin_types.entry_invalid;
- AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(next_proto_i);
- assert(param_decl_node->type == NodeTypeParamDecl);
- bool is_var_args = param_decl_node->data.param_decl.is_var_args;
- if (is_var_args && !found_first_var_arg) {
- first_var_arg = inst_fn_type_id.param_count;
- found_first_var_arg = true;
- }
-
if (arg->value.type->id == TypeTableEntryIdArgTuple) {
for (size_t arg_tuple_i = arg->value.data.x_arg_tuple.start_index;
arg_tuple_i < arg->value.data.x_arg_tuple.end_index; arg_tuple_i += 1)
{
+ AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(next_proto_i);
+ assert(param_decl_node->type == NodeTypeParamDecl);
+ bool is_var_args = param_decl_node->data.param_decl.is_var_args;
+ if (is_var_args && !found_first_var_arg) {
+ first_var_arg = inst_fn_type_id.param_count;
+ found_first_var_arg = true;
+ }
+
VariableTableEntry *arg_var = get_fn_var_by_index(parent_fn_entry, arg_tuple_i);
if (arg_var == nullptr) {
ir_add_error(ira, arg,
- buf_sprintf("compiler bug: var args can't handle void. https://github.com/zig-lang/zig/issues/557"));
+ buf_sprintf("compiler bug: var args can't handle void. https://github.com/ziglang/zig/issues/557"));
return ira->codegen->builtin_types.entry_invalid;
}
- IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, arg, arg_var, true, false);
+ IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, arg, arg_var);
if (type_is_invalid(arg_var_ptr_inst->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -12227,10 +13319,20 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
return ira->codegen->builtin_types.entry_invalid;
}
}
- } else if (!ir_analyze_fn_call_generic_arg(ira, fn_proto_node, arg, &impl_fn->child_scope,
- &next_proto_i, generic_id, &inst_fn_type_id, casted_args, impl_fn))
- {
- return ira->codegen->builtin_types.entry_invalid;
+ } else {
+ AstNode *param_decl_node = fn_proto_node->data.fn_proto.params.at(next_proto_i);
+ assert(param_decl_node->type == NodeTypeParamDecl);
+ bool is_var_args = param_decl_node->data.param_decl.is_var_args;
+ if (is_var_args && !found_first_var_arg) {
+ first_var_arg = inst_fn_type_id.param_count;
+ found_first_var_arg = true;
+ }
+
+ if (!ir_analyze_fn_call_generic_arg(ira, fn_proto_node, arg, &impl_fn->child_scope,
+ &next_proto_i, generic_id, &inst_fn_type_id, casted_args, impl_fn))
+ {
+ return ira->codegen->builtin_types.entry_invalid;
+ }
}
}
@@ -12273,6 +13375,10 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
inst_fn_type_id.return_type = specified_return_type;
}
+ type_ensure_zero_bits_known(ira->codegen, specified_return_type);
+ if (type_is_invalid(specified_return_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
if (type_requires_comptime(specified_return_type)) {
// Throw out our work and call the function as if it were comptime.
return ir_analyze_fn_call(ira, call_instruction, fn_entry, fn_type, fn_ref, first_arg_ptr, true, FnInlineAuto);
@@ -12299,10 +13405,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
return ira->codegen->builtin_types.entry_invalid;
}
if (inst_fn_type_id.async_allocator_type == nullptr) {
- IrInstruction *casted_inst = ir_implicit_byval_const_ref_cast(ira, uncasted_async_allocator_inst);
- if (type_is_invalid(casted_inst->value.type))
- return ira->codegen->builtin_types.entry_invalid;
- inst_fn_type_id.async_allocator_type = casted_inst->value.type;
+ inst_fn_type_id.async_allocator_type = uncasted_async_allocator_inst->value.type;
}
async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, inst_fn_type_id.async_allocator_type);
if (type_is_invalid(async_allocator_inst->value.type))
@@ -12323,6 +13426,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
impl_fn->ir_executable.parent_exec = ira->new_irb.exec;
impl_fn->analyzed_executable.source_node = call_instruction->base.source_node;
impl_fn->analyzed_executable.parent_exec = ira->new_irb.exec;
+ impl_fn->analyzed_executable.backward_branch_quota = ira->new_irb.exec->backward_branch_quota;
impl_fn->analyzed_executable.is_generic_instantiation = true;
ira->codegen->fn_defs.append(impl_fn);
@@ -12345,7 +13449,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
assert(async_allocator_inst == nullptr);
IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base,
impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline,
- call_instruction->is_async, nullptr);
+ call_instruction->is_async, nullptr, casted_new_stack);
ir_add_alloca(ira, new_call_instruction, return_type);
@@ -12363,9 +13467,16 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
IrInstruction **casted_args = allocate(call_param_count);
size_t next_arg_index = 0;
if (first_arg_ptr) {
- IrInstruction *first_arg;
assert(first_arg_ptr->value.type->id == TypeTableEntryIdPointer);
- if (handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) {
+
+ TypeTableEntry *param_type = fn_type_id->param_info[next_arg_index].type;
+ if (type_is_invalid(param_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *first_arg;
+ if (param_type->id == TypeTableEntryIdPointer &&
+ handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type))
+ {
first_arg = first_arg_ptr;
} else {
first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr);
@@ -12373,10 +13484,6 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
return ira->codegen->builtin_types.entry_invalid;
}
- TypeTableEntry *param_type = fn_type_id->param_info[next_arg_index].type;
- if (type_is_invalid(param_type))
- return ira->codegen->builtin_types.entry_invalid;
-
IrInstruction *casted_arg = ir_implicit_cast(ira, first_arg, param_type);
if (type_is_invalid(casted_arg->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -12436,7 +13543,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base,
- fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr);
+ fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr, casted_new_stack);
ir_add_alloca(ira, new_call_instruction, return_type);
return ir_finish_anal(ira, return_type);
@@ -12474,6 +13581,8 @@ static TypeTableEntry *ir_analyze_instruction_call(IrAnalyze *ira, IrInstruction
return ir_finish_anal(ira, cast_instruction->value.type);
} else if (fn_ref->value.type->id == TypeTableEntryIdFn) {
FnTableEntry *fn_table_entry = ir_resolve_fn(ira, fn_ref);
+ if (fn_table_entry == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
return ir_analyze_fn_call(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry,
fn_ref, nullptr, is_comptime, call_instruction->fn_inline);
} else if (fn_ref->value.type->id == TypeTableEntryIdBoundFn) {
@@ -12481,7 +13590,7 @@ static TypeTableEntry *ir_analyze_instruction_call(IrAnalyze *ira, IrInstruction
FnTableEntry *fn_table_entry = fn_ref->value.data.x_bound_fn.fn;
IrInstruction *first_arg_ptr = fn_ref->value.data.x_bound_fn.first_arg;
return ir_analyze_fn_call(ira, call_instruction, fn_table_entry, fn_table_entry->type_entry,
- nullptr, first_arg_ptr, is_comptime, call_instruction->fn_inline);
+ fn_ref, first_arg_ptr, is_comptime, call_instruction->fn_inline);
} else {
ir_add_error_node(ira, fn_ref->source_node,
buf_sprintf("type '%s' not a function", buf_ptr(&fn_ref->value.type->name)));
@@ -12507,6 +13616,12 @@ static TypeTableEntry *ir_analyze_dereference(IrAnalyze *ira, IrInstructionUnOp
if (type_is_invalid(ptr_type)) {
return ira->codegen->builtin_types.entry_invalid;
} else if (ptr_type->id == TypeTableEntryIdPointer) {
+ if (ptr_type->data.pointer.ptr_len == PtrLenUnknown) {
+ ir_add_error_node(ira, un_op_instruction->base.source_node,
+ buf_sprintf("index syntax required for unknown-length pointer type '%s'",
+ buf_ptr(&ptr_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
child_type = ptr_type->data.pointer.child_type;
} else {
ir_add_error_node(ira, un_op_instruction->base.source_node,
@@ -12519,7 +13634,11 @@ static TypeTableEntry *ir_analyze_dereference(IrAnalyze *ira, IrInstructionUnOp
// one of the ptr instructions
if (instr_is_comptime(value)) {
- ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value);
+ ConstExprValue *comptime_value = ir_resolve_const(ira, value, UndefBad);
+ if (comptime_value == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *pointee = const_ptr_pointee(ira->codegen, comptime_value);
if (pointee->type == child_type) {
ConstExprValue *out_val = ir_build_const_from(ira, &un_op_instruction->base);
copy_const_val(out_val, pointee, value->value.data.x_ptr.mut == ConstPtrMutComptimeConst);
@@ -12536,6 +13655,10 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op
TypeTableEntry *type_entry = ir_resolve_type(ira, value);
if (type_is_invalid(type_entry))
return ira->codegen->builtin_types.entry_invalid;
+ ensure_complete_type(ira->codegen, type_entry);
+ if (type_is_invalid(type_entry))
+ return ira->codegen->builtin_types.entry_invalid;
+
switch (type_entry->id) {
case TypeTableEntryIdInvalid:
zig_unreachable();
@@ -12547,11 +13670,11 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -12564,13 +13687,13 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op
case TypeTableEntryIdPromise:
{
ConstExprValue *out_val = ir_build_const_from(ira, &un_op_instruction->base);
- out_val->data.x_type = get_maybe_type(ira->codegen, type_entry);
+ out_val->data.x_type = get_optional_type(ira->codegen, type_entry);
return ira->codegen->builtin_types.entry_type;
}
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdOpaque:
ir_add_error_node(ira, un_op_instruction->base.source_node,
- buf_sprintf("type '%s' not nullable", buf_ptr(&type_entry->name)));
+ buf_sprintf("type '%s' not optional", buf_ptr(&type_entry->name)));
return ira->codegen->builtin_types.entry_invalid;
}
zig_unreachable();
@@ -12584,10 +13707,10 @@ static TypeTableEntry *ir_analyze_negation(IrAnalyze *ira, IrInstructionUnOp *un
bool is_wrap_op = (un_op_instruction->op_id == IrUnOpNegationWrap);
- bool is_float = (expr_type->id == TypeTableEntryIdFloat || expr_type->id == TypeTableEntryIdNumLitFloat);
+ bool is_float = (expr_type->id == TypeTableEntryIdFloat || expr_type->id == TypeTableEntryIdComptimeFloat);
if ((expr_type->id == TypeTableEntryIdInt && expr_type->data.integral.is_signed) ||
- expr_type->id == TypeTableEntryIdNumLitInt || (is_float && !is_wrap_op))
+ expr_type->id == TypeTableEntryIdComptimeInt || (is_float && !is_wrap_op))
{
if (instr_is_comptime(value)) {
ConstExprValue *target_const_val = ir_resolve_const(ira, value, UndefBad);
@@ -12603,7 +13726,7 @@ static TypeTableEntry *ir_analyze_negation(IrAnalyze *ira, IrInstructionUnOp *un
} else {
bigint_negate(&out_val->data.x_bigint, &target_const_val->data.x_bigint);
}
- if (is_wrap_op || is_float || expr_type->id == TypeTableEntryIdNumLitInt) {
+ if (is_wrap_op || is_float || expr_type->id == TypeTableEntryIdComptimeInt) {
return expr_type;
}
@@ -12632,7 +13755,7 @@ static TypeTableEntry *ir_analyze_bin_not(IrAnalyze *ira, IrInstructionUnOp *ins
if (expr_type->id == TypeTableEntryIdInt) {
if (instr_is_comptime(value)) {
ConstExprValue *target_const_val = ir_resolve_const(ira, value, UndefBad);
- if (!target_const_val)
+ if (target_const_val == nullptr)
return ira->codegen->builtin_types.entry_invalid;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
@@ -12662,7 +13785,7 @@ static TypeTableEntry *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstructio
return ir_analyze_negation(ira, un_op_instruction);
case IrUnOpDereference:
return ir_analyze_dereference(ira, un_op_instruction);
- case IrUnOpMaybe:
+ case IrUnOpOptional:
return ir_analyze_maybe(ira, un_op_instruction);
}
zig_unreachable();
@@ -12794,15 +13917,15 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP
return first_value->value.type;
}
- TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, phi_instruction->base.source_node,
+ TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, phi_instruction->base.source_node, nullptr,
new_incoming_values.items, new_incoming_values.length);
if (type_is_invalid(resolved_type))
return resolved_type;
- if (resolved_type->id == TypeTableEntryIdNumLitFloat ||
- resolved_type->id == TypeTableEntryIdNumLitInt ||
- resolved_type->id == TypeTableEntryIdNullLit ||
- resolved_type->id == TypeTableEntryIdUndefLit)
+ if (resolved_type->id == TypeTableEntryIdComptimeFloat ||
+ resolved_type->id == TypeTableEntryIdComptimeInt ||
+ resolved_type->id == TypeTableEntryIdNull ||
+ resolved_type->id == TypeTableEntryIdUndefined)
{
ir_add_error_node(ira, phi_instruction->base.source_node,
buf_sprintf("unable to infer expression type"));
@@ -12847,17 +13970,16 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP
}
static TypeTableEntry *ir_analyze_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
- VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr)
+ VariableTableEntry *var)
{
- IrInstruction *result = ir_get_var_ptr(ira, instruction, var, is_const_ptr, is_volatile_ptr);
+ IrInstruction *result = ir_get_var_ptr(ira, instruction, var);
ir_link_new_instruction(result, instruction);
return result->value.type;
}
static TypeTableEntry *ir_analyze_instruction_var_ptr(IrAnalyze *ira, IrInstructionVarPtr *var_ptr_instruction) {
VariableTableEntry *var = var_ptr_instruction->var;
- return ir_analyze_var_ptr(ira, &var_ptr_instruction->base, var, var_ptr_instruction->is_const,
- var_ptr_instruction->is_volatile);
+ return ir_analyze_var_ptr(ira, &var_ptr_instruction->base, var);
}
static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, uint32_t new_align) {
@@ -12865,25 +13987,40 @@ static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, ui
return get_pointer_to_type_extra(g,
ptr_type->data.pointer.child_type,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
+ ptr_type->data.pointer.ptr_len,
new_align,
ptr_type->data.pointer.bit_offset, ptr_type->data.pointer.unaligned_bit_count);
}
+static TypeTableEntry *adjust_slice_align(CodeGen *g, TypeTableEntry *slice_type, uint32_t new_align) {
+ assert(is_slice(slice_type));
+ TypeTableEntry *ptr_type = adjust_ptr_align(g, slice_type->data.structure.fields[slice_ptr_index].type_entry,
+ new_align);
+ return get_slice_type(g, ptr_type);
+}
+
+static TypeTableEntry *adjust_ptr_len(CodeGen *g, TypeTableEntry *ptr_type, PtrLen ptr_len) {
+ assert(ptr_type->id == TypeTableEntryIdPointer);
+ return get_pointer_to_type_extra(g,
+ ptr_type->data.pointer.child_type,
+ ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
+ ptr_len,
+ ptr_type->data.pointer.alignment,
+ ptr_type->data.pointer.bit_offset, ptr_type->data.pointer.unaligned_bit_count);
+}
+
static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstructionElemPtr *elem_ptr_instruction) {
IrInstruction *array_ptr = elem_ptr_instruction->array_ptr->other;
if (type_is_invalid(array_ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
+ ConstExprValue *orig_array_ptr_val = &array_ptr->value;
+
IrInstruction *elem_index = elem_ptr_instruction->elem_index->other;
if (type_is_invalid(elem_index->value.type))
return ira->codegen->builtin_types.entry_invalid;
- TypeTableEntry *ptr_type = array_ptr->value.type;
- if (ptr_type->id == TypeTableEntryIdMetaType) {
- ir_add_error(ira, &elem_ptr_instruction->base,
- buf_sprintf("array access of non-array type '%s'", buf_ptr(&ptr_type->name)));
- return ira->codegen->builtin_types.entry_invalid;
- }
+ TypeTableEntry *ptr_type = orig_array_ptr_val->type;
assert(ptr_type->id == TypeTableEntryIdPointer);
TypeTableEntry *array_type = ptr_type->data.pointer.child_type;
@@ -12894,7 +14031,18 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
if (type_is_invalid(array_type)) {
return array_type;
- } else if (array_type->id == TypeTableEntryIdArray) {
+ } else if (array_type->id == TypeTableEntryIdArray ||
+ (array_type->id == TypeTableEntryIdPointer &&
+ array_type->data.pointer.ptr_len == PtrLenSingle &&
+ array_type->data.pointer.child_type->id == TypeTableEntryIdArray))
+ {
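+        // Indexing through a single-item pointer to an array: unwrap one level of indirection to reach the array.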
+ if (array_type->id == TypeTableEntryIdPointer) {
+ array_type = array_type->data.pointer.child_type;
+ ptr_type = ptr_type->data.pointer.child_type;
+ if (orig_array_ptr_val->special != ConstValSpecialRuntime) {
+ orig_array_ptr_val = const_ptr_pointee(ira->codegen, orig_array_ptr_val);
+ }
+ }
if (array_type->data.array.len == 0) {
ir_add_error_node(ira, elem_ptr_instruction->base.source_node,
buf_sprintf("index 0 outside array of size 0"));
@@ -12904,6 +14052,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
if (ptr_type->data.pointer.unaligned_bit_count == 0) {
return_type = get_pointer_to_type_extra(ira->codegen, child_type,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
+ elem_ptr_instruction->ptr_len,
ptr_type->data.pointer.alignment, 0, 0);
} else {
uint64_t elem_val_scalar;
@@ -12915,12 +14064,19 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
return_type = get_pointer_to_type_extra(ira->codegen, child_type,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
+ elem_ptr_instruction->ptr_len,
1, (uint32_t)bit_offset, (uint32_t)bit_width);
}
} else if (array_type->id == TypeTableEntryIdPointer) {
- return_type = array_type;
+ if (array_type->data.pointer.ptr_len == PtrLenSingle) {
+ ir_add_error_node(ira, elem_ptr_instruction->base.source_node,
+ buf_sprintf("index of single-item pointer"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+ return_type = adjust_ptr_len(ira->codegen, array_type, elem_ptr_instruction->ptr_len);
} else if (is_slice(array_type)) {
- return_type = array_type->data.structure.fields[slice_ptr_index].type_entry;
+ return_type = adjust_ptr_len(ira->codegen, array_type->data.structure.fields[slice_ptr_index].type_entry,
+ elem_ptr_instruction->ptr_len);
} else if (array_type->id == TypeTableEntryIdArgTuple) {
ConstExprValue *ptr_val = ir_resolve_const(ira, array_ptr, UndefBad);
if (!ptr_val)
@@ -12945,8 +14101,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
bool is_const = true;
bool is_volatile = false;
if (var) {
- return ir_analyze_var_ptr(ira, &elem_ptr_instruction->base, var,
- is_const, is_volatile);
+ return ir_analyze_var_ptr(ira, &elem_ptr_instruction->base, var);
} else {
return ir_analyze_const_ptr(ira, &elem_ptr_instruction->base, &ira->codegen->const_void_val,
ira->codegen->builtin_types.entry_void, ConstPtrMutComptimeConst, is_const, is_volatile);
@@ -12964,6 +14119,9 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
bool safety_check_on = elem_ptr_instruction->safety_check_on;
ensure_complete_type(ira->codegen, return_type->data.pointer.child_type);
+ if (type_is_invalid(return_type->data.pointer.child_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type);
uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type);
uint64_t ptr_align = return_type->data.pointer.alignment;
@@ -13001,9 +14159,9 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
}
ConstExprValue *array_ptr_val;
- if (array_ptr->value.special != ConstValSpecialRuntime &&
- (array_ptr->value.data.x_ptr.mut != ConstPtrMutRuntimeVar || array_type->id == TypeTableEntryIdArray) &&
- (array_ptr_val = const_ptr_pointee(ira->codegen, &array_ptr->value)) &&
+ if (orig_array_ptr_val->special != ConstValSpecialRuntime &&
+ (orig_array_ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar || array_type->id == TypeTableEntryIdArray) &&
+ (array_ptr_val = const_ptr_pointee(ira->codegen, orig_array_ptr_val)) &&
array_ptr_val->special != ConstValSpecialRuntime &&
(array_type->id != TypeTableEntryIdPointer ||
array_ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr))
@@ -13060,8 +14218,10 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
} else if (is_slice(array_type)) {
ConstExprValue *ptr_field = &array_ptr_val->data.x_struct.fields[slice_ptr_index];
if (ptr_field->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) {
- ir_build_elem_ptr_from(&ira->new_irb, &elem_ptr_instruction->base, array_ptr,
- casted_elem_index, false);
+ IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope, elem_ptr_instruction->base.source_node,
+ array_ptr, casted_elem_index, false, elem_ptr_instruction->ptr_len);
+ result->value.type = return_type;
+ ir_link_new_instruction(result, &elem_ptr_instruction->base);
return return_type;
}
ConstExprValue *len_field = &array_ptr_val->data.x_struct.fields[slice_len_index];
@@ -13106,7 +14266,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
} else if (array_type->id == TypeTableEntryIdArray) {
ConstExprValue *out_val = ir_build_const_from(ira, &elem_ptr_instruction->base);
out_val->data.x_ptr.special = ConstPtrSpecialBaseArray;
- out_val->data.x_ptr.mut = array_ptr->value.data.x_ptr.mut;
+ out_val->data.x_ptr.mut = orig_array_ptr_val->data.x_ptr.mut;
out_val->data.x_ptr.data.base_array.array_val = array_ptr_val;
out_val->data.x_ptr.data.base_array.elem_index = index;
return return_type;
@@ -13129,8 +14289,10 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
}
}
- ir_build_elem_ptr_from(&ira->new_irb, &elem_ptr_instruction->base, array_ptr,
- casted_elem_index, safety_check_on);
+ IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope, elem_ptr_instruction->base.source_node,
+ array_ptr, casted_elem_index, safety_check_on, elem_ptr_instruction->ptr_len);
+ result->value.type = return_type;
+ ir_link_new_instruction(result, &elem_ptr_instruction->base);
return return_type;
}
@@ -13174,7 +14336,6 @@ static IrInstruction *ir_analyze_container_member_access_inner(IrAnalyze *ira,
return ira->codegen->invalid_instruction;
}
-
static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name,
IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type)
{
@@ -13206,7 +14367,7 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_
return ira->codegen->invalid_instruction;
ConstExprValue *field_val = &struct_val->data.x_struct.fields[field->src_index];
TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, field_val->type,
- is_const, is_volatile, align_bytes,
+ is_const, is_volatile, PtrLenSingle, align_bytes,
(uint32_t)(ptr_bit_offset + field->packed_bits_offset),
(uint32_t)unaligned_bit_count_for_result_type);
IrInstruction *result = ir_get_const(ira, source_instr);
@@ -13222,6 +14383,7 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_
IrInstruction *result = ir_build_struct_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node,
container_ptr, field);
result->value.type = get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile,
+ PtrLenSingle,
align_bytes,
(uint32_t)(ptr_bit_offset + field->packed_bits_offset),
(uint32_t)unaligned_bit_count_for_result_type);
@@ -13236,9 +14398,56 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_
} else if (bare_type->id == TypeTableEntryIdUnion) {
TypeUnionField *field = find_union_type_field(bare_type, field_name);
if (field) {
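+            // At compile time, verify the requested field is the union's active field and
+            // return a comptime-known pointer to its payload.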
+ if (instr_is_comptime(container_ptr)) {
+ ConstExprValue *ptr_val = ir_resolve_const(ira, container_ptr, UndefBad);
+ if (!ptr_val)
+ return ira->codegen->invalid_instruction;
+
+ if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) {
+ ConstExprValue *union_val = const_ptr_pointee(ira->codegen, ptr_val);
+ if (type_is_invalid(union_val->type))
+ return ira->codegen->invalid_instruction;
+
+ TypeUnionField *actual_field = find_union_field_by_tag(bare_type, &union_val->data.x_union.tag);
+ if (actual_field == nullptr)
+ zig_unreachable();
+
+ if (field != actual_field) {
+ ir_add_error_node(ira, source_instr->source_node,
+ buf_sprintf("accessing union field '%s' while field '%s' is set", buf_ptr(field_name),
+ buf_ptr(actual_field->name)));
+ return ira->codegen->invalid_instruction;
+ }
+
+ ConstExprValue *payload_val = union_val->data.x_union.payload;
+
+ TypeTableEntry *field_type = field->type_entry;
+                    if (field_type->id == TypeTableEntryIdVoid) {
+ assert(payload_val == nullptr);
+ payload_val = create_const_vals(1);
+ payload_val->special = ConstValSpecialStatic;
+ payload_val->type = field_type;
+ }
+
+ TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, field_type,
+ is_const, is_volatile,
+ PtrLenSingle,
+ get_abi_alignment(ira->codegen, field_type), 0, 0);
+
+ IrInstruction *result = ir_get_const(ira, source_instr);
+ ConstExprValue *const_val = &result->value;
+ const_val->data.x_ptr.special = ConstPtrSpecialRef;
+ const_val->data.x_ptr.mut = container_ptr->value.data.x_ptr.mut;
+ const_val->data.x_ptr.data.ref.pointee = payload_val;
+ const_val->type = ptr_type;
+ return result;
+ }
+ }
+
IrInstruction *result = ir_build_union_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, container_ptr, field);
result->value.type = get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile,
- get_abi_alignment(ira->codegen, field->type_entry), 0, 0);
+ PtrLenSingle, get_abi_alignment(ira->codegen, field->type_entry), 0, 0);
return result;
} else {
return ir_analyze_container_member_access_inner(ira, bare_type, field_name,
@@ -13286,7 +14495,7 @@ static TypeTableEntry *ir_analyze_decl_ref(IrAnalyze *ira, IrInstruction *source
add_link_lib_symbol(ira, tld_var->extern_lib_name, &var->name, source_instruction->source_node);
}
- return ir_analyze_var_ptr(ira, source_instruction, var, false, false);
+ return ir_analyze_var_ptr(ira, source_instruction, var);
}
case TldIdFn:
{
@@ -13335,16 +14544,18 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
if (type_is_invalid(container_ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
- TypeTableEntry *container_type;
- if (container_ptr->value.type->id == TypeTableEntryIdPointer) {
- container_type = container_ptr->value.type->data.pointer.child_type;
- } else if (container_ptr->value.type->id == TypeTableEntryIdMetaType) {
- container_type = container_ptr->value.type;
- } else {
- zig_unreachable();
+ TypeTableEntry *container_type = container_ptr->value.type->data.pointer.child_type;
+ assert(container_ptr->value.type->id == TypeTableEntryIdPointer);
+
+ Buf *field_name = field_ptr_instruction->field_name_buffer;
+ if (!field_name) {
+ IrInstruction *field_name_expr = field_ptr_instruction->field_name_expr->other;
+ field_name = ir_resolve_str(ira, field_name_expr);
+ if (!field_name)
+ return ira->codegen->builtin_types.entry_invalid;
}
- Buf *field_name = field_ptr_instruction->field_name;
+
AstNode *source_node = field_ptr_instruction->base.source_node;
if (type_is_invalid(container_type)) {
@@ -13362,10 +14573,14 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
ir_link_new_instruction(result, &field_ptr_instruction->base);
return result->value.type;
}
- } else if (container_type->id == TypeTableEntryIdArray) {
+ } else if (is_array_ref(container_type)) {
if (buf_eql_str(field_name, "len")) {
ConstExprValue *len_val = create_const_vals(1);
- init_const_usize(ira->codegen, len_val, container_type->data.array.len);
+ if (container_type->id == TypeTableEntryIdPointer) {
+ init_const_usize(ira->codegen, len_val, container_type->data.pointer.child_type->data.array.len);
+ } else {
+ init_const_usize(ira->codegen, len_val, container_type->data.array.len);
+ }
TypeTableEntry *usize = ira->codegen->builtin_types.entry_usize;
bool ptr_is_const = true;
@@ -13407,17 +14622,9 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
if (!container_ptr_val)
return ira->codegen->builtin_types.entry_invalid;
- TypeTableEntry *child_type;
- if (container_ptr->value.type->id == TypeTableEntryIdMetaType) {
- TypeTableEntry *ptr_type = container_ptr_val->data.x_type;
- assert(ptr_type->id == TypeTableEntryIdPointer);
- child_type = ptr_type->data.pointer.child_type;
- } else if (container_ptr->value.type->id == TypeTableEntryIdPointer) {
- ConstExprValue *child_val = const_ptr_pointee(ira->codegen, container_ptr_val);
- child_type = child_val->data.x_type;
- } else {
- zig_unreachable();
- }
+ assert(container_ptr->value.type->id == TypeTableEntryIdPointer);
+ ConstExprValue *child_val = const_ptr_pointee(ira->codegen, container_ptr_val);
+ TypeTableEntry *child_type = child_val->data.x_type;
if (type_is_invalid(child_type)) {
return ira->codegen->builtin_types.entry_invalid;
@@ -13435,7 +14642,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
}
if (child_type->id == TypeTableEntryIdEnum) {
ensure_complete_type(ira->codegen, child_type);
- if (child_type->data.enumeration.is_invalid)
+ if (type_is_invalid(child_type))
return ira->codegen->builtin_types.entry_invalid;
TypeEnumField *field = find_enum_type_field(child_type, field_name);
@@ -13446,7 +14653,16 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
create_const_enum(child_type, &field->value), child_type,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile);
}
- } else if (child_type->id == TypeTableEntryIdUnion &&
+ }
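+        // Check the container's declaration scope for this name before the union tag-type handling below.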
+ ScopeDecls *container_scope = get_container_scope(child_type);
+ if (container_scope != nullptr) {
+ auto entry = container_scope->decl_table.maybe_get(field_name);
+ Tld *tld = entry ? entry->value : nullptr;
+ if (tld) {
+ return ir_analyze_decl_ref(ira, &field_ptr_instruction->base, tld);
+ }
+ }
+ if (child_type->id == TypeTableEntryIdUnion &&
(child_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr ||
child_type->data.unionation.decl_node->data.container_decl.auto_enum))
{
@@ -13463,14 +14679,6 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile);
}
}
- ScopeDecls *container_scope = get_container_scope(child_type);
- if (container_scope != nullptr) {
- auto entry = container_scope->decl_table.maybe_get(field_name);
- Tld *tld = entry ? entry->value : nullptr;
- if (tld) {
- return ir_analyze_decl_ref(ira, &field_ptr_instruction->base, tld);
- }
- }
ir_add_error(ira, &field_ptr_instruction->base,
buf_sprintf("container '%s' has no member called '%s'",
buf_ptr(&child_type->name), buf_ptr(field_name)));
@@ -13501,7 +14709,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
}
err_set_type = err_entry->set_with_only_this_in_it;
} else {
- if (!resolve_inferred_error_set(ira, child_type, field_ptr_instruction->base.source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, child_type, field_ptr_instruction->base.source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
err_entry = find_err_table_entry(child_type, field_name);
@@ -13623,7 +14831,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
buf_ptr(&child_type->name), buf_ptr(field_name)));
return ira->codegen->builtin_types.entry_invalid;
}
- } else if (child_type->id == TypeTableEntryIdMaybe) {
+ } else if (child_type->id == TypeTableEntryIdOptional) {
if (buf_eql_str(field_name, "Child")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
@@ -13639,6 +14847,15 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
}
} else if (child_type->id == TypeTableEntryIdFn) {
if (buf_eql_str(field_name, "ReturnType")) {
+ if (child_type->data.fn.fn_type_id.return_type == nullptr) {
+                    // The return type can only ever be null if the function is generic
+ assert(child_type->data.fn.is_generic);
+
+ ir_add_error(ira, &field_ptr_instruction->base,
+ buf_sprintf("ReturnType has not been resolved because '%s' is generic", buf_ptr(&child_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_analyze_const_ptr(ira, &field_ptr_instruction->base,
@@ -13707,6 +14924,9 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
static TypeTableEntry *ir_analyze_instruction_load_ptr(IrAnalyze *ira, IrInstructionLoadPtr *load_ptr_instruction) {
IrInstruction *ptr = load_ptr_instruction->ptr->other;
+ if (type_is_invalid(ptr->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *result = ir_get_deref(ira, &load_ptr_instruction->base, ptr);
ir_link_new_instruction(result, &load_ptr_instruction->base);
assert(result->value.type);
@@ -13779,10 +14999,10 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi
switch (type_entry->id) {
case TypeTableEntryIdInvalid:
zig_unreachable(); // handled above
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
@@ -13795,7 +15015,7 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -13827,7 +15047,7 @@ static TypeTableEntry *ir_analyze_instruction_to_ptr_type(IrAnalyze *ira,
if (type_entry->id == TypeTableEntryIdArray) {
ptr_type = get_pointer_to_type(ira->codegen, type_entry->data.array.child_type, false);
} else if (is_slice(type_entry)) {
- ptr_type = type_entry->data.structure.fields[0].type_entry;
+ ptr_type = adjust_ptr_len(ira->codegen, type_entry->data.structure.fields[0].type_entry, PtrLenSingle);
} else if (type_entry->id == TypeTableEntryIdArgTuple) {
ConstExprValue *arg_tuple_val = ir_resolve_const(ira, value, UndefBad);
if (!arg_tuple_val)
@@ -14045,8 +15265,8 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case TypeTableEntryIdInvalid: // handled above
zig_unreachable();
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdBlock:
case TypeTableEntryIdArgTuple:
case TypeTableEntryIdOpaque:
@@ -14061,9 +15281,9 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14075,7 +15295,7 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira,
{
type_ensure_zero_bits_known(ira->codegen, child_type);
TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, child_type,
- is_const, is_volatile, align_bytes, 0, 0);
+ is_const, is_volatile, PtrLenUnknown, align_bytes, 0, 0);
TypeTableEntry *result_type = get_slice_type(ira->codegen, slice_ptr_type);
ConstExprValue *out_val = ir_build_const_from(ira, &slice_type_instruction->base);
out_val->data.x_type = result_type;
@@ -14153,8 +15373,8 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira,
case TypeTableEntryIdInvalid: // handled above
zig_unreachable();
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdBlock:
case TypeTableEntryIdArgTuple:
case TypeTableEntryIdOpaque:
@@ -14169,9 +15389,9 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira,
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14222,11 +15442,11 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira,
case TypeTableEntryIdInvalid: // handled above
zig_unreachable();
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdBlock:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdMetaType:
case TypeTableEntryIdNamespace:
@@ -14242,7 +15462,7 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira,
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14266,20 +15486,20 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn
TypeTableEntry *type_entry = value->value.type;
- if (type_entry->id == TypeTableEntryIdMaybe) {
+ if (type_entry->id == TypeTableEntryIdOptional) {
if (instr_is_comptime(value)) {
ConstExprValue *maybe_val = ir_resolve_const(ira, value, UndefBad);
if (!maybe_val)
return ira->codegen->builtin_types.entry_invalid;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_bool = (maybe_val->data.x_maybe != nullptr);
+ out_val->data.x_bool = !optional_value_is_null(maybe_val);
return ira->codegen->builtin_types.entry_bool;
}
ir_build_test_nonnull_from(&ira->new_irb, &instruction->base, value);
return ira->codegen->builtin_types.entry_bool;
- } else if (type_entry->id == TypeTableEntryIdNullLit) {
+ } else if (type_entry->id == TypeTableEntryIdNull) {
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
out_val->data.x_bool = false;
return ira->codegen->builtin_types.entry_bool;
@@ -14291,42 +15511,27 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn
}
static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
- IrInstructionUnwrapMaybe *unwrap_maybe_instruction)
+ IrInstructionUnwrapOptional *unwrap_maybe_instruction)
{
IrInstruction *value = unwrap_maybe_instruction->value->other;
if (type_is_invalid(value->value.type))
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *ptr_type = value->value.type;
- if (ptr_type->id == TypeTableEntryIdMetaType) {
- // surprise! actually this is just ??T not an unwrap maybe instruction
- TypeTableEntry *ptr_type_ptr = ir_resolve_type(ira, value);
- assert(ptr_type_ptr->id == TypeTableEntryIdPointer);
- TypeTableEntry *child_type = ptr_type_ptr->data.pointer.child_type;
- type_ensure_zero_bits_known(ira->codegen, child_type);
- TypeTableEntry *layer1 = get_maybe_type(ira->codegen, child_type);
- TypeTableEntry *layer2 = get_maybe_type(ira->codegen, layer1);
- TypeTableEntry *result_type = get_pointer_to_type(ira->codegen, layer2, true);
-
- IrInstruction *const_instr = ir_build_const_type(&ira->new_irb, unwrap_maybe_instruction->base.scope,
- unwrap_maybe_instruction->base.source_node, result_type);
- ir_link_new_instruction(const_instr, &unwrap_maybe_instruction->base);
- return const_instr->value.type;
- }
-
assert(ptr_type->id == TypeTableEntryIdPointer);
TypeTableEntry *type_entry = ptr_type->data.pointer.child_type;
if (type_is_invalid(type_entry)) {
return ira->codegen->builtin_types.entry_invalid;
- } else if (type_entry->id != TypeTableEntryIdMaybe) {
+ } else if (type_entry->id != TypeTableEntryIdOptional) {
ir_add_error_node(ira, unwrap_maybe_instruction->value->source_node,
- buf_sprintf("expected nullable type, found '%s'", buf_ptr(&type_entry->name)));
+ buf_sprintf("expected optional type, found '%s'", buf_ptr(&type_entry->name)));
return ira->codegen->builtin_types.entry_invalid;
}
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
TypeTableEntry *result_type = get_pointer_to_type_extra(ira->codegen, child_type,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
+ PtrLenSingle,
get_abi_alignment(ira->codegen, child_type), 0, 0);
if (instr_is_comptime(value)) {
@@ -14336,13 +15541,18 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
ConstExprValue *maybe_val = const_ptr_pointee(ira->codegen, val);
if (val->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
- if (!maybe_val->data.x_maybe) {
+ if (optional_value_is_null(maybe_val)) {
ir_add_error(ira, &unwrap_maybe_instruction->base, buf_sprintf("unable to unwrap null"));
return ira->codegen->builtin_types.entry_invalid;
}
ConstExprValue *out_val = ir_build_const_from(ira, &unwrap_maybe_instruction->base);
out_val->data.x_ptr.special = ConstPtrSpecialRef;
- out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_maybe;
+ out_val->data.x_ptr.mut = val->data.x_ptr.mut;
+ if (type_is_codegen_pointer(child_type)) {
+ out_val->data.x_ptr.data.ref.pointee = maybe_val;
+ } else {
+ out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_optional;
+ }
return result_type;
}
}
@@ -14400,6 +15610,48 @@ static TypeTableEntry *ir_analyze_instruction_clz(IrAnalyze *ira, IrInstructionC
}
}
+static TypeTableEntry *ir_analyze_instruction_pop_count(IrAnalyze *ira, IrInstructionPopCount *instruction) {
+ IrInstruction *value = instruction->value->other;
+ if (type_is_invalid(value->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (value->value.type->id != TypeTableEntryIdInt && value->value.type->id != TypeTableEntryIdComptimeInt) {
+ ir_add_error(ira, value,
+ buf_sprintf("expected integer type, found '%s'", buf_ptr(&value->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
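+ // Comptime case: compute the popcount of the big integer directly and return a comptime_int.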
+ if (instr_is_comptime(value)) {
+ ConstExprValue *val = ir_resolve_const(ira, value, UndefBad);
+ if (!val)
+ return ira->codegen->builtin_types.entry_invalid;
+ if (bigint_cmp_zero(&val->data.x_bigint) != CmpLT) {
+ size_t result = bigint_popcount_unsigned(&val->data.x_bigint);
+ ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+ bigint_init_unsigned(&out_val->data.x_bigint, result);
+ return ira->codegen->builtin_types.entry_num_lit_int;
+ }
+ if (value->value.type->id == TypeTableEntryIdComptimeInt) {
+ Buf *val_buf = buf_alloc();
+ bigint_append_buf(val_buf, &val->data.x_bigint, 10);
+ ir_add_error(ira, &instruction->base,
+ buf_sprintf("@popCount on negative %s value %s",
+ buf_ptr(&value->value.type->name), buf_ptr(val_buf)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+ size_t result = bigint_popcount_signed(&val->data.x_bigint, value->value.type->data.integral.bit_count);
+ ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+ bigint_init_unsigned(&out_val->data.x_bigint, result);
+ return ira->codegen->builtin_types.entry_num_lit_int;
+ }
+
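+ // Runtime case: emit a pop count instruction; its result type is the smallest unsigned integer that can hold the operand's bit count.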
+ IrInstruction *result = ir_build_pop_count(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, value);
+ result->value.type = get_smallest_unsigned_int_type(ira->codegen, value->value.type->data.integral.bit_count);
+ ir_link_new_instruction(result, &instruction->base);
+ return result->value.type;
+}
+
static IrInstruction *ir_analyze_union_tag(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value) {
if (type_is_invalid(value->value.type))
return ira->codegen->invalid_instruction;
@@ -14409,7 +15661,7 @@ static IrInstruction *ir_analyze_union_tag(IrAnalyze *ira, IrInstruction *source
}
if (value->value.type->id != TypeTableEntryIdUnion) {
- ir_add_error(ira, source_instr,
+ ir_add_error(ira, value,
buf_sprintf("expected enum or union type, found '%s'", buf_ptr(&value->value.type->name)));
return ira->codegen->invalid_instruction;
}
@@ -14450,6 +15702,13 @@ static TypeTableEntry *ir_analyze_instruction_switch_br(IrAnalyze *ira,
if (type_is_invalid(target_value->value.type))
return ir_unreach_error(ira);
+ if (switch_br_instruction->switch_prongs_void != nullptr) {
+ if (type_is_invalid(switch_br_instruction->switch_prongs_void->other->value.type)) {
+ return ir_unreach_error(ira);
+ }
+ }
+
size_t case_count = switch_br_instruction->case_count;
bool is_comptime;
@@ -14540,7 +15799,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_br(IrAnalyze *ira,
IrBasicBlock *new_else_block = ir_get_new_bb(ira, switch_br_instruction->else_block, &switch_br_instruction->base);
ir_build_switch_br_from(&ira->new_irb, &switch_br_instruction->base,
- target_value, new_else_block, case_count, cases, nullptr);
+ target_value, new_else_block, case_count, cases, nullptr, nullptr);
return ir_finish_anal(ira, ira->codegen->builtin_types.entry_unreachable);
}
@@ -14561,7 +15820,10 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira,
return out_val->type;
}
- assert(target_value_ptr->value.type->id == TypeTableEntryIdPointer);
+ if (target_value_ptr->value.type->id != TypeTableEntryIdPointer) {
+ ir_add_error(ira, target_value_ptr, buf_sprintf("invalid deref on switch target"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
TypeTableEntry *target_type = target_value_ptr->value.type->data.pointer.child_type;
ConstExprValue *pointee_val = nullptr;
@@ -14582,8 +15844,8 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case TypeTableEntryIdBool:
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdPointer:
case TypeTableEntryIdPromise:
case TypeTableEntryIdFn:
@@ -14661,9 +15923,9 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdArgTuple:
@@ -14841,6 +16103,8 @@ static TypeTableEntry *ir_analyze_container_init_fields_union(IrAnalyze *ira, Ir
assert(container_type->id == TypeTableEntryIdUnion);
ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
if (instr_field_count != 1) {
ir_add_error(ira, instruction,
@@ -14868,8 +16132,14 @@ static TypeTableEntry *ir_analyze_container_init_fields_union(IrAnalyze *ira, Ir
if (casted_field_value == ira->codegen->invalid_instruction)
return ira->codegen->builtin_types.entry_invalid;
+ type_ensure_zero_bits_known(ira->codegen, casted_field_value->value.type);
+ if (type_is_invalid(casted_field_value->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
bool is_comptime = ir_should_inline(ira->new_irb.exec, instruction->scope);
- if (is_comptime || casted_field_value->value.special != ConstValSpecialRuntime) {
+ if (is_comptime || casted_field_value->value.special != ConstValSpecialRuntime ||
+ !type_has_bits(casted_field_value->value.type))
+ {
ConstExprValue *field_val = ir_resolve_const(ira, casted_field_value, UndefOk);
if (!field_val)
return ira->codegen->builtin_types.entry_invalid;
@@ -14908,6 +16178,8 @@ static TypeTableEntry *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstru
}
ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
size_t actual_field_count = container_type->data.structure.src_field_count;
@@ -15176,11 +16448,11 @@ static TypeTableEntry *ir_analyze_min_max(IrAnalyze *ira, IrInstruction *source_
case TypeTableEntryIdPromise:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdUnion:
@@ -15267,7 +16539,8 @@ static TypeTableEntry *ir_analyze_instruction_err_name(IrAnalyze *ira, IrInstruc
if (type_is_invalid(casted_value->value.type))
return ira->codegen->builtin_types.entry_invalid;
- TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true);
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
+ true, false, PtrLenUnknown, get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0);
TypeTableEntry *str_type = get_slice_type(ira->codegen, u8_ptr_type);
if (casted_value->value.special == ConstValSpecialStatic) {
ErrorTableEntry *err = casted_value->value.data.x_err_set;
@@ -15293,6 +16566,9 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn
assert(target->value.type->id == TypeTableEntryIdEnum);
if (instr_is_comptime(target)) {
+ type_ensure_zero_bits_known(ira->codegen, target->value.type);
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
TypeEnumField *field = find_enum_field_by_tag(target->value.type, &target->value.data.x_bigint);
ConstExprValue *array_val = create_const_str_lit(ira->codegen, field->name);
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
@@ -15300,15 +16576,14 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn
return out_val->type;
}
- if (!target->value.type->data.enumeration.generate_name_table) {
- target->value.type->data.enumeration.generate_name_table = true;
- ira->codegen->name_table_enums.append(target->value.type);
- }
-
IrInstruction *result = ir_build_tag_name(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, target);
ir_link_new_instruction(result, &instruction->base);
- TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true);
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(
+ ira->codegen, ira->codegen->builtin_types.entry_u8,
+ true, false, PtrLenUnknown,
+ get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8),
+ 0, 0);
result->value.type = get_slice_type(ira->codegen, u8_ptr_type);
return result->value.type;
}
@@ -15337,6 +16612,8 @@ static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira,
}
ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
TypeStructField *field = find_struct_type_field(container_type, field_name);
if (field == nullptr) {
@@ -15359,6 +16636,7 @@ static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira,
TypeTableEntry *field_ptr_type = get_pointer_to_type_extra(ira->codegen, field->type_entry,
field_ptr->value.type->data.pointer.is_const,
field_ptr->value.type->data.pointer.is_volatile,
+ PtrLenSingle,
field_ptr_align, 0, 0);
IrInstruction *casted_field_ptr = ir_implicit_cast(ira, field_ptr, field_ptr_type);
if (type_is_invalid(casted_field_ptr->value.type))
@@ -15367,6 +16645,7 @@ static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira,
TypeTableEntry *result_type = get_pointer_to_type_extra(ira->codegen, container_type,
casted_field_ptr->value.type->data.pointer.is_const,
casted_field_ptr->value.type->data.pointer.is_volatile,
+ PtrLenSingle,
parent_ptr_align, 0, 0);
if (instr_is_comptime(casted_field_ptr)) {
@@ -15411,6 +16690,8 @@ static TypeTableEntry *ir_analyze_instruction_offset_of(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
IrInstruction *field_name_value = instruction->field_name->other;
Buf *field_name = ir_resolve_str(ira, field_name_value);
@@ -15443,6 +16724,974 @@ static TypeTableEntry *ir_analyze_instruction_offset_of(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_num_lit_int;
}
+static void ensure_field_index(TypeTableEntry *type, const char *field_name, size_t index)
+{
+ Buf *field_name_buf;
+
+ assert(type != nullptr && !type_is_invalid(type));
+ // Check for the field by creating a buffer in place, then use the comma operator to free it so that we don't
+ // leak memory in debug mode.
+ assert(find_struct_type_field(type, field_name_buf = buf_create_from_str(field_name))->src_index == index &&
+ (buf_deinit(field_name_buf), true));
+}
+
+static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_name, TypeTableEntry *root = nullptr)
+{
+ static ConstExprValue *type_info_var = nullptr;
+ static TypeTableEntry *type_info_type = nullptr;
+ if (type_info_var == nullptr)
+ {
+ type_info_var = get_builtin_value(ira->codegen, "TypeInfo");
+ assert(type_info_var->type->id == TypeTableEntryIdMetaType);
+
+ ensure_complete_type(ira->codegen, type_info_var->data.x_type);
+ if (type_is_invalid(type_info_var->data.x_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ type_info_type = type_info_var->data.x_type;
+ assert(type_info_type->id == TypeTableEntryIdUnion);
+ }
+
+ if (type_name == nullptr && root == nullptr)
+ return type_info_type;
+ else if (type_name == nullptr)
+ return root;
+
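+ // Otherwise, look up the named declaration inside TypeInfo (or the provided root container) and return the type it holds.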
+ TypeTableEntry *root_type = (root == nullptr) ? type_info_type : root;
+
+ ScopeDecls *type_info_scope = get_container_scope(root_type);
+ assert(type_info_scope != nullptr);
+
+ Buf field_name = BUF_INIT;
+ buf_init_from_str(&field_name, type_name);
+ auto entry = type_info_scope->decl_table.get(&field_name);
+ buf_deinit(&field_name);
+
+ TldVar *tld = (TldVar *)entry;
+ assert(tld->base.id == TldIdVar);
+
+ VariableTableEntry *var = tld->var;
+
+ ensure_complete_type(ira->codegen, var->value->type);
+ if (type_is_invalid(var->value->type))
+ return ira->codegen->builtin_types.entry_invalid;
+ assert(var->value->type->id == TypeTableEntryIdMetaType);
+ return var->value->data.x_type;
+}
+
+static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, ScopeDecls *decls_scope)
+{
+ TypeTableEntry *type_info_definition_type = ir_type_info_get_type(ira, "Definition");
+ ensure_complete_type(ira->codegen, type_info_definition_type);
+ if (type_is_invalid(type_info_definition_type))
+ return false;
+
+ ensure_field_index(type_info_definition_type, "name", 0);
+ ensure_field_index(type_info_definition_type, "is_pub", 1);
+ ensure_field_index(type_info_definition_type, "data", 2);
+
+ TypeTableEntry *type_info_definition_data_type = ir_type_info_get_type(ira, "Data", type_info_definition_type);
+ ensure_complete_type(ira->codegen, type_info_definition_data_type);
+ if (type_is_invalid(type_info_definition_data_type))
+ return false;
+
+ TypeTableEntry *type_info_fn_def_type = ir_type_info_get_type(ira, "FnDef", type_info_definition_data_type);
+ ensure_complete_type(ira->codegen, type_info_fn_def_type);
+ if (type_is_invalid(type_info_fn_def_type))
+ return false;
+
+ TypeTableEntry *type_info_fn_def_inline_type = ir_type_info_get_type(ira, "Inline", type_info_fn_def_type);
+ ensure_complete_type(ira->codegen, type_info_fn_def_inline_type);
+ if (type_is_invalid(type_info_fn_def_inline_type))
+ return false;
+
+ // Loop through our definitions once to figure out how many definitions we will generate info for.
+ auto decl_it = decls_scope->decl_table.entry_iterator();
+ decltype(decls_scope->decl_table)::Entry *curr_entry = nullptr;
+ int definition_count = 0;
+
+ while ((curr_entry = decl_it.next()) != nullptr)
+ {
+ // If the definition is unresolved, resolve it now.
+ if (curr_entry->value->resolution == TldResolutionUnresolved)
+ {
+ resolve_top_level_decl(ira->codegen, curr_entry->value, false, curr_entry->value->source_node);
+ if (curr_entry->value->resolution != TldResolutionOk)
+ {
+ return false;
+ }
+ }
+
+ // Skip comptime blocks and test functions.
+ if (curr_entry->value->id != TldIdCompTime)
+ {
+ if (curr_entry->value->id == TldIdFn)
+ {
+ FnTableEntry *fn_entry = ((TldFn *)curr_entry->value)->fn_entry;
+ if (fn_entry->is_test)
+ continue;
+ }
+
+ definition_count += 1;
+ }
+ }
+
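+ // Allocate one TypeInfo.Definition per declaration and expose the array to the caller as a slice.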
+ ConstExprValue *definition_array = create_const_vals(1);
+ definition_array->special = ConstValSpecialStatic;
+ definition_array->type = get_array_type(ira->codegen, type_info_definition_type, definition_count);
+ definition_array->data.x_array.special = ConstArraySpecialNone;
+ definition_array->data.x_array.s_none.parent.id = ConstParentIdNone;
+ definition_array->data.x_array.s_none.elements = create_const_vals(definition_count);
+ init_const_slice(ira->codegen, out_val, definition_array, 0, definition_count, false);
+
+ // Loop through the definitions and generate info.
+ decl_it = decls_scope->decl_table.entry_iterator();
+ curr_entry = nullptr;
+ int definition_index = 0;
+ while ((curr_entry = decl_it.next()) != nullptr)
+ {
+ // Skip comptime blocks and test functions.
+ if (curr_entry->value->id == TldIdCompTime)
+ continue;
+ else if (curr_entry->value->id == TldIdFn)
+ {
+ FnTableEntry *fn_entry = ((TldFn *)curr_entry->value)->fn_entry;
+ if (fn_entry->is_test)
+ continue;
+ }
+
+ ConstExprValue *definition_val = &definition_array->data.x_array.s_none.elements[definition_index];
+
+ definition_val->special = ConstValSpecialStatic;
+ definition_val->type = type_info_definition_type;
+
+ ConstExprValue *inner_fields = create_const_vals(3);
+ ConstExprValue *name = create_const_str_lit(ira->codegen, curr_entry->key);
+ init_const_slice(ira->codegen, &inner_fields[0], name, 0, buf_len(curr_entry->key), true);
+ inner_fields[1].special = ConstValSpecialStatic;
+ inner_fields[1].type = ira->codegen->builtin_types.entry_bool;
+ inner_fields[1].data.x_bool = curr_entry->value->visib_mod == VisibModPub;
+ inner_fields[2].special = ConstValSpecialStatic;
+ inner_fields[2].type = type_info_definition_data_type;
+ inner_fields[2].data.x_union.parent.id = ConstParentIdStruct;
+ inner_fields[2].data.x_union.parent.data.p_struct.struct_val = definition_val;
+ inner_fields[2].data.x_union.parent.data.p_struct.field_index = 1;
+
+ switch (curr_entry->value->id)
+ {
+ case TldIdVar:
+ {
+ VariableTableEntry *var = ((TldVar *)curr_entry->value)->var;
+ ensure_complete_type(ira->codegen, var->value->type);
+ if (type_is_invalid(var->value->type))
+ return false;
+
+ if (var->value->type->id == TypeTableEntryIdMetaType)
+ {
+ // We have a variable of type 'type', so it's actually a type definition.
+ // 0: Data.Type: type
+ bigint_init_unsigned(&inner_fields[2].data.x_union.tag, 0);
+ inner_fields[2].data.x_union.payload = var->value;
+ }
+ else
+ {
+ // We have a variable of another type, so we store the type of the variable.
+ // 1: Data.Var: type
+ bigint_init_unsigned(&inner_fields[2].data.x_union.tag, 1);
+
+ ConstExprValue *payload = create_const_vals(1);
+ payload->type = ira->codegen->builtin_types.entry_type;
+ payload->data.x_type = var->value->type;
+
+ inner_fields[2].data.x_union.payload = payload;
+ }
+
+ break;
+ }
+ case TldIdFn:
+ {
+ // 2: Data.Fn: Data.FnDef
+ bigint_init_unsigned(&inner_fields[2].data.x_union.tag, 2);
+
+ FnTableEntry *fn_entry = ((TldFn *)curr_entry->value)->fn_entry;
+ assert(!fn_entry->is_test);
+
+ AstNodeFnProto *fn_node = (AstNodeFnProto *)(fn_entry->proto_node);
+
+ ConstExprValue *fn_def_val = create_const_vals(1);
+ fn_def_val->special = ConstValSpecialStatic;
+ fn_def_val->type = type_info_fn_def_type;
+ fn_def_val->data.x_struct.parent.id = ConstParentIdUnion;
+ fn_def_val->data.x_struct.parent.data.p_union.union_val = &inner_fields[2];
+
+ ConstExprValue *fn_def_fields = create_const_vals(9);
+ fn_def_val->data.x_struct.fields = fn_def_fields;
+
+ // fn_type: type
+ ensure_field_index(fn_def_val->type, "fn_type", 0);
+ fn_def_fields[0].special = ConstValSpecialStatic;
+ fn_def_fields[0].type = ira->codegen->builtin_types.entry_type;
+ fn_def_fields[0].data.x_type = fn_entry->type_entry;
+ // inline_type: Data.FnDef.Inline
+ ensure_field_index(fn_def_val->type, "inline_type", 1);
+ fn_def_fields[1].special = ConstValSpecialStatic;
+ fn_def_fields[1].type = type_info_fn_def_inline_type;
+ bigint_init_unsigned(&fn_def_fields[1].data.x_enum_tag, fn_entry->fn_inline);
+ // calling_convention: TypeInfo.CallingConvention
+ ensure_field_index(fn_def_val->type, "calling_convention", 2);
+ fn_def_fields[2].special = ConstValSpecialStatic;
+ fn_def_fields[2].type = ir_type_info_get_type(ira, "CallingConvention");
+ bigint_init_unsigned(&fn_def_fields[2].data.x_enum_tag, fn_node->cc);
+ // is_var_args: bool
+ ensure_field_index(fn_def_val->type, "is_var_args", 3);
+ bool is_varargs = fn_node->is_var_args;
+ fn_def_fields[3].special = ConstValSpecialStatic;
+ fn_def_fields[3].type = ira->codegen->builtin_types.entry_bool;
+ fn_def_fields[3].data.x_bool = is_varargs;
+ // is_extern: bool
+ ensure_field_index(fn_def_val->type, "is_extern", 4);
+ fn_def_fields[4].special = ConstValSpecialStatic;
+ fn_def_fields[4].type = ira->codegen->builtin_types.entry_bool;
+ fn_def_fields[4].data.x_bool = fn_node->is_extern;
+ // is_export: bool
+ ensure_field_index(fn_def_val->type, "is_export", 5);
+ fn_def_fields[5].special = ConstValSpecialStatic;
+ fn_def_fields[5].type = ira->codegen->builtin_types.entry_bool;
+ fn_def_fields[5].data.x_bool = fn_node->is_export;
+ // lib_name: ?[]const u8
+ ensure_field_index(fn_def_val->type, "lib_name", 6);
+ fn_def_fields[6].special = ConstValSpecialStatic;
+ TypeTableEntry *u8_ptr = get_pointer_to_type_extra(
+ ira->codegen, ira->codegen->builtin_types.entry_u8,
+ true, false, PtrLenUnknown,
+ get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8),
+ 0, 0);
+ fn_def_fields[6].type = get_optional_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr));
+ if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) {
+ fn_def_fields[6].data.x_optional = create_const_vals(1);
+ ConstExprValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name);
+ init_const_slice(ira->codegen, fn_def_fields[6].data.x_optional, lib_name, 0, buf_len(fn_node->lib_name), true);
+ } else {
+ fn_def_fields[6].data.x_optional = nullptr;
+ }
+ // return_type: type
+ ensure_field_index(fn_def_val->type, "return_type", 7);
+ fn_def_fields[7].special = ConstValSpecialStatic;
+ fn_def_fields[7].type = ira->codegen->builtin_types.entry_type;
+ if (fn_entry->src_implicit_return_type != nullptr)
+ fn_def_fields[7].data.x_type = fn_entry->src_implicit_return_type;
+ else if (fn_entry->type_entry->data.fn.gen_return_type != nullptr)
+ fn_def_fields[7].data.x_type = fn_entry->type_entry->data.fn.gen_return_type;
+ else
+ fn_def_fields[7].data.x_type = fn_entry->type_entry->data.fn.fn_type_id.return_type;
+ // arg_names: [][]const u8
+ ensure_field_index(fn_def_val->type, "arg_names", 8);
+ size_t fn_arg_count = fn_entry->variable_list.length;
+ ConstExprValue *fn_arg_name_array = create_const_vals(1);
+ fn_arg_name_array->special = ConstValSpecialStatic;
+ fn_arg_name_array->type = get_array_type(ira->codegen,
+ get_slice_type(ira->codegen, u8_ptr), fn_arg_count);
+ fn_arg_name_array->data.x_array.special = ConstArraySpecialNone;
+ fn_arg_name_array->data.x_array.s_none.parent.id = ConstParentIdNone;
+ fn_arg_name_array->data.x_array.s_none.elements = create_const_vals(fn_arg_count);
+
+ init_const_slice(ira->codegen, &fn_def_fields[8], fn_arg_name_array, 0, fn_arg_count, false);
+
+ for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++)
+ {
+ VariableTableEntry *arg_var = fn_entry->variable_list.at(fn_arg_index);
+ ConstExprValue *fn_arg_name_val = &fn_arg_name_array->data.x_array.s_none.elements[fn_arg_index];
+ ConstExprValue *arg_name = create_const_str_lit(ira->codegen, &arg_var->name);
+ init_const_slice(ira->codegen, fn_arg_name_val, arg_name, 0, buf_len(&arg_var->name), true);
+ fn_arg_name_val->data.x_struct.parent.id = ConstParentIdArray;
+ fn_arg_name_val->data.x_struct.parent.data.p_array.array_val = fn_arg_name_array;
+ fn_arg_name_val->data.x_struct.parent.data.p_array.elem_index = fn_arg_index;
+ }
+
+ inner_fields[2].data.x_union.payload = fn_def_val;
+ break;
+ }
+ case TldIdContainer:
+ {
+ TypeTableEntry *type_entry = ((TldContainer *)curr_entry->value)->type_entry;
+ ensure_complete_type(ira->codegen, type_entry);
+ if (type_is_invalid(type_entry))
+ return false;
+
+ // This is a type.
+ bigint_init_unsigned(&inner_fields[2].data.x_union.tag, 0);
+
+ ConstExprValue *payload = create_const_vals(1);
+ payload->type = ira->codegen->builtin_types.entry_type;
+ payload->data.x_type = type_entry;
+
+ inner_fields[2].data.x_union.payload = payload;
+
+ break;
+ }
+ default:
+ zig_unreachable();
+ }
+
+ definition_val->data.x_struct.fields = inner_fields;
+ definition_index++;
+ }
+
+ assert(definition_index == definition_count);
+ return true;
+}
+
+static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry) {
+ assert(type_entry != nullptr);
+ assert(!type_is_invalid(type_entry));
+
+ ensure_complete_type(ira->codegen, type_entry);
+ if (type_is_invalid(type_entry))
+ return nullptr;
+
+ const auto make_enum_field_val = [ira](ConstExprValue *enum_field_val, TypeEnumField *enum_field,
+ TypeTableEntry *type_info_enum_field_type) {
+ enum_field_val->special = ConstValSpecialStatic;
+ enum_field_val->type = type_info_enum_field_type;
+
+ ConstExprValue *inner_fields = create_const_vals(2);
+ inner_fields[1].special = ConstValSpecialStatic;
+ inner_fields[1].type = ira->codegen->builtin_types.entry_usize;
+
+ ConstExprValue *name = create_const_str_lit(ira->codegen, enum_field->name);
+ init_const_slice(ira->codegen, &inner_fields[0], name, 0, buf_len(enum_field->name), true);
+
+ bigint_init_bigint(&inner_fields[1].data.x_bigint, &enum_field->value);
+
+ enum_field_val->data.x_struct.fields = inner_fields;
+ };
+
+ const auto create_ptr_like_type_info = [ira](TypeTableEntry *ptr_type_entry) {
+ TypeTableEntry *attrs_type;
+ uint32_t size_enum_index;
+ if (is_slice(ptr_type_entry)) {
+ attrs_type = ptr_type_entry->data.structure.fields[slice_ptr_index].type_entry;
+ size_enum_index = 2;
+ } else if (ptr_type_entry->id == TypeTableEntryIdPointer) {
+ attrs_type = ptr_type_entry;
+ size_enum_index = (ptr_type_entry->data.pointer.ptr_len == PtrLenSingle) ? 0 : 1;
+ } else {
+ zig_unreachable();
+ }
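+ // size_enum_index selects the TypeInfo.Pointer.Size tag: single-item pointer, many-item pointer, or slice.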
+
+ TypeTableEntry *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer");
+ ensure_complete_type(ira->codegen, type_info_pointer_type);
+ assert(!type_is_invalid(type_info_pointer_type));
+
+ ConstExprValue *result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = type_info_pointer_type;
+
+ ConstExprValue *fields = create_const_vals(5);
+ result->data.x_struct.fields = fields;
+
+ // size: Size
+ ensure_field_index(result->type, "size", 0);
+ TypeTableEntry *type_info_pointer_size_type = ir_type_info_get_type(ira, "Size", type_info_pointer_type);
+ ensure_complete_type(ira->codegen, type_info_pointer_size_type);
+ assert(!type_is_invalid(type_info_pointer_size_type));
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = type_info_pointer_size_type;
+ bigint_init_unsigned(&fields[0].data.x_enum_tag, size_enum_index);
+
+ // is_const: bool
+ ensure_field_index(result->type, "is_const", 1);
+ fields[1].special = ConstValSpecialStatic;
+ fields[1].type = ira->codegen->builtin_types.entry_bool;
+ fields[1].data.x_bool = attrs_type->data.pointer.is_const;
+ // is_volatile: bool
+ ensure_field_index(result->type, "is_volatile", 2);
+ fields[2].special = ConstValSpecialStatic;
+ fields[2].type = ira->codegen->builtin_types.entry_bool;
+ fields[2].data.x_bool = attrs_type->data.pointer.is_volatile;
+ // alignment: u32
+ ensure_field_index(result->type, "alignment", 3);
+ fields[3].special = ConstValSpecialStatic;
+ fields[3].type = ira->codegen->builtin_types.entry_u32;
+ bigint_init_unsigned(&fields[3].data.x_bigint, attrs_type->data.pointer.alignment);
+ // child: type
+ ensure_field_index(result->type, "child", 4);
+ fields[4].special = ConstValSpecialStatic;
+ fields[4].type = ira->codegen->builtin_types.entry_type;
+ fields[4].data.x_type = attrs_type->data.pointer.child_type;
+
+ return result;
+ };
+
+ if (type_entry == ira->codegen->builtin_types.entry_global_error_set) {
+ zig_panic("TODO implement @typeInfo for global error set");
+ }
+
+ ConstExprValue *result = nullptr;
+ switch (type_entry->id)
+ {
+ case TypeTableEntryIdInvalid:
+ zig_unreachable();
+ case TypeTableEntryIdMetaType:
+ case TypeTableEntryIdVoid:
+ case TypeTableEntryIdBool:
+ case TypeTableEntryIdUnreachable:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
+ case TypeTableEntryIdNamespace:
+ case TypeTableEntryIdBlock:
+ case TypeTableEntryIdArgTuple:
+ case TypeTableEntryIdOpaque:
+ return nullptr;
+ default:
+ {
+ // Look up an existing value in our cache.
+ auto entry = ira->codegen->type_info_cache.maybe_get(type_entry);
+ if (entry != nullptr)
+ return entry->value;
+
+ // Fall through if we don't find one.
+ }
+ case TypeTableEntryIdInt:
+ {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "Int");
+
+ ConstExprValue *fields = create_const_vals(2);
+ result->data.x_struct.fields = fields;
+
+ // is_signed: bool
+ ensure_field_index(result->type, "is_signed", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = ira->codegen->builtin_types.entry_bool;
+ fields[0].data.x_bool = type_entry->data.integral.is_signed;
+ // bits: u8
+ ensure_field_index(result->type, "bits", 1);
+ fields[1].special = ConstValSpecialStatic;
+ fields[1].type = ira->codegen->builtin_types.entry_u8;
+ bigint_init_unsigned(&fields[1].data.x_bigint, type_entry->data.integral.bit_count);
+
+ break;
+ }
+ case TypeTableEntryIdFloat:
+ {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "Float");
+
+ ConstExprValue *fields = create_const_vals(1);
+ result->data.x_struct.fields = fields;
+
+ // bits: u8
+ ensure_field_index(result->type, "bits", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = ira->codegen->builtin_types.entry_u8;
+ bigint_init_unsigned(&fields->data.x_bigint, type_entry->data.floating.bit_count);
+
+ break;
+ }
+ case TypeTableEntryIdPointer:
+ {
+ result = create_ptr_like_type_info(type_entry);
+ break;
+ }
+ case TypeTableEntryIdArray:
+ {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "Array");
+
+ ConstExprValue *fields = create_const_vals(2);
+ result->data.x_struct.fields = fields;
+
+ // len: usize
+ ensure_field_index(result->type, "len", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = ira->codegen->builtin_types.entry_usize;
+ bigint_init_unsigned(&fields[0].data.x_bigint, type_entry->data.array.len);
+ // child: type
+ ensure_field_index(result->type, "child", 1);
+ fields[1].special = ConstValSpecialStatic;
+ fields[1].type = ira->codegen->builtin_types.entry_type;
+ fields[1].data.x_type = type_entry->data.array.child_type;
+
+ break;
+ }
+ case TypeTableEntryIdOptional:
+ {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "Optional");
+
+ ConstExprValue *fields = create_const_vals(1);
+ result->data.x_struct.fields = fields;
+
+ // child: type
+ ensure_field_index(result->type, "child", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = ira->codegen->builtin_types.entry_type;
+ fields[0].data.x_type = type_entry->data.maybe.child_type;
+
+ break;
+ }
+ case TypeTableEntryIdPromise:
+ {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "Promise");
+
+ ConstExprValue *fields = create_const_vals(1);
+ result->data.x_struct.fields = fields;
+
+ // child: ?type
+ ensure_field_index(result->type, "child", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+
+ if (type_entry->data.promise.result_type == nullptr)
+ fields[0].data.x_optional = nullptr;
+ else {
+ ConstExprValue *child_type = create_const_vals(1);
+ child_type->special = ConstValSpecialStatic;
+ child_type->type = ira->codegen->builtin_types.entry_type;
+ child_type->data.x_type = type_entry->data.promise.result_type;
+ fields[0].data.x_optional = child_type;
+ }
+
+ break;
+ }
+ case TypeTableEntryIdEnum:
+ {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "Enum");
+
+ ConstExprValue *fields = create_const_vals(4);
+ result->data.x_struct.fields = fields;
+
+ // layout: ContainerLayout
+ ensure_field_index(result->type, "layout", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = ir_type_info_get_type(ira, "ContainerLayout");
+ bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.enumeration.layout);
+ // tag_type: type
+ ensure_field_index(result->type, "tag_type", 1);
+ fields[1].special = ConstValSpecialStatic;
+ fields[1].type = ira->codegen->builtin_types.entry_type;
+ fields[1].data.x_type = type_entry->data.enumeration.tag_int_type;
+ // fields: []TypeInfo.EnumField
+ ensure_field_index(result->type, "fields", 2);
+
+ TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField");
+ uint32_t enum_field_count = type_entry->data.enumeration.src_field_count;
+
+ ConstExprValue *enum_field_array = create_const_vals(1);
+ enum_field_array->special = ConstValSpecialStatic;
+ enum_field_array->type = get_array_type(ira->codegen, type_info_enum_field_type, enum_field_count);
+ enum_field_array->data.x_array.special = ConstArraySpecialNone;
+ enum_field_array->data.x_array.s_none.parent.id = ConstParentIdNone;
+ enum_field_array->data.x_array.s_none.elements = create_const_vals(enum_field_count);
+
+ init_const_slice(ira->codegen, &fields[2], enum_field_array, 0, enum_field_count, false);
+
+ for (uint32_t enum_field_index = 0; enum_field_index < enum_field_count; enum_field_index++)
+ {
+ TypeEnumField *enum_field = &type_entry->data.enumeration.fields[enum_field_index];
+ ConstExprValue *enum_field_val = &enum_field_array->data.x_array.s_none.elements[enum_field_index];
+ make_enum_field_val(enum_field_val, enum_field, type_info_enum_field_type);
+ enum_field_val->data.x_struct.parent.id = ConstParentIdArray;
+ enum_field_val->data.x_struct.parent.data.p_array.array_val = enum_field_array;
+ enum_field_val->data.x_struct.parent.data.p_array.elem_index = enum_field_index;
+ }
+ // defs: []TypeInfo.Definition
+ ensure_field_index(result->type, "defs", 3);
+ if (!ir_make_type_info_defs(ira, &fields[3], type_entry->data.enumeration.decls_scope))
+ return nullptr;
+
+ break;
+ }
+ case TypeTableEntryIdErrorSet:
+ {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "ErrorSet");
+
+ ConstExprValue *fields = create_const_vals(1);
+ result->data.x_struct.fields = fields;
+
+ // errors: []TypeInfo.Error
+ ensure_field_index(result->type, "errors", 0);
+
+ TypeTableEntry *type_info_error_type = ir_type_info_get_type(ira, "Error");
+ uint32_t error_count = type_entry->data.error_set.err_count;
+ ConstExprValue *error_array = create_const_vals(1);
+ error_array->special = ConstValSpecialStatic;
+ error_array->type = get_array_type(ira->codegen, type_info_error_type, error_count);
+ error_array->data.x_array.special = ConstArraySpecialNone;
+ error_array->data.x_array.s_none.parent.id = ConstParentIdNone;
+ error_array->data.x_array.s_none.elements = create_const_vals(error_count);
+
+ init_const_slice(ira->codegen, &fields[0], error_array, 0, error_count, false);
+ for (uint32_t error_index = 0; error_index < error_count; error_index++)
+ {
+ ErrorTableEntry *error = type_entry->data.error_set.errors[error_index];
+ ConstExprValue *error_val = &error_array->data.x_array.s_none.elements[error_index];
+
+ error_val->special = ConstValSpecialStatic;
+ error_val->type = type_info_error_type;
+
+ ConstExprValue *inner_fields = create_const_vals(2);
+ inner_fields[1].special = ConstValSpecialStatic;
+ inner_fields[1].type = ira->codegen->builtin_types.entry_usize;
+
+ ConstExprValue *name = nullptr;
+ if (error->cached_error_name_val != nullptr)
+ name = error->cached_error_name_val;
+ if (name == nullptr)
+ name = create_const_str_lit(ira->codegen, &error->name);
+ init_const_slice(ira->codegen, &inner_fields[0], name, 0, buf_len(&error->name), true);
+ bigint_init_unsigned(&inner_fields[1].data.x_bigint, error->value);
+
+ error_val->data.x_struct.fields = inner_fields;
+ error_val->data.x_struct.parent.id = ConstParentIdArray;
+ error_val->data.x_struct.parent.data.p_array.array_val = error_array;
+ error_val->data.x_struct.parent.data.p_array.elem_index = error_index;
+ }
+
+ break;
+ }
+ case TypeTableEntryIdErrorUnion:
+ {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "ErrorUnion");
+
+ ConstExprValue *fields = create_const_vals(2);
+ result->data.x_struct.fields = fields;
+
+ // error_set: type
+ ensure_field_index(result->type, "error_set", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = ira->codegen->builtin_types.entry_type;
+ fields[0].data.x_type = type_entry->data.error_union.err_set_type;
+
+ // payload: type
+ ensure_field_index(result->type, "payload", 1);
+ fields[1].special = ConstValSpecialStatic;
+ fields[1].type = ira->codegen->builtin_types.entry_type;
+ fields[1].data.x_type = type_entry->data.error_union.payload_type;
+
+ break;
+ }
+ case TypeTableEntryIdUnion:
+ {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "Union");
+
+ ConstExprValue *fields = create_const_vals(4);
+ result->data.x_struct.fields = fields;
+
+ // layout: ContainerLayout
+ ensure_field_index(result->type, "layout", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = ir_type_info_get_type(ira, "ContainerLayout");
+ bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.unionation.layout);
+ // tag_type: ?type
+ ensure_field_index(result->type, "tag_type", 1);
+ fields[1].special = ConstValSpecialStatic;
+ fields[1].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+
+ AstNode *union_decl_node = type_entry->data.unionation.decl_node;
+ if (union_decl_node->data.container_decl.auto_enum ||
+ union_decl_node->data.container_decl.init_arg_expr != nullptr)
+ {
+ ConstExprValue *tag_type = create_const_vals(1);
+ tag_type->special = ConstValSpecialStatic;
+ tag_type->type = ira->codegen->builtin_types.entry_type;
+ tag_type->data.x_type = type_entry->data.unionation.tag_type;
+ fields[1].data.x_optional = tag_type;
+ }
+ else
+ fields[1].data.x_optional = nullptr;
+ // fields: []TypeInfo.UnionField
+ ensure_field_index(result->type, "fields", 2);
+
+ TypeTableEntry *type_info_union_field_type = ir_type_info_get_type(ira, "UnionField");
+ uint32_t union_field_count = type_entry->data.unionation.src_field_count;
+
+ ConstExprValue *union_field_array = create_const_vals(1);
+ union_field_array->special = ConstValSpecialStatic;
+ union_field_array->type = get_array_type(ira->codegen, type_info_union_field_type, union_field_count);
+ union_field_array->data.x_array.special = ConstArraySpecialNone;
+ union_field_array->data.x_array.s_none.parent.id = ConstParentIdNone;
+ union_field_array->data.x_array.s_none.elements = create_const_vals(union_field_count);
+
+ init_const_slice(ira->codegen, &fields[2], union_field_array, 0, union_field_count, false);
+
+ TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField");
+
+ for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++) {
+ TypeUnionField *union_field = &type_entry->data.unionation.fields[union_field_index];
+ ConstExprValue *union_field_val = &union_field_array->data.x_array.s_none.elements[union_field_index];
+
+ union_field_val->special = ConstValSpecialStatic;
+ union_field_val->type = type_info_union_field_type;
+
+ ConstExprValue *inner_fields = create_const_vals(3);
+ inner_fields[1].special = ConstValSpecialStatic;
+ inner_fields[1].type = get_optional_type(ira->codegen, type_info_enum_field_type);
+
+ if (fields[1].data.x_optional == nullptr) {
+ inner_fields[1].data.x_optional = nullptr;
+ } else {
+ inner_fields[1].data.x_optional = create_const_vals(1);
+ make_enum_field_val(inner_fields[1].data.x_optional, union_field->enum_field, type_info_enum_field_type);
+ }
+
+ inner_fields[2].special = ConstValSpecialStatic;
+ inner_fields[2].type = ira->codegen->builtin_types.entry_type;
+ inner_fields[2].data.x_type = union_field->type_entry;
+
+ ConstExprValue *name = create_const_str_lit(ira->codegen, union_field->name);
+ init_const_slice(ira->codegen, &inner_fields[0], name, 0, buf_len(union_field->name), true);
+
+ union_field_val->data.x_struct.fields = inner_fields;
+ union_field_val->data.x_struct.parent.id = ConstParentIdArray;
+ union_field_val->data.x_struct.parent.data.p_array.array_val = union_field_array;
+ union_field_val->data.x_struct.parent.data.p_array.elem_index = union_field_index;
+ }
+ // defs: []TypeInfo.Definition
+ ensure_field_index(result->type, "defs", 3);
+ if (!ir_make_type_info_defs(ira, &fields[3], type_entry->data.unionation.decls_scope))
+ return nullptr;
+
+ break;
+ }
+ case TypeTableEntryIdStruct:
+ {
+ if (type_entry->data.structure.is_slice) {
+ result = create_ptr_like_type_info(type_entry);
+ break;
+ }
+
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "Struct");
+
+ ConstExprValue *fields = create_const_vals(3);
+ result->data.x_struct.fields = fields;
+
+ // layout: ContainerLayout
+ ensure_field_index(result->type, "layout", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = ir_type_info_get_type(ira, "ContainerLayout");
+ bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.structure.layout);
+ // fields: []TypeInfo.StructField
+ ensure_field_index(result->type, "fields", 1);
+
+ TypeTableEntry *type_info_struct_field_type = ir_type_info_get_type(ira, "StructField");
+ uint32_t struct_field_count = type_entry->data.structure.src_field_count;
+
+ ConstExprValue *struct_field_array = create_const_vals(1);
+ struct_field_array->special = ConstValSpecialStatic;
+ struct_field_array->type = get_array_type(ira->codegen, type_info_struct_field_type, struct_field_count);
+ struct_field_array->data.x_array.special = ConstArraySpecialNone;
+ struct_field_array->data.x_array.s_none.parent.id = ConstParentIdNone;
+ struct_field_array->data.x_array.s_none.elements = create_const_vals(struct_field_count);
+
+ init_const_slice(ira->codegen, &fields[1], struct_field_array, 0, struct_field_count, false);
+
+ for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) {
+ TypeStructField *struct_field = &type_entry->data.structure.fields[struct_field_index];
+ ConstExprValue *struct_field_val = &struct_field_array->data.x_array.s_none.elements[struct_field_index];
+
+ struct_field_val->special = ConstValSpecialStatic;
+ struct_field_val->type = type_info_struct_field_type;
+
+ ConstExprValue *inner_fields = create_const_vals(3);
+ inner_fields[1].special = ConstValSpecialStatic;
+ inner_fields[1].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_usize);
+
+ if (!type_has_bits(struct_field->type_entry)) {
+ inner_fields[1].data.x_optional = nullptr;
+ } else {
+ size_t byte_offset = LLVMOffsetOfElement(ira->codegen->target_data_ref, type_entry->type_ref, struct_field->gen_index);
+ inner_fields[1].data.x_optional = create_const_vals(1);
+ inner_fields[1].data.x_optional->special = ConstValSpecialStatic;
+ inner_fields[1].data.x_optional->type = ira->codegen->builtin_types.entry_usize;
+ bigint_init_unsigned(&inner_fields[1].data.x_optional->data.x_bigint, byte_offset);
+ }
+
+ inner_fields[2].special = ConstValSpecialStatic;
+ inner_fields[2].type = ira->codegen->builtin_types.entry_type;
+ inner_fields[2].data.x_type = struct_field->type_entry;
+
+ ConstExprValue *name = create_const_str_lit(ira->codegen, struct_field->name);
+ init_const_slice(ira->codegen, &inner_fields[0], name, 0, buf_len(struct_field->name), true);
+
+ struct_field_val->data.x_struct.fields = inner_fields;
+ struct_field_val->data.x_struct.parent.id = ConstParentIdArray;
+ struct_field_val->data.x_struct.parent.data.p_array.array_val = struct_field_array;
+ struct_field_val->data.x_struct.parent.data.p_array.elem_index = struct_field_index;
+ }
+ // defs: []TypeInfo.Definition
+ ensure_field_index(result->type, "defs", 2);
+ if (!ir_make_type_info_defs(ira, &fields[2], type_entry->data.structure.decls_scope))
+ return nullptr;
+
+ break;
+ }
+ case TypeTableEntryIdFn:
+ {
+ result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = ir_type_info_get_type(ira, "Fn");
+
+ ConstExprValue *fields = create_const_vals(6);
+ result->data.x_struct.fields = fields;
+
+ // calling_convention: TypeInfo.CallingConvention
+ ensure_field_index(result->type, "calling_convention", 0);
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = ir_type_info_get_type(ira, "CallingConvention");
+ bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.fn.fn_type_id.cc);
+ // is_generic: bool
+ ensure_field_index(result->type, "is_generic", 1);
+ bool is_generic = type_entry->data.fn.is_generic;
+ fields[1].special = ConstValSpecialStatic;
+ fields[1].type = ira->codegen->builtin_types.entry_bool;
+ fields[1].data.x_bool = is_generic;
+ // is_var_args: bool
+ ensure_field_index(result->type, "is_var_args", 2);
+ bool is_varargs = type_entry->data.fn.fn_type_id.is_var_args;
+ fields[2].special = ConstValSpecialStatic;
+ fields[2].type = ira->codegen->builtin_types.entry_bool;
+ fields[2].data.x_bool = is_varargs;
+ // return_type: ?type
+ ensure_field_index(result->type, "return_type", 3);
+ fields[3].special = ConstValSpecialStatic;
+ fields[3].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+ if (type_entry->data.fn.fn_type_id.return_type == nullptr)
+ fields[3].data.x_optional = nullptr;
+ else {
+ ConstExprValue *return_type = create_const_vals(1);
+ return_type->special = ConstValSpecialStatic;
+ return_type->type = ira->codegen->builtin_types.entry_type;
+ return_type->data.x_type = type_entry->data.fn.fn_type_id.return_type;
+ fields[3].data.x_optional = return_type;
+ }
+ // async_allocator_type: ?type
+ ensure_field_index(result->type, "async_allocator_type", 4);
+ fields[4].special = ConstValSpecialStatic;
+ fields[4].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+ if (type_entry->data.fn.fn_type_id.async_allocator_type == nullptr)
+ fields[4].data.x_optional = nullptr;
+ else {
+ ConstExprValue *async_alloc_type = create_const_vals(1);
+ async_alloc_type->special = ConstValSpecialStatic;
+ async_alloc_type->type = ira->codegen->builtin_types.entry_type;
+ async_alloc_type->data.x_type = type_entry->data.fn.fn_type_id.async_allocator_type;
+ fields[4].data.x_optional = async_alloc_type;
+ }
+ // args: []TypeInfo.FnArg
+ TypeTableEntry *type_info_fn_arg_type = ir_type_info_get_type(ira, "FnArg");
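+ // For var args functions that don't use the C calling convention, the trailing variadic parameter is excluded from the reported args.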
+ size_t fn_arg_count = type_entry->data.fn.fn_type_id.param_count -
+ (is_varargs && type_entry->data.fn.fn_type_id.cc != CallingConventionC);
+
+ ConstExprValue *fn_arg_array = create_const_vals(1);
+ fn_arg_array->special = ConstValSpecialStatic;
+ fn_arg_array->type = get_array_type(ira->codegen, type_info_fn_arg_type, fn_arg_count);
+ fn_arg_array->data.x_array.special = ConstArraySpecialNone;
+ fn_arg_array->data.x_array.s_none.parent.id = ConstParentIdNone;
+ fn_arg_array->data.x_array.s_none.elements = create_const_vals(fn_arg_count);
+
+ init_const_slice(ira->codegen, &fields[5], fn_arg_array, 0, fn_arg_count, false);
+
+ for (size_t fn_arg_index = 0; fn_arg_index < fn_arg_count; fn_arg_index++)
+ {
+ FnTypeParamInfo *fn_param_info = &type_entry->data.fn.fn_type_id.param_info[fn_arg_index];
+ ConstExprValue *fn_arg_val = &fn_arg_array->data.x_array.s_none.elements[fn_arg_index];
+
+ fn_arg_val->special = ConstValSpecialStatic;
+ fn_arg_val->type = type_info_fn_arg_type;
+
+ bool arg_is_generic = fn_param_info->type == nullptr;
+ if (arg_is_generic) assert(is_generic);
+
+ ConstExprValue *inner_fields = create_const_vals(3);
+ inner_fields[0].special = ConstValSpecialStatic;
+ inner_fields[0].type = ira->codegen->builtin_types.entry_bool;
+ inner_fields[0].data.x_bool = arg_is_generic;
+ inner_fields[1].special = ConstValSpecialStatic;
+ inner_fields[1].type = ira->codegen->builtin_types.entry_bool;
+ inner_fields[1].data.x_bool = fn_param_info->is_noalias;
+ inner_fields[2].special = ConstValSpecialStatic;
+ inner_fields[2].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+
+ if (arg_is_generic)
+ inner_fields[2].data.x_optional = nullptr;
+ else {
+ ConstExprValue *arg_type = create_const_vals(1);
+ arg_type->special = ConstValSpecialStatic;
+ arg_type->type = ira->codegen->builtin_types.entry_type;
+ arg_type->data.x_type = fn_param_info->type;
+ inner_fields[2].data.x_optional = arg_type;
+ }
+
+ fn_arg_val->data.x_struct.fields = inner_fields;
+ fn_arg_val->data.x_struct.parent.id = ConstParentIdArray;
+ fn_arg_val->data.x_struct.parent.data.p_array.array_val = fn_arg_array;
+ fn_arg_val->data.x_struct.parent.data.p_array.elem_index = fn_arg_index;
+ }
+
+ break;
+ }
+ case TypeTableEntryIdBoundFn:
+ {
+ TypeTableEntry *fn_type = type_entry->data.bound_fn.fn_type;
+ assert(fn_type->id == TypeTableEntryIdFn);
+ result = ir_make_type_info_value(ira, fn_type);
+
+ break;
+ }
+ }
+
+ assert(result != nullptr);
+ ira->codegen->type_info_cache.put(type_entry, result);
+ return result;
+}
+
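+// Comptime analysis of @typeInfo: the result is a TypeInfo union constant whose
+// tag is the type id of the operand type and whose payload (when present) is
+// built and cached per type by ir_make_type_info_value above.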
+static TypeTableEntry *ir_analyze_instruction_type_info(IrAnalyze *ira,
+ IrInstructionTypeInfo *instruction)
+{
+ IrInstruction *type_value = instruction->type_value->other;
+ TypeTableEntry *type_entry = ir_resolve_type(ira, type_value);
+ if (type_is_invalid(type_entry))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ TypeTableEntry *result_type = ir_type_info_get_type(ira, nullptr);
+
+ ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+ out_val->type = result_type;
+ bigint_init_unsigned(&out_val->data.x_union.tag, type_id_index(type_entry));
+
+ ConstExprValue *payload = ir_make_type_info_value(ira, type_entry);
+ out_val->data.x_union.payload = payload;
+
+ if (payload != nullptr)
+ {
+ assert(payload->type->id == TypeTableEntryIdStruct);
+ payload->data.x_struct.parent.id = ConstParentIdUnion;
+ payload->data.x_struct.parent.data.p_union.union_val = out_val;
+ }
+
+ return result_type;
+}
+
static TypeTableEntry *ir_analyze_instruction_type_id(IrAnalyze *ira,
IrInstructionTypeId *instruction)
{
@@ -15456,7 +17705,7 @@ static TypeTableEntry *ir_analyze_instruction_type_id(IrAnalyze *ira,
TypeTableEntry *result_type = var_value->data.x_type;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- bigint_init_unsigned(&out_val->data.x_enum_tag, type_id_index(type_entry->id));
+ bigint_init_unsigned(&out_val->data.x_enum_tag, type_id_index(type_entry));
return result_type;
}
@@ -15659,10 +17908,20 @@ static TypeTableEntry *ir_analyze_instruction_embed_file(IrAnalyze *ira, IrInstr
}
static TypeTableEntry *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstructionCmpxchg *instruction) {
+ TypeTableEntry *operand_type = ir_resolve_atomic_operand_type(ira, instruction->type_value->other);
+ if (type_is_invalid(operand_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *ptr = instruction->ptr->other;
if (type_is_invalid(ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
+ // TODO let this be volatile
+ TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, operand_type, false);
+ IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr, ptr_type);
+ if (type_is_invalid(casted_ptr->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *cmp_value = instruction->cmp_value->other;
if (type_is_invalid(cmp_value->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -15687,28 +17946,11 @@ static TypeTableEntry *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstruct
if (!ir_resolve_atomic_order(ira, failure_order_value, &failure_order))
return ira->codegen->builtin_types.entry_invalid;
- if (ptr->value.type->id != TypeTableEntryIdPointer) {
- ir_add_error(ira, instruction->ptr,
- buf_sprintf("expected pointer argument, found '%s'", buf_ptr(&ptr->value.type->name)));
- return ira->codegen->builtin_types.entry_invalid;
- }
-
- TypeTableEntry *child_type = ptr->value.type->data.pointer.child_type;
-
- uint32_t align_bytes = ptr->value.type->data.pointer.alignment;
- uint64_t size_bytes = type_size(ira->codegen, child_type);
- if (align_bytes < size_bytes) {
- ir_add_error(ira, instruction->ptr,
- buf_sprintf("expected pointer alignment of at least %" ZIG_PRI_u64 ", found %" PRIu32,
- size_bytes, align_bytes));
- return ira->codegen->builtin_types.entry_invalid;
- }
-
- IrInstruction *casted_cmp_value = ir_implicit_cast(ira, cmp_value, child_type);
+ IrInstruction *casted_cmp_value = ir_implicit_cast(ira, cmp_value, operand_type);
if (type_is_invalid(casted_cmp_value->value.type))
return ira->codegen->builtin_types.entry_invalid;
- IrInstruction *casted_new_value = ir_implicit_cast(ira, new_value, child_type);
+ IrInstruction *casted_new_value = ir_implicit_cast(ira, new_value, operand_type);
if (type_is_invalid(casted_new_value->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -15733,9 +17975,17 @@ static TypeTableEntry *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstruct
return ira->codegen->builtin_types.entry_invalid;
}
- ir_build_cmpxchg_from(&ira->new_irb, &instruction->base, ptr, casted_cmp_value, casted_new_value,
- success_order_value, failure_order_value, success_order, failure_order);
- return ira->codegen->builtin_types.entry_bool;
+ if (instr_is_comptime(casted_ptr) && instr_is_comptime(casted_cmp_value) && instr_is_comptime(casted_new_value)) {
+ zig_panic("TODO compile-time execution of cmpxchg");
+ }
+
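+ // The result is an optional of the operand type: null signals a successful
+ // exchange, otherwise the value observed in memory is returned.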
+ IrInstruction *result = ir_build_cmpxchg(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
+ nullptr, casted_ptr, casted_cmp_value, casted_new_value, nullptr, nullptr, instruction->is_weak,
+ operand_type, success_order, failure_order);
+ result->value.type = get_optional_type(ira->codegen, operand_type);
+ ir_link_new_instruction(result, &instruction->base);
+ ir_add_alloca(ira, result, result->value.type);
+ return result->value.type;
}
static TypeTableEntry *ir_analyze_instruction_fence(IrAnalyze *ira, IrInstructionFence *instruction) {
@@ -15758,7 +18008,7 @@ static TypeTableEntry *ir_analyze_instruction_truncate(IrAnalyze *ira, IrInstruc
return ira->codegen->builtin_types.entry_invalid;
if (dest_type->id != TypeTableEntryIdInt &&
- dest_type->id != TypeTableEntryIdNumLitInt)
+ dest_type->id != TypeTableEntryIdComptimeInt)
{
ir_add_error(ira, dest_type_value, buf_sprintf("expected integer type, found '%s'", buf_ptr(&dest_type->name)));
return ira->codegen->builtin_types.entry_invalid;
@@ -15770,7 +18020,7 @@ static TypeTableEntry *ir_analyze_instruction_truncate(IrAnalyze *ira, IrInstruc
return ira->codegen->builtin_types.entry_invalid;
if (src_type->id != TypeTableEntryIdInt &&
- src_type->id != TypeTableEntryIdNumLitInt)
+ src_type->id != TypeTableEntryIdComptimeInt)
{
ir_add_error(ira, target, buf_sprintf("expected integer type, found '%s'", buf_ptr(&src_type->name)));
return ira->codegen->builtin_types.entry_invalid;
@@ -15793,10 +18043,326 @@ static TypeTableEntry *ir_analyze_instruction_truncate(IrAnalyze *ira, IrInstruc
return dest_type;
}
- ir_build_truncate_from(&ira->new_irb, &instruction->base, dest_type_value, target);
+ IrInstruction *new_instruction = ir_build_truncate(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, dest_type_value, target);
+ ir_link_new_instruction(new_instruction, &instruction->base);
return dest_type;
}
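+// @intCast analysis: the destination must be a fixed-width integer type. A
+// comptime_int operand is range-checked and materialized directly; a runtime
+// integer operand is widened or shortened to the destination width.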
+static TypeTableEntry *ir_analyze_instruction_int_cast(IrAnalyze *ira, IrInstructionIntCast *instruction) {
+ TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (dest_type->id != TypeTableEntryIdInt) {
+ ir_add_error(ira, instruction->dest_type, buf_sprintf("expected integer type, found '%s'", buf_ptr(&dest_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (target->value.type->id == TypeTableEntryIdComptimeInt) {
+ if (ir_num_lit_fits_in_other_type(ira, target, dest_type, true)) {
+ IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_type,
+ CastOpNumLitToConcrete, false);
+ if (type_is_invalid(result->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_type;
+ } else {
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+ }
+
+ if (target->value.type->id != TypeTableEntryIdInt) {
+ ir_add_error(ira, instruction->target, buf_sprintf("expected integer type, found '%s'",
+ buf_ptr(&target->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *result = ir_analyze_widen_or_shorten(ira, &instruction->base, target, dest_type);
+ if (type_is_invalid(result->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_type;
+}
+
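+// @floatCast analysis: mirrors the integer cast above; comptime_int and
+// comptime_float operands are range-checked and folded, runtime float operands
+// are widened or shortened to the destination float type.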
+static TypeTableEntry *ir_analyze_instruction_float_cast(IrAnalyze *ira, IrInstructionFloatCast *instruction) {
+ TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (dest_type->id != TypeTableEntryIdFloat) {
+ ir_add_error(ira, instruction->dest_type,
+ buf_sprintf("expected float type, found '%s'", buf_ptr(&dest_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (target->value.type->id == TypeTableEntryIdComptimeInt ||
+ target->value.type->id == TypeTableEntryIdComptimeFloat)
+ {
+ if (ir_num_lit_fits_in_other_type(ira, target, dest_type, true)) {
+ CastOp op;
+ if (target->value.type->id == TypeTableEntryIdComptimeInt) {
+ op = CastOpIntToFloat;
+ } else {
+ op = CastOpNumLitToConcrete;
+ }
+ IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_type, op, false);
+ if (type_is_invalid(result->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_type;
+ } else {
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+ }
+
+ if (target->value.type->id != TypeTableEntryIdFloat) {
+ ir_add_error(ira, instruction->target, buf_sprintf("expected float type, found '%s'",
+ buf_ptr(&target->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *result = ir_analyze_widen_or_shorten(ira, &instruction->base, target, dest_type);
+ if (type_is_invalid(result->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_err_set_cast(IrAnalyze *ira, IrInstructionErrSetCast *instruction) {
+ TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (dest_type->id != TypeTableEntryIdErrorSet) {
+ ir_add_error(ira, instruction->dest_type,
+ buf_sprintf("expected error set type, found '%s'", buf_ptr(&dest_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (target->value.type->id != TypeTableEntryIdErrorSet) {
+ ir_add_error(ira, instruction->target,
+ buf_sprintf("expected error set type, found '%s'", buf_ptr(&target->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *result = ir_analyze_err_set_cast(ira, &instruction->base, target, dest_type);
+ if (type_is_invalid(result->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_type;
+}
+
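+// @bytesToSlice analysis: the operand is implicitly cast to []u8 (keeping the
+// source pointer's const/volatile/alignment), and when the slice length is
+// known at compile time it must be a multiple of the destination element size.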
+static TypeTableEntry *ir_analyze_instruction_from_bytes(IrAnalyze *ira, IrInstructionFromBytes *instruction) {
+ TypeTableEntry *dest_child_type = ir_resolve_type(ira, instruction->dest_child_type->other);
+ if (type_is_invalid(dest_child_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ bool src_ptr_const;
+ bool src_ptr_volatile;
+ uint32_t src_ptr_align;
+ if (target->value.type->id == TypeTableEntryIdPointer) {
+ src_ptr_const = target->value.type->data.pointer.is_const;
+ src_ptr_volatile = target->value.type->data.pointer.is_volatile;
+ src_ptr_align = target->value.type->data.pointer.alignment;
+ } else if (is_slice(target->value.type)) {
+ TypeTableEntry *src_ptr_type = target->value.type->data.structure.fields[slice_ptr_index].type_entry;
+ src_ptr_const = src_ptr_type->data.pointer.is_const;
+ src_ptr_volatile = src_ptr_type->data.pointer.is_volatile;
+ src_ptr_align = src_ptr_type->data.pointer.alignment;
+ } else {
+ src_ptr_const = true;
+ src_ptr_volatile = false;
+ src_ptr_align = get_abi_alignment(ira->codegen, target->value.type);
+ }
+
+ TypeTableEntry *dest_ptr_type = get_pointer_to_type_extra(ira->codegen, dest_child_type,
+ src_ptr_const, src_ptr_volatile, PtrLenUnknown,
+ src_ptr_align, 0, 0);
+ TypeTableEntry *dest_slice_type = get_slice_type(ira->codegen, dest_ptr_type);
+
+ TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
+ src_ptr_const, src_ptr_volatile, PtrLenUnknown,
+ src_ptr_align, 0, 0);
+ TypeTableEntry *u8_slice = get_slice_type(ira->codegen, u8_ptr);
+
+ IrInstruction *casted_value = ir_implicit_cast(ira, target, u8_slice);
+ if (type_is_invalid(casted_value->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ bool have_known_len = false;
+ uint64_t known_len;
+
+ if (instr_is_comptime(casted_value)) {
+ ConstExprValue *val = ir_resolve_const(ira, casted_value, UndefBad);
+ if (!val)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *len_val = &val->data.x_struct.fields[slice_len_index];
+ if (value_is_comptime(len_val)) {
+ known_len = bigint_as_unsigned(&len_val->data.x_bigint);
+ have_known_len = true;
+ }
+ }
+
+ if (casted_value->value.data.rh_slice.id == RuntimeHintSliceIdLen) {
+ known_len = casted_value->value.data.rh_slice.len;
+ have_known_len = true;
+ }
+
+ if (have_known_len) {
+ uint64_t child_type_size = type_size(ira->codegen, dest_child_type);
+ uint64_t remainder = known_len % child_type_size;
+ if (remainder != 0) {
+ ErrorMsg *msg = ir_add_error(ira, &instruction->base,
+ buf_sprintf("unable to convert [%" ZIG_PRI_u64 "]u8 to %s: size mismatch",
+ known_len, buf_ptr(&dest_slice_type->name)));
+ add_error_note(ira->codegen, msg, instruction->dest_child_type->source_node,
+ buf_sprintf("%s has size %" ZIG_PRI_u64 "; remaining bytes: %" ZIG_PRI_u64,
+ buf_ptr(&dest_child_type->name), child_type_size, remainder));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+ }
+
+ IrInstruction *result = ir_resolve_cast(ira, &instruction->base, casted_value, dest_slice_type, CastOpResizeSlice, true);
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_slice_type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_to_bytes(IrAnalyze *ira, IrInstructionToBytes *instruction) {
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (!is_slice(target->value.type)) {
+ ir_add_error(ira, instruction->target,
+ buf_sprintf("expected slice, found '%s'", buf_ptr(&target->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ TypeTableEntry *src_ptr_type = target->value.type->data.structure.fields[slice_ptr_index].type_entry;
+
+ TypeTableEntry *dest_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
+ src_ptr_type->data.pointer.is_const, src_ptr_type->data.pointer.is_volatile, PtrLenUnknown,
+ src_ptr_type->data.pointer.alignment, 0, 0);
+ TypeTableEntry *dest_slice_type = get_slice_type(ira->codegen, dest_ptr_type);
+
+ IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_slice_type, CastOpResizeSlice, true);
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_slice_type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_int_to_float(IrAnalyze *ira, IrInstructionIntToFloat *instruction) {
+ TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (target->value.type->id != TypeTableEntryIdInt && target->value.type->id != TypeTableEntryIdComptimeInt) {
+ ir_add_error(ira, instruction->target, buf_sprintf("expected int type, found '%s'",
+ buf_ptr(&target->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_type, CastOpIntToFloat, false);
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_float_to_int(IrAnalyze *ira, IrInstructionFloatToInt *instruction) {
+ TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_type, CastOpFloatToInt, false);
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_err_to_int(IrAnalyze *ira, IrInstructionErrToInt *instruction) {
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *casted_target;
+ if (target->value.type->id == TypeTableEntryIdErrorSet) {
+ casted_target = target;
+ } else {
+ casted_target = ir_implicit_cast(ira, target, ira->codegen->builtin_types.entry_global_error_set);
+ if (type_is_invalid(casted_target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *result = ir_analyze_err_to_int(ira, &instruction->base, casted_target, ira->codegen->err_tag_type);
+ ir_link_new_instruction(result, &instruction->base);
+ return result->value.type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_int_to_err(IrAnalyze *ira, IrInstructionIntToErr *instruction) {
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *casted_target = ir_implicit_cast(ira, target, ira->codegen->err_tag_type);
+ if (type_is_invalid(casted_target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *result = ir_analyze_int_to_err(ira, &instruction->base, casted_target, ira->codegen->builtin_types.entry_global_error_set);
+ ir_link_new_instruction(result, &instruction->base);
+ return result->value.type;
+}
+
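+// @boolToInt analysis: a comptime bool folds to the comptime_int constant 0 or 1;
+// a runtime bool is cast to u1.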
+static TypeTableEntry *ir_analyze_instruction_bool_to_int(IrAnalyze *ira, IrInstructionBoolToInt *instruction) {
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (target->value.type->id != TypeTableEntryIdBool) {
+ ir_add_error(ira, instruction->target, buf_sprintf("expected bool, found '%s'",
+ buf_ptr(&target->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ if (instr_is_comptime(target)) {
+ bool is_true;
+ if (!ir_resolve_bool(ira, target, &is_true))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+ bigint_init_unsigned(&out_val->data.x_bigint, is_true ? 1 : 0);
+ return ira->codegen->builtin_types.entry_num_lit_int;
+ }
+
+ TypeTableEntry *u1_type = get_int_type(ira->codegen, false, 1);
+ IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, u1_type, CastOpBoolToInt, false);
+ ir_link_new_instruction(result, &instruction->base);
+ return u1_type;
+}
+
static TypeTableEntry *ir_analyze_instruction_int_type(IrAnalyze *ira, IrInstructionIntType *instruction) {
IrInstruction *is_signed_value = instruction->is_signed->other;
bool is_signed;
@@ -15805,7 +18371,7 @@ static TypeTableEntry *ir_analyze_instruction_int_type(IrAnalyze *ira, IrInstruc
IrInstruction *bit_count_value = instruction->bit_count->other;
uint64_t bit_count;
- if (!ir_resolve_usize(ira, bit_count_value, &bit_count))
+ if (!ir_resolve_unsigned(ira, bit_count_value, ira->codegen->builtin_types.entry_u32, &bit_count))
return ira->codegen->builtin_types.entry_invalid;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
@@ -15824,9 +18390,13 @@ static TypeTableEntry *ir_analyze_instruction_bool_not(IrAnalyze *ira, IrInstruc
if (type_is_invalid(casted_value->value.type))
return ira->codegen->builtin_types.entry_invalid;
- if (casted_value->value.special != ConstValSpecialRuntime) {
+ if (instr_is_comptime(casted_value)) {
+ ConstExprValue *value = ir_resolve_const(ira, casted_value, UndefBad);
+ if (value == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_bool = !casted_value->value.data.x_bool;
+ out_val->data.x_bool = !value->data.x_bool;
return bool_type;
}
@@ -15855,7 +18425,8 @@ static TypeTableEntry *ir_analyze_instruction_memset(IrAnalyze *ira, IrInstructi
TypeTableEntry *u8 = ira->codegen->builtin_types.entry_u8;
uint32_t dest_align = (dest_uncasted_type->id == TypeTableEntryIdPointer) ?
dest_uncasted_type->data.pointer.alignment : get_abi_alignment(ira->codegen, u8);
- TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile, dest_align, 0, 0);
+ TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile,
+ PtrLenUnknown, dest_align, 0, 0);
IrInstruction *casted_dest_ptr = ir_implicit_cast(ira, dest_ptr, u8_ptr);
if (type_is_invalid(casted_dest_ptr->value.type))
@@ -15951,8 +18522,10 @@ static TypeTableEntry *ir_analyze_instruction_memcpy(IrAnalyze *ira, IrInstructi
src_uncasted_type->data.pointer.alignment : get_abi_alignment(ira->codegen, u8);
TypeTableEntry *usize = ira->codegen->builtin_types.entry_usize;
- TypeTableEntry *u8_ptr_mut = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile, dest_align, 0, 0);
- TypeTableEntry *u8_ptr_const = get_pointer_to_type_extra(ira->codegen, u8, true, src_is_volatile, src_align, 0, 0);
+ TypeTableEntry *u8_ptr_mut = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile,
+ PtrLenUnknown, dest_align, 0, 0);
+ TypeTableEntry *u8_ptr_const = get_pointer_to_type_extra(ira->codegen, u8, true, src_is_volatile,
+ PtrLenUnknown, src_align, 0, 0);
IrInstruction *casted_dest_ptr = ir_implicit_cast(ira, dest_ptr, u8_ptr_mut);
if (type_is_invalid(casted_dest_ptr->value.type))
@@ -16095,18 +18668,38 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio
if (array_type->data.array.len == 0 && byte_alignment == 0) {
byte_alignment = get_abi_alignment(ira->codegen, array_type->data.array.child_type);
}
+ bool is_comptime_const = ptr_ptr->value.special == ConstValSpecialStatic &&
+ ptr_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst;
TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.array.child_type,
- ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
+ ptr_type->data.pointer.is_const || is_comptime_const,
+ ptr_type->data.pointer.is_volatile,
+ PtrLenUnknown,
byte_alignment, 0, 0);
return_type = get_slice_type(ira->codegen, slice_ptr_type);
} else if (array_type->id == TypeTableEntryIdPointer) {
- TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.pointer.child_type,
- array_type->data.pointer.is_const, array_type->data.pointer.is_volatile,
- array_type->data.pointer.alignment, 0, 0);
- return_type = get_slice_type(ira->codegen, slice_ptr_type);
- if (!end) {
- ir_add_error(ira, &instruction->base, buf_sprintf("slice of pointer must include end value"));
- return ira->codegen->builtin_types.entry_invalid;
+ if (array_type->data.pointer.ptr_len == PtrLenSingle) {
+ TypeTableEntry *main_type = array_type->data.pointer.child_type;
+ if (main_type->id == TypeTableEntryIdArray) {
+ TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen,
+ main_type->data.array.child_type,
+ array_type->data.pointer.is_const, array_type->data.pointer.is_volatile,
+ PtrLenUnknown,
+ array_type->data.pointer.alignment, 0, 0);
+ return_type = get_slice_type(ira->codegen, slice_ptr_type);
+ } else {
+ ir_add_error(ira, &instruction->base, buf_sprintf("slice of single-item pointer"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+ } else {
+ TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.pointer.child_type,
+ array_type->data.pointer.is_const, array_type->data.pointer.is_volatile,
+ PtrLenUnknown,
+ array_type->data.pointer.alignment, 0, 0);
+ return_type = get_slice_type(ira->codegen, slice_ptr_type);
+ if (!end) {
+ ir_add_error(ira, &instruction->base, buf_sprintf("slice of pointer must include end value"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
}
} else if (is_slice(array_type)) {
TypeTableEntry *ptr_type = array_type->data.structure.fields[slice_ptr_index].type_entry;
@@ -16126,12 +18719,24 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio
size_t abs_offset;
size_t rel_end;
bool ptr_is_undef = false;
- if (array_type->id == TypeTableEntryIdArray) {
- array_val = const_ptr_pointee(ira->codegen, &ptr_ptr->value);
- abs_offset = 0;
- rel_end = array_type->data.array.len;
- parent_ptr = nullptr;
+ if (array_type->id == TypeTableEntryIdArray ||
+ (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle))
+ {
+ if (array_type->id == TypeTableEntryIdPointer) {
+ TypeTableEntry *child_array_type = array_type->data.pointer.child_type;
+ assert(child_array_type->id == TypeTableEntryIdArray);
+ parent_ptr = const_ptr_pointee(ira->codegen, &ptr_ptr->value);
+ array_val = const_ptr_pointee(ira->codegen, parent_ptr);
+ rel_end = child_array_type->data.array.len;
+ abs_offset = 0;
+ } else {
+ array_val = const_ptr_pointee(ira->codegen, &ptr_ptr->value);
+ rel_end = array_type->data.array.len;
+ parent_ptr = nullptr;
+ abs_offset = 0;
+ }
} else if (array_type->id == TypeTableEntryIdPointer) {
+ assert(array_type->data.pointer.ptr_len == PtrLenUnknown);
parent_ptr = const_ptr_pointee(ira->codegen, &ptr_ptr->value);
if (parent_ptr->special == ConstValSpecialUndef) {
array_val = nullptr;
@@ -16230,7 +18835,7 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio
if (array_val) {
size_t index = abs_offset + start_scalar;
bool is_const = slice_is_const(return_type);
- init_const_ptr_array(ira->codegen, ptr_val, array_val, index, is_const);
+ init_const_ptr_array(ira->codegen, ptr_val, array_val, index, is_const, PtrLenUnknown);
if (array_type->id == TypeTableEntryIdArray) {
ptr_val->data.x_ptr.mut = ptr_ptr->value.data.x_ptr.mut;
} else if (is_slice(array_type)) {
@@ -16283,6 +18888,10 @@ static TypeTableEntry *ir_analyze_instruction_member_count(IrAnalyze *ira, IrIns
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *container_type = ir_resolve_type(ira, container);
+ ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
uint64_t result;
if (type_is_invalid(container_type)) {
return ira->codegen->builtin_types.entry_invalid;
@@ -16293,7 +18902,7 @@ static TypeTableEntry *ir_analyze_instruction_member_count(IrAnalyze *ira, IrIns
} else if (container_type->id == TypeTableEntryIdUnion) {
result = container_type->data.unionation.src_field_count;
} else if (container_type->id == TypeTableEntryIdErrorSet) {
- if (!resolve_inferred_error_set(ira, container_type, instruction->base.source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, container_type, instruction->base.source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
if (type_is_global_error_set(container_type)) {
@@ -16317,6 +18926,11 @@ static TypeTableEntry *ir_analyze_instruction_member_type(IrAnalyze *ira, IrInst
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
+ ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
uint64_t member_index;
IrInstruction *index_value = instruction->member_index->other;
if (!ir_resolve_usize(ira, index_value, &member_index))
@@ -16359,6 +18973,10 @@ static TypeTableEntry *ir_analyze_instruction_member_name(IrAnalyze *ira, IrInst
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
+ ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
uint64_t member_index;
IrInstruction *index_value = instruction->member_index->other;
if (!ir_resolve_usize(ira, index_value, &member_index))
@@ -16428,6 +19046,14 @@ static TypeTableEntry *ir_analyze_instruction_frame_address(IrAnalyze *ira, IrIn
return u8_ptr_const;
}
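+// @handle() analysis: yields a promise carrying the enclosing function's return
+// type (promise->T); only valid inside a function body, hence the fn_entry assert.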
+static TypeTableEntry *ir_analyze_instruction_handle(IrAnalyze *ira, IrInstructionHandle *instruction) {
+ ir_build_handle_from(&ira->new_irb, &instruction->base);
+
+ FnTableEntry *fn_entry = exec_fn_entry(ira->new_irb.exec);
+ assert(fn_entry != nullptr);
+ return get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type);
+}
+
static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstructionAlignOf *instruction) {
IrInstruction *type_value = instruction->type_value->other;
if (type_is_invalid(type_value->value.type))
@@ -16443,10 +19069,10 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc
zig_unreachable();
case TypeTableEntryIdMetaType:
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
@@ -16463,7 +19089,7 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc
case TypeTableEntryIdPromise:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -16525,6 +19151,7 @@ static TypeTableEntry *ir_analyze_instruction_overflow_op(IrAnalyze *ira, IrInst
if (result_ptr->value.type->id == TypeTableEntryIdPointer) {
expected_ptr_type = get_pointer_to_type_extra(ira->codegen, dest_type,
false, result_ptr->value.type->data.pointer.is_volatile,
+ PtrLenSingle,
result_ptr->value.type->data.pointer.alignment, 0, 0);
} else {
expected_ptr_type = get_pointer_to_type(ira->codegen, dest_type, false);
@@ -16597,7 +19224,7 @@ static TypeTableEntry *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstruc
}
TypeTableEntry *err_set_type = type_entry->data.error_union.err_set_type;
- if (!resolve_inferred_error_set(ira, err_set_type, instruction->base.source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, err_set_type, instruction->base.source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
if (!type_is_global_error_set(err_set_type) &&
@@ -16678,8 +19305,12 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
} else if (type_entry->id == TypeTableEntryIdErrorUnion) {
TypeTableEntry *payload_type = type_entry->data.error_union.payload_type;
+ if (type_is_invalid(payload_type)) {
+ return ira->codegen->builtin_types.entry_invalid;
+ }
TypeTableEntry *result_type = get_pointer_to_type_extra(ira->codegen, payload_type,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
+ PtrLenSingle,
get_abi_alignment(ira->codegen, payload_type), 0, 0);
if (instr_is_comptime(value)) {
ConstExprValue *ptr_val = ir_resolve_const(ira, value, UndefBad);
@@ -16769,6 +19400,11 @@ static TypeTableEntry *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstruc
fn_type_id.return_type = ir_resolve_type(ira, return_type_value);
if (type_is_invalid(fn_type_id.return_type))
return ira->codegen->builtin_types.entry_invalid;
+ if (fn_type_id.return_type->id == TypeTableEntryIdOpaque) {
+ ir_add_error(ira, instruction->return_type,
+ buf_sprintf("return type cannot be opaque"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
if (fn_type_id.cc == CallingConventionAsync) {
if (instruction->async_allocator_type_value == nullptr) {
@@ -16820,7 +19456,11 @@ static TypeTableEntry *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira
if (type_is_invalid(end_value->value.type))
return ira->codegen->builtin_types.entry_invalid;
- assert(start_value->value.type->id == TypeTableEntryIdEnum);
+ if (start_value->value.type->id != TypeTableEntryIdEnum) {
+ ir_add_error(ira, range->start, buf_sprintf("not an enum type"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
BigInt start_index;
bigint_init_bigint(&start_index, &start_value->value.data.x_enum_tag);
@@ -16861,7 +19501,7 @@ static TypeTableEntry *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira
}
}
} else if (switch_type->id == TypeTableEntryIdErrorSet) {
- if (!resolve_inferred_error_set(ira, switch_type, target_value->source_node)) {
+ if (!resolve_inferred_error_set(ira->codegen, switch_type, target_value->source_node)) {
return ira->codegen->builtin_types.entry_invalid;
}
@@ -16925,21 +19565,27 @@ static TypeTableEntry *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira
IrInstruction *start_value = range->start->other;
if (type_is_invalid(start_value->value.type))
return ira->codegen->builtin_types.entry_invalid;
+ IrInstruction *casted_start_value = ir_implicit_cast(ira, start_value, switch_type);
+ if (type_is_invalid(casted_start_value->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
IrInstruction *end_value = range->end->other;
if (type_is_invalid(end_value->value.type))
return ira->codegen->builtin_types.entry_invalid;
+ IrInstruction *casted_end_value = ir_implicit_cast(ira, end_value, switch_type);
+ if (type_is_invalid(casted_end_value->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
- ConstExprValue *start_val = ir_resolve_const(ira, start_value, UndefBad);
+ ConstExprValue *start_val = ir_resolve_const(ira, casted_start_value, UndefBad);
if (!start_val)
return ira->codegen->builtin_types.entry_invalid;
- ConstExprValue *end_val = ir_resolve_const(ira, end_value, UndefBad);
+ ConstExprValue *end_val = ir_resolve_const(ira, casted_end_value, UndefBad);
if (!end_val)
return ira->codegen->builtin_types.entry_invalid;
- assert(start_val->type->id == TypeTableEntryIdInt || start_val->type->id == TypeTableEntryIdNumLitInt);
- assert(end_val->type->id == TypeTableEntryIdInt || end_val->type->id == TypeTableEntryIdNumLitInt);
+ assert(start_val->type->id == TypeTableEntryIdInt || start_val->type->id == TypeTableEntryIdComptimeInt);
+ assert(end_val->type->id == TypeTableEntryIdInt || end_val->type->id == TypeTableEntryIdComptimeInt);
AstNode *prev_node = rangeset_add_range(&rs, &start_val->data.x_bigint, &end_val->data.x_bigint,
start_value->source_node);
if (prev_node != nullptr) {
@@ -16983,30 +19629,6 @@ static TypeTableEntry *ir_analyze_instruction_check_statement_is_void(IrAnalyze
return ira->codegen->builtin_types.entry_void;
}
-static TypeTableEntry *ir_analyze_instruction_can_implicit_cast(IrAnalyze *ira,
- IrInstructionCanImplicitCast *instruction)
-{
- IrInstruction *type_value = instruction->type_value->other;
- TypeTableEntry *type_entry = ir_resolve_type(ira, type_value);
- if (type_is_invalid(type_entry))
- return ira->codegen->builtin_types.entry_invalid;
-
- IrInstruction *target_value = instruction->target_value->other;
- if (type_is_invalid(target_value->value.type))
- return ira->codegen->builtin_types.entry_invalid;
-
- ImplicitCastMatchResult result = ir_types_match_with_implicit_cast(ira, type_entry, target_value->value.type,
- target_value);
-
- if (result == ImplicitCastMatchResultReportedError) {
- zig_panic("TODO refactor implicit cast tester to return bool without reporting errors");
- }
-
- ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_bool = (result == ImplicitCastMatchResultYes);
- return ira->codegen->builtin_types.entry_bool;
-}
-
static TypeTableEntry *ir_analyze_instruction_panic(IrAnalyze *ira, IrInstructionPanic *instruction) {
IrInstruction *msg = instruction->msg->other;
if (type_is_invalid(msg->value.type))
@@ -17017,7 +19639,8 @@ static TypeTableEntry *ir_analyze_instruction_panic(IrAnalyze *ira, IrInstructio
return ir_unreach_error(ira);
}
- TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true);
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
+ true, false, PtrLenUnknown, get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0);
TypeTableEntry *str_type = get_slice_type(ira->codegen, u8_ptr_type);
IrInstruction *casted_msg = ir_implicit_cast(ira, msg, str_type);
if (type_is_invalid(casted_msg->value.type))
@@ -17044,22 +19667,22 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3
old_align_bytes = fn_type_id.alignment;
fn_type_id.alignment = align_bytes;
result_type = get_fn_type(ira->codegen, &fn_type_id);
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdPointer)
{
TypeTableEntry *ptr_type = target_type->data.maybe.child_type;
old_align_bytes = ptr_type->data.pointer.alignment;
TypeTableEntry *better_ptr_type = adjust_ptr_align(ira->codegen, ptr_type, align_bytes);
- result_type = get_maybe_type(ira->codegen, better_ptr_type);
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ result_type = get_optional_type(ira->codegen, better_ptr_type);
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdFn)
{
FnTypeId fn_type_id = target_type->data.maybe.child_type->data.fn.fn_type_id;
old_align_bytes = fn_type_id.alignment;
fn_type_id.alignment = align_bytes;
TypeTableEntry *fn_type = get_fn_type(ira->codegen, &fn_type_id);
- result_type = get_maybe_type(ira->codegen, fn_type);
+ result_type = get_optional_type(ira->codegen, fn_type);
} else if (is_slice(target_type)) {
TypeTableEntry *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index].type_entry;
old_align_bytes = slice_ptr_type->data.pointer.alignment;
@@ -17076,6 +19699,15 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3
if (!val)
return ira->codegen->invalid_instruction;
+ if (val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
+ val->data.x_ptr.data.hard_coded_addr.addr % align_bytes != 0)
+ {
+ ir_add_error(ira, target,
+ buf_sprintf("pointer address 0x%" ZIG_PRI_x64 " is not aligned to %" PRIu32 " bytes",
+ val->data.x_ptr.data.hard_coded_addr.addr, align_bytes));
+ return ira->codegen->invalid_instruction;
+ }
+
IrInstruction *result = ir_create_const(&ira->new_irb, target->scope, target->source_node, result_type);
copy_const_val(&result->value, val, false);
result->value.type = result_type;
@@ -17170,10 +19802,10 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdPromise:
zig_unreachable();
case TypeTableEntryIdVoid:
@@ -17210,7 +19842,7 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
return;
case TypeTableEntryIdStruct:
zig_panic("TODO buf_write_value_bytes struct type");
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
zig_panic("TODO buf_write_value_bytes maybe type");
case TypeTableEntryIdErrorUnion:
zig_panic("TODO buf_write_value_bytes error union");
@@ -17237,10 +19869,10 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
case TypeTableEntryIdPromise:
zig_unreachable();
case TypeTableEntryIdVoid:
@@ -17268,7 +19900,7 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
zig_panic("TODO buf_read_value_bytes array type");
case TypeTableEntryIdStruct:
zig_panic("TODO buf_read_value_bytes struct type");
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
zig_panic("TODO buf_read_value_bytes maybe type");
case TypeTableEntryIdErrorUnion:
zig_panic("TODO buf_read_value_bytes error union");
@@ -17296,7 +19928,12 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc
return ira->codegen->builtin_types.entry_invalid;
ensure_complete_type(ira->codegen, dest_type);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
ensure_complete_type(ira->codegen, src_type);
+ if (type_is_invalid(src_type))
+ return ira->codegen->builtin_types.entry_invalid;
if (get_codegen_ptr_type(src_type) != nullptr) {
ir_add_error(ira, value,
@@ -17313,10 +19950,10 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
ir_add_error(ira, dest_type_value,
buf_sprintf("unable to @bitCast from type '%s'", buf_ptr(&src_type->name)));
return ira->codegen->builtin_types.entry_invalid;
@@ -17339,10 +19976,10 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc
case TypeTableEntryIdNamespace:
case TypeTableEntryIdBlock:
case TypeTableEntryIdUnreachable:
- case TypeTableEntryIdNumLitFloat:
- case TypeTableEntryIdNumLitInt:
- case TypeTableEntryIdUndefLit:
- case TypeTableEntryIdNullLit:
+ case TypeTableEntryIdComptimeFloat:
+ case TypeTableEntryIdComptimeInt:
+ case TypeTableEntryIdUndefined:
+ case TypeTableEntryIdNull:
ir_add_error(ira, dest_type_value,
buf_sprintf("unable to @bitCast to type '%s'", buf_ptr(&dest_type->name)));
return ira->codegen->builtin_types.entry_invalid;
@@ -17429,7 +20066,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira,
Tld *tld = instruction->tld;
LVal lval = instruction->lval;
- resolve_top_level_decl(ira->codegen, tld, lval.is_ptr, instruction->base.source_node);
+ resolve_top_level_decl(ira->codegen, tld, lval == LValPtr, instruction->base.source_node);
if (tld->resolution == TldResolutionInvalid)
return ira->codegen->builtin_types.entry_invalid;
@@ -17442,8 +20079,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira,
TldVar *tld_var = (TldVar *)tld;
VariableTableEntry *var = tld_var->var;
- IrInstruction *var_ptr = ir_get_var_ptr(ira, &instruction->base, var,
- !lval.is_ptr || lval.is_const, lval.is_ptr && lval.is_volatile);
+ IrInstruction *var_ptr = ir_get_var_ptr(ira, &instruction->base, var);
if (type_is_invalid(var_ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -17451,7 +20087,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira,
add_link_lib_symbol(ira, tld_var->extern_lib_name, &var->name, instruction->base.source_node);
}
- if (lval.is_ptr) {
+ if (lval == LValPtr) {
ir_link_new_instruction(var_ptr, &instruction->base);
return var_ptr->value.type;
} else {
@@ -17472,7 +20108,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira,
IrInstruction *ref_instruction = ir_create_const_fn(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, fn_entry);
- if (lval.is_ptr) {
+ if (lval == LValPtr) {
IrInstruction *ptr_instr = ir_get_ref(ira, &instruction->base, ref_instruction, true, false);
ir_link_new_instruction(ptr_instr, &instruction->base);
return ptr_instr->value.type;
@@ -17498,13 +20134,16 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr
return ira->codegen->builtin_types.entry_invalid;
}
+ if (!type_has_bits(target->value.type)) {
+ ir_add_error(ira, target,
+ buf_sprintf("pointer to size 0 type has no address"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
if (instr_is_comptime(target)) {
ConstExprValue *val = ir_resolve_const(ira, target, UndefBad);
if (!val)
return ira->codegen->builtin_types.entry_invalid;
- if (target->value.type->id == TypeTableEntryIdMaybe) {
- val = val->data.x_maybe;
- }
if (val->type->id == TypeTableEntryIdPointer && val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) {
IrInstruction *result = ir_create_const(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, usize);
@@ -17521,22 +20160,34 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr
return usize;
}
-static TypeTableEntry *ir_analyze_instruction_ptr_type_of(IrAnalyze *ira, IrInstructionPtrTypeOf *instruction) {
+static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstructionPtrType *instruction) {
TypeTableEntry *child_type = ir_resolve_type(ira, instruction->child_type->other);
if (type_is_invalid(child_type))
return ira->codegen->builtin_types.entry_invalid;
+ if (child_type->id == TypeTableEntryIdUnreachable) {
+ ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed"));
+ return ira->codegen->builtin_types.entry_invalid;
+ } else if (child_type->id == TypeTableEntryIdOpaque && instruction->ptr_len == PtrLenUnknown) {
+ ir_add_error(ira, &instruction->base, buf_sprintf("unknown-length pointer to opaque"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
uint32_t align_bytes;
if (instruction->align_value != nullptr) {
if (!ir_resolve_align(ira, instruction->align_value->other, &align_bytes))
return ira->codegen->builtin_types.entry_invalid;
} else {
+ type_ensure_zero_bits_known(ira->codegen, child_type);
+ if (type_is_invalid(child_type))
+ return ira->codegen->builtin_types.entry_invalid;
align_bytes = get_abi_alignment(ira->codegen, child_type);
}
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
out_val->data.x_type = get_pointer_to_type_extra(ira->codegen, child_type,
- instruction->is_const, instruction->is_volatile, align_bytes,
+ instruction->is_const, instruction->is_volatile,
+ instruction->ptr_len, align_bytes,
instruction->bit_offset_start, instruction->bit_offset_end - instruction->bit_offset_start);
return ira->codegen->builtin_types.entry_type;
@@ -17634,6 +20285,16 @@ static TypeTableEntry *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstruc
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
out_val->data.x_type = fn_type_id->param_info[arg_index].type;
+ if (out_val->data.x_type == nullptr) {
+ // Args are only unresolved if our function is generic.
+ assert(fn_type->data.fn.is_generic);
+
+ ir_add_error(ira, arg_index_inst,
+ buf_sprintf("@ArgType could not resolve the type of arg %" ZIG_PRI_u64 " because '%s' is generic",
+ arg_index, buf_ptr(&fn_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
return ira->codegen->builtin_types.entry_type;
}
@@ -17797,7 +20458,7 @@ static TypeTableEntry *ir_analyze_instruction_coro_free(IrAnalyze *ira, IrInstru
instruction->base.source_node, coro_id, coro_handle);
ir_link_new_instruction(result, &instruction->base);
TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
- result->value.type = get_maybe_type(ira->codegen, ptr_type);
+ result->value.type = get_optional_type(ira->codegen, ptr_type);
return result->value.type;
}
@@ -17865,43 +20526,52 @@ static TypeTableEntry *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira,
instruction->base.source_node, alloc_fn, coro_size);
ir_link_new_instruction(result, &instruction->base);
TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
- result->value.type = get_maybe_type(ira->codegen, u8_ptr_type);
+ result->value.type = get_optional_type(ira->codegen, u8_ptr_type);
return result->value.type;
}
-static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstructionAtomicRmw *instruction) {
- TypeTableEntry *operand_type = ir_resolve_type(ira, instruction->operand_type->other);
- if (type_is_invalid(operand_type)) {
+static TypeTableEntry *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op) {
+ TypeTableEntry *operand_type = ir_resolve_type(ira, op);
+ if (type_is_invalid(operand_type))
return ira->codegen->builtin_types.entry_invalid;
- }
+
if (operand_type->id == TypeTableEntryIdInt) {
if (operand_type->data.integral.bit_count < 8) {
- ir_add_error(ira, &instruction->base,
+ ir_add_error(ira, op,
buf_sprintf("expected integer type 8 bits or larger, found %" PRIu32 "-bit integer type",
operand_type->data.integral.bit_count));
return ira->codegen->builtin_types.entry_invalid;
}
if (operand_type->data.integral.bit_count > ira->codegen->pointer_size_bytes * 8) {
- ir_add_error(ira, &instruction->base,
+ ir_add_error(ira, op,
buf_sprintf("expected integer type pointer size or smaller, found %" PRIu32 "-bit integer type",
operand_type->data.integral.bit_count));
return ira->codegen->builtin_types.entry_invalid;
}
if (!is_power_of_2(operand_type->data.integral.bit_count)) {
- ir_add_error(ira, &instruction->base,
+ ir_add_error(ira, op,
buf_sprintf("%" PRIu32 "-bit integer type is not a power of 2", operand_type->data.integral.bit_count));
return ira->codegen->builtin_types.entry_invalid;
}
} else if (get_codegen_ptr_type(operand_type) == nullptr) {
- ir_add_error(ira, &instruction->base,
+ ir_add_error(ira, op,
buf_sprintf("expected integer or pointer type, found '%s'", buf_ptr(&operand_type->name)));
return ira->codegen->builtin_types.entry_invalid;
}
+ return operand_type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstructionAtomicRmw *instruction) {
+ TypeTableEntry *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->other);
+ if (type_is_invalid(operand_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *ptr_inst = instruction->ptr->other;
if (type_is_invalid(ptr_inst->value.type))
return ira->codegen->builtin_types.entry_invalid;
+ // TODO let this be volatile
TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, operand_type, false);
IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type);
if (type_is_invalid(casted_ptr->value.type))
@@ -17930,6 +20600,11 @@ static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstr
} else {
if (!ir_resolve_atomic_order(ira, instruction->ordering->other, &ordering))
return ira->codegen->builtin_types.entry_invalid;
+ if (ordering == AtomicOrderUnordered) {
+ ir_add_error(ira, instruction->ordering,
+ buf_sprintf("@atomicRmw atomic ordering must not be Unordered"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
}
if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar)
@@ -17945,6 +20620,49 @@ static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstr
return result->value.type;
}
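+// @atomicLoad analysis: the operand type must be a valid atomic operand (see
+// ir_resolve_atomic_operand_type), the pointer is taken as const, the ordering
+// may not be Release or AcqRel, and a comptime pointer folds to a plain load.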
+static TypeTableEntry *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstructionAtomicLoad *instruction) {
+ TypeTableEntry *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->other);
+ if (type_is_invalid(operand_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *ptr_inst = instruction->ptr->other;
+ if (type_is_invalid(ptr_inst->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, operand_type, true);
+ IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type);
+ if (type_is_invalid(casted_ptr->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ AtomicOrder ordering;
+ if (instruction->ordering == nullptr) {
+ ordering = instruction->resolved_ordering;
+ } else {
+ if (!ir_resolve_atomic_order(ira, instruction->ordering->other, &ordering))
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ if (ordering == AtomicOrderRelease || ordering == AtomicOrderAcqRel) {
+ assert(instruction->ordering != nullptr);
+ ir_add_error(ira, instruction->ordering,
+ buf_sprintf("@atomicLoad atomic ordering must not be Release or AcqRel"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ if (instr_is_comptime(casted_ptr)) {
+ IrInstruction *result = ir_get_deref(ira, &instruction->base, casted_ptr);
+ ir_link_new_instruction(result, &instruction->base);
+ assert(result->value.type != nullptr);
+ return result->value.type;
+ }
+
+ IrInstruction *result = ir_build_atomic_load(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, nullptr, casted_ptr, nullptr, ordering);
+ ir_link_new_instruction(result, &instruction->base);
+ result->value.type = operand_type;
+ return result->value.type;
+}
+
static TypeTableEntry *ir_analyze_instruction_promise_result_type(IrAnalyze *ira, IrInstructionPromiseResultType *instruction) {
TypeTableEntry *promise_type = ir_resolve_type(ira, instruction->promise_type->other);
if (type_is_invalid(promise_type))
@@ -18031,18 +20749,135 @@ static TypeTableEntry *ir_analyze_instruction_mark_err_ret_trace_ptr(IrAnalyze *
return result->value.type;
}
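+// @sqrt analysis: accepts comptime_float or any float type; comptime operands
+// are folded via bigfloat/softfloat, while runtime lowering currently exists
+// only for f16/f32/f64 (runtime f128 is still a compiler TODO).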
+static TypeTableEntry *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstructionSqrt *instruction) {
+ TypeTableEntry *float_type = ir_resolve_type(ira, instruction->type->other);
+ if (type_is_invalid(float_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *op = instruction->op->other;
+ if (type_is_invalid(op->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ bool ok_type = float_type->id == TypeTableEntryIdComptimeFloat || float_type->id == TypeTableEntryIdFloat;
+ if (!ok_type) {
+ ir_add_error(ira, instruction->type, buf_sprintf("@sqrt does not support type '%s'", buf_ptr(&float_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *casted_op = ir_implicit_cast(ira, op, float_type);
+ if (type_is_invalid(casted_op->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (instr_is_comptime(casted_op)) {
+ ConstExprValue *val = ir_resolve_const(ira, casted_op, UndefBad);
+ if (!val)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
+
+ if (float_type->id == TypeTableEntryIdComptimeFloat) {
+ bigfloat_sqrt(&out_val->data.x_bigfloat, &val->data.x_bigfloat);
+ } else if (float_type->id == TypeTableEntryIdFloat) {
+ switch (float_type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_sqrt(val->data.x_f16);
+ break;
+ case 32:
+ out_val->data.x_f32 = sqrtf(val->data.x_f32);
+ break;
+ case 64:
+ out_val->data.x_f64 = sqrt(val->data.x_f64);
+ break;
+ case 128:
+ f128M_sqrt(&val->data.x_f128, &out_val->data.x_f128);
+ break;
+ default:
+ zig_unreachable();
+ }
+ } else {
+ zig_unreachable();
+ }
+
+ return float_type;
+ }
+
+ assert(float_type->id == TypeTableEntryIdFloat);
+ if (float_type->data.floating.bit_count != 16 &&
+ float_type->data.floating.bit_count != 32 &&
+ float_type->data.floating.bit_count != 64) {
+ ir_add_error(ira, instruction->type, buf_sprintf("compiler TODO: add implementation of sqrt for '%s'", buf_ptr(&float_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *result = ir_build_sqrt(&ira->new_irb, instruction->base.scope,
+ instruction->base.source_node, nullptr, casted_op);
+ ir_link_new_instruction(result, &instruction->base);
+ result->value.type = float_type;
+ return result->value.type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_enum_to_int(IrAnalyze *ira, IrInstructionEnumToInt *instruction) {
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (target->value.type->id != TypeTableEntryIdEnum) {
+ ir_add_error(ira, instruction->target,
+ buf_sprintf("expected enum, found type '%s'", buf_ptr(&target->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ type_ensure_zero_bits_known(ira->codegen, target->value.type);
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ TypeTableEntry *tag_type = target->value.type->data.enumeration.tag_int_type;
+
+ IrInstruction *result = ir_analyze_enum_to_int(ira, &instruction->base, target, tag_type);
+ ir_link_new_instruction(result, &instruction->base);
+ return result->value.type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_int_to_enum(IrAnalyze *ira, IrInstructionIntToEnum *instruction) {
+ IrInstruction *dest_type_value = instruction->dest_type->other;
+ TypeTableEntry *dest_type = ir_resolve_type(ira, dest_type_value);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (dest_type->id != TypeTableEntryIdEnum) {
+ ir_add_error(ira, instruction->dest_type,
+ buf_sprintf("expected enum, found type '%s'", buf_ptr(&dest_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ type_ensure_zero_bits_known(ira->codegen, dest_type);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ TypeTableEntry *tag_type = dest_type->data.enumeration.tag_int_type;
+
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *casted_target = ir_implicit_cast(ira, target, tag_type);
+ if (type_is_invalid(casted_target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *result = ir_analyze_int_to_enum(ira, &instruction->base, casted_target, dest_type);
+ ir_link_new_instruction(result, &instruction->base);
+ return result->value.type;
+}
+
static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
case IrInstructionIdWidenOrShorten:
- case IrInstructionIdIntToEnum:
- case IrInstructionIdIntToErr:
- case IrInstructionIdErrToInt:
case IrInstructionIdStructInit:
case IrInstructionIdUnionInit:
case IrInstructionIdStructFieldPtr:
case IrInstructionIdUnionFieldPtr:
- case IrInstructionIdMaybeWrap:
+ case IrInstructionIdOptionalWrap:
case IrInstructionIdErrWrapCode:
case IrInstructionIdErrWrapPayload:
case IrInstructionIdCast:
@@ -18102,12 +20937,14 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_size_of(ira, (IrInstructionSizeOf *)instruction);
case IrInstructionIdTestNonNull:
return ir_analyze_instruction_test_non_null(ira, (IrInstructionTestNonNull *)instruction);
- case IrInstructionIdUnwrapMaybe:
- return ir_analyze_instruction_unwrap_maybe(ira, (IrInstructionUnwrapMaybe *)instruction);
+ case IrInstructionIdUnwrapOptional:
+ return ir_analyze_instruction_unwrap_maybe(ira, (IrInstructionUnwrapOptional *)instruction);
case IrInstructionIdClz:
return ir_analyze_instruction_clz(ira, (IrInstructionClz *)instruction);
case IrInstructionIdCtz:
return ir_analyze_instruction_ctz(ira, (IrInstructionCtz *)instruction);
+ case IrInstructionIdPopCount:
+ return ir_analyze_instruction_pop_count(ira, (IrInstructionPopCount *)instruction);
case IrInstructionIdSwitchBr:
return ir_analyze_instruction_switch_br(ira, (IrInstructionSwitchBr *)instruction);
case IrInstructionIdSwitchTarget:
@@ -18154,6 +20991,22 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_fence(ira, (IrInstructionFence *)instruction);
case IrInstructionIdTruncate:
return ir_analyze_instruction_truncate(ira, (IrInstructionTruncate *)instruction);
+ case IrInstructionIdIntCast:
+ return ir_analyze_instruction_int_cast(ira, (IrInstructionIntCast *)instruction);
+ case IrInstructionIdFloatCast:
+ return ir_analyze_instruction_float_cast(ira, (IrInstructionFloatCast *)instruction);
+ case IrInstructionIdErrSetCast:
+ return ir_analyze_instruction_err_set_cast(ira, (IrInstructionErrSetCast *)instruction);
+ case IrInstructionIdFromBytes:
+ return ir_analyze_instruction_from_bytes(ira, (IrInstructionFromBytes *)instruction);
+ case IrInstructionIdToBytes:
+ return ir_analyze_instruction_to_bytes(ira, (IrInstructionToBytes *)instruction);
+ case IrInstructionIdIntToFloat:
+ return ir_analyze_instruction_int_to_float(ira, (IrInstructionIntToFloat *)instruction);
+ case IrInstructionIdFloatToInt:
+ return ir_analyze_instruction_float_to_int(ira, (IrInstructionFloatToInt *)instruction);
+ case IrInstructionIdBoolToInt:
+ return ir_analyze_instruction_bool_to_int(ira, (IrInstructionBoolToInt *)instruction);
case IrInstructionIdIntType:
return ir_analyze_instruction_int_type(ira, (IrInstructionIntType *)instruction);
case IrInstructionIdBoolNot:
@@ -18176,6 +21029,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_return_address(ira, (IrInstructionReturnAddress *)instruction);
case IrInstructionIdFrameAddress:
return ir_analyze_instruction_frame_address(ira, (IrInstructionFrameAddress *)instruction);
+ case IrInstructionIdHandle:
+ return ir_analyze_instruction_handle(ira, (IrInstructionHandle *)instruction);
case IrInstructionIdAlignOf:
return ir_analyze_instruction_align_of(ira, (IrInstructionAlignOf *)instruction);
case IrInstructionIdOverflowOp:
@@ -18194,8 +21049,6 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_check_switch_prongs(ira, (IrInstructionCheckSwitchProngs *)instruction);
case IrInstructionIdCheckStatementIsVoid:
return ir_analyze_instruction_check_statement_is_void(ira, (IrInstructionCheckStatementIsVoid *)instruction);
- case IrInstructionIdCanImplicitCast:
- return ir_analyze_instruction_can_implicit_cast(ira, (IrInstructionCanImplicitCast *)instruction);
case IrInstructionIdDeclRef:
return ir_analyze_instruction_decl_ref(ira, (IrInstructionDeclRef *)instruction);
case IrInstructionIdPanic:
@@ -18214,12 +21067,14 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_field_parent_ptr(ira, (IrInstructionFieldParentPtr *)instruction);
case IrInstructionIdOffsetOf:
return ir_analyze_instruction_offset_of(ira, (IrInstructionOffsetOf *)instruction);
+ case IrInstructionIdTypeInfo:
+ return ir_analyze_instruction_type_info(ira, (IrInstructionTypeInfo *) instruction);
case IrInstructionIdTypeId:
return ir_analyze_instruction_type_id(ira, (IrInstructionTypeId *)instruction);
case IrInstructionIdSetEvalBranchQuota:
return ir_analyze_instruction_set_eval_branch_quota(ira, (IrInstructionSetEvalBranchQuota *)instruction);
- case IrInstructionIdPtrTypeOf:
- return ir_analyze_instruction_ptr_type_of(ira, (IrInstructionPtrTypeOf *)instruction);
+ case IrInstructionIdPtrType:
+ return ir_analyze_instruction_ptr_type(ira, (IrInstructionPtrType *)instruction);
case IrInstructionIdAlignCast:
return ir_analyze_instruction_align_cast(ira, (IrInstructionAlignCast *)instruction);
case IrInstructionIdOpaqueType:
@@ -18266,6 +21121,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_coro_alloc_helper(ira, (IrInstructionCoroAllocHelper *)instruction);
case IrInstructionIdAtomicRmw:
return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
+ case IrInstructionIdAtomicLoad:
+ return ir_analyze_instruction_atomic_load(ira, (IrInstructionAtomicLoad *)instruction);
case IrInstructionIdPromiseResultType:
return ir_analyze_instruction_promise_result_type(ira, (IrInstructionPromiseResultType *)instruction);
case IrInstructionIdAwaitBookkeeping:
@@ -18278,6 +21135,16 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_merge_err_ret_traces(ira, (IrInstructionMergeErrRetTraces *)instruction);
case IrInstructionIdMarkErrRetTracePtr:
return ir_analyze_instruction_mark_err_ret_trace_ptr(ira, (IrInstructionMarkErrRetTracePtr *)instruction);
+ case IrInstructionIdSqrt:
+ return ir_analyze_instruction_sqrt(ira, (IrInstructionSqrt *)instruction);
+ case IrInstructionIdIntToErr:
+ return ir_analyze_instruction_int_to_err(ira, (IrInstructionIntToErr *)instruction);
+ case IrInstructionIdErrToInt:
+ return ir_analyze_instruction_err_to_int(ira, (IrInstructionErrToInt *)instruction);
+ case IrInstructionIdIntToEnum:
+ return ir_analyze_instruction_int_to_enum(ira, (IrInstructionIntToEnum *)instruction);
+ case IrInstructionIdEnumToInt:
+ return ir_analyze_instruction_enum_to_int(ira, (IrInstructionEnumToInt *)instruction);
}
zig_unreachable();
}
@@ -18285,6 +21152,7 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
static TypeTableEntry *ir_analyze_instruction(IrAnalyze *ira, IrInstruction *instruction) {
TypeTableEntry *instruction_type = ir_analyze_instruction_nocast(ira, instruction);
instruction->value.type = instruction_type;
+
if (instruction->other) {
instruction->other->value.type = instruction_type;
} else {
@@ -18354,7 +21222,7 @@ TypeTableEntry *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutabl
} else if (ira->src_implicit_return_type_list.length == 0) {
return codegen->builtin_types.entry_unreachable;
} else {
- return ir_resolve_peer_types(ira, expected_type_source_node, ira->src_implicit_return_type_list.items,
+ return ir_resolve_peer_types(ira, expected_type_source_node, expected_type, ira->src_implicit_return_type_list.items,
ira->src_implicit_return_type_list.length);
}
}
@@ -18391,7 +21259,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdCheckStatementIsVoid:
case IrInstructionIdPanic:
case IrInstructionIdSetEvalBranchQuota:
- case IrInstructionIdPtrTypeOf:
+ case IrInstructionIdPtrType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdExport:
case IrInstructionIdCancel:
@@ -18407,6 +21275,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdAddImplicitReturnType:
case IrInstructionIdMergeErrRetTraces:
case IrInstructionIdMarkErrRetTracePtr:
+ case IrInstructionIdAtomicRmw:
return true;
case IrInstructionIdPhi:
@@ -18433,9 +21302,10 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdSliceType:
case IrInstructionIdSizeOf:
case IrInstructionIdTestNonNull:
- case IrInstructionIdUnwrapMaybe:
+ case IrInstructionIdUnwrapOptional:
case IrInstructionIdClz:
case IrInstructionIdCtz:
+ case IrInstructionIdPopCount:
case IrInstructionIdSwitchVar:
case IrInstructionIdSwitchTarget:
case IrInstructionIdUnionTag:
@@ -18453,9 +21323,10 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdAlignOf:
case IrInstructionIdReturnAddress:
case IrInstructionIdFrameAddress:
+ case IrInstructionIdHandle:
case IrInstructionIdTestErr:
case IrInstructionIdUnwrapErrCode:
- case IrInstructionIdMaybeWrap:
+ case IrInstructionIdOptionalWrap:
case IrInstructionIdErrWrapCode:
case IrInstructionIdErrWrapPayload:
case IrInstructionIdFnProto:
@@ -18468,13 +21339,13 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdIntToEnum:
case IrInstructionIdIntToErr:
case IrInstructionIdErrToInt:
- case IrInstructionIdCanImplicitCast:
case IrInstructionIdDeclRef:
case IrInstructionIdErrName:
case IrInstructionIdTypeName:
case IrInstructionIdTagName:
case IrInstructionIdFieldParentPtr:
case IrInstructionIdOffsetOf:
+ case IrInstructionIdTypeInfo:
case IrInstructionIdTypeId:
case IrInstructionIdAlignCast:
case IrInstructionIdOpaqueType:
@@ -18487,9 +21358,19 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdCoroSize:
case IrInstructionIdCoroSuspend:
case IrInstructionIdCoroFree:
- case IrInstructionIdAtomicRmw:
case IrInstructionIdCoroPromise:
case IrInstructionIdPromiseResultType:
+ case IrInstructionIdSqrt:
+ case IrInstructionIdAtomicLoad:
+ case IrInstructionIdIntCast:
+ case IrInstructionIdFloatCast:
+ case IrInstructionIdErrSetCast:
+ case IrInstructionIdIntToFloat:
+ case IrInstructionIdFloatToInt:
+ case IrInstructionIdBoolToInt:
+ case IrInstructionIdFromBytes:
+ case IrInstructionIdToBytes:
+ case IrInstructionIdEnumToInt:
return false;
case IrInstructionIdAsm:
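[Editor's sketch] The semantic-analysis additions in this ir.cpp hunk surface as new user-facing builtins. Below is a minimal sketch of how they are expected to be called, assuming 0.2-era Zig syntax; the AtomicOrder location in @import("builtin") and the SeqCst member are assumptions, while the argument shapes follow the analysis functions above.

    const builtin = @import("builtin");
    const AtomicOrder = builtin.AtomicOrder; // assumed location of the ordering enum

    const Color = enum { Red, Green, Blue };

    test "builtins added by the analysis above" {
        var x: u32 = 1;
        // @atomicLoad(T, ptr, ordering): Release and AcqRel orderings are rejected,
        // mirroring ir_analyze_instruction_atomic_load above.
        const v = @atomicLoad(u32, &x, AtomicOrder.SeqCst);

        // @sqrt(T, value): T must be a float type; f16/f32/f64 lower to a runtime
        // instruction, wider floats only fold at comptime per the "compiler TODO" branch.
        const r = @sqrt(f64, 2.0);

        // @popCount(value), @enumToInt(enum_value), @intToEnum(EnumType, integer).
        const bits = @popCount(v);
        const tag = @enumToInt(Color.Green);
        const back = @intToEnum(Color, tag);
    }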
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 99f79ff75e..77c7ef47b6 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -45,6 +45,10 @@ static void ir_print_var_instruction(IrPrint *irp, IrInstruction *instruction) {
}
static void ir_print_other_instruction(IrPrint *irp, IrInstruction *instruction) {
+ if (instruction == nullptr) {
+ fprintf(irp->f, "(null)");
+ return;
+ }
if (instruction->value.special != ConstValSpecialRuntime) {
ir_print_const_value(irp, &instruction->value);
} else {
@@ -148,7 +152,7 @@ static const char *ir_un_op_id_str(IrUnOp op_id) {
return "-%";
case IrUnOpDereference:
return "*";
- case IrUnOpMaybe:
+ case IrUnOpOptional:
return "?";
}
zig_unreachable();
@@ -358,9 +362,18 @@ static void ir_print_ptr_type_child(IrPrint *irp, IrInstructionPtrTypeChild *ins
}
static void ir_print_field_ptr(IrPrint *irp, IrInstructionFieldPtr *instruction) {
- fprintf(irp->f, "fieldptr ");
- ir_print_other_instruction(irp, instruction->container_ptr);
- fprintf(irp->f, ".%s", buf_ptr(instruction->field_name));
+ if (instruction->field_name_buffer) {
+ fprintf(irp->f, "fieldptr ");
+ ir_print_other_instruction(irp, instruction->container_ptr);
+ fprintf(irp->f, ".%s", buf_ptr(instruction->field_name_buffer));
+ } else {
+ assert(instruction->field_name_expr);
+ fprintf(irp->f, "@field(");
+ ir_print_other_instruction(irp, instruction->container_ptr);
+ fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, instruction->field_name_expr);
+ fprintf(irp->f, ")");
+ }
}
static void ir_print_struct_field_ptr(IrPrint *irp, IrInstructionStructFieldPtr *instruction) {
@@ -472,7 +485,7 @@ static void ir_print_test_null(IrPrint *irp, IrInstructionTestNonNull *instructi
fprintf(irp->f, " != null");
}
-static void ir_print_unwrap_maybe(IrPrint *irp, IrInstructionUnwrapMaybe *instruction) {
+static void ir_print_unwrap_maybe(IrPrint *irp, IrInstructionUnwrapOptional *instruction) {
fprintf(irp->f, "&??*");
ir_print_other_instruction(irp, instruction->value);
if (!instruction->safety_check_on) {
@@ -492,6 +505,12 @@ static void ir_print_ctz(IrPrint *irp, IrInstructionCtz *instruction) {
fprintf(irp->f, ")");
}
+static void ir_print_pop_count(IrPrint *irp, IrInstructionPopCount *instruction) {
+ fprintf(irp->f, "@popCount(");
+ ir_print_other_instruction(irp, instruction->value);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_switch_br(IrPrint *irp, IrInstructionSwitchBr *instruction) {
fprintf(irp->f, "switch (");
ir_print_other_instruction(irp, instruction->target_value);
@@ -639,6 +658,66 @@ static void ir_print_truncate(IrPrint *irp, IrInstructionTruncate *instruction)
fprintf(irp->f, ")");
}
+static void ir_print_int_cast(IrPrint *irp, IrInstructionIntCast *instruction) {
+ fprintf(irp->f, "@intCast(");
+ ir_print_other_instruction(irp, instruction->dest_type);
+ fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_float_cast(IrPrint *irp, IrInstructionFloatCast *instruction) {
+ fprintf(irp->f, "@floatCast(");
+ ir_print_other_instruction(irp, instruction->dest_type);
+ fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_err_set_cast(IrPrint *irp, IrInstructionErrSetCast *instruction) {
+ fprintf(irp->f, "@errSetCast(");
+ ir_print_other_instruction(irp, instruction->dest_type);
+ fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_from_bytes(IrPrint *irp, IrInstructionFromBytes *instruction) {
+ fprintf(irp->f, "@bytesToSlice(");
+ ir_print_other_instruction(irp, instruction->dest_child_type);
+ fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_to_bytes(IrPrint *irp, IrInstructionToBytes *instruction) {
+ fprintf(irp->f, "@sliceToBytes(");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_int_to_float(IrPrint *irp, IrInstructionIntToFloat *instruction) {
+ fprintf(irp->f, "@intToFloat(");
+ ir_print_other_instruction(irp, instruction->dest_type);
+ fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_float_to_int(IrPrint *irp, IrInstructionFloatToInt *instruction) {
+ fprintf(irp->f, "@floatToInt(");
+ ir_print_other_instruction(irp, instruction->dest_type);
+ fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_bool_to_int(IrPrint *irp, IrInstructionBoolToInt *instruction) {
+ fprintf(irp->f, "@boolToInt(");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_int_type(IrPrint *irp, IrInstructionIntType *instruction) {
fprintf(irp->f, "@IntType(");
ir_print_other_instruction(irp, instruction->is_signed);
@@ -712,6 +791,10 @@ static void ir_print_frame_address(IrPrint *irp, IrInstructionFrameAddress *inst
fprintf(irp->f, "@frameAddress()");
}
+static void ir_print_handle(IrPrint *irp, IrInstructionHandle *instruction) {
+ fprintf(irp->f, "@handle()");
+}
+
static void ir_print_return_address(IrPrint *irp, IrInstructionReturnAddress *instruction) {
fprintf(irp->f, "@returnAddress()");
}
@@ -768,7 +851,7 @@ static void ir_print_unwrap_err_payload(IrPrint *irp, IrInstructionUnwrapErrPayl
}
}
-static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionMaybeWrap *instruction) {
+static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionOptionalWrap *instruction) {
fprintf(irp->f, "@maybeWrap(");
ir_print_other_instruction(irp, instruction->value);
fprintf(irp->f, ")");
@@ -859,6 +942,17 @@ static void ir_print_int_to_ptr(IrPrint *irp, IrInstructionIntToPtr *instruction
static void ir_print_int_to_enum(IrPrint *irp, IrInstructionIntToEnum *instruction) {
fprintf(irp->f, "@intToEnum(");
+ if (instruction->dest_type == nullptr) {
+ fprintf(irp->f, "(null)");
+ } else {
+ ir_print_other_instruction(irp, instruction->dest_type);
+ }
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_enum_to_int(IrPrint *irp, IrInstructionEnumToInt *instruction) {
+ fprintf(irp->f, "@enumToInt(");
ir_print_other_instruction(irp, instruction->target);
fprintf(irp->f, ")");
}
@@ -904,15 +998,7 @@ static void ir_print_tag_name(IrPrint *irp, IrInstructionTagName *instruction) {
ir_print_other_instruction(irp, instruction->target);
}
-static void ir_print_can_implicit_cast(IrPrint *irp, IrInstructionCanImplicitCast *instruction) {
- fprintf(irp->f, "@canImplicitCast(");
- ir_print_other_instruction(irp, instruction->type_value);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->target_value);
- fprintf(irp->f, ")");
-}
-
-static void ir_print_ptr_type_of(IrPrint *irp, IrInstructionPtrTypeOf *instruction) {
+static void ir_print_ptr_type(IrPrint *irp, IrInstructionPtrType *instruction) {
fprintf(irp->f, "&");
if (instruction->align_value != nullptr) {
fprintf(irp->f, "align(");
@@ -927,10 +1013,8 @@ static void ir_print_ptr_type_of(IrPrint *irp, IrInstructionPtrTypeOf *instructi
}
static void ir_print_decl_ref(IrPrint *irp, IrInstructionDeclRef *instruction) {
- const char *ptr_str = instruction->lval.is_ptr ? "ptr " : "";
- const char *const_str = instruction->lval.is_const ? "const " : "";
- const char *volatile_str = instruction->lval.is_volatile ? "volatile " : "";
- fprintf(irp->f, "declref %s%s%s%s", const_str, volatile_str, ptr_str, buf_ptr(instruction->tld->name));
+ const char *ptr_str = (instruction->lval == LValPtr) ? "ptr " : "";
+ fprintf(irp->f, "declref %s%s", ptr_str, buf_ptr(instruction->tld->name));
}
static void ir_print_panic(IrPrint *irp, IrInstructionPanic *instruction) {
@@ -957,6 +1041,12 @@ static void ir_print_offset_of(IrPrint *irp, IrInstructionOffsetOf *instruction)
fprintf(irp->f, ")");
}
+static void ir_print_type_info(IrPrint *irp, IrInstructionTypeInfo *instruction) {
+ fprintf(irp->f, "@typeInfo(");
+ ir_print_other_instruction(irp, instruction->type_value);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_type_id(IrPrint *irp, IrInstructionTypeId *instruction) {
fprintf(irp->f, "@typeId(");
ir_print_other_instruction(irp, instruction->type_value);
@@ -1025,7 +1115,7 @@ static void ir_print_export(IrPrint *irp, IrInstructionExport *instruction) {
static void ir_print_error_return_trace(IrPrint *irp, IrInstructionErrorReturnTrace *instruction) {
fprintf(irp->f, "@errorReturnTrace(");
- switch (instruction->nullable) {
+ switch (instruction->optional) {
case IrInstructionErrorReturnTrace::Null:
fprintf(irp->f, "Null");
break;
@@ -1172,6 +1262,24 @@ static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instructio
fprintf(irp->f, ")");
}
+static void ir_print_atomic_load(IrPrint *irp, IrInstructionAtomicLoad *instruction) {
+ fprintf(irp->f, "@atomicLoad(");
+ if (instruction->operand_type != nullptr) {
+ ir_print_other_instruction(irp, instruction->operand_type);
+ } else {
+ fprintf(irp->f, "[TODO print]");
+ }
+ fprintf(irp->f, ",");
+ ir_print_other_instruction(irp, instruction->ptr);
+ fprintf(irp->f, ",");
+ if (instruction->ordering != nullptr) {
+ ir_print_other_instruction(irp, instruction->ordering);
+ } else {
+ fprintf(irp->f, "[TODO print]");
+ }
+ fprintf(irp->f, ")");
+}
+
static void ir_print_await_bookkeeping(IrPrint *irp, IrInstructionAwaitBookkeeping *instruction) {
fprintf(irp->f, "@awaitBookkeeping(");
ir_print_other_instruction(irp, instruction->promise_result_type);
@@ -1204,6 +1312,18 @@ static void ir_print_mark_err_ret_trace_ptr(IrPrint *irp, IrInstructionMarkErrRe
fprintf(irp->f, ")");
}
+static void ir_print_sqrt(IrPrint *irp, IrInstructionSqrt *instruction) {
+ fprintf(irp->f, "@sqrt(");
+ if (instruction->type != nullptr) {
+ ir_print_other_instruction(irp, instruction->type);
+ } else {
+ fprintf(irp->f, "null");
+ }
+ fprintf(irp->f, ",");
+ ir_print_other_instruction(irp, instruction->op);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
ir_print_prefix(irp, instruction);
switch (instruction->id) {
@@ -1311,12 +1431,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdTestNonNull:
ir_print_test_null(irp, (IrInstructionTestNonNull *)instruction);
break;
- case IrInstructionIdUnwrapMaybe:
- ir_print_unwrap_maybe(irp, (IrInstructionUnwrapMaybe *)instruction);
+ case IrInstructionIdUnwrapOptional:
+ ir_print_unwrap_maybe(irp, (IrInstructionUnwrapOptional *)instruction);
break;
case IrInstructionIdCtz:
ir_print_ctz(irp, (IrInstructionCtz *)instruction);
break;
+ case IrInstructionIdPopCount:
+ ir_print_pop_count(irp, (IrInstructionPopCount *)instruction);
+ break;
case IrInstructionIdClz:
ir_print_clz(irp, (IrInstructionClz *)instruction);
break;
@@ -1380,6 +1503,30 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdTruncate:
ir_print_truncate(irp, (IrInstructionTruncate *)instruction);
break;
+ case IrInstructionIdIntCast:
+ ir_print_int_cast(irp, (IrInstructionIntCast *)instruction);
+ break;
+ case IrInstructionIdFloatCast:
+ ir_print_float_cast(irp, (IrInstructionFloatCast *)instruction);
+ break;
+ case IrInstructionIdErrSetCast:
+ ir_print_err_set_cast(irp, (IrInstructionErrSetCast *)instruction);
+ break;
+ case IrInstructionIdFromBytes:
+ ir_print_from_bytes(irp, (IrInstructionFromBytes *)instruction);
+ break;
+ case IrInstructionIdToBytes:
+ ir_print_to_bytes(irp, (IrInstructionToBytes *)instruction);
+ break;
+ case IrInstructionIdIntToFloat:
+ ir_print_int_to_float(irp, (IrInstructionIntToFloat *)instruction);
+ break;
+ case IrInstructionIdFloatToInt:
+ ir_print_float_to_int(irp, (IrInstructionFloatToInt *)instruction);
+ break;
+ case IrInstructionIdBoolToInt:
+ ir_print_bool_to_int(irp, (IrInstructionBoolToInt *)instruction);
+ break;
case IrInstructionIdIntType:
ir_print_int_type(irp, (IrInstructionIntType *)instruction);
break;
@@ -1413,6 +1560,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdFrameAddress:
ir_print_frame_address(irp, (IrInstructionFrameAddress *)instruction);
break;
+ case IrInstructionIdHandle:
+ ir_print_handle(irp, (IrInstructionHandle *)instruction);
+ break;
case IrInstructionIdAlignOf:
ir_print_align_of(irp, (IrInstructionAlignOf *)instruction);
break;
@@ -1428,8 +1578,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdUnwrapErrPayload:
ir_print_unwrap_err_payload(irp, (IrInstructionUnwrapErrPayload *)instruction);
break;
- case IrInstructionIdMaybeWrap:
- ir_print_maybe_wrap(irp, (IrInstructionMaybeWrap *)instruction);
+ case IrInstructionIdOptionalWrap:
+ ir_print_maybe_wrap(irp, (IrInstructionOptionalWrap *)instruction);
break;
case IrInstructionIdErrWrapCode:
ir_print_err_wrap_code(irp, (IrInstructionErrWrapCode *)instruction);
@@ -1479,11 +1629,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdTagName:
ir_print_tag_name(irp, (IrInstructionTagName *)instruction);
break;
- case IrInstructionIdCanImplicitCast:
- ir_print_can_implicit_cast(irp, (IrInstructionCanImplicitCast *)instruction);
- break;
- case IrInstructionIdPtrTypeOf:
- ir_print_ptr_type_of(irp, (IrInstructionPtrTypeOf *)instruction);
+ case IrInstructionIdPtrType:
+ ir_print_ptr_type(irp, (IrInstructionPtrType *)instruction);
break;
case IrInstructionIdDeclRef:
ir_print_decl_ref(irp, (IrInstructionDeclRef *)instruction);
@@ -1497,6 +1644,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdOffsetOf:
ir_print_offset_of(irp, (IrInstructionOffsetOf *)instruction);
break;
+ case IrInstructionIdTypeInfo:
+ ir_print_type_info(irp, (IrInstructionTypeInfo *)instruction);
+ break;
case IrInstructionIdTypeId:
ir_print_type_id(irp, (IrInstructionTypeId *)instruction);
break;
@@ -1590,6 +1740,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdMarkErrRetTracePtr:
ir_print_mark_err_ret_trace_ptr(irp, (IrInstructionMarkErrRetTracePtr *)instruction);
break;
+ case IrInstructionIdSqrt:
+ ir_print_sqrt(irp, (IrInstructionSqrt *)instruction);
+ break;
+ case IrInstructionIdAtomicLoad:
+ ir_print_atomic_load(irp, (IrInstructionAtomicLoad *)instruction);
+ break;
+ case IrInstructionIdEnumToInt:
+ ir_print_enum_to_int(irp, (IrInstructionEnumToInt *)instruction);
+ break;
}
fprintf(irp->f, "\n");
}
diff --git a/src/link.cpp b/src/link.cpp
index 3c6e27e331..f65c072bac 100644
--- a/src/link.cpp
+++ b/src/link.cpp
@@ -208,7 +208,7 @@ static Buf *get_dynamic_linker_path(CodeGen *g) {
static void construct_linker_job_elf(LinkJob *lj) {
CodeGen *g = lj->codegen;
- if (lj->link_in_crt) {
+ if (g->libc_link_lib != nullptr) {
find_libc_lib_path(g);
}
@@ -217,6 +217,9 @@ static void construct_linker_job_elf(LinkJob *lj) {
lj->args.append(g->linker_script);
}
+ if (g->no_rosegment_workaround) {
+ lj->args.append("--no-rosegment");
+ }
lj->args.append("--gc-sections");
lj->args.append("-m");
@@ -322,10 +325,13 @@ static void construct_linker_job_elf(LinkJob *lj) {
lj->args.append((const char *)buf_ptr(g->link_objects.at(i)));
}
- if (g->libc_link_lib == nullptr && (g->out_type == OutTypeExe || g->out_type == OutTypeLib)) {
- Buf *builtin_o_path = build_o(g, "builtin");
- lj->args.append(buf_ptr(builtin_o_path));
+ if (g->out_type == OutTypeExe || g->out_type == OutTypeLib) {
+ if (g->libc_link_lib == nullptr) {
+ Buf *builtin_o_path = build_o(g, "builtin");
+ lj->args.append(buf_ptr(builtin_o_path));
+ }
+ // sometimes libgcc is missing stuff, so we still build compiler_rt and rely on weak linkage
Buf *compiler_rt_o_path = build_compiler_rt(g);
lj->args.append(buf_ptr(compiler_rt_o_path));
}
@@ -388,6 +394,19 @@ static void construct_linker_job_elf(LinkJob *lj) {
}
}
+static void construct_linker_job_wasm(LinkJob *lj) {
+ CodeGen *g = lj->codegen;
+
+ lj->args.append("--relocatable"); // So lld doesn't look for _start.
+ lj->args.append("-o");
+ lj->args.append(buf_ptr(&lj->out_file));
+
+ // .o files
+ for (size_t i = 0; i < g->link_objects.length; i += 1) {
+ lj->args.append((const char *)buf_ptr(g->link_objects.at(i)));
+ }
+}
+
//static bool is_target_cyg_mingw(const ZigTarget *target) {
// return (target->os == ZigLLVM_Win32 && target->env_type == ZigLLVM_Cygnus) ||
// (target->os == ZigLLVM_Win32 && target->env_type == ZigLLVM_GNU);
@@ -416,7 +435,7 @@ static bool zig_lld_link(ZigLLVM_ObjectFormatType oformat, const char **args, si
static void construct_linker_job_coff(LinkJob *lj) {
CodeGen *g = lj->codegen;
- if (lj->link_in_crt) {
+ if (g->libc_link_lib != nullptr) {
find_libc_lib_path(g);
}
@@ -538,7 +557,7 @@ static void construct_linker_job_coff(LinkJob *lj) {
lj->args.append(buf_ptr(builtin_o_path));
}
- // msvc compiler_rt is missing some stuff, so we still build it and rely on LinkOnce
+ // msvc compiler_rt is missing some stuff, so we still build it and rely on weak linkage
Buf *compiler_rt_o_path = build_compiler_rt(g);
lj->args.append(buf_ptr(compiler_rt_o_path));
}
@@ -882,7 +901,7 @@ static void construct_linker_job_macho(LinkJob *lj) {
if (strchr(buf_ptr(link_lib->name), '/') == nullptr) {
Buf *arg = buf_sprintf("-l%s", buf_ptr(link_lib->name));
lj->args.append(buf_ptr(arg));
- } else {
+ } else {
lj->args.append(buf_ptr(link_lib->name));
}
}
@@ -921,7 +940,7 @@ static void construct_linker_job(LinkJob *lj) {
case ZigLLVM_MachO:
return construct_linker_job_macho(lj);
case ZigLLVM_Wasm:
- zig_panic("TODO link wasm");
+ return construct_linker_job_wasm(lj);
}
}
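[Editor's sketch] With construct_linker_job_wasm in place, wasm objects are linked with --relocatable so lld does not demand a _start symbol. A minimal piece of Zig source one might feed through this new path looks like the following; the exact invocation (e.g. selecting wasm32 via the --target-arch flag listed in the usage text below) is an assumption.

    // Exported so the symbol stays visible when linked with --relocatable
    // and no start symbol.
    export fn add(a: i32, b: i32) i32 {
        return a + b;
    }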
diff --git a/src/main.cpp b/src/main.cpp
index 35c7462f4b..5f96953f21 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -23,6 +23,7 @@ static int usage(const char *arg0) {
" build-exe [source] create executable from source or object files\n"
" build-lib [source] create library from source or object files\n"
" build-obj [source] create object from source or assembly\n"
+ " builtin show the source code of that @import(\"builtin\")\n"
" run [source] create executable and run immediately\n"
" translate-c [source] convert c code to zig code\n"
" targets list available compilation targets\n"
@@ -33,7 +34,7 @@ static int usage(const char *arg0) {
" --assembly [source] add assembly file to build\n"
" --cache-dir [path] override the cache directory\n"
" --color [auto|off|on] enable or disable colored error messages\n"
- " --emit [filetype] emit a specific file format as compilation output\n"
+ " --emit [asm|bin|llvm-ir] emit a specific file format as compilation output\n"
" --enable-timing-info print timing diagnostics\n"
" --libc-include-dir [path] directory where libc stdlib.h resides\n"
" --name [name] override output name\n"
@@ -43,6 +44,7 @@ static int usage(const char *arg0) {
" --pkg-end pop current pkg\n"
" --release-fast build with optimizations on and safety off\n"
" --release-safe build with optimizations on and safety on\n"
+ " --release-small build with size optimizations on and safety off\n"
" --static output will be statically linked\n"
" --strip exclude debug symbols\n"
" --target-arch [name] specify target architecture\n"
@@ -73,6 +75,7 @@ static int usage(const char *arg0) {
" -L[dir] alias for --library-path\n"
" -rdynamic add all symbols to the dynamic symbol table\n"
" -rpath [path] add directory to the runtime library search path\n"
+ " --no-rosegment compromise security to workaround valgrind bug\n"
" -mconsole (windows) --subsystem console to the linker\n"
" -mwindows (windows) --subsystem windows to the linker\n"
" -framework [name] (darwin) link against framework\n"
@@ -212,6 +215,7 @@ static Buf *resolve_zig_lib_dir(void) {
enum Cmd {
CmdInvalid,
CmdBuild,
+ CmdBuiltin,
CmdRun,
CmdTest,
CmdVersion,
@@ -323,6 +327,7 @@ int main(int argc, char **argv) {
ZigList<const char *> test_exec_args = {0};
int comptime_args_end = 0;
int runtime_args_start = argc;
+ bool no_rosegment_workaround = false;
if (argc >= 2 && strcmp(argv[1], "build") == 0) {
const char *zig_exe_path = arg0;
@@ -482,6 +487,8 @@ int main(int argc, char **argv) {
build_mode = BuildModeFastRelease;
} else if (strcmp(arg, "--release-safe") == 0) {
build_mode = BuildModeSafeRelease;
+ } else if (strcmp(arg, "--release-small") == 0) {
+ build_mode = BuildModeSmallRelease;
} else if (strcmp(arg, "--strip") == 0) {
strip = true;
} else if (strcmp(arg, "--static") == 0) {
@@ -504,6 +511,8 @@ int main(int argc, char **argv) {
mconsole = true;
} else if (strcmp(arg, "-rdynamic") == 0) {
rdynamic = true;
+ } else if (strcmp(arg, "--no-rosegment") == 0) {
+ no_rosegment_workaround = true;
} else if (strcmp(arg, "--each-lib-rpath") == 0) {
each_lib_rpath = true;
} else if (strcmp(arg, "--enable-timing-info") == 0) {
@@ -657,6 +666,8 @@ int main(int argc, char **argv) {
out_type = OutTypeExe;
} else if (strcmp(arg, "targets") == 0) {
cmd = CmdTargets;
+ } else if (strcmp(arg, "builtin") == 0) {
+ cmd = CmdBuiltin;
} else {
fprintf(stderr, "Unrecognized command: %s\n", arg);
return usage(arg0);
@@ -674,6 +685,7 @@ int main(int argc, char **argv) {
return usage(arg0);
}
break;
+ case CmdBuiltin:
case CmdVersion:
case CmdZen:
case CmdTargets:
@@ -720,6 +732,16 @@ int main(int argc, char **argv) {
}
switch (cmd) {
+ case CmdBuiltin: {
+ Buf *zig_lib_dir_buf = resolve_zig_lib_dir();
+ CodeGen *g = codegen_create(nullptr, target, out_type, build_mode, zig_lib_dir_buf);
+ Buf *builtin_source = codegen_generate_builtin_source(g);
+ if (fwrite(buf_ptr(builtin_source), 1, buf_len(builtin_source), stdout) != buf_len(builtin_source)) {
+ fprintf(stderr, "unable to write to stdout: %s\n", strerror(ferror(stdout)));
+ return EXIT_FAILURE;
+ }
+ return EXIT_SUCCESS;
+ }
case CmdRun:
case CmdBuild:
case CmdTranslateC:
@@ -841,6 +863,7 @@ int main(int argc, char **argv) {
codegen_set_windows_subsystem(g, mwindows, mconsole);
codegen_set_rdynamic(g, rdynamic);
+ g->no_rosegment_workaround = no_rosegment_workaround;
if (mmacosx_version_min && mios_version_min) {
fprintf(stderr, "-mmacosx-version-min and -mios-version-min options not allowed together\n");
return EXIT_FAILURE;
@@ -868,15 +891,19 @@ int main(int argc, char **argv) {
add_package(g, cur_pkg, g->root_package);
- if (cmd == CmdBuild || cmd == CmdRun) {
- codegen_set_emit_file_type(g, emit_file_type);
-
+ if (cmd == CmdBuild || cmd == CmdRun || cmd == CmdTest) {
for (size_t i = 0; i < objects.length; i += 1) {
codegen_add_object(g, buf_create_from_str(objects.at(i)));
}
for (size_t i = 0; i < asm_files.length; i += 1) {
codegen_add_assembly(g, buf_create_from_str(asm_files.at(i)));
}
+ }
+
+
+ if (cmd == CmdBuild || cmd == CmdRun) {
+ codegen_set_emit_file_type(g, emit_file_type);
+
codegen_build(g);
codegen_link(g, out_file);
if (timing_info)
@@ -901,6 +928,8 @@ int main(int argc, char **argv) {
codegen_print_timing_report(g, stdout);
return EXIT_SUCCESS;
} else if (cmd == CmdTest) {
+ codegen_set_emit_file_type(g, emit_file_type);
+
ZigTarget native;
get_native_target(&native);
get_native_target(&native);
diff --git a/src/os.cpp b/src/os.cpp
index e0491b21de..91a591a7b6 100644
--- a/src/os.cpp
+++ b/src/os.cpp
@@ -26,7 +26,6 @@
#include
#include
#include
-#include "windows_com.hpp"
typedef SSIZE_T ssize_t;
#else
@@ -225,6 +224,11 @@ void os_path_extname(Buf *full_path, Buf *out_basename, Buf *out_extname) {
}
void os_path_join(Buf *dirname, Buf *basename, Buf *out_full_path) {
+ if (buf_len(dirname) == 0) {
+ buf_init_from_buf(out_full_path, basename);
+ return;
+ }
+
buf_init_from_buf(out_full_path, dirname);
uint8_t c = *(buf_ptr(out_full_path) + buf_len(out_full_path) - 1);
if (!os_is_sep(c))
@@ -989,12 +993,29 @@ int os_self_exe_path(Buf *out_path) {
}
#elif defined(ZIG_OS_DARWIN)
+ // How long is the executable's path?
uint32_t u32_len = 0;
int ret1 = _NSGetExecutablePath(nullptr, &u32_len);
assert(ret1 != 0);
- buf_resize(out_path, u32_len);
- int ret2 = _NSGetExecutablePath(buf_ptr(out_path), &u32_len);
+
+ Buf *tmp = buf_alloc_fixed(u32_len);
+
+ // Fill the executable path.
+ int ret2 = _NSGetExecutablePath(buf_ptr(tmp), &u32_len);
assert(ret2 == 0);
+
+ // According to the libuv project, PATH_MAX*2 works around a libc bug where
+ // the resolved path is sometimes bigger than PATH_MAX.
+ buf_resize(out_path, PATH_MAX*2);
+ char *real_path = realpath(buf_ptr(tmp), buf_ptr(out_path));
+ if (!real_path) {
+ buf_init_from_buf(out_path, tmp);
+ return 0;
+ }
+
+ // Resize out_path for the correct length.
+ buf_resize(out_path, strlen(buf_ptr(out_path)));
+
return 0;
#elif defined(ZIG_OS_LINUX)
buf_resize(out_path, 256);
@@ -1007,6 +1028,7 @@ int os_self_exe_path(Buf *out_path) {
buf_resize(out_path, buf_len(out_path) * 2);
continue;
}
+ buf_resize(out_path, amt);
return 0;
}
#endif
@@ -1092,249 +1114,10 @@ void os_stderr_set_color(TermColor color) {
#endif
}
-int os_find_windows_sdk(ZigWindowsSDK **out_sdk) {
-#if defined(ZIG_OS_WINDOWS)
- ZigWindowsSDK *result_sdk = allocate<ZigWindowsSDK>(1);
- buf_resize(&result_sdk->path10, 0);
- buf_resize(&result_sdk->path81, 0);
-
- HKEY key;
- HRESULT rc;
- rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots", 0, KEY_QUERY_VALUE | KEY_WOW64_32KEY | KEY_ENUMERATE_SUB_KEYS, &key);
- if (rc != ERROR_SUCCESS) {
- return ErrorFileNotFound;
- }
-
- {
- DWORD tmp_buf_len = MAX_PATH;
- buf_resize(&result_sdk->path10, tmp_buf_len);
- rc = RegQueryValueEx(key, "KitsRoot10", NULL, NULL, (LPBYTE)buf_ptr(&result_sdk->path10), &tmp_buf_len);
- if (rc == ERROR_FILE_NOT_FOUND) {
- buf_resize(&result_sdk->path10, 0);
- } else {
- buf_resize(&result_sdk->path10, tmp_buf_len);
- }
- }
- {
- DWORD tmp_buf_len = MAX_PATH;
- buf_resize(&result_sdk->path81, tmp_buf_len);
- rc = RegQueryValueEx(key, "KitsRoot81", NULL, NULL, (LPBYTE)buf_ptr(&result_sdk->path81), &tmp_buf_len);
- if (rc == ERROR_FILE_NOT_FOUND) {
- buf_resize(&result_sdk->path81, 0);
- } else {
- buf_resize(&result_sdk->path81, tmp_buf_len);
- }
- }
-
- if (buf_len(&result_sdk->path10) != 0) {
- Buf *sdk_lib_dir = buf_sprintf("%s\\Lib\\*", buf_ptr(&result_sdk->path10));
-
- // enumerate files in sdk path looking for latest version
- WIN32_FIND_DATA ffd;
- HANDLE hFind = FindFirstFileA(buf_ptr(sdk_lib_dir), &ffd);
- if (hFind == INVALID_HANDLE_VALUE) {
- return ErrorFileNotFound;
- }
- int v0 = 0, v1 = 0, v2 = 0, v3 = 0;
- bool found_version_dir = false;
- for (;;) {
- if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
- int c0 = 0, c1 = 0, c2 = 0, c3 = 0;
- sscanf(ffd.cFileName, "%d.%d.%d.%d", &c0, &c1, &c2, &c3);
- if (c0 == 10 && c1 == 0 && c2 == 10240 && c3 == 0) {
- // Microsoft released 26624 as 10240 accidentally.
- // https://developer.microsoft.com/en-us/windows/downloads/sdk-archive
- c2 = 26624;
- }
- if ((c0 > v0) || (c1 > v1) || (c2 > v2) || (c3 > v3)) {
- v0 = c0, v1 = c1, v2 = c2, v3 = c3;
- buf_init_from_str(&result_sdk->version10, ffd.cFileName);
- found_version_dir = true;
- }
- }
- if (FindNextFile(hFind, &ffd) == 0) {
- FindClose(hFind);
- break;
- }
- }
- if (!found_version_dir) {
- buf_resize(&result_sdk->path10, 0);
- }
- }
-
- if (buf_len(&result_sdk->path81) != 0) {
- Buf *sdk_lib_dir = buf_sprintf("%s\\Lib\\winv*", buf_ptr(&result_sdk->path81));
-
- // enumerate files in sdk path looking for latest version
- WIN32_FIND_DATA ffd;
- HANDLE hFind = FindFirstFileA(buf_ptr(sdk_lib_dir), &ffd);
- if (hFind == INVALID_HANDLE_VALUE) {
- return ErrorFileNotFound;
- }
- int v0 = 0, v1 = 0;
- bool found_version_dir = false;
- for (;;) {
- if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
- int c0 = 0, c1 = 0;
- sscanf(ffd.cFileName, "winv%d.%d", &c0, &c1);
- if ((c0 > v0) || (c1 > v1)) {
- v0 = c0, v1 = c1;
- buf_init_from_str(&result_sdk->version81, ffd.cFileName);
- found_version_dir = true;
- }
- }
- if (FindNextFile(hFind, &ffd) == 0) {
- FindClose(hFind);
- break;
- }
- }
- if (!found_version_dir) {
- buf_resize(&result_sdk->path81, 0);
- }
- }
-
- *out_sdk = result_sdk;
- return 0;
-#else
- return ErrorFileNotFound;
-#endif
-}
-
-int os_get_win32_vcruntime_path(Buf* output_buf, ZigLLVM_ArchType platform_type) {
-#if defined(ZIG_OS_WINDOWS)
- buf_resize(output_buf, 0);
- //COM Smart Pointerse requires explicit scope
- {
- HRESULT rc;
- rc = CoInitializeEx(NULL, COINIT_MULTITHREADED);
- if (rc != S_OK) {
- goto com_done;
- }
-
- //This COM class is installed when a VS2017
- ISetupConfigurationPtr setup_config;
- rc = setup_config.CreateInstance(__uuidof(SetupConfiguration));
- if (rc != S_OK) {
- goto com_done;
- }
-
- IEnumSetupInstancesPtr all_instances;
- rc = setup_config->EnumInstances(&all_instances);
- if (rc != S_OK) {
- goto com_done;
- }
-
- ISetupInstance* curr_instance;
- ULONG found_inst;
- while ((rc = all_instances->Next(1, &curr_instance, &found_inst) == S_OK)) {
- BSTR bstr_inst_path;
- rc = curr_instance->GetInstallationPath(&bstr_inst_path);
- if (rc != S_OK) {
- goto com_done;
- }
- //BSTRs are UTF-16 encoded, so we need to convert the string & adjust the length
- UINT bstr_path_len = *((UINT*)bstr_inst_path - 1);
- ULONG tmp_path_len = bstr_path_len / 2 + 1;
- char* conv_path = (char*)bstr_inst_path;
- char *tmp_path = (char*)alloca(tmp_path_len);
- memset(tmp_path, 0, tmp_path_len);
- uint32_t c = 0;
- for (uint32_t i = 0; i < bstr_path_len; i += 2) {
- tmp_path[c] = conv_path[i];
- ++c;
- assert(c != tmp_path_len);
- }
-
- buf_append_str(output_buf, tmp_path);
- buf_append_char(output_buf, '\\');
-
- Buf* tmp_buf = buf_alloc();
- buf_append_buf(tmp_buf, output_buf);
- buf_append_str(tmp_buf, "VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt");
- FILE* tools_file = fopen(buf_ptr(tmp_buf), "r");
- if (!tools_file) {
- goto com_done;
- }
- memset(tmp_path, 0, tmp_path_len);
- fgets(tmp_path, tmp_path_len, tools_file);
- strtok(tmp_path, " \r\n");
- fclose(tools_file);
- buf_appendf(output_buf, "VC\\Tools\\MSVC\\%s\\lib\\", tmp_path);
- switch (platform_type) {
- case ZigLLVM_x86:
- buf_append_str(output_buf, "x86\\");
- break;
- case ZigLLVM_x86_64:
- buf_append_str(output_buf, "x64\\");
- break;
- case ZigLLVM_arm:
- buf_append_str(output_buf, "arm\\");
- break;
- default:
- zig_panic("Attemped to use vcruntime for non-supported platform.");
- }
- buf_resize(tmp_buf, 0);
- buf_append_buf(tmp_buf, output_buf);
- buf_append_str(tmp_buf, "vcruntime.lib");
-
- if (GetFileAttributesA(buf_ptr(tmp_buf)) != INVALID_FILE_ATTRIBUTES) {
- return 0;
- }
- }
- }
-
-com_done:;
- HKEY key;
- HRESULT rc;
- rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7", 0, KEY_QUERY_VALUE | KEY_WOW64_32KEY, &key);
- if (rc != ERROR_SUCCESS) {
- return ErrorFileNotFound;
- }
-
- DWORD dw_type = 0;
- DWORD cb_data = 0;
- rc = RegQueryValueEx(key, "14.0", NULL, &dw_type, NULL, &cb_data);
- if ((rc == ERROR_FILE_NOT_FOUND) || (REG_SZ != dw_type)) {
- return ErrorFileNotFound;
- }
-
- Buf* tmp_buf = buf_alloc_fixed(cb_data);
- RegQueryValueExA(key, "14.0", NULL, NULL, (LPBYTE)buf_ptr(tmp_buf), &cb_data);
- //RegQueryValueExA returns the length of the string INCLUDING the null terminator
- buf_resize(tmp_buf, cb_data-1);
- buf_append_str(tmp_buf, "VC\\Lib\\");
- switch (platform_type) {
- case ZigLLVM_x86:
- //x86 is in the root of the Lib folder
- break;
- case ZigLLVM_x86_64:
- buf_append_str(tmp_buf, "amd64\\");
- break;
- case ZigLLVM_arm:
- buf_append_str(tmp_buf, "arm\\");
- break;
- default:
- zig_panic("Attemped to use vcruntime for non-supported platform.");
- }
-
- buf_append_buf(output_buf, tmp_buf);
- buf_append_str(tmp_buf, "vcruntime.lib");
-
- if (GetFileAttributesA(buf_ptr(tmp_buf)) != INVALID_FILE_ATTRIBUTES) {
- return 0;
- } else {
- buf_resize(output_buf, 0);
- return ErrorFileNotFound;
- }
-#else
- return ErrorFileNotFound;
-#endif
-}
-
int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchType platform_type) {
#if defined(ZIG_OS_WINDOWS)
buf_resize(output_buf, 0);
- buf_appendf(output_buf, "%s\\Lib\\%s\\ucrt\\", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10));
+ buf_appendf(output_buf, "%s\\Lib\\%s\\ucrt\\", sdk->path10_ptr, sdk->version10_ptr);
switch (platform_type) {
case ZigLLVM_x86:
buf_append_str(output_buf, "x86\\");
@@ -1366,7 +1149,7 @@ int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_Arch
int os_get_win32_ucrt_include_path(ZigWindowsSDK *sdk, Buf* output_buf) {
#if defined(ZIG_OS_WINDOWS)
buf_resize(output_buf, 0);
- buf_appendf(output_buf, "%s\\Include\\%s\\ucrt", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10));
+ buf_appendf(output_buf, "%s\\Include\\%s\\ucrt", sdk->path10_ptr, sdk->version10_ptr);
if (GetFileAttributesA(buf_ptr(output_buf)) != INVALID_FILE_ATTRIBUTES) {
return 0;
}
@@ -1383,7 +1166,7 @@ int os_get_win32_kern32_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchTy
#if defined(ZIG_OS_WINDOWS)
{
buf_resize(output_buf, 0);
- buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10));
+ buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", sdk->path10_ptr, sdk->version10_ptr);
switch (platform_type) {
case ZigLLVM_x86:
buf_append_str(output_buf, "x86\\");
@@ -1406,7 +1189,7 @@ int os_get_win32_kern32_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchTy
}
{
buf_resize(output_buf, 0);
- buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", buf_ptr(&sdk->path81), buf_ptr(&sdk->version81));
+ buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", sdk->path81_ptr, sdk->version81_ptr);
switch (platform_type) {
case ZigLLVM_x86:
buf_append_str(output_buf, "x86\\");
diff --git a/src/os.hpp b/src/os.hpp
index b94e98ec3d..cfe4e8f3a2 100644
--- a/src/os.hpp
+++ b/src/os.hpp
@@ -12,6 +12,7 @@
#include "buffer.hpp"
#include "error.hpp"
#include "zig_llvm.h"
+#include "windows_sdk.h"
#include
#include
@@ -79,15 +80,6 @@ bool os_is_sep(uint8_t c);
int os_self_exe_path(Buf *out_path);
-struct ZigWindowsSDK {
- Buf path10;
- Buf version10;
- Buf path81;
- Buf version81;
-};
-
-int os_find_windows_sdk(ZigWindowsSDK **out_sdk);
-int os_get_win32_vcruntime_path(Buf *output_buf, ZigLLVM_ArchType platform_type);
int os_get_win32_ucrt_include_path(ZigWindowsSDK *sdk, Buf *output_buf);
int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf *output_buf, ZigLLVM_ArchType platform_type);
int os_get_win32_kern32_path(ZigWindowsSDK *sdk, Buf *output_buf, ZigLLVM_ArchType platform_type);
diff --git a/src/parser.cpp b/src/parser.cpp
index 2bd94033cc..453ab7ce2c 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -648,12 +648,11 @@ static AstNode *ast_parse_asm_expr(ParseContext *pc, size_t *token_index, bool m
}
/*
-SuspendExpression(body) = "suspend" "|" Symbol "|" body
+SuspendExpression(body) = "suspend" option( body )
*/
static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, bool mandatory) {
- size_t orig_token_index = *token_index;
-
Token *suspend_token = &pc->tokens->at(*token_index);
+
if (suspend_token->id == TokenIdKeywordSuspend) {
*token_index += 1;
} else if (mandatory) {
@@ -663,23 +662,18 @@ static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, b
return nullptr;
}
- Token *bar_token = &pc->tokens->at(*token_index);
- if (bar_token->id == TokenIdBinOr) {
- *token_index += 1;
+ Token *lbrace = &pc->tokens->at(*token_index);
+ if (lbrace->id == TokenIdLBrace) {
+ AstNode *node = ast_create_node(pc, NodeTypeSuspend, suspend_token);
+ node->data.suspend.block = ast_parse_block(pc, token_index, true);
+ return node;
} else if (mandatory) {
- ast_expect_token(pc, suspend_token, TokenIdBinOr);
+ ast_expect_token(pc, lbrace, TokenIdLBrace);
zig_unreachable();
} else {
- *token_index = orig_token_index;
+ *token_index -= 1;
return nullptr;
}
-
- AstNode *node = ast_create_node(pc, NodeTypeSuspend, suspend_token);
- node->data.suspend.promise_symbol = ast_parse_symbol(pc, token_index);
- ast_eat_token(pc, token_index, TokenIdBinOr);
- node->data.suspend.block = ast_parse_block(pc, token_index, true);
-
- return node;
}
/*
@@ -1025,7 +1019,7 @@ static AstNode *ast_parse_fn_proto_partial(ParseContext *pc, size_t *token_index
}
/*
-SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression)
+SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | ".*" | ".?")
FnCallExpression : token(LParen) list(Expression, token(Comma)) token(RParen)
ArrayAccessExpression : token(LBracket) Expression token(RBracket)
SliceExpression = "[" Expression ".." option(Expression) "]"
@@ -1110,13 +1104,34 @@ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index,
} else if (first_token->id == TokenIdDot) {
*token_index += 1;
- Token *name_token = ast_eat_token(pc, token_index, TokenIdSymbol);
+ Token *token = &pc->tokens->at(*token_index);
- AstNode *node = ast_create_node(pc, NodeTypeFieldAccessExpr, first_token);
- node->data.field_access_expr.struct_expr = primary_expr;
- node->data.field_access_expr.field_name = token_buf(name_token);
+ if (token->id == TokenIdSymbol) {
+ *token_index += 1;
+
+ AstNode *node = ast_create_node(pc, NodeTypeFieldAccessExpr, first_token);
+ node->data.field_access_expr.struct_expr = primary_expr;
+ node->data.field_access_expr.field_name = token_buf(token);
+
+ primary_expr = node;
+ } else if (token->id == TokenIdStar) {
+ *token_index += 1;
+
+ AstNode *node = ast_create_node(pc, NodeTypePtrDeref, first_token);
+ node->data.ptr_deref_expr.target = primary_expr;
+
+ primary_expr = node;
+ } else if (token->id == TokenIdQuestion) {
+ *token_index += 1;
+
+ AstNode *node = ast_create_node(pc, NodeTypeUnwrapOptional, first_token);
+ node->data.unwrap_optional.expr = primary_expr;
+
+ primary_expr = node;
+ } else {
+ ast_invalid_token_error(pc, token);
+ }
- primary_expr = node;
} else {
return primary_expr;
}
@@ -1129,24 +1144,21 @@ static PrefixOp tok_to_prefix_op(Token *token) {
case TokenIdDash: return PrefixOpNegation;
case TokenIdMinusPercent: return PrefixOpNegationWrap;
case TokenIdTilde: return PrefixOpBinNot;
- case TokenIdStar: return PrefixOpDereference;
- case TokenIdMaybe: return PrefixOpMaybe;
- case TokenIdDoubleQuestion: return PrefixOpUnwrapMaybe;
- case TokenIdStarStar: return PrefixOpDereference;
+ case TokenIdQuestion: return PrefixOpOptional;
+ case TokenIdAmpersand: return PrefixOpAddrOf;
default: return PrefixOpInvalid;
}
}
-static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) {
- Token *ampersand_tok = ast_eat_token(pc, token_index, TokenIdAmpersand);
-
- AstNode *node = ast_create_node(pc, NodeTypeAddrOfExpr, ampersand_tok);
+static AstNode *ast_parse_pointer_type(ParseContext *pc, size_t *token_index, Token *star_tok) {
+ AstNode *node = ast_create_node(pc, NodeTypePointerType, star_tok);
+ node->data.pointer_type.star_token = star_tok;
Token *token = &pc->tokens->at(*token_index);
if (token->id == TokenIdKeywordAlign) {
*token_index += 1;
ast_eat_token(pc, token_index, TokenIdLParen);
- node->data.addr_of_expr.align_expr = ast_parse_expression(pc, token_index, true);
+ node->data.pointer_type.align_expr = ast_parse_expression(pc, token_index, true);
token = &pc->tokens->at(*token_index);
if (token->id == TokenIdColon) {
@@ -1155,35 +1167,45 @@ static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) {
ast_eat_token(pc, token_index, TokenIdColon);
Token *bit_offset_end_tok = ast_eat_token(pc, token_index, TokenIdIntLiteral);
- node->data.addr_of_expr.bit_offset_start = token_bigint(bit_offset_start_tok);
- node->data.addr_of_expr.bit_offset_end = token_bigint(bit_offset_end_tok);
+ node->data.pointer_type.bit_offset_start = token_bigint(bit_offset_start_tok);
+ node->data.pointer_type.bit_offset_end = token_bigint(bit_offset_end_tok);
}
ast_eat_token(pc, token_index, TokenIdRParen);
token = &pc->tokens->at(*token_index);
}
if (token->id == TokenIdKeywordConst) {
*token_index += 1;
- node->data.addr_of_expr.is_const = true;
+ node->data.pointer_type.is_const = true;
token = &pc->tokens->at(*token_index);
}
if (token->id == TokenIdKeywordVolatile) {
*token_index += 1;
- node->data.addr_of_expr.is_volatile = true;
+ node->data.pointer_type.is_volatile = true;
}
- node->data.addr_of_expr.op_expr = ast_parse_prefix_op_expr(pc, token_index, true);
+ node->data.pointer_type.op_expr = ast_parse_prefix_op_expr(pc, token_index, true);
return node;
}
/*
PrefixOpExpression = PrefixOp ErrorSetExpr | SuffixOpExpression
-PrefixOp = "!" | "-" | "~" | "*" | ("&" option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" | "await"
+PrefixOp = "!" | "-" | "~" | (("*" | "[*]") option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" | "await"
*/
static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
Token *token = &pc->tokens->at(*token_index);
- if (token->id == TokenIdAmpersand) {
- return ast_parse_addr_of(pc, token_index);
+ if (token->id == TokenIdStar || token->id == TokenIdBracketStarBracket) {
+ *token_index += 1;
+ return ast_parse_pointer_type(pc, token_index, token);
+ }
+ if (token->id == TokenIdStarStar) {
+ *token_index += 1;
+ AstNode *child_node = ast_parse_pointer_type(pc, token_index, token);
+ child_node->column += 1;
+ AstNode *parent_node = ast_create_node(pc, NodeTypePointerType, token);
+ parent_node->data.pointer_type.star_token = token;
+ parent_node->data.pointer_type.op_expr = child_node;
+ return parent_node;
}
if (token->id == TokenIdKeywordTry) {
return ast_parse_try_expr(pc, token_index);
@@ -1200,22 +1222,12 @@ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index,
AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, token);
- AstNode *parent_node = node;
- if (token->id == TokenIdStarStar) {
- // pretend that we got 2 star tokens
-
- parent_node = ast_create_node(pc, NodeTypePrefixOpExpr, token);
- parent_node->data.prefix_op_expr.primary_expr = node;
- parent_node->data.prefix_op_expr.prefix_op = PrefixOpDereference;
-
- node->column += 1;
- }
AstNode *prefix_op_expr = ast_parse_error_set_expr(pc, token_index, true);
node->data.prefix_op_expr.primary_expr = prefix_op_expr;
node->data.prefix_op_expr.prefix_op = prefix_op;
- return parent_node;
+ return node;
}
@@ -2270,8 +2282,8 @@ static BinOpType ast_parse_ass_op(ParseContext *pc, size_t *token_index, bool ma
}
/*
-UnwrapExpression : BoolOrExpression (UnwrapMaybe | UnwrapError) | BoolOrExpression
-UnwrapMaybe : "??" BoolOrExpression
+UnwrapExpression : BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression
+UnwrapOptional = "orelse" Expression
UnwrapError = "catch" option("|" Symbol "|") Expression
*/
static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
@@ -2281,14 +2293,14 @@ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, boo
Token *token = &pc->tokens->at(*token_index);
- if (token->id == TokenIdDoubleQuestion) {
+ if (token->id == TokenIdKeywordOrElse) {
*token_index += 1;
AstNode *rhs = ast_parse_expression(pc, token_index, true);
AstNode *node = ast_create_node(pc, NodeTypeBinOpExpr, token);
node->data.bin_op_expr.op1 = lhs;
- node->data.bin_op_expr.bin_op = BinOpTypeUnwrapMaybe;
+ node->data.bin_op_expr.bin_op = BinOpTypeUnwrapOptional;
node->data.bin_op_expr.op2 = rhs;
return node;
@@ -2991,6 +3003,12 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeFieldAccessExpr:
visit_field(&node->data.field_access_expr.struct_expr, visit, context);
break;
+ case NodeTypePtrDeref:
+ visit_field(&node->data.ptr_deref_expr.target, visit, context);
+ break;
+ case NodeTypeUnwrapOptional:
+ visit_field(&node->data.unwrap_optional.expr, visit, context);
+ break;
case NodeTypeUse:
visit_field(&node->data.use.expr, visit, context);
break;
@@ -3093,9 +3111,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeErrorType:
// none
break;
- case NodeTypeAddrOfExpr:
- visit_field(&node->data.addr_of_expr.align_expr, visit, context);
- visit_field(&node->data.addr_of_expr.op_expr, visit, context);
+ case NodeTypePointerType:
+ visit_field(&node->data.pointer_type.align_expr, visit, context);
+ visit_field(&node->data.pointer_type.op_expr, visit, context);
break;
case NodeTypeErrorSetDecl:
visit_node_list(&node->data.err_set_decl.decls, visit, context);
@@ -3110,7 +3128,6 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
visit_field(&node->data.await_expr.expr, visit, context);
break;
case NodeTypeSuspend:
- visit_field(&node->data.suspend.promise_symbol, visit, context);
visit_field(&node->data.suspend.block, visit, context);
break;
}
diff --git a/src/target.cpp b/src/target.cpp
index 5008b51a09..91d36c5109 100644
--- a/src/target.cpp
+++ b/src/target.cpp
@@ -597,12 +597,15 @@ void resolve_target_object_format(ZigTarget *target) {
case ZigLLVM_tce:
case ZigLLVM_tcele:
case ZigLLVM_thumbeb:
- case ZigLLVM_wasm32:
- case ZigLLVM_wasm64:
case ZigLLVM_xcore:
target->oformat= ZigLLVM_ELF;
return;
+ case ZigLLVM_wasm32:
+ case ZigLLVM_wasm64:
+ target->oformat = ZigLLVM_Wasm;
+ return;
+
case ZigLLVM_ppc:
case ZigLLVM_ppc64:
if (is_os_darwin(target)) {
@@ -683,25 +686,46 @@ static int get_arch_pointer_bit_width(ZigLLVM_ArchType arch) {
uint32_t target_c_type_size_in_bits(const ZigTarget *target, CIntType id) {
switch (target->os) {
case OsFreestanding:
- switch (id) {
- case CIntTypeShort:
- case CIntTypeUShort:
- return 16;
- case CIntTypeInt:
- case CIntTypeUInt:
- return 32;
- case CIntTypeLong:
- case CIntTypeULong:
- return get_arch_pointer_bit_width(target->arch.arch);
- case CIntTypeLongLong:
- case CIntTypeULongLong:
- return 64;
- case CIntTypeCount:
- zig_unreachable();
+ switch (target->arch.arch) {
+ case ZigLLVM_msp430:
+ switch (id) {
+ case CIntTypeShort:
+ case CIntTypeUShort:
+ return 16;
+ case CIntTypeInt:
+ case CIntTypeUInt:
+ return 16;
+ case CIntTypeLong:
+ case CIntTypeULong:
+ return 32;
+ case CIntTypeLongLong:
+ case CIntTypeULongLong:
+ return 64;
+ case CIntTypeCount:
+ zig_unreachable();
+ }
+ default:
+ switch (id) {
+ case CIntTypeShort:
+ case CIntTypeUShort:
+ return 16;
+ case CIntTypeInt:
+ case CIntTypeUInt:
+ return 32;
+ case CIntTypeLong:
+ case CIntTypeULong:
+ return get_arch_pointer_bit_width(target->arch.arch);
+ case CIntTypeLongLong:
+ case CIntTypeULongLong:
+ return 64;
+ case CIntTypeCount:
+ zig_unreachable();
+ }
}
case OsLinux:
case OsMacOSX:
case OsZen:
+ case OsOpenBSD:
switch (id) {
case CIntTypeShort:
case CIntTypeUShort:
@@ -742,7 +766,6 @@ uint32_t target_c_type_size_in_bits(const ZigTarget *target, CIntType id) {
case OsKFreeBSD:
case OsLv2:
case OsNetBSD:
- case OsOpenBSD:
case OsSolaris:
case OsHaiku:
case OsMinix:
@@ -896,3 +919,65 @@ bool target_can_exec(const ZigTarget *host_target, const ZigTarget *guest_target
return false;
}
+
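+// Returns the conventional name of the stack pointer register for the given CPU architecture.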
+const char *arch_stack_pointer_register_name(const ArchType *arch) {
+ switch (arch->arch) {
+ case ZigLLVM_UnknownArch:
+ zig_unreachable();
+ case ZigLLVM_x86:
+ return "sp";
+ case ZigLLVM_x86_64:
+ return "rsp";
+
+ case ZigLLVM_aarch64:
+ case ZigLLVM_arm:
+ case ZigLLVM_thumb:
+ case ZigLLVM_aarch64_be:
+ case ZigLLVM_amdgcn:
+ case ZigLLVM_amdil:
+ case ZigLLVM_amdil64:
+ case ZigLLVM_armeb:
+ case ZigLLVM_arc:
+ case ZigLLVM_avr:
+ case ZigLLVM_bpfeb:
+ case ZigLLVM_bpfel:
+ case ZigLLVM_hexagon:
+ case ZigLLVM_lanai:
+ case ZigLLVM_hsail:
+ case ZigLLVM_hsail64:
+ case ZigLLVM_kalimba:
+ case ZigLLVM_le32:
+ case ZigLLVM_le64:
+ case ZigLLVM_mips:
+ case ZigLLVM_mips64:
+ case ZigLLVM_mips64el:
+ case ZigLLVM_mipsel:
+ case ZigLLVM_msp430:
+ case ZigLLVM_nios2:
+ case ZigLLVM_nvptx:
+ case ZigLLVM_nvptx64:
+ case ZigLLVM_ppc64le:
+ case ZigLLVM_r600:
+ case ZigLLVM_renderscript32:
+ case ZigLLVM_renderscript64:
+ case ZigLLVM_riscv32:
+ case ZigLLVM_riscv64:
+ case ZigLLVM_shave:
+ case ZigLLVM_sparc:
+ case ZigLLVM_sparcel:
+ case ZigLLVM_sparcv9:
+ case ZigLLVM_spir:
+ case ZigLLVM_spir64:
+ case ZigLLVM_systemz:
+ case ZigLLVM_tce:
+ case ZigLLVM_tcele:
+ case ZigLLVM_thumbeb:
+ case ZigLLVM_wasm32:
+ case ZigLLVM_wasm64:
+ case ZigLLVM_xcore:
+ case ZigLLVM_ppc:
+ case ZigLLVM_ppc64:
+ zig_panic("TODO populate this table with stack pointer register name for this CPU architecture");
+ }
+ zig_unreachable();
+}
diff --git a/src/target.hpp b/src/target.hpp
index 614b0627d5..5a118f6d8d 100644
--- a/src/target.hpp
+++ b/src/target.hpp
@@ -77,6 +77,8 @@ size_t target_arch_count(void);
const ArchType *get_target_arch(size_t index);
void get_arch_name(char *out_str, const ArchType *arch);
+const char *arch_stack_pointer_register_name(const ArchType *arch);
+
size_t target_vendor_count(void);
ZigLLVM_VendorType get_target_vendor(size_t index);
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 365b35cdfd..1d3db5567a 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -134,6 +134,7 @@ static const struct ZigKeyword zig_keywords[] = {
{"noalias", TokenIdKeywordNoAlias},
{"null", TokenIdKeywordNull},
{"or", TokenIdKeywordOr},
+ {"orelse", TokenIdKeywordOrElse},
{"packed", TokenIdKeywordPacked},
{"promise", TokenIdKeywordPromise},
{"pub", TokenIdKeywordPub},
@@ -215,10 +216,11 @@ enum TokenizeState {
TokenizeStateSawGreaterThanGreaterThan,
TokenizeStateSawDot,
TokenizeStateSawDotDot,
- TokenizeStateSawQuestionMark,
TokenizeStateSawAtSign,
TokenizeStateCharCode,
TokenizeStateError,
+ TokenizeStateLBracket,
+ TokenizeStateLBracketStar,
};
@@ -355,12 +357,19 @@ static void end_float_token(Tokenize *t) {
// Mask the sign bit to 0 since always non-negative lex
const uint64_t exp_mask = 0xffffull << exp_shift;
- if (shift >= 64) {
+ // must be special-cased to avoid undefined behavior on shift == 64
+ if (shift == 128) {
+ f_bits.repr[0] = 0;
+ f_bits.repr[1] = sig_bits[0];
+ } else if (shift == 0) {
+ f_bits.repr[0] = sig_bits[0];
+ f_bits.repr[1] = sig_bits[1];
+ } else if (shift >= 64) {
f_bits.repr[0] = 0;
f_bits.repr[1] = sig_bits[0] << (shift - 64);
} else {
f_bits.repr[0] = sig_bits[0] << shift;
- f_bits.repr[1] = ((sig_bits[1] << shift) | (sig_bits[0] >> (64 - shift)));
+ f_bits.repr[1] = (sig_bits[1] << shift) | (sig_bits[0] >> (64 - shift));
}
f_bits.repr[1] &= ~exp_mask;
@@ -451,16 +460,21 @@ static const char* get_escape_shorthand(uint8_t c) {
static void invalid_char_error(Tokenize *t, uint8_t c) {
if (c == '\r') {
tokenize_error(t, "invalid carriage return, only '\\n' line endings are supported");
- } else if (isprint(c)) {
- tokenize_error(t, "invalid character: '%c'", c);
- } else {
- const char *sh = get_escape_shorthand(c);
- if (sh) {
- tokenize_error(t, "invalid character: '%s'", sh);
- } else {
- tokenize_error(t, "invalid character: '\\x%x'", c);
- }
+ return;
}
+
+ const char *sh = get_escape_shorthand(c);
+ if (sh) {
+ tokenize_error(t, "invalid character: '%s'", sh);
+ return;
+ }
+
+ if (isprint(c)) {
+ tokenize_error(t, "invalid character: '%c'", c);
+ return;
+ }
+
+ tokenize_error(t, "invalid character: '\\x%02x'", c);
}
void tokenize(Buf *buf, Tokenization *out) {
@@ -530,6 +544,10 @@ void tokenize(Buf *buf, Tokenization *out) {
begin_token(&t, TokenIdComma);
end_token(&t);
break;
+ case '?':
+ begin_token(&t, TokenIdQuestion);
+ end_token(&t);
+ break;
case '{':
begin_token(&t, TokenIdLBrace);
end_token(&t);
@@ -539,8 +557,8 @@ void tokenize(Buf *buf, Tokenization *out) {
end_token(&t);
break;
case '[':
+ t.state = TokenizeStateLBracket;
begin_token(&t, TokenIdLBracket);
- end_token(&t);
break;
case ']':
begin_token(&t, TokenIdRBracket);
@@ -622,33 +640,10 @@ void tokenize(Buf *buf, Tokenization *out) {
begin_token(&t, TokenIdDot);
t.state = TokenizeStateSawDot;
break;
- case '?':
- begin_token(&t, TokenIdMaybe);
- t.state = TokenizeStateSawQuestionMark;
- break;
default:
invalid_char_error(&t, c);
}
break;
- case TokenizeStateSawQuestionMark:
- switch (c) {
- case '?':
- set_token_id(&t, t.cur_tok, TokenIdDoubleQuestion);
- end_token(&t);
- t.state = TokenizeStateStart;
- break;
- case '=':
- set_token_id(&t, t.cur_tok, TokenIdMaybeAssign);
- end_token(&t);
- t.state = TokenizeStateStart;
- break;
- default:
- t.pos -= 1;
- end_token(&t);
- t.state = TokenizeStateStart;
- continue;
- }
- break;
case TokenizeStateSawDot:
switch (c) {
case '.':
@@ -852,6 +847,30 @@ void tokenize(Buf *buf, Tokenization *out) {
continue;
}
break;
+ case TokenizeStateLBracket:
+ switch (c) {
+ case '*':
+ t.state = TokenizeStateLBracketStar;
+ set_token_id(&t, t.cur_tok, TokenIdBracketStarBracket);
+ break;
+ default:
+ // reinterpret as just an lbracket
+ t.pos -= 1;
+ end_token(&t);
+ t.state = TokenizeStateStart;
+ continue;
+ }
+ break;
+ case TokenizeStateLBracketStar:
+ switch (c) {
+ case ']':
+ end_token(&t);
+ t.state = TokenizeStateStart;
+ break;
+ default:
+ invalid_char_error(&t, c);
+ }
+ break;
case TokenizeStateSawPlusPercent:
switch (c) {
case '=':
@@ -1459,7 +1478,6 @@ void tokenize(Buf *buf, Tokenization *out) {
case TokenizeStateSawGreaterThan:
case TokenizeStateSawGreaterThanGreaterThan:
case TokenizeStateSawDot:
- case TokenizeStateSawQuestionMark:
case TokenizeStateSawAtSign:
case TokenizeStateSawStarPercent:
case TokenizeStateSawPlusPercent:
@@ -1467,12 +1485,14 @@ void tokenize(Buf *buf, Tokenization *out) {
case TokenizeStateLineString:
case TokenizeStateLineStringEnd:
case TokenizeStateSawBarBar:
+ case TokenizeStateLBracket:
end_token(&t);
break;
case TokenizeStateSawDotDot:
case TokenizeStateSawBackslash:
case TokenizeStateLineStringContinue:
case TokenizeStateLineStringContinueC:
+ case TokenizeStateLBracketStar:
tokenize_error(&t, "unexpected EOF");
break;
case TokenizeStateLineComment:
@@ -1509,6 +1529,7 @@ const char * token_name(TokenId id) {
case TokenIdBitShiftRight: return ">>";
case TokenIdBitShiftRightEq: return ">>=";
case TokenIdBitXorEq: return "^=";
+ case TokenIdBracketStarBracket: return "[*]";
case TokenIdCharLiteral: return "CharLiteral";
case TokenIdCmpEq: return "==";
case TokenIdCmpGreaterOrEq: return ">=";
@@ -1521,7 +1542,6 @@ const char * token_name(TokenId id) {
case TokenIdDash: return "-";
case TokenIdDivEq: return "/=";
case TokenIdDot: return ".";
- case TokenIdDoubleQuestion: return "??";
case TokenIdEllipsis2: return "..";
case TokenIdEllipsis3: return "...";
case TokenIdEof: return "EOF";
@@ -1558,6 +1578,7 @@ const char * token_name(TokenId id) {
case TokenIdKeywordNoAlias: return "noalias";
case TokenIdKeywordNull: return "null";
case TokenIdKeywordOr: return "or";
+ case TokenIdKeywordOrElse: return "orelse";
case TokenIdKeywordPacked: return "packed";
case TokenIdKeywordPromise: return "promise";
case TokenIdKeywordPub: return "pub";
@@ -1580,8 +1601,7 @@ const char * token_name(TokenId id) {
case TokenIdLBrace: return "{";
case TokenIdLBracket: return "[";
case TokenIdLParen: return "(";
- case TokenIdMaybe: return "?";
- case TokenIdMaybeAssign: return "?=";
+ case TokenIdQuestion: return "?";
case TokenIdMinusEq: return "-=";
case TokenIdMinusPercent: return "-%";
case TokenIdMinusPercentEq: return "-%=";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index b719293704..75c7feb476 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -28,6 +28,7 @@ enum TokenId {
TokenIdBitShiftRight,
TokenIdBitShiftRightEq,
TokenIdBitXorEq,
+ TokenIdBracketStarBracket,
TokenIdCharLiteral,
TokenIdCmpEq,
TokenIdCmpGreaterOrEq,
@@ -40,7 +41,6 @@ enum TokenId {
TokenIdDash,
TokenIdDivEq,
TokenIdDot,
- TokenIdDoubleQuestion,
TokenIdEllipsis2,
TokenIdEllipsis3,
TokenIdEof,
@@ -75,6 +75,7 @@ enum TokenId {
TokenIdKeywordNoAlias,
TokenIdKeywordNull,
TokenIdKeywordOr,
+ TokenIdKeywordOrElse,
TokenIdKeywordPacked,
TokenIdKeywordPromise,
TokenIdKeywordPub,
@@ -99,8 +100,7 @@ enum TokenId {
TokenIdLBrace,
TokenIdLBracket,
TokenIdLParen,
- TokenIdMaybe,
- TokenIdMaybeAssign,
+ TokenIdQuestion,
TokenIdMinusEq,
TokenIdMinusPercent,
TokenIdMinusPercentEq,
@@ -169,6 +169,8 @@ struct Token {
TokenCharLit char_lit;
} data;
};
+// work around conflicting name Token which is also found in libclang
+typedef Token ZigToken;
struct Tokenization {
ZigList<Token> *tokens;
diff --git a/src/translate_c.cpp b/src/translate_c.cpp
index 965a8963bd..735a671bcc 100644
--- a/src/translate_c.cpp
+++ b/src/translate_c.cpp
@@ -247,6 +247,12 @@ static AstNode *trans_create_node_field_access_str(Context *c, AstNode *containe
return trans_create_node_field_access(c, container, buf_create_from_str(field_name));
}
+static AstNode *trans_create_node_ptr_deref(Context *c, AstNode *child_node) {
+ AstNode *node = trans_create_node(c, NodeTypePtrDeref);
+ node->data.ptr_deref_expr.target = child_node;
+ return node;
+}
+
static AstNode *trans_create_node_prefix_op(Context *c, PrefixOp op, AstNode *child_node) {
AstNode *node = trans_create_node(c, NodeTypePrefixOpExpr);
node->data.prefix_op_expr.prefix_op = op;
@@ -254,6 +260,12 @@ static AstNode *trans_create_node_prefix_op(Context *c, PrefixOp op, AstNode *ch
return node;
}
+static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child_node) {
+ AstNode *node = trans_create_node(c, NodeTypeUnwrapOptional);
+ node->data.unwrap_optional.expr = child_node;
+ return node;
+}
+
static AstNode *trans_create_node_bin_op(Context *c, AstNode *lhs_node, BinOpType op, AstNode *rhs_node) {
AstNode *node = trans_create_node(c, NodeTypeBinOpExpr);
node->data.bin_op_expr.op1 = lhs_node;
@@ -270,11 +282,21 @@ static AstNode *maybe_suppress_result(Context *c, ResultUsed result_used, AstNod
node);
}
-static AstNode *trans_create_node_addr_of(Context *c, bool is_const, bool is_volatile, AstNode *child_node) {
- AstNode *node = trans_create_node(c, NodeTypeAddrOfExpr);
- node->data.addr_of_expr.is_const = is_const;
- node->data.addr_of_expr.is_volatile = is_volatile;
- node->data.addr_of_expr.op_expr = child_node;
+static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node, PtrLen ptr_len) {
+ AstNode *node = trans_create_node(c, NodeTypePointerType);
+ node->data.pointer_type.star_token = allocate<ZigToken>(1);
+ node->data.pointer_type.star_token->id = (ptr_len == PtrLenSingle) ? TokenIdStar : TokenIdBracketStarBracket;
+ node->data.pointer_type.is_const = is_const;
+ node->data.pointer_type.is_volatile = is_volatile;
+ node->data.pointer_type.op_expr = child_node;
+ return node;
+}
+
+static AstNode *trans_create_node_addr_of(Context *c, AstNode *child_node) {
+ AstNode *node = trans_create_node(c, NodeTypePrefixOpExpr);
+ node->data.prefix_op_expr.prefix_op = PrefixOpAddrOf;
+ node->data.prefix_op_expr.primary_expr = child_node;
return node;
}
@@ -366,7 +388,7 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r
fn_def->data.fn_def.fn_proto = fn_proto;
fn_proto->data.fn_proto.fn_def_node = fn_def;
- AstNode *unwrap_node = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, ref_node);
+ AstNode *unwrap_node = trans_create_node_unwrap_null(c, ref_node);
AstNode *fn_call_node = trans_create_node(c, NodeTypeFnCallExpr);
fn_call_node->data.fn_call_expr.fn_ref_expr = unwrap_node;
@@ -393,10 +415,6 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r
return fn_def;
}
-static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child) {
- return trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, child);
-}
-
static AstNode *get_global(Context *c, Buf *name) {
{
auto entry = c->global_table.maybe_get(name);
@@ -409,7 +427,7 @@ static AstNode *get_global(Context *c, Buf *name) {
if (entry)
return entry->value;
}
- if (c->codegen->primitive_type_table.maybe_get(name) != nullptr) {
+ if (get_primitive_type(c->codegen, name) != nullptr) {
return trans_create_node_symbol(c, name);
}
return nullptr;
@@ -718,6 +736,30 @@ static bool qual_type_has_wrapping_overflow(Context *c, QualType qt) {
}
}
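+// Returns true for C types with no known size (void, or records without a definition); pointers to
+// such types are translated as single-item pointers rather than unknown-length pointers.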
+static bool type_is_opaque(Context *c, const Type *ty, const SourceLocation &source_loc) {
+ switch (ty->getTypeClass()) {
+ case Type::Builtin: {
+ const BuiltinType *builtin_ty = static_cast<const BuiltinType *>(ty);
+ return builtin_ty->getKind() == BuiltinType::Void;
+ }
+ case Type::Record: {
+ const RecordType *record_ty = static_cast<const RecordType *>(ty);
+ return record_ty->getDecl()->getDefinition() == nullptr;
+ }
+ case Type::Elaborated: {
+ const ElaboratedType *elaborated_ty = static_cast<const ElaboratedType *>(ty);
+ return type_is_opaque(c, elaborated_ty->getNamedType().getTypePtr(), source_loc);
+ }
+ case Type::Typedef: {
+ const TypedefType *typedef_ty = static_cast<const TypedefType *>(ty);
+ const TypedefNameDecl *typedef_decl = typedef_ty->getDecl();
+ return type_is_opaque(c, typedef_decl->getUnderlyingType().getTypePtr(), source_loc);
+ }
+ default:
+ return false;
+ }
+}
+
static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &source_loc) {
switch (ty->getTypeClass()) {
case Type::Builtin:
@@ -839,12 +881,14 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
}
if (qual_type_child_is_fn_proto(child_qt)) {
- return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node);
+ return trans_create_node_prefix_op(c, PrefixOpOptional, child_node);
}
- AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(),
- child_qt.isVolatileQualified(), child_node);
- return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node);
+ PtrLen ptr_len = type_is_opaque(c, child_qt.getTypePtr(), source_loc) ? PtrLenSingle : PtrLenUnknown;
+
+ AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
+ child_qt.isVolatileQualified(), child_node, ptr_len);
+ return trans_create_node_prefix_op(c, PrefixOpOptional, pointer_node);
}
case Type::Typedef:
{
@@ -1027,8 +1071,8 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
emit_warning(c, source_loc, "unresolved array element type");
return nullptr;
}
- AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(),
- child_qt.isVolatileQualified(), child_type_node);
+ AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
+ child_qt.isVolatileQualified(), child_type_node, PtrLenUnknown);
return pointer_node;
}
case Type::BlockPointer:
@@ -1396,7 +1440,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result
// const _ref = &lhs;
AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue);
if (lhs == nullptr) return nullptr;
- AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs);
+ AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs);
// TODO: avoid name collisions with generated variable names
Buf* tmp_var_name = buf_create_from_str("_ref");
AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs);
@@ -1412,8 +1456,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result
AstNode *operation_type_cast = trans_c_cast(c, rhs_location,
stmt->getComputationLHSType(),
stmt->getLHS()->getType(),
- trans_create_node_prefix_op(c, PrefixOpDereference,
- trans_create_node_symbol(c, tmp_var_name)));
+ trans_create_node_ptr_deref(c, trans_create_node_symbol(c, tmp_var_name)));
// result_type(... >> u5(rhs))
AstNode *result_type_cast = trans_c_cast(c, rhs_location,
@@ -1426,7 +1469,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result
// *_ref = ...
AstNode *assign_statement = trans_create_node_bin_op(c,
- trans_create_node_prefix_op(c, PrefixOpDereference,
+ trans_create_node_ptr_deref(c,
trans_create_node_symbol(c, tmp_var_name)),
BinOpTypeAssign, result_type_cast);
@@ -1436,7 +1479,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result
// break :x *_ref
child_scope->node->data.block.statements.append(
trans_create_node_break(c, label_name,
- trans_create_node_prefix_op(c, PrefixOpDereference,
+ trans_create_node_ptr_deref(c,
trans_create_node_symbol(c, tmp_var_name))));
}
@@ -1471,7 +1514,7 @@ static AstNode *trans_create_compound_assign(Context *c, ResultUsed result_used,
// const _ref = &lhs;
AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue);
if (lhs == nullptr) return nullptr;
- AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs);
+ AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs);
// TODO: avoid name collisions with generated variable names
Buf* tmp_var_name = buf_create_from_str("_ref");
AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs);
@@ -1483,11 +1526,11 @@ static AstNode *trans_create_compound_assign(Context *c, ResultUsed result_used,
if (rhs == nullptr) return nullptr;
AstNode *assign_statement = trans_create_node_bin_op(c,
- trans_create_node_prefix_op(c, PrefixOpDereference,
+ trans_create_node_ptr_deref(c,
trans_create_node_symbol(c, tmp_var_name)),
BinOpTypeAssign,
trans_create_node_bin_op(c,
- trans_create_node_prefix_op(c, PrefixOpDereference,
+ trans_create_node_ptr_deref(c,
trans_create_node_symbol(c, tmp_var_name)),
bin_op,
rhs));
@@ -1496,7 +1539,7 @@ static AstNode *trans_create_compound_assign(Context *c, ResultUsed result_used,
// break :x *_ref
child_scope->node->data.block.statements.append(
trans_create_node_break(c, label_name,
- trans_create_node_prefix_op(c, PrefixOpDereference,
+ trans_create_node_ptr_deref(c,
trans_create_node_symbol(c, tmp_var_name))));
return child_scope->node;
@@ -1808,7 +1851,7 @@ static AstNode *trans_create_post_crement(Context *c, ResultUsed result_used, Tr
// const _ref = &expr;
AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue);
if (expr == nullptr) return nullptr;
- AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr);
+ AstNode *addr_of_expr = trans_create_node_addr_of(c, expr);
// TODO: avoid name collisions with generated variable names
Buf* ref_var_name = buf_create_from_str("_ref");
AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr);
@@ -1817,13 +1860,13 @@ static AstNode *trans_create_post_crement(Context *c, ResultUsed result_used, Tr
// const _tmp = *_ref;
Buf* tmp_var_name = buf_create_from_str("_tmp");
AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr,
- trans_create_node_prefix_op(c, PrefixOpDereference,
+ trans_create_node_ptr_deref(c,
trans_create_node_symbol(c, ref_var_name)));
child_scope->node->data.block.statements.append(tmp_var_decl);
// *_ref += 1;
AstNode *assign_statement = trans_create_node_bin_op(c,
- trans_create_node_prefix_op(c, PrefixOpDereference,
+ trans_create_node_ptr_deref(c,
trans_create_node_symbol(c, ref_var_name)),
assign_op,
trans_create_node_unsigned(c, 1));
@@ -1863,7 +1906,7 @@ static AstNode *trans_create_pre_crement(Context *c, ResultUsed result_used, Tra
// const _ref = &expr;
AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue);
if (expr == nullptr) return nullptr;
- AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr);
+ AstNode *addr_of_expr = trans_create_node_addr_of(c, expr);
// TODO: avoid name collisions with generated variable names
Buf* ref_var_name = buf_create_from_str("_ref");
AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr);
@@ -1871,14 +1914,14 @@ static AstNode *trans_create_pre_crement(Context *c, ResultUsed result_used, Tra
// *_ref += 1;
AstNode *assign_statement = trans_create_node_bin_op(c,
- trans_create_node_prefix_op(c, PrefixOpDereference,
+ trans_create_node_ptr_deref(c,
trans_create_node_symbol(c, ref_var_name)),
assign_op,
trans_create_node_unsigned(c, 1));
child_scope->node->data.block.statements.append(assign_statement);
// break :x *_ref
- AstNode *deref_expr = trans_create_node_prefix_op(c, PrefixOpDereference,
+ AstNode *deref_expr = trans_create_node_ptr_deref(c,
trans_create_node_symbol(c, ref_var_name));
child_scope->node->data.block.statements.append(trans_create_node_break(c, label_name, deref_expr));
@@ -1912,7 +1955,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc
AstNode *value_node = trans_expr(c, result_used, scope, stmt->getSubExpr(), TransLValue);
if (value_node == nullptr)
return value_node;
- return trans_create_node_addr_of(c, false, false, value_node);
+ return trans_create_node_addr_of(c, value_node);
}
case UO_Deref:
{
@@ -1922,8 +1965,8 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc
bool is_fn_ptr = qual_type_is_fn_ptr(stmt->getSubExpr()->getType());
if (is_fn_ptr)
return value_node;
- AstNode *unwrapped = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, value_node);
- return trans_create_node_prefix_op(c, PrefixOpDereference, unwrapped);
+ AstNode *unwrapped = trans_create_node_unwrap_null(c, value_node);
+ return trans_create_node_ptr_deref(c, unwrapped);
}
case UO_Plus:
emit_warning(c, stmt->getLocStart(), "TODO handle C translation UO_Plus");
@@ -2546,7 +2589,7 @@ static AstNode *trans_call_expr(Context *c, ResultUsed result_used, TransScope *
}
}
if (callee_node == nullptr) {
- callee_node = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, callee_raw_node);
+ callee_node = trans_create_node_unwrap_null(c, callee_raw_node);
}
} else {
callee_node = callee_raw_node;
@@ -2664,7 +2707,9 @@ static AstNode *trans_do_loop(Context *c, TransScope *parent_scope, const DoStmt
AstNode *child_statement;
child_scope = trans_stmt(c, &child_block_scope->base, stmt->getBody(), &child_statement);
if (child_scope == nullptr) return nullptr;
- body_node->data.block.statements.append(child_statement);
+ if (child_statement != nullptr) {
+ body_node->data.block.statements.append(child_statement);
+ }
}
// if (!cond) break;
@@ -2674,6 +2719,7 @@ static AstNode *trans_do_loop(Context *c, TransScope *parent_scope, const DoStmt
terminator_node->data.if_bool_expr.condition = trans_create_node_prefix_op(c, PrefixOpBoolNot, condition_node);
terminator_node->data.if_bool_expr.then_block = trans_create_node(c, NodeTypeBreak);
+ assert(terminator_node != nullptr);
body_node->data.block.statements.append(terminator_node);
while_scope->node->data.while_expr.body = body_node;
@@ -2737,7 +2783,12 @@ static AstNode *trans_for_loop(Context *c, TransScope *parent_scope, const ForSt
TransScope *body_scope = trans_stmt(c, &while_scope->base, stmt->getBody(), &body_statement);
if (body_scope == nullptr)
return nullptr;
- while_scope->node->data.while_expr.body = body_statement;
+
+ if (body_statement == nullptr) {
+ while_scope->node->data.while_expr.body = trans_create_node(c, NodeTypeBlock);
+ } else {
+ while_scope->node->data.while_expr.body = body_statement;
+ }
return loop_block_node;
}
@@ -2972,9 +3023,14 @@ static int trans_stmt_extra(Context *c, TransScope *scope, const Stmt *stmt,
trans_unary_operator(c, result_used, scope, (const UnaryOperator *)stmt));
case Stmt::DeclStmtClass:
return trans_local_declaration(c, scope, (const DeclStmt *)stmt, out_node, out_child_scope);
- case Stmt::WhileStmtClass:
- return wrap_stmt(out_node, out_child_scope, scope,
- trans_while_loop(c, scope, (const WhileStmt *)stmt));
+ case Stmt::WhileStmtClass: {
+ AstNode *while_node = trans_while_loop(c, scope, (const WhileStmt *)stmt);
+ assert(while_node->type == NodeTypeWhileExpr);
+ if (while_node->data.while_expr.body == nullptr) {
+ while_node->data.while_expr.body = trans_create_node(c, NodeTypeBlock);
+ }
+ return wrap_stmt(out_node, out_child_scope, scope, while_node);
+ }
case Stmt::IfStmtClass:
return wrap_stmt(out_node, out_child_scope, scope,
trans_if_statement(c, scope, (const IfStmt *)stmt));
@@ -2997,12 +3053,18 @@ static int trans_stmt_extra(Context *c, TransScope *scope, const Stmt *stmt,
case Stmt::UnaryExprOrTypeTraitExprClass:
return wrap_stmt(out_node, out_child_scope, scope,
trans_unary_expr_or_type_trait_expr(c, scope, (const UnaryExprOrTypeTraitExpr *)stmt));
- case Stmt::DoStmtClass:
- return wrap_stmt(out_node, out_child_scope, scope,
- trans_do_loop(c, scope, (const DoStmt *)stmt));
- case Stmt::ForStmtClass:
- return wrap_stmt(out_node, out_child_scope, scope,
- trans_for_loop(c, scope, (const ForStmt *)stmt));
+ case Stmt::DoStmtClass: {
+ AstNode *while_node = trans_do_loop(c, scope, (const DoStmt *)stmt);
+ assert(while_node->type == NodeTypeWhileExpr);
+ if (while_node->data.while_expr.body == nullptr) {
+ while_node->data.while_expr.body = trans_create_node(c, NodeTypeBlock);
+ }
+ return wrap_stmt(out_node, out_child_scope, scope, while_node);
+ }
+ case Stmt::ForStmtClass: {
+ AstNode *node = trans_for_loop(c, scope, (const ForStmt *)stmt);
+ return wrap_stmt(out_node, out_child_scope, scope, node);
+ }
case Stmt::StringLiteralClass:
return wrap_stmt(out_node, out_child_scope, scope,
trans_string_literal(c, scope, (const StringLiteral *)stmt));
@@ -3667,6 +3729,7 @@ static AstNode *resolve_typedef_decl(Context *c, const TypedefNameDecl *typedef_
if (existing_entry) {
return existing_entry->value;
}
+
QualType child_qt = typedef_decl->getUnderlyingType();
Buf *type_name = buf_create_from_str(decl_name(typedef_decl));
@@ -3700,16 +3763,19 @@ static AstNode *resolve_typedef_decl(Context *c, const TypedefNameDecl *typedef_
// use the name of this typedef
// TODO
+ // trans_qual_type here might cause us to look at this typedef again so we put the item in the map first
+ AstNode *symbol_node = trans_create_node_symbol(c, type_name);
+ c->decl_table.put(typedef_decl->getCanonicalDecl(), symbol_node);
+
AstNode *type_node = trans_qual_type(c, child_qt, typedef_decl->getLocation());
if (type_node == nullptr) {
emit_warning(c, typedef_decl->getLocation(), "typedef %s - unresolved child type", buf_ptr(type_name));
c->decl_table.put(typedef_decl, nullptr);
+ // TODO add global var with type_name equal to @compileError("unable to resolve C type")
return nullptr;
}
add_global_var(c, type_name, type_node);
- AstNode *symbol_node = trans_create_node_symbol(c, type_name);
- c->decl_table.put(typedef_decl->getCanonicalDecl(), symbol_node);
return symbol_node;
}
@@ -3744,6 +3810,7 @@ static AstNode *resolve_enum_decl(Context *c, const EnumDecl *enum_decl) {
return demote_enum_to_opaque(c, enum_decl, full_type_name, bare_name);
}
+
bool pure_enum = true;
uint32_t field_count = 0;
for (auto it = enum_def->enumerator_begin(),
@@ -3755,84 +3822,53 @@ static AstNode *resolve_enum_decl(Context *c, const EnumDecl *enum_decl) {
pure_enum = false;
}
}
-
AstNode *tag_int_type = trans_qual_type(c, enum_decl->getIntegerType(), enum_decl->getLocation());
assert(tag_int_type);
- if (pure_enum) {
- AstNode *enum_node = trans_create_node(c, NodeTypeContainerDecl);
- enum_node->data.container_decl.kind = ContainerKindEnum;
- enum_node->data.container_decl.layout = ContainerLayoutExtern;
- // TODO only emit this tag type if the enum tag type is not the default.
- // I don't know what the default is, need to figure out how clang is deciding.
- // it appears to at least be different across gcc/msvc
- if (!c_is_builtin_type(c, enum_decl->getIntegerType(), BuiltinType::UInt) &&
- !c_is_builtin_type(c, enum_decl->getIntegerType(), BuiltinType::Int))
- {
- enum_node->data.container_decl.init_arg_expr = tag_int_type;
- }
-
- enum_node->data.container_decl.fields.resize(field_count);
- uint32_t i = 0;
- for (auto it = enum_def->enumerator_begin(),
- it_end = enum_def->enumerator_end();
- it != it_end; ++it, i += 1)
- {
- const EnumConstantDecl *enum_const = *it;
-
- Buf *enum_val_name = buf_create_from_str(decl_name(enum_const));
- Buf *field_name;
- if (bare_name != nullptr && buf_starts_with_buf(enum_val_name, bare_name)) {
- field_name = buf_slice(enum_val_name, buf_len(bare_name), buf_len(enum_val_name));
- } else {
- field_name = enum_val_name;
- }
-
- AstNode *field_node = trans_create_node(c, NodeTypeStructField);
- field_node->data.struct_field.name = field_name;
- field_node->data.struct_field.type = nullptr;
- enum_node->data.container_decl.fields.items[i] = field_node;
-
- // in C each enum value is in the global namespace. so we put them there too.
- // at this point we can rely on the enum emitting successfully
- if (is_anonymous) {
- AstNode *lit_node = trans_create_node_unsigned(c, i);
- add_global_var(c, enum_val_name, lit_node);
- } else {
- AstNode *field_access_node = trans_create_node_field_access(c,
- trans_create_node_symbol(c, full_type_name), field_name);
- add_global_var(c, enum_val_name, field_access_node);
- }
- }
-
- if (is_anonymous) {
- c->decl_table.put(enum_decl->getCanonicalDecl(), enum_node);
- return enum_node;
- } else {
- AstNode *symbol_node = trans_create_node_symbol(c, full_type_name);
- add_global_weak_alias(c, bare_name, full_type_name);
- add_global_var(c, full_type_name, enum_node);
- c->decl_table.put(enum_decl->getCanonicalDecl(), symbol_node);
- return enum_node;
- }
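+ // Translate the C enum as an extern Zig enum; explicit tag values are attached to fields below
+ // unless the enum is a named enum with default consecutive values.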
+ AstNode *enum_node = trans_create_node(c, NodeTypeContainerDecl);
+ enum_node->data.container_decl.kind = ContainerKindEnum;
+ enum_node->data.container_decl.layout = ContainerLayoutExtern;
+ // TODO only emit this tag type if the enum tag type is not the default.
+ // I don't know what the default is, need to figure out how clang is deciding.
+ // it appears to at least be different across gcc/msvc
+ if (!c_is_builtin_type(c, enum_decl->getIntegerType(), BuiltinType::UInt) &&
+ !c_is_builtin_type(c, enum_decl->getIntegerType(), BuiltinType::Int))
+ {
+ enum_node->data.container_decl.init_arg_expr = tag_int_type;
}
-
- // TODO after issue #305 is solved, make this be an enum with tag_int_type
- // as the integer type and set the custom enum values
- AstNode *enum_node = tag_int_type;
-
-
- // add variables for all the values with enum_node
+ enum_node->data.container_decl.fields.resize(field_count);
+ uint32_t i = 0;
for (auto it = enum_def->enumerator_begin(),
it_end = enum_def->enumerator_end();
- it != it_end; ++it)
+ it != it_end; ++it, i += 1)
{
const EnumConstantDecl *enum_const = *it;
Buf *enum_val_name = buf_create_from_str(decl_name(enum_const));
- AstNode *int_node = trans_create_node_apint(c, enum_const->getInitVal());
- AstNode *var_node = add_global_var(c, enum_val_name, int_node);
- var_node->data.variable_declaration.type = tag_int_type;
+ Buf *field_name;
+ if (bare_name != nullptr && buf_starts_with_buf(enum_val_name, bare_name)) {
+ field_name = buf_slice(enum_val_name, buf_len(bare_name), buf_len(enum_val_name));
+ } else {
+ field_name = enum_val_name;
+ }
+
+ AstNode *int_node = pure_enum && !is_anonymous ? nullptr : trans_create_node_apint(c, enum_const->getInitVal());
+ AstNode *field_node = trans_create_node(c, NodeTypeStructField);
+ field_node->data.struct_field.name = field_name;
+ field_node->data.struct_field.type = nullptr;
+ field_node->data.struct_field.value = int_node;
+ enum_node->data.container_decl.fields.items[i] = field_node;
+
+ // in C each enum value is in the global namespace. so we put them there too.
+ // at this point we can rely on the enum emitting successfully
+ if (is_anonymous) {
+ Buf *enum_val_name = buf_create_from_str(decl_name(enum_const));
+ add_global_var(c, enum_val_name, int_node);
+ } else {
+ AstNode *field_access_node = trans_create_node_field_access(c,
+ trans_create_node_symbol(c, full_type_name), field_name);
+ add_global_var(c, enum_val_name, field_access_node);
+ }
}
if (is_anonymous) {
@@ -3843,7 +3879,7 @@ static AstNode *resolve_enum_decl(Context *c, const EnumDecl *enum_decl) {
add_global_weak_alias(c, bare_name, full_type_name);
add_global_var(c, full_type_name, enum_node);
c->decl_table.put(enum_decl->getCanonicalDecl(), symbol_node);
- return symbol_node;
+ return enum_node;
}
}
@@ -4286,7 +4322,7 @@ static AstNode *trans_lookup_ast_maybe_fn(Context *c, AstNode *ref_node) {
return nullptr;
if (prefix_node->type != NodeTypePrefixOpExpr)
return nullptr;
- if (prefix_node->data.prefix_op_expr.prefix_op != PrefixOpMaybe)
+ if (prefix_node->data.prefix_op_expr.prefix_op != PrefixOpOptional)
return nullptr;
AstNode *fn_proto_node = prefix_node->data.prefix_op_expr.primary_expr;
@@ -4462,34 +4498,52 @@ static AstNode *parse_ctok_suffix_op_expr(Context *c, CTokenize *ctok, size_t *t
} else if (first_tok->id == CTokIdAsterisk) {
*tok_i += 1;
- node = trans_create_node_addr_of(c, false, false, node);
+ node = trans_create_node_ptr_type(c, false, false, node, PtrLenUnknown);
} else {
return node;
}
}
}
-static PrefixOp ctok_to_prefix_op(CTok *token) {
- switch (token->id) {
- case CTokIdBang: return PrefixOpBoolNot;
- case CTokIdMinus: return PrefixOpNegation;
- case CTokIdTilde: return PrefixOpBinNot;
- case CTokIdAsterisk: return PrefixOpDereference;
- default: return PrefixOpInvalid;
- }
-}
static AstNode *parse_ctok_prefix_op_expr(Context *c, CTokenize *ctok, size_t *tok_i) {
CTok *op_tok = &ctok->tokens.at(*tok_i);
- PrefixOp prefix_op = ctok_to_prefix_op(op_tok);
- if (prefix_op == PrefixOpInvalid) {
- return parse_ctok_suffix_op_expr(c, ctok, tok_i);
- }
- *tok_i += 1;
- AstNode *prefix_op_expr = parse_ctok_prefix_op_expr(c, ctok, tok_i);
- if (prefix_op_expr == nullptr)
- return nullptr;
- return trans_create_node_prefix_op(c, prefix_op, prefix_op_expr);
+ switch (op_tok->id) {
+ case CTokIdBang:
+ {
+ *tok_i += 1;
+ AstNode *prefix_op_expr = parse_ctok_prefix_op_expr(c, ctok, tok_i);
+ if (prefix_op_expr == nullptr)
+ return nullptr;
+ return trans_create_node_prefix_op(c, PrefixOpBoolNot, prefix_op_expr);
+ }
+ case CTokIdMinus:
+ {
+ *tok_i += 1;
+ AstNode *prefix_op_expr = parse_ctok_prefix_op_expr(c, ctok, tok_i);
+ if (prefix_op_expr == nullptr)
+ return nullptr;
+ return trans_create_node_prefix_op(c, PrefixOpNegation, prefix_op_expr);
+ }
+ case CTokIdTilde:
+ {
+ *tok_i += 1;
+ AstNode *prefix_op_expr = parse_ctok_prefix_op_expr(c, ctok, tok_i);
+ if (prefix_op_expr == nullptr)
+ return nullptr;
+ return trans_create_node_prefix_op(c, PrefixOpBinNot, prefix_op_expr);
+ }
+ case CTokIdAsterisk:
+ {
+ *tok_i += 1;
+ AstNode *prefix_op_expr = parse_ctok_prefix_op_expr(c, ctok, tok_i);
+ if (prefix_op_expr == nullptr)
+ return nullptr;
+ return trans_create_node_ptr_deref(c, prefix_op_expr);
+ }
+ default:
+ return parse_ctok_suffix_op_expr(c, ctok, tok_i);
+ }
}
static void process_macro(Context *c, CTokenize *ctok, Buf *name, const char *char_ptr) {
diff --git a/src/util.hpp b/src/util.hpp
index ae33cb84af..b0402137bd 100644
--- a/src/util.hpp
+++ b/src/util.hpp
@@ -31,6 +31,8 @@
#endif
+#include "softfloat.hpp"
+
#define BREAKPOINT __asm("int $0x03")
ATTRIBUTE_COLD
@@ -38,11 +40,11 @@ ATTRIBUTE_NORETURN
ATTRIBUTE_PRINTF(1, 2)
void zig_panic(const char *format, ...);
-ATTRIBUTE_COLD
-ATTRIBUTE_NORETURN
-static inline void zig_unreachable(void) {
- zig_panic("unreachable");
-}
+#ifdef WIN32
+#define __func__ __FUNCTION__
+#endif
+
+#define zig_unreachable() zig_panic("unreachable: %s:%s:%d", __FILE__, __func__, __LINE__)
#if defined(_MSC_VER)
static inline int clzll(unsigned long long mask) {
@@ -65,6 +67,11 @@ static inline int clzll(unsigned long long mask) {
template<typename T>
ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate_nonzero(size_t count) {
+#ifndef NDEBUG
+ // make behavior when size == 0 portable
+ if (count == 0)
+ return nullptr;
+#endif
T *ptr = reinterpret_cast<T *>(malloc(count * sizeof(T)));
if (!ptr)
zig_panic("allocation failed");
@@ -73,6 +80,11 @@ ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate_nonzero(size_t count) {
template<typename T>
ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate(size_t count) {
+#ifndef NDEBUG
+ // make behavior when size == 0 portable
+ if (count == 0)
+ return nullptr;
+#endif
T *ptr = reinterpret_cast<T *>(calloc(count, sizeof(T)));
if (!ptr)
zig_panic("allocation failed");
@@ -93,9 +105,7 @@ static inline void safe_memcpy(T *dest, const T *src, size_t count) {
template<typename T>
static inline T *reallocate(T *old, size_t old_count, size_t new_count) {
- T *ptr = reinterpret_cast<T *>(realloc(old, new_count * sizeof(T)));
- if (!ptr)
- zig_panic("allocation failed");
+ T *ptr = reallocate_nonzero(old, old_count, new_count);
if (new_count > old_count) {
memset(&ptr[old_count], 0, (new_count - old_count) * sizeof(T));
}
@@ -104,6 +114,11 @@ static inline T *reallocate(T *old, size_t old_count, size_t new_count) {
template<typename T>
static inline T *reallocate_nonzero(T *old, size_t old_count, size_t new_count) {
+#ifndef NDEBUG
+ // make behavior when size == 0 portable
+ if (new_count == 0 && old == nullptr)
+ return nullptr;
+#endif
T *ptr = reinterpret_cast<T *>(realloc(old, new_count * sizeof(T)));
if (!ptr)
zig_panic("allocation failed");
@@ -152,4 +167,21 @@ static inline uint8_t log2_u64(uint64_t x) {
return (63 - clzll(x));
}
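+// Bit-cast the host double into softfloat's float64_t representation, then narrow it to IEEE binary16.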
+static inline float16_t zig_double_to_f16(double x) {
+ float64_t y;
+ static_assert(sizeof(x) == sizeof(y), "");
+ memcpy(&y, &x, sizeof(x));
+ return f64_to_f16(y);
+}
+
+
+// Return value is safe to coerce to float even when |x| is NaN or Infinity.
+static inline double zig_f16_to_double(float16_t x) {
+ float64_t y = f16_to_f64(x);
+ double z;
+ static_assert(sizeof(y) == sizeof(z), "");
+ memcpy(&z, &y, sizeof(y));
+ return z;
+}
+
#endif
diff --git a/src/windows_sdk.cpp b/src/windows_sdk.cpp
new file mode 100644
index 0000000000..0f9d0fc301
--- /dev/null
+++ b/src/windows_sdk.cpp
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2018 Andrew Kelley
+ *
+ * This file is part of zig, which is MIT licensed.
+ * See http://opensource.org/licenses/MIT
+ */
+
+#include "windows_sdk.h"
+
+#if defined(_WIN32)
+
+#include "windows_com.hpp"
+#include
+#include
+
+struct ZigWindowsSDKPrivate {
+ ZigWindowsSDK base;
+};
+
+enum NativeArch {
+ NativeArchArm,
+ NativeArchi386,
+ NativeArchx86_64,
+};
+
+#if defined(_M_ARM) || defined(__arm__)
+static const NativeArch native_arch = NativeArchArm;
+#endif
+#if defined(_M_IX86) || defined(__i386__)
+static const NativeArch native_arch = NativeArchi386;
+#endif
+#if defined(_M_X64) || defined(__x86_64__)
+static const NativeArch native_arch = NativeArchx86_64;
+#endif
+
+void zig_free_windows_sdk(struct ZigWindowsSDK *sdk) {
+ if (sdk == nullptr) {
+ return;
+ }
+ free((void*)sdk->path10_ptr);
+ free((void*)sdk->version10_ptr);
+ free((void*)sdk->path81_ptr);
+ free((void*)sdk->version81_ptr);
+ free((void*)sdk->msvc_lib_dir_ptr);
+}
+
+static ZigFindWindowsSdkError find_msvc_lib_dir(ZigWindowsSDKPrivate *priv) {
+ //COM smart pointers require an explicit scope
+ {
+ HRESULT rc = CoInitializeEx(NULL, COINIT_MULTITHREADED);
+ if (rc != S_OK && rc != S_FALSE) {
+ goto com_done;
+ }
+
+ //This COM class is installed when a VS2017 instance is installed
+ ISetupConfigurationPtr setup_config;
+ rc = setup_config.CreateInstance(__uuidof(SetupConfiguration));
+ if (rc != S_OK) {
+ goto com_done;
+ }
+
+ IEnumSetupInstancesPtr all_instances;
+ rc = setup_config->EnumInstances(&all_instances);
+ if (rc != S_OK) {
+ goto com_done;
+ }
+
+ ISetupInstance* curr_instance;
+ ULONG found_inst;
+ while ((rc = all_instances->Next(1, &curr_instance, &found_inst)) == S_OK) {
+ BSTR bstr_inst_path;
+ rc = curr_instance->GetInstallationPath(&bstr_inst_path);
+ if (rc != S_OK) {
+ goto com_done;
+ }
+ //BSTRs are UTF-16 encoded, so we need to convert the string & adjust the length
+ //TODO call an actual function to do this
+ UINT bstr_path_len = *((UINT*)bstr_inst_path - 1);
+ ULONG tmp_path_len = bstr_path_len / 2 + 1;
+ char* conv_path = (char*)bstr_inst_path;
+ // TODO don't use alloca
+ char *tmp_path = (char*)alloca(tmp_path_len);
+ memset(tmp_path, 0, tmp_path_len);
+ uint32_t c = 0;
+ for (uint32_t i = 0; i < bstr_path_len; i += 2) {
+ tmp_path[c] = conv_path[i];
+ ++c;
+ assert(c != tmp_path_len);
+ }
+ char output_path[4096];
+ output_path[0] = 0;
+ char *out_append_ptr = output_path;
+
+ out_append_ptr += sprintf(out_append_ptr, "%s\\", tmp_path);
+
+ char tmp_buf[4096];
+ sprintf(tmp_buf, "%s%s", output_path, "VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt");
+ FILE* tools_file = fopen(tmp_buf, "rb");
+ if (!tools_file) {
+ goto com_done;
+ }
+ memset(tmp_path, 0, tmp_path_len);
+ fgets(tmp_path, tmp_path_len, tools_file);
+ strtok(tmp_path, " \r\n");
+ fclose(tools_file);
+ out_append_ptr += sprintf(out_append_ptr, "VC\\Tools\\MSVC\\%s\\lib\\", tmp_path);
+ switch (native_arch) {
+ case NativeArchi386:
+ out_append_ptr += sprintf(out_append_ptr, "x86\\");
+ break;
+ case NativeArchx86_64:
+ out_append_ptr += sprintf(out_append_ptr, "x64\\");
+ break;
+ case NativeArchArm:
+ out_append_ptr += sprintf(out_append_ptr, "arm\\");
+ break;
+ }
+ sprintf(tmp_buf, "%s%s", output_path, "vcruntime.lib");
+
+ if (GetFileAttributesA(tmp_buf) != INVALID_FILE_ATTRIBUTES) {
+ priv->base.msvc_lib_dir_ptr = strdup(output_path);
+ if (priv->base.msvc_lib_dir_ptr == nullptr) {
+ return ZigFindWindowsSdkErrorOutOfMemory;
+ }
+ priv->base.msvc_lib_dir_len = strlen(priv->base.msvc_lib_dir_ptr);
+ return ZigFindWindowsSdkErrorNone;
+ }
+ }
+ }
+
+com_done:;
+ HKEY key;
+ HRESULT rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7", 0,
+ KEY_QUERY_VALUE | KEY_WOW64_32KEY, &key);
+ if (rc != ERROR_SUCCESS) {
+ return ZigFindWindowsSdkErrorNotFound;
+ }
+
+ DWORD dw_type = 0;
+ DWORD cb_data = 0;
+ rc = RegQueryValueEx(key, "14.0", NULL, &dw_type, NULL, &cb_data);
+ if ((rc == ERROR_FILE_NOT_FOUND) || (REG_SZ != dw_type)) {
+ return ZigFindWindowsSdkErrorNotFound;
+ }
+
+ char tmp_buf[4096];
+
+ RegQueryValueExA(key, "14.0", NULL, NULL, (LPBYTE)tmp_buf, &cb_data);
+ // RegQueryValueExA returns the length of the string INCLUDING the null terminator
+ char *tmp_buf_append_ptr = tmp_buf + (cb_data - 1);
+ tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "VC\\Lib\\");
+ switch (native_arch) {
+ case NativeArchi386:
+ //x86 is in the root of the Lib folder
+ break;
+ case NativeArchx86_64:
+ tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "amd64\\");
+ break;
+ case NativeArchArm:
+ tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "arm\\");
+ break;
+ }
+
+ char *output_path = strdup(tmp_buf);
+ if (output_path == nullptr) {
+ return ZigFindWindowsSdkErrorOutOfMemory;
+ }
+
+ tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "vcruntime.lib");
+
+ if (GetFileAttributesA(tmp_buf) != INVALID_FILE_ATTRIBUTES) {
+ priv->base.msvc_lib_dir_ptr = output_path;
+ priv->base.msvc_lib_dir_len = strlen(output_path);
+ return ZigFindWindowsSdkErrorNone;
+ } else {
+ free(output_path);
+ return ZigFindWindowsSdkErrorNotFound;
+ }
+}
+
+static ZigFindWindowsSdkError find_10_version(ZigWindowsSDKPrivate *priv) {
+ if (priv->base.path10_ptr == nullptr)
+ return ZigFindWindowsSdkErrorNone;
+
+ char sdk_lib_dir[4096];
+ int n = snprintf(sdk_lib_dir, 4096, "%s\\Lib\\*", priv->base.path10_ptr);
+ if (n < 0 || n >= 4096) {
+ return ZigFindWindowsSdkErrorPathTooLong;
+ }
+
+ // enumerate files in sdk path looking for latest version
+ WIN32_FIND_DATA ffd;
+ HANDLE hFind = FindFirstFileA(sdk_lib_dir, &ffd);
+ if (hFind == INVALID_HANDLE_VALUE) {
+ return ZigFindWindowsSdkErrorNotFound;
+ }
+ int v0 = 0, v1 = 0, v2 = 0, v3 = 0;
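+ // keep the newest SDK version directory found so far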
+ for (;;) {
+ if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
+ int c0 = 0, c1 = 0, c2 = 0, c3 = 0;
+ sscanf(ffd.cFileName, "%d.%d.%d.%d", &c0, &c1, &c2, &c3);
+ if (c0 == 10 && c1 == 0 && c2 == 10240 && c3 == 0) {
+ // Microsoft released 26624 as 10240 accidentally.
+ // https://developer.microsoft.com/en-us/windows/downloads/sdk-archive
+ c2 = 26624;
+ }
+ if ((c0 > v0) || (c1 > v1) || (c2 > v2) || (c3 > v3)) {
+ v0 = c0, v1 = c1, v2 = c2, v3 = c3;
+ free((void*)priv->base.version10_ptr);
+ priv->base.version10_ptr = strdup(ffd.cFileName);
+ if (priv->base.version10_ptr == nullptr) {
+ FindClose(hFind);
+ return ZigFindWindowsSdkErrorOutOfMemory;
+ }
+ }
+ }
+ if (FindNextFile(hFind, &ffd) == 0) {
+ FindClose(hFind);
+ break;
+ }
+ }
+ priv->base.version10_len = strlen(priv->base.version10_ptr);
+ return ZigFindWindowsSdkErrorNone;
+}
+
+static ZigFindWindowsSdkError find_81_version(ZigWindowsSDKPrivate *priv) {
+ if (priv->base.path81_ptr == nullptr)
+ return ZigFindWindowsSdkErrorNone;
+
+ char sdk_lib_dir[4096];
+ int n = snprintf(sdk_lib_dir, 4096, "%s\\Lib\\winv*", priv->base.path81_ptr);
+ if (n < 0 || n >= 4096) {
+ return ZigFindWindowsSdkErrorPathTooLong;
+ }
+
+ // enumerate files in sdk path looking for latest version
+ WIN32_FIND_DATA ffd;
+ HANDLE hFind = FindFirstFileA(sdk_lib_dir, &ffd);
+ if (hFind == INVALID_HANDLE_VALUE) {
+ return ZigFindWindowsSdkErrorNotFound;
+ }
+ int v0 = 0, v1 = 0;
+ for (;;) {
+ if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
+ int c0 = 0, c1 = 0;
+ sscanf(ffd.cFileName, "winv%d.%d", &c0, &c1);
+ if ((c0 > v0) || (c1 > v1)) {
+ v0 = c0, v1 = c1;
+ free((void*)priv->base.version81_ptr);
+ priv->base.version81_ptr = strdup(ffd.cFileName);
+ if (priv->base.version81_ptr == nullptr) {
+ FindClose(hFind);
+ return ZigFindWindowsSdkErrorOutOfMemory;
+ }
+ }
+ }
+ if (FindNextFile(hFind, &ffd) == 0) {
+ FindClose(hFind);
+ break;
+ }
+ }
+ priv->base.version81_len = strlen(priv->base.version81_ptr);
+ return ZigFindWindowsSdkErrorNone;
+}
+
+ZigFindWindowsSdkError zig_find_windows_sdk(struct ZigWindowsSDK **out_sdk) {
+ ZigWindowsSDKPrivate *priv = (ZigWindowsSDKPrivate*)calloc(1, sizeof(ZigWindowsSDKPrivate));
+ if (priv == nullptr) {
+ return ZigFindWindowsSdkErrorOutOfMemory;
+ }
+
+ HKEY key;
+ HRESULT rc;
+ rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots", 0,
+ KEY_QUERY_VALUE | KEY_WOW64_32KEY | KEY_ENUMERATE_SUB_KEYS, &key);
+ if (rc != ERROR_SUCCESS) {
+ zig_free_windows_sdk(&priv->base);
+ return ZigFindWindowsSdkErrorNotFound;
+ }
+
+ {
+ DWORD tmp_buf_len = MAX_PATH;
+ priv->base.path10_ptr = (const char *)calloc(tmp_buf_len, 1);
+ if (priv->base.path10_ptr == nullptr) {
+ zig_free_windows_sdk(&priv->base);
+ return ZigFindWindowsSdkErrorOutOfMemory;
+ }
+ rc = RegQueryValueEx(key, "KitsRoot10", NULL, NULL, (LPBYTE)priv->base.path10_ptr, &tmp_buf_len);
+ if (rc == ERROR_SUCCESS) {
+ priv->base.path10_len = tmp_buf_len - 1;
+ if (priv->base.path10_ptr[priv->base.path10_len - 1] == '\\') {
+ priv->base.path10_len -= 1;
+ }
+ } else {
+ free((void*)priv->base.path10_ptr);
+ priv->base.path10_ptr = nullptr;
+ }
+ }
+ {
+ DWORD tmp_buf_len = MAX_PATH;
+ priv->base.path81_ptr = (const char *)calloc(tmp_buf_len, 1);
+ if (priv->base.path81_ptr == nullptr) {
+ zig_free_windows_sdk(&priv->base);
+ return ZigFindWindowsSdkErrorOutOfMemory;
+ }
+ rc = RegQueryValueEx(key, "KitsRoot81", NULL, NULL, (LPBYTE)priv->base.path81_ptr, &tmp_buf_len);
+ if (rc == ERROR_SUCCESS) {
+ priv->base.path81_len = tmp_buf_len - 1;
+ if (priv->base.path81_ptr[priv->base.path81_len - 1] == '\\') {
+ priv->base.path81_len -= 1;
+ }
+ } else {
+ free((void*)priv->base.path81_ptr);
+ priv->base.path81_ptr = nullptr;
+ }
+ }
+
+ {
+ ZigFindWindowsSdkError err = find_10_version(priv);
+ if (err == ZigFindWindowsSdkErrorOutOfMemory) {
+ zig_free_windows_sdk(&priv->base);
+ return err;
+ }
+ }
+ {
+ ZigFindWindowsSdkError err = find_81_version(priv);
+ if (err == ZigFindWindowsSdkErrorOutOfMemory) {
+ zig_free_windows_sdk(&priv->base);
+ return err;
+ }
+ }
+
+ {
+ ZigFindWindowsSdkError err = find_msvc_lib_dir(priv);
+ if (err == ZigFindWindowsSdkErrorOutOfMemory) {
+ zig_free_windows_sdk(&priv->base);
+ return err;
+ }
+ }
+
+ *out_sdk = &priv->base;
+ return ZigFindWindowsSdkErrorNone;
+}
+
+#else
+
+void zig_free_windows_sdk(struct ZigWindowsSDK *sdk) {}
+ZigFindWindowsSdkError zig_find_windows_sdk(struct ZigWindowsSDK **out_sdk) {
+ return ZigFindWindowsSdkErrorNotFound;
+}
+
+#endif
diff --git a/src/windows_sdk.h b/src/windows_sdk.h
new file mode 100644
index 0000000000..2d531ad372
--- /dev/null
+++ b/src/windows_sdk.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Andrew Kelley
+ *
+ * This file is part of zig, which is MIT licensed.
+ * See http://opensource.org/licenses/MIT
+ */
+
+#ifndef ZIG_WINDOWS_SDK_H
+#define ZIG_WINDOWS_SDK_H
+
+#ifdef __cplusplus
+#define ZIG_EXTERN_C extern "C"
+#else
+#define ZIG_EXTERN_C
+#endif
+
+#include <stddef.h>
+
+struct ZigWindowsSDK {
+ const char *path10_ptr;
+ size_t path10_len;
+
+ const char *version10_ptr;
+ size_t version10_len;
+
+ const char *path81_ptr;
+ size_t path81_len;
+
+ const char *version81_ptr;
+ size_t version81_len;
+
+ const char *msvc_lib_dir_ptr;
+ size_t msvc_lib_dir_len;
+};
+
+enum ZigFindWindowsSdkError {
+ ZigFindWindowsSdkErrorNone,
+ ZigFindWindowsSdkErrorOutOfMemory,
+ ZigFindWindowsSdkErrorNotFound,
+ ZigFindWindowsSdkErrorPathTooLong,
+};
+
+ZIG_EXTERN_C enum ZigFindWindowsSdkError zig_find_windows_sdk(struct ZigWindowsSDK **out_sdk);
+
+ZIG_EXTERN_C void zig_free_windows_sdk(struct ZigWindowsSDK *sdk);
+
+#endif
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index b4eef13cc1..a43d2d182c 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -81,7 +81,7 @@ static const bool assertions_on = false;
#endif
bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMModuleRef module_ref,
- const char *filename, ZigLLVM_EmitOutputType output_type, char **error_message, bool is_debug)
+ const char *filename, ZigLLVM_EmitOutputType output_type, char **error_message, bool is_debug, bool is_small)
{
std::error_code EC;
raw_fd_ostream dest(filename, EC, sys::fs::F_None);
@@ -100,7 +100,7 @@ bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMM
return true;
}
PMBuilder->OptLevel = target_machine->getOptLevel();
- PMBuilder->SizeLevel = 0;
+ PMBuilder->SizeLevel = is_small ? 2 : 0;
PMBuilder->DisableTailCalls = is_debug;
PMBuilder->DisableUnitAtATime = is_debug;
@@ -440,6 +440,11 @@ ZigLLVMDIBuilder *ZigLLVMCreateDIBuilder(LLVMModuleRef module, bool allow_unreso
return reinterpret_cast<ZigLLVMDIBuilder *>(di_builder);
}
+void ZigLLVMDisposeDIBuilder(ZigLLVMDIBuilder *dbuilder) {
+ DIBuilder *di_builder = reinterpret_cast<DIBuilder *>(dbuilder);
+ delete di_builder;
+}
+
void ZigLLVMSetCurrentDebugLocation(LLVMBuilderRef builder, int line, int column, ZigLLVMDIScope *scope) {
unwrap(builder)->SetCurrentDebugLocation(DebugLoc::get(
line, column, reinterpret_cast<DIScope *>(scope)));
@@ -765,10 +770,12 @@ static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
LLVMValueRef ZigLLVMBuildCmpXchg(LLVMBuilderRef builder, LLVMValueRef ptr, LLVMValueRef cmp,
LLVMValueRef new_val, LLVMAtomicOrdering success_ordering,
- LLVMAtomicOrdering failure_ordering)
+ LLVMAtomicOrdering failure_ordering, bool is_weak)
{
- return wrap(unwrap(builder)->CreateAtomicCmpXchg(unwrap(ptr), unwrap(cmp), unwrap(new_val),
- mapFromLLVMOrdering(success_ordering), mapFromLLVMOrdering(failure_ordering)));
+ AtomicCmpXchgInst *inst = unwrap(builder)->CreateAtomicCmpXchg(unwrap(ptr), unwrap(cmp),
+ unwrap(new_val), mapFromLLVMOrdering(success_ordering), mapFromLLVMOrdering(failure_ordering));
+ inst->setWeak(is_weak);
+ return wrap(inst);
}
LLVMValueRef ZigLLVMBuildNSWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
@@ -836,7 +843,7 @@ bool ZigLLDLink(ZigLLVM_ObjectFormatType oformat, const char **args, size_t arg_
return lld::mach_o::link(array_ref_args, diag);
case ZigLLVM_Wasm:
- assert(false); // TODO ZigLLDLink for Wasm
+ return lld::wasm::link(array_ref_args, false, diag);
}
assert(false); // unreachable
abort();
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index d6809000ce..63d69bd23e 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -22,6 +22,9 @@
#define ZIG_EXTERN_C
#endif
+// ATTENTION: If you modify this file, be sure to update the corresponding
+// extern function declarations in the self-hosted compiler.
+
struct ZigLLVMDIType;
struct ZigLLVMDIBuilder;
struct ZigLLVMDICompileUnit;
@@ -39,7 +42,7 @@ struct ZigLLVMInsertionPoint;
ZIG_EXTERN_C void ZigLLVMInitializeLoopStrengthReducePass(LLVMPassRegistryRef R);
ZIG_EXTERN_C void ZigLLVMInitializeLowerIntrinsicsPass(LLVMPassRegistryRef R);
-/// Caller must free memory.
+/// Caller must free memory with LLVMDisposeMessage
ZIG_EXTERN_C char *ZigLLVMGetHostCPUName(void);
ZIG_EXTERN_C char *ZigLLVMGetNativeFeatures(void);
@@ -52,7 +55,7 @@ enum ZigLLVM_EmitOutputType {
};
ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMModuleRef module_ref,
- const char *filename, enum ZigLLVM_EmitOutputType output_type, char **error_message, bool is_debug);
+ const char *filename, enum ZigLLVM_EmitOutputType output_type, char **error_message, bool is_debug, bool is_small);
ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref);
@@ -66,7 +69,7 @@ ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LL
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCmpXchg(LLVMBuilderRef builder, LLVMValueRef ptr, LLVMValueRef cmp,
LLVMValueRef new_val, LLVMAtomicOrdering success_ordering,
- LLVMAtomicOrdering failure_ordering);
+ LLVMAtomicOrdering failure_ordering, bool is_weak);
ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildNSWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
const char *name);
@@ -139,6 +142,7 @@ ZIG_EXTERN_C unsigned ZigLLVMTag_DW_enumeration_type(void);
ZIG_EXTERN_C unsigned ZigLLVMTag_DW_union_type(void);
ZIG_EXTERN_C struct ZigLLVMDIBuilder *ZigLLVMCreateDIBuilder(LLVMModuleRef module, bool allow_unresolved);
+ZIG_EXTERN_C void ZigLLVMDisposeDIBuilder(struct ZigLLVMDIBuilder *dbuilder);
ZIG_EXTERN_C void ZigLLVMAddModuleDebugInfoFlag(LLVMModuleRef module);
ZIG_EXTERN_C void ZigLLVMAddModuleCodeViewFlag(LLVMModuleRef module);
diff --git a/std/array_list.zig b/std/array_list.zig
index 2a44b66518..298026d11c 100644
--- a/std/array_list.zig
+++ b/std/array_list.zig
@@ -1,6 +1,7 @@
const std = @import("index.zig");
const debug = std.debug;
const assert = debug.assert;
+const assertError = debug.assertError;
const mem = std.mem;
const Allocator = mem.Allocator;
@@ -8,7 +9,7 @@ pub fn ArrayList(comptime T: type) type {
return AlignedArrayList(T, @alignOf(T));
}
-pub fn AlignedArrayList(comptime T: type, comptime A: u29) type{
+pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
return struct {
const Self = this;
@@ -17,38 +18,55 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type{
/// you uninitialized memory.
items: []align(A) T,
len: usize,
- allocator: &Allocator,
+ allocator: *Allocator,
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn init(allocator: &Allocator) Self {
- return Self {
+ pub fn init(allocator: *Allocator) Self {
+ return Self{
.items = []align(A) T{},
.len = 0,
.allocator = allocator,
};
}
- pub fn deinit(l: &Self) void {
- l.allocator.free(l.items);
+ pub fn deinit(self: Self) void {
+ self.allocator.free(self.items);
}
- pub fn toSlice(l: &Self) []align(A) T {
- return l.items[0..l.len];
+ pub fn toSlice(self: Self) []align(A) T {
+ return self.items[0..self.len];
}
- pub fn toSliceConst(l: &const Self) []align(A) const T {
- return l.items[0..l.len];
+ pub fn toSliceConst(self: Self) []align(A) const T {
+ return self.items[0..self.len];
}
- pub fn at(l: &const Self, n: usize) T {
- return l.toSliceConst()[n];
+ pub fn at(self: Self, i: usize) T {
+ return self.toSliceConst()[i];
+ }
+
+ /// Sets the value at index `i`, or returns `error.OutOfBounds` if
+ /// the index is not in range.
+ pub fn setOrError(self: Self, i: usize, item: T) !void {
+ if (i >= self.len) return error.OutOfBounds;
+ self.items[i] = item;
+ }
+
+ /// Sets the value at index `i`, asserting that the index is in range.
+ pub fn set(self: *Self, i: usize, item: T) void {
+ assert(i < self.len);
+ self.items[i] = item;
+ }
+
+ pub fn count(self: Self) usize {
+ return self.len;
}
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn fromOwnedSlice(allocator: &Allocator, slice: []align(A) T) Self {
- return Self {
+ pub fn fromOwnedSlice(allocator: *Allocator, slice: []align(A) T) Self {
+ return Self{
.items = slice,
.len = slice.len,
.allocator = allocator,
@@ -56,122 +74,302 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type{
}
/// The caller owns the returned memory. ArrayList becomes empty.
- pub fn toOwnedSlice(self: &Self) []align(A) T {
+ pub fn toOwnedSlice(self: *Self) []align(A) T {
const allocator = self.allocator;
const result = allocator.alignedShrink(T, A, self.items, self.len);
- *self = init(allocator);
+ self.* = init(allocator);
return result;
}
- pub fn insert(l: &Self, n: usize, item: &const T) !void {
- try l.ensureCapacity(l.len + 1);
- l.len += 1;
+ pub fn insert(self: *Self, n: usize, item: T) !void {
+ try self.ensureCapacity(self.len + 1);
+ self.len += 1;
- mem.copy(T, l.items[n+1..l.len], l.items[n..l.len-1]);
- l.items[n] = *item;
+ mem.copyBackwards(T, self.items[n + 1 .. self.len], self.items[n .. self.len - 1]);
+ self.items[n] = item;
}
- pub fn insertSlice(l: &Self, n: usize, items: []align(A) const T) !void {
- try l.ensureCapacity(l.len + items.len);
- l.len += items.len;
+ pub fn insertSlice(self: *Self, n: usize, items: []align(A) const T) !void {
+ try self.ensureCapacity(self.len + items.len);
+ self.len += items.len;
- mem.copy(T, l.items[n+items.len..l.len], l.items[n..l.len-items.len]);
- mem.copy(T, l.items[n..n+items.len], items);
+ mem.copyBackwards(T, self.items[n + items.len .. self.len], self.items[n .. self.len - items.len]);
+ mem.copy(T, self.items[n .. n + items.len], items);
}
- pub fn append(l: &Self, item: &const T) !void {
- const new_item_ptr = try l.addOne();
- *new_item_ptr = *item;
+ pub fn append(self: *Self, item: T) !void {
+ const new_item_ptr = try self.addOne();
+ new_item_ptr.* = item;
}
- pub fn appendSlice(l: &Self, items: []align(A) const T) !void {
- try l.ensureCapacity(l.len + items.len);
- mem.copy(T, l.items[l.len..], items);
- l.len += items.len;
+ /// Removes the element at the specified index and returns it.
+ /// The empty slot is filled from the end of the list.
+ pub fn swapRemove(self: *Self, i: usize) T {
+ if (self.len - 1 == i) return self.pop();
+
+ const slice = self.toSlice();
+ const old_item = slice[i];
+ slice[i] = self.pop();
+ return old_item;
}
- pub fn resize(l: &Self, new_len: usize) !void {
- try l.ensureCapacity(new_len);
- l.len = new_len;
+ /// Removes the element at the specified index and returns it,
+ /// or returns error.OutOfBounds if the index is out of range.
+ /// On success, the empty slot is filled from the end of the list.
+ pub fn swapRemoveOrError(self: *Self, i: usize) !T {
+ if (i >= self.len) return error.OutOfBounds;
+ return self.swapRemove(i);
}
- pub fn shrink(l: &Self, new_len: usize) void {
- assert(new_len <= l.len);
- l.len = new_len;
+ pub fn appendSlice(self: *Self, items: []align(A) const T) !void {
+ try self.ensureCapacity(self.len + items.len);
+ mem.copy(T, self.items[self.len..], items);
+ self.len += items.len;
}
- pub fn ensureCapacity(l: &Self, new_capacity: usize) !void {
- var better_capacity = l.items.len;
+ pub fn resize(self: *Self, new_len: usize) !void {
+ try self.ensureCapacity(new_len);
+ self.len = new_len;
+ }
+
+ pub fn shrink(self: *Self, new_len: usize) void {
+ assert(new_len <= self.len);
+ self.len = new_len;
+ }
+
+ pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
+ var better_capacity = self.items.len;
if (better_capacity >= new_capacity) return;
while (true) {
better_capacity += better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
- l.items = try l.allocator.alignedRealloc(T, A, l.items, better_capacity);
+ self.items = try self.allocator.alignedRealloc(T, A, self.items, better_capacity);
}
- pub fn addOne(l: &Self) !&T {
- const new_length = l.len + 1;
- try l.ensureCapacity(new_length);
- const result = &l.items[l.len];
- l.len = new_length;
+ pub fn addOne(self: *Self) !*T {
+ const new_length = self.len + 1;
+ try self.ensureCapacity(new_length);
+ const result = &self.items[self.len];
+ self.len = new_length;
return result;
}
- pub fn pop(self: &Self) T {
+ pub fn pop(self: *Self) T {
self.len -= 1;
return self.items[self.len];
}
- pub fn popOrNull(self: &Self) ?T {
- if (self.len == 0)
- return null;
+ pub fn popOrNull(self: *Self) ?T {
+ if (self.len == 0) return null;
return self.pop();
}
+
+ pub const Iterator = struct {
+ list: *const Self,
+ // how many items have we returned
+ count: usize,
+
+ pub fn next(it: *Iterator) ?T {
+ if (it.count >= it.list.len) return null;
+ const val = it.list.at(it.count);
+ it.count += 1;
+ return val;
+ }
+
+ pub fn reset(it: *Iterator) void {
+ it.count = 0;
+ }
+ };
+
+ pub fn iterator(self: *const Self) Iterator {
+ return Iterator{
+ .list = self,
+ .count = 0,
+ };
+ }
};
}
-test "basic ArrayList test" {
- var list = ArrayList(i32).init(debug.global_allocator);
+test "std.ArrayList.basic" {
+ var bytes: [1024]u8 = undefined;
+ const allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
+
+ var list = ArrayList(i32).init(allocator);
defer list.deinit();
- {var i: usize = 0; while (i < 10) : (i += 1) {
- list.append(i32(i + 1)) catch unreachable;
- }}
+ // setting on empty list is out of bounds
+ assertError(list.setOrError(0, 1), error.OutOfBounds);
- {var i: usize = 0; while (i < 10) : (i += 1) {
- assert(list.items[i] == i32(i + 1));
- }}
+ {
+ var i: usize = 0;
+ while (i < 10) : (i += 1) {
+ list.append(@intCast(i32, i + 1)) catch unreachable;
+ }
+ }
+
+ {
+ var i: usize = 0;
+ while (i < 10) : (i += 1) {
+ assert(list.items[i] == @intCast(i32, i + 1));
+ }
+ }
+
+ for (list.toSlice()) |v, i| {
+ assert(v == @intCast(i32, i + 1));
+ }
+
+ for (list.toSliceConst()) |v, i| {
+ assert(v == @intCast(i32, i + 1));
+ }
assert(list.pop() == 10);
assert(list.len == 9);
- list.appendSlice([]const i32 { 1, 2, 3 }) catch unreachable;
+ list.appendSlice([]const i32{
+ 1,
+ 2,
+ 3,
+ }) catch unreachable;
assert(list.len == 12);
assert(list.pop() == 3);
assert(list.pop() == 2);
assert(list.pop() == 1);
assert(list.len == 9);
- list.appendSlice([]const i32 {}) catch unreachable;
+ list.appendSlice([]const i32{}) catch unreachable;
assert(list.len == 9);
+
+ // can only set on indices < self.len
+ list.set(7, 33);
+ list.set(8, 42);
+
+ assertError(list.setOrError(9, 99), error.OutOfBounds);
+ assertError(list.setOrError(10, 123), error.OutOfBounds);
+
+ assert(list.pop() == 42);
+ assert(list.pop() == 33);
}
-test "insert ArrayList test" {
+test "std.ArrayList.swapRemove" {
var list = ArrayList(i32).init(debug.global_allocator);
defer list.deinit();
try list.append(1);
+ try list.append(2);
+ try list.append(3);
+ try list.append(4);
+ try list.append(5);
+ try list.append(6);
+ try list.append(7);
+
+ // remove from middle
+ assert(list.swapRemove(3) == 4);
+ assert(list.at(3) == 7);
+ assert(list.len == 6);
+
+ // remove from end
+ assert(list.swapRemove(5) == 6);
+ assert(list.len == 5);
+
+ // remove from front
+ assert(list.swapRemove(0) == 1);
+ assert(list.at(0) == 5);
+ assert(list.len == 4);
+}
+
+test "std.ArrayList.swapRemoveOrError" {
+ var list = ArrayList(i32).init(debug.global_allocator);
+ defer list.deinit();
+
+ // Test just after initialization
+ assertError(list.swapRemoveOrError(0), error.OutOfBounds);
+
+ // Test after adding one item and removing it
+ try list.append(1);
+ assert((try list.swapRemoveOrError(0)) == 1);
+ assertError(list.swapRemoveOrError(0), error.OutOfBounds);
+
+ // Test after adding two items and removing both
+ try list.append(1);
+ try list.append(2);
+ assert((try list.swapRemoveOrError(1)) == 2);
+ assert((try list.swapRemoveOrError(0)) == 1);
+ assertError(list.swapRemoveOrError(0), error.OutOfBounds);
+
+ // Test out of bounds with one item
+ try list.append(1);
+ assertError(list.swapRemoveOrError(1), error.OutOfBounds);
+
+ // Test out of bounds with two items
+ try list.append(2);
+ assertError(list.swapRemoveOrError(2), error.OutOfBounds);
+}
+
+test "std.ArrayList.iterator" {
+ var list = ArrayList(i32).init(debug.global_allocator);
+ defer list.deinit();
+
+ try list.append(1);
+ try list.append(2);
+ try list.append(3);
+
+ var count: i32 = 0;
+ var it = list.iterator();
+ while (it.next()) |next| {
+ assert(next == count + 1);
+ count += 1;
+ }
+
+ assert(count == 3);
+ assert(it.next() == null);
+ it.reset();
+ count = 0;
+ while (it.next()) |next| {
+ assert(next == count + 1);
+ count += 1;
+ if (count == 2) break;
+ }
+
+ it.reset();
+ assert(it.next().? == 1);
+}
+
+test "std.ArrayList.insert" {
+ var list = ArrayList(i32).init(debug.global_allocator);
+ defer list.deinit();
+
+ try list.append(1);
+ try list.append(2);
+ try list.append(3);
try list.insert(0, 5);
assert(list.items[0] == 5);
assert(list.items[1] == 1);
+ assert(list.items[2] == 2);
+ assert(list.items[3] == 3);
+}
- try list.insertSlice(1, []const i32 { 9, 8 });
- assert(list.items[0] == 5);
+test "std.ArrayList.insertSlice" {
+ var list = ArrayList(i32).init(debug.global_allocator);
+ defer list.deinit();
+
+ try list.append(1);
+ try list.append(2);
+ try list.append(3);
+ try list.append(4);
+ try list.insertSlice(1, []const i32{
+ 9,
+ 8,
+ });
+ assert(list.items[0] == 1);
assert(list.items[1] == 9);
assert(list.items[2] == 8);
+ assert(list.items[3] == 2);
+ assert(list.items[4] == 3);
+ assert(list.items[5] == 4);
- const items = []const i32 { 1 };
+ const items = []const i32{1};
try list.insertSlice(0, items[0..0]);
- assert(list.items[0] == 5);
+ assert(list.len == 6);
+ assert(list.items[0] == 1);
}
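
As a quick illustration of the methods added above (`swapRemove`, `at`, and the new `Iterator`), here is a small usage sketch in the same style as the tests; it relies only on the API shown in this file.

test "example: swapRemove and iterator (sketch)" {
    var list = ArrayList(i32).init(debug.global_allocator);
    defer list.deinit();

    try list.append(10);
    try list.append(20);
    try list.append(30);

    // swapRemove fills the vacated slot with the last element, so order changes.
    assert(list.swapRemove(0) == 10);
    assert(list.at(0) == 30);

    // The iterator visits the remaining items in index order.
    var it = list.iterator();
    var sum: i32 = 0;
    while (it.next()) |x| {
        sum += x;
    }
    assert(sum == 50);
}
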
diff --git a/std/atomic/index.zig b/std/atomic/index.zig
new file mode 100644
index 0000000000..a94cff1973
--- /dev/null
+++ b/std/atomic/index.zig
@@ -0,0 +1,9 @@
+pub const Stack = @import("stack.zig").Stack;
+pub const Queue = @import("queue.zig").Queue;
+pub const Int = @import("int.zig").Int;
+
+test "std.atomic" {
+ _ = @import("stack.zig");
+ _ = @import("queue.zig");
+ _ = @import("int.zig");
+}
diff --git a/std/atomic/int.zig b/std/atomic/int.zig
new file mode 100644
index 0000000000..4103d52719
--- /dev/null
+++ b/std/atomic/int.zig
@@ -0,0 +1,33 @@
+const builtin = @import("builtin");
+const AtomicOrder = builtin.AtomicOrder;
+
+/// Thread-safe, lock-free integer
+pub fn Int(comptime T: type) type {
+ return struct {
+ unprotected_value: T,
+
+ pub const Self = this;
+
+ pub fn init(init_val: T) Self {
+ return Self{ .unprotected_value = init_val };
+ }
+
+ /// Returns previous value
+ pub fn incr(self: *Self) T {
+ return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ }
+
+ /// Returns previous value
+ pub fn decr(self: *Self) T {
+ return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+
+ pub fn get(self: *Self) T {
+ return @atomicLoad(T, &self.unprotected_value, AtomicOrder.SeqCst);
+ }
+
+ pub fn xchg(self: *Self, new_value: T) T {
+ return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Xchg, new_value, AtomicOrder.SeqCst);
+ }
+ };
+}
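
A short usage sketch for the counter above, assuming `Int` is reached through the `std.atomic` re-export added in std/atomic/index.zig.

const std = @import("std");
const assert = std.debug.assert;
const Int = std.atomic.Int;

test "example: atomic counter (sketch)" {
    var counter = Int(usize).init(0);

    // incr, decr, and xchg all return the previous value.
    assert(counter.incr() == 0);
    assert(counter.incr() == 1);
    assert(counter.get() == 2);
    assert(counter.decr() == 2);
    assert(counter.xchg(10) == 1);
    assert(counter.get() == 10);
}
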
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
new file mode 100644
index 0000000000..df31c88d2a
--- /dev/null
+++ b/std/atomic/queue.zig
@@ -0,0 +1,240 @@
+const builtin = @import("builtin");
+const AtomicOrder = builtin.AtomicOrder;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+
+/// Many producer, many consumer, non-allocating, thread-safe.
+/// Uses a spinlock to protect get() and put().
+pub fn Queue(comptime T: type) type {
+ return struct {
+ head: ?*Node,
+ tail: ?*Node,
+ lock: u8,
+
+ pub const Self = this;
+
+ pub const Node = struct {
+ next: ?*Node,
+ data: T,
+ };
+
+ pub fn init() Self {
+ return Self{
+ .head = null,
+ .tail = null,
+ .lock = 0,
+ };
+ }
+
+ pub fn put(self: *Self, node: *Node) void {
+ node.next = null;
+
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
+ const opt_tail = self.tail;
+ self.tail = node;
+ if (opt_tail) |tail| {
+ tail.next = node;
+ } else {
+ assert(self.head == null);
+ self.head = node;
+ }
+ }
+
+ pub fn get(self: *Self) ?*Node {
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
+ const head = self.head orelse return null;
+ self.head = head.next;
+ if (head.next == null) self.tail = null;
+ return head;
+ }
+
+ pub fn unget(self: *Self, node: *Node) void {
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
+ const opt_head = self.head;
+ self.head = node;
+ if (opt_head) |head| {
+ // Link the re-inserted node in front of the previous head so get() returns it next.
+ node.next = head;
+ } else {
+ node.next = null;
+ assert(self.tail == null);
+ self.tail = node;
+ }
+ }
+
+ pub fn isEmpty(self: *Self) bool {
+ return @atomicLoad(?*Node, &self.head, builtin.AtomicOrder.SeqCst) == null;
+ }
+
+ pub fn dump(self: *Self) void {
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
+ std.debug.warn("head: ");
+ dumpRecursive(self.head, 0);
+ std.debug.warn("tail: ");
+ dumpRecursive(self.tail, 0);
+ }
+
+ fn dumpRecursive(optional_node: ?*Node, indent: usize) void {
+ var stderr_file = std.io.getStdErr() catch return;
+ const stderr = &std.io.FileOutStream.init(&stderr_file).stream;
+ stderr.writeByteNTimes(' ', indent) catch return;
+ if (optional_node) |node| {
+ std.debug.warn("0x{x}={}\n", @ptrToInt(node), node.data);
+ dumpRecursive(node.next, indent + 1);
+ } else {
+ std.debug.warn("(null)\n");
+ }
+ }
+ };
+}
+
+const std = @import("../index.zig");
+const assert = std.debug.assert;
+
+const Context = struct {
+ allocator: *std.mem.Allocator,
+ queue: *Queue(i32),
+ put_sum: isize,
+ get_sum: isize,
+ get_count: usize,
+ puts_done: u8, // TODO make this a bool
+};
+
+// TODO add lazy evaluated build options and then put puts_per_thread behind
+// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor CI,
+// which has only 1 core, we would use a less aggressive setting: we still want
+// this test to pass, but we need a smaller value because there is so much thrashing.
+// We would also use a less aggressive setting when running in valgrind.
+const puts_per_thread = 500;
+const put_thread_count = 3;
+
+test "std.atomic.Queue" {
+ var direct_allocator = std.heap.DirectAllocator.init();
+ defer direct_allocator.deinit();
+
+ var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
+ defer direct_allocator.allocator.free(plenty_of_memory);
+
+ var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
+ var a = &fixed_buffer_allocator.allocator;
+
+ var queue = Queue(i32).init();
+ var context = Context{
+ .allocator = a,
+ .queue = &queue,
+ .put_sum = 0,
+ .get_sum = 0,
+ .puts_done = 0,
+ .get_count = 0,
+ };
+
+ var putters: [put_thread_count]*std.os.Thread = undefined;
+ for (putters) |*t| {
+ t.* = try std.os.spawnThread(&context, startPuts);
+ }
+ var getters: [put_thread_count]*std.os.Thread = undefined;
+ for (getters) |*t| {
+ t.* = try std.os.spawnThread(&context, startGets);
+ }
+
+ for (putters) |t|
+ t.wait();
+ _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ for (getters) |t|
+ t.wait();
+
+ if (context.put_sum != context.get_sum) {
+ std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
+ }
+
+ if (context.get_count != puts_per_thread * put_thread_count) {
+ std.debug.panic(
+ "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
+ context.get_count,
+ u32(puts_per_thread),
+ u32(put_thread_count),
+ );
+ }
+}
+
+fn startPuts(ctx: *Context) u8 {
+ var put_count: usize = puts_per_thread;
+ var r = std.rand.DefaultPrng.init(0xdeadbeef);
+ while (put_count != 0) : (put_count -= 1) {
+ std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
+ const x = @bitCast(i32, r.random.scalar(u32));
+ const node = ctx.allocator.create(Queue(i32).Node{
+ .next = undefined,
+ .data = x,
+ }) catch unreachable;
+ ctx.queue.put(node);
+ _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+ }
+ return 0;
+}
+
+fn startGets(ctx: *Context) u8 {
+ while (true) {
+ const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+
+ while (ctx.queue.get()) |node| {
+ std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
+ _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+ }
+
+ if (last) return 0;
+ }
+}
+
+test "std.atomic.Queue single-threaded" {
+ var queue = Queue(i32).init();
+
+ var node_0 = Queue(i32).Node{
+ .data = 0,
+ .next = undefined,
+ };
+ queue.put(&node_0);
+
+ var node_1 = Queue(i32).Node{
+ .data = 1,
+ .next = undefined,
+ };
+ queue.put(&node_1);
+
+ assert(queue.get().?.data == 0);
+
+ var node_2 = Queue(i32).Node{
+ .data = 2,
+ .next = undefined,
+ };
+ queue.put(&node_2);
+
+ var node_3 = Queue(i32).Node{
+ .data = 3,
+ .next = undefined,
+ };
+ queue.put(&node_3);
+
+ assert(queue.get().?.data == 1);
+
+ assert(queue.get().?.data == 2);
+
+ var node_4 = Queue(i32).Node{
+ .data = 4,
+ .next = undefined,
+ };
+ queue.put(&node_4);
+
+ assert(queue.get().?.data == 3);
+ node_3.next = null;
+
+ assert(queue.get().?.data == 4);
+
+ assert(queue.get() == null);
+}
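
Since the queue is non-allocating, the caller owns every `Node`; below is a small single-threaded sketch of that contract, assuming the `std.atomic.Queue` re-export.

const std = @import("std");
const assert = std.debug.assert;
const Queue = std.atomic.Queue;

test "example: caller-owned queue nodes (sketch)" {
    var queue = Queue(u32).init();

    // The queue never allocates; node memory lives in the caller's frame here.
    var node = Queue(u32).Node{
        .data = 42,
        .next = undefined,
    };
    queue.put(&node);

    const popped = queue.get().?;
    assert(popped.data == 42);
    assert(queue.get() == null);
}
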
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
new file mode 100644
index 0000000000..16d5c9503b
--- /dev/null
+++ b/std/atomic/stack.zig
@@ -0,0 +1,150 @@
+const assert = std.debug.assert;
+const builtin = @import("builtin");
+const AtomicOrder = builtin.AtomicOrder;
+
+/// Many reader, many writer, non-allocating, thread-safe.
+/// Uses a spinlock to protect push() and pop().
+pub fn Stack(comptime T: type) type {
+ return struct {
+ root: ?*Node,
+ lock: u8,
+
+ pub const Self = this;
+
+ pub const Node = struct {
+ next: ?*Node,
+ data: T,
+ };
+
+ pub fn init() Self {
+ return Self{
+ .root = null,
+ .lock = 0,
+ };
+ }
+
+ /// Push operation, but only if the stack is currently empty, so that `node` becomes
+ /// the first item. If the stack was not empty, returns the item that was already
+ /// there and does not push.
+ pub fn pushFirst(self: *Self, node: *Node) ?*Node {
+ node.next = null;
+ return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
+ }
+
+ pub fn push(self: *Self, node: *Node) void {
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
+ node.next = self.root;
+ self.root = node;
+ }
+
+ pub fn pop(self: *Self) ?*Node {
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
+ const root = self.root orelse return null;
+ self.root = root.next;
+ return root;
+ }
+
+ pub fn isEmpty(self: *Self) bool {
+ return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null;
+ }
+ };
+}
+
+const std = @import("../index.zig");
+const Context = struct {
+ allocator: *std.mem.Allocator,
+ stack: *Stack(i32),
+ put_sum: isize,
+ get_sum: isize,
+ get_count: usize,
+ puts_done: u8, // TODO make this a bool
+};
+// TODO add lazy evaluated build options and then put puts_per_thread behind
+// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor CI,
+// which has only 1 core, we would use a less aggressive setting: we still want
+// this test to pass, but we need a smaller value because there is so much thrashing.
+// We would also use a less aggressive setting when running in valgrind.
+const puts_per_thread = 500;
+const put_thread_count = 3;
+
+test "std.atomic.stack" {
+ var direct_allocator = std.heap.DirectAllocator.init();
+ defer direct_allocator.deinit();
+
+ var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
+ defer direct_allocator.allocator.free(plenty_of_memory);
+
+ var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
+ var a = &fixed_buffer_allocator.allocator;
+
+ var stack = Stack(i32).init();
+ var context = Context{
+ .allocator = a,
+ .stack = &stack,
+ .put_sum = 0,
+ .get_sum = 0,
+ .puts_done = 0,
+ .get_count = 0,
+ };
+
+ var putters: [put_thread_count]*std.os.Thread = undefined;
+ for (putters) |*t| {
+ t.* = try std.os.spawnThread(&context, startPuts);
+ }
+ var getters: [put_thread_count]*std.os.Thread = undefined;
+ for (getters) |*t| {
+ t.* = try std.os.spawnThread(&context, startGets);
+ }
+
+ for (putters) |t|
+ t.wait();
+ _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ for (getters) |t|
+ t.wait();
+
+ if (context.put_sum != context.get_sum) {
+ std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
+ }
+
+ if (context.get_count != puts_per_thread * put_thread_count) {
+ std.debug.panic(
+ "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
+ context.get_count,
+ u32(puts_per_thread),
+ u32(put_thread_count),
+ );
+ }
+}
+
+fn startPuts(ctx: *Context) u8 {
+ var put_count: usize = puts_per_thread;
+ var r = std.rand.DefaultPrng.init(0xdeadbeef);
+ while (put_count != 0) : (put_count -= 1) {
+ std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
+ const x = @bitCast(i32, r.random.scalar(u32));
+ const node = ctx.allocator.create(Stack(i32).Node{
+ .next = undefined,
+ .data = x,
+ }) catch unreachable;
+ ctx.stack.push(node);
+ _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+ }
+ return 0;
+}
+
+fn startGets(ctx: *Context) u8 {
+ while (true) {
+ const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+
+ while (ctx.stack.pop()) |node| {
+ std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
+ _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+ }
+
+ if (last) return 0;
+ }
+}
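
The same caller-owned-node pattern applies to the stack; here is a brief LIFO sketch, assuming the `std.atomic.Stack` re-export.

const std = @import("std");
const assert = std.debug.assert;
const Stack = std.atomic.Stack;

test "example: stack LIFO order (sketch)" {
    var stack = Stack(u8).init();
    assert(stack.isEmpty());

    var a = Stack(u8).Node{ .data = 1, .next = undefined };
    var b = Stack(u8).Node{ .data = 2, .next = undefined };
    stack.push(&a);
    stack.push(&b);

    // pop returns the most recently pushed node first.
    assert(stack.pop().?.data == 2);
    assert(stack.pop().?.data == 1);
    assert(stack.pop() == null);
}
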
diff --git a/std/base64.zig b/std/base64.zig
index 13f3ea5714..45c8e22c7e 100644
--- a/std/base64.zig
+++ b/std/base64.zig
@@ -32,7 +32,7 @@ pub const Base64Encoder = struct {
}
/// dest.len must be what you get from ::calcSize.
- pub fn encode(encoder: &const Base64Encoder, dest: []u8, source: []const u8) void {
+ pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) void {
assert(dest.len == Base64Encoder.calcSize(source.len));
var i: usize = 0;
@@ -41,12 +41,10 @@ pub const Base64Encoder = struct {
dest[out_index] = encoder.alphabet_chars[(source[i] >> 2) & 0x3f];
out_index += 1;
- dest[out_index] = encoder.alphabet_chars[((source[i] & 0x3) << 4) |
- ((source[i + 1] & 0xf0) >> 4)];
+ dest[out_index] = encoder.alphabet_chars[((source[i] & 0x3) << 4) | ((source[i + 1] & 0xf0) >> 4)];
out_index += 1;
- dest[out_index] = encoder.alphabet_chars[((source[i + 1] & 0xf) << 2) |
- ((source[i + 2] & 0xc0) >> 6)];
+ dest[out_index] = encoder.alphabet_chars[((source[i + 1] & 0xf) << 2) | ((source[i + 2] & 0xc0) >> 6)];
out_index += 1;
dest[out_index] = encoder.alphabet_chars[source[i + 2] & 0x3f];
@@ -64,8 +62,7 @@ pub const Base64Encoder = struct {
dest[out_index] = encoder.pad_char;
out_index += 1;
} else {
- dest[out_index] = encoder.alphabet_chars[((source[i] & 0x3) << 4) |
- ((source[i + 1] & 0xf0) >> 4)];
+ dest[out_index] = encoder.alphabet_chars[((source[i] & 0x3) << 4) | ((source[i + 1] & 0xf0) >> 4)];
out_index += 1;
dest[out_index] = encoder.alphabet_chars[(source[i + 1] & 0xf) << 2];
@@ -84,6 +81,7 @@ pub const Base64Decoder = struct {
/// e.g. 'A' => 0.
/// undefined for any value not in the 64 alphabet chars.
char_to_index: [256]u8,
+
/// true only for the 64 chars in the alphabet, not the pad char.
char_in_alphabet: [256]bool,
pad_char: u8,
@@ -101,7 +99,7 @@ pub const Base64Decoder = struct {
assert(!result.char_in_alphabet[c]);
assert(c != pad_char);
- result.char_to_index[c] = u8(i);
+ result.char_to_index[c] = @intCast(u8, i);
result.char_in_alphabet[c] = true;
}
@@ -109,7 +107,7 @@ pub const Base64Decoder = struct {
}
/// If the encoded buffer is detected to be invalid, returns error.InvalidPadding.
- pub fn calcSize(decoder: &const Base64Decoder, source: []const u8) !usize {
+ pub fn calcSize(decoder: *const Base64Decoder, source: []const u8) !usize {
if (source.len % 4 != 0) return error.InvalidPadding;
return calcDecodedSizeExactUnsafe(source, decoder.pad_char);
}
@@ -117,7 +115,7 @@ pub const Base64Decoder = struct {
/// dest.len must be what you get from ::calcSize.
/// invalid characters result in error.InvalidCharacter.
/// invalid padding results in error.InvalidPadding.
- pub fn decode(decoder: &const Base64Decoder, dest: []u8, source: []const u8) !void {
+ pub fn decode(decoder: *const Base64Decoder, dest: []u8, source: []const u8) !void {
assert(dest.len == (decoder.calcSize(source) catch unreachable));
assert(source.len % 4 == 0);
@@ -131,26 +129,20 @@ pub const Base64Decoder = struct {
// common case
if (!decoder.char_in_alphabet[source[src_cursor + 2]]) return error.InvalidCharacter;
if (!decoder.char_in_alphabet[source[src_cursor + 3]]) return error.InvalidCharacter;
- dest[dest_cursor + 0] = decoder.char_to_index[source[src_cursor + 0]] << 2 |
- decoder.char_to_index[source[src_cursor + 1]] >> 4;
- dest[dest_cursor + 1] = decoder.char_to_index[source[src_cursor + 1]] << 4 |
- decoder.char_to_index[source[src_cursor + 2]] >> 2;
- dest[dest_cursor + 2] = decoder.char_to_index[source[src_cursor + 2]] << 6 |
- decoder.char_to_index[source[src_cursor + 3]];
+ dest[dest_cursor + 0] = decoder.char_to_index[source[src_cursor + 0]] << 2 | decoder.char_to_index[source[src_cursor + 1]] >> 4;
+ dest[dest_cursor + 1] = decoder.char_to_index[source[src_cursor + 1]] << 4 | decoder.char_to_index[source[src_cursor + 2]] >> 2;
+ dest[dest_cursor + 2] = decoder.char_to_index[source[src_cursor + 2]] << 6 | decoder.char_to_index[source[src_cursor + 3]];
dest_cursor += 3;
} else if (source[src_cursor + 2] != decoder.pad_char) {
// one pad char
if (!decoder.char_in_alphabet[source[src_cursor + 2]]) return error.InvalidCharacter;
- dest[dest_cursor + 0] = decoder.char_to_index[source[src_cursor + 0]] << 2 |
- decoder.char_to_index[source[src_cursor + 1]] >> 4;
- dest[dest_cursor + 1] = decoder.char_to_index[source[src_cursor + 1]] << 4 |
- decoder.char_to_index[source[src_cursor + 2]] >> 2;
+ dest[dest_cursor + 0] = decoder.char_to_index[source[src_cursor + 0]] << 2 | decoder.char_to_index[source[src_cursor + 1]] >> 4;
+ dest[dest_cursor + 1] = decoder.char_to_index[source[src_cursor + 1]] << 4 | decoder.char_to_index[source[src_cursor + 2]] >> 2;
if (decoder.char_to_index[source[src_cursor + 2]] << 6 != 0) return error.InvalidPadding;
dest_cursor += 2;
} else {
// two pad chars
- dest[dest_cursor + 0] = decoder.char_to_index[source[src_cursor + 0]] << 2 |
- decoder.char_to_index[source[src_cursor + 1]] >> 4;
+ dest[dest_cursor + 0] = decoder.char_to_index[source[src_cursor + 0]] << 2 | decoder.char_to_index[source[src_cursor + 1]] >> 4;
if (decoder.char_to_index[source[src_cursor + 1]] << 4 != 0) return error.InvalidPadding;
dest_cursor += 1;
}
@@ -165,7 +157,7 @@ pub const Base64DecoderWithIgnore = struct {
decoder: Base64Decoder,
char_is_ignored: [256]bool,
pub fn init(alphabet_chars: []const u8, pad_char: u8, ignore_chars: []const u8) Base64DecoderWithIgnore {
- var result = Base64DecoderWithIgnore {
+ var result = Base64DecoderWithIgnore{
.decoder = Base64Decoder.init(alphabet_chars, pad_char),
.char_is_ignored = []bool{false} ** 256,
};
@@ -189,7 +181,7 @@ pub const Base64DecoderWithIgnore = struct {
/// Invalid padding results in error.InvalidPadding.
/// Decoding more data than can fit in dest results in error.OutputTooSmall. See also ::calcSizeUpperBound.
/// Returns the number of bytes written to dest.
- pub fn decode(decoder_with_ignore: &const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize {
+ pub fn decode(decoder_with_ignore: *const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize {
const decoder = &decoder_with_ignore.decoder;
var src_cursor: usize = 0;
@@ -223,10 +215,12 @@ pub const Base64DecoderWithIgnore = struct {
} else if (decoder_with_ignore.char_is_ignored[c]) {
// we can even ignore chars during the padding
continue;
- } else return error.InvalidCharacter;
+ } else
+ return error.InvalidCharacter;
}
break;
- } else return error.InvalidCharacter;
+ } else
+ return error.InvalidCharacter;
}
switch (available_chars) {
@@ -234,22 +228,17 @@ pub const Base64DecoderWithIgnore = struct {
// common case
if (dest_cursor + 3 > dest.len) return error.OutputTooSmall;
assert(pad_char_count == 0);
- dest[dest_cursor + 0] = decoder.char_to_index[next_4_chars[0]] << 2 |
- decoder.char_to_index[next_4_chars[1]] >> 4;
- dest[dest_cursor + 1] = decoder.char_to_index[next_4_chars[1]] << 4 |
- decoder.char_to_index[next_4_chars[2]] >> 2;
- dest[dest_cursor + 2] = decoder.char_to_index[next_4_chars[2]] << 6 |
- decoder.char_to_index[next_4_chars[3]];
+ dest[dest_cursor + 0] = decoder.char_to_index[next_4_chars[0]] << 2 | decoder.char_to_index[next_4_chars[1]] >> 4;
+ dest[dest_cursor + 1] = decoder.char_to_index[next_4_chars[1]] << 4 | decoder.char_to_index[next_4_chars[2]] >> 2;
+ dest[dest_cursor + 2] = decoder.char_to_index[next_4_chars[2]] << 6 | decoder.char_to_index[next_4_chars[3]];
dest_cursor += 3;
continue;
},
3 => {
if (dest_cursor + 2 > dest.len) return error.OutputTooSmall;
if (pad_char_count != 1) return error.InvalidPadding;
- dest[dest_cursor + 0] = decoder.char_to_index[next_4_chars[0]] << 2 |
- decoder.char_to_index[next_4_chars[1]] >> 4;
- dest[dest_cursor + 1] = decoder.char_to_index[next_4_chars[1]] << 4 |
- decoder.char_to_index[next_4_chars[2]] >> 2;
+ dest[dest_cursor + 0] = decoder.char_to_index[next_4_chars[0]] << 2 | decoder.char_to_index[next_4_chars[1]] >> 4;
+ dest[dest_cursor + 1] = decoder.char_to_index[next_4_chars[1]] << 4 | decoder.char_to_index[next_4_chars[2]] >> 2;
if (decoder.char_to_index[next_4_chars[2]] << 6 != 0) return error.InvalidPadding;
dest_cursor += 2;
break;
@@ -257,8 +246,7 @@ pub const Base64DecoderWithIgnore = struct {
2 => {
if (dest_cursor + 1 > dest.len) return error.OutputTooSmall;
if (pad_char_count != 2) return error.InvalidPadding;
- dest[dest_cursor + 0] = decoder.char_to_index[next_4_chars[0]] << 2 |
- decoder.char_to_index[next_4_chars[1]] >> 4;
+ dest[dest_cursor + 0] = decoder.char_to_index[next_4_chars[0]] << 2 | decoder.char_to_index[next_4_chars[1]] >> 4;
if (decoder.char_to_index[next_4_chars[1]] << 4 != 0) return error.InvalidPadding;
dest_cursor += 1;
break;
@@ -280,7 +268,6 @@ pub const Base64DecoderWithIgnore = struct {
}
};
-
pub const standard_decoder_unsafe = Base64DecoderUnsafe.init(standard_alphabet_chars, standard_pad_char);
pub const Base64DecoderUnsafe = struct {
@@ -291,25 +278,25 @@ pub const Base64DecoderUnsafe = struct {
pub fn init(alphabet_chars: []const u8, pad_char: u8) Base64DecoderUnsafe {
assert(alphabet_chars.len == 64);
- var result = Base64DecoderUnsafe {
+ var result = Base64DecoderUnsafe{
.char_to_index = undefined,
.pad_char = pad_char,
};
for (alphabet_chars) |c, i| {
assert(c != pad_char);
- result.char_to_index[c] = u8(i);
+ result.char_to_index[c] = @intCast(u8, i);
}
return result;
}
/// The source buffer must be valid.
- pub fn calcSize(decoder: &const Base64DecoderUnsafe, source: []const u8) usize {
+ pub fn calcSize(decoder: *const Base64DecoderUnsafe, source: []const u8) usize {
return calcDecodedSizeExactUnsafe(source, decoder.pad_char);
}
/// dest.len must be what you get from ::calcDecodedSizeExactUnsafe.
/// invalid characters or padding will result in undefined values.
- pub fn decode(decoder: &const Base64DecoderUnsafe, dest: []u8, source: []const u8) void {
+ pub fn decode(decoder: *const Base64DecoderUnsafe, dest: []u8, source: []const u8) void {
assert(dest.len == decoder.calcSize(source));
var src_index: usize = 0;
@@ -321,16 +308,13 @@ pub const Base64DecoderUnsafe = struct {
}
while (in_buf_len > 4) {
- dest[dest_index] = decoder.char_to_index[source[src_index + 0]] << 2 |
- decoder.char_to_index[source[src_index + 1]] >> 4;
+ dest[dest_index] = decoder.char_to_index[source[src_index + 0]] << 2 | decoder.char_to_index[source[src_index + 1]] >> 4;
dest_index += 1;
- dest[dest_index] = decoder.char_to_index[source[src_index + 1]] << 4 |
- decoder.char_to_index[source[src_index + 2]] >> 2;
+ dest[dest_index] = decoder.char_to_index[source[src_index + 1]] << 4 | decoder.char_to_index[source[src_index + 2]] >> 2;
dest_index += 1;
- dest[dest_index] = decoder.char_to_index[source[src_index + 2]] << 6 |
- decoder.char_to_index[source[src_index + 3]];
+ dest[dest_index] = decoder.char_to_index[source[src_index + 2]] << 6 | decoder.char_to_index[source[src_index + 3]];
dest_index += 1;
src_index += 4;
@@ -338,18 +322,15 @@ pub const Base64DecoderUnsafe = struct {
}
if (in_buf_len > 1) {
- dest[dest_index] = decoder.char_to_index[source[src_index + 0]] << 2 |
- decoder.char_to_index[source[src_index + 1]] >> 4;
+ dest[dest_index] = decoder.char_to_index[source[src_index + 0]] << 2 | decoder.char_to_index[source[src_index + 1]] >> 4;
dest_index += 1;
}
if (in_buf_len > 2) {
- dest[dest_index] = decoder.char_to_index[source[src_index + 1]] << 4 |
- decoder.char_to_index[source[src_index + 2]] >> 2;
+ dest[dest_index] = decoder.char_to_index[source[src_index + 1]] << 4 | decoder.char_to_index[source[src_index + 2]] >> 2;
dest_index += 1;
}
if (in_buf_len > 3) {
- dest[dest_index] = decoder.char_to_index[source[src_index + 2]] << 6 |
- decoder.char_to_index[source[src_index + 3]];
+ dest[dest_index] = decoder.char_to_index[source[src_index + 2]] << 6 | decoder.char_to_index[source[src_index + 3]];
dest_index += 1;
}
}
@@ -367,7 +348,6 @@ fn calcDecodedSizeExactUnsafe(source: []const u8, pad_char: u8) usize {
return result;
}
-
test "base64" {
@setEvalBranchQuota(8000);
testBase64() catch unreachable;
@@ -375,26 +355,26 @@ test "base64" {
}
fn testBase64() !void {
- try testAllApis("", "");
- try testAllApis("f", "Zg==");
- try testAllApis("fo", "Zm8=");
- try testAllApis("foo", "Zm9v");
- try testAllApis("foob", "Zm9vYg==");
- try testAllApis("fooba", "Zm9vYmE=");
+ try testAllApis("", "");
+ try testAllApis("f", "Zg==");
+ try testAllApis("fo", "Zm8=");
+ try testAllApis("foo", "Zm9v");
+ try testAllApis("foob", "Zm9vYg==");
+ try testAllApis("fooba", "Zm9vYmE=");
try testAllApis("foobar", "Zm9vYmFy");
- try testDecodeIgnoreSpace("", " ");
- try testDecodeIgnoreSpace("f", "Z g= =");
- try testDecodeIgnoreSpace("fo", " Zm8=");
- try testDecodeIgnoreSpace("foo", "Zm9v ");
- try testDecodeIgnoreSpace("foob", "Zm9vYg = = ");
- try testDecodeIgnoreSpace("fooba", "Zm9v YmE=");
+ try testDecodeIgnoreSpace("", " ");
+ try testDecodeIgnoreSpace("f", "Z g= =");
+ try testDecodeIgnoreSpace("fo", " Zm8=");
+ try testDecodeIgnoreSpace("foo", "Zm9v ");
+ try testDecodeIgnoreSpace("foob", "Zm9vYg = = ");
+ try testDecodeIgnoreSpace("fooba", "Zm9v YmE=");
try testDecodeIgnoreSpace("foobar", " Z m 9 v Y m F y ");
// test getting some api errors
- try testError("A", error.InvalidPadding);
- try testError("AA", error.InvalidPadding);
- try testError("AAA", error.InvalidPadding);
+ try testError("A", error.InvalidPadding);
+ try testError("AA", error.InvalidPadding);
+ try testError("AAA", error.InvalidPadding);
try testError("A..A", error.InvalidCharacter);
try testError("AA=A", error.InvalidCharacter);
try testError("AA/=", error.InvalidPadding);
@@ -427,8 +407,7 @@ fn testAllApis(expected_decoded: []const u8, expected_encoded: []const u8) !void
// Base64DecoderWithIgnore
{
- const standard_decoder_ignore_nothing = Base64DecoderWithIgnore.init(
- standard_alphabet_chars, standard_pad_char, "");
+ const standard_decoder_ignore_nothing = Base64DecoderWithIgnore.init(standard_alphabet_chars, standard_pad_char, "");
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..Base64DecoderWithIgnore.calcSizeUpperBound(expected_encoded.len)];
var written = try standard_decoder_ignore_nothing.decode(decoded, expected_encoded);
@@ -446,8 +425,7 @@ fn testAllApis(expected_decoded: []const u8, expected_encoded: []const u8) !void
}
fn testDecodeIgnoreSpace(expected_decoded: []const u8, encoded: []const u8) !void {
- const standard_decoder_ignore_space = Base64DecoderWithIgnore.init(
- standard_alphabet_chars, standard_pad_char, " ");
+ const standard_decoder_ignore_space = Base64DecoderWithIgnore.init(standard_alphabet_chars, standard_pad_char, " ");
var buffer: [0x100]u8 = undefined;
var decoded = buffer[0..Base64DecoderWithIgnore.calcSizeUpperBound(encoded.len)];
var written = try standard_decoder_ignore_space.decode(decoded, encoded);
@@ -455,8 +433,7 @@ fn testDecodeIgnoreSpace(expected_decoded: []const u8, encoded: []const u8) !voi
}
fn testError(encoded: []const u8, expected_err: error) !void {
- const standard_decoder_ignore_space = Base64DecoderWithIgnore.init(
- standard_alphabet_chars, standard_pad_char, " ");
+ const standard_decoder_ignore_space = Base64DecoderWithIgnore.init(standard_alphabet_chars, standard_pad_char, " ");
var buffer: [0x100]u8 = undefined;
if (standard_decoder.calcSize(encoded)) |decoded_size| {
var decoded = buffer[0..decoded_size];
@@ -471,10 +448,9 @@ fn testError(encoded: []const u8, expected_err: error) !void {
}
fn testOutputTooSmallError(encoded: []const u8) !void {
- const standard_decoder_ignore_space = Base64DecoderWithIgnore.init(
- standard_alphabet_chars, standard_pad_char, " ");
+ const standard_decoder_ignore_space = Base64DecoderWithIgnore.init(standard_alphabet_chars, standard_pad_char, " ");
var buffer: [0x100]u8 = undefined;
- var decoded = buffer[0..calcDecodedSizeExactUnsafe(encoded, standard_pad_char) - 1];
+ var decoded = buffer[0 .. calcDecodedSizeExactUnsafe(encoded, standard_pad_char) - 1];
if (standard_decoder_ignore_space.decode(decoded, encoded)) |_| {
return error.ExpectedError;
} else |err| if (err != error.OutputTooSmall) return err;
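
A round-trip sketch of the encode/decode API touched above. It assumes the pre-existing `standard_encoder` and `standard_decoder` instances in std.base64, which are not shown in these hunks.

const std = @import("std");
const base64 = std.base64;
const mem = std.mem;
const assert = std.debug.assert;

test "example: standard alphabet round trip (sketch)" {
    const source = "zig";
    var encoded_buf: [4]u8 = undefined;
    var decoded_buf: [3]u8 = undefined;

    // Encode into a caller-provided buffer sized by calcSize.
    const encoded = encoded_buf[0..base64.Base64Encoder.calcSize(source.len)];
    base64.standard_encoder.encode(encoded, source);
    assert(mem.eql(u8, encoded, "emln"));

    // Decode back and compare with the original bytes.
    const size = try base64.standard_decoder.calcSize(encoded);
    const decoded = decoded_buf[0..size];
    try base64.standard_decoder.decode(decoded, encoded);
    assert(mem.eql(u8, decoded, source));
}
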
diff --git a/std/buf_map.zig b/std/buf_map.zig
index 3e12d9a7d9..a82d1b731a 100644
--- a/std/buf_map.zig
+++ b/std/buf_map.zig
@@ -11,17 +11,15 @@ pub const BufMap = struct {
const BufMapHashMap = HashMap([]const u8, []const u8, mem.hash_slice_u8, mem.eql_slice_u8);
- pub fn init(allocator: &Allocator) BufMap {
- var self = BufMap {
- .hash_map = BufMapHashMap.init(allocator),
- };
+ pub fn init(allocator: *Allocator) BufMap {
+ var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}
- pub fn deinit(self: &BufMap) void {
+ pub fn deinit(self: *const BufMap) void {
var it = self.hash_map.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
self.free(entry.key);
self.free(entry.value);
}
@@ -29,7 +27,7 @@ pub const BufMap = struct {
self.hash_map.deinit();
}
- pub fn set(self: &BufMap, key: []const u8, value: []const u8) !void {
+ pub fn set(self: *BufMap, key: []const u8, value: []const u8) !void {
self.delete(key);
const key_copy = try self.copy(key);
errdefer self.free(key_copy);
@@ -38,30 +36,30 @@ pub const BufMap = struct {
_ = try self.hash_map.put(key_copy, value_copy);
}
- pub fn get(self: &BufMap, key: []const u8) ?[]const u8 {
- const entry = self.hash_map.get(key) ?? return null;
+ pub fn get(self: *const BufMap, key: []const u8) ?[]const u8 {
+ const entry = self.hash_map.get(key) orelse return null;
return entry.value;
}
- pub fn delete(self: &BufMap, key: []const u8) void {
- const entry = self.hash_map.remove(key) ?? return;
+ pub fn delete(self: *BufMap, key: []const u8) void {
+ const entry = self.hash_map.remove(key) orelse return;
self.free(entry.key);
self.free(entry.value);
}
- pub fn count(self: &const BufMap) usize {
- return self.hash_map.size;
+ pub fn count(self: *const BufMap) usize {
+ return self.hash_map.count();
}
- pub fn iterator(self: &const BufMap) BufMapHashMap.Iterator {
+ pub fn iterator(self: *const BufMap) BufMapHashMap.Iterator {
return self.hash_map.iterator();
}
- fn free(self: &BufMap, value: []const u8) void {
+ fn free(self: *const BufMap, value: []const u8) void {
self.hash_map.allocator.free(value);
}
- fn copy(self: &BufMap, value: []const u8) ![]const u8 {
+ fn copy(self: *const BufMap, value: []const u8) ![]const u8 {
return mem.dupe(self.hash_map.allocator, u8, value);
}
};
@@ -74,15 +72,15 @@ test "BufMap" {
defer bufmap.deinit();
try bufmap.set("x", "1");
- assert(mem.eql(u8, ??bufmap.get("x"), "1"));
+ assert(mem.eql(u8, bufmap.get("x").?, "1"));
assert(1 == bufmap.count());
try bufmap.set("x", "2");
- assert(mem.eql(u8, ??bufmap.get("x"), "2"));
+ assert(mem.eql(u8, bufmap.get("x").?, "2"));
assert(1 == bufmap.count());
try bufmap.set("x", "3");
- assert(mem.eql(u8, ??bufmap.get("x"), "3"));
+ assert(mem.eql(u8, bufmap.get("x").?, "3"));
assert(1 == bufmap.count());
bufmap.delete("x");
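
For the iterator path not exercised by the test above, a small sketch, assuming `BufMap` is re-exported as `std.BufMap`.

const std = @import("std");
const assert = std.debug.assert;

test "example: BufMap iterator (sketch)" {
    var map = std.BufMap.init(std.debug.global_allocator);
    defer map.deinit();

    try map.set("PATH", "/usr/bin");
    try map.set("HOME", "/home/zig");

    // Entries come back in hash order; count them rather than assume order.
    var n: usize = 0;
    var it = map.iterator();
    while (it.next()) |entry| {
        assert(entry.value.len != 0);
        n += 1;
    }
    assert(n == map.count());
}
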
diff --git a/std/buf_set.zig b/std/buf_set.zig
index 618b985c41..ab2d8e7c34 100644
--- a/std/buf_set.zig
+++ b/std/buf_set.zig
@@ -1,30 +1,30 @@
+const std = @import("index.zig");
const HashMap = @import("hash_map.zig").HashMap;
const mem = @import("mem.zig");
const Allocator = mem.Allocator;
+const assert = std.debug.assert;
pub const BufSet = struct {
hash_map: BufSetHashMap,
const BufSetHashMap = HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
- pub fn init(a: &Allocator) BufSet {
- var self = BufSet {
- .hash_map = BufSetHashMap.init(a),
- };
+ pub fn init(a: *Allocator) BufSet {
+ var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
return self;
}
- pub fn deinit(self: &BufSet) void {
+ pub fn deinit(self: *const BufSet) void {
var it = self.hash_map.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
self.free(entry.key);
}
self.hash_map.deinit();
}
- pub fn put(self: &BufSet, key: []const u8) !void {
+ pub fn put(self: *BufSet, key: []const u8) !void {
if (self.hash_map.get(key) == null) {
const key_copy = try self.copy(key);
errdefer self.free(key_copy);
@@ -32,31 +32,47 @@ pub const BufSet = struct {
}
}
- pub fn delete(self: &BufSet, key: []const u8) void {
- const entry = self.hash_map.remove(key) ?? return;
+ pub fn delete(self: *BufSet, key: []const u8) void {
+ const entry = self.hash_map.remove(key) orelse return;
self.free(entry.key);
}
- pub fn count(self: &const BufSet) usize {
- return self.hash_map.size;
+ pub fn count(self: *const BufSet) usize {
+ return self.hash_map.count();
}
- pub fn iterator(self: &const BufSet) BufSetHashMap.Iterator {
+ pub fn iterator(self: *const BufSet) BufSetHashMap.Iterator {
return self.hash_map.iterator();
}
- pub fn allocator(self: &const BufSet) &Allocator {
+ pub fn allocator(self: *const BufSet) *Allocator {
return self.hash_map.allocator;
}
- fn free(self: &BufSet, value: []const u8) void {
+ fn free(self: *const BufSet, value: []const u8) void {
self.hash_map.allocator.free(value);
}
- fn copy(self: &BufSet, value: []const u8) ![]const u8 {
+ fn copy(self: *const BufSet, value: []const u8) ![]const u8 {
const result = try self.hash_map.allocator.alloc(u8, value.len);
mem.copy(u8, result, value);
return result;
}
};
+test "BufSet" {
+ var direct_allocator = std.heap.DirectAllocator.init();
+ defer direct_allocator.deinit();
+
+ var bufset = BufSet.init(&direct_allocator.allocator);
+ defer bufset.deinit();
+
+ try bufset.put("x");
+ assert(bufset.count() == 1);
+ bufset.delete("x");
+ assert(bufset.count() == 0);
+
+ try bufset.put("x");
+ try bufset.put("y");
+ try bufset.put("z");
+}
diff --git a/std/buffer.zig b/std/buffer.zig
index e0892d5933..3b58002aba 100644
--- a/std/buffer.zig
+++ b/std/buffer.zig
@@ -5,21 +5,19 @@ const Allocator = mem.Allocator;
const assert = debug.assert;
const ArrayList = std.ArrayList;
-const fmt = std.fmt;
-
/// A buffer that allocates memory and maintains a null byte at the end.
pub const Buffer = struct {
list: ArrayList(u8),
/// Must deinitialize with deinit.
- pub fn init(allocator: &Allocator, m: []const u8) !Buffer {
+ pub fn init(allocator: *Allocator, m: []const u8) !Buffer {
var self = try initSize(allocator, m.len);
mem.copy(u8, self.list.items, m);
return self;
}
/// Must deinitialize with deinit.
- pub fn initSize(allocator: &Allocator, size: usize) !Buffer {
+ pub fn initSize(allocator: *Allocator, size: usize) !Buffer {
var self = initNull(allocator);
try self.resize(size);
return self;
@@ -28,122 +26,113 @@ pub const Buffer = struct {
/// Must deinitialize with deinit.
/// None of the other operations are valid until you do one of these:
/// * ::replaceContents
- /// * ::replaceContentsBuffer
/// * ::resize
- pub fn initNull(allocator: &Allocator) Buffer {
- return Buffer {
- .list = ArrayList(u8).init(allocator),
- };
+ pub fn initNull(allocator: *Allocator) Buffer {
+ return Buffer{ .list = ArrayList(u8).init(allocator) };
}
/// Must deinitialize with deinit.
- pub fn initFromBuffer(buffer: &const Buffer) !Buffer {
+ pub fn initFromBuffer(buffer: *const Buffer) !Buffer {
return Buffer.init(buffer.list.allocator, buffer.toSliceConst());
}
/// Buffer takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Must deinitialize with deinit.
- pub fn fromOwnedSlice(allocator: &Allocator, slice: []u8) Buffer {
- var self = Buffer {
- .list = ArrayList(u8).fromOwnedSlice(allocator, slice),
- };
- self.list.append(0);
+ pub fn fromOwnedSlice(allocator: *Allocator, slice: []u8) !Buffer {
+ var self = Buffer{ .list = ArrayList(u8).fromOwnedSlice(allocator, slice) };
+ try self.list.append(0);
return self;
}
/// The caller owns the returned memory. The Buffer becomes null and
/// is safe to `deinit`.
- pub fn toOwnedSlice(self: &Buffer) []u8 {
+ pub fn toOwnedSlice(self: *Buffer) []u8 {
const allocator = self.list.allocator;
const result = allocator.shrink(u8, self.list.items, self.len());
- *self = initNull(allocator);
+ self.* = initNull(allocator);
return result;
}
+ pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: ...) !Buffer {
+ const countSize = struct {
+ fn countSize(size: *usize, bytes: []const u8) (error{}!void) {
+ size.* += bytes.len;
+ }
+ }.countSize;
+ var size: usize = 0;
+ std.fmt.format(&size, error{}, countSize, format, args) catch |err| switch (err) {};
+ var self = try Buffer.initSize(allocator, size);
+ assert((std.fmt.bufPrint(self.list.items, format, args) catch unreachable).len == size);
+ return self;
+ }
- pub fn deinit(self: &Buffer) void {
+ pub fn deinit(self: *Buffer) void {
self.list.deinit();
}
- pub fn toSlice(self: &Buffer) []u8 {
+ pub fn toSlice(self: *const Buffer) []u8 {
return self.list.toSlice()[0..self.len()];
}
- pub fn toSliceConst(self: &const Buffer) []const u8 {
+ pub fn toSliceConst(self: *const Buffer) []const u8 {
return self.list.toSliceConst()[0..self.len()];
}
- pub fn shrink(self: &Buffer, new_len: usize) void {
+ pub fn shrink(self: *Buffer, new_len: usize) void {
assert(new_len <= self.len());
self.list.shrink(new_len + 1);
self.list.items[self.len()] = 0;
}
- pub fn resize(self: &Buffer, new_len: usize) !void {
+ pub fn resize(self: *Buffer, new_len: usize) !void {
try self.list.resize(new_len + 1);
self.list.items[self.len()] = 0;
}
- pub fn isNull(self: &const Buffer) bool {
+ pub fn isNull(self: *const Buffer) bool {
return self.list.len == 0;
}
- pub fn len(self: &const Buffer) usize {
+ pub fn len(self: *const Buffer) usize {
return self.list.len - 1;
}
- pub fn append(self: &Buffer, m: []const u8) !void {
+ pub fn append(self: *Buffer, m: []const u8) !void {
const old_len = self.len();
try self.resize(old_len + m.len);
mem.copy(u8, self.list.toSlice()[old_len..], m);
}
- // TODO: remove, use OutStream for this
- pub fn appendFormat(self: &Buffer, comptime format: []const u8, args: ...) !void {
- return fmt.format(self, append, format, args);
+ pub fn appendByte(self: *Buffer, byte: u8) !void {
+ const old_len = self.len();
+ try self.resize(old_len + 1);
+ self.list.toSlice()[old_len] = byte;
}
- // TODO: remove, use OutStream for this
- pub fn appendByte(self: &Buffer, byte: u8) !void {
- return self.appendByteNTimes(byte, 1);
- }
-
- // TODO: remove, use OutStream for this
- pub fn appendByteNTimes(self: &Buffer, byte: u8, count: usize) !void {
- var prev_size: usize = self.len();
- const new_size = prev_size + count;
- try self.resize(new_size);
-
- var i: usize = prev_size;
- while (i < new_size) : (i += 1) {
- self.list.items[i] = byte;
- }
- }
-
- pub fn eql(self: &const Buffer, m: []const u8) bool {
+ pub fn eql(self: *const Buffer, m: []const u8) bool {
return mem.eql(u8, self.toSliceConst(), m);
}
- pub fn startsWith(self: &const Buffer, m: []const u8) bool {
+ pub fn startsWith(self: *const Buffer, m: []const u8) bool {
if (self.len() < m.len) return false;
return mem.eql(u8, self.list.items[0..m.len], m);
}
- pub fn endsWith(self: &const Buffer, m: []const u8) bool {
+ pub fn endsWith(self: *const Buffer, m: []const u8) bool {
const l = self.len();
if (l < m.len) return false;
const start = l - m.len;
return mem.eql(u8, self.list.items[start..l], m);
}
- pub fn replaceContents(self: &const Buffer, m: []const u8) !void {
+ pub fn replaceContents(self: *Buffer, m: []const u8) !void {
try self.resize(m.len);
mem.copy(u8, self.list.toSlice(), m);
}
/// For passing to C functions.
- pub fn ptr(self: &const Buffer) &u8 {
+ pub fn ptr(self: *const Buffer) [*]u8 {
return self.list.items.ptr;
}
};
@@ -154,7 +143,7 @@ test "simple Buffer" {
var buf = try Buffer.init(debug.global_allocator, "");
assert(buf.len() == 0);
try buf.append("hello");
- try buf.appendByte(' ');
+ try buf.append(" ");
try buf.append("world");
assert(buf.eql("hello world"));
assert(mem.eql(u8, cstr.toSliceConst(buf.toSliceConst().ptr), buf.toSliceConst()));
@@ -166,5 +155,5 @@ test "simple Buffer" {
assert(buf.endsWith("orld"));
try buf2.resize(4);
- assert(buf.startsWith(buf2.toSliceConst()));
+ assert(buf.startsWith(buf2.toSlice()));
}
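
The new `Buffer.allocPrint` added above can replace the removed `appendFormat` for simple formatting; a brief sketch follows, assuming `Buffer` is re-exported as `std.Buffer`.

const std = @import("std");
const assert = std.debug.assert;
const Buffer = std.Buffer;

test "example: Buffer.allocPrint (sketch)" {
    // Formats into a freshly allocated, null-terminated Buffer.
    var buf = try Buffer.allocPrint(std.debug.global_allocator, "{} + {} = {}", u32(1), u32(2), u32(3));
    defer buf.deinit();

    assert(buf.eql("1 + 2 = 3"));
    assert(buf.startsWith("1 +"));
    assert(buf.endsWith("= 3"));
}
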
diff --git a/std/build.zig b/std/build.zig
index a4d745e450..68cf13c1eb 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -20,7 +20,7 @@ pub const Builder = struct {
install_tls: TopLevelStep,
have_uninstall_step: bool,
have_install_step: bool,
- allocator: &Allocator,
+ allocator: *Allocator,
lib_paths: ArrayList([]const u8),
include_paths: ArrayList([]const u8),
rpaths: ArrayList([]const u8),
@@ -36,9 +36,9 @@ pub const Builder = struct {
verbose_cimport: bool,
invalid_user_input: bool,
zig_exe: []const u8,
- default_step: &Step,
+ default_step: *Step,
env_map: BufMap,
- top_level_steps: ArrayList(&TopLevelStep),
+ top_level_steps: ArrayList(*TopLevelStep),
prefix: []const u8,
search_prefixes: ArrayList([]const u8),
lib_dir: []const u8,
@@ -82,10 +82,8 @@ pub const Builder = struct {
description: []const u8,
};
- pub fn init(allocator: &Allocator, zig_exe: []const u8, build_root: []const u8,
- cache_root: []const u8) Builder
- {
- var self = Builder {
+ pub fn init(allocator: *Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder {
+ var self = Builder{
.zig_exe = zig_exe,
.build_root = build_root,
.cache_root = os.path.relative(allocator, build_root, cache_root) catch unreachable,
@@ -104,7 +102,7 @@ pub const Builder = struct {
.user_input_options = UserInputOptionsMap.init(allocator),
.available_options_map = AvailableOptionsMap.init(allocator),
.available_options_list = ArrayList(AvailableOption).init(allocator),
- .top_level_steps = ArrayList(&TopLevelStep).init(allocator),
+ .top_level_steps = ArrayList(*TopLevelStep).init(allocator),
.default_step = undefined,
.env_map = os.getEnvMap(allocator) catch unreachable,
.prefix = undefined,
@@ -112,12 +110,12 @@ pub const Builder = struct {
.lib_dir = undefined,
.exe_dir = undefined,
.installed_files = ArrayList([]const u8).init(allocator),
- .uninstall_tls = TopLevelStep {
+ .uninstall_tls = TopLevelStep{
.step = Step.init("uninstall", allocator, makeUninstall),
.description = "Remove build artifacts from prefix path",
},
.have_uninstall_step = false,
- .install_tls = TopLevelStep {
+ .install_tls = TopLevelStep{
.step = Step.initNoOp("install", allocator),
.description = "Copy build artifacts to prefix path",
},
@@ -129,7 +127,7 @@ pub const Builder = struct {
return self;
}
- pub fn deinit(self: &Builder) void {
+ pub fn deinit(self: *Builder) void {
self.lib_paths.deinit();
self.include_paths.deinit();
self.rpaths.deinit();
@@ -137,110 +135,102 @@ pub const Builder = struct {
self.top_level_steps.deinit();
}
- pub fn setInstallPrefix(self: &Builder, maybe_prefix: ?[]const u8) void {
- self.prefix = maybe_prefix ?? "/usr/local"; // TODO better default
+ pub fn setInstallPrefix(self: *Builder, maybe_prefix: ?[]const u8) void {
+ self.prefix = maybe_prefix orelse "/usr/local"; // TODO better default
self.lib_dir = os.path.join(self.allocator, self.prefix, "lib") catch unreachable;
self.exe_dir = os.path.join(self.allocator, self.prefix, "bin") catch unreachable;
}
- pub fn addExecutable(self: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
+ pub fn addExecutable(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
return LibExeObjStep.createExecutable(self, name, root_src);
}
- pub fn addObject(self: &Builder, name: []const u8, root_src: []const u8) &LibExeObjStep {
+ pub fn addObject(self: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
return LibExeObjStep.createObject(self, name, root_src);
}
- pub fn addSharedLibrary(self: &Builder, name: []const u8, root_src: ?[]const u8,
- ver: &const Version) &LibExeObjStep
- {
+ pub fn addSharedLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
return LibExeObjStep.createSharedLibrary(self, name, root_src, ver);
}
- pub fn addStaticLibrary(self: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
+ pub fn addStaticLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
return LibExeObjStep.createStaticLibrary(self, name, root_src);
}
- pub fn addTest(self: &Builder, root_src: []const u8) &TestStep {
- const test_step = self.allocator.create(TestStep) catch unreachable;
- *test_step = TestStep.init(self, root_src);
+ pub fn addTest(self: *Builder, root_src: []const u8) *TestStep {
+ const test_step = self.allocator.create(TestStep.init(self, root_src)) catch unreachable;
return test_step;
}
- pub fn addAssemble(self: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
+ pub fn addAssemble(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
const obj_step = LibExeObjStep.createObject(self, name, null);
obj_step.addAssemblyFile(src);
return obj_step;
}
- pub fn addCStaticLibrary(self: &Builder, name: []const u8) &LibExeObjStep {
+ pub fn addCStaticLibrary(self: *Builder, name: []const u8) *LibExeObjStep {
return LibExeObjStep.createCStaticLibrary(self, name);
}
- pub fn addCSharedLibrary(self: &Builder, name: []const u8, ver: &const Version) &LibExeObjStep {
+ pub fn addCSharedLibrary(self: *Builder, name: []const u8, ver: *const Version) *LibExeObjStep {
return LibExeObjStep.createCSharedLibrary(self, name, ver);
}
- pub fn addCExecutable(self: &Builder, name: []const u8) &LibExeObjStep {
+ pub fn addCExecutable(self: *Builder, name: []const u8) *LibExeObjStep {
return LibExeObjStep.createCExecutable(self, name);
}
- pub fn addCObject(self: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
+ pub fn addCObject(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
return LibExeObjStep.createCObject(self, name, src);
}
/// ::argv is copied.
- pub fn addCommand(self: &Builder, cwd: ?[]const u8, env_map: &const BufMap,
- argv: []const []const u8) &CommandStep
- {
+ pub fn addCommand(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
return CommandStep.create(self, cwd, env_map, argv);
}
- pub fn addWriteFile(self: &Builder, file_path: []const u8, data: []const u8) &WriteFileStep {
- const write_file_step = self.allocator.create(WriteFileStep) catch unreachable;
- *write_file_step = WriteFileStep.init(self, file_path, data);
+ pub fn addWriteFile(self: *Builder, file_path: []const u8, data: []const u8) *WriteFileStep {
+ const write_file_step = self.allocator.create(WriteFileStep.init(self, file_path, data)) catch unreachable;
return write_file_step;
}
- pub fn addLog(self: &Builder, comptime format: []const u8, args: ...) &LogStep {
+ pub fn addLog(self: *Builder, comptime format: []const u8, args: ...) *LogStep {
const data = self.fmt(format, args);
- const log_step = self.allocator.create(LogStep) catch unreachable;
- *log_step = LogStep.init(self, data);
+ const log_step = self.allocator.create(LogStep.init(self, data)) catch unreachable;
return log_step;
}
- pub fn addRemoveDirTree(self: &Builder, dir_path: []const u8) &RemoveDirStep {
- const remove_dir_step = self.allocator.create(RemoveDirStep) catch unreachable;
- *remove_dir_step = RemoveDirStep.init(self, dir_path);
+ pub fn addRemoveDirTree(self: *Builder, dir_path: []const u8) *RemoveDirStep {
+ const remove_dir_step = self.allocator.create(RemoveDirStep.init(self, dir_path)) catch unreachable;
return remove_dir_step;
}
- pub fn version(self: &const Builder, major: u32, minor: u32, patch: u32) Version {
- return Version {
+ pub fn version(self: *const Builder, major: u32, minor: u32, patch: u32) Version {
+ return Version{
.major = major,
.minor = minor,
.patch = patch,
};
}
- pub fn addCIncludePath(self: &Builder, path: []const u8) void {
+ pub fn addCIncludePath(self: *Builder, path: []const u8) void {
self.include_paths.append(path) catch unreachable;
}
- pub fn addRPath(self: &Builder, path: []const u8) void {
+ pub fn addRPath(self: *Builder, path: []const u8) void {
self.rpaths.append(path) catch unreachable;
}
- pub fn addLibPath(self: &Builder, path: []const u8) void {
+ pub fn addLibPath(self: *Builder, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
}
- pub fn make(self: &Builder, step_names: []const []const u8) !void {
- var wanted_steps = ArrayList(&Step).init(self.allocator);
+ pub fn make(self: *Builder, step_names: []const []const u8) !void {
+ var wanted_steps = ArrayList(*Step).init(self.allocator);
defer wanted_steps.deinit();
if (step_names.len == 0) {
- try wanted_steps.append(&self.default_step);
+ try wanted_steps.append(self.default_step);
} else {
for (step_names) |step_name| {
const s = try self.getTopLevelStepByName(step_name);
@@ -253,25 +243,23 @@ pub const Builder = struct {
}
}
- pub fn getInstallStep(self: &Builder) &Step {
- if (self.have_install_step)
- return &self.install_tls.step;
+ pub fn getInstallStep(self: *Builder) *Step {
+ if (self.have_install_step) return &self.install_tls.step;
self.top_level_steps.append(&self.install_tls) catch unreachable;
self.have_install_step = true;
return &self.install_tls.step;
}
- pub fn getUninstallStep(self: &Builder) &Step {
- if (self.have_uninstall_step)
- return &self.uninstall_tls.step;
+ pub fn getUninstallStep(self: *Builder) *Step {
+ if (self.have_uninstall_step) return &self.uninstall_tls.step;
self.top_level_steps.append(&self.uninstall_tls) catch unreachable;
self.have_uninstall_step = true;
return &self.uninstall_tls.step;
}
- fn makeUninstall(uninstall_step: &Step) error!void {
+ fn makeUninstall(uninstall_step: *Step) error!void {
const uninstall_tls = @fieldParentPtr(TopLevelStep, "step", uninstall_step);
const self = @fieldParentPtr(Builder, "uninstall_tls", uninstall_tls);
@@ -285,7 +273,7 @@ pub const Builder = struct {
// TODO remove empty directories
}
- fn makeOneStep(self: &Builder, s: &Step) error!void {
+ fn makeOneStep(self: *Builder, s: *Step) error!void {
if (s.loop_flag) {
warn("Dependency loop detected:\n {}\n", s.name);
return error.DependencyLoopDetected;
@@ -306,7 +294,7 @@ pub const Builder = struct {
try s.make();
}
- fn getTopLevelStepByName(self: &Builder, name: []const u8) !&Step {
+ fn getTopLevelStepByName(self: *Builder, name: []const u8) !*Step {
for (self.top_level_steps.toSliceConst()) |top_level_step| {
if (mem.eql(u8, top_level_step.step.name, name)) {
return &top_level_step.step;
@@ -316,13 +304,13 @@ pub const Builder = struct {
return error.InvalidStepName;
}
- fn processNixOSEnvVars(self: &Builder) void {
+ fn processNixOSEnvVars(self: *Builder) void {
if (os.getEnvVarOwned(self.allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
var it = mem.split(nix_cflags_compile, " ");
while (true) {
- const word = it.next() ?? break;
+ const word = it.next() orelse break;
if (mem.eql(u8, word, "-isystem")) {
- const include_path = it.next() ?? {
+ const include_path = it.next() orelse {
warn("Expected argument after -isystem in NIX_CFLAGS_COMPILE\n");
break;
};
@@ -338,9 +326,9 @@ pub const Builder = struct {
if (os.getEnvVarOwned(self.allocator, "NIX_LDFLAGS")) |nix_ldflags| {
var it = mem.split(nix_ldflags, " ");
while (true) {
- const word = it.next() ?? break;
+ const word = it.next() orelse break;
if (mem.eql(u8, word, "-rpath")) {
- const rpath = it.next() ?? {
+ const rpath = it.next() orelse {
warn("Expected argument after -rpath in NIX_LDFLAGS\n");
break;
};
@@ -358,9 +346,9 @@ pub const Builder = struct {
}
}
- pub fn option(self: &Builder, comptime T: type, name: []const u8, description: []const u8) ?T {
+ pub fn option(self: *Builder, comptime T: type, name: []const u8, description: []const u8) ?T {
const type_id = comptime typeToEnum(T);
- const available_option = AvailableOption {
+ const available_option = AvailableOption{
.name = name,
.type_id = type_id,
.description = description,
@@ -370,7 +358,7 @@ pub const Builder = struct {
}
self.available_options_list.append(available_option) catch unreachable;
- const entry = self.user_input_options.get(name) ?? return null;
+ const entry = self.user_input_options.get(name) orelse return null;
entry.value.used = true;
switch (type_id) {
TypeId.Bool => switch (entry.value.value) {
@@ -411,30 +399,24 @@ pub const Builder = struct {
}
}
- pub fn step(self: &Builder, name: []const u8, description: []const u8) &Step {
- const step_info = self.allocator.create(TopLevelStep) catch unreachable;
- *step_info = TopLevelStep {
+ pub fn step(self: *Builder, name: []const u8, description: []const u8) *Step {
+ const step_info = self.allocator.create(TopLevelStep{
.step = Step.initNoOp(name, self.allocator),
.description = description,
- };
+ }) catch unreachable;
self.top_level_steps.append(step_info) catch unreachable;
return &step_info.step;
}
- pub fn standardReleaseOptions(self: &Builder) builtin.Mode {
+ pub fn standardReleaseOptions(self: *Builder) builtin.Mode {
if (self.release_mode) |mode| return mode;
- const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") ?? false;
- const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") ?? false;
+ const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") orelse false;
+ const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") orelse false;
+ const release_small = self.option(bool, "release-small", "size optimizations on and safety off") orelse false;
- const mode = if (release_safe and !release_fast)
- builtin.Mode.ReleaseSafe
- else if (release_fast and !release_safe)
- builtin.Mode.ReleaseFast
- else if (!release_fast and !release_safe)
- builtin.Mode.Debug
- else x: {
- warn("Both -Drelease-safe and -Drelease-fast specified");
+ const mode = if (release_safe and !release_fast and !release_small) builtin.Mode.ReleaseSafe else if (release_fast and !release_safe and !release_small) builtin.Mode.ReleaseFast else if (release_small and !release_fast and !release_safe) builtin.Mode.ReleaseSmall else if (!release_fast and !release_safe and !release_small) builtin.Mode.Debug else x: {
+ warn("Multiple release modes (of -Drelease-safe, -Drelease-fast and -Drelease-small)");
self.markInvalidUserInput();
break :x builtin.Mode.Debug;
};
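
With the standardReleaseOptions change above, a build script picks up -Drelease-small alongside the existing release flags. A minimal build.zig sketch (the option names are the ones defined in the hunk above; everything else is illustrative):

    const Builder = @import("std").build.Builder;

    pub fn build(b: *Builder) void {
        // Resolves -Drelease-safe, -Drelease-fast and the new -Drelease-small,
        // reporting conflicting combinations via markInvalidUserInput().
        const mode = b.standardReleaseOptions();

        const exe = b.addExecutable("app", "src/main.zig");
        exe.setBuildMode(mode);
        b.installArtifact(exe);
        b.default_step.dependOn(&exe.step);
    }
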
@@ -442,10 +424,10 @@ pub const Builder = struct {
return mode;
}
- pub fn addUserInputOption(self: &Builder, name: []const u8, value: []const u8) bool {
- if (self.user_input_options.put(name, UserInputOption {
+ pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) bool {
+ if (self.user_input_options.put(name, UserInputOption{
.name = name,
- .value = UserValue { .Scalar = value },
+ .value = UserValue{ .Scalar = value },
.used = false,
}) catch unreachable) |*prev_value| {
// option already exists
@@ -455,18 +437,18 @@ pub const Builder = struct {
var list = ArrayList([]const u8).init(self.allocator);
list.append(s) catch unreachable;
list.append(value) catch unreachable;
- _ = self.user_input_options.put(name, UserInputOption {
+ _ = self.user_input_options.put(name, UserInputOption{
.name = name,
- .value = UserValue { .List = list },
+ .value = UserValue{ .List = list },
.used = false,
}) catch unreachable;
},
UserValue.List => |*list| {
// append to the list
list.append(value) catch unreachable;
- _ = self.user_input_options.put(name, UserInputOption {
+ _ = self.user_input_options.put(name, UserInputOption{
.name = name,
- .value = UserValue { .List = *list },
+ .value = UserValue{ .List = list.* },
.used = false,
}) catch unreachable;
},
@@ -479,10 +461,10 @@ pub const Builder = struct {
return false;
}
- pub fn addUserInputFlag(self: &Builder, name: []const u8) bool {
- if (self.user_input_options.put(name, UserInputOption {
+ pub fn addUserInputFlag(self: *Builder, name: []const u8) bool {
+ if (self.user_input_options.put(name, UserInputOption{
.name = name,
- .value = UserValue {.Flag = {} },
+ .value = UserValue{ .Flag = {} },
.used = false,
}) catch unreachable) |*prev_value| {
switch (prev_value.value) {
@@ -513,7 +495,7 @@ pub const Builder = struct {
};
}
- fn markInvalidUserInput(self: &Builder) void {
+ fn markInvalidUserInput(self: *Builder) void {
self.invalid_user_input = true;
}
@@ -527,11 +509,11 @@ pub const Builder = struct {
};
}
- pub fn validateUserInputDidItFail(self: &Builder) bool {
+ pub fn validateUserInputDidItFail(self: *Builder) bool {
// make sure all args are used
var it = self.user_input_options.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
if (!entry.value.used) {
warn("Invalid option: -D{}\n\n", entry.key);
self.markInvalidUserInput();
@@ -541,7 +523,7 @@ pub const Builder = struct {
return self.invalid_user_input;
}
- fn spawnChild(self: &Builder, argv: []const []const u8) !void {
+ fn spawnChild(self: *Builder, argv: []const []const u8) !void {
return self.spawnChildEnvMap(null, &self.env_map, argv);
}
@@ -553,9 +535,7 @@ pub const Builder = struct {
warn("\n");
}
- fn spawnChildEnvMap(self: &Builder, cwd: ?[]const u8, env_map: &const BufMap,
- argv: []const []const u8) !void
- {
+ fn spawnChildEnvMap(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) !void {
if (self.verbose) {
printCmd(cwd, argv);
}
@@ -588,51 +568,50 @@ pub const Builder = struct {
}
}
- pub fn makePath(self: &Builder, path: []const u8) !void {
+ pub fn makePath(self: *Builder, path: []const u8) !void {
os.makePath(self.allocator, self.pathFromRoot(path)) catch |err| {
warn("Unable to create path {}: {}\n", path, @errorName(err));
return err;
};
}
- pub fn installArtifact(self: &Builder, artifact: &LibExeObjStep) void {
+ pub fn installArtifact(self: *Builder, artifact: *LibExeObjStep) void {
self.getInstallStep().dependOn(&self.addInstallArtifact(artifact).step);
}
- pub fn addInstallArtifact(self: &Builder, artifact: &LibExeObjStep) &InstallArtifactStep {
+ pub fn addInstallArtifact(self: *Builder, artifact: *LibExeObjStep) *InstallArtifactStep {
return InstallArtifactStep.create(self, artifact);
}
///::dest_rel_path is relative to prefix path or it can be an absolute path
- pub fn installFile(self: &Builder, src_path: []const u8, dest_rel_path: []const u8) void {
+ pub fn installFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void {
self.getInstallStep().dependOn(&self.addInstallFile(src_path, dest_rel_path).step);
}
///::dest_rel_path is relative to prefix path or it can be an absolute path
- pub fn addInstallFile(self: &Builder, src_path: []const u8, dest_rel_path: []const u8) &InstallFileStep {
+ pub fn addInstallFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) *InstallFileStep {
const full_dest_path = os.path.resolve(self.allocator, self.prefix, dest_rel_path) catch unreachable;
self.pushInstalledFile(full_dest_path);
- const install_step = self.allocator.create(InstallFileStep) catch unreachable;
- *install_step = InstallFileStep.init(self, src_path, full_dest_path);
+ const install_step = self.allocator.create(InstallFileStep.init(self, src_path, full_dest_path)) catch unreachable;
return install_step;
}
- pub fn pushInstalledFile(self: &Builder, full_path: []const u8) void {
+ pub fn pushInstalledFile(self: *Builder, full_path: []const u8) void {
_ = self.getUninstallStep();
self.installed_files.append(full_path) catch unreachable;
}
- fn copyFile(self: &Builder, source_path: []const u8, dest_path: []const u8) !void {
+ fn copyFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void {
return self.copyFileMode(source_path, dest_path, os.default_file_mode);
}
- fn copyFileMode(self: &Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void {
+ fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void {
if (self.verbose) {
warn("cp {} {}\n", source_path, dest_path);
}
- const dirname = os.path.dirname(dest_path);
+ const dirname = os.path.dirname(dest_path) orelse ".";
const abs_source_path = self.pathFromRoot(source_path);
os.makePath(self.allocator, dirname) catch |err| {
warn("Unable to create path {}: {}\n", dirname, @errorName(err));
@@ -644,37 +623,31 @@ pub const Builder = struct {
};
}
- fn pathFromRoot(self: &Builder, rel_path: []const u8) []u8 {
+ fn pathFromRoot(self: *Builder, rel_path: []const u8) []u8 {
return os.path.resolve(self.allocator, self.build_root, rel_path) catch unreachable;
}
- pub fn fmt(self: &Builder, comptime format: []const u8, args: ...) []u8 {
+ pub fn fmt(self: *Builder, comptime format: []const u8, args: ...) []u8 {
return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable;
}
- fn getCCExe(self: &Builder) []const u8 {
+ fn getCCExe(self: *Builder) []const u8 {
if (builtin.environ == builtin.Environ.msvc) {
return "cl.exe";
} else {
- return os.getEnvVarOwned(self.allocator, "CC") catch |err|
- if (err == error.EnvironmentVariableNotFound)
- ([]const u8)("cc")
- else
- debug.panic("Unable to get environment variable: {}", err)
- ;
+ return os.getEnvVarOwned(self.allocator, "CC") catch |err| if (err == error.EnvironmentVariableNotFound) ([]const u8)("cc") else debug.panic("Unable to get environment variable: {}", err);
}
}
- pub fn findProgram(self: &Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 {
+ pub fn findProgram(self: *Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 {
// TODO report error for ambiguous situations
- const exe_extension = (Target { .Native = {}}).exeFileExt();
+ const exe_extension = (Target{ .Native = {} }).exeFileExt();
for (self.search_prefixes.toSliceConst()) |search_prefix| {
for (names) |name| {
if (os.path.isAbsolute(name)) {
return name;
}
- const full_path = try os.path.join(self.allocator, search_prefix, "bin",
- self.fmt("{}{}", name, exe_extension));
+ const full_path = try os.path.join(self.allocator, search_prefix, "bin", self.fmt("{}{}", name, exe_extension));
if (os.path.real(self.allocator, full_path)) |real_path| {
return real_path;
} else |_| {
@@ -714,7 +687,7 @@ pub const Builder = struct {
return error.FileNotFound;
}
- pub fn exec(self: &Builder, argv: []const []const u8) ![]u8 {
+ pub fn exec(self: *Builder, argv: []const []const u8) ![]u8 {
const max_output_size = 100 * 1024;
const result = try os.ChildProcess.exec(self.allocator, argv, null, null, max_output_size);
switch (result.term) {
@@ -736,7 +709,7 @@ pub const Builder = struct {
}
}
- pub fn addSearchPrefix(self: &Builder, search_prefix: []const u8) void {
+ pub fn addSearchPrefix(self: *Builder, search_prefix: []const u8) void {
self.search_prefixes.append(search_prefix) catch unreachable;
}
};
@@ -757,8 +730,8 @@ pub const Target = union(enum) {
Native: void,
Cross: CrossTarget,
- pub fn oFileExt(self: &const Target) []const u8 {
- const environ = switch (*self) {
+ pub fn oFileExt(self: *const Target) []const u8 {
+ const environ = switch (self.*) {
Target.Native => builtin.environ,
Target.Cross => |t| t.environ,
};
@@ -768,49 +741,49 @@ pub const Target = union(enum) {
};
}
- pub fn exeFileExt(self: &const Target) []const u8 {
+ pub fn exeFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".exe",
else => "",
};
}
- pub fn libFileExt(self: &const Target) []const u8 {
+ pub fn libFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".lib",
else => ".a",
};
}
- pub fn getOs(self: &const Target) builtin.Os {
- return switch (*self) {
+ pub fn getOs(self: *const Target) builtin.Os {
+ return switch (self.*) {
Target.Native => builtin.os,
Target.Cross => |t| t.os,
};
}
- pub fn isDarwin(self: &const Target) bool {
+ pub fn isDarwin(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.ios, builtin.Os.macosx => true,
else => false,
};
}
- pub fn isWindows(self: &const Target) bool {
+ pub fn isWindows(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.windows => true,
else => false,
};
}
- pub fn wantSharedLibSymLinks(self: &const Target) bool {
+ pub fn wantSharedLibSymLinks(self: *const Target) bool {
return !self.isWindows();
}
};
pub const LibExeObjStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
name: []const u8,
target: Target,
link_libs: BufSet,
@@ -834,6 +807,7 @@ pub const LibExeObjStep = struct {
disable_libc: bool,
frameworks: BufSet,
verbose_link: bool,
+ no_rosegment: bool,
// zig only stuff
root_src: ?[]const u8,
@@ -841,6 +815,7 @@ pub const LibExeObjStep = struct {
out_h_filename: []const u8,
assembly_files: ArrayList([]const u8),
packages: ArrayList(Pkg),
+ build_options_contents: std.Buffer,
// C only stuff
source_files: ArrayList([]const u8),
@@ -857,61 +832,50 @@ pub const LibExeObjStep = struct {
Obj,
};
- pub fn createSharedLibrary(builder: &Builder, name: []const u8, root_src: ?[]const u8,
- ver: &const Version) &LibExeObjStep
- {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- *self = initExtraArgs(builder, name, root_src, Kind.Lib, false, ver);
+ pub fn createSharedLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Lib, false, ver)) catch unreachable;
return self;
}
- pub fn createCSharedLibrary(builder: &Builder, name: []const u8, version: &const Version) &LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- *self = initC(builder, name, Kind.Lib, version, false);
+ pub fn createCSharedLibrary(builder: *Builder, name: []const u8, version: *const Version) *LibExeObjStep {
+ const self = builder.allocator.create(initC(builder, name, Kind.Lib, version, false)) catch unreachable;
return self;
}
- pub fn createStaticLibrary(builder: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- *self = initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0));
+ pub fn createStaticLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0))) catch unreachable;
return self;
}
- pub fn createCStaticLibrary(builder: &Builder, name: []const u8) &LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- *self = initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true);
+ pub fn createCStaticLibrary(builder: *Builder, name: []const u8) *LibExeObjStep {
+ const self = builder.allocator.create(initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true)) catch unreachable;
return self;
}
- pub fn createObject(builder: &Builder, name: []const u8, root_src: []const u8) &LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- *self = initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0));
+ pub fn createObject(builder: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0))) catch unreachable;
return self;
}
- pub fn createCObject(builder: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- *self = initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false);
+ pub fn createCObject(builder: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
+ const self = builder.allocator.create(initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false)) catch unreachable;
self.object_src = src;
return self;
}
- pub fn createExecutable(builder: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- *self = initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0));
+ pub fn createExecutable(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0))) catch unreachable;
return self;
}
- pub fn createCExecutable(builder: &Builder, name: []const u8) &LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- *self = initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false);
+ pub fn createCExecutable(builder: *Builder, name: []const u8) *LibExeObjStep {
+ const self = builder.allocator.create(initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false)) catch unreachable;
return self;
}
- fn initExtraArgs(builder: &Builder, name: []const u8, root_src: ?[]const u8, kind: Kind,
- static: bool, ver: &const Version) LibExeObjStep
- {
- var self = LibExeObjStep {
+ fn initExtraArgs(builder: *Builder, name: []const u8, root_src: ?[]const u8, kind: Kind, static: bool, ver: *const Version) LibExeObjStep {
+ var self = LibExeObjStep{
+ .no_rosegment = false,
.strip = false,
.builder = builder,
.verbose_link = false,
@@ -927,7 +891,7 @@ pub const LibExeObjStep = struct {
.step = Step.init(name, builder.allocator, make),
.output_path = null,
.output_h_path = null,
- .version = *ver,
+ .version = ver.*,
.out_filename = undefined,
.out_h_filename = builder.fmt("{}.h", name),
.major_only_filename = undefined,
@@ -944,17 +908,19 @@ pub const LibExeObjStep = struct {
.lib_paths = ArrayList([]const u8).init(builder.allocator),
.object_src = undefined,
.disable_libc = true,
+ .build_options_contents = std.Buffer.initSize(builder.allocator, 0) catch unreachable,
};
self.computeOutFileNames();
return self;
}
- fn initC(builder: &Builder, name: []const u8, kind: Kind, version: &const Version, static: bool) LibExeObjStep {
- var self = LibExeObjStep {
+ fn initC(builder: *Builder, name: []const u8, kind: Kind, version: *const Version, static: bool) LibExeObjStep {
+ var self = LibExeObjStep{
+ .no_rosegment = false,
.builder = builder,
.name = name,
.kind = kind,
- .version = *version,
+ .version = version.*,
.static = static,
.target = Target.Native,
.cflags = ArrayList([]const u8).init(builder.allocator),
@@ -984,12 +950,17 @@ pub const LibExeObjStep = struct {
.out_h_filename = undefined,
.assembly_files = undefined,
.packages = undefined,
+ .build_options_contents = undefined,
};
self.computeOutFileNames();
return self;
}
- fn computeOutFileNames(self: &LibExeObjStep) void {
+ pub fn setNoRoSegment(self: *LibExeObjStep, value: bool) void {
+ self.no_rosegment = value;
+ }
+
+ fn computeOutFileNames(self: *LibExeObjStep) void {
switch (self.kind) {
Kind.Obj => {
self.out_filename = self.builder.fmt("{}{}", self.name, self.target.oFileExt());
@@ -1003,8 +974,7 @@ pub const LibExeObjStep = struct {
} else {
switch (self.target.getOs()) {
builtin.Os.ios, builtin.Os.macosx => {
- self.out_filename = self.builder.fmt("lib{}.{d}.{d}.{d}.dylib",
- self.name, self.version.major, self.version.minor, self.version.patch);
+ self.out_filename = self.builder.fmt("lib{}.{d}.{d}.{d}.dylib", self.name, self.version.major, self.version.minor, self.version.patch);
self.major_only_filename = self.builder.fmt("lib{}.{d}.dylib", self.name, self.version.major);
self.name_only_filename = self.builder.fmt("lib{}.dylib", self.name);
},
@@ -1012,8 +982,7 @@ pub const LibExeObjStep = struct {
self.out_filename = self.builder.fmt("{}.dll", self.name);
},
else => {
- self.out_filename = self.builder.fmt("lib{}.so.{d}.{d}.{d}",
- self.name, self.version.major, self.version.minor, self.version.patch);
+ self.out_filename = self.builder.fmt("lib{}.so.{d}.{d}.{d}", self.name, self.version.major, self.version.minor, self.version.patch);
self.major_only_filename = self.builder.fmt("lib{}.so.{d}", self.name, self.version.major);
self.name_only_filename = self.builder.fmt("lib{}.so", self.name);
},
@@ -1023,30 +992,28 @@ pub const LibExeObjStep = struct {
}
}
- pub fn setTarget(self: &LibExeObjStep, target_arch: builtin.Arch, target_os: builtin.Os,
- target_environ: builtin.Environ) void
- {
- self.target = Target {
- .Cross = CrossTarget {
+ pub fn setTarget(self: *LibExeObjStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
+ self.target = Target{
+ .Cross = CrossTarget{
.arch = target_arch,
.os = target_os,
.environ = target_environ,
- }
+ },
};
self.computeOutFileNames();
}
// TODO respect this in the C args
- pub fn setLinkerScriptPath(self: &LibExeObjStep, path: []const u8) void {
+ pub fn setLinkerScriptPath(self: *LibExeObjStep, path: []const u8) void {
self.linker_script = path;
}
- pub fn linkFramework(self: &LibExeObjStep, framework_name: []const u8) void {
+ pub fn linkFramework(self: *LibExeObjStep, framework_name: []const u8) void {
assert(self.target.isDarwin());
self.frameworks.put(framework_name) catch unreachable;
}
- pub fn linkLibrary(self: &LibExeObjStep, lib: &LibExeObjStep) void {
+ pub fn linkLibrary(self: *LibExeObjStep, lib: *LibExeObjStep) void {
assert(self.kind != Kind.Obj);
assert(lib.kind == Kind.Lib);
@@ -1067,26 +1034,26 @@ pub const LibExeObjStep = struct {
}
}
- pub fn linkSystemLibrary(self: &LibExeObjStep, name: []const u8) void {
+ pub fn linkSystemLibrary(self: *LibExeObjStep, name: []const u8) void {
assert(self.kind != Kind.Obj);
self.link_libs.put(name) catch unreachable;
}
- pub fn addSourceFile(self: &LibExeObjStep, file: []const u8) void {
+ pub fn addSourceFile(self: *LibExeObjStep, file: []const u8) void {
assert(self.kind != Kind.Obj);
assert(!self.is_zig);
self.source_files.append(file) catch unreachable;
}
- pub fn setVerboseLink(self: &LibExeObjStep, value: bool) void {
+ pub fn setVerboseLink(self: *LibExeObjStep, value: bool) void {
self.verbose_link = value;
}
- pub fn setBuildMode(self: &LibExeObjStep, mode: builtin.Mode) void {
+ pub fn setBuildMode(self: *LibExeObjStep, mode: builtin.Mode) void {
self.build_mode = mode;
}
- pub fn setOutputPath(self: &LibExeObjStep, file_path: []const u8) void {
+ pub fn setOutputPath(self: *LibExeObjStep, file_path: []const u8) void {
self.output_path = file_path;
// catch a common mistake
@@ -1095,14 +1062,11 @@ pub const LibExeObjStep = struct {
}
}
- pub fn getOutputPath(self: &LibExeObjStep) []const u8 {
- return if (self.output_path) |output_path|
- output_path
- else
- os.path.join(self.builder.allocator, self.builder.cache_root, self.out_filename) catch unreachable;
+ pub fn getOutputPath(self: *LibExeObjStep) []const u8 {
+ return if (self.output_path) |output_path| output_path else os.path.join(self.builder.allocator, self.builder.cache_root, self.out_filename) catch unreachable;
}
- pub fn setOutputHPath(self: &LibExeObjStep, file_path: []const u8) void {
+ pub fn setOutputHPath(self: *LibExeObjStep, file_path: []const u8) void {
self.output_h_path = file_path;
// catch a common mistake
@@ -1111,24 +1075,21 @@ pub const LibExeObjStep = struct {
}
}
- pub fn getOutputHPath(self: &LibExeObjStep) []const u8 {
- return if (self.output_h_path) |output_h_path|
- output_h_path
- else
- os.path.join(self.builder.allocator, self.builder.cache_root, self.out_h_filename) catch unreachable;
+ pub fn getOutputHPath(self: *LibExeObjStep) []const u8 {
+ return if (self.output_h_path) |output_h_path| output_h_path else os.path.join(self.builder.allocator, self.builder.cache_root, self.out_h_filename) catch unreachable;
}
- pub fn addAssemblyFile(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addAssemblyFile(self: *LibExeObjStep, path: []const u8) void {
self.assembly_files.append(path) catch unreachable;
}
- pub fn addObjectFile(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addObjectFile(self: *LibExeObjStep, path: []const u8) void {
assert(self.kind != Kind.Obj);
self.object_files.append(path) catch unreachable;
}
- pub fn addObject(self: &LibExeObjStep, obj: &LibExeObjStep) void {
+ pub fn addObject(self: *LibExeObjStep, obj: *LibExeObjStep) void {
assert(obj.kind == Kind.Obj);
assert(self.kind != Kind.Obj);
@@ -1145,40 +1106,46 @@ pub const LibExeObjStep = struct {
self.include_dirs.append(self.builder.cache_root) catch unreachable;
}
- pub fn addIncludeDir(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addBuildOption(self: *LibExeObjStep, comptime T: type, name: []const u8, value: T) void {
+ assert(self.is_zig);
+ const out = &std.io.BufferOutStream.init(&self.build_options_contents).stream;
+ out.print("pub const {} = {};\n", name, value) catch unreachable;
+ }
+
+ pub fn addIncludeDir(self: *LibExeObjStep, path: []const u8) void {
self.include_dirs.append(path) catch unreachable;
}
- pub fn addLibPath(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addLibPath(self: *LibExeObjStep, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
}
- pub fn addPackagePath(self: &LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void {
+ pub fn addPackagePath(self: *LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void {
assert(self.is_zig);
- self.packages.append(Pkg {
+ self.packages.append(Pkg{
.name = name,
.path = pkg_index_path,
}) catch unreachable;
}
- pub fn addCompileFlags(self: &LibExeObjStep, flags: []const []const u8) void {
+ pub fn addCompileFlags(self: *LibExeObjStep, flags: []const []const u8) void {
for (flags) |flag| {
self.cflags.append(flag) catch unreachable;
}
}
- pub fn setNoStdLib(self: &LibExeObjStep, disable: bool) void {
+ pub fn setNoStdLib(self: *LibExeObjStep, disable: bool) void {
assert(!self.is_zig);
self.disable_libc = disable;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(LibExeObjStep, "step", step);
return if (self.is_zig) self.makeZig() else self.makeC();
}
- fn makeZig(self: &LibExeObjStep) !void {
+ fn makeZig(self: *LibExeObjStep) !void {
const builder = self.builder;
assert(self.is_zig);
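
addBuildOption, introduced in the hunk above, streams `pub const` declarations into build_options_contents; makeZig (next hunk) writes them out to a file and wires it up as a `build_options` package via --pkg-begin/--pkg-end. A rough usage sketch (option names are hypothetical):

    // build.zig side:
    //     exe.addBuildOption(bool, "enable_tracing", true);
    //     exe.addBuildOption(usize, "max_clients", 16);

    // application side:
    const std = @import("std");
    const build_options = @import("build_options");

    pub fn main() void {
        if (build_options.enable_tracing) {
            std.debug.warn("tracing enabled, max_clients = {}\n", build_options.max_clients);
        }
    }
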
@@ -1204,6 +1171,15 @@ pub const LibExeObjStep = struct {
zig_args.append(builder.pathFromRoot(root_src)) catch unreachable;
}
+ if (self.build_options_contents.len() > 0) {
+ const build_options_file = try os.path.join(builder.allocator, builder.cache_root, builder.fmt("{}_build_options.zig", self.name));
+ try std.io.writeFile(builder.allocator, build_options_file, self.build_options_contents.toSliceConst());
+ try zig_args.append("--pkg-begin");
+ try zig_args.append("build_options");
+ try zig_args.append(builder.pathFromRoot(build_options_file));
+ try zig_args.append("--pkg-end");
+ }
+
for (self.object_files.toSliceConst()) |object_file| {
zig_args.append("--object") catch unreachable;
zig_args.append(builder.pathFromRoot(object_file)) catch unreachable;
@@ -1229,6 +1205,7 @@ pub const LibExeObjStep = struct {
builtin.Mode.Debug => {},
builtin.Mode.ReleaseSafe => zig_args.append("--release-safe") catch unreachable,
builtin.Mode.ReleaseFast => zig_args.append("--release-fast") catch unreachable,
+ builtin.Mode.ReleaseSmall => zig_args.append("--release-small") catch unreachable,
}
zig_args.append("--cache-dir") catch unreachable;
@@ -1280,7 +1257,7 @@ pub const LibExeObjStep = struct {
{
var it = self.link_libs.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
zig_args.append("--library") catch unreachable;
zig_args.append(entry.key) catch unreachable;
}
@@ -1336,15 +1313,18 @@ pub const LibExeObjStep = struct {
}
}
+ if (self.no_rosegment) {
+ try zig_args.append("--no-rosegment");
+ }
+
try builder.spawnChild(zig_args.toSliceConst());
if (self.kind == Kind.Lib and !self.static and self.target.wantSharedLibSymLinks()) {
- try doAtomicSymLinks(builder.allocator, output_path, self.major_only_filename,
- self.name_only_filename);
+ try doAtomicSymLinks(builder.allocator, output_path, self.major_only_filename, self.name_only_filename);
}
}
- fn appendCompileFlags(self: &LibExeObjStep, args: &ArrayList([]const u8)) void {
+ fn appendCompileFlags(self: *LibExeObjStep, args: *ArrayList([]const u8)) void {
if (!self.strip) {
args.append("-g") catch unreachable;
}
@@ -1369,7 +1349,7 @@ pub const LibExeObjStep = struct {
args.append("ssp-buffer-size=4") catch unreachable;
}
},
- builtin.Mode.ReleaseFast => {
+ builtin.Mode.ReleaseFast, builtin.Mode.ReleaseSmall => {
args.append("-O2") catch unreachable;
args.append("-fno-stack-protector") catch unreachable;
},
@@ -1389,7 +1369,7 @@ pub const LibExeObjStep = struct {
}
}
- fn makeC(self: &LibExeObjStep) !void {
+ fn makeC(self: *LibExeObjStep) !void {
const builder = self.builder;
const cc = builder.getCCExe();
@@ -1430,8 +1410,9 @@ pub const LibExeObjStep = struct {
cc_args.append(abs_source_file) catch unreachable;
const cache_o_src = os.path.join(builder.allocator, builder.cache_root, source_file) catch unreachable;
- const cache_o_dir = os.path.dirname(cache_o_src);
- try builder.makePath(cache_o_dir);
+ if (os.path.dirname(cache_o_src)) |cache_o_dir| {
+ try builder.makePath(cache_o_dir);
+ }
const cache_o_file = builder.fmt("{}{}", cache_o_src, self.target.oFileExt());
cc_args.append("-o") catch unreachable;
cc_args.append(builder.pathFromRoot(cache_o_file)) catch unreachable;
@@ -1501,8 +1482,7 @@ pub const LibExeObjStep = struct {
}
if (!is_darwin) {
- const rpath_arg = builder.fmt("-Wl,-rpath,{}",
- os.path.real(builder.allocator, builder.pathFromRoot(builder.cache_root)) catch unreachable);
+ const rpath_arg = builder.fmt("-Wl,-rpath,{}", os.path.real(builder.allocator, builder.pathFromRoot(builder.cache_root)) catch unreachable);
defer builder.allocator.free(rpath_arg);
cc_args.append(rpath_arg) catch unreachable;
@@ -1531,8 +1511,7 @@ pub const LibExeObjStep = struct {
try builder.spawnChild(cc_args.toSliceConst());
if (self.target.wantSharedLibSymLinks()) {
- try doAtomicSymLinks(builder.allocator, output_path, self.major_only_filename,
- self.name_only_filename);
+ try doAtomicSymLinks(builder.allocator, output_path, self.major_only_filename, self.name_only_filename);
}
}
},
@@ -1546,8 +1525,9 @@ pub const LibExeObjStep = struct {
cc_args.append(abs_source_file) catch unreachable;
const cache_o_src = os.path.join(builder.allocator, builder.cache_root, source_file) catch unreachable;
- const cache_o_dir = os.path.dirname(cache_o_src);
- try builder.makePath(cache_o_dir);
+ if (os.path.dirname(cache_o_src)) |cache_o_dir| {
+ try builder.makePath(cache_o_dir);
+ }
const cache_o_file = builder.fmt("{}{}", cache_o_src, self.target.oFileExt());
cc_args.append("-o") catch unreachable;
cc_args.append(builder.pathFromRoot(cache_o_file)) catch unreachable;
@@ -1577,8 +1557,7 @@ pub const LibExeObjStep = struct {
cc_args.append("-o") catch unreachable;
cc_args.append(output_path) catch unreachable;
- const rpath_arg = builder.fmt("-Wl,-rpath,{}",
- os.path.real(builder.allocator, builder.pathFromRoot(builder.cache_root)) catch unreachable);
+ const rpath_arg = builder.fmt("-Wl,-rpath,{}", os.path.real(builder.allocator, builder.pathFromRoot(builder.cache_root)) catch unreachable);
defer builder.allocator.free(rpath_arg);
cc_args.append(rpath_arg) catch unreachable;
@@ -1618,7 +1597,7 @@ pub const LibExeObjStep = struct {
pub const TestStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
root_src: []const u8,
build_mode: builtin.Mode,
verbose: bool,
@@ -1628,10 +1607,13 @@ pub const TestStep = struct {
target: Target,
exec_cmd_args: ?[]const ?[]const u8,
include_dirs: ArrayList([]const u8),
+ lib_paths: ArrayList([]const u8),
+ object_files: ArrayList([]const u8),
+ no_rosegment: bool,
- pub fn init(builder: &Builder, root_src: []const u8) TestStep {
+ pub fn init(builder: *Builder, root_src: []const u8) TestStep {
const step_name = builder.fmt("test {}", root_src);
- return TestStep {
+ return TestStep{
.step = Step.init(step_name, builder.allocator, make),
.builder = builder,
.root_src = root_src,
@@ -1640,53 +1622,66 @@ pub const TestStep = struct {
.name_prefix = "",
.filter = null,
.link_libs = BufSet.init(builder.allocator),
- .target = Target { .Native = {} },
+ .target = Target{ .Native = {} },
.exec_cmd_args = null,
.include_dirs = ArrayList([]const u8).init(builder.allocator),
+ .lib_paths = ArrayList([]const u8).init(builder.allocator),
+ .object_files = ArrayList([]const u8).init(builder.allocator),
+ .no_rosegment = false,
};
}
- pub fn setVerbose(self: &TestStep, value: bool) void {
+ pub fn setNoRoSegment(self: *TestStep, value: bool) void {
+ self.no_rosegment = value;
+ }
+
+ pub fn addLibPath(self: *TestStep, path: []const u8) void {
+ self.lib_paths.append(path) catch unreachable;
+ }
+
+ pub fn setVerbose(self: *TestStep, value: bool) void {
self.verbose = value;
}
- pub fn addIncludeDir(self: &TestStep, path: []const u8) void {
+ pub fn addIncludeDir(self: *TestStep, path: []const u8) void {
self.include_dirs.append(path) catch unreachable;
}
- pub fn setBuildMode(self: &TestStep, mode: builtin.Mode) void {
+ pub fn setBuildMode(self: *TestStep, mode: builtin.Mode) void {
self.build_mode = mode;
}
- pub fn linkSystemLibrary(self: &TestStep, name: []const u8) void {
+ pub fn linkSystemLibrary(self: *TestStep, name: []const u8) void {
self.link_libs.put(name) catch unreachable;
}
- pub fn setNamePrefix(self: &TestStep, text: []const u8) void {
+ pub fn setNamePrefix(self: *TestStep, text: []const u8) void {
self.name_prefix = text;
}
- pub fn setFilter(self: &TestStep, text: ?[]const u8) void {
+ pub fn setFilter(self: *TestStep, text: ?[]const u8) void {
self.filter = text;
}
- pub fn setTarget(self: &TestStep, target_arch: builtin.Arch, target_os: builtin.Os,
- target_environ: builtin.Environ) void
- {
- self.target = Target {
- .Cross = CrossTarget {
+ pub fn addObjectFile(self: *TestStep, path: []const u8) void {
+ self.object_files.append(path) catch unreachable;
+ }
+
+ pub fn setTarget(self: *TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
+ self.target = Target{
+ .Cross = CrossTarget{
.arch = target_arch,
.os = target_os,
.environ = target_environ,
- }
+ },
};
}
- pub fn setExecCmd(self: &TestStep, args: []const ?[]const u8) void {
+ pub fn setExecCmd(self: *TestStep, args: []const ?[]const u8) void {
self.exec_cmd_args = args;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(TestStep, "step", step);
const builder = self.builder;
@@ -1706,6 +1701,7 @@ pub const TestStep = struct {
builtin.Mode.Debug => {},
builtin.Mode.ReleaseSafe => try zig_args.append("--release-safe"),
builtin.Mode.ReleaseFast => try zig_args.append("--release-fast"),
+ builtin.Mode.ReleaseSmall => try zig_args.append("--release-small"),
}
switch (self.target) {
@@ -1732,10 +1728,15 @@ pub const TestStep = struct {
try zig_args.append(self.name_prefix);
}
+ for (self.object_files.toSliceConst()) |object_file| {
+ try zig_args.append("--object");
+ try zig_args.append(builder.pathFromRoot(object_file));
+ }
+
{
var it = self.link_libs.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
try zig_args.append("--library");
try zig_args.append(entry.key);
}
@@ -1767,40 +1768,47 @@ pub const TestStep = struct {
try zig_args.append(rpath);
}
+ for (self.lib_paths.toSliceConst()) |lib_path| {
+ try zig_args.append("--library-path");
+ try zig_args.append(lib_path);
+ }
+
for (builder.lib_paths.toSliceConst()) |lib_path| {
try zig_args.append("--library-path");
try zig_args.append(lib_path);
}
+ if (self.no_rosegment) {
+ try zig_args.append("--no-rosegment");
+ }
+
try builder.spawnChild(zig_args.toSliceConst());
}
};
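
Taken together, the TestStep additions above (addObjectFile, addLibPath, setNoRoSegment, plus ReleaseSmall support) can be driven from a build script roughly like this (paths and names hypothetical):

    const Builder = @import("std").build.Builder;

    pub fn build(b: *Builder) void {
        const tests = b.addTest("src/all_tests.zig");
        tests.setBuildMode(b.standardReleaseOptions());
        tests.addObjectFile("deps/helper.o"); // passed through as --object
        tests.addLibPath("/opt/foo/lib");     // passed through as --library-path
        tests.setNoRoSegment(true);           // passed through as --no-rosegment

        const test_step = b.step("test", "Run all tests");
        test_step.dependOn(&tests.step);
    }
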
pub const CommandStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
argv: [][]const u8,
cwd: ?[]const u8,
- env_map: &const BufMap,
+ env_map: *const BufMap,
/// ::argv is copied.
- pub fn create(builder: &Builder, cwd: ?[]const u8, env_map: &const BufMap,
- argv: []const []const u8) &CommandStep
- {
- const self = builder.allocator.create(CommandStep) catch unreachable;
- *self = CommandStep {
+ pub fn create(builder: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
+ const self = builder.allocator.create(CommandStep{
.builder = builder,
.step = Step.init(argv[0], builder.allocator, make),
.argv = builder.allocator.alloc([]u8, argv.len) catch unreachable,
.cwd = cwd,
.env_map = env_map,
- };
+ }) catch unreachable;
+
mem.copy([]const u8, self.argv, argv);
self.step.name = self.argv[0];
return self;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(CommandStep, "step", step);
const cwd = if (self.cwd) |cwd| self.builder.pathFromRoot(cwd) else self.builder.build_root;
@@ -1810,37 +1818,34 @@ pub const CommandStep = struct {
const InstallArtifactStep = struct {
step: Step,
- builder: &Builder,
- artifact: &LibExeObjStep,
+ builder: *Builder,
+ artifact: *LibExeObjStep,
dest_file: []const u8,
const Self = this;
- pub fn create(builder: &Builder, artifact: &LibExeObjStep) &Self {
- const self = builder.allocator.create(Self) catch unreachable;
+ pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
const dest_dir = switch (artifact.kind) {
LibExeObjStep.Kind.Obj => unreachable,
LibExeObjStep.Kind.Exe => builder.exe_dir,
LibExeObjStep.Kind.Lib => builder.lib_dir,
};
- *self = Self {
+ const self = builder.allocator.create(Self{
.builder = builder,
.step = Step.init(builder.fmt("install {}", artifact.step.name), builder.allocator, make),
.artifact = artifact,
.dest_file = os.path.join(builder.allocator, dest_dir, artifact.out_filename) catch unreachable,
- };
+ }) catch unreachable;
self.step.dependOn(&artifact.step);
builder.pushInstalledFile(self.dest_file);
if (self.artifact.kind == LibExeObjStep.Kind.Lib and !self.artifact.static) {
- builder.pushInstalledFile(os.path.join(builder.allocator, builder.lib_dir,
- artifact.major_only_filename) catch unreachable);
- builder.pushInstalledFile(os.path.join(builder.allocator, builder.lib_dir,
- artifact.name_only_filename) catch unreachable);
+ builder.pushInstalledFile(os.path.join(builder.allocator, builder.lib_dir, artifact.major_only_filename) catch unreachable);
+ builder.pushInstalledFile(os.path.join(builder.allocator, builder.lib_dir, artifact.name_only_filename) catch unreachable);
}
return self;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(Self, "step", step);
const builder = self.builder;
@@ -1854,20 +1859,19 @@ const InstallArtifactStep = struct {
};
try builder.copyFileMode(self.artifact.getOutputPath(), self.dest_file, mode);
if (self.artifact.kind == LibExeObjStep.Kind.Lib and !self.artifact.static) {
- try doAtomicSymLinks(builder.allocator, self.dest_file,
- self.artifact.major_only_filename, self.artifact.name_only_filename);
+ try doAtomicSymLinks(builder.allocator, self.dest_file, self.artifact.major_only_filename, self.artifact.name_only_filename);
}
}
};
pub const InstallFileStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
src_path: []const u8,
dest_path: []const u8,
- pub fn init(builder: &Builder, src_path: []const u8, dest_path: []const u8) InstallFileStep {
- return InstallFileStep {
+ pub fn init(builder: *Builder, src_path: []const u8, dest_path: []const u8) InstallFileStep {
+ return InstallFileStep{
.builder = builder,
.step = Step.init(builder.fmt("install {}", src_path), builder.allocator, make),
.src_path = src_path,
@@ -1875,7 +1879,7 @@ pub const InstallFileStep = struct {
};
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(InstallFileStep, "step", step);
try self.builder.copyFile(self.src_path, self.dest_path);
}
@@ -1883,12 +1887,12 @@ pub const InstallFileStep = struct {
pub const WriteFileStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
file_path: []const u8,
data: []const u8,
- pub fn init(builder: &Builder, file_path: []const u8, data: []const u8) WriteFileStep {
- return WriteFileStep {
+ pub fn init(builder: *Builder, file_path: []const u8, data: []const u8) WriteFileStep {
+ return WriteFileStep{
.builder = builder,
.step = Step.init(builder.fmt("writefile {}", file_path), builder.allocator, make),
.file_path = file_path,
@@ -1896,10 +1900,10 @@ pub const WriteFileStep = struct {
};
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(WriteFileStep, "step", step);
const full_path = self.builder.pathFromRoot(self.file_path);
- const full_path_dir = os.path.dirname(full_path);
+ const full_path_dir = os.path.dirname(full_path) orelse ".";
os.makePath(self.builder.allocator, full_path_dir) catch |err| {
warn("unable to make path {}: {}\n", full_path_dir, @errorName(err));
return err;
@@ -1913,18 +1917,18 @@ pub const WriteFileStep = struct {
pub const LogStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
data: []const u8,
- pub fn init(builder: &Builder, data: []const u8) LogStep {
- return LogStep {
+ pub fn init(builder: *Builder, data: []const u8) LogStep {
+ return LogStep{
.builder = builder,
.step = Step.init(builder.fmt("log {}", data), builder.allocator, make),
.data = data,
};
}
- fn make(step: &Step) error!void {
+ fn make(step: *Step) error!void {
const self = @fieldParentPtr(LogStep, "step", step);
warn("{}", self.data);
}
@@ -1932,18 +1936,18 @@ pub const LogStep = struct {
pub const RemoveDirStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
dir_path: []const u8,
- pub fn init(builder: &Builder, dir_path: []const u8) RemoveDirStep {
- return RemoveDirStep {
+ pub fn init(builder: *Builder, dir_path: []const u8) RemoveDirStep {
+ return RemoveDirStep{
.builder = builder,
.step = Step.init(builder.fmt("RemoveDir {}", dir_path), builder.allocator, make),
.dir_path = dir_path,
};
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(RemoveDirStep, "step", step);
const full_path = self.builder.pathFromRoot(self.dir_path);
@@ -1956,43 +1960,40 @@ pub const RemoveDirStep = struct {
pub const Step = struct {
name: []const u8,
- makeFn: fn(self: &Step) error!void,
- dependencies: ArrayList(&Step),
+ makeFn: fn (self: *Step) error!void,
+ dependencies: ArrayList(*Step),
loop_flag: bool,
done_flag: bool,
- pub fn init(name: []const u8, allocator: &Allocator, makeFn: fn (&Step)error!void) Step {
- return Step {
+ pub fn init(name: []const u8, allocator: *Allocator, makeFn: fn (*Step) error!void) Step {
+ return Step{
.name = name,
.makeFn = makeFn,
- .dependencies = ArrayList(&Step).init(allocator),
+ .dependencies = ArrayList(*Step).init(allocator),
.loop_flag = false,
.done_flag = false,
};
}
- pub fn initNoOp(name: []const u8, allocator: &Allocator) Step {
+ pub fn initNoOp(name: []const u8, allocator: *Allocator) Step {
return init(name, allocator, makeNoOp);
}
- pub fn make(self: &Step) !void {
- if (self.done_flag)
- return;
+ pub fn make(self: *Step) !void {
+ if (self.done_flag) return;
try self.makeFn(self);
self.done_flag = true;
}
- pub fn dependOn(self: &Step, other: &Step) void {
+ pub fn dependOn(self: *Step, other: *Step) void {
self.dependencies.append(other) catch unreachable;
}
- fn makeNoOp(self: &Step) error!void {}
+ fn makeNoOp(self: *Step) error!void {}
};
-fn doAtomicSymLinks(allocator: &Allocator, output_path: []const u8, filename_major_only: []const u8,
- filename_name_only: []const u8) !void
-{
- const out_dir = os.path.dirname(output_path);
+fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
+ const out_dir = os.path.dirname(output_path) orelse ".";
const out_basename = os.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
const major_only_path = os.path.join(allocator, out_dir, filename_major_only) catch unreachable;
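
A recurring mechanical change in std/build.zig above is the switch from the old two-step allocation (`create(T)` followed by `*ptr = value`) to a single call to Allocator.create that takes an initializer and returns a pointer to a copy of it. Schematically (a sketch under that assumption, not code from this patch):

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    const Thing = struct {
        id: u32,
    };

    fn makeThing(allocator: *Allocator, id: u32) *Thing {
        // old pattern:
        //     const t = allocator.create(Thing) catch unreachable;
        //     *t = Thing { .id = id };
        // new pattern: create() copies the initializer into freshly allocated memory.
        return allocator.create(Thing{ .id = id }) catch unreachable;
    }
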
diff --git a/std/c/darwin.zig b/std/c/darwin.zig
index feb689cdc5..1bd1d6c4c9 100644
--- a/std/c/darwin.zig
+++ b/std/c/darwin.zig
@@ -1,12 +1,54 @@
-extern "c" fn __error() &c_int;
-pub extern "c" fn _NSGetExecutablePath(buf: &u8, bufsize: &u32) c_int;
+extern "c" fn __error() *c_int;
+pub extern "c" fn _NSGetExecutablePath(buf: [*]u8, bufsize: *u32) c_int;
-pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: &u8, buf_len: usize, basep: &i64) usize;
+pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: [*]u8, buf_len: usize, basep: *i64) usize;
-pub use @import("../os/darwin_errno.zig");
+pub extern "c" fn mach_absolute_time() u64;
+pub extern "c" fn mach_timebase_info(tinfo: ?*mach_timebase_info_data) void;
+
+pub extern "c" fn kqueue() c_int;
+pub extern "c" fn kevent(
+ kq: c_int,
+ changelist: [*]const Kevent,
+ nchanges: c_int,
+ eventlist: [*]Kevent,
+ nevents: c_int,
+ timeout: ?*const timespec,
+) c_int;
+
+pub extern "c" fn kevent64(
+ kq: c_int,
+ changelist: [*]const kevent64_s,
+ nchanges: c_int,
+ eventlist: [*]kevent64_s,
+ nevents: c_int,
+ flags: c_uint,
+ timeout: ?*const timespec,
+) c_int;
+
+pub extern "c" fn sysctl(name: [*]c_int, namelen: c_uint, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) c_int;
+pub extern "c" fn sysctlbyname(name: [*]const u8, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) c_int;
+pub extern "c" fn sysctlnametomib(name: [*]const u8, mibp: ?*c_int, sizep: ?*usize) c_int;
+
+pub use @import("../os/darwin/errno.zig");
pub const _errno = __error;
+pub const timeval = extern struct {
+ tv_sec: isize,
+ tv_usec: isize,
+};
+
+pub const timezone = extern struct {
+ tz_minuteswest: i32,
+ tz_dsttime: i32,
+};
+
+pub const mach_timebase_info_data = extern struct {
+ numer: u32,
+ denom: u32,
+};
+
/// Renamed to Stat to not conflict with the stat function.
pub const Stat = extern struct {
dev: i32,
@@ -42,7 +84,7 @@ pub const sigset_t = u32;
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with function name.
pub const Sigaction = extern struct {
- handler: extern fn(c_int)void,
+ handler: extern fn (c_int) void,
sa_mask: sigset_t,
sa_flags: c_int,
};
@@ -63,3 +105,56 @@ pub const sockaddr = extern struct {
};
pub const sa_family_t = u8;
+
+pub const pthread_attr_t = extern struct {
+ __sig: c_long,
+ __opaque: [56]u8,
+};
+
+/// Renamed from `kevent` to `Kevent` to avoid conflict with function name.
+pub const Kevent = extern struct {
+ ident: usize,
+ filter: i16,
+ flags: u16,
+ fflags: u32,
+ data: isize,
+ udata: usize,
+};
+
+// sys/types.h on macos uses #pragma pack(4) so these checks are
+// to make sure the struct is laid out the same. These values were
+// produced from C code using the offsetof macro.
+const std = @import("../index.zig");
+const assert = std.debug.assert;
+
+comptime {
+ assert(@offsetOf(Kevent, "ident") == 0);
+ assert(@offsetOf(Kevent, "filter") == 8);
+ assert(@offsetOf(Kevent, "flags") == 10);
+ assert(@offsetOf(Kevent, "fflags") == 12);
+ assert(@offsetOf(Kevent, "data") == 16);
+ assert(@offsetOf(Kevent, "udata") == 24);
+}
+
+pub const kevent64_s = extern struct {
+ ident: u64,
+ filter: i16,
+ flags: u16,
+ fflags: u32,
+ data: i64,
+ udata: u64,
+ ext: [2]u64,
+};
+
+// sys/types.h on macos uses #pragma pack() so these checks are
+// to make sure the struct is laid out the same. These values were
+// produced from C code using the offsetof macro.
+comptime {
+ assert(@offsetOf(kevent64_s, "ident") == 0);
+ assert(@offsetOf(kevent64_s, "filter") == 8);
+ assert(@offsetOf(kevent64_s, "flags") == 10);
+ assert(@offsetOf(kevent64_s, "fflags") == 12);
+ assert(@offsetOf(kevent64_s, "data") == 16);
+ assert(@offsetOf(kevent64_s, "udata") == 24);
+ assert(@offsetOf(kevent64_s, "ext") == 32);
+}
diff --git a/std/c/index.zig b/std/c/index.zig
index 02321f1f34..7de8634d07 100644
--- a/std/c/index.zig
+++ b/std/c/index.zig
@@ -1,7 +1,7 @@
const builtin = @import("builtin");
const Os = builtin.Os;
-pub use switch(builtin.os) {
+pub use switch (builtin.os) {
Os.linux => @import("linux.zig"),
Os.windows => @import("windows.zig"),
Os.macosx, Os.ios => @import("darwin.zig"),
@@ -9,46 +9,55 @@ pub use switch(builtin.os) {
};
const empty_import = @import("../empty.zig");
+// TODO https://github.com/ziglang/zig/issues/265 on this whole file
+
pub extern "c" fn abort() noreturn;
pub extern "c" fn exit(code: c_int) noreturn;
pub extern "c" fn isatty(fd: c_int) c_int;
pub extern "c" fn close(fd: c_int) c_int;
-pub extern "c" fn fstat(fd: c_int, buf: &Stat) c_int;
-pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: &Stat) c_int;
+pub extern "c" fn fstat(fd: c_int, buf: *Stat) c_int;
+pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: *Stat) c_int;
pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize;
-pub extern "c" fn open(path: &const u8, oflag: c_int, ...) c_int;
+pub extern "c" fn open(path: [*]const u8, oflag: c_int, ...) c_int;
pub extern "c" fn raise(sig: c_int) c_int;
-pub extern "c" fn read(fd: c_int, buf: &c_void, nbyte: usize) isize;
-pub extern "c" fn stat(noalias path: &const u8, noalias buf: &Stat) c_int;
-pub extern "c" fn write(fd: c_int, buf: &const c_void, nbyte: usize) isize;
-pub extern "c" fn mmap(addr: ?&c_void, len: usize, prot: c_int, flags: c_int,
- fd: c_int, offset: isize) ?&c_void;
-pub extern "c" fn munmap(addr: &c_void, len: usize) c_int;
-pub extern "c" fn unlink(path: &const u8) c_int;
-pub extern "c" fn getcwd(buf: &u8, size: usize) ?&u8;
-pub extern "c" fn waitpid(pid: c_int, stat_loc: &c_int, options: c_int) c_int;
+pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize;
+pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int;
+pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize;
+pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void;
+pub extern "c" fn munmap(addr: *c_void, len: usize) c_int;
+pub extern "c" fn unlink(path: [*]const u8) c_int;
+pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8;
+pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_int, options: c_int) c_int;
pub extern "c" fn fork() c_int;
-pub extern "c" fn access(path: &const u8, mode: c_uint) c_int;
-pub extern "c" fn pipe(fds: &c_int) c_int;
-pub extern "c" fn mkdir(path: &const u8, mode: c_uint) c_int;
-pub extern "c" fn symlink(existing: &const u8, new: &const u8) c_int;
-pub extern "c" fn rename(old: &const u8, new: &const u8) c_int;
-pub extern "c" fn chdir(path: &const u8) c_int;
-pub extern "c" fn execve(path: &const u8, argv: &const ?&const u8,
- envp: &const ?&const u8) c_int;
+pub extern "c" fn access(path: [*]const u8, mode: c_uint) c_int;
+pub extern "c" fn pipe(fds: *[2]c_int) c_int;
+pub extern "c" fn mkdir(path: [*]const u8, mode: c_uint) c_int;
+pub extern "c" fn symlink(existing: [*]const u8, new: [*]const u8) c_int;
+pub extern "c" fn rename(old: [*]const u8, new: [*]const u8) c_int;
+pub extern "c" fn chdir(path: [*]const u8) c_int;
+pub extern "c" fn execve(path: [*]const u8, argv: [*]const ?[*]const u8, envp: [*]const ?[*]const u8) c_int;
pub extern "c" fn dup(fd: c_int) c_int;
pub extern "c" fn dup2(old_fd: c_int, new_fd: c_int) c_int;
-pub extern "c" fn readlink(noalias path: &const u8, noalias buf: &u8, bufsize: usize) isize;
-pub extern "c" fn realpath(noalias file_name: &const u8, noalias resolved_name: &u8) ?&u8;
-pub extern "c" fn sigprocmask(how: c_int, noalias set: &const sigset_t, noalias oset: ?&sigset_t) c_int;
-pub extern "c" fn sigaction(sig: c_int, noalias act: &const Sigaction, noalias oact: ?&Sigaction) c_int;
-pub extern "c" fn nanosleep(rqtp: &const timespec, rmtp: ?×pec) c_int;
+pub extern "c" fn readlink(noalias path: [*]const u8, noalias buf: [*]u8, bufsize: usize) isize;
+pub extern "c" fn realpath(noalias file_name: [*]const u8, noalias resolved_name: [*]u8) ?[*]u8;
+pub extern "c" fn sigprocmask(how: c_int, noalias set: *const sigset_t, noalias oset: ?*sigset_t) c_int;
+pub extern "c" fn gettimeofday(tv: ?*timeval, tz: ?*timezone) c_int;
+pub extern "c" fn sigaction(sig: c_int, noalias act: *const Sigaction, noalias oact: ?*Sigaction) c_int;
+pub extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
pub extern "c" fn setreuid(ruid: c_uint, euid: c_uint) c_int;
pub extern "c" fn setregid(rgid: c_uint, egid: c_uint) c_int;
-pub extern "c" fn rmdir(path: &const u8) c_int;
+pub extern "c" fn rmdir(path: [*]const u8) c_int;
-pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?&c_void;
-pub extern "c" fn malloc(usize) ?&c_void;
-pub extern "c" fn realloc(&c_void, usize) ?&c_void;
-pub extern "c" fn free(&c_void) void;
-pub extern "c" fn posix_memalign(memptr: &&c_void, alignment: usize, size: usize) c_int;
+pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void;
+pub extern "c" fn malloc(usize) ?*c_void;
+pub extern "c" fn realloc(*c_void, usize) ?*c_void;
+pub extern "c" fn free(*c_void) void;
+pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int;
+
+pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const pthread_attr_t, start_routine: extern fn (?*c_void) ?*c_void, noalias arg: ?*c_void) c_int;
+pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int;
+pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int;
+pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int;
+pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int;
+
+pub const pthread_t = *@OpaqueType();
diff --git a/std/c/linux.zig b/std/c/linux.zig
index b2ac05eba5..2699e9bd09 100644
--- a/std/c/linux.zig
+++ b/std/c/linux.zig
@@ -1,5 +1,10 @@
pub use @import("../os/linux/errno.zig");
-pub extern "c" fn getrandom(buf_ptr: &u8, buf_len: usize, flags: c_uint) c_int;
-extern "c" fn __errno_location() &c_int;
+pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) c_int;
+extern "c" fn __errno_location() *c_int;
pub const _errno = __errno_location;
+
+pub const pthread_attr_t = extern struct {
+ __size: [56]u8,
+ __align: c_long,
+};
diff --git a/std/c/windows.zig b/std/c/windows.zig
index 6e8b17eda8..35ca217131 100644
--- a/std/c/windows.zig
+++ b/std/c/windows.zig
@@ -1 +1 @@
-pub extern "c" fn _errno() &c_int;
+pub extern "c" fn _errno() *c_int;
diff --git a/std/crypto/blake2.zig b/std/crypto/blake2.zig
index 99f0e629cd..947133e4cf 100644
--- a/std/crypto/blake2.zig
+++ b/std/crypto/blake2.zig
@@ -6,11 +6,23 @@ const builtin = @import("builtin");
const htest = @import("test.zig");
const RoundParam = struct {
- a: usize, b: usize, c: usize, d: usize, x: usize, y: usize,
+ a: usize,
+ b: usize,
+ c: usize,
+ d: usize,
+ x: usize,
+ y: usize,
};
fn Rp(a: usize, b: usize, c: usize, d: usize, x: usize, y: usize) RoundParam {
- return RoundParam { .a = a, .b = b, .c = c, .d = d, .x = x, .y = y, };
+ return RoundParam{
+ .a = a,
+ .b = b,
+ .c = c,
+ .d = d,
+ .x = x,
+ .y = y,
+ };
}
/////////////////////
@@ -19,145 +31,153 @@ fn Rp(a: usize, b: usize, c: usize, d: usize, x: usize, y: usize) RoundParam {
pub const Blake2s224 = Blake2s(224);
pub const Blake2s256 = Blake2s(256);
-fn Blake2s(comptime out_len: usize) type { return struct {
- const Self = this;
- const block_size = 64;
- const digest_size = out_len / 8;
+fn Blake2s(comptime out_len: usize) type {
+ return struct {
+ const Self = this;
+ const block_size = 64;
+ const digest_size = out_len / 8;
- const iv = [8]u32 {
- 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
- 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
- };
+ const iv = [8]u32{
+ 0x6A09E667,
+ 0xBB67AE85,
+ 0x3C6EF372,
+ 0xA54FF53A,
+ 0x510E527F,
+ 0x9B05688C,
+ 0x1F83D9AB,
+ 0x5BE0CD19,
+ };
- const sigma = [10][16]u8 {
- []const u8 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
- []const u8 { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
- []const u8 { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
- []const u8 { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
- []const u8 { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
- []const u8 { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
- []const u8 { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
- []const u8 { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
- []const u8 { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
- []const u8 { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
- };
+ const sigma = [10][16]u8{
+ []const u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ []const u8{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
+ []const u8{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
+ []const u8{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
+ []const u8{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
+ []const u8{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
+ []const u8{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
+ []const u8{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
+ []const u8{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
+ []const u8{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
+ };
- h: [8]u32,
- t: u64,
- // Streaming cache
- buf: [64]u8,
- buf_len: u8,
+ h: [8]u32,
+ t: u64,
+ // Streaming cache
+ buf: [64]u8,
+ buf_len: u8,
- pub fn init() Self {
- debug.assert(8 <= out_len and out_len <= 512);
+ pub fn init() Self {
+ debug.assert(8 <= out_len and out_len <= 512);
- var s: Self = undefined;
- s.reset();
- return s;
- }
+ var s: Self = undefined;
+ s.reset();
+ return s;
+ }
- pub fn reset(d: &Self) void {
- mem.copy(u32, d.h[0..], iv[0..]);
+ pub fn reset(d: *Self) void {
+ mem.copy(u32, d.h[0..], iv[0..]);
- // No key plus default parameters
- d.h[0] ^= 0x01010000 ^ u32(out_len >> 3);
- d.t = 0;
- d.buf_len = 0;
- }
-
- pub fn hash(b: []const u8, out: []u8) void {
- var d = Self.init();
- d.update(b);
- d.final(out);
- }
-
- pub fn update(d: &Self, b: []const u8) void {
- var off: usize = 0;
-
- // Partial buffer exists from previous update. Copy into buffer then hash.
- if (d.buf_len != 0 and d.buf_len + b.len > 64) {
- off += 64 - d.buf_len;
- mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
- d.t += 64;
- d.round(d.buf[0..], false);
+ // No key plus default parameters
+ d.h[0] ^= 0x01010000 ^ @intCast(u32, out_len >> 3);
+ d.t = 0;
d.buf_len = 0;
}
- // Full middle blocks.
- while (off + 64 <= b.len) : (off += 64) {
- d.t += 64;
- d.round(b[off..off + 64], false);
+ pub fn hash(b: []const u8, out: []u8) void {
+ var d = Self.init();
+ d.update(b);
+ d.final(out);
}
- // Copy any remainder for next pass.
- mem.copy(u8, d.buf[d.buf_len..], b[off..]);
- d.buf_len += u8(b[off..].len);
- }
+ pub fn update(d: *Self, b: []const u8) void {
+ var off: usize = 0;
- pub fn final(d: &Self, out: []u8) void {
- debug.assert(out.len >= out_len / 8);
+ // Partial buffer exists from previous update. Copy into buffer then hash.
+ if (d.buf_len != 0 and d.buf_len + b.len > 64) {
+ off += 64 - d.buf_len;
+ mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
+ d.t += 64;
+ d.round(d.buf[0..], false);
+ d.buf_len = 0;
+ }
- mem.set(u8, d.buf[d.buf_len..], 0);
- d.t += d.buf_len;
- d.round(d.buf[0..], true);
+ // Full middle blocks.
+ while (off + 64 <= b.len) : (off += 64) {
+ d.t += 64;
+ d.round(b[off .. off + 64], false);
+ }
- const rr = d.h[0 .. out_len / 32];
-
- for (rr) |s, j| {
- mem.writeInt(out[4*j .. 4*j + 4], s, builtin.Endian.Little);
- }
- }
-
- fn round(d: &Self, b: []const u8, last: bool) void {
- debug.assert(b.len == 64);
-
- var m: [16]u32 = undefined;
- var v: [16]u32 = undefined;
-
- for (m) |*r, i| {
- *r = mem.readIntLE(u32, b[4*i .. 4*i + 4]);
+ // Copy any remainder for next pass.
+ mem.copy(u8, d.buf[d.buf_len..], b[off..]);
+ d.buf_len += @intCast(u8, b[off..].len);
}
- var k: usize = 0;
- while (k < 8) : (k += 1) {
- v[k] = d.h[k];
- v[k+8] = iv[k];
- }
+ pub fn final(d: *Self, out: []u8) void {
+ debug.assert(out.len >= out_len / 8);
- v[12] ^= @truncate(u32, d.t);
- v[13] ^= u32(d.t >> 32);
- if (last) v[14] = ~v[14];
+ mem.set(u8, d.buf[d.buf_len..], 0);
+ d.t += d.buf_len;
+ d.round(d.buf[0..], true);
- const rounds = comptime []RoundParam {
- Rp(0, 4, 8, 12, 0, 1),
- Rp(1, 5, 9, 13, 2, 3),
- Rp(2, 6, 10, 14, 4, 5),
- Rp(3, 7, 11, 15, 6, 7),
- Rp(0, 5, 10, 15, 8, 9),
- Rp(1, 6, 11, 12, 10, 11),
- Rp(2, 7, 8, 13, 12, 13),
- Rp(3, 4, 9, 14, 14, 15),
- };
+ const rr = d.h[0 .. out_len / 32];
- comptime var j: usize = 0;
- inline while (j < 10) : (j += 1) {
- inline for (rounds) |r| {
- v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.x]];
- v[r.d] = math.rotr(u32, v[r.d] ^ v[r.a], usize(16));
- v[r.c] = v[r.c] +% v[r.d];
- v[r.b] = math.rotr(u32, v[r.b] ^ v[r.c], usize(12));
- v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.y]];
- v[r.d] = math.rotr(u32, v[r.d] ^ v[r.a], usize(8));
- v[r.c] = v[r.c] +% v[r.d];
- v[r.b] = math.rotr(u32, v[r.b] ^ v[r.c], usize(7));
+ for (rr) |s, j| {
+ mem.writeInt(out[4 * j .. 4 * j + 4], s, builtin.Endian.Little);
}
}
- for (d.h) |*r, i| {
- *r ^= v[i] ^ v[i + 8];
+ fn round(d: *Self, b: []const u8, last: bool) void {
+ debug.assert(b.len == 64);
+
+ var m: [16]u32 = undefined;
+ var v: [16]u32 = undefined;
+
+ for (m) |*r, i| {
+ r.* = mem.readIntLE(u32, b[4 * i .. 4 * i + 4]);
+ }
+
+ var k: usize = 0;
+ while (k < 8) : (k += 1) {
+ v[k] = d.h[k];
+ v[k + 8] = iv[k];
+ }
+
+ v[12] ^= @truncate(u32, d.t);
+ v[13] ^= @intCast(u32, d.t >> 32);
+ if (last) v[14] = ~v[14];
+
+ const rounds = comptime []RoundParam{
+ Rp(0, 4, 8, 12, 0, 1),
+ Rp(1, 5, 9, 13, 2, 3),
+ Rp(2, 6, 10, 14, 4, 5),
+ Rp(3, 7, 11, 15, 6, 7),
+ Rp(0, 5, 10, 15, 8, 9),
+ Rp(1, 6, 11, 12, 10, 11),
+ Rp(2, 7, 8, 13, 12, 13),
+ Rp(3, 4, 9, 14, 14, 15),
+ };
+
+ comptime var j: usize = 0;
+ inline while (j < 10) : (j += 1) {
+ inline for (rounds) |r| {
+ v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.x]];
+ v[r.d] = math.rotr(u32, v[r.d] ^ v[r.a], usize(16));
+ v[r.c] = v[r.c] +% v[r.d];
+ v[r.b] = math.rotr(u32, v[r.b] ^ v[r.c], usize(12));
+ v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.y]];
+ v[r.d] = math.rotr(u32, v[r.d] ^ v[r.a], usize(8));
+ v[r.c] = v[r.c] +% v[r.d];
+ v[r.b] = math.rotr(u32, v[r.b] ^ v[r.c], usize(7));
+ }
+ }
+
+ for (d.h) |*r, i| {
+ r.* ^= v[i] ^ v[i + 8];
+ }
}
- }
-};}
+ };
+}
test "blake2s224 single" {
const h1 = "1fa1291e65248b37b3433475b2a0dd63d54a11ecc4e3e034e7bc1ef4";
@@ -230,7 +250,7 @@ test "blake2s256 streaming" {
}
test "blake2s256 aligned final" {
- var block = []u8 {0} ** Blake2s256.block_size;
+ var block = []u8{0} ** Blake2s256.block_size;
var out: [Blake2s256.digest_size]u8 = undefined;
var h = Blake2s256.init();
@@ -238,154 +258,159 @@ test "blake2s256 aligned final" {
h.final(out[0..]);
}
-
/////////////////////
// Blake2b
pub const Blake2b384 = Blake2b(384);
pub const Blake2b512 = Blake2b(512);
-fn Blake2b(comptime out_len: usize) type { return struct {
- const Self = this;
- const block_size = 128;
- const digest_size = out_len / 8;
+fn Blake2b(comptime out_len: usize) type {
+ return struct {
+ const Self = this;
+ const block_size = 128;
+ const digest_size = out_len / 8;
- const iv = [8]u64 {
- 0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
- 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
- 0x510e527fade682d1, 0x9b05688c2b3e6c1f,
- 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
- };
+ const iv = [8]u64{
+ 0x6a09e667f3bcc908,
+ 0xbb67ae8584caa73b,
+ 0x3c6ef372fe94f82b,
+ 0xa54ff53a5f1d36f1,
+ 0x510e527fade682d1,
+ 0x9b05688c2b3e6c1f,
+ 0x1f83d9abfb41bd6b,
+ 0x5be0cd19137e2179,
+ };
- const sigma = [12][16]u8 {
- []const u8 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
- []const u8 { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
- []const u8 { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
- []const u8 { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
- []const u8 { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
- []const u8 { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
- []const u8 { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
- []const u8 { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
- []const u8 { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
- []const u8 { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 },
- []const u8 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
- []const u8 { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
- };
+ const sigma = [12][16]u8{
+ []const u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ []const u8{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
+ []const u8{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
+ []const u8{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
+ []const u8{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
+ []const u8{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
+ []const u8{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
+ []const u8{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
+ []const u8{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
+ []const u8{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
+ []const u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ []const u8{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
+ };
- h: [8]u64,
- t: u128,
- // Streaming cache
- buf: [128]u8,
- buf_len: u8,
+ h: [8]u64,
+ t: u128,
+ // Streaming cache
+ buf: [128]u8,
+ buf_len: u8,
- pub fn init() Self {
- debug.assert(8 <= out_len and out_len <= 512);
+ pub fn init() Self {
+ debug.assert(8 <= out_len and out_len <= 512);
- var s: Self = undefined;
- s.reset();
- return s;
- }
+ var s: Self = undefined;
+ s.reset();
+ return s;
+ }
- pub fn reset(d: &Self) void {
- mem.copy(u64, d.h[0..], iv[0..]);
+ pub fn reset(d: *Self) void {
+ mem.copy(u64, d.h[0..], iv[0..]);
- // No key plus default parameters
- d.h[0] ^= 0x01010000 ^ (out_len >> 3);
- d.t = 0;
- d.buf_len = 0;
- }
-
- pub fn hash(b: []const u8, out: []u8) void {
- var d = Self.init();
- d.update(b);
- d.final(out);
- }
-
- pub fn update(d: &Self, b: []const u8) void {
- var off: usize = 0;
-
- // Partial buffer exists from previous update. Copy into buffer then hash.
- if (d.buf_len != 0 and d.buf_len + b.len > 128) {
- off += 128 - d.buf_len;
- mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
- d.t += 128;
- d.round(d.buf[0..], false);
+ // No key plus default parameters
+ d.h[0] ^= 0x01010000 ^ (out_len >> 3);
+ d.t = 0;
d.buf_len = 0;
}
- // Full middle blocks.
- while (off + 128 <= b.len) : (off += 128) {
- d.t += 128;
- d.round(b[off..off + 128], false);
+ pub fn hash(b: []const u8, out: []u8) void {
+ var d = Self.init();
+ d.update(b);
+ d.final(out);
}
- // Copy any remainder for next pass.
- mem.copy(u8, d.buf[d.buf_len..], b[off..]);
- d.buf_len += u8(b[off..].len);
- }
+ pub fn update(d: *Self, b: []const u8) void {
+ var off: usize = 0;
- pub fn final(d: &Self, out: []u8) void {
- mem.set(u8, d.buf[d.buf_len..], 0);
- d.t += d.buf_len;
- d.round(d.buf[0..], true);
+ // Partial buffer exists from previous update. Copy into buffer then hash.
+ if (d.buf_len != 0 and d.buf_len + b.len > 128) {
+ off += 128 - d.buf_len;
+ mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
+ d.t += 128;
+ d.round(d.buf[0..], false);
+ d.buf_len = 0;
+ }
- const rr = d.h[0 .. out_len / 64];
+ // Full middle blocks.
+ while (off + 128 <= b.len) : (off += 128) {
+ d.t += 128;
+ d.round(b[off .. off + 128], false);
+ }
- for (rr) |s, j| {
- mem.writeInt(out[8*j .. 8*j + 8], s, builtin.Endian.Little);
- }
- }
-
- fn round(d: &Self, b: []const u8, last: bool) void {
- debug.assert(b.len == 128);
-
- var m: [16]u64 = undefined;
- var v: [16]u64 = undefined;
-
- for (m) |*r, i| {
- *r = mem.readIntLE(u64, b[8*i .. 8*i + 8]);
+ // Copy any remainder for next pass.
+ mem.copy(u8, d.buf[d.buf_len..], b[off..]);
+ d.buf_len += @intCast(u8, b[off..].len);
}
- var k: usize = 0;
- while (k < 8) : (k += 1) {
- v[k] = d.h[k];
- v[k+8] = iv[k];
- }
+ pub fn final(d: *Self, out: []u8) void {
+ mem.set(u8, d.buf[d.buf_len..], 0);
+ d.t += d.buf_len;
+ d.round(d.buf[0..], true);
- v[12] ^= @truncate(u64, d.t);
- v[13] ^= u64(d.t >> 64);
- if (last) v[14] = ~v[14];
+ const rr = d.h[0 .. out_len / 64];
- const rounds = comptime []RoundParam {
- Rp(0, 4, 8, 12, 0, 1),
- Rp(1, 5, 9, 13, 2, 3),
- Rp(2, 6, 10, 14, 4, 5),
- Rp(3, 7, 11, 15, 6, 7),
- Rp(0, 5, 10, 15, 8, 9),
- Rp(1, 6, 11, 12, 10, 11),
- Rp(2, 7, 8, 13, 12, 13),
- Rp(3, 4, 9, 14, 14, 15),
- };
-
- comptime var j: usize = 0;
- inline while (j < 12) : (j += 1) {
- inline for (rounds) |r| {
- v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.x]];
- v[r.d] = math.rotr(u64, v[r.d] ^ v[r.a], usize(32));
- v[r.c] = v[r.c] +% v[r.d];
- v[r.b] = math.rotr(u64, v[r.b] ^ v[r.c], usize(24));
- v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.y]];
- v[r.d] = math.rotr(u64, v[r.d] ^ v[r.a], usize(16));
- v[r.c] = v[r.c] +% v[r.d];
- v[r.b] = math.rotr(u64, v[r.b] ^ v[r.c], usize(63));
+ for (rr) |s, j| {
+ mem.writeInt(out[8 * j .. 8 * j + 8], s, builtin.Endian.Little);
}
}
- for (d.h) |*r, i| {
- *r ^= v[i] ^ v[i + 8];
+ fn round(d: *Self, b: []const u8, last: bool) void {
+ debug.assert(b.len == 128);
+
+ var m: [16]u64 = undefined;
+ var v: [16]u64 = undefined;
+
+ for (m) |*r, i| {
+ r.* = mem.readIntLE(u64, b[8 * i .. 8 * i + 8]);
+ }
+
+ var k: usize = 0;
+ while (k < 8) : (k += 1) {
+ v[k] = d.h[k];
+ v[k + 8] = iv[k];
+ }
+
+ v[12] ^= @truncate(u64, d.t);
+ v[13] ^= @intCast(u64, d.t >> 64);
+ if (last) v[14] = ~v[14];
+
+ const rounds = comptime []RoundParam{
+ Rp(0, 4, 8, 12, 0, 1),
+ Rp(1, 5, 9, 13, 2, 3),
+ Rp(2, 6, 10, 14, 4, 5),
+ Rp(3, 7, 11, 15, 6, 7),
+ Rp(0, 5, 10, 15, 8, 9),
+ Rp(1, 6, 11, 12, 10, 11),
+ Rp(2, 7, 8, 13, 12, 13),
+ Rp(3, 4, 9, 14, 14, 15),
+ };
+
+ comptime var j: usize = 0;
+ inline while (j < 12) : (j += 1) {
+ inline for (rounds) |r| {
+ v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.x]];
+ v[r.d] = math.rotr(u64, v[r.d] ^ v[r.a], usize(32));
+ v[r.c] = v[r.c] +% v[r.d];
+ v[r.b] = math.rotr(u64, v[r.b] ^ v[r.c], usize(24));
+ v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.y]];
+ v[r.d] = math.rotr(u64, v[r.d] ^ v[r.a], usize(16));
+ v[r.c] = v[r.c] +% v[r.d];
+ v[r.b] = math.rotr(u64, v[r.b] ^ v[r.c], usize(63));
+ }
+ }
+
+ for (d.h) |*r, i| {
+ r.* ^= v[i] ^ v[i + 8];
+ }
}
- }
-};}
+ };
+}
test "blake2b384 single" {
const h1 = "b32811423377f52d7862286ee1a72ee540524380fda1724a6f25d7978c6fd3244a6caf0498812673c5e05ef583825100";
@@ -458,7 +483,7 @@ test "blake2b512 streaming" {
}
test "blake2b512 aligned final" {
- var block = []u8 {0} ** Blake2b512.block_size;
+ var block = []u8{0} ** Blake2b512.block_size;
var out: [Blake2b512.digest_size]u8 = undefined;
var h = Blake2b512.init();
diff --git a/std/crypto/hmac.zig b/std/crypto/hmac.zig
index 2a36f15b71..1415e88cf4 100644
--- a/std/crypto/hmac.zig
+++ b/std/crypto/hmac.zig
@@ -29,12 +29,12 @@ pub fn Hmac(comptime H: type) type {
var o_key_pad: [H.block_size]u8 = undefined;
for (o_key_pad) |*b, i| {
- *b = scratch[i] ^ 0x5c;
+ b.* = scratch[i] ^ 0x5c;
}
var i_key_pad: [H.block_size]u8 = undefined;
for (i_key_pad) |*b, i| {
- *b = scratch[i] ^ 0x36;
+ b.* = scratch[i] ^ 0x36;
}
// HMAC(k, m) = H(o_key_pad | H(i_key_pad | message)) where | is concatenation
diff --git a/std/crypto/md5.zig b/std/crypto/md5.zig
index 705b2428a7..23fe2313a0 100644
--- a/std/crypto/md5.zig
+++ b/std/crypto/md5.zig
@@ -6,12 +6,25 @@ const debug = @import("../debug/index.zig");
const fmt = @import("../fmt/index.zig");
const RoundParam = struct {
- a: usize, b: usize, c: usize, d: usize,
- k: usize, s: u32, t: u32
+ a: usize,
+ b: usize,
+ c: usize,
+ d: usize,
+ k: usize,
+ s: u32,
+ t: u32,
};
fn Rp(a: usize, b: usize, c: usize, d: usize, k: usize, s: u32, t: u32) RoundParam {
- return RoundParam { .a = a, .b = b, .c = c, .d = d, .k = k, .s = s, .t = t };
+ return RoundParam{
+ .a = a,
+ .b = b,
+ .c = c,
+ .d = d,
+ .k = k,
+ .s = s,
+ .t = t,
+ };
}
pub const Md5 = struct {
@@ -31,7 +44,7 @@ pub const Md5 = struct {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
d.s[0] = 0x67452301;
d.s[1] = 0xEFCDAB89;
d.s[2] = 0x98BADCFE;
@@ -46,7 +59,7 @@ pub const Md5 = struct {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -60,18 +73,18 @@ pub const Md5 = struct {
// Full middle blocks.
while (off + 64 <= b.len) : (off += 64) {
- d.round(b[off..off + 64]);
+ d.round(b[off .. off + 64]);
}
// Copy any remainder for next pass.
mem.copy(u8, d.buf[d.buf_len..], b[off..]);
- d.buf_len += u8(b[off..].len);
+ d.buf_len += @intCast(u8, b[off..].len);
// Md5 uses the bottom 64-bits for length padding
d.total_len +%= b.len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= 16);
// The buffer here will never be completely full.
@@ -90,20 +103,20 @@ pub const Md5 = struct {
// Append message length.
var i: usize = 1;
var len = d.total_len >> 5;
- d.buf[56] = u8(d.total_len & 0x1f) << 3;
+ d.buf[56] = @intCast(u8, d.total_len & 0x1f) << 3;
while (i < 8) : (i += 1) {
- d.buf[56 + i] = u8(len & 0xff);
+ d.buf[56 + i] = @intCast(u8, len & 0xff);
len >>= 8;
}
d.round(d.buf[0..]);
for (d.s) |s, j| {
- mem.writeInt(out[4*j .. 4*j + 4], s, builtin.Endian.Little);
+ mem.writeInt(out[4 * j .. 4 * j + 4], s, builtin.Endian.Little);
}
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [16]u32 = undefined;
@@ -112,30 +125,33 @@ pub const Md5 = struct {
while (i < 16) : (i += 1) {
// NOTE: Performing or's separately improves perf by ~10%
s[i] = 0;
- s[i] |= u32(b[i*4+0]);
- s[i] |= u32(b[i*4+1]) << 8;
- s[i] |= u32(b[i*4+2]) << 16;
- s[i] |= u32(b[i*4+3]) << 24;
+ s[i] |= u32(b[i * 4 + 0]);
+ s[i] |= u32(b[i * 4 + 1]) << 8;
+ s[i] |= u32(b[i * 4 + 2]) << 16;
+ s[i] |= u32(b[i * 4 + 3]) << 24;
}
- var v: [4]u32 = []u32 {
- d.s[0], d.s[1], d.s[2], d.s[3],
+ var v: [4]u32 = []u32{
+ d.s[0],
+ d.s[1],
+ d.s[2],
+ d.s[3],
};
- const round0 = comptime []RoundParam {
- Rp(0, 1, 2, 3, 0, 7, 0xD76AA478),
- Rp(3, 0, 1, 2, 1, 12, 0xE8C7B756),
- Rp(2, 3, 0, 1, 2, 17, 0x242070DB),
- Rp(1, 2, 3, 0, 3, 22, 0xC1BDCEEE),
- Rp(0, 1, 2, 3, 4, 7, 0xF57C0FAF),
- Rp(3, 0, 1, 2, 5, 12, 0x4787C62A),
- Rp(2, 3, 0, 1, 6, 17, 0xA8304613),
- Rp(1, 2, 3, 0, 7, 22, 0xFD469501),
- Rp(0, 1, 2, 3, 8, 7, 0x698098D8),
- Rp(3, 0, 1, 2, 9, 12, 0x8B44F7AF),
+ const round0 = comptime []RoundParam{
+ Rp(0, 1, 2, 3, 0, 7, 0xD76AA478),
+ Rp(3, 0, 1, 2, 1, 12, 0xE8C7B756),
+ Rp(2, 3, 0, 1, 2, 17, 0x242070DB),
+ Rp(1, 2, 3, 0, 3, 22, 0xC1BDCEEE),
+ Rp(0, 1, 2, 3, 4, 7, 0xF57C0FAF),
+ Rp(3, 0, 1, 2, 5, 12, 0x4787C62A),
+ Rp(2, 3, 0, 1, 6, 17, 0xA8304613),
+ Rp(1, 2, 3, 0, 7, 22, 0xFD469501),
+ Rp(0, 1, 2, 3, 8, 7, 0x698098D8),
+ Rp(3, 0, 1, 2, 9, 12, 0x8B44F7AF),
Rp(2, 3, 0, 1, 10, 17, 0xFFFF5BB1),
Rp(1, 2, 3, 0, 11, 22, 0x895CD7BE),
- Rp(0, 1, 2, 3, 12, 7, 0x6B901122),
+ Rp(0, 1, 2, 3, 12, 7, 0x6B901122),
Rp(3, 0, 1, 2, 13, 12, 0xFD987193),
Rp(2, 3, 0, 1, 14, 17, 0xA679438E),
Rp(1, 2, 3, 0, 15, 22, 0x49B40821),
@@ -145,22 +161,22 @@ pub const Md5 = struct {
v[r.a] = v[r.b] +% math.rotl(u32, v[r.a], r.s);
}
- const round1 = comptime []RoundParam {
- Rp(0, 1, 2, 3, 1, 5, 0xF61E2562),
- Rp(3, 0, 1, 2, 6, 9, 0xC040B340),
+ const round1 = comptime []RoundParam{
+ Rp(0, 1, 2, 3, 1, 5, 0xF61E2562),
+ Rp(3, 0, 1, 2, 6, 9, 0xC040B340),
Rp(2, 3, 0, 1, 11, 14, 0x265E5A51),
- Rp(1, 2, 3, 0, 0, 20, 0xE9B6C7AA),
- Rp(0, 1, 2, 3, 5, 5, 0xD62F105D),
- Rp(3, 0, 1, 2, 10, 9, 0x02441453),
+ Rp(1, 2, 3, 0, 0, 20, 0xE9B6C7AA),
+ Rp(0, 1, 2, 3, 5, 5, 0xD62F105D),
+ Rp(3, 0, 1, 2, 10, 9, 0x02441453),
Rp(2, 3, 0, 1, 15, 14, 0xD8A1E681),
- Rp(1, 2, 3, 0, 4, 20, 0xE7D3FBC8),
- Rp(0, 1, 2, 3, 9, 5, 0x21E1CDE6),
- Rp(3, 0, 1, 2, 14, 9, 0xC33707D6),
- Rp(2, 3, 0, 1, 3, 14, 0xF4D50D87),
- Rp(1, 2, 3, 0, 8, 20, 0x455A14ED),
- Rp(0, 1, 2, 3, 13, 5, 0xA9E3E905),
- Rp(3, 0, 1, 2, 2, 9, 0xFCEFA3F8),
- Rp(2, 3, 0, 1, 7, 14, 0x676F02D9),
+ Rp(1, 2, 3, 0, 4, 20, 0xE7D3FBC8),
+ Rp(0, 1, 2, 3, 9, 5, 0x21E1CDE6),
+ Rp(3, 0, 1, 2, 14, 9, 0xC33707D6),
+ Rp(2, 3, 0, 1, 3, 14, 0xF4D50D87),
+ Rp(1, 2, 3, 0, 8, 20, 0x455A14ED),
+ Rp(0, 1, 2, 3, 13, 5, 0xA9E3E905),
+ Rp(3, 0, 1, 2, 2, 9, 0xFCEFA3F8),
+ Rp(2, 3, 0, 1, 7, 14, 0x676F02D9),
Rp(1, 2, 3, 0, 12, 20, 0x8D2A4C8A),
};
inline for (round1) |r| {
@@ -168,46 +184,46 @@ pub const Md5 = struct {
v[r.a] = v[r.b] +% math.rotl(u32, v[r.a], r.s);
}
- const round2 = comptime []RoundParam {
- Rp(0, 1, 2, 3, 5, 4, 0xFFFA3942),
- Rp(3, 0, 1, 2, 8, 11, 0x8771F681),
+ const round2 = comptime []RoundParam{
+ Rp(0, 1, 2, 3, 5, 4, 0xFFFA3942),
+ Rp(3, 0, 1, 2, 8, 11, 0x8771F681),
Rp(2, 3, 0, 1, 11, 16, 0x6D9D6122),
Rp(1, 2, 3, 0, 14, 23, 0xFDE5380C),
- Rp(0, 1, 2, 3, 1, 4, 0xA4BEEA44),
- Rp(3, 0, 1, 2, 4, 11, 0x4BDECFA9),
- Rp(2, 3, 0, 1, 7, 16, 0xF6BB4B60),
+ Rp(0, 1, 2, 3, 1, 4, 0xA4BEEA44),
+ Rp(3, 0, 1, 2, 4, 11, 0x4BDECFA9),
+ Rp(2, 3, 0, 1, 7, 16, 0xF6BB4B60),
Rp(1, 2, 3, 0, 10, 23, 0xBEBFBC70),
- Rp(0, 1, 2, 3, 13, 4, 0x289B7EC6),
- Rp(3, 0, 1, 2, 0, 11, 0xEAA127FA),
- Rp(2, 3, 0, 1, 3, 16, 0xD4EF3085),
- Rp(1, 2, 3, 0, 6, 23, 0x04881D05),
- Rp(0, 1, 2, 3, 9, 4, 0xD9D4D039),
+ Rp(0, 1, 2, 3, 13, 4, 0x289B7EC6),
+ Rp(3, 0, 1, 2, 0, 11, 0xEAA127FA),
+ Rp(2, 3, 0, 1, 3, 16, 0xD4EF3085),
+ Rp(1, 2, 3, 0, 6, 23, 0x04881D05),
+ Rp(0, 1, 2, 3, 9, 4, 0xD9D4D039),
Rp(3, 0, 1, 2, 12, 11, 0xE6DB99E5),
Rp(2, 3, 0, 1, 15, 16, 0x1FA27CF8),
- Rp(1, 2, 3, 0, 2, 23, 0xC4AC5665),
+ Rp(1, 2, 3, 0, 2, 23, 0xC4AC5665),
};
inline for (round2) |r| {
v[r.a] = v[r.a] +% (v[r.b] ^ v[r.c] ^ v[r.d]) +% r.t +% s[r.k];
v[r.a] = v[r.b] +% math.rotl(u32, v[r.a], r.s);
}
- const round3 = comptime []RoundParam {
- Rp(0, 1, 2, 3, 0, 6, 0xF4292244),
- Rp(3, 0, 1, 2, 7, 10, 0x432AFF97),
+ const round3 = comptime []RoundParam{
+ Rp(0, 1, 2, 3, 0, 6, 0xF4292244),
+ Rp(3, 0, 1, 2, 7, 10, 0x432AFF97),
Rp(2, 3, 0, 1, 14, 15, 0xAB9423A7),
- Rp(1, 2, 3, 0, 5, 21, 0xFC93A039),
- Rp(0, 1, 2, 3, 12, 6, 0x655B59C3),
- Rp(3, 0, 1, 2, 3, 10, 0x8F0CCC92),
+ Rp(1, 2, 3, 0, 5, 21, 0xFC93A039),
+ Rp(0, 1, 2, 3, 12, 6, 0x655B59C3),
+ Rp(3, 0, 1, 2, 3, 10, 0x8F0CCC92),
Rp(2, 3, 0, 1, 10, 15, 0xFFEFF47D),
- Rp(1, 2, 3, 0, 1, 21, 0x85845DD1),
- Rp(0, 1, 2, 3, 8, 6, 0x6FA87E4F),
+ Rp(1, 2, 3, 0, 1, 21, 0x85845DD1),
+ Rp(0, 1, 2, 3, 8, 6, 0x6FA87E4F),
Rp(3, 0, 1, 2, 15, 10, 0xFE2CE6E0),
- Rp(2, 3, 0, 1, 6, 15, 0xA3014314),
+ Rp(2, 3, 0, 1, 6, 15, 0xA3014314),
Rp(1, 2, 3, 0, 13, 21, 0x4E0811A1),
- Rp(0, 1, 2, 3, 4, 6, 0xF7537E82),
+ Rp(0, 1, 2, 3, 4, 6, 0xF7537E82),
Rp(3, 0, 1, 2, 11, 10, 0xBD3AF235),
- Rp(2, 3, 0, 1, 2, 15, 0x2AD7D2BB),
- Rp(1, 2, 3, 0, 9, 21, 0xEB86D391),
+ Rp(2, 3, 0, 1, 2, 15, 0x2AD7D2BB),
+ Rp(1, 2, 3, 0, 9, 21, 0xEB86D391),
};
inline for (round3) |r| {
v[r.a] = v[r.a] +% (v[r.c] ^ (v[r.b] | ~v[r.d])) +% r.t +% s[r.k];
@@ -255,7 +271,7 @@ test "md5 streaming" {
}
test "md5 aligned final" {
- var block = []u8 {0} ** Md5.block_size;
+ var block = []u8{0} ** Md5.block_size;
var out: [Md5.digest_size]u8 = undefined;
var h = Md5.init();
diff --git a/std/crypto/sha1.zig b/std/crypto/sha1.zig
index 333597b12d..451cfb3122 100644
--- a/std/crypto/sha1.zig
+++ b/std/crypto/sha1.zig
@@ -4,14 +4,24 @@ const endian = @import("../endian.zig");
const debug = @import("../debug/index.zig");
const builtin = @import("builtin");
-pub const u160 = @IntType(false, 160);
-
const RoundParam = struct {
- a: usize, b: usize, c: usize, d: usize, e: usize, i: u32,
+ a: usize,
+ b: usize,
+ c: usize,
+ d: usize,
+ e: usize,
+ i: u32,
};
fn Rp(a: usize, b: usize, c: usize, d: usize, e: usize, i: u32) RoundParam {
- return RoundParam { .a = a, .b = b, .c = c, .d = d, .e = e, .i = i };
+ return RoundParam{
+ .a = a,
+ .b = b,
+ .c = c,
+ .d = d,
+ .e = e,
+ .i = i,
+ };
}
pub const Sha1 = struct {
@@ -31,7 +41,7 @@ pub const Sha1 = struct {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
d.s[0] = 0x67452301;
d.s[1] = 0xEFCDAB89;
d.s[2] = 0x98BADCFE;
@@ -47,7 +57,7 @@ pub const Sha1 = struct {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -61,17 +71,17 @@ pub const Sha1 = struct {
// Full middle blocks.
while (off + 64 <= b.len) : (off += 64) {
- d.round(b[off..off + 64]);
+ d.round(b[off .. off + 64]);
}
// Copy any remainder for next pass.
mem.copy(u8, d.buf[d.buf_len..], b[off..]);
- d.buf_len += u8(b[off..].len);
+ d.buf_len += @intCast(u8, b[off..].len);
d.total_len += b.len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= 20);
// The buffer here will never be completely full.
@@ -90,39 +100,43 @@ pub const Sha1 = struct {
// Append message length.
var i: usize = 1;
var len = d.total_len >> 5;
- d.buf[63] = u8(d.total_len & 0x1f) << 3;
+ d.buf[63] = @intCast(u8, d.total_len & 0x1f) << 3;
while (i < 8) : (i += 1) {
- d.buf[63 - i] = u8(len & 0xff);
+ d.buf[63 - i] = @intCast(u8, len & 0xff);
len >>= 8;
}
d.round(d.buf[0..]);
for (d.s) |s, j| {
- mem.writeInt(out[4*j .. 4*j + 4], s, builtin.Endian.Big);
+ mem.writeInt(out[4 * j .. 4 * j + 4], s, builtin.Endian.Big);
}
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [16]u32 = undefined;
- var v: [5]u32 = []u32 {
- d.s[0], d.s[1], d.s[2], d.s[3], d.s[4],
+ var v: [5]u32 = []u32{
+ d.s[0],
+ d.s[1],
+ d.s[2],
+ d.s[3],
+ d.s[4],
};
- const round0a = comptime []RoundParam {
- Rp(0, 1, 2, 3, 4, 0),
- Rp(4, 0, 1, 2, 3, 1),
- Rp(3, 4, 0, 1, 2, 2),
- Rp(2, 3, 4, 0, 1, 3),
- Rp(1, 2, 3, 4, 0, 4),
- Rp(0, 1, 2, 3, 4, 5),
- Rp(4, 0, 1, 2, 3, 6),
- Rp(3, 4, 0, 1, 2, 7),
- Rp(2, 3, 4, 0, 1, 8),
- Rp(1, 2, 3, 4, 0, 9),
+ const round0a = comptime []RoundParam{
+ Rp(0, 1, 2, 3, 4, 0),
+ Rp(4, 0, 1, 2, 3, 1),
+ Rp(3, 4, 0, 1, 2, 2),
+ Rp(2, 3, 4, 0, 1, 3),
+ Rp(1, 2, 3, 4, 0, 4),
+ Rp(0, 1, 2, 3, 4, 5),
+ Rp(4, 0, 1, 2, 3, 6),
+ Rp(3, 4, 0, 1, 2, 7),
+ Rp(2, 3, 4, 0, 1, 8),
+ Rp(1, 2, 3, 4, 0, 9),
Rp(0, 1, 2, 3, 4, 10),
Rp(4, 0, 1, 2, 3, 11),
Rp(3, 4, 0, 1, 2, 12),
@@ -131,32 +145,27 @@ pub const Sha1 = struct {
Rp(0, 1, 2, 3, 4, 15),
};
inline for (round0a) |r| {
- s[r.i] = (u32(b[r.i * 4 + 0]) << 24) |
- (u32(b[r.i * 4 + 1]) << 16) |
- (u32(b[r.i * 4 + 2]) << 8) |
- (u32(b[r.i * 4 + 3]) << 0);
+ s[r.i] = (u32(b[r.i * 4 + 0]) << 24) | (u32(b[r.i * 4 + 1]) << 16) | (u32(b[r.i * 4 + 2]) << 8) | (u32(b[r.i * 4 + 3]) << 0);
- v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x5A827999 +% s[r.i & 0xf]
- +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d]));
+ v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x5A827999 +% s[r.i & 0xf] +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d]));
v[r.b] = math.rotl(u32, v[r.b], u32(30));
}
- const round0b = comptime []RoundParam {
+ const round0b = comptime []RoundParam{
Rp(4, 0, 1, 2, 3, 16),
Rp(3, 4, 0, 1, 2, 17),
Rp(2, 3, 4, 0, 1, 18),
Rp(1, 2, 3, 4, 0, 19),
};
inline for (round0b) |r| {
- const t = s[(r.i-3) & 0xf] ^ s[(r.i-8) & 0xf] ^ s[(r.i-14) & 0xf] ^ s[(r.i-16) & 0xf];
+ const t = s[(r.i - 3) & 0xf] ^ s[(r.i - 8) & 0xf] ^ s[(r.i - 14) & 0xf] ^ s[(r.i - 16) & 0xf];
s[r.i & 0xf] = math.rotl(u32, t, u32(1));
- v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x5A827999 +% s[r.i & 0xf]
- +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d]));
+ v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x5A827999 +% s[r.i & 0xf] +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d]));
v[r.b] = math.rotl(u32, v[r.b], u32(30));
}
- const round1 = comptime []RoundParam {
+ const round1 = comptime []RoundParam{
Rp(0, 1, 2, 3, 4, 20),
Rp(4, 0, 1, 2, 3, 21),
Rp(3, 4, 0, 1, 2, 22),
@@ -179,15 +188,14 @@ pub const Sha1 = struct {
Rp(1, 2, 3, 4, 0, 39),
};
inline for (round1) |r| {
- const t = s[(r.i-3) & 0xf] ^ s[(r.i-8) & 0xf] ^ s[(r.i-14) & 0xf] ^ s[(r.i-16) & 0xf];
+ const t = s[(r.i - 3) & 0xf] ^ s[(r.i - 8) & 0xf] ^ s[(r.i - 14) & 0xf] ^ s[(r.i - 16) & 0xf];
s[r.i & 0xf] = math.rotl(u32, t, u32(1));
- v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x6ED9EBA1 +% s[r.i & 0xf]
- +% (v[r.b] ^ v[r.c] ^ v[r.d]);
+ v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x6ED9EBA1 +% s[r.i & 0xf] +% (v[r.b] ^ v[r.c] ^ v[r.d]);
v[r.b] = math.rotl(u32, v[r.b], u32(30));
}
- const round2 = comptime []RoundParam {
+ const round2 = comptime []RoundParam{
Rp(0, 1, 2, 3, 4, 40),
Rp(4, 0, 1, 2, 3, 41),
Rp(3, 4, 0, 1, 2, 42),
@@ -210,15 +218,14 @@ pub const Sha1 = struct {
Rp(1, 2, 3, 4, 0, 59),
};
inline for (round2) |r| {
- const t = s[(r.i-3) & 0xf] ^ s[(r.i-8) & 0xf] ^ s[(r.i-14) & 0xf] ^ s[(r.i-16) & 0xf];
+ const t = s[(r.i - 3) & 0xf] ^ s[(r.i - 8) & 0xf] ^ s[(r.i - 14) & 0xf] ^ s[(r.i - 16) & 0xf];
s[r.i & 0xf] = math.rotl(u32, t, u32(1));
- v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x8F1BBCDC +% s[r.i & 0xf]
- +% ((v[r.b] & v[r.c]) ^ (v[r.b] & v[r.d]) ^ (v[r.c] & v[r.d]));
+ v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x8F1BBCDC +% s[r.i & 0xf] +% ((v[r.b] & v[r.c]) ^ (v[r.b] & v[r.d]) ^ (v[r.c] & v[r.d]));
v[r.b] = math.rotl(u32, v[r.b], u32(30));
}
- const round3 = comptime []RoundParam {
+ const round3 = comptime []RoundParam{
Rp(0, 1, 2, 3, 4, 60),
Rp(4, 0, 1, 2, 3, 61),
Rp(3, 4, 0, 1, 2, 62),
@@ -241,11 +248,10 @@ pub const Sha1 = struct {
Rp(1, 2, 3, 4, 0, 79),
};
inline for (round3) |r| {
- const t = s[(r.i-3) & 0xf] ^ s[(r.i-8) & 0xf] ^ s[(r.i-14) & 0xf] ^ s[(r.i-16) & 0xf];
+ const t = s[(r.i - 3) & 0xf] ^ s[(r.i - 8) & 0xf] ^ s[(r.i - 14) & 0xf] ^ s[(r.i - 16) & 0xf];
s[r.i & 0xf] = math.rotl(u32, t, u32(1));
- v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0xCA62C1D6 +% s[r.i & 0xf]
- +% (v[r.b] ^ v[r.c] ^ v[r.d]);
+ v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0xCA62C1D6 +% s[r.i & 0xf] +% (v[r.b] ^ v[r.c] ^ v[r.d]);
v[r.b] = math.rotl(u32, v[r.b], u32(30));
}
@@ -286,7 +292,7 @@ test "sha1 streaming" {
}
test "sha1 aligned final" {
- var block = []u8 {0} ** Sha1.block_size;
+ var block = []u8{0} ** Sha1.block_size;
var out: [Sha1.digest_size]u8 = undefined;
var h = Sha1.init();
diff --git a/std/crypto/sha2.zig b/std/crypto/sha2.zig
index b70450c0ad..d1b915835c 100644
--- a/std/crypto/sha2.zig
+++ b/std/crypto/sha2.zig
@@ -9,12 +9,31 @@ const htest = @import("test.zig");
// Sha224 + Sha256
const RoundParam256 = struct {
- a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g: usize, h: usize,
- i: usize, k: u32,
+ a: usize,
+ b: usize,
+ c: usize,
+ d: usize,
+ e: usize,
+ f: usize,
+ g: usize,
+ h: usize,
+ i: usize,
+ k: u32,
};
fn Rp256(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g: usize, h: usize, i: usize, k: u32) RoundParam256 {
- return RoundParam256 { .a = a, .b = b, .c = c, .d = d, .e = e, .f = f, .g = g, .h = h, .i = i, .k = k };
+ return RoundParam256{
+ .a = a,
+ .b = b,
+ .c = c,
+ .d = d,
+ .e = e,
+ .f = f,
+ .g = g,
+ .h = h,
+ .i = i,
+ .k = k,
+ };
}
const Sha2Params32 = struct {
@@ -29,7 +48,7 @@ const Sha2Params32 = struct {
out_len: usize,
};
-const Sha224Params = Sha2Params32 {
+const Sha224Params = Sha2Params32{
.iv0 = 0xC1059ED8,
.iv1 = 0x367CD507,
.iv2 = 0x3070DD17,
@@ -41,7 +60,7 @@ const Sha224Params = Sha2Params32 {
.out_len = 224,
};
-const Sha256Params = Sha2Params32 {
+const Sha256Params = Sha2Params32{
.iv0 = 0x6A09E667,
.iv1 = 0xBB67AE85,
.iv2 = 0x3C6EF372,
@@ -56,216 +75,215 @@ const Sha256Params = Sha2Params32 {
pub const Sha224 = Sha2_32(Sha224Params);
pub const Sha256 = Sha2_32(Sha256Params);
-fn Sha2_32(comptime params: Sha2Params32) type { return struct {
- const Self = this;
- const block_size = 64;
- const digest_size = params.out_len / 8;
+fn Sha2_32(comptime params: Sha2Params32) type {
+ return struct {
+ const Self = this;
+ const block_size = 64;
+ const digest_size = params.out_len / 8;
- s: [8]u32,
- // Streaming Cache
- buf: [64]u8,
- buf_len: u8,
- total_len: u64,
+ s: [8]u32,
+ // Streaming Cache
+ buf: [64]u8,
+ buf_len: u8,
+ total_len: u64,
- pub fn init() Self {
- var d: Self = undefined;
- d.reset();
- return d;
- }
+ pub fn init() Self {
+ var d: Self = undefined;
+ d.reset();
+ return d;
+ }
- pub fn reset(d: &Self) void {
- d.s[0] = params.iv0;
- d.s[1] = params.iv1;
- d.s[2] = params.iv2;
- d.s[3] = params.iv3;
- d.s[4] = params.iv4;
- d.s[5] = params.iv5;
- d.s[6] = params.iv6;
- d.s[7] = params.iv7;
- d.buf_len = 0;
- d.total_len = 0;
- }
-
- pub fn hash(b: []const u8, out: []u8) void {
- var d = Self.init();
- d.update(b);
- d.final(out);
- }
-
- pub fn update(d: &Self, b: []const u8) void {
- var off: usize = 0;
-
- // Partial buffer exists from previous update. Copy into buffer then hash.
- if (d.buf_len != 0 and d.buf_len + b.len > 64) {
- off += 64 - d.buf_len;
- mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
-
- d.round(d.buf[0..]);
+ pub fn reset(d: *Self) void {
+ d.s[0] = params.iv0;
+ d.s[1] = params.iv1;
+ d.s[2] = params.iv2;
+ d.s[3] = params.iv3;
+ d.s[4] = params.iv4;
+ d.s[5] = params.iv5;
+ d.s[6] = params.iv6;
+ d.s[7] = params.iv7;
d.buf_len = 0;
+ d.total_len = 0;
}
- // Full middle blocks.
- while (off + 64 <= b.len) : (off += 64) {
- d.round(b[off..off + 64]);
+ pub fn hash(b: []const u8, out: []u8) void {
+ var d = Self.init();
+ d.update(b);
+ d.final(out);
}
- // Copy any remainder for next pass.
- mem.copy(u8, d.buf[d.buf_len..], b[off..]);
- d.buf_len += u8(b[off..].len);
+ pub fn update(d: *Self, b: []const u8) void {
+ var off: usize = 0;
- d.total_len += b.len;
- }
+ // Partial buffer exists from previous update. Copy into buffer then hash.
+ if (d.buf_len != 0 and d.buf_len + b.len > 64) {
+ off += 64 - d.buf_len;
+ mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
- pub fn final(d: &Self, out: []u8) void {
- debug.assert(out.len >= params.out_len / 8);
+ d.round(d.buf[0..]);
+ d.buf_len = 0;
+ }
- // The buffer here will never be completely full.
- mem.set(u8, d.buf[d.buf_len..], 0);
+ // Full middle blocks.
+ while (off + 64 <= b.len) : (off += 64) {
+ d.round(b[off .. off + 64]);
+ }
- // Append padding bits.
- d.buf[d.buf_len] = 0x80;
- d.buf_len += 1;
+ // Copy any remainder for next pass.
+ mem.copy(u8, d.buf[d.buf_len..], b[off..]);
+ d.buf_len += @intCast(u8, b[off..].len);
+
+ d.total_len += b.len;
+ }
+
+ pub fn final(d: *Self, out: []u8) void {
+ debug.assert(out.len >= params.out_len / 8);
+
+ // The buffer here will never be completely full.
+ mem.set(u8, d.buf[d.buf_len..], 0);
+
+ // Append padding bits.
+ d.buf[d.buf_len] = 0x80;
+ d.buf_len += 1;
+
+ // > 448 mod 512 so need to add an extra round to wrap around.
+ if (64 - d.buf_len < 8) {
+ d.round(d.buf[0..]);
+ mem.set(u8, d.buf[0..], 0);
+ }
+
+ // Append message length.
+ var i: usize = 1;
+ var len = d.total_len >> 5;
+ d.buf[63] = @intCast(u8, d.total_len & 0x1f) << 3;
+ while (i < 8) : (i += 1) {
+ d.buf[63 - i] = @intCast(u8, len & 0xff);
+ len >>= 8;
+ }
- // > 448 mod 512 so need to add an extra round to wrap around.
- if (64 - d.buf_len < 8) {
d.round(d.buf[0..]);
- mem.set(u8, d.buf[0..], 0);
+
+ // May truncate for possible 224 output
+ const rr = d.s[0 .. params.out_len / 32];
+
+ for (rr) |s, j| {
+ mem.writeInt(out[4 * j .. 4 * j + 4], s, builtin.Endian.Big);
+ }
}
- // Append message length.
- var i: usize = 1;
- var len = d.total_len >> 5;
- d.buf[63] = u8(d.total_len & 0x1f) << 3;
- while (i < 8) : (i += 1) {
- d.buf[63 - i] = u8(len & 0xff);
- len >>= 8;
+ fn round(d: *Self, b: []const u8) void {
+ debug.assert(b.len == 64);
+
+ var s: [64]u32 = undefined;
+
+ var i: usize = 0;
+ while (i < 16) : (i += 1) {
+ s[i] = 0;
+ s[i] |= u32(b[i * 4 + 0]) << 24;
+ s[i] |= u32(b[i * 4 + 1]) << 16;
+ s[i] |= u32(b[i * 4 + 2]) << 8;
+ s[i] |= u32(b[i * 4 + 3]) << 0;
+ }
+ while (i < 64) : (i += 1) {
+ s[i] = s[i - 16] +% s[i - 7] +% (math.rotr(u32, s[i - 15], u32(7)) ^ math.rotr(u32, s[i - 15], u32(18)) ^ (s[i - 15] >> 3)) +% (math.rotr(u32, s[i - 2], u32(17)) ^ math.rotr(u32, s[i - 2], u32(19)) ^ (s[i - 2] >> 10));
+ }
+
+ var v: [8]u32 = []u32{
+ d.s[0],
+ d.s[1],
+ d.s[2],
+ d.s[3],
+ d.s[4],
+ d.s[5],
+ d.s[6],
+ d.s[7],
+ };
+
+ const round0 = comptime []RoundParam256{
+ Rp256(0, 1, 2, 3, 4, 5, 6, 7, 0, 0x428A2F98),
+ Rp256(7, 0, 1, 2, 3, 4, 5, 6, 1, 0x71374491),
+ Rp256(6, 7, 0, 1, 2, 3, 4, 5, 2, 0xB5C0FBCF),
+ Rp256(5, 6, 7, 0, 1, 2, 3, 4, 3, 0xE9B5DBA5),
+ Rp256(4, 5, 6, 7, 0, 1, 2, 3, 4, 0x3956C25B),
+ Rp256(3, 4, 5, 6, 7, 0, 1, 2, 5, 0x59F111F1),
+ Rp256(2, 3, 4, 5, 6, 7, 0, 1, 6, 0x923F82A4),
+ Rp256(1, 2, 3, 4, 5, 6, 7, 0, 7, 0xAB1C5ED5),
+ Rp256(0, 1, 2, 3, 4, 5, 6, 7, 8, 0xD807AA98),
+ Rp256(7, 0, 1, 2, 3, 4, 5, 6, 9, 0x12835B01),
+ Rp256(6, 7, 0, 1, 2, 3, 4, 5, 10, 0x243185BE),
+ Rp256(5, 6, 7, 0, 1, 2, 3, 4, 11, 0x550C7DC3),
+ Rp256(4, 5, 6, 7, 0, 1, 2, 3, 12, 0x72BE5D74),
+ Rp256(3, 4, 5, 6, 7, 0, 1, 2, 13, 0x80DEB1FE),
+ Rp256(2, 3, 4, 5, 6, 7, 0, 1, 14, 0x9BDC06A7),
+ Rp256(1, 2, 3, 4, 5, 6, 7, 0, 15, 0xC19BF174),
+ Rp256(0, 1, 2, 3, 4, 5, 6, 7, 16, 0xE49B69C1),
+ Rp256(7, 0, 1, 2, 3, 4, 5, 6, 17, 0xEFBE4786),
+ Rp256(6, 7, 0, 1, 2, 3, 4, 5, 18, 0x0FC19DC6),
+ Rp256(5, 6, 7, 0, 1, 2, 3, 4, 19, 0x240CA1CC),
+ Rp256(4, 5, 6, 7, 0, 1, 2, 3, 20, 0x2DE92C6F),
+ Rp256(3, 4, 5, 6, 7, 0, 1, 2, 21, 0x4A7484AA),
+ Rp256(2, 3, 4, 5, 6, 7, 0, 1, 22, 0x5CB0A9DC),
+ Rp256(1, 2, 3, 4, 5, 6, 7, 0, 23, 0x76F988DA),
+ Rp256(0, 1, 2, 3, 4, 5, 6, 7, 24, 0x983E5152),
+ Rp256(7, 0, 1, 2, 3, 4, 5, 6, 25, 0xA831C66D),
+ Rp256(6, 7, 0, 1, 2, 3, 4, 5, 26, 0xB00327C8),
+ Rp256(5, 6, 7, 0, 1, 2, 3, 4, 27, 0xBF597FC7),
+ Rp256(4, 5, 6, 7, 0, 1, 2, 3, 28, 0xC6E00BF3),
+ Rp256(3, 4, 5, 6, 7, 0, 1, 2, 29, 0xD5A79147),
+ Rp256(2, 3, 4, 5, 6, 7, 0, 1, 30, 0x06CA6351),
+ Rp256(1, 2, 3, 4, 5, 6, 7, 0, 31, 0x14292967),
+ Rp256(0, 1, 2, 3, 4, 5, 6, 7, 32, 0x27B70A85),
+ Rp256(7, 0, 1, 2, 3, 4, 5, 6, 33, 0x2E1B2138),
+ Rp256(6, 7, 0, 1, 2, 3, 4, 5, 34, 0x4D2C6DFC),
+ Rp256(5, 6, 7, 0, 1, 2, 3, 4, 35, 0x53380D13),
+ Rp256(4, 5, 6, 7, 0, 1, 2, 3, 36, 0x650A7354),
+ Rp256(3, 4, 5, 6, 7, 0, 1, 2, 37, 0x766A0ABB),
+ Rp256(2, 3, 4, 5, 6, 7, 0, 1, 38, 0x81C2C92E),
+ Rp256(1, 2, 3, 4, 5, 6, 7, 0, 39, 0x92722C85),
+ Rp256(0, 1, 2, 3, 4, 5, 6, 7, 40, 0xA2BFE8A1),
+ Rp256(7, 0, 1, 2, 3, 4, 5, 6, 41, 0xA81A664B),
+ Rp256(6, 7, 0, 1, 2, 3, 4, 5, 42, 0xC24B8B70),
+ Rp256(5, 6, 7, 0, 1, 2, 3, 4, 43, 0xC76C51A3),
+ Rp256(4, 5, 6, 7, 0, 1, 2, 3, 44, 0xD192E819),
+ Rp256(3, 4, 5, 6, 7, 0, 1, 2, 45, 0xD6990624),
+ Rp256(2, 3, 4, 5, 6, 7, 0, 1, 46, 0xF40E3585),
+ Rp256(1, 2, 3, 4, 5, 6, 7, 0, 47, 0x106AA070),
+ Rp256(0, 1, 2, 3, 4, 5, 6, 7, 48, 0x19A4C116),
+ Rp256(7, 0, 1, 2, 3, 4, 5, 6, 49, 0x1E376C08),
+ Rp256(6, 7, 0, 1, 2, 3, 4, 5, 50, 0x2748774C),
+ Rp256(5, 6, 7, 0, 1, 2, 3, 4, 51, 0x34B0BCB5),
+ Rp256(4, 5, 6, 7, 0, 1, 2, 3, 52, 0x391C0CB3),
+ Rp256(3, 4, 5, 6, 7, 0, 1, 2, 53, 0x4ED8AA4A),
+ Rp256(2, 3, 4, 5, 6, 7, 0, 1, 54, 0x5B9CCA4F),
+ Rp256(1, 2, 3, 4, 5, 6, 7, 0, 55, 0x682E6FF3),
+ Rp256(0, 1, 2, 3, 4, 5, 6, 7, 56, 0x748F82EE),
+ Rp256(7, 0, 1, 2, 3, 4, 5, 6, 57, 0x78A5636F),
+ Rp256(6, 7, 0, 1, 2, 3, 4, 5, 58, 0x84C87814),
+ Rp256(5, 6, 7, 0, 1, 2, 3, 4, 59, 0x8CC70208),
+ Rp256(4, 5, 6, 7, 0, 1, 2, 3, 60, 0x90BEFFFA),
+ Rp256(3, 4, 5, 6, 7, 0, 1, 2, 61, 0xA4506CEB),
+ Rp256(2, 3, 4, 5, 6, 7, 0, 1, 62, 0xBEF9A3F7),
+ Rp256(1, 2, 3, 4, 5, 6, 7, 0, 63, 0xC67178F2),
+ };
+ inline for (round0) |r| {
+ v[r.h] = v[r.h] +% (math.rotr(u32, v[r.e], u32(6)) ^ math.rotr(u32, v[r.e], u32(11)) ^ math.rotr(u32, v[r.e], u32(25))) +% (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +% r.k +% s[r.i];
+
+ v[r.d] = v[r.d] +% v[r.h];
+
+ v[r.h] = v[r.h] +% (math.rotr(u32, v[r.a], u32(2)) ^ math.rotr(u32, v[r.a], u32(13)) ^ math.rotr(u32, v[r.a], u32(22))) +% ((v[r.a] & (v[r.b] | v[r.c])) | (v[r.b] & v[r.c]));
+ }
+
+ d.s[0] +%= v[0];
+ d.s[1] +%= v[1];
+ d.s[2] +%= v[2];
+ d.s[3] +%= v[3];
+ d.s[4] +%= v[4];
+ d.s[5] +%= v[5];
+ d.s[6] +%= v[6];
+ d.s[7] +%= v[7];
}
-
- d.round(d.buf[0..]);
-
- // May truncate for possible 224 output
- const rr = d.s[0 .. params.out_len / 32];
-
- for (rr) |s, j| {
- mem.writeInt(out[4*j .. 4*j + 4], s, builtin.Endian.Big);
- }
- }
-
- fn round(d: &Self, b: []const u8) void {
- debug.assert(b.len == 64);
-
- var s: [64]u32 = undefined;
-
- var i: usize = 0;
- while (i < 16) : (i += 1) {
- s[i] = 0;
- s[i] |= u32(b[i*4+0]) << 24;
- s[i] |= u32(b[i*4+1]) << 16;
- s[i] |= u32(b[i*4+2]) << 8;
- s[i] |= u32(b[i*4+3]) << 0;
- }
- while (i < 64) : (i += 1) {
- s[i] =
- s[i-16] +% s[i-7] +%
- (math.rotr(u32, s[i-15], u32(7)) ^ math.rotr(u32, s[i-15], u32(18)) ^ (s[i-15] >> 3)) +%
- (math.rotr(u32, s[i-2], u32(17)) ^ math.rotr(u32, s[i-2], u32(19)) ^ (s[i-2] >> 10));
- }
-
- var v: [8]u32 = []u32 {
- d.s[0], d.s[1], d.s[2], d.s[3], d.s[4], d.s[5], d.s[6], d.s[7],
- };
-
- const round0 = comptime []RoundParam256 {
- Rp256(0, 1, 2, 3, 4, 5, 6, 7, 0, 0x428A2F98),
- Rp256(7, 0, 1, 2, 3, 4, 5, 6, 1, 0x71374491),
- Rp256(6, 7, 0, 1, 2, 3, 4, 5, 2, 0xB5C0FBCF),
- Rp256(5, 6, 7, 0, 1, 2, 3, 4, 3, 0xE9B5DBA5),
- Rp256(4, 5, 6, 7, 0, 1, 2, 3, 4, 0x3956C25B),
- Rp256(3, 4, 5, 6, 7, 0, 1, 2, 5, 0x59F111F1),
- Rp256(2, 3, 4, 5, 6, 7, 0, 1, 6, 0x923F82A4),
- Rp256(1, 2, 3, 4, 5, 6, 7, 0, 7, 0xAB1C5ED5),
- Rp256(0, 1, 2, 3, 4, 5, 6, 7, 8, 0xD807AA98),
- Rp256(7, 0, 1, 2, 3, 4, 5, 6, 9, 0x12835B01),
- Rp256(6, 7, 0, 1, 2, 3, 4, 5, 10, 0x243185BE),
- Rp256(5, 6, 7, 0, 1, 2, 3, 4, 11, 0x550C7DC3),
- Rp256(4, 5, 6, 7, 0, 1, 2, 3, 12, 0x72BE5D74),
- Rp256(3, 4, 5, 6, 7, 0, 1, 2, 13, 0x80DEB1FE),
- Rp256(2, 3, 4, 5, 6, 7, 0, 1, 14, 0x9BDC06A7),
- Rp256(1, 2, 3, 4, 5, 6, 7, 0, 15, 0xC19BF174),
- Rp256(0, 1, 2, 3, 4, 5, 6, 7, 16, 0xE49B69C1),
- Rp256(7, 0, 1, 2, 3, 4, 5, 6, 17, 0xEFBE4786),
- Rp256(6, 7, 0, 1, 2, 3, 4, 5, 18, 0x0FC19DC6),
- Rp256(5, 6, 7, 0, 1, 2, 3, 4, 19, 0x240CA1CC),
- Rp256(4, 5, 6, 7, 0, 1, 2, 3, 20, 0x2DE92C6F),
- Rp256(3, 4, 5, 6, 7, 0, 1, 2, 21, 0x4A7484AA),
- Rp256(2, 3, 4, 5, 6, 7, 0, 1, 22, 0x5CB0A9DC),
- Rp256(1, 2, 3, 4, 5, 6, 7, 0, 23, 0x76F988DA),
- Rp256(0, 1, 2, 3, 4, 5, 6, 7, 24, 0x983E5152),
- Rp256(7, 0, 1, 2, 3, 4, 5, 6, 25, 0xA831C66D),
- Rp256(6, 7, 0, 1, 2, 3, 4, 5, 26, 0xB00327C8),
- Rp256(5, 6, 7, 0, 1, 2, 3, 4, 27, 0xBF597FC7),
- Rp256(4, 5, 6, 7, 0, 1, 2, 3, 28, 0xC6E00BF3),
- Rp256(3, 4, 5, 6, 7, 0, 1, 2, 29, 0xD5A79147),
- Rp256(2, 3, 4, 5, 6, 7, 0, 1, 30, 0x06CA6351),
- Rp256(1, 2, 3, 4, 5, 6, 7, 0, 31, 0x14292967),
- Rp256(0, 1, 2, 3, 4, 5, 6, 7, 32, 0x27B70A85),
- Rp256(7, 0, 1, 2, 3, 4, 5, 6, 33, 0x2E1B2138),
- Rp256(6, 7, 0, 1, 2, 3, 4, 5, 34, 0x4D2C6DFC),
- Rp256(5, 6, 7, 0, 1, 2, 3, 4, 35, 0x53380D13),
- Rp256(4, 5, 6, 7, 0, 1, 2, 3, 36, 0x650A7354),
- Rp256(3, 4, 5, 6, 7, 0, 1, 2, 37, 0x766A0ABB),
- Rp256(2, 3, 4, 5, 6, 7, 0, 1, 38, 0x81C2C92E),
- Rp256(1, 2, 3, 4, 5, 6, 7, 0, 39, 0x92722C85),
- Rp256(0, 1, 2, 3, 4, 5, 6, 7, 40, 0xA2BFE8A1),
- Rp256(7, 0, 1, 2, 3, 4, 5, 6, 41, 0xA81A664B),
- Rp256(6, 7, 0, 1, 2, 3, 4, 5, 42, 0xC24B8B70),
- Rp256(5, 6, 7, 0, 1, 2, 3, 4, 43, 0xC76C51A3),
- Rp256(4, 5, 6, 7, 0, 1, 2, 3, 44, 0xD192E819),
- Rp256(3, 4, 5, 6, 7, 0, 1, 2, 45, 0xD6990624),
- Rp256(2, 3, 4, 5, 6, 7, 0, 1, 46, 0xF40E3585),
- Rp256(1, 2, 3, 4, 5, 6, 7, 0, 47, 0x106AA070),
- Rp256(0, 1, 2, 3, 4, 5, 6, 7, 48, 0x19A4C116),
- Rp256(7, 0, 1, 2, 3, 4, 5, 6, 49, 0x1E376C08),
- Rp256(6, 7, 0, 1, 2, 3, 4, 5, 50, 0x2748774C),
- Rp256(5, 6, 7, 0, 1, 2, 3, 4, 51, 0x34B0BCB5),
- Rp256(4, 5, 6, 7, 0, 1, 2, 3, 52, 0x391C0CB3),
- Rp256(3, 4, 5, 6, 7, 0, 1, 2, 53, 0x4ED8AA4A),
- Rp256(2, 3, 4, 5, 6, 7, 0, 1, 54, 0x5B9CCA4F),
- Rp256(1, 2, 3, 4, 5, 6, 7, 0, 55, 0x682E6FF3),
- Rp256(0, 1, 2, 3, 4, 5, 6, 7, 56, 0x748F82EE),
- Rp256(7, 0, 1, 2, 3, 4, 5, 6, 57, 0x78A5636F),
- Rp256(6, 7, 0, 1, 2, 3, 4, 5, 58, 0x84C87814),
- Rp256(5, 6, 7, 0, 1, 2, 3, 4, 59, 0x8CC70208),
- Rp256(4, 5, 6, 7, 0, 1, 2, 3, 60, 0x90BEFFFA),
- Rp256(3, 4, 5, 6, 7, 0, 1, 2, 61, 0xA4506CEB),
- Rp256(2, 3, 4, 5, 6, 7, 0, 1, 62, 0xBEF9A3F7),
- Rp256(1, 2, 3, 4, 5, 6, 7, 0, 63, 0xC67178F2),
- };
- inline for (round0) |r| {
- v[r.h] =
- v[r.h] +%
- (math.rotr(u32, v[r.e], u32(6)) ^ math.rotr(u32, v[r.e], u32(11)) ^ math.rotr(u32, v[r.e], u32(25))) +%
- (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +%
- r.k +% s[r.i];
-
- v[r.d] = v[r.d] +% v[r.h];
-
- v[r.h] =
- v[r.h] +%
- (math.rotr(u32, v[r.a], u32(2)) ^ math.rotr(u32, v[r.a], u32(13)) ^ math.rotr(u32, v[r.a], u32(22))) +%
- ((v[r.a] & (v[r.b] | v[r.c])) | (v[r.b] & v[r.c]));
- }
-
- d.s[0] +%= v[0];
- d.s[1] +%= v[1];
- d.s[2] +%= v[2];
- d.s[3] +%= v[3];
- d.s[4] +%= v[4];
- d.s[5] +%= v[5];
- d.s[6] +%= v[6];
- d.s[7] +%= v[7];
- }
-};}
+ };
+}
test "sha224 single" {
htest.assertEqualHash(Sha224, "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", "");
@@ -320,7 +338,7 @@ test "sha256 streaming" {
}
test "sha256 aligned final" {
- var block = []u8 {0} ** Sha256.block_size;
+ var block = []u8{0} ** Sha256.block_size;
var out: [Sha256.digest_size]u8 = undefined;
var h = Sha256.init();
@@ -328,17 +346,35 @@ test "sha256 aligned final" {
h.final(out[0..]);
}
-
/////////////////////
// Sha384 + Sha512
const RoundParam512 = struct {
- a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g: usize, h: usize,
- i: usize, k: u64,
+ a: usize,
+ b: usize,
+ c: usize,
+ d: usize,
+ e: usize,
+ f: usize,
+ g: usize,
+ h: usize,
+ i: usize,
+ k: u64,
};
fn Rp512(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g: usize, h: usize, i: usize, k: u64) RoundParam512 {
- return RoundParam512 { .a = a, .b = b, .c = c, .d = d, .e = e, .f = f, .g = g, .h = h, .i = i, .k = k };
+ return RoundParam512{
+ .a = a,
+ .b = b,
+ .c = c,
+ .d = d,
+ .e = e,
+ .f = f,
+ .g = g,
+ .h = h,
+ .i = i,
+ .k = k,
+ };
}
const Sha2Params64 = struct {
@@ -353,7 +389,7 @@ const Sha2Params64 = struct {
out_len: usize,
};
-const Sha384Params = Sha2Params64 {
+const Sha384Params = Sha2Params64{
.iv0 = 0xCBBB9D5DC1059ED8,
.iv1 = 0x629A292A367CD507,
.iv2 = 0x9159015A3070DD17,
@@ -365,7 +401,7 @@ const Sha384Params = Sha2Params64 {
.out_len = 384,
};
-const Sha512Params = Sha2Params64 {
+const Sha512Params = Sha2Params64{
.iv0 = 0x6A09E667F3BCC908,
.iv1 = 0xBB67AE8584CAA73B,
.iv2 = 0x3C6EF372FE94F82B,
@@ -374,242 +410,241 @@ const Sha512Params = Sha2Params64 {
.iv5 = 0x9B05688C2B3E6C1F,
.iv6 = 0x1F83D9ABFB41BD6B,
.iv7 = 0x5BE0CD19137E2179,
- .out_len = 512
+ .out_len = 512,
};
pub const Sha384 = Sha2_64(Sha384Params);
pub const Sha512 = Sha2_64(Sha512Params);
-fn Sha2_64(comptime params: Sha2Params64) type { return struct {
- const Self = this;
- const block_size = 128;
- const digest_size = params.out_len / 8;
+fn Sha2_64(comptime params: Sha2Params64) type {
+ return struct {
+ const Self = this;
+ const block_size = 128;
+ const digest_size = params.out_len / 8;
- s: [8]u64,
- // Streaming Cache
- buf: [128]u8,
- buf_len: u8,
- total_len: u128,
+ s: [8]u64,
+ // Streaming Cache
+ buf: [128]u8,
+ buf_len: u8,
+ total_len: u128,
- pub fn init() Self {
- var d: Self = undefined;
- d.reset();
- return d;
- }
+ pub fn init() Self {
+ var d: Self = undefined;
+ d.reset();
+ return d;
+ }
- pub fn reset(d: &Self) void {
- d.s[0] = params.iv0;
- d.s[1] = params.iv1;
- d.s[2] = params.iv2;
- d.s[3] = params.iv3;
- d.s[4] = params.iv4;
- d.s[5] = params.iv5;
- d.s[6] = params.iv6;
- d.s[7] = params.iv7;
- d.buf_len = 0;
- d.total_len = 0;
- }
-
- pub fn hash(b: []const u8, out: []u8) void {
- var d = Self.init();
- d.update(b);
- d.final(out);
- }
-
- pub fn update(d: &Self, b: []const u8) void {
- var off: usize = 0;
-
- // Partial buffer exists from previous update. Copy into buffer then hash.
- if (d.buf_len != 0 and d.buf_len + b.len > 128) {
- off += 128 - d.buf_len;
- mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
-
- d.round(d.buf[0..]);
+ pub fn reset(d: *Self) void {
+ d.s[0] = params.iv0;
+ d.s[1] = params.iv1;
+ d.s[2] = params.iv2;
+ d.s[3] = params.iv3;
+ d.s[4] = params.iv4;
+ d.s[5] = params.iv5;
+ d.s[6] = params.iv6;
+ d.s[7] = params.iv7;
d.buf_len = 0;
+ d.total_len = 0;
}
- // Full middle blocks.
- while (off + 128 <= b.len) : (off += 128) {
- d.round(b[off..off + 128]);
+ pub fn hash(b: []const u8, out: []u8) void {
+ var d = Self.init();
+ d.update(b);
+ d.final(out);
}
- // Copy any remainder for next pass.
- mem.copy(u8, d.buf[d.buf_len..], b[off..]);
- d.buf_len += u8(b[off..].len);
+ pub fn update(d: *Self, b: []const u8) void {
+ var off: usize = 0;
- d.total_len += b.len;
- }
+ // Partial buffer exists from previous update. Copy into buffer then hash.
+ if (d.buf_len != 0 and d.buf_len + b.len > 128) {
+ off += 128 - d.buf_len;
+ mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
- pub fn final(d: &Self, out: []u8) void {
- debug.assert(out.len >= params.out_len / 8);
+ d.round(d.buf[0..]);
+ d.buf_len = 0;
+ }
- // The buffer here will never be completely full.
- mem.set(u8, d.buf[d.buf_len..], 0);
+ // Full middle blocks.
+ while (off + 128 <= b.len) : (off += 128) {
+ d.round(b[off .. off + 128]);
+ }
- // Append padding bits.
- d.buf[d.buf_len] = 0x80;
- d.buf_len += 1;
+ // Copy any remainder for next pass.
+ mem.copy(u8, d.buf[d.buf_len..], b[off..]);
+ d.buf_len += @intCast(u8, b[off..].len);
+
+ d.total_len += b.len;
+ }
+
+ pub fn final(d: *Self, out: []u8) void {
+ debug.assert(out.len >= params.out_len / 8);
+
+ // The buffer here will never be completely full.
+ mem.set(u8, d.buf[d.buf_len..], 0);
+
+ // Append padding bits.
+ d.buf[d.buf_len] = 0x80;
+ d.buf_len += 1;
+
+ // > 896 mod 1024 so need to add an extra round to wrap around.
+ if (128 - d.buf_len < 16) {
+ d.round(d.buf[0..]);
+ mem.set(u8, d.buf[0..], 0);
+ }
+
+ // Append message length.
+ var i: usize = 1;
+ var len = d.total_len >> 5;
+ d.buf[127] = @intCast(u8, d.total_len & 0x1f) << 3;
+ while (i < 16) : (i += 1) {
+ d.buf[127 - i] = @intCast(u8, len & 0xff);
+ len >>= 8;
+ }
- // > 896 mod 1024 so need to add an extra round to wrap around.
- if (128 - d.buf_len < 16) {
d.round(d.buf[0..]);
- mem.set(u8, d.buf[0..], 0);
+
+ // May truncate for possible 384 output
+ const rr = d.s[0 .. params.out_len / 64];
+
+ for (rr) |s, j| {
+ mem.writeInt(out[8 * j .. 8 * j + 8], s, builtin.Endian.Big);
+ }
}
- // Append message length.
- var i: usize = 1;
- var len = d.total_len >> 5;
- d.buf[127] = u8(d.total_len & 0x1f) << 3;
- while (i < 16) : (i += 1) {
- d.buf[127 - i] = u8(len & 0xff);
- len >>= 8;
+ fn round(d: *Self, b: []const u8) void {
+ debug.assert(b.len == 128);
+
+ var s: [80]u64 = undefined;
+
+ var i: usize = 0;
+ while (i < 16) : (i += 1) {
+ s[i] = 0;
+ s[i] |= u64(b[i * 8 + 0]) << 56;
+ s[i] |= u64(b[i * 8 + 1]) << 48;
+ s[i] |= u64(b[i * 8 + 2]) << 40;
+ s[i] |= u64(b[i * 8 + 3]) << 32;
+ s[i] |= u64(b[i * 8 + 4]) << 24;
+ s[i] |= u64(b[i * 8 + 5]) << 16;
+ s[i] |= u64(b[i * 8 + 6]) << 8;
+ s[i] |= u64(b[i * 8 + 7]) << 0;
+ }
+ while (i < 80) : (i += 1) {
+ s[i] = s[i - 16] +% s[i - 7] +% (math.rotr(u64, s[i - 15], u64(1)) ^ math.rotr(u64, s[i - 15], u64(8)) ^ (s[i - 15] >> 7)) +% (math.rotr(u64, s[i - 2], u64(19)) ^ math.rotr(u64, s[i - 2], u64(61)) ^ (s[i - 2] >> 6));
+ }
+
+ var v: [8]u64 = []u64{
+ d.s[0],
+ d.s[1],
+ d.s[2],
+ d.s[3],
+ d.s[4],
+ d.s[5],
+ d.s[6],
+ d.s[7],
+ };
+
+ const round0 = comptime []RoundParam512{
+ Rp512(0, 1, 2, 3, 4, 5, 6, 7, 0, 0x428A2F98D728AE22),
+ Rp512(7, 0, 1, 2, 3, 4, 5, 6, 1, 0x7137449123EF65CD),
+ Rp512(6, 7, 0, 1, 2, 3, 4, 5, 2, 0xB5C0FBCFEC4D3B2F),
+ Rp512(5, 6, 7, 0, 1, 2, 3, 4, 3, 0xE9B5DBA58189DBBC),
+ Rp512(4, 5, 6, 7, 0, 1, 2, 3, 4, 0x3956C25BF348B538),
+ Rp512(3, 4, 5, 6, 7, 0, 1, 2, 5, 0x59F111F1B605D019),
+ Rp512(2, 3, 4, 5, 6, 7, 0, 1, 6, 0x923F82A4AF194F9B),
+ Rp512(1, 2, 3, 4, 5, 6, 7, 0, 7, 0xAB1C5ED5DA6D8118),
+ Rp512(0, 1, 2, 3, 4, 5, 6, 7, 8, 0xD807AA98A3030242),
+ Rp512(7, 0, 1, 2, 3, 4, 5, 6, 9, 0x12835B0145706FBE),
+ Rp512(6, 7, 0, 1, 2, 3, 4, 5, 10, 0x243185BE4EE4B28C),
+ Rp512(5, 6, 7, 0, 1, 2, 3, 4, 11, 0x550C7DC3D5FFB4E2),
+ Rp512(4, 5, 6, 7, 0, 1, 2, 3, 12, 0x72BE5D74F27B896F),
+ Rp512(3, 4, 5, 6, 7, 0, 1, 2, 13, 0x80DEB1FE3B1696B1),
+ Rp512(2, 3, 4, 5, 6, 7, 0, 1, 14, 0x9BDC06A725C71235),
+ Rp512(1, 2, 3, 4, 5, 6, 7, 0, 15, 0xC19BF174CF692694),
+ Rp512(0, 1, 2, 3, 4, 5, 6, 7, 16, 0xE49B69C19EF14AD2),
+ Rp512(7, 0, 1, 2, 3, 4, 5, 6, 17, 0xEFBE4786384F25E3),
+ Rp512(6, 7, 0, 1, 2, 3, 4, 5, 18, 0x0FC19DC68B8CD5B5),
+ Rp512(5, 6, 7, 0, 1, 2, 3, 4, 19, 0x240CA1CC77AC9C65),
+ Rp512(4, 5, 6, 7, 0, 1, 2, 3, 20, 0x2DE92C6F592B0275),
+ Rp512(3, 4, 5, 6, 7, 0, 1, 2, 21, 0x4A7484AA6EA6E483),
+ Rp512(2, 3, 4, 5, 6, 7, 0, 1, 22, 0x5CB0A9DCBD41FBD4),
+ Rp512(1, 2, 3, 4, 5, 6, 7, 0, 23, 0x76F988DA831153B5),
+ Rp512(0, 1, 2, 3, 4, 5, 6, 7, 24, 0x983E5152EE66DFAB),
+ Rp512(7, 0, 1, 2, 3, 4, 5, 6, 25, 0xA831C66D2DB43210),
+ Rp512(6, 7, 0, 1, 2, 3, 4, 5, 26, 0xB00327C898FB213F),
+ Rp512(5, 6, 7, 0, 1, 2, 3, 4, 27, 0xBF597FC7BEEF0EE4),
+ Rp512(4, 5, 6, 7, 0, 1, 2, 3, 28, 0xC6E00BF33DA88FC2),
+ Rp512(3, 4, 5, 6, 7, 0, 1, 2, 29, 0xD5A79147930AA725),
+ Rp512(2, 3, 4, 5, 6, 7, 0, 1, 30, 0x06CA6351E003826F),
+ Rp512(1, 2, 3, 4, 5, 6, 7, 0, 31, 0x142929670A0E6E70),
+ Rp512(0, 1, 2, 3, 4, 5, 6, 7, 32, 0x27B70A8546D22FFC),
+ Rp512(7, 0, 1, 2, 3, 4, 5, 6, 33, 0x2E1B21385C26C926),
+ Rp512(6, 7, 0, 1, 2, 3, 4, 5, 34, 0x4D2C6DFC5AC42AED),
+ Rp512(5, 6, 7, 0, 1, 2, 3, 4, 35, 0x53380D139D95B3DF),
+ Rp512(4, 5, 6, 7, 0, 1, 2, 3, 36, 0x650A73548BAF63DE),
+ Rp512(3, 4, 5, 6, 7, 0, 1, 2, 37, 0x766A0ABB3C77B2A8),
+ Rp512(2, 3, 4, 5, 6, 7, 0, 1, 38, 0x81C2C92E47EDAEE6),
+ Rp512(1, 2, 3, 4, 5, 6, 7, 0, 39, 0x92722C851482353B),
+ Rp512(0, 1, 2, 3, 4, 5, 6, 7, 40, 0xA2BFE8A14CF10364),
+ Rp512(7, 0, 1, 2, 3, 4, 5, 6, 41, 0xA81A664BBC423001),
+ Rp512(6, 7, 0, 1, 2, 3, 4, 5, 42, 0xC24B8B70D0F89791),
+ Rp512(5, 6, 7, 0, 1, 2, 3, 4, 43, 0xC76C51A30654BE30),
+ Rp512(4, 5, 6, 7, 0, 1, 2, 3, 44, 0xD192E819D6EF5218),
+ Rp512(3, 4, 5, 6, 7, 0, 1, 2, 45, 0xD69906245565A910),
+ Rp512(2, 3, 4, 5, 6, 7, 0, 1, 46, 0xF40E35855771202A),
+ Rp512(1, 2, 3, 4, 5, 6, 7, 0, 47, 0x106AA07032BBD1B8),
+ Rp512(0, 1, 2, 3, 4, 5, 6, 7, 48, 0x19A4C116B8D2D0C8),
+ Rp512(7, 0, 1, 2, 3, 4, 5, 6, 49, 0x1E376C085141AB53),
+ Rp512(6, 7, 0, 1, 2, 3, 4, 5, 50, 0x2748774CDF8EEB99),
+ Rp512(5, 6, 7, 0, 1, 2, 3, 4, 51, 0x34B0BCB5E19B48A8),
+ Rp512(4, 5, 6, 7, 0, 1, 2, 3, 52, 0x391C0CB3C5C95A63),
+ Rp512(3, 4, 5, 6, 7, 0, 1, 2, 53, 0x4ED8AA4AE3418ACB),
+ Rp512(2, 3, 4, 5, 6, 7, 0, 1, 54, 0x5B9CCA4F7763E373),
+ Rp512(1, 2, 3, 4, 5, 6, 7, 0, 55, 0x682E6FF3D6B2B8A3),
+ Rp512(0, 1, 2, 3, 4, 5, 6, 7, 56, 0x748F82EE5DEFB2FC),
+ Rp512(7, 0, 1, 2, 3, 4, 5, 6, 57, 0x78A5636F43172F60),
+ Rp512(6, 7, 0, 1, 2, 3, 4, 5, 58, 0x84C87814A1F0AB72),
+ Rp512(5, 6, 7, 0, 1, 2, 3, 4, 59, 0x8CC702081A6439EC),
+ Rp512(4, 5, 6, 7, 0, 1, 2, 3, 60, 0x90BEFFFA23631E28),
+ Rp512(3, 4, 5, 6, 7, 0, 1, 2, 61, 0xA4506CEBDE82BDE9),
+ Rp512(2, 3, 4, 5, 6, 7, 0, 1, 62, 0xBEF9A3F7B2C67915),
+ Rp512(1, 2, 3, 4, 5, 6, 7, 0, 63, 0xC67178F2E372532B),
+ Rp512(0, 1, 2, 3, 4, 5, 6, 7, 64, 0xCA273ECEEA26619C),
+ Rp512(7, 0, 1, 2, 3, 4, 5, 6, 65, 0xD186B8C721C0C207),
+ Rp512(6, 7, 0, 1, 2, 3, 4, 5, 66, 0xEADA7DD6CDE0EB1E),
+ Rp512(5, 6, 7, 0, 1, 2, 3, 4, 67, 0xF57D4F7FEE6ED178),
+ Rp512(4, 5, 6, 7, 0, 1, 2, 3, 68, 0x06F067AA72176FBA),
+ Rp512(3, 4, 5, 6, 7, 0, 1, 2, 69, 0x0A637DC5A2C898A6),
+ Rp512(2, 3, 4, 5, 6, 7, 0, 1, 70, 0x113F9804BEF90DAE),
+ Rp512(1, 2, 3, 4, 5, 6, 7, 0, 71, 0x1B710B35131C471B),
+ Rp512(0, 1, 2, 3, 4, 5, 6, 7, 72, 0x28DB77F523047D84),
+ Rp512(7, 0, 1, 2, 3, 4, 5, 6, 73, 0x32CAAB7B40C72493),
+ Rp512(6, 7, 0, 1, 2, 3, 4, 5, 74, 0x3C9EBE0A15C9BEBC),
+ Rp512(5, 6, 7, 0, 1, 2, 3, 4, 75, 0x431D67C49C100D4C),
+ Rp512(4, 5, 6, 7, 0, 1, 2, 3, 76, 0x4CC5D4BECB3E42B6),
+ Rp512(3, 4, 5, 6, 7, 0, 1, 2, 77, 0x597F299CFC657E2A),
+ Rp512(2, 3, 4, 5, 6, 7, 0, 1, 78, 0x5FCB6FAB3AD6FAEC),
+ Rp512(1, 2, 3, 4, 5, 6, 7, 0, 79, 0x6C44198C4A475817),
+ };
+ inline for (round0) |r| {
+ v[r.h] = v[r.h] +% (math.rotr(u64, v[r.e], u64(14)) ^ math.rotr(u64, v[r.e], u64(18)) ^ math.rotr(u64, v[r.e], u64(41))) +% (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +% r.k +% s[r.i];
+
+ v[r.d] = v[r.d] +% v[r.h];
+
+ v[r.h] = v[r.h] +% (math.rotr(u64, v[r.a], u64(28)) ^ math.rotr(u64, v[r.a], u64(34)) ^ math.rotr(u64, v[r.a], u64(39))) +% ((v[r.a] & (v[r.b] | v[r.c])) | (v[r.b] & v[r.c]));
+ }
+
+ d.s[0] +%= v[0];
+ d.s[1] +%= v[1];
+ d.s[2] +%= v[2];
+ d.s[3] +%= v[3];
+ d.s[4] +%= v[4];
+ d.s[5] +%= v[5];
+ d.s[6] +%= v[6];
+ d.s[7] +%= v[7];
}
-
- d.round(d.buf[0..]);
-
- // May truncate for possible 384 output
- const rr = d.s[0 .. params.out_len / 64];
-
- for (rr) |s, j| {
- mem.writeInt(out[8*j .. 8*j + 8], s, builtin.Endian.Big);
- }
- }
-
- fn round(d: &Self, b: []const u8) void {
- debug.assert(b.len == 128);
-
- var s: [80]u64 = undefined;
-
- var i: usize = 0;
- while (i < 16) : (i += 1) {
- s[i] = 0;
- s[i] |= u64(b[i*8+0]) << 56;
- s[i] |= u64(b[i*8+1]) << 48;
- s[i] |= u64(b[i*8+2]) << 40;
- s[i] |= u64(b[i*8+3]) << 32;
- s[i] |= u64(b[i*8+4]) << 24;
- s[i] |= u64(b[i*8+5]) << 16;
- s[i] |= u64(b[i*8+6]) << 8;
- s[i] |= u64(b[i*8+7]) << 0;
- }
- while (i < 80) : (i += 1) {
- s[i] =
- s[i-16] +% s[i-7] +%
- (math.rotr(u64, s[i-15], u64(1)) ^ math.rotr(u64, s[i-15], u64(8)) ^ (s[i-15] >> 7)) +%
- (math.rotr(u64, s[i-2], u64(19)) ^ math.rotr(u64, s[i-2], u64(61)) ^ (s[i-2] >> 6));
- }
-
- var v: [8]u64 = []u64 {
- d.s[0], d.s[1], d.s[2], d.s[3], d.s[4], d.s[5], d.s[6], d.s[7],
- };
-
- const round0 = comptime []RoundParam512 {
- Rp512(0, 1, 2, 3, 4, 5, 6, 7, 0, 0x428A2F98D728AE22),
- Rp512(7, 0, 1, 2, 3, 4, 5, 6, 1, 0x7137449123EF65CD),
- Rp512(6, 7, 0, 1, 2, 3, 4, 5, 2, 0xB5C0FBCFEC4D3B2F),
- Rp512(5, 6, 7, 0, 1, 2, 3, 4, 3, 0xE9B5DBA58189DBBC),
- Rp512(4, 5, 6, 7, 0, 1, 2, 3, 4, 0x3956C25BF348B538),
- Rp512(3, 4, 5, 6, 7, 0, 1, 2, 5, 0x59F111F1B605D019),
- Rp512(2, 3, 4, 5, 6, 7, 0, 1, 6, 0x923F82A4AF194F9B),
- Rp512(1, 2, 3, 4, 5, 6, 7, 0, 7, 0xAB1C5ED5DA6D8118),
- Rp512(0, 1, 2, 3, 4, 5, 6, 7, 8, 0xD807AA98A3030242),
- Rp512(7, 0, 1, 2, 3, 4, 5, 6, 9, 0x12835B0145706FBE),
- Rp512(6, 7, 0, 1, 2, 3, 4, 5, 10, 0x243185BE4EE4B28C),
- Rp512(5, 6, 7, 0, 1, 2, 3, 4, 11, 0x550C7DC3D5FFB4E2),
- Rp512(4, 5, 6, 7, 0, 1, 2, 3, 12, 0x72BE5D74F27B896F),
- Rp512(3, 4, 5, 6, 7, 0, 1, 2, 13, 0x80DEB1FE3B1696B1),
- Rp512(2, 3, 4, 5, 6, 7, 0, 1, 14, 0x9BDC06A725C71235),
- Rp512(1, 2, 3, 4, 5, 6, 7, 0, 15, 0xC19BF174CF692694),
- Rp512(0, 1, 2, 3, 4, 5, 6, 7, 16, 0xE49B69C19EF14AD2),
- Rp512(7, 0, 1, 2, 3, 4, 5, 6, 17, 0xEFBE4786384F25E3),
- Rp512(6, 7, 0, 1, 2, 3, 4, 5, 18, 0x0FC19DC68B8CD5B5),
- Rp512(5, 6, 7, 0, 1, 2, 3, 4, 19, 0x240CA1CC77AC9C65),
- Rp512(4, 5, 6, 7, 0, 1, 2, 3, 20, 0x2DE92C6F592B0275),
- Rp512(3, 4, 5, 6, 7, 0, 1, 2, 21, 0x4A7484AA6EA6E483),
- Rp512(2, 3, 4, 5, 6, 7, 0, 1, 22, 0x5CB0A9DCBD41FBD4),
- Rp512(1, 2, 3, 4, 5, 6, 7, 0, 23, 0x76F988DA831153B5),
- Rp512(0, 1, 2, 3, 4, 5, 6, 7, 24, 0x983E5152EE66DFAB),
- Rp512(7, 0, 1, 2, 3, 4, 5, 6, 25, 0xA831C66D2DB43210),
- Rp512(6, 7, 0, 1, 2, 3, 4, 5, 26, 0xB00327C898FB213F),
- Rp512(5, 6, 7, 0, 1, 2, 3, 4, 27, 0xBF597FC7BEEF0EE4),
- Rp512(4, 5, 6, 7, 0, 1, 2, 3, 28, 0xC6E00BF33DA88FC2),
- Rp512(3, 4, 5, 6, 7, 0, 1, 2, 29, 0xD5A79147930AA725),
- Rp512(2, 3, 4, 5, 6, 7, 0, 1, 30, 0x06CA6351E003826F),
- Rp512(1, 2, 3, 4, 5, 6, 7, 0, 31, 0x142929670A0E6E70),
- Rp512(0, 1, 2, 3, 4, 5, 6, 7, 32, 0x27B70A8546D22FFC),
- Rp512(7, 0, 1, 2, 3, 4, 5, 6, 33, 0x2E1B21385C26C926),
- Rp512(6, 7, 0, 1, 2, 3, 4, 5, 34, 0x4D2C6DFC5AC42AED),
- Rp512(5, 6, 7, 0, 1, 2, 3, 4, 35, 0x53380D139D95B3DF),
- Rp512(4, 5, 6, 7, 0, 1, 2, 3, 36, 0x650A73548BAF63DE),
- Rp512(3, 4, 5, 6, 7, 0, 1, 2, 37, 0x766A0ABB3C77B2A8),
- Rp512(2, 3, 4, 5, 6, 7, 0, 1, 38, 0x81C2C92E47EDAEE6),
- Rp512(1, 2, 3, 4, 5, 6, 7, 0, 39, 0x92722C851482353B),
- Rp512(0, 1, 2, 3, 4, 5, 6, 7, 40, 0xA2BFE8A14CF10364),
- Rp512(7, 0, 1, 2, 3, 4, 5, 6, 41, 0xA81A664BBC423001),
- Rp512(6, 7, 0, 1, 2, 3, 4, 5, 42, 0xC24B8B70D0F89791),
- Rp512(5, 6, 7, 0, 1, 2, 3, 4, 43, 0xC76C51A30654BE30),
- Rp512(4, 5, 6, 7, 0, 1, 2, 3, 44, 0xD192E819D6EF5218),
- Rp512(3, 4, 5, 6, 7, 0, 1, 2, 45, 0xD69906245565A910),
- Rp512(2, 3, 4, 5, 6, 7, 0, 1, 46, 0xF40E35855771202A),
- Rp512(1, 2, 3, 4, 5, 6, 7, 0, 47, 0x106AA07032BBD1B8),
- Rp512(0, 1, 2, 3, 4, 5, 6, 7, 48, 0x19A4C116B8D2D0C8),
- Rp512(7, 0, 1, 2, 3, 4, 5, 6, 49, 0x1E376C085141AB53),
- Rp512(6, 7, 0, 1, 2, 3, 4, 5, 50, 0x2748774CDF8EEB99),
- Rp512(5, 6, 7, 0, 1, 2, 3, 4, 51, 0x34B0BCB5E19B48A8),
- Rp512(4, 5, 6, 7, 0, 1, 2, 3, 52, 0x391C0CB3C5C95A63),
- Rp512(3, 4, 5, 6, 7, 0, 1, 2, 53, 0x4ED8AA4AE3418ACB),
- Rp512(2, 3, 4, 5, 6, 7, 0, 1, 54, 0x5B9CCA4F7763E373),
- Rp512(1, 2, 3, 4, 5, 6, 7, 0, 55, 0x682E6FF3D6B2B8A3),
- Rp512(0, 1, 2, 3, 4, 5, 6, 7, 56, 0x748F82EE5DEFB2FC),
- Rp512(7, 0, 1, 2, 3, 4, 5, 6, 57, 0x78A5636F43172F60),
- Rp512(6, 7, 0, 1, 2, 3, 4, 5, 58, 0x84C87814A1F0AB72),
- Rp512(5, 6, 7, 0, 1, 2, 3, 4, 59, 0x8CC702081A6439EC),
- Rp512(4, 5, 6, 7, 0, 1, 2, 3, 60, 0x90BEFFFA23631E28),
- Rp512(3, 4, 5, 6, 7, 0, 1, 2, 61, 0xA4506CEBDE82BDE9),
- Rp512(2, 3, 4, 5, 6, 7, 0, 1, 62, 0xBEF9A3F7B2C67915),
- Rp512(1, 2, 3, 4, 5, 6, 7, 0, 63, 0xC67178F2E372532B),
- Rp512(0, 1, 2, 3, 4, 5, 6, 7, 64, 0xCA273ECEEA26619C),
- Rp512(7, 0, 1, 2, 3, 4, 5, 6, 65, 0xD186B8C721C0C207),
- Rp512(6, 7, 0, 1, 2, 3, 4, 5, 66, 0xEADA7DD6CDE0EB1E),
- Rp512(5, 6, 7, 0, 1, 2, 3, 4, 67, 0xF57D4F7FEE6ED178),
- Rp512(4, 5, 6, 7, 0, 1, 2, 3, 68, 0x06F067AA72176FBA),
- Rp512(3, 4, 5, 6, 7, 0, 1, 2, 69, 0x0A637DC5A2C898A6),
- Rp512(2, 3, 4, 5, 6, 7, 0, 1, 70, 0x113F9804BEF90DAE),
- Rp512(1, 2, 3, 4, 5, 6, 7, 0, 71, 0x1B710B35131C471B),
- Rp512(0, 1, 2, 3, 4, 5, 6, 7, 72, 0x28DB77F523047D84),
- Rp512(7, 0, 1, 2, 3, 4, 5, 6, 73, 0x32CAAB7B40C72493),
- Rp512(6, 7, 0, 1, 2, 3, 4, 5, 74, 0x3C9EBE0A15C9BEBC),
- Rp512(5, 6, 7, 0, 1, 2, 3, 4, 75, 0x431D67C49C100D4C),
- Rp512(4, 5, 6, 7, 0, 1, 2, 3, 76, 0x4CC5D4BECB3E42B6),
- Rp512(3, 4, 5, 6, 7, 0, 1, 2, 77, 0x597F299CFC657E2A),
- Rp512(2, 3, 4, 5, 6, 7, 0, 1, 78, 0x5FCB6FAB3AD6FAEC),
- Rp512(1, 2, 3, 4, 5, 6, 7, 0, 79, 0x6C44198C4A475817),
- };
- inline for (round0) |r| {
- v[r.h] =
- v[r.h] +%
- (math.rotr(u64, v[r.e], u64(14)) ^ math.rotr(u64, v[r.e], u64(18)) ^ math.rotr(u64, v[r.e], u64(41))) +%
- (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +%
- r.k +% s[r.i];
-
- v[r.d] = v[r.d] +% v[r.h];
-
- v[r.h] =
- v[r.h] +%
- (math.rotr(u64, v[r.a], u64(28)) ^ math.rotr(u64, v[r.a], u64(34)) ^ math.rotr(u64, v[r.a], u64(39))) +%
- ((v[r.a] & (v[r.b] | v[r.c])) | (v[r.b] & v[r.c]));
- }
-
- d.s[0] +%= v[0];
- d.s[1] +%= v[1];
- d.s[2] +%= v[2];
- d.s[3] +%= v[3];
- d.s[4] +%= v[4];
- d.s[5] +%= v[5];
- d.s[6] +%= v[6];
- d.s[7] +%= v[7];
- }
-};}
+ };
+}
test "sha384 single" {
const h1 = "38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b";
@@ -680,7 +715,7 @@ test "sha512 streaming" {
}
test "sha512 aligned final" {
- var block = []u8 {0} ** Sha512.block_size;
+ var block = []u8{0} ** Sha512.block_size;
var out: [Sha512.digest_size]u8 = undefined;
var h = Sha512.init();
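For reference, the Sha2 API touched above (hash/init/update/final plus the digest_size and block_size constants) can be exercised as below. This is only an illustrative sketch in the patch's post-change style; the relative import path is an assumption, mirroring how throughput_test.zig imports its hash function elsewhere in this patch.

    const std = @import("std");
    const Sha256 = @import("sha2.zig").Sha256; // assumed relative import

    test "sha256 one-shot matches streaming" {
        var one_shot: [Sha256.digest_size]u8 = undefined;
        var streamed: [Sha256.digest_size]u8 = undefined;

        // hash() is the one-shot form: it runs init/update/final internally.
        Sha256.hash("abc", one_shot[0..]);

        // The streaming form may split the input across any number of update() calls.
        var h = Sha256.init();
        h.update("a");
        h.update("bc");
        h.final(streamed[0..]);

        std.debug.assert(std.mem.eql(u8, one_shot[0..], streamed[0..]));
    }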
diff --git a/std/crypto/sha3.zig b/std/crypto/sha3.zig
index f92f56d68f..ae02d7a482 100644
--- a/std/crypto/sha3.zig
+++ b/std/crypto/sha3.zig
@@ -10,148 +10,228 @@ pub const Sha3_256 = Keccak(256, 0x06);
pub const Sha3_384 = Keccak(384, 0x06);
pub const Sha3_512 = Keccak(512, 0x06);
-fn Keccak(comptime bits: usize, comptime delim: u8) type { return struct {
- const Self = this;
- const block_size = 200;
- const digest_size = bits / 8;
+fn Keccak(comptime bits: usize, comptime delim: u8) type {
+ return struct {
+ const Self = this;
+ const block_size = 200;
+ const digest_size = bits / 8;
- s: [200]u8,
- offset: usize,
- rate: usize,
+ s: [200]u8,
+ offset: usize,
+ rate: usize,
- pub fn init() Self {
- var d: Self = undefined;
- d.reset();
- return d;
- }
+ pub fn init() Self {
+ var d: Self = undefined;
+ d.reset();
+ return d;
+ }
- pub fn reset(d: &Self) void {
- mem.set(u8, d.s[0..], 0);
- d.offset = 0;
- d.rate = 200 - (bits / 4);
- }
+ pub fn reset(d: *Self) void {
+ mem.set(u8, d.s[0..], 0);
+ d.offset = 0;
+ d.rate = 200 - (bits / 4);
+ }
- pub fn hash(b: []const u8, out: []u8) void {
- var d = Self.init();
- d.update(b);
- d.final(out);
- }
+ pub fn hash(b: []const u8, out: []u8) void {
+ var d = Self.init();
+ d.update(b);
+ d.final(out);
+ }
- pub fn update(d: &Self, b: []const u8) void {
- var ip: usize = 0;
- var len = b.len;
- var rate = d.rate - d.offset;
- var offset = d.offset;
+ pub fn update(d: *Self, b: []const u8) void {
+ var ip: usize = 0;
+ var len = b.len;
+ var rate = d.rate - d.offset;
+ var offset = d.offset;
- // absorb
- while (len >= rate) {
- for (d.s[offset .. offset + rate]) |*r, i|
- *r ^= b[ip..][i];
+ // absorb
+ while (len >= rate) {
+ for (d.s[offset .. offset + rate]) |*r, i|
+ r.* ^= b[ip..][i];
+
+ keccak_f(1600, d.s[0..]);
+
+ ip += rate;
+ len -= rate;
+ rate = d.rate;
+ offset = 0;
+ }
+
+ for (d.s[offset .. offset + len]) |*r, i|
+ r.* ^= b[ip..][i];
+
+ d.offset = offset + len;
+ }
+
+ pub fn final(d: *Self, out: []u8) void {
+ // padding
+ d.s[d.offset] ^= delim;
+ d.s[d.rate - 1] ^= 0x80;
keccak_f(1600, d.s[0..]);
- ip += rate;
- len -= rate;
- rate = d.rate;
- offset = 0;
+ // squeeze
+ var op: usize = 0;
+ var len: usize = bits / 8;
+
+ while (len >= d.rate) {
+ mem.copy(u8, out[op..], d.s[0..d.rate]);
+ keccak_f(1600, d.s[0..]);
+ op += d.rate;
+ len -= d.rate;
+ }
+
+ mem.copy(u8, out[op..], d.s[0..len]);
}
+ };
+}
- for (d.s[offset .. offset + len]) |*r, i|
- *r ^= b[ip..][i];
-
- d.offset = offset + len;
- }
-
- pub fn final(d: &Self, out: []u8) void {
- // padding
- d.s[d.offset] ^= delim;
- d.s[d.rate - 1] ^= 0x80;
-
- keccak_f(1600, d.s[0..]);
-
- // squeeze
- var op: usize = 0;
- var len: usize = bits / 8;
-
- while (len >= d.rate) {
- mem.copy(u8, out[op..], d.s[0..d.rate]);
- keccak_f(1600, d.s[0..]);
- op += d.rate;
- len -= d.rate;
- }
-
- mem.copy(u8, out[op..], d.s[0..len]);
- }
-};}
-
-const RC = []const u64 {
- 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, 0x8000000080008000,
- 0x000000000000808b, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009,
- 0x000000000000008a, 0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
- 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, 0x8000000000008003,
- 0x8000000000008002, 0x8000000000000080, 0x000000000000800a, 0x800000008000000a,
- 0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008,
+const RC = []const u64{
+ 0x0000000000000001,
+ 0x0000000000008082,
+ 0x800000000000808a,
+ 0x8000000080008000,
+ 0x000000000000808b,
+ 0x0000000080000001,
+ 0x8000000080008081,
+ 0x8000000000008009,
+ 0x000000000000008a,
+ 0x0000000000000088,
+ 0x0000000080008009,
+ 0x000000008000000a,
+ 0x000000008000808b,
+ 0x800000000000008b,
+ 0x8000000000008089,
+ 0x8000000000008003,
+ 0x8000000000008002,
+ 0x8000000000000080,
+ 0x000000000000800a,
+ 0x800000008000000a,
+ 0x8000000080008081,
+ 0x8000000000008080,
+ 0x0000000080000001,
+ 0x8000000080008008,
};
-const ROTC = []const usize {
- 1, 3, 6, 10, 15, 21, 28, 36,
- 45, 55, 2, 14, 27, 41, 56, 8,
- 25, 43, 62, 18, 39, 61, 20, 44
+const ROTC = []const usize{
+ 1,
+ 3,
+ 6,
+ 10,
+ 15,
+ 21,
+ 28,
+ 36,
+ 45,
+ 55,
+ 2,
+ 14,
+ 27,
+ 41,
+ 56,
+ 8,
+ 25,
+ 43,
+ 62,
+ 18,
+ 39,
+ 61,
+ 20,
+ 44,
};
-const PIL = []const usize {
- 10, 7, 11, 17, 18, 3, 5, 16,
- 8, 21, 24, 4, 15, 23, 19, 13,
- 12, 2, 20, 14, 22, 9, 6, 1
+const PIL = []const usize{
+ 10,
+ 7,
+ 11,
+ 17,
+ 18,
+ 3,
+ 5,
+ 16,
+ 8,
+ 21,
+ 24,
+ 4,
+ 15,
+ 23,
+ 19,
+ 13,
+ 12,
+ 2,
+ 20,
+ 14,
+ 22,
+ 9,
+ 6,
+ 1,
};
-const M5 = []const usize {
- 0, 1, 2, 3, 4, 0, 1, 2, 3, 4
+const M5 = []const usize{
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
};
fn keccak_f(comptime F: usize, d: []u8) void {
debug.assert(d.len == F / 8);
const B = F / 25;
- const no_rounds = comptime x: { break :x 12 + 2 * math.log2(B); };
+ const no_rounds = comptime x: {
+ break :x 12 + 2 * math.log2(B);
+ };
- var s = []const u64 {0} ** 25;
- var t = []const u64 {0} ** 1;
- var c = []const u64 {0} ** 5;
+ var s = []const u64{0} ** 25;
+ var t = []const u64{0} ** 1;
+ var c = []const u64{0} ** 5;
for (s) |*r, i| {
- *r = mem.readIntLE(u64, d[8*i .. 8*i + 8]);
+ r.* = mem.readIntLE(u64, d[8 * i .. 8 * i + 8]);
}
comptime var x: usize = 0;
comptime var y: usize = 0;
for (RC[0..no_rounds]) |round| {
// theta
- x = 0; inline while (x < 5) : (x += 1) {
- c[x] = s[x] ^ s[x+5] ^ s[x+10] ^ s[x+15] ^ s[x+20];
+ x = 0;
+ inline while (x < 5) : (x += 1) {
+ c[x] = s[x] ^ s[x + 5] ^ s[x + 10] ^ s[x + 15] ^ s[x + 20];
}
- x = 0; inline while (x < 5) : (x += 1) {
- t[0] = c[M5[x+4]] ^ math.rotl(u64, c[M5[x+1]], usize(1));
- y = 0; inline while (y < 5) : (y += 1) {
- s[x + y*5] ^= t[0];
+ x = 0;
+ inline while (x < 5) : (x += 1) {
+ t[0] = c[M5[x + 4]] ^ math.rotl(u64, c[M5[x + 1]], usize(1));
+ y = 0;
+ inline while (y < 5) : (y += 1) {
+ s[x + y * 5] ^= t[0];
}
}
// rho+pi
t[0] = s[1];
- x = 0; inline while (x < 24) : (x += 1) {
+ x = 0;
+ inline while (x < 24) : (x += 1) {
c[0] = s[PIL[x]];
s[PIL[x]] = math.rotl(u64, t[0], ROTC[x]);
t[0] = c[0];
}
// chi
- y = 0; inline while (y < 5) : (y += 1) {
- x = 0; inline while (x < 5) : (x += 1) {
- c[x] = s[x + y*5];
+ y = 0;
+ inline while (y < 5) : (y += 1) {
+ x = 0;
+ inline while (x < 5) : (x += 1) {
+ c[x] = s[x + y * 5];
}
- x = 0; inline while (x < 5) : (x += 1) {
- s[x + y*5] = c[x] ^ (~c[M5[x+1]] & c[M5[x+2]]);
+ x = 0;
+ inline while (x < 5) : (x += 1) {
+ s[x + y * 5] = c[x] ^ (~c[M5[x + 1]] & c[M5[x + 2]]);
}
}
@@ -160,11 +240,10 @@ fn keccak_f(comptime F: usize, d: []u8) void {
}
for (s) |r, i| {
- mem.writeInt(d[8*i .. 8*i + 8], r, builtin.Endian.Little);
+ mem.writeInt(d[8 * i .. 8 * i + 8], r, builtin.Endian.Little);
}
}
-
test "sha3-224 single" {
htest.assertEqualHash(Sha3_224, "6b4e03423667dbb73b6e15454f0eb1abd4597f9a1b078e3f5b5a6bc7", "");
htest.assertEqualHash(Sha3_224, "e642824c3f8cf24ad09234ee7d3c766fc9a3a5168d0c94ad73b46fdf", "abc");
@@ -192,7 +271,7 @@ test "sha3-224 streaming" {
}
test "sha3-256 single" {
- htest.assertEqualHash(Sha3_256, "a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a" , "");
+ htest.assertEqualHash(Sha3_256, "a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a", "");
htest.assertEqualHash(Sha3_256, "3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532", "abc");
htest.assertEqualHash(Sha3_256, "916f6061fe879741ca6469b43971dfdb28b1a32dc36cb3254e812be27aad1d18", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
}
@@ -218,7 +297,7 @@ test "sha3-256 streaming" {
}
test "sha3-256 aligned final" {
- var block = []u8 {0} ** Sha3_256.block_size;
+ var block = []u8{0} ** Sha3_256.block_size;
var out: [Sha3_256.digest_size]u8 = undefined;
var h = Sha3_256.init();
@@ -228,7 +307,7 @@ test "sha3-256 aligned final" {
test "sha3-384 single" {
const h1 = "0c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2ac3713831264adb47fb6bd1e058d5f004";
- htest.assertEqualHash(Sha3_384, h1 , "");
+ htest.assertEqualHash(Sha3_384, h1, "");
const h2 = "ec01498288516fc926459f58e2c6ad8df9b473cb0fc08c2596da7cf0e49be4b298d88cea927ac7f539f1edf228376d25";
htest.assertEqualHash(Sha3_384, h2, "abc");
const h3 = "79407d3b5916b59c3e30b09822974791c313fb9ecc849e406f23592d04f625dc8c709b98b43b3852b337216179aa7fc7";
@@ -259,7 +338,7 @@ test "sha3-384 streaming" {
test "sha3-512 single" {
const h1 = "a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26";
- htest.assertEqualHash(Sha3_512, h1 , "");
+ htest.assertEqualHash(Sha3_512, h1, "");
const h2 = "b751850b1a57168a5693cd924b6b096e08f621827444f70d884f5d0240d2712e10e116e9192af3c91a7ec57647e3934057340b4cf408d5a56592f8274eec53f0";
htest.assertEqualHash(Sha3_512, h2, "abc");
const h3 = "afebb2ef542e6579c50cad06d2e578f9f8dd6881d7dc824d26360feebf18a4fa73e3261122948efcfd492e74e82e2189ed0fb440d187f382270cb455f21dd185";
@@ -289,7 +368,7 @@ test "sha3-512 streaming" {
}
test "sha3-512 aligned final" {
- var block = []u8 {0} ** Sha3_512.block_size;
+ var block = []u8{0} ** Sha3_512.block_size;
var out: [Sha3_512.digest_size]u8 = undefined;
var h = Sha3_512.init();
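The Keccak changes above keep the same sponge structure: update() XORs input into the state and runs keccak_f once per rate-sized block, and final() applies the delim/0x80 padding before squeezing out the digest. A minimal sketch of the sizes involved, with the relative import assumed for illustration:

    const std = @import("std");
    const Sha3_256 = @import("sha3.zig").Sha3_256; // assumed relative import

    test "sha3-256 sponge sizes" {
        // With bits = 256: rate = 200 - 256/4 = 136 bytes absorbed per keccak_f call,
        // and digest_size = 256/8 = 32 bytes written by final().
        var out: [Sha3_256.digest_size]u8 = undefined;
        Sha3_256.hash("abc", out[0..]);
        std.debug.assert(out.len == 32);
    }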
diff --git a/std/crypto/test.zig b/std/crypto/test.zig
index e41c6a7a2d..3fa24272e5 100644
--- a/std/crypto/test.zig
+++ b/std/crypto/test.zig
@@ -14,9 +14,8 @@ pub fn assertEqualHash(comptime Hasher: var, comptime expected: []const u8, inpu
pub fn assertEqual(comptime expected: []const u8, input: []const u8) void {
var expected_bytes: [expected.len / 2]u8 = undefined;
for (expected_bytes) |*r, i| {
- *r = fmt.parseInt(u8, expected[2*i .. 2*i+2], 16) catch unreachable;
+ r.* = fmt.parseInt(u8, expected[2 * i .. 2 * i + 2], 16) catch unreachable;
}
debug.assert(mem.eql(u8, expected_bytes, input));
}
-
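As a concrete example of what assertEqual above checks: each pair of hex digits is parsed into one byte, so a hypothetical call assertEqual("616263", "abc") passes, because 0x61, 0x62 and 0x63 are the ASCII codes of 'a', 'b' and 'c'.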
diff --git a/std/crypto/throughput_test.zig b/std/crypto/throughput_test.zig
index 60610411b5..c21838e607 100644
--- a/std/crypto/throughput_test.zig
+++ b/std/crypto/throughput_test.zig
@@ -1,22 +1,17 @@
// Modify the HashFunction variable to the one wanted to test.
//
-// NOTE: The throughput measurement may be slightly lower than other measurements since we run
-// through our block alignment functions as well. Be aware when comparing against other tests.
-//
// ```
-// zig build-exe --release-fast --library c throughput_test.zig
+// zig build-exe --release-fast throughput_test.zig
// ./throughput_test
// ```
-const HashFunction = @import("md5.zig").Md5;
-const BytesToHash = 1024 * Mb;
const std = @import("std");
+const time = std.os.time;
+const Timer = time.Timer;
+const HashFunction = @import("md5.zig").Md5;
-const c = @cImport({
- @cInclude("time.h");
-});
-
-const Mb = 1024 * 1024;
+const MiB = 1024 * 1024;
+const BytesToHash = 1024 * MiB;
pub fn main() !void {
var stdout_file = try std.io.getStdOut();
@@ -29,15 +24,15 @@ pub fn main() !void {
var h = HashFunction.init();
var offset: usize = 0;
- const start = c.clock();
+ var timer = try Timer.start();
+ const start = timer.lap();
while (offset < BytesToHash) : (offset += block.len) {
h.update(block[0..]);
}
- const end = c.clock();
+ const end = timer.read();
- const elapsed_s = f64((end - start) * c.CLOCKS_PER_SEC) / 1000000;
- const throughput = u64(BytesToHash / elapsed_s);
+ const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
+ const throughput = @floatToInt(u64, BytesToHash / elapsed_s);
- try stdout.print("{}: ", @typeName(HashFunction));
- try stdout.print("{} Mb/s\n", throughput);
+ try stdout.print("{}: {} MiB/s\n", @typeName(HashFunction), throughput / (1 * MiB));
}
diff --git a/std/cstr.zig b/std/cstr.zig
index d396dcbce3..e83d5a39e9 100644
--- a/std/cstr.zig
+++ b/std/cstr.zig
@@ -9,14 +9,13 @@ pub const line_sep = switch (builtin.os) {
else => "\n",
};
-
-pub fn len(ptr: &const u8) usize {
+pub fn len(ptr: [*]const u8) usize {
var count: usize = 0;
while (ptr[count] != 0) : (count += 1) {}
return count;
}
-pub fn cmp(a: &const u8, b: &const u8) i8 {
+pub fn cmp(a: [*]const u8, b: [*]const u8) i8 {
var index: usize = 0;
while (a[index] == b[index] and a[index] != 0) : (index += 1) {}
if (a[index] > b[index]) {
@@ -28,11 +27,11 @@ pub fn cmp(a: &const u8, b: &const u8) i8 {
}
}
-pub fn toSliceConst(str: &const u8) []const u8 {
+pub fn toSliceConst(str: [*]const u8) []const u8 {
return str[0..len(str)];
}
-pub fn toSlice(str: &u8) []u8 {
+pub fn toSlice(str: [*]u8) []u8 {
return str[0..len(str)];
}
@@ -48,7 +47,7 @@ fn testCStrFnsImpl() void {
/// Returns a mutable slice with 1 more byte of length which is a null byte.
/// Caller owns the returned memory.
-pub fn addNullByte(allocator: &mem.Allocator, slice: []const u8) ![]u8 {
+pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![]u8 {
const result = try allocator.alloc(u8, slice.len + 1);
mem.copy(u8, result, slice);
result[slice.len] = 0;
@@ -56,13 +55,13 @@ pub fn addNullByte(allocator: &mem.Allocator, slice: []const u8) ![]u8 {
}
pub const NullTerminated2DArray = struct {
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
byte_count: usize,
- ptr: ?&?&u8,
+ ptr: ?[*]?[*]u8,
/// Takes N lists of strings, concatenates the lists together, and adds a null terminator
/// Caller must deinit result
- pub fn fromSlices(allocator: &mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
+ pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
var new_len: usize = 1; // 1 for the list null
var byte_count: usize = 0;
for (slices) |slice| {
@@ -76,16 +75,16 @@ pub const NullTerminated2DArray = struct {
const index_size = @sizeOf(usize) * new_len; // size of the ptrs
byte_count += index_size;
- const buf = try allocator.alignedAlloc(u8, @alignOf(?&u8), byte_count);
+ const buf = try allocator.alignedAlloc(u8, @alignOf(?*u8), byte_count);
errdefer allocator.free(buf);
var write_index = index_size;
- const index_buf = ([]?&u8)(buf);
+ const index_buf = @bytesToSlice(?[*]u8, buf);
var i: usize = 0;
for (slices) |slice| {
for (slice) |inner| {
- index_buf[i] = &buf[write_index];
+ index_buf[i] = buf.ptr + write_index;
i += 1;
mem.copy(u8, buf[write_index..], inner);
write_index += inner.len;
@@ -95,16 +94,15 @@ pub const NullTerminated2DArray = struct {
}
index_buf[i] = null;
- return NullTerminated2DArray {
+ return NullTerminated2DArray{
.allocator = allocator,
.byte_count = byte_count,
- .ptr = @ptrCast(?&?&u8, buf.ptr),
+ .ptr = @ptrCast(?[*]?[*]u8, buf.ptr),
};
}
- pub fn deinit(self: &NullTerminated2DArray) void {
- const buf = @ptrCast(&u8, self.ptr);
+ pub fn deinit(self: *NullTerminated2DArray) void {
+ const buf = @ptrCast([*]u8, self.ptr);
self.allocator.free(buf[0..self.byte_count]);
}
};
-
diff --git a/std/debug/failing_allocator.zig b/std/debug/failing_allocator.zig
index f876b7902d..e16dd21db4 100644
--- a/std/debug/failing_allocator.zig
+++ b/std/debug/failing_allocator.zig
@@ -7,20 +7,20 @@ pub const FailingAllocator = struct {
allocator: mem.Allocator,
index: usize,
fail_index: usize,
- internal_allocator: &mem.Allocator,
+ internal_allocator: *mem.Allocator,
allocated_bytes: usize,
freed_bytes: usize,
deallocations: usize,
- pub fn init(allocator: &mem.Allocator, fail_index: usize) FailingAllocator {
- return FailingAllocator {
+ pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator {
+ return FailingAllocator{
.internal_allocator = allocator,
.fail_index = fail_index,
.index = 0,
.allocated_bytes = 0,
.freed_bytes = 0,
.deallocations = 0,
- .allocator = mem.Allocator {
+ .allocator = mem.Allocator{
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
@@ -28,7 +28,7 @@ pub const FailingAllocator = struct {
};
}
- fn alloc(allocator: &mem.Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *mem.Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (self.index == self.fail_index) {
return error.OutOfMemory;
@@ -39,7 +39,7 @@ pub const FailingAllocator = struct {
return result;
}
- fn realloc(allocator: &mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (new_size <= old_mem.len) {
self.freed_bytes += old_mem.len - new_size;
@@ -55,7 +55,7 @@ pub const FailingAllocator = struct {
return result;
}
- fn free(allocator: &mem.Allocator, bytes: []u8) void {
+ fn free(allocator: *mem.Allocator, bytes: []u8) void {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
self.freed_bytes += bytes.len;
self.deallocations += 1;
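The FailingAllocator changes above are purely syntactic (& to * pointers, struct-literal formatting); its behaviour is unchanged: it forwards to a wrapped allocator and returns error.OutOfMemory once its allocation index reaches fail_index. A hedged sketch of how it is meant to be driven, using the FixedBufferAllocator and assertError that appear elsewhere in this patch (the test name, buffer size, and allocation sizes are illustrative):

    const std = @import("std");
    const FailingAllocator = std.debug.FailingAllocator;

    test "failing allocator rejects the second allocation" {
        var bytes: [256]u8 = undefined;
        var fixed = std.heap.FixedBufferAllocator.init(bytes[0..]);

        // fail_index = 1: allocation 0 succeeds, allocation 1 returns error.OutOfMemory.
        var failing = FailingAllocator.init(&fixed.allocator, 1);
        const allocator = &failing.allocator;

        const first = allocator.alloc(u8, 16) catch unreachable;
        std.debug.assertError(allocator.alloc(u8, 16), error.OutOfMemory);
        allocator.free(first);
    }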
diff --git a/std/debug/index.zig b/std/debug/index.zig
index a573dc5549..ab50d79db3 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -10,18 +10,24 @@ const ArrayList = std.ArrayList;
const builtin = @import("builtin");
pub const FailingAllocator = @import("failing_allocator.zig").FailingAllocator;
+pub const failing_allocator = FailingAllocator.init(global_allocator, 0);
+
+pub const runtime_safety = switch (builtin.mode) {
+ builtin.Mode.Debug, builtin.Mode.ReleaseSafe => true,
+ builtin.Mode.ReleaseFast, builtin.Mode.ReleaseSmall => false,
+};
/// Tries to write to stderr, unbuffered, and ignores any error returned.
/// Does not append a newline.
/// TODO atomic/multithread support
var stderr_file: os.File = undefined;
var stderr_file_out_stream: io.FileOutStream = undefined;
-var stderr_stream: ?&io.OutStream(io.FileOutStream.Error) = null;
+var stderr_stream: ?*io.OutStream(io.FileOutStream.Error) = null;
pub fn warn(comptime fmt: []const u8, args: ...) void {
const stderr = getStderrStream() catch return;
stderr.print(fmt, args) catch return;
}
-fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) {
+pub fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) {
if (stderr_stream) |st| {
return st;
} else {
@@ -33,17 +39,23 @@ fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) {
}
}
-var self_debug_info: ?&ElfStackTrace = null;
-pub fn getSelfDebugInfo() !&ElfStackTrace {
+var self_debug_info: ?*ElfStackTrace = null;
+pub fn getSelfDebugInfo() !*ElfStackTrace {
if (self_debug_info) |info| {
return info;
} else {
- const info = try openSelfDebugInfo(global_allocator);
+ const info = try openSelfDebugInfo(getDebugInfoAllocator());
self_debug_info = info;
return info;
}
}
+fn wantTtyColor() bool {
+ var bytes: [128]u8 = undefined;
+ const allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
+ return if (std.os.getEnvVarOwned(allocator, "ZIG_DEBUG_COLOR")) |_| true else |_| stderr_file.isTty();
+}
+
/// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned.
pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
const stderr = getStderrStream() catch return;
@@ -51,20 +63,20 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", @errorName(err)) catch return;
return;
};
- writeCurrentStackTrace(stderr, global_allocator, debug_info, stderr_file.isTty(), start_addr) catch |err| {
+ writeCurrentStackTrace(stderr, getDebugInfoAllocator(), debug_info, wantTtyColor(), start_addr) catch |err| {
stderr.print("Unable to dump stack trace: {}\n", @errorName(err)) catch return;
return;
};
}
/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned.
-pub fn dumpStackTrace(stack_trace: &const builtin.StackTrace) void {
+pub fn dumpStackTrace(stack_trace: *const builtin.StackTrace) void {
const stderr = getStderrStream() catch return;
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", @errorName(err)) catch return;
return;
};
- writeStackTrace(stack_trace, stderr, global_allocator, debug_info, stderr_file.isTty()) catch |err| {
+ writeStackTrace(stack_trace, stderr, getDebugInfoAllocator(), debug_info, wantTtyColor()) catch |err| {
stderr.print("Unable to dump stack trace: {}\n", @errorName(err)) catch return;
return;
};
@@ -88,6 +100,16 @@ pub fn assert(ok: bool) void {
}
}
+/// TODO: add `==` operator for `error_union == error_set`, and then
+/// remove this function
+pub fn assertError(value: var, expected_error: error) void {
+ if (value) {
+ @panic("expected error");
+ } else |actual_error| {
+ assert(actual_error == expected_error);
+ }
+}
+
/// Call this function when you want to panic if the condition is not true.
/// If `ok` is `false`, this function will panic in every release mode.
pub fn assertOrPanic(ok: bool) void {
@@ -104,9 +126,7 @@ pub fn panic(comptime format: []const u8, args: ...) noreturn {
var panicking: u8 = 0; // TODO make this a bool
-pub fn panicExtra(trace: ?&const builtin.StackTrace, first_trace_addr: ?usize,
- comptime format: []const u8, args: ...) noreturn
-{
+pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn {
@setCold(true);
if (@atomicRmw(u8, &panicking, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 1) {
@@ -132,9 +152,7 @@ const WHITE = "\x1b[37;1m";
const DIM = "\x1b[2m";
const RESET = "\x1b[0m";
-pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var, allocator: &mem.Allocator,
- debug_info: &ElfStackTrace, tty_color: bool) !void
-{
+pub fn writeStackTrace(stack_trace: *const builtin.StackTrace, out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool) !void {
var frame_index: usize = undefined;
var frames_left: usize = undefined;
if (stack_trace.index < stack_trace.instruction_addresses.len) {
@@ -150,13 +168,21 @@ pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var,
frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len;
}) {
const return_address = stack_trace.instruction_addresses[frame_index];
- try printSourceAtAddress(debug_info, out_stream, return_address);
+ try printSourceAtAddress(debug_info, out_stream, return_address, tty_color);
}
}
-pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator,
- debug_info: &ElfStackTrace, tty_color: bool, start_addr: ?usize) !void
-{
+pub inline fn getReturnAddress(frame_count: usize) usize {
+ var fp = @ptrToInt(@frameAddress());
+ var i: usize = 0;
+ while (fp != 0 and i < frame_count) {
+ fp = @intToPtr(*const usize, fp).*;
+ i += 1;
+ }
+ return @intToPtr(*const usize, fp + @sizeOf(usize)).*;
+}
+
+pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool, start_addr: ?usize) !void {
const AddressState = union(enum) {
NotLookingForStartAddress,
LookingForStartAddress: usize,
@@ -166,14 +192,14 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator,
// else AddressState.NotLookingForStartAddress;
var addr_state: AddressState = undefined;
if (start_addr) |addr| {
- addr_state = AddressState { .LookingForStartAddress = addr };
+ addr_state = AddressState{ .LookingForStartAddress = addr };
} else {
addr_state = AddressState.NotLookingForStartAddress;
}
var fp = @ptrToInt(@frameAddress());
- while (fp != 0) : (fp = *@intToPtr(&const usize, fp)) {
- const return_address = *@intToPtr(&const usize, fp + @sizeOf(usize));
+ while (fp != 0) : (fp = @intToPtr(*const usize, fp).*) {
+ const return_address = @intToPtr(*const usize, fp + @sizeOf(usize)).*;
switch (addr_state) {
AddressState.NotLookingForStartAddress => {},
@@ -185,13 +211,11 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator,
}
},
}
- try printSourceAtAddress(debug_info, out_stream, return_address);
+ try printSourceAtAddress(debug_info, out_stream, return_address, tty_color);
}
}
-fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: usize) !void {
- const ptr_hex = "0x{x}";
-
+pub fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: usize, tty_color: bool) !void {
switch (builtin.os) {
builtin.Os.windows => return error.UnsupportedDebugInfo,
builtin.Os.macosx => {
@@ -200,41 +224,63 @@ fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: us
// in practice because the compiler dumps everything in a single
// object file. Future improvement: use external dSYM data when
// available.
- const unknown = macho.Symbol { .name = "???", .address = address };
- const symbol = debug_info.symbol_table.search(address) ?? &unknown;
- try out_stream.print(WHITE ++ "{}" ++ RESET ++ ": " ++
- DIM ++ ptr_hex ++ " in ??? (???)" ++ RESET ++ "\n",
- symbol.name, address);
+ const unknown = macho.Symbol{
+ .name = "???",
+ .address = address,
+ };
+ const symbol = debug_info.symbol_table.search(address) orelse &unknown;
+ try out_stream.print(WHITE ++ "{}" ++ RESET ++ ": " ++ DIM ++ "0x{x}" ++ " in ??? (???)" ++ RESET ++ "\n", symbol.name, address);
},
else => {
const compile_unit = findCompileUnit(debug_info, address) catch {
- try out_stream.print("???:?:?: " ++ DIM ++ ptr_hex ++ " in ??? (???)" ++ RESET ++ "\n ???\n\n",
- address);
+ if (tty_color) {
+ try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in ??? (???)" ++ RESET ++ "\n ???\n\n", address);
+ } else {
+ try out_stream.print("???:?:?: 0x{x} in ??? (???)\n ???\n\n", address);
+ }
return;
};
const compile_unit_name = try compile_unit.die.getAttrString(debug_info, DW.AT_name);
if (getLineNumberInfo(debug_info, compile_unit, address - 1)) |line_info| {
defer line_info.deinit();
- try out_stream.print(WHITE ++ "{}:{}:{}" ++ RESET ++ ": " ++
- DIM ++ ptr_hex ++ " in ??? ({})" ++ RESET ++ "\n",
- line_info.file_name, line_info.line, line_info.column,
- address, compile_unit_name);
- if (printLineFromFile(debug_info.allocator(), out_stream, line_info)) {
- if (line_info.column == 0) {
- try out_stream.write("\n");
- } else {
- {var col_i: usize = 1; while (col_i < line_info.column) : (col_i += 1) {
- try out_stream.writeByte(' ');
- }}
- try out_stream.write(GREEN ++ "^" ++ RESET ++ "\n");
+ if (tty_color) {
+ try out_stream.print(
+ WHITE ++ "{}:{}:{}" ++ RESET ++ ": " ++ DIM ++ "0x{x} in ??? ({})" ++ RESET ++ "\n",
+ line_info.file_name,
+ line_info.line,
+ line_info.column,
+ address,
+ compile_unit_name,
+ );
+ if (printLineFromFile(debug_info.allocator(), out_stream, line_info)) {
+ if (line_info.column == 0) {
+ try out_stream.write("\n");
+ } else {
+ {
+ var col_i: usize = 1;
+ while (col_i < line_info.column) : (col_i += 1) {
+ try out_stream.writeByte(' ');
+ }
+ }
+ try out_stream.write(GREEN ++ "^" ++ RESET ++ "\n");
+ }
+ } else |err| switch (err) {
+ error.EndOfFile => {},
+ else => return err,
}
- } else |err| switch (err) {
- error.EndOfFile => {},
- else => return err,
+ } else {
+ try out_stream.print(
+ "{}:{}:{}: 0x{x} in ??? ({})\n",
+ line_info.file_name,
+ line_info.line,
+ line_info.column,
+ address,
+ compile_unit_name,
+ );
}
} else |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
- try out_stream.print(ptr_hex ++ " in ??? ({})\n", address, compile_unit_name);
+ try out_stream.print("0x{x} in ??? ({})\n", address, compile_unit_name);
},
else => return err,
}
@@ -242,12 +288,10 @@ fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: us
}
}
-pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace {
+pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
switch (builtin.object_format) {
builtin.ObjectFormat.elf => {
- const st = try allocator.create(ElfStackTrace);
- errdefer allocator.destroy(st);
- *st = ElfStackTrace {
+ const st = try allocator.create(ElfStackTrace{
.self_exe_file = undefined,
.elf = undefined,
.debug_info = undefined,
@@ -257,17 +301,18 @@ pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace {
.debug_ranges = null,
.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator),
.compile_unit_list = ArrayList(CompileUnit).init(allocator),
- };
+ });
+ errdefer allocator.destroy(st);
st.self_exe_file = try os.openSelfExe();
errdefer st.self_exe_file.close();
try st.elf.openFile(allocator, &st.self_exe_file);
errdefer st.elf.close();
- st.debug_info = (try st.elf.findSection(".debug_info")) ?? return error.MissingDebugInfo;
- st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) ?? return error.MissingDebugInfo;
- st.debug_str = (try st.elf.findSection(".debug_str")) ?? return error.MissingDebugInfo;
- st.debug_line = (try st.elf.findSection(".debug_line")) ?? return error.MissingDebugInfo;
+ st.debug_info = (try st.elf.findSection(".debug_info")) orelse return error.MissingDebugInfo;
+ st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) orelse return error.MissingDebugInfo;
+ st.debug_str = (try st.elf.findSection(".debug_str")) orelse return error.MissingDebugInfo;
+ st.debug_line = (try st.elf.findSection(".debug_line")) orelse return error.MissingDebugInfo;
st.debug_ranges = (try st.elf.findSection(".debug_ranges"));
try scanAllCompileUnits(st);
return st;
@@ -276,13 +321,8 @@ pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace {
var exe_file = try os.openSelfExe();
defer exe_file.close();
- const st = try allocator.create(ElfStackTrace);
+ const st = try allocator.create(ElfStackTrace{ .symbol_table = try macho.loadSymbols(allocator, &io.FileInStream.init(&exe_file)) });
errdefer allocator.destroy(st);
-
- *st = ElfStackTrace {
- .symbol_table = try macho.loadSymbols(allocator, &io.FileInStream.init(&exe_file)),
- };
-
return st;
},
builtin.ObjectFormat.coff => {
@@ -297,7 +337,7 @@ pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace {
}
}
-fn printLineFromFile(allocator: &mem.Allocator, out_stream: var, line_info: &const LineInfo) !void {
+fn printLineFromFile(allocator: *mem.Allocator, out_stream: var, line_info: *const LineInfo) !void {
var f = try os.File.openRead(allocator, line_info.file_name);
defer f.close();
// TODO fstat and make sure that the file has the correct size
@@ -325,8 +365,7 @@ fn printLineFromFile(allocator: &mem.Allocator, out_stream: var, line_info: &con
}
}
- if (amt_read < buf.len)
- return error.EndOfFile;
+ if (amt_read < buf.len) return error.EndOfFile;
}
}
@@ -334,32 +373,32 @@ pub const ElfStackTrace = switch (builtin.os) {
builtin.Os.macosx => struct {
symbol_table: macho.SymbolTable,
- pub fn close(self: &ElfStackTrace) void {
+ pub fn close(self: *ElfStackTrace) void {
self.symbol_table.deinit();
}
},
else => struct {
self_exe_file: os.File,
elf: elf.Elf,
- debug_info: &elf.SectionHeader,
- debug_abbrev: &elf.SectionHeader,
- debug_str: &elf.SectionHeader,
- debug_line: &elf.SectionHeader,
- debug_ranges: ?&elf.SectionHeader,
+ debug_info: *elf.SectionHeader,
+ debug_abbrev: *elf.SectionHeader,
+ debug_str: *elf.SectionHeader,
+ debug_line: *elf.SectionHeader,
+ debug_ranges: ?*elf.SectionHeader,
abbrev_table_list: ArrayList(AbbrevTableHeader),
compile_unit_list: ArrayList(CompileUnit),
- pub fn allocator(self: &const ElfStackTrace) &mem.Allocator {
+ pub fn allocator(self: *const ElfStackTrace) *mem.Allocator {
return self.abbrev_table_list.allocator;
}
- pub fn readString(self: &ElfStackTrace) ![]u8 {
+ pub fn readString(self: *ElfStackTrace) ![]u8 {
var in_file_stream = io.FileInStream.init(&self.self_exe_file);
const in_stream = &in_file_stream.stream;
return readStringRaw(self.allocator(), in_stream);
}
- pub fn close(self: &ElfStackTrace) void {
+ pub fn close(self: *ElfStackTrace) void {
self.self_exe_file.close();
self.elf.close();
}
@@ -374,7 +413,7 @@ const PcRange = struct {
const CompileUnit = struct {
version: u16,
is_64: bool,
- die: &Die,
+ die: *Die,
index: usize,
pc_range: ?PcRange,
};
@@ -417,11 +456,9 @@ const Constant = struct {
payload: []u8,
signed: bool,
- fn asUnsignedLe(self: &const Constant) !u64 {
- if (self.payload.len > @sizeOf(u64))
- return error.InvalidDebugInfo;
- if (self.signed)
- return error.InvalidDebugInfo;
+ fn asUnsignedLe(self: *const Constant) !u64 {
+ if (self.payload.len > @sizeOf(u64)) return error.InvalidDebugInfo;
+ if (self.signed) return error.InvalidDebugInfo;
return mem.readInt(self.payload, u64, builtin.Endian.Little);
}
};
@@ -436,42 +473,41 @@ const Die = struct {
value: FormValue,
};
- fn getAttr(self: &const Die, id: u64) ?&const FormValue {
+ fn getAttr(self: *const Die, id: u64) ?*const FormValue {
for (self.attrs.toSliceConst()) |*attr| {
- if (attr.id == id)
- return &attr.value;
+ if (attr.id == id) return &attr.value;
}
return null;
}
- fn getAttrAddr(self: &const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
- return switch (*form_value) {
+ fn getAttrAddr(self: *const Die, id: u64) !u64 {
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
+ return switch (form_value.*) {
FormValue.Address => |value| value,
else => error.InvalidDebugInfo,
};
}
- fn getAttrSecOffset(self: &const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
- return switch (*form_value) {
+ fn getAttrSecOffset(self: *const Die, id: u64) !u64 {
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
+ return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
FormValue.SecOffset => |value| value,
else => error.InvalidDebugInfo,
};
}
- fn getAttrUnsignedLe(self: &const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
- return switch (*form_value) {
+ fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 {
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
+ return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
else => error.InvalidDebugInfo,
};
}
- fn getAttrString(self: &const Die, st: &ElfStackTrace, id: u64) ![]u8 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
- return switch (*form_value) {
+ fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 {
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
+ return switch (form_value.*) {
FormValue.String => |value| value,
FormValue.StrPtr => |offset| getString(st, offset),
else => error.InvalidDebugInfo,
@@ -490,9 +526,9 @@ const LineInfo = struct {
line: usize,
column: usize,
file_name: []u8,
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
- fn deinit(self: &const LineInfo) void {
+ fn deinit(self: *const LineInfo) void {
self.allocator.free(self.file_name);
}
};
@@ -508,7 +544,7 @@ const LineNumberProgram = struct {
target_address: usize,
include_dirs: []const []const u8,
- file_entries: &ArrayList(FileEntry),
+ file_entries: *ArrayList(FileEntry),
prev_address: usize,
prev_file: usize,
@@ -518,10 +554,8 @@ const LineNumberProgram = struct {
prev_basic_block: bool,
prev_end_sequence: bool,
- pub fn init(is_stmt: bool, include_dirs: []const []const u8,
- file_entries: &ArrayList(FileEntry), target_address: usize) LineNumberProgram
- {
- return LineNumberProgram {
+ pub fn init(is_stmt: bool, include_dirs: []const []const u8, file_entries: *ArrayList(FileEntry), target_address: usize) LineNumberProgram {
+ return LineNumberProgram{
.address = 0,
.file = 1,
.line = 1,
@@ -542,21 +576,23 @@ const LineNumberProgram = struct {
};
}
- pub fn checkLineMatch(self: &LineNumberProgram) !?LineInfo {
+ pub fn checkLineMatch(self: *LineNumberProgram) !?LineInfo {
if (self.target_address >= self.prev_address and self.target_address < self.address) {
const file_entry = if (self.prev_file == 0) {
return error.MissingDebugInfo;
} else if (self.prev_file - 1 >= self.file_entries.len) {
return error.InvalidDebugInfo;
- } else &self.file_entries.items[self.prev_file - 1];
+ } else
+ &self.file_entries.items[self.prev_file - 1];
const dir_name = if (file_entry.dir_index >= self.include_dirs.len) {
return error.InvalidDebugInfo;
- } else self.include_dirs[file_entry.dir_index];
+ } else
+ self.include_dirs[file_entry.dir_index];
const file_name = try os.path.join(self.file_entries.allocator, dir_name, file_entry.file_name);
errdefer self.file_entries.allocator.free(file_name);
- return LineInfo {
- .line = if (self.prev_line >= 0) usize(self.prev_line) else 0,
+ return LineInfo{
+ .line = if (self.prev_line >= 0) @intCast(usize, self.prev_line) else 0,
.column = self.prev_column,
.file_name = file_name,
.allocator = self.file_entries.allocator,
@@ -574,83 +610,80 @@ const LineNumberProgram = struct {
}
};
-fn readStringRaw(allocator: &mem.Allocator, in_stream: var) ![]u8 {
+fn readStringRaw(allocator: *mem.Allocator, in_stream: var) ![]u8 {
var buf = ArrayList(u8).init(allocator);
while (true) {
const byte = try in_stream.readByte();
- if (byte == 0)
- break;
+ if (byte == 0) break;
try buf.append(byte);
}
return buf.toSlice();
}
-fn getString(st: &ElfStackTrace, offset: u64) ![]u8 {
+fn getString(st: *ElfStackTrace, offset: u64) ![]u8 {
const pos = st.debug_str.offset + offset;
try st.self_exe_file.seekTo(pos);
return st.readString();
}
-fn readAllocBytes(allocator: &mem.Allocator, in_stream: var, size: usize) ![]u8 {
- const buf = try global_allocator.alloc(u8, size);
- errdefer global_allocator.free(buf);
+fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 {
+ const buf = try allocator.alloc(u8, size);
+ errdefer allocator.free(buf);
if ((try in_stream.read(buf)) < size) return error.EndOfFile;
return buf;
}
-fn parseFormValueBlockLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
- return FormValue { .Block = buf };
+ return FormValue{ .Block = buf };
}
-fn parseFormValueBlock(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const block_len = try in_stream.readVarInt(builtin.Endian.Little, usize, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}
-fn parseFormValueConstant(allocator: &mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue {
- return FormValue { .Const = Constant {
- .signed = signed,
- .payload = try readAllocBytes(allocator, in_stream, size),
- }};
+fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue {
+ return FormValue{
+ .Const = Constant{
+ .signed = signed,
+ .payload = try readAllocBytes(allocator, in_stream, size),
+ },
+ };
}
fn parseFormValueDwarfOffsetSize(in_stream: var, is_64: bool) !u64 {
- return if (is_64) try in_stream.readIntLe(u64)
- else u64(try in_stream.readIntLe(u32)) ;
+ return if (is_64) try in_stream.readIntLe(u64) else u64(try in_stream.readIntLe(u32));
}
fn parseFormValueTargetAddrSize(in_stream: var) !u64 {
- return if (@sizeOf(usize) == 4) u64(try in_stream.readIntLe(u32))
- else if (@sizeOf(usize) == 8) try in_stream.readIntLe(u64)
- else unreachable;
+ return if (@sizeOf(usize) == 4) u64(try in_stream.readIntLe(u32)) else if (@sizeOf(usize) == 8) try in_stream.readIntLe(u64) else unreachable;
}
-fn parseFormValueRefLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueRefLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
- return FormValue { .Ref = buf };
+ return FormValue{ .Ref = buf };
}
-fn parseFormValueRef(allocator: &mem.Allocator, in_stream: var, comptime T: type) !FormValue {
+fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, comptime T: type) !FormValue {
const block_len = try in_stream.readIntLe(T);
return parseFormValueRefLen(allocator, in_stream, block_len);
}
-const ParseFormValueError = error {
+const ParseFormValueError = error{
EndOfStream,
Io,
BadFd,
Unexpected,
InvalidDebugInfo,
EndOfFile,
+ IsDir,
OutOfMemory,
};
-fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64: bool)
- ParseFormValueError!FormValue
-{
+fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue {
return switch (form_id) {
- DW.FORM_addr => FormValue { .Address = try parseFormValueTargetAddrSize(in_stream) },
+ DW.FORM_addr => FormValue{ .Address = try parseFormValueTargetAddrSize(in_stream) },
DW.FORM_block1 => parseFormValueBlock(allocator, in_stream, 1),
DW.FORM_block2 => parseFormValueBlock(allocator, in_stream, 2),
DW.FORM_block4 => parseFormValueBlock(allocator, in_stream, 4),
@@ -670,11 +703,11 @@ fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64
DW.FORM_exprloc => {
const size = try readULeb128(in_stream);
const buf = try readAllocBytes(allocator, in_stream, size);
- return FormValue { .ExprLoc = buf };
+ return FormValue{ .ExprLoc = buf };
},
- DW.FORM_flag => FormValue { .Flag = (try in_stream.readByte()) != 0 },
- DW.FORM_flag_present => FormValue { .Flag = true },
- DW.FORM_sec_offset => FormValue { .SecOffset = try parseFormValueDwarfOffsetSize(in_stream, is_64) },
+ DW.FORM_flag => FormValue{ .Flag = (try in_stream.readByte()) != 0 },
+ DW.FORM_flag_present => FormValue{ .Flag = true },
+ DW.FORM_sec_offset => FormValue{ .SecOffset = try parseFormValueDwarfOffsetSize(in_stream, is_64) },
DW.FORM_ref1 => parseFormValueRef(allocator, in_stream, u8),
DW.FORM_ref2 => parseFormValueRef(allocator, in_stream, u16),
@@ -685,11 +718,11 @@ fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64
return parseFormValueRefLen(allocator, in_stream, ref_len);
},
- DW.FORM_ref_addr => FormValue { .RefAddr = try parseFormValueDwarfOffsetSize(in_stream, is_64) },
- DW.FORM_ref_sig8 => FormValue { .RefSig8 = try in_stream.readIntLe(u64) },
+ DW.FORM_ref_addr => FormValue{ .RefAddr = try parseFormValueDwarfOffsetSize(in_stream, is_64) },
+ DW.FORM_ref_sig8 => FormValue{ .RefSig8 = try in_stream.readIntLe(u64) },
- DW.FORM_string => FormValue { .String = try readStringRaw(allocator, in_stream) },
- DW.FORM_strp => FormValue { .StrPtr = try parseFormValueDwarfOffsetSize(in_stream, is_64) },
+ DW.FORM_string => FormValue{ .String = try readStringRaw(allocator, in_stream) },
+ DW.FORM_strp => FormValue{ .StrPtr = try parseFormValueDwarfOffsetSize(in_stream, is_64) },
DW.FORM_indirect => {
const child_form_id = try readULeb128(in_stream);
return parseFormValue(allocator, in_stream, child_form_id, is_64);
@@ -698,16 +731,15 @@ fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64
};
}
-fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable {
+fn parseAbbrevTable(st: *ElfStackTrace) !AbbrevTable {
const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
var result = AbbrevTable.init(st.allocator());
while (true) {
const abbrev_code = try readULeb128(in_stream);
- if (abbrev_code == 0)
- return result;
- try result.append(AbbrevTableEntry {
+ if (abbrev_code == 0) return result;
+ try result.append(AbbrevTableEntry{
.abbrev_code = abbrev_code,
.tag_id = try readULeb128(in_stream),
.has_children = (try in_stream.readByte()) == DW.CHILDREN_yes,
@@ -718,9 +750,8 @@ fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable {
while (true) {
const attr_id = try readULeb128(in_stream);
const form_id = try readULeb128(in_stream);
- if (attr_id == 0 and form_id == 0)
- break;
- try attrs.append(AbbrevAttr {
+ if (attr_id == 0 and form_id == 0) break;
+ try attrs.append(AbbrevAttr{
.attr_id = attr_id,
.form_id = form_id,
});
@@ -730,43 +761,42 @@ fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable {
/// Gets an already existing AbbrevTable given the abbrev_offset, or if not found,
/// seeks in the stream and parses it.
-fn getAbbrevTable(st: &ElfStackTrace, abbrev_offset: u64) !&const AbbrevTable {
+fn getAbbrevTable(st: *ElfStackTrace, abbrev_offset: u64) !*const AbbrevTable {
for (st.abbrev_table_list.toSlice()) |*header| {
if (header.offset == abbrev_offset) {
return &header.table;
}
}
try st.self_exe_file.seekTo(st.debug_abbrev.offset + abbrev_offset);
- try st.abbrev_table_list.append(AbbrevTableHeader {
+ try st.abbrev_table_list.append(AbbrevTableHeader{
.offset = abbrev_offset,
.table = try parseAbbrevTable(st),
});
return &st.abbrev_table_list.items[st.abbrev_table_list.len - 1].table;
}
-fn getAbbrevTableEntry(abbrev_table: &const AbbrevTable, abbrev_code: u64) ?&const AbbrevTableEntry {
+fn getAbbrevTableEntry(abbrev_table: *const AbbrevTable, abbrev_code: u64) ?*const AbbrevTableEntry {
for (abbrev_table.toSliceConst()) |*table_entry| {
- if (table_entry.abbrev_code == abbrev_code)
- return table_entry;
+ if (table_entry.abbrev_code == abbrev_code) return table_entry;
}
return null;
}
-fn parseDie(st: &ElfStackTrace, abbrev_table: &const AbbrevTable, is_64: bool) !Die {
+fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !Die {
const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
const abbrev_code = try readULeb128(in_stream);
- const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) ?? return error.InvalidDebugInfo;
+ const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo;
- var result = Die {
+ var result = Die{
.tag_id = table_entry.tag_id,
.has_children = table_entry.has_children,
.attrs = ArrayList(Die.Attr).init(st.allocator()),
};
try result.attrs.resize(table_entry.attrs.len);
for (table_entry.attrs.toSliceConst()) |attr, i| {
- result.attrs.items[i] = Die.Attr {
+ result.attrs.items[i] = Die.Attr{
.id = attr.attr_id,
.value = try parseFormValue(st.allocator(), in_stream, attr.form_id, is_64),
};
@@ -774,7 +804,7 @@ fn parseDie(st: &ElfStackTrace, abbrev_table: &const AbbrevTable, is_64: bool) !
return result;
}
-fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, target_address: usize) !LineInfo {
+fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, target_address: usize) !LineInfo {
const compile_unit_cwd = try compile_unit.die.getAttrString(st, DW.AT_comp_dir);
const in_file = &st.self_exe_file;
@@ -790,8 +820,7 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
var is_64: bool = undefined;
const unit_length = try readInitialLength(@typeOf(in_stream.readFn).ReturnType.ErrorSet, in_stream, &is_64);
- if (unit_length == 0)
- return error.MissingDebugInfo;
+ if (unit_length == 0) return error.MissingDebugInfo;
const next_offset = unit_length + (if (is_64) usize(12) else usize(4));
if (compile_unit.index != this_index) {
@@ -803,8 +832,7 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
// TODO support 3 and 5
if (version != 2 and version != 4) return error.InvalidDebugInfo;
- const prologue_length = if (is_64) try in_stream.readInt(st.elf.endian, u64)
- else try in_stream.readInt(st.elf.endian, u32);
+ const prologue_length = if (is_64) try in_stream.readInt(st.elf.endian, u64) else try in_stream.readInt(st.elf.endian, u32);
const prog_start_offset = (try in_file.getPos()) + prologue_length;
const minimum_instruction_length = try in_stream.readByte();
@@ -819,38 +847,37 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
const line_base = try in_stream.readByteSigned();
const line_range = try in_stream.readByte();
- if (line_range == 0)
- return error.InvalidDebugInfo;
+ if (line_range == 0) return error.InvalidDebugInfo;
const opcode_base = try in_stream.readByte();
const standard_opcode_lengths = try st.allocator().alloc(u8, opcode_base - 1);
- {var i: usize = 0; while (i < opcode_base - 1) : (i += 1) {
- standard_opcode_lengths[i] = try in_stream.readByte();
- }}
+ {
+ var i: usize = 0;
+ while (i < opcode_base - 1) : (i += 1) {
+ standard_opcode_lengths[i] = try in_stream.readByte();
+ }
+ }
var include_directories = ArrayList([]u8).init(st.allocator());
try include_directories.append(compile_unit_cwd);
while (true) {
const dir = try st.readString();
- if (dir.len == 0)
- break;
+ if (dir.len == 0) break;
try include_directories.append(dir);
}
var file_entries = ArrayList(FileEntry).init(st.allocator());
- var prog = LineNumberProgram.init(default_is_stmt, include_directories.toSliceConst(),
- &file_entries, target_address);
+ var prog = LineNumberProgram.init(default_is_stmt, include_directories.toSliceConst(), &file_entries, target_address);
while (true) {
const file_name = try st.readString();
- if (file_name.len == 0)
- break;
+ if (file_name.len == 0) break;
const dir_index = try readULeb128(in_stream);
const mtime = try readULeb128(in_stream);
const len_bytes = try readULeb128(in_stream);
- try file_entries.append(FileEntry {
+ try file_entries.append(FileEntry{
.file_name = file_name,
.dir_index = dir_index,
.mtime = mtime,
@@ -866,8 +893,7 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
var sub_op: u8 = undefined; // TODO move this to the correct scope and fix the compiler crash
if (opcode == DW.LNS_extended_op) {
const op_size = try readULeb128(in_stream);
- if (op_size < 1)
- return error.InvalidDebugInfo;
+ if (op_size < 1) return error.InvalidDebugInfo;
sub_op = try in_stream.readByte();
switch (sub_op) {
DW.LNE_end_sequence => {
@@ -884,7 +910,7 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
const dir_index = try readULeb128(in_stream);
const mtime = try readULeb128(in_stream);
const len_bytes = try readULeb128(in_stream);
- try file_entries.append(FileEntry {
+ try file_entries.append(FileEntry{
.file_name = file_name,
.dir_index = dir_index,
.mtime = mtime,
@@ -941,11 +967,9 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
const arg = try in_stream.readInt(st.elf.endian, u16);
prog.address += arg;
},
- DW.LNS_set_prologue_end => {
- },
+ DW.LNS_set_prologue_end => {},
else => {
- if (opcode - 1 >= standard_opcode_lengths.len)
- return error.InvalidDebugInfo;
+ if (opcode - 1 >= standard_opcode_lengths.len) return error.InvalidDebugInfo;
const len_bytes = standard_opcode_lengths[opcode - 1];
try in_file.seekForward(len_bytes);
},
@@ -959,7 +983,7 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
return error.MissingDebugInfo;
}
-fn scanAllCompileUnits(st: &ElfStackTrace) !void {
+fn scanAllCompileUnits(st: *ElfStackTrace) !void {
const debug_info_end = st.debug_info.offset + st.debug_info.size;
var this_unit_offset = st.debug_info.offset;
var cu_index: usize = 0;
@@ -972,16 +996,13 @@ fn scanAllCompileUnits(st: &ElfStackTrace) !void {
var is_64: bool = undefined;
const unit_length = try readInitialLength(@typeOf(in_stream.readFn).ReturnType.ErrorSet, in_stream, &is_64);
- if (unit_length == 0)
- return;
+ if (unit_length == 0) return;
const next_offset = unit_length + (if (is_64) usize(12) else usize(4));
const version = try in_stream.readInt(st.elf.endian, u16);
if (version < 2 or version > 5) return error.InvalidDebugInfo;
- const debug_abbrev_offset =
- if (is_64) try in_stream.readInt(st.elf.endian, u64)
- else try in_stream.readInt(st.elf.endian, u32);
+ const debug_abbrev_offset = if (is_64) try in_stream.readInt(st.elf.endian, u64) else try in_stream.readInt(st.elf.endian, u32);
const address_size = try in_stream.readByte();
if (address_size != @sizeOf(usize)) return error.InvalidDebugInfo;
@@ -991,16 +1012,14 @@ fn scanAllCompileUnits(st: &ElfStackTrace) !void {
try st.self_exe_file.seekTo(compile_unit_pos);
- const compile_unit_die = try st.allocator().create(Die);
- *compile_unit_die = try parseDie(st, abbrev_table, is_64);
+ const compile_unit_die = try st.allocator().create(try parseDie(st, abbrev_table, is_64));
- if (compile_unit_die.tag_id != DW.TAG_compile_unit)
- return error.InvalidDebugInfo;
+ if (compile_unit_die.tag_id != DW.TAG_compile_unit) return error.InvalidDebugInfo;
const pc_range = x: {
if (compile_unit_die.getAttrAddr(DW.AT_low_pc)) |low_pc| {
if (compile_unit_die.getAttr(DW.AT_high_pc)) |high_pc_value| {
- const pc_end = switch (*high_pc_value) {
+ const pc_end = switch (high_pc_value.*) {
FormValue.Address => |value| value,
FormValue.Const => |value| b: {
const offset = try value.asUnsignedLe();
@@ -1008,7 +1027,7 @@ fn scanAllCompileUnits(st: &ElfStackTrace) !void {
},
else => return error.InvalidDebugInfo,
};
- break :x PcRange {
+ break :x PcRange{
.start = low_pc,
.end = pc_end,
};
@@ -1016,13 +1035,12 @@ fn scanAllCompileUnits(st: &ElfStackTrace) !void {
break :x null;
}
} else |err| {
- if (err != error.MissingDebugInfo)
- return err;
+ if (err != error.MissingDebugInfo) return err;
break :x null;
}
};
- try st.compile_unit_list.append(CompileUnit {
+ try st.compile_unit_list.append(CompileUnit{
.version = version,
.is_64 = is_64,
.pc_range = pc_range,
@@ -1035,13 +1053,12 @@ fn scanAllCompileUnits(st: &ElfStackTrace) !void {
}
}
-fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit {
+fn findCompileUnit(st: *ElfStackTrace, target_address: u64) !*const CompileUnit {
var in_file_stream = io.FileInStream.init(&st.self_exe_file);
const in_stream = &in_file_stream.stream;
for (st.compile_unit_list.toSlice()) |*compile_unit| {
if (compile_unit.pc_range) |range| {
- if (target_address >= range.start and target_address < range.end)
- return compile_unit;
+ if (target_address >= range.start and target_address < range.end) return compile_unit;
}
if (compile_unit.die.getAttrSecOffset(DW.AT_ranges)) |ranges_offset| {
var base_address: usize = 0;
@@ -1063,18 +1080,17 @@ fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit
}
}
} else |err| {
- if (err != error.MissingDebugInfo)
- return err;
+ if (err != error.MissingDebugInfo) return err;
continue;
}
}
return error.MissingDebugInfo;
}
-fn readInitialLength(comptime E: type, in_stream: &io.InStream(E), is_64: &bool) !u64 {
+fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool) !u64 {
const first_32_bits = try in_stream.readIntLe(u32);
- *is_64 = (first_32_bits == 0xffffffff);
- if (*is_64) {
+ is_64.* = (first_32_bits == 0xffffffff);
+ if (is_64.*) {
return in_stream.readIntLe(u64);
} else {
if (first_32_bits >= 0xfffffff0) return error.InvalidDebugInfo;
@@ -1091,13 +1107,11 @@ fn readULeb128(in_stream: var) !u64 {
var operand: u64 = undefined;
- if (@shlWithOverflow(u64, byte & 0b01111111, u6(shift), &operand))
- return error.InvalidDebugInfo;
+ if (@shlWithOverflow(u64, byte & 0b01111111, @intCast(u6, shift), &operand)) return error.InvalidDebugInfo;
result |= operand;
- if ((byte & 0b10000000) == 0)
- return result;
+ if ((byte & 0b10000000) == 0) return result;
shift += 7;
}
@@ -1112,20 +1126,32 @@ fn readILeb128(in_stream: var) !i64 {
var operand: i64 = undefined;
- if (@shlWithOverflow(i64, byte & 0b01111111, u6(shift), &operand))
- return error.InvalidDebugInfo;
+ if (@shlWithOverflow(i64, byte & 0b01111111, @intCast(u6, shift), &operand)) return error.InvalidDebugInfo;
result |= operand;
shift += 7;
if ((byte & 0b10000000) == 0) {
- if (shift < @sizeOf(i64) * 8 and (byte & 0b01000000) != 0)
- result |= -(i64(1) << u6(shift));
+ if (shift < @sizeOf(i64) * 8 and (byte & 0b01000000) != 0) result |= -(i64(1) << @intCast(u6, shift));
return result;
}
}
}
+/// This should only be used in temporary test programs.
pub const global_allocator = &global_fixed_allocator.allocator;
-var global_fixed_allocator = std.heap.FixedBufferAllocator.init(global_allocator_mem[0..]);
+var global_fixed_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(global_allocator_mem[0..]);
var global_allocator_mem: [100 * 1024]u8 = undefined;
+
+// TODO make thread safe
+var debug_info_allocator: ?*mem.Allocator = null;
+var debug_info_direct_allocator: std.heap.DirectAllocator = undefined;
+var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
+fn getDebugInfoAllocator() *mem.Allocator {
+ if (debug_info_allocator) |a| return a;
+
+ debug_info_direct_allocator = std.heap.DirectAllocator.init();
+ debug_info_arena_allocator = std.heap.ArenaAllocator.init(&debug_info_direct_allocator.allocator);
+ debug_info_allocator = &debug_info_arena_allocator.allocator;
+ return &debug_info_arena_allocator.allocator;
+}
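
For review context, the std/debug hunks above are dominated by the mechanical syntax migration rather than behavioral changes. A condensed sketch of the rewrite rules as they appear in these hunks (a summary, not a compilable program):

    // Pointer types change from `&T` to `*T`; taking an address with `&x` is unchanged.
    fn parseAbbrevTable(st: *ElfStackTrace) !AbbrevTable
    // Dereference moves from prefix to suffix:
    //     *is_64 = (first_32_bits == 0xffffffff);   becomes   is_64.* = (first_32_bits == 0xffffffff);
    // The `??` unwrap operator becomes `orelse`:
    //     getAbbrevTableEntry(t, c) ?? return error.InvalidDebugInfo
    //     getAbbrevTableEntry(t, c) orelse return error.InvalidDebugInfo
    // Narrowing integer casts such as `u6(shift)` become `@intCast(u6, shift)`;
    // widening casts like `u64(x)` keep the old call syntax.

Behavioral changes in this file include the new `IsDir` entry in ParseFormValueError, the switch of the global test allocator to ThreadSafeFixedBufferAllocator, and the lazily initialized arena allocator for debug info.
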
diff --git a/std/dwarf.zig b/std/dwarf.zig
index 04456d9e5c..2cf8ed953e 100644
--- a/std/dwarf.zig
+++ b/std/dwarf.zig
@@ -337,7 +337,6 @@ pub const AT_PGI_lbase = 0x3a00;
pub const AT_PGI_soffset = 0x3a01;
pub const AT_PGI_lstride = 0x3a02;
-
pub const OP_addr = 0x03;
pub const OP_deref = 0x06;
pub const OP_const1u = 0x08;
@@ -577,7 +576,6 @@ pub const ATE_HP_unsigned_fixed = 0x8e; // Cobol.
pub const ATE_HP_VAX_complex_float = 0x8f; // F or G floating complex.
pub const ATE_HP_VAX_complex_float_d = 0x90; // D floating complex.
-
pub const CFA_advance_loc = 0x40;
pub const CFA_offset = 0x80;
pub const CFA_restore = 0xc0;
@@ -641,3 +639,40 @@ pub const LNE_define_file = 0x03;
pub const LNE_set_discriminator = 0x04;
pub const LNE_lo_user = 0x80;
pub const LNE_hi_user = 0xff;
+
+pub const LANG_C89 = 0x0001;
+pub const LANG_C = 0x0002;
+pub const LANG_Ada83 = 0x0003;
+pub const LANG_C_plus_plus = 0x0004;
+pub const LANG_Cobol74 = 0x0005;
+pub const LANG_Cobol85 = 0x0006;
+pub const LANG_Fortran77 = 0x0007;
+pub const LANG_Fortran90 = 0x0008;
+pub const LANG_Pascal83 = 0x0009;
+pub const LANG_Modula2 = 0x000a;
+pub const LANG_Java = 0x000b;
+pub const LANG_C99 = 0x000c;
+pub const LANG_Ada95 = 0x000d;
+pub const LANG_Fortran95 = 0x000e;
+pub const LANG_PLI = 0x000f;
+pub const LANG_ObjC = 0x0010;
+pub const LANG_ObjC_plus_plus = 0x0011;
+pub const LANG_UPC = 0x0012;
+pub const LANG_D = 0x0013;
+pub const LANG_Python = 0x0014;
+pub const LANG_Go = 0x0016;
+pub const LANG_C_plus_plus_11 = 0x001a;
+pub const LANG_Rust = 0x001c;
+pub const LANG_C11 = 0x001d;
+pub const LANG_C_plus_plus_14 = 0x0021;
+pub const LANG_Fortran03 = 0x0022;
+pub const LANG_Fortran08 = 0x0023;
+pub const LANG_lo_user = 0x8000;
+pub const LANG_hi_user = 0xffff;
+pub const LANG_Mips_Assembler = 0x8001;
+pub const LANG_Upc = 0x8765;
+pub const LANG_HP_Bliss = 0x8003;
+pub const LANG_HP_Basic91 = 0x8004;
+pub const LANG_HP_Pascal91 = 0x8005;
+pub const LANG_HP_IMacro = 0x8006;
+pub const LANG_HP_Assembler = 0x8007;
diff --git a/std/dynamic_library.zig b/std/dynamic_library.zig
new file mode 100644
index 0000000000..ed190f7deb
--- /dev/null
+++ b/std/dynamic_library.zig
@@ -0,0 +1,156 @@
+const std = @import("index.zig");
+const mem = std.mem;
+const elf = std.elf;
+const cstr = std.cstr;
+const linux = std.os.linux;
+
+pub const DynLib = struct {
+ allocator: *mem.Allocator,
+ elf_lib: ElfLib,
+ fd: i32,
+ map_addr: usize,
+ map_size: usize,
+
+ /// Trusts the file
+ pub fn open(allocator: *mem.Allocator, path: []const u8) !DynLib {
+ const fd = try std.os.posixOpen(allocator, path, 0, linux.O_RDONLY | linux.O_CLOEXEC);
+ errdefer std.os.close(fd);
+
+ const size = @intCast(usize, (try std.os.posixFStat(fd)).size);
+
+ const addr = linux.mmap(
+ null,
+ size,
+ linux.PROT_READ | linux.PROT_EXEC,
+ linux.MAP_PRIVATE | linux.MAP_LOCKED,
+ fd,
+ 0,
+ );
+ errdefer _ = linux.munmap(addr, size);
+
+ const bytes = @intToPtr([*]align(std.os.page_size) u8, addr)[0..size];
+
+ return DynLib{
+ .allocator = allocator,
+ .elf_lib = try ElfLib.init(bytes),
+ .fd = fd,
+ .map_addr = addr,
+ .map_size = size,
+ };
+ }
+
+ pub fn close(self: *DynLib) void {
+ _ = linux.munmap(self.map_addr, self.map_size);
+ std.os.close(self.fd);
+ self.* = undefined;
+ }
+
+ pub fn lookup(self: *DynLib, name: []const u8) ?usize {
+ return self.elf_lib.lookup("", name);
+ }
+};
+
+pub const ElfLib = struct {
+ strings: [*]u8,
+ syms: [*]elf.Sym,
+ hashtab: [*]linux.Elf_Symndx,
+ versym: ?[*]u16,
+ verdef: ?*elf.Verdef,
+ base: usize,
+
+ // Trusts the memory
+ pub fn init(bytes: []align(@alignOf(elf.Ehdr)) u8) !ElfLib {
+ const eh = @ptrCast(*elf.Ehdr, bytes.ptr);
+ if (!mem.eql(u8, eh.e_ident[0..4], "\x7fELF")) return error.NotElfFile;
+ if (eh.e_type != elf.ET_DYN) return error.NotDynamicLibrary;
+
+ const elf_addr = @ptrToInt(bytes.ptr);
+ var ph_addr: usize = elf_addr + eh.e_phoff;
+
+ var base: usize = @maxValue(usize);
+ var maybe_dynv: ?[*]usize = null;
+ {
+ var i: usize = 0;
+ while (i < eh.e_phnum) : ({
+ i += 1;
+ ph_addr += eh.e_phentsize;
+ }) {
+ const ph = @intToPtr(*elf.Phdr, ph_addr);
+ switch (ph.p_type) {
+ elf.PT_LOAD => base = elf_addr + ph.p_offset - ph.p_vaddr,
+ elf.PT_DYNAMIC => maybe_dynv = @intToPtr([*]usize, elf_addr + ph.p_offset),
+ else => {},
+ }
+ }
+ }
+ const dynv = maybe_dynv orelse return error.MissingDynamicLinkingInformation;
+ if (base == @maxValue(usize)) return error.BaseNotFound;
+
+ var maybe_strings: ?[*]u8 = null;
+ var maybe_syms: ?[*]elf.Sym = null;
+ var maybe_hashtab: ?[*]linux.Elf_Symndx = null;
+ var maybe_versym: ?[*]u16 = null;
+ var maybe_verdef: ?*elf.Verdef = null;
+
+ {
+ var i: usize = 0;
+ while (dynv[i] != 0) : (i += 2) {
+ const p = base + dynv[i + 1];
+ switch (dynv[i]) {
+ elf.DT_STRTAB => maybe_strings = @intToPtr([*]u8, p),
+ elf.DT_SYMTAB => maybe_syms = @intToPtr([*]elf.Sym, p),
+ elf.DT_HASH => maybe_hashtab = @intToPtr([*]linux.Elf_Symndx, p),
+ elf.DT_VERSYM => maybe_versym = @intToPtr([*]u16, p),
+ elf.DT_VERDEF => maybe_verdef = @intToPtr(*elf.Verdef, p),
+ else => {},
+ }
+ }
+ }
+
+ return ElfLib{
+ .base = base,
+ .strings = maybe_strings orelse return error.ElfStringSectionNotFound,
+ .syms = maybe_syms orelse return error.ElfSymSectionNotFound,
+ .hashtab = maybe_hashtab orelse return error.ElfHashTableNotFound,
+ .versym = maybe_versym,
+ .verdef = maybe_verdef,
+ };
+ }
+
+ /// Returns the address of the symbol
+ pub fn lookup(self: *const ElfLib, vername: []const u8, name: []const u8) ?usize {
+ const maybe_versym = if (self.verdef == null) null else self.versym;
+
+ const OK_TYPES = (1 << elf.STT_NOTYPE | 1 << elf.STT_OBJECT | 1 << elf.STT_FUNC | 1 << elf.STT_COMMON);
+ const OK_BINDS = (1 << elf.STB_GLOBAL | 1 << elf.STB_WEAK | 1 << elf.STB_GNU_UNIQUE);
+
+ var i: usize = 0;
+ while (i < self.hashtab[1]) : (i += 1) {
+ if (0 == (u32(1) << @intCast(u5, self.syms[i].st_info & 0xf) & OK_TYPES)) continue;
+ if (0 == (u32(1) << @intCast(u5, self.syms[i].st_info >> 4) & OK_BINDS)) continue;
+ if (0 == self.syms[i].st_shndx) continue;
+ if (!mem.eql(u8, name, cstr.toSliceConst(self.strings + self.syms[i].st_name))) continue;
+ if (maybe_versym) |versym| {
+ if (!checkver(self.verdef.?, versym[i], vername, self.strings))
+ continue;
+ }
+ return self.base + self.syms[i].st_value;
+ }
+
+ return null;
+ }
+};
+
+fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*]u8) bool {
+ var def = def_arg;
+ const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
+ while (true) {
+ if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym)
+ break;
+ if (def.vd_next == 0)
+ return false;
+ def = @intToPtr(*elf.Verdef, @ptrToInt(def) + def.vd_next);
+ }
+ const aux = @intToPtr(*elf.Verdaux, @ptrToInt(def) + def.vd_aux);
+ return mem.eql(u8, vername, cstr.toSliceConst(strings + aux.vda_name));
+}
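
A minimal usage sketch for the new DynLib API (an illustration, not code from this patch). It assumes the struct is re-exported as `std.DynLib`, which this diff does not show; the library path and symbol name below are placeholders. As the doc comment says, open() trusts the file, and the implementation is Linux-only.

    const std = @import("std");

    fn lookupSymbol(allocator: *std.mem.Allocator) !usize {
        // "/usr/lib/libfoo.so" and "foo_init" are hypothetical placeholders.
        var lib = try std.DynLib.open(allocator, "/usr/lib/libfoo.so");
        defer lib.close();
        // lookup returns the symbol's address, or null if it is not exported.
        return lib.lookup("foo_init") orelse return error.SymbolNotFound;
    }
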
diff --git a/std/elf.zig b/std/elf.zig
index 7e20fa000f..8e6445c631 100644
--- a/std/elf.zig
+++ b/std/elf.zig
@@ -7,6 +7,241 @@ const mem = std.mem;
const debug = std.debug;
const InStream = std.stream.InStream;
+pub const AT_NULL = 0;
+pub const AT_IGNORE = 1;
+pub const AT_EXECFD = 2;
+pub const AT_PHDR = 3;
+pub const AT_PHENT = 4;
+pub const AT_PHNUM = 5;
+pub const AT_PAGESZ = 6;
+pub const AT_BASE = 7;
+pub const AT_FLAGS = 8;
+pub const AT_ENTRY = 9;
+pub const AT_NOTELF = 10;
+pub const AT_UID = 11;
+pub const AT_EUID = 12;
+pub const AT_GID = 13;
+pub const AT_EGID = 14;
+pub const AT_CLKTCK = 17;
+pub const AT_PLATFORM = 15;
+pub const AT_HWCAP = 16;
+pub const AT_FPUCW = 18;
+pub const AT_DCACHEBSIZE = 19;
+pub const AT_ICACHEBSIZE = 20;
+pub const AT_UCACHEBSIZE = 21;
+pub const AT_IGNOREPPC = 22;
+pub const AT_SECURE = 23;
+pub const AT_BASE_PLATFORM = 24;
+pub const AT_RANDOM = 25;
+pub const AT_HWCAP2 = 26;
+pub const AT_EXECFN = 31;
+pub const AT_SYSINFO = 32;
+pub const AT_SYSINFO_EHDR = 33;
+pub const AT_L1I_CACHESHAPE = 34;
+pub const AT_L1D_CACHESHAPE = 35;
+pub const AT_L2_CACHESHAPE = 36;
+pub const AT_L3_CACHESHAPE = 37;
+pub const AT_L1I_CACHESIZE = 40;
+pub const AT_L1I_CACHEGEOMETRY = 41;
+pub const AT_L1D_CACHESIZE = 42;
+pub const AT_L1D_CACHEGEOMETRY = 43;
+pub const AT_L2_CACHESIZE = 44;
+pub const AT_L2_CACHEGEOMETRY = 45;
+pub const AT_L3_CACHESIZE = 46;
+pub const AT_L3_CACHEGEOMETRY = 47;
+
+pub const DT_NULL = 0;
+pub const DT_NEEDED = 1;
+pub const DT_PLTRELSZ = 2;
+pub const DT_PLTGOT = 3;
+pub const DT_HASH = 4;
+pub const DT_STRTAB = 5;
+pub const DT_SYMTAB = 6;
+pub const DT_RELA = 7;
+pub const DT_RELASZ = 8;
+pub const DT_RELAENT = 9;
+pub const DT_STRSZ = 10;
+pub const DT_SYMENT = 11;
+pub const DT_INIT = 12;
+pub const DT_FINI = 13;
+pub const DT_SONAME = 14;
+pub const DT_RPATH = 15;
+pub const DT_SYMBOLIC = 16;
+pub const DT_REL = 17;
+pub const DT_RELSZ = 18;
+pub const DT_RELENT = 19;
+pub const DT_PLTREL = 20;
+pub const DT_DEBUG = 21;
+pub const DT_TEXTREL = 22;
+pub const DT_JMPREL = 23;
+pub const DT_BIND_NOW = 24;
+pub const DT_INIT_ARRAY = 25;
+pub const DT_FINI_ARRAY = 26;
+pub const DT_INIT_ARRAYSZ = 27;
+pub const DT_FINI_ARRAYSZ = 28;
+pub const DT_RUNPATH = 29;
+pub const DT_FLAGS = 30;
+pub const DT_ENCODING = 32;
+pub const DT_PREINIT_ARRAY = 32;
+pub const DT_PREINIT_ARRAYSZ = 33;
+pub const DT_SYMTAB_SHNDX = 34;
+pub const DT_NUM = 35;
+pub const DT_LOOS = 0x6000000d;
+pub const DT_HIOS = 0x6ffff000;
+pub const DT_LOPROC = 0x70000000;
+pub const DT_HIPROC = 0x7fffffff;
+pub const DT_PROCNUM = DT_MIPS_NUM;
+
+pub const DT_VALRNGLO = 0x6ffffd00;
+pub const DT_GNU_PRELINKED = 0x6ffffdf5;
+pub const DT_GNU_CONFLICTSZ = 0x6ffffdf6;
+pub const DT_GNU_LIBLISTSZ = 0x6ffffdf7;
+pub const DT_CHECKSUM = 0x6ffffdf8;
+pub const DT_PLTPADSZ = 0x6ffffdf9;
+pub const DT_MOVEENT = 0x6ffffdfa;
+pub const DT_MOVESZ = 0x6ffffdfb;
+pub const DT_FEATURE_1 = 0x6ffffdfc;
+pub const DT_POSFLAG_1 = 0x6ffffdfd;
+
+pub const DT_SYMINSZ = 0x6ffffdfe;
+pub const DT_SYMINENT = 0x6ffffdff;
+pub const DT_VALRNGHI = 0x6ffffdff;
+pub const DT_VALNUM = 12;
+
+pub const DT_ADDRRNGLO = 0x6ffffe00;
+pub const DT_GNU_HASH = 0x6ffffef5;
+pub const DT_TLSDESC_PLT = 0x6ffffef6;
+pub const DT_TLSDESC_GOT = 0x6ffffef7;
+pub const DT_GNU_CONFLICT = 0x6ffffef8;
+pub const DT_GNU_LIBLIST = 0x6ffffef9;
+pub const DT_CONFIG = 0x6ffffefa;
+pub const DT_DEPAUDIT = 0x6ffffefb;
+pub const DT_AUDIT = 0x6ffffefc;
+pub const DT_PLTPAD = 0x6ffffefd;
+pub const DT_MOVETAB = 0x6ffffefe;
+pub const DT_SYMINFO = 0x6ffffeff;
+pub const DT_ADDRRNGHI = 0x6ffffeff;
+pub const DT_ADDRNUM = 11;
+
+pub const DT_VERSYM = 0x6ffffff0;
+
+pub const DT_RELACOUNT = 0x6ffffff9;
+pub const DT_RELCOUNT = 0x6ffffffa;
+
+pub const DT_FLAGS_1 = 0x6ffffffb;
+pub const DT_VERDEF = 0x6ffffffc;
+
+pub const DT_VERDEFNUM = 0x6ffffffd;
+pub const DT_VERNEED = 0x6ffffffe;
+
+pub const DT_VERNEEDNUM = 0x6fffffff;
+pub const DT_VERSIONTAGNUM = 16;
+
+pub const DT_AUXILIARY = 0x7ffffffd;
+pub const DT_FILTER = 0x7fffffff;
+pub const DT_EXTRANUM = 3;
+
+pub const DT_SPARC_REGISTER = 0x70000001;
+pub const DT_SPARC_NUM = 2;
+
+pub const DT_MIPS_RLD_VERSION = 0x70000001;
+pub const DT_MIPS_TIME_STAMP = 0x70000002;
+pub const DT_MIPS_ICHECKSUM = 0x70000003;
+pub const DT_MIPS_IVERSION = 0x70000004;
+pub const DT_MIPS_FLAGS = 0x70000005;
+pub const DT_MIPS_BASE_ADDRESS = 0x70000006;
+pub const DT_MIPS_MSYM = 0x70000007;
+pub const DT_MIPS_CONFLICT = 0x70000008;
+pub const DT_MIPS_LIBLIST = 0x70000009;
+pub const DT_MIPS_LOCAL_GOTNO = 0x7000000a;
+pub const DT_MIPS_CONFLICTNO = 0x7000000b;
+pub const DT_MIPS_LIBLISTNO = 0x70000010;
+pub const DT_MIPS_SYMTABNO = 0x70000011;
+pub const DT_MIPS_UNREFEXTNO = 0x70000012;
+pub const DT_MIPS_GOTSYM = 0x70000013;
+pub const DT_MIPS_HIPAGENO = 0x70000014;
+pub const DT_MIPS_RLD_MAP = 0x70000016;
+pub const DT_MIPS_DELTA_CLASS = 0x70000017;
+pub const DT_MIPS_DELTA_CLASS_NO = 0x70000018;
+
+pub const DT_MIPS_DELTA_INSTANCE = 0x70000019;
+pub const DT_MIPS_DELTA_INSTANCE_NO = 0x7000001a;
+
+pub const DT_MIPS_DELTA_RELOC = 0x7000001b;
+pub const DT_MIPS_DELTA_RELOC_NO = 0x7000001c;
+
+pub const DT_MIPS_DELTA_SYM = 0x7000001d;
+
+pub const DT_MIPS_DELTA_SYM_NO = 0x7000001e;
+
+pub const DT_MIPS_DELTA_CLASSSYM = 0x70000020;
+
+pub const DT_MIPS_DELTA_CLASSSYM_NO = 0x70000021;
+
+pub const DT_MIPS_CXX_FLAGS = 0x70000022;
+pub const DT_MIPS_PIXIE_INIT = 0x70000023;
+pub const DT_MIPS_SYMBOL_LIB = 0x70000024;
+pub const DT_MIPS_LOCALPAGE_GOTIDX = 0x70000025;
+pub const DT_MIPS_LOCAL_GOTIDX = 0x70000026;
+pub const DT_MIPS_HIDDEN_GOTIDX = 0x70000027;
+pub const DT_MIPS_PROTECTED_GOTIDX = 0x70000028;
+pub const DT_MIPS_OPTIONS = 0x70000029;
+pub const DT_MIPS_INTERFACE = 0x7000002a;
+pub const DT_MIPS_DYNSTR_ALIGN = 0x7000002b;
+pub const DT_MIPS_INTERFACE_SIZE = 0x7000002c;
+pub const DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 0x7000002d;
+
+pub const DT_MIPS_PERF_SUFFIX = 0x7000002e;
+
+pub const DT_MIPS_COMPACT_SIZE = 0x7000002f;
+pub const DT_MIPS_GP_VALUE = 0x70000030;
+pub const DT_MIPS_AUX_DYNAMIC = 0x70000031;
+
+pub const DT_MIPS_PLTGOT = 0x70000032;
+
+pub const DT_MIPS_RWPLT = 0x70000034;
+pub const DT_MIPS_RLD_MAP_REL = 0x70000035;
+pub const DT_MIPS_NUM = 0x36;
+
+pub const DT_ALPHA_PLTRO = (DT_LOPROC + 0);
+pub const DT_ALPHA_NUM = 1;
+
+pub const DT_PPC_GOT = (DT_LOPROC + 0);
+pub const DT_PPC_OPT = (DT_LOPROC + 1);
+pub const DT_PPC_NUM = 2;
+
+pub const DT_PPC64_GLINK = (DT_LOPROC + 0);
+pub const DT_PPC64_OPD = (DT_LOPROC + 1);
+pub const DT_PPC64_OPDSZ = (DT_LOPROC + 2);
+pub const DT_PPC64_OPT = (DT_LOPROC + 3);
+pub const DT_PPC64_NUM = 4;
+
+pub const DT_IA_64_PLT_RESERVE = (DT_LOPROC + 0);
+pub const DT_IA_64_NUM = 1;
+
+pub const DT_NIOS2_GP = 0x70000002;
+
+pub const PT_NULL = 0;
+pub const PT_LOAD = 1;
+pub const PT_DYNAMIC = 2;
+pub const PT_INTERP = 3;
+pub const PT_NOTE = 4;
+pub const PT_SHLIB = 5;
+pub const PT_PHDR = 6;
+pub const PT_TLS = 7;
+pub const PT_NUM = 8;
+pub const PT_LOOS = 0x60000000;
+pub const PT_GNU_EH_FRAME = 0x6474e550;
+pub const PT_GNU_STACK = 0x6474e551;
+pub const PT_GNU_RELRO = 0x6474e552;
+pub const PT_LOSUNW = 0x6ffffffa;
+pub const PT_SUNWBSS = 0x6ffffffa;
+pub const PT_SUNWSTACK = 0x6ffffffb;
+pub const PT_HISUNW = 0x6fffffff;
+pub const PT_HIOS = 0x6fffffff;
+pub const PT_LOPROC = 0x70000000;
+pub const PT_HIPROC = 0x7fffffff;
+
pub const SHT_NULL = 0;
pub const SHT_PROGBITS = 1;
pub const SHT_SYMTAB = 2;
@@ -31,6 +266,60 @@ pub const SHT_HIPROC = 0x7fffffff;
pub const SHT_LOUSER = 0x80000000;
pub const SHT_HIUSER = 0xffffffff;
+pub const STB_LOCAL = 0;
+pub const STB_GLOBAL = 1;
+pub const STB_WEAK = 2;
+pub const STB_NUM = 3;
+pub const STB_LOOS = 10;
+pub const STB_GNU_UNIQUE = 10;
+pub const STB_HIOS = 12;
+pub const STB_LOPROC = 13;
+pub const STB_HIPROC = 15;
+
+pub const STB_MIPS_SPLIT_COMMON = 13;
+
+pub const STT_NOTYPE = 0;
+pub const STT_OBJECT = 1;
+pub const STT_FUNC = 2;
+pub const STT_SECTION = 3;
+pub const STT_FILE = 4;
+pub const STT_COMMON = 5;
+pub const STT_TLS = 6;
+pub const STT_NUM = 7;
+pub const STT_LOOS = 10;
+pub const STT_GNU_IFUNC = 10;
+pub const STT_HIOS = 12;
+pub const STT_LOPROC = 13;
+pub const STT_HIPROC = 15;
+
+pub const STT_SPARC_REGISTER = 13;
+
+pub const STT_PARISC_MILLICODE = 13;
+
+pub const STT_HP_OPAQUE = (STT_LOOS + 0x1);
+pub const STT_HP_STUB = (STT_LOOS + 0x2);
+
+pub const STT_ARM_TFUNC = STT_LOPROC;
+pub const STT_ARM_16BIT = STT_HIPROC;
+
+pub const VER_FLG_BASE = 0x1;
+pub const VER_FLG_WEAK = 0x2;
+
+/// An unknown type.
+pub const ET_NONE = 0;
+
+/// A relocatable file.
+pub const ET_REL = 1;
+
+/// An executable file.
+pub const ET_EXEC = 2;
+
+/// A shared object.
+pub const ET_DYN = 3;
+
+/// A core file.
+pub const ET_CORE = 4;
+
pub const FileType = enum {
Relocatable,
Executable,
@@ -64,7 +353,7 @@ pub const SectionHeader = struct {
};
pub const Elf = struct {
- in_file: &os.File,
+ in_file: *os.File,
auto_close_stream: bool,
is_64: bool,
endian: builtin.Endian,
@@ -74,20 +363,20 @@ pub const Elf = struct {
program_header_offset: u64,
section_header_offset: u64,
string_section_index: u64,
- string_section: &SectionHeader,
+ string_section: *SectionHeader,
section_headers: []SectionHeader,
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
prealloc_file: os.File,
/// Call close when done.
- pub fn openPath(elf: &Elf, allocator: &mem.Allocator, path: []const u8) !void {
+ pub fn openPath(elf: *Elf, allocator: *mem.Allocator, path: []const u8) !void {
try elf.prealloc_file.open(path);
- try elf.openFile(allocator, &elf.prealloc_file);
+        try elf.openFile(allocator, &elf.prealloc_file);
elf.auto_close_stream = true;
}
/// Call close when done.
- pub fn openFile(elf: &Elf, allocator: &mem.Allocator, file: &os.File) !void {
+ pub fn openFile(elf: *Elf, allocator: *mem.Allocator, file: *os.File) !void {
elf.allocator = allocator;
elf.in_file = file;
elf.auto_close_stream = false;
@@ -155,9 +444,7 @@ pub const Elf = struct {
try elf.in_file.seekForward(4);
const header_size = try in.readInt(elf.endian, u16);
- if ((elf.is_64 and header_size != 64) or
- (!elf.is_64 and header_size != 52))
- {
+ if ((elf.is_64 and header_size != 64) or (!elf.is_64 and header_size != 52)) {
return error.InvalidFormat;
}
@@ -188,16 +475,16 @@ pub const Elf = struct {
if (sh_entry_size != 64) return error.InvalidFormat;
for (elf.section_headers) |*elf_section| {
- elf_section.name = try in.readInt(elf.endian, u32);
- elf_section.sh_type = try in.readInt(elf.endian, u32);
- elf_section.flags = try in.readInt(elf.endian, u64);
- elf_section.addr = try in.readInt(elf.endian, u64);
- elf_section.offset = try in.readInt(elf.endian, u64);
- elf_section.size = try in.readInt(elf.endian, u64);
- elf_section.link = try in.readInt(elf.endian, u32);
- elf_section.info = try in.readInt(elf.endian, u32);
- elf_section.addr_align = try in.readInt(elf.endian, u64);
- elf_section.ent_size = try in.readInt(elf.endian, u64);
+ elf_section.name = try in.readInt(elf.endian, u32);
+ elf_section.sh_type = try in.readInt(elf.endian, u32);
+ elf_section.flags = try in.readInt(elf.endian, u64);
+ elf_section.addr = try in.readInt(elf.endian, u64);
+ elf_section.offset = try in.readInt(elf.endian, u64);
+ elf_section.size = try in.readInt(elf.endian, u64);
+ elf_section.link = try in.readInt(elf.endian, u32);
+ elf_section.info = try in.readInt(elf.endian, u32);
+ elf_section.addr_align = try in.readInt(elf.endian, u64);
+ elf_section.ent_size = try in.readInt(elf.endian, u64);
}
} else {
if (sh_entry_size != 40) return error.InvalidFormat;
@@ -231,14 +518,13 @@ pub const Elf = struct {
}
}
- pub fn close(elf: &Elf) void {
+ pub fn close(elf: *Elf) void {
elf.allocator.free(elf.section_headers);
- if (elf.auto_close_stream)
- elf.in_file.close();
+ if (elf.auto_close_stream) elf.in_file.close();
}
- pub fn findSection(elf: &Elf, name: []const u8) !?&SectionHeader {
+ pub fn findSection(elf: *Elf, name: []const u8) !?*SectionHeader {
var file_stream = io.FileInStream.init(elf.in_file);
const in = &file_stream.stream;
@@ -262,7 +548,339 @@ pub const Elf = struct {
return null;
}
- pub fn seekToSection(elf: &Elf, elf_section: &SectionHeader) !void {
+ pub fn seekToSection(elf: *Elf, elf_section: *SectionHeader) !void {
try elf.in_file.seekTo(elf_section.offset);
}
};
+
+pub const EI_NIDENT = 16;
+pub const Elf32_Half = u16;
+pub const Elf64_Half = u16;
+pub const Elf32_Word = u32;
+pub const Elf32_Sword = i32;
+pub const Elf64_Word = u32;
+pub const Elf64_Sword = i32;
+pub const Elf32_Xword = u64;
+pub const Elf32_Sxword = i64;
+pub const Elf64_Xword = u64;
+pub const Elf64_Sxword = i64;
+pub const Elf32_Addr = u32;
+pub const Elf64_Addr = u64;
+pub const Elf32_Off = u32;
+pub const Elf64_Off = u64;
+pub const Elf32_Section = u16;
+pub const Elf64_Section = u16;
+pub const Elf32_Versym = Elf32_Half;
+pub const Elf64_Versym = Elf64_Half;
+pub const Elf32_Ehdr = extern struct {
+ e_ident: [EI_NIDENT]u8,
+ e_type: Elf32_Half,
+ e_machine: Elf32_Half,
+ e_version: Elf32_Word,
+ e_entry: Elf32_Addr,
+ e_phoff: Elf32_Off,
+ e_shoff: Elf32_Off,
+ e_flags: Elf32_Word,
+ e_ehsize: Elf32_Half,
+ e_phentsize: Elf32_Half,
+ e_phnum: Elf32_Half,
+ e_shentsize: Elf32_Half,
+ e_shnum: Elf32_Half,
+ e_shstrndx: Elf32_Half,
+};
+pub const Elf64_Ehdr = extern struct {
+ e_ident: [EI_NIDENT]u8,
+ e_type: Elf64_Half,
+ e_machine: Elf64_Half,
+ e_version: Elf64_Word,
+ e_entry: Elf64_Addr,
+ e_phoff: Elf64_Off,
+ e_shoff: Elf64_Off,
+ e_flags: Elf64_Word,
+ e_ehsize: Elf64_Half,
+ e_phentsize: Elf64_Half,
+ e_phnum: Elf64_Half,
+ e_shentsize: Elf64_Half,
+ e_shnum: Elf64_Half,
+ e_shstrndx: Elf64_Half,
+};
+pub const Elf32_Shdr = extern struct {
+ sh_name: Elf32_Word,
+ sh_type: Elf32_Word,
+ sh_flags: Elf32_Word,
+ sh_addr: Elf32_Addr,
+ sh_offset: Elf32_Off,
+ sh_size: Elf32_Word,
+ sh_link: Elf32_Word,
+ sh_info: Elf32_Word,
+ sh_addralign: Elf32_Word,
+ sh_entsize: Elf32_Word,
+};
+pub const Elf64_Shdr = extern struct {
+ sh_name: Elf64_Word,
+ sh_type: Elf64_Word,
+ sh_flags: Elf64_Xword,
+ sh_addr: Elf64_Addr,
+ sh_offset: Elf64_Off,
+ sh_size: Elf64_Xword,
+ sh_link: Elf64_Word,
+ sh_info: Elf64_Word,
+ sh_addralign: Elf64_Xword,
+ sh_entsize: Elf64_Xword,
+};
+pub const Elf32_Chdr = extern struct {
+ ch_type: Elf32_Word,
+ ch_size: Elf32_Word,
+ ch_addralign: Elf32_Word,
+};
+pub const Elf64_Chdr = extern struct {
+ ch_type: Elf64_Word,
+ ch_reserved: Elf64_Word,
+ ch_size: Elf64_Xword,
+ ch_addralign: Elf64_Xword,
+};
+pub const Elf32_Sym = extern struct {
+ st_name: Elf32_Word,
+ st_value: Elf32_Addr,
+ st_size: Elf32_Word,
+ st_info: u8,
+ st_other: u8,
+ st_shndx: Elf32_Section,
+};
+pub const Elf64_Sym = extern struct {
+ st_name: Elf64_Word,
+ st_info: u8,
+ st_other: u8,
+ st_shndx: Elf64_Section,
+ st_value: Elf64_Addr,
+ st_size: Elf64_Xword,
+};
+pub const Elf32_Syminfo = extern struct {
+ si_boundto: Elf32_Half,
+ si_flags: Elf32_Half,
+};
+pub const Elf64_Syminfo = extern struct {
+ si_boundto: Elf64_Half,
+ si_flags: Elf64_Half,
+};
+pub const Elf32_Rel = extern struct {
+ r_offset: Elf32_Addr,
+ r_info: Elf32_Word,
+};
+pub const Elf64_Rel = extern struct {
+ r_offset: Elf64_Addr,
+ r_info: Elf64_Xword,
+};
+pub const Elf32_Rela = extern struct {
+ r_offset: Elf32_Addr,
+ r_info: Elf32_Word,
+ r_addend: Elf32_Sword,
+};
+pub const Elf64_Rela = extern struct {
+ r_offset: Elf64_Addr,
+ r_info: Elf64_Xword,
+ r_addend: Elf64_Sxword,
+};
+pub const Elf32_Phdr = extern struct {
+ p_type: Elf32_Word,
+ p_offset: Elf32_Off,
+ p_vaddr: Elf32_Addr,
+ p_paddr: Elf32_Addr,
+ p_filesz: Elf32_Word,
+ p_memsz: Elf32_Word,
+ p_flags: Elf32_Word,
+ p_align: Elf32_Word,
+};
+pub const Elf64_Phdr = extern struct {
+ p_type: Elf64_Word,
+ p_flags: Elf64_Word,
+ p_offset: Elf64_Off,
+ p_vaddr: Elf64_Addr,
+ p_paddr: Elf64_Addr,
+ p_filesz: Elf64_Xword,
+ p_memsz: Elf64_Xword,
+ p_align: Elf64_Xword,
+};
+pub const Elf32_Dyn = extern struct {
+ d_tag: Elf32_Sword,
+ d_un: extern union {
+ d_val: Elf32_Word,
+ d_ptr: Elf32_Addr,
+ },
+};
+pub const Elf64_Dyn = extern struct {
+ d_tag: Elf64_Sxword,
+ d_un: extern union {
+ d_val: Elf64_Xword,
+ d_ptr: Elf64_Addr,
+ },
+};
+pub const Elf32_Verdef = extern struct {
+ vd_version: Elf32_Half,
+ vd_flags: Elf32_Half,
+ vd_ndx: Elf32_Half,
+ vd_cnt: Elf32_Half,
+ vd_hash: Elf32_Word,
+ vd_aux: Elf32_Word,
+ vd_next: Elf32_Word,
+};
+pub const Elf64_Verdef = extern struct {
+ vd_version: Elf64_Half,
+ vd_flags: Elf64_Half,
+ vd_ndx: Elf64_Half,
+ vd_cnt: Elf64_Half,
+ vd_hash: Elf64_Word,
+ vd_aux: Elf64_Word,
+ vd_next: Elf64_Word,
+};
+pub const Elf32_Verdaux = extern struct {
+ vda_name: Elf32_Word,
+ vda_next: Elf32_Word,
+};
+pub const Elf64_Verdaux = extern struct {
+ vda_name: Elf64_Word,
+ vda_next: Elf64_Word,
+};
+pub const Elf32_Verneed = extern struct {
+ vn_version: Elf32_Half,
+ vn_cnt: Elf32_Half,
+ vn_file: Elf32_Word,
+ vn_aux: Elf32_Word,
+ vn_next: Elf32_Word,
+};
+pub const Elf64_Verneed = extern struct {
+ vn_version: Elf64_Half,
+ vn_cnt: Elf64_Half,
+ vn_file: Elf64_Word,
+ vn_aux: Elf64_Word,
+ vn_next: Elf64_Word,
+};
+pub const Elf32_Vernaux = extern struct {
+ vna_hash: Elf32_Word,
+ vna_flags: Elf32_Half,
+ vna_other: Elf32_Half,
+ vna_name: Elf32_Word,
+ vna_next: Elf32_Word,
+};
+pub const Elf64_Vernaux = extern struct {
+ vna_hash: Elf64_Word,
+ vna_flags: Elf64_Half,
+ vna_other: Elf64_Half,
+ vna_name: Elf64_Word,
+ vna_next: Elf64_Word,
+};
+pub const Elf32_auxv_t = extern struct {
+ a_type: u32,
+ a_un: extern union {
+ a_val: u32,
+ },
+};
+pub const Elf64_auxv_t = extern struct {
+ a_type: u64,
+ a_un: extern union {
+ a_val: u64,
+ },
+};
+pub const Elf32_Nhdr = extern struct {
+ n_namesz: Elf32_Word,
+ n_descsz: Elf32_Word,
+ n_type: Elf32_Word,
+};
+pub const Elf64_Nhdr = extern struct {
+ n_namesz: Elf64_Word,
+ n_descsz: Elf64_Word,
+ n_type: Elf64_Word,
+};
+pub const Elf32_Move = extern struct {
+ m_value: Elf32_Xword,
+ m_info: Elf32_Word,
+ m_poffset: Elf32_Word,
+ m_repeat: Elf32_Half,
+ m_stride: Elf32_Half,
+};
+pub const Elf64_Move = extern struct {
+ m_value: Elf64_Xword,
+ m_info: Elf64_Xword,
+ m_poffset: Elf64_Xword,
+ m_repeat: Elf64_Half,
+ m_stride: Elf64_Half,
+};
+pub const Elf32_gptab = extern union {
+ gt_header: extern struct {
+ gt_current_g_value: Elf32_Word,
+ gt_unused: Elf32_Word,
+ },
+ gt_entry: extern struct {
+ gt_g_value: Elf32_Word,
+ gt_bytes: Elf32_Word,
+ },
+};
+pub const Elf32_RegInfo = extern struct {
+ ri_gprmask: Elf32_Word,
+ ri_cprmask: [4]Elf32_Word,
+ ri_gp_value: Elf32_Sword,
+};
+pub const Elf_Options = extern struct {
+ kind: u8,
+ size: u8,
+ @"section": Elf32_Section,
+ info: Elf32_Word,
+};
+pub const Elf_Options_Hw = extern struct {
+ hwp_flags1: Elf32_Word,
+ hwp_flags2: Elf32_Word,
+};
+pub const Elf32_Lib = extern struct {
+ l_name: Elf32_Word,
+ l_time_stamp: Elf32_Word,
+ l_checksum: Elf32_Word,
+ l_version: Elf32_Word,
+ l_flags: Elf32_Word,
+};
+pub const Elf64_Lib = extern struct {
+ l_name: Elf64_Word,
+ l_time_stamp: Elf64_Word,
+ l_checksum: Elf64_Word,
+ l_version: Elf64_Word,
+ l_flags: Elf64_Word,
+};
+pub const Elf32_Conflict = Elf32_Addr;
+pub const Elf_MIPS_ABIFlags_v0 = extern struct {
+ version: Elf32_Half,
+ isa_level: u8,
+ isa_rev: u8,
+ gpr_size: u8,
+ cpr1_size: u8,
+ cpr2_size: u8,
+ fp_abi: u8,
+ isa_ext: Elf32_Word,
+ ases: Elf32_Word,
+ flags1: Elf32_Word,
+ flags2: Elf32_Word,
+};
+
+pub const Ehdr = switch (@sizeOf(usize)) {
+ 4 => Elf32_Ehdr,
+ 8 => Elf64_Ehdr,
+ else => @compileError("expected pointer size of 32 or 64"),
+};
+pub const Phdr = switch (@sizeOf(usize)) {
+ 4 => Elf32_Phdr,
+ 8 => Elf64_Phdr,
+ else => @compileError("expected pointer size of 32 or 64"),
+};
+pub const Sym = switch (@sizeOf(usize)) {
+ 4 => Elf32_Sym,
+ 8 => Elf64_Sym,
+ else => @compileError("expected pointer size of 32 or 64"),
+};
+pub const Verdef = switch (@sizeOf(usize)) {
+ 4 => Elf32_Verdef,
+ 8 => Elf64_Verdef,
+ else => @compileError("expected pointer size of 32 or 64"),
+};
+pub const Verdaux = switch (@sizeOf(usize)) {
+ 4 => Elf32_Verdaux,
+ 8 => Elf64_Verdaux,
+ else => @compileError("expected pointer size of 32 or 64"),
+};
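
The aliases at the end of std/elf.zig (`Ehdr`, `Phdr`, `Sym`, `Verdef`, `Verdaux`) resolve to the 32- or 64-bit struct matching the target's pointer width. A small sketch of size-agnostic use, mirroring the magic check in ElfLib.init above (an illustration, not code from this patch):

    const std = @import("std");
    const elf = std.elf;
    const mem = std.mem;

    // `elf.Ehdr` is Elf32_Ehdr on 32-bit targets and Elf64_Ehdr on 64-bit targets,
    // so the same check compiles for both.
    fn isElfImage(bytes: []align(@alignOf(elf.Ehdr)) u8) bool {
        const eh = @ptrCast(*elf.Ehdr, bytes.ptr);
        return mem.eql(u8, eh.e_ident[0..4], "\x7fELF");
    }
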
diff --git a/std/event.zig b/std/event.zig
index bdad7fcc18..1e52086286 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -1,235 +1,17 @@
-const std = @import("index.zig");
-const builtin = @import("builtin");
-const assert = std.debug.assert;
-const event = this;
-const mem = std.mem;
-const posix = std.os.posix;
+pub const Locked = @import("event/locked.zig").Locked;
+pub const Loop = @import("event/loop.zig").Loop;
+pub const Lock = @import("event/lock.zig").Lock;
+pub const tcp = @import("event/tcp.zig");
+pub const Channel = @import("event/channel.zig").Channel;
+pub const Group = @import("event/group.zig").Group;
+pub const Future = @import("event/future.zig").Future;
-pub const TcpServer = struct {
- handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void,
-
- loop: &Loop,
- sockfd: i32,
- accept_coro: ?promise,
- listen_address: std.net.Address,
-
- waiting_for_emfile_node: PromiseNode,
-
- const PromiseNode = std.LinkedList(promise).Node;
-
- pub fn init(loop: &Loop) !TcpServer {
- const sockfd = try std.os.posixSocket(posix.AF_INET,
- posix.SOCK_STREAM|posix.SOCK_CLOEXEC|posix.SOCK_NONBLOCK,
- posix.PROTO_tcp);
- errdefer std.os.close(sockfd);
-
- // TODO can't initialize handler coroutine here because we need well defined copy elision
- return TcpServer {
- .loop = loop,
- .sockfd = sockfd,
- .accept_coro = null,
- .handleRequestFn = undefined,
- .waiting_for_emfile_node = undefined,
- .listen_address = undefined,
- };
- }
-
- pub fn listen(self: &TcpServer, address: &const std.net.Address,
- handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File)void) !void
- {
- self.handleRequestFn = handleRequestFn;
-
- try std.os.posixBind(self.sockfd, &address.os_addr);
- try std.os.posixListen(self.sockfd, posix.SOMAXCONN);
- self.listen_address = std.net.Address.initPosix(try std.os.posixGetSockName(self.sockfd));
-
- self.accept_coro = try async TcpServer.handler(self);
- errdefer cancel ??self.accept_coro;
-
- try self.loop.addFd(self.sockfd, ??self.accept_coro);
- errdefer self.loop.removeFd(self.sockfd);
-
- }
-
- pub fn deinit(self: &TcpServer) void {
- self.loop.removeFd(self.sockfd);
- if (self.accept_coro) |accept_coro| cancel accept_coro;
- std.os.close(self.sockfd);
- }
-
- pub async fn handler(self: &TcpServer) void {
- while (true) {
- var accepted_addr: std.net.Address = undefined;
- if (std.os.posixAccept(self.sockfd, &accepted_addr.os_addr,
- posix.SOCK_NONBLOCK | posix.SOCK_CLOEXEC)) |accepted_fd|
- {
- var socket = std.os.File.openHandle(accepted_fd);
- _ = async self.handleRequestFn(self, accepted_addr, socket) catch |err| switch (err) {
- error.OutOfMemory => {
- socket.close();
- continue;
- },
- };
- } else |err| switch (err) {
- error.WouldBlock => {
- suspend; // we will get resumed by epoll_wait in the event loop
- continue;
- },
- error.ProcessFdQuotaExceeded => {
- errdefer std.os.emfile_promise_queue.remove(&self.waiting_for_emfile_node);
- suspend |p| {
- self.waiting_for_emfile_node = PromiseNode.init(p);
- std.os.emfile_promise_queue.append(&self.waiting_for_emfile_node);
- }
- continue;
- },
- error.ConnectionAborted,
- error.FileDescriptorClosed => continue,
-
- error.PageFault => unreachable,
- error.InvalidSyscall => unreachable,
- error.FileDescriptorNotASocket => unreachable,
- error.OperationNotSupported => unreachable,
-
- error.SystemFdQuotaExceeded,
- error.SystemResources,
- error.ProtocolFailure,
- error.BlockedByFirewall,
- error.Unexpected => {
- @panic("TODO handle this error");
- },
- }
- }
- }
-};
-
-pub const Loop = struct {
- allocator: &mem.Allocator,
- epollfd: i32,
- keep_running: bool,
-
- fn init(allocator: &mem.Allocator) !Loop {
- const epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC);
- return Loop {
- .keep_running = true,
- .allocator = allocator,
- .epollfd = epollfd,
- };
- }
-
- pub fn addFd(self: &Loop, fd: i32, prom: promise) !void {
- var ev = std.os.linux.epoll_event {
- .events = std.os.linux.EPOLLIN|std.os.linux.EPOLLOUT|std.os.linux.EPOLLET,
- .data = std.os.linux.epoll_data {
- .ptr = @ptrToInt(prom),
- },
- };
- try std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_ADD, fd, &ev);
- }
-
- pub fn removeFd(self: &Loop, fd: i32) void {
- std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
- }
-
- async fn waitFd(self: &Loop, fd: i32) !void {
- defer self.removeFd(fd);
- suspend |p| {
- try self.addFd(fd, p);
- }
- }
-
- pub fn stop(self: &Loop) void {
- // TODO make atomic
- self.keep_running = false;
- // TODO activate an fd in the epoll set
- }
-
- pub fn run(self: &Loop) void {
- while (self.keep_running) {
- var events: [16]std.os.linux.epoll_event = undefined;
- const count = std.os.linuxEpollWait(self.epollfd, events[0..], -1);
- for (events[0..count]) |ev| {
- const p = @intToPtr(promise, ev.data.ptr);
- resume p;
- }
- }
- }
-};
-
-pub async fn connect(loop: &Loop, _address: &const std.net.Address) !std.os.File {
- var address = *_address; // TODO https://github.com/zig-lang/zig/issues/733
-
- const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM|posix.SOCK_CLOEXEC|posix.SOCK_NONBLOCK, posix.PROTO_tcp);
- errdefer std.os.close(sockfd);
-
- try std.os.posixConnectAsync(sockfd, &address.os_addr);
- try await try async loop.waitFd(sockfd);
- try std.os.posixGetSockOptConnectError(sockfd);
-
- return std.os.File.openHandle(sockfd);
-}
-
-test "listen on a port, send bytes, receive bytes" {
- if (builtin.os != builtin.Os.linux) {
- // TODO build abstractions for other operating systems
- return;
- }
- const MyServer = struct {
- tcp_server: TcpServer,
-
- const Self = this;
-
- async<&mem.Allocator> fn handler(tcp_server: &TcpServer, _addr: &const std.net.Address,
- _socket: &const std.os.File) void
- {
- const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
- var socket = *_socket; // TODO https://github.com/zig-lang/zig/issues/733
- defer socket.close();
- const next_handler = async errorableHandler(self, _addr, socket) catch |err| switch (err) {
- error.OutOfMemory => @panic("unable to handle connection: out of memory"),
- };
- (await next_handler) catch |err| {
- std.debug.panic("unable to handle connection: {}\n", err);
- };
- suspend |p| { cancel p; }
- }
-
- async fn errorableHandler(self: &Self, _addr: &const std.net.Address,
- _socket: &const std.os.File) !void
- {
- const addr = *_addr; // TODO https://github.com/zig-lang/zig/issues/733
- var socket = *_socket; // TODO https://github.com/zig-lang/zig/issues/733
-
- var adapter = std.io.FileOutStream.init(&socket);
- var stream = &adapter.stream;
- try stream.print("hello from server\n");
- }
- };
-
- const ip4addr = std.net.parseIp4("127.0.0.1") catch unreachable;
- const addr = std.net.Address.initIp4(ip4addr, 0);
-
- var loop = try Loop.init(std.debug.global_allocator);
- var server = MyServer {
- .tcp_server = try TcpServer.init(&loop),
- };
- defer server.tcp_server.deinit();
- try server.tcp_server.listen(addr, MyServer.handler);
-
- const p = try async doAsyncTest(&loop, server.tcp_server.listen_address);
- defer cancel p;
- loop.run();
-}
-
-async fn doAsyncTest(loop: &Loop, address: &const std.net.Address) void {
- errdefer @panic("test failure");
-
- var socket_file = try await try async event.connect(loop, address);
- defer socket_file.close();
-
- var buf: [512]u8 = undefined;
- const amt_read = try socket_file.read(buf[0..]);
- const msg = buf[0..amt_read];
- assert(mem.eql(u8, msg, "hello from server\n"));
- loop.stop();
+test "import event tests" {
+ _ = @import("event/locked.zig");
+ _ = @import("event/loop.zig");
+ _ = @import("event/lock.zig");
+ _ = @import("event/tcp.zig");
+ _ = @import("event/channel.zig");
+ _ = @import("event/group.zig");
+ _ = @import("event/future.zig");
}
diff --git a/std/event/channel.zig b/std/event/channel.zig
new file mode 100644
index 0000000000..71e97f6e78
--- /dev/null
+++ b/std/event/channel.zig
@@ -0,0 +1,235 @@
+const std = @import("../index.zig");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const Loop = std.event.Loop;
+
+/// Many-producer, many-consumer, thread-safe, lock-free channel with a runtime-configurable buffer size.
+/// When the buffer is empty, consumers suspend and are resumed by producers;
+/// when the buffer is full, producers suspend and are resumed by consumers.
+pub fn Channel(comptime T: type) type {
+ return struct {
+ loop: *Loop,
+
+ getters: std.atomic.Queue(GetNode),
+ putters: std.atomic.Queue(PutNode),
+ get_count: usize,
+ put_count: usize,
+ dispatch_lock: u8, // TODO make this a bool
+ need_dispatch: u8, // TODO make this a bool
+
+ // simple fixed size ring buffer
+ buffer_nodes: []T,
+ buffer_index: usize,
+ buffer_len: usize,
+
+ const SelfChannel = this;
+ const GetNode = struct {
+ ptr: *T,
+ tick_node: *Loop.NextTickNode,
+ };
+ const PutNode = struct {
+ data: T,
+ tick_node: *Loop.NextTickNode,
+ };
+
+ /// call destroy when done
+ pub fn create(loop: *Loop, capacity: usize) !*SelfChannel {
+ const buffer_nodes = try loop.allocator.alloc(T, capacity);
+ errdefer loop.allocator.free(buffer_nodes);
+
+ const self = try loop.allocator.create(SelfChannel{
+ .loop = loop,
+ .buffer_len = 0,
+ .buffer_nodes = buffer_nodes,
+ .buffer_index = 0,
+ .dispatch_lock = 0,
+ .need_dispatch = 0,
+ .getters = std.atomic.Queue(GetNode).init(),
+ .putters = std.atomic.Queue(PutNode).init(),
+ .get_count = 0,
+ .put_count = 0,
+ });
+ errdefer loop.allocator.destroy(self);
+
+ return self;
+ }
+
+ /// must be called when all calls to put and get have suspended and no more calls occur
+ pub fn destroy(self: *SelfChannel) void {
+ while (self.getters.get()) |get_node| {
+ cancel get_node.data.tick_node.data;
+ }
+ while (self.putters.get()) |put_node| {
+ cancel put_node.data.tick_node.data;
+ }
+ self.loop.allocator.free(self.buffer_nodes);
+ self.loop.allocator.destroy(self);
+ }
+
+        /// Puts a data item in the channel. The promise completes when the value has been added to the
+        /// buffer, or, in the case of a zero-size buffer, when the item has been retrieved by a getter.
+ pub async fn put(self: *SelfChannel, data: T) void {
+ suspend {
+ var my_tick_node = Loop.NextTickNode{
+ .next = undefined,
+ .data = @handle(),
+ };
+ var queue_node = std.atomic.Queue(PutNode).Node{
+ .data = PutNode{
+ .tick_node = &my_tick_node,
+ .data = data,
+ },
+ .next = undefined,
+ };
+ self.putters.put(&queue_node);
+ _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+
+ self.dispatch();
+ }
+ }
+
+        /// Await this function to get an item from the channel. If the buffer is empty, the promise will
+ /// complete when the next item is put in the channel.
+ pub async fn get(self: *SelfChannel) T {
+ // TODO integrate this function with named return values
+ // so we can get rid of this extra result copy
+ var result: T = undefined;
+ suspend {
+ var my_tick_node = Loop.NextTickNode{
+ .next = undefined,
+ .data = @handle(),
+ };
+ var queue_node = std.atomic.Queue(GetNode).Node{
+ .data = GetNode{
+ .ptr = &result,
+ .tick_node = &my_tick_node,
+ },
+ .next = undefined,
+ };
+ self.getters.put(&queue_node);
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+
+ self.dispatch();
+ }
+ return result;
+ }
+
+ fn dispatch(self: *SelfChannel) void {
+ // set the "need dispatch" flag
+ _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+
+ lock: while (true) {
+ // set the lock flag
+ const prev_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ if (prev_lock != 0) return;
+
+ // clear the need_dispatch flag since we're about to do it
+ _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ while (true) {
+ one_dispatch: {
+ // later we correct these extra subtractions
+ var get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ var put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+
+ // transfer self.buffer to self.getters
+ while (self.buffer_len != 0) {
+ if (get_count == 0) break :one_dispatch;
+
+ const get_node = &self.getters.get().?.data;
+ get_node.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
+ self.loop.onNextTick(get_node.tick_node);
+ self.buffer_len -= 1;
+
+ get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+
+ // direct transfer self.putters to self.getters
+ while (get_count != 0 and put_count != 0) {
+ const get_node = &self.getters.get().?.data;
+ const put_node = &self.putters.get().?.data;
+
+ get_node.ptr.* = put_node.data;
+ self.loop.onNextTick(get_node.tick_node);
+ self.loop.onNextTick(put_node.tick_node);
+
+ get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+
+ // transfer self.putters to self.buffer
+ while (self.buffer_len != self.buffer_nodes.len and put_count != 0) {
+ const put_node = &self.putters.get().?.data;
+
+ self.buffer_nodes[self.buffer_index] = put_node.data;
+ self.loop.onNextTick(put_node.tick_node);
+ self.buffer_index +%= 1;
+ self.buffer_len += 1;
+
+ put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+ }
+
+ // undo the extra subtractions
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+
+ // clear need-dispatch flag
+ const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ if (need_dispatch != 0) continue;
+
+ const my_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ assert(my_lock != 0);
+
+ // we have to check again now that we unlocked
+ if (@atomicLoad(u8, &self.need_dispatch, AtomicOrder.SeqCst) != 0) continue :lock;
+
+ return;
+ }
+ }
+ }
+ };
+}
+
+test "std.event.Channel" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop: Loop = undefined;
+ // TODO make a multi threaded test
+ try loop.initSingleThreaded(allocator);
+ defer loop.deinit();
+
+ const channel = try Channel(i32).create(&loop, 0);
+ defer channel.destroy();
+
+ const handle = try async testChannelGetter(&loop, channel);
+ defer cancel handle;
+
+ const putter = try async testChannelPutter(channel);
+ defer cancel putter;
+
+ loop.run();
+}
+
+async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
+ errdefer @panic("test failed");
+
+ const value1_promise = try async channel.get();
+ const value1 = await value1_promise;
+ assert(value1 == 1234);
+
+ const value2_promise = try async channel.get();
+ const value2 = await value2_promise;
+ assert(value2 == 4567);
+}
+
+async fn testChannelPutter(channel: *Channel(i32)) void {
+ await (async channel.put(1234) catch @panic("out of memory"));
+ await (async channel.put(4567) catch @panic("out of memory"));
+}
+
diff --git a/std/event/future.zig b/std/event/future.zig
new file mode 100644
index 0000000000..8abdce7d02
--- /dev/null
+++ b/std/event/future.zig
@@ -0,0 +1,130 @@
+const std = @import("../index.zig");
+const assert = std.debug.assert;
+const builtin = @import("builtin");
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const Lock = std.event.Lock;
+const Loop = std.event.Loop;
+
+/// This is a value that starts out unavailable until resolve() is called.
+/// While it is unavailable, coroutines suspend when they try to get() it,
+/// and then are resumed when resolve() is called.
+/// At that point the value remains available forever, and another resolve() is not allowed.
+pub fn Future(comptime T: type) type {
+ return struct {
+ lock: Lock,
+ data: T,
+
+ /// TODO make this an enum
+ /// 0 - not started
+ /// 1 - started
+ /// 2 - finished
+ available: u8,
+
+ const Self = this;
+ const Queue = std.atomic.Queue(promise);
+
+ pub fn init(loop: *Loop) Self {
+ return Self{
+ .lock = Lock.initLocked(loop),
+ .available = 0,
+ .data = undefined,
+ };
+ }
+
+ /// Obtain the value. If it's not available, wait until it becomes
+ /// available.
+ /// Thread-safe.
+ pub async fn get(self: *Self) *T {
+ if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) {
+ return &self.data;
+ }
+ const held = await (async self.lock.acquire() catch unreachable);
+ held.release();
+
+ return &self.data;
+ }
+
+ /// Gets the data without waiting for it. If it's available, a pointer is
+ /// returned. Otherwise, null is returned.
+ pub fn getOrNull(self: *Self) ?*T {
+ if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 2) {
+ return &self.data;
+ } else {
+ return null;
+ }
+ }
+
+ /// If someone else has started working on the data, wait for them to complete
+ /// and return a pointer to the data. Otherwise, return null, and the caller
+ /// should start working on the data.
+ /// It's not required to call start() before resolve() but it can be useful since
+ /// this method is thread-safe.
+ pub async fn start(self: *Self) ?*T {
+ const state = @cmpxchgStrong(u8, &self.available, 0, 1, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return null;
+ switch (state) {
+ 1 => {
+ const held = await (async self.lock.acquire() catch unreachable);
+ held.release();
+ return &self.data;
+ },
+ 2 => return &self.data,
+ else => unreachable,
+ }
+ }
+
+ /// Make the data become available. May be called only once.
+ /// Before calling this, modify the `data` property.
+ pub fn resolve(self: *Self) void {
+ const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 2, AtomicOrder.SeqCst);
+ assert(prev == 0 or prev == 1); // resolve() called twice
+ Lock.Held.release(Lock.Held{ .lock = &self.lock });
+ }
+ };
+}
+
+test "std.event.Future" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop: Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ const handle = try async testFuture(&loop);
+ defer cancel handle;
+
+ loop.run();
+}
+
+async fn testFuture(loop: *Loop) void {
+ suspend {
+ resume @handle();
+ }
+ var future = Future(i32).init(loop);
+
+ const a = async waitOnFuture(&future) catch @panic("memory");
+ const b = async waitOnFuture(&future) catch @panic("memory");
+ const c = async resolveFuture(&future) catch @panic("memory");
+
+ const result = (await a) + (await b);
+ cancel c;
+ assert(result == 12);
+}
+
+async fn waitOnFuture(future: *Future(i32)) i32 {
+ suspend {
+ resume @handle();
+ }
+ return (await (async future.get() catch @panic("memory"))).*;
+}
+
+async fn resolveFuture(future: *Future(i32)) void {
+ suspend {
+ resume @handle();
+ }
+ future.data = 6;
+ future.resolve();
+}
diff --git a/std/event/group.zig b/std/event/group.zig
new file mode 100644
index 0000000000..6c7fc63699
--- /dev/null
+++ b/std/event/group.zig
@@ -0,0 +1,170 @@
+const std = @import("../index.zig");
+const builtin = @import("builtin");
+const Lock = std.event.Lock;
+const Loop = std.event.Loop;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const assert = std.debug.assert;
+
+/// ReturnType must be `void` or `E!void`
+pub fn Group(comptime ReturnType: type) type {
+ return struct {
+ coro_stack: Stack,
+ alloc_stack: Stack,
+ lock: Lock,
+
+ const Self = this;
+
+ const Error = switch (@typeInfo(ReturnType)) {
+ builtin.TypeId.ErrorUnion => |payload| payload.error_set,
+ else => void,
+ };
+ const Stack = std.atomic.Stack(promise->ReturnType);
+
+ pub fn init(loop: *Loop) Self {
+ return Self{
+ .coro_stack = Stack.init(),
+ .alloc_stack = Stack.init(),
+ .lock = Lock.init(loop),
+ };
+ }
+
+ /// Add a promise to the group. Thread-safe.
+ pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) {
+ const node = try self.lock.loop.allocator.create(Stack.Node{
+ .next = undefined,
+ .data = handle,
+ });
+ self.alloc_stack.push(node);
+ }
+
+ /// Add a node to the group. Thread-safe. Cannot fail.
+ /// `node.data` should be the promise handle to add to the group.
+ /// The node's memory should be in the coroutine frame of
+ /// the handle that is in the node, or somewhere guaranteed to live
+ /// at least as long.
+ pub fn addNode(self: *Self, node: *Stack.Node) void {
+ self.coro_stack.push(node);
+ }
+
+ /// This is equivalent to an async call, but the async function is added to the group, instead
+ /// of returning a promise. func must be async and have return type ReturnType.
+ /// Thread-safe.
+ pub fn call(self: *Self, comptime func: var, args: ...) (error{OutOfMemory}!void) {
+ const S = struct {
+ async fn asyncFunc(node: **Stack.Node, args2: ...) ReturnType {
+                    // TODO this is a hack to make the following memory be inside the coro frame
+ suspend {
+ var my_node: Stack.Node = undefined;
+ node.* = &my_node;
+ resume @handle();
+ }
+
+ // TODO this allocation elision should be guaranteed because we await it in
+ // this coro frame
+ return await (async func(args2) catch unreachable);
+ }
+ };
+ var node: *Stack.Node = undefined;
+ const handle = try async S.asyncFunc(&node, args);
+ node.* = Stack.Node{
+ .next = undefined,
+ .data = handle,
+ };
+ self.coro_stack.push(node);
+ }
+
+ /// Wait for all the calls and promises of the group to complete.
+ /// Thread-safe.
+ /// Safe to call any number of times.
+ pub async fn wait(self: *Self) ReturnType {
+ // TODO catch unreachable because the allocation can be grouped with
+ // the coro frame allocation
+ const held = await (async self.lock.acquire() catch unreachable);
+ defer held.release();
+
+ while (self.coro_stack.pop()) |node| {
+ if (Error == void) {
+ await node.data;
+ } else {
+ (await node.data) catch |err| {
+ self.cancelAll();
+ return err;
+ };
+ }
+ }
+ while (self.alloc_stack.pop()) |node| {
+ const handle = node.data;
+ self.lock.loop.allocator.destroy(node);
+ if (Error == void) {
+ await handle;
+ } else {
+ (await handle) catch |err| {
+ self.cancelAll();
+ return err;
+ };
+ }
+ }
+ }
+
+ /// Cancel all the outstanding promises. May only be called if wait was never called.
+ /// TODO These should be `cancelasync` not `cancel`.
+ /// See https://github.com/ziglang/zig/issues/1261
+ pub fn cancelAll(self: *Self) void {
+ while (self.coro_stack.pop()) |node| {
+ cancel node.data;
+ }
+ while (self.alloc_stack.pop()) |node| {
+ cancel node.data;
+ self.lock.loop.allocator.destroy(node);
+ }
+ }
+ };
+}
+
+test "std.event.Group" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop: Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ const handle = try async testGroup(&loop);
+ defer cancel handle;
+
+ loop.run();
+}
+
+async fn testGroup(loop: *Loop) void {
+ var count: usize = 0;
+ var group = Group(void).init(loop);
+ group.add(async sleepALittle(&count) catch @panic("memory")) catch @panic("memory");
+ group.call(increaseByTen, &count) catch @panic("memory");
+ await (async group.wait() catch @panic("memory"));
+ assert(count == 11);
+
+ var another = Group(error!void).init(loop);
+ another.add(async somethingElse() catch @panic("memory")) catch @panic("memory");
+ another.call(doSomethingThatFails) catch @panic("memory");
+ std.debug.assertError(await (async another.wait() catch @panic("memory")), error.ItBroke);
+}
+
+async fn sleepALittle(count: *usize) void {
+ std.os.time.sleep(0, 1000000);
+ _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+}
+
+async fn increaseByTen(count: *usize) void {
+ var i: usize = 0;
+ while (i < 10) : (i += 1) {
+ _ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ }
+}
+
+async fn doSomethingThatFails() error!void {}
+async fn somethingElse() error!void {
+ return error.ItBroke;
+}
diff --git a/std/event/lock.zig b/std/event/lock.zig
new file mode 100644
index 0000000000..c4cb1a3f0e
--- /dev/null
+++ b/std/event/lock.zig
@@ -0,0 +1,190 @@
+const std = @import("../index.zig");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+const mem = std.mem;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const Loop = std.event.Loop;
+
+/// Thread-safe async/await lock.
+/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
+/// are resumed when the lock is released, in order.
+pub const Lock = struct {
+ loop: *Loop,
+ shared_bit: u8, // TODO make this a bool
+ queue: Queue,
+ queue_empty_bit: u8, // TODO make this a bool
+
+ const Queue = std.atomic.Queue(promise);
+
+ pub const Held = struct {
+ lock: *Lock,
+
+ pub fn release(self: Held) void {
+ // Resume the next item from the queue.
+ if (self.lock.queue.get()) |node| {
+ self.lock.loop.onNextTick(node);
+ return;
+ }
+
+ // We need to release the lock.
+ _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ // There might be a queue item. If we know the queue is empty, we can be done,
+ // because the other actor will try to obtain the lock.
+ // But if there's a queue item, we are the actor which must loop and attempt
+ // to grab the lock again.
+ if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) {
+ return;
+ }
+
+ while (true) {
+ const old_bit = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ if (old_bit != 0) {
+ // We did not obtain the lock. Great, the queue is someone else's problem.
+ return;
+ }
+
+ // Resume the next item from the queue.
+ if (self.lock.queue.get()) |node| {
+ self.lock.loop.onNextTick(node);
+ return;
+ }
+
+ // Release the lock again.
+ _ = @atomicRmw(u8, &self.lock.queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ // Find out if we can be done.
+ if (@atomicLoad(u8, &self.lock.queue_empty_bit, AtomicOrder.SeqCst) == 1) {
+ return;
+ }
+ }
+ }
+ };
+
+ pub fn init(loop: *Loop) Lock {
+ return Lock{
+ .loop = loop,
+ .shared_bit = 0,
+ .queue = Queue.init(),
+ .queue_empty_bit = 1,
+ };
+ }
+
+ pub fn initLocked(loop: *Loop) Lock {
+ return Lock{
+ .loop = loop,
+ .shared_bit = 1,
+ .queue = Queue.init(),
+ .queue_empty_bit = 1,
+ };
+ }
+
+ /// Must be called when not locked. Not thread safe.
+ /// All calls to acquire() and release() must complete before calling deinit().
+ pub fn deinit(self: *Lock) void {
+ assert(self.shared_bit == 0);
+ while (self.queue.get()) |node| cancel node.data;
+ }
+
+ pub async fn acquire(self: *Lock) Held {
+ suspend {
+ // TODO explicitly put this memory in the coroutine frame #1194
+ var my_tick_node = Loop.NextTickNode{
+ .data = @handle(),
+ .next = undefined,
+ };
+
+ self.queue.put(&my_tick_node);
+
+ // At this point, we are in the queue, so we might have already been resumed and this coroutine
+ // frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
+
+            // We set this bit so that later we can rely on the fact that, if queue_empty_bit is 1,
+            // some actor will attempt to grab the lock.
+ _ = @atomicRmw(u8, &self.queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ const old_bit = @atomicRmw(u8, &self.shared_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ if (old_bit == 0) {
+ if (self.queue.get()) |node| {
+ // Whether this node is us or someone else, we tail resume it.
+ resume node.data;
+ }
+ }
+ }
+
+ return Held{ .lock = self };
+ }
+};
+
+test "std.event.Lock" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop: Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ var lock = Lock.init(&loop);
+ defer lock.deinit();
+
+ const handle = try async testLock(&loop, &lock);
+ defer cancel handle;
+ loop.run();
+
+ assert(mem.eql(i32, shared_test_data, [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len));
+}
+
+async fn testLock(loop: *Loop, lock: *Lock) void {
+ // TODO explicitly put next tick node memory in the coroutine frame #1194
+ suspend {
+ resume @handle();
+ }
+ const handle1 = async lockRunner(lock) catch @panic("out of memory");
+ var tick_node1 = Loop.NextTickNode{
+ .next = undefined,
+ .data = handle1,
+ };
+ loop.onNextTick(&tick_node1);
+
+ const handle2 = async lockRunner(lock) catch @panic("out of memory");
+ var tick_node2 = Loop.NextTickNode{
+ .next = undefined,
+ .data = handle2,
+ };
+ loop.onNextTick(&tick_node2);
+
+ const handle3 = async lockRunner(lock) catch @panic("out of memory");
+ var tick_node3 = Loop.NextTickNode{
+ .next = undefined,
+ .data = handle3,
+ };
+ loop.onNextTick(&tick_node3);
+
+ await handle1;
+ await handle2;
+ await handle3;
+}
+
+var shared_test_data = [1]i32{0} ** 10;
+var shared_test_index: usize = 0;
+
+async fn lockRunner(lock: *Lock) void {
+ suspend; // resumed by onNextTick
+
+ var i: usize = 0;
+ while (i < shared_test_data.len) : (i += 1) {
+ const lock_promise = async lock.acquire() catch @panic("out of memory");
+ const handle = await lock_promise;
+ defer handle.release();
+
+ shared_test_index = 0;
+ while (shared_test_index < shared_test_data.len) : (shared_test_index += 1) {
+ shared_test_data[shared_test_index] = shared_test_data[shared_test_index] + 1;
+ }
+ }
+}
diff --git a/std/event/locked.zig b/std/event/locked.zig
new file mode 100644
index 0000000000..e7ad544d78
--- /dev/null
+++ b/std/event/locked.zig
@@ -0,0 +1,43 @@
+const std = @import("../index.zig");
+const Lock = std.event.Lock;
+const Loop = std.event.Loop;
+
+/// Thread-safe async/await lock that protects one piece of data.
+/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
+/// are resumed when the lock is released, in order.
+pub fn Locked(comptime T: type) type {
+ return struct {
+ lock: Lock,
+ private_data: T,
+
+ const Self = this;
+
+ pub const HeldLock = struct {
+ value: *T,
+ held: Lock.Held,
+
+ pub fn release(self: HeldLock) void {
+ self.held.release();
+ }
+ };
+
+ pub fn init(loop: *Loop, data: T) Self {
+ return Self{
+ .lock = Lock.init(loop),
+ .private_data = data,
+ };
+ }
+
+ pub fn deinit(self: *Self) void {
+ self.lock.deinit();
+ }
+
+ pub async fn acquire(self: *Self) HeldLock {
+ return HeldLock{
+ // TODO guaranteed allocation elision
+ .held = await (async self.lock.acquire() catch unreachable),
+ .value = &self.private_data,
+ };
+ }
+ };
+}
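
Unlike Lock, Group, and Future above, Locked(T) lands in this change without a test. A minimal usage sketch in the same style as the Lock test, assuming Locked is re-exported as std.event.Locked like the other new types (the test name and the bumpLocked helper are hypothetical, not part of this commit):

const std = @import("std");
const Loop = std.event.Loop;
const Locked = std.event.Locked;

test "std.event.Locked - usage sketch" {
    var da = std.heap.DirectAllocator.init();
    defer da.deinit();

    var loop: Loop = undefined;
    try loop.initMultiThreaded(&da.allocator);
    defer loop.deinit();

    // Protect a single integer behind the lock.
    var locked = Locked(i32).init(&loop, 0);
    defer locked.deinit();

    const handle = try async bumpLocked(&locked);
    defer cancel handle;
    loop.run();
}

async fn bumpLocked(locked: *Locked(i32)) void {
    suspend {
        resume @handle();
    }
    // acquire() suspends until the lock is free and hands back the protected value.
    const held = await (async locked.acquire() catch @panic("out of memory"));
    defer held.release();
    held.value.* += 1;
}
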
diff --git a/std/event/loop.zig b/std/event/loop.zig
new file mode 100644
index 0000000000..8b1b2e53db
--- /dev/null
+++ b/std/event/loop.zig
@@ -0,0 +1,634 @@
+const std = @import("../index.zig");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+const mem = std.mem;
+const posix = std.os.posix;
+const windows = std.os.windows;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+
+pub const Loop = struct {
+ allocator: *mem.Allocator,
+ next_tick_queue: std.atomic.Queue(promise),
+ os_data: OsData,
+ final_resume_node: ResumeNode,
+ pending_event_count: usize,
+ extra_threads: []*std.os.Thread,
+
+ // pre-allocated eventfds. all permanently active.
+ // this is how we send promises to be resumed on other threads.
+ available_eventfd_resume_nodes: std.atomic.Stack(ResumeNode.EventFd),
+ eventfd_resume_nodes: []std.atomic.Stack(ResumeNode.EventFd).Node,
+
+ pub const NextTickNode = std.atomic.Queue(promise).Node;
+
+ pub const ResumeNode = struct {
+ id: Id,
+ handle: promise,
+
+ pub const Id = enum {
+ Basic,
+ Stop,
+ EventFd,
+ };
+
+ pub const EventFd = switch (builtin.os) {
+ builtin.Os.macosx => MacOsEventFd,
+ builtin.Os.linux => struct {
+ base: ResumeNode,
+ epoll_op: u32,
+ eventfd: i32,
+ },
+ builtin.Os.windows => struct {
+ base: ResumeNode,
+ completion_key: usize,
+ },
+ else => @compileError("unsupported OS"),
+ };
+
+ const MacOsEventFd = struct {
+ base: ResumeNode,
+ kevent: posix.Kevent,
+ };
+ };
+
+ /// After initialization, call run().
+ /// TODO copy elision / named return values so that the threads referencing *Loop
+ /// have the correct pointer value.
+ pub fn initSingleThreaded(self: *Loop, allocator: *mem.Allocator) !void {
+ return self.initInternal(allocator, 1);
+ }
+
+ /// The allocator must be thread-safe because we use it for multiplexing
+ /// coroutines onto kernel threads.
+ /// After initialization, call run().
+ /// TODO copy elision / named return values so that the threads referencing *Loop
+ /// have the correct pointer value.
+ pub fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void {
+ const core_count = try std.os.cpuCount(allocator);
+ return self.initInternal(allocator, core_count);
+ }
+
+    /// Thread count is the total thread count. The thread pool size will be
+    /// max(thread_count - 1, 0).
+ fn initInternal(self: *Loop, allocator: *mem.Allocator, thread_count: usize) !void {
+ self.* = Loop{
+ .pending_event_count = 1,
+ .allocator = allocator,
+ .os_data = undefined,
+ .next_tick_queue = std.atomic.Queue(promise).init(),
+ .extra_threads = undefined,
+ .available_eventfd_resume_nodes = std.atomic.Stack(ResumeNode.EventFd).init(),
+ .eventfd_resume_nodes = undefined,
+ .final_resume_node = ResumeNode{
+ .id = ResumeNode.Id.Stop,
+ .handle = undefined,
+ },
+ };
+ const extra_thread_count = thread_count - 1;
+ self.eventfd_resume_nodes = try self.allocator.alloc(
+ std.atomic.Stack(ResumeNode.EventFd).Node,
+ extra_thread_count,
+ );
+ errdefer self.allocator.free(self.eventfd_resume_nodes);
+
+ self.extra_threads = try self.allocator.alloc(*std.os.Thread, extra_thread_count);
+ errdefer self.allocator.free(self.extra_threads);
+
+ try self.initOsData(extra_thread_count);
+ errdefer self.deinitOsData();
+ }
+
+ pub fn deinit(self: *Loop) void {
+ self.deinitOsData();
+ self.allocator.free(self.extra_threads);
+ }
+
+ const InitOsDataError = std.os.LinuxEpollCreateError || mem.Allocator.Error || std.os.LinuxEventFdError ||
+ std.os.SpawnThreadError || std.os.LinuxEpollCtlError || std.os.BsdKEventError ||
+ std.os.WindowsCreateIoCompletionPortError;
+
+ const wakeup_bytes = []u8{0x1} ** 8;
+
+ fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void {
+ switch (builtin.os) {
+ builtin.Os.linux => {
+ errdefer {
+ while (self.available_eventfd_resume_nodes.pop()) |node| std.os.close(node.data.eventfd);
+ }
+ for (self.eventfd_resume_nodes) |*eventfd_node| {
+ eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
+ .data = ResumeNode.EventFd{
+ .base = ResumeNode{
+ .id = ResumeNode.Id.EventFd,
+ .handle = undefined,
+ },
+ .eventfd = try std.os.linuxEventFd(1, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK),
+ .epoll_op = posix.EPOLL_CTL_ADD,
+ },
+ .next = undefined,
+ };
+ self.available_eventfd_resume_nodes.push(eventfd_node);
+ }
+
+ self.os_data.epollfd = try std.os.linuxEpollCreate(posix.EPOLL_CLOEXEC);
+ errdefer std.os.close(self.os_data.epollfd);
+
+ self.os_data.final_eventfd = try std.os.linuxEventFd(0, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK);
+ errdefer std.os.close(self.os_data.final_eventfd);
+
+ self.os_data.final_eventfd_event = posix.epoll_event{
+ .events = posix.EPOLLIN,
+ .data = posix.epoll_data{ .ptr = @ptrToInt(&self.final_resume_node) },
+ };
+ try std.os.linuxEpollCtl(
+ self.os_data.epollfd,
+ posix.EPOLL_CTL_ADD,
+ self.os_data.final_eventfd,
+ &self.os_data.final_eventfd_event,
+ );
+
+ var extra_thread_index: usize = 0;
+ errdefer {
+ // writing 8 bytes to an eventfd cannot fail
+ std.os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
+ while (extra_thread_index != 0) {
+ extra_thread_index -= 1;
+ self.extra_threads[extra_thread_index].wait();
+ }
+ }
+ while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
+ self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun);
+ }
+ },
+ builtin.Os.macosx => {
+ self.os_data.kqfd = try std.os.bsdKQueue();
+ errdefer std.os.close(self.os_data.kqfd);
+
+ self.os_data.kevents = try self.allocator.alloc(posix.Kevent, extra_thread_count);
+ errdefer self.allocator.free(self.os_data.kevents);
+
+ const eventlist = ([*]posix.Kevent)(undefined)[0..0];
+
+ for (self.eventfd_resume_nodes) |*eventfd_node, i| {
+ eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
+ .data = ResumeNode.EventFd{
+ .base = ResumeNode{
+ .id = ResumeNode.Id.EventFd,
+ .handle = undefined,
+ },
+ // this one is for sending events
+ .kevent = posix.Kevent{
+ .ident = i,
+ .filter = posix.EVFILT_USER,
+ .flags = posix.EV_CLEAR | posix.EV_ADD | posix.EV_DISABLE,
+ .fflags = 0,
+ .data = 0,
+ .udata = @ptrToInt(&eventfd_node.data.base),
+ },
+ },
+ .next = undefined,
+ };
+ self.available_eventfd_resume_nodes.push(eventfd_node);
+ const kevent_array = (*[1]posix.Kevent)(&eventfd_node.data.kevent);
+ _ = try std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null);
+ eventfd_node.data.kevent.flags = posix.EV_CLEAR | posix.EV_ENABLE;
+ eventfd_node.data.kevent.fflags = posix.NOTE_TRIGGER;
+ // this one is for waiting for events
+ self.os_data.kevents[i] = posix.Kevent{
+ .ident = i,
+ .filter = posix.EVFILT_USER,
+ .flags = 0,
+ .fflags = 0,
+ .data = 0,
+ .udata = @ptrToInt(&eventfd_node.data.base),
+ };
+ }
+
+ // Pre-add so that we cannot get error.SystemResources
+ // later when we try to activate it.
+ self.os_data.final_kevent = posix.Kevent{
+ .ident = extra_thread_count,
+ .filter = posix.EVFILT_USER,
+ .flags = posix.EV_ADD | posix.EV_DISABLE,
+ .fflags = 0,
+ .data = 0,
+ .udata = @ptrToInt(&self.final_resume_node),
+ };
+ const kevent_array = (*[1]posix.Kevent)(&self.os_data.final_kevent);
+ _ = try std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null);
+ self.os_data.final_kevent.flags = posix.EV_ENABLE;
+ self.os_data.final_kevent.fflags = posix.NOTE_TRIGGER;
+
+ var extra_thread_index: usize = 0;
+ errdefer {
+ _ = std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null) catch unreachable;
+ while (extra_thread_index != 0) {
+ extra_thread_index -= 1;
+ self.extra_threads[extra_thread_index].wait();
+ }
+ }
+ while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
+ self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun);
+ }
+ },
+ builtin.Os.windows => {
+ self.os_data.io_port = try std.os.windowsCreateIoCompletionPort(
+ windows.INVALID_HANDLE_VALUE,
+ null,
+ undefined,
+ undefined,
+ );
+ errdefer std.os.close(self.os_data.io_port);
+
+ for (self.eventfd_resume_nodes) |*eventfd_node, i| {
+ eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
+ .data = ResumeNode.EventFd{
+ .base = ResumeNode{
+ .id = ResumeNode.Id.EventFd,
+ .handle = undefined,
+ },
+ // this one is for sending events
+ .completion_key = @ptrToInt(&eventfd_node.data.base),
+ },
+ .next = undefined,
+ };
+ self.available_eventfd_resume_nodes.push(eventfd_node);
+ }
+
+ var extra_thread_index: usize = 0;
+ errdefer {
+ var i: usize = 0;
+ while (i < extra_thread_index) : (i += 1) {
+ while (true) {
+ const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
+ std.os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
+ break;
+ }
+ }
+ while (extra_thread_index != 0) {
+ extra_thread_index -= 1;
+ self.extra_threads[extra_thread_index].wait();
+ }
+ }
+ while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
+ self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun);
+ }
+ },
+ else => {},
+ }
+ }
+
+ fn deinitOsData(self: *Loop) void {
+ switch (builtin.os) {
+ builtin.Os.linux => {
+ std.os.close(self.os_data.final_eventfd);
+ while (self.available_eventfd_resume_nodes.pop()) |node| std.os.close(node.data.eventfd);
+ std.os.close(self.os_data.epollfd);
+ self.allocator.free(self.eventfd_resume_nodes);
+ },
+ builtin.Os.macosx => {
+ self.allocator.free(self.os_data.kevents);
+ std.os.close(self.os_data.kqfd);
+ },
+ builtin.Os.windows => {
+ std.os.close(self.os_data.io_port);
+ },
+ else => {},
+ }
+ }
+
+ /// resume_node must live longer than the promise that it holds a reference to.
+ pub fn addFd(self: *Loop, fd: i32, resume_node: *ResumeNode) !void {
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ errdefer {
+ self.finishOneEvent();
+ }
+ try self.modFd(
+ fd,
+ posix.EPOLL_CTL_ADD,
+ std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET,
+ resume_node,
+ );
+ }
+
+ pub fn modFd(self: *Loop, fd: i32, op: u32, events: u32, resume_node: *ResumeNode) !void {
+ var ev = std.os.linux.epoll_event{
+ .events = events,
+ .data = std.os.linux.epoll_data{ .ptr = @ptrToInt(resume_node) },
+ };
+ try std.os.linuxEpollCtl(self.os_data.epollfd, op, fd, &ev);
+ }
+
+ pub fn removeFd(self: *Loop, fd: i32) void {
+ self.removeFdNoCounter(fd);
+ self.finishOneEvent();
+ }
+
+ fn removeFdNoCounter(self: *Loop, fd: i32) void {
+ std.os.linuxEpollCtl(self.os_data.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
+ }
+
+ pub async fn waitFd(self: *Loop, fd: i32) !void {
+ defer self.removeFd(fd);
+ suspend {
+ // TODO explicitly put this memory in the coroutine frame #1194
+ var resume_node = ResumeNode{
+ .id = ResumeNode.Id.Basic,
+ .handle = @handle(),
+ };
+ try self.addFd(fd, &resume_node);
+ }
+ }
+
+ fn dispatch(self: *Loop) void {
+ while (self.available_eventfd_resume_nodes.pop()) |resume_stack_node| {
+ const next_tick_node = self.next_tick_queue.get() orelse {
+ self.available_eventfd_resume_nodes.push(resume_stack_node);
+ return;
+ };
+ const eventfd_node = &resume_stack_node.data;
+ eventfd_node.base.handle = next_tick_node.data;
+ switch (builtin.os) {
+ builtin.Os.macosx => {
+ const kevent_array = (*[1]posix.Kevent)(&eventfd_node.kevent);
+ const eventlist = ([*]posix.Kevent)(undefined)[0..0];
+ _ = std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null) catch {
+ self.next_tick_queue.unget(next_tick_node);
+ self.available_eventfd_resume_nodes.push(resume_stack_node);
+ return;
+ };
+ },
+ builtin.Os.linux => {
+ // the pending count is already accounted for
+ const epoll_events = posix.EPOLLONESHOT | std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT |
+ std.os.linux.EPOLLET;
+ self.modFd(
+ eventfd_node.eventfd,
+ eventfd_node.epoll_op,
+ epoll_events,
+ &eventfd_node.base,
+ ) catch {
+ self.next_tick_queue.unget(next_tick_node);
+ self.available_eventfd_resume_nodes.push(resume_stack_node);
+ return;
+ };
+ },
+ builtin.Os.windows => {
+ // this value is never dereferenced but we need it to be non-null so that
+ // the consumer code can decide whether to read the completion key.
+ // it has to do this for normal I/O, so we match that behavior here.
+ const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
+ std.os.windowsPostQueuedCompletionStatus(
+ self.os_data.io_port,
+ undefined,
+ eventfd_node.completion_key,
+ overlapped,
+ ) catch {
+ self.next_tick_queue.unget(next_tick_node);
+ self.available_eventfd_resume_nodes.push(resume_stack_node);
+ return;
+ };
+ },
+ else => @compileError("unsupported OS"),
+ }
+ }
+ }
+
+ /// Bring your own linked list node. This means it can't fail.
+ pub fn onNextTick(self: *Loop, node: *NextTickNode) void {
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ self.next_tick_queue.put(node);
+ self.dispatch();
+ }
+
+ pub fn run(self: *Loop) void {
+ self.finishOneEvent(); // the reference we start with
+
+ self.workerRun();
+ for (self.extra_threads) |extra_thread| {
+ extra_thread.wait();
+ }
+ }
+
+ /// This is equivalent to an async call, except instead of beginning execution of the async function,
+ /// it immediately returns to the caller, and the async function is queued in the event loop. It still
+ /// returns a promise to be awaited.
+ pub fn call(self: *Loop, comptime func: var, args: ...) !(promise->@typeOf(func).ReturnType) {
+ const S = struct {
+ async fn asyncFunc(loop: *Loop, handle: *promise->@typeOf(func).ReturnType, args2: ...) @typeOf(func).ReturnType {
+ suspend {
+ handle.* = @handle();
+ var my_tick_node = Loop.NextTickNode{
+ .next = undefined,
+ .data = @handle(),
+ };
+ loop.onNextTick(&my_tick_node);
+ }
+ // TODO guaranteed allocation elision for await in same func as async
+ return await (async func(args2) catch unreachable);
+ }
+ };
+ var handle: promise->@typeOf(func).ReturnType = undefined;
+ return async S.asyncFunc(self, &handle, args);
+ }
+
+ /// Awaiting a yield lets the event loop run, starting any unstarted async operations.
+ /// Note that async operations automatically start when a function yields for any other reason,
+ /// for example, when async I/O is performed. This function is intended to be used only when
+ /// CPU bound tasks would be waiting in the event loop but never get started because no async I/O
+ /// is performed.
+ pub async fn yield(self: *Loop) void {
+ suspend {
+ var my_tick_node = Loop.NextTickNode{
+ .next = undefined,
+ .data = @handle(),
+ };
+ self.onNextTick(&my_tick_node);
+ }
+ }
+
+ fn finishOneEvent(self: *Loop) void {
+ if (@atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) == 1) {
+ // cause all the threads to stop
+ switch (builtin.os) {
+ builtin.Os.linux => {
+ // writing 8 bytes to an eventfd cannot fail
+ std.os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
+ return;
+ },
+ builtin.Os.macosx => {
+ const final_kevent = (*[1]posix.Kevent)(&self.os_data.final_kevent);
+ const eventlist = ([*]posix.Kevent)(undefined)[0..0];
+ // cannot fail because we already added it and this just enables it
+ _ = std.os.bsdKEvent(self.os_data.kqfd, final_kevent, eventlist, null) catch unreachable;
+ return;
+ },
+ builtin.Os.windows => {
+ var i: usize = 0;
+ while (i < self.extra_threads.len + 1) : (i += 1) {
+ while (true) {
+ const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
+ std.os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
+ break;
+ }
+ }
+ return;
+ },
+ else => @compileError("unsupported OS"),
+ }
+ }
+ }
+
+ fn workerRun(self: *Loop) void {
+ while (true) {
+ while (true) {
+ const next_tick_node = self.next_tick_queue.get() orelse break;
+ self.dispatch();
+ resume next_tick_node.data;
+ self.finishOneEvent();
+ }
+
+ switch (builtin.os) {
+ builtin.Os.linux => {
+ // only process 1 event so we don't steal from other threads
+ var events: [1]std.os.linux.epoll_event = undefined;
+ const count = std.os.linuxEpollWait(self.os_data.epollfd, events[0..], -1);
+ for (events[0..count]) |ev| {
+ const resume_node = @intToPtr(*ResumeNode, ev.data.ptr);
+ const handle = resume_node.handle;
+ const resume_node_id = resume_node.id;
+ switch (resume_node_id) {
+ ResumeNode.Id.Basic => {},
+ ResumeNode.Id.Stop => return,
+ ResumeNode.Id.EventFd => {
+ const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
+ event_fd_node.epoll_op = posix.EPOLL_CTL_MOD;
+ const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
+ self.available_eventfd_resume_nodes.push(stack_node);
+ },
+ }
+ resume handle;
+ if (resume_node_id == ResumeNode.Id.EventFd) {
+ self.finishOneEvent();
+ }
+ }
+ },
+ builtin.Os.macosx => {
+ var eventlist: [1]posix.Kevent = undefined;
+ const count = std.os.bsdKEvent(self.os_data.kqfd, self.os_data.kevents, eventlist[0..], null) catch unreachable;
+ for (eventlist[0..count]) |ev| {
+ const resume_node = @intToPtr(*ResumeNode, ev.udata);
+ const handle = resume_node.handle;
+ const resume_node_id = resume_node.id;
+ switch (resume_node_id) {
+ ResumeNode.Id.Basic => {},
+ ResumeNode.Id.Stop => return,
+ ResumeNode.Id.EventFd => {
+ const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
+ const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
+ self.available_eventfd_resume_nodes.push(stack_node);
+ },
+ }
+ resume handle;
+ if (resume_node_id == ResumeNode.Id.EventFd) {
+ self.finishOneEvent();
+ }
+ }
+ },
+ builtin.Os.windows => {
+ var completion_key: usize = undefined;
+ while (true) {
+ var nbytes: windows.DWORD = undefined;
+ var overlapped: ?*windows.OVERLAPPED = undefined;
+ switch (std.os.windowsGetQueuedCompletionStatus(self.os_data.io_port, &nbytes, &completion_key, &overlapped, windows.INFINITE)) {
+ std.os.WindowsWaitResult.Aborted => return,
+ std.os.WindowsWaitResult.Normal => {},
+ }
+ if (overlapped != null) break;
+ }
+ const resume_node = @intToPtr(*ResumeNode, completion_key);
+ const handle = resume_node.handle;
+ const resume_node_id = resume_node.id;
+ switch (resume_node_id) {
+ ResumeNode.Id.Basic => {},
+ ResumeNode.Id.Stop => return,
+ ResumeNode.Id.EventFd => {
+ const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
+ const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
+ self.available_eventfd_resume_nodes.push(stack_node);
+ },
+ }
+ resume handle;
+ if (resume_node_id == ResumeNode.Id.EventFd) {
+ self.finishOneEvent();
+ }
+ },
+ else => @compileError("unsupported OS"),
+ }
+ }
+ }
+
+ const OsData = switch (builtin.os) {
+ builtin.Os.linux => struct {
+ epollfd: i32,
+ final_eventfd: i32,
+ final_eventfd_event: std.os.linux.epoll_event,
+ },
+ builtin.Os.macosx => MacOsData,
+ builtin.Os.windows => struct {
+ io_port: windows.HANDLE,
+ extra_thread_count: usize,
+ },
+ else => struct {},
+ };
+
+ const MacOsData = struct {
+ kqfd: i32,
+ final_kevent: posix.Kevent,
+ kevents: []posix.Kevent,
+ };
+};
+
+test "std.event.Loop - basic" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop: Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ loop.run();
+}
+
+test "std.event.Loop - call" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop: Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ var did_it = false;
+ const handle = try loop.call(testEventLoop);
+ const handle2 = try loop.call(testEventLoop2, handle, &did_it);
+ defer cancel handle2;
+
+ loop.run();
+
+ assert(did_it);
+}
+
+async fn testEventLoop() i32 {
+ return 1234;
+}
+
+async fn testEventLoop2(h: promise->i32, did_it: *bool) void {
+ const value = await h;
+ assert(value == 1234);
+ did_it.* = true;
+}
diff --git a/std/event/tcp.zig b/std/event/tcp.zig
new file mode 100644
index 0000000000..ea803a9322
--- /dev/null
+++ b/std/event/tcp.zig
@@ -0,0 +1,184 @@
+const std = @import("../index.zig");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+const event = std.event;
+const mem = std.mem;
+const posix = std.os.posix;
+const windows = std.os.windows;
+const Loop = std.event.Loop;
+
+pub const Server = struct {
+ handleRequestFn: async<*mem.Allocator> fn (*Server, *const std.net.Address, *const std.os.File) void,
+
+ loop: *Loop,
+ sockfd: ?i32,
+ accept_coro: ?promise,
+ listen_address: std.net.Address,
+
+ waiting_for_emfile_node: PromiseNode,
+ listen_resume_node: event.Loop.ResumeNode,
+
+ const PromiseNode = std.LinkedList(promise).Node;
+
+ pub fn init(loop: *Loop) Server {
+ // TODO can't initialize handler coroutine here because we need well defined copy elision
+ return Server{
+ .loop = loop,
+ .sockfd = null,
+ .accept_coro = null,
+ .handleRequestFn = undefined,
+ .waiting_for_emfile_node = undefined,
+ .listen_address = undefined,
+ .listen_resume_node = event.Loop.ResumeNode{
+ .id = event.Loop.ResumeNode.Id.Basic,
+ .handle = undefined,
+ },
+ };
+ }
+
+ pub fn listen(
+ self: *Server,
+ address: *const std.net.Address,
+ handleRequestFn: async<*mem.Allocator> fn (*Server, *const std.net.Address, *const std.os.File) void,
+ ) !void {
+ self.handleRequestFn = handleRequestFn;
+
+ const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
+ errdefer std.os.close(sockfd);
+ self.sockfd = sockfd;
+
+ try std.os.posixBind(sockfd, &address.os_addr);
+ try std.os.posixListen(sockfd, posix.SOMAXCONN);
+ self.listen_address = std.net.Address.initPosix(try std.os.posixGetSockName(sockfd));
+
+ self.accept_coro = try async Server.handler(self);
+ errdefer cancel self.accept_coro.?;
+
+ self.listen_resume_node.handle = self.accept_coro.?;
+ try self.loop.addFd(sockfd, &self.listen_resume_node);
+ errdefer self.loop.removeFd(sockfd);
+ }
+
+ /// Stop listening
+ pub fn close(self: *Server) void {
+ self.loop.removeFd(self.sockfd.?);
+ std.os.close(self.sockfd.?);
+ }
+
+ pub fn deinit(self: *Server) void {
+ if (self.accept_coro) |accept_coro| cancel accept_coro;
+ if (self.sockfd) |sockfd| std.os.close(sockfd);
+ }
+
+ pub async fn handler(self: *Server) void {
+ while (true) {
+ var accepted_addr: std.net.Address = undefined;
+ if (std.os.posixAccept(self.sockfd.?, &accepted_addr.os_addr, posix.SOCK_NONBLOCK | posix.SOCK_CLOEXEC)) |accepted_fd| {
+ var socket = std.os.File.openHandle(accepted_fd);
+ _ = async self.handleRequestFn(self, accepted_addr, socket) catch |err| switch (err) {
+ error.OutOfMemory => {
+ socket.close();
+ continue;
+ },
+ };
+ } else |err| switch (err) {
+ error.WouldBlock => {
+ suspend; // we will get resumed by epoll_wait in the event loop
+ continue;
+ },
+ error.ProcessFdQuotaExceeded => {
+ errdefer std.os.emfile_promise_queue.remove(&self.waiting_for_emfile_node);
+ suspend {
+                        self.waiting_for_emfile_node = PromiseNode.init(@handle());
+ std.os.emfile_promise_queue.append(&self.waiting_for_emfile_node);
+ }
+ continue;
+ },
+ error.ConnectionAborted, error.FileDescriptorClosed => continue,
+
+ error.PageFault => unreachable,
+ error.InvalidSyscall => unreachable,
+ error.FileDescriptorNotASocket => unreachable,
+ error.OperationNotSupported => unreachable,
+
+ error.SystemFdQuotaExceeded, error.SystemResources, error.ProtocolFailure, error.BlockedByFirewall, error.Unexpected => {
+ @panic("TODO handle this error");
+ },
+ }
+ }
+ }
+};
+
+pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File {
+ var address = _address.*; // TODO https://github.com/ziglang/zig/issues/733
+
+ const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
+ errdefer std.os.close(sockfd);
+
+ try std.os.posixConnectAsync(sockfd, &address.os_addr);
+ try await try async loop.waitFd(sockfd);
+ try std.os.posixGetSockOptConnectError(sockfd);
+
+ return std.os.File.openHandle(sockfd);
+}
+
+test "listen on a port, send bytes, receive bytes" {
+ if (builtin.os != builtin.Os.linux) {
+ // TODO build abstractions for other operating systems
+ return error.SkipZigTest;
+ }
+
+ const MyServer = struct {
+ tcp_server: Server,
+
+ const Self = this;
+ async<*mem.Allocator> fn handler(tcp_server: *Server, _addr: *const std.net.Address, _socket: *const std.os.File) void {
+ const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
+ var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
+ defer socket.close();
+ // TODO guarantee elision of this allocation
+ const next_handler = async errorableHandler(self, _addr, socket) catch unreachable;
+ (await next_handler) catch |err| {
+ std.debug.panic("unable to handle connection: {}\n", err);
+ };
+ suspend {
+ cancel @handle();
+ }
+ }
+ async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: *const std.os.File) !void {
+ const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/733
+ var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
+
+ var adapter = std.io.FileOutStream.init(&socket);
+ var stream = &adapter.stream;
+ try stream.print("hello from server\n");
+ }
+ };
+
+ const ip4addr = std.net.parseIp4("127.0.0.1") catch unreachable;
+ const addr = std.net.Address.initIp4(ip4addr, 0);
+
+ var loop: Loop = undefined;
+ try loop.initSingleThreaded(std.debug.global_allocator);
+ var server = MyServer{ .tcp_server = Server.init(&loop) };
+ defer server.tcp_server.deinit();
+ try server.tcp_server.listen(addr, MyServer.handler);
+
+ const p = try async doAsyncTest(&loop, server.tcp_server.listen_address, &server.tcp_server);
+ defer cancel p;
+ loop.run();
+}
+
+async fn doAsyncTest(loop: *Loop, address: *const std.net.Address, server: *Server) void {
+ errdefer @panic("test failure");
+
+ var socket_file = try await try async connect(loop, address);
+ defer socket_file.close();
+
+ var buf: [512]u8 = undefined;
+ const amt_read = try socket_file.read(buf[0..]);
+ const msg = buf[0..amt_read];
+ assert(mem.eql(u8, msg, "hello from server\n"));
+ server.close();
+}
+
diff --git a/std/fmt/errol/enum3.zig b/std/fmt/errol/enum3.zig
index f8299d3c6f..7663f9b5d9 100644
--- a/std/fmt/errol/enum3.zig
+++ b/std/fmt/errol/enum3.zig
@@ -1,4 +1,4 @@
-pub const enum3 = []u64 {
+pub const enum3 = []u64{
0x4e2e2785c3a2a20b,
0x240a28877a09a4e1,
0x728fca36c06cf106,
@@ -439,13 +439,13 @@ const Slab = struct {
};
fn slab(str: []const u8, exp: i32) Slab {
- return Slab {
+ return Slab{
.str = str,
.exp = exp,
};
}
-pub const enum3_data = []Slab {
+pub const enum3_data = []Slab{
slab("40648030339495312", 69),
slab("4498645355592131", -134),
slab("678321594594593", 244),
@@ -879,4 +879,3 @@ pub const enum3_data = []Slab {
slab("32216657306260762", 218),
slab("30423431424080128", 219),
};
-
diff --git a/std/fmt/errol/index.zig b/std/fmt/errol/index.zig
index 42287bd25b..3222913107 100644
--- a/std/fmt/errol/index.zig
+++ b/std/fmt/errol/index.zig
@@ -12,15 +12,81 @@ pub const FloatDecimal = struct {
exp: i32,
};
+pub const RoundMode = enum {
+ // Round only the fractional portion (e.g. 1234.23 has precision 2)
+ Decimal,
+ // Round the entire whole/fractional portion (e.g. 1.23423e3 has precision 5)
+ Scientific,
+};
+
+/// Round a FloatDecimal as returned by errol3 to the specified fractional precision.
+/// All digits after the specified precision should be considered invalid.
+pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: RoundMode) void {
+ // The round digit refers to the index which we should look at to determine
+ // whether we need to round to match the specified precision.
+ var round_digit: usize = 0;
+
+ switch (mode) {
+ RoundMode.Decimal => {
+ if (float_decimal.exp >= 0) {
+ round_digit = precision + @intCast(usize, float_decimal.exp);
+ } else {
+                // if the exp is a small negative value, we need to offset by the number
+                // of leading zeros that will occur.
+ const min_exp_required = @intCast(usize, -float_decimal.exp);
+ if (precision > min_exp_required) {
+ round_digit = precision - min_exp_required;
+ }
+ }
+ },
+ RoundMode.Scientific => {
+ round_digit = 1 + precision;
+ },
+ }
+
+    // It suffices to look at just this digit. We don't first round, say, 0.04999 to 0.05
+    // and then propagate that to 0.1 in the case of a {.1} single precision.
+
+ // Find the digit which will signify the round point and start rounding backwards.
+ if (round_digit < float_decimal.digits.len and float_decimal.digits[round_digit] - '0' >= 5) {
+ assert(round_digit >= 0);
+
+ var i = round_digit;
+ while (true) {
+ if (i == 0) {
+ // Rounded all the way past the start. This was of the form 9.999...
+ // Slot the new digit in place and increase the exponent.
+ float_decimal.exp += 1;
+
+ // Re-size the buffer to use the reserved leading byte.
+ const one_before = @intToPtr([*]u8, @ptrToInt(&float_decimal.digits[0]) - 1);
+ float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1];
+ float_decimal.digits[0] = '1';
+ return;
+ }
+
+ i -= 1;
+
+ const new_value = (float_decimal.digits[i] - '0' + 1) % 10;
+ float_decimal.digits[i] = new_value + '0';
+
+ // must continue rounding until non-9
+ if (new_value != 0) {
+ return;
+ }
+ }
+ }
+}
+
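
A worked trace of the new rounding path may help here (a sketch only, not part of this change; it assumes the calls run inside this file so errol3, roundToPrecision, and RoundMode are in scope):

// Round 9.999 to two decimal places.
var buffer: [32]u8 = undefined;
var fd = errol3(9.999, buffer[0..]); // digits "9999", exp 1
roundToPrecision(&fd, 2, RoundMode.Decimal);
// round_digit = 2 + 1 = 3; digits[3] == '9', so the carry runs past the front:
// the reserved leading byte becomes '1', exp becomes 2, and the first
// exp + precision digits read "1000", i.e. the value formats as 10.00.
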
/// Corrected Errol3 double to ASCII conversion.
pub fn errol3(value: f64, buffer: []u8) FloatDecimal {
const bits = @bitCast(u64, value);
const i = tableLowerBound(bits);
if (i < enum3.len and enum3[i] == bits) {
const data = enum3_data[i];
- const digits = buffer[0..data.str.len];
+ const digits = buffer[1 .. data.str.len + 1];
mem.copy(u8, digits, data.str);
- return FloatDecimal {
+ return FloatDecimal{
.digits = digits,
.exp = data.exp,
};
@@ -32,27 +98,25 @@ pub fn errol3(value: f64, buffer: []u8) FloatDecimal {
/// Uncorrected Errol3 double to ASCII conversion.
fn errol3u(val: f64, buffer: []u8) FloatDecimal {
// check if in integer or fixed range
-
if (val > 9.007199254740992e15 and val < 3.40282366920938e+38) {
return errolInt(val, buffer);
} else if (val >= 16.0 and val < 9.007199254740992e15) {
return errolFixed(val, buffer);
}
-
// normalize the midpoint
const e = math.frexp(val).exponent;
- var exp = i16(math.floor(307 + f64(e) * 0.30103));
+ var exp = @floatToInt(i16, math.floor(307 + @intToFloat(f64, e) * 0.30103));
if (exp < 20) {
exp = 20;
- } else if (usize(exp) >= lookup_table.len) {
- exp = i16(lookup_table.len - 1);
+ } else if (@intCast(usize, exp) >= lookup_table.len) {
+ exp = @intCast(i16, lookup_table.len - 1);
}
- var mid = lookup_table[usize(exp)];
+ var mid = lookup_table[@intCast(usize, exp)];
mid = hpProd(mid, val);
- const lten = lookup_table[usize(exp)].val;
+ const lten = lookup_table[@intCast(usize, exp)].val;
exp -= 307;
@@ -71,11 +135,11 @@ fn errol3u(val: f64, buffer: []u8) FloatDecimal {
}
// compute boundaries
- var high = HP {
+ var high = HP{
.val = mid.val,
.off = mid.off + (fpnext(val) - val) * lten * ten / 2.0,
};
- var low = HP {
+ var low = HP{
.val = mid.val,
.off = mid.off + (fpprev(val) - val) * lten * ten / 2.0,
};
@@ -98,37 +162,37 @@ fn errol3u(val: f64, buffer: []u8) FloatDecimal {
}
// digit generation
- var buf_index: usize = 0;
+
+    // We generate digits starting at index 1. If the result is rounded later, it may be
+    // necessary to generate a preceding digit in some cases (9.999), in which case we use
+    // the 0-index for this extra digit.
+ var buf_index: usize = 1;
while (true) {
- var hdig = u8(math.floor(high.val));
- if ((high.val == f64(hdig)) and (high.off < 0))
- hdig -= 1;
+ var hdig = @floatToInt(u8, math.floor(high.val));
+ if ((high.val == @intToFloat(f64, hdig)) and (high.off < 0)) hdig -= 1;
- var ldig = u8(math.floor(low.val));
- if ((low.val == f64(ldig)) and (low.off < 0))
- ldig -= 1;
+ var ldig = @floatToInt(u8, math.floor(low.val));
+ if ((low.val == @intToFloat(f64, ldig)) and (low.off < 0)) ldig -= 1;
- if (ldig != hdig)
- break;
+ if (ldig != hdig) break;
buffer[buf_index] = hdig + '0';
buf_index += 1;
- high.val -= f64(hdig);
- low.val -= f64(ldig);
+ high.val -= @intToFloat(f64, hdig);
+ low.val -= @intToFloat(f64, ldig);
hpMul10(&high);
hpMul10(&low);
}
const tmp = (high.val + low.val) / 2.0;
- var mdig = u8(math.floor(tmp + 0.5));
- if ((f64(mdig) - tmp) == 0.5 and (mdig & 0x1) != 0)
- mdig -= 1;
+ var mdig = @floatToInt(u8, math.floor(tmp + 0.5));
+ if ((@intToFloat(f64, mdig) - tmp) == 0.5 and (mdig & 0x1) != 0) mdig -= 1;
buffer[buf_index] = mdig + '0';
buf_index += 1;
- return FloatDecimal {
- .digits = buffer[0..buf_index],
+ return FloatDecimal{
+ .digits = buffer[1..buf_index],
.exp = exp,
};
}
@@ -153,7 +217,7 @@ fn tableLowerBound(k: u64) usize {
/// @in: The HP number.
/// @val: The double.
/// &returns: The HP number.
-fn hpProd(in: &const HP, val: f64) HP {
+fn hpProd(in: *const HP, val: f64) HP {
var hi: f64 = undefined;
var lo: f64 = undefined;
split(in.val, &hi, &lo);
@@ -165,7 +229,7 @@ fn hpProd(in: &const HP, val: f64) HP {
const p = in.val * val;
const e = ((hi * hi2 - p) + lo * hi2 + hi * lo2) + lo * lo2;
- return HP {
+ return HP{
.val = p,
.off = in.off * val + e,
};
@@ -175,9 +239,9 @@ fn hpProd(in: &const HP, val: f64) HP {
/// @val: The double.
/// @hi: The high bits.
/// @lo: The low bits.
-fn split(val: f64, hi: &f64, lo: &f64) void {
- *hi = gethi(val);
- *lo = val - *hi;
+fn split(val: f64, hi: *f64, lo: *f64) void {
+ hi.* = gethi(val);
+ lo.* = val - hi.*;
}
fn gethi(in: f64) f64 {
@@ -188,7 +252,10 @@ fn gethi(in: f64) f64 {
/// Normalize the number by factoring in the error.
/// @hp: The float pair.
-fn hpNormalize(hp: &HP) void {
+fn hpNormalize(hp: *HP) void {
+    // Required to avoid a buffer overrun (and resulting segfaults) when errol3 digit output fails to terminate.
+ @setFloatMode(this, @import("builtin").FloatMode.Strict);
+
const val = hp.val;
hp.val += hp.off;
@@ -197,7 +264,7 @@ fn hpNormalize(hp: &HP) void {
/// Divide the high-precision number by ten.
/// @hp: The high-precision number
-fn hpDiv10(hp: &HP) void {
+fn hpDiv10(hp: *HP) void {
var val = hp.val;
hp.val /= 10.0;
@@ -213,7 +280,7 @@ fn hpDiv10(hp: &HP) void {
/// Multiply the high-precision number by ten.
/// @hp: The high-precision number
-fn hpMul10(hp: &HP) void {
+fn hpMul10(hp: *HP) void {
const val = hp.val;
hp.val *= 10.0;
@@ -228,7 +295,6 @@ fn hpMul10(hp: &HP) void {
hpNormalize(hp);
}
-
/// Integer conversion algorithm, guaranteed correct, optimal, and best.
/// @val: The val.
/// @buf: The output buffer.
@@ -238,7 +304,7 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
assert((val > 9.007199254740992e15) and val < (3.40282366920938e38));
- var mid = u128(val);
+ var mid = @floatToInt(u128, val);
var low: u128 = mid - fpeint((fpnext(val) - val) / 2.0);
var high: u128 = mid + fpeint((val - fpprev(val)) / 2.0);
@@ -248,11 +314,11 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
low -= 1;
}
- var l64 = u64(low % pow19);
- const lf = u64((low / pow19) % pow19);
+ var l64 = @intCast(u64, low % pow19);
+ const lf = @intCast(u64, (low / pow19) % pow19);
- var h64 = u64(high % pow19);
- const hf = u64((high / pow19) % pow19);
+ var h64 = @intCast(u64, high % pow19);
+ const hf = @intCast(u64, (high / pow19) % pow19);
if (lf != hf) {
l64 = lf;
@@ -263,27 +329,26 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
var mi: i32 = mismatch10(l64, h64);
var x: u64 = 1;
{
- var i = i32(lf == hf);
+ var i: i32 = @boolToInt(lf == hf);
while (i < mi) : (i += 1) {
x *= 10;
}
}
const m64 = @truncate(u64, @divTrunc(mid, x));
- if (lf != hf)
- mi += 19;
+ if (lf != hf) mi += 19;
var buf_index = u64toa(m64, buffer) - 1;
if (mi != 0) {
- buffer[buf_index - 1] += u8(buffer[buf_index] >= '5');
+ buffer[buf_index - 1] += @boolToInt(buffer[buf_index] >= '5');
} else {
buf_index += 1;
}
- return FloatDecimal {
+ return FloatDecimal{
.digits = buffer[0..buf_index],
- .exp = i32(buf_index) + mi,
+ .exp = @intCast(i32, buf_index) + mi,
};
}
@@ -294,54 +359,53 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
fn errolFixed(val: f64, buffer: []u8) FloatDecimal {
assert((val >= 16.0) and (val < 9.007199254740992e15));
- const u = u64(val);
- const n = f64(u);
+ const u = @floatToInt(u64, val);
+ const n = @intToFloat(f64, u);
var mid = val - n;
var lo = ((fpprev(val) - n) + mid) / 2.0;
var hi = ((fpnext(val) - n) + mid) / 2.0;
var buf_index = u64toa(u, buffer);
- var exp = i32(buf_index);
+ var exp = @intCast(i32, buf_index);
var j = buf_index;
buffer[j] = 0;
if (mid != 0.0) {
while (mid != 0.0) {
lo *= 10.0;
- const ldig = i32(lo);
- lo -= f64(ldig);
+ const ldig = @floatToInt(i32, lo);
+ lo -= @intToFloat(f64, ldig);
mid *= 10.0;
- const mdig = i32(mid);
- mid -= f64(mdig);
+ const mdig = @floatToInt(i32, mid);
+ mid -= @intToFloat(f64, mdig);
hi *= 10.0;
- const hdig = i32(hi);
- hi -= f64(hdig);
+ const hdig = @floatToInt(i32, hi);
+ hi -= @intToFloat(f64, hdig);
- buffer[j] = u8(mdig + '0');
+ buffer[j] = @intCast(u8, mdig + '0');
j += 1;
- if(hdig != ldig or j > 50)
- break;
+ if (hdig != ldig or j > 50) break;
}
if (mid > 0.5) {
- buffer[j-1] += 1;
- } else if ((mid == 0.5) and (buffer[j-1] & 0x1) != 0) {
- buffer[j-1] += 1;
+ buffer[j - 1] += 1;
+ } else if ((mid == 0.5) and (buffer[j - 1] & 0x1) != 0) {
+ buffer[j - 1] += 1;
}
} else {
- while (buffer[j-1] == '0') {
- buffer[j-1] = 0;
+ while (buffer[j - 1] == '0') {
+ buffer[j - 1] = 0;
j -= 1;
}
}
buffer[j] = 0;
- return FloatDecimal {
+ return FloatDecimal{
.digits = buffer[0..j],
.exp = exp,
};
@@ -355,7 +419,7 @@ fn fpprev(val: f64) f64 {
return @bitCast(f64, @bitCast(u64, val) -% 1);
}
-pub const c_digits_lut = []u8 {
+pub const c_digits_lut = []u8{
'0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6',
'0', '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3',
'1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9', '2', '0',
@@ -388,7 +452,7 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
var buf_index: usize = 0;
if (value < kTen8) {
- const v = u32(value);
+ const v = @intCast(u32, value);
if (v < 10000) {
const d1: u32 = (v / 100) << 1;
const d2: u32 = (v % 100) << 1;
@@ -443,8 +507,8 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
buf_index += 1;
}
} else if (value < kTen16) {
- const v0: u32 = u32(value / kTen8);
- const v1: u32 = u32(value % kTen8);
+ const v0: u32 = @intCast(u32, value / kTen8);
+ const v1: u32 = @intCast(u32, value % kTen8);
const b0: u32 = v0 / 10000;
const c0: u32 = v0 % 10000;
@@ -514,11 +578,11 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
buffer[buf_index] = c_digits_lut[d8 + 1];
buf_index += 1;
} else {
- const a = u32(value / kTen16); // 1 to 1844
+ const a = @intCast(u32, value / kTen16); // 1 to 1844
value %= kTen16;
if (a < 10) {
- buffer[buf_index] = '0' + u8(a);
+ buffer[buf_index] = '0' + @intCast(u8, a);
buf_index += 1;
} else if (a < 100) {
const i: u32 = a << 1;
@@ -527,7 +591,7 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
buffer[buf_index] = c_digits_lut[i + 1];
buf_index += 1;
} else if (a < 1000) {
- buffer[buf_index] = '0' + u8(a / 100);
+ buffer[buf_index] = '0' + @intCast(u8, a / 100);
buf_index += 1;
const i: u32 = (a % 100) << 1;
@@ -548,8 +612,8 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
buf_index += 1;
}
- const v0 = u32(value / kTen8);
- const v1 = u32(value % kTen8);
+ const v0 = @intCast(u32, value / kTen8);
+ const v1 = @intCast(u32, value % kTen8);
const b0: u32 = v0 / 10000;
const c0: u32 = v0 % 10000;
@@ -613,7 +677,6 @@ fn fpeint(from: f64) u128 {
return u128(1) << @truncate(u7, (bits >> 52) -% 1023);
}
-
/// Given two different integers with the same length in terms of the number
/// of decimal digits, index the digits from the right-most position starting
/// from zero, find the first index where the digits in the two integers
@@ -640,7 +703,6 @@ fn mismatch10(a: u64, b: u64) i32 {
a_copy /= 10;
b_copy /= 10;
- if (a_copy == b_copy)
- return i;
+ if (a_copy == b_copy) return i;
}
}
diff --git a/std/fmt/errol/lookup.zig b/std/fmt/errol/lookup.zig
index b7b89ba732..bd0a4ac8d1 100644
--- a/std/fmt/errol/lookup.zig
+++ b/std/fmt/errol/lookup.zig
@@ -3,604 +3,604 @@ pub const HP = struct {
off: f64,
};
pub const lookup_table = []HP{
- HP{.val=1.000000e+308, .off= -1.097906362944045488e+291 },
- HP{.val=1.000000e+307, .off= 1.396894023974354241e+290 },
- HP{.val=1.000000e+306, .off= -1.721606459673645508e+289 },
- HP{.val=1.000000e+305, .off= 6.074644749446353973e+288 },
- HP{.val=1.000000e+304, .off= 6.074644749446353567e+287 },
- HP{.val=1.000000e+303, .off= -1.617650767864564452e+284 },
- HP{.val=1.000000e+302, .off= -7.629703079084895055e+285 },
- HP{.val=1.000000e+301, .off= -5.250476025520442286e+284 },
- HP{.val=1.000000e+300, .off= -5.250476025520441956e+283 },
- HP{.val=1.000000e+299, .off= -5.250476025520441750e+282 },
- HP{.val=1.000000e+298, .off= 4.043379652465702264e+281 },
- HP{.val=1.000000e+297, .off= -1.765280146275637946e+280 },
- HP{.val=1.000000e+296, .off= 1.865132227937699609e+279 },
- HP{.val=1.000000e+295, .off= 1.865132227937699609e+278 },
- HP{.val=1.000000e+294, .off= -6.643646774124810287e+277 },
- HP{.val=1.000000e+293, .off= 7.537651562646039934e+276 },
- HP{.val=1.000000e+292, .off= -1.325659897835741608e+275 },
- HP{.val=1.000000e+291, .off= 4.213909764965371606e+274 },
- HP{.val=1.000000e+290, .off= -6.172783352786715670e+273 },
- HP{.val=1.000000e+289, .off= -6.172783352786715670e+272 },
- HP{.val=1.000000e+288, .off= -7.630473539575035471e+270 },
- HP{.val=1.000000e+287, .off= -7.525217352494018700e+270 },
- HP{.val=1.000000e+286, .off= -3.298861103408696612e+269 },
- HP{.val=1.000000e+285, .off= 1.984084207947955778e+268 },
- HP{.val=1.000000e+284, .off= -7.921438250845767591e+267 },
- HP{.val=1.000000e+283, .off= 4.460464822646386735e+266 },
- HP{.val=1.000000e+282, .off= -3.278224598286209647e+265 },
- HP{.val=1.000000e+281, .off= -3.278224598286209737e+264 },
- HP{.val=1.000000e+280, .off= -3.278224598286209961e+263 },
- HP{.val=1.000000e+279, .off= -5.797329227496039232e+262 },
- HP{.val=1.000000e+278, .off= 3.649313132040821498e+261 },
- HP{.val=1.000000e+277, .off= -2.867878510995372374e+259 },
- HP{.val=1.000000e+276, .off= -5.206914080024985409e+259 },
- HP{.val=1.000000e+275, .off= 4.018322599210230404e+258 },
- HP{.val=1.000000e+274, .off= 7.862171215558236495e+257 },
- HP{.val=1.000000e+273, .off= 5.459765830340732821e+256 },
- HP{.val=1.000000e+272, .off= -6.552261095746788047e+255 },
- HP{.val=1.000000e+271, .off= 4.709014147460262298e+254 },
- HP{.val=1.000000e+270, .off= -4.675381888545612729e+253 },
- HP{.val=1.000000e+269, .off= -4.675381888545612892e+252 },
- HP{.val=1.000000e+268, .off= 2.656177514583977380e+251 },
- HP{.val=1.000000e+267, .off= 2.656177514583977190e+250 },
- HP{.val=1.000000e+266, .off= -3.071603269111014892e+249 },
- HP{.val=1.000000e+265, .off= -6.651466258920385440e+248 },
- HP{.val=1.000000e+264, .off= -4.414051890289528972e+247 },
- HP{.val=1.000000e+263, .off= -1.617283929500958387e+246 },
- HP{.val=1.000000e+262, .off= -1.617283929500958241e+245 },
- HP{.val=1.000000e+261, .off= 7.122615947963323868e+244 },
- HP{.val=1.000000e+260, .off= -6.533477610574617382e+243 },
- HP{.val=1.000000e+259, .off= 7.122615947963323982e+242 },
- HP{.val=1.000000e+258, .off= -5.679971763165996225e+241 },
- HP{.val=1.000000e+257, .off= -3.012765990014054219e+240 },
- HP{.val=1.000000e+256, .off= -3.012765990014054219e+239 },
- HP{.val=1.000000e+255, .off= 1.154743030535854616e+238 },
- HP{.val=1.000000e+254, .off= 6.364129306223240767e+237 },
- HP{.val=1.000000e+253, .off= 6.364129306223241129e+236 },
- HP{.val=1.000000e+252, .off= -9.915202805299840595e+235 },
- HP{.val=1.000000e+251, .off= -4.827911520448877980e+234 },
- HP{.val=1.000000e+250, .off= 7.890316691678530146e+233 },
- HP{.val=1.000000e+249, .off= 7.890316691678529484e+232 },
- HP{.val=1.000000e+248, .off= -4.529828046727141859e+231 },
- HP{.val=1.000000e+247, .off= 4.785280507077111924e+230 },
- HP{.val=1.000000e+246, .off= -6.858605185178205305e+229 },
- HP{.val=1.000000e+245, .off= -4.432795665958347728e+228 },
- HP{.val=1.000000e+244, .off= -7.465057564983169531e+227 },
- HP{.val=1.000000e+243, .off= -7.465057564983169741e+226 },
- HP{.val=1.000000e+242, .off= -5.096102956370027445e+225 },
- HP{.val=1.000000e+241, .off= -5.096102956370026952e+224 },
- HP{.val=1.000000e+240, .off= -1.394611380411992474e+223 },
- HP{.val=1.000000e+239, .off= 9.188208545617793960e+221 },
- HP{.val=1.000000e+238, .off= -4.864759732872650359e+221 },
- HP{.val=1.000000e+237, .off= 5.979453868566904629e+220 },
- HP{.val=1.000000e+236, .off= -5.316601966265964857e+219 },
- HP{.val=1.000000e+235, .off= -5.316601966265964701e+218 },
- HP{.val=1.000000e+234, .off= -1.786584517880693123e+217 },
- HP{.val=1.000000e+233, .off= 2.625937292600896716e+216 },
- HP{.val=1.000000e+232, .off= -5.647541102052084079e+215 },
- HP{.val=1.000000e+231, .off= -5.647541102052083888e+214 },
- HP{.val=1.000000e+230, .off= -9.956644432600511943e+213 },
- HP{.val=1.000000e+229, .off= 8.161138937705571862e+211 },
- HP{.val=1.000000e+228, .off= 7.549087847752475275e+211 },
- HP{.val=1.000000e+227, .off= -9.283347037202319948e+210 },
- HP{.val=1.000000e+226, .off= 3.866992716668613820e+209 },
- HP{.val=1.000000e+225, .off= 7.154577655136347262e+208 },
- HP{.val=1.000000e+224, .off= 3.045096482051680688e+207 },
- HP{.val=1.000000e+223, .off= -4.660180717482069567e+206 },
- HP{.val=1.000000e+222, .off= -4.660180717482070101e+205 },
- HP{.val=1.000000e+221, .off= -4.660180717482069544e+204 },
- HP{.val=1.000000e+220, .off= 3.562757926310489022e+202 },
- HP{.val=1.000000e+219, .off= 3.491561111451748149e+202 },
- HP{.val=1.000000e+218, .off= -8.265758834125874135e+201 },
- HP{.val=1.000000e+217, .off= 3.981449442517482365e+200 },
- HP{.val=1.000000e+216, .off= -2.142154695804195936e+199 },
- HP{.val=1.000000e+215, .off= 9.339603063548950188e+198 },
- HP{.val=1.000000e+214, .off= 4.555537330485139746e+197 },
- HP{.val=1.000000e+213, .off= 1.565496247320257804e+196 },
- HP{.val=1.000000e+212, .off= 9.040598955232462036e+195 },
- HP{.val=1.000000e+211, .off= 4.368659762787334780e+194 },
- HP{.val=1.000000e+210, .off= 7.288621758065539072e+193 },
- HP{.val=1.000000e+209, .off= -7.311188218325485628e+192 },
- HP{.val=1.000000e+208, .off= 1.813693016918905189e+191 },
- HP{.val=1.000000e+207, .off= -3.889357755108838992e+190 },
- HP{.val=1.000000e+206, .off= -3.889357755108838992e+189 },
- HP{.val=1.000000e+205, .off= -1.661603547285501360e+188 },
- HP{.val=1.000000e+204, .off= 1.123089212493670643e+187 },
- HP{.val=1.000000e+203, .off= 1.123089212493670643e+186 },
- HP{.val=1.000000e+202, .off= 9.825254086803583029e+185 },
- HP{.val=1.000000e+201, .off= -3.771878529305654999e+184 },
- HP{.val=1.000000e+200, .off= 3.026687778748963675e+183 },
- HP{.val=1.000000e+199, .off= -9.720624048853446693e+182 },
- HP{.val=1.000000e+198, .off= -1.753554156601940139e+181 },
- HP{.val=1.000000e+197, .off= 4.885670753607648963e+180 },
- HP{.val=1.000000e+196, .off= 4.885670753607648963e+179 },
- HP{.val=1.000000e+195, .off= 2.292223523057028076e+178 },
- HP{.val=1.000000e+194, .off= 5.534032561245303825e+177 },
- HP{.val=1.000000e+193, .off= -6.622751331960730683e+176 },
- HP{.val=1.000000e+192, .off= -4.090088020876139692e+175 },
- HP{.val=1.000000e+191, .off= -7.255917159731877552e+174 },
- HP{.val=1.000000e+190, .off= -7.255917159731877992e+173 },
- HP{.val=1.000000e+189, .off= -2.309309130269787104e+172 },
- HP{.val=1.000000e+188, .off= -2.309309130269787019e+171 },
- HP{.val=1.000000e+187, .off= 9.284303438781988230e+170 },
- HP{.val=1.000000e+186, .off= 2.038295583124628364e+169 },
- HP{.val=1.000000e+185, .off= 2.038295583124628532e+168 },
- HP{.val=1.000000e+184, .off= -1.735666841696912925e+167 },
- HP{.val=1.000000e+183, .off= 5.340512704843477241e+166 },
- HP{.val=1.000000e+182, .off= -6.453119872723839321e+165 },
- HP{.val=1.000000e+181, .off= 8.288920849235306587e+164 },
- HP{.val=1.000000e+180, .off= -9.248546019891598293e+162 },
- HP{.val=1.000000e+179, .off= 1.954450226518486016e+162 },
- HP{.val=1.000000e+178, .off= -5.243811844750628197e+161 },
- HP{.val=1.000000e+177, .off= -7.448980502074320639e+159 },
- HP{.val=1.000000e+176, .off= -7.448980502074319858e+158 },
- HP{.val=1.000000e+175, .off= 6.284654753766312753e+158 },
- HP{.val=1.000000e+174, .off= -6.895756753684458388e+157 },
- HP{.val=1.000000e+173, .off= -1.403918625579970616e+156 },
- HP{.val=1.000000e+172, .off= -8.268716285710580522e+155 },
- HP{.val=1.000000e+171, .off= 4.602779327034313170e+154 },
- HP{.val=1.000000e+170, .off= -3.441905430931244940e+153 },
- HP{.val=1.000000e+169, .off= 6.613950516525702884e+152 },
- HP{.val=1.000000e+168, .off= 6.613950516525702652e+151 },
- HP{.val=1.000000e+167, .off= -3.860899428741951187e+150 },
- HP{.val=1.000000e+166, .off= 5.959272394946474605e+149 },
- HP{.val=1.000000e+165, .off= 1.005101065481665103e+149 },
- HP{.val=1.000000e+164, .off= -1.783349948587918355e+146 },
- HP{.val=1.000000e+163, .off= 6.215006036188360099e+146 },
- HP{.val=1.000000e+162, .off= 6.215006036188360099e+145 },
- HP{.val=1.000000e+161, .off= -3.774589324822814903e+144 },
- HP{.val=1.000000e+160, .off= -6.528407745068226929e+142 },
- HP{.val=1.000000e+159, .off= 7.151530601283157561e+142 },
- HP{.val=1.000000e+158, .off= 4.712664546348788765e+141 },
- HP{.val=1.000000e+157, .off= 1.664081977680827856e+140 },
- HP{.val=1.000000e+156, .off= 1.664081977680827750e+139 },
- HP{.val=1.000000e+155, .off= -7.176231540910168265e+137 },
- HP{.val=1.000000e+154, .off= -3.694754568805822650e+137 },
- HP{.val=1.000000e+153, .off= 2.665969958768462622e+134 },
- HP{.val=1.000000e+152, .off= -4.625108135904199522e+135 },
- HP{.val=1.000000e+151, .off= -1.717753238721771919e+134 },
- HP{.val=1.000000e+150, .off= 1.916440382756262433e+133 },
- HP{.val=1.000000e+149, .off= -4.897672657515052040e+132 },
- HP{.val=1.000000e+148, .off= -4.897672657515052198e+131 },
- HP{.val=1.000000e+147, .off= 2.200361759434233991e+130 },
- HP{.val=1.000000e+146, .off= 6.636633270027537273e+129 },
- HP{.val=1.000000e+145, .off= 1.091293881785907977e+128 },
- HP{.val=1.000000e+144, .off= -2.374543235865110597e+127 },
- HP{.val=1.000000e+143, .off= -2.374543235865110537e+126 },
- HP{.val=1.000000e+142, .off= -5.082228484029969099e+125 },
- HP{.val=1.000000e+141, .off= -1.697621923823895943e+124 },
- HP{.val=1.000000e+140, .off= -5.928380124081487212e+123 },
- HP{.val=1.000000e+139, .off= -3.284156248920492522e+122 },
- HP{.val=1.000000e+138, .off= -3.284156248920492706e+121 },
- HP{.val=1.000000e+137, .off= -3.284156248920492476e+120 },
- HP{.val=1.000000e+136, .off= -5.866406127007401066e+119 },
- HP{.val=1.000000e+135, .off= 3.817030915818506056e+118 },
- HP{.val=1.000000e+134, .off= 7.851796350329300951e+117 },
- HP{.val=1.000000e+133, .off= -2.235117235947686077e+116 },
- HP{.val=1.000000e+132, .off= 9.170432597638723691e+114 },
- HP{.val=1.000000e+131, .off= 8.797444499042767883e+114 },
- HP{.val=1.000000e+130, .off= -5.978307824605161274e+113 },
- HP{.val=1.000000e+129, .off= 1.782556435814758516e+111 },
- HP{.val=1.000000e+128, .off= -7.517448691651820362e+111 },
- HP{.val=1.000000e+127, .off= 4.507089332150205498e+110 },
- HP{.val=1.000000e+126, .off= 7.513223838100711695e+109 },
- HP{.val=1.000000e+125, .off= 7.513223838100712113e+108 },
- HP{.val=1.000000e+124, .off= 5.164681255326878494e+107 },
- HP{.val=1.000000e+123, .off= 2.229003026859587122e+106 },
- HP{.val=1.000000e+122, .off= -1.440594758724527399e+105 },
- HP{.val=1.000000e+121, .off= -3.734093374714598783e+104 },
- HP{.val=1.000000e+120, .off= 1.999653165260579757e+103 },
- HP{.val=1.000000e+119, .off= 5.583244752745066693e+102 },
- HP{.val=1.000000e+118, .off= 3.343500010567262234e+101 },
- HP{.val=1.000000e+117, .off= -5.055542772599503556e+100 },
- HP{.val=1.000000e+116, .off= -1.555941612946684331e+99 },
- HP{.val=1.000000e+115, .off= -1.555941612946684331e+98 },
- HP{.val=1.000000e+114, .off= -1.555941612946684293e+97 },
- HP{.val=1.000000e+113, .off= -1.555941612946684246e+96 },
- HP{.val=1.000000e+112, .off= 6.988006530736955847e+95 },
- HP{.val=1.000000e+111, .off= 4.318022735835818244e+94 },
- HP{.val=1.000000e+110, .off= -2.356936751417025578e+93 },
- HP{.val=1.000000e+109, .off= 1.814912928116001926e+92 },
- HP{.val=1.000000e+108, .off= -3.399899171300282744e+91 },
- HP{.val=1.000000e+107, .off= 3.118615952970072913e+90 },
- HP{.val=1.000000e+106, .off= -9.103599905036843605e+89 },
- HP{.val=1.000000e+105, .off= 6.174169917471802325e+88 },
- HP{.val=1.000000e+104, .off= -1.915675085734668657e+86 },
- HP{.val=1.000000e+103, .off= -1.915675085734668864e+85 },
- HP{.val=1.000000e+102, .off= 2.295048673475466221e+85 },
- HP{.val=1.000000e+101, .off= 2.295048673475466135e+84 },
- HP{.val=1.000000e+100, .off= -1.590289110975991792e+83 },
- HP{.val=1.000000e+99, .off= 3.266383119588331155e+82 },
- HP{.val=1.000000e+98, .off= 2.309629754856292029e+80 },
- HP{.val=1.000000e+97, .off= -7.357587384771124533e+80 },
- HP{.val=1.000000e+96, .off= -4.986165397190889509e+79 },
- HP{.val=1.000000e+95, .off= -2.021887912715594741e+78 },
- HP{.val=1.000000e+94, .off= -2.021887912715594638e+77 },
- HP{.val=1.000000e+93, .off= -4.337729697461918675e+76 },
- HP{.val=1.000000e+92, .off= -4.337729697461918997e+75 },
- HP{.val=1.000000e+91, .off= -7.956232486128049702e+74 },
- HP{.val=1.000000e+90, .off= 3.351588728453609882e+73 },
- HP{.val=1.000000e+89, .off= 5.246334248081951113e+71 },
- HP{.val=1.000000e+88, .off= 4.058327554364963672e+71 },
- HP{.val=1.000000e+87, .off= 4.058327554364963918e+70 },
- HP{.val=1.000000e+86, .off= -1.463069523067487266e+69 },
- HP{.val=1.000000e+85, .off= -1.463069523067487314e+68 },
- HP{.val=1.000000e+84, .off= -5.776660989811589441e+67 },
- HP{.val=1.000000e+83, .off= -3.080666323096525761e+66 },
- HP{.val=1.000000e+82, .off= 3.659320343691134468e+65 },
- HP{.val=1.000000e+81, .off= 7.871812010433421235e+64 },
- HP{.val=1.000000e+80, .off= -2.660986470836727449e+61 },
- HP{.val=1.000000e+79, .off= 3.264399249934044627e+62 },
- HP{.val=1.000000e+78, .off= -8.493621433689703070e+60 },
- HP{.val=1.000000e+77, .off= 1.721738727445414063e+60 },
- HP{.val=1.000000e+76, .off= -4.706013449590547218e+59 },
- HP{.val=1.000000e+75, .off= 7.346021882351880518e+58 },
- HP{.val=1.000000e+74, .off= 4.835181188197207515e+57 },
- HP{.val=1.000000e+73, .off= 1.696630320503867482e+56 },
- HP{.val=1.000000e+72, .off= 5.619818905120542959e+55 },
- HP{.val=1.000000e+71, .off= -4.188152556421145598e+54 },
- HP{.val=1.000000e+70, .off= -7.253143638152923145e+53 },
- HP{.val=1.000000e+69, .off= -7.253143638152923145e+52 },
- HP{.val=1.000000e+68, .off= 4.719477774861832896e+51 },
- HP{.val=1.000000e+67, .off= 1.726322421608144052e+50 },
- HP{.val=1.000000e+66, .off= 5.467766613175255107e+49 },
- HP{.val=1.000000e+65, .off= 7.909613737163661911e+47 },
- HP{.val=1.000000e+64, .off= -2.132041900945439564e+47 },
- HP{.val=1.000000e+63, .off= -5.785795994272697265e+46 },
- HP{.val=1.000000e+62, .off= -3.502199685943161329e+45 },
- HP{.val=1.000000e+61, .off= 5.061286470292598274e+44 },
- HP{.val=1.000000e+60, .off= 5.061286470292598472e+43 },
- HP{.val=1.000000e+59, .off= 2.831211950439536034e+42 },
- HP{.val=1.000000e+58, .off= 5.618805100255863927e+41 },
- HP{.val=1.000000e+57, .off= -4.834669211555366251e+40 },
- HP{.val=1.000000e+56, .off= -9.190283508143378583e+39 },
- HP{.val=1.000000e+55, .off= -1.023506702040855158e+38 },
- HP{.val=1.000000e+54, .off= -7.829154040459624616e+37 },
- HP{.val=1.000000e+53, .off= 6.779051325638372659e+35 },
- HP{.val=1.000000e+52, .off= 6.779051325638372290e+34 },
- HP{.val=1.000000e+51, .off= 6.779051325638371598e+33 },
- HP{.val=1.000000e+50, .off= -7.629769841091887392e+33 },
- HP{.val=1.000000e+49, .off= 5.350972305245182400e+32 },
- HP{.val=1.000000e+48, .off= -4.384584304507619764e+31 },
- HP{.val=1.000000e+47, .off= -4.384584304507619876e+30 },
- HP{.val=1.000000e+46, .off= 6.860180964052978705e+28 },
- HP{.val=1.000000e+45, .off= 7.024271097546444878e+28 },
- HP{.val=1.000000e+44, .off= -8.821361405306422641e+27 },
- HP{.val=1.000000e+43, .off= -1.393721169594140991e+26 },
- HP{.val=1.000000e+42, .off= -4.488571267807591679e+25 },
- HP{.val=1.000000e+41, .off= -6.200086450407783195e+23 },
- HP{.val=1.000000e+40, .off= -3.037860284270036669e+23 },
- HP{.val=1.000000e+39, .off= 6.029083362839682141e+22 },
- HP{.val=1.000000e+38, .off= 2.251190176543965970e+21 },
- HP{.val=1.000000e+37, .off= 4.612373417978788577e+20 },
- HP{.val=1.000000e+36, .off= -4.242063737401796198e+19 },
- HP{.val=1.000000e+35, .off= 3.136633892082024448e+18 },
- HP{.val=1.000000e+34, .off= 5.442476901295718400e+17 },
- HP{.val=1.000000e+33, .off= 5.442476901295718400e+16 },
- HP{.val=1.000000e+32, .off= -5.366162204393472000e+15 },
- HP{.val=1.000000e+31, .off= 3.641037050347520000e+14 },
- HP{.val=1.000000e+30, .off= -1.988462483865600000e+13 },
- HP{.val=1.000000e+29, .off= 8.566849142784000000e+12 },
- HP{.val=1.000000e+28, .off= 4.168802631680000000e+11 },
- HP{.val=1.000000e+27, .off= -1.328755507200000000e+10 },
- HP{.val=1.000000e+26, .off= -4.764729344000000000e+09 },
- HP{.val=1.000000e+25, .off= -9.059696640000000000e+08 },
- HP{.val=1.000000e+24, .off= 1.677721600000000000e+07 },
- HP{.val=1.000000e+23, .off= 8.388608000000000000e+06 },
- HP{.val=1.000000e+22, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+21, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+20, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+19, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+18, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+17, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+16, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+15, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+14, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+13, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+12, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+11, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+10, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+09, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+08, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+07, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+06, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+05, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+04, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+03, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+02, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+01, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e+00, .off= 0.000000000000000000e+00 },
- HP{.val=1.000000e-01, .off= -5.551115123125783010e-18 },
- HP{.val=1.000000e-02, .off= -2.081668171172168436e-19 },
- HP{.val=1.000000e-03, .off= -2.081668171172168557e-20 },
- HP{.val=1.000000e-04, .off= -4.792173602385929943e-21 },
- HP{.val=1.000000e-05, .off= -8.180305391403130547e-22 },
- HP{.val=1.000000e-06, .off= 4.525188817411374069e-23 },
- HP{.val=1.000000e-07, .off= 4.525188817411373922e-24 },
- HP{.val=1.000000e-08, .off= -2.092256083012847109e-25 },
- HP{.val=1.000000e-09, .off= -6.228159145777985254e-26 },
- HP{.val=1.000000e-10, .off= -3.643219731549774344e-27 },
- HP{.val=1.000000e-11, .off= 6.050303071806019080e-28 },
- HP{.val=1.000000e-12, .off= 2.011335237074438524e-29 },
- HP{.val=1.000000e-13, .off= -3.037374556340037101e-30 },
- HP{.val=1.000000e-14, .off= 1.180690645440101289e-32 },
- HP{.val=1.000000e-15, .off= -7.770539987666107583e-32 },
- HP{.val=1.000000e-16, .off= 2.090221327596539779e-33 },
- HP{.val=1.000000e-17, .off= -7.154242405462192144e-34 },
- HP{.val=1.000000e-18, .off= -7.154242405462192572e-35 },
- HP{.val=1.000000e-19, .off= 2.475407316473986894e-36 },
- HP{.val=1.000000e-20, .off= 5.484672854579042914e-37 },
- HP{.val=1.000000e-21, .off= 9.246254777210362522e-38 },
- HP{.val=1.000000e-22, .off= -4.859677432657087182e-39 },
- HP{.val=1.000000e-23, .off= 3.956530198510069291e-40 },
- HP{.val=1.000000e-24, .off= 7.629950044829717753e-41 },
- HP{.val=1.000000e-25, .off= -3.849486974919183692e-42 },
- HP{.val=1.000000e-26, .off= -3.849486974919184170e-43 },
- HP{.val=1.000000e-27, .off= -3.849486974919184070e-44 },
- HP{.val=1.000000e-28, .off= 2.876745653839937870e-45 },
- HP{.val=1.000000e-29, .off= 5.679342582489572168e-46 },
- HP{.val=1.000000e-30, .off= -8.333642060758598930e-47 },
- HP{.val=1.000000e-31, .off= -8.333642060758597958e-48 },
- HP{.val=1.000000e-32, .off= -5.596730997624190224e-49 },
- HP{.val=1.000000e-33, .off= -5.596730997624190604e-50 },
- HP{.val=1.000000e-34, .off= 7.232539610818348498e-51 },
- HP{.val=1.000000e-35, .off= -7.857545194582380514e-53 },
- HP{.val=1.000000e-36, .off= 5.896157255772251528e-53 },
- HP{.val=1.000000e-37, .off= -6.632427322784915796e-54 },
- HP{.val=1.000000e-38, .off= 3.808059826012723592e-55 },
- HP{.val=1.000000e-39, .off= 7.070712060011985131e-56 },
- HP{.val=1.000000e-40, .off= 7.070712060011985584e-57 },
- HP{.val=1.000000e-41, .off= -5.761291134237854167e-59 },
- HP{.val=1.000000e-42, .off= -3.762312935688689794e-59 },
- HP{.val=1.000000e-43, .off= -7.745042713519821150e-60 },
- HP{.val=1.000000e-44, .off= 4.700987842202462817e-61 },
- HP{.val=1.000000e-45, .off= 1.589480203271891964e-62 },
- HP{.val=1.000000e-46, .off= -2.299904345391321765e-63 },
- HP{.val=1.000000e-47, .off= 2.561826340437695261e-64 },
- HP{.val=1.000000e-48, .off= 2.561826340437695345e-65 },
- HP{.val=1.000000e-49, .off= 6.360053438741614633e-66 },
- HP{.val=1.000000e-50, .off= -7.616223705782342295e-68 },
- HP{.val=1.000000e-51, .off= -7.616223705782343324e-69 },
- HP{.val=1.000000e-52, .off= -7.616223705782342295e-70 },
- HP{.val=1.000000e-53, .off= -3.079876214757872338e-70 },
- HP{.val=1.000000e-54, .off= -3.079876214757872821e-71 },
- HP{.val=1.000000e-55, .off= 5.423954167728123147e-73 },
- HP{.val=1.000000e-56, .off= -3.985444122640543680e-73 },
- HP{.val=1.000000e-57, .off= 4.504255013759498850e-74 },
- HP{.val=1.000000e-58, .off= -2.570494266573869991e-75 },
- HP{.val=1.000000e-59, .off= -2.570494266573869930e-76 },
- HP{.val=1.000000e-60, .off= 2.956653608686574324e-77 },
- HP{.val=1.000000e-61, .off= -3.952281235388981376e-78 },
- HP{.val=1.000000e-62, .off= -3.952281235388981376e-79 },
- HP{.val=1.000000e-63, .off= -6.651083908855995172e-80 },
- HP{.val=1.000000e-64, .off= 3.469426116645307030e-81 },
- HP{.val=1.000000e-65, .off= 7.686305293937516319e-82 },
- HP{.val=1.000000e-66, .off= 2.415206322322254927e-83 },
- HP{.val=1.000000e-67, .off= 5.709643179581793251e-84 },
- HP{.val=1.000000e-68, .off= -6.644495035141475923e-85 },
- HP{.val=1.000000e-69, .off= 3.650620143794581913e-86 },
- HP{.val=1.000000e-70, .off= 4.333966503770636492e-88 },
- HP{.val=1.000000e-71, .off= 8.476455383920859113e-88 },
- HP{.val=1.000000e-72, .off= 3.449543675455986564e-89 },
- HP{.val=1.000000e-73, .off= 3.077238576654418974e-91 },
- HP{.val=1.000000e-74, .off= 4.234998629903623140e-91 },
- HP{.val=1.000000e-75, .off= 4.234998629903623412e-92 },
- HP{.val=1.000000e-76, .off= 7.303182045714702338e-93 },
- HP{.val=1.000000e-77, .off= 7.303182045714701699e-94 },
- HP{.val=1.000000e-78, .off= 1.121271649074855759e-96 },
- HP{.val=1.000000e-79, .off= 1.121271649074855863e-97 },
- HP{.val=1.000000e-80, .off= 3.857468248661243988e-97 },
- HP{.val=1.000000e-81, .off= 3.857468248661244248e-98 },
- HP{.val=1.000000e-82, .off= 3.857468248661244410e-99 },
- HP{.val=1.000000e-83, .off= -3.457651055545315679e-100 },
- HP{.val=1.000000e-84, .off= -3.457651055545315933e-101 },
- HP{.val=1.000000e-85, .off= 2.257285900866059216e-102 },
- HP{.val=1.000000e-86, .off= -8.458220892405268345e-103 },
- HP{.val=1.000000e-87, .off= -1.761029146610688867e-104 },
- HP{.val=1.000000e-88, .off= 6.610460535632536565e-105 },
- HP{.val=1.000000e-89, .off= -3.853901567171494935e-106 },
- HP{.val=1.000000e-90, .off= 5.062493089968513723e-108 },
- HP{.val=1.000000e-91, .off= -2.218844988608365240e-108 },
- HP{.val=1.000000e-92, .off= 1.187522883398155383e-109 },
- HP{.val=1.000000e-93, .off= 9.703442563414457296e-110 },
- HP{.val=1.000000e-94, .off= 4.380992763404268896e-111 },
- HP{.val=1.000000e-95, .off= 1.054461638397900823e-112 },
- HP{.val=1.000000e-96, .off= 9.370789450913819736e-113 },
- HP{.val=1.000000e-97, .off= -3.623472756142303998e-114 },
- HP{.val=1.000000e-98, .off= 6.122223899149788839e-115 },
- HP{.val=1.000000e-99, .off= -1.999189980260288281e-116 },
- HP{.val=1.000000e-100, .off= -1.999189980260288281e-117 },
- HP{.val=1.000000e-101, .off= -5.171617276904849634e-118 },
- HP{.val=1.000000e-102, .off= 6.724985085512256320e-119 },
- HP{.val=1.000000e-103, .off= 4.246526260008692213e-120 },
- HP{.val=1.000000e-104, .off= 7.344599791888147003e-121 },
- HP{.val=1.000000e-105, .off= 3.472007877038828407e-122 },
- HP{.val=1.000000e-106, .off= 5.892377823819652194e-123 },
- HP{.val=1.000000e-107, .off= -1.585470431324073925e-125 },
- HP{.val=1.000000e-108, .off= -3.940375084977444795e-125 },
- HP{.val=1.000000e-109, .off= 7.869099673288519908e-127 },
- HP{.val=1.000000e-110, .off= -5.122196348054018581e-127 },
- HP{.val=1.000000e-111, .off= -8.815387795168313713e-128 },
- HP{.val=1.000000e-112, .off= 5.034080131510290214e-129 },
- HP{.val=1.000000e-113, .off= 2.148774313452247863e-130 },
- HP{.val=1.000000e-114, .off= -5.064490231692858416e-131 },
- HP{.val=1.000000e-115, .off= -5.064490231692858166e-132 },
- HP{.val=1.000000e-116, .off= 5.708726942017560559e-134 },
- HP{.val=1.000000e-117, .off= -2.951229134482377772e-134 },
- HP{.val=1.000000e-118, .off= 1.451398151372789513e-135 },
- HP{.val=1.000000e-119, .off= -1.300243902286690040e-136 },
- HP{.val=1.000000e-120, .off= 2.139308664787659449e-137 },
- HP{.val=1.000000e-121, .off= 2.139308664787659329e-138 },
- HP{.val=1.000000e-122, .off= -5.922142664292847471e-139 },
- HP{.val=1.000000e-123, .off= -5.922142664292846912e-140 },
- HP{.val=1.000000e-124, .off= 6.673875037395443799e-141 },
- HP{.val=1.000000e-125, .off= -1.198636026159737932e-142 },
- HP{.val=1.000000e-126, .off= 5.361789860136246995e-143 },
- HP{.val=1.000000e-127, .off= -2.838742497733733936e-144 },
- HP{.val=1.000000e-128, .off= -5.401408859568103261e-145 },
- HP{.val=1.000000e-129, .off= 7.411922949603743011e-146 },
- HP{.val=1.000000e-130, .off= -8.604741811861064385e-147 },
- HP{.val=1.000000e-131, .off= 1.405673664054439890e-148 },
- HP{.val=1.000000e-132, .off= 1.405673664054439933e-149 },
- HP{.val=1.000000e-133, .off= -6.414963426504548053e-150 },
- HP{.val=1.000000e-134, .off= -3.971014335704864578e-151 },
- HP{.val=1.000000e-135, .off= -3.971014335704864748e-152 },
- HP{.val=1.000000e-136, .off= -1.523438813303585576e-154 },
- HP{.val=1.000000e-137, .off= 2.234325152653707766e-154 },
- HP{.val=1.000000e-138, .off= -6.715683724786540160e-155 },
- HP{.val=1.000000e-139, .off= -2.986513359186437306e-156 },
- HP{.val=1.000000e-140, .off= 1.674949597813692102e-157 },
- HP{.val=1.000000e-141, .off= -4.151879098436469092e-158 },
- HP{.val=1.000000e-142, .off= -4.151879098436469295e-159 },
- HP{.val=1.000000e-143, .off= 4.952540739454407825e-160 },
- HP{.val=1.000000e-144, .off= 4.952540739454407667e-161 },
- HP{.val=1.000000e-145, .off= 8.508954738630531443e-162 },
- HP{.val=1.000000e-146, .off= -2.604839008794855481e-163 },
- HP{.val=1.000000e-147, .off= 2.952057864917838382e-164 },
- HP{.val=1.000000e-148, .off= 6.425118410988271757e-165 },
- HP{.val=1.000000e-149, .off= 2.083792728400229858e-166 },
- HP{.val=1.000000e-150, .off= -6.295358232172964237e-168 },
- HP{.val=1.000000e-151, .off= 6.153785555826519421e-168 },
- HP{.val=1.000000e-152, .off= -6.564942029880634994e-169 },
- HP{.val=1.000000e-153, .off= -3.915207116191644540e-170 },
- HP{.val=1.000000e-154, .off= 2.709130168030831503e-171 },
- HP{.val=1.000000e-155, .off= -1.431080634608215966e-172 },
- HP{.val=1.000000e-156, .off= -4.018712386257620994e-173 },
- HP{.val=1.000000e-157, .off= 5.684906682427646782e-174 },
- HP{.val=1.000000e-158, .off= -6.444617153428937489e-175 },
- HP{.val=1.000000e-159, .off= 1.136335243981427681e-176 },
- HP{.val=1.000000e-160, .off= 1.136335243981427725e-177 },
- HP{.val=1.000000e-161, .off= -2.812077463003137395e-178 },
- HP{.val=1.000000e-162, .off= 4.591196362592922204e-179 },
- HP{.val=1.000000e-163, .off= 7.675893789924613703e-180 },
- HP{.val=1.000000e-164, .off= 3.820022005759999543e-181 },
- HP{.val=1.000000e-165, .off= -9.998177244457686588e-183 },
- HP{.val=1.000000e-166, .off= -4.012217555824373639e-183 },
- HP{.val=1.000000e-167, .off= -2.467177666011174334e-185 },
- HP{.val=1.000000e-168, .off= -4.953592503130188139e-185 },
- HP{.val=1.000000e-169, .off= -2.011795792799518887e-186 },
- HP{.val=1.000000e-170, .off= 1.665450095113817423e-187 },
- HP{.val=1.000000e-171, .off= 1.665450095113817487e-188 },
- HP{.val=1.000000e-172, .off= -4.080246604750770577e-189 },
- HP{.val=1.000000e-173, .off= -4.080246604750770677e-190 },
- HP{.val=1.000000e-174, .off= 4.085789420184387951e-192 },
- HP{.val=1.000000e-175, .off= 4.085789420184388146e-193 },
- HP{.val=1.000000e-176, .off= 4.085789420184388146e-194 },
- HP{.val=1.000000e-177, .off= 4.792197640035244894e-194 },
- HP{.val=1.000000e-178, .off= 4.792197640035244742e-195 },
- HP{.val=1.000000e-179, .off= -2.057206575616014662e-196 },
- HP{.val=1.000000e-180, .off= -2.057206575616014662e-197 },
- HP{.val=1.000000e-181, .off= -4.732755097354788053e-198 },
- HP{.val=1.000000e-182, .off= -4.732755097354787867e-199 },
- HP{.val=1.000000e-183, .off= -5.522105321379546765e-201 },
- HP{.val=1.000000e-184, .off= -5.777891238658996019e-201 },
- HP{.val=1.000000e-185, .off= 7.542096444923057046e-203 },
- HP{.val=1.000000e-186, .off= 8.919335748431433483e-203 },
- HP{.val=1.000000e-187, .off= -1.287071881492476028e-204 },
- HP{.val=1.000000e-188, .off= 5.091932887209967018e-205 },
- HP{.val=1.000000e-189, .off= -6.868701054107114024e-206 },
- HP{.val=1.000000e-190, .off= -1.885103578558330118e-207 },
- HP{.val=1.000000e-191, .off= -1.885103578558330205e-208 },
- HP{.val=1.000000e-192, .off= -9.671974634103305058e-209 },
- HP{.val=1.000000e-193, .off= -4.805180224387695640e-210 },
- HP{.val=1.000000e-194, .off= -1.763433718315439838e-211 },
- HP{.val=1.000000e-195, .off= -9.367799983496079132e-212 },
- HP{.val=1.000000e-196, .off= -4.615071067758179837e-213 },
- HP{.val=1.000000e-197, .off= 1.325840076914194777e-214 },
- HP{.val=1.000000e-198, .off= 8.751979007754662425e-215 },
- HP{.val=1.000000e-199, .off= 1.789973760091724198e-216 },
- HP{.val=1.000000e-200, .off= 1.789973760091724077e-217 },
- HP{.val=1.000000e-201, .off= 5.416018159916171171e-218 },
- HP{.val=1.000000e-202, .off= -3.649092839644947067e-219 },
- HP{.val=1.000000e-203, .off= -3.649092839644947067e-220 },
- HP{.val=1.000000e-204, .off= -1.080338554413850956e-222 },
- HP{.val=1.000000e-205, .off= -1.080338554413850841e-223 },
- HP{.val=1.000000e-206, .off= -2.874486186850417807e-223 },
- HP{.val=1.000000e-207, .off= 7.499710055933455072e-224 },
- HP{.val=1.000000e-208, .off= -9.790617015372999087e-225 },
- HP{.val=1.000000e-209, .off= -4.387389805589732612e-226 },
- HP{.val=1.000000e-210, .off= -4.387389805589732612e-227 },
- HP{.val=1.000000e-211, .off= -8.608661063232909897e-228 },
- HP{.val=1.000000e-212, .off= 4.582811616902018972e-229 },
- HP{.val=1.000000e-213, .off= 4.582811616902019155e-230 },
- HP{.val=1.000000e-214, .off= 8.705146829444184930e-231 },
- HP{.val=1.000000e-215, .off= -4.177150709750081830e-232 },
- HP{.val=1.000000e-216, .off= -4.177150709750082366e-233 },
- HP{.val=1.000000e-217, .off= -8.202868690748290237e-234 },
- HP{.val=1.000000e-218, .off= -3.170721214500530119e-235 },
- HP{.val=1.000000e-219, .off= -3.170721214500529857e-236 },
- HP{.val=1.000000e-220, .off= 7.606440013180328441e-238 },
- HP{.val=1.000000e-221, .off= -1.696459258568569049e-238 },
- HP{.val=1.000000e-222, .off= -4.767838333426821244e-239 },
- HP{.val=1.000000e-223, .off= 2.910609353718809138e-240 },
- HP{.val=1.000000e-224, .off= -1.888420450747209784e-241 },
- HP{.val=1.000000e-225, .off= 4.110366804835314035e-242 },
- HP{.val=1.000000e-226, .off= 7.859608839574391006e-243 },
- HP{.val=1.000000e-227, .off= 5.516332567862468419e-244 },
- HP{.val=1.000000e-228, .off= -3.270953451057244613e-245 },
- HP{.val=1.000000e-229, .off= -6.932322625607124670e-246 },
- HP{.val=1.000000e-230, .off= -4.643966891513449762e-247 },
- HP{.val=1.000000e-231, .off= 1.076922443720738305e-248 },
- HP{.val=1.000000e-232, .off= -2.498633390800628939e-249 },
- HP{.val=1.000000e-233, .off= 4.205533798926934891e-250 },
- HP{.val=1.000000e-234, .off= 4.205533798926934891e-251 },
- HP{.val=1.000000e-235, .off= 4.205533798926934697e-252 },
- HP{.val=1.000000e-236, .off= -4.523850562697497656e-253 },
- HP{.val=1.000000e-237, .off= 9.320146633177728298e-255 },
- HP{.val=1.000000e-238, .off= 9.320146633177728062e-256 },
- HP{.val=1.000000e-239, .off= -7.592774752331086440e-256 },
- HP{.val=1.000000e-240, .off= 3.063212017229987840e-257 },
- HP{.val=1.000000e-241, .off= 3.063212017229987562e-258 },
- HP{.val=1.000000e-242, .off= 3.063212017229987562e-259 },
- HP{.val=1.000000e-243, .off= 4.616527473176159842e-261 },
- HP{.val=1.000000e-244, .off= 6.965550922098544975e-261 },
- HP{.val=1.000000e-245, .off= 6.965550922098544749e-262 },
- HP{.val=1.000000e-246, .off= 4.424965697574744679e-263 },
- HP{.val=1.000000e-247, .off= -1.926497363734756420e-264 },
- HP{.val=1.000000e-248, .off= 2.043167049583681740e-265 },
- HP{.val=1.000000e-249, .off= -5.399953725388390154e-266 },
- HP{.val=1.000000e-250, .off= -5.399953725388389982e-267 },
- HP{.val=1.000000e-251, .off= -1.523328321757102663e-268 },
- HP{.val=1.000000e-252, .off= 5.745344310051561161e-269 },
- HP{.val=1.000000e-253, .off= -6.369110076296211879e-270 },
- HP{.val=1.000000e-254, .off= 8.773957906638504842e-271 },
- HP{.val=1.000000e-255, .off= -6.904595826956931908e-273 },
- HP{.val=1.000000e-256, .off= 2.267170882721243669e-273 },
- HP{.val=1.000000e-257, .off= 2.267170882721243669e-274 },
- HP{.val=1.000000e-258, .off= 4.577819683828225398e-275 },
- HP{.val=1.000000e-259, .off= -6.975424321706684210e-276 },
- HP{.val=1.000000e-260, .off= 3.855741933482293648e-277 },
- HP{.val=1.000000e-261, .off= 1.599248963651256552e-278 },
- HP{.val=1.000000e-262, .off= -1.221367248637539543e-279 },
- HP{.val=1.000000e-263, .off= -1.221367248637539494e-280 },
- HP{.val=1.000000e-264, .off= -1.221367248637539647e-281 },
- HP{.val=1.000000e-265, .off= 1.533140771175737943e-282 },
- HP{.val=1.000000e-266, .off= 1.533140771175737895e-283 },
- HP{.val=1.000000e-267, .off= 1.533140771175738074e-284 },
- HP{.val=1.000000e-268, .off= 4.223090009274641634e-285 },
- HP{.val=1.000000e-269, .off= 4.223090009274641634e-286 },
- HP{.val=1.000000e-270, .off= -4.183001359784432924e-287 },
- HP{.val=1.000000e-271, .off= 3.697709298708449474e-288 },
- HP{.val=1.000000e-272, .off= 6.981338739747150474e-289 },
- HP{.val=1.000000e-273, .off= -9.436808465446354751e-290 },
- HP{.val=1.000000e-274, .off= 3.389869038611071740e-291 },
- HP{.val=1.000000e-275, .off= 6.596538414625427829e-292 },
- HP{.val=1.000000e-276, .off= -9.436808465446354618e-293 },
- HP{.val=1.000000e-277, .off= 3.089243784609725523e-294 },
- HP{.val=1.000000e-278, .off= 6.220756847123745836e-295 },
- HP{.val=1.000000e-279, .off= -5.522417137303829470e-296 },
- HP{.val=1.000000e-280, .off= 4.263561183052483059e-297 },
- HP{.val=1.000000e-281, .off= -1.852675267170212272e-298 },
- HP{.val=1.000000e-282, .off= -1.852675267170212378e-299 },
- HP{.val=1.000000e-283, .off= 5.314789322934508480e-300 },
- HP{.val=1.000000e-284, .off= -3.644541414696392675e-301 },
- HP{.val=1.000000e-285, .off= -7.377595888709267777e-302 },
- HP{.val=1.000000e-286, .off= -5.044436842451220838e-303 },
- HP{.val=1.000000e-287, .off= -2.127988034628661760e-304 },
- HP{.val=1.000000e-288, .off= -5.773549044406860911e-305 },
- HP{.val=1.000000e-289, .off= -1.216597782184112068e-306 },
- HP{.val=1.000000e-290, .off= -6.912786859962547924e-307 },
- HP{.val=1.000000e-291, .off= 3.767567660872018813e-308 },
+ HP{ .val = 1.000000e+308, .off = -1.097906362944045488e+291 },
+ HP{ .val = 1.000000e+307, .off = 1.396894023974354241e+290 },
+ HP{ .val = 1.000000e+306, .off = -1.721606459673645508e+289 },
+ HP{ .val = 1.000000e+305, .off = 6.074644749446353973e+288 },
+ HP{ .val = 1.000000e+304, .off = 6.074644749446353567e+287 },
+ HP{ .val = 1.000000e+303, .off = -1.617650767864564452e+284 },
+ HP{ .val = 1.000000e+302, .off = -7.629703079084895055e+285 },
+ HP{ .val = 1.000000e+301, .off = -5.250476025520442286e+284 },
+ HP{ .val = 1.000000e+300, .off = -5.250476025520441956e+283 },
+ HP{ .val = 1.000000e+299, .off = -5.250476025520441750e+282 },
+ HP{ .val = 1.000000e+298, .off = 4.043379652465702264e+281 },
+ HP{ .val = 1.000000e+297, .off = -1.765280146275637946e+280 },
+ HP{ .val = 1.000000e+296, .off = 1.865132227937699609e+279 },
+ HP{ .val = 1.000000e+295, .off = 1.865132227937699609e+278 },
+ HP{ .val = 1.000000e+294, .off = -6.643646774124810287e+277 },
+ HP{ .val = 1.000000e+293, .off = 7.537651562646039934e+276 },
+ HP{ .val = 1.000000e+292, .off = -1.325659897835741608e+275 },
+ HP{ .val = 1.000000e+291, .off = 4.213909764965371606e+274 },
+ HP{ .val = 1.000000e+290, .off = -6.172783352786715670e+273 },
+ HP{ .val = 1.000000e+289, .off = -6.172783352786715670e+272 },
+ HP{ .val = 1.000000e+288, .off = -7.630473539575035471e+270 },
+ HP{ .val = 1.000000e+287, .off = -7.525217352494018700e+270 },
+ HP{ .val = 1.000000e+286, .off = -3.298861103408696612e+269 },
+ HP{ .val = 1.000000e+285, .off = 1.984084207947955778e+268 },
+ HP{ .val = 1.000000e+284, .off = -7.921438250845767591e+267 },
+ HP{ .val = 1.000000e+283, .off = 4.460464822646386735e+266 },
+ HP{ .val = 1.000000e+282, .off = -3.278224598286209647e+265 },
+ HP{ .val = 1.000000e+281, .off = -3.278224598286209737e+264 },
+ HP{ .val = 1.000000e+280, .off = -3.278224598286209961e+263 },
+ HP{ .val = 1.000000e+279, .off = -5.797329227496039232e+262 },
+ HP{ .val = 1.000000e+278, .off = 3.649313132040821498e+261 },
+ HP{ .val = 1.000000e+277, .off = -2.867878510995372374e+259 },
+ HP{ .val = 1.000000e+276, .off = -5.206914080024985409e+259 },
+ HP{ .val = 1.000000e+275, .off = 4.018322599210230404e+258 },
+ HP{ .val = 1.000000e+274, .off = 7.862171215558236495e+257 },
+ HP{ .val = 1.000000e+273, .off = 5.459765830340732821e+256 },
+ HP{ .val = 1.000000e+272, .off = -6.552261095746788047e+255 },
+ HP{ .val = 1.000000e+271, .off = 4.709014147460262298e+254 },
+ HP{ .val = 1.000000e+270, .off = -4.675381888545612729e+253 },
+ HP{ .val = 1.000000e+269, .off = -4.675381888545612892e+252 },
+ HP{ .val = 1.000000e+268, .off = 2.656177514583977380e+251 },
+ HP{ .val = 1.000000e+267, .off = 2.656177514583977190e+250 },
+ HP{ .val = 1.000000e+266, .off = -3.071603269111014892e+249 },
+ HP{ .val = 1.000000e+265, .off = -6.651466258920385440e+248 },
+ HP{ .val = 1.000000e+264, .off = -4.414051890289528972e+247 },
+ HP{ .val = 1.000000e+263, .off = -1.617283929500958387e+246 },
+ HP{ .val = 1.000000e+262, .off = -1.617283929500958241e+245 },
+ HP{ .val = 1.000000e+261, .off = 7.122615947963323868e+244 },
+ HP{ .val = 1.000000e+260, .off = -6.533477610574617382e+243 },
+ HP{ .val = 1.000000e+259, .off = 7.122615947963323982e+242 },
+ HP{ .val = 1.000000e+258, .off = -5.679971763165996225e+241 },
+ HP{ .val = 1.000000e+257, .off = -3.012765990014054219e+240 },
+ HP{ .val = 1.000000e+256, .off = -3.012765990014054219e+239 },
+ HP{ .val = 1.000000e+255, .off = 1.154743030535854616e+238 },
+ HP{ .val = 1.000000e+254, .off = 6.364129306223240767e+237 },
+ HP{ .val = 1.000000e+253, .off = 6.364129306223241129e+236 },
+ HP{ .val = 1.000000e+252, .off = -9.915202805299840595e+235 },
+ HP{ .val = 1.000000e+251, .off = -4.827911520448877980e+234 },
+ HP{ .val = 1.000000e+250, .off = 7.890316691678530146e+233 },
+ HP{ .val = 1.000000e+249, .off = 7.890316691678529484e+232 },
+ HP{ .val = 1.000000e+248, .off = -4.529828046727141859e+231 },
+ HP{ .val = 1.000000e+247, .off = 4.785280507077111924e+230 },
+ HP{ .val = 1.000000e+246, .off = -6.858605185178205305e+229 },
+ HP{ .val = 1.000000e+245, .off = -4.432795665958347728e+228 },
+ HP{ .val = 1.000000e+244, .off = -7.465057564983169531e+227 },
+ HP{ .val = 1.000000e+243, .off = -7.465057564983169741e+226 },
+ HP{ .val = 1.000000e+242, .off = -5.096102956370027445e+225 },
+ HP{ .val = 1.000000e+241, .off = -5.096102956370026952e+224 },
+ HP{ .val = 1.000000e+240, .off = -1.394611380411992474e+223 },
+ HP{ .val = 1.000000e+239, .off = 9.188208545617793960e+221 },
+ HP{ .val = 1.000000e+238, .off = -4.864759732872650359e+221 },
+ HP{ .val = 1.000000e+237, .off = 5.979453868566904629e+220 },
+ HP{ .val = 1.000000e+236, .off = -5.316601966265964857e+219 },
+ HP{ .val = 1.000000e+235, .off = -5.316601966265964701e+218 },
+ HP{ .val = 1.000000e+234, .off = -1.786584517880693123e+217 },
+ HP{ .val = 1.000000e+233, .off = 2.625937292600896716e+216 },
+ HP{ .val = 1.000000e+232, .off = -5.647541102052084079e+215 },
+ HP{ .val = 1.000000e+231, .off = -5.647541102052083888e+214 },
+ HP{ .val = 1.000000e+230, .off = -9.956644432600511943e+213 },
+ HP{ .val = 1.000000e+229, .off = 8.161138937705571862e+211 },
+ HP{ .val = 1.000000e+228, .off = 7.549087847752475275e+211 },
+ HP{ .val = 1.000000e+227, .off = -9.283347037202319948e+210 },
+ HP{ .val = 1.000000e+226, .off = 3.866992716668613820e+209 },
+ HP{ .val = 1.000000e+225, .off = 7.154577655136347262e+208 },
+ HP{ .val = 1.000000e+224, .off = 3.045096482051680688e+207 },
+ HP{ .val = 1.000000e+223, .off = -4.660180717482069567e+206 },
+ HP{ .val = 1.000000e+222, .off = -4.660180717482070101e+205 },
+ HP{ .val = 1.000000e+221, .off = -4.660180717482069544e+204 },
+ HP{ .val = 1.000000e+220, .off = 3.562757926310489022e+202 },
+ HP{ .val = 1.000000e+219, .off = 3.491561111451748149e+202 },
+ HP{ .val = 1.000000e+218, .off = -8.265758834125874135e+201 },
+ HP{ .val = 1.000000e+217, .off = 3.981449442517482365e+200 },
+ HP{ .val = 1.000000e+216, .off = -2.142154695804195936e+199 },
+ HP{ .val = 1.000000e+215, .off = 9.339603063548950188e+198 },
+ HP{ .val = 1.000000e+214, .off = 4.555537330485139746e+197 },
+ HP{ .val = 1.000000e+213, .off = 1.565496247320257804e+196 },
+ HP{ .val = 1.000000e+212, .off = 9.040598955232462036e+195 },
+ HP{ .val = 1.000000e+211, .off = 4.368659762787334780e+194 },
+ HP{ .val = 1.000000e+210, .off = 7.288621758065539072e+193 },
+ HP{ .val = 1.000000e+209, .off = -7.311188218325485628e+192 },
+ HP{ .val = 1.000000e+208, .off = 1.813693016918905189e+191 },
+ HP{ .val = 1.000000e+207, .off = -3.889357755108838992e+190 },
+ HP{ .val = 1.000000e+206, .off = -3.889357755108838992e+189 },
+ HP{ .val = 1.000000e+205, .off = -1.661603547285501360e+188 },
+ HP{ .val = 1.000000e+204, .off = 1.123089212493670643e+187 },
+ HP{ .val = 1.000000e+203, .off = 1.123089212493670643e+186 },
+ HP{ .val = 1.000000e+202, .off = 9.825254086803583029e+185 },
+ HP{ .val = 1.000000e+201, .off = -3.771878529305654999e+184 },
+ HP{ .val = 1.000000e+200, .off = 3.026687778748963675e+183 },
+ HP{ .val = 1.000000e+199, .off = -9.720624048853446693e+182 },
+ HP{ .val = 1.000000e+198, .off = -1.753554156601940139e+181 },
+ HP{ .val = 1.000000e+197, .off = 4.885670753607648963e+180 },
+ HP{ .val = 1.000000e+196, .off = 4.885670753607648963e+179 },
+ HP{ .val = 1.000000e+195, .off = 2.292223523057028076e+178 },
+ HP{ .val = 1.000000e+194, .off = 5.534032561245303825e+177 },
+ HP{ .val = 1.000000e+193, .off = -6.622751331960730683e+176 },
+ HP{ .val = 1.000000e+192, .off = -4.090088020876139692e+175 },
+ HP{ .val = 1.000000e+191, .off = -7.255917159731877552e+174 },
+ HP{ .val = 1.000000e+190, .off = -7.255917159731877992e+173 },
+ HP{ .val = 1.000000e+189, .off = -2.309309130269787104e+172 },
+ HP{ .val = 1.000000e+188, .off = -2.309309130269787019e+171 },
+ HP{ .val = 1.000000e+187, .off = 9.284303438781988230e+170 },
+ HP{ .val = 1.000000e+186, .off = 2.038295583124628364e+169 },
+ HP{ .val = 1.000000e+185, .off = 2.038295583124628532e+168 },
+ HP{ .val = 1.000000e+184, .off = -1.735666841696912925e+167 },
+ HP{ .val = 1.000000e+183, .off = 5.340512704843477241e+166 },
+ HP{ .val = 1.000000e+182, .off = -6.453119872723839321e+165 },
+ HP{ .val = 1.000000e+181, .off = 8.288920849235306587e+164 },
+ HP{ .val = 1.000000e+180, .off = -9.248546019891598293e+162 },
+ HP{ .val = 1.000000e+179, .off = 1.954450226518486016e+162 },
+ HP{ .val = 1.000000e+178, .off = -5.243811844750628197e+161 },
+ HP{ .val = 1.000000e+177, .off = -7.448980502074320639e+159 },
+ HP{ .val = 1.000000e+176, .off = -7.448980502074319858e+158 },
+ HP{ .val = 1.000000e+175, .off = 6.284654753766312753e+158 },
+ HP{ .val = 1.000000e+174, .off = -6.895756753684458388e+157 },
+ HP{ .val = 1.000000e+173, .off = -1.403918625579970616e+156 },
+ HP{ .val = 1.000000e+172, .off = -8.268716285710580522e+155 },
+ HP{ .val = 1.000000e+171, .off = 4.602779327034313170e+154 },
+ HP{ .val = 1.000000e+170, .off = -3.441905430931244940e+153 },
+ HP{ .val = 1.000000e+169, .off = 6.613950516525702884e+152 },
+ HP{ .val = 1.000000e+168, .off = 6.613950516525702652e+151 },
+ HP{ .val = 1.000000e+167, .off = -3.860899428741951187e+150 },
+ HP{ .val = 1.000000e+166, .off = 5.959272394946474605e+149 },
+ HP{ .val = 1.000000e+165, .off = 1.005101065481665103e+149 },
+ HP{ .val = 1.000000e+164, .off = -1.783349948587918355e+146 },
+ HP{ .val = 1.000000e+163, .off = 6.215006036188360099e+146 },
+ HP{ .val = 1.000000e+162, .off = 6.215006036188360099e+145 },
+ HP{ .val = 1.000000e+161, .off = -3.774589324822814903e+144 },
+ HP{ .val = 1.000000e+160, .off = -6.528407745068226929e+142 },
+ HP{ .val = 1.000000e+159, .off = 7.151530601283157561e+142 },
+ HP{ .val = 1.000000e+158, .off = 4.712664546348788765e+141 },
+ HP{ .val = 1.000000e+157, .off = 1.664081977680827856e+140 },
+ HP{ .val = 1.000000e+156, .off = 1.664081977680827750e+139 },
+ HP{ .val = 1.000000e+155, .off = -7.176231540910168265e+137 },
+ HP{ .val = 1.000000e+154, .off = -3.694754568805822650e+137 },
+ HP{ .val = 1.000000e+153, .off = 2.665969958768462622e+134 },
+ HP{ .val = 1.000000e+152, .off = -4.625108135904199522e+135 },
+ HP{ .val = 1.000000e+151, .off = -1.717753238721771919e+134 },
+ HP{ .val = 1.000000e+150, .off = 1.916440382756262433e+133 },
+ HP{ .val = 1.000000e+149, .off = -4.897672657515052040e+132 },
+ HP{ .val = 1.000000e+148, .off = -4.897672657515052198e+131 },
+ HP{ .val = 1.000000e+147, .off = 2.200361759434233991e+130 },
+ HP{ .val = 1.000000e+146, .off = 6.636633270027537273e+129 },
+ HP{ .val = 1.000000e+145, .off = 1.091293881785907977e+128 },
+ HP{ .val = 1.000000e+144, .off = -2.374543235865110597e+127 },
+ HP{ .val = 1.000000e+143, .off = -2.374543235865110537e+126 },
+ HP{ .val = 1.000000e+142, .off = -5.082228484029969099e+125 },
+ HP{ .val = 1.000000e+141, .off = -1.697621923823895943e+124 },
+ HP{ .val = 1.000000e+140, .off = -5.928380124081487212e+123 },
+ HP{ .val = 1.000000e+139, .off = -3.284156248920492522e+122 },
+ HP{ .val = 1.000000e+138, .off = -3.284156248920492706e+121 },
+ HP{ .val = 1.000000e+137, .off = -3.284156248920492476e+120 },
+ HP{ .val = 1.000000e+136, .off = -5.866406127007401066e+119 },
+ HP{ .val = 1.000000e+135, .off = 3.817030915818506056e+118 },
+ HP{ .val = 1.000000e+134, .off = 7.851796350329300951e+117 },
+ HP{ .val = 1.000000e+133, .off = -2.235117235947686077e+116 },
+ HP{ .val = 1.000000e+132, .off = 9.170432597638723691e+114 },
+ HP{ .val = 1.000000e+131, .off = 8.797444499042767883e+114 },
+ HP{ .val = 1.000000e+130, .off = -5.978307824605161274e+113 },
+ HP{ .val = 1.000000e+129, .off = 1.782556435814758516e+111 },
+ HP{ .val = 1.000000e+128, .off = -7.517448691651820362e+111 },
+ HP{ .val = 1.000000e+127, .off = 4.507089332150205498e+110 },
+ HP{ .val = 1.000000e+126, .off = 7.513223838100711695e+109 },
+ HP{ .val = 1.000000e+125, .off = 7.513223838100712113e+108 },
+ HP{ .val = 1.000000e+124, .off = 5.164681255326878494e+107 },
+ HP{ .val = 1.000000e+123, .off = 2.229003026859587122e+106 },
+ HP{ .val = 1.000000e+122, .off = -1.440594758724527399e+105 },
+ HP{ .val = 1.000000e+121, .off = -3.734093374714598783e+104 },
+ HP{ .val = 1.000000e+120, .off = 1.999653165260579757e+103 },
+ HP{ .val = 1.000000e+119, .off = 5.583244752745066693e+102 },
+ HP{ .val = 1.000000e+118, .off = 3.343500010567262234e+101 },
+ HP{ .val = 1.000000e+117, .off = -5.055542772599503556e+100 },
+ HP{ .val = 1.000000e+116, .off = -1.555941612946684331e+99 },
+ HP{ .val = 1.000000e+115, .off = -1.555941612946684331e+98 },
+ HP{ .val = 1.000000e+114, .off = -1.555941612946684293e+97 },
+ HP{ .val = 1.000000e+113, .off = -1.555941612946684246e+96 },
+ HP{ .val = 1.000000e+112, .off = 6.988006530736955847e+95 },
+ HP{ .val = 1.000000e+111, .off = 4.318022735835818244e+94 },
+ HP{ .val = 1.000000e+110, .off = -2.356936751417025578e+93 },
+ HP{ .val = 1.000000e+109, .off = 1.814912928116001926e+92 },
+ HP{ .val = 1.000000e+108, .off = -3.399899171300282744e+91 },
+ HP{ .val = 1.000000e+107, .off = 3.118615952970072913e+90 },
+ HP{ .val = 1.000000e+106, .off = -9.103599905036843605e+89 },
+ HP{ .val = 1.000000e+105, .off = 6.174169917471802325e+88 },
+ HP{ .val = 1.000000e+104, .off = -1.915675085734668657e+86 },
+ HP{ .val = 1.000000e+103, .off = -1.915675085734668864e+85 },
+ HP{ .val = 1.000000e+102, .off = 2.295048673475466221e+85 },
+ HP{ .val = 1.000000e+101, .off = 2.295048673475466135e+84 },
+ HP{ .val = 1.000000e+100, .off = -1.590289110975991792e+83 },
+ HP{ .val = 1.000000e+99, .off = 3.266383119588331155e+82 },
+ HP{ .val = 1.000000e+98, .off = 2.309629754856292029e+80 },
+ HP{ .val = 1.000000e+97, .off = -7.357587384771124533e+80 },
+ HP{ .val = 1.000000e+96, .off = -4.986165397190889509e+79 },
+ HP{ .val = 1.000000e+95, .off = -2.021887912715594741e+78 },
+ HP{ .val = 1.000000e+94, .off = -2.021887912715594638e+77 },
+ HP{ .val = 1.000000e+93, .off = -4.337729697461918675e+76 },
+ HP{ .val = 1.000000e+92, .off = -4.337729697461918997e+75 },
+ HP{ .val = 1.000000e+91, .off = -7.956232486128049702e+74 },
+ HP{ .val = 1.000000e+90, .off = 3.351588728453609882e+73 },
+ HP{ .val = 1.000000e+89, .off = 5.246334248081951113e+71 },
+ HP{ .val = 1.000000e+88, .off = 4.058327554364963672e+71 },
+ HP{ .val = 1.000000e+87, .off = 4.058327554364963918e+70 },
+ HP{ .val = 1.000000e+86, .off = -1.463069523067487266e+69 },
+ HP{ .val = 1.000000e+85, .off = -1.463069523067487314e+68 },
+ HP{ .val = 1.000000e+84, .off = -5.776660989811589441e+67 },
+ HP{ .val = 1.000000e+83, .off = -3.080666323096525761e+66 },
+ HP{ .val = 1.000000e+82, .off = 3.659320343691134468e+65 },
+ HP{ .val = 1.000000e+81, .off = 7.871812010433421235e+64 },
+ HP{ .val = 1.000000e+80, .off = -2.660986470836727449e+61 },
+ HP{ .val = 1.000000e+79, .off = 3.264399249934044627e+62 },
+ HP{ .val = 1.000000e+78, .off = -8.493621433689703070e+60 },
+ HP{ .val = 1.000000e+77, .off = 1.721738727445414063e+60 },
+ HP{ .val = 1.000000e+76, .off = -4.706013449590547218e+59 },
+ HP{ .val = 1.000000e+75, .off = 7.346021882351880518e+58 },
+ HP{ .val = 1.000000e+74, .off = 4.835181188197207515e+57 },
+ HP{ .val = 1.000000e+73, .off = 1.696630320503867482e+56 },
+ HP{ .val = 1.000000e+72, .off = 5.619818905120542959e+55 },
+ HP{ .val = 1.000000e+71, .off = -4.188152556421145598e+54 },
+ HP{ .val = 1.000000e+70, .off = -7.253143638152923145e+53 },
+ HP{ .val = 1.000000e+69, .off = -7.253143638152923145e+52 },
+ HP{ .val = 1.000000e+68, .off = 4.719477774861832896e+51 },
+ HP{ .val = 1.000000e+67, .off = 1.726322421608144052e+50 },
+ HP{ .val = 1.000000e+66, .off = 5.467766613175255107e+49 },
+ HP{ .val = 1.000000e+65, .off = 7.909613737163661911e+47 },
+ HP{ .val = 1.000000e+64, .off = -2.132041900945439564e+47 },
+ HP{ .val = 1.000000e+63, .off = -5.785795994272697265e+46 },
+ HP{ .val = 1.000000e+62, .off = -3.502199685943161329e+45 },
+ HP{ .val = 1.000000e+61, .off = 5.061286470292598274e+44 },
+ HP{ .val = 1.000000e+60, .off = 5.061286470292598472e+43 },
+ HP{ .val = 1.000000e+59, .off = 2.831211950439536034e+42 },
+ HP{ .val = 1.000000e+58, .off = 5.618805100255863927e+41 },
+ HP{ .val = 1.000000e+57, .off = -4.834669211555366251e+40 },
+ HP{ .val = 1.000000e+56, .off = -9.190283508143378583e+39 },
+ HP{ .val = 1.000000e+55, .off = -1.023506702040855158e+38 },
+ HP{ .val = 1.000000e+54, .off = -7.829154040459624616e+37 },
+ HP{ .val = 1.000000e+53, .off = 6.779051325638372659e+35 },
+ HP{ .val = 1.000000e+52, .off = 6.779051325638372290e+34 },
+ HP{ .val = 1.000000e+51, .off = 6.779051325638371598e+33 },
+ HP{ .val = 1.000000e+50, .off = -7.629769841091887392e+33 },
+ HP{ .val = 1.000000e+49, .off = 5.350972305245182400e+32 },
+ HP{ .val = 1.000000e+48, .off = -4.384584304507619764e+31 },
+ HP{ .val = 1.000000e+47, .off = -4.384584304507619876e+30 },
+ HP{ .val = 1.000000e+46, .off = 6.860180964052978705e+28 },
+ HP{ .val = 1.000000e+45, .off = 7.024271097546444878e+28 },
+ HP{ .val = 1.000000e+44, .off = -8.821361405306422641e+27 },
+ HP{ .val = 1.000000e+43, .off = -1.393721169594140991e+26 },
+ HP{ .val = 1.000000e+42, .off = -4.488571267807591679e+25 },
+ HP{ .val = 1.000000e+41, .off = -6.200086450407783195e+23 },
+ HP{ .val = 1.000000e+40, .off = -3.037860284270036669e+23 },
+ HP{ .val = 1.000000e+39, .off = 6.029083362839682141e+22 },
+ HP{ .val = 1.000000e+38, .off = 2.251190176543965970e+21 },
+ HP{ .val = 1.000000e+37, .off = 4.612373417978788577e+20 },
+ HP{ .val = 1.000000e+36, .off = -4.242063737401796198e+19 },
+ HP{ .val = 1.000000e+35, .off = 3.136633892082024448e+18 },
+ HP{ .val = 1.000000e+34, .off = 5.442476901295718400e+17 },
+ HP{ .val = 1.000000e+33, .off = 5.442476901295718400e+16 },
+ HP{ .val = 1.000000e+32, .off = -5.366162204393472000e+15 },
+ HP{ .val = 1.000000e+31, .off = 3.641037050347520000e+14 },
+ HP{ .val = 1.000000e+30, .off = -1.988462483865600000e+13 },
+ HP{ .val = 1.000000e+29, .off = 8.566849142784000000e+12 },
+ HP{ .val = 1.000000e+28, .off = 4.168802631680000000e+11 },
+ HP{ .val = 1.000000e+27, .off = -1.328755507200000000e+10 },
+ HP{ .val = 1.000000e+26, .off = -4.764729344000000000e+09 },
+ HP{ .val = 1.000000e+25, .off = -9.059696640000000000e+08 },
+ HP{ .val = 1.000000e+24, .off = 1.677721600000000000e+07 },
+ HP{ .val = 1.000000e+23, .off = 8.388608000000000000e+06 },
+ HP{ .val = 1.000000e+22, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+21, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+20, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+19, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+18, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+17, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+16, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+15, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+14, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+13, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+12, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+11, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+10, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+09, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+08, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+07, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+06, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+05, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+04, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+03, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+02, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+01, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e+00, .off = 0.000000000000000000e+00 },
+ HP{ .val = 1.000000e-01, .off = -5.551115123125783010e-18 },
+ HP{ .val = 1.000000e-02, .off = -2.081668171172168436e-19 },
+ HP{ .val = 1.000000e-03, .off = -2.081668171172168557e-20 },
+ HP{ .val = 1.000000e-04, .off = -4.792173602385929943e-21 },
+ HP{ .val = 1.000000e-05, .off = -8.180305391403130547e-22 },
+ HP{ .val = 1.000000e-06, .off = 4.525188817411374069e-23 },
+ HP{ .val = 1.000000e-07, .off = 4.525188817411373922e-24 },
+ HP{ .val = 1.000000e-08, .off = -2.092256083012847109e-25 },
+ HP{ .val = 1.000000e-09, .off = -6.228159145777985254e-26 },
+ HP{ .val = 1.000000e-10, .off = -3.643219731549774344e-27 },
+ HP{ .val = 1.000000e-11, .off = 6.050303071806019080e-28 },
+ HP{ .val = 1.000000e-12, .off = 2.011335237074438524e-29 },
+ HP{ .val = 1.000000e-13, .off = -3.037374556340037101e-30 },
+ HP{ .val = 1.000000e-14, .off = 1.180690645440101289e-32 },
+ HP{ .val = 1.000000e-15, .off = -7.770539987666107583e-32 },
+ HP{ .val = 1.000000e-16, .off = 2.090221327596539779e-33 },
+ HP{ .val = 1.000000e-17, .off = -7.154242405462192144e-34 },
+ HP{ .val = 1.000000e-18, .off = -7.154242405462192572e-35 },
+ HP{ .val = 1.000000e-19, .off = 2.475407316473986894e-36 },
+ HP{ .val = 1.000000e-20, .off = 5.484672854579042914e-37 },
+ HP{ .val = 1.000000e-21, .off = 9.246254777210362522e-38 },
+ HP{ .val = 1.000000e-22, .off = -4.859677432657087182e-39 },
+ HP{ .val = 1.000000e-23, .off = 3.956530198510069291e-40 },
+ HP{ .val = 1.000000e-24, .off = 7.629950044829717753e-41 },
+ HP{ .val = 1.000000e-25, .off = -3.849486974919183692e-42 },
+ HP{ .val = 1.000000e-26, .off = -3.849486974919184170e-43 },
+ HP{ .val = 1.000000e-27, .off = -3.849486974919184070e-44 },
+ HP{ .val = 1.000000e-28, .off = 2.876745653839937870e-45 },
+ HP{ .val = 1.000000e-29, .off = 5.679342582489572168e-46 },
+ HP{ .val = 1.000000e-30, .off = -8.333642060758598930e-47 },
+ HP{ .val = 1.000000e-31, .off = -8.333642060758597958e-48 },
+ HP{ .val = 1.000000e-32, .off = -5.596730997624190224e-49 },
+ HP{ .val = 1.000000e-33, .off = -5.596730997624190604e-50 },
+ HP{ .val = 1.000000e-34, .off = 7.232539610818348498e-51 },
+ HP{ .val = 1.000000e-35, .off = -7.857545194582380514e-53 },
+ HP{ .val = 1.000000e-36, .off = 5.896157255772251528e-53 },
+ HP{ .val = 1.000000e-37, .off = -6.632427322784915796e-54 },
+ HP{ .val = 1.000000e-38, .off = 3.808059826012723592e-55 },
+ HP{ .val = 1.000000e-39, .off = 7.070712060011985131e-56 },
+ HP{ .val = 1.000000e-40, .off = 7.070712060011985584e-57 },
+ HP{ .val = 1.000000e-41, .off = -5.761291134237854167e-59 },
+ HP{ .val = 1.000000e-42, .off = -3.762312935688689794e-59 },
+ HP{ .val = 1.000000e-43, .off = -7.745042713519821150e-60 },
+ HP{ .val = 1.000000e-44, .off = 4.700987842202462817e-61 },
+ HP{ .val = 1.000000e-45, .off = 1.589480203271891964e-62 },
+ HP{ .val = 1.000000e-46, .off = -2.299904345391321765e-63 },
+ HP{ .val = 1.000000e-47, .off = 2.561826340437695261e-64 },
+ HP{ .val = 1.000000e-48, .off = 2.561826340437695345e-65 },
+ HP{ .val = 1.000000e-49, .off = 6.360053438741614633e-66 },
+ HP{ .val = 1.000000e-50, .off = -7.616223705782342295e-68 },
+ HP{ .val = 1.000000e-51, .off = -7.616223705782343324e-69 },
+ HP{ .val = 1.000000e-52, .off = -7.616223705782342295e-70 },
+ HP{ .val = 1.000000e-53, .off = -3.079876214757872338e-70 },
+ HP{ .val = 1.000000e-54, .off = -3.079876214757872821e-71 },
+ HP{ .val = 1.000000e-55, .off = 5.423954167728123147e-73 },
+ HP{ .val = 1.000000e-56, .off = -3.985444122640543680e-73 },
+ HP{ .val = 1.000000e-57, .off = 4.504255013759498850e-74 },
+ HP{ .val = 1.000000e-58, .off = -2.570494266573869991e-75 },
+ HP{ .val = 1.000000e-59, .off = -2.570494266573869930e-76 },
+ HP{ .val = 1.000000e-60, .off = 2.956653608686574324e-77 },
+ HP{ .val = 1.000000e-61, .off = -3.952281235388981376e-78 },
+ HP{ .val = 1.000000e-62, .off = -3.952281235388981376e-79 },
+ HP{ .val = 1.000000e-63, .off = -6.651083908855995172e-80 },
+ HP{ .val = 1.000000e-64, .off = 3.469426116645307030e-81 },
+ HP{ .val = 1.000000e-65, .off = 7.686305293937516319e-82 },
+ HP{ .val = 1.000000e-66, .off = 2.415206322322254927e-83 },
+ HP{ .val = 1.000000e-67, .off = 5.709643179581793251e-84 },
+ HP{ .val = 1.000000e-68, .off = -6.644495035141475923e-85 },
+ HP{ .val = 1.000000e-69, .off = 3.650620143794581913e-86 },
+ HP{ .val = 1.000000e-70, .off = 4.333966503770636492e-88 },
+ HP{ .val = 1.000000e-71, .off = 8.476455383920859113e-88 },
+ HP{ .val = 1.000000e-72, .off = 3.449543675455986564e-89 },
+ HP{ .val = 1.000000e-73, .off = 3.077238576654418974e-91 },
+ HP{ .val = 1.000000e-74, .off = 4.234998629903623140e-91 },
+ HP{ .val = 1.000000e-75, .off = 4.234998629903623412e-92 },
+ HP{ .val = 1.000000e-76, .off = 7.303182045714702338e-93 },
+ HP{ .val = 1.000000e-77, .off = 7.303182045714701699e-94 },
+ HP{ .val = 1.000000e-78, .off = 1.121271649074855759e-96 },
+ HP{ .val = 1.000000e-79, .off = 1.121271649074855863e-97 },
+ HP{ .val = 1.000000e-80, .off = 3.857468248661243988e-97 },
+ HP{ .val = 1.000000e-81, .off = 3.857468248661244248e-98 },
+ HP{ .val = 1.000000e-82, .off = 3.857468248661244410e-99 },
+ HP{ .val = 1.000000e-83, .off = -3.457651055545315679e-100 },
+ HP{ .val = 1.000000e-84, .off = -3.457651055545315933e-101 },
+ HP{ .val = 1.000000e-85, .off = 2.257285900866059216e-102 },
+ HP{ .val = 1.000000e-86, .off = -8.458220892405268345e-103 },
+ HP{ .val = 1.000000e-87, .off = -1.761029146610688867e-104 },
+ HP{ .val = 1.000000e-88, .off = 6.610460535632536565e-105 },
+ HP{ .val = 1.000000e-89, .off = -3.853901567171494935e-106 },
+ HP{ .val = 1.000000e-90, .off = 5.062493089968513723e-108 },
+ HP{ .val = 1.000000e-91, .off = -2.218844988608365240e-108 },
+ HP{ .val = 1.000000e-92, .off = 1.187522883398155383e-109 },
+ HP{ .val = 1.000000e-93, .off = 9.703442563414457296e-110 },
+ HP{ .val = 1.000000e-94, .off = 4.380992763404268896e-111 },
+ HP{ .val = 1.000000e-95, .off = 1.054461638397900823e-112 },
+ HP{ .val = 1.000000e-96, .off = 9.370789450913819736e-113 },
+ HP{ .val = 1.000000e-97, .off = -3.623472756142303998e-114 },
+ HP{ .val = 1.000000e-98, .off = 6.122223899149788839e-115 },
+ HP{ .val = 1.000000e-99, .off = -1.999189980260288281e-116 },
+ HP{ .val = 1.000000e-100, .off = -1.999189980260288281e-117 },
+ HP{ .val = 1.000000e-101, .off = -5.171617276904849634e-118 },
+ HP{ .val = 1.000000e-102, .off = 6.724985085512256320e-119 },
+ HP{ .val = 1.000000e-103, .off = 4.246526260008692213e-120 },
+ HP{ .val = 1.000000e-104, .off = 7.344599791888147003e-121 },
+ HP{ .val = 1.000000e-105, .off = 3.472007877038828407e-122 },
+ HP{ .val = 1.000000e-106, .off = 5.892377823819652194e-123 },
+ HP{ .val = 1.000000e-107, .off = -1.585470431324073925e-125 },
+ HP{ .val = 1.000000e-108, .off = -3.940375084977444795e-125 },
+ HP{ .val = 1.000000e-109, .off = 7.869099673288519908e-127 },
+ HP{ .val = 1.000000e-110, .off = -5.122196348054018581e-127 },
+ HP{ .val = 1.000000e-111, .off = -8.815387795168313713e-128 },
+ HP{ .val = 1.000000e-112, .off = 5.034080131510290214e-129 },
+ HP{ .val = 1.000000e-113, .off = 2.148774313452247863e-130 },
+ HP{ .val = 1.000000e-114, .off = -5.064490231692858416e-131 },
+ HP{ .val = 1.000000e-115, .off = -5.064490231692858166e-132 },
+ HP{ .val = 1.000000e-116, .off = 5.708726942017560559e-134 },
+ HP{ .val = 1.000000e-117, .off = -2.951229134482377772e-134 },
+ HP{ .val = 1.000000e-118, .off = 1.451398151372789513e-135 },
+ HP{ .val = 1.000000e-119, .off = -1.300243902286690040e-136 },
+ HP{ .val = 1.000000e-120, .off = 2.139308664787659449e-137 },
+ HP{ .val = 1.000000e-121, .off = 2.139308664787659329e-138 },
+ HP{ .val = 1.000000e-122, .off = -5.922142664292847471e-139 },
+ HP{ .val = 1.000000e-123, .off = -5.922142664292846912e-140 },
+ HP{ .val = 1.000000e-124, .off = 6.673875037395443799e-141 },
+ HP{ .val = 1.000000e-125, .off = -1.198636026159737932e-142 },
+ HP{ .val = 1.000000e-126, .off = 5.361789860136246995e-143 },
+ HP{ .val = 1.000000e-127, .off = -2.838742497733733936e-144 },
+ HP{ .val = 1.000000e-128, .off = -5.401408859568103261e-145 },
+ HP{ .val = 1.000000e-129, .off = 7.411922949603743011e-146 },
+ HP{ .val = 1.000000e-130, .off = -8.604741811861064385e-147 },
+ HP{ .val = 1.000000e-131, .off = 1.405673664054439890e-148 },
+ HP{ .val = 1.000000e-132, .off = 1.405673664054439933e-149 },
+ HP{ .val = 1.000000e-133, .off = -6.414963426504548053e-150 },
+ HP{ .val = 1.000000e-134, .off = -3.971014335704864578e-151 },
+ HP{ .val = 1.000000e-135, .off = -3.971014335704864748e-152 },
+ HP{ .val = 1.000000e-136, .off = -1.523438813303585576e-154 },
+ HP{ .val = 1.000000e-137, .off = 2.234325152653707766e-154 },
+ HP{ .val = 1.000000e-138, .off = -6.715683724786540160e-155 },
+ HP{ .val = 1.000000e-139, .off = -2.986513359186437306e-156 },
+ HP{ .val = 1.000000e-140, .off = 1.674949597813692102e-157 },
+ HP{ .val = 1.000000e-141, .off = -4.151879098436469092e-158 },
+ HP{ .val = 1.000000e-142, .off = -4.151879098436469295e-159 },
+ HP{ .val = 1.000000e-143, .off = 4.952540739454407825e-160 },
+ HP{ .val = 1.000000e-144, .off = 4.952540739454407667e-161 },
+ HP{ .val = 1.000000e-145, .off = 8.508954738630531443e-162 },
+ HP{ .val = 1.000000e-146, .off = -2.604839008794855481e-163 },
+ HP{ .val = 1.000000e-147, .off = 2.952057864917838382e-164 },
+ HP{ .val = 1.000000e-148, .off = 6.425118410988271757e-165 },
+ HP{ .val = 1.000000e-149, .off = 2.083792728400229858e-166 },
+ HP{ .val = 1.000000e-150, .off = -6.295358232172964237e-168 },
+ HP{ .val = 1.000000e-151, .off = 6.153785555826519421e-168 },
+ HP{ .val = 1.000000e-152, .off = -6.564942029880634994e-169 },
+ HP{ .val = 1.000000e-153, .off = -3.915207116191644540e-170 },
+ HP{ .val = 1.000000e-154, .off = 2.709130168030831503e-171 },
+ HP{ .val = 1.000000e-155, .off = -1.431080634608215966e-172 },
+ HP{ .val = 1.000000e-156, .off = -4.018712386257620994e-173 },
+ HP{ .val = 1.000000e-157, .off = 5.684906682427646782e-174 },
+ HP{ .val = 1.000000e-158, .off = -6.444617153428937489e-175 },
+ HP{ .val = 1.000000e-159, .off = 1.136335243981427681e-176 },
+ HP{ .val = 1.000000e-160, .off = 1.136335243981427725e-177 },
+ HP{ .val = 1.000000e-161, .off = -2.812077463003137395e-178 },
+ HP{ .val = 1.000000e-162, .off = 4.591196362592922204e-179 },
+ HP{ .val = 1.000000e-163, .off = 7.675893789924613703e-180 },
+ HP{ .val = 1.000000e-164, .off = 3.820022005759999543e-181 },
+ HP{ .val = 1.000000e-165, .off = -9.998177244457686588e-183 },
+ HP{ .val = 1.000000e-166, .off = -4.012217555824373639e-183 },
+ HP{ .val = 1.000000e-167, .off = -2.467177666011174334e-185 },
+ HP{ .val = 1.000000e-168, .off = -4.953592503130188139e-185 },
+ HP{ .val = 1.000000e-169, .off = -2.011795792799518887e-186 },
+ HP{ .val = 1.000000e-170, .off = 1.665450095113817423e-187 },
+ HP{ .val = 1.000000e-171, .off = 1.665450095113817487e-188 },
+ HP{ .val = 1.000000e-172, .off = -4.080246604750770577e-189 },
+ HP{ .val = 1.000000e-173, .off = -4.080246604750770677e-190 },
+ HP{ .val = 1.000000e-174, .off = 4.085789420184387951e-192 },
+ HP{ .val = 1.000000e-175, .off = 4.085789420184388146e-193 },
+ HP{ .val = 1.000000e-176, .off = 4.085789420184388146e-194 },
+ HP{ .val = 1.000000e-177, .off = 4.792197640035244894e-194 },
+ HP{ .val = 1.000000e-178, .off = 4.792197640035244742e-195 },
+ HP{ .val = 1.000000e-179, .off = -2.057206575616014662e-196 },
+ HP{ .val = 1.000000e-180, .off = -2.057206575616014662e-197 },
+ HP{ .val = 1.000000e-181, .off = -4.732755097354788053e-198 },
+ HP{ .val = 1.000000e-182, .off = -4.732755097354787867e-199 },
+ HP{ .val = 1.000000e-183, .off = -5.522105321379546765e-201 },
+ HP{ .val = 1.000000e-184, .off = -5.777891238658996019e-201 },
+ HP{ .val = 1.000000e-185, .off = 7.542096444923057046e-203 },
+ HP{ .val = 1.000000e-186, .off = 8.919335748431433483e-203 },
+ HP{ .val = 1.000000e-187, .off = -1.287071881492476028e-204 },
+ HP{ .val = 1.000000e-188, .off = 5.091932887209967018e-205 },
+ HP{ .val = 1.000000e-189, .off = -6.868701054107114024e-206 },
+ HP{ .val = 1.000000e-190, .off = -1.885103578558330118e-207 },
+ HP{ .val = 1.000000e-191, .off = -1.885103578558330205e-208 },
+ HP{ .val = 1.000000e-192, .off = -9.671974634103305058e-209 },
+ HP{ .val = 1.000000e-193, .off = -4.805180224387695640e-210 },
+ HP{ .val = 1.000000e-194, .off = -1.763433718315439838e-211 },
+ HP{ .val = 1.000000e-195, .off = -9.367799983496079132e-212 },
+ HP{ .val = 1.000000e-196, .off = -4.615071067758179837e-213 },
+ HP{ .val = 1.000000e-197, .off = 1.325840076914194777e-214 },
+ HP{ .val = 1.000000e-198, .off = 8.751979007754662425e-215 },
+ HP{ .val = 1.000000e-199, .off = 1.789973760091724198e-216 },
+ HP{ .val = 1.000000e-200, .off = 1.789973760091724077e-217 },
+ HP{ .val = 1.000000e-201, .off = 5.416018159916171171e-218 },
+ HP{ .val = 1.000000e-202, .off = -3.649092839644947067e-219 },
+ HP{ .val = 1.000000e-203, .off = -3.649092839644947067e-220 },
+ HP{ .val = 1.000000e-204, .off = -1.080338554413850956e-222 },
+ HP{ .val = 1.000000e-205, .off = -1.080338554413850841e-223 },
+ HP{ .val = 1.000000e-206, .off = -2.874486186850417807e-223 },
+ HP{ .val = 1.000000e-207, .off = 7.499710055933455072e-224 },
+ HP{ .val = 1.000000e-208, .off = -9.790617015372999087e-225 },
+ HP{ .val = 1.000000e-209, .off = -4.387389805589732612e-226 },
+ HP{ .val = 1.000000e-210, .off = -4.387389805589732612e-227 },
+ HP{ .val = 1.000000e-211, .off = -8.608661063232909897e-228 },
+ HP{ .val = 1.000000e-212, .off = 4.582811616902018972e-229 },
+ HP{ .val = 1.000000e-213, .off = 4.582811616902019155e-230 },
+ HP{ .val = 1.000000e-214, .off = 8.705146829444184930e-231 },
+ HP{ .val = 1.000000e-215, .off = -4.177150709750081830e-232 },
+ HP{ .val = 1.000000e-216, .off = -4.177150709750082366e-233 },
+ HP{ .val = 1.000000e-217, .off = -8.202868690748290237e-234 },
+ HP{ .val = 1.000000e-218, .off = -3.170721214500530119e-235 },
+ HP{ .val = 1.000000e-219, .off = -3.170721214500529857e-236 },
+ HP{ .val = 1.000000e-220, .off = 7.606440013180328441e-238 },
+ HP{ .val = 1.000000e-221, .off = -1.696459258568569049e-238 },
+ HP{ .val = 1.000000e-222, .off = -4.767838333426821244e-239 },
+ HP{ .val = 1.000000e-223, .off = 2.910609353718809138e-240 },
+ HP{ .val = 1.000000e-224, .off = -1.888420450747209784e-241 },
+ HP{ .val = 1.000000e-225, .off = 4.110366804835314035e-242 },
+ HP{ .val = 1.000000e-226, .off = 7.859608839574391006e-243 },
+ HP{ .val = 1.000000e-227, .off = 5.516332567862468419e-244 },
+ HP{ .val = 1.000000e-228, .off = -3.270953451057244613e-245 },
+ HP{ .val = 1.000000e-229, .off = -6.932322625607124670e-246 },
+ HP{ .val = 1.000000e-230, .off = -4.643966891513449762e-247 },
+ HP{ .val = 1.000000e-231, .off = 1.076922443720738305e-248 },
+ HP{ .val = 1.000000e-232, .off = -2.498633390800628939e-249 },
+ HP{ .val = 1.000000e-233, .off = 4.205533798926934891e-250 },
+ HP{ .val = 1.000000e-234, .off = 4.205533798926934891e-251 },
+ HP{ .val = 1.000000e-235, .off = 4.205533798926934697e-252 },
+ HP{ .val = 1.000000e-236, .off = -4.523850562697497656e-253 },
+ HP{ .val = 1.000000e-237, .off = 9.320146633177728298e-255 },
+ HP{ .val = 1.000000e-238, .off = 9.320146633177728062e-256 },
+ HP{ .val = 1.000000e-239, .off = -7.592774752331086440e-256 },
+ HP{ .val = 1.000000e-240, .off = 3.063212017229987840e-257 },
+ HP{ .val = 1.000000e-241, .off = 3.063212017229987562e-258 },
+ HP{ .val = 1.000000e-242, .off = 3.063212017229987562e-259 },
+ HP{ .val = 1.000000e-243, .off = 4.616527473176159842e-261 },
+ HP{ .val = 1.000000e-244, .off = 6.965550922098544975e-261 },
+ HP{ .val = 1.000000e-245, .off = 6.965550922098544749e-262 },
+ HP{ .val = 1.000000e-246, .off = 4.424965697574744679e-263 },
+ HP{ .val = 1.000000e-247, .off = -1.926497363734756420e-264 },
+ HP{ .val = 1.000000e-248, .off = 2.043167049583681740e-265 },
+ HP{ .val = 1.000000e-249, .off = -5.399953725388390154e-266 },
+ HP{ .val = 1.000000e-250, .off = -5.399953725388389982e-267 },
+ HP{ .val = 1.000000e-251, .off = -1.523328321757102663e-268 },
+ HP{ .val = 1.000000e-252, .off = 5.745344310051561161e-269 },
+ HP{ .val = 1.000000e-253, .off = -6.369110076296211879e-270 },
+ HP{ .val = 1.000000e-254, .off = 8.773957906638504842e-271 },
+ HP{ .val = 1.000000e-255, .off = -6.904595826956931908e-273 },
+ HP{ .val = 1.000000e-256, .off = 2.267170882721243669e-273 },
+ HP{ .val = 1.000000e-257, .off = 2.267170882721243669e-274 },
+ HP{ .val = 1.000000e-258, .off = 4.577819683828225398e-275 },
+ HP{ .val = 1.000000e-259, .off = -6.975424321706684210e-276 },
+ HP{ .val = 1.000000e-260, .off = 3.855741933482293648e-277 },
+ HP{ .val = 1.000000e-261, .off = 1.599248963651256552e-278 },
+ HP{ .val = 1.000000e-262, .off = -1.221367248637539543e-279 },
+ HP{ .val = 1.000000e-263, .off = -1.221367248637539494e-280 },
+ HP{ .val = 1.000000e-264, .off = -1.221367248637539647e-281 },
+ HP{ .val = 1.000000e-265, .off = 1.533140771175737943e-282 },
+ HP{ .val = 1.000000e-266, .off = 1.533140771175737895e-283 },
+ HP{ .val = 1.000000e-267, .off = 1.533140771175738074e-284 },
+ HP{ .val = 1.000000e-268, .off = 4.223090009274641634e-285 },
+ HP{ .val = 1.000000e-269, .off = 4.223090009274641634e-286 },
+ HP{ .val = 1.000000e-270, .off = -4.183001359784432924e-287 },
+ HP{ .val = 1.000000e-271, .off = 3.697709298708449474e-288 },
+ HP{ .val = 1.000000e-272, .off = 6.981338739747150474e-289 },
+ HP{ .val = 1.000000e-273, .off = -9.436808465446354751e-290 },
+ HP{ .val = 1.000000e-274, .off = 3.389869038611071740e-291 },
+ HP{ .val = 1.000000e-275, .off = 6.596538414625427829e-292 },
+ HP{ .val = 1.000000e-276, .off = -9.436808465446354618e-293 },
+ HP{ .val = 1.000000e-277, .off = 3.089243784609725523e-294 },
+ HP{ .val = 1.000000e-278, .off = 6.220756847123745836e-295 },
+ HP{ .val = 1.000000e-279, .off = -5.522417137303829470e-296 },
+ HP{ .val = 1.000000e-280, .off = 4.263561183052483059e-297 },
+ HP{ .val = 1.000000e-281, .off = -1.852675267170212272e-298 },
+ HP{ .val = 1.000000e-282, .off = -1.852675267170212378e-299 },
+ HP{ .val = 1.000000e-283, .off = 5.314789322934508480e-300 },
+ HP{ .val = 1.000000e-284, .off = -3.644541414696392675e-301 },
+ HP{ .val = 1.000000e-285, .off = -7.377595888709267777e-302 },
+ HP{ .val = 1.000000e-286, .off = -5.044436842451220838e-303 },
+ HP{ .val = 1.000000e-287, .off = -2.127988034628661760e-304 },
+ HP{ .val = 1.000000e-288, .off = -5.773549044406860911e-305 },
+ HP{ .val = 1.000000e-289, .off = -1.216597782184112068e-306 },
+ HP{ .val = 1.000000e-290, .off = -6.912786859962547924e-307 },
+ HP{ .val = 1.000000e-291, .off = 3.767567660872018813e-308 },
};
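// A sketch for reference (not part of the errol implementation): each entry in the table
// above stores a power of ten as a pair of doubles, where the exact value is approximately
// .val + .off and .off carries the bits of 10^n that do not fit in a single f64. The
// hypothetical helper below only illustrates how such a pair could be consumed; errol's
// real arithmetic is more involved, splitting and re-normalizing products to keep the
// extra precision.
const HP = struct {
    val: f64,
    off: f64,
};

// Scale x by a high-precision power of ten, folding in the correction term.
fn hpScale(x: f64, hp: HP) f64 {
    return x * hp.val + x * hp.off;
}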
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index cfdd70e95b..f4f9efee37 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -4,36 +4,26 @@ const debug = std.debug;
const assert = debug.assert;
const mem = std.mem;
const builtin = @import("builtin");
-const errol3 = @import("errol/index.zig").errol3;
+const errol = @import("errol/index.zig");
+const lossyCast = std.math.lossyCast;
const max_int_digits = 65;
-const State = enum { // TODO put inside format function and make sure the name and debug info is correct
- Start,
- OpenBrace,
- CloseBrace,
- Integer,
- IntegerWidth,
- Float,
- FloatWidth,
- Character,
- Buf,
- BufWidth,
-};
-
/// Renders fmt string with args, calling output with slices of bytes.
/// If `output` returns an error, the error is returned from `format` and
/// `output` is not called again.
-pub fn format(context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8) Errors!void,
- comptime fmt: []const u8, args: ...) Errors!void
-{
+pub fn format(context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void, comptime fmt: []const u8, args: ...) Errors!void {
+ const State = enum {
+ Start,
+ OpenBrace,
+ CloseBrace,
+ FormatString,
+ Pointer,
+ };
+
comptime var start_index = 0;
comptime var state = State.Start;
comptime var next_arg = 0;
- comptime var radix = 0;
- comptime var uppercase = false;
- comptime var width = 0;
- comptime var width_start = 0;
inline for (fmt) |c, i| {
switch (state) {
@@ -42,8 +32,10 @@ pub fn format(context: var, comptime Errors: type, output: fn(@typeOf(context),
if (start_index < i) {
try output(context, fmt[start_index..i]);
}
+ start_index = i;
state = State.OpenBrace;
},
+
'}' => {
if (start_index < i) {
try output(context, fmt[start_index..i]);
@@ -58,48 +50,15 @@ pub fn format(context: var, comptime Errors: type, output: fn(@typeOf(context),
start_index = i;
},
'}' => {
- try formatValue(args[next_arg], context, Errors, output);
+ try formatType(args[next_arg], fmt[0..0], context, Errors, output);
next_arg += 1;
state = State.Start;
start_index = i + 1;
},
- 'd' => {
- radix = 10;
- uppercase = false;
- width = 0;
- state = State.Integer;
+ '*' => state = State.Pointer,
+ else => {
+ state = State.FormatString;
},
- 'x' => {
- radix = 16;
- uppercase = false;
- width = 0;
- state = State.Integer;
- },
- 'X' => {
- radix = 16;
- uppercase = true;
- width = 0;
- state = State.Integer;
- },
- 'c' => {
- state = State.Character;
- },
- 's' => {
- state = State.Buf;
- },'.' => {
- state = State.Float;
- },
- else => @compileError("Unknown format character: " ++ []u8{c}),
- },
- State.Buf => switch (c) {
- '}' => {
- return output(context, args[next_arg]);
- },
- '0' ... '9' => {
- width_start = i;
- state = State.BufWidth;
- },
- else => @compileError("Unexpected character in format string: " ++ []u8{c}),
},
State.CloseBrace => switch (c) {
'}' => {
@@ -108,73 +67,26 @@ pub fn format(context: var, comptime Errors: type, output: fn(@typeOf(context),
},
else => @compileError("Single '}' encountered in format string"),
},
- State.Integer => switch (c) {
+ State.FormatString => switch (c) {
'}' => {
- try formatInt(args[next_arg], radix, uppercase, width, context, Errors, output);
+ const s = start_index + 1;
+ try formatType(args[next_arg], fmt[s..i], context, Errors, output);
next_arg += 1;
state = State.Start;
start_index = i + 1;
},
- '0' ... '9' => {
- width_start = i;
- state = State.IntegerWidth;
- },
- else => @compileError("Unexpected character in format string: " ++ []u8{c}),
+ else => {},
},
- State.IntegerWidth => switch (c) {
+ State.Pointer => switch (c) {
'}' => {
- width = comptime (parseUnsigned(usize, fmt[width_start..i], 10) catch unreachable);
- try formatInt(args[next_arg], radix, uppercase, width, context, Errors, output);
+ try output(context, @typeName(@typeOf(args[next_arg]).Child));
+ try output(context, "@");
+ try formatInt(@ptrToInt(args[next_arg]), 16, false, 0, context, Errors, output);
next_arg += 1;
state = State.Start;
start_index = i + 1;
},
- '0' ... '9' => {},
- else => @compileError("Unexpected character in format string: " ++ []u8{c}),
- },
- State.Float => switch (c) {
- '}' => {
- try formatFloatDecimal(args[next_arg], 0, context, Errors, output);
- next_arg += 1;
- state = State.Start;
- start_index = i + 1;
- },
- '0' ... '9' => {
- width_start = i;
- state = State.FloatWidth;
- },
- else => @compileError("Unexpected character in format string: " ++ []u8{c}),
- },
- State.FloatWidth => switch (c) {
- '}' => {
- width = comptime (parseUnsigned(usize, fmt[width_start..i], 10) catch unreachable);
- try formatFloatDecimal(args[next_arg], width, context, Errors, output);
- next_arg += 1;
- state = State.Start;
- start_index = i + 1;
- },
- '0' ... '9' => {},
- else => @compileError("Unexpected character in format string: " ++ []u8{c}),
- },
- State.BufWidth => switch (c) {
- '}' => {
- width = comptime (parseUnsigned(usize, fmt[width_start..i], 10) catch unreachable);
- try formatBuf(args[next_arg], width, context, Errors, output);
- next_arg += 1;
- state = State.Start;
- start_index = i + 1;
- },
- '0' ... '9' => {},
- else => @compileError("Unexpected character in format string: " ++ []u8{c}),
- },
- State.Character => switch (c) {
- '}' => {
- try formatAsciiChar(args[next_arg], context, Errors, output);
- next_arg += 1;
- state = State.Start;
- start_index = i + 1;
- },
- else => @compileError("Unexpected character in format string: " ++ []u8{c}),
+ else => @compileError("Unexpected format character after '*'"),
},
}
}
@@ -191,14 +103,21 @@ pub fn format(context: var, comptime Errors: type, output: fn(@typeOf(context),
}
}
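// A minimal usage sketch (not part of the patch) of the per-argument format strings parsed
// above: everything between '{' and '}' is forwarded to formatType, so a leading character
// selects the formatter ('x' hex, '.' fixed-point, 's' string, 'c' char, 'b' binary,
// 'B'/'Bi' bytes, '*' pointer) and any trailing digits give the width or precision.
const std = @import("std");

test "format string usage sketch" {
    var buf: [32]u8 = undefined;
    std.debug.assert(std.mem.eql(u8, try std.fmt.bufPrint(buf[0..], "{x}", u8(255)), "ff"));
    std.debug.assert(std.mem.eql(u8, try std.fmt.bufPrint(buf[0..], "{.2}", f32(1.5)), "1.50"));
    std.debug.assert(std.mem.eql(u8, try std.fmt.bufPrint(buf[0..], "{s5}", "Test"), "Test "));
}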
-pub fn formatValue(value: var, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8)Errors!void) Errors!void {
+pub fn formatType(
+ value: var,
+ comptime fmt: []const u8,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
const T = @typeOf(value);
- switch (@typeId(T)) {
- builtin.TypeId.Int => {
- return formatInt(value, 10, false, 0, context, Errors, output);
- },
- builtin.TypeId.Float => {
- return formatFloat(value, context, Errors, output);
+ if (T == error) {
+ try output(context, "error.");
+ return output(context, @errorName(value));
+ }
+ switch (@typeInfo(T)) {
+ builtin.TypeId.Int, builtin.TypeId.Float => {
+ return formatValue(value, fmt, context, Errors, output);
},
builtin.TypeId.Void => {
return output(context, "void");
@@ -206,138 +125,511 @@ pub fn formatValue(value: var, context: var, comptime Errors: type, output: fn(@
builtin.TypeId.Bool => {
return output(context, if (value) "true" else "false");
},
- builtin.TypeId.Nullable => {
+ builtin.TypeId.Optional => {
if (value) |payload| {
- return formatValue(payload, context, Errors, output);
+ return formatType(payload, fmt, context, Errors, output);
} else {
return output(context, "null");
}
},
builtin.TypeId.ErrorUnion => {
if (value) |payload| {
- return formatValue(payload, context, Errors, output);
+ return formatType(payload, fmt, context, Errors, output);
} else |err| {
- return formatValue(err, context, Errors, output);
+ return formatType(err, fmt, context, Errors, output);
}
},
builtin.TypeId.ErrorSet => {
try output(context, "error.");
return output(context, @errorName(value));
},
- builtin.TypeId.Pointer => {
- if (@typeId(T.Child) == builtin.TypeId.Array and T.Child.Child == u8) {
- return output(context, (*value)[0..]);
- } else {
+ builtin.TypeId.Promise => {
+ return format(context, Errors, output, "promise@{x}", @ptrToInt(value));
+ },
+ builtin.TypeId.Pointer => |ptr_info| switch (ptr_info.size) {
+ builtin.TypeInfo.Pointer.Size.One => switch (@typeInfo(ptr_info.child)) {
+ builtin.TypeId.Array => |info| {
+ if (info.child == u8) {
+ return formatText(value, fmt, context, Errors, output);
+ }
+ return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
+ },
+ builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => {
+ const has_cust_fmt = comptime cf: {
+ const info = @typeInfo(T.Child);
+ const defs = switch (info) {
+ builtin.TypeId.Struct => |s| s.defs,
+ builtin.TypeId.Union => |u| u.defs,
+ builtin.TypeId.Enum => |e| e.defs,
+ else => unreachable,
+ };
+
+ for (defs) |def| {
+ if (mem.eql(u8, def.name, "format")) {
+ break :cf true;
+ }
+ }
+ break :cf false;
+ };
+
+ if (has_cust_fmt) return value.format(fmt, context, Errors, output);
+ return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
+ },
+ else => return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)),
+ },
+ builtin.TypeInfo.Pointer.Size.Many => {
+ if (ptr_info.child == u8) {
+ if (fmt[0] == 's') {
+ const len = std.cstr.len(value);
+ return formatText(value[0..len], fmt, context, Errors, output);
+ }
+ }
return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
+ },
+ builtin.TypeInfo.Pointer.Size.Slice => {
+ const casted_value = ([]const u8)(value);
+ return output(context, casted_value);
+ },
+ },
+ builtin.TypeId.Array => |info| {
+ if (info.child == u8) {
+ return formatText(value, fmt, context, Errors, output);
}
+ return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(&value));
},
- else => if (@canImplicitCast([]const u8, value)) {
- const casted_value = ([]const u8)(value);
- return output(context, casted_value);
- } else {
- @compileError("Unable to format type '" ++ @typeName(T) ++ "'");
- },
+ else => @compileError("Unable to format type '" ++ @typeName(T) ++ "'"),
}
}
-pub fn formatAsciiChar(c: u8, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8)Errors!void) Errors!void {
- return output(context, (&c)[0..1]);
+fn formatValue(
+ value: var,
+ comptime fmt: []const u8,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
+ if (fmt.len > 0) {
+ if (fmt[0] == 'B') {
+ comptime var width: ?usize = null;
+ if (fmt.len > 1) {
+ if (fmt[1] == 'i') {
+ if (fmt.len > 2) width = comptime (parseUnsigned(usize, fmt[2..], 10) catch unreachable);
+ return formatBytes(value, width, 1024, context, Errors, output);
+ }
+ width = comptime (parseUnsigned(usize, fmt[1..], 10) catch unreachable);
+ }
+ return formatBytes(value, width, 1000, context, Errors, output);
+ }
+ }
+
+ comptime var T = @typeOf(value);
+ switch (@typeId(T)) {
+ builtin.TypeId.Float => return formatFloatValue(value, fmt, context, Errors, output),
+ builtin.TypeId.Int => return formatIntValue(value, fmt, context, Errors, output),
+ else => unreachable,
+ }
}
-pub fn formatBuf(buf: []const u8, width: usize,
- context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8)Errors!void) Errors!void
-{
+pub fn formatIntValue(
+ value: var,
+ comptime fmt: []const u8,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
+ comptime var radix = 10;
+ comptime var uppercase = false;
+ comptime var width = 0;
+ if (fmt.len > 0) {
+ switch (fmt[0]) {
+ 'c' => {
+ if (@typeOf(value) == u8) {
+ if (fmt.len > 1) @compileError("Unknown format character: " ++ []u8{fmt[1]});
+ return formatAsciiChar(value, context, Errors, output);
+ }
+ },
+ 'b' => {
+ radix = 2;
+ uppercase = false;
+ width = 0;
+ },
+ 'd' => {
+ radix = 10;
+ uppercase = false;
+ width = 0;
+ },
+ 'x' => {
+ radix = 16;
+ uppercase = false;
+ width = 0;
+ },
+ 'X' => {
+ radix = 16;
+ uppercase = true;
+ width = 0;
+ },
+ else => @compileError("Unknown format character: " ++ []u8{fmt[0]}),
+ }
+ if (fmt.len > 1) width = comptime (parseUnsigned(usize, fmt[1..], 10) catch unreachable);
+ }
+ return formatInt(value, radix, uppercase, width, context, Errors, output);
+}
+
+fn formatFloatValue(
+ value: var,
+ comptime fmt: []const u8,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
+ comptime var width: ?usize = null;
+ comptime var float_fmt = 'e';
+ if (fmt.len > 0) {
+ float_fmt = fmt[0];
+ if (fmt.len > 1) width = comptime (parseUnsigned(usize, fmt[1..], 10) catch unreachable);
+ }
+
+ switch (float_fmt) {
+ 'e' => try formatFloatScientific(value, width, context, Errors, output),
+ '.' => try formatFloatDecimal(value, width, context, Errors, output),
+ else => @compileError("Unknown format character: " ++ []u8{float_fmt}),
+ }
+}
+
+pub fn formatText(
+ bytes: []const u8,
+ comptime fmt: []const u8,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
+ if (fmt.len > 0) {
+ if (fmt[0] == 's') {
+ comptime var width = 0;
+ if (fmt.len > 1) width = comptime (parseUnsigned(usize, fmt[1..], 10) catch unreachable);
+ return formatBuf(bytes, width, context, Errors, output);
+ } else @compileError("Unknown format character: " ++ []u8{fmt[0]});
+ }
+ return output(context, bytes);
+}
+
+pub fn formatAsciiChar(
+ c: u8,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
+ return output(context, (*[1]u8)(&c)[0..]);
+}
+
+pub fn formatBuf(
+ buf: []const u8,
+ width: usize,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
try output(context, buf);
var leftover_padding = if (width > buf.len) (width - buf.len) else return;
const pad_byte: u8 = ' ';
while (leftover_padding > 0) : (leftover_padding -= 1) {
- try output(context, (&pad_byte)[0..1]);
+ try output(context, (*[1]u8)(&pad_byte)[0..1]);
}
}
-pub fn formatFloat(value: var, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8)Errors!void) Errors!void {
- var x = f64(value);
+// Print a float in scientific notation to the specified precision. Null uses full precision.
+// It should be the case that every value printed at full precision can be re-parsed back
+// to the same type unambiguously.
+pub fn formatFloatScientific(
+ value: var,
+ maybe_precision: ?usize,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
+ var x = @floatCast(f64, value);
// Errol doesn't handle these special cases.
- if (math.isNan(x)) {
- return output(context, "NaN");
- }
if (math.signbit(x)) {
try output(context, "-");
x = -x;
}
+
+ if (math.isNan(x)) {
+ return output(context, "nan");
+ }
if (math.isPositiveInf(x)) {
- return output(context, "Infinity");
+ return output(context, "inf");
}
if (x == 0.0) {
- return output(context, "0.0");
+ try output(context, "0");
+
+ if (maybe_precision) |precision| {
+ if (precision != 0) {
+ try output(context, ".");
+ var i: usize = 0;
+ while (i < precision) : (i += 1) {
+ try output(context, "0");
+ }
+ }
+ } else {
+ try output(context, ".0");
+ }
+
+ try output(context, "e+00");
+ return;
}
var buffer: [32]u8 = undefined;
- const float_decimal = errol3(x, buffer[0..]);
- try output(context, float_decimal.digits[0..1]);
- try output(context, ".");
- if (float_decimal.digits.len > 1) {
- const num_digits = if (@typeOf(value) == f32)
- math.min(usize(9), float_decimal.digits.len)
- else
- float_decimal.digits.len;
- try output(context, float_decimal.digits[1 .. num_digits]);
+ var float_decimal = errol.errol3(x, buffer[0..]);
+
+ if (maybe_precision) |precision| {
+ errol.roundToPrecision(&float_decimal, precision, errol.RoundMode.Scientific);
+
+ try output(context, float_decimal.digits[0..1]);
+
+ // {e0} case prints no `.`
+ if (precision != 0) {
+ try output(context, ".");
+
+ var printed: usize = 0;
+ if (float_decimal.digits.len > 1) {
+ const num_digits = math.min(float_decimal.digits.len, precision + 1);
+ try output(context, float_decimal.digits[1..num_digits]);
+ printed += num_digits - 1;
+ }
+
+ while (printed < precision) : (printed += 1) {
+ try output(context, "0");
+ }
+ }
} else {
- try output(context, "0");
+ try output(context, float_decimal.digits[0..1]);
+ try output(context, ".");
+ if (float_decimal.digits.len > 1) {
+ const num_digits = if (@typeOf(value) == f32) math.min(usize(9), float_decimal.digits.len) else float_decimal.digits.len;
+
+ try output(context, float_decimal.digits[1..num_digits]);
+ } else {
+ try output(context, "0");
+ }
}
- if (float_decimal.exp != 1) {
- try output(context, "e");
- try formatInt(float_decimal.exp - 1, 10, false, 0, context, Errors, output);
+ try output(context, "e");
+ const exp = float_decimal.exp - 1;
+
+ if (exp >= 0) {
+ try output(context, "+");
+ if (exp > -10 and exp < 10) {
+ try output(context, "0");
+ }
+ try formatInt(exp, 10, false, 0, context, Errors, output);
+ } else {
+ try output(context, "-");
+ if (exp > -10 and exp < 10) {
+ try output(context, "0");
+ }
+ try formatInt(-exp, 10, false, 0, context, Errors, output);
}
}
-pub fn formatFloatDecimal(value: var, precision: usize, context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8)Errors!void) Errors!void {
+// Print a float of the form x.yyyyy, where the number of y digits is specified by the precision argument.
+// By default floats are printed at full precision (no rounding).
+pub fn formatFloatDecimal(
+ value: var,
+ maybe_precision: ?usize,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
var x = f64(value);
// Errol doesn't handle these special cases.
- if (math.isNan(x)) {
- return output(context, "NaN");
- }
if (math.signbit(x)) {
try output(context, "-");
x = -x;
}
+
+ if (math.isNan(x)) {
+ return output(context, "nan");
+ }
if (math.isPositiveInf(x)) {
- return output(context, "Infinity");
+ return output(context, "inf");
}
if (x == 0.0) {
- return output(context, "0.0");
+ try output(context, "0");
+
+ if (maybe_precision) |precision| {
+ if (precision != 0) {
+ try output(context, ".");
+ var i: usize = 0;
+ while (i < precision) : (i += 1) {
+ try output(context, "0");
+ }
+ } else {
+ try output(context, ".0");
+ }
+ } else {
+ try output(context, "0");
+ }
+
+ return;
}
+ // non-special case, use errol3
var buffer: [32]u8 = undefined;
- const float_decimal = errol3(x, buffer[0..]);
+ var float_decimal = errol.errol3(x, buffer[0..]);
- const num_left_digits = if (float_decimal.exp > 0) usize(float_decimal.exp) else 1;
+ if (maybe_precision) |precision| {
+ errol.roundToPrecision(&float_decimal, precision, errol.RoundMode.Decimal);
- try output(context, float_decimal.digits[0 .. num_left_digits]);
- try output(context, ".");
- if (float_decimal.digits.len > 1) {
- const num_valid_digtis = if (@typeOf(value) == f32) math.min(usize(7), float_decimal.digits.len)
- else
- float_decimal.digits.len;
+ // exp <= 0 means the leading digit is always 0, as the errol result is normalized.
+ var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
- const num_right_digits = if (precision != 0)
- math.min(precision, (num_valid_digtis-num_left_digits))
- else
- num_valid_digtis - num_left_digits;
- try output(context, float_decimal.digits[num_left_digits .. (num_left_digits + num_right_digits)]);
+ // the actual slice into the buffer; we may need to zero-pad between num_digits_whole and this.
+ var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len);
+
+ if (num_digits_whole > 0) {
+ // We may have to zero pad, for instance 1e4 requires zero padding.
+ try output(context, float_decimal.digits[0..num_digits_whole_no_pad]);
+
+ var i = num_digits_whole_no_pad;
+ while (i < num_digits_whole) : (i += 1) {
+ try output(context, "0");
+ }
+ } else {
+ try output(context, "0");
+ }
+
+ // {.0} special case doesn't want a trailing '.'
+ if (precision == 0) {
+ return;
+ }
+
+ try output(context, ".");
+
+ // Keep track of fractional count printed for case where we pre-pad then post-pad with 0's.
+ var printed: usize = 0;
+
+ // Zero-fill until we reach significant digits or run out of precision.
+ if (float_decimal.exp <= 0) {
+ const zero_digit_count = @intCast(usize, -float_decimal.exp);
+ const zeros_to_print = math.min(zero_digit_count, precision);
+
+ var i: usize = 0;
+ while (i < zeros_to_print) : (i += 1) {
+ try output(context, "0");
+ printed += 1;
+ }
+
+ if (printed >= precision) {
+ return;
+ }
+ }
+
+ // Remaining fractional portion, zero-padding if insufficient.
+ debug.assert(precision >= printed);
+ if (num_digits_whole_no_pad + precision - printed < float_decimal.digits.len) {
+ try output(context, float_decimal.digits[num_digits_whole_no_pad .. num_digits_whole_no_pad + precision - printed]);
+ return;
+ } else {
+ try output(context, float_decimal.digits[num_digits_whole_no_pad..]);
+ printed += float_decimal.digits.len - num_digits_whole_no_pad;
+
+ while (printed < precision) : (printed += 1) {
+ try output(context, "0");
+ }
+ }
} else {
- try output(context, "0");
+ // exp <= 0 means the leading digit is always 0, as the errol result is normalized.
+ var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
+
+ // the actual slice into the buffer; we may need to zero-pad between num_digits_whole and this.
+ var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len);
+
+ if (num_digits_whole > 0) {
+ // We may have to zero pad, for instance 1e4 requires zero padding.
+ try output(context, float_decimal.digits[0..num_digits_whole_no_pad]);
+
+ var i = num_digits_whole_no_pad;
+ while (i < num_digits_whole) : (i += 1) {
+ try output(context, "0");
+ }
+ } else {
+ try output(context, "0");
+ }
+
+ // Omit `.` if no fractional portion
+ if (float_decimal.exp >= 0 and num_digits_whole_no_pad == float_decimal.digits.len) {
+ return;
+ }
+
+ try output(context, ".");
+
+ // Zero-fill until we reach significant digits or run out of precision.
+ if (float_decimal.exp < 0) {
+ const zero_digit_count = @intCast(usize, -float_decimal.exp);
+
+ var i: usize = 0;
+ while (i < zero_digit_count) : (i += 1) {
+ try output(context, "0");
+ }
+ }
+
+ try output(context, float_decimal.digits[num_digits_whole_no_pad..]);
}
}
+pub fn formatBytes(
+ value: var,
+ width: ?usize,
+ comptime radix: usize,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
+ if (value == 0) {
+ return output(context, "0B");
+ }
-pub fn formatInt(value: var, base: u8, uppercase: bool, width: usize,
- context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8)Errors!void) Errors!void
-{
+ const mags_si = " kMGTPEZY";
+ const mags_iec = " KMGTPEZY";
+ const magnitude = switch (radix) {
+ 1000 => math.min(math.log2(value) / comptime math.log2(1000), mags_si.len - 1),
+ 1024 => math.min(math.log2(value) / 10, mags_iec.len - 1),
+ else => unreachable,
+ };
+ const new_value = lossyCast(f64, value) / math.pow(f64, lossyCast(f64, radix), lossyCast(f64, magnitude));
+ const suffix = switch (radix) {
+ 1000 => mags_si[magnitude],
+ 1024 => mags_iec[magnitude],
+ else => unreachable,
+ };
+
+ try formatFloatDecimal(new_value, width, context, Errors, output);
+
+ if (suffix == ' ') {
+ return output(context, "B");
+ }
+
+ const buf = switch (radix) {
+ 1000 => []u8{ suffix, 'B' },
+ 1024 => []u8{ suffix, 'i', 'B' },
+ else => unreachable,
+ };
+ return output(context, buf);
+}
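// A worked example of formatBytes above, mirroring the tests later in this patch: for
// 63 * 1024 * 1024 bytes (66060288), {Bi} uses radix 1024, so magnitude =
// log2(66060288) / 10 = 25 / 10 = 2, the suffix is 'M', and 66060288 / 1024^2 = 63
// prints as "63MiB". {B2} uses radix 1000: 66060288 / 1000^2 = 66.060288, printed to
// two decimal places as "66.06MB".
const std = @import("std");

test "formatBytes worked example" {
    var buf: [32]u8 = undefined;
    const n = usize(63 * 1024 * 1024);
    std.debug.assert(std.mem.eql(u8, try std.fmt.bufPrint(buf[0..], "{Bi}", n), "63MiB"));
    std.debug.assert(std.mem.eql(u8, try std.fmt.bufPrint(buf[0..], "{B2}", n), "66.06MB"));
}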
+
+pub fn formatInt(
+ value: var,
+ base: u8,
+ uppercase: bool,
+ width: usize,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
if (@typeOf(value).is_signed) {
return formatIntSigned(value, base, uppercase, width, context, Errors, output);
} else {
@@ -345,30 +637,42 @@ pub fn formatInt(value: var, base: u8, uppercase: bool, width: usize,
}
}
-fn formatIntSigned(value: var, base: u8, uppercase: bool, width: usize,
- context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8)Errors!void) Errors!void
-{
+fn formatIntSigned(
+ value: var,
+ base: u8,
+ uppercase: bool,
+ width: usize,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
const uint = @IntType(false, @typeOf(value).bit_count);
if (value < 0) {
const minus_sign: u8 = '-';
- try output(context, (&minus_sign)[0..1]);
- const new_value = uint(-(value + 1)) + 1;
+ try output(context, (*[1]u8)(&minus_sign)[0..]);
+ const new_value = @intCast(uint, -(value + 1)) + 1;
const new_width = if (width == 0) 0 else (width - 1);
return formatIntUnsigned(new_value, base, uppercase, new_width, context, Errors, output);
} else if (width == 0) {
- return formatIntUnsigned(uint(value), base, uppercase, width, context, Errors, output);
+ return formatIntUnsigned(@intCast(uint, value), base, uppercase, width, context, Errors, output);
} else {
const plus_sign: u8 = '+';
- try output(context, (&plus_sign)[0..1]);
- const new_value = uint(value);
+ try output(context, (*[1]u8)(&plus_sign)[0..]);
+ const new_value = @intCast(uint, value);
const new_width = if (width == 0) 0 else (width - 1);
return formatIntUnsigned(new_value, base, uppercase, new_width, context, Errors, output);
}
}
-fn formatIntUnsigned(value: var, base: u8, uppercase: bool, width: usize,
- context: var, comptime Errors: type, output: fn(@typeOf(context), []const u8)Errors!void) Errors!void
-{
+fn formatIntUnsigned(
+ value: var,
+ base: u8,
+ uppercase: bool,
+ width: usize,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+) Errors!void {
// max_int_digits accounts for the minus sign. when printing an unsigned
// number we don't need to do that.
var buf: [max_int_digits - 1]u8 = undefined;
@@ -378,10 +682,9 @@ fn formatIntUnsigned(value: var, base: u8, uppercase: bool, width: usize,
while (true) {
const digit = a % base;
index -= 1;
- buf[index] = digitToChar(u8(digit), uppercase);
+ buf[index] = digitToChar(@intCast(u8, digit), uppercase);
a /= base;
- if (a == 0)
- break;
+ if (a == 0) break;
}
const digits_buf = buf[index..];
@@ -391,22 +694,21 @@ fn formatIntUnsigned(value: var, base: u8, uppercase: bool, width: usize,
const zero_byte: u8 = '0';
var leftover_padding = padding - index;
while (true) {
- try output(context, (&zero_byte)[0..1]);
+ try output(context, (*[1]u8)(&zero_byte)[0..]);
leftover_padding -= 1;
- if (leftover_padding == 0)
- break;
+ if (leftover_padding == 0) break;
}
mem.set(u8, buf[0..index], '0');
return output(context, buf);
} else {
- const padded_buf = buf[index - padding..];
+ const padded_buf = buf[index - padding ..];
mem.set(u8, padded_buf[0..padding], '0');
return output(context, padded_buf);
}
}
pub fn formatIntBuf(out_buf: []u8, value: var, base: u8, uppercase: bool, width: usize) usize {
- var context = FormatIntBuf {
+ var context = FormatIntBuf{
.out_buf = out_buf,
.index = 0,
};
@@ -417,16 +719,14 @@ const FormatIntBuf = struct {
out_buf: []u8,
index: usize,
};
-fn formatIntCallback(context: &FormatIntBuf, bytes: []const u8) (error{}!void) {
+fn formatIntCallback(context: *FormatIntBuf, bytes: []const u8) (error{}!void) {
mem.copy(u8, context.out_buf[context.index..], bytes);
context.index += bytes.len;
}
pub fn parseInt(comptime T: type, buf: []const u8, radix: u8) !T {
- if (!T.is_signed)
- return parseUnsigned(T, buf, radix);
- if (buf.len == 0)
- return T(0);
+ if (!T.is_signed) return parseUnsigned(T, buf, radix);
+ if (buf.len == 0) return T(0);
if (buf[0] == '-') {
return math.negate(try parseUnsigned(T, buf[1..], radix));
} else if (buf[0] == '+') {
@@ -446,9 +746,10 @@ test "fmt.parseInt" {
assert(if (parseInt(u8, "256", 10)) |_| false else |err| err == error.Overflow);
}
-const ParseUnsignedError = error {
+const ParseUnsignedError = error{
/// The result cannot fit in the type specified
Overflow,
+
/// The input had a byte that was not a digit
InvalidCharacter,
};
@@ -467,22 +768,21 @@ pub fn parseUnsigned(comptime T: type, buf: []const u8, radix: u8) ParseUnsigned
pub fn charToDigit(c: u8, radix: u8) (error{InvalidCharacter}!u8) {
const value = switch (c) {
- '0' ... '9' => c - '0',
- 'A' ... 'Z' => c - 'A' + 10,
- 'a' ... 'z' => c - 'a' + 10,
+ '0'...'9' => c - '0',
+ 'A'...'Z' => c - 'A' + 10,
+ 'a'...'z' => c - 'a' + 10,
else => return error.InvalidCharacter,
};
- if (value >= radix)
- return error.InvalidCharacter;
+ if (value >= radix) return error.InvalidCharacter;
return value;
}
fn digitToChar(digit: u8, uppercase: bool) u8 {
return switch (digit) {
- 0 ... 9 => digit + '0',
- 10 ... 35 => digit + ((if (uppercase) u8('A') else u8('a')) - 10),
+ 0...9 => digit + '0',
+ 10...35 => digit + ((if (uppercase) u8('A') else u8('a')) - 10),
else => unreachable,
};
}
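// charToDigit and digitToChar above are plain range maps; for example charToDigit('f', 16)
// yields 15, and any character whose value is at or above the radix is rejected. A small
// check of that behaviour (not part of the patch):
const std = @import("std");

test "charToDigit sketch" {
    std.debug.assert((try std.fmt.charToDigit('f', 16)) == 15);
    std.debug.assert(if (std.fmt.charToDigit('z', 16)) |_| false else |err| err == error.InvalidCharacter);
}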
@@ -491,27 +791,31 @@ const BufPrintContext = struct {
remaining: []u8,
};
-fn bufPrintWrite(context: &BufPrintContext, bytes: []const u8) !void {
+fn bufPrintWrite(context: *BufPrintContext, bytes: []const u8) !void {
if (context.remaining.len < bytes.len) return error.BufferTooSmall;
mem.copy(u8, context.remaining, bytes);
context.remaining = context.remaining[bytes.len..];
}
pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: ...) ![]u8 {
- var context = BufPrintContext { .remaining = buf, };
+ var context = BufPrintContext{ .remaining = buf };
try format(&context, error{BufferTooSmall}, bufPrintWrite, fmt, args);
- return buf[0..buf.len - context.remaining.len];
+ return buf[0 .. buf.len - context.remaining.len];
}
-pub fn allocPrint(allocator: &mem.Allocator, comptime fmt: []const u8, args: ...) ![]u8 {
+pub const AllocPrintError = error{OutOfMemory};
+
+pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: ...) AllocPrintError![]u8 {
var size: usize = 0;
format(&size, error{}, countSize, fmt, args) catch |err| switch (err) {};
const buf = try allocator.alloc(u8, size);
- return bufPrint(buf, fmt, args);
+ return bufPrint(buf, fmt, args) catch |err| switch (err) {
+ error.BufferTooSmall => unreachable, // we just counted the size above
+ };
}
-fn countSize(size: &usize, bytes: []const u8) (error{}!void) {
- *size += bytes.len;
+fn countSize(size: *usize, bytes: []const u8) (error{}!void) {
+ size.* += bytes.len;
}
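// allocPrint above is deliberately two-pass: it first runs format with countSize to measure
// the output, allocates exactly that many bytes, then formats again with bufPrint, which is
// why BufferTooSmall is unreachable. A usage sketch, assuming std.debug.global_allocator is
// available as in other standard library tests of this era:
const std = @import("std");

test "allocPrint usage sketch" {
    const s = try std.fmt.allocPrint(std.debug.global_allocator, "{} + {} = {}", u32(2), u32(2), u32(4));
    defer std.debug.global_allocator.free(s);
    std.debug.assert(std.mem.eql(u8, s, "2 + 2 = 4"));
}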
test "buf print int" {
@@ -552,117 +856,329 @@ test "parse unsigned comptime" {
test "fmt.format" {
{
- var buf1: [32]u8 = undefined;
const value: ?i32 = 1234;
- const result = try bufPrint(buf1[0..], "nullable: {}\n", value);
- assert(mem.eql(u8, result, "nullable: 1234\n"));
+ try testFmt("optional: 1234\n", "optional: {}\n", value);
}
{
- var buf1: [32]u8 = undefined;
const value: ?i32 = null;
- const result = try bufPrint(buf1[0..], "nullable: {}\n", value);
- assert(mem.eql(u8, result, "nullable: null\n"));
+ try testFmt("optional: null\n", "optional: {}\n", value);
}
{
- var buf1: [32]u8 = undefined;
const value: error!i32 = 1234;
- const result = try bufPrint(buf1[0..], "error union: {}\n", value);
- assert(mem.eql(u8, result, "error union: 1234\n"));
+ try testFmt("error union: 1234\n", "error union: {}\n", value);
}
{
- var buf1: [32]u8 = undefined;
const value: error!i32 = error.InvalidChar;
- const result = try bufPrint(buf1[0..], "error union: {}\n", value);
- assert(mem.eql(u8, result, "error union: error.InvalidChar\n"));
+ try testFmt("error union: error.InvalidChar\n", "error union: {}\n", value);
}
{
- var buf1: [32]u8 = undefined;
const value: u3 = 0b101;
- const result = try bufPrint(buf1[0..], "u3: {}\n", value);
- assert(mem.eql(u8, result, "u3: 5\n"));
+ try testFmt("u3: 5\n", "u3: {}\n", value);
}
{
- // Dummy field because of https://github.com/zig-lang/zig/issues/557.
+ const value: u8 = 'a';
+ try testFmt("u8: a\n", "u8: {c}\n", value);
+ }
+ {
+ const value: u8 = 0b1100;
+ try testFmt("u8: 0b1100\n", "u8: 0b{b}\n", value);
+ }
+ {
+ const value: [3]u8 = "abc";
+ try testFmt("array: abc\n", "array: {}\n", value);
+ try testFmt("array: abc\n", "array: {}\n", &value);
+
+ var buf: [100]u8 = undefined;
+ try testFmt(
+ try bufPrint(buf[0..], "array: [3]u8@{x}\n", @ptrToInt(&value)),
+ "array: {*}\n",
+ &value,
+ );
+ }
+ {
+ const value: []const u8 = "abc";
+ try testFmt("slice: abc\n", "slice: {}\n", value);
+ }
+ {
+ const value = @intToPtr(*i32, 0xdeadbeef);
+ try testFmt("pointer: i32@deadbeef\n", "pointer: {}\n", value);
+ try testFmt("pointer: i32@deadbeef\n", "pointer: {*}\n", value);
+ }
+ try testFmt("buf: Test \n", "buf: {s5}\n", "Test");
+ try testFmt("buf: Test\n Other text", "buf: {s}\n Other text", "Test");
+ try testFmt("cstr: Test C\n", "cstr: {s}\n", c"Test C");
+ try testFmt("cstr: Test C \n", "cstr: {s10}\n", c"Test C");
+ try testFmt("file size: 63MiB\n", "file size: {Bi}\n", usize(63 * 1024 * 1024));
+ try testFmt("file size: 66.06MB\n", "file size: {B2}\n", usize(63 * 1024 * 1024));
+ {
+ // Dummy field because of https://github.com/ziglang/zig/issues/557.
const Struct = struct {
unused: u8,
};
var buf1: [32]u8 = undefined;
- const value = Struct {
- .unused = 42,
- };
+ const value = Struct{ .unused = 42 };
const result = try bufPrint(buf1[0..], "pointer: {}\n", &value);
assert(mem.startsWith(u8, result, "pointer: Struct@"));
}
-
- // TODO get these tests passing in release modes
- // https://github.com/zig-lang/zig/issues/564
- if (builtin.mode == builtin.Mode.Debug) {
- {
- var buf1: [32]u8 = undefined;
- const value: f32 = 12.34;
- const result = try bufPrint(buf1[0..], "f32: {}\n", value);
- assert(mem.eql(u8, result, "f32: 1.23400001e1\n"));
- }
- {
- var buf1: [32]u8 = undefined;
- const value: f64 = -12.34e10;
- const result = try bufPrint(buf1[0..], "f64: {}\n", value);
- assert(mem.eql(u8, result, "f64: -1.234e11\n"));
- }
- {
- var buf1: [32]u8 = undefined;
- const result = try bufPrint(buf1[0..], "f64: {}\n", math.nan_f64);
- assert(mem.eql(u8, result, "f64: NaN\n"));
- }
- {
- var buf1: [32]u8 = undefined;
- const result = try bufPrint(buf1[0..], "f64: {}\n", math.inf_f64);
- assert(mem.eql(u8, result, "f64: Infinity\n"));
- }
- {
- var buf1: [32]u8 = undefined;
- const result = try bufPrint(buf1[0..], "f64: {}\n", -math.inf_f64);
- assert(mem.eql(u8, result, "f64: -Infinity\n"));
- }
- {
- var buf1: [32]u8 = undefined;
- const value: f32 = 1.1234;
- const result = try bufPrint(buf1[0..], "f32: {.1}\n", value);
- assert(mem.eql(u8, result, "f32: 1.1\n"));
- }
- {
- var buf1: [32]u8 = undefined;
- const value: f32 = 1234.567;
- const result = try bufPrint(buf1[0..], "f32: {.2}\n", value);
- assert(mem.eql(u8, result, "f32: 1234.56\n"));
- }
- {
- var buf1: [32]u8 = undefined;
- const value: f32 = -11.1234;
- const result = try bufPrint(buf1[0..], "f32: {.4}\n", value);
- // -11.1234 is converted to f64 -11.12339... internally (errol3() function takes f64).
- // -11.12339... is truncated to -11.1233
- assert(mem.eql(u8, result, "f32: -11.1233\n"));
- }
- {
- var buf1: [32]u8 = undefined;
- const value: f32 = 91.12345;
- const result = try bufPrint(buf1[0..], "f32: {.}\n", value);
- assert(mem.eql(u8, result, "f32: 91.12345\n"));
- }
- {
- var buf1: [32]u8 = undefined;
- const value: f64 = 91.12345678901235;
- const result = try bufPrint(buf1[0..], "f64: {.10}\n", value);
- assert(mem.eql(u8, result, "f64: 91.1234567890\n"));
- }
-
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f32 = 1.34;
+ const result = try bufPrint(buf1[0..], "f32: {e}\n", value);
+ assert(mem.eql(u8, result, "f32: 1.34000003e+00\n"));
}
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f32 = 12.34;
+ const result = try bufPrint(buf1[0..], "f32: {e}\n", value);
+ assert(mem.eql(u8, result, "f32: 1.23400001e+01\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = -12.34e10;
+ const result = try bufPrint(buf1[0..], "f64: {e}\n", value);
+ assert(mem.eql(u8, result, "f64: -1.234e+11\n"));
+ }
+ {
+ // This fails on release due to a minor rounding difference.
+ // --release-fast outputs 9.999960000000001e-40 vs. the expected.
+ if (builtin.mode == builtin.Mode.Debug) {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = 9.999960e-40;
+ const result = try bufPrint(buf1[0..], "f64: {e}\n", value);
+ assert(mem.eql(u8, result, "f64: 9.99996e-40\n"));
+ }
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = 1.409706e-42;
+ const result = try bufPrint(buf1[0..], "f64: {e5}\n", value);
+ assert(mem.eql(u8, result, "f64: 1.40971e-42\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = @bitCast(f32, u32(814313563));
+ const result = try bufPrint(buf1[0..], "f64: {e5}\n", value);
+ assert(mem.eql(u8, result, "f64: 1.00000e-09\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = @bitCast(f32, u32(1006632960));
+ const result = try bufPrint(buf1[0..], "f64: {e5}\n", value);
+ assert(mem.eql(u8, result, "f64: 7.81250e-03\n"));
+ }
+ {
+ // libc rounds 1.000005e+05 to 1.00000e+05 but zig does 1.00001e+05.
+ // In fact, libc often does not round up when the digit one past the precision point is a 5.
+ var buf1: [32]u8 = undefined;
+ const value: f64 = @bitCast(f32, u32(1203982400));
+ const result = try bufPrint(buf1[0..], "f64: {e5}\n", value);
+ assert(mem.eql(u8, result, "f64: 1.00001e+05\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const result = try bufPrint(buf1[0..], "f64: {}\n", math.nan_f64);
+ assert(mem.eql(u8, result, "f64: nan\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const result = try bufPrint(buf1[0..], "f64: {}\n", -math.nan_f64);
+ assert(mem.eql(u8, result, "f64: -nan\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const result = try bufPrint(buf1[0..], "f64: {}\n", math.inf_f64);
+ assert(mem.eql(u8, result, "f64: inf\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const result = try bufPrint(buf1[0..], "f64: {}\n", -math.inf_f64);
+ assert(mem.eql(u8, result, "f64: -inf\n"));
+ }
+ {
+ var buf1: [64]u8 = undefined;
+ const value: f64 = 1.52314e+29;
+ const result = try bufPrint(buf1[0..], "f64: {.}\n", value);
+ assert(mem.eql(u8, result, "f64: 152314000000000000000000000000\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f32 = 1.1234;
+ const result = try bufPrint(buf1[0..], "f32: {.1}\n", value);
+ assert(mem.eql(u8, result, "f32: 1.1\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f32 = 1234.567;
+ const result = try bufPrint(buf1[0..], "f32: {.2}\n", value);
+ assert(mem.eql(u8, result, "f32: 1234.57\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f32 = -11.1234;
+ const result = try bufPrint(buf1[0..], "f32: {.4}\n", value);
+ // -11.1234 is converted to f64 -11.12339... internally (errol3() function takes f64).
+ // -11.12339... is rounded back up to -11.1234
+ assert(mem.eql(u8, result, "f32: -11.1234\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f32 = 91.12345;
+ const result = try bufPrint(buf1[0..], "f32: {.5}\n", value);
+ assert(mem.eql(u8, result, "f32: 91.12345\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = 91.12345678901235;
+ const result = try bufPrint(buf1[0..], "f64: {.10}\n", value);
+ assert(mem.eql(u8, result, "f64: 91.1234567890\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = 0.0;
+ const result = try bufPrint(buf1[0..], "f64: {.5}\n", value);
+ assert(mem.eql(u8, result, "f64: 0.00000\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = 5.700;
+ const result = try bufPrint(buf1[0..], "f64: {.0}\n", value);
+ assert(mem.eql(u8, result, "f64: 6\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = 9.999;
+ const result = try bufPrint(buf1[0..], "f64: {.1}\n", value);
+ assert(mem.eql(u8, result, "f64: 10.0\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = 1.0;
+ const result = try bufPrint(buf1[0..], "f64: {.3}\n", value);
+ assert(mem.eql(u8, result, "f64: 1.000\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = 0.0003;
+ const result = try bufPrint(buf1[0..], "f64: {.8}\n", value);
+ assert(mem.eql(u8, result, "f64: 0.00030000\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = 1.40130e-45;
+ const result = try bufPrint(buf1[0..], "f64: {.5}\n", value);
+ assert(mem.eql(u8, result, "f64: 0.00000\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = 9.999960e-40;
+ const result = try bufPrint(buf1[0..], "f64: {.5}\n", value);
+ assert(mem.eql(u8, result, "f64: 0.00000\n"));
+ }
+ // libc checks
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = f64(@bitCast(f32, u32(916964781)));
+ const result = try bufPrint(buf1[0..], "f64: {.5}\n", value);
+ assert(mem.eql(u8, result, "f64: 0.00001\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = f64(@bitCast(f32, u32(925353389)));
+ const result = try bufPrint(buf1[0..], "f64: {.5}\n", value);
+ assert(mem.eql(u8, result, "f64: 0.00001\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = f64(@bitCast(f32, u32(1036831278)));
+ const result = try bufPrint(buf1[0..], "f64: {.5}\n", value);
+ assert(mem.eql(u8, result, "f64: 0.10000\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = f64(@bitCast(f32, u32(1065353133)));
+ const result = try bufPrint(buf1[0..], "f64: {.5}\n", value);
+ assert(mem.eql(u8, result, "f64: 1.00000\n"));
+ }
+ {
+ var buf1: [32]u8 = undefined;
+ const value: f64 = f64(@bitCast(f32, u32(1092616192)));
+ const result = try bufPrint(buf1[0..], "f64: {.5}\n", value);
+ assert(mem.eql(u8, result, "f64: 10.00000\n"));
+ }
+ // libc differences
+ {
+ var buf1: [32]u8 = undefined;
+        // This is 0.015625 exactly according to gdb. We thus round down;
+        // however, glibc rounds up for some reason. This occurs for all
+        // floats of the form x.yyyy25 on a precision point.
+ const value: f64 = f64(@bitCast(f32, u32(1015021568)));
+ const result = try bufPrint(buf1[0..], "f64: {.5}\n", value);
+ assert(mem.eql(u8, result, "f64: 0.01563\n"));
+ }
+ // std-windows-x86_64-Debug-bare test case fails
+ {
+ // errol3 rounds to ... 630 but libc rounds to ...632. Grisu3
+ // also rounds to 630 so I'm inclined to believe libc is not
+ // optimal here.
+ var buf1: [32]u8 = undefined;
+ const value: f64 = f64(@bitCast(f32, u32(1518338049)));
+ const result = try bufPrint(buf1[0..], "f64: {.5}\n", value);
+ assert(mem.eql(u8, result, "f64: 18014400656965630.00000\n"));
+ }
+    // custom type format
+ {
+ const Vec2 = struct {
+ const SelfType = this;
+ x: f32,
+ y: f32,
+
+ pub fn format(
+ self: *SelfType,
+ comptime fmt: []const u8,
+ context: var,
+ comptime Errors: type,
+ output: fn (@typeOf(context), []const u8) Errors!void,
+ ) Errors!void {
+ if (fmt.len > 0) {
+ if (fmt.len > 1) unreachable;
+ switch (fmt[0]) {
+                        // point format
+ 'p' => return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y),
+                        // dimension format
+ 'd' => return std.fmt.format(context, Errors, output, "{.3}x{.3}", self.x, self.y),
+ else => unreachable,
+ }
+ }
+ return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y);
+ }
+ };
+
+ var buf1: [32]u8 = undefined;
+ var value = Vec2{
+ .x = 10.2,
+ .y = 2.22,
+ };
+ try testFmt("point: (10.200,2.220)\n", "point: {}\n", &value);
+ try testFmt("dim: 10.200x2.220\n", "dim: {d}\n", &value);
+ }
+}
+
+fn testFmt(expected: []const u8, comptime template: []const u8, args: ...) !void {
+ var buf: [100]u8 = undefined;
+ const result = try bufPrint(buf[0..], template, args);
+ if (mem.eql(u8, result, expected)) return;
+
+ std.debug.warn("\n====== expected this output: =========\n");
+ std.debug.warn("{}", expected);
+ std.debug.warn("\n======== instead found this: =========\n");
+ std.debug.warn("{}", result);
+ std.debug.warn("\n======================================\n");
+ return error.TestFailed;
}
pub fn trim(buf: []const u8) []const u8 {
var start: usize = 0;
- while (start < buf.len and isWhiteSpace(buf[start])) : (start += 1) { }
+ while (start < buf.len and isWhiteSpace(buf[start])) : (start += 1) {}
var end: usize = buf.len;
while (true) {
@@ -674,7 +1190,6 @@ pub fn trim(buf: []const u8) []const u8 {
}
}
break;
-
}
return buf[start..end];
}
diff --git a/std/hash/adler.zig b/std/hash/adler.zig
index c77a5aaf50..9c5966f89b 100644
--- a/std/hash/adler.zig
+++ b/std/hash/adler.zig
@@ -13,14 +13,12 @@ pub const Adler32 = struct {
adler: u32,
pub fn init() Adler32 {
- return Adler32 {
- .adler = 1,
- };
+ return Adler32{ .adler = 1 };
}
// This fast variant is taken from zlib. It reduces the required modulos and unrolls longer
// buffer inputs and should be much quicker.
- pub fn update(self: &Adler32, input: []const u8) void {
+ pub fn update(self: *Adler32, input: []const u8) void {
var s1 = self.adler & 0xffff;
var s2 = (self.adler >> 16) & 0xffff;
@@ -33,8 +31,7 @@ pub const Adler32 = struct {
if (s2 >= base) {
s2 -= base;
}
- }
- else if (input.len < 16) {
+ } else if (input.len < 16) {
for (input) |b| {
s1 +%= b;
s2 +%= s1;
@@ -44,8 +41,7 @@ pub const Adler32 = struct {
}
s2 %= base;
- }
- else {
+ } else {
var i: usize = 0;
while (i + nmax <= input.len) : (i += nmax) {
const n = nmax / 16; // note: 16 | nmax
@@ -81,7 +77,7 @@ pub const Adler32 = struct {
self.adler = s1 | (s2 << 16);
}
- pub fn final(self: &Adler32) u32 {
+ pub fn final(self: *Adler32) u32 {
return self.adler;
}
@@ -98,15 +94,14 @@ test "adler32 sanity" {
}
test "adler32 long" {
- const long1 = []u8 {1} ** 1024;
+ const long1 = []u8{1} ** 1024;
debug.assert(Adler32.hash(long1[0..]) == 0x06780401);
- const long2 = []u8 {1} ** 1025;
+ const long2 = []u8{1} ** 1025;
debug.assert(Adler32.hash(long2[0..]) == 0x0a7a0402);
}
test "adler32 very long" {
- const long = []u8 {1} ** 5553;
+ const long = []u8{1} ** 5553;
debug.assert(Adler32.hash(long[0..]) == 0x707f15b2);
}
-
diff --git a/std/hash/crc.zig b/std/hash/crc.zig
index f88069ce3c..c455140785 100644
--- a/std/hash/crc.zig
+++ b/std/hash/crc.zig
@@ -9,9 +9,9 @@ const std = @import("../index.zig");
const debug = std.debug;
pub const Polynomial = struct {
- const IEEE = 0xedb88320;
+ const IEEE = 0xedb88320;
const Castagnoli = 0x82f63b78;
- const Koopman = 0xeb31d82e;
+ const Koopman = 0xeb31d82e;
};
// IEEE is by far the most common CRC and so is aliased by default.
@@ -26,21 +26,23 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
var tables: [8][256]u32 = undefined;
for (tables[0]) |*e, i| {
- var crc = u32(i);
- var j: usize = 0; while (j < 8) : (j += 1) {
+ var crc = @intCast(u32, i);
+ var j: usize = 0;
+ while (j < 8) : (j += 1) {
if (crc & 1 == 1) {
crc = (crc >> 1) ^ poly;
} else {
crc = (crc >> 1);
}
}
- *e = crc;
+ e.* = crc;
}
var i: usize = 0;
while (i < 256) : (i += 1) {
var crc = tables[0][i];
- var j: usize = 1; while (j < 8) : (j += 1) {
+ var j: usize = 1;
+ while (j < 8) : (j += 1) {
const index = @truncate(u8, crc);
crc = tables[0][index] ^ (crc >> 8);
tables[j][i] = crc;
@@ -53,19 +55,17 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
crc: u32,
pub fn init() Self {
- return Self {
- .crc = 0xffffffff,
- };
+ return Self{ .crc = 0xffffffff };
}
- pub fn update(self: &Self, input: []const u8) void {
+ pub fn update(self: *Self, input: []const u8) void {
var i: usize = 0;
while (i + 8 <= input.len) : (i += 8) {
- const p = input[i..i+8];
+ const p = input[i .. i + 8];
// Unrolling this way gives ~50Mb/s increase
- self.crc ^= (u32(p[0]) << 0);
- self.crc ^= (u32(p[1]) << 8);
+ self.crc ^= (u32(p[0]) << 0);
+ self.crc ^= (u32(p[1]) << 8);
self.crc ^= (u32(p[2]) << 16);
self.crc ^= (u32(p[3]) << 24);
@@ -76,8 +76,8 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
lookup_tables[3][p[4]] ^
lookup_tables[4][@truncate(u8, self.crc >> 24)] ^
lookup_tables[5][@truncate(u8, self.crc >> 16)] ^
- lookup_tables[6][@truncate(u8, self.crc >> 8)] ^
- lookup_tables[7][@truncate(u8, self.crc >> 0)];
+ lookup_tables[6][@truncate(u8, self.crc >> 8)] ^
+ lookup_tables[7][@truncate(u8, self.crc >> 0)];
}
while (i < input.len) : (i += 1) {
@@ -86,7 +86,7 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
}
}
- pub fn final(self: &Self) u32 {
+ pub fn final(self: *Self) u32 {
return ~self.crc;
}
@@ -122,15 +122,16 @@ pub fn Crc32SmallWithPoly(comptime poly: u32) type {
var table: [16]u32 = undefined;
for (table) |*e, i| {
- var crc = u32(i * 16);
- var j: usize = 0; while (j < 8) : (j += 1) {
+ var crc = @intCast(u32, i * 16);
+ var j: usize = 0;
+ while (j < 8) : (j += 1) {
if (crc & 1 == 1) {
crc = (crc >> 1) ^ poly;
} else {
crc = (crc >> 1);
}
}
- *e = crc;
+ e.* = crc;
}
break :block table;
@@ -139,19 +140,17 @@ pub fn Crc32SmallWithPoly(comptime poly: u32) type {
crc: u32,
pub fn init() Self {
- return Self {
- .crc = 0xffffffff,
- };
+ return Self{ .crc = 0xffffffff };
}
- pub fn update(self: &Self, input: []const u8) void {
+ pub fn update(self: *Self, input: []const u8) void {
for (input) |b| {
self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 0))] ^ (self.crc >> 4);
self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 4))] ^ (self.crc >> 4);
}
}
- pub fn final(self: &Self) u32 {
+ pub fn final(self: *Self) u32 {
return ~self.crc;
}
diff --git a/std/hash/fnv.zig b/std/hash/fnv.zig
index 88b965b76a..447c996772 100644
--- a/std/hash/fnv.zig
+++ b/std/hash/fnv.zig
@@ -7,7 +7,7 @@
const std = @import("../index.zig");
const debug = std.debug;
-pub const Fnv1a_32 = Fnv1a(u32, 0x01000193 , 0x811c9dc5);
+pub const Fnv1a_32 = Fnv1a(u32, 0x01000193, 0x811c9dc5);
pub const Fnv1a_64 = Fnv1a(u64, 0x100000001b3, 0xcbf29ce484222325);
pub const Fnv1a_128 = Fnv1a(u128, 0x1000000000000000000013b, 0x6c62272e07bb014262b821756295c58d);
@@ -18,19 +18,17 @@ fn Fnv1a(comptime T: type, comptime prime: T, comptime offset: T) type {
value: T,
pub fn init() Self {
- return Self {
- .value = offset,
- };
+ return Self{ .value = offset };
}
- pub fn update(self: &Self, input: []const u8) void {
+ pub fn update(self: *Self, input: []const u8) void {
for (input) |b| {
self.value ^= b;
self.value *%= prime;
}
}
- pub fn final(self: &Self) T {
+ pub fn final(self: *Self) T {
return self.value;
}
diff --git a/std/hash/siphash.zig b/std/hash/siphash.zig
index 301c35cf05..cdad77e59e 100644
--- a/std/hash/siphash.zig
+++ b/std/hash/siphash.zig
@@ -45,7 +45,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
const k0 = mem.readInt(key[0..8], u64, Endian.Little);
const k1 = mem.readInt(key[8..16], u64, Endian.Little);
- var d = Self {
+ var d = Self{
.v0 = k0 ^ 0x736f6d6570736575,
.v1 = k1 ^ 0x646f72616e646f6d,
.v2 = k0 ^ 0x6c7967656e657261,
@@ -63,7 +63,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
return d;
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial from previous.
@@ -76,16 +76,16 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
// Full middle blocks.
while (off + 8 <= b.len) : (off += 8) {
- d.round(b[off..off + 8]);
+ d.round(b[off .. off + 8]);
}
// Remainder for next pass.
mem.copy(u8, d.buf[d.buf_len..], b[off..]);
- d.buf_len += u8(b[off..].len);
+ d.buf_len += @intCast(u8, b[off..].len);
d.msg_len +%= @truncate(u8, b.len);
}
- pub fn final(d: &Self) T {
+ pub fn final(d: *Self) T {
// Padding
mem.set(u8, d.buf[d.buf_len..], 0);
d.buf[7] = d.msg_len;
@@ -118,7 +118,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
return (u128(b2) << 64) | b1;
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 8);
const m = mem.readInt(b[0..], u64, Endian.Little);
@@ -132,7 +132,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
d.v0 ^= m;
}
- fn sipRound(d: &Self) void {
+ fn sipRound(d: *Self) void {
d.v0 +%= d.v1;
d.v1 = math.rotl(u64, d.v1, u64(13));
d.v1 ^= d.v0;
@@ -162,7 +162,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
const test_key = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f";
test "siphash64-2-4 sanity" {
- const vectors = [][]const u8 {
+ const vectors = [][]const u8{
"\x31\x0e\x0e\xdd\x47\xdb\x6f\x72", // ""
"\xfd\x67\xdc\x93\xc5\x39\xf8\x74", // "\x00"
"\x5a\x4f\xa9\xd9\x09\x80\x6c\x0d", // "\x00\x01" ... etc
@@ -233,7 +233,7 @@ test "siphash64-2-4 sanity" {
var buffer: [64]u8 = undefined;
for (vectors) |vector, i| {
- buffer[i] = u8(i);
+ buffer[i] = @intCast(u8, i);
const expected = mem.readInt(vector, u64, Endian.Little);
debug.assert(siphash.hash(test_key, buffer[0..i]) == expected);
@@ -241,7 +241,7 @@ test "siphash64-2-4 sanity" {
}
test "siphash128-2-4 sanity" {
- const vectors = [][]const u8 {
+ const vectors = [][]const u8{
"\xa3\x81\x7f\x04\xba\x25\xa8\xe6\x6d\xf6\x72\x14\xc7\x55\x02\x93",
"\xda\x87\xc1\xd8\x6b\x99\xaf\x44\x34\x76\x59\x11\x9b\x22\xfc\x45",
"\x81\x77\x22\x8d\xa4\xa4\x5d\xc7\xfc\xa3\x8b\xde\xf6\x0a\xff\xe4",
@@ -312,7 +312,7 @@ test "siphash128-2-4 sanity" {
var buffer: [64]u8 = undefined;
for (vectors) |vector, i| {
- buffer[i] = u8(i);
+ buffer[i] = @intCast(u8, i);
const expected = mem.readInt(vector, u128, Endian.Little);
debug.assert(siphash.hash(test_key, buffer[0..i]) == expected);
diff --git a/std/hash_map.zig b/std/hash_map.zig
index 29dd233753..cebd5272c0 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -9,15 +9,12 @@ const builtin = @import("builtin");
const want_modification_safety = builtin.mode != builtin.Mode.ReleaseFast;
const debug_u32 = if (want_modification_safety) u32 else void;
-pub fn HashMap(comptime K: type, comptime V: type,
- comptime hash: fn(key: K)u32,
- comptime eql: fn(a: K, b: K)bool) type
-{
+pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type {
return struct {
entries: []Entry,
size: usize,
max_distance_from_start_index: usize,
- allocator: &Allocator,
+ allocator: *Allocator,
// this is used to detect bugs where a hashtable is edited while an iterator is running.
modification_count: debug_u32,
@@ -31,7 +28,7 @@ pub fn HashMap(comptime K: type, comptime V: type,
};
pub const Iterator = struct {
- hm: &const Self,
+ hm: *const Self,
// how many items have we returned
count: usize,
// iterator through the entry array
@@ -39,7 +36,7 @@ pub fn HashMap(comptime K: type, comptime V: type,
// used to detect concurrent modification
initial_modification_count: debug_u32,
- pub fn next(it: &Iterator) ?&Entry {
+ pub fn next(it: *Iterator) ?*Entry {
if (want_modification_safety) {
assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
}
@@ -54,10 +51,18 @@ pub fn HashMap(comptime K: type, comptime V: type,
}
unreachable; // no next item
}
+
+ // Reset the iterator to the initial index
+ pub fn reset(it: *Iterator) void {
+ it.count = 0;
+ it.index = 0;
+                // Reset the modification count too
+ it.initial_modification_count = it.hm.modification_count;
+ }
};
- pub fn init(allocator: &Allocator) Self {
- return Self {
+ pub fn init(allocator: *Allocator) Self {
+ return Self{
.entries = []Entry{},
.allocator = allocator,
.size = 0,
@@ -66,11 +71,11 @@ pub fn HashMap(comptime K: type, comptime V: type,
};
}
- pub fn deinit(hm: &Self) void {
+ pub fn deinit(hm: *const Self) void {
hm.allocator.free(hm.entries);
}
- pub fn clear(hm: &Self) void {
+ pub fn clear(hm: *Self) void {
for (hm.entries) |*entry| {
entry.used = false;
}
@@ -79,8 +84,12 @@ pub fn HashMap(comptime K: type, comptime V: type,
hm.incrementModificationCount();
}
+ pub fn count(hm: *const Self) usize {
+ return hm.size;
+ }
+
/// Returns the value that was already there.
- pub fn put(hm: &Self, key: K, value: &const V) !?V {
+ pub fn put(hm: *Self, key: K, value: *const V) !?V {
if (hm.entries.len == 0) {
try hm.initCapacity(16);
}
@@ -102,49 +111,51 @@ pub fn HashMap(comptime K: type, comptime V: type,
return hm.internalPut(key, value);
}
- pub fn get(hm: &Self, key: K) ?&Entry {
+ pub fn get(hm: *const Self, key: K) ?*Entry {
if (hm.entries.len == 0) {
return null;
}
return hm.internalGet(key);
}
- pub fn contains(hm: &Self, key: K) bool {
+ pub fn contains(hm: *const Self, key: K) bool {
return hm.get(key) != null;
}
- pub fn remove(hm: &Self, key: K) ?&Entry {
+ pub fn remove(hm: *Self, key: K) ?*Entry {
if (hm.entries.len == 0) return null;
hm.incrementModificationCount();
const start_index = hm.keyToIndex(key);
- {var roll_over: usize = 0; while (roll_over <= hm.max_distance_from_start_index) : (roll_over += 1) {
- const index = (start_index + roll_over) % hm.entries.len;
- var entry = &hm.entries[index];
+ {
+ var roll_over: usize = 0;
+ while (roll_over <= hm.max_distance_from_start_index) : (roll_over += 1) {
+ const index = (start_index + roll_over) % hm.entries.len;
+ var entry = &hm.entries[index];
- if (!entry.used)
- return null;
+ if (!entry.used) return null;
- if (!eql(entry.key, key)) continue;
+ if (!eql(entry.key, key)) continue;
- while (roll_over < hm.entries.len) : (roll_over += 1) {
- const next_index = (start_index + roll_over + 1) % hm.entries.len;
- const next_entry = &hm.entries[next_index];
- if (!next_entry.used or next_entry.distance_from_start_index == 0) {
- entry.used = false;
- hm.size -= 1;
- return entry;
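+                    // Backward-shift deletion: pull the following entries in this
+                    // probe chain back one slot until we hit an empty slot or an
+                    // entry that is already at its ideal position.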
+ while (roll_over < hm.entries.len) : (roll_over += 1) {
+ const next_index = (start_index + roll_over + 1) % hm.entries.len;
+ const next_entry = &hm.entries[next_index];
+ if (!next_entry.used or next_entry.distance_from_start_index == 0) {
+ entry.used = false;
+ hm.size -= 1;
+ return entry;
+ }
+ entry.* = next_entry.*;
+ entry.distance_from_start_index -= 1;
+ entry = next_entry;
}
- *entry = *next_entry;
- entry.distance_from_start_index -= 1;
- entry = next_entry;
+ unreachable; // shifting everything in the table
}
- unreachable; // shifting everything in the table
- }}
+ }
return null;
}
- pub fn iterator(hm: &const Self) Iterator {
- return Iterator {
+ pub fn iterator(hm: *const Self) Iterator {
+ return Iterator{
.hm = hm,
.count = 0,
.index = 0,
@@ -152,7 +163,7 @@ pub fn HashMap(comptime K: type, comptime V: type,
};
}
- fn initCapacity(hm: &Self, capacity: usize) !void {
+ fn initCapacity(hm: *Self, capacity: usize) !void {
hm.entries = try hm.allocator.alloc(Entry, capacity);
hm.size = 0;
hm.max_distance_from_start_index = 0;
@@ -161,30 +172,32 @@ pub fn HashMap(comptime K: type, comptime V: type,
}
}
- fn incrementModificationCount(hm: &Self) void {
+ fn incrementModificationCount(hm: *Self) void {
if (want_modification_safety) {
hm.modification_count +%= 1;
}
}
/// Returns the value that was already there.
- fn internalPut(hm: &Self, orig_key: K, orig_value: &const V) ?V {
+ fn internalPut(hm: *Self, orig_key: K, orig_value: *const V) ?V {
var key = orig_key;
- var value = *orig_value;
+ var value = orig_value.*;
const start_index = hm.keyToIndex(key);
var roll_over: usize = 0;
var distance_from_start_index: usize = 0;
- while (roll_over < hm.entries.len) : ({roll_over += 1; distance_from_start_index += 1;}) {
+ while (roll_over < hm.entries.len) : ({
+ roll_over += 1;
+ distance_from_start_index += 1;
+ }) {
const index = (start_index + roll_over) % hm.entries.len;
const entry = &hm.entries[index];
if (entry.used and !eql(entry.key, key)) {
if (entry.distance_from_start_index < distance_from_start_index) {
// robin hood to the rescue
- const tmp = *entry;
- hm.max_distance_from_start_index = math.max(hm.max_distance_from_start_index,
- distance_from_start_index);
- *entry = Entry {
+ const tmp = entry.*;
+ hm.max_distance_from_start_index = math.max(hm.max_distance_from_start_index, distance_from_start_index);
+ entry.* = Entry{
.used = true,
.distance_from_start_index = distance_from_start_index,
.key = key,
@@ -207,7 +220,7 @@ pub fn HashMap(comptime K: type, comptime V: type,
}
hm.max_distance_from_start_index = math.max(distance_from_start_index, hm.max_distance_from_start_index);
- *entry = Entry {
+ entry.* = Entry{
.used = true,
.distance_from_start_index = distance_from_start_index,
.key = key,
@@ -218,19 +231,22 @@ pub fn HashMap(comptime K: type, comptime V: type,
unreachable; // put into a full map
}
- fn internalGet(hm: &Self, key: K) ?&Entry {
+ fn internalGet(hm: *const Self, key: K) ?*Entry {
const start_index = hm.keyToIndex(key);
- {var roll_over: usize = 0; while (roll_over <= hm.max_distance_from_start_index) : (roll_over += 1) {
- const index = (start_index + roll_over) % hm.entries.len;
- const entry = &hm.entries[index];
+ {
+ var roll_over: usize = 0;
+ while (roll_over <= hm.max_distance_from_start_index) : (roll_over += 1) {
+ const index = (start_index + roll_over) % hm.entries.len;
+ const entry = &hm.entries[index];
- if (!entry.used) return null;
- if (eql(entry.key, key)) return entry;
- }}
+ if (!entry.used) return null;
+ if (eql(entry.key, key)) return entry;
+ }
+ }
return null;
}
- fn keyToIndex(hm: &Self, key: K) usize {
+ fn keyToIndex(hm: *const Self, key: K) usize {
return usize(hash(key)) % hm.entries.len;
}
};
@@ -243,21 +259,69 @@ test "basic hash map usage" {
var map = HashMap(i32, i32, hash_i32, eql_i32).init(&direct_allocator.allocator);
defer map.deinit();
- assert((map.put(1, 11) catch unreachable) == null);
- assert((map.put(2, 22) catch unreachable) == null);
- assert((map.put(3, 33) catch unreachable) == null);
- assert((map.put(4, 44) catch unreachable) == null);
- assert((map.put(5, 55) catch unreachable) == null);
+ assert((try map.put(1, 11)) == null);
+ assert((try map.put(2, 22)) == null);
+ assert((try map.put(3, 33)) == null);
+ assert((try map.put(4, 44)) == null);
+ assert((try map.put(5, 55)) == null);
- assert(??(map.put(5, 66) catch unreachable) == 55);
- assert(??(map.put(5, 55) catch unreachable) == 66);
+ assert((try map.put(5, 66)).? == 55);
+ assert((try map.put(5, 55)).? == 66);
- assert((??map.get(2)).value == 22);
+ assert(map.contains(2));
+ assert(map.get(2).?.value == 22);
_ = map.remove(2);
assert(map.remove(2) == null);
assert(map.get(2) == null);
}
+test "iterator hash map" {
+ var direct_allocator = std.heap.DirectAllocator.init();
+ defer direct_allocator.deinit();
+
+ var reset_map = HashMap(i32, i32, hash_i32, eql_i32).init(&direct_allocator.allocator);
+ defer reset_map.deinit();
+
+ assert((try reset_map.put(1, 11)) == null);
+ assert((try reset_map.put(2, 22)) == null);
+ assert((try reset_map.put(3, 33)) == null);
+
+ var keys = []i32{
+ 1,
+ 2,
+ 3,
+ };
+ var values = []i32{
+ 11,
+ 22,
+ 33,
+ };
+
+ var it = reset_map.iterator();
+ var count: usize = 0;
+ while (it.next()) |next| {
+ assert(next.key == keys[count]);
+ assert(next.value == values[count]);
+ count += 1;
+ }
+
+ assert(count == 3);
+ assert(it.next() == null);
+ it.reset();
+ count = 0;
+ while (it.next()) |next| {
+ assert(next.key == keys[count]);
+ assert(next.value == values[count]);
+ count += 1;
+ if (count == 2) break;
+ }
+
+ it.reset();
+ var entry = it.next().?;
+ assert(entry.key == keys[0]);
+ assert(entry.value == values[0]);
+}
+
fn hash_i32(x: i32) u32 {
return @bitCast(u32, x);
}
diff --git a/std/heap.zig b/std/heap.zig
index ca6736af1e..f5e0484b25 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -10,24 +10,21 @@ const c = std.c;
const Allocator = mem.Allocator;
pub const c_allocator = &c_allocator_state;
-var c_allocator_state = Allocator {
+var c_allocator_state = Allocator{
.allocFn = cAlloc,
.reallocFn = cRealloc,
.freeFn = cFree,
};
-fn cAlloc(self: &Allocator, n: usize, alignment: u29) ![]u8 {
+fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 {
assert(alignment <= @alignOf(c_longdouble));
- return if (c.malloc(n)) |buf|
- @ptrCast(&u8, buf)[0..n]
- else
- error.OutOfMemory;
+ return if (c.malloc(n)) |buf| @ptrCast([*]u8, buf)[0..n] else error.OutOfMemory;
}
-fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
- const old_ptr = @ptrCast(&c_void, old_mem.ptr);
+fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
if (c.realloc(old_ptr, new_size)) |buf| {
- return @ptrCast(&u8, buf)[0..new_size];
+ return @ptrCast([*]u8, buf)[0..new_size];
} else if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -35,28 +32,22 @@ fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![
}
}
-fn cFree(self: &Allocator, old_mem: []u8) void {
- const old_ptr = @ptrCast(&c_void, old_mem.ptr);
+fn cFree(self: *Allocator, old_mem: []u8) void {
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
c.free(old_ptr);
}
/// This allocator makes a syscall directly for every allocation and free.
+/// Thread-safe and lock-free.
pub const DirectAllocator = struct {
allocator: Allocator,
heap_handle: ?HeapHandle,
const HeapHandle = if (builtin.os == Os.windows) os.windows.HANDLE else void;
- //pub const canary_bytes = []u8 {48, 239, 128, 46, 18, 49, 147, 9, 195, 59, 203, 3, 245, 54, 9, 122};
- //pub const want_safety = switch (builtin.mode) {
- // builtin.Mode.Debug => true,
- // builtin.Mode.ReleaseSafe => true,
- // else => false,
- //};
-
pub fn init() DirectAllocator {
- return DirectAllocator {
- .allocator = Allocator {
+ return DirectAllocator{
+ .allocator = Allocator{
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
@@ -65,7 +56,7 @@ pub const DirectAllocator = struct {
};
}
- pub fn deinit(self: &DirectAllocator) void {
+ pub fn deinit(self: *DirectAllocator) void {
switch (builtin.os) {
Os.windows => if (self.heap_handle) |heap_handle| {
_ = os.windows.HeapDestroy(heap_handle);
@@ -74,41 +65,58 @@ pub const DirectAllocator = struct {
}
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
- assert(alignment <= os.page_size);
const p = os.posix;
- const addr = p.mmap(null, n, p.PROT_READ|p.PROT_WRITE,
- p.MAP_PRIVATE|p.MAP_ANONYMOUS, -1, 0);
- if (addr == p.MAP_FAILED) {
- return error.OutOfMemory;
- }
- return @intToPtr(&u8, addr)[0..n];
+ const alloc_size = if (alignment <= os.page_size) n else n + alignment;
+ const addr = p.mmap(null, alloc_size, p.PROT_READ | p.PROT_WRITE, p.MAP_PRIVATE | p.MAP_ANONYMOUS, -1, 0);
+ if (addr == p.MAP_FAILED) return error.OutOfMemory;
+ if (alloc_size == n) return @intToPtr([*]u8, addr)[0..n];
+
+ const aligned_addr = (addr & ~usize(alignment - 1)) + alignment;
+
+                // We can unmap the unused portions of our mmap, but we must pass
+                // munmap only bytes that lie outside our allocated pages, or it
+                // will happily eat us too.
+
+ // Since alignment > page_size, we are by definition on a page boundary.
+ const unused_start = addr;
+ const unused_len = aligned_addr - 1 - unused_start;
+
+ const err = p.munmap(unused_start, unused_len);
+ assert(p.getErrno(err) == 0);
+
+ // It is impossible that there is an unoccupied page at the top of our
+ // mmap.
+
+ return @intToPtr([*]u8, aligned_addr)[0..n];
},
Os.windows => {
const amt = n + alignment + @sizeOf(usize);
- const heap_handle = self.heap_handle ?? blk: {
- const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) ?? return error.OutOfMemory;
- self.heap_handle = hh;
- break :blk hh;
+ const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
+ const heap_handle = optional_heap_handle orelse blk: {
+ const hh = os.windows.HeapCreate(0, amt, 0) orelse return error.OutOfMemory;
+ const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse break :blk hh;
+ _ = os.windows.HeapDestroy(hh);
+ break :blk other_hh.?; // can't be null because of the cmpxchg
};
- const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) ?? return error.OutOfMemory;
+ const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
const root_addr = @ptrToInt(ptr);
const rem = @rem(root_addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_addr = root_addr + march_forward_bytes;
const record_addr = adjusted_addr + n;
- *@intToPtr(&align(1) usize, record_addr) = root_addr;
- return @intToPtr(&u8, adjusted_addr)[0..n];
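+                // Record the root address returned by HeapAlloc just past the
+                // user's n bytes so realloc/free can recover the original pointer.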
+ @intToPtr(*align(1) usize, record_addr).* = root_addr;
+ return @intToPtr([*]u8, adjusted_addr)[0..n];
},
else => @compileError("Unsupported OS"),
}
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@@ -120,7 +128,7 @@ pub const DirectAllocator = struct {
const rem = @rem(new_addr_end, os.page_size);
const new_addr_end_rounded = new_addr_end + if (rem == 0) 0 else (os.page_size - rem);
if (old_addr_end > new_addr_end_rounded) {
- _ = os.posix.munmap(@intToPtr(&u8, new_addr_end_rounded), old_addr_end - new_addr_end_rounded);
+ _ = os.posix.munmap(new_addr_end_rounded, old_addr_end - new_addr_end_rounded);
}
return old_mem[0..new_size];
}
@@ -132,13 +140,13 @@ pub const DirectAllocator = struct {
Os.windows => {
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
const old_record_addr = old_adjusted_addr + old_mem.len;
- const root_addr = *@intToPtr(&align(1) usize, old_record_addr);
- const old_ptr = @intToPtr(os.windows.LPVOID, root_addr);
+ const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
+ const old_ptr = @intToPtr(*c_void, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
- const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: {
+ const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) orelse blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
const new_record_addr = old_record_addr - new_size + old_mem.len;
- *@intToPtr(&align(1) usize, new_record_addr) = root_addr;
+ @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
return old_mem[0..new_size];
};
const offset = old_adjusted_addr - root_addr;
@@ -146,25 +154,25 @@ pub const DirectAllocator = struct {
const new_adjusted_addr = new_root_addr + offset;
assert(new_adjusted_addr % alignment == 0);
const new_record_addr = new_adjusted_addr + new_size;
- *@intToPtr(&align(1) usize, new_record_addr) = new_root_addr;
- return @intToPtr(&u8, new_adjusted_addr)[0..new_size];
+ @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
+ return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
},
else => @compileError("Unsupported OS"),
}
}
- fn free(allocator: &Allocator, bytes: []u8) void {
+ fn free(allocator: *Allocator, bytes: []u8) void {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
- _ = os.posix.munmap(bytes.ptr, bytes.len);
+ _ = os.posix.munmap(@ptrToInt(bytes.ptr), bytes.len);
},
Os.windows => {
const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
- const root_addr = *@intToPtr(&align(1) usize, record_addr);
- const ptr = @intToPtr(os.windows.LPVOID, root_addr);
- _ = os.windows.HeapFree(??self.heap_handle, 0, ptr);
+ const root_addr = @intToPtr(*align(1) usize, record_addr).*;
+ const ptr = @intToPtr(*c_void, root_addr);
+ _ = os.windows.HeapFree(self.heap_handle.?, 0, ptr);
},
else => @compileError("Unsupported OS"),
}
@@ -176,15 +184,15 @@ pub const DirectAllocator = struct {
pub const ArenaAllocator = struct {
pub allocator: Allocator,
- child_allocator: &Allocator,
+ child_allocator: *Allocator,
buffer_list: std.LinkedList([]u8),
end_index: usize,
const BufNode = std.LinkedList([]u8).Node;
- pub fn init(child_allocator: &Allocator) ArenaAllocator {
- return ArenaAllocator {
- .allocator = Allocator {
+ pub fn init(child_allocator: *Allocator) ArenaAllocator {
+ return ArenaAllocator{
+ .allocator = Allocator{
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
@@ -195,7 +203,7 @@ pub const ArenaAllocator = struct {
};
}
- pub fn deinit(self: &ArenaAllocator) void {
+ pub fn deinit(self: *ArenaAllocator) void {
var it = self.buffer_list.first;
while (it) |node| {
// this has to occur before the free because the free frees node
@@ -205,7 +213,7 @@ pub const ArenaAllocator = struct {
}
}
- fn createNode(self: &ArenaAllocator, prev_len: usize, minimum_size: usize) !&BufNode {
+ fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
const actual_min_size = minimum_size + @sizeOf(BufNode);
var len = prev_len;
while (true) {
@@ -214,9 +222,9 @@ pub const ArenaAllocator = struct {
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
- const buf_node_slice = ([]BufNode)(buf[0..@sizeOf(BufNode)]);
+ const buf_node_slice = @bytesToSlice(BufNode, buf[0..@sizeOf(BufNode)]);
const buf_node = &buf_node_slice[0];
- *buf_node = BufNode {
+ buf_node.* = BufNode{
.data = buf,
.prev = null,
.next = null,
@@ -226,7 +234,7 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment);
@@ -241,13 +249,13 @@ pub const ArenaAllocator = struct {
cur_node = try self.createNode(cur_buf.len, n + alignment);
continue;
}
- const result = cur_buf[adjusted_index .. new_end_index];
+ const result = cur_buf[adjusted_index..new_end_index];
self.end_index = new_end_index;
return result;
}
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -257,7 +265,7 @@ pub const ArenaAllocator = struct {
}
}
- fn free(allocator: &Allocator, bytes: []u8) void { }
+ fn free(allocator: *Allocator, bytes: []u8) void {}
};
pub const FixedBufferAllocator = struct {
@@ -266,8 +274,8 @@ pub const FixedBufferAllocator = struct {
buffer: []u8,
pub fn init(buffer: []u8) FixedBufferAllocator {
- return FixedBufferAllocator {
- .allocator = Allocator {
+ return FixedBufferAllocator{
+ .allocator = Allocator{
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
@@ -277,9 +285,9 @@ pub const FixedBufferAllocator = struct {
};
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
- const addr = @ptrToInt(&self.buffer[self.end_index]);
+ const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
const rem = @rem(addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_index = self.end_index + march_forward_bytes;
@@ -287,13 +295,69 @@ pub const FixedBufferAllocator = struct {
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
- const result = self.buffer[adjusted_index .. new_end_index];
+ const result = self.buffer[adjusted_index..new_end_index];
self.end_index = new_end_index;
return result;
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
+ assert(old_mem.len <= self.end_index);
+ if (new_size <= old_mem.len) {
+ return old_mem[0..new_size];
+ } else if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len) {
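+            // old_mem is the most recent allocation, so we can grow it in place
+            // by simply bumping end_index.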
+ const start_index = self.end_index - old_mem.len;
+ const new_end_index = start_index + new_size;
+ if (new_end_index > self.buffer.len) return error.OutOfMemory;
+ const result = self.buffer[start_index..new_end_index];
+ self.end_index = new_end_index;
+ return result;
+ } else {
+ const result = try alloc(allocator, new_size, alignment);
+ mem.copy(u8, result, old_mem);
+ return result;
+ }
+ }
+
+ fn free(allocator: *Allocator, bytes: []u8) void {}
+};
+
+/// Thread-safe and lock-free.
+pub const ThreadSafeFixedBufferAllocator = struct {
+ allocator: Allocator,
+ end_index: usize,
+ buffer: []u8,
+
+ pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
+ return ThreadSafeFixedBufferAllocator{
+ .allocator = Allocator{
+ .allocFn = alloc,
+ .reallocFn = realloc,
+ .freeFn = free,
+ },
+ .buffer = buffer,
+ .end_index = 0,
+ };
+ }
+
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
+ const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
+ var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
+ while (true) {
+ const addr = @ptrToInt(self.buffer.ptr) + end_index;
+ const rem = @rem(addr, alignment);
+ const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
+ const adjusted_index = end_index + march_forward_bytes;
+ const new_end_index = adjusted_index + n;
+ if (new_end_index > self.buffer.len) {
+ return error.OutOfMemory;
+ }
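+            // Publish the new end_index; if another thread won the race,
+            // cmpxchgWeak returns its value and we retry with that.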
+ end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
+ }
+ }
+
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -303,10 +367,75 @@ pub const FixedBufferAllocator = struct {
}
}
- fn free(allocator: &Allocator, bytes: []u8) void { }
+ fn free(allocator: *Allocator, bytes: []u8) void {}
};
+pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
+ return StackFallbackAllocator(size){
+ .buffer = undefined,
+ .fallback_allocator = fallback_allocator,
+ .fixed_buffer_allocator = undefined,
+ .allocator = Allocator{
+ .allocFn = StackFallbackAllocator(size).alloc,
+ .reallocFn = StackFallbackAllocator(size).realloc,
+ .freeFn = StackFallbackAllocator(size).free,
+ },
+ };
+}
+pub fn StackFallbackAllocator(comptime size: usize) type {
+ return struct {
+ const Self = this;
+
+ buffer: [size]u8,
+ allocator: Allocator,
+ fallback_allocator: *Allocator,
+ fixed_buffer_allocator: FixedBufferAllocator,
+
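+        /// Note: this re-initializes the fixed buffer allocator, so any
+        /// previously returned stack-buffer allocations are invalidated.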
+ pub fn get(self: *Self) *Allocator {
+ self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
+ return &self.allocator;
+ }
+
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+ return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, n, alignment) catch
+ self.fallback_allocator.allocFn(self.fallback_allocator, n, alignment);
+ }
+
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+ const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
+ @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
+ if (in_buffer) {
+ return FixedBufferAllocator.realloc(
+ &self.fixed_buffer_allocator.allocator,
+ old_mem,
+ new_size,
+ alignment,
+ ) catch {
+ const result = try self.fallback_allocator.allocFn(
+ self.fallback_allocator,
+ new_size,
+ alignment,
+ );
+ mem.copy(u8, result, old_mem);
+ return result;
+ };
+ }
+ return self.fallback_allocator.reallocFn(self.fallback_allocator, old_mem, new_size, alignment);
+ }
+
+ fn free(allocator: *Allocator, bytes: []u8) void {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+ const in_buffer = @ptrToInt(bytes.ptr) >= @ptrToInt(&self.buffer) and
+ @ptrToInt(bytes.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
+ if (!in_buffer) {
+ return self.fallback_allocator.freeFn(self.fallback_allocator, bytes);
+ }
+ }
+ };
+}
test "c_allocator" {
if (builtin.link_libc) {
@@ -322,6 +451,8 @@ test "DirectAllocator" {
const allocator = &direct_allocator.allocator;
try testAllocator(allocator);
+ try testAllocatorAligned(allocator, 16);
+ try testAllocatorLargeAlignment(allocator);
}
test "ArenaAllocator" {
@@ -332,6 +463,8 @@ test "ArenaAllocator" {
defer arena_allocator.deinit();
try testAllocator(&arena_allocator.allocator);
+ try testAllocatorAligned(&arena_allocator.allocator, 16);
+ try testAllocatorLargeAlignment(&arena_allocator.allocator);
}
var test_fixed_buffer_allocator_memory: [30000 * @sizeOf(usize)]u8 = undefined;
@@ -339,24 +472,123 @@ test "FixedBufferAllocator" {
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
try testAllocator(&fixed_buffer_allocator.allocator);
+ try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
+ try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
}
-fn testAllocator(allocator: &mem.Allocator) !void {
- var slice = try allocator.alloc(&i32, 100);
+test "FixedBufferAllocator Reuse memory on realloc" {
+ var small_fixed_buffer: [10]u8 = undefined;
+ // check if we re-use the memory
+ {
+ var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+ var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
+ assert(slice0.len == 5);
+ var slice1 = try fixed_buffer_allocator.allocator.realloc(u8, slice0, 10);
+ assert(slice1.ptr == slice0.ptr);
+ assert(slice1.len == 10);
+ debug.assertError(fixed_buffer_allocator.allocator.realloc(u8, slice1, 11), error.OutOfMemory);
+ }
+ // check that we don't re-use the memory if it's not the most recent block
+ {
+ var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+
+ var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
+ slice0[0] = 1;
+ slice0[1] = 2;
+ var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
+ var slice2 = try fixed_buffer_allocator.allocator.realloc(u8, slice0, 4);
+ assert(slice0.ptr != slice2.ptr);
+ assert(slice1.ptr != slice2.ptr);
+ assert(slice2[0] == 1);
+ assert(slice2[1] == 2);
+ }
+}
+
+test "ThreadSafeFixedBufferAllocator" {
+ var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+
+ try testAllocator(&fixed_buffer_allocator.allocator);
+ try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
+ try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
+}
+
+fn testAllocator(allocator: *mem.Allocator) !void {
+ var slice = try allocator.alloc(*i32, 100);
+ assert(slice.len == 100);
for (slice) |*item, i| {
- *item = try allocator.create(i32);
- **item = i32(i);
+ item.* = try allocator.create(@intCast(i32, i));
}
- for (slice) |item, i| {
+ slice = try allocator.realloc(*i32, slice, 20000);
+ assert(slice.len == 20000);
+
+ for (slice[0..100]) |item, i| {
+ assert(item.* == @intCast(i32, i));
allocator.destroy(item);
}
- slice = try allocator.realloc(&i32, slice, 20000);
- slice = try allocator.realloc(&i32, slice, 50);
- slice = try allocator.realloc(&i32, slice, 25);
- slice = try allocator.realloc(&i32, slice, 10);
+ slice = try allocator.realloc(*i32, slice, 50);
+ assert(slice.len == 50);
+ slice = try allocator.realloc(*i32, slice, 25);
+ assert(slice.len == 25);
+ slice = try allocator.realloc(*i32, slice, 0);
+ assert(slice.len == 0);
+ slice = try allocator.realloc(*i32, slice, 10);
+ assert(slice.len == 10);
+
+ allocator.free(slice);
+}
+
+fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void {
+ // initial
+ var slice = try allocator.alignedAlloc(u8, alignment, 10);
+ assert(slice.len == 10);
+ // grow
+ slice = try allocator.alignedRealloc(u8, alignment, slice, 100);
+ assert(slice.len == 100);
+ // shrink
+ slice = try allocator.alignedRealloc(u8, alignment, slice, 10);
+ assert(slice.len == 10);
+ // go to zero
+ slice = try allocator.alignedRealloc(u8, alignment, slice, 0);
+ assert(slice.len == 0);
+ // realloc from zero
+ slice = try allocator.alignedRealloc(u8, alignment, slice, 100);
+ assert(slice.len == 100);
+ // shrink with shrink
+ slice = allocator.alignedShrink(u8, alignment, slice, 10);
+ assert(slice.len == 10);
+ // shrink to zero
+ slice = allocator.alignedShrink(u8, alignment, slice, 0);
+ assert(slice.len == 0);
+}
+
+fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
+    // Maybe a platform's page_size is actually the same as or
+    // very near the maximum usize?
+ if (os.page_size << 2 > @maxValue(usize)) return;
+
+ const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
+ const large_align = u29(os.page_size << 2);
+
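+    // align_mask keeps only the bits at or above large_align, so a pointer is
+    // aligned exactly when ptr & align_mask == ptr.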
+ var align_mask: usize = undefined;
+ _ = @shlWithOverflow(usize, ~usize(0), USizeShift(@ctz(large_align)), &align_mask);
+
+ var slice = try allocator.allocFn(allocator, 500, large_align);
+ debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
+
+ slice = try allocator.reallocFn(allocator, slice, 100, large_align);
+ debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
+
+ slice = try allocator.reallocFn(allocator, slice, 5000, large_align);
+ debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
+
+ slice = try allocator.reallocFn(allocator, slice, 10, large_align);
+ debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
+
+ slice = try allocator.reallocFn(allocator, slice, 20000, large_align);
+ debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
allocator.free(slice);
}
diff --git a/std/index.zig b/std/index.zig
index 07c4360aab..2f4cfb7553 100644
--- a/std/index.zig
+++ b/std/index.zig
@@ -7,7 +7,10 @@ pub const BufferOutStream = @import("buffer.zig").BufferOutStream;
pub const HashMap = @import("hash_map.zig").HashMap;
pub const LinkedList = @import("linked_list.zig").LinkedList;
pub const IntrusiveLinkedList = @import("linked_list.zig").IntrusiveLinkedList;
+pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
+pub const DynLib = @import("dynamic_library.zig").DynLib;
+pub const atomic = @import("atomic/index.zig");
pub const base64 = @import("base64.zig");
pub const build = @import("build.zig");
pub const c = @import("c/index.zig");
@@ -22,6 +25,7 @@ pub const fmt = @import("fmt/index.zig");
pub const hash = @import("hash/index.zig");
pub const heap = @import("heap.zig");
pub const io = @import("io.zig");
+pub const json = @import("json.zig");
pub const macho = @import("macho.zig");
pub const math = @import("math/index.zig");
pub const mem = @import("mem.zig");
@@ -32,14 +36,18 @@ pub const sort = @import("sort.zig");
pub const unicode = @import("unicode.zig");
pub const zig = @import("zig/index.zig");
+pub const lazyInit = @import("lazy_init.zig").lazyInit;
+
test "std" {
// run tests from these
+ _ = @import("atomic/index.zig");
_ = @import("array_list.zig");
_ = @import("buf_map.zig");
_ = @import("buf_set.zig");
_ = @import("buffer.zig");
_ = @import("hash_map.zig");
_ = @import("linked_list.zig");
+ _ = @import("segmented_list.zig");
_ = @import("base64.zig");
_ = @import("build.zig");
@@ -54,6 +62,7 @@ test "std" {
_ = @import("fmt/index.zig");
_ = @import("hash/index.zig");
_ = @import("io.zig");
+ _ = @import("json.zig");
_ = @import("macho.zig");
_ = @import("math/index.zig");
_ = @import("mem.zig");
@@ -64,4 +73,5 @@ test "std" {
_ = @import("sort.zig");
_ = @import("unicode.zig");
_ = @import("zig/index.zig");
+ _ = @import("lazy_init.zig");
}
diff --git a/std/io.zig b/std/io.zig
index 7b72af15e4..ff73c04f78 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -18,53 +18,36 @@ const is_windows = builtin.os == builtin.Os.windows;
const GetStdIoErrs = os.WindowsGetStdHandleErrs;
pub fn getStdErr() GetStdIoErrs!File {
- const handle = if (is_windows)
- try os.windowsGetStdHandle(os.windows.STD_ERROR_HANDLE)
- else if (is_posix)
- os.posix.STDERR_FILENO
- else
- unreachable;
+ const handle = if (is_windows) try os.windowsGetStdHandle(os.windows.STD_ERROR_HANDLE) else if (is_posix) os.posix.STDERR_FILENO else unreachable;
return File.openHandle(handle);
}
pub fn getStdOut() GetStdIoErrs!File {
- const handle = if (is_windows)
- try os.windowsGetStdHandle(os.windows.STD_OUTPUT_HANDLE)
- else if (is_posix)
- os.posix.STDOUT_FILENO
- else
- unreachable;
+ const handle = if (is_windows) try os.windowsGetStdHandle(os.windows.STD_OUTPUT_HANDLE) else if (is_posix) os.posix.STDOUT_FILENO else unreachable;
return File.openHandle(handle);
}
pub fn getStdIn() GetStdIoErrs!File {
- const handle = if (is_windows)
- try os.windowsGetStdHandle(os.windows.STD_INPUT_HANDLE)
- else if (is_posix)
- os.posix.STDIN_FILENO
- else
- unreachable;
+ const handle = if (is_windows) try os.windowsGetStdHandle(os.windows.STD_INPUT_HANDLE) else if (is_posix) os.posix.STDIN_FILENO else unreachable;
return File.openHandle(handle);
}
/// Implementation of InStream trait for File
pub const FileInStream = struct {
- file: &File,
+ file: *File,
stream: Stream,
pub const Error = @typeOf(File.read).ReturnType.ErrorSet;
pub const Stream = InStream(Error);
- pub fn init(file: &File) FileInStream {
- return FileInStream {
+ pub fn init(file: *File) FileInStream {
+ return FileInStream{
.file = file,
- .stream = Stream {
- .readFn = readFn,
- },
+ .stream = Stream{ .readFn = readFn },
};
}
- fn readFn(in_stream: &Stream, buffer: []u8) Error!usize {
+ fn readFn(in_stream: *Stream, buffer: []u8) Error!usize {
const self = @fieldParentPtr(FileInStream, "stream", in_stream);
return self.file.read(buffer);
}
@@ -72,22 +55,20 @@ pub const FileInStream = struct {
/// Implementation of OutStream trait for File
pub const FileOutStream = struct {
- file: &File,
+ file: *File,
stream: Stream,
pub const Error = File.WriteError;
pub const Stream = OutStream(Error);
- pub fn init(file: &File) FileOutStream {
- return FileOutStream {
+ pub fn init(file: *File) FileOutStream {
+ return FileOutStream{
.file = file,
- .stream = Stream {
- .writeFn = writeFn,
- },
+ .stream = Stream{ .writeFn = writeFn },
};
}
- fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
+ fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(FileOutStream, "stream", out_stream);
return self.file.write(bytes);
}
@@ -101,12 +82,12 @@ pub fn InStream(comptime ReadError: type) type {
/// Return the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
- readFn: fn(self: &Self, buffer: []u8) Error!usize,
+ readFn: fn (self: *Self, buffer: []u8) Error!usize,
/// Replaces `buffer` contents by reading from the stream until it is finished.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and
/// the contents read from the stream are lost.
- pub fn readAllBuffer(self: &Self, buffer: &Buffer, max_size: usize) !void {
+ pub fn readAllBuffer(self: *Self, buffer: *Buffer, max_size: usize) !void {
try buffer.resize(0);
var actual_buf_len: usize = 0;
@@ -121,8 +102,7 @@ pub fn InStream(comptime ReadError: type) type {
}
const new_buf_size = math.min(max_size, actual_buf_len + os.page_size);
- if (new_buf_size == actual_buf_len)
- return error.StreamTooLong;
+ if (new_buf_size == actual_buf_len) return error.StreamTooLong;
try buffer.resize(new_buf_size);
}
}
@@ -131,7 +111,7 @@ pub fn InStream(comptime ReadError: type) type {
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
- pub fn readAllAlloc(self: &Self, allocator: &mem.Allocator, max_size: usize) ![]u8 {
+ pub fn readAllAlloc(self: *Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
@@ -143,7 +123,7 @@ pub fn InStream(comptime ReadError: type) type {
/// Does not include the delimiter in the result.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and the contents
/// read from the stream so far are lost.
- pub fn readUntilDelimiterBuffer(self: &Self, buffer: &Buffer, delimiter: u8, max_size: usize) !void {
+ pub fn readUntilDelimiterBuffer(self: *Self, buffer: *Buffer, delimiter: u8, max_size: usize) !void {
try buffer.resize(0);
while (true) {
@@ -165,9 +145,7 @@ pub fn InStream(comptime ReadError: type) type {
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
- pub fn readUntilDelimiterAlloc(self: &Self, allocator: &mem.Allocator,
- delimiter: u8, max_size: usize) ![]u8
- {
+ pub fn readUntilDelimiterAlloc(self: *Self, allocator: *mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
@@ -178,43 +156,43 @@ pub fn InStream(comptime ReadError: type) type {
/// Returns the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
- pub fn read(self: &Self, buffer: []u8) !usize {
+ pub fn read(self: *Self, buffer: []u8) !usize {
return self.readFn(self, buffer);
}
/// Same as `read` but end of stream returns `error.EndOfStream`.
- pub fn readNoEof(self: &Self, buf: []u8) !void {
+ pub fn readNoEof(self: *Self, buf: []u8) !void {
const amt_read = try self.read(buf);
if (amt_read < buf.len) return error.EndOfStream;
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
- pub fn readByte(self: &Self) !u8 {
+ pub fn readByte(self: *Self) !u8 {
var result: [1]u8 = undefined;
try self.readNoEof(result[0..]);
return result[0];
}
/// Same as `readByte` except the returned byte is signed.
- pub fn readByteSigned(self: &Self) !i8 {
+ pub fn readByteSigned(self: *Self) !i8 {
return @bitCast(i8, try self.readByte());
}
- pub fn readIntLe(self: &Self, comptime T: type) !T {
+ pub fn readIntLe(self: *Self, comptime T: type) !T {
return self.readInt(builtin.Endian.Little, T);
}
- pub fn readIntBe(self: &Self, comptime T: type) !T {
+ pub fn readIntBe(self: *Self, comptime T: type) !T {
return self.readInt(builtin.Endian.Big, T);
}
- pub fn readInt(self: &Self, endian: builtin.Endian, comptime T: type) !T {
+ pub fn readInt(self: *Self, endian: builtin.Endian, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readInt(bytes, T, endian);
}
- pub fn readVarInt(self: &Self, endian: builtin.Endian, comptime T: type, size: usize) !T {
+ pub fn readVarInt(self: *Self, endian: builtin.Endian, comptime T: type, size: usize) !T {
assert(size <= @sizeOf(T));
assert(size <= 8);
var input_buf: [8]u8 = undefined;
@@ -222,6 +200,13 @@ pub fn InStream(comptime ReadError: type) type {
try self.readNoEof(input_slice);
return mem.readInt(input_slice, T, endian);
}
+
+ pub fn skipBytes(self: *Self, num_bytes: usize) !void {
+ var i: usize = 0;
+ while (i < num_bytes) : (i += 1) {
+ _ = try self.readByte();
+ }
+ }
};
}
@@ -230,45 +215,64 @@ pub fn OutStream(comptime WriteError: type) type {
const Self = this;
pub const Error = WriteError;
- writeFn: fn(self: &Self, bytes: []const u8) Error!void,
+ writeFn: fn (self: *Self, bytes: []const u8) Error!void,
- pub fn print(self: &Self, comptime format: []const u8, args: ...) !void {
+ pub fn print(self: *Self, comptime format: []const u8, args: ...) !void {
return std.fmt.format(self, Error, self.writeFn, format, args);
}
- pub fn write(self: &Self, bytes: []const u8) !void {
+ pub fn write(self: *Self, bytes: []const u8) !void {
return self.writeFn(self, bytes);
}
- pub fn writeByte(self: &Self, byte: u8) !void {
- const slice = (&byte)[0..1];
+ pub fn writeByte(self: *Self, byte: u8) !void {
+ const slice = (*[1]u8)(&byte)[0..];
return self.writeFn(self, slice);
}
- pub fn writeByteNTimes(self: &Self, byte: u8, n: usize) !void {
- const slice = (&byte)[0..1];
+ pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) !void {
+ const slice = (*[1]u8)(&byte)[0..];
var i: usize = 0;
while (i < n) : (i += 1) {
try self.writeFn(self, slice);
}
}
+
+ pub fn writeIntLe(self: *Self, comptime T: type, value: T) !void {
+ return self.writeInt(builtin.Endian.Little, T, value);
+ }
+
+ pub fn writeIntBe(self: *Self, comptime T: type, value: T) !void {
+ return self.writeInt(builtin.Endian.Big, T, value);
+ }
+
+ pub fn writeInt(self: *Self, endian: builtin.Endian, comptime T: type, value: T) !void {
+ var bytes: [@sizeOf(T)]u8 = undefined;
+ mem.writeInt(bytes[0..], value, endian);
+ return self.writeFn(self, bytes);
+ }
};
}
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
-pub fn writeFile(allocator: &mem.Allocator, path: []const u8, data: []const u8) !void {
+pub fn writeFile(allocator: *mem.Allocator, path: []const u8, data: []const u8) !void {
var file = try File.openWrite(allocator, path);
defer file.close();
try file.write(data);
}
/// On success, caller owns returned buffer.
-pub fn readFileAlloc(allocator: &mem.Allocator, path: []const u8) ![]u8 {
+pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 {
+ return readFileAllocAligned(allocator, path, @alignOf(u8));
+}
+
+/// On success, caller owns returned buffer.
+pub fn readFileAllocAligned(allocator: *mem.Allocator, path: []const u8, comptime A: u29) ![]align(A) u8 {
var file = try File.openRead(allocator, path);
defer file.close();
const size = try file.getEndPos();
- const buf = try allocator.alloc(u8, size);
+ const buf = try allocator.alignedAlloc(u8, A, size);
errdefer allocator.free(buf);
var adapter = FileInStream.init(&file);
@@ -283,18 +287,18 @@ pub fn BufferedInStream(comptime Error: type) type {
pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type) type {
return struct {
const Self = this;
- const Stream = InStream(Error);
+ const Stream = InStream(Error);
pub stream: Stream,
- unbuffered_in_stream: &Stream,
+ unbuffered_in_stream: *Stream,
buffer: [buffer_size]u8,
start_index: usize,
end_index: usize,
- pub fn init(unbuffered_in_stream: &Stream) Self {
- return Self {
+ pub fn init(unbuffered_in_stream: *Stream) Self {
+ return Self{
.unbuffered_in_stream = unbuffered_in_stream,
.buffer = undefined,
@@ -305,13 +309,11 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
.start_index = buffer_size,
.end_index = buffer_size,
- .stream = Stream {
- .readFn = readFn,
- },
+ .stream = Stream{ .readFn = readFn },
};
}
- fn readFn(in_stream: &Stream, dest: []u8) !usize {
+ fn readFn(in_stream: *Stream, dest: []u8) !usize {
const self = @fieldParentPtr(Self, "stream", in_stream);
var dest_index: usize = 0;
@@ -350,6 +352,150 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
};
}
+/// Creates a stream which supports 'un-reading' data, so that it can be read again.
+/// This makes look-ahead style parsing much easier.
+pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) type {
+ return struct {
+ const Self = this;
+ pub const Error = InStreamError;
+ pub const Stream = InStream(Error);
+
+ pub stream: Stream,
+ base: *Stream,
+
+ // Right now the look-ahead space is statically allocated, but a version with dynamic allocation
+ // is not too difficult to derive from this.
+ buffer: [buffer_size]u8,
+ index: usize,
+ at_end: bool,
+
+ pub fn init(base: *Stream) Self {
+ return Self{
+ .base = base,
+ .buffer = undefined,
+ .index = 0,
+ .at_end = false,
+ .stream = Stream{ .readFn = readFn },
+ };
+ }
+
+ pub fn putBackByte(self: *Self, byte: u8) void {
+ self.buffer[self.index] = byte;
+ self.index += 1;
+ }
+
+ pub fn putBack(self: *Self, bytes: []const u8) void {
+ var pos = bytes.len;
+ while (pos != 0) {
+ pos -= 1;
+ self.putBackByte(bytes[pos]);
+ }
+ }
+
+ fn readFn(in_stream: *Stream, dest: []u8) Error!usize {
+ const self = @fieldParentPtr(Self, "stream", in_stream);
+
+ // copy over anything putBack()'d
+ var pos: usize = 0;
+ while (pos < dest.len and self.index != 0) {
+ dest[pos] = self.buffer[self.index - 1];
+ self.index -= 1;
+ pos += 1;
+ }
+
+ if (pos == dest.len or self.at_end) {
+ return pos;
+ }
+
+ // ask the backing stream for more
+ const left = dest.len - pos;
+ const read = try self.base.read(dest[pos..]);
+ assert(read <= left);
+
+ self.at_end = (read < left);
+ return pos + read;
+ }
+
+ };
+}
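+
+// Illustrative sketch, not part of this change: putting a byte back so that a
+// look-ahead parser can re-read it. `base` and `SomeError` stand in for any
+// existing InStream value and its error set.
+//
+//     var ps = PeekStream(1, SomeError).init(&base.stream);
+//     const c = try ps.stream.readByte();
+//     ps.putBackByte(c); // the next read yields `c` again before touching `base`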
+
+pub const SliceInStream = struct {
+ const Self = this;
+ pub const Error = error { };
+ pub const Stream = InStream(Error);
+
+ pub stream: Stream,
+
+ pos: usize,
+ slice: []const u8,
+
+ pub fn init(slice: []const u8) Self {
+ return Self{
+ .slice = slice,
+ .pos = 0,
+ .stream = Stream{ .readFn = readFn },
+ };
+ }
+
+ fn readFn(in_stream: *Stream, dest: []u8) Error!usize {
+ const self = @fieldParentPtr(Self, "stream", in_stream);
+ const size = math.min(dest.len, self.slice.len - self.pos);
+ const end = self.pos + size;
+
+ mem.copy(u8, dest[0..size], self.slice[self.pos..end]);
+ self.pos = end;
+
+ return size;
+ }
+};
+
+/// This is a simple OutStream that writes to a slice, and returns an error
+/// when it runs out of space.
+pub const SliceOutStream = struct {
+ pub const Error = error{OutOfSpace};
+ pub const Stream = OutStream(Error);
+
+ pub stream: Stream,
+
+ pos: usize,
+ slice: []u8,
+
+ pub fn init(slice: []u8) SliceOutStream {
+ return SliceOutStream{
+ .slice = slice,
+ .pos = 0,
+ .stream = Stream{ .writeFn = writeFn },
+ };
+ }
+
+ pub fn getWritten(self: *const SliceOutStream) []const u8 {
+ return self.slice[0..self.pos];
+ }
+
+ pub fn reset(self: *SliceOutStream) void {
+ self.pos = 0;
+ }
+
+ fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
+ const self = @fieldParentPtr(SliceOutStream, "stream", out_stream);
+
+ assert(self.pos <= self.slice.len);
+
+ const n =
+ if (self.pos + bytes.len <= self.slice.len)
+ bytes.len
+ else
+ self.slice.len - self.pos;
+
+ std.mem.copy(u8, self.slice[self.pos..self.pos + n], bytes[0..n]);
+ self.pos += n;
+
+ if (n < bytes.len) {
+ return Error.OutOfSpace;
+ }
+ }
+};
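+
+// Illustrative sketch, not part of this change: formatting into a fixed buffer
+// with no allocator involved.
+//
+//     var buf: [32]u8 = undefined;
+//     var sos = SliceOutStream.init(buf[0..]);
+//     const x: i32 = 42;
+//     try sos.stream.print("x = {}", x);
+//     // sos.getWritten() now holds "x = 42"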
+
pub fn BufferedOutStream(comptime Error: type) type {
return BufferedOutStreamCustom(os.page_size, Error);
}
@@ -362,28 +508,26 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
pub stream: Stream,
- unbuffered_out_stream: &Stream,
+ unbuffered_out_stream: *Stream,
buffer: [buffer_size]u8,
index: usize,
- pub fn init(unbuffered_out_stream: &Stream) Self {
- return Self {
+ pub fn init(unbuffered_out_stream: *Stream) Self {
+ return Self{
.unbuffered_out_stream = unbuffered_out_stream,
.buffer = undefined,
.index = 0,
- .stream = Stream {
- .writeFn = writeFn,
- },
+ .stream = Stream{ .writeFn = writeFn },
};
}
- pub fn flush(self: &Self) !void {
+ pub fn flush(self: *Self) !void {
try self.unbuffered_out_stream.write(self.buffer[0..self.index]);
self.index = 0;
}
- fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
+ fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(Self, "stream", out_stream);
if (bytes.len >= self.buffer.len) {
@@ -395,7 +539,7 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
while (src_index < bytes.len) {
const dest_space_left = self.buffer.len - self.index;
const copy_amt = math.min(dest_space_left, bytes.len - src_index);
- mem.copy(u8, self.buffer[self.index..], bytes[src_index..src_index + copy_amt]);
+ mem.copy(u8, self.buffer[self.index..], bytes[src_index .. src_index + copy_amt]);
self.index += copy_amt;
assert(self.index <= self.buffer.len);
if (self.index == self.buffer.len) {
@@ -409,43 +553,38 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
/// Implementation of OutStream trait for Buffer
pub const BufferOutStream = struct {
- buffer: &Buffer,
+ buffer: *Buffer,
stream: Stream,
pub const Error = error{OutOfMemory};
pub const Stream = OutStream(Error);
- pub fn init(buffer: &Buffer) BufferOutStream {
- return BufferOutStream {
+ pub fn init(buffer: *Buffer) BufferOutStream {
+ return BufferOutStream{
.buffer = buffer,
- .stream = Stream {
- .writeFn = writeFn,
- },
+ .stream = Stream{ .writeFn = writeFn },
};
}
- fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
+ fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(BufferOutStream, "stream", out_stream);
return self.buffer.append(bytes);
}
};
-
pub const BufferedAtomicFile = struct {
atomic_file: os.AtomicFile,
file_stream: FileOutStream,
buffered_stream: BufferedOutStream(FileOutStream.Error),
- pub fn create(allocator: &mem.Allocator, dest_path: []const u8) !&BufferedAtomicFile {
+ pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
// TODO with well defined copy elision we don't need this allocation
- var self = try allocator.create(BufferedAtomicFile);
- errdefer allocator.destroy(self);
-
- *self = BufferedAtomicFile {
+ var self = try allocator.create(BufferedAtomicFile{
.atomic_file = undefined,
.file_stream = undefined,
.buffered_stream = undefined,
- };
+ });
+ errdefer allocator.destroy(self);
self.atomic_file = try os.AtomicFile.init(allocator, dest_path, os.default_file_mode);
errdefer self.atomic_file.deinit();
@@ -456,18 +595,18 @@ pub const BufferedAtomicFile = struct {
}
/// always call destroy, even after successful finish()
- pub fn destroy(self: &BufferedAtomicFile) void {
+ pub fn destroy(self: *BufferedAtomicFile) void {
const allocator = self.atomic_file.allocator;
self.atomic_file.deinit();
allocator.destroy(self);
}
- pub fn finish(self: &BufferedAtomicFile) !void {
+ pub fn finish(self: *BufferedAtomicFile) !void {
try self.buffered_stream.flush();
try self.atomic_file.finish();
}
- pub fn stream(self: &BufferedAtomicFile) &OutStream(FileOutStream.Error) {
+ pub fn stream(self: *BufferedAtomicFile) *OutStream(FileOutStream.Error) {
return &self.buffered_stream.stream;
}
};
@@ -489,7 +628,7 @@ pub fn readLine(buf: []u8) !usize {
'\r' => {
// trash the following \n
_ = stream.readByte() catch return error.EndOfFile;
- return index;
+ return index;
},
'\n' => return index,
else => {
diff --git a/std/io_test.zig b/std/io_test.zig
index 89959b7b54..56f8a9a6ad 100644
--- a/std/io_test.zig
+++ b/std/io_test.zig
@@ -1,13 +1,16 @@
const std = @import("index.zig");
const io = std.io;
-const allocator = std.debug.global_allocator;
const DefaultPrng = std.rand.DefaultPrng;
const assert = std.debug.assert;
+const assertError = std.debug.assertError;
const mem = std.mem;
const os = std.os;
const builtin = @import("builtin");
test "write a file, read it, then delete it" {
+ var raw_bytes: [200 * 1024]u8 = undefined;
+ var allocator = &std.heap.FixedBufferAllocator.init(raw_bytes[0..]).allocator;
+
var data: [1024]u8 = undefined;
var prng = DefaultPrng.init(1234);
prng.random.bytes(data[0..]);
@@ -39,8 +42,93 @@ test "write a file, read it, then delete it" {
defer allocator.free(contents);
assert(mem.eql(u8, contents[0.."begin".len], "begin"));
- assert(mem.eql(u8, contents["begin".len..contents.len - "end".len], data));
+ assert(mem.eql(u8, contents["begin".len .. contents.len - "end".len], data));
assert(mem.eql(u8, contents[contents.len - "end".len ..], "end"));
}
try os.deleteFile(allocator, tmp_file_name);
}
+
+test "BufferOutStream" {
+ var bytes: [100]u8 = undefined;
+ var allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
+
+ var buffer = try std.Buffer.initSize(allocator, 0);
+ var buf_stream = &std.io.BufferOutStream.init(&buffer).stream;
+
+ const x: i32 = 42;
+ const y: i32 = 1234;
+ try buf_stream.print("x: {}\ny: {}\n", x, y);
+
+ assert(mem.eql(u8, buffer.toSlice(), "x: 42\ny: 1234\n"));
+}
+
+test "SliceInStream" {
+ const bytes = []const u8 { 1, 2, 3, 4, 5, 6, 7 };
+ var ss = io.SliceInStream.init(bytes);
+
+ var dest: [4]u8 = undefined;
+
+ var read = try ss.stream.read(dest[0..4]);
+ assert(read == 4);
+ assert(mem.eql(u8, dest[0..4], bytes[0..4]));
+
+ read = try ss.stream.read(dest[0..4]);
+ assert(read == 3);
+ assert(mem.eql(u8, dest[0..3], bytes[4..7]));
+
+ read = try ss.stream.read(dest[0..4]);
+ assert(read == 0);
+}
+
+test "PeekStream" {
+ const bytes = []const u8 { 1, 2, 3, 4, 5, 6, 7, 8 };
+ var ss = io.SliceInStream.init(bytes);
+ var ps = io.PeekStream(2, io.SliceInStream.Error).init(&ss.stream);
+
+ var dest: [4]u8 = undefined;
+
+ ps.putBackByte(9);
+ ps.putBackByte(10);
+
+ var read = try ps.stream.read(dest[0..4]);
+ assert(read == 4);
+ assert(dest[0] == 10);
+ assert(dest[1] == 9);
+ assert(mem.eql(u8, dest[2..4], bytes[0..2]));
+
+ read = try ps.stream.read(dest[0..4]);
+ assert(read == 4);
+ assert(mem.eql(u8, dest[0..4], bytes[2..6]));
+
+ read = try ps.stream.read(dest[0..4]);
+ assert(read == 2);
+ assert(mem.eql(u8, dest[0..2], bytes[6..8]));
+
+ ps.putBackByte(11);
+ ps.putBackByte(12);
+
+ read = try ps.stream.read(dest[0..4]);
+ assert(read == 2);
+ assert(dest[0] == 12);
+ assert(dest[1] == 11);
+}
+
+test "SliceOutStream" {
+ var buffer: [10]u8 = undefined;
+ var ss = io.SliceOutStream.init(buffer[0..]);
+
+ try ss.stream.write("Hello");
+ assert(mem.eql(u8, ss.getWritten(), "Hello"));
+
+ try ss.stream.write("world");
+ assert(mem.eql(u8, ss.getWritten(), "Helloworld"));
+
+ assertError(ss.stream.write("!"), error.OutOfSpace);
+ assert(mem.eql(u8, ss.getWritten(), "Helloworld"));
+
+ ss.reset();
+ assert(ss.getWritten().len == 0);
+
+ assertError(ss.stream.write("Hello world!"), error.OutOfSpace);
+ assert(mem.eql(u8, ss.getWritten(), "Hello worl"));
+}
diff --git a/std/json.zig b/std/json.zig
new file mode 100644
index 0000000000..e62d5a3466
--- /dev/null
+++ b/std/json.zig
@@ -0,0 +1,1389 @@
+// JSON parser conforming to RFC8259.
+//
+// https://tools.ietf.org/html/rfc8259
+
+const std = @import("index.zig");
+const debug = std.debug;
+const mem = std.mem;
+
+// A single token slice into the parent string.
+//
+// Use `token.slice()` on the input at the current position to get the current slice.
+pub const Token = struct {
+ id: Id,
+ // How many bytes do we skip before counting
+ offset: u1,
+ // Whether string contains a \uXXXX sequence and cannot be zero-copied
+ string_has_escape: bool,
+ // Whether number is simple and can be represented by an integer (i.e. no `.` or `e`)
+ number_is_integer: bool,
+ // How many bytes behind the current position the start of this token is.
+ count: usize,
+
+ pub const Id = enum {
+ ObjectBegin,
+ ObjectEnd,
+ ArrayBegin,
+ ArrayEnd,
+ String,
+ Number,
+ True,
+ False,
+ Null,
+ };
+
+ pub fn init(id: Id, count: usize, offset: u1) Token {
+ return Token{
+ .id = id,
+ .offset = offset,
+ .string_has_escape = false,
+ .number_is_integer = true,
+ .count = count,
+ };
+ }
+
+ pub fn initString(count: usize, has_unicode_escape: bool) Token {
+ return Token{
+ .id = Id.String,
+ .offset = 0,
+ .string_has_escape = has_unicode_escape,
+ .number_is_integer = true,
+ .count = count,
+ };
+ }
+
+ pub fn initNumber(count: usize, number_is_integer: bool) Token {
+ return Token{
+ .id = Id.Number,
+ .offset = 0,
+ .string_has_escape = false,
+ .number_is_integer = number_is_integer,
+ .count = count,
+ };
+ }
+
+ // A marker token is a zero-length token.
+ pub fn initMarker(id: Id) Token {
+ return Token{
+ .id = id,
+ .offset = 0,
+ .string_has_escape = false,
+ .number_is_integer = true,
+ .count = 0,
+ };
+ }
+
+ // Slice into the underlying input string.
+ pub fn slice(self: *const Token, input: []const u8, i: usize) []const u8 {
+ return input[i + self.offset - self.count .. i + self.offset];
+ }
+};
+
+// A small streaming JSON parser. This accepts input one byte at a time and returns tokens as
+// they are encountered. No copies or allocations are performed during parsing and the entire
+// parsing state requires ~40-50 bytes of stack space.
+//
+// Conforms strictly to RFC 8259.
+//
+// For a non-byte based wrapper, consider using TokenStream instead.
+pub const StreamingParser = struct {
+ // Current state
+ state: State,
+ // How many bytes we have counted for the current token
+ count: usize,
+ // What state to follow after parsing a string (either property or value string)
+ after_string_state: State,
+ // What state to follow after parsing a value (either top-level or value end)
+ after_value_state: State,
+ // If we stopped now, would the input parsed so far be a complete, valid JSON string?
+ complete: bool,
+ // Current token flags to pass through to the next generated, see Token.
+ string_has_escape: bool,
+ number_is_integer: bool,
+
+ // Bit-stack for nested object/array literals (max 255 nestings).
+ stack: u256,
+ stack_used: u8,
+
+ const object_bit = 0;
+ const array_bit = 1;
+ const max_stack_size = @maxValue(u8);
+
+ pub fn init() StreamingParser {
+ var p: StreamingParser = undefined;
+ p.reset();
+ return p;
+ }
+
+ pub fn reset(p: *StreamingParser) void {
+ p.state = State.TopLevelBegin;
+ p.count = 0;
+ // Set before it is ever read in the main transition function.
+ p.after_string_state = undefined;
+ p.after_value_state = State.ValueEnd; // handle end of values normally
+ p.stack = 0;
+ p.stack_used = 0;
+ p.complete = false;
+ p.string_has_escape = false;
+ p.number_is_integer = true;
+ }
+
+ pub const State = enum {
+ // These must be first with these explicit values as we rely on them for indexing the
+ // bit-stack directly and avoiding a branch.
+ ObjectSeparator = 0,
+ ValueEnd = 1,
+
+ TopLevelBegin,
+ TopLevelEnd,
+
+ ValueBegin,
+ ValueBeginNoClosing,
+
+ String,
+ StringUtf8Byte3,
+ StringUtf8Byte2,
+ StringUtf8Byte1,
+ StringEscapeCharacter,
+ StringEscapeHexUnicode4,
+ StringEscapeHexUnicode3,
+ StringEscapeHexUnicode2,
+ StringEscapeHexUnicode1,
+
+ Number,
+ NumberMaybeDotOrExponent,
+ NumberMaybeDigitOrDotOrExponent,
+ NumberFractionalRequired,
+ NumberFractional,
+ NumberMaybeExponent,
+ NumberExponent,
+ NumberExponentDigitsRequired,
+ NumberExponentDigits,
+
+ TrueLiteral1,
+ TrueLiteral2,
+ TrueLiteral3,
+
+ FalseLiteral1,
+ FalseLiteral2,
+ FalseLiteral3,
+ FalseLiteral4,
+
+ NullLiteral1,
+ NullLiteral2,
+ NullLiteral3,
+
+ // Only call this function to generate array/object final state.
+ pub fn fromInt(x: var) State {
+ debug.assert(x == 0 or x == 1);
+ const T = @TagType(State);
+ return @intToEnum(State, @intCast(T, x));
+ }
+ };
+
+ pub const Error = error{
+ InvalidTopLevel,
+ TooManyNestedItems,
+ TooManyClosingItems,
+ InvalidValueBegin,
+ InvalidValueEnd,
+ UnbalancedBrackets,
+ UnbalancedBraces,
+ UnexpectedClosingBracket,
+ UnexpectedClosingBrace,
+ InvalidNumber,
+ InvalidSeparator,
+ InvalidLiteral,
+ InvalidEscapeCharacter,
+ InvalidUnicodeHexSymbol,
+ InvalidUtf8Byte,
+ InvalidTopLevelTrailing,
+ InvalidControlCharacter,
+ };
+
+ // Give another byte to the parser and obtain any new tokens. This may (rarely) return two
+ // tokens. token2 is always null if token1 is null.
+ //
+ // There is currently no error recovery on a bad stream.
+ pub fn feed(p: *StreamingParser, c: u8, token1: *?Token, token2: *?Token) Error!void {
+ token1.* = null;
+ token2.* = null;
+ p.count += 1;
+
+ // unlikely
+ if (try p.transition(c, token1)) {
+ _ = try p.transition(c, token2);
+ }
+ }
+
+ // Perform a single transition on the state machine and return any possible token.
+ fn transition(p: *StreamingParser, c: u8, token: *?Token) Error!bool {
+ switch (p.state) {
+ State.TopLevelBegin => switch (c) {
+ '{' => {
+ p.stack <<= 1;
+ p.stack |= object_bit;
+ p.stack_used += 1;
+
+ p.state = State.ValueBegin;
+ p.after_string_state = State.ObjectSeparator;
+
+ token.* = Token.initMarker(Token.Id.ObjectBegin);
+ },
+ '[' => {
+ p.stack <<= 1;
+ p.stack |= array_bit;
+ p.stack_used += 1;
+
+ p.state = State.ValueBegin;
+ p.after_string_state = State.ValueEnd;
+
+ token.* = Token.initMarker(Token.Id.ArrayBegin);
+ },
+ '-' => {
+ p.number_is_integer = true;
+ p.state = State.Number;
+ p.after_value_state = State.TopLevelEnd;
+ p.count = 0;
+ },
+ '0' => {
+ p.number_is_integer = true;
+ p.state = State.NumberMaybeDotOrExponent;
+ p.after_value_state = State.TopLevelEnd;
+ p.count = 0;
+ },
+ '1'...'9' => {
+ p.number_is_integer = true;
+ p.state = State.NumberMaybeDigitOrDotOrExponent;
+ p.after_value_state = State.TopLevelEnd;
+ p.count = 0;
+ },
+ '"' => {
+ p.state = State.String;
+ p.after_value_state = State.TopLevelEnd;
+ // We don't actually need the following since after_value_state should override.
+ p.after_string_state = State.ValueEnd;
+ p.string_has_escape = false;
+ p.count = 0;
+ },
+ 't' => {
+ p.state = State.TrueLiteral1;
+ p.after_value_state = State.TopLevelEnd;
+ p.count = 0;
+ },
+ 'f' => {
+ p.state = State.FalseLiteral1;
+ p.after_value_state = State.TopLevelEnd;
+ p.count = 0;
+ },
+ 'n' => {
+ p.state = State.NullLiteral1;
+ p.after_value_state = State.TopLevelEnd;
+ p.count = 0;
+ },
+ 0x09, 0x0A, 0x0D, 0x20 => {
+ // whitespace
+ },
+ else => {
+ return error.InvalidTopLevel;
+ },
+ },
+
+ State.TopLevelEnd => switch (c) {
+ 0x09, 0x0A, 0x0D, 0x20 => {
+ // whitespace
+ },
+ else => {
+ return error.InvalidTopLevelTrailing;
+ },
+ },
+
+ State.ValueBegin => switch (c) {
+ // NOTE: These are shared with ValueEnd as well; we could probably reorder states
+ // to be a bit clearer and avoid this duplication.
+ '}' => {
+ // unlikely
+ if (p.stack & 1 != object_bit) {
+ return error.UnexpectedClosingBracket;
+ }
+ if (p.stack_used == 0) {
+ return error.TooManyClosingItems;
+ }
+
+ p.state = State.ValueBegin;
+ p.after_string_state = State.fromInt(p.stack & 1);
+
+ p.stack >>= 1;
+ p.stack_used -= 1;
+
+ switch (p.stack_used) {
+ 0 => {
+ p.complete = true;
+ p.state = State.TopLevelEnd;
+ },
+ else => {
+ p.state = State.ValueEnd;
+ },
+ }
+
+ token.* = Token.initMarker(Token.Id.ObjectEnd);
+ },
+ ']' => {
+ if (p.stack & 1 != array_bit) {
+ return error.UnexpectedClosingBrace;
+ }
+ if (p.stack_used == 0) {
+ return error.TooManyClosingItems;
+ }
+
+ p.state = State.ValueBegin;
+ p.after_string_state = State.fromInt(p.stack & 1);
+
+ p.stack >>= 1;
+ p.stack_used -= 1;
+
+ switch (p.stack_used) {
+ 0 => {
+ p.complete = true;
+ p.state = State.TopLevelEnd;
+ },
+ else => {
+ p.state = State.ValueEnd;
+ },
+ }
+
+ token.* = Token.initMarker(Token.Id.ArrayEnd);
+ },
+ '{' => {
+ if (p.stack_used == max_stack_size) {
+ return error.TooManyNestedItems;
+ }
+
+ p.stack <<= 1;
+ p.stack |= object_bit;
+ p.stack_used += 1;
+
+ p.state = State.ValueBegin;
+ p.after_string_state = State.ObjectSeparator;
+
+ token.* = Token.initMarker(Token.Id.ObjectBegin);
+ },
+ '[' => {
+ if (p.stack_used == max_stack_size) {
+ return error.TooManyNestedItems;
+ }
+
+ p.stack <<= 1;
+ p.stack |= array_bit;
+ p.stack_used += 1;
+
+ p.state = State.ValueBegin;
+ p.after_string_state = State.ValueEnd;
+
+ token.* = Token.initMarker(Token.Id.ArrayBegin);
+ },
+ '-' => {
+ p.state = State.Number;
+ p.count = 0;
+ },
+ '0' => {
+ p.state = State.NumberMaybeDotOrExponent;
+ p.count = 0;
+ },
+ '1'...'9' => {
+ p.state = State.NumberMaybeDigitOrDotOrExponent;
+ p.count = 0;
+ },
+ '"' => {
+ p.state = State.String;
+ p.count = 0;
+ },
+ 't' => {
+ p.state = State.TrueLiteral1;
+ p.count = 0;
+ },
+ 'f' => {
+ p.state = State.FalseLiteral1;
+ p.count = 0;
+ },
+ 'n' => {
+ p.state = State.NullLiteral1;
+ p.count = 0;
+ },
+ 0x09, 0x0A, 0x0D, 0x20 => {
+ // whitespace
+ },
+ else => {
+ return error.InvalidValueBegin;
+ },
+ },
+
+ // TODO: A bit of duplication here and in the following state, redo.
+ State.ValueBeginNoClosing => switch (c) {
+ '{' => {
+ if (p.stack_used == max_stack_size) {
+ return error.TooManyNestedItems;
+ }
+
+ p.stack <<= 1;
+ p.stack |= object_bit;
+ p.stack_used += 1;
+
+ p.state = State.ValueBegin;
+ p.after_string_state = State.ObjectSeparator;
+
+ token.* = Token.initMarker(Token.Id.ObjectBegin);
+ },
+ '[' => {
+ if (p.stack_used == max_stack_size) {
+ return error.TooManyNestedItems;
+ }
+
+ p.stack <<= 1;
+ p.stack |= array_bit;
+ p.stack_used += 1;
+
+ p.state = State.ValueBegin;
+ p.after_string_state = State.ValueEnd;
+
+ token.* = Token.initMarker(Token.Id.ArrayBegin);
+ },
+ '-' => {
+ p.state = State.Number;
+ p.count = 0;
+ },
+ '0' => {
+ p.state = State.NumberMaybeDotOrExponent;
+ p.count = 0;
+ },
+ '1'...'9' => {
+ p.state = State.NumberMaybeDigitOrDotOrExponent;
+ p.count = 0;
+ },
+ '"' => {
+ p.state = State.String;
+ p.count = 0;
+ },
+ 't' => {
+ p.state = State.TrueLiteral1;
+ p.count = 0;
+ },
+ 'f' => {
+ p.state = State.FalseLiteral1;
+ p.count = 0;
+ },
+ 'n' => {
+ p.state = State.NullLiteral1;
+ p.count = 0;
+ },
+ 0x09, 0x0A, 0x0D, 0x20 => {
+ // whitespace
+ },
+ else => {
+ return error.InvalidValueBegin;
+ },
+ },
+
+ State.ValueEnd => switch (c) {
+ ',' => {
+ p.after_string_state = State.fromInt(p.stack & 1);
+ p.state = State.ValueBeginNoClosing;
+ },
+ ']' => {
+ if (p.stack_used == 0) {
+ return error.UnbalancedBrackets;
+ }
+
+ p.state = State.ValueEnd;
+ p.after_string_state = State.fromInt(p.stack & 1);
+
+ p.stack >>= 1;
+ p.stack_used -= 1;
+
+ if (p.stack_used == 0) {
+ p.complete = true;
+ p.state = State.TopLevelEnd;
+ }
+
+ token.* = Token.initMarker(Token.Id.ArrayEnd);
+ },
+ '}' => {
+ if (p.stack_used == 0) {
+ return error.UnbalancedBraces;
+ }
+
+ p.state = State.ValueEnd;
+ p.after_string_state = State.fromInt(p.stack & 1);
+
+ p.stack >>= 1;
+ p.stack_used -= 1;
+
+ if (p.stack_used == 0) {
+ p.complete = true;
+ p.state = State.TopLevelEnd;
+ }
+
+ token.* = Token.initMarker(Token.Id.ObjectEnd);
+ },
+ 0x09, 0x0A, 0x0D, 0x20 => {
+ // whitespace
+ },
+ else => {
+ return error.InvalidValueEnd;
+ },
+ },
+
+ State.ObjectSeparator => switch (c) {
+ ':' => {
+ p.state = State.ValueBegin;
+ p.after_string_state = State.ValueEnd;
+ },
+ 0x09, 0x0A, 0x0D, 0x20 => {
+ // whitespace
+ },
+ else => {
+ return error.InvalidSeparator;
+ },
+ },
+
+ State.String => switch (c) {
+ 0x00...0x1F => {
+ return error.InvalidControlCharacter;
+ },
+ '"' => {
+ p.state = p.after_string_state;
+ if (p.after_value_state == State.TopLevelEnd) {
+ p.state = State.TopLevelEnd;
+ p.complete = true;
+ }
+
+ token.* = Token.initString(p.count - 1, p.string_has_escape);
+ },
+ '\\' => {
+ p.state = State.StringEscapeCharacter;
+ },
+ 0x20, 0x21, 0x23...0x5B, 0x5D...0x7F => {
+ // non-control ascii
+ },
+ 0xC0...0xDF => {
+ p.state = State.StringUtf8Byte1;
+ },
+ 0xE0...0xEF => {
+ p.state = State.StringUtf8Byte2;
+ },
+ 0xF0...0xFF => {
+ p.state = State.StringUtf8Byte3;
+ },
+ else => {
+ return error.InvalidUtf8Byte;
+ },
+ },
+
+ State.StringUtf8Byte3 => switch (c >> 6) {
+ 0b10 => p.state = State.StringUtf8Byte2,
+ else => return error.InvalidUtf8Byte,
+ },
+
+ State.StringUtf8Byte2 => switch (c >> 6) {
+ 0b10 => p.state = State.StringUtf8Byte1,
+ else => return error.InvalidUtf8Byte,
+ },
+
+ State.StringUtf8Byte1 => switch (c >> 6) {
+ 0b10 => p.state = State.String,
+ else => return error.InvalidUtf8Byte,
+ },
+
+ State.StringEscapeCharacter => switch (c) {
+ // NOTE: '/' is allowed as an escaped character, but it is also allowed
+ // unescaped according to the RFC. There is a reported erratum which suggests
+ // removing the non-escaped variant, but it makes more sense to simply disallow
+ // it as an escape code here.
+ //
+ // The current JSONTestSuite tests rely on both of these behaviours being
+ // present, however, so we default to the status quo where both are accepted
+ // until this is further clarified.
+ '"', '\\', '/', 'b', 'f', 'n', 'r', 't' => {
+ p.string_has_escape = true;
+ p.state = State.String;
+ },
+ 'u' => {
+ p.string_has_escape = true;
+ p.state = State.StringEscapeHexUnicode4;
+ },
+ else => {
+ return error.InvalidEscapeCharacter;
+ },
+ },
+
+ State.StringEscapeHexUnicode4 => switch (c) {
+ '0'...'9', 'A'...'F', 'a'...'f' => {
+ p.state = State.StringEscapeHexUnicode3;
+ },
+ else => return error.InvalidUnicodeHexSymbol,
+ },
+
+ State.StringEscapeHexUnicode3 => switch (c) {
+ '0'...'9', 'A'...'F', 'a'...'f' => {
+ p.state = State.StringEscapeHexUnicode2;
+ },
+ else => return error.InvalidUnicodeHexSymbol,
+ },
+
+ State.StringEscapeHexUnicode2 => switch (c) {
+ '0'...'9', 'A'...'F', 'a'...'f' => {
+ p.state = State.StringEscapeHexUnicode1;
+ },
+ else => return error.InvalidUnicodeHexSymbol,
+ },
+
+ State.StringEscapeHexUnicode1 => switch (c) {
+ '0'...'9', 'A'...'F', 'a'...'f' => {
+ p.state = State.String;
+ },
+ else => return error.InvalidUnicodeHexSymbol,
+ },
+
+ State.Number => {
+ p.complete = p.after_value_state == State.TopLevelEnd;
+ switch (c) {
+ '0' => {
+ p.state = State.NumberMaybeDotOrExponent;
+ },
+ '1'...'9' => {
+ p.state = State.NumberMaybeDigitOrDotOrExponent;
+ },
+ else => {
+ return error.InvalidNumber;
+ },
+ }
+ },
+
+ State.NumberMaybeDotOrExponent => {
+ p.complete = p.after_value_state == State.TopLevelEnd;
+ switch (c) {
+ '.' => {
+ p.number_is_integer = false;
+ p.state = State.NumberFractionalRequired;
+ },
+ 'e', 'E' => {
+ p.number_is_integer = false;
+ p.state = State.NumberExponent;
+ },
+ else => {
+ p.state = p.after_value_state;
+ token.* = Token.initNumber(p.count, p.number_is_integer);
+ return true;
+ },
+ }
+ },
+
+ State.NumberMaybeDigitOrDotOrExponent => {
+ p.complete = p.after_value_state == State.TopLevelEnd;
+ switch (c) {
+ '.' => {
+ p.number_is_integer = false;
+ p.state = State.NumberFractionalRequired;
+ },
+ 'e', 'E' => {
+ p.number_is_integer = false;
+ p.state = State.NumberExponent;
+ },
+ '0'...'9' => {
+ // another digit
+ },
+ else => {
+ p.state = p.after_value_state;
+ token.* = Token.initNumber(p.count, p.number_is_integer);
+ return true;
+ },
+ }
+ },
+
+ State.NumberFractionalRequired => {
+ p.complete = p.after_value_state == State.TopLevelEnd;
+ switch (c) {
+ '0'...'9' => {
+ p.state = State.NumberFractional;
+ },
+ else => {
+ return error.InvalidNumber;
+ },
+ }
+ },
+
+ State.NumberFractional => {
+ p.complete = p.after_value_state == State.TopLevelEnd;
+ switch (c) {
+ '0'...'9' => {
+ // another digit
+ },
+ 'e', 'E' => {
+ p.number_is_integer = false;
+ p.state = State.NumberExponent;
+ },
+ else => {
+ p.state = p.after_value_state;
+ token.* = Token.initNumber(p.count, p.number_is_integer);
+ return true;
+ },
+ }
+ },
+
+ State.NumberMaybeExponent => {
+ p.complete = p.after_value_state == State.TopLevelEnd;
+ switch (c) {
+ 'e', 'E' => {
+ p.number_is_integer = false;
+ p.state = State.NumberExponent;
+ },
+ else => {
+ p.state = p.after_value_state;
+ token.* = Token.initNumber(p.count, p.number_is_integer);
+ return true;
+ },
+ }
+ },
+
+ State.NumberExponent => switch (c) {
+ '-', '+' => {
+ p.complete = false;
+ p.state = State.NumberExponentDigitsRequired;
+ },
+ '0'...'9' => {
+ p.complete = p.after_value_state == State.TopLevelEnd;
+ p.state = State.NumberExponentDigits;
+ },
+ else => {
+ return error.InvalidNumber;
+ },
+ },
+
+ State.NumberExponentDigitsRequired => switch (c) {
+ '0'...'9' => {
+ p.complete = p.after_value_state == State.TopLevelEnd;
+ p.state = State.NumberExponentDigits;
+ },
+ else => {
+ return error.InvalidNumber;
+ },
+ },
+
+ State.NumberExponentDigits => {
+ p.complete = p.after_value_state == State.TopLevelEnd;
+ switch (c) {
+ '0'...'9' => {
+ // another digit
+ },
+ else => {
+ p.state = p.after_value_state;
+ token.* = Token.initNumber(p.count, p.number_is_integer);
+ return true;
+ },
+ }
+ },
+
+ State.TrueLiteral1 => switch (c) {
+ 'r' => p.state = State.TrueLiteral2,
+ else => return error.InvalidLiteral,
+ },
+
+ State.TrueLiteral2 => switch (c) {
+ 'u' => p.state = State.TrueLiteral3,
+ else => return error.InvalidLiteral,
+ },
+
+ State.TrueLiteral3 => switch (c) {
+ 'e' => {
+ p.state = p.after_value_state;
+ p.complete = p.state == State.TopLevelEnd;
+ token.* = Token.init(Token.Id.True, p.count + 1, 1);
+ },
+ else => {
+ return error.InvalidLiteral;
+ },
+ },
+
+ State.FalseLiteral1 => switch (c) {
+ 'a' => p.state = State.FalseLiteral2,
+ else => return error.InvalidLiteral,
+ },
+
+ State.FalseLiteral2 => switch (c) {
+ 'l' => p.state = State.FalseLiteral3,
+ else => return error.InvalidLiteral,
+ },
+
+ State.FalseLiteral3 => switch (c) {
+ 's' => p.state = State.FalseLiteral4,
+ else => return error.InvalidLiteral,
+ },
+
+ State.FalseLiteral4 => switch (c) {
+ 'e' => {
+ p.state = p.after_value_state;
+ p.complete = p.state == State.TopLevelEnd;
+ token.* = Token.init(Token.Id.False, p.count + 1, 1);
+ },
+ else => {
+ return error.InvalidLiteral;
+ },
+ },
+
+ State.NullLiteral1 => switch (c) {
+ 'u' => p.state = State.NullLiteral2,
+ else => return error.InvalidLiteral,
+ },
+
+ State.NullLiteral2 => switch (c) {
+ 'l' => p.state = State.NullLiteral3,
+ else => return error.InvalidLiteral,
+ },
+
+ State.NullLiteral3 => switch (c) {
+ 'l' => {
+ p.state = p.after_value_state;
+ p.complete = p.state == State.TopLevelEnd;
+ token.* = Token.init(Token.Id.Null, p.count + 1, 1);
+ },
+ else => {
+ return error.InvalidLiteral;
+ },
+ },
+ }
+
+ return false;
+ }
+};
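+
+// Illustrative sketch, not part of this change: driving the parser one byte at
+// a time. `feed` can emit up to two tokens for a single byte (for example a
+// number immediately terminated by a closing bracket), so both slots must be
+// checked. `input` and `handle` are placeholders for caller-provided data and
+// logic.
+//
+//     var p = StreamingParser.init();
+//     for (input) |c| {
+//         var tok1: ?Token = undefined;
+//         var tok2: ?Token = undefined;
+//         try p.feed(c, &tok1, &tok2);
+//         if (tok1) |t| handle(t);
+//         if (tok2) |t| handle(t);
+//     }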
+
+// A small wrapper over a StreamingParser for full slices. Returns a stream of json Tokens.
+pub const TokenStream = struct {
+ i: usize,
+ slice: []const u8,
+ parser: StreamingParser,
+ token: ?Token,
+
+ pub fn init(slice: []const u8) TokenStream {
+ return TokenStream{
+ .i = 0,
+ .slice = slice,
+ .parser = StreamingParser.init(),
+ .token = null,
+ };
+ }
+
+ pub fn next(self: *TokenStream) !?Token {
+ if (self.token) |token| {
+ self.token = null;
+ return token;
+ }
+
+ var t1: ?Token = undefined;
+ var t2: ?Token = undefined;
+
+ while (self.i < self.slice.len) {
+ try self.parser.feed(self.slice[self.i], &t1, &t2);
+ self.i += 1;
+
+ if (t1) |token| {
+ self.token = t2;
+ return token;
+ }
+ }
+
+ if (self.i > self.slice.len) {
+ try self.parser.feed(' ', &t1, &t2);
+ self.i += 1;
+
+ if (t1) |token| {
+ return token;
+ }
+ }
+
+ return null;
+ }
+};
+
+fn checkNext(p: *TokenStream, id: Token.Id) void {
+ const token = (p.next() catch unreachable).?;
+ debug.assert(token.id == id);
+}
+
+test "token" {
+ const s =
+ \\{
+ \\ "Image": {
+ \\ "Width": 800,
+ \\ "Height": 600,
+ \\ "Title": "View from 15th Floor",
+ \\ "Thumbnail": {
+ \\ "Url": "http://www.example.com/image/481989943",
+ \\ "Height": 125,
+ \\ "Width": 100
+ \\ },
+ \\ "Animated" : false,
+ \\ "IDs": [116, 943, 234, 38793]
+ \\ }
+ \\}
+ ;
+
+ var p = TokenStream.init(s);
+
+ checkNext(&p, Token.Id.ObjectBegin);
+ checkNext(&p, Token.Id.String); // Image
+ checkNext(&p, Token.Id.ObjectBegin);
+ checkNext(&p, Token.Id.String); // Width
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.String); // Height
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.String); // Title
+ checkNext(&p, Token.Id.String);
+ checkNext(&p, Token.Id.String); // Thumbnail
+ checkNext(&p, Token.Id.ObjectBegin);
+ checkNext(&p, Token.Id.String); // Url
+ checkNext(&p, Token.Id.String);
+ checkNext(&p, Token.Id.String); // Height
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.String); // Width
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.ObjectEnd);
+ checkNext(&p, Token.Id.String); // Animated
+ checkNext(&p, Token.Id.False);
+ checkNext(&p, Token.Id.String); // IDs
+ checkNext(&p, Token.Id.ArrayBegin);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.ArrayEnd);
+ checkNext(&p, Token.Id.ObjectEnd);
+ checkNext(&p, Token.Id.ObjectEnd);
+
+ debug.assert((try p.next()) == null);
+}
+
+// Validate a JSON string. This does not limit number precision so a decoder may not necessarily
+// be able to decode the string even if this returns true.
+pub fn validate(s: []const u8) bool {
+ var p = StreamingParser.init();
+
+ for (s) |c, i| {
+ var token1: ?Token = undefined;
+ var token2: ?Token = undefined;
+
+ p.feed(c, &token1, &token2) catch |err| {
+ return false;
+ };
+ }
+
+ return p.complete;
+}
+
+test "json validate" {
+ debug.assert(validate("{}"));
+}
+
+const Allocator = std.mem.Allocator;
+const ArenaAllocator = std.heap.ArenaAllocator;
+const ArrayList = std.ArrayList;
+const HashMap = std.HashMap;
+
+pub const ValueTree = struct {
+ arena: ArenaAllocator,
+ root: Value,
+
+ pub fn deinit(self: *ValueTree) void {
+ self.arena.deinit();
+ }
+};
+
+pub const ObjectMap = HashMap([]const u8, Value, mem.hash_slice_u8, mem.eql_slice_u8);
+
+pub const Value = union(enum) {
+ Null,
+ Bool: bool,
+ Integer: i64,
+ Float: f64,
+ String: []const u8,
+ Array: ArrayList(Value),
+ Object: ObjectMap,
+
+ pub fn dump(self: *const Value) void {
+ switch (self.*) {
+ Value.Null => {
+ debug.warn("null");
+ },
+ Value.Bool => |inner| {
+ debug.warn("{}", inner);
+ },
+ Value.Integer => |inner| {
+ debug.warn("{}", inner);
+ },
+ Value.Float => |inner| {
+ debug.warn("{.5}", inner);
+ },
+ Value.String => |inner| {
+ debug.warn("\"{}\"", inner);
+ },
+ Value.Array => |inner| {
+ var not_first = false;
+ debug.warn("[");
+ for (inner.toSliceConst()) |value| {
+ if (not_first) {
+ debug.warn(",");
+ }
+ not_first = true;
+ value.dump();
+ }
+ debug.warn("]");
+ },
+ Value.Object => |inner| {
+ var not_first = false;
+ debug.warn("{{");
+ var it = inner.iterator();
+
+ while (it.next()) |entry| {
+ if (not_first) {
+ debug.warn(",");
+ }
+ not_first = true;
+ debug.warn("\"{}\":", entry.key);
+ entry.value.dump();
+ }
+ debug.warn("}}");
+ },
+ }
+ }
+
+ pub fn dumpIndent(self: *const Value, indent: usize) void {
+ if (indent == 0) {
+ self.dump();
+ } else {
+ self.dumpIndentLevel(indent, 0);
+ }
+ }
+
+ fn dumpIndentLevel(self: *const Value, indent: usize, level: usize) void {
+ switch (self.*) {
+ Value.Null => {
+ debug.warn("null");
+ },
+ Value.Bool => |inner| {
+ debug.warn("{}", inner);
+ },
+ Value.Integer => |inner| {
+ debug.warn("{}", inner);
+ },
+ Value.Float => |inner| {
+ debug.warn("{.5}", inner);
+ },
+ Value.String => |inner| {
+ debug.warn("\"{}\"", inner);
+ },
+ Value.Array => |inner| {
+ var not_first = false;
+ debug.warn("[\n");
+
+ for (inner.toSliceConst()) |value| {
+ if (not_first) {
+ debug.warn(",\n");
+ }
+ not_first = true;
+ padSpace(level + indent);
+ value.dumpIndentLevel(indent, level + indent);
+ }
+ debug.warn("\n");
+ padSpace(level);
+ debug.warn("]");
+ },
+ Value.Object => |inner| {
+ var not_first = false;
+ debug.warn("{{\n");
+ var it = inner.iterator();
+
+ while (it.next()) |entry| {
+ if (not_first) {
+ debug.warn(",\n");
+ }
+ not_first = true;
+ padSpace(level + indent);
+ debug.warn("\"{}\": ", entry.key);
+ entry.value.dumpIndentLevel(indent, level + indent);
+ }
+ debug.warn("\n");
+ padSpace(level);
+ debug.warn("}}");
+ },
+ }
+ }
+
+ fn padSpace(indent: usize) void {
+ var i: usize = 0;
+ while (i < indent) : (i += 1) {
+ debug.warn(" ");
+ }
+ }
+};
+
+// A non-streaming JSON parser which constructs a tree of Values.
+pub const Parser = struct {
+ allocator: *Allocator,
+ state: State,
+ copy_strings: bool,
+ // Stores parent nodes and un-combined Values.
+ stack: ArrayList(Value),
+
+ const State = enum {
+ ObjectKey,
+ ObjectValue,
+ ArrayValue,
+ Simple,
+ };
+
+ pub fn init(allocator: *Allocator, copy_strings: bool) Parser {
+ return Parser{
+ .allocator = allocator,
+ .state = State.Simple,
+ .copy_strings = copy_strings,
+ .stack = ArrayList(Value).init(allocator),
+ };
+ }
+
+ pub fn deinit(p: *Parser) void {
+ p.stack.deinit();
+ }
+
+ pub fn reset(p: *Parser) void {
+ p.state = State.Simple;
+ p.stack.shrink(0);
+ }
+
+ pub fn parse(p: *Parser, input: []const u8) !ValueTree {
+ var s = TokenStream.init(input);
+
+ var arena = ArenaAllocator.init(p.allocator);
+ errdefer arena.deinit();
+
+ while (try s.next()) |token| {
+ try p.transition(&arena.allocator, input, s.i - 1, token);
+ }
+
+ debug.assert(p.stack.len == 1);
+
+ return ValueTree{
+ .arena = arena,
+ .root = p.stack.at(0),
+ };
+ }
+
+ // Even though p.allocator exists, we take an explicit allocator so that allocation state
+ // can be cleaned up correctly on error during a `parse` call.
+ fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: *const Token) !void {
+ switch (p.state) {
+ State.ObjectKey => switch (token.id) {
+ Token.Id.ObjectEnd => {
+ if (p.stack.len == 1) {
+ return;
+ }
+
+ var value = p.stack.pop();
+ try p.pushToParent(value);
+ },
+ Token.Id.String => {
+ try p.stack.append(try p.parseString(allocator, token, input, i));
+ p.state = State.ObjectValue;
+ },
+ else => {
+ unreachable;
+ },
+ },
+ State.ObjectValue => {
+ var object = &p.stack.items[p.stack.len - 2].Object;
+ var key = p.stack.items[p.stack.len - 1].String;
+
+ switch (token.id) {
+ Token.Id.ObjectBegin => {
+ try p.stack.append(Value{ .Object = ObjectMap.init(allocator) });
+ p.state = State.ObjectKey;
+ },
+ Token.Id.ArrayBegin => {
+ try p.stack.append(Value{ .Array = ArrayList(Value).init(allocator) });
+ p.state = State.ArrayValue;
+ },
+ Token.Id.String => {
+ _ = try object.put(key, try p.parseString(allocator, token, input, i));
+ _ = p.stack.pop();
+ p.state = State.ObjectKey;
+ },
+ Token.Id.Number => {
+ _ = try object.put(key, try p.parseNumber(token, input, i));
+ _ = p.stack.pop();
+ p.state = State.ObjectKey;
+ },
+ Token.Id.True => {
+ _ = try object.put(key, Value{ .Bool = true });
+ _ = p.stack.pop();
+ p.state = State.ObjectKey;
+ },
+ Token.Id.False => {
+ _ = try object.put(key, Value{ .Bool = false });
+ _ = p.stack.pop();
+ p.state = State.ObjectKey;
+ },
+ Token.Id.Null => {
+ _ = try object.put(key, Value.Null);
+ _ = p.stack.pop();
+ p.state = State.ObjectKey;
+ },
+ Token.Id.ObjectEnd, Token.Id.ArrayEnd => {
+ unreachable;
+ },
+ }
+ },
+ State.ArrayValue => {
+ var array = &p.stack.items[p.stack.len - 1].Array;
+
+ switch (token.id) {
+ Token.Id.ArrayEnd => {
+ if (p.stack.len == 1) {
+ return;
+ }
+
+ var value = p.stack.pop();
+ try p.pushToParent(value);
+ },
+ Token.Id.ObjectBegin => {
+ try p.stack.append(Value{ .Object = ObjectMap.init(allocator) });
+ p.state = State.ObjectKey;
+ },
+ Token.Id.ArrayBegin => {
+ try p.stack.append(Value{ .Array = ArrayList(Value).init(allocator) });
+ p.state = State.ArrayValue;
+ },
+ Token.Id.String => {
+ try array.append(try p.parseString(allocator, token, input, i));
+ },
+ Token.Id.Number => {
+ try array.append(try p.parseNumber(token, input, i));
+ },
+ Token.Id.True => {
+ try array.append(Value{ .Bool = true });
+ },
+ Token.Id.False => {
+ try array.append(Value{ .Bool = false });
+ },
+ Token.Id.Null => {
+ try array.append(Value.Null);
+ },
+ Token.Id.ObjectEnd => {
+ unreachable;
+ },
+ }
+ },
+ State.Simple => switch (token.id) {
+ Token.Id.ObjectBegin => {
+ try p.stack.append(Value{ .Object = ObjectMap.init(allocator) });
+ p.state = State.ObjectKey;
+ },
+ Token.Id.ArrayBegin => {
+ try p.stack.append(Value{ .Array = ArrayList(Value).init(allocator) });
+ p.state = State.ArrayValue;
+ },
+ Token.Id.String => {
+ try p.stack.append(try p.parseString(allocator, token, input, i));
+ },
+ Token.Id.Number => {
+ try p.stack.append(try p.parseNumber(token, input, i));
+ },
+ Token.Id.True => {
+ try p.stack.append(Value{ .Bool = true });
+ },
+ Token.Id.False => {
+ try p.stack.append(Value{ .Bool = false });
+ },
+ Token.Id.Null => {
+ try p.stack.append(Value.Null);
+ },
+ Token.Id.ObjectEnd, Token.Id.ArrayEnd => {
+ unreachable;
+ },
+ },
+ }
+ }
+
+ fn pushToParent(p: *Parser, value: *const Value) !void {
+ switch (p.stack.at(p.stack.len - 1)) {
+ // Object Parent -> [ ..., object, key, value ]
+ Value.String => |key| {
+ _ = p.stack.pop();
+
+ var object = &p.stack.items[p.stack.len - 1].Object;
+ _ = try object.put(key, value);
+ p.state = State.ObjectKey;
+ },
+ // Array Parent -> [ ..., array, value ]
+ Value.Array => |*array| {
+ try array.append(value.*);
+ p.state = State.ArrayValue;
+ },
+ else => {
+ unreachable;
+ },
+ }
+ }
+
+ fn parseString(p: *Parser, allocator: *Allocator, token: *const Token, input: []const u8, i: usize) !Value {
+ // TODO: We don't strictly have to copy values which do not contain any escape
+ // characters if flagged with the option.
+ const slice = token.slice(input, i);
+ return Value{ .String = try mem.dupe(p.allocator, u8, slice) };
+ }
+
+ fn parseNumber(p: *Parser, token: *const Token, input: []const u8, i: usize) !Value {
+ return if (token.number_is_integer)
+ Value{ .Integer = try std.fmt.parseInt(i64, token.slice(input, i), 10) }
+ else
+ @panic("TODO: fmt.parseFloat not yet implemented");
+ }
+};
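+
+// Illustrative sketch, not part of this change: typical usage and lifetime.
+// The returned ValueTree owns an arena holding every parsed Value, so it must
+// stay alive while the values are used and be deinit'd afterwards; `allocator`
+// and `input` stand in for caller-supplied values.
+//
+//     var parser = Parser.init(allocator, false);
+//     defer parser.deinit();
+//     var tree = try parser.parse(input);
+//     defer tree.deinit();
+//     const root = tree.root;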
+
+test "json parser dynamic" {
+ var p = Parser.init(debug.global_allocator, false);
+ defer p.deinit();
+
+ const s =
+ \\{
+ \\ "Image": {
+ \\ "Width": 800,
+ \\ "Height": 600,
+ \\ "Title": "View from 15th Floor",
+ \\ "Thumbnail": {
+ \\ "Url": "http://www.example.com/image/481989943",
+ \\ "Height": 125,
+ \\ "Width": 100
+ \\ },
+ \\ "Animated" : false,
+ \\ "IDs": [116, 943, 234, 38793]
+ \\ }
+ \\}
+ ;
+
+ var tree = try p.parse(s);
+ defer tree.deinit();
+
+ var root = tree.root;
+
+ var image = root.Object.get("Image").?.value;
+
+ const width = image.Object.get("Width").?.value;
+ debug.assert(width.Integer == 800);
+
+ const height = image.Object.get("Height").?.value;
+ debug.assert(height.Integer == 600);
+
+ const title = image.Object.get("Title").?.value;
+ debug.assert(mem.eql(u8, title.String, "View from 15th Floor"));
+
+ const animated = image.Object.get("Animated").?.value;
+ debug.assert(animated.Bool == false);
+}
diff --git a/std/json_test.zig b/std/json_test.zig
new file mode 100644
index 0000000000..8c8862441a
--- /dev/null
+++ b/std/json_test.zig
@@ -0,0 +1,1904 @@
+// RFC 8259 conformance tests.
+//
+// Tests are taken from https://github.com/nst/JSONTestSuite
+// Read also http://seriot.ch/parsing_json.php for a good overview.
+
+const std = @import("index.zig");
+
+fn ok(comptime s: []const u8) void {
+ std.debug.assert(std.json.validate(s));
+}
+
+fn err(comptime s: []const u8) void {
+ std.debug.assert(!std.json.validate(s));
+}
+
+fn any(comptime s: []const u8) void {
+ std.debug.assert(true);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+//
+// Additional tests not part of JSONTestSuite.
+
+test "y_trailing_comma_after_empty" {
+ ok(
+ \\{"1":[],"2":{},"3":"4"}
+ );
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+test "y_array_arraysWithSpaces" {
+ ok(
+ \\[[] ]
+ );
+}
+
+test "y_array_empty" {
+ ok(
+ \\[]
+ );
+}
+
+test "y_array_empty-string" {
+ ok(
+ \\[""]
+ );
+}
+
+test "y_array_ending_with_newline" {
+ ok(
+ \\["a"]
+ );
+}
+
+test "y_array_false" {
+ ok(
+ \\[false]
+ );
+}
+
+test "y_array_heterogeneous" {
+ ok(
+ \\[null, 1, "1", {}]
+ );
+}
+
+test "y_array_null" {
+ ok(
+ \\[null]
+ );
+}
+
+test "y_array_with_1_and_newline" {
+ ok(
+ \\[1
+ \\]
+ );
+}
+
+test "y_array_with_leading_space" {
+ ok(
+ \\ [1]
+ );
+}
+
+test "y_array_with_several_null" {
+ ok(
+ \\[1,null,null,null,2]
+ );
+}
+
+test "y_array_with_trailing_space" {
+ ok("[2] ");
+}
+
+test "y_number_0e+1" {
+ ok(
+ \\[0e+1]
+ );
+}
+
+test "y_number_0e1" {
+ ok(
+ \\[0e1]
+ );
+}
+
+test "y_number_after_space" {
+ ok(
+ \\[ 4]
+ );
+}
+
+test "y_number_double_close_to_zero" {
+ ok(
+ \\[-0.000000000000000000000000000000000000000000000000000000000000000000000000000001]
+ );
+}
+
+test "y_number_int_with_exp" {
+ ok(
+ \\[20e1]
+ );
+}
+
+test "y_number" {
+ ok(
+ \\[123e65]
+ );
+}
+
+test "y_number_minus_zero" {
+ ok(
+ \\[-0]
+ );
+}
+
+test "y_number_negative_int" {
+ ok(
+ \\[-123]
+ );
+}
+
+test "y_number_negative_one" {
+ ok(
+ \\[-1]
+ );
+}
+
+test "y_number_negative_zero" {
+ ok(
+ \\[-0]
+ );
+}
+
+test "y_number_real_capital_e" {
+ ok(
+ \\[1E22]
+ );
+}
+
+test "y_number_real_capital_e_neg_exp" {
+ ok(
+ \\[1E-2]
+ );
+}
+
+test "y_number_real_capital_e_pos_exp" {
+ ok(
+ \\[1E+2]
+ );
+}
+
+test "y_number_real_exponent" {
+ ok(
+ \\[123e45]
+ );
+}
+
+test "y_number_real_fraction_exponent" {
+ ok(
+ \\[123.456e78]
+ );
+}
+
+test "y_number_real_neg_exp" {
+ ok(
+ \\[1e-2]
+ );
+}
+
+test "y_number_real_pos_exponent" {
+ ok(
+ \\[1e+2]
+ );
+}
+
+test "y_number_simple_int" {
+ ok(
+ \\[123]
+ );
+}
+
+test "y_number_simple_real" {
+ ok(
+ \\[123.456789]
+ );
+}
+
+test "y_object_basic" {
+ ok(
+ \\{"asd":"sdf"}
+ );
+}
+
+test "y_object_duplicated_key_and_value" {
+ ok(
+ \\{"a":"b","a":"b"}
+ );
+}
+
+test "y_object_duplicated_key" {
+ ok(
+ \\{"a":"b","a":"c"}
+ );
+}
+
+test "y_object_empty" {
+ ok(
+ \\{}
+ );
+}
+
+test "y_object_empty_key" {
+ ok(
+ \\{"":0}
+ );
+}
+
+test "y_object_escaped_null_in_key" {
+ ok(
+ \\{"foo\u0000bar": 42}
+ );
+}
+
+test "y_object_extreme_numbers" {
+ ok(
+ \\{ "min": -1.0e+28, "max": 1.0e+28 }
+ );
+}
+
+test "y_object" {
+ ok(
+ \\{"asd":"sdf", "dfg":"fgh"}
+ );
+}
+
+test "y_object_long_strings" {
+ ok(
+ \\{"x":[{"id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}], "id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}
+ );
+}
+
+test "y_object_simple" {
+ ok(
+ \\{"a":[]}
+ );
+}
+
+test "y_object_string_unicode" {
+ ok(
+ \\{"title":"\u041f\u043e\u043b\u0442\u043e\u0440\u0430 \u0417\u0435\u043c\u043b\u0435\u043a\u043e\u043f\u0430" }
+ );
+}
+
+test "y_object_with_newlines" {
+ ok(
+ \\{
+ \\"a": "b"
+ \\}
+ );
+}
+
+test "y_string_1_2_3_bytes_UTF-8_sequences" {
+ ok(
+ \\["\u0060\u012a\u12AB"]
+ );
+}
+
+test "y_string_accepted_surrogate_pair" {
+ ok(
+ \\["\uD801\udc37"]
+ );
+}
+
+test "y_string_accepted_surrogate_pairs" {
+ ok(
+ \\["\ud83d\ude39\ud83d\udc8d"]
+ );
+}
+
+test "y_string_allowed_escapes" {
+ ok(
+ \\["\"\\\/\b\f\n\r\t"]
+ );
+}
+
+test "y_string_backslash_and_u_escaped_zero" {
+ ok(
+ \\["\\u0000"]
+ );
+}
+
+test "y_string_backslash_doublequotes" {
+ ok(
+ \\["\""]
+ );
+}
+
+test "y_string_comments" {
+ ok(
+ \\["a/*b*/c/*d//e"]
+ );
+}
+
+test "y_string_double_escape_a" {
+ ok(
+ \\["\\a"]
+ );
+}
+
+test "y_string_double_escape_n" {
+ ok(
+ \\["\\n"]
+ );
+}
+
+test "y_string_escaped_control_character" {
+ ok(
+ \\["\u0012"]
+ );
+}
+
+test "y_string_escaped_noncharacter" {
+ ok(
+ \\["\uFFFF"]
+ );
+}
+
+test "y_string_in_array" {
+ ok(
+ \\["asd"]
+ );
+}
+
+test "y_string_in_array_with_leading_space" {
+ ok(
+ \\[ "asd"]
+ );
+}
+
+test "y_string_last_surrogates_1_and_2" {
+ ok(
+ \\["\uDBFF\uDFFF"]
+ );
+}
+
+test "y_string_nbsp_uescaped" {
+ ok(
+ \\["new\u00A0line"]
+ );
+}
+
+test "y_string_nonCharacterInUTF-8_U+10FFFF" {
+ ok(
+ \\[""]
+ );
+}
+
+test "y_string_nonCharacterInUTF-8_U+FFFF" {
+ ok(
+ \\[""]
+ );
+}
+
+test "y_string_null_escape" {
+ ok(
+ \\["\u0000"]
+ );
+}
+
+test "y_string_one-byte-utf-8" {
+ ok(
+ \\["\u002c"]
+ );
+}
+
+test "y_string_pi" {
+ ok(
+ \\["π"]
+ );
+}
+
+test "y_string_reservedCharacterInUTF-8_U+1BFFF" {
+ ok(
+ \\[""]
+ );
+}
+
+test "y_string_simple_ascii" {
+ ok(
+ \\["asd "]
+ );
+}
+
+test "y_string_space" {
+ ok(
+ \\" "
+ );
+}
+
+test "y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF" {
+ ok(
+ \\["\uD834\uDd1e"]
+ );
+}
+
+test "y_string_three-byte-utf-8" {
+ ok(
+ \\["\u0821"]
+ );
+}
+
+test "y_string_two-byte-utf-8" {
+ ok(
+ \\["\u0123"]
+ );
+}
+
+test "y_string_u+2028_line_sep" {
+ ok("[\"\xe2\x80\xa8\"]");
+}
+
+test "y_string_u+2029_par_sep" {
+ ok("[\"\xe2\x80\xa9\"]");
+}
+
+test "y_string_uescaped_newline" {
+ ok(
+ \\["new\u000Aline"]
+ );
+}
+
+test "y_string_uEscape" {
+ ok(
+ \\["\u0061\u30af\u30EA\u30b9"]
+ );
+}
+
+test "y_string_unescaped_char_delete" {
+ ok("[\"\x7f\"]");
+}
+
+test "y_string_unicode_2" {
+ ok(
+ \\["⍂㈴⍂"]
+ );
+}
+
+test "y_string_unicodeEscapedBackslash" {
+ ok(
+ \\["\u005C"]
+ );
+}
+
+test "y_string_unicode_escaped_double_quote" {
+ ok(
+ \\["\u0022"]
+ );
+}
+
+test "y_string_unicode" {
+ ok(
+ \\["\uA66D"]
+ );
+}
+
+test "y_string_unicode_U+10FFFE_nonchar" {
+ ok(
+ \\["\uDBFF\uDFFE"]
+ );
+}
+
+test "y_string_unicode_U+1FFFE_nonchar" {
+ ok(
+ \\["\uD83F\uDFFE"]
+ );
+}
+
+test "y_string_unicode_U+200B_ZERO_WIDTH_SPACE" {
+ ok(
+ \\["\u200B"]
+ );
+}
+
+test "y_string_unicode_U+2064_invisible_plus" {
+ ok(
+ \\["\u2064"]
+ );
+}
+
+test "y_string_unicode_U+FDD0_nonchar" {
+ ok(
+ \\["\uFDD0"]
+ );
+}
+
+test "y_string_unicode_U+FFFE_nonchar" {
+ ok(
+ \\["\uFFFE"]
+ );
+}
+
+test "y_string_utf8" {
+ ok(
+ \\["€𝄞"]
+ );
+}
+
+test "y_string_with_del_character" {
+ ok("[\"a\x7fa\"]");
+}
+
+test "y_structure_lonely_false" {
+ ok(
+ \\false
+ );
+}
+
+test "y_structure_lonely_int" {
+ ok(
+ \\42
+ );
+}
+
+test "y_structure_lonely_negative_real" {
+ ok(
+ \\-0.1
+ );
+}
+
+test "y_structure_lonely_null" {
+ ok(
+ \\null
+ );
+}
+
+test "y_structure_lonely_string" {
+ ok(
+ \\"asd"
+ );
+}
+
+test "y_structure_lonely_true" {
+ ok(
+ \\true
+ );
+}
+
+test "y_structure_string_empty" {
+ ok(
+ \\""
+ );
+}
+
+test "y_structure_trailing_newline" {
+ ok(
+ \\["a"]
+ );
+}
+
+test "y_structure_true_in_array" {
+ ok(
+ \\[true]
+ );
+}
+
+test "y_structure_whitespace_array" {
+ ok(" [] ");
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+test "n_array_1_true_without_comma" {
+ err(
+ \\[1 true]
+ );
+}
+
+test "n_array_a_invalid_utf8" {
+ err(
+ \\[aå]
+ );
+}
+
+test "n_array_colon_instead_of_comma" {
+ err(
+ \\["": 1]
+ );
+}
+
+test "n_array_comma_after_close" {
+ //err(
+ // \\[""],
+ //);
+}
+
+test "n_array_comma_and_number" {
+ err(
+ \\[,1]
+ );
+}
+
+test "n_array_double_comma" {
+ err(
+ \\[1,,2]
+ );
+}
+
+test "n_array_double_extra_comma" {
+ err(
+ \\["x",,]
+ );
+}
+
+test "n_array_extra_close" {
+ err(
+ \\["x"]]
+ );
+}
+
+test "n_array_extra_comma" {
+ //err(
+ // \\["",]
+ //);
+}
+
+test "n_array_incomplete_invalid_value" {
+ err(
+ \\[x
+ );
+}
+
+test "n_array_incomplete" {
+ err(
+ \\["x"
+ );
+}
+
+test "n_array_inner_array_no_comma" {
+ err(
+ \\[3[4]]
+ );
+}
+
+test "n_array_invalid_utf8" {
+ err(
+ \\[ÿ]
+ );
+}
+
+test "n_array_items_separated_by_semicolon" {
+ err(
+ \\[1:2]
+ );
+}
+
+test "n_array_just_comma" {
+ err(
+ \\[,]
+ );
+}
+
+test "n_array_just_minus" {
+ err(
+ \\[-]
+ );
+}
+
+test "n_array_missing_value" {
+ err(
+ \\[ , ""]
+ );
+}
+
+test "n_array_newlines_unclosed" {
+ err(
+ \\["a",
+ \\4
+ \\,1,
+ );
+}
+
+test "n_array_number_and_comma" {
+ err(
+ \\[1,]
+ );
+}
+
+test "n_array_number_and_several_commas" {
+ err(
+ \\[1,,]
+ );
+}
+
+test "n_array_spaces_vertical_tab_formfeed" {
+ err("[\"\x0aa\"\\f]");
+}
+
+test "n_array_star_inside" {
+ err(
+ \\[*]
+ );
+}
+
+test "n_array_unclosed" {
+ err(
+ \\[""
+ );
+}
+
+test "n_array_unclosed_trailing_comma" {
+ err(
+ \\[1,
+ );
+}
+
+test "n_array_unclosed_with_new_lines" {
+ err(
+ \\[1,
+ \\1
+ \\,1
+ );
+}
+
+test "n_array_unclosed_with_object_inside" {
+ err(
+ \\[{}
+ );
+}
+
+test "n_incomplete_false" {
+ err(
+ \\[fals]
+ );
+}
+
+test "n_incomplete_null" {
+ err(
+ \\[nul]
+ );
+}
+
+test "n_incomplete_true" {
+ err(
+ \\[tru]
+ );
+}
+
+test "n_multidigit_number_then_00" {
+ err("123\x00");
+}
+
+test "n_number_0.1.2" {
+ err(
+ \\[0.1.2]
+ );
+}
+
+test "n_number_-01" {
+ err(
+ \\[-01]
+ );
+}
+
+test "n_number_0.3e" {
+ err(
+ \\[0.3e]
+ );
+}
+
+test "n_number_0.3e+" {
+ err(
+ \\[0.3e+]
+ );
+}
+
+test "n_number_0_capital_E" {
+ err(
+ \\[0E]
+ );
+}
+
+test "n_number_0_capital_E+" {
+ err(
+ \\[0E+]
+ );
+}
+
+test "n_number_0.e1" {
+ err(
+ \\[0.e1]
+ );
+}
+
+test "n_number_0e" {
+ err(
+ \\[0e]
+ );
+}
+
+test "n_number_0e+" {
+ err(
+ \\[0e+]
+ );
+}
+
+test "n_number_1_000" {
+ err(
+ \\[1 000.0]
+ );
+}
+
+test "n_number_1.0e-" {
+ err(
+ \\[1.0e-]
+ );
+}
+
+test "n_number_1.0e" {
+ err(
+ \\[1.0e]
+ );
+}
+
+test "n_number_1.0e+" {
+ err(
+ \\[1.0e+]
+ );
+}
+
+test "n_number_-1.0." {
+ err(
+ \\[-1.0.]
+ );
+}
+
+test "n_number_1eE2" {
+ err(
+ \\[1eE2]
+ );
+}
+
+test "n_number_.-1" {
+ err(
+ \\[.-1]
+ );
+}
+
+test "n_number_+1" {
+ err(
+ \\[+1]
+ );
+}
+
+test "n_number_.2e-3" {
+ err(
+ \\[.2e-3]
+ );
+}
+
+test "n_number_2.e-3" {
+ err(
+ \\[2.e-3]
+ );
+}
+
+test "n_number_2.e+3" {
+ err(
+ \\[2.e+3]
+ );
+}
+
+test "n_number_2.e3" {
+ err(
+ \\[2.e3]
+ );
+}
+
+test "n_number_-2." {
+ err(
+ \\[-2.]
+ );
+}
+
+test "n_number_9.e+" {
+ err(
+ \\[9.e+]
+ );
+}
+
+test "n_number_expression" {
+ err(
+ \\[1+2]
+ );
+}
+
+test "n_number_hex_1_digit" {
+ err(
+ \\[0x1]
+ );
+}
+
+test "n_number_hex_2_digits" {
+ err(
+ \\[0x42]
+ );
+}
+
+test "n_number_infinity" {
+ err(
+ \\[Infinity]
+ );
+}
+
+test "n_number_+Inf" {
+ err(
+ \\[+Inf]
+ );
+}
+
+test "n_number_Inf" {
+ err(
+ \\[Inf]
+ );
+}
+
+test "n_number_invalid+-" {
+ err(
+ \\[0e+-1]
+ );
+}
+
+test "n_number_invalid-negative-real" {
+ err(
+ \\[-123.123foo]
+ );
+}
+
+test "n_number_invalid-utf-8-in-bigger-int" {
+ err(
+ \\[123å]
+ );
+}
+
+test "n_number_invalid-utf-8-in-exponent" {
+ err(
+ \\[1e1å]
+ );
+}
+
+test "n_number_invalid-utf-8-in-int" {
+ err(
+ \\[0å]
+ );
+}
+
+test "n_number_++" {
+ err(
+ \\[++1234]
+ );
+}
+
+test "n_number_minus_infinity" {
+ err(
+ \\[-Infinity]
+ );
+}
+
+test "n_number_minus_sign_with_trailing_garbage" {
+ err(
+ \\[-foo]
+ );
+}
+
+test "n_number_minus_space_1" {
+ err(
+ \\[- 1]
+ );
+}
+
+test "n_number_-NaN" {
+ err(
+ \\[-NaN]
+ );
+}
+
+test "n_number_NaN" {
+ err(
+ \\[NaN]
+ );
+}
+
+test "n_number_neg_int_starting_with_zero" {
+ err(
+ \\[-012]
+ );
+}
+
+test "n_number_neg_real_without_int_part" {
+ err(
+ \\[-.123]
+ );
+}
+
+test "n_number_neg_with_garbage_at_end" {
+ err(
+ \\[-1x]
+ );
+}
+
+test "n_number_real_garbage_after_e" {
+ err(
+ \\[1ea]
+ );
+}
+
+test "n_number_real_with_invalid_utf8_after_e" {
+ err(
+ \\[1eå]
+ );
+}
+
+test "n_number_real_without_fractional_part" {
+ err(
+ \\[1.]
+ );
+}
+
+test "n_number_starting_with_dot" {
+ err(
+ \\[.123]
+ );
+}
+
+test "n_number_U+FF11_fullwidth_digit_one" {
+ err(
+ \\[ï¼]
+ );
+}
+
+test "n_number_with_alpha_char" {
+ err(
+ \\[1.8011670033376514H-308]
+ );
+}
+
+test "n_number_with_alpha" {
+ err(
+ \\[1.2a-3]
+ );
+}
+
+test "n_number_with_leading_zero" {
+ err(
+ \\[012]
+ );
+}
+
+test "n_object_bad_value" {
+ err(
+ \\["x", truth]
+ );
+}
+
+test "n_object_bracket_key" {
+ err(
+ \\{[: "x"}
+ );
+}
+
+test "n_object_comma_instead_of_colon" {
+ err(
+ \\{"x", null}
+ );
+}
+
+test "n_object_double_colon" {
+ err(
+ \\{"x"::"b"}
+ );
+}
+
+test "n_object_emoji" {
+ err(
+ \\{🇨🇭}
+ );
+}
+
+test "n_object_garbage_at_end" {
+ err(
+ \\{"a":"a" 123}
+ );
+}
+
+test "n_object_key_with_single_quotes" {
+ err(
+ \\{key: 'value'}
+ );
+}
+
+test "n_object_lone_continuation_byte_in_key_and_trailing_comma" {
+ err(
+ \\{"¹":"0",}
+ );
+}
+
+test "n_object_missing_colon" {
+ err(
+ \\{"a" b}
+ );
+}
+
+test "n_object_missing_key" {
+ err(
+ \\{:"b"}
+ );
+}
+
+test "n_object_missing_semicolon" {
+ err(
+ \\{"a" "b"}
+ );
+}
+
+test "n_object_missing_value" {
+ err(
+ \\{"a":
+ );
+}
+
+test "n_object_no-colon" {
+ err(
+ \\{"a"
+ );
+}
+
+test "n_object_non_string_key_but_huge_number_instead" {
+ err(
+ \\{9999E9999:1}
+ );
+}
+
+test "n_object_non_string_key" {
+ err(
+ \\{1:1}
+ );
+}
+
+test "n_object_repeated_null_null" {
+ err(
+ \\{null:null,null:null}
+ );
+}
+
+test "n_object_several_trailing_commas" {
+ err(
+ \\{"id":0,,,,,}
+ );
+}
+
+test "n_object_single_quote" {
+ err(
+ \\{'a':0}
+ );
+}
+
+test "n_object_trailing_comma" {
+ err(
+ \\{"id":0,}
+ );
+}
+
+test "n_object_trailing_comment" {
+ err(
+ \\{"a":"b"}/**/
+ );
+}
+
+test "n_object_trailing_comment_open" {
+ err(
+ \\{"a":"b"}/**//
+ );
+}
+
+test "n_object_trailing_comment_slash_open_incomplete" {
+ err(
+ \\{"a":"b"}/
+ );
+}
+
+test "n_object_trailing_comment_slash_open" {
+ err(
+ \\{"a":"b"}//
+ );
+}
+
+test "n_object_two_commas_in_a_row" {
+ err(
+ \\{"a":"b",,"c":"d"}
+ );
+}
+
+test "n_object_unquoted_key" {
+ err(
+ \\{a: "b"}
+ );
+}
+
+test "n_object_unterminated-value" {
+ err(
+ \\{"a":"a
+ );
+}
+
+test "n_object_with_single_string" {
+ err(
+ \\{ "foo" : "bar", "a" }
+ );
+}
+
+test "n_object_with_trailing_garbage" {
+ err(
+ \\{"a":"b"}#
+ );
+}
+
+test "n_single_space" {
+ err(" ");
+}
+
+test "n_string_1_surrogate_then_escape" {
+ err(
+ \\["\uD800\"]
+ );
+}
+
+test "n_string_1_surrogate_then_escape_u1" {
+ err(
+ \\["\uD800\u1"]
+ );
+}
+
+test "n_string_1_surrogate_then_escape_u1x" {
+ err(
+ \\["\uD800\u1x"]
+ );
+}
+
+test "n_string_1_surrogate_then_escape_u" {
+ err(
+ \\["\uD800\u"]
+ );
+}
+
+test "n_string_accentuated_char_no_quotes" {
+ err(
+ \\[é]
+ );
+}
+
+test "n_string_backslash_00" {
+ err("[\"\x00\"]");
+}
+
+test "n_string_escaped_backslash_bad" {
+ err(
+ \\["\\\"]
+ );
+}
+
+test "n_string_escaped_ctrl_char_tab" {
+ err("\x5b\x22\x5c\x09\x22\x5d");
+}
+
+test "n_string_escaped_emoji" {
+ err("[\"\x5c\xc3\xb0\xc2\x9f\xc2\x8c\xc2\x80\"]");
+}
+
+test "n_string_escape_x" {
+ err(
+ \\["\x00"]
+ );
+}
+
+test "n_string_incomplete_escaped_character" {
+ err(
+ \\["\u00A"]
+ );
+}
+
+test "n_string_incomplete_escape" {
+ err(
+ \\["\"]
+ );
+}
+
+test "n_string_incomplete_surrogate_escape_invalid" {
+ err(
+ \\["\uD800\uD800\x"]
+ );
+}
+
+test "n_string_incomplete_surrogate" {
+ err(
+ \\["\uD834\uDd"]
+ );
+}
+
+test "n_string_invalid_backslash_esc" {
+ err(
+ \\["\a"]
+ );
+}
+
+test "n_string_invalid_unicode_escape" {
+ err(
+ \\["\uqqqq"]
+ );
+}
+
+test "n_string_invalid_utf8_after_escape" {
+ err("[\"\\\x75\xc3\xa5\"]");
+}
+
+test "n_string_invalid-utf-8-in-escape" {
+ err(
+ \\["\uå"]
+ );
+}
+
+test "n_string_leading_uescaped_thinspace" {
+ err(
+ \\[\u0020"asd"]
+ );
+}
+
+test "n_string_no_quotes_with_bad_escape" {
+ err(
+ \\[\n]
+ );
+}
+
+test "n_string_single_doublequote" {
+ err(
+ \\"
+ );
+}
+
+test "n_string_single_quote" {
+ err(
+ \\['single quote']
+ );
+}
+
+test "n_string_single_string_no_double_quotes" {
+ err(
+ \\abc
+ );
+}
+
+test "n_string_start_escape_unclosed" {
+ err(
+ \\["\
+ );
+}
+
+test "n_string_unescaped_crtl_char" {
+ err("[\"a\x00a\"]");
+}
+
+test "n_string_unescaped_newline" {
+ err(
+ \\["new
+ \\line"]
+ );
+}
+
+test "n_string_unescaped_tab" {
+ err("[\"\t\"]");
+}
+
+test "n_string_unicode_CapitalU" {
+ err(
+ \\"\UA66D"
+ );
+}
+
+test "n_string_with_trailing_garbage" {
+ err(
+ \\""x
+ );
+}
+
+test "n_structure_100000_opening_arrays" {
+ err("[" ** 100000);
+}
+
+test "n_structure_angle_bracket_." {
+ err(
+ \\<.>
+ );
+}
+
+test "n_structure_angle_bracket_null" {
+ err(
+ \\[<null>]
+ );
+}
+
+test "n_structure_array_trailing_garbage" {
+ err(
+ \\[1]x
+ );
+}
+
+test "n_structure_array_with_extra_array_close" {
+ err(
+ \\[1]]
+ );
+}
+
+test "n_structure_array_with_unclosed_string" {
+ err(
+ \\["asd]
+ );
+}
+
+test "n_structure_ascii-unicode-identifier" {
+ err(
+ \\aå
+ );
+}
+
+test "n_structure_capitalized_True" {
+ err(
+ \\[True]
+ );
+}
+
+test "n_structure_close_unopened_array" {
+ err(
+ \\1]
+ );
+}
+
+test "n_structure_comma_instead_of_closing_brace" {
+ err(
+ \\{"x": true,
+ );
+}
+
+test "n_structure_double_array" {
+ err(
+ \\[][]
+ );
+}
+
+test "n_structure_end_array" {
+ err(
+ \\]
+ );
+}
+
+test "n_structure_incomplete_UTF8_BOM" {
+ err(
+ \\ï»{}
+ );
+}
+
+test "n_structure_lone-invalid-utf-8" {
+ err(
+ \\å
+ );
+}
+
+test "n_structure_lone-open-bracket" {
+ err(
+ \\[
+ );
+}
+
+test "n_structure_no_data" {
+ err(
+ \\
+ );
+}
+
+test "n_structure_null-byte-outside-string" {
+ err("[\x00]");
+}
+
+test "n_structure_number_with_trailing_garbage" {
+ err(
+ \\2@
+ );
+}
+
+test "n_structure_object_followed_by_closing_object" {
+ err(
+ \\{}}
+ );
+}
+
+test "n_structure_object_unclosed_no_value" {
+ err(
+ \\{"":
+ );
+}
+
+test "n_structure_object_with_comment" {
+ err(
+ \\{"a":/*comment*/"b"}
+ );
+}
+
+test "n_structure_object_with_trailing_garbage" {
+ err(
+ \\{"a": true} "x"
+ );
+}
+
+test "n_structure_open_array_apostrophe" {
+ err(
+ \\['
+ );
+}
+
+test "n_structure_open_array_comma" {
+ err(
+ \\[,
+ );
+}
+
+test "n_structure_open_array_object" {
+ err("[{\"\":" ** 50000);
+}
+
+test "n_structure_open_array_open_object" {
+ err(
+ \\[{
+ );
+}
+
+test "n_structure_open_array_open_string" {
+ err(
+ \\["a
+ );
+}
+
+test "n_structure_open_array_string" {
+ err(
+ \\["a"
+ );
+}
+
+test "n_structure_open_object_close_array" {
+ err(
+ \\{]
+ );
+}
+
+test "n_structure_open_object_comma" {
+ err(
+ \\{,
+ );
+}
+
+test "n_structure_open_object" {
+ err(
+ \\{
+ );
+}
+
+test "n_structure_open_object_open_array" {
+ err(
+ \\{[
+ );
+}
+
+test "n_structure_open_object_open_string" {
+ err(
+ \\{"a
+ );
+}
+
+test "n_structure_open_object_string_with_apostrophes" {
+ err(
+ \\{'a'
+ );
+}
+
+test "n_structure_open_open" {
+ err(
+ \\["\{["\{["\{["\{
+ );
+}
+
+test "n_structure_single_eacute" {
+ err(
+ \\é
+ );
+}
+
+test "n_structure_single_star" {
+ err(
+ \\*
+ );
+}
+
+test "n_structure_trailing_#" {
+ err(
+ \\{"a":"b"}#{}
+ );
+}
+
+test "n_structure_U+2060_word_joined" {
+ err(
+ \\[⁠]
+ );
+}
+
+test "n_structure_uescaped_LF_before_string" {
+ err(
+ \\[\u000A""]
+ );
+}
+
+test "n_structure_unclosed_array" {
+ err(
+ \\[1
+ );
+}
+
+test "n_structure_unclosed_array_partial_null" {
+ err(
+ \\[ false, nul
+ );
+}
+
+test "n_structure_unclosed_array_unfinished_false" {
+ err(
+ \\[ true, fals
+ );
+}
+
+test "n_structure_unclosed_array_unfinished_true" {
+ err(
+ \\[ false, tru
+ );
+}
+
+test "n_structure_unclosed_object" {
+ err(
+ \\{"asd":"asd"
+ );
+}
+
+test "n_structure_unicode-identifier" {
+ err(
+ \\å
+ );
+}
+
+test "n_structure_UTF8_BOM_no_data" {
+ err(
+ \\
+ );
+}
+
+test "n_structure_whitespace_formfeed" {
+ err("[\x0c]");
+}
+
+test "n_structure_whitespace_U+2060_word_joiner" {
+ err(
+ \\[⁠]
+ );
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+test "i_number_double_huge_neg_exp" {
+ any(
+ \\[123.456e-789]
+ );
+}
+
+test "i_number_huge_exp" {
+ any(
+ \\[0.4e00669999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999969999999006]
+ );
+}
+
+test "i_number_neg_int_huge_exp" {
+ any(
+ \\[-1e+9999]
+ );
+}
+
+test "i_number_pos_double_huge_exp" {
+ any(
+ \\[1.5e+9999]
+ );
+}
+
+test "i_number_real_neg_overflow" {
+ any(
+ \\[-123123e100000]
+ );
+}
+
+test "i_number_real_pos_overflow" {
+ any(
+ \\[123123e100000]
+ );
+}
+
+test "i_number_real_underflow" {
+ any(
+ \\[123e-10000000]
+ );
+}
+
+test "i_number_too_big_neg_int" {
+ any(
+ \\[-123123123123123123123123123123]
+ );
+}
+
+test "i_number_too_big_pos_int" {
+ any(
+ \\[100000000000000000000]
+ );
+}
+
+test "i_number_very_big_negative_int" {
+ any(
+ \\[-237462374673276894279832749832423479823246327846]
+ );
+}
+
+test "i_object_key_lone_2nd_surrogate" {
+ any(
+ \\{"\uDFAA":0}
+ );
+}
+
+test "i_string_1st_surrogate_but_2nd_missing" {
+ any(
+ \\["\uDADA"]
+ );
+}
+
+test "i_string_1st_valid_surrogate_2nd_invalid" {
+ any(
+ \\["\uD888\u1234"]
+ );
+}
+
+test "i_string_incomplete_surrogate_and_escape_valid" {
+ any(
+ \\["\uD800\n"]
+ );
+}
+
+test "i_string_incomplete_surrogate_pair" {
+ any(
+ \\["\uDd1ea"]
+ );
+}
+
+test "i_string_incomplete_surrogates_escape_valid" {
+ any(
+ \\["\uD800\uD800\n"]
+ );
+}
+
+test "i_string_invalid_lonely_surrogate" {
+ any(
+ \\["\ud800"]
+ );
+}
+
+test "i_string_invalid_surrogate" {
+ any(
+ \\["\ud800abc"]
+ );
+}
+
+test "i_string_invalid_utf-8" {
+ any(
+ \\["ÿ"]
+ );
+}
+
+test "i_string_inverted_surrogates_U+1D11E" {
+ any(
+ \\["\uDd1e\uD834"]
+ );
+}
+
+test "i_string_iso_latin_1" {
+ any(
+ \\["é"]
+ );
+}
+
+test "i_string_lone_second_surrogate" {
+ any(
+ \\["\uDFAA"]
+ );
+}
+
+test "i_string_lone_utf8_continuation_byte" {
+ any(
+ \\[""]
+ );
+}
+
+test "i_string_not_in_unicode_range" {
+ any(
+ \\["ô¿¿¿"]
+ );
+}
+
+test "i_string_overlong_sequence_2_bytes" {
+ any(
+ \\["À¯"]
+ );
+}
+
+test "i_string_overlong_sequence_6_bytes" {
+ any(
+ \\["ü¿¿¿¿"]
+ );
+}
+
+test "i_string_overlong_sequence_6_bytes_null" {
+ any(
+ \\["ü"]
+ );
+}
+
+test "i_string_truncated-utf-8" {
+ any(
+ \\["àÿ"]
+ );
+}
+
+test "i_string_utf16BE_no_BOM" {
+ any("\x00\x5b\x00\x22\x00\xc3\xa9\x00\x22\x00\x5d");
+}
+
+test "i_string_utf16LE_no_BOM" {
+ any("\x5b\x00\x22\x00\xc3\xa9\x00\x22\x00\x5d\x00");
+}
+
+test "i_string_UTF-16LE_with_BOM" {
+ any("\xc3\xbf\xc3\xbe\x5b\x00\x22\x00\xc3\xa9\x00\x22\x00\x5d\x00");
+}
+
+test "i_string_UTF-8_invalid_sequence" {
+ any(
+ \\["æ¥Ñú"]
+ );
+}
+
+test "i_string_UTF8_surrogate_U+D800" {
+ any(
+ \\["í "]
+ );
+}
+
+test "i_structure_500_nested_arrays" {
+ any(("[" ** 500) ++ ("]" ** 500));
+}
+
+test "i_structure_UTF-8_BOM_empty_object" {
+ any(
+ \\{}
+ );
+}
diff --git a/std/lazy_init.zig b/std/lazy_init.zig
new file mode 100644
index 0000000000..c46c067810
--- /dev/null
+++ b/std/lazy_init.zig
@@ -0,0 +1,85 @@
+const std = @import("index.zig");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+
+/// Thread-safe initialization of global data.
+/// TODO use a mutex instead of a spinlock
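+/// Usage sketch (the name `computeValue` below is illustrative, not part of std):
+///
+///     var gate = lazyInit(i32); // container-level (global) variable
+///
+///     if (gate.get()) |ptr| {
+///         // Already initialized by another thread; read through ptr.
+///         _ = ptr.*;
+///     } else {
+///         // This thread won the race: initialize the data, then publish it.
+///         gate.data = computeValue();
+///         gate.resolve();
+///     }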
+pub fn lazyInit(comptime T: type) LazyInit(T) {
+ return LazyInit(T){
+ .data = undefined,
+ .state = 0,
+ };
+}
+
+fn LazyInit(comptime T: type) type {
+ return struct {
+ state: u8, // TODO make this an enum
+ data: Data,
+
+ const Self = this;
+
+ // TODO this isn't working for void, investigate and then remove this special case
+ const Data = if (@sizeOf(T) == 0) u8 else T;
+ const Ptr = if (T == void) void else *T;
+
+ /// Returns a usable pointer to the initialized data,
+ /// or returns null, indicating that the caller should
+ /// perform the initialization and then call resolve().
+ pub fn get(self: *Self) ?Ptr {
+ while (true) {
+ var state = @cmpxchgWeak(u8, &self.state, 0, 1, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return null;
+ switch (state) {
+ 0 => continue,
+ 1 => {
+ // TODO mutex instead of a spinlock
+ continue;
+ },
+ 2 => {
+ if (@sizeOf(T) == 0) {
+ return T(undefined);
+ } else {
+ return &self.data;
+ }
+ },
+ else => unreachable,
+ }
+ }
+ }
+
+ pub fn resolve(self: *Self) void {
+ const prev = @atomicRmw(u8, &self.state, AtomicRmwOp.Xchg, 2, AtomicOrder.SeqCst);
+ assert(prev == 1); // resolve() called twice
+ }
+ };
+}
+
+var global_number = lazyInit(i32);
+
+test "std.lazyInit" {
+ if (global_number.get()) |_| @panic("bad") else {
+ global_number.data = 1234;
+ global_number.resolve();
+ }
+ if (global_number.get()) |x| {
+ assert(x.* == 1234);
+ } else {
+ @panic("bad");
+ }
+ if (global_number.get()) |x| {
+ assert(x.* == 1234);
+ } else {
+ @panic("bad");
+ }
+}
+
+var global_void = lazyInit(void);
+
+test "std.lazyInit(void)" {
+ if (global_void.get()) |_| @panic("bad") else {
+ global_void.resolve();
+ }
+ assert(global_void.get() != null);
+ assert(global_void.get() != null);
+}
diff --git a/std/linked_list.zig b/std/linked_list.zig
index 45595f3efb..62cd5ca2bb 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -21,15 +21,15 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Node inside the linked list wrapping the actual data.
pub const Node = struct {
- prev: ?&Node,
- next: ?&Node,
+ prev: ?*Node,
+ next: ?*Node,
data: T,
- pub fn init(value: &const T) Node {
- return Node {
+ pub fn init(value: *const T) Node {
+ return Node{
.prev = null,
.next = null,
- .data = *value,
+ .data = value.*,
};
}
@@ -38,25 +38,25 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
return Node.init({});
}
- pub fn toData(node: &Node) &ParentType {
+ pub fn toData(node: *Node) *ParentType {
comptime assert(isIntrusive());
return @fieldParentPtr(ParentType, field_name, node);
}
};
- first: ?&Node,
- last: ?&Node,
- len: usize,
+ first: ?*Node,
+ last: ?*Node,
+ len: usize,
/// Initialize a linked list.
///
/// Returns:
/// An empty linked list.
pub fn init() Self {
- return Self {
+ return Self{
.first = null,
- .last = null,
- .len = 0,
+ .last = null,
+ .len = 0,
};
}
@@ -69,7 +69,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to a node in the list.
/// new_node: Pointer to the new node to insert.
- pub fn insertAfter(list: &Self, node: &Node, new_node: &Node) void {
+ pub fn insertAfter(list: *Self, node: *Node, new_node: *Node) void {
new_node.prev = node;
if (node.next) |next_node| {
// Intermediate node.
@@ -90,7 +90,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to a node in the list.
/// new_node: Pointer to the new node to insert.
- pub fn insertBefore(list: &Self, node: &Node, new_node: &Node) void {
+ pub fn insertBefore(list: *Self, node: *Node, new_node: *Node) void {
new_node.next = node;
if (node.prev) |prev_node| {
// Intermediate node.
@@ -110,7 +110,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
- pub fn append(list: &Self, new_node: &Node) void {
+ pub fn append(list: *Self, new_node: *Node) void {
if (list.last) |last| {
// Insert after last.
list.insertAfter(last, new_node);
@@ -124,14 +124,14 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
- pub fn prepend(list: &Self, new_node: &Node) void {
+ pub fn prepend(list: *Self, new_node: *Node) void {
if (list.first) |first| {
// Insert before first.
list.insertBefore(first, new_node);
} else {
// Empty list.
list.first = new_node;
- list.last = new_node;
+ list.last = new_node;
new_node.prev = null;
new_node.next = null;
@@ -143,7 +143,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// node: Pointer to the node to be removed.
- pub fn remove(list: &Self, node: &Node) void {
+ pub fn remove(list: *Self, node: *Node) void {
if (node.prev) |prev_node| {
// Intermediate node.
prev_node.next = node.next;
@@ -168,8 +168,8 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the last node in the list.
- pub fn pop(list: &Self) ?&Node {
- const last = list.last ?? return null;
+ pub fn pop(list: *Self) ?*Node {
+ const last = list.last orelse return null;
list.remove(last);
return last;
}
@@ -178,8 +178,8 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the first node in the list.
- pub fn popFirst(list: &Self) ?&Node {
- const first = list.first ?? return null;
+ pub fn popFirst(list: *Self) ?*Node {
+ const first = list.first orelse return null;
list.remove(first);
return first;
}
@@ -191,9 +191,9 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the new node.
- pub fn allocateNode(list: &Self, allocator: &Allocator) !&Node {
+ pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
- return allocator.create(Node);
+ return allocator.create(Node(undefined));
}
/// Deallocate a node.
@@ -201,7 +201,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to the node to deallocate.
/// allocator: Dynamic memory allocator.
- pub fn destroyNode(list: &Self, node: &Node, allocator: &Allocator) void {
+ pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void {
comptime assert(!isIntrusive());
allocator.destroy(node);
}
@@ -214,10 +214,10 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the new node.
- pub fn createNode(list: &Self, data: &const T, allocator: &Allocator) !&Node {
+ pub fn createNode(list: *Self, data: *const T, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
var node = try list.allocateNode(allocator);
- *node = Node.init(data);
+ node.* = Node.init(data);
return node;
}
};
@@ -227,11 +227,11 @@ test "basic linked list test" {
const allocator = debug.global_allocator;
var list = LinkedList(u32).init();
- var one = try list.createNode(1, allocator);
- var two = try list.createNode(2, allocator);
+ var one = try list.createNode(1, allocator);
+ var two = try list.createNode(2, allocator);
var three = try list.createNode(3, allocator);
- var four = try list.createNode(4, allocator);
- var five = try list.createNode(5, allocator);
+ var four = try list.createNode(4, allocator);
+ var five = try list.createNode(5, allocator);
defer {
list.destroyNode(one, allocator);
list.destroyNode(two, allocator);
@@ -240,11 +240,11 @@ test "basic linked list test" {
list.destroyNode(five, allocator);
}
- list.append(two); // {2}
- list.append(five); // {2, 5}
- list.prepend(one); // {1, 2, 5}
- list.insertBefore(five, four); // {1, 2, 4, 5}
- list.insertAfter(two, three); // {1, 2, 3, 4, 5}
+ list.append(two); // {2}
+ list.append(five); // {2, 5}
+ list.prepend(one); // {1, 2, 5}
+ list.insertBefore(five, four); // {1, 2, 4, 5}
+ list.insertAfter(two, three); // {1, 2, 3, 4, 5}
// Traverse forwards.
{
@@ -266,13 +266,13 @@ test "basic linked list test" {
}
}
- var first = list.popFirst(); // {2, 3, 4, 5}
- var last = list.pop(); // {2, 3, 4}
- list.remove(three); // {2, 4}
+ var first = list.popFirst(); // {2, 3, 4, 5}
+ var last = list.pop(); // {2, 3, 4}
+ list.remove(three); // {2, 4}
- assert ((??list.first).data == 2);
- assert ((??list.last ).data == 4);
- assert (list.len == 2);
+ assert(list.first.?.data == 2);
+ assert(list.last.?.data == 4);
+ assert(list.len == 2);
}
const ElementList = IntrusiveLinkedList(Element, "link");
@@ -285,17 +285,32 @@ test "basic intrusive linked list test" {
const allocator = debug.global_allocator;
var list = ElementList.init();
- var one = Element { .value = 1, .link = ElementList.Node.initIntrusive() };
- var two = Element { .value = 2, .link = ElementList.Node.initIntrusive() };
- var three = Element { .value = 3, .link = ElementList.Node.initIntrusive() };
- var four = Element { .value = 4, .link = ElementList.Node.initIntrusive() };
- var five = Element { .value = 5, .link = ElementList.Node.initIntrusive() };
+ var one = Element{
+ .value = 1,
+ .link = ElementList.Node.initIntrusive(),
+ };
+ var two = Element{
+ .value = 2,
+ .link = ElementList.Node.initIntrusive(),
+ };
+ var three = Element{
+ .value = 3,
+ .link = ElementList.Node.initIntrusive(),
+ };
+ var four = Element{
+ .value = 4,
+ .link = ElementList.Node.initIntrusive(),
+ };
+ var five = Element{
+ .value = 5,
+ .link = ElementList.Node.initIntrusive(),
+ };
- list.append(&two.link); // {2}
- list.append(&five.link); // {2, 5}
- list.prepend(&one.link); // {1, 2, 5}
- list.insertBefore(&five.link, &four.link); // {1, 2, 4, 5}
- list.insertAfter(&two.link, &three.link); // {1, 2, 3, 4, 5}
+ list.append(&two.link); // {2}
+ list.append(&five.link); // {2, 5}
+ list.prepend(&one.link); // {1, 2, 5}
+ list.insertBefore(&five.link, &four.link); // {1, 2, 4, 5}
+ list.insertAfter(&two.link, &three.link); // {1, 2, 3, 4, 5}
// Traverse forwards.
{
@@ -317,11 +332,11 @@ test "basic intrusive linked list test" {
}
}
- var first = list.popFirst(); // {2, 3, 4, 5}
- var last = list.pop(); // {2, 3, 4}
- list.remove(&three.link); // {2, 4}
+ var first = list.popFirst(); // {2, 3, 4, 5}
+ var last = list.pop(); // {2, 3, 4}
+ list.remove(&three.link); // {2, 4}
- assert ((??list.first).toData().value == 2);
- assert ((??list.last ).toData().value == 4);
- assert (list.len == 2);
+ assert(list.first.?.toData().value == 2);
+ assert(list.last.?.toData().value == 4);
+ assert(list.len == 2);
}
diff --git a/std/macho.zig b/std/macho.zig
index 70e2c09788..ddc4d334e4 100644
--- a/std/macho.zig
+++ b/std/macho.zig
@@ -42,13 +42,13 @@ pub const Symbol = struct {
name: []const u8,
address: u64,
- fn addressLessThan(lhs: &const Symbol, rhs: &const Symbol) bool {
+ fn addressLessThan(lhs: Symbol, rhs: Symbol) bool {
return lhs.address < rhs.address;
}
};
pub const SymbolTable = struct {
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
symbols: []const Symbol,
strings: []const u8,
@@ -56,17 +56,17 @@ pub const SymbolTable = struct {
// Ideally we'd use _mh_execute_header because it's always at 0x100000000
// in the image but as it's located in a different section than executable
// code, its displacement is different.
- pub fn deinit(self: &SymbolTable) void {
+ pub fn deinit(self: *SymbolTable) void {
self.allocator.free(self.symbols);
- self.symbols = []const Symbol {};
+ self.symbols = []const Symbol{};
self.allocator.free(self.strings);
- self.strings = []const u8 {};
+ self.strings = []const u8{};
}
- pub fn search(self: &const SymbolTable, address: usize) ?&const Symbol {
+ pub fn search(self: *const SymbolTable, address: usize) ?*const Symbol {
var min: usize = 0;
- var max: usize = self.symbols.len - 1; // Exclude sentinel.
+ var max: usize = self.symbols.len - 1; // Exclude sentinel.
while (min < max) {
const mid = min + (max - min) / 2;
const curr = &self.symbols[mid];
@@ -83,7 +83,7 @@ pub const SymbolTable = struct {
}
};
-pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable {
+pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable {
var file = in.file;
try file.seekTo(0);
@@ -118,10 +118,11 @@ pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable
try in.stream.readNoEof(strings);
var nsyms: usize = 0;
- for (syms) |sym| if (isSymbol(sym)) nsyms += 1;
+ for (syms) |sym|
+ if (isSymbol(sym)) nsyms += 1;
if (nsyms == 0) return error.MissingDebugInfo;
- var symbols = try allocator.alloc(Symbol, nsyms + 1); // Room for sentinel.
+ var symbols = try allocator.alloc(Symbol, nsyms + 1); // Room for sentinel.
errdefer allocator.free(symbols);
var pie_slide: usize = 0;
@@ -129,10 +130,10 @@ pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable
for (syms) |sym| {
if (!isSymbol(sym)) continue;
const start = sym.n_strx;
- const end = ??mem.indexOfScalarPos(u8, strings, start, 0);
+ const end = mem.indexOfScalarPos(u8, strings, start, 0).?;
const name = strings[start..end];
const address = sym.n_value;
- symbols[nsym] = Symbol { .name = name, .address = address };
+ symbols[nsym] = Symbol{ .name = name, .address = address };
nsym += 1;
if (is_pie and mem.eql(u8, name, "_SymbolTable_deinit")) {
pie_slide = @ptrToInt(SymbolTable.deinit) - address;
@@ -140,31 +141,32 @@ pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable
}
// Effectively a no-op, lld emits symbols in ascending order.
- std.sort.insertionSort(Symbol, symbols[0..nsyms], Symbol.addressLessThan);
+ std.sort.sort(Symbol, symbols[0..nsyms], Symbol.addressLessThan);
// Insert the sentinel. Since we don't know where the last function ends,
// we arbitrarily limit it to the start address + 4 KB.
const top = symbols[nsyms - 1].address + 4096;
- symbols[nsyms] = Symbol { .name = "", .address = top };
+ symbols[nsyms] = Symbol{ .name = "", .address = top };
if (pie_slide != 0) {
- for (symbols) |*symbol| symbol.address += pie_slide;
+ for (symbols) |*symbol|
+ symbol.address += pie_slide;
}
- return SymbolTable {
+ return SymbolTable{
.allocator = allocator,
.symbols = symbols,
.strings = strings,
};
}
-fn readNoEof(in: &io.FileInStream, comptime T: type, result: []T) !void {
- return in.stream.readNoEof(([]u8)(result));
+fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void {
+ return in.stream.readNoEof(@sliceToBytes(result));
}
-fn readOneNoEof(in: &io.FileInStream, comptime T: type, result: &T) !void {
- return readNoEof(in, T, result[0..1]);
+fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void {
+ return readNoEof(in, T, (*[1]T)(result)[0..]);
}
-fn isSymbol(sym: &const Nlist64) bool {
+fn isSymbol(sym: *const Nlist64) bool {
return sym.n_value != 0 and sym.n_desc == 0;
}
diff --git a/std/math/acos.zig b/std/math/acos.zig
index a4f08af306..54844e8f6e 100644
--- a/std/math/acos.zig
+++ b/std/math/acos.zig
@@ -16,7 +16,7 @@ pub fn acos(x: var) @typeOf(x) {
}
fn r32(z: f32) f32 {
- const pS0 = 1.6666586697e-01;
+ const pS0 = 1.6666586697e-01;
const pS1 = -4.2743422091e-02;
const pS2 = -8.6563630030e-03;
const qS1 = -7.0662963390e-01;
@@ -74,16 +74,16 @@ fn acos32(x: f32) f32 {
}
fn r64(z: f64) f64 {
- const pS0: f64 = 1.66666666666666657415e-01;
+ const pS0: f64 = 1.66666666666666657415e-01;
const pS1: f64 = -3.25565818622400915405e-01;
- const pS2: f64 = 2.01212532134862925881e-01;
+ const pS2: f64 = 2.01212532134862925881e-01;
const pS3: f64 = -4.00555345006794114027e-02;
- const pS4: f64 = 7.91534994289814532176e-04;
- const pS5: f64 = 3.47933107596021167570e-05;
+ const pS4: f64 = 7.91534994289814532176e-04;
+ const pS5: f64 = 3.47933107596021167570e-05;
const qS1: f64 = -2.40339491173441421878e+00;
- const qS2: f64 = 2.02094576023350569471e+00;
+ const qS2: f64 = 2.02094576023350569471e+00;
const qS3: f64 = -6.88283971605453293030e-01;
- const qS4: f64 = 7.70381505559019352791e-02;
+ const qS4: f64 = 7.70381505559019352791e-02;
const p = z * (pS0 + z * (pS1 + z * (pS2 + z * (pS3 + z * (pS4 + z * pS5)))));
const q = 1.0 + z * (qS1 + z * (qS2 + z * (qS3 + z * qS4)));
@@ -95,12 +95,12 @@ fn acos64(x: f64) f64 {
const pio2_lo: f64 = 6.12323399573676603587e-17;
const ux = @bitCast(u64, x);
- const hx = u32(ux >> 32);
+ const hx = @intCast(u32, ux >> 32);
const ix = hx & 0x7FFFFFFF;
// |x| >= 1 or nan
if (ix >= 0x3FF00000) {
- const lx = u32(ux & 0xFFFFFFFF);
+ const lx = @intCast(u32, ux & 0xFFFFFFFF);
// acos(1) = 0, acos(-1) = pi
if ((ix - 0x3FF00000) | lx == 0) {
diff --git a/std/math/asin.zig b/std/math/asin.zig
index 9fa5a80ea5..30b3a57e32 100644
--- a/std/math/asin.zig
+++ b/std/math/asin.zig
@@ -17,7 +17,7 @@ pub fn asin(x: var) @typeOf(x) {
}
fn r32(z: f32) f32 {
- const pS0 = 1.6666586697e-01;
+ const pS0 = 1.6666586697e-01;
const pS1 = -4.2743422091e-02;
const pS2 = -8.6563630030e-03;
const qS1 = -7.0662963390e-01;
@@ -37,9 +37,9 @@ fn asin32(x: f32) f32 {
if (ix >= 0x3F800000) {
// |x| >= 1
if (ix == 0x3F800000) {
- return x * pio2 + 0x1.0p-120; // asin(+-1) = +-pi/2 with inexact
+ return x * pio2 + 0x1.0p-120; // asin(+-1) = +-pi/2 with inexact
} else {
- return math.nan(f32); // asin(|x| > 1) is nan
+ return math.nan(f32); // asin(|x| > 1) is nan
}
}
@@ -66,16 +66,16 @@ fn asin32(x: f32) f32 {
}
fn r64(z: f64) f64 {
- const pS0: f64 = 1.66666666666666657415e-01;
+ const pS0: f64 = 1.66666666666666657415e-01;
const pS1: f64 = -3.25565818622400915405e-01;
- const pS2: f64 = 2.01212532134862925881e-01;
+ const pS2: f64 = 2.01212532134862925881e-01;
const pS3: f64 = -4.00555345006794114027e-02;
- const pS4: f64 = 7.91534994289814532176e-04;
- const pS5: f64 = 3.47933107596021167570e-05;
+ const pS4: f64 = 7.91534994289814532176e-04;
+ const pS5: f64 = 3.47933107596021167570e-05;
const qS1: f64 = -2.40339491173441421878e+00;
- const qS2: f64 = 2.02094576023350569471e+00;
+ const qS2: f64 = 2.02094576023350569471e+00;
const qS3: f64 = -6.88283971605453293030e-01;
- const qS4: f64 = 7.70381505559019352791e-02;
+ const qS4: f64 = 7.70381505559019352791e-02;
const p = z * (pS0 + z * (pS1 + z * (pS2 + z * (pS3 + z * (pS4 + z * pS5)))));
const q = 1.0 + z * (qS1 + z * (qS2 + z * (qS3 + z * qS4)));
@@ -87,12 +87,12 @@ fn asin64(x: f64) f64 {
const pio2_lo: f64 = 6.12323399573676603587e-17;
const ux = @bitCast(u64, x);
- const hx = u32(ux >> 32);
+ const hx = @intCast(u32, ux >> 32);
const ix = hx & 0x7FFFFFFF;
// |x| >= 1 or nan
if (ix >= 0x3FF00000) {
- const lx = u32(ux & 0xFFFFFFFF);
+ const lx = @intCast(u32, ux & 0xFFFFFFFF);
// asin(1) = +-pi/2 with inexact
if ((ix - 0x3FF00000) | lx == 0) {
diff --git a/std/math/atan.zig b/std/math/atan.zig
index c315adc42d..6ca94dd84a 100644
--- a/std/math/atan.zig
+++ b/std/math/atan.zig
@@ -17,25 +17,25 @@ pub fn atan(x: var) @typeOf(x) {
}
fn atan32(x_: f32) f32 {
- const atanhi = []const f32 {
+ const atanhi = []const f32{
4.6364760399e-01, // atan(0.5)hi
7.8539812565e-01, // atan(1.0)hi
9.8279368877e-01, // atan(1.5)hi
1.5707962513e+00, // atan(inf)hi
};
- const atanlo = []const f32 {
+ const atanlo = []const f32{
5.0121582440e-09, // atan(0.5)lo
3.7748947079e-08, // atan(1.0)lo
3.4473217170e-08, // atan(1.5)lo
7.5497894159e-08, // atan(inf)lo
};
- const aT = []const f32 {
+ const aT = []const f32{
3.3333328366e-01,
- -1.9999158382e-01,
+ -1.9999158382e-01,
1.4253635705e-01,
- -1.0648017377e-01,
+ -1.0648017377e-01,
6.1687607318e-02,
};
@@ -80,8 +80,7 @@ fn atan32(x_: f32) f32 {
id = 1;
x = (x - 1.0) / (x + 1.0);
}
- }
- else {
+ } else {
// |x| < 2.4375
if (ix < 0x401C0000) {
id = 2;
@@ -109,37 +108,37 @@ fn atan32(x_: f32) f32 {
}
fn atan64(x_: f64) f64 {
- const atanhi = []const f64 {
+ const atanhi = []const f64{
4.63647609000806093515e-01, // atan(0.5)hi
7.85398163397448278999e-01, // atan(1.0)hi
9.82793723247329054082e-01, // atan(1.5)hi
1.57079632679489655800e+00, // atan(inf)hi
};
- const atanlo = []const f64 {
+ const atanlo = []const f64{
2.26987774529616870924e-17, // atan(0.5)lo
3.06161699786838301793e-17, // atan(1.0)lo
1.39033110312309984516e-17, // atan(1.5)lo
6.12323399573676603587e-17, // atan(inf)lo
};
- const aT = []const f64 {
+ const aT = []const f64{
3.33333333333329318027e-01,
- -1.99999999998764832476e-01,
+ -1.99999999998764832476e-01,
1.42857142725034663711e-01,
- -1.11111104054623557880e-01,
+ -1.11111104054623557880e-01,
9.09088713343650656196e-02,
- -7.69187620504482999495e-02,
+ -7.69187620504482999495e-02,
6.66107313738753120669e-02,
- -5.83357013379057348645e-02,
+ -5.83357013379057348645e-02,
4.97687799461593236017e-02,
- -3.65315727442169155270e-02,
+ -3.65315727442169155270e-02,
1.62858201153657823623e-02,
};
var x = x_;
var ux = @bitCast(u64, x);
- var ix = u32(ux >> 32);
+ var ix = @intCast(u32, ux >> 32);
const sign = ix >> 31;
ix &= 0x7FFFFFFF;
@@ -160,7 +159,7 @@ fn atan64(x_: f64) f64 {
// |x| < 2^(-27)
if (ix < 0x3E400000) {
if (ix < 0x00100000) {
- math.forceEval(f32(x));
+ math.forceEval(@floatCast(f32, x));
}
return x;
}
@@ -179,8 +178,7 @@ fn atan64(x_: f64) f64 {
id = 1;
x = (x - 1.0) / (x + 1.0);
}
- }
- else {
+ } else {
// |x| < 2.4375
if (ix < 0x40038000) {
id = 2;
diff --git a/std/math/atan2.zig b/std/math/atan2.zig
index 37c520da46..b3e45ba045 100644
--- a/std/math/atan2.zig
+++ b/std/math/atan2.zig
@@ -31,7 +31,7 @@ pub fn atan2(comptime T: type, x: T, y: T) T {
}
fn atan2_32(y: f32, x: f32) f32 {
- const pi: f32 = 3.1415927410e+00;
+ const pi: f32 = 3.1415927410e+00;
const pi_lo: f32 = -8.7422776573e-08;
if (math.isNan(x) or math.isNan(y)) {
@@ -53,9 +53,9 @@ fn atan2_32(y: f32, x: f32) f32 {
if (iy == 0) {
switch (m) {
- 0, 1 => return y, // atan(+-0, +...)
- 2 => return pi, // atan(+0, -...)
- 3 => return -pi, // atan(-0, -...)
+ 0, 1 => return y, // atan(+-0, +...)
+ 2 => return pi, // atan(+0, -...)
+ 3 => return -pi, // atan(-0, -...)
else => unreachable,
}
}
@@ -71,18 +71,18 @@ fn atan2_32(y: f32, x: f32) f32 {
if (ix == 0x7F800000) {
if (iy == 0x7F800000) {
switch (m) {
- 0 => return pi / 4, // atan(+inf, +inf)
- 1 => return -pi / 4, // atan(-inf, +inf)
- 2 => return 3*pi / 4, // atan(+inf, -inf)
- 3 => return -3*pi / 4, // atan(-inf, -inf)
+ 0 => return pi / 4, // atan(+inf, +inf)
+ 1 => return -pi / 4, // atan(-inf, +inf)
+ 2 => return 3 * pi / 4, // atan(+inf, -inf)
+ 3 => return -3 * pi / 4, // atan(-inf, -inf)
else => unreachable,
}
} else {
switch (m) {
- 0 => return 0.0, // atan(+..., +inf)
- 1 => return -0.0, // atan(-..., +inf)
- 2 => return pi, // atan(+..., -inf)
- 3 => return -pi, // atan(-...f, -inf)
+ 0 => return 0.0, // atan(+..., +inf)
+ 1 => return -0.0, // atan(-..., +inf)
+ 2 => return pi, // atan(+..., -inf)
+ 3 => return -pi, // atan(-...f, -inf)
else => unreachable,
}
}
@@ -107,16 +107,16 @@ fn atan2_32(y: f32, x: f32) f32 {
};
switch (m) {
- 0 => return z, // atan(+, +)
- 1 => return -z, // atan(-, +)
- 2 => return pi - (z - pi_lo), // atan(+, -)
- 3 => return (z - pi_lo) - pi, // atan(-, -)
+ 0 => return z, // atan(+, +)
+ 1 => return -z, // atan(-, +)
+ 2 => return pi - (z - pi_lo), // atan(+, -)
+ 3 => return (z - pi_lo) - pi, // atan(-, -)
else => unreachable,
}
}
fn atan2_64(y: f64, x: f64) f64 {
- const pi: f64 = 3.1415926535897931160E+00;
+ const pi: f64 = 3.1415926535897931160E+00;
const pi_lo: f64 = 1.2246467991473531772E-16;
if (math.isNan(x) or math.isNan(y)) {
@@ -124,12 +124,12 @@ fn atan2_64(y: f64, x: f64) f64 {
}
var ux = @bitCast(u64, x);
- var ix = u32(ux >> 32);
- var lx = u32(ux & 0xFFFFFFFF);
+ var ix = @intCast(u32, ux >> 32);
+ var lx = @intCast(u32, ux & 0xFFFFFFFF);
var uy = @bitCast(u64, y);
- var iy = u32(uy >> 32);
- var ly = u32(uy & 0xFFFFFFFF);
+ var iy = @intCast(u32, uy >> 32);
+ var ly = @intCast(u32, uy & 0xFFFFFFFF);
// x = 1.0
if ((ix -% 0x3FF00000) | lx == 0) {
@@ -143,9 +143,9 @@ fn atan2_64(y: f64, x: f64) f64 {
if (iy | ly == 0) {
switch (m) {
- 0, 1 => return y, // atan(+-0, +...)
- 2 => return pi, // atan(+0, -...)
- 3 => return -pi, // atan(-0, -...)
+ 0, 1 => return y, // atan(+-0, +...)
+ 2 => return pi, // atan(+0, -...)
+ 3 => return -pi, // atan(-0, -...)
else => unreachable,
}
}
@@ -161,18 +161,18 @@ fn atan2_64(y: f64, x: f64) f64 {
if (ix == 0x7FF00000) {
if (iy == 0x7FF00000) {
switch (m) {
- 0 => return pi / 4, // atan(+inf, +inf)
- 1 => return -pi / 4, // atan(-inf, +inf)
- 2 => return 3*pi / 4, // atan(+inf, -inf)
- 3 => return -3*pi / 4, // atan(-inf, -inf)
+ 0 => return pi / 4, // atan(+inf, +inf)
+ 1 => return -pi / 4, // atan(-inf, +inf)
+ 2 => return 3 * pi / 4, // atan(+inf, -inf)
+ 3 => return -3 * pi / 4, // atan(-inf, -inf)
else => unreachable,
}
} else {
switch (m) {
- 0 => return 0.0, // atan(+..., +inf)
- 1 => return -0.0, // atan(-..., +inf)
- 2 => return pi, // atan(+..., -inf)
- 3 => return -pi, // atan(-...f, -inf)
+ 0 => return 0.0, // atan(+..., +inf)
+ 1 => return -0.0, // atan(-..., +inf)
+ 2 => return pi, // atan(+..., -inf)
+ 3 => return -pi, // atan(-...f, -inf)
else => unreachable,
}
}
@@ -197,10 +197,10 @@ fn atan2_64(y: f64, x: f64) f64 {
};
switch (m) {
- 0 => return z, // atan(+, +)
- 1 => return -z, // atan(-, +)
- 2 => return pi - (z - pi_lo), // atan(+, -)
- 3 => return (z - pi_lo) - pi, // atan(-, -)
+ 0 => return z, // atan(+, +)
+ 1 => return -z, // atan(-, +)
+ 2 => return pi - (z - pi_lo), // atan(+, -)
+ 3 => return (z - pi_lo) - pi, // atan(-, -)
else => unreachable,
}
}
diff --git a/std/math/atanh.zig b/std/math/atanh.zig
index 8ca0cc85bc..4ae8a66bc0 100644
--- a/std/math/atanh.zig
+++ b/std/math/atanh.zig
@@ -62,7 +62,7 @@ fn atanh_64(x: f64) f64 {
if (e < 0x3FF - 32) {
// underflow
if (e == 0) {
- math.forceEval(f32(y));
+ math.forceEval(@floatCast(f32, y));
}
}
// |x| < 0.5
diff --git a/std/math/big/index.zig b/std/math/big/index.zig
new file mode 100644
index 0000000000..26fa538c4f
--- /dev/null
+++ b/std/math/big/index.zig
@@ -0,0 +1,5 @@
+pub use @import("int.zig");
+
+test "math.big" {
+ _ = @import("int.zig");
+}
diff --git a/std/math/big/int.zig b/std/math/big/int.zig
new file mode 100644
index 0000000000..41e1503d49
--- /dev/null
+++ b/std/math/big/int.zig
@@ -0,0 +1,2103 @@
+const std = @import("../../index.zig");
+const builtin = @import("builtin");
+const debug = std.debug;
+const math = std.math;
+const mem = std.mem;
+const Allocator = mem.Allocator;
+const ArrayList = std.ArrayList;
+
+const TypeId = builtin.TypeId;
+
+pub const Limb = usize;
+pub const DoubleLimb = @IntType(false, 2 * Limb.bit_count);
+pub const Log2Limb = math.Log2Int(Limb);
+
+comptime {
+ debug.assert(math.floorPowerOfTwo(usize, Limb.bit_count) == Limb.bit_count);
+ debug.assert(Limb.bit_count <= 64); // u128 set is unsupported
+ debug.assert(Limb.is_signed == false);
+}
+
+pub const Int = struct {
+ allocator: *Allocator,
+ positive: bool,
+ // - little-endian ordered
+ // - len >= 1 always
+ // - zero value -> len == 1 with limbs[0] == 0
+ limbs: []Limb,
+ len: usize,
+
+ const default_capacity = 4;
+
+ pub fn init(allocator: *Allocator) !Int {
+ return try Int.initCapacity(allocator, default_capacity);
+ }
+
+ pub fn initSet(allocator: *Allocator, value: var) !Int {
+ var s = try Int.init(allocator);
+ try s.set(value);
+ return s;
+ }
+
+ pub fn initCapacity(allocator: *Allocator, capacity: usize) !Int {
+ return Int{
+ .allocator = allocator,
+ .positive = true,
+ .limbs = block: {
+ var limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity));
+ limbs[0] = 0;
+ break :block limbs;
+ },
+ .len = 1,
+ };
+ }
+
+ pub fn ensureCapacity(self: *Int, capacity: usize) !void {
+ if (capacity <= self.limbs.len) {
+ return;
+ }
+
+ self.limbs = try self.allocator.realloc(Limb, self.limbs, capacity);
+ }
+
+ pub fn deinit(self: *Int) void {
+ self.allocator.free(self.limbs);
+ self.* = undefined;
+ }
+
+ pub fn clone(other: Int) !Int {
+ return Int{
+ .allocator = other.allocator,
+ .positive = other.positive,
+ .limbs = block: {
+ var limbs = try other.allocator.alloc(Limb, other.len);
+ mem.copy(Limb, limbs[0..], other.limbs[0..other.len]);
+ break :block limbs;
+ },
+ .len = other.len,
+ };
+ }
+
+ pub fn copy(self: *Int, other: Int) !void {
+ if (self == &other) {
+ return;
+ }
+
+ self.positive = other.positive;
+ try self.ensureCapacity(other.len);
+ mem.copy(Limb, self.limbs[0..], other.limbs[0..other.len]);
+ self.len = other.len;
+ }
+
+ pub fn swap(self: *Int, other: *Int) void {
+ mem.swap(Int, self, other);
+ }
+
+ pub fn dump(self: Int) void {
+ for (self.limbs) |limb| {
+ debug.warn("{x} ", limb);
+ }
+ debug.warn("\n");
+ }
+
+ pub fn negate(r: *Int) void {
+ r.positive = !r.positive;
+ }
+
+ pub fn abs(r: *Int) void {
+ r.positive = true;
+ }
+
+ pub fn isOdd(r: Int) bool {
+ return r.limbs[0] & 1 != 0;
+ }
+
+ pub fn isEven(r: Int) bool {
+ return !r.isOdd();
+ }
+
+ // Returns the number of bits required to represent the absolute value of self.
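+ // For example, with 64-bit limbs (usize on a 64-bit target), a two-limb value whose
+ // most significant limb is 0b101 needs (2 - 1) * 64 + (64 - @clz(0b101)) = 64 + 3 = 67 bits.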
+ fn bitCountAbs(self: Int) usize {
+ return (self.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(self.limbs[self.len - 1]));
+ }
+
+ // Returns the number of bits required to represent the integer in twos-complement form.
+ //
+ // If the integer is negative, the value returned is the number of bits needed by a signed
+ // integer to represent the value. If positive, the value is the number of bits for an
+ // unsigned integer. Such an unsigned value also fits in a signed integer with a bit count
+ // one greater than the returned value.
+ //
+ // e.g. -127 returns 8 as it will fit in an i8. 127 returns 7 since it fits in a u7.
+ fn bitCountTwosComp(self: Int) usize {
+ var bits = self.bitCountAbs();
+
+ // If the entire value has only one bit set (e.g. 0b100000000) then the negation in twos
+ // complement requires one less bit.
+ if (!self.positive) block: {
+ bits += 1;
+
+ if (@popCount(self.limbs[self.len - 1]) == 1) {
+ for (self.limbs[0 .. self.len - 1]) |limb| {
+ if (@popCount(limb) != 0) {
+ break :block;
+ }
+ }
+
+ bits -= 1;
+ }
+ }
+
+ return bits;
+ }
+
+ pub fn fitsInTwosComp(self: Int, is_signed: bool, bit_count: usize) bool {
+ if (self.eqZero()) {
+ return true;
+ }
+ if (!is_signed and !self.positive) {
+ return false;
+ }
+
+ const req_bits = self.bitCountTwosComp() + @boolToInt(self.positive and is_signed);
+ return bit_count >= req_bits;
+ }
+
+ pub fn fits(self: Int, comptime T: type) bool {
+ return self.fitsInTwosComp(T.is_signed, T.bit_count);
+ }
+
+ // Returns the approximate size of the integer in the given base. For negative values, the
+ // estimate includes room for the minus sign. This is used for determining the number of
+ // characters needed to print the value. It is inexact and may exceed the actual length by
+ // 1-2 digits.
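+ // For example, a positive 8-bit value in base 10 gives 8 / log2(10) + 1 = 8 / 3 + 1 = 3
+ // characters (integer log2), which is exactly enough to print "255".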
+ pub fn sizeInBase(self: Int, base: usize) usize {
+ const bit_count = usize(@boolToInt(!self.positive)) + self.bitCountAbs();
+ return (bit_count / math.log2(base)) + 1;
+ }
+
+ pub fn set(self: *Int, value: var) Allocator.Error!void {
+ const T = @typeOf(value);
+
+ switch (@typeInfo(T)) {
+ TypeId.Int => |info| {
+ const UT = if (T.is_signed) @IntType(false, T.bit_count - 1) else T;
+
+ try self.ensureCapacity(@sizeOf(UT) / @sizeOf(Limb));
+ self.positive = value >= 0;
+ self.len = 0;
+
+ var w_value: UT = if (value < 0) @intCast(UT, -value) else @intCast(UT, value);
+
+ if (info.bits <= Limb.bit_count) {
+ self.limbs[0] = Limb(w_value);
+ self.len = 1;
+ } else {
+ var i: usize = 0;
+ while (w_value != 0) : (i += 1) {
+ self.limbs[i] = @truncate(Limb, w_value);
+ self.len += 1;
+
+ // TODO: shift == 64 at compile-time fails. Fails on u128 limbs.
+ w_value >>= Limb.bit_count / 2;
+ w_value >>= Limb.bit_count / 2;
+ }
+ }
+ },
+ TypeId.ComptimeInt => {
+ comptime var w_value = if (value < 0) -value else value;
+
+ const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1;
+ try self.ensureCapacity(req_limbs);
+
+ self.positive = value >= 0;
+ self.len = req_limbs;
+
+ if (w_value <= @maxValue(Limb)) {
+ self.limbs[0] = w_value;
+ } else {
+ const mask = (1 << Limb.bit_count) - 1;
+
+ comptime var i = 0;
+ inline while (w_value != 0) : (i += 1) {
+ self.limbs[i] = w_value & mask;
+
+ w_value >>= Limb.bit_count / 2;
+ w_value >>= Limb.bit_count / 2;
+ }
+ }
+ },
+ else => {
+ @compileError("cannot set Int using type " ++ @typeName(T));
+ },
+ }
+ }
+
+ pub const ConvertError = error{
+ NegativeIntoUnsigned,
+ TargetTooSmall,
+ };
+
+ pub fn to(self: Int, comptime T: type) ConvertError!T {
+ switch (@typeId(T)) {
+ TypeId.Int => {
+ const UT = @IntType(false, T.bit_count);
+
+ if (self.bitCountTwosComp() > T.bit_count) {
+ return error.TargetTooSmall;
+ }
+
+ var r: UT = 0;
+
+ if (@sizeOf(UT) <= @sizeOf(Limb)) {
+ r = @intCast(UT, self.limbs[0]);
+ } else {
+ for (self.limbs[0..self.len]) |_, ri| {
+ const limb = self.limbs[self.len - ri - 1];
+ r <<= Limb.bit_count;
+ r |= limb;
+ }
+ }
+
+ if (!T.is_signed) {
+ return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned;
+ } else {
+ if (self.positive) {
+ return @intCast(T, r);
+ } else {
+ if (math.cast(T, r)) |ok| {
+ return -ok;
+ } else |_| {
+ return @minValue(T);
+ }
+ }
+ }
+ },
+ else => {
+ @compileError("cannot convert Int to type " ++ @typeName(T));
+ },
+ }
+ }
+
+ fn charToDigit(ch: u8, base: u8) !u8 {
+ const d = switch (ch) {
+ '0'...'9' => ch - '0',
+ 'a'...'f' => (ch - 'a') + 0xa,
+ else => return error.InvalidCharForDigit,
+ };
+
+ return if (d < base) d else return error.DigitTooLargeForBase;
+ }
+
+ fn digitToChar(d: u8, base: u8) !u8 {
+ if (d >= base) {
+ return error.DigitTooLargeForBase;
+ }
+
+ return switch (d) {
+ 0...9 => '0' + d,
+ 0xa...0xf => ('a' - 0xa) + d,
+ else => unreachable,
+ };
+ }
+
+ pub fn setString(self: *Int, base: u8, value: []const u8) !void {
+ if (base < 2 or base > 16) {
+ return error.InvalidBase;
+ }
+
+ var i: usize = 0;
+ var positive = true;
+ if (value.len > 0 and value[0] == '-') {
+ positive = false;
+ i += 1;
+ }
+
+ // TODO values less than limb size should guarantee non allocating
+ var base_buffer: [512]u8 = undefined;
+ const base_al = &std.heap.FixedBufferAllocator.init(base_buffer[0..]).allocator;
+ const base_ap = try Int.initSet(base_al, base);
+
+ var d_buffer: [512]u8 = undefined;
+ var d_fba = std.heap.FixedBufferAllocator.init(d_buffer[0..]);
+ const d_al = &d_fba.allocator;
+
+ try self.set(0);
+ for (value[i..]) |ch| {
+ const d = try charToDigit(ch, base);
+ d_fba.end_index = 0;
+ const d_ap = try Int.initSet(d_al, d);
+
+ try self.mul(self.*, base_ap);
+ try self.add(self.*, d_ap);
+ }
+ self.positive = positive;
+ }
+
+ /// TODO make this call format instead of the other way around
+ pub fn toString(self: Int, allocator: *Allocator, base: u8) ![]const u8 {
+ if (base < 2 or base > 16) {
+ return error.InvalidBase;
+ }
+
+ var digits = ArrayList(u8).init(allocator);
+ try digits.ensureCapacity(self.sizeInBase(base) + 1);
+ defer digits.deinit();
+
+ if (self.eqZero()) {
+ try digits.append('0');
+ return digits.toOwnedSlice();
+ }
+
+ // Power of two: can do a single pass and use masks to extract digits.
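+ // e.g. in base 16, base_shift is 4, so each limb contributes Limb.bit_count / 4
+ // hex digits, taken by shifting and masking off 4 bits at a time.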
+ if (base & (base - 1) == 0) {
+ const base_shift = math.log2_int(Limb, base);
+
+ for (self.limbs[0..self.len]) |limb| {
+ var shift: usize = 0;
+ while (shift < Limb.bit_count) : (shift += base_shift) {
+ const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & Limb(base - 1));
+ const ch = try digitToChar(r, base);
+ try digits.append(ch);
+ }
+ }
+
+ while (true) {
+ // always will have a non-zero digit somewhere
+ const c = digits.pop();
+ if (c != '0') {
+ digits.append(c) catch unreachable;
+ break;
+ }
+ }
+ } // Non power-of-two: batch divisions per word size.
+ else {
+ const digits_per_limb = math.log(Limb, base, @maxValue(Limb));
+ var limb_base: Limb = 1;
+ var j: usize = 0;
+ while (j < digits_per_limb) : (j += 1) {
+ limb_base *= base;
+ }
+
+ var q = try self.clone();
+ q.positive = true;
+ var r = try Int.init(allocator);
+ var b = try Int.initSet(allocator, limb_base);
+
+ while (q.len >= 2) {
+ try Int.divTrunc(&q, &r, q, b);
+
+ var r_word = r.limbs[0];
+ var i: usize = 0;
+ while (i < digits_per_limb) : (i += 1) {
+ const ch = try digitToChar(@intCast(u8, r_word % base), base);
+ r_word /= base;
+ try digits.append(ch);
+ }
+ }
+
+ {
+ debug.assert(q.len == 1);
+
+ var r_word = q.limbs[0];
+ while (r_word != 0) {
+ const ch = try digitToChar(@intCast(u8, r_word % base), base);
+ r_word /= base;
+ try digits.append(ch);
+ }
+ }
+ }
+
+ if (!self.positive) {
+ try digits.append('-');
+ }
+
+ var s = digits.toOwnedSlice();
+ mem.reverse(u8, s);
+ return s;
+ }
+
+ /// for the std lib format function
+ /// TODO make this non-allocating
+ pub fn format(
+ self: Int,
+ comptime fmt: []const u8,
+ context: var,
+ comptime FmtError: type,
+ output: fn (@typeOf(context), []const u8) FmtError!void,
+ ) FmtError!void {
+ // TODO look at fmt and support other bases
+ const str = self.toString(self.allocator, 10) catch @panic("TODO make this non allocating");
+ defer self.allocator.free(str);
+ return output(context, str);
+ }
+
+ // returns -1, 0, 1 if |a| < |b|, |a| == |b| or |a| > |b| respectively.
+ pub fn cmpAbs(a: Int, b: Int) i8 {
+ if (a.len < b.len) {
+ return -1;
+ }
+ if (a.len > b.len) {
+ return 1;
+ }
+
+ var i: usize = a.len - 1;
+ while (i != 0) : (i -= 1) {
+ if (a.limbs[i] != b.limbs[i]) {
+ break;
+ }
+ }
+
+ if (a.limbs[i] < b.limbs[i]) {
+ return -1;
+ } else if (a.limbs[i] > b.limbs[i]) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ // returns -1, 0, 1 if a < b, a == b or a > b respectively.
+ pub fn cmp(a: Int, b: Int) i8 {
+ if (a.positive != b.positive) {
+ return if (a.positive) i8(1) else -1;
+ } else {
+ const r = cmpAbs(a, b);
+ return if (a.positive) r else -r;
+ }
+ }
+
+ // if a == 0
+ pub fn eqZero(a: Int) bool {
+ return a.len == 1 and a.limbs[0] == 0;
+ }
+
+ // if |a| == |b|
+ pub fn eqAbs(a: Int, b: Int) bool {
+ return cmpAbs(a, b) == 0;
+ }
+
+ // if a == b
+ pub fn eq(a: Int, b: Int) bool {
+ return cmp(a, b) == 0;
+ }
+
+ // Normalize for a possible single carry digit.
+ //
+ // [1, 2, 3, 4, 0] -> [1, 2, 3, 4]
+ // [1, 2, 3, 4, 5] -> [1, 2, 3, 4, 5]
+ // [0] -> [0]
+ fn norm1(r: *Int, length: usize) void {
+ debug.assert(length > 0);
+ debug.assert(length <= r.limbs.len);
+
+ if (r.limbs[length - 1] == 0) {
+ r.len = if (length > 1) length - 1 else 1;
+ } else {
+ r.len = length;
+ }
+ }
+
+ // Normalize a possible sequence of leading zeros.
+ //
+ // [1, 2, 3, 4, 0] -> [1, 2, 3, 4]
+ // [1, 2, 0, 0, 0] -> [1, 2]
+ // [0, 0, 0, 0, 0] -> [0]
+ fn normN(r: *Int, length: usize) void {
+ debug.assert(length > 0);
+ debug.assert(length <= r.limbs.len);
+
+ var j = length;
+ while (j > 0) : (j -= 1) {
+ if (r.limbs[j - 1] != 0) {
+ break;
+ }
+ }
+
+ // Handle zero
+ r.len = if (j != 0) j else 1;
+ }
+
+ // r = a + b
+ pub fn add(r: *Int, a: Int, b: Int) Allocator.Error!void {
+ if (a.eqZero()) {
+ try r.copy(b);
+ return;
+ } else if (b.eqZero()) {
+ try r.copy(a);
+ return;
+ }
+
+ if (a.positive != b.positive) {
+ if (a.positive) {
+ // (a) + (-b) => a - b
+ const bp = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = b.limbs,
+ .len = b.len,
+ };
+ try r.sub(a, bp);
+ } else {
+ // (-a) + (b) => b - a
+ const ap = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = a.limbs,
+ .len = a.len,
+ };
+ try r.sub(b, ap);
+ }
+ } else {
+ if (a.len >= b.len) {
+ try r.ensureCapacity(a.len + 1);
+ lladd(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.norm1(a.len + 1);
+ } else {
+ try r.ensureCapacity(b.len + 1);
+ lladd(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.norm1(b.len + 1);
+ }
+
+ r.positive = a.positive;
+ }
+ }
+
+ // Knuth 4.3.1, Algorithm A.
+ fn lladd(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len != 0 and b.len != 0);
+ debug.assert(a.len >= b.len);
+ debug.assert(r.len >= a.len + 1);
+
+ var i: usize = 0;
+ var carry: Limb = 0;
+
+ while (i < b.len) : (i += 1) {
+ var c: Limb = 0;
+ c += @boolToInt(@addWithOverflow(Limb, a[i], b[i], &r[i]));
+ c += @boolToInt(@addWithOverflow(Limb, r[i], carry, &r[i]));
+ carry = c;
+ }
+
+ while (i < a.len) : (i += 1) {
+ carry = @boolToInt(@addWithOverflow(Limb, a[i], carry, &r[i]));
+ }
+
+ r[i] = carry;
+ }
+
+ // r = a - b
+ pub fn sub(r: *Int, a: Int, b: Int) !void {
+ if (a.positive != b.positive) {
+ if (a.positive) {
+ // (a) - (-b) => a + b
+ const bp = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = b.limbs,
+ .len = b.len,
+ };
+ try r.add(a, bp);
+ } else {
+ // (-a) - (b) => -(a + b)
+ const ap = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = a.limbs,
+ .len = a.len,
+ };
+ try r.add(ap, b);
+ r.positive = false;
+ }
+ } else {
+ if (a.positive) {
+ // (a) - (b) => a - b
+ if (a.cmp(b) >= 0) {
+ try r.ensureCapacity(a.len + 1);
+ llsub(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(a.len);
+ r.positive = true;
+ } else {
+ try r.ensureCapacity(b.len + 1);
+ llsub(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(b.len);
+ r.positive = false;
+ }
+ } else {
+ // (-a) - (-b) => -(a - b)
+ if (a.cmp(b) < 0) {
+ try r.ensureCapacity(a.len + 1);
+ llsub(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(a.len);
+ r.positive = false;
+ } else {
+ try r.ensureCapacity(b.len + 1);
+ llsub(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(b.len);
+ r.positive = true;
+ }
+ }
+ }
+ }
+
+ // Knuth 4.3.1, Algorithm S.
+ fn llsub(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len != 0 and b.len != 0);
+ debug.assert(a.len > b.len or (a.len == b.len and a[a.len - 1] >= b[b.len - 1]));
+ debug.assert(r.len >= a.len);
+
+ var i: usize = 0;
+ var borrow: Limb = 0;
+
+ while (i < b.len) : (i += 1) {
+ var c: Limb = 0;
+ c += @boolToInt(@subWithOverflow(Limb, a[i], b[i], &r[i]));
+ c += @boolToInt(@subWithOverflow(Limb, r[i], borrow, &r[i]));
+ borrow = c;
+ }
+
+ while (i < a.len) : (i += 1) {
+ borrow = @boolToInt(@subWithOverflow(Limb, a[i], borrow, &r[i]));
+ }
+
+ debug.assert(borrow == 0);
+ }
+
+ // rma = a * b
+ //
+ // For greatest efficiency, ensure rma does not alias a or b.
+ pub fn mul(rma: *Int, a: Int, b: Int) !void {
+ var r = rma;
+ var aliased = rma.limbs.ptr == a.limbs.ptr or rma.limbs.ptr == b.limbs.ptr;
+
+ var sr: Int = undefined;
+ if (aliased) {
+ sr = try Int.initCapacity(rma.allocator, a.len + b.len);
+ r = &sr;
+ aliased = true;
+ }
+ defer if (aliased) {
+ rma.swap(r);
+ r.deinit();
+ };
+
+ try r.ensureCapacity(a.len + b.len);
+
+ if (a.len >= b.len) {
+ llmul(r.limbs, a.limbs[0..a.len], b.limbs[0..b.len]);
+ } else {
+ llmul(r.limbs, b.limbs[0..b.len], a.limbs[0..a.len]);
+ }
+
+ r.positive = a.positive == b.positive;
+ r.normN(a.len + b.len);
+ }
+
+ // a + b * c + *carry, sets carry to the overflow bits
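+ // The result always fits in two limbs: with n = Limb.bit_count, the largest possible
+ // value is (2^n - 1) + (2^n - 1) * (2^n - 1) + (2^n - 1) = 2^(2n) - 1.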
+ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
+ var r1: Limb = undefined;
+
+ // r1 = a + *carry
+ const c1: Limb = @boolToInt(@addWithOverflow(Limb, a, carry.*, &r1));
+
+ // r2 = b * c
+ //
+ // We still use a DoubleLimb here since the @mulWithOverflow builtin does not
+ // return the carry and lower bits separately so we would need to perform this
+ // anyway to get the carry bits. The branch on the overflow case costs more than
+ // just computing them unconditionally and splitting.
+ //
+ // This could be a single x86 mul instruction, which stores the carry/lower in rdx:rax.
+ const bc = DoubleLimb(b) * DoubleLimb(c);
+ const r2 = @truncate(Limb, bc);
+ const c2 = @truncate(Limb, bc >> Limb.bit_count);
+
+ // r1 = r1 + r2
+ const c3: Limb = @boolToInt(@addWithOverflow(Limb, r1, r2, &r1));
+
+ // This never overflows; c1 and c3 are each either 0 or 1, and if both are 1 then
+ // c2 is at most @maxValue(Limb) - 2.
+ carry.* = c1 + c2 + c3;
+
+ return r1;
+ }
+
+ // Knuth 4.3.1, Algorithm M.
+ //
+ // r MUST NOT alias any of a or b.
+ fn llmul(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len >= b.len);
+ debug.assert(r.len >= a.len + b.len);
+
+ mem.set(Limb, r[0 .. a.len + b.len], 0);
+
+ var i: usize = 0;
+ while (i < a.len) : (i += 1) {
+ var carry: Limb = 0;
+ var j: usize = 0;
+ while (j < b.len) : (j += 1) {
+ r[i + j] = @inlineCall(addMulLimbWithCarry, r[i + j], a[i], b[j], &carry);
+ }
+ r[i + j] = carry;
+ }
+ }
+
+ pub fn divFloor(q: *Int, r: *Int, a: Int, b: Int) !void {
+ try div(q, r, a, b);
+
+ // Trunc -> Floor.
+ if (!q.positive) {
+ // TODO values less than limb size should guarantee non allocating
+ var one_buffer: [512]u8 = undefined;
+ const one_al = &std.heap.FixedBufferAllocator.init(one_buffer[0..]).allocator;
+ const one_ap = try Int.initSet(one_al, 1);
+
+ try q.sub(q.*, one_ap);
+ try r.add(q.*, one_ap);
+ }
+ r.positive = b.positive;
+ }
+
+ pub fn divTrunc(q: *Int, r: *Int, a: Int, b: Int) !void {
+ try div(q, r, a, b);
+ r.positive = a.positive;
+ }
+
+ // Truncates by default.
+ fn div(quo: *Int, rem: *Int, a: Int, b: Int) !void {
+ if (b.eqZero()) {
+ @panic("division by zero");
+ }
+ if (quo == rem) {
+ @panic("quo and rem cannot be same variable");
+ }
+
+ if (a.cmpAbs(b) < 0) {
+ // quo may alias a so handle rem first
+ try rem.copy(a);
+ rem.positive = a.positive == b.positive;
+
+ quo.positive = true;
+ quo.len = 1;
+ quo.limbs[0] = 0;
+ return;
+ }
+
+ if (b.len == 1) {
+ try quo.ensureCapacity(a.len);
+
+ lldiv1(quo.limbs[0..], &rem.limbs[0], a.limbs[0..a.len], b.limbs[0]);
+ quo.norm1(a.len);
+ quo.positive = a.positive == b.positive;
+
+ rem.len = 1;
+ rem.positive = true;
+ } else {
+ // x and y are modified during division
+ var x = try a.clone();
+ defer x.deinit();
+
+ var y = try b.clone();
+ defer y.deinit();
+
+ // x may grow one limb during normalization
+ try quo.ensureCapacity(a.len + y.len);
+ try divN(quo.allocator, quo, rem, &x, &y);
+
+ quo.positive = a.positive == b.positive;
+ }
+ }
+
+ // Knuth 4.3.1, Exercise 16.
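+ // Schoolbook long division by a single limb: walk the dividend from its most significant
+ // limb down, dividing (rem * 2^Limb.bit_count + a[i]) by b at each step and carrying the
+ // remainder into the next step.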
+ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len > 1 or a[0] >= b);
+ debug.assert(quo.len >= a.len);
+
+ rem.* = 0;
+ for (a) |_, ri| {
+ const i = a.len - ri - 1;
+ const pdiv = ((DoubleLimb(rem.*) << Limb.bit_count) | a[i]);
+
+ if (pdiv == 0) {
+ quo[i] = 0;
+ rem.* = 0;
+ } else if (pdiv < b) {
+ quo[i] = 0;
+ rem.* = @truncate(Limb, pdiv);
+ } else if (pdiv == b) {
+ quo[i] = 1;
+ rem.* = 0;
+ } else {
+ quo[i] = @truncate(Limb, @divTrunc(pdiv, b));
+ rem.* = @truncate(Limb, pdiv - (quo[i] *% b));
+ }
+ }
+ }
+
+ // Handbook of Applied Cryptography, 14.20
+ //
+ // x = qy + r where 0 <= r < y
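+ //
+ // Outline: normalize x and y so the leading limb of y has its top bit set,
+ // compute the leading quotient limb by repeated subtraction (step 2), then for
+ // each remaining position estimate the quotient limb from the top limbs (3.1),
+ // correct the estimate downward while it is too large (3.2), multiply-subtract
+ // and fix up a negative partial remainder (3.3), and finally shift the remainder
+ // back by the normalization amount.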
+ fn divN(allocator: *Allocator, q: *Int, r: *Int, x: *Int, y: *Int) !void {
+ debug.assert(y.len >= 2);
+ debug.assert(x.len >= y.len);
+ debug.assert(q.limbs.len >= x.len + y.len - 1);
+ debug.assert(default_capacity >= 3); // see 3.2
+
+ var tmp = try Int.init(allocator);
+ defer tmp.deinit();
+
+ // Normalize so that the most significant limb of y has its leading bit set (y.limbs[t] >= radix / 2)
+ const norm_shift = @clz(y.limbs[y.len - 1]);
+ try x.shiftLeft(x.*, norm_shift);
+ try y.shiftLeft(y.*, norm_shift);
+
+ const n = x.len - 1;
+ const t = y.len - 1;
+
+ // 1.
+ q.len = n - t + 1;
+ mem.set(Limb, q.limbs[0..q.len], 0);
+
+ // 2.
+ try tmp.shiftLeft(y.*, Limb.bit_count * (n - t));
+ while (x.cmp(tmp) >= 0) {
+ q.limbs[n - t] += 1;
+ try x.sub(x.*, tmp);
+ }
+
+ // 3.
+ var i = n;
+ while (i > t) : (i -= 1) {
+ // 3.1
+ if (x.limbs[i] == y.limbs[t]) {
+ q.limbs[i - t - 1] = @maxValue(Limb);
+ } else {
+ const num = (DoubleLimb(x.limbs[i]) << Limb.bit_count) | DoubleLimb(x.limbs[i - 1]);
+ const z = @intCast(Limb, num / DoubleLimb(y.limbs[t]));
+ q.limbs[i - t - 1] = if (z > @maxValue(Limb)) @maxValue(Limb) else Limb(z);
+ }
+
+ // 3.2
+ tmp.limbs[0] = if (i >= 2) x.limbs[i - 2] else 0;
+ tmp.limbs[1] = if (i >= 1) x.limbs[i - 1] else 0;
+ tmp.limbs[2] = x.limbs[i];
+ tmp.normN(3);
+
+ while (true) {
+ // 2x1 limb multiplication unrolled against single-limb q[i-t-1]
+ var carry: Limb = 0;
+ r.limbs[0] = addMulLimbWithCarry(0, if (t >= 1) y.limbs[t - 1] else 0, q.limbs[i - t - 1], &carry);
+ r.limbs[1] = addMulLimbWithCarry(0, y.limbs[t], q.limbs[i - t - 1], &carry);
+ r.limbs[2] = carry;
+ r.normN(3);
+
+ if (r.cmpAbs(tmp) <= 0) {
+ break;
+ }
+
+ q.limbs[i - t - 1] -= 1;
+ }
+
+ // 3.3
+ try tmp.set(q.limbs[i - t - 1]);
+ try tmp.mul(tmp, y.*);
+ try tmp.shiftLeft(tmp, Limb.bit_count * (i - t - 1));
+ try x.sub(x.*, tmp);
+
+ if (!x.positive) {
+ try tmp.shiftLeft(y.*, Limb.bit_count * (i - t - 1));
+ try x.add(x.*, tmp);
+ q.limbs[i - t - 1] -= 1;
+ }
+ }
+
+ // Denormalize
+ q.normN(q.len);
+
+ try r.shiftRight(x.*, norm_shift);
+ r.normN(r.len);
+ }
+
+ // r = a << shift, in other words, r = a * 2^shift
+ pub fn shiftLeft(r: *Int, a: Int, shift: usize) !void {
+ try r.ensureCapacity(a.len + (shift / Limb.bit_count) + 1);
+ llshl(r.limbs[0..], a.limbs[0..a.len], shift);
+ r.norm1(a.len + (shift / Limb.bit_count) + 1);
+ r.positive = a.positive;
+ }
+
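+ // Low-level shift left. The shift splits into a whole-limb offset (limb_shift)
+ // and an in-limb bit offset (interior_limb_shift); limbs are processed from most
+ // significant to least, carrying the bits shifted out of each limb into the next
+ // higher result limb. math.shr is used since the complementary shift amount can
+ // equal the full limb width when interior_limb_shift is 0.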
+ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len >= 1);
+ debug.assert(r.len >= a.len + (shift / Limb.bit_count) + 1);
+
+ const limb_shift = shift / Limb.bit_count + 1;
+ const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count);
+
+ var carry: Limb = 0;
+ var i: usize = 0;
+ while (i < a.len) : (i += 1) {
+ const src_i = a.len - i - 1;
+ const dst_i = src_i + limb_shift;
+
+ const src_digit = a[src_i];
+ r[dst_i] = carry | @inlineCall(math.shr, Limb, src_digit, Limb.bit_count - @intCast(Limb, interior_limb_shift));
+ carry = (src_digit << interior_limb_shift);
+ }
+
+ r[limb_shift - 1] = carry;
+ mem.set(Limb, r[0 .. limb_shift - 1], 0);
+ }
+
+ // r = a >> shift
+ pub fn shiftRight(r: *Int, a: Int, shift: usize) !void {
+ if (a.len <= shift / Limb.bit_count) {
+ r.len = 1;
+ r.limbs[0] = 0;
+ r.positive = true;
+ return;
+ }
+
+ try r.ensureCapacity(a.len - (shift / Limb.bit_count));
+ llshr(r.limbs[0..], a.limbs[0..a.len], shift);
+ r.len = a.len - (shift / Limb.bit_count);
+ r.positive = a.positive;
+ }
+
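+ // Low-level shift right, the counterpart of llshl: limbs are processed from most
+ // significant to least, and the bits shifted out of each limb are carried down
+ // into the next lower result limb.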
+ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len >= 1);
+ debug.assert(r.len >= a.len - (shift / Limb.bit_count));
+
+ const limb_shift = shift / Limb.bit_count;
+ const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count);
+
+ var carry: Limb = 0;
+ var i: usize = 0;
+ while (i < a.len - limb_shift) : (i += 1) {
+ const src_i = a.len - i - 1;
+ const dst_i = src_i - limb_shift;
+
+ const src_digit = a[src_i];
+ r[dst_i] = carry | (src_digit >> interior_limb_shift);
+ carry = @inlineCall(math.shl, Limb, src_digit, Limb.bit_count - @intCast(Limb, interior_limb_shift));
+ }
+ }
+
+ // r = a | b
+ pub fn bitOr(r: *Int, a: Int, b: Int) !void {
+ if (a.len > b.len) {
+ try r.ensureCapacity(a.len);
+ llor(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.len = a.len;
+ } else {
+ try r.ensureCapacity(b.len);
+ llor(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.len = b.len;
+ }
+ }
+
+ fn llor(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(r.len >= a.len);
+ debug.assert(a.len >= b.len);
+
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] | b[i];
+ }
+ while (i < a.len) : (i += 1) {
+ r[i] = a[i];
+ }
+ }
+
+ // r = a & b
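+ //
+ // The magnitude of an AND has at most min(a.len, b.len) limbs, since the higher
+ // limbs of the shorter operand are zero; bitOr and bitXor, by contrast, keep the
+ // longer operand's length.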
+ pub fn bitAnd(r: *Int, a: Int, b: Int) !void {
+ if (a.len > b.len) {
+ try r.ensureCapacity(b.len);
+ lland(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(b.len);
+ } else {
+ try r.ensureCapacity(a.len);
+ lland(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(a.len);
+ }
+ }
+
+ fn lland(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(r.len >= b.len);
+ debug.assert(a.len >= b.len);
+
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] & b[i];
+ }
+ }
+
+ // r = a ^ b
+ pub fn bitXor(r: *Int, a: Int, b: Int) !void {
+ if (a.len > b.len) {
+ try r.ensureCapacity(a.len);
+ llxor(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(a.len);
+ } else {
+ try r.ensureCapacity(b.len);
+ llxor(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(b.len);
+ }
+ }
+
+ fn llxor(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(r.len >= a.len);
+ debug.assert(a.len >= b.len);
+
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] ^ b[i];
+ }
+ while (i < a.len) : (i += 1) {
+ r[i] = a[i];
+ }
+ }
+};
+
+// NOTE: All the following tests assume the maximum machine word is 64-bit.
+//
+// They should still compile and pass on targets with a larger word size, but some of
+// the multi-limb code paths may then go untested.
+
+const al = debug.global_allocator;
+
+test "big.int comptime_int set" {
+ comptime var s = 0xefffffff00000001eeeeeeefaaaaaaab;
+ var a = try Int.initSet(al, s);
+
+ const s_limb_count = 128 / Limb.bit_count;
+
+ comptime var i: usize = 0;
+ inline while (i < s_limb_count) : (i += 1) {
+ const result = Limb(s & @maxValue(Limb));
+ s >>= Limb.bit_count / 2;
+ s >>= Limb.bit_count / 2;
+ debug.assert(a.limbs[i] == result);
+ }
+}
+
+test "big.int comptime_int set negative" {
+ var a = try Int.initSet(al, -10);
+
+ debug.assert(a.limbs[0] == 10);
+ debug.assert(a.positive == false);
+}
+
+test "big.int int set unaligned small" {
+ var a = try Int.initSet(al, u7(45));
+
+ debug.assert(a.limbs[0] == 45);
+ debug.assert(a.positive == true);
+}
+
+test "big.int comptime_int to" {
+ const a = try Int.initSet(al, 0xefffffff00000001eeeeeeefaaaaaaab);
+
+ debug.assert((try a.to(u128)) == 0xefffffff00000001eeeeeeefaaaaaaab);
+}
+
+test "big.int sub-limb to" {
+ const a = try Int.initSet(al, 10);
+
+ debug.assert((try a.to(u8)) == 10);
+}
+
+test "big.int to target too small error" {
+ const a = try Int.initSet(al, 0xffffffff);
+
+ if (a.to(u8)) |_| {
+ unreachable;
+ } else |err| {
+ debug.assert(err == error.TargetTooSmall);
+ }
+}
+
+test "big.int norm1" {
+ var a = try Int.init(al);
+ try a.ensureCapacity(8);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 3;
+ a.limbs[3] = 0;
+ a.norm1(4);
+ debug.assert(a.len == 3);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 3;
+ a.norm1(3);
+ debug.assert(a.len == 3);
+
+ a.limbs[0] = 0;
+ a.limbs[1] = 0;
+ a.norm1(2);
+ debug.assert(a.len == 1);
+
+ a.limbs[0] = 0;
+ a.norm1(1);
+ debug.assert(a.len == 1);
+}
+
+test "big.int normN" {
+ var a = try Int.init(al);
+ try a.ensureCapacity(8);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 0;
+ a.limbs[3] = 0;
+ a.normN(4);
+ debug.assert(a.len == 2);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 3;
+ a.normN(3);
+ debug.assert(a.len == 3);
+
+ a.limbs[0] = 0;
+ a.limbs[1] = 0;
+ a.limbs[2] = 0;
+ a.limbs[3] = 0;
+ a.normN(4);
+ debug.assert(a.len == 1);
+
+ a.limbs[0] = 0;
+ a.normN(1);
+ debug.assert(a.len == 1);
+}
+
+test "big.int parity" {
+ var a = try Int.init(al);
+ try a.set(0);
+ debug.assert(a.isEven());
+ debug.assert(!a.isOdd());
+
+ try a.set(7);
+ debug.assert(!a.isEven());
+ debug.assert(a.isOdd());
+}
+
+test "big.int bitcount + sizeInBase" {
+ var a = try Int.init(al);
+
+ try a.set(0b100);
+ debug.assert(a.bitCountAbs() == 3);
+ debug.assert(a.sizeInBase(2) >= 3);
+ debug.assert(a.sizeInBase(10) >= 1);
+
+ a.negate();
+ debug.assert(a.bitCountAbs() == 3);
+ debug.assert(a.sizeInBase(2) >= 4);
+ debug.assert(a.sizeInBase(10) >= 2);
+
+ try a.set(0xffffffff);
+ debug.assert(a.bitCountAbs() == 32);
+ debug.assert(a.sizeInBase(2) >= 32);
+ debug.assert(a.sizeInBase(10) >= 10);
+
+ try a.shiftLeft(a, 5000);
+ debug.assert(a.bitCountAbs() == 5032);
+ debug.assert(a.sizeInBase(2) >= 5032);
+ a.positive = false;
+
+ debug.assert(a.bitCountAbs() == 5032);
+ debug.assert(a.sizeInBase(2) >= 5033);
+}
+
+test "big.int bitcount/to" {
+ var a = try Int.init(al);
+
+ try a.set(0);
+ debug.assert(a.bitCountTwosComp() == 0);
+
+ // TODO: stack smashing
+ // debug.assert((try a.to(u0)) == 0);
+ // TODO: sigsegv
+ // debug.assert((try a.to(i0)) == 0);
+
+ try a.set(-1);
+ debug.assert(a.bitCountTwosComp() == 1);
+ debug.assert((try a.to(i1)) == -1);
+
+ try a.set(-8);
+ debug.assert(a.bitCountTwosComp() == 4);
+ debug.assert((try a.to(i4)) == -8);
+
+ try a.set(127);
+ debug.assert(a.bitCountTwosComp() == 7);
+ debug.assert((try a.to(u7)) == 127);
+
+ try a.set(-128);
+ debug.assert(a.bitCountTwosComp() == 8);
+ debug.assert((try a.to(i8)) == -128);
+
+ try a.set(-129);
+ debug.assert(a.bitCountTwosComp() == 9);
+ debug.assert((try a.to(i9)) == -129);
+}
+
+test "big.int fits" {
+ var a = try Int.init(al);
+
+ try a.set(0);
+ debug.assert(a.fits(u0));
+ debug.assert(a.fits(i0));
+
+ try a.set(255);
+ debug.assert(!a.fits(u0));
+ debug.assert(!a.fits(u1));
+ debug.assert(!a.fits(i8));
+ debug.assert(a.fits(u8));
+ debug.assert(a.fits(u9));
+ debug.assert(a.fits(i9));
+
+ try a.set(-128);
+ debug.assert(!a.fits(i7));
+ debug.assert(a.fits(i8));
+ debug.assert(a.fits(i9));
+ debug.assert(!a.fits(u9));
+
+ try a.set(0x1ffffffffeeeeeeee);
+ debug.assert(!a.fits(u32));
+ debug.assert(!a.fits(u64));
+ debug.assert(a.fits(u65));
+}
+
+test "big.int string set" {
+ var a = try Int.init(al);
+ try a.setString(10, "120317241209124781241290847124");
+
+ debug.assert((try a.to(u128)) == 120317241209124781241290847124);
+}
+
+test "big.int string negative" {
+ var a = try Int.init(al);
+ try a.setString(10, "-1023");
+ debug.assert((try a.to(i32)) == -1023);
+}
+
+test "big.int string set bad char error" {
+ var a = try Int.init(al);
+ a.setString(10, "x") catch |err| debug.assert(err == error.InvalidCharForDigit);
+}
+
+test "big.int string set bad base error" {
+ var a = try Int.init(al);
+ a.setString(45, "10") catch |err| debug.assert(err == error.InvalidBase);
+}
+
+test "big.int string to" {
+ const a = try Int.initSet(al, 120317241209124781241290847124);
+
+ const as = try a.toString(al, 10);
+ const es = "120317241209124781241290847124";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int string to base base error" {
+ const a = try Int.initSet(al, 0xffffffff);
+
+ if (a.toString(al, 45)) |_| {
+ unreachable;
+ } else |err| {
+ debug.assert(err == error.InvalidBase);
+ }
+}
+
+test "big.int string to base 2" {
+ const a = try Int.initSet(al, -0b1011);
+
+ const as = try a.toString(al, 2);
+ const es = "-1011";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int string to base 16" {
+ const a = try Int.initSet(al, 0xefffffff00000001eeeeeeefaaaaaaab);
+
+ const as = try a.toString(al, 16);
+ const es = "efffffff00000001eeeeeeefaaaaaaab";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int neg string to" {
+ const a = try Int.initSet(al, -123907434);
+
+ const as = try a.toString(al, 10);
+ const es = "-123907434";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int zero string to" {
+ const a = try Int.initSet(al, 0);
+
+ const as = try a.toString(al, 10);
+ const es = "0";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int clone" {
+ var a = try Int.initSet(al, 1234);
+ const b = try a.clone();
+
+ debug.assert((try a.to(u32)) == 1234);
+ debug.assert((try b.to(u32)) == 1234);
+
+ try a.set(77);
+ debug.assert((try a.to(u32)) == 77);
+ debug.assert((try b.to(u32)) == 1234);
+}
+
+test "big.int swap" {
+ var a = try Int.initSet(al, 1234);
+ var b = try Int.initSet(al, 5678);
+
+ debug.assert((try a.to(u32)) == 1234);
+ debug.assert((try b.to(u32)) == 5678);
+
+ a.swap(&b);
+
+ debug.assert((try a.to(u32)) == 5678);
+ debug.assert((try b.to(u32)) == 1234);
+}
+
+test "big.int to negative" {
+ var a = try Int.initSet(al, -10);
+
+ debug.assert((try a.to(i32)) == -10);
+}
+
+test "big.int compare" {
+ var a = try Int.initSet(al, -11);
+ var b = try Int.initSet(al, 10);
+
+ debug.assert(a.cmpAbs(b) == 1);
+ debug.assert(a.cmp(b) == -1);
+}
+
+test "big.int compare similar" {
+ var a = try Int.initSet(al, 0xffffffffeeeeeeeeffffffffeeeeeeee);
+ var b = try Int.initSet(al, 0xffffffffeeeeeeeeffffffffeeeeeeef);
+
+ debug.assert(a.cmpAbs(b) == -1);
+ debug.assert(b.cmpAbs(a) == 1);
+}
+
+test "big.int compare different limb size" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, 1);
+
+ debug.assert(a.cmpAbs(b) == 1);
+ debug.assert(b.cmpAbs(a) == -1);
+}
+
+test "big.int compare multi-limb" {
+ var a = try Int.initSet(al, -0x7777777799999999ffffeeeeffffeeeeffffeeeef);
+ var b = try Int.initSet(al, 0x7777777799999999ffffeeeeffffeeeeffffeeeee);
+
+ debug.assert(a.cmpAbs(b) == 1);
+ debug.assert(a.cmp(b) == -1);
+}
+
+test "big.int equality" {
+ var a = try Int.initSet(al, 0xffffffff1);
+ var b = try Int.initSet(al, -0xffffffff1);
+
+ debug.assert(a.eqAbs(b));
+ debug.assert(!a.eq(b));
+}
+
+test "big.int abs" {
+ var a = try Int.initSet(al, -5);
+
+ a.abs();
+ debug.assert((try a.to(u32)) == 5);
+
+ a.abs();
+ debug.assert((try a.to(u32)) == 5);
+}
+
+test "big.int negate" {
+ var a = try Int.initSet(al, 5);
+
+ a.negate();
+ debug.assert((try a.to(i32)) == -5);
+
+ a.negate();
+ debug.assert((try a.to(i32)) == 5);
+}
+
+test "big.int add single-single" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var c = try Int.init(al);
+ try c.add(a, b);
+
+ debug.assert((try c.to(u32)) == 55);
+}
+
+test "big.int add multi-single" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, 1);
+
+ var c = try Int.init(al);
+
+ try c.add(a, b);
+ debug.assert((try c.to(DoubleLimb)) == @maxValue(Limb) + 2);
+
+ try c.add(b, a);
+ debug.assert((try c.to(DoubleLimb)) == @maxValue(Limb) + 2);
+}
+
+test "big.int add multi-multi" {
+ const op1 = 0xefefefef7f7f7f7f;
+ const op2 = 0xfefefefe9f9f9f9f;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var c = try Int.init(al);
+ try c.add(a, b);
+
+ debug.assert((try c.to(u128)) == op1 + op2);
+}
+
+test "big.int add zero-zero" {
+ var a = try Int.initSet(al, 0);
+ var b = try Int.initSet(al, 0);
+
+ var c = try Int.init(al);
+ try c.add(a, b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int add alias multi-limb nonzero-zero" {
+ const op1 = 0xffffffff777777771;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, 0);
+
+ try a.add(a, b);
+
+ debug.assert((try a.to(u128)) == op1);
+}
+
+test "big.int add sign" {
+ var a = try Int.init(al);
+
+ const one = try Int.initSet(al, 1);
+ const two = try Int.initSet(al, 2);
+ const neg_one = try Int.initSet(al, -1);
+ const neg_two = try Int.initSet(al, -2);
+
+ try a.add(one, two);
+ debug.assert((try a.to(i32)) == 3);
+
+ try a.add(neg_one, two);
+ debug.assert((try a.to(i32)) == 1);
+
+ try a.add(one, neg_two);
+ debug.assert((try a.to(i32)) == -1);
+
+ try a.add(neg_one, neg_two);
+ debug.assert((try a.to(i32)) == -3);
+}
+
+test "big.int sub single-single" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var c = try Int.init(al);
+ try c.sub(a, b);
+
+ debug.assert((try c.to(u32)) == 45);
+}
+
+test "big.int sub multi-single" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, 1);
+
+ var c = try Int.init(al);
+ try c.sub(a, b);
+
+ debug.assert((try c.to(Limb)) == @maxValue(Limb));
+}
+
+test "big.int sub multi-multi" {
+ const op1 = 0xefefefefefefefefefefefef;
+ const op2 = 0xabababababababababababab;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var c = try Int.init(al);
+ try c.sub(a, b);
+
+ debug.assert((try c.to(u128)) == op1 - op2);
+}
+
+test "big.int sub equal" {
+ var a = try Int.initSet(al, 0x11efefefefefefefefefefefef);
+ var b = try Int.initSet(al, 0x11efefefefefefefefefefefef);
+
+ var c = try Int.init(al);
+ try c.sub(a, b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int sub sign" {
+ var a = try Int.init(al);
+
+ const one = try Int.initSet(al, 1);
+ const two = try Int.initSet(al, 2);
+ const neg_one = try Int.initSet(al, -1);
+ const neg_two = try Int.initSet(al, -2);
+
+ try a.sub(one, two);
+ debug.assert((try a.to(i32)) == -1);
+
+ try a.sub(neg_one, two);
+ debug.assert((try a.to(i32)) == -3);
+
+ try a.sub(one, neg_two);
+ debug.assert((try a.to(i32)) == 3);
+
+ try a.sub(neg_one, neg_two);
+ debug.assert((try a.to(i32)) == 1);
+
+ try a.sub(neg_two, neg_one);
+ debug.assert((try a.to(i32)) == -1);
+}
+
+test "big.int mul single-single" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var c = try Int.init(al);
+ try c.mul(a, b);
+
+ debug.assert((try c.to(u64)) == 250);
+}
+
+test "big.int mul multi-single" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+ var b = try Int.initSet(al, 2);
+
+ var c = try Int.init(al);
+ try c.mul(a, b);
+
+ debug.assert((try c.to(DoubleLimb)) == 2 * @maxValue(Limb));
+}
+
+test "big.int mul multi-multi" {
+ const op1 = 0x998888efefefefefefefef;
+ const op2 = 0x333000abababababababab;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var c = try Int.init(al);
+ try c.mul(a, b);
+
+ debug.assert((try c.to(u256)) == op1 * op2);
+}
+
+test "big.int mul alias r with a" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+ var b = try Int.initSet(al, 2);
+
+ try a.mul(a, b);
+
+ debug.assert((try a.to(DoubleLimb)) == 2 * @maxValue(Limb));
+}
+
+test "big.int mul alias r with b" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+ var b = try Int.initSet(al, 2);
+
+ try a.mul(b, a);
+
+ debug.assert((try a.to(DoubleLimb)) == 2 * @maxValue(Limb));
+}
+
+test "big.int mul alias r with a and b" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+
+ try a.mul(a, a);
+
+ debug.assert((try a.to(DoubleLimb)) == @maxValue(Limb) * @maxValue(Limb));
+}
+
+test "big.int mul a*0" {
+ var a = try Int.initSet(al, 0xefefefefefefefef);
+ var b = try Int.initSet(al, 0);
+
+ var c = try Int.init(al);
+ try c.mul(a, b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int mul 0*0" {
+ var a = try Int.initSet(al, 0);
+ var b = try Int.initSet(al, 0);
+
+ var c = try Int.init(al);
+ try c.mul(a, b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int div single-single no rem" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u32)) == 10);
+ debug.assert((try r.to(u32)) == 0);
+}
+
+test "big.int div single-single with rem" {
+ var a = try Int.initSet(al, 49);
+ var b = try Int.initSet(al, 5);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u32)) == 9);
+ debug.assert((try r.to(u32)) == 4);
+}
+
+test "big.int div multi-single no rem" {
+ const op1 = 0xffffeeeeddddcccc;
+ const op2 = 34;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u64)) == op1 / op2);
+ debug.assert((try r.to(u64)) == 0);
+}
+
+test "big.int div multi-single with rem" {
+ const op1 = 0xffffeeeeddddcccf;
+ const op2 = 34;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u64)) == op1 / op2);
+ debug.assert((try r.to(u64)) == 3);
+}
+
+test "big.int div multi>2-single" {
+ const op1 = 0xfefefefefefefefefefefefefefefefe;
+ const op2 = 0xefab8;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u128)) == op1 / op2);
+ debug.assert((try r.to(u32)) == 0x3e4e);
+}
+
+test "big.int div single-single q < r" {
+ var a = try Int.initSet(al, 0x0078f432);
+ var b = try Int.initSet(al, 0x01000000);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u64)) == 0);
+ debug.assert((try r.to(u64)) == 0x0078f432);
+}
+
+test "big.int div single-single q == r" {
+ var a = try Int.initSet(al, 10);
+ var b = try Int.initSet(al, 10);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u64)) == 1);
+ debug.assert((try r.to(u64)) == 0);
+}
+
+test "big.int div q=0 alias" {
+ var a = try Int.initSet(al, 3);
+ var b = try Int.initSet(al, 10);
+
+ try Int.divTrunc(&a, &b, a, b);
+
+ debug.assert((try a.to(u64)) == 0);
+ debug.assert((try b.to(u64)) == 3);
+}
+
+test "big.int div multi-multi q < r" {
+ const op1 = 0x1ffffffff0078f432;
+ const op2 = 0x1ffffffff01000000;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u128)) == 0);
+ debug.assert((try r.to(u128)) == op1);
+}
+
+test "big.int div trunc single-single +/+" {
+ const u: i32 = 5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ // n = q * d + r
+ // 5 = 1 * 3 + 2
+ const eq = @divTrunc(u, v);
+ const er = @mod(u, v);
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div trunc single-single -/+" {
+ const u: i32 = -5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ // n = q * d + r
+ // -5 = -1 * 3 - 2
+ const eq = -1;
+ const er = -2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div trunc single-single +/-" {
+ const u: i32 = 5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ // n = q * d + r
+ // 5 = -1 * -3 + 2
+ const eq = -1;
+ const er = 2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div trunc single-single -/-" {
+ const u: i32 = -5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ // n = q * d + r
+ // -5 = 1 * -3 - 2
+ const eq = 1;
+ const er = -2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single +/+" {
+ const u: i32 = 5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, a, b);
+
+ // n = q * d + r
+ // 5 = 1 * 3 + 2
+ const eq = 1;
+ const er = 2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single -/+" {
+ const u: i32 = -5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, a, b);
+
+ // n = q * d + r
+ // -5 = -2 * 3 + 1
+ const eq = -2;
+ const er = 1;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single +/-" {
+ const u: i32 = 5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, a, b);
+
+ // n = q * d + r
+ // 5 = -2 * -3 - 1
+ const eq = -2;
+ const er = -1;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single -/-" {
+ const u: i32 = -5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, a, b);
+
+ // n = q * d + r
+ // -5 = 1 * -3 - 2
+ const eq = 1;
+ const er = -2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div multi-multi with rem" {
+ var a = try Int.initSet(al, 0x8888999911110000ffffeeeeddddccccbbbbaaaa9999);
+ var b = try Int.initSet(al, 0x99990000111122223333);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u128)) == 0xe38f38e39161aaabd03f0f1b);
+ debug.assert((try r.to(u128)) == 0x28de0acacd806823638);
+}
+
+test "big.int div multi-multi no rem" {
+ var a = try Int.initSet(al, 0x8888999911110000ffffeeeedb4fec200ee3a4286361);
+ var b = try Int.initSet(al, 0x99990000111122223333);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u128)) == 0xe38f38e39161aaabd03f0f1b);
+ debug.assert((try r.to(u128)) == 0);
+}
+
+test "big.int div multi-multi (2 branch)" {
+ var a = try Int.initSet(al, 0x866666665555555588888887777777761111111111111111);
+ var b = try Int.initSet(al, 0x86666666555555554444444433333333);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u128)) == 0x10000000000000000);
+ debug.assert((try r.to(u128)) == 0x44444443444444431111111111111111);
+}
+
+test "big.int div multi-multi (3.1/3.3 branch)" {
+ var a = try Int.initSet(al, 0x11111111111111111111111111111111111111111111111111111111111111);
+ var b = try Int.initSet(al, 0x1111111111111111111111111111111111111111171);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, a, b);
+
+ debug.assert((try q.to(u128)) == 0xfffffffffffffffffff);
+ debug.assert((try r.to(u256)) == 0x1111111111111111111110b12222222222222222282);
+}
+
+test "big.int shift-right single" {
+ var a = try Int.initSet(al, 0xffff0000);
+ try a.shiftRight(a, 16);
+
+ debug.assert((try a.to(u32)) == 0xffff);
+}
+
+test "big.int shift-right multi" {
+ var a = try Int.initSet(al, 0xffff0000eeee1111dddd2222cccc3333);
+ try a.shiftRight(a, 67);
+
+ debug.assert((try a.to(u64)) == 0x1fffe0001dddc222);
+}
+
+test "big.int shift-left single" {
+ var a = try Int.initSet(al, 0xffff);
+ try a.shiftLeft(a, 16);
+
+ debug.assert((try a.to(u64)) == 0xffff0000);
+}
+
+test "big.int shift-left multi" {
+ var a = try Int.initSet(al, 0x1fffe0001dddc222);
+ try a.shiftLeft(a, 67);
+
+ debug.assert((try a.to(u128)) == 0xffff0000eeee11100000000000000000);
+}
+
+test "big.int shift-right negative" {
+ var a = try Int.init(al);
+
+ try a.shiftRight(try Int.initSet(al, -20), 2);
+ debug.assert((try a.to(i32)) == -20 >> 2);
+
+ try a.shiftRight(try Int.initSet(al, -5), 10);
+ debug.assert((try a.to(i32)) == -5 >> 10);
+}
+
+test "big.int shift-left negative" {
+ var a = try Int.init(al);
+
+ try a.shiftRight(try Int.initSet(al, -10), 1232);
+ debug.assert((try a.to(i32)) == -10 >> 1232);
+}
+
+test "big.int bitwise and simple" {
+ var a = try Int.initSet(al, 0xffffffff11111111);
+ var b = try Int.initSet(al, 0xeeeeeeee22222222);
+
+ try a.bitAnd(a, b);
+
+ debug.assert((try a.to(u64)) == 0xeeeeeeee00000000);
+}
+
+test "big.int bitwise and multi-limb" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, @maxValue(Limb));
+
+ try a.bitAnd(a, b);
+
+ debug.assert((try a.to(u128)) == 0);
+}
+
+test "big.int bitwise xor simple" {
+ var a = try Int.initSet(al, 0xffffffff11111111);
+ var b = try Int.initSet(al, 0xeeeeeeee22222222);
+
+ try a.bitXor(a, b);
+
+ debug.assert((try a.to(u64)) == 0x1111111133333333);
+}
+
+test "big.int bitwise xor multi-limb" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, @maxValue(Limb));
+
+ try a.bitXor(a, b);
+
+ debug.assert((try a.to(DoubleLimb)) == (@maxValue(Limb) + 1) ^ @maxValue(Limb));
+}
+
+test "big.int bitwise or simple" {
+ var a = try Int.initSet(al, 0xffffffff11111111);
+ var b = try Int.initSet(al, 0xeeeeeeee22222222);
+
+ try a.bitOr(a, b);
+
+ debug.assert((try a.to(u64)) == 0xffffffff33333333);
+}
+
+test "big.int bitwise or multi-limb" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, @maxValue(Limb));
+
+ try a.bitOr(a, b);
+
+ // TODO: big.int.cpp's `or` is wrong on multi-limb values.
+ debug.assert((try a.to(DoubleLimb)) == (@maxValue(Limb) + 1) + @maxValue(Limb));
+}
+
+test "big.int var args" {
+ var a = try Int.initSet(al, 5);
+
+ try a.add(a, try Int.initSet(al, 6));
+ debug.assert((try a.to(u64)) == 11);
+
+ debug.assert(a.cmp(try Int.initSet(al, 11)) == 0);
+ debug.assert(a.cmp(try Int.initSet(al, 14)) <= 0);
+}
diff --git a/std/math/cbrt.zig b/std/math/cbrt.zig
index a265392ff7..c067c5155a 100644
--- a/std/math/cbrt.zig
+++ b/std/math/cbrt.zig
@@ -54,22 +54,22 @@ fn cbrt32(x: f32) f32 {
r = t * t * t;
t = t * (f64(x) + x + r) / (x + r + r);
- return f32(t);
+ return @floatCast(f32, t);
}
fn cbrt64(x: f64) f64 {
- const B1: u32 = 715094163; // (1023 - 1023 / 3 - 0.03306235651 * 2^20
- const B2: u32 = 696219795; // (1023 - 1023 / 3 - 54 / 3 - 0.03306235651 * 2^20
+ const B1: u32 = 715094163; // (1023 - 1023 / 3 - 0.03306235651) * 2^20
+ const B2: u32 = 696219795; // (1023 - 1023 / 3 - 54 / 3 - 0.03306235651) * 2^20
// |1 / cbrt(x) - p(x)| < 2^(23.5)
- const P0: f64 = 1.87595182427177009643;
+ const P0: f64 = 1.87595182427177009643;
const P1: f64 = -1.88497979543377169875;
- const P2: f64 = 1.621429720105354466140;
+ const P2: f64 = 1.621429720105354466140;
const P3: f64 = -0.758397934778766047437;
- const P4: f64 = 0.145996192886612446982;
+ const P4: f64 = 0.145996192886612446982;
var u = @bitCast(u64, x);
- var hx = u32(u >> 32) & 0x7FFFFFFF;
+ var hx = @intCast(u32, u >> 32) & 0x7FFFFFFF;
// cbrt(nan, inf) = itself
if (hx >= 0x7FF00000) {
@@ -79,7 +79,7 @@ fn cbrt64(x: f64) f64 {
// cbrt to ~5bits
if (hx < 0x00100000) {
u = @bitCast(u64, x * 0x1.0p54);
- hx = u32(u >> 32) & 0x7FFFFFFF;
+ hx = @intCast(u32, u >> 32) & 0x7FFFFFFF;
// cbrt(0) is itself
if (hx == 0) {
diff --git a/std/math/ceil.zig b/std/math/ceil.zig
index 5bdb84ca00..1c429504e8 100644
--- a/std/math/ceil.zig
+++ b/std/math/ceil.zig
@@ -20,7 +20,7 @@ pub fn ceil(x: var) @typeOf(x) {
fn ceil32(x: f32) f32 {
var u = @bitCast(u32, x);
- var e = i32((u >> 23) & 0xFF) - 0x7F;
+ var e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
var m: u32 = undefined;
// TODO: Shouldn't need this explicit check.
@@ -31,7 +31,7 @@ fn ceil32(x: f32) f32 {
if (e >= 23) {
return x;
} else if (e >= 0) {
- m = u32(0x007FFFFF) >> u5(e);
+ m = u32(0x007FFFFF) >> @intCast(u5, e);
if (u & m == 0) {
return x;
}
@@ -56,7 +56,7 @@ fn ceil64(x: f64) f64 {
const e = (u >> 52) & 0x7FF;
var y: f64 = undefined;
- if (e >= 0x3FF+52 or x == 0) {
+ if (e >= 0x3FF + 52 or x == 0) {
return x;
}
@@ -68,7 +68,7 @@ fn ceil64(x: f64) f64 {
y = x + math.f64_toint - math.f64_toint - x;
}
- if (e <= 0x3FF-1) {
+ if (e <= 0x3FF - 1) {
math.forceEval(y);
if (u >> 63 != 0) {
return -0.0;
diff --git a/std/math/complex/abs.zig b/std/math/complex/abs.zig
new file mode 100644
index 0000000000..4cd095c46b
--- /dev/null
+++ b/std/math/complex/abs.zig
@@ -0,0 +1,18 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn abs(z: var) @typeOf(z.re) {
+ const T = @typeOf(z.re);
+ return math.hypot(T, z.re, z.im);
+}
+
+const epsilon = 0.0001;
+
+test "complex.cabs" {
+ const a = Complex(f32).new(5, 3);
+ const c = abs(a);
+ debug.assert(math.approxEq(f32, c, 5.83095, epsilon));
+}
diff --git a/std/math/complex/acos.zig b/std/math/complex/acos.zig
new file mode 100644
index 0000000000..a5760b4ace
--- /dev/null
+++ b/std/math/complex/acos.zig
@@ -0,0 +1,21 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
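+// acos(z) = pi/2 - asin(z)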
+pub fn acos(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ const q = cmath.asin(z);
+ return Complex(T).new(T(math.pi) / 2 - q.re, -q.im);
+}
+
+const epsilon = 0.0001;
+
+test "complex.cacos" {
+ const a = Complex(f32).new(5, 3);
+ const c = acos(a);
+
+ debug.assert(math.approxEq(f32, c.re, 0.546975, epsilon));
+ debug.assert(math.approxEq(f32, c.im, -2.452914, epsilon));
+}
diff --git a/std/math/complex/acosh.zig b/std/math/complex/acosh.zig
new file mode 100644
index 0000000000..8dd91b2836
--- /dev/null
+++ b/std/math/complex/acosh.zig
@@ -0,0 +1,21 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
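+// acosh(z) is computed as i * acos(z), rotating the acos result by a quarter turn.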
+pub fn acosh(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ const q = cmath.acos(z);
+ return Complex(T).new(-q.im, q.re);
+}
+
+const epsilon = 0.0001;
+
+test "complex.cacosh" {
+ const a = Complex(f32).new(5, 3);
+ const c = acosh(a);
+
+ debug.assert(math.approxEq(f32, c.re, 2.452914, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 0.546975, epsilon));
+}
diff --git a/std/math/complex/arg.zig b/std/math/complex/arg.zig
new file mode 100644
index 0000000000..f24512ac73
--- /dev/null
+++ b/std/math/complex/arg.zig
@@ -0,0 +1,18 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn arg(z: var) @typeOf(z.re) {
+ const T = @typeOf(z.re);
+ return math.atan2(T, z.im, z.re);
+}
+
+const epsilon = 0.0001;
+
+test "complex.carg" {
+ const a = Complex(f32).new(5, 3);
+ const c = arg(a);
+ debug.assert(math.approxEq(f32, c, 0.540420, epsilon));
+}
diff --git a/std/math/complex/asin.zig b/std/math/complex/asin.zig
new file mode 100644
index 0000000000..584a3a1a9b
--- /dev/null
+++ b/std/math/complex/asin.zig
@@ -0,0 +1,27 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
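+// asin(z) = -i * ln(iz + sqrt(1 - z^2))
+// Below, p is 1 - z^2 expanded into real and imaginary parts, and q is iz.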
+pub fn asin(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ const x = z.re;
+ const y = z.im;
+
+ const p = Complex(T).new(1.0 - (x - y) * (x + y), -2.0 * x * y);
+ const q = Complex(T).new(-y, x);
+ const r = cmath.log(q.add(cmath.sqrt(p)));
+
+ return Complex(T).new(r.im, -r.re);
+}
+
+const epsilon = 0.0001;
+
+test "complex.casin" {
+ const a = Complex(f32).new(5, 3);
+ const c = asin(a);
+
+ debug.assert(math.approxEq(f32, c.re, 1.023822, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 2.452914, epsilon));
+}
diff --git a/std/math/complex/asinh.zig b/std/math/complex/asinh.zig
new file mode 100644
index 0000000000..0c4dc2b6e4
--- /dev/null
+++ b/std/math/complex/asinh.zig
@@ -0,0 +1,22 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
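+// asinh(z) = -i * asin(iz)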
+pub fn asinh(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ const q = Complex(T).new(-z.im, z.re);
+ const r = cmath.asin(q);
+ return Complex(T).new(r.im, -r.re);
+}
+
+const epsilon = 0.0001;
+
+test "complex.casinh" {
+ const a = Complex(f32).new(5, 3);
+ const c = asinh(a);
+
+ debug.assert(math.approxEq(f32, c.re, 2.459831, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 0.533999, epsilon));
+}
diff --git a/std/math/complex/atan.zig b/std/math/complex/atan.zig
new file mode 100644
index 0000000000..de60f2546d
--- /dev/null
+++ b/std/math/complex/atan.zig
@@ -0,0 +1,130 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn atan(z: var) @typeOf(z) {
+ const T = @typeOf(z.re);
+ return switch (T) {
+ f32 => atan32(z),
+ f64 => atan64(z),
+ else => @compileError("atan not implemented for " ++ @typeName(z)),
+ };
+}
+
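+// Subtract the nearest integer multiple of pi from x, using a three-term split of
+// pi (DP1 + DP2 + DP3) so the reduction keeps extra precision.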
+fn redupif32(x: f32) f32 {
+ const DP1 = 3.140625;
+ const DP2 = 9.67502593994140625e-4;
+ const DP3 = 1.509957990978376432e-7;
+
+ var t = x / math.pi;
+ if (t >= 0.0) {
+ t += 0.5;
+ } else {
+ t -= 0.5;
+ }
+
+ const u = @intToFloat(f32, @floatToInt(i32, t));
+ return ((x - u * DP1) - u * DP2) - t * DP3;
+}
+
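+// catan(z) for z = x + iy:
+//   re = 0.5 * atan2(2x, 1 - x^2 - y^2), reduced modulo pi
+//   im = 0.25 * ln((x^2 + (y + 1)^2) / (x^2 + (y - 1)^2))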
+fn atan32(z: Complex(f32)) Complex(f32) {
+ const maxnum = 1.0e38;
+
+ const x = z.re;
+ const y = z.im;
+
+ if ((x == 0.0) and (y > 1.0)) {
+ // overflow
+ return Complex(f32).new(maxnum, maxnum);
+ }
+
+ const x2 = x * x;
+ var a = 1.0 - x2 - (y * y);
+ if (a == 0.0) {
+ // overflow
+ return Complex(f32).new(maxnum, maxnum);
+ }
+
+ var t = 0.5 * math.atan2(f32, 2.0 * x, a);
+ var w = redupif32(t);
+
+ t = y - 1.0;
+ a = x2 + t * t;
+ if (a == 0.0) {
+ // overflow
+ return Complex(f32).new(maxnum, maxnum);
+ }
+
+ t = y + 1.0;
+ a = (x2 + (t * t)) / a;
+ return Complex(f32).new(w, 0.25 * math.ln(a));
+}
+
+fn redupif64(x: f64) f64 {
+ const DP1 = 3.14159265160560607910;
+ const DP2 = 1.98418714791870343106e-9;
+ const DP3 = 1.14423774522196636802e-17;
+
+ var t = x / math.pi;
+ if (t >= 0.0) {
+ t += 0.5;
+ } else {
+ t -= 0.5;
+ }
+
+ const u = @intToFloat(f64, @floatToInt(i64, t));
+ return ((x - u * DP1) - u * DP2) - t * DP3;
+}
+
+fn atan64(z: Complex(f64)) Complex(f64) {
+ const maxnum = 1.0e308;
+
+ const x = z.re;
+ const y = z.im;
+
+ if ((x == 0.0) and (y > 1.0)) {
+ // overflow
+ return Complex(f64).new(maxnum, maxnum);
+ }
+
+ const x2 = x * x;
+ var a = 1.0 - x2 - (y * y);
+ if (a == 0.0) {
+ // overflow
+ return Complex(f64).new(maxnum, maxnum);
+ }
+
+ var t = 0.5 * math.atan2(f64, 2.0 * x, a);
+ var w = redupif64(t);
+
+ t = y - 1.0;
+ a = x2 + t * t;
+ if (a == 0.0) {
+ // overflow
+ return Complex(f64).new(maxnum, maxnum);
+ }
+
+ t = y + 1.0;
+ a = (x2 + (t * t)) / a;
+ return Complex(f64).new(w, 0.25 * math.ln(a));
+}
+
+const epsilon = 0.0001;
+
+test "complex.catan32" {
+ const a = Complex(f32).new(5, 3);
+ const c = atan(a);
+
+ debug.assert(math.approxEq(f32, c.re, 1.423679, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 0.086569, epsilon));
+}
+
+test "complex.catan64" {
+ const a = Complex(f64).new(5, 3);
+ const c = atan(a);
+
+ debug.assert(math.approxEq(f64, c.re, 1.423679, epsilon));
+ debug.assert(math.approxEq(f64, c.im, 0.086569, epsilon));
+}
diff --git a/std/math/complex/atanh.zig b/std/math/complex/atanh.zig
new file mode 100644
index 0000000000..f70c741765
--- /dev/null
+++ b/std/math/complex/atanh.zig
@@ -0,0 +1,22 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
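+// atanh(z) = -i * atan(iz)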
+pub fn atanh(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ const q = Complex(T).new(-z.im, z.re);
+ const r = cmath.atan(q);
+ return Complex(T).new(r.im, -r.re);
+}
+
+const epsilon = 0.0001;
+
+test "complex.catanh" {
+ const a = Complex(f32).new(5, 3);
+ const c = atanh(a);
+
+ debug.assert(math.approxEq(f32, c.re, 0.146947, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 1.480870, epsilon));
+}
diff --git a/std/math/complex/conj.zig b/std/math/complex/conj.zig
new file mode 100644
index 0000000000..ad3e8b5036
--- /dev/null
+++ b/std/math/complex/conj.zig
@@ -0,0 +1,17 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn conj(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ return Complex(T).new(z.re, -z.im);
+}
+
+test "complex.conj" {
+ const a = Complex(f32).new(5, 3);
+ const c = conj(a);
+
+ debug.assert(c.re == 5 and c.im == -3);
+}
diff --git a/std/math/complex/cos.zig b/std/math/complex/cos.zig
new file mode 100644
index 0000000000..96e4ffcdb0
--- /dev/null
+++ b/std/math/complex/cos.zig
@@ -0,0 +1,21 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
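+// cos(z) = cosh(iz)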
+pub fn cos(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ const p = Complex(T).new(-z.im, z.re);
+ return cmath.cosh(p);
+}
+
+const epsilon = 0.0001;
+
+test "complex.ccos" {
+ const a = Complex(f32).new(5, 3);
+ const c = cos(a);
+
+ debug.assert(math.approxEq(f32, c.re, 2.855815, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 9.606383, epsilon));
+}
diff --git a/std/math/complex/cosh.zig b/std/math/complex/cosh.zig
new file mode 100644
index 0000000000..a2e31631ea
--- /dev/null
+++ b/std/math/complex/cosh.zig
@@ -0,0 +1,165 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+const ldexp_cexp = @import("ldexp.zig").ldexp_cexp;
+
+pub fn cosh(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ return switch (T) {
+ f32 => cosh32(z),
+ f64 => cosh64(z),
+ else => @compileError("cosh not implemented for " ++ @typeName(z)),
+ };
+}
+
+fn cosh32(z: *const Complex(f32)) Complex(f32) {
+ const x = z.re;
+ const y = z.im;
+
+ const hx = @bitCast(u32, x);
+ const ix = hx & 0x7fffffff;
+
+ const hy = @bitCast(u32, y);
+ const iy = hy & 0x7fffffff;
+
+ if (ix < 0x7f800000 and iy < 0x7f800000) {
+ if (iy == 0) {
+ return Complex(f32).new(math.cosh(x), y);
+ }
+ // small x: normal case
+ if (ix < 0x41100000) {
+ return Complex(f32).new(math.cosh(x) * math.cos(y), math.sinh(x) * math.sin(y));
+ }
+
+ // |x|>= 9, so cosh(x) ~= exp(|x|)
+ if (ix < 0x42b17218) {
+ // x < 88.7: exp(|x|) won't overflow
+ const h = math.exp(math.fabs(x)) * 0.5;
+ return Complex(f32).new(h * math.cos(y), math.copysign(f32, h, x) * math.sin(y));
+ }
+ // x < 192.7: scale to avoid overflow
+ else if (ix < 0x4340b1e7) {
+ const v = Complex(f32).new(math.fabs(x), y);
+ const r = ldexp_cexp(v, -1);
+ return Complex(f32).new(r.re, r.im * math.copysign(f32, 1, x));
+ }
+ // x >= 192.7: result always overflows
+ else {
+ const h = 0x1p127 * x;
+ return Complex(f32).new(h * h * math.cos(y), h * math.sin(y));
+ }
+ }
+
+ if (ix == 0 and iy >= 0x7f800000) {
+ return Complex(f32).new(y - y, math.copysign(f32, 0, x * (y - y)));
+ }
+
+ if (iy == 0 and ix >= 0x7f800000) {
+ if (hx & 0x7fffff == 0) {
+ return Complex(f32).new(x * x, math.copysign(f32, 0, x) * y);
+ }
+ return Complex(f32).new(x, math.copysign(f32, 0, (x + x) * y));
+ }
+
+ if (ix < 0x7f800000 and iy >= 0x7f800000) {
+ return Complex(f32).new(y - y, x * (y - y));
+ }
+
+ if (ix >= 0x7f800000 and (hx & 0x7fffff) == 0) {
+ if (iy >= 0x7f800000) {
+ return Complex(f32).new(x * x, x * (y - y));
+ }
+ return Complex(f32).new((x * x) * math.cos(y), x * math.sin(y));
+ }
+
+ return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y));
+}
+
+fn cosh64(z: *const Complex(f64)) Complex(f64) {
+ const x = z.re;
+ const y = z.im;
+
+ const fx = @bitCast(u64, x);
+ const hx = @intCast(u32, fx >> 32);
+ const lx = @truncate(u32, fx);
+ const ix = hx & 0x7fffffff;
+
+ const fy = @bitCast(u64, y);
+ const hy = @intCast(u32, fy >> 32);
+ const ly = @truncate(u32, fy);
+ const iy = hy & 0x7fffffff;
+
+ // nearly non-exceptional case where x, y are finite
+ if (ix < 0x7ff00000 and iy < 0x7ff00000) {
+ if (iy | ly == 0) {
+ return Complex(f64).new(math.cosh(x), x * y);
+ }
+ // small x: normal case
+ if (ix < 0x40360000) {
+ return Complex(f64).new(math.cosh(x) * math.cos(y), math.sinh(x) * math.sin(y));
+ }
+
+ // |x|>= 22, so cosh(x) ~= exp(|x|)
+ if (ix < 0x40862e42) {
+ // x < 710: exp(|x|) won't overflow
+ const h = math.exp(math.fabs(x)) * 0.5;
+ return Complex(f64).new(h * math.cos(y), math.copysign(f64, h, x) * math.sin(y));
+ }
+ // x < 1455: scale to avoid overflow
+ else if (ix < 0x4096bbaa) {
+ const v = Complex(f64).new(math.fabs(x), y);
+ const r = ldexp_cexp(v, -1);
+ return Complex(f64).new(r.re, r.im * math.copysign(f64, 1, x));
+ }
+ // x >= 1455: result always overflows
+ else {
+ const h = 0x1p1023 * x;
+ return Complex(f64).new(h * h * math.cos(y), h * math.sin(y));
+ }
+ }
+
+ if (ix | lx == 0 and iy >= 0x7ff00000) {
+ return Complex(f64).new(y - y, math.copysign(f64, 0, x * (y - y)));
+ }
+
+ if (iy | ly == 0 and ix >= 0x7ff00000) {
+ if ((hx & 0xfffff) | lx == 0) {
+ return Complex(f64).new(x * x, math.copysign(f64, 0, x) * y);
+ }
+ return Complex(f64).new(x * x, math.copysign(f64, 0, (x + x) * y));
+ }
+
+ if (ix < 0x7ff00000 and iy >= 0x7ff00000) {
+ return Complex(f64).new(y - y, x * (y - y));
+ }
+
+ if (ix >= 0x7ff00000 and (hx & 0xfffff) | lx == 0) {
+ if (iy >= 0x7ff00000) {
+ return Complex(f64).new(x * x, x * (y - y));
+ }
+ return Complex(f64).new(x * x * math.cos(y), x * math.sin(y));
+ }
+
+ return Complex(f64).new((x * x) * (y - y), (x + x) * (y - y));
+}
+
+const epsilon = 0.0001;
+
+test "complex.ccosh32" {
+ const a = Complex(f32).new(5, 3);
+ const c = cosh(a);
+
+ debug.assert(math.approxEq(f32, c.re, -73.467300, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 10.471557, epsilon));
+}
+
+test "complex.ccosh64" {
+ const a = Complex(f64).new(5, 3);
+ const c = cosh(a);
+
+ debug.assert(math.approxEq(f64, c.re, -73.467300, epsilon));
+ debug.assert(math.approxEq(f64, c.im, 10.471557, epsilon));
+}
diff --git a/std/math/complex/exp.zig b/std/math/complex/exp.zig
new file mode 100644
index 0000000000..48fb132d97
--- /dev/null
+++ b/std/math/complex/exp.zig
@@ -0,0 +1,134 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+const ldexp_cexp = @import("ldexp.zig").ldexp_cexp;
+
+pub fn exp(z: var) @typeOf(z) {
+ const T = @typeOf(z.re);
+
+ return switch (T) {
+ f32 => exp32(z),
+ f64 => exp64(z),
+ else => @compileError("exp not implemented for " ++ @typeName(z)),
+ };
+}
+
+fn exp32(z: Complex(f32)) Complex(f32) {
+ @setFloatMode(this, @import("builtin").FloatMode.Strict);
+
+ const exp_overflow = 0x42b17218; // max_exp * ln2 ~= 88.72283955
+ const cexp_overflow = 0x43400074; // (max_exp - min_denom_exp) * ln2
+
+ const x = z.re;
+ const y = z.im;
+
+ const hy = @bitCast(u32, y) & 0x7fffffff;
+ // cexp(x + i0) = exp(x) + i0
+ if (hy == 0) {
+ return Complex(f32).new(math.exp(x), y);
+ }
+
+ const hx = @bitCast(u32, x);
+ // cexp(0 + iy) = cos(y) + isin(y)
+ if ((hx & 0x7fffffff) == 0) {
+ return Complex(f32).new(math.cos(y), math.sin(y));
+ }
+
+ if (hy >= 0x7f800000) {
+ // cexp(finite|nan +- i inf|nan) = nan + i nan
+ if ((hx & 0x7fffffff) != 0x7f800000) {
+ return Complex(f32).new(y - y, y - y);
+ } // cexp(-inf +- i inf|nan) = 0 + i0
+ else if (hx & 0x80000000 != 0) {
+ return Complex(f32).new(0, 0);
+ } // cexp(+inf +- i inf|nan) = inf + i nan
+ else {
+ return Complex(f32).new(x, y - y);
+ }
+ }
+
+ // 88.7 <= x <= 192 so must scale
+ if (hx >= exp_overflow and hx <= cexp_overflow) {
+ return ldexp_cexp(z, 0);
+ } // - x < exp_overflow => exp(x) won't overflow (common)
+ // - x > cexp_overflow, so exp(x) * s overflows for s > 0
+ // - x = +-inf
+ // - x = nan
+ else {
+ const exp_x = math.exp(x);
+ return Complex(f32).new(exp_x * math.cos(y), exp_x * math.sin(y));
+ }
+}
+
+fn exp64(z: Complex(f64)) Complex(f64) {
+ const exp_overflow = 0x40862e42; // high bits of max_exp * ln2 ~= 710
+ const cexp_overflow = 0x4096b8e4; // (max_exp - min_denorm_exp) * ln2
+
+ const x = z.re;
+ const y = z.im;
+
+ const fy = @bitCast(u64, y);
+ const hy = u32(fy >> 32) & 0x7fffffff;
+ const ly = @truncate(u32, fy);
+
+ // cexp(x + i0) = exp(x) + i0
+ if (hy | ly == 0) {
+ return Complex(f64).new(math.exp(x), y);
+ }
+
+ const fx = @bitCast(u64, x);
+ const hx = u32(fx >> 32);
+ const lx = @truncate(u32, fx);
+
+ // cexp(0 + iy) = cos(y) + isin(y)
+ if ((hx & 0x7fffffff) | lx == 0) {
+ return Complex(f64).new(math.cos(y), math.sin(y));
+ }
+
+ if (hy >= 0x7ff00000) {
+ // cexp(finite|nan +- i inf|nan) = nan + i nan
+ if (lx != 0 or (hx & 0x7fffffff) != 0x7ff00000) {
+ return Complex(f64).new(y - y, y - y);
+ } // cexp(-inf +- i inf|nan) = 0 + i0
+ else if (hx & 0x80000000 != 0) {
+ return Complex(f64).new(0, 0);
+ } // cexp(+inf +- i inf|nan) = inf + i nan
+ else {
+ return Complex(f64).new(x, y - y);
+ }
+ }
+
+ // 709.7 <= x <= 1454.3 so must scale
+ if (hx >= exp_overflow and hx <= cexp_overflow) {
+ return ldexp_cexp(z, 0);
+ } // - x < exp_overflow => exp(x) won't overflow (common)
+ // - x > cexp_overflow, so exp(x) * s overflows for s > 0
+ // - x = +-inf
+ // - x = nan
+ else {
+ const exp_x = math.exp(x);
+ return Complex(f64).new(exp_x * math.cos(y), exp_x * math.sin(y));
+ }
+}
+
+const epsilon = 0.0001;
+
+test "complex.cexp32" {
+ const a = Complex(f32).new(5, 3);
+ const c = exp(a);
+
+ debug.assert(math.approxEq(f32, c.re, -146.927917, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 20.944065, epsilon));
+}
+
+test "complex.cexp64" {
+ const a = Complex(f64).new(5, 3);
+ const c = exp(a);
+
+ debug.assert(math.approxEq(f64, c.re, -146.927917, epsilon));
+ debug.assert(math.approxEq(f64, c.im, 20.944065, epsilon));
+}
diff --git a/std/math/complex/index.zig b/std/math/complex/index.zig
new file mode 100644
index 0000000000..63a2616984
--- /dev/null
+++ b/std/math/complex/index.zig
@@ -0,0 +1,171 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+
+pub const abs = @import("abs.zig").abs;
+pub const acosh = @import("acosh.zig").acosh;
+pub const acos = @import("acos.zig").acos;
+pub const arg = @import("arg.zig").arg;
+pub const asinh = @import("asinh.zig").asinh;
+pub const asin = @import("asin.zig").asin;
+pub const atanh = @import("atanh.zig").atanh;
+pub const atan = @import("atan.zig").atan;
+pub const conj = @import("conj.zig").conj;
+pub const cosh = @import("cosh.zig").cosh;
+pub const cos = @import("cos.zig").cos;
+pub const exp = @import("exp.zig").exp;
+pub const log = @import("log.zig").log;
+pub const pow = @import("pow.zig").pow;
+pub const proj = @import("proj.zig").proj;
+pub const sinh = @import("sinh.zig").sinh;
+pub const sin = @import("sin.zig").sin;
+pub const sqrt = @import("sqrt.zig").sqrt;
+pub const tanh = @import("tanh.zig").tanh;
+pub const tan = @import("tan.zig").tan;
+
+pub fn Complex(comptime T: type) type {
+ return struct {
+ const Self = this;
+
+ re: T,
+ im: T,
+
+ pub fn new(re: T, im: T) Self {
+ return Self{
+ .re = re,
+ .im = im,
+ };
+ }
+
+ pub fn add(self: Self, other: Self) Self {
+ return Self{
+ .re = self.re + other.re,
+ .im = self.im + other.im,
+ };
+ }
+
+ pub fn sub(self: Self, other: Self) Self {
+ return Self{
+ .re = self.re - other.re,
+ .im = self.im - other.im,
+ };
+ }
+
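+ // (a + bi) * (c + di) = (ac - bd) + (ad + bc)i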
+ pub fn mul(self: Self, other: Self) Self {
+ return Self{
+ .re = self.re * other.re - self.im * other.im,
+ .im = self.im * other.re + self.re * other.im,
+ };
+ }
+
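+ // Multiply numerator and denominator by the conjugate of `other`:
+ // (a + bi) / (c + di) = ((ac + bd) + (bc - ad)i) / (c^2 + d^2)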
+ pub fn div(self: Self, other: Self) Self {
+ const re_num = self.re * other.re + self.im * other.im;
+ const im_num = self.im * other.re - self.re * other.im;
+ const den = other.re * other.re + other.im * other.im;
+
+ return Self{
+ .re = re_num / den,
+ .im = im_num / den,
+ };
+ }
+
+ pub fn conjugate(self: Self) Self {
+ return Self{
+ .re = self.re,
+ .im = -self.im,
+ };
+ }
+
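+ // 1 / z = conj(z) / |z|^2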
+ pub fn reciprocal(self: Self) Self {
+ const m = self.re * self.re + self.im * self.im;
+ return Self{
+ .re = self.re / m,
+ .im = -self.im / m,
+ };
+ }
+
+ pub fn magnitude(self: Self) T {
+ return math.sqrt(self.re * self.re + self.im * self.im);
+ }
+ };
+}
+
+const epsilon = 0.0001;
+
+test "complex.add" {
+ const a = Complex(f32).new(5, 3);
+ const b = Complex(f32).new(2, 7);
+ const c = a.add(b);
+
+ debug.assert(c.re == 7 and c.im == 10);
+}
+
+test "complex.sub" {
+ const a = Complex(f32).new(5, 3);
+ const b = Complex(f32).new(2, 7);
+ const c = a.sub(b);
+
+ debug.assert(c.re == 3 and c.im == -4);
+}
+
+test "complex.mul" {
+ const a = Complex(f32).new(5, 3);
+ const b = Complex(f32).new(2, 7);
+ const c = a.mul(b);
+
+ debug.assert(c.re == -11 and c.im == 41);
+}
+
+test "complex.div" {
+ const a = Complex(f32).new(5, 3);
+ const b = Complex(f32).new(2, 7);
+ const c = a.div(b);
+
+ debug.assert(math.approxEq(f32, c.re, f32(31) / 53, epsilon) and
+ math.approxEq(f32, c.im, f32(-29) / 53, epsilon));
+}
+
+test "complex.conjugate" {
+ const a = Complex(f32).new(5, 3);
+ const c = a.conjugate();
+
+ debug.assert(c.re == 5 and c.im == -3);
+}
+
+test "complex.reciprocal" {
+ const a = Complex(f32).new(5, 3);
+ const c = a.reciprocal();
+
+ debug.assert(math.approxEq(f32, c.re, f32(5) / 34, epsilon) and
+ math.approxEq(f32, c.im, f32(-3) / 34, epsilon));
+}
+
+test "complex.magnitude" {
+ const a = Complex(f32).new(5, 3);
+ const c = a.magnitude();
+
+ debug.assert(math.approxEq(f32, c, 5.83095, epsilon));
+}
+
+test "complex.cmath" {
+ _ = @import("abs.zig");
+ _ = @import("acosh.zig");
+ _ = @import("acos.zig");
+ _ = @import("arg.zig");
+ _ = @import("asinh.zig");
+ _ = @import("asin.zig");
+ _ = @import("atanh.zig");
+ _ = @import("atan.zig");
+ _ = @import("conj.zig");
+ _ = @import("cosh.zig");
+ _ = @import("cos.zig");
+ _ = @import("exp.zig");
+ _ = @import("log.zig");
+ _ = @import("pow.zig");
+ _ = @import("proj.zig");
+ _ = @import("sinh.zig");
+ _ = @import("sin.zig");
+ _ = @import("sqrt.zig");
+ _ = @import("tanh.zig");
+ _ = @import("tan.zig");
+}
diff --git a/std/math/complex/ldexp.zig b/std/math/complex/ldexp.zig
new file mode 100644
index 0000000000..e919ef6bec
--- /dev/null
+++ b/std/math/complex/ldexp.zig
@@ -0,0 +1,73 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn ldexp_cexp(z: var, expt: i32) @typeOf(z) {
+ const T = @typeOf(z.re);
+
+ return switch (T) {
+ f32 => ldexp_cexp32(z, expt),
+ f64 => ldexp_cexp64(z, expt),
+ else => unreachable,
+ };
+}
+
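+// Compute exp(x) for large x without overflowing: exp(x - k * ln2) stays in range,
+// its exponent field is then reset to a fixed bias, and the removed binary exponent
+// is reported through expt so the caller can apply the scale separately.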
+fn frexp_exp32(x: f32, expt: *i32) f32 {
+ const k = 235; // reduction constant
+ const kln2 = 162.88958740; // k * ln2
+
+ const exp_x = math.exp(x - kln2);
+ const hx = @bitCast(u32, exp_x);
+ // TODO zig should allow this cast implicitly because it should know the value is in range
+ expt.* = @intCast(i32, hx >> 23) - (0x7f + 127) + k;
+ return @bitCast(f32, (hx & 0x7fffff) | ((0x7f + 127) << 23));
+}
+
+fn ldexp_cexp32(z: Complex(f32), expt: i32) Complex(f32) {
+ var ex_expt: i32 = undefined;
+ const exp_x = frexp_exp32(z.re, &ex_expt);
+ const exptf = expt + ex_expt;
+
+ const half_expt1 = @divTrunc(exptf, 2);
+ const scale1 = @bitCast(f32, (0x7f + half_expt1) << 23);
+
+ const half_expt2 = exptf - half_expt1;
+ const scale2 = @bitCast(f32, (0x7f + half_expt2) << 23);
+
+ return Complex(f32).new(math.cos(z.im) * exp_x * scale1 * scale2, math.sin(z.im) * exp_x * scale1 * scale2);
+}
+
+fn frexp_exp64(x: f64, expt: *i32) f64 {
+ const k = 1799; // reduction constant
+ const kln2 = 1246.97177782734161156; // k * ln2
+
+ const exp_x = math.exp(x - kln2);
+
+ const fx = @bitCast(u64, exp_x);
+ const hx = @intCast(u32, fx >> 32);
+ const lx = @truncate(u32, fx);
+
+ expt.* = @intCast(i32, hx >> 20) - (0x3ff + 1023) + k;
+
+ const high_word = (hx & 0xfffff) | ((0x3ff + 1023) << 20);
+ return @bitCast(f64, (u64(high_word) << 32) | lx);
+}
+
+fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) {
+ var ex_expt: i32 = undefined;
+ const exp_x = frexp_exp64(z.re, &ex_expt);
+ const exptf = i64(expt + ex_expt);
+
+ const half_expt1 = @divTrunc(exptf, 2);
+ const scale1 = @bitCast(f64, (0x3ff + half_expt1) << (20 + 32));
+
+ const half_expt2 = exptf - half_expt1;
+ const scale2 = @bitCast(f64, (0x3ff + half_expt2) << (20 + 32));
+
+ return Complex(f64).new(
+ math.cos(z.im) * exp_x * scale1 * scale2,
+ math.sin(z.im) * exp_x * scale1 * scale2,
+ );
+}
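
ldexp_cexp above is the overflow-avoidance helper shared by the exp/sinh/cosh ports: frexp_exp* rewrites exp(x) as a mantissa with a fixed large exponent plus a separate integer exponent, and the final power of two is re-applied through two half-sized scale factors so neither factor overflows the exponent field on its own. A minimal usage sketch, not part of the change; the numeric values are illustrative only:

    // sinh/cosh call this with expt = -1: exp(89.0) overflows f32, but
    // exp(89.0) * 0.5 ~= 2.2e38 is representable, and ldexp_cexp reaches it
    // without ever materializing exp(89.0) as an f32.
    const z = Complex(f32).new(89.0, 0.3);
    const w = ldexp_cexp(z, -1);
    // w ~= Complex(f32).new(0.5 * exp(89) * cos(0.3), 0.5 * exp(89) * sin(0.3))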
diff --git a/std/math/complex/log.zig b/std/math/complex/log.zig
new file mode 100644
index 0000000000..a4a1d1664f
--- /dev/null
+++ b/std/math/complex/log.zig
@@ -0,0 +1,23 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn log(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ const r = cmath.abs(z);
+ const phi = cmath.arg(z);
+
+ return Complex(T).new(math.ln(r), phi);
+}
+
+const epsilon = 0.0001;
+
+test "complex.clog" {
+ const a = Complex(f32).new(5, 3);
+ const c = log(a);
+
+ debug.assert(math.approxEq(f32, c.re, 1.763180, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 0.540419, epsilon));
+}
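
log is the polar form log(z) = ln|z| + i*arg(z), which is where the asserted constants come from: for z = 5 + 3i, |z| = sqrt(34) ~= 5.8310 so ln|z| ~= 1.7632, and arg(z) = atan2(3, 5) ~= 0.5404. A small sketch restating the test in those terms, assuming only the imports already in this file:

    const z = Complex(f32).new(5, 3);
    const w = log(z);
    // ln of the magnitude and the phase angle, checked independently
    debug.assert(math.approxEq(f32, w.re, math.ln(cmath.abs(z)), epsilon));
    debug.assert(math.approxEq(f32, w.im, cmath.arg(z), epsilon));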
diff --git a/std/math/complex/pow.zig b/std/math/complex/pow.zig
new file mode 100644
index 0000000000..4c2cd9cf34
--- /dev/null
+++ b/std/math/complex/pow.zig
@@ -0,0 +1,22 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn pow(comptime T: type, z: T, c: T) T {
+ const p = cmath.log(z);
+ const q = c.mul(p);
+ return cmath.exp(q);
+}
+
+const epsilon = 0.0001;
+
+test "complex.cpow" {
+ const a = Complex(f32).new(5, 3);
+ const b = Complex(f32).new(2.3, -1.3);
+ const c = pow(Complex(f32), a, b);
+
+ debug.assert(math.approxEq(f32, c.re, 58.049110, epsilon));
+ debug.assert(math.approxEq(f32, c.im, -101.003433, epsilon));
+}
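
pow is the principal-branch definition z^c = exp(c*log(z)). A hand-checkable sketch, illustrative and not part of the change: (1 + i)^2 should come out as 2i, since log(1 + i) = ln(sqrt(2)) + i*pi/4, doubling gives ln(2) + i*pi/2, and exponentiating yields 2*(cos(pi/2) + i*sin(pi/2)).

    const base = Complex(f32).new(1, 1);
    const exponent = Complex(f32).new(2, 0);
    const r = pow(Complex(f32), base, exponent);
    debug.assert(math.approxEq(f32, r.re, 0, epsilon));
    debug.assert(math.approxEq(f32, r.im, 2, epsilon));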
diff --git a/std/math/complex/proj.zig b/std/math/complex/proj.zig
new file mode 100644
index 0000000000..b6c4cc046e
--- /dev/null
+++ b/std/math/complex/proj.zig
@@ -0,0 +1,24 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn proj(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+
+ if (math.isInf(z.re) or math.isInf(z.im)) {
+ return Complex(T).new(math.inf(T), math.copysign(T, 0, z.re));
+ }
+
+ return Complex(T).new(z.re, z.im);
+}
+
+const epsilon = 0.0001;
+
+test "complex.cproj" {
+ const a = Complex(f32).new(5, 3);
+ const c = proj(a);
+
+ debug.assert(c.re == 5 and c.im == 3);
+}
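
proj is the projection onto the Riemann sphere: finite inputs pass through unchanged, while an infinity in either component collapses to the single point at infinity (inf on the real axis, with a signed zero imaginary part). A short sketch of the infinite case, illustrative only:

    const w = proj(Complex(f32).new(math.inf(f32), 3));
    debug.assert(math.isPositiveInf(w.re) and w.im == 0);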
diff --git a/std/math/complex/sin.zig b/std/math/complex/sin.zig
new file mode 100644
index 0000000000..d32b771d3b
--- /dev/null
+++ b/std/math/complex/sin.zig
@@ -0,0 +1,22 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn sin(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ const p = Complex(T).new(-z.im, z.re);
+ const q = cmath.sinh(p);
+ return Complex(T).new(q.im, -q.re);
+}
+
+const epsilon = 0.0001;
+
+test "complex.csin" {
+ const a = Complex(f32).new(5, 3);
+ const c = sin(a);
+
+ debug.assert(math.approxEq(f32, c.re, -9.654126, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 2.841692, epsilon));
+}
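
sin defers to sinh through the rotation identity sin(z) = -i*sinh(i*z): p is i*z built by swapping components, and the result is rotated back. Expanded, sin(x + iy) = sin(x)cosh(y) + i*cos(x)sinh(y), which is where the asserted -9.6541 + 2.8417i comes from. A sketch expressing the test that way, assuming only the imports already in this file:

    const w = sin(Complex(f32).new(5, 3));
    debug.assert(math.approxEq(f32, w.re, math.sin(f32(5)) * math.cosh(f32(3)), epsilon));
    debug.assert(math.approxEq(f32, w.im, math.cos(f32(5)) * math.sinh(f32(3)), epsilon));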
diff --git a/std/math/complex/sinh.zig b/std/math/complex/sinh.zig
new file mode 100644
index 0000000000..ab23c5c74d
--- /dev/null
+++ b/std/math/complex/sinh.zig
@@ -0,0 +1,164 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+const ldexp_cexp = @import("ldexp.zig").ldexp_cexp;
+
+pub fn sinh(z: var) @typeOf(z) {
+ const T = @typeOf(z.re);
+ return switch (T) {
+ f32 => sinh32(z),
+ f64 => sinh64(z),
+ else => @compileError("tan not implemented for " ++ @typeName(z)),
+ };
+}
+
+fn sinh32(z: Complex(f32)) Complex(f32) {
+ const x = z.re;
+ const y = z.im;
+
+ const hx = @bitCast(u32, x);
+ const ix = hx & 0x7fffffff;
+
+ const hy = @bitCast(u32, y);
+ const iy = hy & 0x7fffffff;
+
+ if (ix < 0x7f800000 and iy < 0x7f800000) {
+ if (iy == 0) {
+ return Complex(f32).new(math.sinh(x), y);
+ }
+ // small x: normal case
+ if (ix < 0x41100000) {
+ return Complex(f32).new(math.sinh(x) * math.cos(y), math.cosh(x) * math.sin(y));
+ }
+
+ // |x| >= 9, so cosh(x) ~= exp(|x|)
+ if (ix < 0x42b17218) {
+ // x < 88.7: exp(|x|) won't overflow
+ const h = math.exp(math.fabs(x)) * 0.5;
+ return Complex(f32).new(math.copysign(f32, h, x) * math.cos(y), h * math.sin(y));
+ }
+ // x < 192.7: scale to avoid overflow
+ else if (ix < 0x4340b1e7) {
+ const v = Complex(f32).new(math.fabs(x), y);
+ const r = ldexp_cexp(v, -1);
+ return Complex(f32).new(r.re * math.copysign(f32, 1, x), r.im);
+ }
+ // x >= 192.7: result always overflows
+ else {
+ const h = 0x1p127 * x;
+ return Complex(f32).new(h * math.cos(y), h * h * math.sin(y));
+ }
+ }
+
+ if (ix == 0 and iy >= 0x7f800000) {
+ return Complex(f32).new(math.copysign(f32, 0, x * (y - y)), y - y);
+ }
+
+ if (iy == 0 and ix >= 0x7f800000) {
+ if (hx & 0x7fffff == 0) {
+ return Complex(f32).new(x, y);
+ }
+ return Complex(f32).new(x, math.copysign(f32, 0, y));
+ }
+
+ if (ix < 0x7f800000 and iy >= 0x7f800000) {
+ return Complex(f32).new(y - y, x * (y - y));
+ }
+
+ if (ix >= 0x7f800000 and (hx & 0x7fffff) == 0) {
+ if (iy >= 0x7f800000) {
+ return Complex(f32).new(x * x, x * (y - y));
+ }
+ return Complex(f32).new(x * math.cos(y), math.inf_f32 * math.sin(y));
+ }
+
+ return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y));
+}
+
+fn sinh64(z: Complex(f64)) Complex(f64) {
+ const x = z.re;
+ const y = z.im;
+
+ const fx = @bitCast(u64, x);
+ const hx = @intCast(u32, fx >> 32);
+ const lx = @truncate(u32, fx);
+ const ix = hx & 0x7fffffff;
+
+ const fy = @bitCast(u64, y);
+ const hy = @intCast(u32, fy >> 32);
+ const ly = @truncate(u32, fy);
+ const iy = hy & 0x7fffffff;
+
+ if (ix < 0x7ff00000 and iy < 0x7ff00000) {
+ if (iy | ly == 0) {
+ return Complex(f64).new(math.sinh(x), y);
+ }
+ // small x: normal case
+ if (ix < 0x40360000) {
+ return Complex(f64).new(math.sinh(x) * math.cos(y), math.cosh(x) * math.sin(y));
+ }
+
+ // |x| >= 22, so cosh(x) ~= exp(|x|)
+ if (ix < 0x40862e42) {
+ // x < 710: exp(|x|) won't overflow
+ const h = math.exp(math.fabs(x)) * 0.5;
+ return Complex(f64).new(math.copysign(f64, h, x) * math.cos(y), h * math.sin(y));
+ }
+ // x < 1455: scale to avoid overflow
+ else if (ix < 0x4096bbaa) {
+ const v = Complex(f64).new(math.fabs(x), y);
+ const r = ldexp_cexp(v, -1);
+ return Complex(f64).new(r.re * math.copysign(f64, 1, x), r.im);
+ }
+ // x >= 1455: result always overflows
+ else {
+ const h = 0x1p1023 * x;
+ return Complex(f64).new(h * math.cos(y), h * h * math.sin(y));
+ }
+ }
+
+ if (ix | lx == 0 and iy >= 0x7ff00000) {
+ return Complex(f64).new(math.copysign(f64, 0, x * (y - y)), y - y);
+ }
+
+ if (iy | ly == 0 and ix >= 0x7ff00000) {
+ if ((hx & 0xfffff) | lx == 0) {
+ return Complex(f64).new(x, y);
+ }
+ return Complex(f64).new(x, math.copysign(f64, 0, y));
+ }
+
+ if (ix < 0x7ff00000 and iy >= 0x7ff00000) {
+ return Complex(f64).new(y - y, x * (y - y));
+ }
+
+ if (ix >= 0x7ff00000 and (hx & 0xfffff) | lx == 0) {
+ if (iy >= 0x7ff00000) {
+ return Complex(f64).new(x * x, x * (y - y));
+ }
+ return Complex(f64).new(x * math.cos(y), math.inf_f64 * math.sin(y));
+ }
+
+ return Complex(f64).new((x * x) * (y - y), (x + x) * (y - y));
+}
+
+const epsilon = 0.0001;
+
+test "complex.csinh32" {
+ const a = Complex(f32).new(5, 3);
+ const c = sinh(a);
+
+ debug.assert(math.approxEq(f32, c.re, -73.460617, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 10.472508, epsilon));
+}
+
+test "complex.csinh64" {
+ const a = Complex(f64).new(5, 3);
+ const c = sinh(a);
+
+ debug.assert(math.approxEq(f64, c.re, -73.460617, epsilon));
+ debug.assert(math.approxEq(f64, c.im, 10.472508, epsilon));
+}
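
The normal-case branch of sinh is the addition formula sinh(x + iy) = sinh(x)cos(y) + i*cosh(x)sin(y); the later branches only swap in exp(|x|)/2 (via ldexp_cexp) once cosh(x) and sinh(x) become indistinguishable, and the remaining cases follow the C99 Annex G inf/nan table. A sketch checking the test values against the formula, illustrative only:

    const w = sinh(Complex(f32).new(5, 3));
    // sinh(5)*cos(3) ~= -73.4606 and cosh(5)*sin(3) ~= 10.4725
    debug.assert(math.approxEq(f32, w.re, math.sinh(f32(5)) * math.cos(f32(3)), epsilon));
    debug.assert(math.approxEq(f32, w.im, math.cosh(f32(5)) * math.sin(f32(3)), epsilon));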
diff --git a/std/math/complex/sqrt.zig b/std/math/complex/sqrt.zig
new file mode 100644
index 0000000000..47367816f7
--- /dev/null
+++ b/std/math/complex/sqrt.zig
@@ -0,0 +1,138 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn sqrt(z: var) @typeOf(z) {
+ const T = @typeOf(z.re);
+
+ return switch (T) {
+ f32 => sqrt32(z),
+ f64 => sqrt64(z),
+ else => @compileError("sqrt not implemented for " ++ @typeName(T)),
+ };
+}
+
+fn sqrt32(z: Complex(f32)) Complex(f32) {
+ const x = z.re;
+ const y = z.im;
+
+ if (x == 0 and y == 0) {
+ return Complex(f32).new(0, y);
+ }
+ if (math.isInf(y)) {
+ return Complex(f32).new(math.inf(f32), y);
+ }
+ if (math.isNan(x)) {
+ // raise invalid if y is not nan
+ const t = (y - y) / (y - y);
+ return Complex(f32).new(x, t);
+ }
+ if (math.isInf(x)) {
+ // sqrt(inf + i nan) = inf + nan i
+ // sqrt(inf + iy) = inf + i0
+ // sqrt(-inf + i nan) = nan +- inf i
+ // sqrt(-inf + iy) = 0 + inf i
+ if (math.signbit(x)) {
+ return Complex(f32).new(math.fabs(y - y), math.copysign(f32, x, y));
+ } else {
+ return Complex(f32).new(x, math.copysign(f32, y - y, y));
+ }
+ }
+
+ // y = nan special case is handled fine below
+
+ // double-precision avoids overflow with correct rounding.
+ const dx = f64(x);
+ const dy = f64(y);
+
+ if (dx >= 0) {
+ const t = math.sqrt((dx + math.hypot(f64, dx, dy)) * 0.5);
+ return Complex(f32).new(
+ @floatCast(f32, t),
+ @floatCast(f32, dy / (2.0 * t)),
+ );
+ } else {
+ const t = math.sqrt((-dx + math.hypot(f64, dx, dy)) * 0.5);
+ return Complex(f32).new(
+ @floatCast(f32, math.fabs(y) / (2.0 * t)),
+ @floatCast(f32, math.copysign(f64, t, y)),
+ );
+ }
+}
+
+fn sqrt64(z: Complex(f64)) Complex(f64) {
+ // may encounter overflow for im,re >= DBL_MAX / (1 + sqrt(2))
+ const threshold = 0x1.a827999fcef32p+1022;
+
+ var x = z.re;
+ var y = z.im;
+
+ if (x == 0 and y == 0) {
+ return Complex(f64).new(0, y);
+ }
+ if (math.isInf(y)) {
+ return Complex(f64).new(math.inf(f64), y);
+ }
+ if (math.isNan(x)) {
+ // raise invalid if y is not nan
+ const t = (y - y) / (y - y);
+ return Complex(f64).new(x, t);
+ }
+ if (math.isInf(x)) {
+ // sqrt(inf + i nan) = inf + nan i
+ // sqrt(inf + iy) = inf + i0
+ // sqrt(-inf + i nan) = nan +- inf i
+ // sqrt(-inf + iy) = 0 + inf i
+ if (math.signbit(x)) {
+ return Complex(f64).new(math.fabs(y - y), math.copysign(f64, x, y));
+ } else {
+ return Complex(f64).new(x, math.copysign(f64, y - y, y));
+ }
+ }
+
+ // y = nan special case is handled fine below
+
+ // scale to avoid overflow
+ var scale = false;
+ if (math.fabs(x) >= threshold or math.fabs(y) >= threshold) {
+ x *= 0.25;
+ y *= 0.25;
+ scale = true;
+ }
+
+ var result: Complex(f64) = undefined;
+ if (x >= 0) {
+ const t = math.sqrt((x + math.hypot(f64, x, y)) * 0.5);
+ result = Complex(f64).new(t, y / (2.0 * t));
+ } else {
+ const t = math.sqrt((-x + math.hypot(f64, x, y)) * 0.5);
+ result = Complex(f64).new(math.fabs(y) / (2.0 * t), math.copysign(f64, t, y));
+ }
+
+ if (scale) {
+ result.re *= 2;
+ result.im *= 2;
+ }
+
+ return result;
+}
+
+const epsilon = 0.0001;
+
+test "complex.csqrt32" {
+ const a = Complex(f32).new(5, 3);
+ const c = sqrt(a);
+
+ debug.assert(math.approxEq(f32, c.re, 2.327117, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 0.644574, epsilon));
+}
+
+test "complex.csqrt64" {
+ const a = Complex(f64).new(5, 3);
+ const c = sqrt(a);
+
+ debug.assert(math.approxEq(f64, c.re, 2.3271175190399496, epsilon));
+ debug.assert(math.approxEq(f64, c.im, 0.6445742373246469, epsilon));
+}
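
The finite branch of sqrt uses the half-angle form: with t = sqrt((|x| + hypot(x, y)) / 2), the result is t + i*y/(2t) when x >= 0 and |y|/(2t) + i*copysign(t, y) when x < 0; the f32 path simply evaluates this in f64, so it never needs the rescaling step that sqrt64 does by hand. A sketch verifying the defining property w*w ~= z, illustrative only:

    const w = sqrt(Complex(f32).new(5, 3));
    // (w.re + i*w.im)^2 should reproduce 5 + 3i
    debug.assert(math.approxEq(f32, w.re * w.re - w.im * w.im, 5, epsilon));
    debug.assert(math.approxEq(f32, 2 * w.re * w.im, 3, epsilon));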
diff --git a/std/math/complex/tan.zig b/std/math/complex/tan.zig
new file mode 100644
index 0000000000..4ea5182fa7
--- /dev/null
+++ b/std/math/complex/tan.zig
@@ -0,0 +1,22 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn tan(z: var) Complex(@typeOf(z.re)) {
+ const T = @typeOf(z.re);
+ const q = Complex(T).new(-z.im, z.re);
+ const r = cmath.tanh(q);
+ return Complex(T).new(r.im, -r.re);
+}
+
+const epsilon = 0.0001;
+
+test "complex.ctan" {
+ const a = Complex(f32).new(5, 3);
+ const c = tan(a);
+
+ debug.assert(math.approxEq(f32, c.re, -0.002708233, epsilon));
+ debug.assert(math.approxEq(f32, c.im, 1.004165, epsilon));
+}
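
tan uses the same rotation as sin: tan(z) = -i*tanh(i*z), so all range reduction and special-case handling lives in tanh. A sketch of a purely imaginary input, which should stay purely imaginary, illustrative only:

    // tan(i*y) = i*tanh(y) for real y
    const w = tan(Complex(f32).new(0, 1));
    debug.assert(math.approxEq(f32, w.re, 0, epsilon));
    debug.assert(math.approxEq(f32, w.im, math.tanh(f32(1)), epsilon));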
diff --git a/std/math/complex/tanh.zig b/std/math/complex/tanh.zig
new file mode 100644
index 0000000000..e48d438783
--- /dev/null
+++ b/std/math/complex/tanh.zig
@@ -0,0 +1,113 @@
+const std = @import("../../index.zig");
+const debug = std.debug;
+const math = std.math;
+const cmath = math.complex;
+const Complex = cmath.Complex;
+
+pub fn tanh(z: var) @typeOf(z) {
+ const T = @typeOf(z.re);
+ return switch (T) {
+ f32 => tanh32(z),
+ f64 => tanh64(z),
+ else => @compileError("tan not implemented for " ++ @typeName(z)),
+ };
+}
+
+fn tanh32(z: Complex(f32)) Complex(f32) {
+ const x = z.re;
+ const y = z.im;
+
+ const hx = @bitCast(u32, x);
+ const ix = hx & 0x7fffffff;
+
+ if (ix >= 0x7f800000) {
+ if (ix & 0x7fffff != 0) {
+ const r = if (y == 0) y else x * y;
+ return Complex(f32).new(x, r);
+ }
+ const xx = @bitCast(f32, hx - 0x40000000);
+ const r = if (math.isInf(y)) y else math.sin(y) * math.cos(y);
+ return Complex(f32).new(xx, math.copysign(f32, 0, r));
+ }
+
+ if (!math.isFinite(y)) {
+ const r = if (ix != 0) y - y else x;
+ return Complex(f32).new(r, y - y);
+ }
+
+ // x >= 11
+ if (ix >= 0x41300000) {
+ const exp_mx = math.exp(-math.fabs(x));
+ return Complex(f32).new(math.copysign(f32, 1, x), 4 * math.sin(y) * math.cos(y) * exp_mx * exp_mx);
+ }
+
+ // Kahan's algorithm
+ const t = math.tan(y);
+ const beta = 1.0 + t * t;
+ const s = math.sinh(x);
+ const rho = math.sqrt(1 + s * s);
+ const den = 1 + beta * s * s;
+
+ return Complex(f32).new((beta * rho * s) / den, t / den);
+}
+
+fn tanh64(z: Complex(f64)) Complex(f64) {
+ const x = z.re;
+ const y = z.im;
+
+ const fx = @bitCast(u64, x);
+ // TODO: zig should allow this conversion implicitly because it can notice that the value necessarily
+ // fits in range.
+ const hx = @intCast(u32, fx >> 32);
+ const lx = @truncate(u32, fx);
+ const ix = hx & 0x7fffffff;
+
+ if (ix >= 0x7ff00000) {
+ if ((ix & 0xfffff) | lx != 0) {
+ const r = if (y == 0) y else x * y;
+ return Complex(f64).new(x, r);
+ }
+
+ const xx = @bitCast(f64, (u64(hx - 0x40000000) << 32) | lx);
+ const r = if (math.isInf(y)) y else math.sin(y) * math.cos(y);
+ return Complex(f64).new(xx, math.copysign(f64, 0, r));
+ }
+
+ if (!math.isFinite(y)) {
+ const r = if (ix != 0) y - y else x;
+ return Complex(f64).new(r, y - y);
+ }
+
+ // x >= 22
+ if (ix >= 0x40360000) {
+ const exp_mx = math.exp(-math.fabs(x));
+ return Complex(f64).new(math.copysign(f64, 1, x), 4 * math.sin(y) * math.cos(y) * exp_mx * exp_mx);
+ }
+
+ // Kahan's algorithm
+ const t = math.tan(y);
+ const beta = 1.0 + t * t;
+ const s = math.sinh(x);
+ const rho = math.sqrt(1 + s * s);
+ const den = 1 + beta * s * s;
+
+ return Complex(f64).new((beta * rho * s) / den, t / den);
+}
+
+const epsilon = 0.0001;
+
+test "complex.ctanh32" {
+ const a = Complex(f32).new(5, 3);
+ const c = tanh(a);
+
+ debug.assert(math.approxEq(f32, c.re, 0.999913, epsilon));
+ debug.assert(math.approxEq(f32, c.im, -0.000025, epsilon));
+}
+
+test "complex.ctanh64" {
+ const a = Complex(f64).new(5, 3);
+ const c = tanh(a);
+
+ debug.assert(math.approxEq(f64, c.re, 0.999913, epsilon));
+ debug.assert(math.approxEq(f64, c.im, -0.000025, epsilon));
+}
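
The finite branch is Kahan's rearrangement: with t = tan(y), s = sinh(x) and rho = cosh(x), tanh(x + iy) = (rho*s*(1 + t^2) + i*t) / (1 + (1 + t^2)*s^2), which is algebraically the classic (sinh(x)cosh(x) + i*sin(y)cos(y)) / (sinh(x)^2 + cos(y)^2) but avoids the cancellation-prone direct quotient; the |x| >= 11 (f32) / 22 (f64) branch instead snaps to +-1 plus an exponentially small imaginary part. A sketch checking the test value against the closed form, illustrative only:

    const x = f64(5);
    const y = f64(3);
    const den = math.sinh(x) * math.sinh(x) + math.cos(y) * math.cos(y);
    const w = tanh(Complex(f64).new(x, y));
    debug.assert(math.approxEq(f64, w.re, math.sinh(x) * math.cosh(x) / den, epsilon));
    debug.assert(math.approxEq(f64, w.im, math.sin(y) * math.cos(y) / den, epsilon));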
diff --git a/std/math/copysign.zig b/std/math/copysign.zig
index 4ca8f82f4b..8c71dcb0bc 100644
--- a/std/math/copysign.zig
+++ b/std/math/copysign.zig
@@ -4,12 +4,22 @@ const assert = std.debug.assert;
pub fn copysign(comptime T: type, x: T, y: T) T {
return switch (T) {
+ f16 => copysign16(x, y),
f32 => copysign32(x, y),
f64 => copysign64(x, y),
else => @compileError("copysign not implemented for " ++ @typeName(T)),
};
}
+fn copysign16(x: f16, y: f16) f16 {
+ const ux = @bitCast(u16, x);
+ const uy = @bitCast(u16, y);
+
+ const h1 = ux & (@maxValue(u16) / 2);
+ const h2 = uy & (u16(1) << 15);
+ return @bitCast(f16, h1 | h2);
+}
+
fn copysign32(x: f32, y: f32) f32 {
const ux = @bitCast(u32, x);
const uy = @bitCast(u32, y);
@@ -29,10 +39,18 @@ fn copysign64(x: f64, y: f64) f64 {
}
test "math.copysign" {
+ assert(copysign(f16, 1.0, 1.0) == copysign16(1.0, 1.0));
assert(copysign(f32, 1.0, 1.0) == copysign32(1.0, 1.0));
assert(copysign(f64, 1.0, 1.0) == copysign64(1.0, 1.0));
}
+test "math.copysign16" {
+ assert(copysign16(5.0, 1.0) == 5.0);
+ assert(copysign16(5.0, -1.0) == -5.0);
+ assert(copysign16(-5.0, -1.0) == -5.0);
+ assert(copysign16(-5.0, 1.0) == 5.0);
+}
+
test "math.copysign32" {
assert(copysign32(5.0, 1.0) == 5.0);
assert(copysign32(5.0, -1.0) == -5.0);
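
The new f16 variant follows the same bit trick as the wider ones: IEEE binary16 stores the sign in bit 15, so the magnitude is ux & 0x7FFF and the sign is uy & 0x8000, OR'd back together. A worked bit-level sketch, values illustrative:

    // 5.0 as f16 is 0x4500 and -1.0 is 0xBC00;
    // (0x4500 & 0x7FFF) | (0xBC00 & 0x8000) == 0xC500, which is -5.0.
    assert(@bitCast(u16, copysign(f16, 5.0, -1.0)) == 0xC500);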
diff --git a/std/math/cos.zig b/std/math/cos.zig
index bb405b0d10..71d5e4a8f6 100644
--- a/std/math/cos.zig
+++ b/std/math/cos.zig
@@ -18,20 +18,20 @@ pub fn cos(x: var) @typeOf(x) {
}
// sin polynomial coefficients
-const S0 = 1.58962301576546568060E-10;
+const S0 = 1.58962301576546568060E-10;
const S1 = -2.50507477628578072866E-8;
-const S2 = 2.75573136213857245213E-6;
+const S2 = 2.75573136213857245213E-6;
const S3 = -1.98412698295895385996E-4;
-const S4 = 8.33333333332211858878E-3;
+const S4 = 8.33333333332211858878E-3;
const S5 = -1.66666666666666307295E-1;
// cos polynomial coefficients
const C0 = -1.13585365213876817300E-11;
-const C1 = 2.08757008419747316778E-9;
+const C1 = 2.08757008419747316778E-9;
const C2 = -2.75573141792967388112E-7;
-const C3 = 2.48015872888517045348E-5;
+const C3 = 2.48015872888517045348E-5;
const C4 = -1.38888888888730564116E-3;
-const C5 = 4.16666666666665929218E-2;
+const C5 = 4.16666666666665929218E-2;
// NOTE: This is taken from the go stdlib. The musl implementation is much more complex.
//
@@ -55,7 +55,7 @@ fn cos32(x_: f32) f32 {
}
var y = math.floor(x * m4pi);
- var j = i64(y);
+ var j = @floatToInt(i64, y);
if (j & 1 == 1) {
j += 1;
@@ -106,7 +106,7 @@ fn cos64(x_: f64) f64 {
}
var y = math.floor(x * m4pi);
- var j = i64(y);
+ var j = @floatToInt(i64, y);
if (j & 1 == 1) {
j += 1;
diff --git a/std/math/cosh.zig b/std/math/cosh.zig
index fa46219986..52beafb642 100644
--- a/std/math/cosh.zig
+++ b/std/math/cosh.zig
@@ -49,7 +49,7 @@ fn cosh32(x: f32) f32 {
fn cosh64(x: f64) f64 {
const u = @bitCast(u64, x);
- const w = u32(u >> 32);
+ const w = @intCast(u32, u >> 32);
const ax = @bitCast(f64, u & (@maxValue(u64) >> 1));
// TODO: Shouldn't need this explicit check.
diff --git a/std/math/exp.zig b/std/math/exp.zig
index 4032930a43..d6185d4f0b 100644
--- a/std/math/exp.zig
+++ b/std/math/exp.zig
@@ -6,6 +6,7 @@
const std = @import("../index.zig");
const math = std.math;
const assert = std.debug.assert;
+const builtin = @import("builtin");
pub fn exp(x: var) @typeOf(x) {
const T = @typeOf(x);
@@ -17,16 +18,18 @@ pub fn exp(x: var) @typeOf(x) {
}
fn exp32(x_: f32) f32 {
- const half = []f32 { 0.5, -0.5 };
+ @setFloatMode(this, builtin.FloatMode.Strict);
+
+ const half = []f32{ 0.5, -0.5 };
const ln2hi = 6.9314575195e-1;
const ln2lo = 1.4286067653e-6;
- const invln2 = 1.4426950216e+0;
+ const invln2 = 1.4426950216e+0;
const P1 = 1.6666625440e-1;
const P2 = -2.7667332906e-3;
var x = x_;
var hx = @bitCast(u32, x);
- const sign = i32(hx >> 31);
+ const sign = @intCast(i32, hx >> 31);
hx &= 0x7FFFFFFF;
if (math.isNan(x)) {
@@ -44,7 +47,7 @@ fn exp32(x_: f32) f32 {
return x * 0x1.0p127;
}
if (sign != 0) {
- math.forceEval(-0x1.0p-149 / x); // overflow
+ math.forceEval(-0x1.0p-149 / x); // overflow
// x <= -103.972084
if (hx >= 0x42CFF1B5) {
return 0;
@@ -60,13 +63,12 @@ fn exp32(x_: f32) f32 {
if (hx > 0x3EB17218) {
// |x| > 1.5 * ln2
if (hx > 0x3F851592) {
- k = i32(invln2 * x + half[usize(sign)]);
- }
- else {
+ k = @floatToInt(i32, invln2 * x + half[@intCast(usize, sign)]);
+ } else {
k = 1 - sign - sign;
}
- const fk = f32(k);
+ const fk = @intToFloat(f32, k);
hi = x - fk * ln2hi;
lo = fk * ln2lo;
x = hi - lo;
@@ -76,8 +78,7 @@ fn exp32(x_: f32) f32 {
k = 0;
hi = x;
lo = 0;
- }
- else {
+ } else {
math.forceEval(0x1.0p127 + x); // inexact
return 1 + x;
}
@@ -94,20 +95,22 @@ fn exp32(x_: f32) f32 {
}
fn exp64(x_: f64) f64 {
- const half = []const f64 { 0.5, -0.5 };
+ @setFloatMode(this, builtin.FloatMode.Strict);
+
+ const half = []const f64{ 0.5, -0.5 };
const ln2hi: f64 = 6.93147180369123816490e-01;
const ln2lo: f64 = 1.90821492927058770002e-10;
const invln2: f64 = 1.44269504088896338700e+00;
- const P1: f64 = 1.66666666666666019037e-01;
- const P2: f64 = -2.77777777770155933842e-03;
- const P3: f64 = 6.61375632143793436117e-05;
- const P4: f64 = -1.65339022054652515390e-06;
- const P5: f64 = 4.13813679705723846039e-08;
+ const P1: f64 = 1.66666666666666019037e-01;
+ const P2: f64 = -2.77777777770155933842e-03;
+ const P3: f64 = 6.61375632143793436117e-05;
+ const P4: f64 = -1.65339022054652515390e-06;
+ const P5: f64 = 4.13813679705723846039e-08;
var x = x_;
var ux = @bitCast(u64, x);
var hx = ux >> 32;
- const sign = i32(hx >> 31);
+ const sign = @intCast(i32, hx >> 31);
hx &= 0x7FFFFFFF;
if (math.isNan(x)) {
@@ -145,13 +148,12 @@ fn exp64(x_: f64) f64 {
if (hx > 0x3EB17218) {
// |x| >= 1.5 * ln2
if (hx > 0x3FF0A2B2) {
- k = i32(invln2 * x + half[usize(sign)]);
- }
- else {
+ k = @floatToInt(i32, invln2 * x + half[@intCast(usize, sign)]);
+ } else {
k = 1 - sign - sign;
}
- const dk = f64(k);
+ const dk = @intToFloat(f64, k);
hi = x - dk * ln2hi;
lo = dk * ln2lo;
x = hi - lo;
@@ -161,8 +163,7 @@ fn exp64(x_: f64) f64 {
k = 0;
hi = x;
lo = 0;
- }
- else {
+ } else {
// inexact if x != 0
// math.forceEval(0x1.0p1023 + x);
return 1 + x;
diff --git a/std/math/exp2.zig b/std/math/exp2.zig
index 790bd1a558..d590b0b60b 100644
--- a/std/math/exp2.zig
+++ b/std/math/exp2.zig
@@ -16,7 +16,7 @@ pub fn exp2(x: var) @typeOf(x) {
};
}
-const exp2ft = []const f64 {
+const exp2ft = []const f64{
0x1.6a09e667f3bcdp-1,
0x1.7a11473eb0187p-1,
0x1.8ace5422aa0dbp-1,
@@ -38,8 +38,8 @@ const exp2ft = []const f64 {
fn exp2_32(x: f32) f32 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
- const tblsiz = u32(exp2ft.len);
- const redux: f32 = 0x1.8p23 / f32(tblsiz);
+ const tblsiz = @intCast(u32, exp2ft.len);
+ const redux: f32 = 0x1.8p23 / @intToFloat(f32, tblsiz);
const P1: f32 = 0x1.62e430p-1;
const P2: f32 = 0x1.ebfbe0p-3;
const P3: f32 = 0x1.c6b348p-5;
@@ -75,212 +75,212 @@ fn exp2_32(x: f32) f32 {
}
var uf = x + redux;
- var i0 = @bitCast(u32, uf);
- i0 += tblsiz / 2;
+ var i_0 = @bitCast(u32, uf);
+ i_0 += tblsiz / 2;
- const k = i0 / tblsiz;
+ const k = i_0 / tblsiz;
// NOTE: musl relies on undefined overflow shift behaviour. Appears that this produces the
// intended result but should confirm how GCC/Clang handle this to ensure.
const uk = @bitCast(f64, u64(0x3FF + k) << 52);
- i0 &= tblsiz - 1;
+ i_0 &= tblsiz - 1;
uf -= redux;
const z: f64 = x - uf;
- var r: f64 = exp2ft[i0];
+ var r: f64 = exp2ft[i_0];
const t: f64 = r * z;
r = r + t * (P1 + z * P2) + t * (z * z) * (P3 + z * P4);
- return f32(r * uk);
+ return @floatCast(f32, r * uk);
}
-const exp2dt = []f64 {
+const exp2dt = []f64{
// exp2(z + eps) eps
- 0x1.6a09e667f3d5dp-1, 0x1.9880p-44,
- 0x1.6b052fa751744p-1, 0x1.8000p-50,
+ 0x1.6a09e667f3d5dp-1, 0x1.9880p-44,
+ 0x1.6b052fa751744p-1, 0x1.8000p-50,
0x1.6c012750bd9fep-1, -0x1.8780p-45,
- 0x1.6cfdcddd476bfp-1, 0x1.ec00p-46,
+ 0x1.6cfdcddd476bfp-1, 0x1.ec00p-46,
0x1.6dfb23c651a29p-1, -0x1.8000p-50,
0x1.6ef9298593ae3p-1, -0x1.c000p-52,
0x1.6ff7df9519386p-1, -0x1.fd80p-45,
0x1.70f7466f42da3p-1, -0x1.c880p-45,
- 0x1.71f75e8ec5fc3p-1, 0x1.3c00p-46,
+ 0x1.71f75e8ec5fc3p-1, 0x1.3c00p-46,
0x1.72f8286eacf05p-1, -0x1.8300p-44,
0x1.73f9a48a58152p-1, -0x1.0c00p-47,
- 0x1.74fbd35d7ccfcp-1, 0x1.f880p-45,
- 0x1.75feb564267f1p-1, 0x1.3e00p-47,
+ 0x1.74fbd35d7ccfcp-1, 0x1.f880p-45,
+ 0x1.75feb564267f1p-1, 0x1.3e00p-47,
0x1.77024b1ab6d48p-1, -0x1.7d00p-45,
0x1.780694fde5d38p-1, -0x1.d000p-50,
- 0x1.790b938ac1d00p-1, 0x1.3000p-49,
+ 0x1.790b938ac1d00p-1, 0x1.3000p-49,
0x1.7a11473eb0178p-1, -0x1.d000p-49,
- 0x1.7b17b0976d060p-1, 0x1.0400p-45,
- 0x1.7c1ed0130c133p-1, 0x1.0000p-53,
+ 0x1.7b17b0976d060p-1, 0x1.0400p-45,
+ 0x1.7c1ed0130c133p-1, 0x1.0000p-53,
0x1.7d26a62ff8636p-1, -0x1.6900p-45,
0x1.7e2f336cf4e3bp-1, -0x1.2e00p-47,
0x1.7f3878491c3e8p-1, -0x1.4580p-45,
- 0x1.80427543e1b4ep-1, 0x1.3000p-44,
- 0x1.814d2add1071ap-1, 0x1.f000p-47,
+ 0x1.80427543e1b4ep-1, 0x1.3000p-44,
+ 0x1.814d2add1071ap-1, 0x1.f000p-47,
0x1.82589994ccd7ep-1, -0x1.1c00p-45,
- 0x1.8364c1eb942d0p-1, 0x1.9d00p-45,
- 0x1.8471a4623cab5p-1, 0x1.7100p-43,
- 0x1.857f4179f5bbcp-1, 0x1.2600p-45,
+ 0x1.8364c1eb942d0p-1, 0x1.9d00p-45,
+ 0x1.8471a4623cab5p-1, 0x1.7100p-43,
+ 0x1.857f4179f5bbcp-1, 0x1.2600p-45,
0x1.868d99b4491afp-1, -0x1.2c40p-44,
0x1.879cad931a395p-1, -0x1.3000p-45,
0x1.88ac7d98a65b8p-1, -0x1.a800p-45,
0x1.89bd0a4785800p-1, -0x1.d000p-49,
- 0x1.8ace5422aa223p-1, 0x1.3280p-44,
- 0x1.8be05bad619fap-1, 0x1.2b40p-43,
+ 0x1.8ace5422aa223p-1, 0x1.3280p-44,
+ 0x1.8be05bad619fap-1, 0x1.2b40p-43,
0x1.8cf3216b54383p-1, -0x1.ed00p-45,
0x1.8e06a5e08664cp-1, -0x1.0500p-45,
- 0x1.8f1ae99157807p-1, 0x1.8280p-45,
+ 0x1.8f1ae99157807p-1, 0x1.8280p-45,
0x1.902fed0282c0ep-1, -0x1.cb00p-46,
0x1.9145b0b91ff96p-1, -0x1.5e00p-47,
- 0x1.925c353aa2ff9p-1, 0x1.5400p-48,
- 0x1.93737b0cdc64ap-1, 0x1.7200p-46,
+ 0x1.925c353aa2ff9p-1, 0x1.5400p-48,
+ 0x1.93737b0cdc64ap-1, 0x1.7200p-46,
0x1.948b82b5f98aep-1, -0x1.9000p-47,
- 0x1.95a44cbc852cbp-1, 0x1.5680p-45,
+ 0x1.95a44cbc852cbp-1, 0x1.5680p-45,
0x1.96bdd9a766f21p-1, -0x1.6d00p-44,
0x1.97d829fde4e2ap-1, -0x1.1000p-47,
- 0x1.98f33e47a23a3p-1, 0x1.d000p-45,
+ 0x1.98f33e47a23a3p-1, 0x1.d000p-45,
0x1.9a0f170ca0604p-1, -0x1.8a40p-44,
- 0x1.9b2bb4d53ff89p-1, 0x1.55c0p-44,
- 0x1.9c49182a3f15bp-1, 0x1.6b80p-45,
+ 0x1.9b2bb4d53ff89p-1, 0x1.55c0p-44,
+ 0x1.9c49182a3f15bp-1, 0x1.6b80p-45,
0x1.9d674194bb8c5p-1, -0x1.c000p-49,
- 0x1.9e86319e3238ep-1, 0x1.7d00p-46,
- 0x1.9fa5e8d07f302p-1, 0x1.6400p-46,
+ 0x1.9e86319e3238ep-1, 0x1.7d00p-46,
+ 0x1.9fa5e8d07f302p-1, 0x1.6400p-46,
0x1.a0c667b5de54dp-1, -0x1.5000p-48,
- 0x1.a1e7aed8eb8f6p-1, 0x1.9e00p-47,
- 0x1.a309bec4a2e27p-1, 0x1.ad80p-45,
+ 0x1.a1e7aed8eb8f6p-1, 0x1.9e00p-47,
+ 0x1.a309bec4a2e27p-1, 0x1.ad80p-45,
0x1.a42c980460a5dp-1, -0x1.af00p-46,
- 0x1.a5503b23e259bp-1, 0x1.b600p-47,
- 0x1.a674a8af46213p-1, 0x1.8880p-44,
- 0x1.a799e1330b3a7p-1, 0x1.1200p-46,
- 0x1.a8bfe53c12e8dp-1, 0x1.6c00p-47,
+ 0x1.a5503b23e259bp-1, 0x1.b600p-47,
+ 0x1.a674a8af46213p-1, 0x1.8880p-44,
+ 0x1.a799e1330b3a7p-1, 0x1.1200p-46,
+ 0x1.a8bfe53c12e8dp-1, 0x1.6c00p-47,
0x1.a9e6b5579fcd2p-1, -0x1.9b80p-45,
- 0x1.ab0e521356fb8p-1, 0x1.b700p-45,
- 0x1.ac36bbfd3f381p-1, 0x1.9000p-50,
- 0x1.ad5ff3a3c2780p-1, 0x1.4000p-49,
+ 0x1.ab0e521356fb8p-1, 0x1.b700p-45,
+ 0x1.ac36bbfd3f381p-1, 0x1.9000p-50,
+ 0x1.ad5ff3a3c2780p-1, 0x1.4000p-49,
0x1.ae89f995ad2a3p-1, -0x1.c900p-45,
- 0x1.afb4ce622f367p-1, 0x1.6500p-46,
- 0x1.b0e07298db790p-1, 0x1.fd40p-45,
- 0x1.b20ce6c9a89a9p-1, 0x1.2700p-46,
- 0x1.b33a2b84f1a4bp-1, 0x1.d470p-43,
+ 0x1.afb4ce622f367p-1, 0x1.6500p-46,
+ 0x1.b0e07298db790p-1, 0x1.fd40p-45,
+ 0x1.b20ce6c9a89a9p-1, 0x1.2700p-46,
+ 0x1.b33a2b84f1a4bp-1, 0x1.d470p-43,
0x1.b468415b747e7p-1, -0x1.8380p-44,
- 0x1.b59728de5593ap-1, 0x1.8000p-54,
- 0x1.b6c6e29f1c56ap-1, 0x1.ad00p-47,
- 0x1.b7f76f2fb5e50p-1, 0x1.e800p-50,
+ 0x1.b59728de5593ap-1, 0x1.8000p-54,
+ 0x1.b6c6e29f1c56ap-1, 0x1.ad00p-47,
+ 0x1.b7f76f2fb5e50p-1, 0x1.e800p-50,
0x1.b928cf22749b2p-1, -0x1.4c00p-47,
0x1.ba5b030a10603p-1, -0x1.d700p-47,
- 0x1.bb8e0b79a6f66p-1, 0x1.d900p-47,
- 0x1.bcc1e904bc1ffp-1, 0x1.2a00p-47,
+ 0x1.bb8e0b79a6f66p-1, 0x1.d900p-47,
+ 0x1.bcc1e904bc1ffp-1, 0x1.2a00p-47,
0x1.bdf69c3f3a16fp-1, -0x1.f780p-46,
0x1.bf2c25bd71db8p-1, -0x1.0a00p-46,
0x1.c06286141b2e9p-1, -0x1.1400p-46,
- 0x1.c199bdd8552e0p-1, 0x1.be00p-47,
+ 0x1.c199bdd8552e0p-1, 0x1.be00p-47,
0x1.c2d1cd9fa64eep-1, -0x1.9400p-47,
0x1.c40ab5fffd02fp-1, -0x1.ed00p-47,
- 0x1.c544778fafd15p-1, 0x1.9660p-44,
+ 0x1.c544778fafd15p-1, 0x1.9660p-44,
0x1.c67f12e57d0cbp-1, -0x1.a100p-46,
0x1.c7ba88988c1b6p-1, -0x1.8458p-42,
0x1.c8f6d9406e733p-1, -0x1.a480p-46,
- 0x1.ca3405751c4dfp-1, 0x1.b000p-51,
- 0x1.cb720dcef9094p-1, 0x1.1400p-47,
- 0x1.ccb0f2e6d1689p-1, 0x1.0200p-48,
- 0x1.cdf0b555dc412p-1, 0x1.3600p-48,
+ 0x1.ca3405751c4dfp-1, 0x1.b000p-51,
+ 0x1.cb720dcef9094p-1, 0x1.1400p-47,
+ 0x1.ccb0f2e6d1689p-1, 0x1.0200p-48,
+ 0x1.cdf0b555dc412p-1, 0x1.3600p-48,
0x1.cf3155b5bab3bp-1, -0x1.6900p-47,
- 0x1.d072d4a0789bcp-1, 0x1.9a00p-47,
+ 0x1.d072d4a0789bcp-1, 0x1.9a00p-47,
0x1.d1b532b08c8fap-1, -0x1.5e00p-46,
- 0x1.d2f87080d8a85p-1, 0x1.d280p-46,
- 0x1.d43c8eacaa203p-1, 0x1.1a00p-47,
- 0x1.d5818dcfba491p-1, 0x1.f000p-50,
+ 0x1.d2f87080d8a85p-1, 0x1.d280p-46,
+ 0x1.d43c8eacaa203p-1, 0x1.1a00p-47,
+ 0x1.d5818dcfba491p-1, 0x1.f000p-50,
0x1.d6c76e862e6a1p-1, -0x1.3a00p-47,
0x1.d80e316c9834ep-1, -0x1.cd80p-47,
- 0x1.d955d71ff6090p-1, 0x1.4c00p-48,
- 0x1.da9e603db32aep-1, 0x1.f900p-48,
- 0x1.dbe7cd63a8325p-1, 0x1.9800p-49,
+ 0x1.d955d71ff6090p-1, 0x1.4c00p-48,
+ 0x1.da9e603db32aep-1, 0x1.f900p-48,
+ 0x1.dbe7cd63a8325p-1, 0x1.9800p-49,
0x1.dd321f301b445p-1, -0x1.5200p-48,
0x1.de7d5641c05bfp-1, -0x1.d700p-46,
0x1.dfc97337b9aecp-1, -0x1.6140p-46,
- 0x1.e11676b197d5ep-1, 0x1.b480p-47,
- 0x1.e264614f5a3e7p-1, 0x1.0ce0p-43,
- 0x1.e3b333b16ee5cp-1, 0x1.c680p-47,
+ 0x1.e11676b197d5ep-1, 0x1.b480p-47,
+ 0x1.e264614f5a3e7p-1, 0x1.0ce0p-43,
+ 0x1.e3b333b16ee5cp-1, 0x1.c680p-47,
0x1.e502ee78b3fb4p-1, -0x1.9300p-47,
0x1.e653924676d68p-1, -0x1.5000p-49,
0x1.e7a51fbc74c44p-1, -0x1.7f80p-47,
0x1.e8f7977cdb726p-1, -0x1.3700p-48,
- 0x1.ea4afa2a490e8p-1, 0x1.5d00p-49,
- 0x1.eb9f4867ccae4p-1, 0x1.61a0p-46,
- 0x1.ecf482d8e680dp-1, 0x1.5500p-48,
- 0x1.ee4aaa2188514p-1, 0x1.6400p-51,
+ 0x1.ea4afa2a490e8p-1, 0x1.5d00p-49,
+ 0x1.eb9f4867ccae4p-1, 0x1.61a0p-46,
+ 0x1.ecf482d8e680dp-1, 0x1.5500p-48,
+ 0x1.ee4aaa2188514p-1, 0x1.6400p-51,
0x1.efa1bee615a13p-1, -0x1.e800p-49,
0x1.f0f9c1cb64106p-1, -0x1.a880p-48,
0x1.f252b376bb963p-1, -0x1.c900p-45,
- 0x1.f3ac948dd7275p-1, 0x1.a000p-53,
+ 0x1.f3ac948dd7275p-1, 0x1.a000p-53,
0x1.f50765b6e4524p-1, -0x1.4f00p-48,
- 0x1.f6632798844fdp-1, 0x1.a800p-51,
- 0x1.f7bfdad9cbe38p-1, 0x1.abc0p-48,
+ 0x1.f6632798844fdp-1, 0x1.a800p-51,
+ 0x1.f7bfdad9cbe38p-1, 0x1.abc0p-48,
0x1.f91d802243c82p-1, -0x1.4600p-50,
0x1.fa7c1819e908ep-1, -0x1.b0c0p-47,
0x1.fbdba3692d511p-1, -0x1.0e00p-51,
0x1.fd3c22b8f7194p-1, -0x1.0de8p-46,
- 0x1.fe9d96b2a23eep-1, 0x1.e430p-49,
- 0x1.0000000000000p+0, 0x0.0000p+0,
+ 0x1.fe9d96b2a23eep-1, 0x1.e430p-49,
+ 0x1.0000000000000p+0, 0x0.0000p+0,
0x1.00b1afa5abcbep+0, -0x1.3400p-52,
0x1.0163da9fb3303p+0, -0x1.2170p-46,
- 0x1.02168143b0282p+0, 0x1.a400p-52,
- 0x1.02c9a3e77806cp+0, 0x1.f980p-49,
+ 0x1.02168143b0282p+0, 0x1.a400p-52,
+ 0x1.02c9a3e77806cp+0, 0x1.f980p-49,
0x1.037d42e11bbcap+0, -0x1.7400p-51,
- 0x1.04315e86e7f89p+0, 0x1.8300p-50,
+ 0x1.04315e86e7f89p+0, 0x1.8300p-50,
0x1.04e5f72f65467p+0, -0x1.a3f0p-46,
0x1.059b0d315855ap+0, -0x1.2840p-47,
- 0x1.0650a0e3c1f95p+0, 0x1.1600p-48,
- 0x1.0706b29ddf71ap+0, 0x1.5240p-46,
+ 0x1.0650a0e3c1f95p+0, 0x1.1600p-48,
+ 0x1.0706b29ddf71ap+0, 0x1.5240p-46,
0x1.07bd42b72a82dp+0, -0x1.9a00p-49,
- 0x1.0874518759bd0p+0, 0x1.6400p-49,
+ 0x1.0874518759bd0p+0, 0x1.6400p-49,
0x1.092bdf66607c8p+0, -0x1.0780p-47,
0x1.09e3ecac6f383p+0, -0x1.8000p-54,
- 0x1.0a9c79b1f3930p+0, 0x1.fa00p-48,
+ 0x1.0a9c79b1f3930p+0, 0x1.fa00p-48,
0x1.0b5586cf988fcp+0, -0x1.ac80p-48,
- 0x1.0c0f145e46c8ap+0, 0x1.9c00p-50,
- 0x1.0cc922b724816p+0, 0x1.5200p-47,
+ 0x1.0c0f145e46c8ap+0, 0x1.9c00p-50,
+ 0x1.0cc922b724816p+0, 0x1.5200p-47,
0x1.0d83b23395dd8p+0, -0x1.ad00p-48,
- 0x1.0e3ec32d3d1f3p+0, 0x1.bac0p-46,
+ 0x1.0e3ec32d3d1f3p+0, 0x1.bac0p-46,
0x1.0efa55fdfa9a6p+0, -0x1.4e80p-47,
0x1.0fb66affed2f0p+0, -0x1.d300p-47,
- 0x1.1073028d7234bp+0, 0x1.1500p-48,
- 0x1.11301d0125b5bp+0, 0x1.c000p-49,
- 0x1.11edbab5e2af9p+0, 0x1.6bc0p-46,
- 0x1.12abdc06c31d5p+0, 0x1.8400p-49,
+ 0x1.1073028d7234bp+0, 0x1.1500p-48,
+ 0x1.11301d0125b5bp+0, 0x1.c000p-49,
+ 0x1.11edbab5e2af9p+0, 0x1.6bc0p-46,
+ 0x1.12abdc06c31d5p+0, 0x1.8400p-49,
0x1.136a814f2047dp+0, -0x1.ed00p-47,
- 0x1.1429aaea92de9p+0, 0x1.8e00p-49,
- 0x1.14e95934f3138p+0, 0x1.b400p-49,
- 0x1.15a98c8a58e71p+0, 0x1.5300p-47,
- 0x1.166a45471c3dfp+0, 0x1.3380p-47,
- 0x1.172b83c7d5211p+0, 0x1.8d40p-45,
+ 0x1.1429aaea92de9p+0, 0x1.8e00p-49,
+ 0x1.14e95934f3138p+0, 0x1.b400p-49,
+ 0x1.15a98c8a58e71p+0, 0x1.5300p-47,
+ 0x1.166a45471c3dfp+0, 0x1.3380p-47,
+ 0x1.172b83c7d5211p+0, 0x1.8d40p-45,
0x1.17ed48695bb9fp+0, -0x1.5d00p-47,
0x1.18af9388c8d93p+0, -0x1.c880p-46,
- 0x1.1972658375d66p+0, 0x1.1f00p-46,
- 0x1.1a35beb6fcba7p+0, 0x1.0480p-46,
+ 0x1.1972658375d66p+0, 0x1.1f00p-46,
+ 0x1.1a35beb6fcba7p+0, 0x1.0480p-46,
0x1.1af99f81387e3p+0, -0x1.7390p-43,
- 0x1.1bbe084045d54p+0, 0x1.4e40p-45,
+ 0x1.1bbe084045d54p+0, 0x1.4e40p-45,
0x1.1c82f95281c43p+0, -0x1.a200p-47,
- 0x1.1d4873168b9b2p+0, 0x1.3800p-49,
- 0x1.1e0e75eb44031p+0, 0x1.ac00p-49,
- 0x1.1ed5022fcd938p+0, 0x1.1900p-47,
+ 0x1.1d4873168b9b2p+0, 0x1.3800p-49,
+ 0x1.1e0e75eb44031p+0, 0x1.ac00p-49,
+ 0x1.1ed5022fcd938p+0, 0x1.1900p-47,
0x1.1f9c18438cdf7p+0, -0x1.b780p-46,
- 0x1.2063b88628d8fp+0, 0x1.d940p-45,
- 0x1.212be3578a81ep+0, 0x1.8000p-50,
- 0x1.21f49917ddd41p+0, 0x1.b340p-45,
- 0x1.22bdda2791323p+0, 0x1.9f80p-46,
+ 0x1.2063b88628d8fp+0, 0x1.d940p-45,
+ 0x1.212be3578a81ep+0, 0x1.8000p-50,
+ 0x1.21f49917ddd41p+0, 0x1.b340p-45,
+ 0x1.22bdda2791323p+0, 0x1.9f80p-46,
0x1.2387a6e7561e7p+0, -0x1.9c80p-46,
- 0x1.2451ffb821427p+0, 0x1.2300p-47,
+ 0x1.2451ffb821427p+0, 0x1.2300p-47,
0x1.251ce4fb2a602p+0, -0x1.3480p-46,
- 0x1.25e85711eceb0p+0, 0x1.2700p-46,
- 0x1.26b4565e27d16p+0, 0x1.1d00p-46,
- 0x1.2780e341de00fp+0, 0x1.1ee0p-44,
+ 0x1.25e85711eceb0p+0, 0x1.2700p-46,
+ 0x1.26b4565e27d16p+0, 0x1.1d00p-46,
+ 0x1.2780e341de00fp+0, 0x1.1ee0p-44,
0x1.284dfe1f5633ep+0, -0x1.4c00p-46,
0x1.291ba7591bb30p+0, -0x1.3d80p-46,
- 0x1.29e9df51fdf09p+0, 0x1.8b00p-47,
+ 0x1.29e9df51fdf09p+0, 0x1.8b00p-47,
0x1.2ab8a66d10e9bp+0, -0x1.27c0p-45,
- 0x1.2b87fd0dada3ap+0, 0x1.a340p-45,
+ 0x1.2b87fd0dada3ap+0, 0x1.a340p-45,
0x1.2c57e39771af9p+0, -0x1.0800p-46,
0x1.2d285a6e402d9p+0, -0x1.ed00p-47,
0x1.2df961f641579p+0, -0x1.4200p-48,
@@ -290,81 +290,81 @@ const exp2dt = []f64 {
0x1.31432edeea50bp+0, -0x1.0df8p-40,
0x1.32170fc4cd7b8p+0, -0x1.2480p-45,
0x1.32eb83ba8e9a2p+0, -0x1.5980p-45,
- 0x1.33c08b2641766p+0, 0x1.ed00p-46,
+ 0x1.33c08b2641766p+0, 0x1.ed00p-46,
0x1.3496266e3fa27p+0, -0x1.c000p-50,
0x1.356c55f929f0fp+0, -0x1.0d80p-44,
- 0x1.36431a2de88b9p+0, 0x1.2c80p-45,
- 0x1.371a7373aaa39p+0, 0x1.0600p-45,
+ 0x1.36431a2de88b9p+0, 0x1.2c80p-45,
+ 0x1.371a7373aaa39p+0, 0x1.0600p-45,
0x1.37f26231e74fep+0, -0x1.6600p-46,
0x1.38cae6d05d838p+0, -0x1.ae00p-47,
0x1.39a401b713ec3p+0, -0x1.4720p-43,
- 0x1.3a7db34e5a020p+0, 0x1.8200p-47,
- 0x1.3b57fbfec6e95p+0, 0x1.e800p-44,
- 0x1.3c32dc313a8f2p+0, 0x1.f800p-49,
+ 0x1.3a7db34e5a020p+0, 0x1.8200p-47,
+ 0x1.3b57fbfec6e95p+0, 0x1.e800p-44,
+ 0x1.3c32dc313a8f2p+0, 0x1.f800p-49,
0x1.3d0e544ede122p+0, -0x1.7a00p-46,
- 0x1.3dea64c1234bbp+0, 0x1.6300p-45,
+ 0x1.3dea64c1234bbp+0, 0x1.6300p-45,
0x1.3ec70df1c4eccp+0, -0x1.8a60p-43,
0x1.3fa4504ac7e8cp+0, -0x1.cdc0p-44,
- 0x1.40822c367a0bbp+0, 0x1.5b80p-45,
- 0x1.4160a21f72e95p+0, 0x1.ec00p-46,
+ 0x1.40822c367a0bbp+0, 0x1.5b80p-45,
+ 0x1.4160a21f72e95p+0, 0x1.ec00p-46,
0x1.423fb27094646p+0, -0x1.3600p-46,
- 0x1.431f5d950a920p+0, 0x1.3980p-45,
- 0x1.43ffa3f84b9ebp+0, 0x1.a000p-48,
+ 0x1.431f5d950a920p+0, 0x1.3980p-45,
+ 0x1.43ffa3f84b9ebp+0, 0x1.a000p-48,
0x1.44e0860618919p+0, -0x1.6c00p-48,
0x1.45c2042a7d201p+0, -0x1.bc00p-47,
0x1.46a41ed1d0016p+0, -0x1.2800p-46,
- 0x1.4786d668b3326p+0, 0x1.0e00p-44,
+ 0x1.4786d668b3326p+0, 0x1.0e00p-44,
0x1.486a2b5c13c00p+0, -0x1.d400p-45,
- 0x1.494e1e192af04p+0, 0x1.c200p-47,
+ 0x1.494e1e192af04p+0, 0x1.c200p-47,
0x1.4a32af0d7d372p+0, -0x1.e500p-46,
- 0x1.4b17dea6db801p+0, 0x1.7800p-47,
+ 0x1.4b17dea6db801p+0, 0x1.7800p-47,
0x1.4bfdad53629e1p+0, -0x1.3800p-46,
- 0x1.4ce41b817c132p+0, 0x1.0800p-47,
- 0x1.4dcb299fddddbp+0, 0x1.c700p-45,
+ 0x1.4ce41b817c132p+0, 0x1.0800p-47,
+ 0x1.4dcb299fddddbp+0, 0x1.c700p-45,
0x1.4eb2d81d8ab96p+0, -0x1.ce00p-46,
- 0x1.4f9b2769d2d02p+0, 0x1.9200p-46,
+ 0x1.4f9b2769d2d02p+0, 0x1.9200p-46,
0x1.508417f4531c1p+0, -0x1.8c00p-47,
0x1.516daa2cf662ap+0, -0x1.a000p-48,
- 0x1.5257de83f51eap+0, 0x1.a080p-43,
+ 0x1.5257de83f51eap+0, 0x1.a080p-43,
0x1.5342b569d4edap+0, -0x1.6d80p-45,
0x1.542e2f4f6ac1ap+0, -0x1.2440p-44,
- 0x1.551a4ca5d94dbp+0, 0x1.83c0p-43,
- 0x1.56070dde9116bp+0, 0x1.4b00p-45,
- 0x1.56f4736b529dep+0, 0x1.15a0p-43,
+ 0x1.551a4ca5d94dbp+0, 0x1.83c0p-43,
+ 0x1.56070dde9116bp+0, 0x1.4b00p-45,
+ 0x1.56f4736b529dep+0, 0x1.15a0p-43,
0x1.57e27dbe2c40ep+0, -0x1.9e00p-45,
0x1.58d12d497c76fp+0, -0x1.3080p-45,
- 0x1.59c0827ff0b4cp+0, 0x1.dec0p-43,
+ 0x1.59c0827ff0b4cp+0, 0x1.dec0p-43,
0x1.5ab07dd485427p+0, -0x1.4000p-51,
- 0x1.5ba11fba87af4p+0, 0x1.0080p-44,
+ 0x1.5ba11fba87af4p+0, 0x1.0080p-44,
0x1.5c9268a59460bp+0, -0x1.6c80p-45,
- 0x1.5d84590998e3fp+0, 0x1.69a0p-43,
+ 0x1.5d84590998e3fp+0, 0x1.69a0p-43,
0x1.5e76f15ad20e1p+0, -0x1.b400p-46,
- 0x1.5f6a320dcebcap+0, 0x1.7700p-46,
- 0x1.605e1b976dcb8p+0, 0x1.6f80p-45,
- 0x1.6152ae6cdf715p+0, 0x1.1000p-47,
+ 0x1.5f6a320dcebcap+0, 0x1.7700p-46,
+ 0x1.605e1b976dcb8p+0, 0x1.6f80p-45,
+ 0x1.6152ae6cdf715p+0, 0x1.1000p-47,
0x1.6247eb03a5531p+0, -0x1.5d00p-46,
0x1.633dd1d1929b5p+0, -0x1.2d00p-46,
0x1.6434634ccc313p+0, -0x1.a800p-49,
0x1.652b9febc8efap+0, -0x1.8600p-45,
- 0x1.6623882553397p+0, 0x1.1fe0p-40,
+ 0x1.6623882553397p+0, 0x1.1fe0p-40,
0x1.671c1c708328ep+0, -0x1.7200p-44,
- 0x1.68155d44ca97ep+0, 0x1.6800p-49,
+ 0x1.68155d44ca97ep+0, 0x1.6800p-49,
0x1.690f4b19e9471p+0, -0x1.9780p-45,
};
fn exp2_64(x: f64) f64 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
- const tblsiz = u32(exp2dt.len / 2);
- const redux: f64 = 0x1.8p52 / f64(tblsiz);
- const P1: f64 = 0x1.62e42fefa39efp-1;
- const P2: f64 = 0x1.ebfbdff82c575p-3;
- const P3: f64 = 0x1.c6b08d704a0a6p-5;
- const P4: f64 = 0x1.3b2ab88f70400p-7;
- const P5: f64 = 0x1.5d88003875c74p-10;
+ const tblsiz = @intCast(u32, exp2dt.len / 2);
+ const redux: f64 = 0x1.8p52 / @intToFloat(f64, tblsiz);
+ const P1: f64 = 0x1.62e42fefa39efp-1;
+ const P2: f64 = 0x1.ebfbdff82c575p-3;
+ const P3: f64 = 0x1.c6b08d704a0a6p-5;
+ const P4: f64 = 0x1.3b2ab88f70400p-7;
+ const P5: f64 = 0x1.5d88003875c74p-10;
const ux = @bitCast(u64, x);
- const ix = u32(ux >> 32) & 0x7FFFFFFF;
+ const ix = @intCast(u32, ux >> 32) & 0x7FFFFFFF;
// TODO: This should be handled beneath.
if (math.isNan(x)) {
@@ -386,7 +386,7 @@ fn exp2_64(x: f64) f64 {
if (ux >> 63 != 0) {
// underflow
if (x <= -1075 or x - 0x1.0p52 + 0x1.0p52 != x) {
- math.forceEval(f32(-0x1.0p-149 / x));
+ math.forceEval(@floatCast(f32, -0x1.0p-149 / x));
}
if (x <= -1075) {
return 0;
@@ -401,18 +401,18 @@ fn exp2_64(x: f64) f64 {
// reduce x
var uf = x + redux;
// NOTE: musl performs an implicit 64-bit to 32-bit u32 truncation here
- var i0 = @truncate(u32, @bitCast(u64, uf));
- i0 += tblsiz / 2;
+ var i_0 = @truncate(u32, @bitCast(u64, uf));
+ i_0 += tblsiz / 2;
- const k: u32 = i0 / tblsiz * tblsiz;
+ const k: u32 = i_0 / tblsiz * tblsiz;
const ik = @bitCast(i32, k / tblsiz);
- i0 %= tblsiz;
+ i_0 %= tblsiz;
uf -= redux;
- // r = exp2(y) = exp2t[i0] * p(z - eps[i])
+ // r = exp2(y) = exp2t[i_0] * p(z - eps[i])
var z = x - uf;
- const t = exp2dt[2 * i0];
- z -= exp2dt[2 * i0 + 1];
+ const t = exp2dt[2 * i_0];
+ z -= exp2dt[2 * i_0 + 1];
const r = t + t * z * (P1 + z * (P2 + z * (P3 + z * (P4 + z * P5))));
return math.scalbn(r, ik);
diff --git a/std/math/expm1.zig b/std/math/expm1.zig
index 316f0e3e71..6fa0194b32 100644
--- a/std/math/expm1.zig
+++ b/std/math/expm1.zig
@@ -20,12 +20,16 @@ pub fn expm1(x: var) @typeOf(x) {
fn expm1_32(x_: f32) f32 {
@setFloatMode(this, builtin.FloatMode.Strict);
+
+ if (math.isNan(x_))
+ return math.nan(f32);
+
const o_threshold: f32 = 8.8721679688e+01;
- const ln2_hi: f32 = 6.9313812256e-01;
- const ln2_lo: f32 = 9.0580006145e-06;
- const invln2: f32 = 1.4426950216e+00;
+ const ln2_hi: f32 = 6.9313812256e-01;
+ const ln2_lo: f32 = 9.0580006145e-06;
+ const invln2: f32 = 1.4426950216e+00;
const Q1: f32 = -3.3333212137e-2;
- const Q2: f32 = 1.5807170421e-3;
+ const Q2: f32 = 1.5807170421e-3;
var x = x_;
const ux = @bitCast(u32, x);
@@ -78,8 +82,8 @@ fn expm1_32(x_: f32) f32 {
kf += 0.5;
}
- k = i32(kf);
- const t = f32(k);
+ k = @floatToInt(i32, kf);
+ const t = @intToFloat(f32, k);
hi = x - t * ln2_hi;
lo = t * ln2_lo;
}
@@ -93,8 +97,7 @@ fn expm1_32(x_: f32) f32 {
math.forceEval(x * x);
}
return x;
- }
- else {
+ } else {
k = 0;
}
@@ -124,7 +127,7 @@ fn expm1_32(x_: f32) f32 {
}
}
- const twopk = @bitCast(f32, u32((0x7F +% k) << 23));
+ const twopk = @bitCast(f32, @intCast(u32, (0x7F +% k) << 23));
if (k < 0 or k > 56) {
var y = x - e + 1.0;
@@ -137,7 +140,7 @@ fn expm1_32(x_: f32) f32 {
return y - 1.0;
}
- const uf = @bitCast(f32, u32(0x7F -% k) << 23);
+ const uf = @bitCast(f32, @intCast(u32, 0x7F -% k) << 23);
if (k < 23) {
return (x - e + (1 - uf)) * twopk;
} else {
@@ -147,19 +150,23 @@ fn expm1_32(x_: f32) f32 {
fn expm1_64(x_: f64) f64 {
@setFloatMode(this, builtin.FloatMode.Strict);
+
+ if (math.isNan(x_))
+ return math.nan(f64);
+
const o_threshold: f64 = 7.09782712893383973096e+02;
- const ln2_hi: f64 = 6.93147180369123816490e-01;
- const ln2_lo: f64 = 1.90821492927058770002e-10;
- const invln2: f64 = 1.44269504088896338700e+00;
+ const ln2_hi: f64 = 6.93147180369123816490e-01;
+ const ln2_lo: f64 = 1.90821492927058770002e-10;
+ const invln2: f64 = 1.44269504088896338700e+00;
const Q1: f64 = -3.33333333333331316428e-02;
- const Q2: f64 = 1.58730158725481460165e-03;
+ const Q2: f64 = 1.58730158725481460165e-03;
const Q3: f64 = -7.93650757867487942473e-05;
- const Q4: f64 = 4.00821782732936239552e-06;
+ const Q4: f64 = 4.00821782732936239552e-06;
const Q5: f64 = -2.01099218183624371326e-07;
var x = x_;
const ux = @bitCast(u64, x);
- const hx = u32(ux >> 32) & 0x7FFFFFFF;
+ const hx = @intCast(u32, ux >> 32) & 0x7FFFFFFF;
const sign = ux >> 63;
if (math.isNegativeInf(x)) {
@@ -208,8 +215,8 @@ fn expm1_64(x_: f64) f64 {
kf += 0.5;
}
- k = i32(kf);
- const t = f64(k);
+ k = @floatToInt(i32, kf);
+ const t = @intToFloat(f64, k);
hi = x - t * ln2_hi;
lo = t * ln2_lo;
}
@@ -220,11 +227,10 @@ fn expm1_64(x_: f64) f64 {
// |x| < 2^(-54)
else if (hx < 0x3C900000) {
if (hx < 0x00100000) {
- math.forceEval(f32(x));
+ math.forceEval(@floatCast(f32, x));
}
return x;
- }
- else {
+ } else {
k = 0;
}
@@ -254,7 +260,7 @@ fn expm1_64(x_: f64) f64 {
}
}
- const twopk = @bitCast(f64, u64(0x3FF +% k) << 52);
+ const twopk = @bitCast(f64, @intCast(u64, 0x3FF +% k) << 52);
if (k < 0 or k > 56) {
var y = x - e + 1.0;
@@ -267,7 +273,7 @@ fn expm1_64(x_: f64) f64 {
return y - 1.0;
}
- const uf = @bitCast(f64, u64(0x3FF -% k) << 52);
+ const uf = @bitCast(f64, @intCast(u64, 0x3FF -% k) << 52);
if (k < 20) {
return (x - e + (1 - uf)) * twopk;
} else {
diff --git a/std/math/fabs.zig b/std/math/fabs.zig
index 821624e1bc..ae8f9616a8 100644
--- a/std/math/fabs.zig
+++ b/std/math/fabs.zig
@@ -10,12 +10,19 @@ const assert = std.debug.assert;
pub fn fabs(x: var) @typeOf(x) {
const T = @typeOf(x);
return switch (T) {
+ f16 => fabs16(x),
f32 => fabs32(x),
f64 => fabs64(x),
else => @compileError("fabs not implemented for " ++ @typeName(T)),
};
}
+fn fabs16(x: f16) f16 {
+ var u = @bitCast(u16, x);
+ u &= 0x7FFF;
+ return @bitCast(f16, u);
+}
+
fn fabs32(x: f32) f32 {
var u = @bitCast(u32, x);
u &= 0x7FFFFFFF;
@@ -29,10 +36,16 @@ fn fabs64(x: f64) f64 {
}
test "math.fabs" {
+ assert(fabs(f16(1.0)) == fabs16(1.0));
assert(fabs(f32(1.0)) == fabs32(1.0));
assert(fabs(f64(1.0)) == fabs64(1.0));
}
+test "math.fabs16" {
+ assert(fabs16(1.0) == 1.0);
+ assert(fabs16(-1.0) == 1.0);
+}
+
test "math.fabs32" {
assert(fabs32(1.0) == 1.0);
assert(fabs32(-1.0) == 1.0);
@@ -43,6 +56,12 @@ test "math.fabs64" {
assert(fabs64(-1.0) == 1.0);
}
+test "math.fabs16.special" {
+ assert(math.isPositiveInf(fabs(math.inf(f16))));
+ assert(math.isPositiveInf(fabs(-math.inf(f16))));
+ assert(math.isNan(fabs(math.nan(f16))));
+}
+
test "math.fabs32.special" {
assert(math.isPositiveInf(fabs(math.inf(f32))));
assert(math.isPositiveInf(fabs(-math.inf(f32))));
diff --git a/std/math/floor.zig b/std/math/floor.zig
index 1b8e2dfeed..0858598eea 100644
--- a/std/math/floor.zig
+++ b/std/math/floor.zig
@@ -12,15 +12,50 @@ const math = std.math;
pub fn floor(x: var) @typeOf(x) {
const T = @typeOf(x);
return switch (T) {
+ f16 => floor16(x),
f32 => floor32(x),
f64 => floor64(x),
else => @compileError("floor not implemented for " ++ @typeName(T)),
};
}
+fn floor16(x: f16) f16 {
+ var u = @bitCast(u16, x);
+ const e = @intCast(i16, (u >> 10) & 31) - 15;
+ var m: u16 = undefined;
+
+ // TODO: Shouldn't need this explicit check.
+ if (x == 0.0) {
+ return x;
+ }
+
+ if (e >= 10) {
+ return x;
+ }
+
+ if (e >= 0) {
+ m = u16(1023) >> @intCast(u4, e);
+ if (u & m == 0) {
+ return x;
+ }
+ math.forceEval(x + 0x1.0p120);
+ if (u >> 15 != 0) {
+ u += m;
+ }
+ return @bitCast(f16, u & ~m);
+ } else {
+ math.forceEval(x + 0x1.0p120);
+ if (u >> 15 == 0) {
+ return 0.0;
+ } else {
+ return -1.0;
+ }
+ }
+}
+
fn floor32(x: f32) f32 {
var u = @bitCast(u32, x);
- const e = i32((u >> 23) & 0xFF) - 0x7F;
+ const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
var m: u32 = undefined;
// TODO: Shouldn't need this explicit check.
@@ -33,7 +68,7 @@ fn floor32(x: f32) f32 {
}
if (e >= 0) {
- m = u32(0x007FFFFF) >> u5(e);
+ m = u32(0x007FFFFF) >> @intCast(u5, e);
if (u & m == 0) {
return x;
}
@@ -57,7 +92,7 @@ fn floor64(x: f64) f64 {
const e = (u >> 52) & 0x7FF;
var y: f64 = undefined;
- if (e >= 0x3FF+52 or x == 0) {
+ if (e >= 0x3FF + 52 or x == 0) {
return x;
}
@@ -69,7 +104,7 @@ fn floor64(x: f64) f64 {
y = x + math.f64_toint - math.f64_toint - x;
}
- if (e <= 0x3FF-1) {
+ if (e <= 0x3FF - 1) {
math.forceEval(y);
if (u >> 63 != 0) {
return -1.0;
@@ -84,10 +119,17 @@ fn floor64(x: f64) f64 {
}
test "math.floor" {
+ assert(floor(f16(1.3)) == floor16(1.3));
assert(floor(f32(1.3)) == floor32(1.3));
assert(floor(f64(1.3)) == floor64(1.3));
}
+test "math.floor16" {
+ assert(floor16(1.3) == 1.0);
+ assert(floor16(-1.3) == -2.0);
+ assert(floor16(0.2) == 0.0);
+}
+
test "math.floor32" {
assert(floor32(1.3) == 1.0);
assert(floor32(-1.3) == -2.0);
@@ -100,6 +142,14 @@ test "math.floor64" {
assert(floor64(0.2) == 0.0);
}
+test "math.floor16.special" {
+ assert(floor16(0.0) == 0.0);
+ assert(floor16(-0.0) == -0.0);
+ assert(math.isPositiveInf(floor16(math.inf(f16))));
+ assert(math.isNegativeInf(floor16(-math.inf(f16))));
+ assert(math.isNan(floor16(math.nan(f16))));
+}
+
test "math.floor32.special" {
assert(floor32(0.0) == 0.0);
assert(floor32(-0.0) == -0.0);
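
floor16 mirrors floor32 with binary16 parameters: a 5-bit exponent with bias 15 and 10 mantissa bits, so for an unbiased exponent e in [0, 10) the mask u16(1023) >> e covers exactly the fractional mantissa bits; negative inputs have the mask added first, so truncation rounds toward minus infinity. A worked sketch, values illustrative:

    // 1.5 as f16 is 0x3E00 (e == 0); clearing the 0x03FF fraction bits
    // leaves 0x3C00 == 1.0, and for -1.5 the pre-add makes it -2.0.
    assert(floor(f16(1.5)) == 1.0);
    assert(floor(f16(-1.5)) == -2.0);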
diff --git a/std/math/fma.zig b/std/math/fma.zig
index e8d146db34..21faf4118d 100644
--- a/std/math/fma.zig
+++ b/std/math/fma.zig
@@ -5,7 +5,7 @@ const assert = std.debug.assert;
pub fn fma(comptime T: type, x: T, y: T, z: T) T {
return switch (T) {
f32 => fma32(x, y, z),
- f64 => fma64(x, y ,z),
+ f64 => fma64(x, y, z),
else => @compileError("fma not implemented for " ++ @typeName(T)),
};
}
@@ -17,10 +17,10 @@ fn fma32(x: f32, y: f32, z: f32) f32 {
const e = (u >> 52) & 0x7FF;
if ((u & 0x1FFFFFFF) != 0x10000000 or e == 0x7FF or xy_z - xy == z) {
- return f32(xy_z);
+ return @floatCast(f32, xy_z);
} else {
// TODO: Handle inexact case with double-rounding
- return f32(xy_z);
+ return @floatCast(f32, xy_z);
}
}
@@ -71,7 +71,10 @@ fn fma64(x: f64, y: f64, z: f64) f64 {
}
}
-const dd = struct { hi: f64, lo: f64, };
+const dd = struct {
+ hi: f64,
+ lo: f64,
+};
fn dd_add(a: f64, b: f64) dd {
var ret: dd = undefined;
@@ -121,7 +124,7 @@ fn add_and_denorm(a: f64, b: f64, scale: i32) f64 {
var sum = dd_add(a, b);
if (sum.lo != 0) {
var uhii = @bitCast(u64, sum.hi);
- const bits_lost = -i32((uhii >> 52) & 0x7FF) - scale + 1;
+ const bits_lost = -@intCast(i32, (uhii >> 52) & 0x7FF) - scale + 1;
if ((bits_lost != 1) == (uhii & 1 != 0)) {
const uloi = @bitCast(u64, sum.lo);
uhii += 1 - (((uhii ^ uloi) >> 62) & 2);
diff --git a/std/math/frexp.zig b/std/math/frexp.zig
index b58af0a9bc..dfc790fdd9 100644
--- a/std/math/frexp.zig
+++ b/std/math/frexp.zig
@@ -30,7 +30,7 @@ fn frexp32(x: f32) frexp32_result {
var result: frexp32_result = undefined;
var y = @bitCast(u32, x);
- const e = i32(y >> 23) & 0xFF;
+ const e = @intCast(i32, y >> 23) & 0xFF;
if (e == 0) {
if (x != 0) {
@@ -67,7 +67,7 @@ fn frexp64(x: f64) frexp64_result {
var result: frexp64_result = undefined;
var y = @bitCast(u64, x);
- const e = i32(y >> 52) & 0x7FF;
+ const e = @intCast(i32, y >> 52) & 0x7FF;
if (e == 0) {
if (x != 0) {
diff --git a/std/math/hypot.zig b/std/math/hypot.zig
index 06427d0865..f834f422e6 100644
--- a/std/math/hypot.zig
+++ b/std/math/hypot.zig
@@ -39,26 +39,26 @@ fn hypot32(x: f32, y: f32) f32 {
}
var z: f32 = 1.0;
- if (ux >= (0x7F+60) << 23) {
+ if (ux >= (0x7F + 60) << 23) {
z = 0x1.0p90;
xx *= 0x1.0p-90;
yy *= 0x1.0p-90;
- } else if (uy < (0x7F-60) << 23) {
+ } else if (uy < (0x7F - 60) << 23) {
z = 0x1.0p-90;
xx *= 0x1.0p-90;
yy *= 0x1.0p-90;
}
- return z * math.sqrt(f32(f64(x) * x + f64(y) * y));
+ return z * math.sqrt(@floatCast(f32, f64(x) * x + f64(y) * y));
}
-fn sq(hi: &f64, lo: &f64, x: f64) void {
+fn sq(hi: *f64, lo: *f64, x: f64) void {
const split: f64 = 0x1.0p27 + 1.0;
const xc = x * split;
const xh = x - xc + xc;
const xl = x - xh;
- *hi = x * x;
- *lo = xh * xh - *hi + 2 * xh * xl + xl * xl;
+ hi.* = x * x;
+ lo.* = xh * xh - hi.* + 2 * xh * xl + xl * xl;
}
fn hypot64(x: f64, y: f64) f64 {
diff --git a/std/math/ilogb.zig b/std/math/ilogb.zig
index f1f33aff55..a24f580a32 100644
--- a/std/math/ilogb.zig
+++ b/std/math/ilogb.zig
@@ -23,7 +23,7 @@ const fp_ilogb0 = fp_ilogbnan;
fn ilogb32(x: f32) i32 {
var u = @bitCast(u32, x);
- var e = i32((u >> 23) & 0xFF);
+ var e = @intCast(i32, (u >> 23) & 0xFF);
// TODO: We should be able to merge this with the lower check.
if (math.isNan(x)) {
@@ -59,7 +59,7 @@ fn ilogb32(x: f32) i32 {
fn ilogb64(x: f64) i32 {
var u = @bitCast(u64, x);
- var e = i32((u >> 52) & 0x7FF);
+ var e = @intCast(i32, (u >> 52) & 0x7FF);
if (math.isNan(x)) {
return @maxValue(i32);
diff --git a/std/math/index.zig b/std/math/index.zig
index 477dafcbcc..e5fd0f3685 100644
--- a/std/math/index.zig
+++ b/std/math/index.zig
@@ -19,6 +19,18 @@ pub const f32_max = 3.40282346638528859812e+38;
pub const f32_epsilon = 1.1920928955078125e-07;
pub const f32_toint = 1.0 / f32_epsilon;
+pub const f16_true_min = 0.000000059604644775390625; // 2**-24
+pub const f16_min = 0.00006103515625; // 2**-14
+pub const f16_max = 65504;
+pub const f16_epsilon = 0.0009765625; // 2**-10
+pub const f16_toint = 1.0 / f16_epsilon;
+
+pub const nan_u16 = u16(0x7C01);
+pub const nan_f16 = @bitCast(f16, nan_u16);
+
+pub const inf_u16 = u16(0x7C00);
+pub const inf_f16 = @bitCast(f16, inf_u16);
+
pub const nan_u32 = u32(0x7F800001);
pub const nan_f32 = @bitCast(f32, nan_u32);
@@ -44,15 +56,20 @@ pub fn approxEq(comptime T: type, x: T, y: T, epsilon: T) bool {
pub fn forceEval(value: var) void {
const T = @typeOf(value);
switch (T) {
+ f16 => {
+ var x: f16 = undefined;
+ const p = @ptrCast(*volatile f16, &x);
+ p.* = x;
+ },
f32 => {
var x: f32 = undefined;
- const p = @ptrCast(&volatile f32, &x);
- *p = x;
+ const p = @ptrCast(*volatile f32, &x);
+ p.* = x;
},
f64 => {
var x: f64 = undefined;
- const p = @ptrCast(&volatile f64, &x);
- *p = x;
+ const p = @ptrCast(*volatile f64, &x);
+ p.* = x;
},
else => {
@compileError("forceEval not implemented for " ++ @typeName(T));
@@ -129,6 +146,11 @@ pub const cos = @import("cos.zig").cos;
pub const sin = @import("sin.zig").sin;
pub const tan = @import("tan.zig").tan;
+pub const complex = @import("complex/index.zig");
+pub const Complex = complex.Complex;
+
+pub const big = @import("big/index.zig");
+
test "math" {
_ = @import("nan.zig");
_ = @import("isnan.zig");
@@ -172,8 +194,37 @@ test "math" {
_ = @import("sin.zig");
_ = @import("cos.zig");
_ = @import("tan.zig");
+
+ _ = @import("complex/index.zig");
+
+ _ = @import("big/index.zig");
}
+pub fn floatMantissaBits(comptime T: type) comptime_int {
+ assert(@typeId(T) == builtin.TypeId.Float);
+
+ return switch (T.bit_count) {
+ 16 => 10,
+ 32 => 23,
+ 64 => 52,
+ 80 => 64,
+ 128 => 112,
+ else => @compileError("unknown floating point type " ++ @typeName(T)),
+ };
+}
+
+pub fn floatExponentBits(comptime T: type) comptime_int {
+ assert(@typeId(T) == builtin.TypeId.Float);
+
+ return switch (T.bit_count) {
+ 16 => 5,
+ 32 => 8,
+ 64 => 11,
+ 80 => 15,
+ 128 => 15,
+ else => @compileError("unknown floating point type " ++ @typeName(T)),
+ };
+}
pub fn min(x: var, y: var) @typeOf(x + y) {
return if (x < y) x else y;
@@ -219,7 +270,7 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: var) T {
const abs_shift_amt = absCast(shift_amt);
- const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else Log2Int(T)(abs_shift_amt);
+ const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
if (@typeOf(shift_amt).is_signed) {
if (shift_amt >= 0) {
@@ -243,7 +294,7 @@ test "math.shl" {
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: var) T {
const abs_shift_amt = absCast(shift_amt);
- const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else Log2Int(T)(abs_shift_amt);
+ const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
if (@typeOf(shift_amt).is_signed) {
if (shift_amt >= 0) {
@@ -275,10 +326,10 @@ pub fn rotr(comptime T: type, x: T, r: var) T {
}
test "math.rotr" {
- assert(rotr(u8, 0b00000001, usize(0)) == 0b00000001);
- assert(rotr(u8, 0b00000001, usize(9)) == 0b10000000);
- assert(rotr(u8, 0b00000001, usize(8)) == 0b00000001);
- assert(rotr(u8, 0b00000001, usize(4)) == 0b00010000);
+ assert(rotr(u8, 0b00000001, usize(0)) == 0b00000001);
+ assert(rotr(u8, 0b00000001, usize(9)) == 0b10000000);
+ assert(rotr(u8, 0b00000001, usize(8)) == 0b00000001);
+ assert(rotr(u8, 0b00000001, usize(4)) == 0b00010000);
assert(rotr(u8, 0b00000001, isize(-1)) == 0b00000010);
}
@@ -294,16 +345,22 @@ pub fn rotl(comptime T: type, x: T, r: var) T {
}
test "math.rotl" {
- assert(rotl(u8, 0b00000001, usize(0)) == 0b00000001);
- assert(rotl(u8, 0b00000001, usize(9)) == 0b00000010);
- assert(rotl(u8, 0b00000001, usize(8)) == 0b00000001);
- assert(rotl(u8, 0b00000001, usize(4)) == 0b00010000);
+ assert(rotl(u8, 0b00000001, usize(0)) == 0b00000001);
+ assert(rotl(u8, 0b00000001, usize(9)) == 0b00000010);
+ assert(rotl(u8, 0b00000001, usize(8)) == 0b00000001);
+ assert(rotl(u8, 0b00000001, usize(4)) == 0b00010000);
assert(rotl(u8, 0b00000001, isize(-1)) == 0b10000000);
}
-
pub fn Log2Int(comptime T: type) type {
- return @IntType(false, log2(T.bit_count));
+ // comptime ceil log2
+ comptime var count = 0;
+ comptime var s = T.bit_count - 1;
+ inline while (s != 0) : (s >>= 1) {
+ count += 1;
+ }
+
+ return @IntType(false, count);
}
test "math overflow functions" {
@@ -318,14 +375,14 @@ fn testOverflow() void {
assert((shlExact(i32, 0b11, 4) catch unreachable) == 0b110000);
}
-
pub fn absInt(x: var) !@typeOf(x) {
const T = @typeOf(x);
comptime assert(@typeId(T) == builtin.TypeId.Int); // must pass an integer to absInt
comptime assert(T.is_signed); // must pass a signed integer to absInt
- if (x == @minValue(@typeOf(x)))
+
+ if (x == @minValue(@typeOf(x))) {
return error.Overflow;
- {
+ } else {
@setRuntimeSafety(false);
return if (x < 0) -x else x;
}
@@ -344,10 +401,8 @@ pub const absFloat = @import("fabs.zig").fabs;
pub fn divTrunc(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
- if (denominator == 0)
- return error.DivisionByZero;
- if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == @minValue(T) and denominator == -1)
- return error.Overflow;
+ if (denominator == 0) return error.DivisionByZero;
+ if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == @minValue(T) and denominator == -1) return error.Overflow;
return @divTrunc(numerator, denominator);
}
@@ -367,10 +422,8 @@ fn testDivTrunc() void {
pub fn divFloor(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
- if (denominator == 0)
- return error.DivisionByZero;
- if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == @minValue(T) and denominator == -1)
- return error.Overflow;
+ if (denominator == 0) return error.DivisionByZero;
+ if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == @minValue(T) and denominator == -1) return error.Overflow;
return @divFloor(numerator, denominator);
}
@@ -390,13 +443,10 @@ fn testDivFloor() void {
pub fn divExact(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
- if (denominator == 0)
- return error.DivisionByZero;
- if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == @minValue(T) and denominator == -1)
- return error.Overflow;
+ if (denominator == 0) return error.DivisionByZero;
+ if (@typeId(T) == builtin.TypeId.Int and T.is_signed and numerator == @minValue(T) and denominator == -1) return error.Overflow;
const result = @divTrunc(numerator, denominator);
- if (result * denominator != numerator)
- return error.UnexpectedRemainder;
+ if (result * denominator != numerator) return error.UnexpectedRemainder;
return result;
}
@@ -418,10 +468,8 @@ fn testDivExact() void {
pub fn mod(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
- if (denominator == 0)
- return error.DivisionByZero;
- if (denominator < 0)
- return error.NegativeDenominator;
+ if (denominator == 0) return error.DivisionByZero;
+ if (denominator < 0) return error.NegativeDenominator;
return @mod(numerator, denominator);
}
@@ -443,10 +491,8 @@ fn testMod() void {
pub fn rem(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
- if (denominator == 0)
- return error.DivisionByZero;
- if (denominator < 0)
- return error.NegativeDenominator;
+ if (denominator == 0) return error.DivisionByZero;
+ if (denominator < 0) return error.NegativeDenominator;
return @rem(numerator, denominator);
}
@@ -470,10 +516,9 @@ fn testRem() void {
/// Result is an unsigned integer.
pub fn absCast(x: var) @IntType(false, @typeOf(x).bit_count) {
const uint = @IntType(false, @typeOf(x).bit_count);
- if (x >= 0)
- return uint(x);
+ if (x >= 0) return @intCast(uint, x);
- return uint(-(x + 1)) + 1;
+ return @intCast(uint, -(x + 1)) + 1;
}
test "math.absCast" {
@@ -490,17 +535,14 @@ test "math.absCast" {
/// Returns the negation of the integer parameter.
/// Result is a signed integer.
pub fn negateCast(x: var) !@IntType(true, @typeOf(x).bit_count) {
- if (@typeOf(x).is_signed)
- return negate(x);
+ if (@typeOf(x).is_signed) return negate(x);
const int = @IntType(true, @typeOf(x).bit_count);
- if (x > -@minValue(int))
- return error.Overflow;
+ if (x > -@minValue(int)) return error.Overflow;
- if (x == -@minValue(int))
- return @minValue(int);
+ if (x == -@minValue(int)) return @minValue(int);
- return -int(x);
+ return -@intCast(int, x);
}
test "math.negateCast" {
@@ -513,7 +555,7 @@ test "math.negateCast" {
if (negateCast(u32(@maxValue(i32) + 10))) |_| unreachable else |err| assert(err == error.Overflow);
}
-/// Cast an integer to a different integer type. If the value doesn't fit,
+/// Cast an integer to a different integer type. If the value doesn't fit,
/// return an error.
pub fn cast(comptime T: type, x: var) (error{Overflow}!T) {
comptime assert(@typeId(T) == builtin.TypeId.Int); // must pass an integer
@@ -523,7 +565,7 @@ pub fn cast(comptime T: type, x: var) (error{Overflow}!T) {
} else if (@minValue(@typeOf(x)) < @minValue(T) and x < @minValue(T)) {
return error.Overflow;
} else {
- return T(x);
+ return @intCast(T, x);
}
}
@@ -537,11 +579,22 @@ test "math.cast" {
assert(@typeOf(try cast(u8, u32(255))) == u8);
}
+pub const AlignCastError = error{UnalignedMemory};
+
+/// Align-casts a pointer, returning an error if it has the wrong alignment.
+pub fn alignCast(comptime alignment: u29, ptr: var) AlignCastError!@typeOf(@alignCast(alignment, ptr)) {
+ const addr = @ptrToInt(ptr);
+ if (addr % alignment != 0) {
+ return error.UnalignedMemory;
+ }
+ return @alignCast(alignment, ptr);
+}
+
pub fn floorPowerOfTwo(comptime T: type, value: T) T {
var x = value;
comptime var i = 1;
- inline while(T.bit_count > i) : (i *= 2) {
+ inline while (T.bit_count > i) : (i *= 2) {
x |= (x >> i);
}
@@ -553,6 +606,32 @@ test "math.floorPowerOfTwo" {
comptime testFloorPowerOfTwo();
}
+pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
+ assert(x != 0);
+ return @intCast(Log2Int(T), T.bit_count - 1 - @clz(x));
+}
+
+pub fn log2_int_ceil(comptime T: type, x: T) Log2Int(T) {
+ assert(x != 0);
+ const log2_val = log2_int(T, x);
+ if (T(1) << log2_val == x)
+ return log2_val;
+ return log2_val + 1;
+}
+
+test "std.math.log2_int_ceil" {
+ assert(log2_int_ceil(u32, 1) == 0);
+ assert(log2_int_ceil(u32, 2) == 1);
+ assert(log2_int_ceil(u32, 3) == 2);
+ assert(log2_int_ceil(u32, 4) == 2);
+ assert(log2_int_ceil(u32, 5) == 3);
+ assert(log2_int_ceil(u32, 6) == 3);
+ assert(log2_int_ceil(u32, 7) == 3);
+ assert(log2_int_ceil(u32, 8) == 3);
+ assert(log2_int_ceil(u32, 9) == 4);
+ assert(log2_int_ceil(u32, 10) == 4);
+}
+
fn testFloorPowerOfTwo() void {
assert(floorPowerOfTwo(u32, 63) == 32);
assert(floorPowerOfTwo(u32, 64) == 64);
@@ -561,3 +640,13 @@ fn testFloorPowerOfTwo() void {
assert(floorPowerOfTwo(u4, 8) == 8);
assert(floorPowerOfTwo(u4, 9) == 8);
}
+
+pub fn lossyCast(comptime T: type, value: var) T {
+ switch (@typeInfo(@typeOf(value))) {
+ builtin.TypeId.Int => return @intToFloat(T, value),
+ builtin.TypeId.Float => return @floatCast(T, value),
+ builtin.TypeId.ComptimeInt => return T(value),
+ builtin.TypeId.ComptimeFloat => return T(value),
+ else => @compileError("bad type"),
+ }
+}
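
// Editor's note (illustrative, not part of the original change): lossyCast is intended
// for converting any integer or float value to a float destination type, as in the
// float_base computation added to std/math/log.zig below.
test "math.lossyCast (illustrative)" {
    assert(lossyCast(f64, u32(10)) == f64(10));
    assert(lossyCast(f64, f32(10.0)) == f64(10));
    assert(lossyCast(f64, 10) == f64(10));
    assert(lossyCast(f64, 10.0) == f64(10));
}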
diff --git a/std/math/inf.zig b/std/math/inf.zig
index bde90b2be1..62f5ef7c0d 100644
--- a/std/math/inf.zig
+++ b/std/math/inf.zig
@@ -1,9 +1,9 @@
const std = @import("../index.zig");
const math = std.math;
-const assert = std.debug.assert;
pub fn inf(comptime T: type) T {
return switch (T) {
+ f16 => @bitCast(f16, math.inf_u16),
f32 => @bitCast(f32, math.inf_u32),
f64 => @bitCast(f64, math.inf_u64),
else => @compileError("inf not implemented for " ++ @typeName(T)),
diff --git a/std/math/isfinite.zig b/std/math/isfinite.zig
index 37ead03bba..3a5d4f01bb 100644
--- a/std/math/isfinite.zig
+++ b/std/math/isfinite.zig
@@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isFinite(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ const bits = @bitCast(u16, x);
+ return bits & 0x7FFF < 0x7C00;
+ },
f32 => {
const bits = @bitCast(u32, x);
return bits & 0x7FFFFFFF < 0x7F800000;
@@ -20,10 +24,14 @@ pub fn isFinite(x: var) bool {
}
test "math.isFinite" {
+ assert(isFinite(f16(0.0)));
+ assert(isFinite(f16(-0.0)));
assert(isFinite(f32(0.0)));
assert(isFinite(f32(-0.0)));
assert(isFinite(f64(0.0)));
assert(isFinite(f64(-0.0)));
+ assert(!isFinite(math.inf(f16)));
+ assert(!isFinite(-math.inf(f16)));
assert(!isFinite(math.inf(f32)));
assert(!isFinite(-math.inf(f32)));
assert(!isFinite(math.inf(f64)));
diff --git a/std/math/isinf.zig b/std/math/isinf.zig
index a976fb73d2..cf68b5769c 100644
--- a/std/math/isinf.zig
+++ b/std/math/isinf.zig
@@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isInf(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ const bits = @bitCast(u16, x);
+ return bits & 0x7FFF == 0x7C00;
+ },
f32 => {
const bits = @bitCast(u32, x);
return bits & 0x7FFFFFFF == 0x7F800000;
@@ -22,6 +26,9 @@ pub fn isInf(x: var) bool {
pub fn isPositiveInf(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ return @bitCast(u16, x) == 0x7C00;
+ },
f32 => {
return @bitCast(u32, x) == 0x7F800000;
},
@@ -37,6 +44,9 @@ pub fn isPositiveInf(x: var) bool {
pub fn isNegativeInf(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ return @bitCast(u16, x) == 0xFC00;
+ },
f32 => {
return @bitCast(u32, x) == 0xFF800000;
},
@@ -50,10 +60,14 @@ pub fn isNegativeInf(x: var) bool {
}
test "math.isInf" {
+ assert(!isInf(f16(0.0)));
+ assert(!isInf(f16(-0.0)));
assert(!isInf(f32(0.0)));
assert(!isInf(f32(-0.0)));
assert(!isInf(f64(0.0)));
assert(!isInf(f64(-0.0)));
+ assert(isInf(math.inf(f16)));
+ assert(isInf(-math.inf(f16)));
assert(isInf(math.inf(f32)));
assert(isInf(-math.inf(f32)));
assert(isInf(math.inf(f64)));
@@ -61,10 +75,14 @@ test "math.isInf" {
}
test "math.isPositiveInf" {
+ assert(!isPositiveInf(f16(0.0)));
+ assert(!isPositiveInf(f16(-0.0)));
assert(!isPositiveInf(f32(0.0)));
assert(!isPositiveInf(f32(-0.0)));
assert(!isPositiveInf(f64(0.0)));
assert(!isPositiveInf(f64(-0.0)));
+ assert(isPositiveInf(math.inf(f16)));
+ assert(!isPositiveInf(-math.inf(f16)));
assert(isPositiveInf(math.inf(f32)));
assert(!isPositiveInf(-math.inf(f32)));
assert(isPositiveInf(math.inf(f64)));
@@ -72,10 +90,14 @@ test "math.isPositiveInf" {
}
test "math.isNegativeInf" {
+ assert(!isNegativeInf(f16(0.0)));
+ assert(!isNegativeInf(f16(-0.0)));
assert(!isNegativeInf(f32(0.0)));
assert(!isNegativeInf(f32(-0.0)));
assert(!isNegativeInf(f64(0.0)));
assert(!isNegativeInf(f64(-0.0)));
+ assert(!isNegativeInf(math.inf(f16)));
+ assert(isNegativeInf(-math.inf(f16)));
assert(!isNegativeInf(math.inf(f32)));
assert(isNegativeInf(-math.inf(f32)));
assert(!isNegativeInf(math.inf(f64)));
diff --git a/std/math/isnan.zig b/std/math/isnan.zig
index 67971e3d0c..ef3002d8e1 100644
--- a/std/math/isnan.zig
+++ b/std/math/isnan.zig
@@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isNan(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ const bits = @bitCast(u16, x);
+ return (bits & 0x7fff) > 0x7c00;
+ },
f32 => {
const bits = @bitCast(u32, x);
return bits & 0x7FFFFFFF > 0x7F800000;
@@ -26,8 +30,10 @@ pub fn isSignalNan(x: var) bool {
}
test "math.isNan" {
+ assert(isNan(math.nan(f16)));
assert(isNan(math.nan(f32)));
assert(isNan(math.nan(f64)));
+ assert(!isNan(f16(1.0)));
assert(!isNan(f32(1.0)));
assert(!isNan(f64(1.0)));
}
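
// Editor's illustrative sketch (not part of the original change): the f16 checks above
// follow the IEEE binary16 layout (1 sign bit, 5 exponent bits, 10 mantissa bits), so
// 0x7C00 is the exponent mask and any value strictly above it, ignoring the sign, is NaN.
test "f16 bit layout (illustrative)" {
    assert(@bitCast(u16, f16(1.0)) == 0x3C00);
    assert(@bitCast(u16, math.inf(f16)) == 0x7C00);
    assert(@bitCast(u16, math.nan(f16)) > 0x7C00);
}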
diff --git a/std/math/isnormal.zig b/std/math/isnormal.zig
index d5c1061cb1..22109936c4 100644
--- a/std/math/isnormal.zig
+++ b/std/math/isnormal.zig
@@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isNormal(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ const bits = @bitCast(u16, x);
+ return (bits + 1024) & 0x7FFF >= 2048;
+ },
f32 => {
const bits = @bitCast(u32, x);
return (bits + 0x00800000) & 0x7FFFFFFF >= 0x01000000;
@@ -20,8 +24,13 @@ pub fn isNormal(x: var) bool {
}
test "math.isNormal" {
+ assert(!isNormal(math.nan(f16)));
assert(!isNormal(math.nan(f32)));
assert(!isNormal(math.nan(f64)));
+ assert(!isNormal(f16(0)));
+ assert(!isNormal(f32(0)));
+ assert(!isNormal(f64(0)));
+ assert(isNormal(f16(1.0)));
assert(isNormal(f32(1.0)));
assert(isNormal(f64(1.0)));
}
diff --git a/std/math/ln.zig b/std/math/ln.zig
index c349ed7c6f..e78cc379e0 100644
--- a/std/math/ln.zig
+++ b/std/math/ln.zig
@@ -14,7 +14,7 @@ const TypeId = builtin.TypeId;
pub fn ln(x: var) @typeOf(x) {
const T = @typeOf(x);
switch (@typeId(T)) {
- TypeId.FloatLiteral => {
+ TypeId.ComptimeFloat => {
return @typeOf(1.0)(ln_64(x));
},
TypeId.Float => {
@@ -24,7 +24,7 @@ pub fn ln(x: var) @typeOf(x) {
else => @compileError("ln not implemented for " ++ @typeName(T)),
};
},
- TypeId.IntLiteral => {
+ TypeId.ComptimeInt => {
return @typeOf(1)(math.floor(ln_64(f64(x))));
},
TypeId.Int => {
@@ -71,7 +71,7 @@ pub fn ln_32(x_: f32) f32 {
// x into [sqrt(2) / 2, sqrt(2)]
ix += 0x3F800000 - 0x3F3504F3;
- k += i32(ix >> 23) - 0x7F;
+ k += @intCast(i32, ix >> 23) - 0x7F;
ix = (ix & 0x007FFFFF) + 0x3F3504F3;
x = @bitCast(f32, ix);
@@ -83,12 +83,14 @@ pub fn ln_32(x_: f32) f32 {
const t2 = z * (Lg1 + w * Lg3);
const R = t2 + t1;
const hfsq = 0.5 * f * f;
- const dk = f32(k);
+ const dk = @intToFloat(f32, k);
return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi;
}
pub fn ln_64(x_: f64) f64 {
+ @setFloatMode(this, @import("builtin").FloatMode.Strict);
+
const ln2_hi: f64 = 6.93147180369123816490e-01;
const ln2_lo: f64 = 1.90821492927058770002e-10;
const Lg1: f64 = 6.666666666666735130e-01;
@@ -101,7 +103,7 @@ pub fn ln_64(x_: f64) f64 {
var x = x_;
var ix = @bitCast(u64, x);
- var hx = u32(ix >> 32);
+ var hx = @intCast(u32, ix >> 32);
var k: i32 = 0;
if (hx < 0x00100000 or hx >> 31 != 0) {
@@ -117,18 +119,16 @@ pub fn ln_64(x_: f64) f64 {
// subnormal, scale x
k -= 54;
x *= 0x1.0p54;
- hx = u32(@bitCast(u64, ix) >> 32);
- }
- else if (hx >= 0x7FF00000) {
+ hx = @intCast(u32, @bitCast(u64, ix) >> 32);
+ } else if (hx >= 0x7FF00000) {
return x;
- }
- else if (hx == 0x3FF00000 and ix << 32 == 0) {
+ } else if (hx == 0x3FF00000 and ix << 32 == 0) {
return 0;
}
// x into [sqrt(2) / 2, sqrt(2)]
hx += 0x3FF00000 - 0x3FE6A09E;
- k += i32(hx >> 20) - 0x3FF;
+ k += @intCast(i32, hx >> 20) - 0x3FF;
hx = (hx & 0x000FFFFF) + 0x3FE6A09E;
ix = (u64(hx) << 32) | (ix & 0xFFFFFFFF);
x = @bitCast(f64, ix);
@@ -141,7 +141,7 @@ pub fn ln_64(x_: f64) f64 {
const t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
const R = t2 + t1;
- const dk = f64(k);
+ const dk = @intToFloat(f64, k);
return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi;
}
diff --git a/std/math/log.zig b/std/math/log.zig
index 1cba1138db..20b6d055e8 100644
--- a/std/math/log.zig
+++ b/std/math/log.zig
@@ -9,26 +9,27 @@ pub fn log(comptime T: type, base: T, x: T) T {
return math.log2(x);
} else if (base == 10) {
return math.log10(x);
- } else if ((@typeId(T) == TypeId.Float or @typeId(T) == TypeId.FloatLiteral) and base == math.e) {
+ } else if ((@typeId(T) == TypeId.Float or @typeId(T) == TypeId.ComptimeFloat) and base == math.e) {
return math.ln(x);
}
+ const float_base = math.lossyCast(f64, base);
switch (@typeId(T)) {
- TypeId.FloatLiteral => {
- return @typeOf(1.0)(math.ln(f64(x)) / math.ln(f64(base)));
+ TypeId.ComptimeFloat => {
+ return @typeOf(1.0)(math.ln(f64(x)) / math.ln(float_base));
},
- TypeId.IntLiteral => {
- return @typeOf(1)(math.floor(math.ln(f64(x)) / math.ln(f64(base))));
+ TypeId.ComptimeInt => {
+ return @typeOf(1)(math.floor(math.ln(f64(x)) / math.ln(float_base)));
},
builtin.TypeId.Int => {
// TODO implement integer log without using float math
- return T(math.floor(math.ln(f64(x)) / math.ln(f64(base))));
+ return @floatToInt(T, math.floor(math.ln(@intToFloat(f64, x)) / math.ln(float_base)));
},
builtin.TypeId.Float => {
switch (T) {
- f32 => return f32(math.ln(f64(x)) / math.ln(f64(base))),
- f64 => return math.ln(x) / math.ln(f64(base)),
+ f32 => return @floatCast(f32, math.ln(f64(x)) / math.ln(float_base)),
+ f64 => return math.ln(x) / math.ln(float_base),
else => @compileError("log not implemented for " ++ @typeName(T)),
}
},
diff --git a/std/math/log10.zig b/std/math/log10.zig
index aa74caa901..a93ce48fb7 100644
--- a/std/math/log10.zig
+++ b/std/math/log10.zig
@@ -14,7 +14,7 @@ const TypeId = builtin.TypeId;
pub fn log10(x: var) @typeOf(x) {
const T = @typeOf(x);
switch (@typeId(T)) {
- TypeId.FloatLiteral => {
+ TypeId.ComptimeFloat => {
return @typeOf(1.0)(log10_64(x));
},
TypeId.Float => {
@@ -24,21 +24,21 @@ pub fn log10(x: var) @typeOf(x) {
else => @compileError("log10 not implemented for " ++ @typeName(T)),
};
},
- TypeId.IntLiteral => {
+ TypeId.ComptimeInt => {
return @typeOf(1)(math.floor(log10_64(f64(x))));
},
TypeId.Int => {
- return T(math.floor(log10_64(f64(x))));
+ return @floatToInt(T, math.floor(log10_64(@intToFloat(f64, x))));
},
else => @compileError("log10 not implemented for " ++ @typeName(T)),
}
}
pub fn log10_32(x_: f32) f32 {
- const ivln10hi: f32 = 4.3432617188e-01;
- const ivln10lo: f32 = -3.1689971365e-05;
- const log10_2hi: f32 = 3.0102920532e-01;
- const log10_2lo: f32 = 7.9034151668e-07;
+ const ivln10hi: f32 = 4.3432617188e-01;
+ const ivln10lo: f32 = -3.1689971365e-05;
+ const log10_2hi: f32 = 3.0102920532e-01;
+ const log10_2lo: f32 = 7.9034151668e-07;
const Lg1: f32 = 0xaaaaaa.0p-24;
const Lg2: f32 = 0xccce13.0p-25;
const Lg3: f32 = 0x91e9ee.0p-25;
@@ -71,7 +71,7 @@ pub fn log10_32(x_: f32) f32 {
// x into [sqrt(2) / 2, sqrt(2)]
ix += 0x3F800000 - 0x3F3504F3;
- k += i32(ix >> 23) - 0x7F;
+ k += @intCast(i32, ix >> 23) - 0x7F;
ix = (ix & 0x007FFFFF) + 0x3F3504F3;
x = @bitCast(f32, ix);
@@ -89,14 +89,14 @@ pub fn log10_32(x_: f32) f32 {
u &= 0xFFFFF000;
hi = @bitCast(f32, u);
const lo = f - hi - hfsq + s * (hfsq + R);
- const dk = f32(k);
+ const dk = @intToFloat(f32, k);
return dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi + hi * ivln10hi + dk * log10_2hi;
}
pub fn log10_64(x_: f64) f64 {
- const ivln10hi: f64 = 4.34294481878168880939e-01;
- const ivln10lo: f64 = 2.50829467116452752298e-11;
+ const ivln10hi: f64 = 4.34294481878168880939e-01;
+ const ivln10lo: f64 = 2.50829467116452752298e-11;
const log10_2hi: f64 = 3.01029995663611771306e-01;
const log10_2lo: f64 = 3.69423907715893078616e-13;
const Lg1: f64 = 6.666666666666735130e-01;
@@ -109,7 +109,7 @@ pub fn log10_64(x_: f64) f64 {
var x = x_;
var ix = @bitCast(u64, x);
- var hx = u32(ix >> 32);
+ var hx = @intCast(u32, ix >> 32);
var k: i32 = 0;
if (hx < 0x00100000 or hx >> 31 != 0) {
@@ -125,18 +125,16 @@ pub fn log10_64(x_: f64) f64 {
// subnormal, scale x
k -= 54;
x *= 0x1.0p54;
- hx = u32(@bitCast(u64, x) >> 32);
- }
- else if (hx >= 0x7FF00000) {
+ hx = @intCast(u32, @bitCast(u64, x) >> 32);
+ } else if (hx >= 0x7FF00000) {
return x;
- }
- else if (hx == 0x3FF00000 and ix << 32 == 0) {
+ } else if (hx == 0x3FF00000 and ix << 32 == 0) {
return 0;
}
// x into [sqrt(2) / 2, sqrt(2)]
hx += 0x3FF00000 - 0x3FE6A09E;
- k += i32(hx >> 20) - 0x3FF;
+ k += @intCast(i32, hx >> 20) - 0x3FF;
hx = (hx & 0x000FFFFF) + 0x3FE6A09E;
ix = (u64(hx) << 32) | (ix & 0xFFFFFFFF);
x = @bitCast(f64, ix);
@@ -159,7 +157,7 @@ pub fn log10_64(x_: f64) f64 {
// val_hi + val_lo ~ log10(1 + f) + k * log10(2)
var val_hi = hi * ivln10hi;
- const dk = f64(k);
+ const dk = @intToFloat(f64, k);
const y = dk * log10_2hi;
var val_lo = dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi;
diff --git a/std/math/log1p.zig b/std/math/log1p.zig
index 4616a2f2ba..7d3be6bb49 100644
--- a/std/math/log1p.zig
+++ b/std/math/log1p.zig
@@ -68,7 +68,7 @@ fn log1p_32(x: f32) f32 {
const uf = 1 + x;
var iu = @bitCast(u32, uf);
iu += 0x3F800000 - 0x3F3504F3;
- k = i32(iu >> 23) - 0x7F;
+ k = @intCast(i32, iu >> 23) - 0x7F;
// correction to avoid underflow in c / u
if (k < 25) {
@@ -90,7 +90,7 @@ fn log1p_32(x: f32) f32 {
const t2 = z * (Lg1 + w * Lg3);
const R = t2 + t1;
const hfsq = 0.5 * f * f;
- const dk = f32(k);
+ const dk = @intToFloat(f32, k);
return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi;
}
@@ -107,7 +107,7 @@ fn log1p_64(x: f64) f64 {
const Lg7: f64 = 1.479819860511658591e-01;
var ix = @bitCast(u64, x);
- var hx = u32(ix >> 32);
+ var hx = @intCast(u32, ix >> 32);
var k: i32 = 1;
var c: f64 = undefined;
var f: f64 = undefined;
@@ -138,17 +138,16 @@ fn log1p_64(x: f64) f64 {
c = 0;
f = x;
}
- }
- else if (hx >= 0x7FF00000) {
+ } else if (hx >= 0x7FF00000) {
return x;
}
if (k != 0) {
const uf = 1 + x;
const hu = @bitCast(u64, uf);
- var iu = u32(hu >> 32);
+ var iu = @intCast(u32, hu >> 32);
iu += 0x3FF00000 - 0x3FE6A09E;
- k = i32(iu >> 20) - 0x3FF;
+ k = @intCast(i32, iu >> 20) - 0x3FF;
// correction to avoid underflow in c / u
if (k < 54) {
@@ -171,7 +170,7 @@ fn log1p_64(x: f64) f64 {
const t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
const R = t2 + t1;
- const dk = f64(k);
+ const dk = @intToFloat(f64, k);
return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi;
}
diff --git a/std/math/log2.zig b/std/math/log2.zig
index 998d6d6c5e..858f6ffa02 100644
--- a/std/math/log2.zig
+++ b/std/math/log2.zig
@@ -14,7 +14,7 @@ const TypeId = builtin.TypeId;
pub fn log2(x: var) @typeOf(x) {
const T = @typeOf(x);
switch (@typeId(T)) {
- TypeId.FloatLiteral => {
+ TypeId.ComptimeFloat => {
return @typeOf(1.0)(log2_64(x));
},
TypeId.Float => {
@@ -24,26 +24,24 @@ pub fn log2(x: var) @typeOf(x) {
else => @compileError("log2 not implemented for " ++ @typeName(T)),
};
},
- TypeId.IntLiteral => comptime {
+ TypeId.ComptimeInt => comptime {
var result = 0;
var x_shifted = x;
- while (b: {x_shifted >>= 1; break :b x_shifted != 0;}) : (result += 1) {}
+ while (b: {
+ x_shifted >>= 1;
+ break :b x_shifted != 0;
+ }) : (result += 1) {}
return result;
},
TypeId.Int => {
- return log2_int(T, x);
+ return math.log2_int(T, x);
},
else => @compileError("log2 not implemented for " ++ @typeName(T)),
}
}
-pub fn log2_int(comptime T: type, x: T) T {
- assert(x != 0);
- return T.bit_count - 1 - T(@clz(x));
-}
-
pub fn log2_32(x_: f32) f32 {
- const ivln2hi: f32 = 1.4428710938e+00;
+ const ivln2hi: f32 = 1.4428710938e+00;
const ivln2lo: f32 = -1.7605285393e-04;
const Lg1: f32 = 0xaaaaaa.0p-24;
const Lg2: f32 = 0xccce13.0p-25;
@@ -77,7 +75,7 @@ pub fn log2_32(x_: f32) f32 {
// x into [sqrt(2) / 2, sqrt(2)]
ix += 0x3F800000 - 0x3F3504F3;
- k += i32(ix >> 23) - 0x7F;
+ k += @intCast(i32, ix >> 23) - 0x7F;
ix = (ix & 0x007FFFFF) + 0x3F3504F3;
x = @bitCast(f32, ix);
@@ -95,7 +93,7 @@ pub fn log2_32(x_: f32) f32 {
u &= 0xFFFFF000;
hi = @bitCast(f32, u);
const lo = f - hi - hfsq + s * (hfsq + R);
- return (lo + hi) * ivln2lo + lo * ivln2hi + hi * ivln2hi + f32(k);
+ return (lo + hi) * ivln2lo + lo * ivln2hi + hi * ivln2hi + @intToFloat(f32, k);
}
pub fn log2_64(x_: f64) f64 {
@@ -111,7 +109,7 @@ pub fn log2_64(x_: f64) f64 {
var x = x_;
var ix = @bitCast(u64, x);
- var hx = u32(ix >> 32);
+ var hx = @intCast(u32, ix >> 32);
var k: i32 = 0;
if (hx < 0x00100000 or hx >> 31 != 0) {
@@ -127,7 +125,7 @@ pub fn log2_64(x_: f64) f64 {
// subnormal, scale x
k -= 54;
x *= 0x1.0p54;
- hx = u32(@bitCast(u64, x) >> 32);
+ hx = @intCast(u32, @bitCast(u64, x) >> 32);
} else if (hx >= 0x7FF00000) {
return x;
} else if (hx == 0x3FF00000 and ix << 32 == 0) {
@@ -136,7 +134,7 @@ pub fn log2_64(x_: f64) f64 {
// x into [sqrt(2) / 2, sqrt(2)]
hx += 0x3FF00000 - 0x3FE6A09E;
- k += i32(hx >> 20) - 0x3FF;
+ k += @intCast(i32, hx >> 20) - 0x3FF;
hx = (hx & 0x000FFFFF) + 0x3FE6A09E;
ix = (u64(hx) << 32) | (ix & 0xFFFFFFFF);
x = @bitCast(f64, ix);
@@ -161,7 +159,7 @@ pub fn log2_64(x_: f64) f64 {
var val_lo = (lo + hi) * ivln2lo + lo * ivln2hi;
// spadd(val_hi, val_lo, y)
- const y = f64(k);
+ const y = @intToFloat(f64, k);
const ww = y + val_hi;
val_lo += (y - ww) + val_hi;
val_hi = ww;
diff --git a/std/math/modf.zig b/std/math/modf.zig
index a6606ce34c..b6dd78f022 100644
--- a/std/math/modf.zig
+++ b/std/math/modf.zig
@@ -29,7 +29,7 @@ fn modf32(x: f32) modf32_result {
var result: modf32_result = undefined;
const u = @bitCast(u32, x);
- const e = i32((u >> 23) & 0xFF) - 0x7F;
+ const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
const us = u & 0x80000000;
// TODO: Shouldn't need this.
@@ -57,7 +57,7 @@ fn modf32(x: f32) modf32_result {
return result;
}
- const mask = u32(0x007FFFFF) >> u5(e);
+ const mask = u32(0x007FFFFF) >> @intCast(u5, e);
if (u & mask == 0) {
result.ipart = x;
result.fpart = @bitCast(f32, us);
@@ -74,7 +74,7 @@ fn modf64(x: f64) modf64_result {
var result: modf64_result = undefined;
const u = @bitCast(u64, x);
- const e = i32((u >> 52) & 0x7FF) - 0x3FF;
+ const e = @intCast(i32, (u >> 52) & 0x7FF) - 0x3FF;
const us = u & (1 << 63);
if (math.isInf(x)) {
@@ -101,7 +101,7 @@ fn modf64(x: f64) modf64_result {
return result;
}
- const mask = u64(@maxValue(u64) >> 12) >> u6(e);
+ const mask = u64(@maxValue(u64) >> 12) >> @intCast(u6, e);
if (u & mask == 0) {
result.ipart = x;
result.fpart = @bitCast(f64, us);
diff --git a/std/math/nan.zig b/std/math/nan.zig
index 22461711d0..2cbcbee81b 100644
--- a/std/math/nan.zig
+++ b/std/math/nan.zig
@@ -2,6 +2,7 @@ const math = @import("index.zig");
pub fn nan(comptime T: type) T {
return switch (T) {
+ f16 => @bitCast(f16, math.nan_u16),
f32 => @bitCast(f32, math.nan_u32),
f64 => @bitCast(f64, math.nan_u64),
else => @compileError("nan not implemented for " ++ @typeName(T)),
@@ -12,6 +13,7 @@ pub fn nan(comptime T: type) T {
// representation in the future when required.
pub fn snan(comptime T: type) T {
return switch (T) {
+ f16 => @bitCast(f16, math.nan_u16),
f32 => @bitCast(f32, math.nan_u32),
f64 => @bitCast(f64, math.nan_u64),
else => @compileError("snan not implemented for " ++ @typeName(T)),
diff --git a/std/math/pow.zig b/std/math/pow.zig
index 51908f30ea..7fc334c06b 100644
--- a/std/math/pow.zig
+++ b/std/math/pow.zig
@@ -28,7 +28,6 @@ const assert = std.debug.assert;
// This implementation is taken from the go stdlib; musl is a bit more complex.
pub fn pow(comptime T: type, x: T, y: T) T {
-
@setFloatMode(this, @import("builtin").FloatMode.Strict);
if (T != f32 and T != f64) {
@@ -147,7 +146,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
var xe = r2.exponent;
var x1 = r2.significand;
- var i = i32(yi);
+ var i = @floatToInt(i32, yi);
while (i != 0) : (i >>= 1) {
if (i & 1 == 1) {
a1 *= x1;
@@ -172,7 +171,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
fn isOddInteger(x: f64) bool {
const r = math.modf(x);
- return r.fpart == 0.0 and i64(r.ipart) & 1 == 1;
+ return r.fpart == 0.0 and @floatToInt(i64, r.ipart) & 1 == 1;
}
test "math.pow" {
diff --git a/std/math/round.zig b/std/math/round.zig
index c16190da21..c8d9eb4fd4 100644
--- a/std/math/round.zig
+++ b/std/math/round.zig
@@ -24,13 +24,13 @@ fn round32(x_: f32) f32 {
const e = (u >> 23) & 0xFF;
var y: f32 = undefined;
- if (e >= 0x7F+23) {
+ if (e >= 0x7F + 23) {
return x;
}
if (u >> 31 != 0) {
x = -x;
}
- if (e < 0x7F-1) {
+ if (e < 0x7F - 1) {
math.forceEval(x + math.f32_toint);
return 0 * @bitCast(f32, u);
}
@@ -61,13 +61,13 @@ fn round64(x_: f64) f64 {
const e = (u >> 52) & 0x7FF;
var y: f64 = undefined;
- if (e >= 0x3FF+52) {
+ if (e >= 0x3FF + 52) {
return x;
}
if (u >> 63 != 0) {
x = -x;
}
- if (e < 0x3ff-1) {
+ if (e < 0x3ff - 1) {
math.forceEval(x + math.f64_toint);
return 0 * @bitCast(f64, u);
}
diff --git a/std/math/scalbn.zig b/std/math/scalbn.zig
index deb5d989d2..f72c7e866f 100644
--- a/std/math/scalbn.zig
+++ b/std/math/scalbn.zig
@@ -37,7 +37,7 @@ fn scalbn32(x: f32, n_: i32) f32 {
}
}
- const u = u32(n +% 0x7F) << 23;
+ const u = @intCast(u32, n +% 0x7F) << 23;
return y * @bitCast(f32, u);
}
@@ -67,7 +67,7 @@ fn scalbn64(x: f64, n_: i32) f64 {
}
}
- const u = u64(n +% 0x3FF) << 52;
+ const u = @intCast(u64, n +% 0x3FF) << 52;
return y * @bitCast(f64, u);
}
diff --git a/std/math/signbit.zig b/std/math/signbit.zig
index a0191bed5c..8c6829dfcd 100644
--- a/std/math/signbit.zig
+++ b/std/math/signbit.zig
@@ -5,12 +5,18 @@ const assert = std.debug.assert;
pub fn signbit(x: var) bool {
const T = @typeOf(x);
return switch (T) {
+ f16 => signbit16(x),
f32 => signbit32(x),
f64 => signbit64(x),
else => @compileError("signbit not implemented for " ++ @typeName(T)),
};
}
+fn signbit16(x: f16) bool {
+ const bits = @bitCast(u16, x);
+ return bits >> 15 != 0;
+}
+
fn signbit32(x: f32) bool {
const bits = @bitCast(u32, x);
return bits >> 31 != 0;
@@ -22,10 +28,16 @@ fn signbit64(x: f64) bool {
}
test "math.signbit" {
+ assert(signbit(f16(4.0)) == signbit16(4.0));
assert(signbit(f32(4.0)) == signbit32(4.0));
assert(signbit(f64(4.0)) == signbit64(4.0));
}
+test "math.signbit16" {
+ assert(!signbit16(4.0));
+ assert(signbit16(-3.0));
+}
+
test "math.signbit32" {
assert(!signbit32(4.0));
assert(signbit32(-3.0));
diff --git a/std/math/sin.zig b/std/math/sin.zig
index 5dd869545b..3796d74812 100644
--- a/std/math/sin.zig
+++ b/std/math/sin.zig
@@ -19,20 +19,20 @@ pub fn sin(x: var) @typeOf(x) {
}
// sin polynomial coefficients
-const S0 = 1.58962301576546568060E-10;
+const S0 = 1.58962301576546568060E-10;
const S1 = -2.50507477628578072866E-8;
-const S2 = 2.75573136213857245213E-6;
+const S2 = 2.75573136213857245213E-6;
const S3 = -1.98412698295895385996E-4;
-const S4 = 8.33333333332211858878E-3;
+const S4 = 8.33333333332211858878E-3;
const S5 = -1.66666666666666307295E-1;
// cos polynomial coefficients
const C0 = -1.13585365213876817300E-11;
-const C1 = 2.08757008419747316778E-9;
+const C1 = 2.08757008419747316778E-9;
const C2 = -2.75573141792967388112E-7;
-const C3 = 2.48015872888517045348E-5;
+const C3 = 2.48015872888517045348E-5;
const C4 = -1.38888888888730564116E-3;
-const C5 = 4.16666666666665929218E-2;
+const C5 = 4.16666666666665929218E-2;
// NOTE: This is taken from the go stdlib. The musl implementation is much more complex.
//
@@ -60,7 +60,7 @@ fn sin32(x_: f32) f32 {
}
var y = math.floor(x * m4pi);
- var j = i64(y);
+ var j = @floatToInt(i64, y);
if (j & 1 == 1) {
j += 1;
@@ -112,7 +112,7 @@ fn sin64(x_: f64) f64 {
}
var y = math.floor(x * m4pi);
- var j = i64(y);
+ var j = @floatToInt(i64, y);
if (j & 1 == 1) {
j += 1;
diff --git a/std/math/sinh.zig b/std/math/sinh.zig
index 85c9ae979b..bb3af280ab 100644
--- a/std/math/sinh.zig
+++ b/std/math/sinh.zig
@@ -57,7 +57,7 @@ fn sinh64(x: f64) f64 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const u = @bitCast(u64, x);
- const w = u32(u >> 32);
+ const w = @intCast(u32, u >> 32);
const ax = @bitCast(f64, u & (@maxValue(u64) >> 1));
if (x == 0.0 or math.isNan(x)) {
diff --git a/std/math/sqrt.zig b/std/math/sqrt.zig
index 690f8b6901..e12ecf9683 100644
--- a/std/math/sqrt.zig
+++ b/std/math/sqrt.zig
@@ -14,27 +14,9 @@ const TypeId = builtin.TypeId;
pub fn sqrt(x: var) (if (@typeId(@typeOf(x)) == TypeId.Int) @IntType(false, @typeOf(x).bit_count / 2) else @typeOf(x)) {
const T = @typeOf(x);
switch (@typeId(T)) {
- TypeId.FloatLiteral => {
- return T(sqrt64(x));
- },
- TypeId.Float => {
- switch (T) {
- f32 => {
- switch (builtin.arch) {
- builtin.Arch.x86_64 => return @import("x86_64/sqrt.zig").sqrt32(x),
- else => return sqrt32(x),
- }
- },
- f64 => {
- switch (builtin.arch) {
- builtin.Arch.x86_64 => return @import("x86_64/sqrt.zig").sqrt64(x),
- else => return sqrt64(x),
- }
- },
- else => @compileError("sqrt not implemented for " ++ @typeName(T)),
- }
- },
- TypeId.IntLiteral => comptime {
+ TypeId.ComptimeFloat => return T(@sqrt(f64, x)), // TODO upgrade to f128
+ TypeId.Float => return @sqrt(T, x),
+ TypeId.ComptimeInt => comptime {
if (x > @maxValue(u128)) {
@compileError("sqrt not implemented for comptime_int greater than 128 bits");
}
@@ -43,269 +25,81 @@ pub fn sqrt(x: var) (if (@typeId(@typeOf(x)) == TypeId.Int) @IntType(false, @typ
}
return T(sqrt_int(u128, x));
},
- TypeId.Int => {
- return sqrt_int(T, x);
- },
+ TypeId.Int => return sqrt_int(T, x),
else => @compileError("sqrt not implemented for " ++ @typeName(T)),
}
}
-fn sqrt32(x: f32) f32 {
- const tiny: f32 = 1.0e-30;
- const sign: i32 = @bitCast(i32, u32(0x80000000));
- var ix: i32 = @bitCast(i32, x);
-
- if ((ix & 0x7F800000) == 0x7F800000) {
- return x * x + x; // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = snan
- }
-
- // zero
- if (ix <= 0) {
- if (ix & ~sign == 0) {
- return x; // sqrt (+-0) = +-0
- }
- if (ix < 0) {
- return math.snan(f32);
- }
- }
-
- // normalize
- var m = ix >> 23;
- if (m == 0) {
- // subnormal
- var i: i32 = 0;
- while (ix & 0x00800000 == 0) : (i += 1) {
- ix <<= 1;
- }
- m -= i - 1;
- }
-
- m -= 127; // unbias exponent
- ix = (ix & 0x007FFFFF) | 0x00800000;
-
- if (m & 1 != 0) { // odd m, double x to even
- ix += ix;
- }
-
- m >>= 1; // m = [m / 2]
-
- // sqrt(x) bit by bit
- ix += ix;
- var q: i32 = 0; // q = sqrt(x)
- var s: i32 = 0;
- var r: i32 = 0x01000000; // r = moving bit right -> left
-
- while (r != 0) {
- const t = s + r;
- if (t <= ix) {
- s = t + r;
- ix -= t;
- q += r;
- }
- ix += ix;
- r >>= 1;
- }
-
- // floating add to find rounding direction
- if (ix != 0) {
- var z = 1.0 - tiny; // inexact
- if (z >= 1.0) {
- z = 1.0 + tiny;
- if (z > 1.0) {
- q += 2;
- } else {
- if (q & 1 != 0) {
- q += 1;
- }
- }
- }
- }
-
- ix = (q >> 1) + 0x3f000000;
- ix += m << 23;
- return @bitCast(f32, ix);
-}
-
-// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound
-// behaviour. Most intermediate i32 values are changed to u32 where appropriate but there are
-// potentially some edge cases remaining that are not handled in the same way.
-fn sqrt64(x: f64) f64 {
- const tiny: f64 = 1.0e-300;
- const sign: u32 = 0x80000000;
- const u = @bitCast(u64, x);
-
- var ix0 = u32(u >> 32);
- var ix1 = u32(u & 0xFFFFFFFF);
-
- // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = nan
- if (ix0 & 0x7FF00000 == 0x7FF00000) {
- return x * x + x;
- }
-
- // sqrt(+-0) = +-0
- if (x == 0.0) {
- return x;
- }
- // sqrt(-ve) = snan
- if (ix0 & sign != 0) {
- return math.snan(f64);
- }
-
- // normalize x
- var m = i32(ix0 >> 20);
- if (m == 0) {
- // subnormal
- while (ix0 == 0) {
- m -= 21;
- ix0 |= ix1 >> 11;
- ix1 <<= 21;
- }
-
- // subnormal
- var i: u32 = 0;
- while (ix0 & 0x00100000 == 0) : (i += 1) {
- ix0 <<= 1;
- }
- m -= i32(i) - 1;
- ix0 |= ix1 >> u5(32 - i);
- ix1 <<= u5(i);
- }
-
- // unbias exponent
- m -= 1023;
- ix0 = (ix0 & 0x000FFFFF) | 0x00100000;
- if (m & 1 != 0) {
- ix0 += ix0 + (ix1 >> 31);
- ix1 = ix1 +% ix1;
- }
- m >>= 1;
-
- // sqrt(x) bit by bit
- ix0 += ix0 + (ix1 >> 31);
- ix1 = ix1 +% ix1;
-
- var q: u32 = 0;
- var q1: u32 = 0;
- var s0: u32 = 0;
- var s1: u32 = 0;
- var r: u32 = 0x00200000;
- var t: u32 = undefined;
- var t1: u32 = undefined;
-
- while (r != 0) {
- t = s0 +% r;
- if (t <= ix0) {
- s0 = t + r;
- ix0 -= t;
- q += r;
- }
- ix0 = ix0 +% ix0 +% (ix1 >> 31);
- ix1 = ix1 +% ix1;
- r >>= 1;
- }
-
- r = sign;
- while (r != 0) {
- t = s1 +% r;
- t = s0;
- if (t < ix0 or (t == ix0 and t1 <= ix1)) {
- s1 = t1 +% r;
- if (t1 & sign == sign and s1 & sign == 0) {
- s0 += 1;
- }
- ix0 -= t;
- if (ix1 < t1) {
- ix0 -= 1;
- }
- ix1 = ix1 -% t1;
- q1 += r;
- }
- ix0 = ix0 +% ix0 +% (ix1 >> 31);
- ix1 = ix1 +% ix1;
- r >>= 1;
- }
-
- // rounding direction
- if (ix0 | ix1 != 0) {
- var z = 1.0 - tiny; // raise inexact
- if (z >= 1.0) {
- z = 1.0 + tiny;
- if (q1 == 0xFFFFFFFF) {
- q1 = 0;
- q += 1;
- } else if (z > 1.0) {
- if (q1 == 0xFFFFFFFE) {
- q += 1;
- }
- q1 += 2;
- } else {
- q1 += q1 & 1;
- }
- }
- }
-
- ix0 = (q >> 1) + 0x3FE00000;
- ix1 = q1 >> 1;
- if (q & 1 != 0) {
- ix1 |= 0x80000000;
- }
-
- // NOTE: musl here appears to rely on signed twos-complement wraparound. +% has the same
- // behaviour at least.
- var iix0 = i32(ix0);
- iix0 = iix0 +% (m << 20);
-
- const uz = (u64(iix0) << 32) | ix1;
- return @bitCast(f64, uz);
-}
-
test "math.sqrt" {
- assert(sqrt(f32(0.0)) == sqrt32(0.0));
- assert(sqrt(f64(0.0)) == sqrt64(0.0));
+ assert(sqrt(f16(0.0)) == @sqrt(f16, 0.0));
+ assert(sqrt(f32(0.0)) == @sqrt(f32, 0.0));
+ assert(sqrt(f64(0.0)) == @sqrt(f64, 0.0));
+}
+
+test "math.sqrt16" {
+ const epsilon = 0.000001;
+
+ assert(@sqrt(f16, 0.0) == 0.0);
+ assert(math.approxEq(f16, @sqrt(f16, 2.0), 1.414214, epsilon));
+ assert(math.approxEq(f16, @sqrt(f16, 3.6), 1.897367, epsilon));
+ assert(@sqrt(f16, 4.0) == 2.0);
+ assert(math.approxEq(f16, @sqrt(f16, 7.539840), 2.745877, epsilon));
+ assert(math.approxEq(f16, @sqrt(f16, 19.230934), 4.385309, epsilon));
+ assert(@sqrt(f16, 64.0) == 8.0);
+ assert(math.approxEq(f16, @sqrt(f16, 64.1), 8.006248, epsilon));
+ assert(math.approxEq(f16, @sqrt(f16, 8942.230469), 94.563370, epsilon));
}
test "math.sqrt32" {
const epsilon = 0.000001;
- assert(sqrt32(0.0) == 0.0);
- assert(math.approxEq(f32, sqrt32(2.0), 1.414214, epsilon));
- assert(math.approxEq(f32, sqrt32(3.6), 1.897367, epsilon));
- assert(sqrt32(4.0) == 2.0);
- assert(math.approxEq(f32, sqrt32(7.539840), 2.745877, epsilon));
- assert(math.approxEq(f32, sqrt32(19.230934), 4.385309, epsilon));
- assert(sqrt32(64.0) == 8.0);
- assert(math.approxEq(f32, sqrt32(64.1), 8.006248, epsilon));
- assert(math.approxEq(f32, sqrt32(8942.230469), 94.563370, epsilon));
+ assert(@sqrt(f32, 0.0) == 0.0);
+ assert(math.approxEq(f32, @sqrt(f32, 2.0), 1.414214, epsilon));
+ assert(math.approxEq(f32, @sqrt(f32, 3.6), 1.897367, epsilon));
+ assert(@sqrt(f32, 4.0) == 2.0);
+ assert(math.approxEq(f32, @sqrt(f32, 7.539840), 2.745877, epsilon));
+ assert(math.approxEq(f32, @sqrt(f32, 19.230934), 4.385309, epsilon));
+ assert(@sqrt(f32, 64.0) == 8.0);
+ assert(math.approxEq(f32, @sqrt(f32, 64.1), 8.006248, epsilon));
+ assert(math.approxEq(f32, @sqrt(f32, 8942.230469), 94.563370, epsilon));
}
test "math.sqrt64" {
const epsilon = 0.000001;
- assert(sqrt64(0.0) == 0.0);
- assert(math.approxEq(f64, sqrt64(2.0), 1.414214, epsilon));
- assert(math.approxEq(f64, sqrt64(3.6), 1.897367, epsilon));
- assert(sqrt64(4.0) == 2.0);
- assert(math.approxEq(f64, sqrt64(7.539840), 2.745877, epsilon));
- assert(math.approxEq(f64, sqrt64(19.230934), 4.385309, epsilon));
- assert(sqrt64(64.0) == 8.0);
- assert(math.approxEq(f64, sqrt64(64.1), 8.006248, epsilon));
- assert(math.approxEq(f64, sqrt64(8942.230469), 94.563367, epsilon));
+ assert(@sqrt(f64, 0.0) == 0.0);
+ assert(math.approxEq(f64, @sqrt(f64, 2.0), 1.414214, epsilon));
+ assert(math.approxEq(f64, @sqrt(f64, 3.6), 1.897367, epsilon));
+ assert(@sqrt(f64, 4.0) == 2.0);
+ assert(math.approxEq(f64, @sqrt(f64, 7.539840), 2.745877, epsilon));
+ assert(math.approxEq(f64, @sqrt(f64, 19.230934), 4.385309, epsilon));
+ assert(@sqrt(f64, 64.0) == 8.0);
+ assert(math.approxEq(f64, @sqrt(f64, 64.1), 8.006248, epsilon));
+ assert(math.approxEq(f64, @sqrt(f64, 8942.230469), 94.563367, epsilon));
+}
+
+test "math.sqrt16.special" {
+ assert(math.isPositiveInf(@sqrt(f16, math.inf(f16))));
+ assert(@sqrt(f16, 0.0) == 0.0);
+ assert(@sqrt(f16, -0.0) == -0.0);
+ assert(math.isNan(@sqrt(f16, -1.0)));
+ assert(math.isNan(@sqrt(f16, math.nan(f16))));
}
test "math.sqrt32.special" {
- assert(math.isPositiveInf(sqrt32(math.inf(f32))));
- assert(sqrt32(0.0) == 0.0);
- assert(sqrt32(-0.0) == -0.0);
- assert(math.isNan(sqrt32(-1.0)));
- assert(math.isNan(sqrt32(math.nan(f32))));
+ assert(math.isPositiveInf(@sqrt(f32, math.inf(f32))));
+ assert(@sqrt(f32, 0.0) == 0.0);
+ assert(@sqrt(f32, -0.0) == -0.0);
+ assert(math.isNan(@sqrt(f32, -1.0)));
+ assert(math.isNan(@sqrt(f32, math.nan(f32))));
}
test "math.sqrt64.special" {
- assert(math.isPositiveInf(sqrt64(math.inf(f64))));
- assert(sqrt64(0.0) == 0.0);
- assert(sqrt64(-0.0) == -0.0);
- assert(math.isNan(sqrt64(-1.0)));
- assert(math.isNan(sqrt64(math.nan(f64))));
+ assert(math.isPositiveInf(@sqrt(f64, math.inf(f64))));
+ assert(@sqrt(f64, 0.0) == 0.0);
+ assert(@sqrt(f64, -0.0) == -0.0);
+ assert(math.isNan(@sqrt(f64, -1.0)));
+ assert(math.isNan(@sqrt(f64, math.nan(f64))));
}
fn sqrt_int(comptime T: type, value: T) @IntType(false, T.bit_count / 2) {
@@ -328,7 +122,7 @@ fn sqrt_int(comptime T: type, value: T) @IntType(false, T.bit_count / 2) {
}
const ResultType = @IntType(false, T.bit_count / 2);
- return ResultType(res);
+ return @intCast(ResultType, res);
}
test "math.sqrt_int" {
diff --git a/std/math/tan.zig b/std/math/tan.zig
index 11428b6e8b..ff3ed06186 100644
--- a/std/math/tan.zig
+++ b/std/math/tan.zig
@@ -19,12 +19,12 @@ pub fn tan(x: var) @typeOf(x) {
}
const Tp0 = -1.30936939181383777646E4;
-const Tp1 = 1.15351664838587416140E6;
+const Tp1 = 1.15351664838587416140E6;
const Tp2 = -1.79565251976484877988E7;
-const Tq1 = 1.36812963470692954678E4;
+const Tq1 = 1.36812963470692954678E4;
const Tq2 = -1.32089234440210967447E6;
-const Tq3 = 2.50083801823357915839E7;
+const Tq3 = 2.50083801823357915839E7;
const Tq4 = -5.38695755929454629881E7;
// NOTE: This is taken from the go stdlib. The musl implementation is much more complex.
@@ -53,7 +53,7 @@ fn tan32(x_: f32) f32 {
}
var y = math.floor(x * m4pi);
- var j = i64(y);
+ var j = @floatToInt(i64, y);
if (j & 1 == 1) {
j += 1;
@@ -102,7 +102,7 @@ fn tan64(x_: f64) f64 {
}
var y = math.floor(x * m4pi);
- var j = i64(y);
+ var j = @floatToInt(i64, y);
if (j & 1 == 1) {
j += 1;
diff --git a/std/math/tanh.zig b/std/math/tanh.zig
index c1f5a0ca46..6204b2a374 100644
--- a/std/math/tanh.zig
+++ b/std/math/tanh.zig
@@ -68,7 +68,7 @@ fn tanh32(x: f32) f32 {
fn tanh64(x: f64) f64 {
const u = @bitCast(u64, x);
- const w = u32(u >> 32);
+ const w = @intCast(u32, u >> 32);
const ax = @bitCast(f64, u & (@maxValue(u64) >> 1));
var t: f64 = undefined;
@@ -100,7 +100,7 @@ fn tanh64(x: f64) f64 {
}
// |x| is subnormal
else {
- math.forceEval(f32(x));
+ math.forceEval(@floatCast(f32, x));
t = x;
}
diff --git a/std/math/trunc.zig b/std/math/trunc.zig
index 54aa6943f7..92d5bfebc5 100644
--- a/std/math/trunc.zig
+++ b/std/math/trunc.zig
@@ -19,7 +19,7 @@ pub fn trunc(x: var) @typeOf(x) {
fn trunc32(x: f32) f32 {
const u = @bitCast(u32, x);
- var e = i32(((u >> 23) & 0xFF)) - 0x7F + 9;
+ var e = @intCast(i32, ((u >> 23) & 0xFF)) - 0x7F + 9;
var m: u32 = undefined;
if (e >= 23 + 9) {
@@ -29,7 +29,7 @@ fn trunc32(x: f32) f32 {
e = 1;
}
- m = u32(@maxValue(u32)) >> u5(e);
+ m = u32(@maxValue(u32)) >> @intCast(u5, e);
if (u & m == 0) {
return x;
} else {
@@ -40,7 +40,7 @@ fn trunc32(x: f32) f32 {
fn trunc64(x: f64) f64 {
const u = @bitCast(u64, x);
- var e = i32(((u >> 52) & 0x7FF)) - 0x3FF + 12;
+ var e = @intCast(i32, ((u >> 52) & 0x7FF)) - 0x3FF + 12;
var m: u64 = undefined;
if (e >= 52 + 12) {
@@ -50,7 +50,7 @@ fn trunc64(x: f64) f64 {
e = 1;
}
- m = u64(@maxValue(u64)) >> u6(e);
+ m = u64(@maxValue(u64)) >> @intCast(u6, e);
if (u & m == 0) {
return x;
} else {
diff --git a/std/math/x86_64/sqrt.zig b/std/math/x86_64/sqrt.zig
deleted file mode 100644
index ad9ce0c96c..0000000000
--- a/std/math/x86_64/sqrt.zig
+++ /dev/null
@@ -1,15 +0,0 @@
-pub fn sqrt32(x: f32) f32 {
- return asm (
- \\sqrtss %%xmm0, %%xmm0
- : [ret] "={xmm0}" (-> f32)
- : [x] "{xmm0}" (x)
- );
-}
-
-pub fn sqrt64(x: f64) f64 {
- return asm (
- \\sqrtsd %%xmm0, %%xmm0
- : [ret] "={xmm0}" (-> f64)
- : [x] "{xmm0}" (x)
- );
-}
diff --git a/std/mem.zig b/std/mem.zig
index 8a59d6251b..43961a6d14 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -6,94 +6,111 @@ const builtin = @import("builtin");
const mem = this;
pub const Allocator = struct {
- const Error = error {OutOfMemory};
+ pub const Error = error{OutOfMemory};
/// Allocate byte_count bytes and return them in a slice, with the
/// slice's pointer aligned at least to alignment bytes.
/// The returned newly allocated memory is undefined.
- allocFn: fn (self: &Allocator, byte_count: usize, alignment: u29) Error![]u8,
+ /// `alignment` is guaranteed to be >= 1
+ /// `alignment` is guaranteed to be a power of 2
+ allocFn: fn (self: *Allocator, byte_count: usize, alignment: u29) Error![]u8,
/// If `new_byte_count > old_mem.len`:
/// * `old_mem.len` is the same as what was returned from allocFn or reallocFn.
/// * alignment >= alignment of old_mem.ptr
///
/// If `new_byte_count <= old_mem.len`:
- /// * this function must return successfully.
+ /// * this function must return successfully.
/// * alignment <= alignment of old_mem.ptr
///
- /// The returned newly allocated memory is undefined.
- reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
+ /// When `reallocFn` returns,
+ /// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
+ /// as `old_mem` was when `reallocFn` is called. The bytes of
+ /// `return_value[old_mem.len..]` have undefined values.
+ /// `alignment` is guaranteed to be >= 1
+ /// `alignment` is guaranteed to be a power of 2
+ reallocFn: fn (self: *Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
/// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
- freeFn: fn (self: &Allocator, old_mem: []u8) void,
+ freeFn: fn (self: *Allocator, old_mem: []u8) void,
- fn create(self: &Allocator, comptime T: type) !&T {
+ /// Call `destroy` with the result
+ /// TODO this is deprecated. use createOne instead
+ pub fn create(self: *Allocator, init: var) Error!*@typeOf(init) {
+ const T = @typeOf(init);
+ if (@sizeOf(T) == 0) return &(T{});
+ const slice = try self.alloc(T, 1);
+ const ptr = &slice[0];
+ ptr.* = init;
+ return ptr;
+ }
+
+ /// Call `destroy` with the result.
+ /// Returns undefined memory.
+ pub fn createOne(self: *Allocator, comptime T: type) Error!*T {
+ if (@sizeOf(T) == 0) return &(T{});
const slice = try self.alloc(T, 1);
return &slice[0];
}
- fn destroy(self: &Allocator, ptr: var) void {
- self.free(ptr[0..1]);
+ /// `ptr` should be the return value of `create`
+ pub fn destroy(self: *Allocator, ptr: var) void {
+ const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
+ self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]);
}
- fn alloc(self: &Allocator, comptime T: type, n: usize) ![]T {
+ pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
return self.alignedAlloc(T, @alignOf(T), n);
}
- fn alignedAlloc(self: &Allocator, comptime T: type, comptime alignment: u29,
- n: usize) ![]align(alignment) T
- {
+ pub fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
if (n == 0) {
- return (&align(alignment) T)(undefined)[0..0];
+ return ([*]align(alignment) T)(undefined)[0..0];
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.allocFn(self, byte_count, alignment);
assert(byte_slice.len == byte_count);
- // This loop should get optimized out in ReleaseFast mode
+ // This loop gets optimized out in ReleaseFast mode
for (byte_slice) |*byte| {
- *byte = undefined;
+ byte.* = undefined;
}
- return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
+ return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
- fn realloc(self: &Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
+ pub fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
}
- fn alignedRealloc(self: &Allocator, comptime T: type, comptime alignment: u29,
- old_mem: []align(alignment) T, n: usize) ![]align(alignment) T
- {
+ pub fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
if (old_mem.len == 0) {
- return self.alloc(T, n);
+ return self.alignedAlloc(T, alignment, n);
}
if (n == 0) {
self.free(old_mem);
- return (&align(alignment) T)(undefined)[0..0];
+ return ([*]align(alignment) T)(undefined)[0..0];
}
- const old_byte_slice = ([]u8)(old_mem);
+ const old_byte_slice = @sliceToBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.reallocFn(self, old_byte_slice, byte_count, alignment);
assert(byte_slice.len == byte_count);
if (n > old_mem.len) {
- // This loop should get optimized out in ReleaseFast mode
+ // This loop gets optimized out in ReleaseFast mode
for (byte_slice[old_byte_slice.len..]) |*byte| {
- *byte = undefined;
+ byte.* = undefined;
}
}
- return ([]T)(@alignCast(alignment, byte_slice));
+ return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
/// Reallocate, but `n` must be less than or equal to `old_mem.len`.
/// Unlike `realloc`, this function cannot fail.
/// Shrinking to 0 is the same as calling `free`.
- fn shrink(self: &Allocator, comptime T: type, old_mem: []T, n: usize) []T {
+ pub fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T {
return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
}
- fn alignedShrink(self: &Allocator, comptime T: type, comptime alignment: u29,
- old_mem: []align(alignment) T, n: usize) []align(alignment) T
- {
+ pub fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
if (n == 0) {
self.free(old_mem);
return old_mem[0..0];
@@ -105,33 +122,51 @@ pub const Allocator = struct {
// n <= old_mem.len and the multiplication didn't overflow for that operation.
const byte_count = @sizeOf(T) * n;
- const byte_slice = self.reallocFn(self, ([]u8)(old_mem), byte_count, alignment) catch unreachable;
+ const byte_slice = self.reallocFn(self, @sliceToBytes(old_mem), byte_count, alignment) catch unreachable;
assert(byte_slice.len == byte_count);
- return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
+ return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
- fn free(self: &Allocator, memory: var) void {
- const bytes = ([]const u8)(memory);
- if (bytes.len == 0)
- return;
- const non_const_ptr = @intToPtr(&u8, @ptrToInt(bytes.ptr));
+ pub fn free(self: *Allocator, memory: var) void {
+ const bytes = @sliceToBytes(memory);
+ if (bytes.len == 0) return;
+ const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
self.freeFn(self, non_const_ptr[0..bytes.len]);
}
};
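
// Editor's illustrative sketch (not part of the original change): a hypothetical helper
// showing how a caller is expected to pair the public entry points above; `allocator`
// stands for any *Allocator implementation.
fn allocatorUsageExample(allocator: *Allocator) !void {
    const item = try allocator.createOne(u64); // returns undefined memory
    defer allocator.destroy(item);
    item.* = 42;

    const buf = try allocator.alloc(u8, 16);
    defer allocator.free(buf);
    set(u8, buf, 0);
}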
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
+/// dest.ptr must be <= src.ptr.
pub fn copy(comptime T: type, dest: []T, source: []const T) void {
// TODO instead of manually doing this check for the whole array
// and turning off runtime safety, the compiler should detect loops like
// this and automatically omit safety checks for loops
@setRuntimeSafety(false);
assert(dest.len >= source.len);
- for (source) |s, i| dest[i] = s;
+ for (source) |s, i|
+ dest[i] = s;
+}
+
+/// Copy all of source into dest at position 0.
+/// dest.len must be >= source.len.
+/// dest.ptr must be >= src.ptr.
+pub fn copyBackwards(comptime T: type, dest: []T, source: []const T) void {
+ // TODO instead of manually doing this check for the whole array
+ // and turning off runtime safety, the compiler should detect loops like
+ // this and automatically omit safety checks for loops
+ @setRuntimeSafety(false);
+ assert(dest.len >= source.len);
+ var i = source.len;
+ while (i > 0) {
+ i -= 1;
+ dest[i] = source[i];
+ }
}
pub fn set(comptime T: type, dest: []T, value: T) void {
- for (dest) |*d| *d = value;
+ for (dest) |*d|
+ d.* = value;
}
/// Returns true if lhs < rhs, false otherwise
@@ -163,13 +198,35 @@ pub fn eql(comptime T: type, a: []const T, b: []const T) bool {
return true;
}
+/// Returns true if all elements in a slice are equal to the scalar value provided
+pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool {
+ for (slice) |item| {
+ if (item != scalar) return false;
+ }
+ return true;
+}
+
/// Copies ::m to newly allocated memory. Caller is responsible to free it.
-pub fn dupe(allocator: &Allocator, comptime T: type, m: []const T) ![]T {
+pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
const new_buf = try allocator.alloc(T, m.len);
copy(T, new_buf, m);
return new_buf;
}
+/// Remove values from the beginning of a slice.
+pub fn trimLeft(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
+ var begin: usize = 0;
+ while (begin < slice.len and indexOfScalar(T, values_to_strip, slice[begin]) != null) : (begin += 1) {}
+ return slice[begin..];
+}
+
+/// Remove values from the end of a slice.
+pub fn trimRight(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
+ var end: usize = slice.len;
+ while (end > 0 and indexOfScalar(T, values_to_strip, slice[end - 1]) != null) : (end -= 1) {}
+ return slice[0..end];
+}
+
/// Remove values from the beginning and end of a slice.
pub fn trim(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
var begin: usize = 0;
@@ -180,6 +237,8 @@ pub fn trim(comptime T: type, slice: []const T, values_to_strip: []const T) []co
}
test "mem.trim" {
+ assert(eql(u8, trimLeft(u8, " foo\n ", " \n"), "foo\n "));
+ assert(eql(u8, trimRight(u8, " foo\n ", " \n"), " foo"));
assert(eql(u8, trim(u8, " foo\n ", " \n"), "foo"));
assert(eql(u8, trim(u8, "foo", " \n"), "foo"));
}
@@ -189,11 +248,20 @@ pub fn indexOfScalar(comptime T: type, slice: []const T, value: T) ?usize {
return indexOfScalarPos(T, slice, 0, value);
}
+/// Linear search for the last index of a scalar value inside a slice.
+pub fn lastIndexOfScalar(comptime T: type, slice: []const T, value: T) ?usize {
+ var i: usize = slice.len;
+ while (i != 0) {
+ i -= 1;
+ if (slice[i] == value) return i;
+ }
+ return null;
+}
+
pub fn indexOfScalarPos(comptime T: type, slice: []const T, start_index: usize, value: T) ?usize {
var i: usize = start_index;
while (i < slice.len) : (i += 1) {
- if (slice[i] == value)
- return i;
+ if (slice[i] == value) return i;
}
return null;
}
@@ -202,12 +270,22 @@ pub fn indexOfAny(comptime T: type, slice: []const T, values: []const T) ?usize
return indexOfAnyPos(T, slice, 0, values);
}
+pub fn lastIndexOfAny(comptime T: type, slice: []const T, values: []const T) ?usize {
+ var i: usize = slice.len;
+ while (i != 0) {
+ i -= 1;
+ for (values) |value| {
+ if (slice[i] == value) return i;
+ }
+ }
+ return null;
+}
+
pub fn indexOfAnyPos(comptime T: type, slice: []const T, start_index: usize, values: []const T) ?usize {
var i: usize = start_index;
while (i < slice.len) : (i += 1) {
for (values) |value| {
- if (slice[i] == value)
- return i;
+ if (slice[i] == value) return i;
}
}
return null;
@@ -217,25 +295,46 @@ pub fn indexOf(comptime T: type, haystack: []const T, needle: []const T) ?usize
return indexOfPos(T, haystack, 0, needle);
}
+/// Find the index in a slice of a sub-slice, searching from the end backwards.
+/// To start looking at a different index, slice the haystack first.
+/// TODO is there even a better algorithm for this?
+pub fn lastIndexOf(comptime T: type, haystack: []const T, needle: []const T) ?usize {
+ if (needle.len > haystack.len) return null;
+
+ var i: usize = haystack.len - needle.len;
+ while (true) : (i -= 1) {
+ if (mem.eql(T, haystack[i .. i + needle.len], needle)) return i;
+ if (i == 0) return null;
+ }
+}
+
// TODO boyer-moore algorithm
pub fn indexOfPos(comptime T: type, haystack: []const T, start_index: usize, needle: []const T) ?usize {
- if (needle.len > haystack.len)
- return null;
+ if (needle.len > haystack.len) return null;
var i: usize = start_index;
const end = haystack.len - needle.len;
while (i <= end) : (i += 1) {
- if (eql(T, haystack[i .. i + needle.len], needle))
- return i;
+ if (eql(T, haystack[i .. i + needle.len], needle)) return i;
}
return null;
}
test "mem.indexOf" {
- assert(??indexOf(u8, "one two three four", "four") == 14);
+ assert(indexOf(u8, "one two three four", "four").? == 14);
+ assert(lastIndexOf(u8, "one two three two four", "two").? == 14);
assert(indexOf(u8, "one two three four", "gour") == null);
- assert(??indexOf(u8, "foo", "foo") == 0);
+ assert(lastIndexOf(u8, "one two three four", "gour") == null);
+ assert(indexOf(u8, "foo", "foo").? == 0);
+ assert(lastIndexOf(u8, "foo", "foo").? == 0);
assert(indexOf(u8, "foo", "fool") == null);
+ assert(lastIndexOf(u8, "foo", "lfoo") == null);
+ assert(lastIndexOf(u8, "foo", "fool") == null);
+
+ assert(indexOf(u8, "foo foo", "foo").? == 0);
+ assert(lastIndexOf(u8, "foo foo", "foo").? == 4);
+ assert(lastIndexOfAny(u8, "boo, cat", "abo").? == 6);
+ assert(lastIndexOfScalar(u8, "boo", 'o').? == 2);
}
/// Reads an integer from memory with size equal to bytes.len.
@@ -256,7 +355,7 @@ pub fn readInt(bytes: []const u8, comptime T: type, endian: builtin.Endian) T {
builtin.Endian.Little => {
const ShiftType = math.Log2Int(T);
for (bytes) |b, index| {
- result = result | (T(b) << ShiftType(index * 8));
+ result = result | (T(b) << @intCast(ShiftType, index * 8));
}
},
}
@@ -271,9 +370,12 @@ pub fn readIntBE(comptime T: type, bytes: []const u8) T {
}
assert(bytes.len == @sizeOf(T));
var result: T = 0;
- {comptime var i = 0; inline while (i < @sizeOf(T)) : (i += 1) {
- result = (result << 8) | T(bytes[i]);
- }}
+ {
+ comptime var i = 0;
+ inline while (i < @sizeOf(T)) : (i += 1) {
+ result = (result << 8) | T(bytes[i]);
+ }
+ }
return result;
}
@@ -285,9 +387,12 @@ pub fn readIntLE(comptime T: type, bytes: []const u8) T {
}
assert(bytes.len == @sizeOf(T));
var result: T = 0;
- {comptime var i = 0; inline while (i < @sizeOf(T)) : (i += 1) {
- result |= T(bytes[i]) << i * 8;
- }}
+ {
+ comptime var i = 0;
+ inline while (i < @sizeOf(T)) : (i += 1) {
+ result |= T(bytes[i]) << i * 8;
+ }
+ }
return result;
}
@@ -309,7 +414,7 @@ pub fn writeInt(buf: []u8, value: var, endian: builtin.Endian) void {
},
builtin.Endian.Little => {
for (buf) |*b| {
- *b = @truncate(u8, bits);
+ b.* = @truncate(u8, bits);
bits >>= 8;
}
},
@@ -317,7 +422,6 @@ pub fn writeInt(buf: []u8, value: var, endian: builtin.Endian) void {
assert(bits == 0);
}
-
pub fn hash_slice_u8(k: []const u8) u32 {
// FNV 32-bit hash
var h: u32 = 2166136261;
@@ -336,7 +440,7 @@ pub fn eql_slice_u8(a: []const u8, b: []const u8) bool {
/// split(" abc def ghi ", " ")
/// Will return slices for "abc", "def", "ghi", null, in that order.
pub fn split(buffer: []const u8, split_bytes: []const u8) SplitIterator {
- return SplitIterator {
+ return SplitIterator{
.index = 0,
.buffer = buffer,
.split_bytes = split_bytes,
@@ -345,22 +449,36 @@ pub fn split(buffer: []const u8, split_bytes: []const u8) SplitIterator {
test "mem.split" {
var it = split(" abc def ghi ", " ");
- assert(eql(u8, ??it.next(), "abc"));
- assert(eql(u8, ??it.next(), "def"));
- assert(eql(u8, ??it.next(), "ghi"));
+ assert(eql(u8, it.next().?, "abc"));
+ assert(eql(u8, it.next().?, "def"));
+ assert(eql(u8, it.next().?, "ghi"));
assert(it.next() == null);
}
pub fn startsWith(comptime T: type, haystack: []const T, needle: []const T) bool {
- return if (needle.len > haystack.len) false else eql(T, haystack[0 .. needle.len], needle);
+ return if (needle.len > haystack.len) false else eql(T, haystack[0..needle.len], needle);
+}
+
+test "mem.startsWith" {
+ assert(startsWith(u8, "Bob", "Bo"));
+ assert(!startsWith(u8, "Needle in haystack", "haystack"));
+}
+
+pub fn endsWith(comptime T: type, haystack: []const T, needle: []const T) bool {
+ return if (needle.len > haystack.len) false else eql(T, haystack[haystack.len - needle.len ..], needle);
+}
+
+test "mem.endsWith" {
+ assert(endsWith(u8, "Needle in haystack", "haystack"));
+ assert(!endsWith(u8, "Bob", "Bo"));
}
pub const SplitIterator = struct {
buffer: []const u8,
- split_bytes: []const u8,
+ split_bytes: []const u8,
index: usize,
- pub fn next(self: &SplitIterator) ?[]const u8 {
+ pub fn next(self: *SplitIterator) ?[]const u8 {
// move to beginning of token
while (self.index < self.buffer.len and self.isSplitByte(self.buffer[self.index])) : (self.index += 1) {}
const start = self.index;
@@ -376,14 +494,14 @@ pub const SplitIterator = struct {
}
/// Returns a slice of the remaining bytes. Does not affect iterator state.
- pub fn rest(self: &const SplitIterator) []const u8 {
+ pub fn rest(self: *const SplitIterator) []const u8 {
// move to beginning of token
var index: usize = self.index;
while (index < self.buffer.len and self.isSplitByte(self.buffer[index])) : (index += 1) {}
return self.buffer[index..];
}
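    // Illustrative sketch, assuming the iterator above: for split("abc def", " "),
    // once next() has returned "abc", rest() returns "def" while a later next()
    // still returns "def", because rest() never advances self.index.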
- fn isSplitByte(self: &const SplitIterator, byte: u8) bool {
+ fn isSplitByte(self: *const SplitIterator, byte: u8) bool {
for (self.split_bytes) |split_byte| {
if (byte == split_byte) {
return true;
@@ -395,7 +513,7 @@ pub const SplitIterator = struct {
/// Naively combines a series of strings with a separator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: &Allocator, sep: u8, strings: ...) ![]u8 {
+pub fn join(allocator: *Allocator, sep: u8, strings: ...) ![]u8 {
comptime assert(strings.len >= 1);
var total_strings_len: usize = strings.len; // 1 sep per string
{
@@ -443,29 +561,47 @@ test "testReadInt" {
}
fn testReadIntImpl() void {
{
- const bytes = []u8{ 0x12, 0x34, 0x56, 0x78 };
- assert(readInt(bytes, u32, builtin.Endian.Big) == 0x12345678);
- assert(readIntBE(u32, bytes) == 0x12345678);
- assert(readIntBE(i32, bytes) == 0x12345678);
+ const bytes = []u8{
+ 0x12,
+ 0x34,
+ 0x56,
+ 0x78,
+ };
+ assert(readInt(bytes, u32, builtin.Endian.Big) == 0x12345678);
+ assert(readIntBE(u32, bytes) == 0x12345678);
+ assert(readIntBE(i32, bytes) == 0x12345678);
assert(readInt(bytes, u32, builtin.Endian.Little) == 0x78563412);
- assert(readIntLE(u32, bytes) == 0x78563412);
- assert(readIntLE(i32, bytes) == 0x78563412);
+ assert(readIntLE(u32, bytes) == 0x78563412);
+ assert(readIntLE(i32, bytes) == 0x78563412);
}
{
- const buf = []u8{0x00, 0x00, 0x12, 0x34};
+ const buf = []u8{
+ 0x00,
+ 0x00,
+ 0x12,
+ 0x34,
+ };
const answer = readInt(buf, u64, builtin.Endian.Big);
assert(answer == 0x00001234);
}
{
- const buf = []u8{0x12, 0x34, 0x00, 0x00};
+ const buf = []u8{
+ 0x12,
+ 0x34,
+ 0x00,
+ 0x00,
+ };
const answer = readInt(buf, u64, builtin.Endian.Little);
assert(answer == 0x00003412);
}
{
- const bytes = []u8{0xff, 0xfe};
- assert(readIntBE(u16, bytes) == 0xfffe);
+ const bytes = []u8{
+ 0xff,
+ 0xfe,
+ };
+ assert(readIntBE(u16, bytes) == 0xfffe);
assert(readIntBE(i16, bytes) == -0x0002);
- assert(readIntLE(u16, bytes) == 0xfeff);
+ assert(readIntLE(u16, bytes) == 0xfeff);
assert(readIntLE(i16, bytes) == -0x0101);
}
}
@@ -478,19 +614,38 @@ fn testWriteIntImpl() void {
var bytes: [4]u8 = undefined;
writeInt(bytes[0..], u32(0x12345678), builtin.Endian.Big);
- assert(eql(u8, bytes, []u8{ 0x12, 0x34, 0x56, 0x78 }));
+ assert(eql(u8, bytes, []u8{
+ 0x12,
+ 0x34,
+ 0x56,
+ 0x78,
+ }));
writeInt(bytes[0..], u32(0x78563412), builtin.Endian.Little);
- assert(eql(u8, bytes, []u8{ 0x12, 0x34, 0x56, 0x78 }));
+ assert(eql(u8, bytes, []u8{
+ 0x12,
+ 0x34,
+ 0x56,
+ 0x78,
+ }));
writeInt(bytes[0..], u16(0x1234), builtin.Endian.Big);
- assert(eql(u8, bytes, []u8{ 0x00, 0x00, 0x12, 0x34 }));
+ assert(eql(u8, bytes, []u8{
+ 0x00,
+ 0x00,
+ 0x12,
+ 0x34,
+ }));
writeInt(bytes[0..], u16(0x1234), builtin.Endian.Little);
- assert(eql(u8, bytes, []u8{ 0x34, 0x12, 0x00, 0x00 }));
+ assert(eql(u8, bytes, []u8{
+ 0x34,
+ 0x12,
+ 0x00,
+ 0x00,
+ }));
}
-
pub fn min(comptime T: type, slice: []const T) T {
var best = slice[0];
for (slice[1..]) |item| {
@@ -515,10 +670,10 @@ test "mem.max" {
assert(max(u8, "abcdefg") == 'g');
}
-pub fn swap(comptime T: type, a: &T, b: &T) void {
- const tmp = *a;
- *a = *b;
- *b = tmp;
+pub fn swap(comptime T: type, a: *T, b: *T) void {
+ const tmp = a.*;
+ a.* = b.*;
+ b.* = tmp;
}
/// In-place order reversal of a slice
@@ -531,10 +686,22 @@ pub fn reverse(comptime T: type, items: []T) void {
}
test "std.mem.reverse" {
- var arr = []i32{ 5, 3, 1, 2, 4 };
+ var arr = []i32{
+ 5,
+ 3,
+ 1,
+ 2,
+ 4,
+ };
reverse(i32, arr[0..]);
- assert(eql(i32, arr, []i32{ 4, 2, 1, 3, 5 }));
+ assert(eql(i32, arr, []i32{
+ 4,
+ 2,
+ 1,
+ 3,
+ 5,
+ }));
}
/// In-place rotation of the values in an array ([0 1 2 3] becomes [1 2 3 0] if we rotate by 1)
@@ -546,13 +713,25 @@ pub fn rotate(comptime T: type, items: []T, amount: usize) void {
}
test "std.mem.rotate" {
- var arr = []i32{ 5, 3, 1, 2, 4 };
+ var arr = []i32{
+ 5,
+ 3,
+ 1,
+ 2,
+ 4,
+ };
rotate(i32, arr[0..], 2);
- assert(eql(i32, arr, []i32{ 1, 2, 4, 5, 3 }));
+ assert(eql(i32, arr, []i32{
+ 1,
+ 2,
+ 4,
+ 5,
+ 3,
+ }));
}
-// TODO: When https://github.com/zig-lang/zig/issues/649 is solved these can be done by
+// TODO: When https://github.com/ziglang/zig/issues/649 is solved these can be done by
// endian-casting the pointer and then dereferencing
pub fn endianSwapIfLe(comptime T: type, x: T) T {
diff --git a/std/net.zig b/std/net.zig
index 8e1b8d97b2..8c1aeb92d7 100644
--- a/std/net.zig
+++ b/std/net.zig
@@ -19,9 +19,9 @@ pub const Address = struct {
os_addr: OsAddress,
pub fn initIp4(ip4: u32, port: u16) Address {
- return Address {
- .os_addr = posix.sockaddr {
- .in = posix.sockaddr_in {
+ return Address{
+ .os_addr = posix.sockaddr{
+ .in = posix.sockaddr_in{
.family = posix.AF_INET,
.port = std.mem.endianSwapIfLe(u16, port),
.addr = ip4,
@@ -31,11 +31,11 @@ pub const Address = struct {
};
}
- pub fn initIp6(ip6: &const Ip6Addr, port: u16) Address {
- return Address {
+ pub fn initIp6(ip6: *const Ip6Addr, port: u16) Address {
+ return Address{
.family = posix.AF_INET6,
- .os_addr = posix.sockaddr {
- .in6 = posix.sockaddr_in6 {
+ .os_addr = posix.sockaddr{
+ .in6 = posix.sockaddr_in6{
.family = posix.AF_INET6,
.port = std.mem.endianSwapIfLe(u16, port),
.flowinfo = 0,
@@ -46,17 +46,15 @@ pub const Address = struct {
};
}
- pub fn initPosix(addr: &const posix.sockaddr) Address {
- return Address {
- .os_addr = *addr,
- };
+ pub fn initPosix(addr: *const posix.sockaddr) Address {
+ return Address{ .os_addr = addr.* };
}
- pub fn format(self: &const Address, out_stream: var) !void {
+ pub fn format(self: *const Address, out_stream: var) !void {
switch (self.os_addr.in.family) {
posix.AF_INET => {
const native_endian_port = std.mem.endianSwapIfLe(u16, self.os_addr.in.port);
- const bytes = ([]const u8)((&self.os_addr.in.addr)[0..1]);
+ const bytes = ([]const u8)((&self.os_addr.in.addr)[0..1]);
try out_stream.print("{}.{}.{}.{}:{}", bytes[0], bytes[1], bytes[2], bytes[3], native_endian_port);
},
posix.AF_INET6 => {
@@ -70,7 +68,7 @@ pub const Address = struct {
pub fn parseIp4(buf: []const u8) !u32 {
var result: u32 = undefined;
- const out_ptr = ([]u8)((&result)[0..1]);
+ const out_ptr = @sliceToBytes((*[1]u32)(&result)[0..]);
var x: u8 = 0;
var index: u8 = 0;
@@ -98,7 +96,7 @@ pub fn parseIp4(buf: []const u8) !u32 {
}
} else {
return error.InvalidCharacter;
- }
+ }
}
if (index == 3 and saw_any_digits) {
out_ptr[index] = x;
diff --git a/std/os/child_process.zig b/std/os/child_process.zig
index 8bb8b2d7e7..693129eea8 100644
--- a/std/os/child_process.zig
+++ b/std/os/child_process.zig
@@ -20,7 +20,7 @@ pub const ChildProcess = struct {
pub handle: if (is_windows) windows.HANDLE else void,
pub thread_handle: if (is_windows) windows.HANDLE else void,
- pub allocator: &mem.Allocator,
+ pub allocator: *mem.Allocator,
pub stdin: ?os.File,
pub stdout: ?os.File,
@@ -31,7 +31,7 @@ pub const ChildProcess = struct {
pub argv: []const []const u8,
/// Leave as null to use the current env map using the supplied allocator.
- pub env_map: ?&const BufMap,
+ pub env_map: ?*const BufMap,
pub stdin_behavior: StdIo,
pub stdout_behavior: StdIo,
@@ -47,9 +47,9 @@ pub const ChildProcess = struct {
pub cwd: ?[]const u8,
err_pipe: if (is_windows) void else [2]i32,
- llnode: if (is_windows) void else LinkedList(&ChildProcess).Node,
+ llnode: if (is_windows) void else LinkedList(*ChildProcess).Node,
- pub const SpawnError = error {
+ pub const SpawnError = error{
ProcessFdQuotaExceeded,
Unexpected,
NotDir,
@@ -84,11 +84,8 @@ pub const ChildProcess = struct {
/// First argument in argv is the executable.
/// On success must call deinit.
- pub fn init(argv: []const []const u8, allocator: &mem.Allocator) !&ChildProcess {
- const child = try allocator.create(ChildProcess);
- errdefer allocator.destroy(child);
-
- *child = ChildProcess {
+ pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
+ const child = try allocator.create(ChildProcess{
.allocator = allocator,
.argv = argv,
.pid = undefined,
@@ -99,27 +96,29 @@ pub const ChildProcess = struct {
.term = null,
.env_map = null,
.cwd = null,
- .uid = if (is_windows) {} else null,
- .gid = if (is_windows) {} else null,
+ .uid = if (is_windows) {} else null,
+ .gid = if (is_windows) {} else null,
.stdin = null,
.stdout = null,
.stderr = null,
.stdin_behavior = StdIo.Inherit,
.stdout_behavior = StdIo.Inherit,
.stderr_behavior = StdIo.Inherit,
- };
-
+ });
+ errdefer allocator.destroy(child);
return child;
}
- pub fn setUserName(self: &ChildProcess, name: []const u8) !void {
+ pub fn setUserName(self: *ChildProcess, name: []const u8) !void {
const user_info = try os.getUserInfo(name);
self.uid = user_info.uid;
self.gid = user_info.gid;
}
/// On success must call `kill` or `wait`.
- pub fn spawn(self: &ChildProcess) !void {
+ pub fn spawn(self: *ChildProcess) !void {
if (is_windows) {
return self.spawnWindows();
} else {
@@ -127,13 +126,13 @@ pub const ChildProcess = struct {
}
}
- pub fn spawnAndWait(self: &ChildProcess) !Term {
+ pub fn spawnAndWait(self: *ChildProcess) !Term {
try self.spawn();
return self.wait();
}
/// Forcibly terminates child process and then cleans up all resources.
- pub fn kill(self: &ChildProcess) !Term {
+ pub fn kill(self: *ChildProcess) !Term {
if (is_windows) {
return self.killWindows(1);
} else {
@@ -141,7 +140,7 @@ pub const ChildProcess = struct {
}
}
- pub fn killWindows(self: &ChildProcess, exit_code: windows.UINT) !Term {
+ pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -154,10 +153,10 @@ pub const ChildProcess = struct {
};
}
try self.waitUnwrappedWindows();
- return ??self.term;
+ return self.term.?;
}
- pub fn killPosix(self: &ChildProcess) !Term {
+ pub fn killPosix(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -173,11 +172,11 @@ pub const ChildProcess = struct {
};
}
self.waitUnwrapped();
- return ??self.term;
+ return self.term.?;
}
/// Blocks until child process terminates and then cleans up all resources.
- pub fn wait(self: &ChildProcess) !Term {
+ pub fn wait(self: *ChildProcess) !Term {
if (is_windows) {
return self.waitWindows();
} else {
@@ -193,9 +192,7 @@ pub const ChildProcess = struct {
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
- pub fn exec(allocator: &mem.Allocator, argv: []const []const u8, cwd: ?[]const u8,
- env_map: ?&const BufMap, max_output_size: usize) !ExecResult
- {
+ pub fn exec(allocator: *mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?*const BufMap, max_output_size: usize) !ExecResult {
const child = try ChildProcess.init(argv, allocator);
defer child.deinit();
@@ -212,52 +209,52 @@ pub const ChildProcess = struct {
defer Buffer.deinit(&stdout);
defer Buffer.deinit(&stderr);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
try stdout_file_in_stream.stream.readAllBuffer(&stdout, max_output_size);
try stderr_file_in_stream.stream.readAllBuffer(&stderr, max_output_size);
- return ExecResult {
+ return ExecResult{
.term = try child.wait(),
.stdout = stdout.toOwnedSlice(),
.stderr = stderr.toOwnedSlice(),
};
}
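    // Illustrative usage sketch; `allocator` and the "echo" argv below are
    // hypothetical, and per the doc comment above the caller frees
    // result.stdout and result.stderr.
    fn execUsageSketch(allocator: *mem.Allocator) !void {
        const result = try exec(allocator, [][]const u8{ "echo", "hi" }, null, null, 10 * 1024);
        defer allocator.free(result.stdout);
        defer allocator.free(result.stderr);
    }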
- fn waitWindows(self: &ChildProcess) !Term {
+ fn waitWindows(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
}
try self.waitUnwrappedWindows();
- return ??self.term;
+ return self.term.?;
}
- fn waitPosix(self: &ChildProcess) !Term {
+ fn waitPosix(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
}
self.waitUnwrapped();
- return ??self.term;
+ return self.term.?;
}
- pub fn deinit(self: &ChildProcess) void {
+ pub fn deinit(self: *ChildProcess) void {
self.allocator.destroy(self);
}
- fn waitUnwrappedWindows(self: &ChildProcess) !void {
+ fn waitUnwrappedWindows(self: *ChildProcess) !void {
const result = os.windowsWaitSingle(self.handle, windows.INFINITE);
self.term = (SpawnError!Term)(x: {
var exit_code: windows.DWORD = undefined;
if (windows.GetExitCodeProcess(self.handle, &exit_code) == 0) {
- break :x Term { .Unknown = 0 };
+ break :x Term{ .Unknown = 0 };
} else {
- break :x Term { .Exited = @bitCast(i32, exit_code)};
+ break :x Term{ .Exited = @bitCast(i32, exit_code) };
}
});
@@ -267,7 +264,7 @@ pub const ChildProcess = struct {
return result;
}
- fn waitUnwrapped(self: &ChildProcess) void {
+ fn waitUnwrapped(self: *ChildProcess) void {
var status: i32 = undefined;
while (true) {
const err = posix.getErrno(posix.waitpid(self.pid, &status, 0));
@@ -283,17 +280,26 @@ pub const ChildProcess = struct {
}
}
- fn handleWaitResult(self: &ChildProcess, status: i32) void {
+ fn handleWaitResult(self: *ChildProcess, status: i32) void {
self.term = self.cleanupAfterWait(status);
}
- fn cleanupStreams(self: &ChildProcess) void {
- if (self.stdin) |*stdin| { stdin.close(); self.stdin = null; }
- if (self.stdout) |*stdout| { stdout.close(); self.stdout = null; }
- if (self.stderr) |*stderr| { stderr.close(); self.stderr = null; }
+ fn cleanupStreams(self: *ChildProcess) void {
+ if (self.stdin) |*stdin| {
+ stdin.close();
+ self.stdin = null;
+ }
+ if (self.stdout) |*stdout| {
+ stdout.close();
+ self.stdout = null;
+ }
+ if (self.stderr) |*stderr| {
+ stderr.close();
+ self.stderr = null;
+ }
}
- fn cleanupAfterWait(self: &ChildProcess, status: i32) !Term {
+ fn cleanupAfterWait(self: *ChildProcess, status: i32) !Term {
defer {
os.close(self.err_pipe[0]);
os.close(self.err_pipe[1]);
@@ -309,7 +315,7 @@ pub const ChildProcess = struct {
// Here we potentially return the fork child's error
// from the parent pid.
if (err_int != @maxValue(ErrInt)) {
- return SpawnError(err_int);
+ return @errSetCast(SpawnError, @intToError(err_int));
}
return statusToTerm(status);
@@ -317,25 +323,30 @@ pub const ChildProcess = struct {
fn statusToTerm(status: i32) Term {
return if (posix.WIFEXITED(status))
- Term { .Exited = posix.WEXITSTATUS(status) }
+ Term{ .Exited = posix.WEXITSTATUS(status) }
else if (posix.WIFSIGNALED(status))
- Term { .Signal = posix.WTERMSIG(status) }
+ Term{ .Signal = posix.WTERMSIG(status) }
else if (posix.WIFSTOPPED(status))
- Term { .Stopped = posix.WSTOPSIG(status) }
+ Term{ .Stopped = posix.WSTOPSIG(status) }
else
- Term { .Unknown = status }
- ;
+ Term{ .Unknown = status };
}
- fn spawnPosix(self: &ChildProcess) !void {
+ fn spawnPosix(self: *ChildProcess) !void {
const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try makePipe() else undefined;
- errdefer if (self.stdin_behavior == StdIo.Pipe) { destroyPipe(stdin_pipe); };
+ errdefer if (self.stdin_behavior == StdIo.Pipe) {
+ destroyPipe(stdin_pipe);
+ };
const stdout_pipe = if (self.stdout_behavior == StdIo.Pipe) try makePipe() else undefined;
- errdefer if (self.stdout_behavior == StdIo.Pipe) { destroyPipe(stdout_pipe); };
+ errdefer if (self.stdout_behavior == StdIo.Pipe) {
+ destroyPipe(stdout_pipe);
+ };
const stderr_pipe = if (self.stderr_behavior == StdIo.Pipe) try makePipe() else undefined;
- errdefer if (self.stderr_behavior == StdIo.Pipe) { destroyPipe(stderr_pipe); };
+ errdefer if (self.stderr_behavior == StdIo.Pipe) {
+ destroyPipe(stderr_pipe);
+ };
const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
const dev_null_fd = if (any_ignore) blk: {
@@ -346,7 +357,9 @@ pub const ChildProcess = struct {
} else blk: {
break :blk undefined;
};
- defer { if (any_ignore) os.close(dev_null_fd); }
+ defer {
+ if (any_ignore) os.close(dev_null_fd);
+ }
var env_map_owned: BufMap = undefined;
var we_own_env_map: bool = undefined;
@@ -358,7 +371,9 @@ pub const ChildProcess = struct {
env_map_owned = try os.getEnvMap(self.allocator);
break :x &env_map_owned;
};
- defer { if (we_own_env_map) env_map_owned.deinit(); }
+ defer {
+ if (we_own_env_map) env_map_owned.deinit();
+ }
// This pipe is used to communicate errors between the time of fork
// and execve from the child process to the parent process.
@@ -375,17 +390,12 @@ pub const ChildProcess = struct {
}
if (pid_result == 0) {
// we are the child
-
- setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch
- |err| forkChildErrReport(err_pipe[1], err);
- setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch
- |err| forkChildErrReport(err_pipe[1], err);
- setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch
- |err| forkChildErrReport(err_pipe[1], err);
+ setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
+ setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
+ setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
if (self.cwd) |cwd| {
- os.changeCurDir(self.allocator, cwd) catch
- |err| forkChildErrReport(err_pipe[1], err);
+ os.changeCurDir(self.allocator, cwd) catch |err| forkChildErrReport(err_pipe[1], err);
}
if (self.gid) |gid| {
@@ -396,12 +406,11 @@ pub const ChildProcess = struct {
os.posix_setreuid(uid, uid) catch |err| forkChildErrReport(err_pipe[1], err);
}
- os.posixExecve(self.argv, env_map, self.allocator) catch
- |err| forkChildErrReport(err_pipe[1], err);
+ os.posixExecve(self.argv, env_map, self.allocator) catch |err| forkChildErrReport(err_pipe[1], err);
}
// we are the parent
- const pid = i32(pid_result);
+ const pid = @intCast(i32, pid_result);
if (self.stdin_behavior == StdIo.Pipe) {
self.stdin = os.File.openHandle(stdin_pipe[1]);
} else {
@@ -420,40 +429,44 @@ pub const ChildProcess = struct {
self.pid = pid;
self.err_pipe = err_pipe;
- self.llnode = LinkedList(&ChildProcess).Node.init(self);
+ self.llnode = LinkedList(*ChildProcess).Node.init(self);
self.term = null;
- if (self.stdin_behavior == StdIo.Pipe) { os.close(stdin_pipe[0]); }
- if (self.stdout_behavior == StdIo.Pipe) { os.close(stdout_pipe[1]); }
- if (self.stderr_behavior == StdIo.Pipe) { os.close(stderr_pipe[1]); }
+ if (self.stdin_behavior == StdIo.Pipe) {
+ os.close(stdin_pipe[0]);
+ }
+ if (self.stdout_behavior == StdIo.Pipe) {
+ os.close(stdout_pipe[1]);
+ }
+ if (self.stderr_behavior == StdIo.Pipe) {
+ os.close(stderr_pipe[1]);
+ }
}
- fn spawnWindows(self: &ChildProcess) !void {
- const saAttr = windows.SECURITY_ATTRIBUTES {
+ fn spawnWindows(self: *ChildProcess) !void {
+ const saAttr = windows.SECURITY_ATTRIBUTES{
.nLength = @sizeOf(windows.SECURITY_ATTRIBUTES),
.bInheritHandle = windows.TRUE,
.lpSecurityDescriptor = null,
};
- const any_ignore = (self.stdin_behavior == StdIo.Ignore or
- self.stdout_behavior == StdIo.Ignore or
- self.stderr_behavior == StdIo.Ignore);
+ const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
const nul_handle = if (any_ignore) blk: {
const nul_file_path = "NUL";
var fixed_buffer_mem: [nul_file_path.len + 1]u8 = undefined;
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- break :blk try os.windowsOpen(&fixed_allocator.allocator, "NUL", windows.GENERIC_READ, windows.FILE_SHARE_READ,
- windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL);
+ break :blk try os.windowsOpen(&fixed_allocator.allocator, "NUL", windows.GENERIC_READ, windows.FILE_SHARE_READ, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL);
} else blk: {
break :blk undefined;
};
- defer { if (any_ignore) os.close(nul_handle); }
+ defer {
+ if (any_ignore) os.close(nul_handle);
+ }
if (any_ignore) {
try windowsSetHandleInfo(nul_handle, windows.HANDLE_FLAG_INHERIT, 0);
}
-
var g_hChildStd_IN_Rd: ?windows.HANDLE = null;
var g_hChildStd_IN_Wr: ?windows.HANDLE = null;
switch (self.stdin_behavior) {
@@ -470,7 +483,9 @@ pub const ChildProcess = struct {
g_hChildStd_IN_Rd = null;
},
}
- errdefer if (self.stdin_behavior == StdIo.Pipe) { windowsDestroyPipe(g_hChildStd_IN_Rd, g_hChildStd_IN_Wr); };
+ errdefer if (self.stdin_behavior == StdIo.Pipe) {
+ windowsDestroyPipe(g_hChildStd_IN_Rd, g_hChildStd_IN_Wr);
+ };
var g_hChildStd_OUT_Rd: ?windows.HANDLE = null;
var g_hChildStd_OUT_Wr: ?windows.HANDLE = null;
@@ -488,7 +503,9 @@ pub const ChildProcess = struct {
g_hChildStd_OUT_Wr = null;
},
}
- errdefer if (self.stdin_behavior == StdIo.Pipe) { windowsDestroyPipe(g_hChildStd_OUT_Rd, g_hChildStd_OUT_Wr); };
+ errdefer if (self.stdin_behavior == StdIo.Pipe) {
+ windowsDestroyPipe(g_hChildStd_OUT_Rd, g_hChildStd_OUT_Wr);
+ };
var g_hChildStd_ERR_Rd: ?windows.HANDLE = null;
var g_hChildStd_ERR_Wr: ?windows.HANDLE = null;
@@ -506,12 +523,14 @@ pub const ChildProcess = struct {
g_hChildStd_ERR_Wr = null;
},
}
- errdefer if (self.stdin_behavior == StdIo.Pipe) { windowsDestroyPipe(g_hChildStd_ERR_Rd, g_hChildStd_ERR_Wr); };
+ errdefer if (self.stdin_behavior == StdIo.Pipe) {
+ windowsDestroyPipe(g_hChildStd_ERR_Rd, g_hChildStd_ERR_Wr);
+ };
const cmd_line = try windowsCreateCommandLine(self.allocator, self.argv);
defer self.allocator.free(cmd_line);
- var siStartInfo = windows.STARTUPINFOA {
+ var siStartInfo = windows.STARTUPINFOA{
.cb = @sizeOf(windows.STARTUPINFOA),
.hStdError = g_hChildStd_ERR_Wr,
.hStdOutput = g_hChildStd_OUT_Wr,
@@ -534,19 +553,11 @@ pub const ChildProcess = struct {
};
var piProcInfo: windows.PROCESS_INFORMATION = undefined;
- const cwd_slice = if (self.cwd) |cwd|
- try cstr.addNullByte(self.allocator, cwd)
- else
- null
- ;
+ const cwd_slice = if (self.cwd) |cwd| try cstr.addNullByte(self.allocator, cwd) else null;
defer if (cwd_slice) |cwd| self.allocator.free(cwd);
const cwd_ptr = if (cwd_slice) |cwd| cwd.ptr else null;
- const maybe_envp_buf = if (self.env_map) |env_map|
- try os.createWindowsEnvBlock(self.allocator, env_map)
- else
- null
- ;
+ const maybe_envp_buf = if (self.env_map) |env_map| try os.createWindowsEnvBlock(self.allocator, env_map) else null;
defer if (maybe_envp_buf) |envp_buf| self.allocator.free(envp_buf);
const envp_ptr = if (maybe_envp_buf) |envp_buf| envp_buf.ptr else null;
@@ -563,11 +574,8 @@ pub const ChildProcess = struct {
};
defer self.allocator.free(app_name);
- windowsCreateProcess(app_name.ptr, cmd_line.ptr, envp_ptr, cwd_ptr,
- &siStartInfo, &piProcInfo) catch |no_path_err|
- {
- if (no_path_err != error.FileNotFound)
- return no_path_err;
+ windowsCreateProcess(app_name.ptr, cmd_line.ptr, envp_ptr, cwd_ptr, &siStartInfo, &piProcInfo) catch |no_path_err| {
+ if (no_path_err != error.FileNotFound) return no_path_err;
const PATH = try os.getEnvVarOwned(self.allocator, "PATH");
defer self.allocator.free(PATH);
@@ -577,9 +585,7 @@ pub const ChildProcess = struct {
const joined_path = try os.path.join(self.allocator, search_path, app_name);
defer self.allocator.free(joined_path);
- if (windowsCreateProcess(joined_path.ptr, cmd_line.ptr, envp_ptr, cwd_ptr,
- &siStartInfo, &piProcInfo)) |_|
- {
+ if (windowsCreateProcess(joined_path.ptr, cmd_line.ptr, envp_ptr, cwd_ptr, &siStartInfo, &piProcInfo)) |_| {
break;
} else |err| if (err == error.FileNotFound) {
continue;
@@ -609,9 +615,15 @@ pub const ChildProcess = struct {
self.thread_handle = piProcInfo.hThread;
self.term = null;
- if (self.stdin_behavior == StdIo.Pipe) { os.close(??g_hChildStd_IN_Rd); }
- if (self.stderr_behavior == StdIo.Pipe) { os.close(??g_hChildStd_ERR_Wr); }
- if (self.stdout_behavior == StdIo.Pipe) { os.close(??g_hChildStd_OUT_Wr); }
+ if (self.stdin_behavior == StdIo.Pipe) {
+ os.close(g_hChildStd_IN_Rd.?);
+ }
+ if (self.stderr_behavior == StdIo.Pipe) {
+ os.close(g_hChildStd_ERR_Wr.?);
+ }
+ if (self.stdout_behavior == StdIo.Pipe) {
+ os.close(g_hChildStd_OUT_Wr.?);
+ }
}
fn setUpChildIo(stdio: StdIo, pipe_fd: i32, std_fileno: i32, dev_null_fd: i32) !void {
@@ -622,15 +634,10 @@ pub const ChildProcess = struct {
StdIo.Ignore => try os.posixDup2(dev_null_fd, std_fileno),
}
}
-
};
-fn windowsCreateProcess(app_name: &u8, cmd_line: &u8, envp_ptr: ?&u8, cwd_ptr: ?&u8,
- lpStartupInfo: &windows.STARTUPINFOA, lpProcessInformation: &windows.PROCESS_INFORMATION) !void
-{
- if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0,
- @ptrCast(?&c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0)
- {
+fn windowsCreateProcess(app_name: [*]u8, cmd_line: [*]u8, envp_ptr: ?[*]u8, cwd_ptr: ?[*]u8, lpStartupInfo: *windows.STARTUPINFOA, lpProcessInformation: *windows.PROCESS_INFORMATION) !void {
+ if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0, @ptrCast(?*c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.FILE_NOT_FOUND, windows.ERROR.PATH_NOT_FOUND => error.FileNotFound,
@@ -641,18 +648,16 @@ fn windowsCreateProcess(app_name: &u8, cmd_line: &u8, envp_ptr: ?&u8, cwd_ptr: ?
}
}
-
-
-
/// Caller must dealloc.
/// Guarantees a null byte at result[result.len].
-fn windowsCreateCommandLine(allocator: &mem.Allocator, argv: []const []const u8) ![]u8 {
+fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![]u8 {
var buf = try Buffer.initSize(allocator, 0);
defer buf.deinit();
+ var buf_stream = &io.BufferOutStream.init(&buf).stream;
+
for (argv) |arg, arg_i| {
- if (arg_i != 0)
- try buf.appendByte(' ');
+ if (arg_i != 0) try buf.appendByte(' ');
if (mem.indexOfAny(u8, arg, " \t\n\"") == null) {
try buf.append(arg);
continue;
@@ -663,18 +668,18 @@ fn windowsCreateCommandLine(allocator: &mem.Allocator, argv: []const []const u8)
switch (byte) {
'\\' => backslash_count += 1,
'"' => {
- try buf.appendByteNTimes('\\', backslash_count * 2 + 1);
+ try buf_stream.writeByteNTimes('\\', backslash_count * 2 + 1);
try buf.appendByte('"');
backslash_count = 0;
},
else => {
- try buf.appendByteNTimes('\\', backslash_count);
+ try buf_stream.writeByteNTimes('\\', backslash_count);
try buf.appendByte(byte);
backslash_count = 0;
},
}
}
- try buf.appendByteNTimes('\\', backslash_count * 2);
+ try buf_stream.writeByteNTimes('\\', backslash_count * 2);
try buf.appendByte('"');
}
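        // Worked example of the quoting above (illustrative): for argv
        // { "foo.exe", "hello world", "say \"hi\"" } the command line becomes
        //     foo.exe "hello world" "say \"hi\""
        // N backslashes directly before an embedded quote become 2N+1
        // backslashes; N trailing backslashes become 2N before the closing quote.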
@@ -686,12 +691,11 @@ fn windowsDestroyPipe(rd: ?windows.HANDLE, wr: ?windows.HANDLE) void {
if (wr) |h| os.close(h);
}
-
// TODO: workaround for bug where the `const` from `&const` is dropped when the type is
// a namespace field lookup
const SECURITY_ATTRIBUTES = windows.SECURITY_ATTRIBUTES;
-fn windowsMakePipe(rd: &windows.HANDLE, wr: &windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
+fn windowsMakePipe(rd: *windows.HANDLE, wr: *windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
if (windows.CreatePipe(rd, wr, sattr, 0) == 0) {
const err = windows.GetLastError();
return switch (err) {
@@ -709,24 +713,24 @@ fn windowsSetHandleInfo(h: windows.HANDLE, mask: windows.DWORD, flags: windows.D
}
}
-fn windowsMakePipeIn(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
+fn windowsMakePipeIn(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
var rd_h: windows.HANDLE = undefined;
var wr_h: windows.HANDLE = undefined;
try windowsMakePipe(&rd_h, &wr_h, sattr);
errdefer windowsDestroyPipe(rd_h, wr_h);
try windowsSetHandleInfo(wr_h, windows.HANDLE_FLAG_INHERIT, 0);
- *rd = rd_h;
- *wr = wr_h;
+ rd.* = rd_h;
+ wr.* = wr_h;
}
-fn windowsMakePipeOut(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
+fn windowsMakePipeOut(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
var rd_h: windows.HANDLE = undefined;
var wr_h: windows.HANDLE = undefined;
try windowsMakePipe(&rd_h, &wr_h, sattr);
errdefer windowsDestroyPipe(rd_h, wr_h);
try windowsSetHandleInfo(rd_h, windows.HANDLE_FLAG_INHERIT, 0);
- *rd = rd_h;
- *wr = wr_h;
+ rd.* = rd_h;
+ wr.* = wr_h;
}
fn makePipe() ![2]i32 {
@@ -741,15 +745,15 @@ fn makePipe() ![2]i32 {
return fds;
}
-fn destroyPipe(pipe: &const [2]i32) void {
- os.close((*pipe)[0]);
- os.close((*pipe)[1]);
+fn destroyPipe(pipe: *const [2]i32) void {
+ os.close((pipe.*)[0]);
+ os.close((pipe.*)[1]);
}
// Child of fork calls this to report an error to the fork parent.
// Then the child exits.
fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
- _ = writeIntFd(fd, ErrInt(err));
+ _ = writeIntFd(fd, ErrInt(@errorToInt(err)));
posix.exit(1);
}
diff --git a/std/os/darwin.zig b/std/os/darwin.zig
index 42b9917210..cf67b01d5a 100644
--- a/std/os/darwin.zig
+++ b/std/os/darwin.zig
@@ -2,7 +2,7 @@ const std = @import("../index.zig");
const c = std.c;
const assert = std.debug.assert;
-pub use @import("darwin_errno.zig");
+pub use @import("darwin/errno.zig");
pub const PATH_MAX = 1024;
@@ -10,33 +10,75 @@ pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;
-pub const PROT_NONE = 0x00; /// [MC2] no permissions
-pub const PROT_READ = 0x01; /// [MC2] pages can be read
-pub const PROT_WRITE = 0x02; /// [MC2] pages can be written
-pub const PROT_EXEC = 0x04; /// [MC2] pages can be executed
+/// [MC2] no permissions
+pub const PROT_NONE = 0x00;
-pub const MAP_ANONYMOUS = 0x1000; /// allocated from memory, swap space
-pub const MAP_FILE = 0x0000; /// map from file (default)
-pub const MAP_FIXED = 0x0010; /// interpret addr exactly
-pub const MAP_HASSEMAPHORE = 0x0200; /// region may contain semaphores
-pub const MAP_PRIVATE = 0x0002; /// changes are private
-pub const MAP_SHARED = 0x0001; /// share changes
-pub const MAP_NOCACHE = 0x0400; /// don't cache pages for this mapping
-pub const MAP_NORESERVE = 0x0040; /// don't reserve needed swap area
+/// [MC2] pages can be read
+pub const PROT_READ = 0x01;
+
+/// [MC2] pages can be written
+pub const PROT_WRITE = 0x02;
+
+/// [MC2] pages can be executed
+pub const PROT_EXEC = 0x04;
+
+/// allocated from memory, swap space
+pub const MAP_ANONYMOUS = 0x1000;
+
+/// map from file (default)
+pub const MAP_FILE = 0x0000;
+
+/// interpret addr exactly
+pub const MAP_FIXED = 0x0010;
+
+/// region may contain semaphores
+pub const MAP_HASSEMAPHORE = 0x0200;
+
+/// changes are private
+pub const MAP_PRIVATE = 0x0002;
+
+/// share changes
+pub const MAP_SHARED = 0x0001;
+
+/// don't cache pages for this mapping
+pub const MAP_NOCACHE = 0x0400;
+
+/// don't reserve needed swap area
+pub const MAP_NORESERVE = 0x0040;
pub const MAP_FAILED = @maxValue(usize);
-pub const WNOHANG = 0x00000001; /// [XSI] no hang in wait/no child to reap
-pub const WUNTRACED = 0x00000002; /// [XSI] notify on stop, untraced child
+/// [XSI] no hang in wait/no child to reap
+pub const WNOHANG = 0x00000001;
-pub const SA_ONSTACK = 0x0001; /// take signal on signal stack
-pub const SA_RESTART = 0x0002; /// restart system on signal return
-pub const SA_RESETHAND = 0x0004; /// reset to SIG_DFL when taking signal
-pub const SA_NOCLDSTOP = 0x0008; /// do not generate SIGCHLD on child stop
-pub const SA_NODEFER = 0x0010; /// don't mask the signal we're delivering
-pub const SA_NOCLDWAIT = 0x0020; /// don't keep zombies around
-pub const SA_SIGINFO = 0x0040; /// signal handler with SA_SIGINFO args
-pub const SA_USERTRAMP = 0x0100; /// do not bounce off kernel's sigtramp
-pub const SA_64REGSET = 0x0200; /// signal handler with SA_SIGINFO args with 64bit regs information
+/// [XSI] notify on stop, untraced child
+pub const WUNTRACED = 0x00000002;
+
+/// take signal on signal stack
+pub const SA_ONSTACK = 0x0001;
+
+/// restart system on signal return
+pub const SA_RESTART = 0x0002;
+
+/// reset to SIG_DFL when taking signal
+pub const SA_RESETHAND = 0x0004;
+
+/// do not generate SIGCHLD on child stop
+pub const SA_NOCLDSTOP = 0x0008;
+
+/// don't mask the signal we're delivering
+pub const SA_NODEFER = 0x0010;
+
+/// don't keep zombies around
+pub const SA_NOCLDWAIT = 0x0020;
+
+/// signal handler with SA_SIGINFO args
+pub const SA_SIGINFO = 0x0040;
+
+/// do not bounce off kernel's sigtramp
+pub const SA_USERTRAMP = 0x0100;
+
+/// signal handler with SA_SIGINFO args with 64bit regs information
+pub const SA_64REGSET = 0x0200;
pub const O_LARGEFILE = 0x0000;
pub const O_PATH = 0x0000;
@@ -46,20 +88,47 @@ pub const X_OK = 1;
pub const W_OK = 2;
pub const R_OK = 4;
-pub const O_RDONLY = 0x0000; /// open for reading only
-pub const O_WRONLY = 0x0001; /// open for writing only
-pub const O_RDWR = 0x0002; /// open for reading and writing
-pub const O_NONBLOCK = 0x0004; /// do not block on open or for data to become available
-pub const O_APPEND = 0x0008; /// append on each write
-pub const O_CREAT = 0x0200; /// create file if it does not exist
-pub const O_TRUNC = 0x0400; /// truncate size to 0
-pub const O_EXCL = 0x0800; /// error if O_CREAT and the file exists
-pub const O_SHLOCK = 0x0010; /// atomically obtain a shared lock
-pub const O_EXLOCK = 0x0020; /// atomically obtain an exclusive lock
-pub const O_NOFOLLOW = 0x0100; /// do not follow symlinks
-pub const O_SYMLINK = 0x200000; /// allow open of symlinks
-pub const O_EVTONLY = 0x8000; /// descriptor requested for event notifications only
-pub const O_CLOEXEC = 0x1000000; /// mark as close-on-exec
+/// open for reading only
+pub const O_RDONLY = 0x0000;
+
+/// open for writing only
+pub const O_WRONLY = 0x0001;
+
+/// open for reading and writing
+pub const O_RDWR = 0x0002;
+
+/// do not block on open or for data to become available
+pub const O_NONBLOCK = 0x0004;
+
+/// append on each write
+pub const O_APPEND = 0x0008;
+
+/// create file if it does not exist
+pub const O_CREAT = 0x0200;
+
+/// truncate size to 0
+pub const O_TRUNC = 0x0400;
+
+/// error if O_CREAT and the file exists
+pub const O_EXCL = 0x0800;
+
+/// atomically obtain a shared lock
+pub const O_SHLOCK = 0x0010;
+
+/// atomically obtain an exclusive lock
+pub const O_EXLOCK = 0x0020;
+
+/// do not follow symlinks
+pub const O_NOFOLLOW = 0x0100;
+
+/// allow open of symlinks
+pub const O_SYMLINK = 0x200000;
+
+/// descriptor requested for event notifications only
+pub const O_EVTONLY = 0x8000;
+
+/// mark as close-on-exec
+pub const O_CLOEXEC = 0x1000000;
pub const O_ACCMODE = 3;
pub const O_ALERT = 536870912;
@@ -87,57 +156,445 @@ pub const DT_LNK = 10;
pub const DT_SOCK = 12;
pub const DT_WHT = 14;
-pub const SIG_BLOCK = 1; /// block specified signal set
-pub const SIG_UNBLOCK = 2; /// unblock specified signal set
-pub const SIG_SETMASK = 3; /// set specified signal set
+/// block specified signal set
+pub const SIG_BLOCK = 1;
-pub const SIGHUP = 1; /// hangup
-pub const SIGINT = 2; /// interrupt
-pub const SIGQUIT = 3; /// quit
-pub const SIGILL = 4; /// illegal instruction (not reset when caught)
-pub const SIGTRAP = 5; /// trace trap (not reset when caught)
-pub const SIGABRT = 6; /// abort()
-pub const SIGPOLL = 7; /// pollable event ([XSR] generated, not supported)
-pub const SIGIOT = SIGABRT; /// compatibility
-pub const SIGEMT = 7; /// EMT instruction
-pub const SIGFPE = 8; /// floating point exception
-pub const SIGKILL = 9; /// kill (cannot be caught or ignored)
-pub const SIGBUS = 10; /// bus error
-pub const SIGSEGV = 11; /// segmentation violation
-pub const SIGSYS = 12; /// bad argument to system call
-pub const SIGPIPE = 13; /// write on a pipe with no one to read it
-pub const SIGALRM = 14; /// alarm clock
-pub const SIGTERM = 15; /// software termination signal from kill
-pub const SIGURG = 16; /// urgent condition on IO channel
-pub const SIGSTOP = 17; /// sendable stop signal not from tty
-pub const SIGTSTP = 18; /// stop signal from tty
-pub const SIGCONT = 19; /// continue a stopped process
-pub const SIGCHLD = 20; /// to parent on child stop or exit
-pub const SIGTTIN = 21; /// to readers pgrp upon background tty read
-pub const SIGTTOU = 22; /// like TTIN for output if (tp->t_local&LTOSTOP)
-pub const SIGIO = 23; /// input/output possible signal
-pub const SIGXCPU = 24; /// exceeded CPU time limit
-pub const SIGXFSZ = 25; /// exceeded file size limit
-pub const SIGVTALRM = 26; /// virtual time alarm
-pub const SIGPROF = 27; /// profiling time alarm
-pub const SIGWINCH = 28; /// window size changes
-pub const SIGINFO = 29; /// information request
-pub const SIGUSR1 = 30; /// user defined signal 1
-pub const SIGUSR2 = 31; /// user defined signal 2
+/// unblock specified signal set
+pub const SIG_UNBLOCK = 2;
-fn wstatus(x: i32) i32 { return x & 0o177; }
+/// set specified signal set
+pub const SIG_SETMASK = 3;
+
+/// hangup
+pub const SIGHUP = 1;
+
+/// interrupt
+pub const SIGINT = 2;
+
+/// quit
+pub const SIGQUIT = 3;
+
+/// illegal instruction (not reset when caught)
+pub const SIGILL = 4;
+
+/// trace trap (not reset when caught)
+pub const SIGTRAP = 5;
+
+/// abort()
+pub const SIGABRT = 6;
+
+/// pollable event ([XSR] generated, not supported)
+pub const SIGPOLL = 7;
+
+/// compatibility
+pub const SIGIOT = SIGABRT;
+
+/// EMT instruction
+pub const SIGEMT = 7;
+
+/// floating point exception
+pub const SIGFPE = 8;
+
+/// kill (cannot be caught or ignored)
+pub const SIGKILL = 9;
+
+/// bus error
+pub const SIGBUS = 10;
+
+/// segmentation violation
+pub const SIGSEGV = 11;
+
+/// bad argument to system call
+pub const SIGSYS = 12;
+
+/// write on a pipe with no one to read it
+pub const SIGPIPE = 13;
+
+/// alarm clock
+pub const SIGALRM = 14;
+
+/// software termination signal from kill
+pub const SIGTERM = 15;
+
+/// urgent condition on IO channel
+pub const SIGURG = 16;
+
+/// sendable stop signal not from tty
+pub const SIGSTOP = 17;
+
+/// stop signal from tty
+pub const SIGTSTP = 18;
+
+/// continue a stopped process
+pub const SIGCONT = 19;
+
+/// to parent on child stop or exit
+pub const SIGCHLD = 20;
+
+/// to readers pgrp upon background tty read
+pub const SIGTTIN = 21;
+
+/// like TTIN for output if (tp->t_local&LTOSTOP)
+pub const SIGTTOU = 22;
+
+/// input/output possible signal
+pub const SIGIO = 23;
+
+/// exceeded CPU time limit
+pub const SIGXCPU = 24;
+
+/// exceeded file size limit
+pub const SIGXFSZ = 25;
+
+/// virtual time alarm
+pub const SIGVTALRM = 26;
+
+/// profiling time alarm
+pub const SIGPROF = 27;
+
+/// window size changes
+pub const SIGWINCH = 28;
+
+/// information request
+pub const SIGINFO = 29;
+
+/// user defined signal 1
+pub const SIGUSR1 = 30;
+
+/// user defined signal 2
+pub const SIGUSR2 = 31;
+
+/// no flag value
+pub const KEVENT_FLAG_NONE = 0x000;
+
+/// immediate timeout
+pub const KEVENT_FLAG_IMMEDIATE = 0x001;
+
+/// output events only include change
+pub const KEVENT_FLAG_ERROR_EVENTS = 0x002;
+
+/// add event to kq (implies enable)
+pub const EV_ADD = 0x0001;
+
+/// delete event from kq
+pub const EV_DELETE = 0x0002;
+
+/// enable event
+pub const EV_ENABLE = 0x0004;
+
+/// disable event (not reported)
+pub const EV_DISABLE = 0x0008;
+
+/// only report one occurrence
+pub const EV_ONESHOT = 0x0010;
+
+/// clear event state after reporting
+pub const EV_CLEAR = 0x0020;
+
+/// force immediate event output
+/// ... with or without EV_ERROR
+/// ... use KEVENT_FLAG_ERROR_EVENTS
+/// on syscalls supporting flags
+pub const EV_RECEIPT = 0x0040;
+
+/// disable event after reporting
+pub const EV_DISPATCH = 0x0080;
+
+/// unique kevent per udata value
+pub const EV_UDATA_SPECIFIC = 0x0100;
+
+/// ... in combination with EV_DELETE
+/// will defer delete until udata-specific
+/// event enabled. EINPROGRESS will be
+/// returned to indicate the deferral
+pub const EV_DISPATCH2 = EV_DISPATCH | EV_UDATA_SPECIFIC;
+
+/// report that source has vanished
+/// ... only valid with EV_DISPATCH2
+pub const EV_VANISHED = 0x0200;
+
+/// reserved by system
+pub const EV_SYSFLAGS = 0xF000;
+
+/// filter-specific flag
+pub const EV_FLAG0 = 0x1000;
+
+/// filter-specific flag
+pub const EV_FLAG1 = 0x2000;
+
+/// EOF detected
+pub const EV_EOF = 0x8000;
+
+/// error, data contains errno
+pub const EV_ERROR = 0x4000;
+
+pub const EV_POLL = EV_FLAG0;
+pub const EV_OOBAND = EV_FLAG1;
+
+pub const EVFILT_READ = -1;
+pub const EVFILT_WRITE = -2;
+
+/// attached to aio requests
+pub const EVFILT_AIO = -3;
+
+/// attached to vnodes
+pub const EVFILT_VNODE = -4;
+
+/// attached to struct proc
+pub const EVFILT_PROC = -5;
+
+/// attached to struct proc
+pub const EVFILT_SIGNAL = -6;
+
+/// timers
+pub const EVFILT_TIMER = -7;
+
+/// Mach portsets
+pub const EVFILT_MACHPORT = -8;
+
+/// Filesystem events
+pub const EVFILT_FS = -9;
+
+/// User events
+pub const EVFILT_USER = -10;
+
+/// Virtual memory events
+pub const EVFILT_VM = -12;
+
+/// Exception events
+pub const EVFILT_EXCEPT = -15;
+
+pub const EVFILT_SYSCOUNT = 17;
+
+/// On input, NOTE_TRIGGER causes the event to be triggered for output.
+pub const NOTE_TRIGGER = 0x01000000;
+
+/// ignore input fflags
+pub const NOTE_FFNOP = 0x00000000;
+
+/// and fflags
+pub const NOTE_FFAND = 0x40000000;
+
+/// or fflags
+pub const NOTE_FFOR = 0x80000000;
+
+/// copy fflags
+pub const NOTE_FFCOPY = 0xc0000000;
+
+/// mask for operations
+pub const NOTE_FFCTRLMASK = 0xc0000000;
+pub const NOTE_FFLAGSMASK = 0x00ffffff;
+
+/// low water mark
+pub const NOTE_LOWAT = 0x00000001;
+
+/// OOB data
+pub const NOTE_OOB = 0x00000002;
+
+/// vnode was removed
+pub const NOTE_DELETE = 0x00000001;
+
+/// data contents changed
+pub const NOTE_WRITE = 0x00000002;
+
+/// size increased
+pub const NOTE_EXTEND = 0x00000004;
+
+/// attributes changed
+pub const NOTE_ATTRIB = 0x00000008;
+
+/// link count changed
+pub const NOTE_LINK = 0x00000010;
+
+/// vnode was renamed
+pub const NOTE_RENAME = 0x00000020;
+
+/// vnode access was revoked
+pub const NOTE_REVOKE = 0x00000040;
+
+/// No specific vnode event: to test for EVFILT_READ activation
+pub const NOTE_NONE = 0x00000080;
+
+/// vnode was unlocked by flock(2)
+pub const NOTE_FUNLOCK = 0x00000100;
+
+/// process exited
+pub const NOTE_EXIT = 0x80000000;
+
+/// process forked
+pub const NOTE_FORK = 0x40000000;
+
+/// process exec'd
+pub const NOTE_EXEC = 0x20000000;
+
+/// shared with EVFILT_SIGNAL
+pub const NOTE_SIGNAL = 0x08000000;
+
+/// exit status to be returned, valid for child process only
+pub const NOTE_EXITSTATUS = 0x04000000;
+
+/// provide details on reasons for exit
+pub const NOTE_EXIT_DETAIL = 0x02000000;
+
+/// mask for signal & exit status
+pub const NOTE_PDATAMASK = 0x000fffff;
+pub const NOTE_PCTRLMASK = (~NOTE_PDATAMASK);
+
+pub const NOTE_EXIT_DETAIL_MASK = 0x00070000;
+pub const NOTE_EXIT_DECRYPTFAIL = 0x00010000;
+pub const NOTE_EXIT_MEMORY = 0x00020000;
+pub const NOTE_EXIT_CSERROR = 0x00040000;
+
+/// will react on memory pressure
+pub const NOTE_VM_PRESSURE = 0x80000000;
+
+/// will quit on memory pressure, possibly after cleaning up dirty state
+pub const NOTE_VM_PRESSURE_TERMINATE = 0x40000000;
+
+/// will quit immediately on memory pressure
+pub const NOTE_VM_PRESSURE_SUDDEN_TERMINATE = 0x20000000;
+
+/// there was an error
+pub const NOTE_VM_ERROR = 0x10000000;
+
+/// data is seconds
+pub const NOTE_SECONDS = 0x00000001;
+
+/// data is microseconds
+pub const NOTE_USECONDS = 0x00000002;
+
+/// data is nanoseconds
+pub const NOTE_NSECONDS = 0x00000004;
+
+/// absolute timeout
+pub const NOTE_ABSOLUTE = 0x00000008;
+
+/// ext[1] holds leeway for power aware timers
+pub const NOTE_LEEWAY = 0x00000010;
+
+/// system does minimal timer coalescing
+pub const NOTE_CRITICAL = 0x00000020;
+
+/// system does maximum timer coalescing
+pub const NOTE_BACKGROUND = 0x00000040;
+pub const NOTE_MACH_CONTINUOUS_TIME = 0x00000080;
+
+/// data is mach absolute time units
+pub const NOTE_MACHTIME = 0x00000100;
+
+pub const AF_UNSPEC: c_int = 0;
+pub const AF_LOCAL: c_int = 1;
+pub const AF_UNIX: c_int = AF_LOCAL;
+pub const AF_INET: c_int = 2;
+pub const AF_SYS_CONTROL: c_int = 2;
+pub const AF_IMPLINK: c_int = 3;
+pub const AF_PUP: c_int = 4;
+pub const AF_CHAOS: c_int = 5;
+pub const AF_NS: c_int = 6;
+pub const AF_ISO: c_int = 7;
+pub const AF_OSI: c_int = AF_ISO;
+pub const AF_ECMA: c_int = 8;
+pub const AF_DATAKIT: c_int = 9;
+pub const AF_CCITT: c_int = 10;
+pub const AF_SNA: c_int = 11;
+pub const AF_DECnet: c_int = 12;
+pub const AF_DLI: c_int = 13;
+pub const AF_LAT: c_int = 14;
+pub const AF_HYLINK: c_int = 15;
+pub const AF_APPLETALK: c_int = 16;
+pub const AF_ROUTE: c_int = 17;
+pub const AF_LINK: c_int = 18;
+pub const AF_XTP: c_int = 19;
+pub const AF_COIP: c_int = 20;
+pub const AF_CNT: c_int = 21;
+pub const AF_RTIP: c_int = 22;
+pub const AF_IPX: c_int = 23;
+pub const AF_SIP: c_int = 24;
+pub const AF_PIP: c_int = 25;
+pub const AF_ISDN: c_int = 28;
+pub const AF_E164: c_int = AF_ISDN;
+pub const AF_KEY: c_int = 29;
+pub const AF_INET6: c_int = 30;
+pub const AF_NATM: c_int = 31;
+pub const AF_SYSTEM: c_int = 32;
+pub const AF_NETBIOS: c_int = 33;
+pub const AF_PPP: c_int = 34;
+pub const AF_MAX: c_int = 40;
+
+pub const PF_UNSPEC: c_int = AF_UNSPEC;
+pub const PF_LOCAL: c_int = AF_LOCAL;
+pub const PF_UNIX: c_int = PF_LOCAL;
+pub const PF_INET: c_int = AF_INET;
+pub const PF_IMPLINK: c_int = AF_IMPLINK;
+pub const PF_PUP: c_int = AF_PUP;
+pub const PF_CHAOS: c_int = AF_CHAOS;
+pub const PF_NS: c_int = AF_NS;
+pub const PF_ISO: c_int = AF_ISO;
+pub const PF_OSI: c_int = AF_ISO;
+pub const PF_ECMA: c_int = AF_ECMA;
+pub const PF_DATAKIT: c_int = AF_DATAKIT;
+pub const PF_CCITT: c_int = AF_CCITT;
+pub const PF_SNA: c_int = AF_SNA;
+pub const PF_DECnet: c_int = AF_DECnet;
+pub const PF_DLI: c_int = AF_DLI;
+pub const PF_LAT: c_int = AF_LAT;
+pub const PF_HYLINK: c_int = AF_HYLINK;
+pub const PF_APPLETALK: c_int = AF_APPLETALK;
+pub const PF_ROUTE: c_int = AF_ROUTE;
+pub const PF_LINK: c_int = AF_LINK;
+pub const PF_XTP: c_int = AF_XTP;
+pub const PF_COIP: c_int = AF_COIP;
+pub const PF_CNT: c_int = AF_CNT;
+pub const PF_SIP: c_int = AF_SIP;
+pub const PF_IPX: c_int = AF_IPX;
+pub const PF_RTIP: c_int = AF_RTIP;
+pub const PF_PIP: c_int = AF_PIP;
+pub const PF_ISDN: c_int = AF_ISDN;
+pub const PF_KEY: c_int = AF_KEY;
+pub const PF_INET6: c_int = AF_INET6;
+pub const PF_NATM: c_int = AF_NATM;
+pub const PF_SYSTEM: c_int = AF_SYSTEM;
+pub const PF_NETBIOS: c_int = AF_NETBIOS;
+pub const PF_PPP: c_int = AF_PPP;
+pub const PF_MAX: c_int = AF_MAX;
+
+pub const SYSPROTO_EVENT: c_int = 1;
+pub const SYSPROTO_CONTROL: c_int = 2;
+
+pub const SOCK_STREAM: c_int = 1;
+pub const SOCK_DGRAM: c_int = 2;
+pub const SOCK_RAW: c_int = 3;
+pub const SOCK_RDM: c_int = 4;
+pub const SOCK_SEQPACKET: c_int = 5;
+pub const SOCK_MAXADDRLEN: c_int = 255;
+
+fn wstatus(x: i32) i32 {
+ return x & 0o177;
+}
const wstopped = 0o177;
-pub fn WEXITSTATUS(x: i32) i32 { return x >> 8; }
-pub fn WTERMSIG(x: i32) i32 { return wstatus(x); }
-pub fn WSTOPSIG(x: i32) i32 { return x >> 8; }
-pub fn WIFEXITED(x: i32) bool { return wstatus(x) == 0; }
-pub fn WIFSTOPPED(x: i32) bool { return wstatus(x) == wstopped and WSTOPSIG(x) != 0x13; }
-pub fn WIFSIGNALED(x: i32) bool { return wstatus(x) != wstopped and wstatus(x) != 0; }
+pub fn WEXITSTATUS(x: i32) i32 {
+ return x >> 8;
+}
+pub fn WTERMSIG(x: i32) i32 {
+ return wstatus(x);
+}
+pub fn WSTOPSIG(x: i32) i32 {
+ return x >> 8;
+}
+pub fn WIFEXITED(x: i32) bool {
+ return wstatus(x) == 0;
+}
+pub fn WIFSTOPPED(x: i32) bool {
+ return wstatus(x) == wstopped and WSTOPSIG(x) != 0x13;
+}
+pub fn WIFSIGNALED(x: i32) bool {
+ return wstatus(x) != wstopped and wstatus(x) != 0;
+}
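// Illustrative sketch of the decoding above: exit(2) produces a wait status of
// 0x0200, while death by SIGKILL produces a status of 9.
test "wait status decoding sketch" {
    assert(WIFEXITED(0x0200) and WEXITSTATUS(0x0200) == 2);
    assert(WIFSIGNALED(9) and WTERMSIG(9) == 9);
}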
/// Get the errno from a syscall return value, or 0 for no error.
pub fn getErrno(r: usize) usize {
const signed_r = @bitCast(isize, r);
- return if (signed_r > -4096 and signed_r < 0) usize(-signed_r) else 0;
+ return if (signed_r > -4096 and signed_r < 0) @intCast(usize, -signed_r) else 0;
}
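// Illustrative example: a wrapper that failed with ENOENT returns
// @bitCast(usize, isize(-2)), so getErrno on that value yields 2; a successful
// return such as 0 or a small byte count yields 0.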
pub fn close(fd: i32) usize {
@@ -156,7 +613,7 @@ pub fn isatty(fd: i32) bool {
return c.isatty(fd) != 0;
}
-pub fn fstat(fd: i32, buf: &c.Stat) usize {
+pub fn fstat(fd: i32, buf: *c.Stat) usize {
return errnoWrap(c.@"fstat$INODE64"(fd, buf));
}
@@ -164,7 +621,8 @@ pub fn lseek(fd: i32, offset: isize, whence: c_int) usize {
return errnoWrap(c.lseek(fd, offset, whence));
}
-pub fn open(path: &const u8, flags: u32, mode: usize) usize {
+// TODO https://github.com/ziglang/zig/issues/265 on the whole file
+pub fn open(path: [*]const u8, flags: u32, mode: usize) usize {
return errnoWrap(c.open(path, @bitCast(c_int, flags), mode));
}
@@ -172,83 +630,123 @@ pub fn raise(sig: i32) usize {
return errnoWrap(c.raise(sig));
}
-pub fn read(fd: i32, buf: &u8, nbyte: usize) usize {
- return errnoWrap(c.read(fd, @ptrCast(&c_void, buf), nbyte));
+pub fn read(fd: i32, buf: [*]u8, nbyte: usize) usize {
+ return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte));
}
-pub fn stat(noalias path: &const u8, noalias buf: &stat) usize {
+pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize {
return errnoWrap(c.stat(path, buf));
}
-pub fn write(fd: i32, buf: &const u8, nbyte: usize) usize {
- return errnoWrap(c.write(fd, @ptrCast(&const c_void, buf), nbyte));
+pub fn write(fd: i32, buf: [*]const u8, nbyte: usize) usize {
+ return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte));
}
-pub fn mmap(address: ?&u8, length: usize, prot: usize, flags: usize, fd: i32,
- offset: isize) usize
-{
- const ptr_result = c.mmap(@ptrCast(&c_void, address), length,
- @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
+pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
+ const ptr_result = c.mmap(
+ @ptrCast(*c_void, address),
+ length,
+ @bitCast(c_int, @intCast(c_uint, prot)),
+ @bitCast(c_int, c_uint(flags)),
+ fd,
+ offset,
+ );
const isize_result = @bitCast(isize, @ptrToInt(ptr_result));
return errnoWrap(isize_result);
}
-pub fn munmap(address: &u8, length: usize) usize {
- return errnoWrap(c.munmap(@ptrCast(&c_void, address), length));
+pub fn munmap(address: usize, length: usize) usize {
+ return errnoWrap(c.munmap(@intToPtr(*c_void, address), length));
}
-pub fn unlink(path: &const u8) usize {
+pub fn unlink(path: [*]const u8) usize {
return errnoWrap(c.unlink(path));
}
-pub fn getcwd(buf: &u8, size: usize) usize {
- return if (c.getcwd(buf, size) == null) @bitCast(usize, -isize(*c._errno())) else 0;
+pub fn getcwd(buf: [*]u8, size: usize) usize {
+ return if (c.getcwd(buf, size) == null) @bitCast(usize, -isize(c._errno().*)) else 0;
}
-pub fn waitpid(pid: i32, status: &i32, options: u32) usize {
+pub fn waitpid(pid: i32, status: *i32, options: u32) usize {
comptime assert(i32.bit_count == c_int.bit_count);
- return errnoWrap(c.waitpid(pid, @ptrCast(&c_int, status), @bitCast(c_int, options)));
+ return errnoWrap(c.waitpid(pid, @ptrCast(*c_int, status), @bitCast(c_int, options)));
}
pub fn fork() usize {
return errnoWrap(c.fork());
}
-pub fn access(path: &const u8, mode: u32) usize {
+pub fn access(path: [*]const u8, mode: u32) usize {
return errnoWrap(c.access(path, mode));
}
-pub fn pipe(fds: &[2]i32) usize {
+pub fn pipe(fds: *[2]i32) usize {
comptime assert(i32.bit_count == c_int.bit_count);
- return errnoWrap(c.pipe(@ptrCast(&c_int, fds)));
+ return errnoWrap(c.pipe(@ptrCast(*[2]c_int, fds)));
}
-
-pub fn getdirentries64(fd: i32, buf_ptr: &u8, buf_len: usize, basep: &i64) usize {
+pub fn getdirentries64(fd: i32, buf_ptr: [*]u8, buf_len: usize, basep: *i64) usize {
return errnoWrap(@bitCast(isize, c.__getdirentries64(fd, buf_ptr, buf_len, basep)));
}
-pub fn mkdir(path: &const u8, mode: u32) usize {
+pub fn kqueue() usize {
+ return errnoWrap(c.kqueue());
+}
+
+pub fn kevent(kq: i32, changelist: []const Kevent, eventlist: []Kevent, timeout: ?*const timespec) usize {
+ return errnoWrap(c.kevent(
+ kq,
+ changelist.ptr,
+ @intCast(c_int, changelist.len),
+ eventlist.ptr,
+ @intCast(c_int, eventlist.len),
+ timeout,
+ ));
+}
+
+pub fn kevent64(
+ kq: i32,
+ changelist: []const kevent64_s,
+ eventlist: []kevent64_s,
+ flags: u32,
+ timeout: ?*const timespec,
+) usize {
+ return errnoWrap(c.kevent64(kq, changelist.ptr, changelist.len, eventlist.ptr, eventlist.len, flags, timeout));
+}
+
+pub fn mkdir(path: [*]const u8, mode: u32) usize {
return errnoWrap(c.mkdir(path, mode));
}
-pub fn symlink(existing: &const u8, new: &const u8) usize {
+pub fn symlink(existing: [*]const u8, new: [*]const u8) usize {
return errnoWrap(c.symlink(existing, new));
}
-pub fn rename(old: &const u8, new: &const u8) usize {
+pub fn sysctl(name: [*]c_int, namelen: c_uint, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) usize {
+ return errnoWrap(c.sysctl(name, namelen, oldp, oldlenp, newp, newlen));
+}
+
+pub fn sysctlbyname(name: [*]const u8, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) usize {
+ return errnoWrap(c.sysctlbyname(name, oldp, oldlenp, newp, newlen));
+}
+
+pub fn sysctlnametomib(name: [*]const u8, mibp: ?*c_int, sizep: ?*usize) usize {
+ return errnoWrap(c.sysctlnametomib(name, mibp, sizep));
+}
+
+pub fn rename(old: [*]const u8, new: [*]const u8) usize {
return errnoWrap(c.rename(old, new));
}
-pub fn rmdir(path: &const u8) usize {
+pub fn rmdir(path: [*]const u8) usize {
return errnoWrap(c.rmdir(path));
}
-pub fn chdir(path: &const u8) usize {
+pub fn chdir(path: [*]const u8) usize {
return errnoWrap(c.chdir(path));
}
-pub fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) usize {
+pub fn execve(path: [*]const u8, argv: [*]const ?[*]const u8, envp: [*]const ?[*]const u8) usize {
return errnoWrap(c.execve(path, argv, envp));
}
@@ -256,16 +754,20 @@ pub fn dup2(old: i32, new: i32) usize {
return errnoWrap(c.dup2(old, new));
}
-pub fn readlink(noalias path: &const u8, noalias buf_ptr: &u8, buf_len: usize) usize {
+pub fn readlink(noalias path: [*]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize {
return errnoWrap(c.readlink(path, buf_ptr, buf_len));
}
-pub fn nanosleep(req: &const timespec, rem: ?&timespec) usize {
+pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) usize {
+ return errnoWrap(c.gettimeofday(tv, tz));
+}
+
+pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return errnoWrap(c.nanosleep(req, rem));
}
-pub fn realpath(noalias filename: &const u8, noalias resolved_name: &u8) usize {
- return if (c.realpath(filename, resolved_name) == null) @bitCast(usize, -isize(*c._errno())) else 0;
+pub fn realpath(noalias filename: [*]const u8, noalias resolved_name: [*]u8) usize {
+ return if (c.realpath(filename, resolved_name) == null) @bitCast(usize, -isize(c._errno().*)) else 0;
}
pub fn setreuid(ruid: u32, euid: u32) usize {
@@ -276,26 +778,26 @@ pub fn setregid(rgid: u32, egid: u32) usize {
return errnoWrap(c.setregid(rgid, egid));
}
-pub fn sigprocmask(flags: u32, noalias set: &const sigset_t, noalias oldset: ?&sigset_t) usize {
+pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
return errnoWrap(c.sigprocmask(@bitCast(c_int, flags), set, oldset));
}
-pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigaction) usize {
+pub fn sigaction(sig: u5, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize {
assert(sig != SIGKILL);
assert(sig != SIGSTOP);
- var cact = c.Sigaction {
- .handler = @ptrCast(extern fn(c_int)void, act.handler),
+ var cact = c.Sigaction{
+ .handler = @ptrCast(extern fn (c_int) void, act.handler),
.sa_flags = @bitCast(c_int, act.flags),
.sa_mask = act.mask,
};
var coact: c.Sigaction = undefined;
- const result = errnoWrap(c.sigaction(sig, &cact, &coact));
+ const result = errnoWrap(c.sigaction(sig, &cact, &coact));
if (result != 0) {
return result;
}
if (oact) |old| {
- *old = Sigaction {
- .handler = @ptrCast(extern fn(i32)void, coact.handler),
+ old.* = Sigaction{
+ .handler = @ptrCast(extern fn (i32) void, coact.handler),
.flags = @bitCast(u32, coact.sa_flags),
.mask = coact.sa_mask,
};
@@ -313,20 +815,31 @@ pub const dirent = c.dirent;
pub const sa_family_t = c.sa_family_t;
pub const sockaddr = c.sockaddr;
+/// Renamed from `kevent` to `Kevent` to avoid conflict with the syscall.
+pub const Kevent = c.Kevent;
+pub const kevent64_s = c.kevent64_s;
+
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = struct {
- handler: extern fn(i32)void,
+ handler: extern fn (i32) void,
mask: sigset_t,
flags: u32,
};
-pub fn sigaddset(set: &sigset_t, signo: u5) void {
- *set |= u32(1) << (signo - 1);
+pub fn sigaddset(set: *sigset_t, signo: u5) void {
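+    // Signal numbers are 1-based, so signal `signo` occupies bit (signo - 1) of the mask.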
+ set.* |= u32(1) << (signo - 1);
}
/// Takes the return value from a syscall and formats it back in the way
/// that the kernel represents it to libc. Errno was a mistake, let's make
/// it go away forever.
fn errnoWrap(value: isize) usize {
- return @bitCast(usize, if (value == -1) -isize(*c._errno()) else value);
+ return @bitCast(usize, if (value == -1) -isize(c._errno().*) else value);
}
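+// For example (illustrative): if c.rmdir() fails with errno == ENOTEMPTY, it
+// returns -1 and errnoWrap yields @bitCast(usize, -isize(ENOTEMPTY)), the same
+// negative-errno-encoded-as-usize convention that posix.getErrno() decodes.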
+
+pub const timezone = c.timezone;
+pub const timeval = c.timeval;
+pub const mach_timebase_info_data = c.mach_timebase_info_data;
+
+pub const mach_absolute_time = c.mach_absolute_time;
+pub const mach_timebase_info = c.mach_timebase_info;
diff --git a/std/os/darwin/errno.zig b/std/os/darwin/errno.zig
new file mode 100644
index 0000000000..438f3382ad
--- /dev/null
+++ b/std/os/darwin/errno.zig
@@ -0,0 +1,328 @@
+/// Operation not permitted
+pub const EPERM = 1;
+
+/// No such file or directory
+pub const ENOENT = 2;
+
+/// No such process
+pub const ESRCH = 3;
+
+/// Interrupted system call
+pub const EINTR = 4;
+
+/// Input/output error
+pub const EIO = 5;
+
+/// Device not configured
+pub const ENXIO = 6;
+
+/// Argument list too long
+pub const E2BIG = 7;
+
+/// Exec format error
+pub const ENOEXEC = 8;
+
+/// Bad file descriptor
+pub const EBADF = 9;
+
+/// No child processes
+pub const ECHILD = 10;
+
+/// Resource deadlock avoided
+pub const EDEADLK = 11;
+
+/// Cannot allocate memory
+pub const ENOMEM = 12;
+
+/// Permission denied
+pub const EACCES = 13;
+
+/// Bad address
+pub const EFAULT = 14;
+
+/// Block device required
+pub const ENOTBLK = 15;
+
+/// Device / Resource busy
+pub const EBUSY = 16;
+
+/// File exists
+pub const EEXIST = 17;
+
+/// Cross-device link
+pub const EXDEV = 18;
+
+/// Operation not supported by device
+pub const ENODEV = 19;
+
+/// Not a directory
+pub const ENOTDIR = 20;
+
+/// Is a directory
+pub const EISDIR = 21;
+
+/// Invalid argument
+pub const EINVAL = 22;
+
+/// Too many open files in system
+pub const ENFILE = 23;
+
+/// Too many open files
+pub const EMFILE = 24;
+
+/// Inappropriate ioctl for device
+pub const ENOTTY = 25;
+
+/// Text file busy
+pub const ETXTBSY = 26;
+
+/// File too large
+pub const EFBIG = 27;
+
+/// No space left on device
+pub const ENOSPC = 28;
+
+/// Illegal seek
+pub const ESPIPE = 29;
+
+/// Read-only file system
+pub const EROFS = 30;
+
+/// Too many links
+pub const EMLINK = 31;
+/// Broken pipe
+pub const EPIPE = 32;
+
+// math software
+
+/// Numerical argument out of domain
+pub const EDOM = 33;
+/// Result too large
+pub const ERANGE = 34;
+
+// non-blocking and interrupt i/o
+
+/// Resource temporarily unavailable
+pub const EAGAIN = 35;
+
+/// Operation would block
+pub const EWOULDBLOCK = EAGAIN;
+
+/// Operation now in progress
+pub const EINPROGRESS = 36;
+/// Operation already in progress
+pub const EALREADY = 37;
+
+// ipc/network software -- argument errors
+
+/// Socket operation on non-socket
+pub const ENOTSOCK = 38;
+
+/// Destination address required
+pub const EDESTADDRREQ = 39;
+
+/// Message too long
+pub const EMSGSIZE = 40;
+
+/// Protocol wrong type for socket
+pub const EPROTOTYPE = 41;
+
+/// Protocol not available
+pub const ENOPROTOOPT = 42;
+
+/// Protocol not supported
+pub const EPROTONOSUPPORT = 43;
+
+/// Socket type not supported
+pub const ESOCKTNOSUPPORT = 44;
+
+/// Operation not supported
+pub const ENOTSUP = 45;
+
+/// Protocol family not supported
+pub const EPFNOSUPPORT = 46;
+
+/// Address family not supported by protocol family
+pub const EAFNOSUPPORT = 47;
+
+/// Address already in use
+pub const EADDRINUSE = 48;
+/// Can't assign requested address
+pub const EADDRNOTAVAIL = 49;
+
+// ipc/network software -- operational errors
+
+/// Network is down
+pub const ENETDOWN = 50;
+
+/// Network is unreachable
+pub const ENETUNREACH = 51;
+
+/// Network dropped connection on reset
+pub const ENETRESET = 52;
+
+/// Software caused connection abort
+pub const ECONNABORTED = 53;
+
+/// Connection reset by peer
+pub const ECONNRESET = 54;
+
+/// No buffer space available
+pub const ENOBUFS = 55;
+
+/// Socket is already connected
+pub const EISCONN = 56;
+
+/// Socket is not connected
+pub const ENOTCONN = 57;
+
+/// Can't send after socket shutdown
+pub const ESHUTDOWN = 58;
+
+/// Too many references: can't splice
+pub const ETOOMANYREFS = 59;
+
+/// Operation timed out
+pub const ETIMEDOUT = 60;
+
+/// Connection refused
+pub const ECONNREFUSED = 61;
+
+/// Too many levels of symbolic links
+pub const ELOOP = 62;
+
+/// File name too long
+pub const ENAMETOOLONG = 63;
+
+/// Host is down
+pub const EHOSTDOWN = 64;
+
+/// No route to host
+pub const EHOSTUNREACH = 65;
+/// Directory not empty
+pub const ENOTEMPTY = 66;
+
+// quotas & mush
+
+/// Too many processes
+pub const EPROCLIM = 67;
+
+/// Too many users
+pub const EUSERS = 68;
+/// Disc quota exceeded
+pub const EDQUOT = 69;
+
+// Network File System
+
+/// Stale NFS file handle
+pub const ESTALE = 70;
+
+/// Too many levels of remote in path
+pub const EREMOTE = 71;
+
+/// RPC struct is bad
+pub const EBADRPC = 72;
+
+/// RPC version wrong
+pub const ERPCMISMATCH = 73;
+
+/// RPC prog. not avail
+pub const EPROGUNAVAIL = 74;
+
+/// Program version wrong
+pub const EPROGMISMATCH = 75;
+
+/// Bad procedure for program
+pub const EPROCUNAVAIL = 76;
+
+/// No locks available
+pub const ENOLCK = 77;
+
+/// Function not implemented
+pub const ENOSYS = 78;
+
+/// Inappropriate file type or format
+pub const EFTYPE = 79;
+
+/// Authentication error
+pub const EAUTH = 80;
+/// Need authenticator
+pub const ENEEDAUTH = 81;
+
+// Intelligent device errors
+
+/// Device power is off
+pub const EPWROFF = 82;
+
+/// Device error, e.g. paper out
+pub const EDEVERR = 83;
+/// Value too large to be stored in data type
+pub const EOVERFLOW = 84;
+
+// Program loading errors
+
+/// Bad executable
+pub const EBADEXEC = 85;
+
+/// Bad CPU type in executable
+pub const EBADARCH = 86;
+
+/// Shared library version mismatch
+pub const ESHLIBVERS = 87;
+
+/// Malformed Macho file
+pub const EBADMACHO = 88;
+
+/// Operation canceled
+pub const ECANCELED = 89;
+
+/// Identifier removed
+pub const EIDRM = 90;
+
+/// No message of desired type
+pub const ENOMSG = 91;
+
+/// Illegal byte sequence
+pub const EILSEQ = 92;
+
+/// Attribute not found
+pub const ENOATTR = 93;
+
+/// Bad message
+pub const EBADMSG = 94;
+
+/// Reserved
+pub const EMULTIHOP = 95;
+
+/// No message available on STREAM
+pub const ENODATA = 96;
+
+/// Reserved
+pub const ENOLINK = 97;
+
+/// No STREAM resources
+pub const ENOSR = 98;
+
+/// Not a STREAM
+pub const ENOSTR = 99;
+
+/// Protocol error
+pub const EPROTO = 100;
+
+/// STREAM ioctl timeout
+pub const ETIME = 101;
+
+/// No such policy registered
+pub const ENOPOLICY = 103;
+
+/// State not recoverable
+pub const ENOTRECOVERABLE = 104;
+
+/// Previous owner died
+pub const EOWNERDEAD = 105;
+
+/// Interface output queue is full
+pub const EQFULL = 106;
+
+/// Must be equal largest errno
+pub const ELAST = 106;
diff --git a/std/os/darwin_errno.zig b/std/os/darwin_errno.zig
deleted file mode 100644
index 6b47e5a9fa..0000000000
--- a/std/os/darwin_errno.zig
+++ /dev/null
@@ -1,142 +0,0 @@
-
-pub const EPERM = 1; /// Operation not permitted
-pub const ENOENT = 2; /// No such file or directory
-pub const ESRCH = 3; /// No such process
-pub const EINTR = 4; /// Interrupted system call
-pub const EIO = 5; /// Input/output error
-pub const ENXIO = 6; /// Device not configured
-pub const E2BIG = 7; /// Argument list too long
-pub const ENOEXEC = 8; /// Exec format error
-pub const EBADF = 9; /// Bad file descriptor
-pub const ECHILD = 10; /// No child processes
-pub const EDEADLK = 11; /// Resource deadlock avoided
-
-pub const ENOMEM = 12; /// Cannot allocate memory
-pub const EACCES = 13; /// Permission denied
-pub const EFAULT = 14; /// Bad address
-pub const ENOTBLK = 15; /// Block device required
-pub const EBUSY = 16; /// Device / Resource busy
-pub const EEXIST = 17; /// File exists
-pub const EXDEV = 18; /// Cross-device link
-pub const ENODEV = 19; /// Operation not supported by device
-pub const ENOTDIR = 20; /// Not a directory
-pub const EISDIR = 21; /// Is a directory
-pub const EINVAL = 22; /// Invalid argument
-pub const ENFILE = 23; /// Too many open files in system
-pub const EMFILE = 24; /// Too many open files
-pub const ENOTTY = 25; /// Inappropriate ioctl for device
-pub const ETXTBSY = 26; /// Text file busy
-pub const EFBIG = 27; /// File too large
-pub const ENOSPC = 28; /// No space left on device
-pub const ESPIPE = 29; /// Illegal seek
-pub const EROFS = 30; /// Read-only file system
-pub const EMLINK = 31; /// Too many links
-pub const EPIPE = 32; /// Broken pipe
-
-// math software
-pub const EDOM = 33; /// Numerical argument out of domain
-pub const ERANGE = 34; /// Result too large
-
-// non-blocking and interrupt i/o
-pub const EAGAIN = 35; /// Resource temporarily unavailable
-pub const EWOULDBLOCK = EAGAIN; /// Operation would block
-pub const EINPROGRESS = 36; /// Operation now in progress
-pub const EALREADY = 37; /// Operation already in progress
-
-// ipc/network software -- argument errors
-pub const ENOTSOCK = 38; /// Socket operation on non-socket
-pub const EDESTADDRREQ = 39; /// Destination address required
-pub const EMSGSIZE = 40; /// Message too long
-pub const EPROTOTYPE = 41; /// Protocol wrong type for socket
-pub const ENOPROTOOPT = 42; /// Protocol not available
-pub const EPROTONOSUPPORT = 43; /// Protocol not supported
-
-pub const ESOCKTNOSUPPORT = 44; /// Socket type not supported
-
-pub const ENOTSUP = 45; /// Operation not supported
-
-pub const EPFNOSUPPORT = 46; /// Protocol family not supported
-pub const EAFNOSUPPORT = 47; /// Address family not supported by protocol family
-pub const EADDRINUSE = 48; /// Address already in use
-pub const EADDRNOTAVAIL = 49; /// Can't assign requested address
-
-// ipc/network software -- operational errors
-pub const ENETDOWN = 50; /// Network is down
-pub const ENETUNREACH = 51; /// Network is unreachable
-pub const ENETRESET = 52; /// Network dropped connection on reset
-pub const ECONNABORTED = 53; /// Software caused connection abort
-pub const ECONNRESET = 54; /// Connection reset by peer
-pub const ENOBUFS = 55; /// No buffer space available
-pub const EISCONN = 56; /// Socket is already connected
-pub const ENOTCONN = 57; /// Socket is not connected
-
-pub const ESHUTDOWN = 58; /// Can't send after socket shutdown
-pub const ETOOMANYREFS = 59; /// Too many references: can't splice
-
-pub const ETIMEDOUT = 60; /// Operation timed out
-pub const ECONNREFUSED = 61; /// Connection refused
-
-pub const ELOOP = 62; /// Too many levels of symbolic links
-pub const ENAMETOOLONG = 63; /// File name too long
-
-pub const EHOSTDOWN = 64; /// Host is down
-pub const EHOSTUNREACH = 65; /// No route to host
-pub const ENOTEMPTY = 66; /// Directory not empty
-
-// quotas & mush
-pub const EPROCLIM = 67; /// Too many processes
-pub const EUSERS = 68; /// Too many users
-pub const EDQUOT = 69; /// Disc quota exceeded
-
-// Network File System
-pub const ESTALE = 70; /// Stale NFS file handle
-pub const EREMOTE = 71; /// Too many levels of remote in path
-pub const EBADRPC = 72; /// RPC struct is bad
-pub const ERPCMISMATCH = 73; /// RPC version wrong
-pub const EPROGUNAVAIL = 74; /// RPC prog. not avail
-pub const EPROGMISMATCH = 75; /// Program version wrong
-pub const EPROCUNAVAIL = 76; /// Bad procedure for program
-
-pub const ENOLCK = 77; /// No locks available
-pub const ENOSYS = 78; /// Function not implemented
-
-pub const EFTYPE = 79; /// Inappropriate file type or format
-pub const EAUTH = 80; /// Authentication error
-pub const ENEEDAUTH = 81; /// Need authenticator
-
-// Intelligent device errors
-pub const EPWROFF = 82; /// Device power is off
-pub const EDEVERR = 83; /// Device error, e.g. paper out
-
-pub const EOVERFLOW = 84; /// Value too large to be stored in data type
-
-// Program loading errors
-pub const EBADEXEC = 85; /// Bad executable
-pub const EBADARCH = 86; /// Bad CPU type in executable
-pub const ESHLIBVERS = 87; /// Shared library version mismatch
-pub const EBADMACHO = 88; /// Malformed Macho file
-
-pub const ECANCELED = 89; /// Operation canceled
-
-pub const EIDRM = 90; /// Identifier removed
-pub const ENOMSG = 91; /// No message of desired type
-pub const EILSEQ = 92; /// Illegal byte sequence
-pub const ENOATTR = 93; /// Attribute not found
-
-pub const EBADMSG = 94; /// Bad message
-pub const EMULTIHOP = 95; /// Reserved
-pub const ENODATA = 96; /// No message available on STREAM
-pub const ENOLINK = 97; /// Reserved
-pub const ENOSR = 98; /// No STREAM resources
-pub const ENOSTR = 99; /// Not a STREAM
-pub const EPROTO = 100; /// Protocol error
-pub const ETIME = 101; /// STREAM ioctl timeout
-
-pub const ENOPOLICY = 103; /// No such policy registered
-
-pub const ENOTRECOVERABLE = 104; /// State not recoverable
-pub const EOWNERDEAD = 105; /// Previous owner died
-
-pub const EQFULL = 106; /// Interface output queue is full
-pub const ELAST = 106; /// Must be equal largest errno
-
diff --git a/std/os/epoch.zig b/std/os/epoch.zig
new file mode 100644
index 0000000000..fc031521a5
--- /dev/null
+++ b/std/os/epoch.zig
@@ -0,0 +1,26 @@
+/// Epoch reference times for various systems, expressed as their
+/// offset in seconds from the POSIX epoch (Jan 01, 1970).
+pub const posix = 0; //Jan 01, 1970 AD
+pub const dos = 315532800; //Jan 01, 1980 AD
+pub const ios = 978307200; //Jan 01, 2001 AD
+pub const openvms = -3506716800; //Nov 17, 1858 AD
+pub const zos = -2208988800; //Jan 01, 1900 AD
+pub const windows = -11644473600; //Jan 01, 1601 AD
+pub const amiga = 252460800; //Jan 01, 1978 AD
+pub const pickos = -63244800; //Dec 31, 1967 AD
+pub const gps = 315964800; //Jan 06, 1980 AD
+pub const clr = -62135769600; //Jan 01, 0001 AD
+
+pub const unix = posix;
+pub const android = posix;
+pub const os2 = dos;
+pub const bios = dos;
+pub const vfat = dos;
+pub const ntfs = windows;
+pub const ntp = zos;
+pub const jbase = pickos;
+pub const aros = amiga;
+pub const morphos = amiga;
+pub const brew = gps;
+pub const atsc = gps;
+pub const go = clr;
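+
+// Example (illustrative): converting between epochs is just adding or
+// subtracting the offsets above. A POSIX timestamp `t` is (t - dos) seconds
+// past the DOS epoch and (t - windows) seconds past the Windows epoch.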
diff --git a/std/os/file.zig b/std/os/file.zig
index 61fc2b1455..6998ba00d1 100644
--- a/std/os/file.zig
+++ b/std/os/file.zig
@@ -15,18 +15,24 @@ pub const File = struct {
/// The OS-specific file descriptor or file handle.
handle: os.FileHandle,
- const OpenError = os.WindowsOpenError || os.PosixOpenError;
+ pub const OpenError = os.WindowsOpenError || os.PosixOpenError;
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openRead(allocator: &mem.Allocator, path: []const u8) OpenError!File {
+ pub fn openRead(allocator: *mem.Allocator, path: []const u8) OpenError!File {
if (is_posix) {
- const flags = posix.O_LARGEFILE|posix.O_RDONLY;
+ const flags = posix.O_LARGEFILE | posix.O_RDONLY;
const fd = try os.posixOpen(allocator, path, flags, 0);
return openHandle(fd);
} else if (is_windows) {
- const handle = try os.windowsOpen(allocator, path, windows.GENERIC_READ, windows.FILE_SHARE_READ,
- windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL);
+ const handle = try os.windowsOpen(
+ allocator,
+ path,
+ windows.GENERIC_READ,
+ windows.FILE_SHARE_READ,
+ windows.OPEN_EXISTING,
+ windows.FILE_ATTRIBUTE_NORMAL,
+ );
return openHandle(handle);
} else {
@compileError("TODO implement openRead for this OS");
@@ -34,94 +40,111 @@ pub const File = struct {
}
/// Calls `openWriteMode` with os.default_file_mode for the mode.
- pub fn openWrite(allocator: &mem.Allocator, path: []const u8) OpenError!File {
+ pub fn openWrite(allocator: *mem.Allocator, path: []const u8) OpenError!File {
return openWriteMode(allocator, path, os.default_file_mode);
-
}
/// If the path does not exist it will be created.
/// If a file already exists in the destination it will be truncated.
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openWriteMode(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
+ pub fn openWriteMode(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
if (is_posix) {
- const flags = posix.O_LARGEFILE|posix.O_WRONLY|posix.O_CREAT|posix.O_CLOEXEC|posix.O_TRUNC;
+ const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC;
const fd = try os.posixOpen(allocator, path, flags, file_mode);
return openHandle(fd);
} else if (is_windows) {
- const handle = try os.windowsOpen(allocator, path, windows.GENERIC_WRITE,
- windows.FILE_SHARE_WRITE|windows.FILE_SHARE_READ|windows.FILE_SHARE_DELETE,
- windows.CREATE_ALWAYS, windows.FILE_ATTRIBUTE_NORMAL);
+ const handle = try os.windowsOpen(
+ allocator,
+ path,
+ windows.GENERIC_WRITE,
+ windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
+ windows.CREATE_ALWAYS,
+ windows.FILE_ATTRIBUTE_NORMAL,
+ );
return openHandle(handle);
} else {
@compileError("TODO implement openWriteMode for this OS");
}
-
}
/// If the path does not exist it will be created.
/// If a file already exists in the destination this returns OpenError.PathAlreadyExists
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openWriteNoClobber(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
+ pub fn openWriteNoClobber(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
if (is_posix) {
- const flags = posix.O_LARGEFILE|posix.O_WRONLY|posix.O_CREAT|posix.O_CLOEXEC|posix.O_EXCL;
+ const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_EXCL;
const fd = try os.posixOpen(allocator, path, flags, file_mode);
return openHandle(fd);
} else if (is_windows) {
- const handle = try os.windowsOpen(allocator, path, windows.GENERIC_WRITE,
- windows.FILE_SHARE_WRITE|windows.FILE_SHARE_READ|windows.FILE_SHARE_DELETE,
- windows.CREATE_NEW, windows.FILE_ATTRIBUTE_NORMAL);
+ const handle = try os.windowsOpen(
+ allocator,
+ path,
+ windows.GENERIC_WRITE,
+ windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
+ windows.CREATE_NEW,
+ windows.FILE_ATTRIBUTE_NORMAL,
+ );
return openHandle(handle);
} else {
@compileError("TODO implement openWriteMode for this OS");
}
-
}
pub fn openHandle(handle: os.FileHandle) File {
- return File {
- .handle = handle,
- };
+ return File{ .handle = handle };
}
- pub fn access(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool {
+ pub const AccessError = error{
+ PermissionDenied,
+ NotFound,
+ NameTooLong,
+ BadMode,
+ BadPathName,
+ Io,
+ SystemResources,
+ OutOfMemory,
+
+ Unexpected,
+ };
+
+ pub fn access(allocator: *mem.Allocator, path: []const u8) AccessError!void {
const path_with_null = try std.cstr.addNullByte(allocator, path);
defer allocator.free(path_with_null);
if (is_posix) {
- // mode is ignored and is always F_OK for now
const result = posix.access(path_with_null.ptr, posix.F_OK);
const err = posix.getErrno(result);
- if (err > 0) {
- return switch (err) {
- posix.EACCES => error.PermissionDenied,
- posix.EROFS => error.PermissionDenied,
- posix.ELOOP => error.PermissionDenied,
- posix.ETXTBSY => error.PermissionDenied,
- posix.ENOTDIR => error.NotFound,
- posix.ENOENT => error.NotFound,
+ switch (err) {
+ 0 => return,
+ posix.EACCES => return error.PermissionDenied,
+ posix.EROFS => return error.PermissionDenied,
+ posix.ELOOP => return error.PermissionDenied,
+ posix.ETXTBSY => return error.PermissionDenied,
+ posix.ENOTDIR => return error.NotFound,
+ posix.ENOENT => return error.NotFound,
- posix.ENAMETOOLONG => error.NameTooLong,
- posix.EINVAL => error.BadMode,
- posix.EFAULT => error.BadPathName,
- posix.EIO => error.Io,
- posix.ENOMEM => error.SystemResources,
- else => os.unexpectedErrorPosix(err),
- };
+ posix.ENAMETOOLONG => return error.NameTooLong,
+ posix.EINVAL => unreachable,
+ posix.EFAULT => return error.BadPathName,
+ posix.EIO => return error.Io,
+ posix.ENOMEM => return error.SystemResources,
+ else => return os.unexpectedErrorPosix(err),
}
- return true;
} else if (is_windows) {
- if (os.windows.PathFileExists(path_with_null.ptr) == os.windows.TRUE) {
- return true;
+ if (os.windows.GetFileAttributesA(path_with_null.ptr) != os.windows.INVALID_FILE_ATTRIBUTES) {
+ return;
}
const err = windows.GetLastError();
- return switch (err) {
- windows.ERROR.FILE_NOT_FOUND => error.NotFound,
- windows.ERROR.ACCESS_DENIED => error.PermissionDenied,
- else => os.unexpectedErrorWindows(err),
- };
+ switch (err) {
+ windows.ERROR.FILE_NOT_FOUND,
+ windows.ERROR.PATH_NOT_FOUND,
+ => return error.NotFound,
+ windows.ERROR.ACCESS_DENIED => return error.PermissionDenied,
+ else => return os.unexpectedErrorWindows(err),
+ }
} else {
@compileError("TODO implement access for this OS");
}
@@ -129,17 +152,17 @@ pub const File = struct {
/// Upon success, the stream is in an uninitialized state. To continue using it,
/// you must use the open() function.
- pub fn close(self: &File) void {
+ pub fn close(self: *File) void {
os.close(self.handle);
self.handle = undefined;
}
/// Calls `os.isTty` on `self.handle`.
- pub fn isTty(self: &File) bool {
+ pub fn isTty(self: *File) bool {
return os.isTty(self.handle);
}
- pub fn seekForward(self: &File, amount: isize) !void {
+ pub fn seekForward(self: *File, amount: isize) !void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const result = posix.lseek(self.handle, amount, posix.SEEK_CUR);
@@ -168,7 +191,7 @@ pub const File = struct {
}
}
- pub fn seekTo(self: &File, pos: usize) !void {
+ pub fn seekTo(self: *File, pos: usize) !void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const ipos = try math.cast(isize, pos);
@@ -199,7 +222,7 @@ pub const File = struct {
}
}
- pub fn getPos(self: &File) !usize {
+ pub fn getPos(self: *File) !usize {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const result = posix.lseek(self.handle, 0, posix.SEEK_CUR);
@@ -217,7 +240,7 @@ pub const File = struct {
return result;
},
Os.windows => {
- var pos : windows.LARGE_INTEGER = undefined;
+ var pos: windows.LARGE_INTEGER = undefined;
if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) {
const err = windows.GetLastError();
return switch (err) {
@@ -227,31 +250,16 @@ pub const File = struct {
}
assert(pos >= 0);
- if (@sizeOf(@typeOf(pos)) > @sizeOf(usize)) {
- if (pos > @maxValue(usize)) {
- return error.FilePosLargerThanPointerRange;
- }
- }
-
- return usize(pos);
+ return math.cast(usize, pos) catch error.FilePosLargerThanPointerRange;
},
else => @compileError("unsupported OS"),
}
}
- pub fn getEndPos(self: &File) !usize {
+ pub fn getEndPos(self: *File) !usize {
if (is_posix) {
- var stat: posix.Stat = undefined;
- const err = posix.getErrno(posix.fstat(self.handle, &stat));
- if (err > 0) {
- return switch (err) {
- posix.EBADF => error.BadFd,
- posix.ENOMEM => error.SystemResources,
- else => os.unexpectedErrorPosix(err),
- };
- }
-
- return usize(stat.size);
+ const stat = try os.posixFStat(self.handle);
+ return @intCast(usize, stat.size);
} else if (is_windows) {
var file_size: windows.LARGE_INTEGER = undefined;
if (windows.GetFileSizeEx(self.handle, &file_size) == 0) {
@@ -262,19 +270,19 @@ pub const File = struct {
}
if (file_size < 0)
return error.Overflow;
- return math.cast(usize, u64(file_size));
+ return math.cast(usize, @intCast(u64, file_size));
} else {
@compileError("TODO support getEndPos on this OS");
}
}
- pub const ModeError = error {
+ pub const ModeError = error{
BadFd,
SystemResources,
Unexpected,
};
- fn mode(self: &File) ModeError!os.FileMode {
+ pub fn mode(self: *File) ModeError!os.FileMode {
if (is_posix) {
var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat));
@@ -296,22 +304,29 @@ pub const File = struct {
}
}
- pub const ReadError = error {};
+ pub const ReadError = error{
+ BadFd,
+ Io,
+ IsDir,
- pub fn read(self: &File, buffer: []u8) !usize {
+ Unexpected,
+ };
+
+ pub fn read(self: *File, buffer: []u8) ReadError!usize {
if (is_posix) {
var index: usize = 0;
while (index < buffer.len) {
- const amt_read = posix.read(self.handle, &buffer[index], buffer.len - index);
+ const amt_read = posix.read(self.handle, buffer.ptr + index, buffer.len - index);
const read_err = posix.getErrno(amt_read);
if (read_err > 0) {
switch (read_err) {
- posix.EINTR => continue,
+ posix.EINTR => continue,
posix.EINVAL => unreachable,
posix.EFAULT => unreachable,
- posix.EBADF => return error.BadFd,
- posix.EIO => return error.Io,
- else => return os.unexpectedErrorPosix(read_err),
+ posix.EBADF => return error.BadFd,
+ posix.EIO => return error.Io,
+ posix.EISDIR => return error.IsDir,
+ else => return os.unexpectedErrorPosix(read_err),
}
}
if (amt_read == 0) return index;
@@ -321,9 +336,9 @@ pub const File = struct {
} else if (is_windows) {
var index: usize = 0;
while (index < buffer.len) {
- const want_read_count = windows.DWORD(math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index));
+ const want_read_count = @intCast(windows.DWORD, math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index));
var amt_read: windows.DWORD = undefined;
- if (windows.ReadFile(self.handle, @ptrCast(&c_void, &buffer[index]), want_read_count, &amt_read, null) == 0) {
+ if (windows.ReadFile(self.handle, @ptrCast(*c_void, buffer.ptr + index), want_read_count, &amt_read, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.OPERATION_ABORTED => continue,
@@ -342,7 +357,7 @@ pub const File = struct {
pub const WriteError = os.WindowsWriteError || os.PosixWriteError;
- fn write(self: &File, bytes: []const u8) WriteError!void {
+ pub fn write(self: *File, bytes: []const u8) WriteError!void {
if (is_posix) {
try os.posixWrite(self.handle, bytes);
} else if (is_windows) {
diff --git a/std/os/get_app_data_dir.zig b/std/os/get_app_data_dir.zig
new file mode 100644
index 0000000000..e8ae5dd490
--- /dev/null
+++ b/std/os/get_app_data_dir.zig
@@ -0,0 +1,69 @@
+const std = @import("../index.zig");
+const builtin = @import("builtin");
+const unicode = std.unicode;
+const mem = std.mem;
+const os = std.os;
+
+pub const GetAppDataDirError = error{
+ OutOfMemory,
+ AppDataDirUnavailable,
+};
+
+/// Caller owns returned memory.
+pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
+ switch (builtin.os) {
+ builtin.Os.windows => {
+ var dir_path_ptr: [*]u16 = undefined;
+ switch (os.windows.SHGetKnownFolderPath(
+ &os.windows.FOLDERID_LocalAppData,
+ os.windows.KF_FLAG_CREATE,
+ null,
+ &dir_path_ptr,
+ )) {
+ os.windows.S_OK => {
+ defer os.windows.CoTaskMemFree(@ptrCast(*c_void, dir_path_ptr));
+ const global_dir = unicode.utf16leToUtf8(allocator, utf16lePtrSlice(dir_path_ptr)) catch |err| switch (err) {
+ error.UnexpectedSecondSurrogateHalf => return error.AppDataDirUnavailable,
+ error.ExpectedSecondSurrogateHalf => return error.AppDataDirUnavailable,
+ error.DanglingSurrogateHalf => return error.AppDataDirUnavailable,
+ error.OutOfMemory => return error.OutOfMemory,
+ };
+ defer allocator.free(global_dir);
+ return os.path.join(allocator, global_dir, appname);
+ },
+ os.windows.E_OUTOFMEMORY => return error.OutOfMemory,
+ else => return error.AppDataDirUnavailable,
+ }
+ },
+ builtin.Os.macosx => {
+ const home_dir = os.getEnvPosix("HOME") orelse {
+ // TODO look in /etc/passwd
+ return error.AppDataDirUnavailable;
+ };
+ return os.path.join(allocator, home_dir, "Library", "Application Support", appname);
+ },
+ builtin.Os.linux => {
+ const home_dir = os.getEnvPosix("HOME") orelse {
+ // TODO look in /etc/passwd
+ return error.AppDataDirUnavailable;
+ };
+ return os.path.join(allocator, home_dir, ".local", "share", appname);
+ },
+ else => @compileError("Unsupported OS"),
+ }
+}
+
+fn utf16lePtrSlice(ptr: [*]const u16) []const u16 {
+ var index: usize = 0;
+ while (ptr[index] != 0) : (index += 1) {}
+ return ptr[0..index];
+}
+
+test "std.os.getAppDataDir" {
+ var buf: [512]u8 = undefined;
+ const allocator = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
+
+ // We can't actually validate the result
+ _ = getAppDataDir(allocator, "zig") catch return;
+}
+
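+// Example results (illustrative, assuming the lookups above succeed):
+//   Windows: %LOCALAPPDATA%\zig
+//   macOS:   ~/Library/Application Support/zig
+//   Linux:   ~/.local/share/zig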
diff --git a/std/os/get_user_id.zig b/std/os/get_user_id.zig
index 11410ffa64..c0c1b1cc4b 100644
--- a/std/os/get_user_id.zig
+++ b/std/os/get_user_id.zig
@@ -74,27 +74,27 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
'\n' => return error.CorruptPasswordFile,
else => {
const digit = switch (byte) {
- '0' ... '9' => byte - '0',
+ '0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
};
- if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile;
- if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile;
+                    if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile;
+                    if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile;
},
},
State.ReadGroupId => switch (byte) {
'\n', ':' => {
- return UserInfo {
+ return UserInfo{
.uid = uid,
.gid = gid,
};
},
else => {
const digit = switch (byte) {
- '0' ... '9' => byte - '0',
+ '0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
};
- if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile;
- if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile;
+                    if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile;
+                    if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile;
},
},
}
diff --git a/std/os/index.zig b/std/os/index.zig
index b6caed6f53..425a900a71 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -2,26 +2,30 @@ const std = @import("../index.zig");
const builtin = @import("builtin");
const Os = builtin.Os;
const is_windows = builtin.os == Os.windows;
+const is_posix = switch (builtin.os) {
+ builtin.Os.linux, builtin.Os.macosx => true,
+ else => false,
+};
const os = this;
test "std.os" {
_ = @import("child_process.zig");
_ = @import("darwin.zig");
- _ = @import("darwin_errno.zig");
+ _ = @import("darwin/errno.zig");
_ = @import("get_user_id.zig");
- _ = @import("linux/errno.zig");
_ = @import("linux/index.zig");
- _ = @import("linux/x86_64.zig");
_ = @import("path.zig");
_ = @import("test.zig");
+ _ = @import("time.zig");
_ = @import("windows/index.zig");
+ _ = @import("get_app_data_dir.zig");
}
pub const windows = @import("windows/index.zig");
pub const darwin = @import("darwin.zig");
pub const linux = @import("linux/index.zig");
pub const zen = @import("zen.zig");
-pub const posix = switch(builtin.os) {
+pub const posix = switch (builtin.os) {
Os.linux => linux,
Os.macosx, Os.ios => darwin,
Os.zen => zen,
@@ -32,6 +36,7 @@ pub const net = @import("net.zig");
pub const ChildProcess = @import("child_process.zig").ChildProcess;
pub const path = @import("path.zig");
pub const File = @import("file.zig").File;
+pub const time = @import("time.zig");
pub const FileMode = switch (builtin.os) {
Os.windows => void,
@@ -54,15 +59,27 @@ pub const windowsWrite = windows_util.windowsWrite;
pub const windowsIsCygwinPty = windows_util.windowsIsCygwinPty;
pub const windowsOpen = windows_util.windowsOpen;
pub const windowsLoadDll = windows_util.windowsLoadDll;
-pub const windowsUnloadDll = windows_util.windowsUnloadDll;
+pub const windowsUnloadDll = windows_util.windowsUnloadDll;
pub const createWindowsEnvBlock = windows_util.createWindowsEnvBlock;
+pub const WindowsCreateIoCompletionPortError = windows_util.WindowsCreateIoCompletionPortError;
+pub const windowsCreateIoCompletionPort = windows_util.windowsCreateIoCompletionPort;
+
+pub const WindowsPostQueuedCompletionStatusError = windows_util.WindowsPostQueuedCompletionStatusError;
+pub const windowsPostQueuedCompletionStatus = windows_util.windowsPostQueuedCompletionStatus;
+
+pub const WindowsWaitResult = windows_util.WindowsWaitResult;
+pub const windowsGetQueuedCompletionStatus = windows_util.windowsGetQueuedCompletionStatus;
+
pub const WindowsWaitError = windows_util.WaitError;
pub const WindowsOpenError = windows_util.OpenError;
pub const WindowsWriteError = windows_util.WriteError;
pub const FileHandle = if (is_windows) windows.HANDLE else i32;
+pub const getAppDataDir = @import("get_app_data_dir.zig").getAppDataDir;
+pub const GetAppDataDirError = @import("get_app_data_dir.zig").GetAppDataDirError;
+
const debug = std.debug;
const assert = debug.assert;
@@ -93,9 +110,9 @@ pub fn getRandomBytes(buf: []u8) !void {
switch (err) {
posix.EINVAL => unreachable,
posix.EFAULT => unreachable,
- posix.EINTR => continue,
+ posix.EINTR => continue,
posix.ENOSYS => {
- const fd = try posixOpenC(c"/dev/urandom", posix.O_RDONLY|posix.O_CLOEXEC, 0);
+ const fd = try posixOpenC(c"/dev/urandom", posix.O_RDONLY | posix.O_CLOEXEC, 0);
defer close(fd);
try posixRead(fd, buf);
@@ -107,22 +124,16 @@ pub fn getRandomBytes(buf: []u8) !void {
return;
},
Os.macosx, Os.ios => {
- const fd = try posixOpenC(c"/dev/urandom", posix.O_RDONLY|posix.O_CLOEXEC, 0);
+ const fd = try posixOpenC(c"/dev/urandom", posix.O_RDONLY | posix.O_CLOEXEC, 0);
defer close(fd);
try posixRead(fd, buf);
},
Os.windows => {
- var hCryptProv: windows.HCRYPTPROV = undefined;
- if (windows.CryptAcquireContextA(&hCryptProv, null, null, windows.PROV_RSA_FULL, 0) == 0) {
- const err = windows.GetLastError();
- return switch (err) {
- else => unexpectedErrorWindows(err),
- };
- }
- defer _ = windows.CryptReleaseContext(hCryptProv, 0);
-
- if (windows.CryptGenRandom(hCryptProv, windows.DWORD(buf.len), buf.ptr) == 0) {
+            // Call RtlGenRandom() instead of CryptGenRandom() on Windows
+ // https://github.com/rust-lang-nursery/rand/issues/111
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=504270
+ if (windows.RtlGenRandom(buf.ptr, buf.len) == 0) {
const err = windows.GetLastError();
return switch (err) {
else => unexpectedErrorWindows(err),
@@ -130,7 +141,7 @@ pub fn getRandomBytes(buf: []u8) !void {
}
},
Os.zen => {
- const randomness = []u8 {42, 1, 7, 12, 22, 17, 99, 16, 26, 87, 41, 45};
+ const randomness = []u8{ 42, 1, 7, 12, 22, 17, 99, 16, 26, 87, 41, 45 };
var i: usize = 0;
while (i < buf.len) : (i += 1) {
if (i > randomness.len) return error.Unknown;
@@ -142,8 +153,14 @@ pub fn getRandomBytes(buf: []u8) !void {
}
test "os.getRandomBytes" {
- var buf: [50]u8 = undefined;
- try getRandomBytes(buf[0..]);
+ var buf_a: [50]u8 = undefined;
+ var buf_b: [50]u8 = undefined;
+ // Call Twice
+ try getRandomBytes(buf_a[0..]);
+ try getRandomBytes(buf_b[0..]);
+
+ // Check if random (not 100% conclusive)
+    assert(!mem.eql(u8, buf_a, buf_b));
}
/// Raises a signal in the current kernel thread, ending its execution.
@@ -215,13 +232,13 @@ pub fn close(handle: FileHandle) void {
/// Calls POSIX read, and keeps trying if it gets interrupted.
pub fn posixRead(fd: i32, buf: []u8) !void {
// Linux can return EINVAL when read amount is > 0x7ffff000
- // See https://github.com/zig-lang/zig/pull/743#issuecomment-363158274
+ // See https://github.com/ziglang/zig/pull/743#issuecomment-363158274
const max_buf_len = 0x7ffff000;
var index: usize = 0;
while (index < buf.len) {
const want_to_read = math.min(buf.len - index, usize(max_buf_len));
- const rc = posix.read(fd, &buf[index], want_to_read);
+ const rc = posix.read(fd, buf.ptr + index, want_to_read);
const err = posix.getErrno(rc);
if (err > 0) {
return switch (err) {
@@ -239,7 +256,7 @@ pub fn posixRead(fd: i32, buf: []u8) !void {
}
}
-pub const PosixWriteError = error {
+pub const PosixWriteError = error{
WouldBlock,
FileClosed,
DestinationAddressRequired,
@@ -255,35 +272,35 @@ pub const PosixWriteError = error {
/// Calls POSIX write, and keeps trying if it gets interrupted.
pub fn posixWrite(fd: i32, bytes: []const u8) !void {
// Linux can return EINVAL when write amount is > 0x7ffff000
- // See https://github.com/zig-lang/zig/pull/743#issuecomment-363165856
+ // See https://github.com/ziglang/zig/pull/743#issuecomment-363165856
const max_bytes_len = 0x7ffff000;
var index: usize = 0;
while (index < bytes.len) {
const amt_to_write = math.min(bytes.len - index, usize(max_bytes_len));
- const rc = posix.write(fd, &bytes[index], amt_to_write);
+ const rc = posix.write(fd, bytes.ptr + index, amt_to_write);
const write_err = posix.getErrno(rc);
if (write_err > 0) {
return switch (write_err) {
- posix.EINTR => continue,
+ posix.EINTR => continue,
posix.EINVAL, posix.EFAULT => unreachable,
posix.EAGAIN => PosixWriteError.WouldBlock,
posix.EBADF => PosixWriteError.FileClosed,
posix.EDESTADDRREQ => PosixWriteError.DestinationAddressRequired,
posix.EDQUOT => PosixWriteError.DiskQuota,
- posix.EFBIG => PosixWriteError.FileTooBig,
- posix.EIO => PosixWriteError.InputOutput,
+ posix.EFBIG => PosixWriteError.FileTooBig,
+ posix.EIO => PosixWriteError.InputOutput,
posix.ENOSPC => PosixWriteError.NoSpaceLeft,
- posix.EPERM => PosixWriteError.AccessDenied,
- posix.EPIPE => PosixWriteError.BrokenPipe,
- else => unexpectedErrorPosix(write_err),
+ posix.EPERM => PosixWriteError.AccessDenied,
+ posix.EPIPE => PosixWriteError.BrokenPipe,
+ else => unexpectedErrorPosix(write_err),
};
}
index += rc;
}
}
-pub const PosixOpenError = error {
+pub const PosixOpenError = error{
OutOfMemory,
AccessDenied,
FileTooBig,
@@ -304,14 +321,15 @@ pub const PosixOpenError = error {
/// ::file_path needs to be copied in memory to add a null terminating byte.
/// Calls POSIX open, keeps trying if it gets interrupted, and translates
/// the return value into zig errors.
-pub fn posixOpen(allocator: &Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 {
+pub fn posixOpen(allocator: *Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 {
const path_with_null = try cstr.addNullByte(allocator, file_path);
defer allocator.free(path_with_null);
return posixOpenC(path_with_null.ptr, flags, perm);
}
-pub fn posixOpenC(file_path: &const u8, flags: u32, perm: usize) !i32 {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn posixOpenC(file_path: [*]const u8, flags: u32, perm: usize) !i32 {
while (true) {
const result = posix.open(file_path, flags, perm);
const err = posix.getErrno(result);
@@ -338,7 +356,7 @@ pub fn posixOpenC(file_path: &const u8, flags: u32, perm: usize) !i32 {
else => return unexpectedErrorPosix(err),
}
}
- return i32(result);
+ return @intCast(i32, result);
}
}
@@ -357,19 +375,19 @@ pub fn posixDup2(old_fd: i32, new_fd: i32) !void {
}
}
-pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap) ![]?&u8 {
+pub fn createNullDelimitedEnvMap(allocator: *Allocator, env_map: *const BufMap) ![]?[*]u8 {
const envp_count = env_map.count();
- const envp_buf = try allocator.alloc(?&u8, envp_count + 1);
- mem.set(?&u8, envp_buf, null);
+ const envp_buf = try allocator.alloc(?[*]u8, envp_count + 1);
+ mem.set(?[*]u8, envp_buf, null);
errdefer freeNullDelimitedEnvMap(allocator, envp_buf);
{
var it = env_map.iterator();
var i: usize = 0;
while (it.next()) |pair| : (i += 1) {
const env_buf = try allocator.alloc(u8, pair.key.len + pair.value.len + 2);
- @memcpy(&env_buf[0], pair.key.ptr, pair.key.len);
+ @memcpy(env_buf.ptr, pair.key.ptr, pair.key.len);
env_buf[pair.key.len] = '=';
- @memcpy(&env_buf[pair.key.len + 1], pair.value.ptr, pair.value.len);
+ @memcpy(env_buf.ptr + pair.key.len + 1, pair.value.ptr, pair.value.len);
env_buf[env_buf.len - 1] = 0;
envp_buf[i] = env_buf.ptr;
@@ -380,7 +398,7 @@ pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap)
return envp_buf;
}
-pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void {
+pub fn freeNullDelimitedEnvMap(allocator: *Allocator, envp_buf: []?[*]u8) void {
for (envp_buf) |env| {
const env_buf = if (env) |ptr| ptr[0 .. cstr.len(ptr) + 1] else break;
allocator.free(env_buf);
@@ -393,11 +411,9 @@ pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void {
/// pointers after the args and after the environment variables.
/// `argv[0]` is the executable path.
/// This function also uses the PATH environment variable to get the full path to the executable.
-pub fn posixExecve(argv: []const []const u8, env_map: &const BufMap,
- allocator: &Allocator) !void
-{
- const argv_buf = try allocator.alloc(?&u8, argv.len + 1);
- mem.set(?&u8, argv_buf, null);
+pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: *Allocator) !void {
+ const argv_buf = try allocator.alloc(?[*]u8, argv.len + 1);
+ mem.set(?[*]u8, argv_buf, null);
defer {
for (argv_buf) |arg| {
const arg_buf = if (arg) |ptr| cstr.toSlice(ptr) else break;
@@ -407,7 +423,7 @@ pub fn posixExecve(argv: []const []const u8, env_map: &const BufMap,
}
for (argv) |arg, i| {
const arg_buf = try allocator.alloc(u8, arg.len + 1);
- @memcpy(&arg_buf[0], arg.ptr, arg.len);
+ @memcpy(arg_buf.ptr, arg.ptr, arg.len);
arg_buf[arg.len] = 0;
argv_buf[i] = arg_buf.ptr;
@@ -419,10 +435,10 @@ pub fn posixExecve(argv: []const []const u8, env_map: &const BufMap,
const exe_path = argv[0];
if (mem.indexOfScalar(u8, exe_path, '/') != null) {
- return posixExecveErrnoToErr(posix.getErrno(posix.execve(??argv_buf[0], argv_buf.ptr, envp_buf.ptr)));
+ return posixExecveErrnoToErr(posix.getErrno(posix.execve(argv_buf[0].?, argv_buf.ptr, envp_buf.ptr)));
}
- const PATH = getEnvPosix("PATH") ?? "/usr/local/bin:/bin/:/usr/bin";
+ const PATH = getEnvPosix("PATH") orelse "/usr/local/bin:/bin/:/usr/bin";
// PATH.len because it is >= the largest search_path
// +1 for the / to join the search path and exe_path
// +1 for the null terminating byte
@@ -450,7 +466,7 @@ pub fn posixExecve(argv: []const []const u8, env_map: &const BufMap,
return posixExecveErrnoToErr(err);
}
-pub const PosixExecveError = error {
+pub const PosixExecveError = error{
SystemResources,
AccessDenied,
InvalidExe,
@@ -478,21 +494,22 @@ fn posixExecveErrnoToErr(err: usize) PosixExecveError {
};
}
-pub var posix_environ_raw: []&u8 = undefined;
+pub var linux_aux_raw = []usize{0} ** 38;
+pub var posix_environ_raw: [][*]u8 = undefined;
/// Caller must free result when done.
-pub fn getEnvMap(allocator: &Allocator) !BufMap {
+/// TODO make this go through libc when we have it
+pub fn getEnvMap(allocator: *Allocator) !BufMap {
var result = BufMap.init(allocator);
errdefer result.deinit();
if (is_windows) {
- const ptr = windows.GetEnvironmentStringsA() ?? return error.OutOfMemory;
+ const ptr = windows.GetEnvironmentStringsA() orelse return error.OutOfMemory;
defer assert(windows.FreeEnvironmentStringsA(ptr) != 0);
var i: usize = 0;
while (true) {
- if (ptr[i] == 0)
- return result;
+ if (ptr[i] == 0) return result;
const key_start = i;
@@ -517,7 +534,7 @@ pub fn getEnvMap(allocator: &Allocator) !BufMap {
var end_i: usize = line_i;
while (ptr[end_i] != 0) : (end_i += 1) {}
- const value = ptr[line_i + 1..end_i];
+ const value = ptr[line_i + 1 .. end_i];
try result.set(key, value);
}
@@ -525,25 +542,31 @@ pub fn getEnvMap(allocator: &Allocator) !BufMap {
}
}
+/// TODO make this go through libc when we have it
pub fn getEnvPosix(key: []const u8) ?[]const u8 {
for (posix_environ_raw) |ptr| {
var line_i: usize = 0;
while (ptr[line_i] != 0 and ptr[line_i] != '=') : (line_i += 1) {}
const this_key = ptr[0..line_i];
- if (!mem.eql(u8, key, this_key))
- continue;
+ if (!mem.eql(u8, key, this_key)) continue;
var end_i: usize = line_i;
while (ptr[end_i] != 0) : (end_i += 1) {}
- const this_value = ptr[line_i + 1..end_i];
+ const this_value = ptr[line_i + 1 .. end_i];
return this_value;
}
return null;
}
+pub const GetEnvVarOwnedError = error{
+ OutOfMemory,
+ EnvironmentVariableNotFound,
+};
+
/// Caller must free returned memory.
-pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 {
+/// TODO make this go through libc when we have it
+pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
if (is_windows) {
const key_with_null = try cstr.addNullByte(allocator, key);
defer allocator.free(key_with_null);
@@ -552,14 +575,17 @@ pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 {
errdefer allocator.free(buf);
while (true) {
- const windows_buf_len = try math.cast(windows.DWORD, buf.len);
+ const windows_buf_len = math.cast(windows.DWORD, buf.len) catch return error.OutOfMemory;
const result = windows.GetEnvironmentVariableA(key_with_null.ptr, buf.ptr, windows_buf_len);
if (result == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.ENVVAR_NOT_FOUND => error.EnvironmentVariableNotFound,
- else => unexpectedErrorWindows(err),
+ else => {
+ _ = unexpectedErrorWindows(err);
+ return error.EnvironmentVariableNotFound;
+ },
};
}
@@ -571,20 +597,20 @@ pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 {
return allocator.shrink(u8, buf, result);
}
} else {
- const result = getEnvPosix(key) ?? return error.EnvironmentVariableNotFound;
+ const result = getEnvPosix(key) orelse return error.EnvironmentVariableNotFound;
return mem.dupe(allocator, u8, result);
}
}
/// Caller must free the returned memory.
-pub fn getCwd(allocator: &Allocator) ![]u8 {
+pub fn getCwd(allocator: *Allocator) ![]u8 {
switch (builtin.os) {
Os.windows => {
var buf = try allocator.alloc(u8, 256);
errdefer allocator.free(buf);
while (true) {
- const result = windows.GetCurrentDirectoryA(windows.WORD(buf.len), buf.ptr);
+ const result = windows.GetCurrentDirectoryA(@intCast(windows.WORD, buf.len), buf.ptr);
if (result == 0) {
const err = windows.GetLastError();
@@ -626,7 +652,7 @@ test "os.getCwd" {
pub const SymLinkError = PosixSymLinkError || WindowsSymLinkError;
-pub fn symLink(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) SymLinkError!void {
+pub fn symLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) SymLinkError!void {
if (is_windows) {
return symLinkWindows(allocator, existing_path, new_path);
} else {
@@ -634,12 +660,12 @@ pub fn symLink(allocator: &Allocator, existing_path: []const u8, new_path: []con
}
}
-pub const WindowsSymLinkError = error {
+pub const WindowsSymLinkError = error{
OutOfMemory,
Unexpected,
};
-pub fn symLinkWindows(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) WindowsSymLinkError!void {
+pub fn symLinkWindows(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) WindowsSymLinkError!void {
const existing_with_null = try cstr.addNullByte(allocator, existing_path);
defer allocator.free(existing_with_null);
const new_with_null = try cstr.addNullByte(allocator, new_path);
@@ -653,7 +679,7 @@ pub fn symLinkWindows(allocator: &Allocator, existing_path: []const u8, new_path
}
}
-pub const PosixSymLinkError = error {
+pub const PosixSymLinkError = error{
OutOfMemory,
AccessDenied,
DiskQuota,
@@ -669,7 +695,7 @@ pub const PosixSymLinkError = error {
Unexpected,
};
-pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) PosixSymLinkError!void {
+pub fn symLinkPosix(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) PosixSymLinkError!void {
const full_buf = try allocator.alloc(u8, existing_path.len + new_path.len + 2);
defer allocator.free(full_buf);
@@ -677,7 +703,7 @@ pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path:
mem.copy(u8, existing_buf, existing_path);
existing_buf[existing_path.len] = 0;
- const new_buf = full_buf[existing_path.len + 1..];
+ const new_buf = full_buf[existing_path.len + 1 ..];
mem.copy(u8, new_buf, new_path);
new_buf[new_path.len] = 0;
@@ -702,11 +728,9 @@ pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path:
}
// here we replace the standard +/ with -_ so that it can be used in a file name
-const b64_fs_encoder = base64.Base64Encoder.init(
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_",
- base64.standard_pad_char);
+const b64_fs_encoder = base64.Base64Encoder.init("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", base64.standard_pad_char);
-pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) !void {
+pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
if (symLink(allocator, existing_path, new_path)) {
return;
} else |err| switch (err) {
@@ -714,7 +738,7 @@ pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path:
else => return err, // TODO zig should know this set does not include PathAlreadyExists
}
- const dirname = os.path.dirname(new_path);
+ const dirname = os.path.dirname(new_path) orelse ".";
var rand_buf: [12]u8 = undefined;
const tmp_path = try allocator.alloc(u8, dirname.len + 1 + base64.Base64Encoder.calcSize(rand_buf.len));
@@ -732,10 +756,25 @@ pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path:
else => return err, // TODO zig should know this set does not include PathAlreadyExists
}
}
-
}
-pub fn deleteFile(allocator: &Allocator, file_path: []const u8) !void {
+pub const DeleteFileError = error{
+ FileNotFound,
+ AccessDenied,
+ FileBusy,
+ FileSystem,
+ IsDir,
+ SymLinkLoop,
+ NameTooLong,
+ NotDir,
+ SystemResources,
+ ReadOnlyFileSystem,
+ OutOfMemory,
+
+ Unexpected,
+};
+
+pub fn deleteFile(allocator: *Allocator, file_path: []const u8) DeleteFileError!void {
if (builtin.os == Os.windows) {
return deleteFileWindows(allocator, file_path);
} else {
@@ -743,7 +782,7 @@ pub fn deleteFile(allocator: &Allocator, file_path: []const u8) !void {
}
}
-pub fn deleteFileWindows(allocator: &Allocator, file_path: []const u8) !void {
+pub fn deleteFileWindows(allocator: *Allocator, file_path: []const u8) !void {
const buf = try allocator.alloc(u8, file_path.len + 1);
defer allocator.free(buf);
@@ -761,7 +800,7 @@ pub fn deleteFileWindows(allocator: &Allocator, file_path: []const u8) !void {
}
}
-pub fn deleteFilePosix(allocator: &Allocator, file_path: []const u8) !void {
+pub fn deleteFilePosix(allocator: *Allocator, file_path: []const u8) !void {
const buf = try allocator.alloc(u8, file_path.len + 1);
defer allocator.free(buf);
@@ -792,7 +831,7 @@ pub fn deleteFilePosix(allocator: &Allocator, file_path: []const u8) !void {
/// there is a possibility of power loss or application termination leaving temporary files present
/// in the same directory as dest_path.
/// Destination file will have the same mode as the source file.
-pub fn copyFile(allocator: &Allocator, source_path: []const u8, dest_path: []const u8) !void {
+pub fn copyFile(allocator: *Allocator, source_path: []const u8, dest_path: []const u8) !void {
var in_file = try os.File.openRead(allocator, source_path);
defer in_file.close();
@@ -814,7 +853,7 @@ pub fn copyFile(allocator: &Allocator, source_path: []const u8, dest_path: []con
/// Guaranteed to be atomic. However until https://patchwork.kernel.org/patch/9636735/ is
/// merged and readily available,
/// there is a possibility of power loss or application termination leaving temporary files present
-pub fn copyFileMode(allocator: &Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void {
+pub fn copyFileMode(allocator: *Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void {
var in_file = try os.File.openRead(allocator, source_path);
defer in_file.close();
@@ -832,7 +871,7 @@ pub fn copyFileMode(allocator: &Allocator, source_path: []const u8, dest_path: [
}
pub const AtomicFile = struct {
- allocator: &Allocator,
+ allocator: *Allocator,
file: os.File,
tmp_path: []u8,
dest_path: []const u8,
@@ -840,18 +879,24 @@ pub const AtomicFile = struct {
/// dest_path must remain valid for the lifetime of AtomicFile
/// call finish to atomically replace dest_path with contents
- pub fn init(allocator: &Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile {
+ pub fn init(allocator: *Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile {
const dirname = os.path.dirname(dest_path);
var rand_buf: [12]u8 = undefined;
- const tmp_path = try allocator.alloc(u8, dirname.len + 1 + base64.Base64Encoder.calcSize(rand_buf.len));
+
+ const dirname_component_len = if (dirname) |d| d.len + 1 else 0;
+ const tmp_path = try allocator.alloc(u8, dirname_component_len +
+ base64.Base64Encoder.calcSize(rand_buf.len));
errdefer allocator.free(tmp_path);
- mem.copy(u8, tmp_path[0..], dirname);
- tmp_path[dirname.len] = os.path.sep;
+
+ if (dirname) |dir| {
+ mem.copy(u8, tmp_path[0..], dir);
+ tmp_path[dir.len] = os.path.sep;
+ }
while (true) {
try getRandomBytes(rand_buf[0..]);
- b64_fs_encoder.encode(tmp_path[dirname.len + 1 ..], rand_buf);
+ b64_fs_encoder.encode(tmp_path[dirname_component_len..], rand_buf);
const file = os.File.openWriteNoClobber(allocator, tmp_path, mode) catch |err| switch (err) {
error.PathAlreadyExists => continue,
@@ -860,7 +905,7 @@ pub const AtomicFile = struct {
else => return err,
};
- return AtomicFile {
+ return AtomicFile{
.allocator = allocator,
.file = file,
.tmp_path = tmp_path,
@@ -871,7 +916,7 @@ pub const AtomicFile = struct {
}
/// always call deinit, even after successful finish()
- pub fn deinit(self: &AtomicFile) void {
+ pub fn deinit(self: *AtomicFile) void {
if (!self.finished) {
self.file.close();
deleteFile(self.allocator, self.tmp_path) catch {};
@@ -880,7 +925,7 @@ pub const AtomicFile = struct {
}
}
- pub fn finish(self: &AtomicFile) !void {
+ pub fn finish(self: *AtomicFile) !void {
assert(!self.finished);
self.file.close();
try rename(self.allocator, self.tmp_path, self.dest_path);
@@ -889,7 +934,7 @@ pub const AtomicFile = struct {
}
};
-pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8) !void {
+pub fn rename(allocator: *Allocator, old_path: []const u8, new_path: []const u8) !void {
const full_buf = try allocator.alloc(u8, old_path.len + new_path.len + 2);
defer allocator.free(full_buf);
@@ -897,12 +942,12 @@ pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8)
mem.copy(u8, old_buf, old_path);
old_buf[old_path.len] = 0;
- const new_buf = full_buf[old_path.len + 1..];
+ const new_buf = full_buf[old_path.len + 1 ..];
mem.copy(u8, new_buf, new_path);
new_buf[new_path.len] = 0;
if (is_windows) {
- const flags = windows.MOVEFILE_REPLACE_EXISTING|windows.MOVEFILE_WRITE_THROUGH;
+ const flags = windows.MOVEFILE_REPLACE_EXISTING | windows.MOVEFILE_WRITE_THROUGH;
if (windows.MoveFileExA(old_buf.ptr, new_buf.ptr, flags) == 0) {
const err = windows.GetLastError();
return switch (err) {
@@ -934,7 +979,7 @@ pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8)
}
}
-pub fn makeDir(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn makeDir(allocator: *Allocator, dir_path: []const u8) !void {
if (is_windows) {
return makeDirWindows(allocator, dir_path);
} else {
@@ -942,7 +987,7 @@ pub fn makeDir(allocator: &Allocator, dir_path: []const u8) !void {
}
}
-pub fn makeDirWindows(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn makeDirWindows(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try cstr.addNullByte(allocator, dir_path);
defer allocator.free(path_buf);
@@ -956,7 +1001,7 @@ pub fn makeDirWindows(allocator: &Allocator, dir_path: []const u8) !void {
}
}
-pub fn makeDirPosix(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn makeDirPosix(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try cstr.addNullByte(allocator, dir_path);
defer allocator.free(path_buf);
@@ -982,7 +1027,7 @@ pub fn makeDirPosix(allocator: &Allocator, dir_path: []const u8) !void {
/// Calls makeDir recursively to make an entire path. Returns success if the path
/// already exists and is a directory.
-pub fn makePath(allocator: &Allocator, full_path: []const u8) !void {
+pub fn makePath(allocator: *Allocator, full_path: []const u8) !void {
const resolved_path = try path.resolve(allocator, full_path);
defer allocator.free(resolved_path);
@@ -993,63 +1038,88 @@ pub fn makePath(allocator: &Allocator, full_path: []const u8) !void {
// TODO stat the file and return an error if it's not a directory
// this is important because otherwise a dangling symlink
// could cause an infinite loop
- if (end_index == resolved_path.len)
- return;
+ if (end_index == resolved_path.len) return;
} else if (err == error.FileNotFound) {
// march end_index backward until next path component
while (true) {
end_index -= 1;
- if (os.path.isSep(resolved_path[end_index]))
- break;
+ if (os.path.isSep(resolved_path[end_index])) break;
}
continue;
} else {
return err;
}
};
- if (end_index == resolved_path.len)
- return;
+ if (end_index == resolved_path.len) return;
// march end_index forward until next path component
while (true) {
end_index += 1;
- if (end_index == resolved_path.len or os.path.isSep(resolved_path[end_index]))
- break;
+ if (end_index == resolved_path.len or os.path.isSep(resolved_path[end_index])) break;
}
}
}
+pub const DeleteDirError = error{
+ AccessDenied,
+ FileBusy,
+ SymLinkLoop,
+ NameTooLong,
+ FileNotFound,
+ SystemResources,
+ NotDir,
+ DirNotEmpty,
+ ReadOnlyFileSystem,
+ OutOfMemory,
+
+ Unexpected,
+};
+
/// Returns ::error.DirNotEmpty if the directory is not empty.
/// To delete a directory recursively, see ::deleteTree
-pub fn deleteDir(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn deleteDir(allocator: *Allocator, dir_path: []const u8) DeleteDirError!void {
const path_buf = try allocator.alloc(u8, dir_path.len + 1);
defer allocator.free(path_buf);
mem.copy(u8, path_buf, dir_path);
path_buf[dir_path.len] = 0;
- const err = posix.getErrno(posix.rmdir(path_buf.ptr));
- if (err > 0) {
- return switch (err) {
- posix.EACCES, posix.EPERM => error.AccessDenied,
- posix.EBUSY => error.FileBusy,
- posix.EFAULT, posix.EINVAL => unreachable,
- posix.ELOOP => error.SymLinkLoop,
- posix.ENAMETOOLONG => error.NameTooLong,
- posix.ENOENT => error.FileNotFound,
- posix.ENOMEM => error.SystemResources,
- posix.ENOTDIR => error.NotDir,
- posix.EEXIST, posix.ENOTEMPTY => error.DirNotEmpty,
- posix.EROFS => error.ReadOnlyFileSystem,
- else => unexpectedErrorPosix(err),
- };
+ switch (builtin.os) {
+ Os.windows => {
+ if (windows.RemoveDirectoryA(path_buf.ptr) == 0) {
+ const err = windows.GetLastError();
+ return switch (err) {
+ windows.ERROR.PATH_NOT_FOUND => error.FileNotFound,
+ windows.ERROR.DIR_NOT_EMPTY => error.DirNotEmpty,
+ else => unexpectedErrorWindows(err),
+ };
+ }
+ },
+ Os.linux, Os.macosx, Os.ios => {
+ const err = posix.getErrno(posix.rmdir(path_buf.ptr));
+ if (err > 0) {
+ return switch (err) {
+ posix.EACCES, posix.EPERM => error.AccessDenied,
+ posix.EBUSY => error.FileBusy,
+ posix.EFAULT, posix.EINVAL => unreachable,
+ posix.ELOOP => error.SymLinkLoop,
+ posix.ENAMETOOLONG => error.NameTooLong,
+ posix.ENOENT => error.FileNotFound,
+ posix.ENOMEM => error.SystemResources,
+ posix.ENOTDIR => error.NotDir,
+ posix.EEXIST, posix.ENOTEMPTY => error.DirNotEmpty,
+ posix.EROFS => error.ReadOnlyFileSystem,
+ else => unexpectedErrorPosix(err),
+ };
+ }
+ },
+ else => @compileError("unimplemented"),
}
}
/// Whether ::full_path describes a symlink, file, or directory, this function
/// removes it. If it cannot be removed because it is a non-empty directory,
/// this function recursively removes its entries and then tries again.
-// TODO non-recursive implementation
-const DeleteTreeError = error {
+const DeleteTreeError = error{
OutOfMemory,
AccessDenied,
FileTooBig,
@@ -1071,7 +1141,7 @@ const DeleteTreeError = error {
DirNotEmpty,
Unexpected,
};
-pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!void {
+pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!void {
start_over: while (true) {
var got_access_denied = false;
// First, try deleting the item as a file. This way we don't follow sym links.
@@ -1090,8 +1160,8 @@ pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!
error.NotDir,
error.FileSystem,
error.FileBusy,
- error.Unexpected
- => return err,
+ error.Unexpected,
+ => return err,
}
{
var dir = Dir.open(allocator, full_path) catch |err| switch (err) {
@@ -1115,8 +1185,8 @@ pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!
error.SystemResources,
error.NoSpaceLeft,
error.PathAlreadyExists,
- error.Unexpected
- => return err,
+ error.Unexpected,
+ => return err,
};
defer dir.close();
@@ -1127,8 +1197,8 @@ pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!
try full_entry_buf.resize(full_path.len + entry.name.len + 1);
const full_entry_path = full_entry_buf.toSlice();
mem.copy(u8, full_entry_path, full_path);
- full_entry_path[full_path.len] = '/';
- mem.copy(u8, full_entry_path[full_path.len + 1..], entry.name);
+ full_entry_path[full_path.len] = path.sep;
+ mem.copy(u8, full_entry_path[full_path.len + 1 ..], entry.name);
try deleteTree(allocator, full_entry_path);
}
@@ -1138,16 +1208,29 @@ pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!
}
pub const Dir = struct {
- fd: i32,
- darwin_seek: darwin_seek_t,
- allocator: &Allocator,
- buf: []u8,
- index: usize,
- end_index: usize,
+ handle: Handle,
+ allocator: *Allocator,
- const darwin_seek_t = switch (builtin.os) {
- Os.macosx, Os.ios => i64,
- else => void,
+ pub const Handle = switch (builtin.os) {
+ Os.macosx, Os.ios => struct {
+ fd: i32,
+ seek: i64,
+ buf: []u8,
+ index: usize,
+ end_index: usize,
+ },
+ Os.linux => struct {
+ fd: i32,
+ buf: []u8,
+ index: usize,
+ end_index: usize,
+ },
+ Os.windows => struct {
+ handle: windows.HANDLE,
+ find_file_data: windows.WIN32_FIND_DATAA,
+ first: bool,
+ },
+ else => @compileError("unimplemented"),
};
pub const Entry = struct {
@@ -1167,78 +1250,122 @@ pub const Dir = struct {
};
};
- pub fn open(allocator: &Allocator, dir_path: []const u8) !Dir {
- const fd = switch (builtin.os) {
- Os.windows => @compileError("TODO support Dir.open for windows"),
- Os.linux => try posixOpen(allocator, dir_path, posix.O_RDONLY|posix.O_DIRECTORY|posix.O_CLOEXEC, 0),
- Os.macosx, Os.ios => try posixOpen(allocator, dir_path, posix.O_RDONLY|posix.O_NONBLOCK|posix.O_DIRECTORY|posix.O_CLOEXEC, 0),
- else => @compileError("Dir.open is not supported for this platform"),
- };
- const darwin_seek_init = switch (builtin.os) {
- Os.macosx, Os.ios => 0,
- else => {},
- };
- return Dir {
+ pub const OpenError = error{
+ PathNotFound,
+ NotDir,
+ AccessDenied,
+ FileTooBig,
+ IsDir,
+ SymLinkLoop,
+ ProcessFdQuotaExceeded,
+ NameTooLong,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ SystemResources,
+ NoSpaceLeft,
+ PathAlreadyExists,
+ OutOfMemory,
+
+ Unexpected,
+ };
+
+ pub fn open(allocator: *Allocator, dir_path: []const u8) OpenError!Dir {
+ return Dir{
.allocator = allocator,
- .fd = fd,
- .darwin_seek = darwin_seek_init,
- .index = 0,
- .end_index = 0,
- .buf = []u8{},
+ .handle = switch (builtin.os) {
+ Os.windows => blk: {
+ var find_file_data: windows.WIN32_FIND_DATAA = undefined;
+ const handle = try windows_util.windowsFindFirstFile(allocator, dir_path, &find_file_data);
+ break :blk Handle{
+ .handle = handle,
+ .find_file_data = find_file_data, // TODO guaranteed copy elision
+ .first = true,
+ };
+ },
+ Os.macosx, Os.ios => Handle{
+ .fd = try posixOpen(
+ allocator,
+ dir_path,
+ posix.O_RDONLY | posix.O_NONBLOCK | posix.O_DIRECTORY | posix.O_CLOEXEC,
+ 0,
+ ),
+ .seek = 0,
+ .index = 0,
+ .end_index = 0,
+ .buf = []u8{},
+ },
+ Os.linux => Handle{
+ .fd = try posixOpen(
+ allocator,
+ dir_path,
+ posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC,
+ 0,
+ ),
+ .index = 0,
+ .end_index = 0,
+ .buf = []u8{},
+ },
+ else => @compileError("unimplemented"),
+ },
};
}
- pub fn close(self: &Dir) void {
- self.allocator.free(self.buf);
- os.close(self.fd);
+ pub fn close(self: *Dir) void {
+ switch (builtin.os) {
+ Os.windows => {
+ _ = windows.FindClose(self.handle.handle);
+ },
+ Os.macosx, Os.ios, Os.linux => {
+ self.allocator.free(self.handle.buf);
+ os.close(self.handle.fd);
+ },
+ else => @compileError("unimplemented"),
+ }
}
/// Memory such as file names referenced in this returned entry becomes invalid
- /// with subsequent calls to next, as well as when this ::Dir is deinitialized.
- pub fn next(self: &Dir) !?Entry {
+ /// with subsequent calls to next, as well as when this `Dir` is deinitialized.
+ pub fn next(self: *Dir) !?Entry {
switch (builtin.os) {
Os.linux => return self.nextLinux(),
Os.macosx, Os.ios => return self.nextDarwin(),
Os.windows => return self.nextWindows(),
- else => @compileError("Dir.next not supported on " ++ @tagName(builtin.os)),
+ else => @compileError("unimplemented"),
}
}
- fn nextDarwin(self: &Dir) !?Entry {
+ fn nextDarwin(self: *Dir) !?Entry {
start_over: while (true) {
- if (self.index >= self.end_index) {
- if (self.buf.len == 0) {
- self.buf = try self.allocator.alloc(u8, page_size);
+ if (self.handle.index >= self.handle.end_index) {
+ if (self.handle.buf.len == 0) {
+ self.handle.buf = try self.allocator.alloc(u8, page_size);
}
while (true) {
- const result = posix.getdirentries64(self.fd, self.buf.ptr, self.buf.len,
- &self.darwin_seek);
+ const result = posix.getdirentries64(self.handle.fd, self.handle.buf.ptr, self.handle.buf.len, &self.handle.seek);
const err = posix.getErrno(result);
if (err > 0) {
switch (err) {
posix.EBADF, posix.EFAULT, posix.ENOTDIR => unreachable,
posix.EINVAL => {
- self.buf = try self.allocator.realloc(u8, self.buf, self.buf.len * 2);
+ self.handle.buf = try self.allocator.realloc(u8, self.handle.buf, self.handle.buf.len * 2);
continue;
},
else => return unexpectedErrorPosix(err),
}
}
- if (result == 0)
- return null;
- self.index = 0;
- self.end_index = result;
+ if (result == 0) return null;
+ self.handle.index = 0;
+ self.handle.end_index = result;
break;
}
}
- const darwin_entry = @ptrCast(& align(1) posix.dirent, &self.buf[self.index]);
- const next_index = self.index + darwin_entry.d_reclen;
- self.index = next_index;
+ const darwin_entry = @ptrCast(*align(1) posix.dirent, &self.handle.buf[self.handle.index]);
+ const next_index = self.handle.index + darwin_entry.d_reclen;
+ self.handle.index = next_index;
- const name = (&darwin_entry.d_name)[0..darwin_entry.d_namlen];
+ const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen];
- // skip . and .. entries
if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
continue :start_over;
}
@@ -1254,56 +1381,76 @@ pub const Dir = struct {
posix.DT_WHT => Entry.Kind.Whiteout,
else => Entry.Kind.Unknown,
};
- return Entry {
+ return Entry{
.name = name,
.kind = entry_kind,
};
}
}
- fn nextWindows(self: &Dir) !?Entry {
- @compileError("TODO support Dir.next for windows");
+ fn nextWindows(self: *Dir) !?Entry {
+ while (true) {
+ if (self.handle.first) {
+ self.handle.first = false;
+ } else {
+ if (!try windows_util.windowsFindNextFile(self.handle.handle, &self.handle.find_file_data))
+ return null;
+ }
+ const name = std.cstr.toSlice(self.handle.find_file_data.cFileName[0..].ptr);
+ if (mem.eql(u8, name, ".") or mem.eql(u8, name, ".."))
+ continue;
+ const kind = blk: {
+ const attrs = self.handle.find_file_data.dwFileAttributes;
+ if (attrs & windows.FILE_ATTRIBUTE_DIRECTORY != 0) break :blk Entry.Kind.Directory;
+ if (attrs & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) break :blk Entry.Kind.SymLink;
+ if (attrs & windows.FILE_ATTRIBUTE_NORMAL != 0) break :blk Entry.Kind.File;
+ break :blk Entry.Kind.Unknown;
+ };
+ return Entry{
+ .name = name,
+ .kind = kind,
+ };
+ }
}
- fn nextLinux(self: &Dir) !?Entry {
+ fn nextLinux(self: *Dir) !?Entry {
start_over: while (true) {
- if (self.index >= self.end_index) {
- if (self.buf.len == 0) {
- self.buf = try self.allocator.alloc(u8, page_size);
+ if (self.handle.index >= self.handle.end_index) {
+ if (self.handle.buf.len == 0) {
+ self.handle.buf = try self.allocator.alloc(u8, page_size);
}
while (true) {
- const result = posix.getdents(self.fd, self.buf.ptr, self.buf.len);
+ const result = posix.getdents(self.handle.fd, self.handle.buf.ptr, self.handle.buf.len);
const err = posix.getErrno(result);
if (err > 0) {
switch (err) {
posix.EBADF, posix.EFAULT, posix.ENOTDIR => unreachable,
posix.EINVAL => {
- self.buf = try self.allocator.realloc(u8, self.buf, self.buf.len * 2);
+ self.handle.buf = try self.allocator.realloc(u8, self.handle.buf, self.handle.buf.len * 2);
continue;
},
else => return unexpectedErrorPosix(err),
}
}
- if (result == 0)
- return null;
- self.index = 0;
- self.end_index = result;
+ if (result == 0) return null;
+ self.handle.index = 0;
+ self.handle.end_index = result;
break;
}
}
- const linux_entry = @ptrCast(& align(1) posix.dirent, &self.buf[self.index]);
- const next_index = self.index + linux_entry.d_reclen;
- self.index = next_index;
+ const linux_entry = @ptrCast(*align(1) posix.dirent, &self.handle.buf[self.handle.index]);
+ const next_index = self.handle.index + linux_entry.d_reclen;
+ self.handle.index = next_index;
- const name = cstr.toSlice(&linux_entry.d_name);
+ const name = cstr.toSlice(@ptrCast([*]u8, &linux_entry.d_name));
// skip . and .. entries
if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
continue :start_over;
}
- const type_char = self.buf[next_index - 1];
+ const type_char = self.handle.buf[next_index - 1];
const entry_kind = switch (type_char) {
posix.DT_BLK => Entry.Kind.BlockDevice,
posix.DT_CHR => Entry.Kind.CharacterDevice,
@@ -1314,7 +1461,7 @@ pub const Dir = struct {
posix.DT_SOCK => Entry.Kind.UnixDomainSocket,
else => Entry.Kind.Unknown,
};
- return Entry {
+ return Entry{
.name = name,
.kind = entry_kind,
};
@@ -1322,7 +1469,7 @@ pub const Dir = struct {
}
};
-pub fn changeCurDir(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn changeCurDir(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try allocator.alloc(u8, dir_path.len + 1);
defer allocator.free(path_buf);
@@ -1346,7 +1493,7 @@ pub fn changeCurDir(allocator: &Allocator, dir_path: []const u8) !void {
}
/// Read value of a symbolic link.
-pub fn readLink(allocator: &Allocator, pathname: []const u8) ![]u8 {
+pub fn readLink(allocator: *Allocator, pathname: []const u8) ![]u8 {
const path_buf = try allocator.alloc(u8, pathname.len + 1);
defer allocator.free(path_buf);
@@ -1379,50 +1526,6 @@ pub fn readLink(allocator: &Allocator, pathname: []const u8) ![]u8 {
}
}
-pub fn sleep(seconds: usize, nanoseconds: usize) void {
- switch(builtin.os) {
- Os.linux, Os.macosx, Os.ios => {
- posixSleep(u63(seconds), u63(nanoseconds));
- },
- Os.windows => {
- const milliseconds = seconds * 1000 + nanoseconds / 1000000;
- windows.Sleep(windows.DWORD(milliseconds));
- },
- else => @compileError("Unsupported OS"),
- }
-}
-
-const u63 = @IntType(false, 63);
-pub fn posixSleep(seconds: u63, nanoseconds: u63) void {
- var req = posix.timespec {
- .tv_sec = seconds,
- .tv_nsec = nanoseconds,
- };
- var rem: posix.timespec = undefined;
- while (true) {
- const ret_val = posix.nanosleep(&req, &rem);
- const err = posix.getErrno(ret_val);
- if (err == 0) return;
- switch (err) {
- posix.EFAULT => unreachable,
- posix.EINVAL => {
- // Sometimes Darwin returns EINVAL for no reason.
- // We treat it as a spurious wakeup.
- return;
- },
- posix.EINTR => {
- req = rem;
- continue;
- },
- else => return,
- }
- }
-}
-
-test "os.sleep" {
- sleep(0, 1);
-}
-
pub fn posix_setuid(uid: u32) !void {
const err = posix.getErrno(posix.setuid(uid));
if (err == 0) return;
@@ -1467,7 +1570,7 @@ pub fn posix_setregid(rgid: u32, egid: u32) !void {
};
}
-pub const WindowsGetStdHandleErrs = error {
+pub const WindowsGetStdHandleErrs = error{
NoStdHandles,
Unexpected,
};
@@ -1491,24 +1594,22 @@ pub const ArgIteratorPosix = struct {
count: usize,
pub fn init() ArgIteratorPosix {
- return ArgIteratorPosix {
+ return ArgIteratorPosix{
.index = 0,
.count = raw.len,
};
}
- pub fn next(self: &ArgIteratorPosix) ?[]const u8 {
- if (self.index == self.count)
- return null;
+ pub fn next(self: *ArgIteratorPosix) ?[]const u8 {
+ if (self.index == self.count) return null;
const s = raw[self.index];
self.index += 1;
return cstr.toSlice(s);
}
- pub fn skip(self: &ArgIteratorPosix) bool {
- if (self.index == self.count)
- return false;
+ pub fn skip(self: *ArgIteratorPosix) bool {
+ if (self.index == self.count) return false;
self.index += 1;
return true;
@@ -1516,12 +1617,12 @@ pub const ArgIteratorPosix = struct {
/// This is marked as public but actually it's only meant to be used
/// internally by zig's startup code.
- pub var raw: []&u8 = undefined;
+ pub var raw: [][*]u8 = undefined;
};
pub const ArgIteratorWindows = struct {
index: usize,
- cmd_line: &const u8,
+ cmd_line: [*]const u8,
in_quote: bool,
quote_count: usize,
seen_quote_count: usize,
@@ -1532,8 +1633,8 @@ pub const ArgIteratorWindows = struct {
return initWithCmdLine(windows.GetCommandLineA());
}
- pub fn initWithCmdLine(cmd_line: &const u8) ArgIteratorWindows {
- return ArgIteratorWindows {
+ pub fn initWithCmdLine(cmd_line: [*]const u8) ArgIteratorWindows {
+ return ArgIteratorWindows{
.index = 0,
.cmd_line = cmd_line,
.in_quote = false,
@@ -1543,7 +1644,7 @@ pub const ArgIteratorWindows = struct {
}
/// You must free the returned memory when done.
- pub fn next(self: &ArgIteratorWindows, allocator: &Allocator) ?(NextError![]u8) {
+ pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![]u8) {
// march forward over whitespace
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
@@ -1557,7 +1658,7 @@ pub const ArgIteratorWindows = struct {
return self.internalNext(allocator);
}
- pub fn skip(self: &ArgIteratorWindows) bool {
+ pub fn skip(self: *ArgIteratorWindows) bool {
// march forward over whitespace
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
@@ -1596,7 +1697,7 @@ pub const ArgIteratorWindows = struct {
}
}
- fn internalNext(self: &ArgIteratorWindows, allocator: &Allocator) NextError![]u8 {
+ fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![]u8 {
var buf = try Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -1640,14 +1741,14 @@ pub const ArgIteratorWindows = struct {
}
}
- fn emitBackslashes(self: &ArgIteratorWindows, buf: &Buffer, emit_count: usize) !void {
+ fn emitBackslashes(self: *ArgIteratorWindows, buf: *Buffer, emit_count: usize) !void {
var i: usize = 0;
while (i < emit_count) : (i += 1) {
try buf.appendByte('\\');
}
}
- fn countQuotes(cmd_line: &const u8) usize {
+ fn countQuotes(cmd_line: [*]const u8) usize {
var result: usize = 0;
var backslash_count: usize = 0;
var index: usize = 0;
@@ -1666,7 +1767,6 @@ pub const ArgIteratorWindows = struct {
}
}
}
-
};
pub const ArgIterator = struct {
@@ -1675,30 +1775,28 @@ pub const ArgIterator = struct {
inner: InnerType,
pub fn init() ArgIterator {
- return ArgIterator {
- .inner = InnerType.init(),
- };
+ return ArgIterator{ .inner = InnerType.init() };
}
pub const NextError = ArgIteratorWindows.NextError;
-
+
/// You must free the returned memory when done.
- pub fn next(self: &ArgIterator, allocator: &Allocator) ?(NextError![]u8) {
+ pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![]u8) {
if (builtin.os == Os.windows) {
return self.inner.next(allocator);
} else {
- return mem.dupe(allocator, u8, self.inner.next() ?? return null);
+ return mem.dupe(allocator, u8, self.inner.next() orelse return null);
}
}
    /// If you are only targeting posix, you can call this and not need an allocator.
- pub fn nextPosix(self: &ArgIterator) ?[]const u8 {
+ pub fn nextPosix(self: *ArgIterator) ?[]const u8 {
return self.inner.next();
}
/// Parse past 1 argument without capturing it.
    /// Returns `true` if it skipped an arg, `false` if we are at the end.
- pub fn skip(self: &ArgIterator) bool {
+ pub fn skip(self: *ArgIterator) bool {
return self.inner.skip();
}
};
@@ -1708,7 +1806,7 @@ pub fn args() ArgIterator {
}
/// Caller must call freeArgs on result.
-pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 {
+pub fn argsAlloc(allocator: *mem.Allocator) ![]const []u8 {
// TODO refactor to only make 1 allocation.
var it = args();
var contents = try Buffer.initSize(allocator, 0);
@@ -1731,7 +1829,7 @@ pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 {
const buf = try allocator.alignedAlloc(u8, @alignOf([]u8), total_bytes);
errdefer allocator.free(buf);
- const result_slice_list = ([][]u8)(buf[0..slice_list_bytes]);
+ const result_slice_list = @bytesToSlice([]u8, buf[0..slice_list_bytes]);
const result_contents = buf[slice_list_bytes..];
mem.copy(u8, result_contents, contents_slice);
@@ -1745,32 +1843,37 @@ pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 {
return result_slice_list;
}
-pub fn argsFree(allocator: &mem.Allocator, args_alloc: []const []u8) void {
+pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const []u8) void {
var total_bytes: usize = 0;
for (args_alloc) |arg| {
total_bytes += @sizeOf([]u8) + arg.len;
}
- const unaligned_allocated_buf = @ptrCast(&const u8, args_alloc.ptr)[0..total_bytes];
+ const unaligned_allocated_buf = @ptrCast([*]const u8, args_alloc.ptr)[0..total_bytes];
const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf);
return allocator.free(aligned_allocated_buf);
}
test "windows arg parsing" {
- testWindowsCmdLine(c"a b\tc d", [][]const u8{"a", "b", "c", "d"});
- testWindowsCmdLine(c"\"abc\" d e", [][]const u8{"abc", "d", "e"});
- testWindowsCmdLine(c"a\\\\\\b d\"e f\"g h", [][]const u8{"a\\\\\\b", "de fg", "h"});
- testWindowsCmdLine(c"a\\\\\\\"b c d", [][]const u8{"a\\\"b", "c", "d"});
- testWindowsCmdLine(c"a\\\\\\\\\"b c\" d e", [][]const u8{"a\\\\b c", "d", "e"});
- testWindowsCmdLine(c"a b\tc \"d f", [][]const u8{"a", "b", "c", "\"d", "f"});
+ testWindowsCmdLine(c"a b\tc d", [][]const u8{ "a", "b", "c", "d" });
+ testWindowsCmdLine(c"\"abc\" d e", [][]const u8{ "abc", "d", "e" });
+ testWindowsCmdLine(c"a\\\\\\b d\"e f\"g h", [][]const u8{ "a\\\\\\b", "de fg", "h" });
+ testWindowsCmdLine(c"a\\\\\\\"b c d", [][]const u8{ "a\\\"b", "c", "d" });
+ testWindowsCmdLine(c"a\\\\\\\\\"b c\" d e", [][]const u8{ "a\\\\b c", "d", "e" });
+ testWindowsCmdLine(c"a b\tc \"d f", [][]const u8{ "a", "b", "c", "\"d", "f" });
- testWindowsCmdLine(c"\".\\..\\zig-cache\\build\" \"bin\\zig.exe\" \".\\..\" \".\\..\\zig-cache\" \"--help\"",
- [][]const u8{".\\..\\zig-cache\\build", "bin\\zig.exe", ".\\..", ".\\..\\zig-cache", "--help"});
+ testWindowsCmdLine(c"\".\\..\\zig-cache\\build\" \"bin\\zig.exe\" \".\\..\" \".\\..\\zig-cache\" \"--help\"", [][]const u8{
+ ".\\..\\zig-cache\\build",
+ "bin\\zig.exe",
+ ".\\..",
+ ".\\..\\zig-cache",
+ "--help",
+ });
}
-fn testWindowsCmdLine(input_cmd_line: &const u8, expected_args: []const []const u8) void {
+fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []const u8) void {
var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line);
for (expected_args) |expected_arg| {
- const arg = ??it.next(debug.global_allocator) catch unreachable;
+ const arg = it.next(debug.global_allocator).? catch unreachable;
assert(mem.eql(u8, arg, expected_arg));
}
assert(it.next(debug.global_allocator) == null);
@@ -1778,7 +1881,7 @@ fn testWindowsCmdLine(input_cmd_line: &const u8, expected_args: []const []const
// TODO make this a build variable that you can set
const unexpected_error_tracing = false;
-const UnexpectedError = error {
+const UnexpectedError = error{
/// The Operating System returned an undocumented error code.
Unexpected,
};
@@ -1824,7 +1927,7 @@ pub fn openSelfExe() !os.File {
test "openSelfExe" {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => (try openSelfExe()).close(),
- else => return, // Unsupported OS.
+ else => return, // Unsupported OS.
}
}
@@ -1834,7 +1937,7 @@ test "openSelfExe" {
/// This function may return an error if the current executable
/// was deleted after spawning.
/// Caller owns returned memory.
-pub fn selfExePath(allocator: &mem.Allocator) ![]u8 {
+pub fn selfExePath(allocator: *mem.Allocator) ![]u8 {
switch (builtin.os) {
Os.linux => {
// If the currently executing binary has been deleted,
@@ -1877,7 +1980,7 @@ pub fn selfExePath(allocator: &mem.Allocator) ![]u8 {
/// Get the directory path that contains the current executable.
/// Caller owns returned memory.
-pub fn selfExeDirPath(allocator: &mem.Allocator) ![]u8 {
+pub fn selfExeDirPath(allocator: *mem.Allocator) ![]u8 {
switch (builtin.os) {
Os.linux => {
// If the currently executing binary has been deleted,
@@ -1886,13 +1989,13 @@ pub fn selfExeDirPath(allocator: &mem.Allocator) ![]u8 {
// the executable was in when it was run.
const full_exe_path = try readLink(allocator, "/proc/self/exe");
errdefer allocator.free(full_exe_path);
- const dir = path.dirname(full_exe_path);
+ const dir = path.dirname(full_exe_path) orelse ".";
return allocator.shrink(u8, full_exe_path, dir.len);
},
Os.windows, Os.macosx, Os.ios => {
const self_exe_path = try selfExePath(allocator);
errdefer allocator.free(self_exe_path);
- const dirname = os.path.dirname(self_exe_path);
+ const dirname = os.path.dirname(self_exe_path) orelse ".";
return allocator.shrink(u8, self_exe_path, dirname.len);
},
else => @compileError("unimplemented: std.os.selfExeDirPath for " ++ @tagName(builtin.os)),
@@ -1911,7 +2014,7 @@ pub fn isTty(handle: FileHandle) bool {
}
}
-pub const PosixSocketError = error {
+pub const PosixSocketError = error{
/// Permission to create a socket of the specified type and/or
    /// protocol is denied.
PermissionDenied,
@@ -1940,7 +2043,7 @@ pub fn posixSocket(domain: u32, socket_type: u32, protocol: u32) !i32 {
const rc = posix.socket(domain, socket_type, protocol);
const err = posix.getErrno(rc);
switch (err) {
- 0 => return i32(rc),
+ 0 => return @intCast(i32, rc),
posix.EACCES => return PosixSocketError.PermissionDenied,
posix.EAFNOSUPPORT => return PosixSocketError.AddressFamilyNotSupported,
posix.EINVAL => return PosixSocketError.ProtocolFamilyNotAvailable,
@@ -1952,9 +2055,9 @@ pub fn posixSocket(domain: u32, socket_type: u32, protocol: u32) !i32 {
}
}
-pub const PosixBindError = error {
+pub const PosixBindError = error{
/// The address is protected, and the user is not the superuser.
- /// For UNIX domain sockets: Search permission is denied on a component
+ /// For UNIX domain sockets: Search permission is denied on a component
/// of the path prefix.
AccessDenied,
@@ -1977,7 +2080,7 @@ pub const PosixBindError = error {
/// A nonexistent interface was requested or the requested address was not local.
AddressNotAvailable,
-
+
/// addr points outside the user's accessible address space.
PageFault,
@@ -2003,7 +2106,7 @@ pub const PosixBindError = error {
};
-/// addr is `&const T` where T is one of the sockaddr
+/// addr is `*const T` where T is one of the sockaddr
-pub fn posixBind(fd: i32, addr: &const posix.sockaddr) PosixBindError!void {
+pub fn posixBind(fd: i32, addr: *const posix.sockaddr) PosixBindError!void {
const rc = posix.bind(fd, addr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
switch (err) {
@@ -2025,7 +2128,7 @@ pub fn posixBind(fd: i32, addr: &const posix.sockaddr) PosixBindError!void {
}
}
-const PosixListenError = error {
+const PosixListenError = error{
/// Another socket is already listening on the same port.
/// For Internet domain sockets, the socket referred to by sockfd had not previously
/// been bound to an address and, upon attempting to bind it to an ephemeral port, it
@@ -2058,7 +2161,7 @@ pub fn posixListen(sockfd: i32, backlog: u32) PosixListenError!void {
}
}
-pub const PosixAcceptError = error {
+pub const PosixAcceptError = error{
/// The socket is marked nonblocking and no connections are present to be accepted.
WouldBlock,
@@ -2066,7 +2169,7 @@ pub const PosixAcceptError = error {
FileDescriptorClosed,
ConnectionAborted,
-
+
/// The addr argument is not in a writable part of the user address space.
PageFault,
@@ -2079,7 +2182,7 @@ pub const PosixAcceptError = error {
/// The system-wide limit on the total number of open files has been reached.
SystemFdQuotaExceeded,
-
+
/// Not enough free memory. This often means that the memory allocation is limited
/// by the socket buffer limits, not by the system memory.
SystemResources,
@@ -2098,13 +2201,13 @@ pub const PosixAcceptError = error {
Unexpected,
};
-pub fn posixAccept(fd: i32, addr: &posix.sockaddr, flags: u32) PosixAcceptError!i32 {
+pub fn posixAccept(fd: i32, addr: *posix.sockaddr, flags: u32) PosixAcceptError!i32 {
while (true) {
var sockaddr_size = u32(@sizeOf(posix.sockaddr));
const rc = posix.accept4(fd, addr, &sockaddr_size, flags);
const err = posix.getErrno(rc);
switch (err) {
- 0 => return i32(rc),
+ 0 => return @intCast(i32, rc),
posix.EINTR => continue,
else => return unexpectedErrorPosix(err),
@@ -2124,7 +2227,7 @@ pub fn posixAccept(fd: i32, addr: &posix.sockaddr, flags: u32) PosixAcceptError!
}
}
-pub const LinuxEpollCreateError = error {
+pub const LinuxEpollCreateError = error{
/// Invalid value specified in flags.
InvalidSyscall,
@@ -2147,7 +2250,7 @@ pub fn linuxEpollCreate(flags: u32) LinuxEpollCreateError!i32 {
const rc = posix.epoll_create1(flags);
const err = posix.getErrno(rc);
switch (err) {
- 0 => return i32(rc),
+ 0 => return @intCast(i32, rc),
else => return unexpectedErrorPosix(err),
posix.EINVAL => return LinuxEpollCreateError.InvalidSyscall,
@@ -2157,7 +2260,7 @@ pub fn linuxEpollCreate(flags: u32) LinuxEpollCreateError!i32 {
}
}
-pub const LinuxEpollCtlError = error {
+pub const LinuxEpollCtlError = error{
/// epfd or fd is not a valid file descriptor.
InvalidFileDescriptor,
@@ -2197,7 +2300,7 @@ pub const LinuxEpollCtlError = error {
Unexpected,
};
-pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: &linux.epoll_event) LinuxEpollCtlError!void {
+pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: *linux.epoll_event) LinuxEpollCtlError!void {
const rc = posix.epoll_ctl(epfd, op, fd, event);
const err = posix.getErrno(rc);
switch (err) {
@@ -2217,7 +2320,7 @@ pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: &linux.epoll_event) Lin
pub fn linuxEpollWait(epfd: i32, events: []linux.epoll_event, timeout: i32) usize {
while (true) {
- const rc = posix.epoll_wait(epfd, events.ptr, u32(events.len), timeout);
+ const rc = posix.epoll_wait(epfd, events.ptr, @intCast(u32, events.len), timeout);
const err = posix.getErrno(rc);
switch (err) {
0 => return rc,
@@ -2230,7 +2333,31 @@ pub fn linuxEpollWait(epfd: i32, events: []linux.epoll_event, timeout: i32) usiz
}
}
-pub const PosixGetSockNameError = error {
+pub const LinuxEventFdError = error{
+ InvalidFlagValue,
+ SystemResources,
+ ProcessFdQuotaExceeded,
+ SystemFdQuotaExceeded,
+
+ Unexpected,
+};
+
+pub fn linuxEventFd(initval: u32, flags: u32) LinuxEventFdError!i32 {
+ const rc = posix.eventfd(initval, flags);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return @intCast(i32, rc),
+ else => return unexpectedErrorPosix(err),
+
+ posix.EINVAL => return LinuxEventFdError.InvalidFlagValue,
+ posix.EMFILE => return LinuxEventFdError.ProcessFdQuotaExceeded,
+ posix.ENFILE => return LinuxEventFdError.SystemFdQuotaExceeded,
+ posix.ENODEV => return LinuxEventFdError.SystemResources,
+ posix.ENOMEM => return LinuxEventFdError.SystemResources,
+ }
+}
+
+pub const PosixGetSockNameError = error{
/// Insufficient resources were available in the system to perform the operation.
SystemResources,
@@ -2254,7 +2381,7 @@ pub fn posixGetSockName(sockfd: i32) PosixGetSockNameError!posix.sockaddr {
}
}
-pub const PosixConnectError = error {
+pub const PosixConnectError = error{
/// For UNIX domain sockets, which are identified by pathname: Write permission is denied on the socket
/// file, or search permission is denied for one of the directories in the path prefix.
/// or
@@ -2290,7 +2417,7 @@ pub const PosixConnectError = error {
Unexpected,
};
-pub fn posixConnect(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectError!void {
+pub fn posixConnect(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConnectError!void {
while (true) {
const rc = posix.connect(sockfd, sockaddr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
@@ -2321,7 +2448,7 @@ pub fn posixConnect(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectEr
/// Same as posixConnect except it is for blocking socket file descriptors.
/// It expects to receive EINPROGRESS.
-pub fn posixConnectAsync(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectError!void {
+pub fn posixConnectAsync(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConnectError!void {
while (true) {
const rc = posix.connect(sockfd, sockaddr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
@@ -2352,7 +2479,7 @@ pub fn posixConnectAsync(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConn
pub fn posixGetSockOptConnectError(sockfd: i32) PosixConnectError!void {
var err_code: i32 = undefined;
var size: u32 = @sizeOf(i32);
- const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast(&u8, &err_code), &size);
+ const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast([*]u8, &err_code), &size);
assert(size == 4);
const err = posix.getErrno(rc);
switch (err) {
@@ -2378,9 +2505,401 @@ pub fn posixGetSockOptConnectError(sockfd: i32) PosixConnectError!void {
},
else => return unexpectedErrorPosix(err),
posix.EBADF => unreachable, // The argument sockfd is not a valid file descriptor.
- posix.EFAULT => unreachable, // The address pointed to by optval or optlen is not in a valid part of the process address space.
+ posix.EFAULT => unreachable, // The address pointed to by optval or optlen is not in a valid part of the process address space.
posix.EINVAL => unreachable,
posix.ENOPROTOOPT => unreachable, // The option is unknown at the level indicated.
posix.ENOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket.
}
}
+
+pub const Thread = struct {
+ data: Data,
+
+ pub const use_pthreads = is_posix and builtin.link_libc;
+ pub const Data = if (use_pthreads)
+ struct {
+ handle: c.pthread_t,
+ stack_addr: usize,
+ stack_len: usize,
+ }
+ else switch (builtin.os) {
+ builtin.Os.linux => struct {
+ pid: i32,
+ stack_addr: usize,
+ stack_len: usize,
+ },
+ builtin.Os.windows => struct {
+ handle: windows.HANDLE,
+ alloc_start: *c_void,
+ heap_handle: windows.HANDLE,
+ },
+ else => @compileError("Unsupported OS"),
+ };
+
+ pub fn wait(self: *const Thread) void {
+ if (use_pthreads) {
+ const err = c.pthread_join(self.data.handle, null);
+ switch (err) {
+ 0 => {},
+ posix.EINVAL => unreachable,
+ posix.ESRCH => unreachable,
+ posix.EDEADLK => unreachable,
+ else => unreachable,
+ }
+ assert(posix.munmap(self.data.stack_addr, self.data.stack_len) == 0);
+ } else switch (builtin.os) {
+ builtin.Os.linux => {
+ while (true) {
+ const pid_value = @atomicLoad(i32, &self.data.pid, builtin.AtomicOrder.SeqCst);
+ if (pid_value == 0) break;
+ const rc = linux.futex_wait(@ptrToInt(&self.data.pid), linux.FUTEX_WAIT, pid_value, null);
+ switch (linux.getErrno(rc)) {
+ 0 => continue,
+ posix.EINTR => continue,
+ posix.EAGAIN => continue,
+ else => unreachable,
+ }
+ }
+ assert(posix.munmap(self.data.stack_addr, self.data.stack_len) == 0);
+ },
+ builtin.Os.windows => {
+ assert(windows.WaitForSingleObject(self.data.handle, windows.INFINITE) == windows.WAIT_OBJECT_0);
+ assert(windows.CloseHandle(self.data.handle) != 0);
+ assert(windows.HeapFree(self.data.heap_handle, 0, self.data.alloc_start) != 0);
+ },
+ else => @compileError("Unsupported OS"),
+ }
+ }
+};
+
+pub const SpawnThreadError = error{
+ /// A system-imposed limit on the number of threads was encountered.
+ /// There are a number of limits that may trigger this error:
+ /// * the RLIMIT_NPROC soft resource limit (set via setrlimit(2)),
+ /// which limits the number of processes and threads for a real
+ /// user ID, was reached;
+ /// * the kernel's system-wide limit on the number of processes and
+ /// threads, /proc/sys/kernel/threads-max, was reached (see
+ /// proc(5));
+ /// * the maximum number of PIDs, /proc/sys/kernel/pid_max, was
+ /// reached (see proc(5)); or
+ /// * the PID limit (pids.max) imposed by the cgroup "process number"
+ /// (PIDs) controller was reached.
+ ThreadQuotaExceeded,
+
+ /// The kernel cannot allocate sufficient memory to allocate a task structure
+ /// for the child, or to copy those parts of the caller's context that need to
+ /// be copied.
+ SystemResources,
+
+ /// Not enough userland memory to spawn the thread.
+ OutOfMemory,
+
+ Unexpected,
+};
+
+/// fn startFn(@typeOf(context)) T
+/// where T is u8, noreturn, void, or !void
+/// caller must call wait on the returned thread
+pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread {
+ // TODO compile-time call graph analysis to determine stack upper bound
+ // https://github.com/ziglang/zig/issues/157
+ const default_stack_size = 8 * 1024 * 1024;
+
+ const Context = @typeOf(context);
+ comptime assert(@ArgType(@typeOf(startFn), 0) == Context);
+
+ if (builtin.os == builtin.Os.windows) {
+ const WinThread = struct {
+ const OuterContext = struct {
+ thread: Thread,
+ inner: Context,
+ };
+ extern fn threadMain(raw_arg: windows.LPVOID) windows.DWORD {
+ const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), raw_arg)).*;
+ switch (@typeId(@typeOf(startFn).ReturnType)) {
+ builtin.TypeId.Int => {
+ return startFn(arg);
+ },
+ builtin.TypeId.Void => {
+ startFn(arg);
+ return 0;
+ },
+ else => @compileError("expected return type of startFn to be 'u8', 'noreturn', 'void', or '!void'"),
+ }
+ }
+ };
+
+ const heap_handle = windows.GetProcessHeap() orelse return SpawnThreadError.OutOfMemory;
+ const byte_count = @alignOf(WinThread.OuterContext) + @sizeOf(WinThread.OuterContext);
+ const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) orelse return SpawnThreadError.OutOfMemory;
+ errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
+ const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count];
+ const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext{
+ .thread = Thread{
+ .data = Thread.Data{
+ .heap_handle = heap_handle,
+ .alloc_start = bytes_ptr,
+ .handle = undefined,
+ },
+ },
+ .inner = context,
+ }) catch unreachable;
+
+ const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner);
+ outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) orelse {
+ const err = windows.GetLastError();
+ return switch (err) {
+ else => os.unexpectedErrorWindows(err),
+ };
+ };
+ return &outer_context.thread;
+ }
+
+ const MainFuncs = struct {
+ extern fn linuxThreadMain(ctx_addr: usize) u8 {
+ const arg = if (@sizeOf(Context) == 0) {} else @intToPtr(*const Context, ctx_addr).*;
+
+ switch (@typeId(@typeOf(startFn).ReturnType)) {
+ builtin.TypeId.Int => {
+ return startFn(arg);
+ },
+ builtin.TypeId.Void => {
+ startFn(arg);
+ return 0;
+ },
+ else => @compileError("expected return type of startFn to be 'u8', 'noreturn', 'void', or '!void'"),
+ }
+ }
+ extern fn posixThreadMain(ctx: ?*c_void) ?*c_void {
+ if (@sizeOf(Context) == 0) {
+ _ = startFn({});
+ return null;
+ } else {
+ _ = startFn(@ptrCast(*const Context, @alignCast(@alignOf(Context), ctx)).*);
+ return null;
+ }
+ }
+ };
+
+ const MAP_GROWSDOWN = if (builtin.os == builtin.Os.linux) linux.MAP_GROWSDOWN else 0;
+
+ const mmap_len = default_stack_size;
+ const stack_addr = posix.mmap(null, mmap_len, posix.PROT_READ | posix.PROT_WRITE, posix.MAP_PRIVATE | posix.MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);
+ if (stack_addr == posix.MAP_FAILED) return error.OutOfMemory;
+ errdefer assert(posix.munmap(stack_addr, mmap_len) == 0);
+
+ var stack_end: usize = stack_addr + mmap_len;
+ var arg: usize = undefined;
+ if (@sizeOf(Context) != 0) {
+ stack_end -= @sizeOf(Context);
+ stack_end -= stack_end % @alignOf(Context);
+ assert(stack_end >= stack_addr);
+ const context_ptr = @alignCast(@alignOf(Context), @intToPtr(*Context, stack_end));
+ context_ptr.* = context;
+ arg = stack_end;
+ }
+
+ stack_end -= @sizeOf(Thread);
+ stack_end -= stack_end % @alignOf(Thread);
+ assert(stack_end >= stack_addr);
+ const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(*Thread, stack_end));
+
+ thread_ptr.data.stack_addr = stack_addr;
+ thread_ptr.data.stack_len = mmap_len;
+
+ if (builtin.os == builtin.Os.windows) {
+ // use windows API directly
+ @compileError("TODO support spawnThread for Windows");
+ } else if (Thread.use_pthreads) {
+ // use pthreads
+ var attr: c.pthread_attr_t = undefined;
+ if (c.pthread_attr_init(&attr) != 0) return SpawnThreadError.SystemResources;
+ defer assert(c.pthread_attr_destroy(&attr) == 0);
+
+ // align to page
+ stack_end -= stack_end % os.page_size;
+ assert(c.pthread_attr_setstack(&attr, @intToPtr(*c_void, stack_addr), stack_end - stack_addr) == 0);
+
+ const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(*c_void, arg));
+ switch (err) {
+ 0 => return thread_ptr,
+ posix.EAGAIN => return SpawnThreadError.SystemResources,
+ posix.EPERM => unreachable,
+ posix.EINVAL => unreachable,
+ else => return unexpectedErrorPosix(@intCast(usize, err)),
+ }
+ } else if (builtin.os == builtin.Os.linux) {
+ // use linux API directly. TODO use posix.CLONE_SETTLS and initialize thread local storage correctly
+ const flags = posix.CLONE_VM | posix.CLONE_FS | posix.CLONE_FILES | posix.CLONE_SIGHAND | posix.CLONE_THREAD | posix.CLONE_SYSVSEM | posix.CLONE_PARENT_SETTID | posix.CLONE_CHILD_CLEARTID | posix.CLONE_DETACHED;
+ const newtls: usize = 0;
+ const rc = posix.clone(MainFuncs.linuxThreadMain, stack_end, flags, arg, &thread_ptr.data.pid, newtls, &thread_ptr.data.pid);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return thread_ptr,
+ posix.EAGAIN => return SpawnThreadError.ThreadQuotaExceeded,
+ posix.EINVAL => unreachable,
+ posix.ENOMEM => return SpawnThreadError.SystemResources,
+ posix.ENOSPC => unreachable,
+ posix.EPERM => unreachable,
+ posix.EUSERS => unreachable,
+ else => return unexpectedErrorPosix(err),
+ }
+ } else {
+ @compileError("Unsupported OS");
+ }
+}
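
The thread API added above is driven by spawning with `spawnThread` and joining with `Thread.wait`. A minimal usage sketch, not part of the patch (the `worker` function and the test name are illustrative):

    const std = @import("std");
    const os = std.os;

    fn worker(value: usize) void {
        // a real worker would do something with `value`
    }

    test "spawnThread usage sketch" {
        const value: usize = 42;
        // spawnThread takes ownership of a copy of `context` and returns a *Thread
        const thread = os.spawnThread(value, worker) catch unreachable;
        // per the doc comment above, the caller must call wait() on the returned thread
        thread.wait();
    }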
+
+pub fn posixWait(pid: i32) i32 {
+ var status: i32 = undefined;
+ while (true) {
+ const err = posix.getErrno(posix.waitpid(pid, &status, 0));
+ switch (err) {
+ 0 => return status,
+ posix.EINTR => continue,
+ posix.ECHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error.
+ posix.EINVAL => unreachable, // The options argument was invalid
+ else => unreachable,
+ }
+ }
+}
+
+pub fn posixFStat(fd: i32) !posix.Stat {
+ var stat: posix.Stat = undefined;
+ const err = posix.getErrno(posix.fstat(fd, &stat));
+ if (err > 0) {
+ return switch (err) {
+ posix.EBADF => error.BadFd,
+ posix.ENOMEM => error.SystemResources,
+ else => os.unexpectedErrorPosix(err),
+ };
+ }
+
+ return stat;
+}
+
+pub const CpuCountError = error{
+ OutOfMemory,
+ PermissionDenied,
+ Unexpected,
+};
+
+pub fn cpuCount(fallback_allocator: *mem.Allocator) CpuCountError!usize {
+ switch (builtin.os) {
+ builtin.Os.macosx => {
+ var count: c_int = undefined;
+ var count_len: usize = @sizeOf(c_int);
+ const rc = posix.sysctlbyname(c"hw.logicalcpu", @ptrCast(*c_void, &count), &count_len, null, 0);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return @intCast(usize, count),
+ posix.EFAULT => unreachable,
+ posix.EINVAL => unreachable,
+ posix.ENOMEM => return CpuCountError.OutOfMemory,
+ posix.ENOTDIR => unreachable,
+ posix.EISDIR => unreachable,
+ posix.ENOENT => unreachable,
+ posix.EPERM => unreachable,
+ else => return os.unexpectedErrorPosix(err),
+ }
+ },
+ builtin.Os.linux => {
+ const usize_count = 16;
+ const allocator = std.heap.stackFallback(usize_count * @sizeOf(usize), fallback_allocator).get();
+
+ var set = try allocator.alloc(usize, usize_count);
+ defer allocator.free(set);
+
+ while (true) {
+ const rc = posix.sched_getaffinity(0, set);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => {
+ if (rc < set.len * @sizeOf(usize)) {
+ const result = set[0 .. rc / @sizeOf(usize)];
+ var sum: usize = 0;
+ for (result) |x| {
+ sum += @popCount(x);
+ }
+ return sum;
+ } else {
+ set = try allocator.realloc(usize, set, set.len * 2);
+ continue;
+ }
+ },
+ posix.EFAULT => unreachable,
+ posix.EINVAL => unreachable,
+ posix.EPERM => return CpuCountError.PermissionDenied,
+ posix.ESRCH => unreachable,
+ else => return os.unexpectedErrorPosix(err),
+ }
+ }
+ },
+ builtin.Os.windows => {
+ var system_info: windows.SYSTEM_INFO = undefined;
+ windows.GetSystemInfo(&system_info);
+ return @intCast(usize, system_info.dwNumberOfProcessors);
+ },
+ else => @compileError("unsupported OS"),
+ }
+}
+
+pub const BsdKQueueError = error{
+ /// The per-process limit on the number of open file descriptors has been reached.
+ ProcessFdQuotaExceeded,
+
+ /// The system-wide limit on the total number of open files has been reached.
+ SystemFdQuotaExceeded,
+
+ Unexpected,
+};
+
+pub fn bsdKQueue() BsdKQueueError!i32 {
+ const rc = posix.kqueue();
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return @intCast(i32, rc),
+ posix.EMFILE => return BsdKQueueError.ProcessFdQuotaExceeded,
+ posix.ENFILE => return BsdKQueueError.SystemFdQuotaExceeded,
+ else => return unexpectedErrorPosix(err),
+ }
+}
+
+pub const BsdKEventError = error{
+ /// The process does not have permission to register a filter.
+ AccessDenied,
+
+ /// The event could not be found to be modified or deleted.
+ EventNotFound,
+
+ /// No memory was available to register the event.
+ SystemResources,
+
+ /// The specified process to attach to does not exist.
+ ProcessNotFound,
+};
+
+pub fn bsdKEvent(
+ kq: i32,
+ changelist: []const posix.Kevent,
+ eventlist: []posix.Kevent,
+ timeout: ?*const posix.timespec,
+) BsdKEventError!usize {
+ while (true) {
+ const rc = posix.kevent(kq, changelist, eventlist, timeout);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return rc,
+ posix.EACCES => return BsdKEventError.AccessDenied,
+ posix.EFAULT => unreachable,
+ posix.EBADF => unreachable,
+ posix.EINTR => continue,
+ posix.EINVAL => unreachable,
+ posix.ENOENT => return BsdKEventError.EventNotFound,
+ posix.ENOMEM => return BsdKEventError.SystemResources,
+ posix.ESRCH => return BsdKEventError.ProcessNotFound,
+ else => unreachable,
+ }
+ }
+}
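
For reference, the reworked `Dir` iterator above is meant to be used as open, next, close. A minimal sketch, not part of the patch (`countFiles` is an illustrative name; errors are propagated via an inferred error set):

    const std = @import("std");
    const os = std.os;

    fn countFiles(allocator: *std.mem.Allocator, dir_path: []const u8) !usize {
        var dir = try os.Dir.open(allocator, dir_path);
        defer dir.close();
        var count: usize = 0;
        // entry.name is only valid until the next call to next() or close()
        while (try dir.next()) |entry| {
            if (entry.kind == os.Dir.Entry.Kind.File) count += 1;
        }
        return count;
    }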
diff --git a/std/os/linux/errno.zig b/std/os/linux/errno.zig
index 39f4e37a10..5ad8777f92 100644
--- a/std/os/linux/errno.zig
+++ b/std/os/linux/errno.zig
@@ -1,146 +1,427 @@
-pub const EPERM = 1; /// Operation not permitted
-pub const ENOENT = 2; /// No such file or directory
-pub const ESRCH = 3; /// No such process
-pub const EINTR = 4; /// Interrupted system call
-pub const EIO = 5; /// I/O error
-pub const ENXIO = 6; /// No such device or address
-pub const E2BIG = 7; /// Arg list too long
-pub const ENOEXEC = 8; /// Exec format error
-pub const EBADF = 9; /// Bad file number
-pub const ECHILD = 10; /// No child processes
-pub const EAGAIN = 11; /// Try again
-pub const ENOMEM = 12; /// Out of memory
-pub const EACCES = 13; /// Permission denied
-pub const EFAULT = 14; /// Bad address
-pub const ENOTBLK = 15; /// Block device required
-pub const EBUSY = 16; /// Device or resource busy
-pub const EEXIST = 17; /// File exists
-pub const EXDEV = 18; /// Cross-device link
-pub const ENODEV = 19; /// No such device
-pub const ENOTDIR = 20; /// Not a directory
-pub const EISDIR = 21; /// Is a directory
-pub const EINVAL = 22; /// Invalid argument
-pub const ENFILE = 23; /// File table overflow
-pub const EMFILE = 24; /// Too many open files
-pub const ENOTTY = 25; /// Not a typewriter
-pub const ETXTBSY = 26; /// Text file busy
-pub const EFBIG = 27; /// File too large
-pub const ENOSPC = 28; /// No space left on device
-pub const ESPIPE = 29; /// Illegal seek
-pub const EROFS = 30; /// Read-only file system
-pub const EMLINK = 31; /// Too many links
-pub const EPIPE = 32; /// Broken pipe
-pub const EDOM = 33; /// Math argument out of domain of func
-pub const ERANGE = 34; /// Math result not representable
-pub const EDEADLK = 35; /// Resource deadlock would occur
-pub const ENAMETOOLONG = 36; /// File name too long
-pub const ENOLCK = 37; /// No record locks available
-pub const ENOSYS = 38; /// Function not implemented
-pub const ENOTEMPTY = 39; /// Directory not empty
-pub const ELOOP = 40; /// Too many symbolic links encountered
-pub const EWOULDBLOCK = EAGAIN; /// Operation would block
-pub const ENOMSG = 42; /// No message of desired type
-pub const EIDRM = 43; /// Identifier removed
-pub const ECHRNG = 44; /// Channel number out of range
-pub const EL2NSYNC = 45; /// Level 2 not synchronized
-pub const EL3HLT = 46; /// Level 3 halted
-pub const EL3RST = 47; /// Level 3 reset
-pub const ELNRNG = 48; /// Link number out of range
-pub const EUNATCH = 49; /// Protocol driver not attached
-pub const ENOCSI = 50; /// No CSI structure available
-pub const EL2HLT = 51; /// Level 2 halted
-pub const EBADE = 52; /// Invalid exchange
-pub const EBADR = 53; /// Invalid request descriptor
-pub const EXFULL = 54; /// Exchange full
-pub const ENOANO = 55; /// No anode
-pub const EBADRQC = 56; /// Invalid request code
-pub const EBADSLT = 57; /// Invalid slot
+/// Operation not permitted
+pub const EPERM = 1;
-pub const EBFONT = 59; /// Bad font file format
-pub const ENOSTR = 60; /// Device not a stream
-pub const ENODATA = 61; /// No data available
-pub const ETIME = 62; /// Timer expired
-pub const ENOSR = 63; /// Out of streams resources
-pub const ENONET = 64; /// Machine is not on the network
-pub const ENOPKG = 65; /// Package not installed
-pub const EREMOTE = 66; /// Object is remote
-pub const ENOLINK = 67; /// Link has been severed
-pub const EADV = 68; /// Advertise error
-pub const ESRMNT = 69; /// Srmount error
-pub const ECOMM = 70; /// Communication error on send
-pub const EPROTO = 71; /// Protocol error
-pub const EMULTIHOP = 72; /// Multihop attempted
-pub const EDOTDOT = 73; /// RFS specific error
-pub const EBADMSG = 74; /// Not a data message
-pub const EOVERFLOW = 75; /// Value too large for defined data type
-pub const ENOTUNIQ = 76; /// Name not unique on network
-pub const EBADFD = 77; /// File descriptor in bad state
-pub const EREMCHG = 78; /// Remote address changed
-pub const ELIBACC = 79; /// Can not access a needed shared library
-pub const ELIBBAD = 80; /// Accessing a corrupted shared library
-pub const ELIBSCN = 81; /// .lib section in a.out corrupted
-pub const ELIBMAX = 82; /// Attempting to link in too many shared libraries
-pub const ELIBEXEC = 83; /// Cannot exec a shared library directly
-pub const EILSEQ = 84; /// Illegal byte sequence
-pub const ERESTART = 85; /// Interrupted system call should be restarted
-pub const ESTRPIPE = 86; /// Streams pipe error
-pub const EUSERS = 87; /// Too many users
-pub const ENOTSOCK = 88; /// Socket operation on non-socket
-pub const EDESTADDRREQ = 89; /// Destination address required
-pub const EMSGSIZE = 90; /// Message too long
-pub const EPROTOTYPE = 91; /// Protocol wrong type for socket
-pub const ENOPROTOOPT = 92; /// Protocol not available
-pub const EPROTONOSUPPORT = 93; /// Protocol not supported
-pub const ESOCKTNOSUPPORT = 94; /// Socket type not supported
-pub const EOPNOTSUPP = 95; /// Operation not supported on transport endpoint
-pub const EPFNOSUPPORT = 96; /// Protocol family not supported
-pub const EAFNOSUPPORT = 97; /// Address family not supported by protocol
-pub const EADDRINUSE = 98; /// Address already in use
-pub const EADDRNOTAVAIL = 99; /// Cannot assign requested address
-pub const ENETDOWN = 100; /// Network is down
-pub const ENETUNREACH = 101; /// Network is unreachable
-pub const ENETRESET = 102; /// Network dropped connection because of reset
-pub const ECONNABORTED = 103; /// Software caused connection abort
-pub const ECONNRESET = 104; /// Connection reset by peer
-pub const ENOBUFS = 105; /// No buffer space available
-pub const EISCONN = 106; /// Transport endpoint is already connected
-pub const ENOTCONN = 107; /// Transport endpoint is not connected
-pub const ESHUTDOWN = 108; /// Cannot send after transport endpoint shutdown
-pub const ETOOMANYREFS = 109; /// Too many references: cannot splice
-pub const ETIMEDOUT = 110; /// Connection timed out
-pub const ECONNREFUSED = 111; /// Connection refused
-pub const EHOSTDOWN = 112; /// Host is down
-pub const EHOSTUNREACH = 113; /// No route to host
-pub const EALREADY = 114; /// Operation already in progress
-pub const EINPROGRESS = 115; /// Operation now in progress
-pub const ESTALE = 116; /// Stale NFS file handle
-pub const EUCLEAN = 117; /// Structure needs cleaning
-pub const ENOTNAM = 118; /// Not a XENIX named type file
-pub const ENAVAIL = 119; /// No XENIX semaphores available
-pub const EISNAM = 120; /// Is a named type file
-pub const EREMOTEIO = 121; /// Remote I/O error
-pub const EDQUOT = 122; /// Quota exceeded
+/// No such file or directory
+pub const ENOENT = 2;
-pub const ENOMEDIUM = 123; /// No medium found
-pub const EMEDIUMTYPE = 124; /// Wrong medium type
+/// No such process
+pub const ESRCH = 3;
+
+/// Interrupted system call
+pub const EINTR = 4;
+
+/// I/O error
+pub const EIO = 5;
+
+/// No such device or address
+pub const ENXIO = 6;
+
+/// Arg list too long
+pub const E2BIG = 7;
+
+/// Exec format error
+pub const ENOEXEC = 8;
+
+/// Bad file number
+pub const EBADF = 9;
+
+/// No child processes
+pub const ECHILD = 10;
+
+/// Try again
+pub const EAGAIN = 11;
+
+/// Out of memory
+pub const ENOMEM = 12;
+
+/// Permission denied
+pub const EACCES = 13;
+
+/// Bad address
+pub const EFAULT = 14;
+
+/// Block device required
+pub const ENOTBLK = 15;
+
+/// Device or resource busy
+pub const EBUSY = 16;
+
+/// File exists
+pub const EEXIST = 17;
+
+/// Cross-device link
+pub const EXDEV = 18;
+
+/// No such device
+pub const ENODEV = 19;
+
+/// Not a directory
+pub const ENOTDIR = 20;
+
+/// Is a directory
+pub const EISDIR = 21;
+
+/// Invalid argument
+pub const EINVAL = 22;
+
+/// File table overflow
+pub const ENFILE = 23;
+
+/// Too many open files
+pub const EMFILE = 24;
+
+/// Not a typewriter
+pub const ENOTTY = 25;
+
+/// Text file busy
+pub const ETXTBSY = 26;
+
+/// File too large
+pub const EFBIG = 27;
+
+/// No space left on device
+pub const ENOSPC = 28;
+
+/// Illegal seek
+pub const ESPIPE = 29;
+
+/// Read-only file system
+pub const EROFS = 30;
+
+/// Too many links
+pub const EMLINK = 31;
+
+/// Broken pipe
+pub const EPIPE = 32;
+
+/// Math argument out of domain of func
+pub const EDOM = 33;
+
+/// Math result not representable
+pub const ERANGE = 34;
+
+/// Resource deadlock would occur
+pub const EDEADLK = 35;
+
+/// File name too long
+pub const ENAMETOOLONG = 36;
+
+/// No record locks available
+pub const ENOLCK = 37;
+
+/// Function not implemented
+pub const ENOSYS = 38;
+
+/// Directory not empty
+pub const ENOTEMPTY = 39;
+
+/// Too many symbolic links encountered
+pub const ELOOP = 40;
+
+/// Operation would block
+pub const EWOULDBLOCK = EAGAIN;
+
+/// No message of desired type
+pub const ENOMSG = 42;
+
+/// Identifier removed
+pub const EIDRM = 43;
+
+/// Channel number out of range
+pub const ECHRNG = 44;
+
+/// Level 2 not synchronized
+pub const EL2NSYNC = 45;
+
+/// Level 3 halted
+pub const EL3HLT = 46;
+
+/// Level 3 reset
+pub const EL3RST = 47;
+
+/// Link number out of range
+pub const ELNRNG = 48;
+
+/// Protocol driver not attached
+pub const EUNATCH = 49;
+
+/// No CSI structure available
+pub const ENOCSI = 50;
+
+/// Level 2 halted
+pub const EL2HLT = 51;
+
+/// Invalid exchange
+pub const EBADE = 52;
+
+/// Invalid request descriptor
+pub const EBADR = 53;
+
+/// Exchange full
+pub const EXFULL = 54;
+
+/// No anode
+pub const ENOANO = 55;
+
+/// Invalid request code
+pub const EBADRQC = 56;
+
+/// Invalid slot
+pub const EBADSLT = 57;
+
+/// Bad font file format
+pub const EBFONT = 59;
+
+/// Device not a stream
+pub const ENOSTR = 60;
+
+/// No data available
+pub const ENODATA = 61;
+
+/// Timer expired
+pub const ETIME = 62;
+
+/// Out of streams resources
+pub const ENOSR = 63;
+
+/// Machine is not on the network
+pub const ENONET = 64;
+
+/// Package not installed
+pub const ENOPKG = 65;
+
+/// Object is remote
+pub const EREMOTE = 66;
+
+/// Link has been severed
+pub const ENOLINK = 67;
+
+/// Advertise error
+pub const EADV = 68;
+
+/// Srmount error
+pub const ESRMNT = 69;
+
+/// Communication error on send
+pub const ECOMM = 70;
+
+/// Protocol error
+pub const EPROTO = 71;
+
+/// Multihop attempted
+pub const EMULTIHOP = 72;
+
+/// RFS specific error
+pub const EDOTDOT = 73;
+
+/// Not a data message
+pub const EBADMSG = 74;
+
+/// Value too large for defined data type
+pub const EOVERFLOW = 75;
+
+/// Name not unique on network
+pub const ENOTUNIQ = 76;
+
+/// File descriptor in bad state
+pub const EBADFD = 77;
+
+/// Remote address changed
+pub const EREMCHG = 78;
+
+/// Can not access a needed shared library
+pub const ELIBACC = 79;
+
+/// Accessing a corrupted shared library
+pub const ELIBBAD = 80;
+
+/// .lib section in a.out corrupted
+pub const ELIBSCN = 81;
+
+/// Attempting to link in too many shared libraries
+pub const ELIBMAX = 82;
+
+/// Cannot exec a shared library directly
+pub const ELIBEXEC = 83;
+
+/// Illegal byte sequence
+pub const EILSEQ = 84;
+
+/// Interrupted system call should be restarted
+pub const ERESTART = 85;
+
+/// Streams pipe error
+pub const ESTRPIPE = 86;
+
+/// Too many users
+pub const EUSERS = 87;
+
+/// Socket operation on non-socket
+pub const ENOTSOCK = 88;
+
+/// Destination address required
+pub const EDESTADDRREQ = 89;
+
+/// Message too long
+pub const EMSGSIZE = 90;
+
+/// Protocol wrong type for socket
+pub const EPROTOTYPE = 91;
+
+/// Protocol not available
+pub const ENOPROTOOPT = 92;
+
+/// Protocol not supported
+pub const EPROTONOSUPPORT = 93;
+
+/// Socket type not supported
+pub const ESOCKTNOSUPPORT = 94;
+
+/// Operation not supported on transport endpoint
+pub const EOPNOTSUPP = 95;
+
+/// Protocol family not supported
+pub const EPFNOSUPPORT = 96;
+
+/// Address family not supported by protocol
+pub const EAFNOSUPPORT = 97;
+
+/// Address already in use
+pub const EADDRINUSE = 98;
+
+/// Cannot assign requested address
+pub const EADDRNOTAVAIL = 99;
+
+/// Network is down
+pub const ENETDOWN = 100;
+
+/// Network is unreachable
+pub const ENETUNREACH = 101;
+
+/// Network dropped connection because of reset
+pub const ENETRESET = 102;
+
+/// Software caused connection abort
+pub const ECONNABORTED = 103;
+
+/// Connection reset by peer
+pub const ECONNRESET = 104;
+
+/// No buffer space available
+pub const ENOBUFS = 105;
+
+/// Transport endpoint is already connected
+pub const EISCONN = 106;
+
+/// Transport endpoint is not connected
+pub const ENOTCONN = 107;
+
+/// Cannot send after transport endpoint shutdown
+pub const ESHUTDOWN = 108;
+
+/// Too many references: cannot splice
+pub const ETOOMANYREFS = 109;
+
+/// Connection timed out
+pub const ETIMEDOUT = 110;
+
+/// Connection refused
+pub const ECONNREFUSED = 111;
+
+/// Host is down
+pub const EHOSTDOWN = 112;
+
+/// No route to host
+pub const EHOSTUNREACH = 113;
+
+/// Operation already in progress
+pub const EALREADY = 114;
+
+/// Operation now in progress
+pub const EINPROGRESS = 115;
+
+/// Stale NFS file handle
+pub const ESTALE = 116;
+
+/// Structure needs cleaning
+pub const EUCLEAN = 117;
+
+/// Not a XENIX named type file
+pub const ENOTNAM = 118;
+
+/// No XENIX semaphores available
+pub const ENAVAIL = 119;
+
+/// Is a named type file
+pub const EISNAM = 120;
+
+/// Remote I/O error
+pub const EREMOTEIO = 121;
+
+/// Quota exceeded
+pub const EDQUOT = 122;
+
+/// No medium found
+pub const ENOMEDIUM = 123;
+
+/// Wrong medium type
+pub const EMEDIUMTYPE = 124;
// nameserver query return codes
-pub const ENSROK = 0; /// DNS server returned answer with no data
-pub const ENSRNODATA = 160; /// DNS server returned answer with no data
-pub const ENSRFORMERR = 161; /// DNS server claims query was misformatted
-pub const ENSRSERVFAIL = 162; /// DNS server returned general failure
-pub const ENSRNOTFOUND = 163; /// Domain name not found
-pub const ENSRNOTIMP = 164; /// DNS server does not implement requested operation
-pub const ENSRREFUSED = 165; /// DNS server refused query
-pub const ENSRBADQUERY = 166; /// Misformatted DNS query
-pub const ENSRBADNAME = 167; /// Misformatted domain name
-pub const ENSRBADFAMILY = 168; /// Unsupported address family
-pub const ENSRBADRESP = 169; /// Misformatted DNS reply
-pub const ENSRCONNREFUSED = 170; /// Could not contact DNS servers
-pub const ENSRTIMEOUT = 171; /// Timeout while contacting DNS servers
-pub const ENSROF = 172; /// End of file
-pub const ENSRFILE = 173; /// Error reading file
-pub const ENSRNOMEM = 174; /// Out of memory
-pub const ENSRDESTRUCTION = 175; /// Application terminated lookup
-pub const ENSRQUERYDOMAINTOOLONG = 176; /// Domain name is too long
-pub const ENSRCNAMELOOP = 177; /// Domain name is too long
+
+/// DNS resolver query completed successfully
+pub const ENSROK = 0;
+
+/// DNS server returned answer with no data
+pub const ENSRNODATA = 160;
+
+/// DNS server claims query was misformatted
+pub const ENSRFORMERR = 161;
+
+/// DNS server returned general failure
+pub const ENSRSERVFAIL = 162;
+
+/// Domain name not found
+pub const ENSRNOTFOUND = 163;
+
+/// DNS server does not implement requested operation
+pub const ENSRNOTIMP = 164;
+
+/// DNS server refused query
+pub const ENSRREFUSED = 165;
+
+/// Misformatted DNS query
+pub const ENSRBADQUERY = 166;
+
+/// Misformatted domain name
+pub const ENSRBADNAME = 167;
+
+/// Unsupported address family
+pub const ENSRBADFAMILY = 168;
+
+/// Misformatted DNS reply
+pub const ENSRBADRESP = 169;
+
+/// Could not contact DNS servers
+pub const ENSRCONNREFUSED = 170;
+
+/// Timeout while contacting DNS servers
+pub const ENSRTIMEOUT = 171;
+
+/// End of file
+pub const ENSROF = 172;
+
+/// Error reading file
+pub const ENSRFILE = 173;
+
+/// Out of memory
+pub const ENSRNOMEM = 174;
+
+/// Application terminated lookup
+pub const ENSRDESTRUCTION = 175;
+
+/// Domain name is too long
+pub const ENSRQUERYDOMAINTOOLONG = 176;
+
+/// CNAME loop detected while resolving domain name
+pub const ENSRCNAMELOOP = 177;
diff --git a/std/os/linux/index.zig b/std/os/linux/index.zig
index e100af7733..15607ea6c0 100644
--- a/std/os/linux/index.zig
+++ b/std/os/linux/index.zig
@@ -1,6 +1,7 @@
const std = @import("../../index.zig");
const assert = std.debug.assert;
const builtin = @import("builtin");
+const vdso = @import("vdso.zig");
pub use switch (builtin.arch) {
builtin.Arch.x86_64 => @import("x86_64.zig"),
builtin.Arch.i386 => @import("i386.zig"),
@@ -14,95 +15,110 @@ pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;
-pub const PROT_NONE = 0;
-pub const PROT_READ = 1;
-pub const PROT_WRITE = 2;
-pub const PROT_EXEC = 4;
-pub const PROT_GROWSDOWN = 0x01000000;
-pub const PROT_GROWSUP = 0x02000000;
+pub const FUTEX_WAIT = 0;
+pub const FUTEX_WAKE = 1;
+pub const FUTEX_FD = 2;
+pub const FUTEX_REQUEUE = 3;
+pub const FUTEX_CMP_REQUEUE = 4;
+pub const FUTEX_WAKE_OP = 5;
+pub const FUTEX_LOCK_PI = 6;
+pub const FUTEX_UNLOCK_PI = 7;
+pub const FUTEX_TRYLOCK_PI = 8;
+pub const FUTEX_WAIT_BITSET = 9;
-pub const MAP_FAILED = @maxValue(usize);
-pub const MAP_SHARED = 0x01;
-pub const MAP_PRIVATE = 0x02;
-pub const MAP_TYPE = 0x0f;
-pub const MAP_FIXED = 0x10;
-pub const MAP_ANONYMOUS = 0x20;
-pub const MAP_NORESERVE = 0x4000;
-pub const MAP_GROWSDOWN = 0x0100;
-pub const MAP_DENYWRITE = 0x0800;
+pub const FUTEX_PRIVATE_FLAG = 128;
+
+pub const FUTEX_CLOCK_REALTIME = 256;
+
+pub const PROT_NONE = 0;
+pub const PROT_READ = 1;
+pub const PROT_WRITE = 2;
+pub const PROT_EXEC = 4;
+pub const PROT_GROWSDOWN = 0x01000000;
+pub const PROT_GROWSUP = 0x02000000;
+
+pub const MAP_FAILED = @maxValue(usize);
+pub const MAP_SHARED = 0x01;
+pub const MAP_PRIVATE = 0x02;
+pub const MAP_TYPE = 0x0f;
+pub const MAP_FIXED = 0x10;
+pub const MAP_ANONYMOUS = 0x20;
+pub const MAP_NORESERVE = 0x4000;
+pub const MAP_GROWSDOWN = 0x0100;
+pub const MAP_DENYWRITE = 0x0800;
pub const MAP_EXECUTABLE = 0x1000;
-pub const MAP_LOCKED = 0x2000;
-pub const MAP_POPULATE = 0x8000;
-pub const MAP_NONBLOCK = 0x10000;
-pub const MAP_STACK = 0x20000;
-pub const MAP_HUGETLB = 0x40000;
-pub const MAP_FILE = 0;
+pub const MAP_LOCKED = 0x2000;
+pub const MAP_POPULATE = 0x8000;
+pub const MAP_NONBLOCK = 0x10000;
+pub const MAP_STACK = 0x20000;
+pub const MAP_HUGETLB = 0x40000;
+pub const MAP_FILE = 0;
pub const F_OK = 0;
pub const X_OK = 1;
pub const W_OK = 2;
pub const R_OK = 4;
-pub const WNOHANG = 1;
-pub const WUNTRACED = 2;
-pub const WSTOPPED = 2;
-pub const WEXITED = 4;
+pub const WNOHANG = 1;
+pub const WUNTRACED = 2;
+pub const WSTOPPED = 2;
+pub const WEXITED = 4;
pub const WCONTINUED = 8;
-pub const WNOWAIT = 0x1000000;
+pub const WNOWAIT = 0x1000000;
-pub const SA_NOCLDSTOP = 1;
-pub const SA_NOCLDWAIT = 2;
-pub const SA_SIGINFO = 4;
-pub const SA_ONSTACK = 0x08000000;
-pub const SA_RESTART = 0x10000000;
-pub const SA_NODEFER = 0x40000000;
-pub const SA_RESETHAND = 0x80000000;
-pub const SA_RESTORER = 0x04000000;
+pub const SA_NOCLDSTOP = 1;
+pub const SA_NOCLDWAIT = 2;
+pub const SA_SIGINFO = 4;
+pub const SA_ONSTACK = 0x08000000;
+pub const SA_RESTART = 0x10000000;
+pub const SA_NODEFER = 0x40000000;
+pub const SA_RESETHAND = 0x80000000;
+pub const SA_RESTORER = 0x04000000;
-pub const SIGHUP = 1;
-pub const SIGINT = 2;
-pub const SIGQUIT = 3;
-pub const SIGILL = 4;
-pub const SIGTRAP = 5;
-pub const SIGABRT = 6;
-pub const SIGIOT = SIGABRT;
-pub const SIGBUS = 7;
-pub const SIGFPE = 8;
-pub const SIGKILL = 9;
-pub const SIGUSR1 = 10;
-pub const SIGSEGV = 11;
-pub const SIGUSR2 = 12;
-pub const SIGPIPE = 13;
-pub const SIGALRM = 14;
-pub const SIGTERM = 15;
+pub const SIGHUP = 1;
+pub const SIGINT = 2;
+pub const SIGQUIT = 3;
+pub const SIGILL = 4;
+pub const SIGTRAP = 5;
+pub const SIGABRT = 6;
+pub const SIGIOT = SIGABRT;
+pub const SIGBUS = 7;
+pub const SIGFPE = 8;
+pub const SIGKILL = 9;
+pub const SIGUSR1 = 10;
+pub const SIGSEGV = 11;
+pub const SIGUSR2 = 12;
+pub const SIGPIPE = 13;
+pub const SIGALRM = 14;
+pub const SIGTERM = 15;
pub const SIGSTKFLT = 16;
-pub const SIGCHLD = 17;
-pub const SIGCONT = 18;
-pub const SIGSTOP = 19;
-pub const SIGTSTP = 20;
-pub const SIGTTIN = 21;
-pub const SIGTTOU = 22;
-pub const SIGURG = 23;
-pub const SIGXCPU = 24;
-pub const SIGXFSZ = 25;
+pub const SIGCHLD = 17;
+pub const SIGCONT = 18;
+pub const SIGSTOP = 19;
+pub const SIGTSTP = 20;
+pub const SIGTTIN = 21;
+pub const SIGTTOU = 22;
+pub const SIGURG = 23;
+pub const SIGXCPU = 24;
+pub const SIGXFSZ = 25;
pub const SIGVTALRM = 26;
-pub const SIGPROF = 27;
-pub const SIGWINCH = 28;
-pub const SIGIO = 29;
-pub const SIGPOLL = 29;
-pub const SIGPWR = 30;
-pub const SIGSYS = 31;
+pub const SIGPROF = 27;
+pub const SIGWINCH = 28;
+pub const SIGIO = 29;
+pub const SIGPOLL = 29;
+pub const SIGPWR = 30;
+pub const SIGSYS = 31;
pub const SIGUNUSED = SIGSYS;
pub const O_RDONLY = 0o0;
pub const O_WRONLY = 0o1;
-pub const O_RDWR = 0o2;
+pub const O_RDWR = 0o2;
pub const SEEK_SET = 0;
pub const SEEK_CUR = 1;
pub const SEEK_END = 2;
-pub const SIG_BLOCK = 0;
+pub const SIG_BLOCK = 0;
pub const SIG_UNBLOCK = 1;
pub const SIG_SETMASK = 2;
@@ -391,7 +407,6 @@ pub const DT_LNK = 10;
pub const DT_SOCK = 12;
pub const DT_WHT = 14;
-
pub const TCGETS = 0x5401;
pub const TCSETS = 0x5402;
pub const TCSETSW = 0x5403;
@@ -508,6 +523,10 @@ pub const CLONE_NEWPID = 0x20000000;
pub const CLONE_NEWNET = 0x40000000;
pub const CLONE_IO = 0x80000000;
+pub const EFD_SEMAPHORE = 1;
+pub const EFD_CLOEXEC = O_CLOEXEC;
+pub const EFD_NONBLOCK = O_NONBLOCK;
+
pub const MS_RDONLY = 1;
pub const MS_NOSUID = 2;
pub const MS_NODEV = 4;
@@ -522,23 +541,23 @@ pub const MS_BIND = 4096;
pub const MS_MOVE = 8192;
pub const MS_REC = 16384;
pub const MS_SILENT = 32768;
-pub const MS_POSIXACL = (1<<16);
-pub const MS_UNBINDABLE = (1<<17);
-pub const MS_PRIVATE = (1<<18);
-pub const MS_SLAVE = (1<<19);
-pub const MS_SHARED = (1<<20);
-pub const MS_RELATIME = (1<<21);
-pub const MS_KERNMOUNT = (1<<22);
-pub const MS_I_VERSION = (1<<23);
-pub const MS_STRICTATIME = (1<<24);
-pub const MS_LAZYTIME = (1<<25);
-pub const MS_NOREMOTELOCK = (1<<27);
-pub const MS_NOSEC = (1<<28);
-pub const MS_BORN = (1<<29);
-pub const MS_ACTIVE = (1<<30);
-pub const MS_NOUSER = (1<<31);
+pub const MS_POSIXACL = (1 << 16);
+pub const MS_UNBINDABLE = (1 << 17);
+pub const MS_PRIVATE = (1 << 18);
+pub const MS_SLAVE = (1 << 19);
+pub const MS_SHARED = (1 << 20);
+pub const MS_RELATIME = (1 << 21);
+pub const MS_KERNMOUNT = (1 << 22);
+pub const MS_I_VERSION = (1 << 23);
+pub const MS_STRICTATIME = (1 << 24);
+pub const MS_LAZYTIME = (1 << 25);
+pub const MS_NOREMOTELOCK = (1 << 27);
+pub const MS_NOSEC = (1 << 28);
+pub const MS_BORN = (1 << 29);
+pub const MS_ACTIVE = (1 << 30);
+pub const MS_NOUSER = (1 << 31);
-pub const MS_RMT_MASK = (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|MS_LAZYTIME);
+pub const MS_RMT_MASK = (MS_RDONLY | MS_SYNCHRONOUS | MS_MANDLOCK | MS_I_VERSION | MS_LAZYTIME);
pub const MS_MGC_VAL = 0xc0ed0000;
pub const MS_MGC_MSK = 0xffff0000;
@@ -548,7 +567,6 @@ pub const MNT_DETACH = 2;
pub const MNT_EXPIRE = 4;
pub const UMOUNT_NOFOLLOW = 8;
-
pub const S_IFMT = 0o170000;
pub const S_IFDIR = 0o040000;
@@ -609,15 +627,30 @@ pub const TFD_CLOEXEC = O_CLOEXEC;
pub const TFD_TIMER_ABSTIME = 1;
pub const TFD_TIMER_CANCEL_ON_SET = (1 << 1);
-fn unsigned(s: i32) u32 { return @bitCast(u32, s); }
-fn signed(s: u32) i32 { return @bitCast(i32, s); }
-pub fn WEXITSTATUS(s: i32) i32 { return signed((unsigned(s) & 0xff00) >> 8); }
-pub fn WTERMSIG(s: i32) i32 { return signed(unsigned(s) & 0x7f); }
-pub fn WSTOPSIG(s: i32) i32 { return WEXITSTATUS(s); }
-pub fn WIFEXITED(s: i32) bool { return WTERMSIG(s) == 0; }
-pub fn WIFSTOPPED(s: i32) bool { return (u16)(((unsigned(s)&0xffff)*%0x10001)>>8) > 0x7f00; }
-pub fn WIFSIGNALED(s: i32) bool { return (unsigned(s)&0xffff)-%1 < 0xff; }
-
+fn unsigned(s: i32) u32 {
+ return @bitCast(u32, s);
+}
+fn signed(s: u32) i32 {
+ return @bitCast(i32, s);
+}
+pub fn WEXITSTATUS(s: i32) i32 {
+ return signed((unsigned(s) & 0xff00) >> 8);
+}
+pub fn WTERMSIG(s: i32) i32 {
+ return signed(unsigned(s) & 0x7f);
+}
+pub fn WSTOPSIG(s: i32) i32 {
+ return WEXITSTATUS(s);
+}
+pub fn WIFEXITED(s: i32) bool {
+ return WTERMSIG(s) == 0;
+}
+pub fn WIFSTOPPED(s: i32) bool {
+ return @intCast(u16, ((unsigned(s) & 0xffff) *% 0x10001) >> 8) > 0x7f00;
+}
+pub fn WIFSIGNALED(s: i32) bool {
+ return (unsigned(s) & 0xffff) -% 1 < 0xff;
+}
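+
+// These helpers mirror the C waitpid status macros: the exit code lives in bits 8..15
+// of the status word returned by wait4, and the terminating signal in bits 0..6.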
pub const winsize = extern struct {
ws_row: u16,
@@ -629,22 +662,25 @@ pub const winsize = extern struct {
/// Get the errno from a syscall return value, or 0 for no error.
pub fn getErrno(r: usize) usize {
const signed_r = @bitCast(isize, r);
- return if (signed_r > -4096 and signed_r < 0) usize(-signed_r) else 0;
+ return if (signed_r > -4096 and signed_r < 0) @intCast(usize, -signed_r) else 0;
}
pub fn dup2(old: i32, new: i32) usize {
- return syscall2(SYS_dup2, usize(old), usize(new));
+ return syscall2(SYS_dup2, @intCast(usize, old), @intCast(usize, new));
}
-pub fn chdir(path: &const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn chdir(path: [*]const u8) usize {
return syscall1(SYS_chdir, @ptrToInt(path));
}
-pub fn chroot(path: &const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn chroot(path: [*]const u8) usize {
return syscall1(SYS_chroot, @ptrToInt(path));
}
-pub fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn execve(path: [*]const u8, argv: [*]const ?[*]const u8, envp: [*]const ?[*]const u8) usize {
return syscall3(SYS_execve, @ptrToInt(path), @ptrToInt(argv), @ptrToInt(envp));
}
@@ -652,106 +688,131 @@ pub fn fork() usize {
return syscall0(SYS_fork);
}
-pub fn getcwd(buf: &u8, size: usize) usize {
+pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?*timespec) usize {
+ return syscall4(SYS_futex, uaddr, futex_op, @bitCast(u32, val), @ptrToInt(timeout));
+}
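+
+// Illustrative usage sketch (`uaddr` and `expected` are hypothetical locals): block
+// until the futex word at `uaddr` stops holding `expected`; an EAGAIN errno from the
+// call means the value had already changed.
+//   const rc = futex_wait(uaddr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, expected, null);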
+
+pub fn getcwd(buf: [*]u8, size: usize) usize {
return syscall2(SYS_getcwd, @ptrToInt(buf), size);
}
-pub fn getdents(fd: i32, dirp: &u8, count: usize) usize {
- return syscall3(SYS_getdents, usize(fd), @ptrToInt(dirp), count);
+pub fn getdents(fd: i32, dirp: [*]u8, count: usize) usize {
+ return syscall3(SYS_getdents, @intCast(usize, fd), @ptrToInt(dirp), count);
}
pub fn isatty(fd: i32) bool {
var wsz: winsize = undefined;
- return syscall3(SYS_ioctl, usize(fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0;
+ return syscall3(SYS_ioctl, @intCast(usize, fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0;
}
-pub fn readlink(noalias path: &const u8, noalias buf_ptr: &u8, buf_len: usize) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn readlink(noalias path: [*]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize {
return syscall3(SYS_readlink, @ptrToInt(path), @ptrToInt(buf_ptr), buf_len);
}
-pub fn mkdir(path: &const u8, mode: u32) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn mkdir(path: [*]const u8, mode: u32) usize {
return syscall2(SYS_mkdir, @ptrToInt(path), mode);
}
-pub fn mount(special: &const u8, dir: &const u8, fstype: &const u8, flags: usize, data: usize) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn mount(special: [*]const u8, dir: [*]const u8, fstype: [*]const u8, flags: usize, data: usize) usize {
return syscall5(SYS_mount, @ptrToInt(special), @ptrToInt(dir), @ptrToInt(fstype), flags, data);
}
-pub fn umount(special: &const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn umount(special: [*]const u8) usize {
return syscall2(SYS_umount2, @ptrToInt(special), 0);
}
-pub fn umount2(special: &const u8, flags: u32) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn umount2(special: [*]const u8, flags: u32) usize {
return syscall2(SYS_umount2, @ptrToInt(special), flags);
}
-pub fn mmap(address: ?&u8, length: usize, prot: usize, flags: usize, fd: i32, offset: isize) usize {
- return syscall6(SYS_mmap, @ptrToInt(address), length, prot, flags, usize(fd),
- @bitCast(usize, offset));
+pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
+ return syscall6(SYS_mmap, @ptrToInt(address), length, prot, flags, @intCast(usize, fd), @bitCast(usize, offset));
}
-pub fn munmap(address: &u8, length: usize) usize {
- return syscall2(SYS_munmap, @ptrToInt(address), length);
+pub fn munmap(address: usize, length: usize) usize {
+ return syscall2(SYS_munmap, address, length);
}
-pub fn read(fd: i32, buf: &u8, count: usize) usize {
- return syscall3(SYS_read, usize(fd), @ptrToInt(buf), count);
+pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
+ return syscall3(SYS_read, @intCast(usize, fd), @ptrToInt(buf), count);
}
-pub fn rmdir(path: &const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn rmdir(path: [*]const u8) usize {
return syscall1(SYS_rmdir, @ptrToInt(path));
}
-pub fn symlink(existing: &const u8, new: &const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn symlink(existing: [*]const u8, new: [*]const u8) usize {
return syscall2(SYS_symlink, @ptrToInt(existing), @ptrToInt(new));
}
-pub fn pread(fd: i32, buf: &u8, count: usize, offset: usize) usize {
- return syscall4(SYS_pread, usize(fd), @ptrToInt(buf), count, offset);
+pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: usize) usize {
+ return syscall4(SYS_pread, @intCast(usize, fd), @ptrToInt(buf), count, offset);
}
-pub fn access(path: &const u8, mode: u32) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn access(path: [*]const u8, mode: u32) usize {
return syscall2(SYS_access, @ptrToInt(path), mode);
}
-pub fn pipe(fd: &[2]i32) usize {
+pub fn pipe(fd: *[2]i32) usize {
return pipe2(fd, 0);
}
-pub fn pipe2(fd: &[2]i32, flags: usize) usize {
+pub fn pipe2(fd: *[2]i32, flags: usize) usize {
return syscall2(SYS_pipe2, @ptrToInt(fd), flags);
}
-pub fn write(fd: i32, buf: &const u8, count: usize) usize {
- return syscall3(SYS_write, usize(fd), @ptrToInt(buf), count);
+pub fn write(fd: i32, buf: [*]const u8, count: usize) usize {
+ return syscall3(SYS_write, @intCast(usize, fd), @ptrToInt(buf), count);
}
-pub fn pwrite(fd: i32, buf: &const u8, count: usize, offset: usize) usize {
- return syscall4(SYS_pwrite, usize(fd), @ptrToInt(buf), count, offset);
+pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: usize) usize {
+ return syscall4(SYS_pwrite, @intCast(usize, fd), @ptrToInt(buf), count, offset);
}
-pub fn rename(old: &const u8, new: &const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn rename(old: [*]const u8, new: [*]const u8) usize {
return syscall2(SYS_rename, @ptrToInt(old), @ptrToInt(new));
}
-pub fn open(path: &const u8, flags: u32, perm: usize) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn open(path: [*]const u8, flags: u32, perm: usize) usize {
return syscall3(SYS_open, @ptrToInt(path), flags, perm);
}
-pub fn create(path: &const u8, perm: usize) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn create(path: [*]const u8, perm: usize) usize {
return syscall2(SYS_creat, @ptrToInt(path), perm);
}
-pub fn openat(dirfd: i32, path: &const u8, flags: usize, mode: usize) usize {
- return syscall4(SYS_openat, usize(dirfd), @ptrToInt(path), flags, mode);
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn openat(dirfd: i32, path: [*]const u8, flags: usize, mode: usize) usize {
+ return syscall4(SYS_openat, @intCast(usize, dirfd), @ptrToInt(path), flags, mode);
+}
+
+/// See also `clone` (from the arch-specific include)
+pub fn clone5(flags: usize, child_stack_ptr: usize, parent_tid: *i32, child_tid: *i32, newtls: usize) usize {
+ return syscall5(SYS_clone, flags, child_stack_ptr, @ptrToInt(parent_tid), @ptrToInt(child_tid), newtls);
+}
+
+/// See also `clone` (from the arch-specific include)
+pub fn clone2(flags: usize, child_stack_ptr: usize) usize {
+ return syscall2(SYS_clone, flags, child_stack_ptr);
}
pub fn close(fd: i32) usize {
- return syscall1(SYS_close, usize(fd));
+ return syscall1(SYS_close, @intCast(usize, fd));
}
pub fn lseek(fd: i32, offset: isize, ref_pos: usize) usize {
- return syscall3(SYS_lseek, usize(fd), @bitCast(usize, offset), ref_pos);
+ return syscall3(SYS_lseek, @intCast(usize, fd), @bitCast(usize, offset), ref_pos);
}
pub fn exit(status: i32) noreturn {
@@ -759,23 +820,62 @@ pub fn exit(status: i32) noreturn {
unreachable;
}
-pub fn getrandom(buf: &u8, count: usize, flags: u32) usize {
- return syscall3(SYS_getrandom, @ptrToInt(buf), count, usize(flags));
+pub fn getrandom(buf: [*]u8, count: usize, flags: u32) usize {
+ return syscall3(SYS_getrandom, @ptrToInt(buf), count, @intCast(usize, flags));
}
pub fn kill(pid: i32, sig: i32) usize {
- return syscall2(SYS_kill, @bitCast(usize, isize(pid)), usize(sig));
+ return syscall2(SYS_kill, @bitCast(usize, isize(pid)), @intCast(usize, sig));
}
-pub fn unlink(path: &const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn unlink(path: [*]const u8) usize {
return syscall1(SYS_unlink, @ptrToInt(path));
}
-pub fn waitpid(pid: i32, status: &i32, options: i32) usize {
+pub fn waitpid(pid: i32, status: *i32, options: i32) usize {
return syscall4(SYS_wait4, @bitCast(usize, isize(pid)), @ptrToInt(status), @bitCast(usize, isize(options)), 0);
}
-pub fn nanosleep(req: &const timespec, rem: ?×pec) usize {
+pub fn clock_gettime(clk_id: i32, tp: *timespec) usize {
+ if (VDSO_CGT_SYM.len != 0) {
+ const f = @atomicLoad(@typeOf(init_vdso_clock_gettime), &vdso_clock_gettime, builtin.AtomicOrder.Unordered);
+ if (@ptrToInt(f) != 0) {
+ const rc = f(clk_id, tp);
+ switch (rc) {
+ 0, @bitCast(usize, isize(-EINVAL)) => return rc,
+ else => {},
+ }
+ }
+ }
+ return syscall2(SYS_clock_gettime, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
+}
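+
+// vdso_clock_gettime starts out pointing at init_vdso_clock_gettime; the first call
+// resolves __vdso_clock_gettime through the vDSO lookup and publishes the result with
+// a cmpxchg, so later calls jump straight to the vDSO routine when one is available
+// and otherwise fall back to the syscall.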
+var vdso_clock_gettime = init_vdso_clock_gettime;
+extern fn init_vdso_clock_gettime(clk: i32, ts: *timespec) usize {
+ const addr = vdso.lookup(VDSO_CGT_VER, VDSO_CGT_SYM);
+ var f = @intToPtr(@typeOf(init_vdso_clock_gettime), addr);
+ _ = @cmpxchgStrong(@typeOf(init_vdso_clock_gettime), &vdso_clock_gettime, init_vdso_clock_gettime, f, builtin.AtomicOrder.Monotonic, builtin.AtomicOrder.Monotonic);
+ if (@ptrToInt(f) == 0) return @bitCast(usize, isize(-ENOSYS));
+ return f(clk, ts);
+}
+
+pub fn clock_getres(clk_id: i32, tp: *timespec) usize {
+ return syscall2(SYS_clock_getres, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
+}
+
+pub fn clock_settime(clk_id: i32, tp: *const timespec) usize {
+ return syscall2(SYS_clock_settime, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
+}
+
+pub fn gettimeofday(tv: *timeval, tz: *timezone) usize {
+ return syscall2(SYS_gettimeofday, @ptrToInt(tv), @ptrToInt(tz));
+}
+
+pub fn settimeofday(tv: *const timeval, tz: *const timezone) usize {
+ return syscall2(SYS_settimeofday, @ptrToInt(tv), @ptrToInt(tz));
+}
+
+pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return syscall2(SYS_nanosleep, @ptrToInt(req), @ptrToInt(rem));
}
@@ -819,11 +919,11 @@ pub fn setegid(egid: u32) usize {
return syscall1(SYS_setegid, egid);
}
-pub fn getresuid(ruid: &u32, euid: &u32, suid: &u32) usize {
+pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
return syscall3(SYS_getresuid, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid));
}
-pub fn getresgid(rgid: &u32, egid: &u32, sgid: &u32) usize {
+pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
return syscall3(SYS_getresgid, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid));
}
@@ -835,34 +935,34 @@ pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
return syscall3(SYS_setresgid, rgid, egid, sgid);
}
-pub fn getgroups(size: usize, list: &u32) usize {
+pub fn getgroups(size: usize, list: *u32) usize {
return syscall2(SYS_getgroups, size, @ptrToInt(list));
}
-pub fn setgroups(size: usize, list: &const u32) usize {
+pub fn setgroups(size: usize, list: *const u32) usize {
return syscall2(SYS_setgroups, size, @ptrToInt(list));
}
pub fn getpid() i32 {
- return @bitCast(i32, u32(syscall0(SYS_getpid)));
+ return @bitCast(i32, @truncate(u32, syscall0(SYS_getpid)));
}
-pub fn sigprocmask(flags: u32, noalias set: &const sigset_t, noalias oldset: ?&sigset_t) usize {
- return syscall4(SYS_rt_sigprocmask, flags, @ptrToInt(set), @ptrToInt(oldset), NSIG/8);
+pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
+ return syscall4(SYS_rt_sigprocmask, flags, @ptrToInt(set), @ptrToInt(oldset), NSIG / 8);
}
-pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigaction) usize {
+pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize {
assert(sig >= 1);
assert(sig != SIGKILL);
assert(sig != SIGSTOP);
- var ksa = k_sigaction {
+ var ksa = k_sigaction{
.handler = act.handler,
.flags = act.flags | SA_RESTORER,
.mask = undefined,
- .restorer = @ptrCast(extern fn()void, restore_rt),
+ .restorer = @ptrCast(extern fn () void, restore_rt),
};
var ksa_old: k_sigaction = undefined;
- @memcpy(@ptrCast(&u8, &ksa.mask), @ptrCast(&const u8, &act.mask), 8);
+ @memcpy(@ptrCast([*]u8, &ksa.mask), @ptrCast([*]const u8, &act.mask), 8);
const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(&ksa), @ptrToInt(&ksa_old), @sizeOf(@typeOf(ksa.mask)));
const err = getErrno(result);
if (err != 0) {
@@ -871,7 +971,7 @@ pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigacti
if (oact) |old| {
old.handler = ksa_old.handler;
old.flags = @truncate(u32, ksa_old.flags);
- @memcpy(@ptrCast(&u8, &old.mask), @ptrCast(&const u8, &ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask)));
+ @memcpy(@ptrCast([*]u8, &old.mask), @ptrCast([*]const u8, &ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask)));
}
return 0;
}
@@ -882,53 +982,53 @@ const all_mask = []usize{@maxValue(usize)};
const app_mask = []usize{0xfffffffc7fffffff};
const k_sigaction = extern struct {
- handler: extern fn(i32)void,
+ handler: extern fn (i32) void,
flags: usize,
- restorer: extern fn()void,
+ restorer: extern fn () void,
mask: [2]u32,
};
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = struct {
- handler: extern fn(i32)void,
+ handler: extern fn (i32) void,
mask: sigset_t,
flags: u32,
};
-pub const SIG_ERR = @intToPtr(extern fn(i32)void, @maxValue(usize));
-pub const SIG_DFL = @intToPtr(extern fn(i32)void, 0);
-pub const SIG_IGN = @intToPtr(extern fn(i32)void, 1);
+pub const SIG_ERR = @intToPtr(extern fn (i32) void, @maxValue(usize));
+pub const SIG_DFL = @intToPtr(extern fn (i32) void, 0);
+pub const SIG_IGN = @intToPtr(extern fn (i32) void, 1);
pub const empty_sigset = []usize{0} ** sigset_t.len;
pub fn raise(sig: i32) usize {
var set: sigset_t = undefined;
blockAppSignals(&set);
- const tid = i32(syscall0(SYS_gettid));
- const ret = syscall2(SYS_tkill, usize(tid), usize(sig));
+ const tid = @intCast(i32, syscall0(SYS_gettid));
+ const ret = syscall2(SYS_tkill, @intCast(usize, tid), @intCast(usize, sig));
restoreSignals(&set);
return ret;
}
-fn blockAllSignals(set: &sigset_t) void {
- _ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&all_mask), @ptrToInt(set), NSIG/8);
+fn blockAllSignals(set: *sigset_t) void {
+ _ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&all_mask), @ptrToInt(set), NSIG / 8);
}
-fn blockAppSignals(set: &sigset_t) void {
- _ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&app_mask), @ptrToInt(set), NSIG/8);
+fn blockAppSignals(set: *sigset_t) void {
+ _ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&app_mask), @ptrToInt(set), NSIG / 8);
}
-fn restoreSignals(set: &sigset_t) void {
- _ = syscall4(SYS_rt_sigprocmask, SIG_SETMASK, @ptrToInt(set), 0, NSIG/8);
+fn restoreSignals(set: *sigset_t) void {
+ _ = syscall4(SYS_rt_sigprocmask, SIG_SETMASK, @ptrToInt(set), 0, NSIG / 8);
}
-pub fn sigaddset(set: &sigset_t, sig: u6) void {
+pub fn sigaddset(set: *sigset_t, sig: u6) void {
const s = sig - 1;
- (*set)[usize(s) / usize.bit_count] |= usize(1) << (s & (usize.bit_count - 1));
+ (set.*)[@intCast(usize, s) / usize.bit_count] |= @intCast(usize, 1) << (s & (usize.bit_count - 1));
}
-pub fn sigismember(set: &const sigset_t, sig: u6) bool {
+pub fn sigismember(set: *const sigset_t, sig: u6) bool {
const s = sig - 1;
- return ((*set)[usize(s) / usize.bit_count] & (usize(1) << (s & (usize.bit_count - 1)))) != 0;
+ return ((set.*)[@intCast(usize, s) / usize.bit_count] & (@intCast(usize, 1) << (s & (usize.bit_count - 1)))) != 0;
}
pub const in_port_t = u16;
@@ -956,145 +1056,151 @@ pub const sockaddr_in6 = extern struct {
};
pub const iovec = extern struct {
- iov_base: &u8,
+ iov_base: [*]u8,
iov_len: usize,
};
-pub fn getsockname(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
- return syscall3(SYS_getsockname, usize(fd), @ptrToInt(addr), @ptrToInt(len));
+pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
+ return syscall3(SYS_getsockname, @intCast(usize, fd), @ptrToInt(addr), @ptrToInt(len));
}
-pub fn getpeername(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
- return syscall3(SYS_getpeername, usize(fd), @ptrToInt(addr), @ptrToInt(len));
+pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
+ return syscall3(SYS_getpeername, @intCast(usize, fd), @ptrToInt(addr), @ptrToInt(len));
}
pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
return syscall3(SYS_socket, domain, socket_type, protocol);
}
-pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: &const u8, optlen: socklen_t) usize {
- return syscall5(SYS_setsockopt, usize(fd), level, optname, usize(optval), @ptrToInt(optlen));
+pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: [*]const u8, optlen: socklen_t) usize {
+ return syscall5(SYS_setsockopt, @intCast(usize, fd), level, optname, @intCast(usize, optval), @ptrToInt(optlen));
}
-pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: &u8, noalias optlen: &socklen_t) usize {
- return syscall5(SYS_getsockopt, usize(fd), level, optname, @ptrToInt(optval), @ptrToInt(optlen));
+pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: [*]u8, noalias optlen: *socklen_t) usize {
+ return syscall5(SYS_getsockopt, @intCast(usize, fd), level, optname, @ptrToInt(optval), @ptrToInt(optlen));
}
-pub fn sendmsg(fd: i32, msg: &const msghdr, flags: u32) usize {
- return syscall3(SYS_sendmsg, usize(fd), @ptrToInt(msg), flags);
+pub fn sendmsg(fd: i32, msg: *const msghdr, flags: u32) usize {
+ return syscall3(SYS_sendmsg, @intCast(usize, fd), @ptrToInt(msg), flags);
}
-pub fn connect(fd: i32, addr: &const sockaddr, len: socklen_t) usize {
- return syscall3(SYS_connect, usize(fd), @ptrToInt(addr), usize(len));
+pub fn connect(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
+ return syscall3(SYS_connect, @intCast(usize, fd), @ptrToInt(addr), @intCast(usize, len));
}
-pub fn recvmsg(fd: i32, msg: &msghdr, flags: u32) usize {
- return syscall3(SYS_recvmsg, usize(fd), @ptrToInt(msg), flags);
+pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize {
+ return syscall3(SYS_recvmsg, @intCast(usize, fd), @ptrToInt(msg), flags);
}
-pub fn recvfrom(fd: i32, noalias buf: &u8, len: usize, flags: u32,
- noalias addr: ?&sockaddr, noalias alen: ?&socklen_t) usize
-{
- return syscall6(SYS_recvfrom, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen));
+pub fn recvfrom(fd: i32, noalias buf: [*]u8, len: usize, flags: u32, noalias addr: ?*sockaddr, noalias alen: ?*socklen_t) usize {
+ return syscall6(SYS_recvfrom, @intCast(usize, fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen));
}
pub fn shutdown(fd: i32, how: i32) usize {
- return syscall2(SYS_shutdown, usize(fd), usize(how));
+ return syscall2(SYS_shutdown, @intCast(usize, fd), @intCast(usize, how));
}
-pub fn bind(fd: i32, addr: &const sockaddr, len: socklen_t) usize {
- return syscall3(SYS_bind, usize(fd), @ptrToInt(addr), usize(len));
+pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
+ return syscall3(SYS_bind, @intCast(usize, fd), @ptrToInt(addr), @intCast(usize, len));
}
pub fn listen(fd: i32, backlog: u32) usize {
- return syscall2(SYS_listen, usize(fd), backlog);
+ return syscall2(SYS_listen, @intCast(usize, fd), backlog);
}
-pub fn sendto(fd: i32, buf: &const u8, len: usize, flags: u32, addr: ?&const sockaddr, alen: socklen_t) usize {
- return syscall6(SYS_sendto, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), usize(alen));
+pub fn sendto(fd: i32, buf: [*]const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize {
+ return syscall6(SYS_sendto, @intCast(usize, fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), @intCast(usize, alen));
}
pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: [2]i32) usize {
- return syscall4(SYS_socketpair, usize(domain), usize(socket_type), usize(protocol), @ptrToInt(&fd[0]));
+ return syscall4(SYS_socketpair, @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @ptrToInt(&fd[0]));
}
-pub fn accept(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
+pub fn accept(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return accept4(fd, addr, len, 0);
}
-pub fn accept4(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t, flags: u32) usize {
- return syscall4(SYS_accept4, usize(fd), @ptrToInt(addr), @ptrToInt(len), flags);
+pub fn accept4(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t, flags: u32) usize {
+ return syscall4(SYS_accept4, @intCast(usize, fd), @ptrToInt(addr), @ptrToInt(len), flags);
}
-pub fn fstat(fd: i32, stat_buf: &Stat) usize {
- return syscall2(SYS_fstat, usize(fd), @ptrToInt(stat_buf));
+pub fn fstat(fd: i32, stat_buf: *Stat) usize {
+ return syscall2(SYS_fstat, @intCast(usize, fd), @ptrToInt(stat_buf));
}
-pub fn stat(pathname: &const u8, statbuf: &Stat) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn stat(pathname: [*]const u8, statbuf: *Stat) usize {
return syscall2(SYS_stat, @ptrToInt(pathname), @ptrToInt(statbuf));
}
-pub fn lstat(pathname: &const u8, statbuf: &Stat) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn lstat(pathname: [*]const u8, statbuf: *Stat) usize {
return syscall2(SYS_lstat, @ptrToInt(pathname), @ptrToInt(statbuf));
}
-pub fn listxattr(path: &const u8, list: &u8, size: usize) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn listxattr(path: [*]const u8, list: [*]u8, size: usize) usize {
return syscall3(SYS_listxattr, @ptrToInt(path), @ptrToInt(list), size);
}
-pub fn llistxattr(path: &const u8, list: &u8, size: usize) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn llistxattr(path: [*]const u8, list: [*]u8, size: usize) usize {
return syscall3(SYS_llistxattr, @ptrToInt(path), @ptrToInt(list), size);
}
-pub fn flistxattr(fd: usize, list: &u8, size: usize) usize {
+pub fn flistxattr(fd: usize, list: [*]u8, size: usize) usize {
return syscall3(SYS_flistxattr, fd, @ptrToInt(list), size);
}
-pub fn getxattr(path: &const u8, name: &const u8, value: &void, size: usize) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn getxattr(path: [*]const u8, name: [*]const u8, value: [*]u8, size: usize) usize {
return syscall4(SYS_getxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size);
}
-pub fn lgetxattr(path: &const u8, name: &const u8, value: &void, size: usize) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn lgetxattr(path: [*]const u8, name: [*]const u8, value: [*]u8, size: usize) usize {
return syscall4(SYS_lgetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size);
}
-pub fn fgetxattr(fd: usize, name: &const u8, value: &void, size: usize) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn fgetxattr(fd: usize, name: [*]const u8, value: [*]u8, size: usize) usize {
return syscall4(SYS_lgetxattr, fd, @ptrToInt(name), @ptrToInt(value), size);
}
-pub fn setxattr(path: &const u8, name: &const u8, value: &const void,
- size: usize, flags: usize) usize {
-
- return syscall5(SYS_setxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value),
- size, flags);
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn setxattr(path: [*]const u8, name: [*]const u8, value: *const void, size: usize, flags: usize) usize {
+ return syscall5(SYS_setxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags);
}
-pub fn lsetxattr(path: &const u8, name: &const u8, value: &const void,
- size: usize, flags: usize) usize {
-
- return syscall5(SYS_lsetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value),
- size, flags);
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn lsetxattr(path: [*]const u8, name: [*]const u8, value: *const void, size: usize, flags: usize) usize {
+ return syscall5(SYS_lsetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags);
}
-pub fn fsetxattr(fd: usize, name: &const u8, value: &const void,
- size: usize, flags: usize) usize {
-
- return syscall5(SYS_fsetxattr, fd, @ptrToInt(name), @ptrToInt(value),
- size, flags);
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn fsetxattr(fd: usize, name: [*]const u8, value: *const void, size: usize, flags: usize) usize {
+ return syscall5(SYS_fsetxattr, fd, @ptrToInt(name), @ptrToInt(value), size, flags);
}
-pub fn removexattr(path: &const u8, name: &const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn removexattr(path: [*]const u8, name: [*]const u8) usize {
return syscall2(SYS_removexattr, @ptrToInt(path), @ptrToInt(name));
}
-pub fn lremovexattr(path: &const u8, name: &const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn lremovexattr(path: [*]const u8, name: [*]const u8) usize {
return syscall2(SYS_lremovexattr, @ptrToInt(path), @ptrToInt(name));
}
-pub fn fremovexattr(fd: usize, name: &const u8) usize {
+// TODO https://github.com/ziglang/zig/issues/265
+pub fn fremovexattr(fd: usize, name: [*]const u8) usize {
return syscall2(SYS_fremovexattr, fd, @ptrToInt(name));
}
+pub fn sched_getaffinity(pid: i32, set: []usize) usize {
+ return syscall3(SYS_sched_getaffinity, @bitCast(usize, isize(pid)), set.len * @sizeOf(usize), @ptrToInt(set.ptr));
+}
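+
+// Illustrative usage sketch: pid 0 queries the calling thread's CPU affinity mask.
+//   var mask = []usize{0} ** 16;
+//   const rc = sched_getaffinity(0, mask[0..]);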
+
pub const epoll_data = packed union {
ptr: usize,
fd: i32,
@@ -1115,56 +1221,60 @@ pub fn epoll_create1(flags: usize) usize {
return syscall1(SYS_epoll_create1, flags);
}
-pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: &epoll_event) usize {
- return syscall4(SYS_epoll_ctl, usize(epoll_fd), usize(op), usize(fd), @ptrToInt(ev));
+pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: *epoll_event) usize {
+ return syscall4(SYS_epoll_ctl, @intCast(usize, epoll_fd), @intCast(usize, op), @intCast(usize, fd), @ptrToInt(ev));
}
-pub fn epoll_wait(epoll_fd: i32, events: &epoll_event, maxevents: u32, timeout: i32) usize {
- return syscall4(SYS_epoll_wait, usize(epoll_fd), @ptrToInt(events), usize(maxevents), usize(timeout));
+pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32) usize {
+ return syscall4(SYS_epoll_wait, @intCast(usize, epoll_fd), @ptrToInt(events), @intCast(usize, maxevents), @intCast(usize, timeout));
+}
+
+pub fn eventfd(count: u32, flags: u32) usize {
+ return syscall2(SYS_eventfd2, count, flags);
}
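+
+// Illustrative usage sketch: create a close-on-exec, non-blocking event counter.
+//   const fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);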
pub fn timerfd_create(clockid: i32, flags: u32) usize {
- return syscall2(SYS_timerfd_create, usize(clockid), usize(flags));
+ return syscall2(SYS_timerfd_create, @intCast(usize, clockid), @intCast(usize, flags));
}
pub const itimerspec = extern struct {
it_interval: timespec,
- it_value: timespec
+ it_value: timespec,
};
-pub fn timerfd_gettime(fd: i32, curr_value: &itimerspec) usize {
- return syscall2(SYS_timerfd_gettime, usize(fd), @ptrToInt(curr_value));
+pub fn timerfd_gettime(fd: i32, curr_value: *itimerspec) usize {
+ return syscall2(SYS_timerfd_gettime, @intCast(usize, fd), @ptrToInt(curr_value));
}
-pub fn timerfd_settime(fd: i32, flags: u32, new_value: &const itimerspec, old_value: ?&itimerspec) usize {
- return syscall4(SYS_timerfd_settime, usize(fd), usize(flags), @ptrToInt(new_value), @ptrToInt(old_value));
+pub fn timerfd_settime(fd: i32, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
+ return syscall4(SYS_timerfd_settime, @intCast(usize, fd), @intCast(usize, flags), @ptrToInt(new_value), @ptrToInt(old_value));
}
pub const _LINUX_CAPABILITY_VERSION_1 = 0x19980330;
-pub const _LINUX_CAPABILITY_U32S_1 = 1;
+pub const _LINUX_CAPABILITY_U32S_1 = 1;
pub const _LINUX_CAPABILITY_VERSION_2 = 0x20071026;
-pub const _LINUX_CAPABILITY_U32S_2 = 2;
+pub const _LINUX_CAPABILITY_U32S_2 = 2;
pub const _LINUX_CAPABILITY_VERSION_3 = 0x20080522;
-pub const _LINUX_CAPABILITY_U32S_3 = 2;
+pub const _LINUX_CAPABILITY_U32S_3 = 2;
-pub const VFS_CAP_REVISION_MASK = 0xFF000000;
-pub const VFS_CAP_REVISION_SHIFT = 24;
-pub const VFS_CAP_FLAGS_MASK = ~VFS_CAP_REVISION_MASK;
+pub const VFS_CAP_REVISION_MASK = 0xFF000000;
+pub const VFS_CAP_REVISION_SHIFT = 24;
+pub const VFS_CAP_FLAGS_MASK = ~VFS_CAP_REVISION_MASK;
pub const VFS_CAP_FLAGS_EFFECTIVE = 0x000001;
pub const VFS_CAP_REVISION_1 = 0x01000000;
-pub const VFS_CAP_U32_1 = 1;
-pub const XATTR_CAPS_SZ_1 = @sizeOf(u32)*(1 + 2*VFS_CAP_U32_1);
+pub const VFS_CAP_U32_1 = 1;
+pub const XATTR_CAPS_SZ_1 = @sizeOf(u32) * (1 + 2 * VFS_CAP_U32_1);
pub const VFS_CAP_REVISION_2 = 0x02000000;
-pub const VFS_CAP_U32_2 = 2;
-pub const XATTR_CAPS_SZ_2 = @sizeOf(u32)*(1 + 2*VFS_CAP_U32_2);
+pub const VFS_CAP_U32_2 = 2;
+pub const XATTR_CAPS_SZ_2 = @sizeOf(u32) * (1 + 2 * VFS_CAP_U32_2);
-pub const XATTR_CAPS_SZ = XATTR_CAPS_SZ_2;
-pub const VFS_CAP_U32 = VFS_CAP_U32_2;
-pub const VFS_CAP_REVISION = VFS_CAP_REVISION_2;
+pub const XATTR_CAPS_SZ = XATTR_CAPS_SZ_2;
+pub const VFS_CAP_U32 = VFS_CAP_U32_2;
+pub const VFS_CAP_REVISION = VFS_CAP_REVISION_2;
pub const vfs_cap_data = extern struct {
//all of these are mandated as little endian
@@ -1175,49 +1285,48 @@ pub const vfs_cap_data = extern struct {
};
magic_etc: u32,
- data: [VFS_CAP_U32]Data,
+ data: [VFS_CAP_U32]Data,
};
-
-pub const CAP_CHOWN = 0;
-pub const CAP_DAC_OVERRIDE = 1;
-pub const CAP_DAC_READ_SEARCH = 2;
-pub const CAP_FOWNER = 3;
-pub const CAP_FSETID = 4;
-pub const CAP_KILL = 5;
-pub const CAP_SETGID = 6;
-pub const CAP_SETUID = 7;
-pub const CAP_SETPCAP = 8;
-pub const CAP_LINUX_IMMUTABLE = 9;
-pub const CAP_NET_BIND_SERVICE = 10;
-pub const CAP_NET_BROADCAST = 11;
-pub const CAP_NET_ADMIN = 12;
-pub const CAP_NET_RAW = 13;
-pub const CAP_IPC_LOCK = 14;
-pub const CAP_IPC_OWNER = 15;
-pub const CAP_SYS_MODULE = 16;
-pub const CAP_SYS_RAWIO = 17;
-pub const CAP_SYS_CHROOT = 18;
-pub const CAP_SYS_PTRACE = 19;
-pub const CAP_SYS_PACCT = 20;
-pub const CAP_SYS_ADMIN = 21;
-pub const CAP_SYS_BOOT = 22;
-pub const CAP_SYS_NICE = 23;
-pub const CAP_SYS_RESOURCE = 24;
-pub const CAP_SYS_TIME = 25;
-pub const CAP_SYS_TTY_CONFIG = 26;
-pub const CAP_MKNOD = 27;
-pub const CAP_LEASE = 28;
-pub const CAP_AUDIT_WRITE = 29;
-pub const CAP_AUDIT_CONTROL = 30;
-pub const CAP_SETFCAP = 31;
-pub const CAP_MAC_OVERRIDE = 32;
-pub const CAP_MAC_ADMIN = 33;
-pub const CAP_SYSLOG = 34;
-pub const CAP_WAKE_ALARM = 35;
-pub const CAP_BLOCK_SUSPEND = 36;
-pub const CAP_AUDIT_READ = 37;
-pub const CAP_LAST_CAP = CAP_AUDIT_READ;
+pub const CAP_CHOWN = 0;
+pub const CAP_DAC_OVERRIDE = 1;
+pub const CAP_DAC_READ_SEARCH = 2;
+pub const CAP_FOWNER = 3;
+pub const CAP_FSETID = 4;
+pub const CAP_KILL = 5;
+pub const CAP_SETGID = 6;
+pub const CAP_SETUID = 7;
+pub const CAP_SETPCAP = 8;
+pub const CAP_LINUX_IMMUTABLE = 9;
+pub const CAP_NET_BIND_SERVICE = 10;
+pub const CAP_NET_BROADCAST = 11;
+pub const CAP_NET_ADMIN = 12;
+pub const CAP_NET_RAW = 13;
+pub const CAP_IPC_LOCK = 14;
+pub const CAP_IPC_OWNER = 15;
+pub const CAP_SYS_MODULE = 16;
+pub const CAP_SYS_RAWIO = 17;
+pub const CAP_SYS_CHROOT = 18;
+pub const CAP_SYS_PTRACE = 19;
+pub const CAP_SYS_PACCT = 20;
+pub const CAP_SYS_ADMIN = 21;
+pub const CAP_SYS_BOOT = 22;
+pub const CAP_SYS_NICE = 23;
+pub const CAP_SYS_RESOURCE = 24;
+pub const CAP_SYS_TIME = 25;
+pub const CAP_SYS_TTY_CONFIG = 26;
+pub const CAP_MKNOD = 27;
+pub const CAP_LEASE = 28;
+pub const CAP_AUDIT_WRITE = 29;
+pub const CAP_AUDIT_CONTROL = 30;
+pub const CAP_SETFCAP = 31;
+pub const CAP_MAC_OVERRIDE = 32;
+pub const CAP_MAC_ADMIN = 33;
+pub const CAP_SYSLOG = 34;
+pub const CAP_WAKE_ALARM = 35;
+pub const CAP_BLOCK_SUSPEND = 36;
+pub const CAP_AUDIT_READ = 37;
+pub const CAP_LAST_CAP = CAP_AUDIT_READ;
pub fn cap_valid(x: u8) bool {
return x >= 0 and x <= CAP_LAST_CAP;
@@ -1232,8 +1341,8 @@ pub fn CAP_TO_INDEX(cap: u8) u8 {
}
pub const cap_t = extern struct {
- hdrp: &cap_user_header_t,
- datap: &cap_user_data_t,
+ hdrp: *cap_user_header_t,
+ datap: *cap_user_data_t,
};
pub const cap_user_header_t = extern struct {
@@ -1248,20 +1357,18 @@ pub const cap_user_data_t = extern struct {
};
pub fn unshare(flags: usize) usize {
- return syscall1(SYS_unshare, usize(flags));
+ return syscall1(SYS_unshare, @intCast(usize, flags));
}
-pub fn capget(hdrp: &cap_user_header_t, datap: &cap_user_data_t) usize {
+pub fn capget(hdrp: *cap_user_header_t, datap: *cap_user_data_t) usize {
return syscall2(SYS_capget, @ptrToInt(hdrp), @ptrToInt(datap));
}
-pub fn capset(hdrp: &cap_user_header_t, datap: &const cap_user_data_t) usize {
+pub fn capset(hdrp: *cap_user_header_t, datap: *const cap_user_data_t) usize {
return syscall2(SYS_capset, @ptrToInt(hdrp), @ptrToInt(datap));
}
-test "import linux test" {
- // TODO lazy analysis should prevent this test from being compiled on windows, but
- // it is still compiled on windows
+test "import" {
if (builtin.os == builtin.Os.linux) {
_ = @import("test.zig");
}
diff --git a/std/os/linux/test.zig b/std/os/linux/test.zig
index e427fd5d59..4de26012c7 100644
--- a/std/os/linux/test.zig
+++ b/std/os/linux/test.zig
@@ -1,7 +1,12 @@
const std = @import("../../index.zig");
+const builtin = @import("builtin");
const linux = std.os.linux;
const assert = std.debug.assert;
+test "getpid" {
+ assert(linux.getpid() != 0);
+}
+
test "timer" {
const epoll_fd = linux.epoll_create();
var err = linux.getErrno(epoll_fd);
@@ -10,29 +15,30 @@ test "timer" {
const timer_fd = linux.timerfd_create(linux.CLOCK_MONOTONIC, 0);
assert(linux.getErrno(timer_fd) == 0);
- const time_interval = linux.timespec {
+ const time_interval = linux.timespec{
.tv_sec = 0,
- .tv_nsec = 2000000
+ .tv_nsec = 2000000,
};
- const new_time = linux.itimerspec {
+ const new_time = linux.itimerspec{
.it_interval = time_interval,
- .it_value = time_interval
+ .it_value = time_interval,
};
- err = linux.timerfd_settime(i32(timer_fd), 0, &new_time, null);
+ err = linux.timerfd_settime(@intCast(i32, timer_fd), 0, &new_time, null);
assert(err == 0);
- var event = linux.epoll_event {
+ var event = linux.epoll_event{
.events = linux.EPOLLIN | linux.EPOLLOUT | linux.EPOLLET,
- .data = linux.epoll_data { .ptr = 0 },
+ .data = linux.epoll_data{ .ptr = 0 },
};
- err = linux.epoll_ctl(i32(epoll_fd), linux.EPOLL_CTL_ADD, i32(timer_fd), &event);
+ err = linux.epoll_ctl(@intCast(i32, epoll_fd), linux.EPOLL_CTL_ADD, @intCast(i32, timer_fd), &event);
assert(err == 0);
const events_one: linux.epoll_event = undefined;
var events = []linux.epoll_event{events_one} ** 8;
- err = linux.epoll_wait(i32(epoll_fd), &events[0], 8, -1);
+ // TODO implicit cast from *[N]T to [*]T
+ err = linux.epoll_wait(@intCast(i32, epoll_fd), @ptrCast([*]linux.epoll_event, &events), 8, -1);
}
diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig
new file mode 100644
index 0000000000..a78e3370e6
--- /dev/null
+++ b/std/os/linux/vdso.zig
@@ -0,0 +1,91 @@
+const std = @import("../../index.zig");
+const elf = std.elf;
+const linux = std.os.linux;
+const cstr = std.cstr;
+const mem = std.mem;
+
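+/// Looks up `name` (with symbol version `vername`) in the kernel-provided vDSO image
+/// by walking its program headers and dynamic symbol/hash tables; returns the symbol's
+/// address, or 0 if the vDSO or the symbol is not available.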
+pub fn lookup(vername: []const u8, name: []const u8) usize {
+ const vdso_addr = std.os.linux_aux_raw[std.elf.AT_SYSINFO_EHDR];
+ if (vdso_addr == 0) return 0;
+
+ const eh = @intToPtr(*elf.Ehdr, vdso_addr);
+ var ph_addr: usize = vdso_addr + eh.e_phoff;
+ const ph = @intToPtr(*elf.Phdr, ph_addr);
+
+ var maybe_dynv: ?[*]usize = null;
+ var base: usize = @maxValue(usize);
+ {
+ var i: usize = 0;
+ while (i < eh.e_phnum) : ({
+ i += 1;
+ ph_addr += eh.e_phentsize;
+ }) {
+ const this_ph = @intToPtr(*elf.Phdr, ph_addr);
+ switch (this_ph.p_type) {
+ elf.PT_LOAD => base = vdso_addr + this_ph.p_offset - this_ph.p_vaddr,
+ elf.PT_DYNAMIC => maybe_dynv = @intToPtr([*]usize, vdso_addr + this_ph.p_offset),
+ else => {},
+ }
+ }
+ }
+ const dynv = maybe_dynv orelse return 0;
+ if (base == @maxValue(usize)) return 0;
+
+ var maybe_strings: ?[*]u8 = null;
+ var maybe_syms: ?[*]elf.Sym = null;
+ var maybe_hashtab: ?[*]linux.Elf_Symndx = null;
+ var maybe_versym: ?[*]u16 = null;
+ var maybe_verdef: ?*elf.Verdef = null;
+
+ {
+ var i: usize = 0;
+ while (dynv[i] != 0) : (i += 2) {
+ const p = base + dynv[i + 1];
+ switch (dynv[i]) {
+ elf.DT_STRTAB => maybe_strings = @intToPtr([*]u8, p),
+ elf.DT_SYMTAB => maybe_syms = @intToPtr([*]elf.Sym, p),
+ elf.DT_HASH => maybe_hashtab = @intToPtr([*]linux.Elf_Symndx, p),
+ elf.DT_VERSYM => maybe_versym = @intToPtr([*]u16, p),
+ elf.DT_VERDEF => maybe_verdef = @intToPtr(*elf.Verdef, p),
+ else => {},
+ }
+ }
+ }
+
+ const strings = maybe_strings orelse return 0;
+ const syms = maybe_syms orelse return 0;
+ const hashtab = maybe_hashtab orelse return 0;
+ if (maybe_verdef == null) maybe_versym = null;
+
+ const OK_TYPES = (1 << elf.STT_NOTYPE | 1 << elf.STT_OBJECT | 1 << elf.STT_FUNC | 1 << elf.STT_COMMON);
+ const OK_BINDS = (1 << elf.STB_GLOBAL | 1 << elf.STB_WEAK | 1 << elf.STB_GNU_UNIQUE);
+
+ var i: usize = 0;
+ while (i < hashtab[1]) : (i += 1) {
+ if (0 == (u32(1) << @intCast(u5, syms[i].st_info & 0xf) & OK_TYPES)) continue;
+ if (0 == (u32(1) << @intCast(u5, syms[i].st_info >> 4) & OK_BINDS)) continue;
+ if (0 == syms[i].st_shndx) continue;
+ if (!mem.eql(u8, name, cstr.toSliceConst(strings + syms[i].st_name))) continue;
+ if (maybe_versym) |versym| {
+ if (!checkver(maybe_verdef.?, versym[i], vername, strings))
+ continue;
+ }
+ return base + syms[i].st_value;
+ }
+
+ return 0;
+}
+
+fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*]u8) bool {
+ var def = def_arg;
+ const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
+ while (true) {
+ if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym)
+ break;
+ if (def.vd_next == 0)
+ return false;
+ def = @intToPtr(*elf.Verdef, @ptrToInt(def) + def.vd_next);
+ }
+ const aux = @intToPtr(*elf.Verdaux, @ptrToInt(def) + def.vd_aux);
+ return mem.eql(u8, vername, cstr.toSliceConst(strings + aux.vda_name));
+}
diff --git a/std/os/linux/x86_64.zig b/std/os/linux/x86_64.zig
index cfb2231df9..9a90e64757 100644
--- a/std/os/linux/x86_64.zig
+++ b/std/os/linux/x86_64.zig
@@ -330,26 +330,26 @@ pub const SYS_userfaultfd = 323;
pub const SYS_membarrier = 324;
pub const SYS_mlock2 = 325;
-pub const O_CREAT = 0o100;
-pub const O_EXCL = 0o200;
-pub const O_NOCTTY = 0o400;
-pub const O_TRUNC = 0o1000;
-pub const O_APPEND = 0o2000;
-pub const O_NONBLOCK = 0o4000;
-pub const O_DSYNC = 0o10000;
-pub const O_SYNC = 0o4010000;
-pub const O_RSYNC = 0o4010000;
+pub const O_CREAT = 0o100;
+pub const O_EXCL = 0o200;
+pub const O_NOCTTY = 0o400;
+pub const O_TRUNC = 0o1000;
+pub const O_APPEND = 0o2000;
+pub const O_NONBLOCK = 0o4000;
+pub const O_DSYNC = 0o10000;
+pub const O_SYNC = 0o4010000;
+pub const O_RSYNC = 0o4010000;
pub const O_DIRECTORY = 0o200000;
-pub const O_NOFOLLOW = 0o400000;
-pub const O_CLOEXEC = 0o2000000;
+pub const O_NOFOLLOW = 0o400000;
+pub const O_CLOEXEC = 0o2000000;
-pub const O_ASYNC = 0o20000;
-pub const O_DIRECT = 0o40000;
-pub const O_LARGEFILE = 0;
-pub const O_NOATIME = 0o1000000;
-pub const O_PATH = 0o10000000;
+pub const O_ASYNC = 0o20000;
+pub const O_DIRECT = 0o40000;
+pub const O_LARGEFILE = 0;
+pub const O_NOATIME = 0o1000000;
+pub const O_PATH = 0o10000000;
pub const O_TMPFILE = 0o20200000;
-pub const O_NDELAY = O_NONBLOCK;
+pub const O_NDELAY = O_NONBLOCK;
pub const F_DUPFD = 0;
pub const F_GETFD = 1;
@@ -371,93 +371,115 @@ pub const F_GETOWN_EX = 16;
pub const F_GETOWNER_UIDS = 17;
+pub const VDSO_USEFUL = true;
+pub const VDSO_CGT_SYM = "__vdso_clock_gettime";
+pub const VDSO_CGT_VER = "LINUX_2.6";
+pub const VDSO_GETCPU_SYM = "__vdso_getcpu";
+pub const VDSO_GETCPU_VER = "LINUX_2.6";
+
pub fn syscall0(number: usize) usize {
return asm volatile ("syscall"
: [ret] "={rax}" (-> usize)
: [number] "{rax}" (number)
- : "rcx", "r11");
+ : "rcx", "r11"
+ );
}
pub fn syscall1(number: usize, arg1: usize) usize {
return asm volatile ("syscall"
: [ret] "={rax}" (-> usize)
: [number] "{rax}" (number),
- [arg1] "{rdi}" (arg1)
- : "rcx", "r11");
+ [arg1] "{rdi}" (arg1)
+ : "rcx", "r11"
+ );
}
pub fn syscall2(number: usize, arg1: usize, arg2: usize) usize {
return asm volatile ("syscall"
: [ret] "={rax}" (-> usize)
: [number] "{rax}" (number),
- [arg1] "{rdi}" (arg1),
- [arg2] "{rsi}" (arg2)
- : "rcx", "r11");
+ [arg1] "{rdi}" (arg1),
+ [arg2] "{rsi}" (arg2)
+ : "rcx", "r11"
+ );
}
pub fn syscall3(number: usize, arg1: usize, arg2: usize, arg3: usize) usize {
return asm volatile ("syscall"
: [ret] "={rax}" (-> usize)
: [number] "{rax}" (number),
- [arg1] "{rdi}" (arg1),
- [arg2] "{rsi}" (arg2),
- [arg3] "{rdx}" (arg3)
- : "rcx", "r11");
+ [arg1] "{rdi}" (arg1),
+ [arg2] "{rsi}" (arg2),
+ [arg3] "{rdx}" (arg3)
+ : "rcx", "r11"
+ );
}
pub fn syscall4(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
return asm volatile ("syscall"
: [ret] "={rax}" (-> usize)
: [number] "{rax}" (number),
- [arg1] "{rdi}" (arg1),
- [arg2] "{rsi}" (arg2),
- [arg3] "{rdx}" (arg3),
- [arg4] "{r10}" (arg4)
- : "rcx", "r11");
+ [arg1] "{rdi}" (arg1),
+ [arg2] "{rsi}" (arg2),
+ [arg3] "{rdx}" (arg3),
+ [arg4] "{r10}" (arg4)
+ : "rcx", "r11"
+ );
}
pub fn syscall5(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
return asm volatile ("syscall"
: [ret] "={rax}" (-> usize)
: [number] "{rax}" (number),
- [arg1] "{rdi}" (arg1),
- [arg2] "{rsi}" (arg2),
- [arg3] "{rdx}" (arg3),
- [arg4] "{r10}" (arg4),
- [arg5] "{r8}" (arg5)
- : "rcx", "r11");
+ [arg1] "{rdi}" (arg1),
+ [arg2] "{rsi}" (arg2),
+ [arg3] "{rdx}" (arg3),
+ [arg4] "{r10}" (arg4),
+ [arg5] "{r8}" (arg5)
+ : "rcx", "r11"
+ );
}
-pub fn syscall6(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize,
- arg5: usize, arg6: usize) usize
-{
+pub fn syscall6(
+ number: usize,
+ arg1: usize,
+ arg2: usize,
+ arg3: usize,
+ arg4: usize,
+ arg5: usize,
+ arg6: usize,
+) usize {
return asm volatile ("syscall"
: [ret] "={rax}" (-> usize)
: [number] "{rax}" (number),
- [arg1] "{rdi}" (arg1),
- [arg2] "{rsi}" (arg2),
- [arg3] "{rdx}" (arg3),
- [arg4] "{r10}" (arg4),
- [arg5] "{r8}" (arg5),
- [arg6] "{r9}" (arg6)
- : "rcx", "r11");
+ [arg1] "{rdi}" (arg1),
+ [arg2] "{rsi}" (arg2),
+ [arg3] "{rdx}" (arg3),
+ [arg4] "{r10}" (arg4),
+ [arg5] "{r8}" (arg5),
+ [arg6] "{r9}" (arg6)
+ : "rcx", "r11"
+ );
}
+/// This matches the libc clone function.
+pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;
+
pub nakedcc fn restore_rt() void {
return asm volatile ("syscall"
:
: [number] "{rax}" (usize(SYS_rt_sigreturn))
- : "rcx", "r11");
+ : "rcx", "r11"
+ );
}
-
pub const msghdr = extern struct {
- msg_name: &u8,
+ msg_name: *u8,
msg_namelen: socklen_t,
- msg_iov: &iovec,
+ msg_iov: *iovec,
msg_iovlen: i32,
__pad1: i32,
- msg_control: &u8,
+ msg_control: *u8,
msg_controllen: socklen_t,
__pad2: socklen_t,
msg_flags: i32,
@@ -489,6 +511,16 @@ pub const timespec = extern struct {
tv_nsec: isize,
};
+pub const timeval = extern struct {
+ tv_sec: isize,
+ tv_usec: isize,
+};
+
+pub const timezone = extern struct {
+ tz_minuteswest: i32,
+ tz_dsttime: i32,
+};
+
pub const dirent = extern struct {
d_ino: usize,
d_off: usize,
@@ -496,3 +528,4 @@ pub const dirent = extern struct {
d_name: u8, // field address is the address of first byte of name
};
+pub const Elf_Symndx = u32;
diff --git a/std/os/path.zig b/std/os/path.zig
index 0ea5d5a753..d3ab0c519f 100644
--- a/std/os/path.zig
+++ b/std/os/path.zig
@@ -32,7 +32,7 @@ pub fn isSep(byte: u8) bool {
/// Naively combines a series of paths with the native path separator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: &Allocator, paths: ...) ![]u8 {
+pub fn join(allocator: *Allocator, paths: ...) ![]u8 {
if (is_windows) {
return joinWindows(allocator, paths);
} else {
@@ -40,11 +40,11 @@ pub fn join(allocator: &Allocator, paths: ...) ![]u8 {
}
}
-pub fn joinWindows(allocator: &Allocator, paths: ...) ![]u8 {
+pub fn joinWindows(allocator: *Allocator, paths: ...) ![]u8 {
return mem.join(allocator, sep_windows, paths);
}
-pub fn joinPosix(allocator: &Allocator, paths: ...) ![]u8 {
+pub fn joinPosix(allocator: *Allocator, paths: ...) ![]u8 {
return mem.join(allocator, sep_posix, paths);
}
@@ -55,9 +55,7 @@ test "os.path.join" {
assert(mem.eql(u8, try joinWindows(debug.global_allocator, "c:\\", "a", "b\\", "c"), "c:\\a\\b\\c"));
assert(mem.eql(u8, try joinWindows(debug.global_allocator, "c:\\a\\", "b\\", "c"), "c:\\a\\b\\c"));
- assert(mem.eql(u8, try joinWindows(debug.global_allocator,
- "c:\\home\\andy\\dev\\zig\\build\\lib\\zig\\std", "io.zig"),
- "c:\\home\\andy\\dev\\zig\\build\\lib\\zig\\std\\io.zig"));
+ assert(mem.eql(u8, try joinWindows(debug.global_allocator, "c:\\home\\andy\\dev\\zig\\build\\lib\\zig\\std", "io.zig"), "c:\\home\\andy\\dev\\zig\\build\\lib\\zig\\std\\io.zig"));
assert(mem.eql(u8, try joinPosix(debug.global_allocator, "/a/b", "c"), "/a/b/c"));
assert(mem.eql(u8, try joinPosix(debug.global_allocator, "/a/b/", "c"), "/a/b/c"));
@@ -65,8 +63,7 @@ test "os.path.join" {
assert(mem.eql(u8, try joinPosix(debug.global_allocator, "/", "a", "b/", "c"), "/a/b/c"));
assert(mem.eql(u8, try joinPosix(debug.global_allocator, "/a/", "b/", "c"), "/a/b/c"));
- assert(mem.eql(u8, try joinPosix(debug.global_allocator, "/home/andy/dev/zig/build/lib/zig/std", "io.zig"),
- "/home/andy/dev/zig/build/lib/zig/std/io.zig"));
+ assert(mem.eql(u8, try joinPosix(debug.global_allocator, "/home/andy/dev/zig/build/lib/zig/std", "io.zig"), "/home/andy/dev/zig/build/lib/zig/std/io.zig"));
}
pub fn isAbsolute(path: []const u8) bool {
@@ -151,22 +148,22 @@ pub const WindowsPath = struct {
pub fn windowsParsePath(path: []const u8) WindowsPath {
if (path.len >= 2 and path[1] == ':') {
- return WindowsPath {
+ return WindowsPath{
.is_abs = isAbsoluteWindows(path),
.kind = WindowsPath.Kind.Drive,
.disk_designator = path[0..2],
};
}
if (path.len >= 1 and (path[0] == '/' or path[0] == '\\') and
- (path.len == 1 or (path[1] != '/' and path[1] != '\\')))
+ (path.len == 1 or (path[1] != '/' and path[1] != '\\')))
{
- return WindowsPath {
+ return WindowsPath{
.is_abs = true,
.kind = WindowsPath.Kind.None,
.disk_designator = path[0..0],
};
}
- const relative_path = WindowsPath {
+ const relative_path = WindowsPath{
.kind = WindowsPath.Kind.None,
.disk_designator = []u8{},
.is_abs = false,
@@ -178,16 +175,16 @@ pub fn windowsParsePath(path: []const u8) WindowsPath {
// TODO when I combined these together with `inline for` the compiler crashed
{
const this_sep = '/';
- const two_sep = []u8{this_sep, this_sep};
+ const two_sep = []u8{ this_sep, this_sep };
if (mem.startsWith(u8, path, two_sep)) {
if (path[2] == this_sep) {
return relative_path;
}
var it = mem.split(path, []u8{this_sep});
- _ = (it.next() ?? return relative_path);
- _ = (it.next() ?? return relative_path);
- return WindowsPath {
+ _ = (it.next() orelse return relative_path);
+ _ = (it.next() orelse return relative_path);
+ return WindowsPath{
.is_abs = isAbsoluteWindows(path),
.kind = WindowsPath.Kind.NetworkShare,
.disk_designator = path[0..it.index],
@@ -196,16 +193,16 @@ pub fn windowsParsePath(path: []const u8) WindowsPath {
}
{
const this_sep = '\\';
- const two_sep = []u8{this_sep, this_sep};
+ const two_sep = []u8{ this_sep, this_sep };
if (mem.startsWith(u8, path, two_sep)) {
if (path[2] == this_sep) {
return relative_path;
}
var it = mem.split(path, []u8{this_sep});
- _ = (it.next() ?? return relative_path);
- _ = (it.next() ?? return relative_path);
- return WindowsPath {
+ _ = (it.next() orelse return relative_path);
+ _ = (it.next() orelse return relative_path);
+ return WindowsPath{
.is_abs = isAbsoluteWindows(path),
.kind = WindowsPath.Kind.NetworkShare,
.disk_designator = path[0..it.index],
@@ -268,7 +265,7 @@ fn networkShareServersEql(ns1: []const u8, ns2: []const u8) bool {
var it2 = mem.split(ns2, []u8{sep2});
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
- return asciiEqlIgnoreCase(??it1.next(), ??it2.next());
+ return asciiEqlIgnoreCase(it1.next().?, it2.next().?);
}
fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8) bool {
@@ -289,14 +286,14 @@ fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8
var it2 = mem.split(p2, []u8{sep2});
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
- return asciiEqlIgnoreCase(??it1.next(), ??it2.next()) and asciiEqlIgnoreCase(??it1.next(), ??it2.next());
+ return asciiEqlIgnoreCase(it1.next().?, it2.next().?) and asciiEqlIgnoreCase(it1.next().?, it2.next().?);
},
}
}
fn asciiUpper(byte: u8) u8 {
return switch (byte) {
- 'a' ... 'z' => 'A' + (byte - 'a'),
+ 'a'...'z' => 'A' + (byte - 'a'),
else => byte,
};
}
@@ -313,7 +310,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool {
}
/// Converts the command line arguments into a slice and calls `resolveSlice`.
-pub fn resolve(allocator: &Allocator, args: ...) ![]u8 {
+pub fn resolve(allocator: *Allocator, args: ...) ![]u8 {
var paths: [args.len][]const u8 = undefined;
comptime var arg_i = 0;
inline while (arg_i < args.len) : (arg_i += 1) {
@@ -323,7 +320,7 @@ pub fn resolve(allocator: &Allocator, args: ...) ![]u8 {
}
/// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`.
-pub fn resolveSlice(allocator: &Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolveSlice(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (is_windows) {
return resolveWindows(allocator, paths);
} else {
@@ -337,7 +334,7 @@ pub fn resolveSlice(allocator: &Allocator, paths: []const []const u8) ![]u8 {
/// If all paths are relative it uses the current working directory as a starting point.
/// Each drive has its own current working directory.
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
-pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(is_windows); // resolveWindows called on non windows can't use getCwd
return os.getCwd(allocator);
@@ -372,7 +369,6 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
max_size += p.len + 1;
}
-
// if we will result with a disk designator, loop again to determine
// which is the last time the disk designator is absolutely specified, if any
// and count up the max bytes for paths related to this disk designator
@@ -386,8 +382,7 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
const parsed = windowsParsePath(p);
if (parsed.kind != WindowsPath.Kind.None) {
if (parsed.kind == have_drive_kind) {
- correct_disk_designator = compareDiskDesignators(have_drive_kind,
- result_disk_designator, parsed.disk_designator);
+ correct_disk_designator = compareDiskDesignators(have_drive_kind, result_disk_designator, parsed.disk_designator);
} else {
continue;
}
@@ -404,7 +399,6 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
}
}
-
// Allocate result and fill in the disk designator, calling getCwd if we have to.
var result: []u8 = undefined;
var result_index: usize = 0;
@@ -420,8 +414,8 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
WindowsPath.Kind.NetworkShare => {
result = try allocator.alloc(u8, max_size);
var it = mem.split(paths[first_index], "/\\");
- const server_name = ??it.next();
- const other_name = ??it.next();
+ const server_name = it.next().?;
+ const other_name = it.next().?;
result[result_index] = '\\';
result_index += 1;
@@ -433,7 +427,7 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
result_index += 1;
mem.copy(u8, result[result_index..], other_name);
result_index += other_name.len;
-
+
result_disk_designator = result[0..result_index];
},
WindowsPath.Kind.None => {
@@ -478,8 +472,7 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
if (parsed.kind != WindowsPath.Kind.None) {
if (parsed.kind == have_drive_kind) {
- correct_disk_designator = compareDiskDesignators(have_drive_kind,
- result_disk_designator, parsed.disk_designator);
+ correct_disk_designator = compareDiskDesignators(have_drive_kind, result_disk_designator, parsed.disk_designator);
} else {
continue;
}
@@ -520,7 +513,7 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
/// It resolves "." and "..".
/// The result does not have a trailing path separator.
/// If all paths are relative it uses the current working directory as a starting point.
-pub fn resolvePosix(allocator: &Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(!is_windows); // resolvePosix called on windows can't use getCwd
return os.getCwd(allocator);
@@ -591,7 +584,7 @@ test "os.path.resolve" {
}
assert(mem.eql(u8, testResolveWindows([][]const u8{"."}), cwd));
} else {
- assert(mem.eql(u8, testResolvePosix([][]const u8{"a/b/c/", "../../.."}), cwd));
+ assert(mem.eql(u8, testResolvePosix([][]const u8{ "a/b/c/", "../../.." }), cwd));
assert(mem.eql(u8, testResolvePosix([][]const u8{"."}), cwd));
}
}
@@ -601,16 +594,15 @@ test "os.path.resolveWindows" {
const cwd = try os.getCwd(debug.global_allocator);
const parsed_cwd = windowsParsePath(cwd);
{
- const result = testResolveWindows([][]const u8{"/usr/local", "lib\\zig\\std\\array_list.zig"});
- const expected = try join(debug.global_allocator,
- parsed_cwd.disk_designator, "usr\\local\\lib\\zig\\std\\array_list.zig");
+ const result = testResolveWindows([][]const u8{ "/usr/local", "lib\\zig\\std\\array_list.zig" });
+ const expected = try join(debug.global_allocator, parsed_cwd.disk_designator, "usr\\local\\lib\\zig\\std\\array_list.zig");
if (parsed_cwd.kind == WindowsPath.Kind.Drive) {
expected[0] = asciiUpper(parsed_cwd.disk_designator[0]);
}
assert(mem.eql(u8, result, expected));
}
{
- const result = testResolveWindows([][]const u8{"usr/local", "lib\\zig"});
+ const result = testResolveWindows([][]const u8{ "usr/local", "lib\\zig" });
const expected = try join(debug.global_allocator, cwd, "usr\\local\\lib\\zig");
if (parsed_cwd.kind == WindowsPath.Kind.Drive) {
expected[0] = asciiUpper(parsed_cwd.disk_designator[0]);
@@ -619,33 +611,32 @@ test "os.path.resolveWindows" {
}
}
- assert(mem.eql(u8, testResolveWindows([][]const u8{"c:\\a\\b\\c", "/hi", "ok"}), "C:\\hi\\ok"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"c:/blah\\blah", "d:/games", "c:../a"}), "C:\\blah\\a"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"c:/blah\\blah", "d:/games", "C:../a"}), "C:\\blah\\a"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"c:/ignore", "d:\\a/b\\c/d", "\\e.exe"}), "D:\\e.exe"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"c:/ignore", "c:/some/file"}), "C:\\some\\file"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"d:/ignore", "d:some/dir//"}), "D:\\ignore\\some\\dir"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"//server/share", "..", "relative\\"}), "\\\\server\\share\\relative"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"c:/", "//"}), "C:\\"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"c:/", "//dir"}), "C:\\dir"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"c:/", "//server/share"}), "\\\\server\\share\\"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"c:/", "//server//share"}), "\\\\server\\share\\"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"c:/", "///some//dir"}), "C:\\some\\dir"));
- assert(mem.eql(u8, testResolveWindows([][]const u8{"C:\\foo\\tmp.3\\", "..\\tmp.3\\cycles\\root.js"}),
- "C:\\foo\\tmp.3\\cycles\\root.js"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "c:\\a\\b\\c", "/hi", "ok" }), "C:\\hi\\ok"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "c:/blah\\blah", "d:/games", "c:../a" }), "C:\\blah\\a"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "c:/blah\\blah", "d:/games", "C:../a" }), "C:\\blah\\a"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "c:/ignore", "d:\\a/b\\c/d", "\\e.exe" }), "D:\\e.exe"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "c:/ignore", "c:/some/file" }), "C:\\some\\file"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "d:/ignore", "d:some/dir//" }), "D:\\ignore\\some\\dir"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "//server/share", "..", "relative\\" }), "\\\\server\\share\\relative"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "c:/", "//" }), "C:\\"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "c:/", "//dir" }), "C:\\dir"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "c:/", "//server/share" }), "\\\\server\\share\\"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "c:/", "//server//share" }), "\\\\server\\share\\"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "c:/", "///some//dir" }), "C:\\some\\dir"));
+ assert(mem.eql(u8, testResolveWindows([][]const u8{ "C:\\foo\\tmp.3\\", "..\\tmp.3\\cycles\\root.js" }), "C:\\foo\\tmp.3\\cycles\\root.js"));
}
test "os.path.resolvePosix" {
- assert(mem.eql(u8, testResolvePosix([][]const u8{"/a/b", "c"}), "/a/b/c"));
- assert(mem.eql(u8, testResolvePosix([][]const u8{"/a/b", "c", "//d", "e///"}), "/d/e"));
- assert(mem.eql(u8, testResolvePosix([][]const u8{"/a/b/c", "..", "../"}), "/a"));
- assert(mem.eql(u8, testResolvePosix([][]const u8{"/", "..", ".."}), "/"));
+ assert(mem.eql(u8, testResolvePosix([][]const u8{ "/a/b", "c" }), "/a/b/c"));
+ assert(mem.eql(u8, testResolvePosix([][]const u8{ "/a/b", "c", "//d", "e///" }), "/d/e"));
+ assert(mem.eql(u8, testResolvePosix([][]const u8{ "/a/b/c", "..", "../" }), "/a"));
+ assert(mem.eql(u8, testResolvePosix([][]const u8{ "/", "..", ".." }), "/"));
assert(mem.eql(u8, testResolvePosix([][]const u8{"/a/b/c/"}), "/a/b/c"));
- assert(mem.eql(u8, testResolvePosix([][]const u8{"/var/lib", "../", "file/"}), "/var/file"));
- assert(mem.eql(u8, testResolvePosix([][]const u8{"/var/lib", "/../", "file/"}), "/file"));
- assert(mem.eql(u8, testResolvePosix([][]const u8{"/some/dir", ".", "/absolute/"}), "/absolute"));
- assert(mem.eql(u8, testResolvePosix([][]const u8{"/foo/tmp.3/", "../tmp.3/cycles/root.js"}), "/foo/tmp.3/cycles/root.js"));
+ assert(mem.eql(u8, testResolvePosix([][]const u8{ "/var/lib", "../", "file/" }), "/var/file"));
+ assert(mem.eql(u8, testResolvePosix([][]const u8{ "/var/lib", "/../", "file/" }), "/file"));
+ assert(mem.eql(u8, testResolvePosix([][]const u8{ "/some/dir", ".", "/absolute/" }), "/absolute"));
+ assert(mem.eql(u8, testResolvePosix([][]const u8{ "/foo/tmp.3/", "../tmp.3/cycles/root.js" }), "/foo/tmp.3/cycles/root.js"));
}
fn testResolveWindows(paths: []const []const u8) []u8 {
@@ -656,7 +647,9 @@ fn testResolvePosix(paths: []const []const u8) []u8 {
return resolvePosix(debug.global_allocator, paths) catch unreachable;
}
-pub fn dirname(path: []const u8) []const u8 {
+/// If the path is a file in the current directory (no directory component)
+/// then returns null
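+/// e.g. dirname("/a/b/c") returns "/a/b", while dirname("c") returns null.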
+pub fn dirname(path: []const u8) ?[]const u8 {
if (is_windows) {
return dirnameWindows(path);
} else {
@@ -664,9 +657,9 @@ pub fn dirname(path: []const u8) []const u8 {
}
}
-pub fn dirnameWindows(path: []const u8) []const u8 {
+pub fn dirnameWindows(path: []const u8) ?[]const u8 {
if (path.len == 0)
- return path[0..0];
+ return null;
const root_slice = diskDesignatorWindows(path);
if (path.len == root_slice.len)
@@ -678,13 +671,13 @@ pub fn dirnameWindows(path: []const u8) []const u8 {
while ((path[end_index] == '/' or path[end_index] == '\\') and end_index > root_slice.len) {
if (end_index == 0)
- return path[0..0];
+ return null;
end_index -= 1;
}
while (path[end_index] != '/' and path[end_index] != '\\' and end_index > root_slice.len) {
if (end_index == 0)
- return path[0..0];
+ return null;
end_index -= 1;
}
@@ -692,12 +685,15 @@ pub fn dirnameWindows(path: []const u8) []const u8 {
end_index += 1;
}
+ if (end_index == 0)
+ return null;
+
return path[0..end_index];
}
-pub fn dirnamePosix(path: []const u8) []const u8 {
+pub fn dirnamePosix(path: []const u8) ?[]const u8 {
if (path.len == 0)
- return path[0..0];
+ return null;
var end_index: usize = path.len - 1;
while (path[end_index] == '/') {
@@ -708,13 +704,16 @@ pub fn dirnamePosix(path: []const u8) []const u8 {
while (path[end_index] != '/') {
if (end_index == 0)
- return path[0..0];
+ return null;
end_index -= 1;
}
if (end_index == 0 and path[end_index] == '/')
return path[0..1];
+ if (end_index == 0)
+ return null;
+
return path[0..end_index];
}
@@ -724,10 +723,10 @@ test "os.path.dirnamePosix" {
testDirnamePosix("/a", "/");
testDirnamePosix("/", "/");
testDirnamePosix("////", "/");
- testDirnamePosix("", "");
- testDirnamePosix("a", "");
- testDirnamePosix("a/", "");
- testDirnamePosix("a//", "");
+ testDirnamePosix("", null);
+ testDirnamePosix("a", null);
+ testDirnamePosix("a/", null);
+ testDirnamePosix("a//", null);
}
test "os.path.dirnameWindows" {
@@ -749,7 +748,7 @@ test "os.path.dirnameWindows" {
testDirnameWindows("c:foo\\bar", "c:foo");
testDirnameWindows("c:foo\\bar\\", "c:foo");
testDirnameWindows("c:foo\\bar\\baz", "c:foo\\bar");
- testDirnameWindows("file:stream", "");
+ testDirnameWindows("file:stream", null);
testDirnameWindows("dir\\file:stream", "dir");
testDirnameWindows("\\\\unc\\share", "\\\\unc\\share");
testDirnameWindows("\\\\unc\\share\\foo", "\\\\unc\\share\\");
@@ -760,18 +759,26 @@ test "os.path.dirnameWindows" {
testDirnameWindows("/a/b/", "/a");
testDirnameWindows("/a/b", "/a");
testDirnameWindows("/a", "/");
- testDirnameWindows("", "");
+ testDirnameWindows("", null);
testDirnameWindows("/", "/");
testDirnameWindows("////", "/");
- testDirnameWindows("foo", "");
+ testDirnameWindows("foo", null);
}
-fn testDirnamePosix(input: []const u8, expected_output: []const u8) void {
- assert(mem.eql(u8, dirnamePosix(input), expected_output));
+fn testDirnamePosix(input: []const u8, expected_output: ?[]const u8) void {
+ if (dirnamePosix(input)) |output| {
+ assert(mem.eql(u8, output, expected_output.?));
+ } else {
+ assert(expected_output == null);
+ }
}
-fn testDirnameWindows(input: []const u8, expected_output: []const u8) void {
- assert(mem.eql(u8, dirnameWindows(input), expected_output));
+fn testDirnameWindows(input: []const u8, expected_output: ?[]const u8) void {
+ if (dirnameWindows(input)) |output| {
+ assert(mem.eql(u8, output, expected_output.?));
+ } else {
+ assert(expected_output == null);
+ }
}
pub fn basename(path: []const u8) []const u8 {
@@ -800,7 +807,7 @@ pub fn basenamePosix(path: []const u8) []const u8 {
start_index -= 1;
}
- return path[start_index + 1..end_index];
+ return path[start_index + 1 .. end_index];
}
pub fn basenameWindows(path: []const u8) []const u8 {
@@ -832,7 +839,7 @@ pub fn basenameWindows(path: []const u8) []const u8 {
start_index -= 1;
}
- return path[start_index + 1..end_index];
+ return path[start_index + 1 .. end_index];
}
test "os.path.basename" {
@@ -890,7 +897,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) void {
/// resolve to the same path (after calling `resolve` on each), a zero-length
/// string is returned.
/// On Windows this canonicalizes the drive to a capital letter and paths to `\\`.
-pub fn relative(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
if (is_windows) {
return relativeWindows(allocator, from, to);
} else {
@@ -898,7 +905,7 @@ pub fn relative(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
}
}
-pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolveWindows(allocator, [][]const u8{from});
defer allocator.free(resolved_from);
@@ -930,7 +937,7 @@ pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8)
var from_it = mem.split(resolved_from, "/\\");
var to_it = mem.split(resolved_to, "/\\");
while (true) {
- const from_component = from_it.next() ?? return mem.dupe(allocator, u8, to_it.rest());
+ const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
@@ -971,7 +978,7 @@ pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8)
return []u8{};
}
-pub fn relativePosix(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolvePosix(allocator, [][]const u8{from});
defer allocator.free(resolved_from);
@@ -981,7 +988,7 @@ pub fn relativePosix(allocator: &Allocator, from: []const u8, to: []const u8) ![
var from_it = mem.split(resolved_from, "/");
var to_it = mem.split(resolved_to, "/");
while (true) {
- const from_component = from_it.next() ?? return mem.dupe(allocator, u8, to_it.rest());
+ const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
if (mem.eql(u8, from_component, to_component))
@@ -1006,7 +1013,7 @@ pub fn relativePosix(allocator: &Allocator, from: []const u8, to: []const u8) ![
}
if (to_rest.len == 0) {
// shave off the trailing slash
- return result[0..result_index - 1];
+ return result[0 .. result_index - 1];
}
mem.copy(u8, result[result_index..], to_rest);
@@ -1070,7 +1077,7 @@ fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []cons
/// Expands all symbolic links and resolves references to `.`, `..`, and
/// extra `/` characters in ::pathname.
/// Caller must deallocate result.
-pub fn real(allocator: &Allocator, pathname: []const u8) ![]u8 {
+pub fn real(allocator: *Allocator, pathname: []const u8) ![]u8 {
switch (builtin.os) {
Os.windows => {
const pathname_buf = try allocator.alloc(u8, pathname.len + 1);
@@ -1079,9 +1086,7 @@ pub fn real(allocator: &Allocator, pathname: []const u8) ![]u8 {
mem.copy(u8, pathname_buf, pathname);
pathname_buf[pathname.len] = 0;
- const h_file = windows.CreateFileA(pathname_buf.ptr,
- windows.GENERIC_READ, windows.FILE_SHARE_READ, null, windows.OPEN_EXISTING,
- windows.FILE_ATTRIBUTE_NORMAL, null);
+ const h_file = windows.CreateFileA(pathname_buf.ptr, windows.GENERIC_READ, windows.FILE_SHARE_READ, null, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, null);
if (h_file == windows.INVALID_HANDLE_VALUE) {
const err = windows.GetLastError();
return switch (err) {
@@ -1161,7 +1166,7 @@ pub fn real(allocator: &Allocator, pathname: []const u8) ![]u8 {
return allocator.shrink(u8, result_buf, cstr.len(result_buf.ptr));
},
Os.linux => {
- const fd = try os.posixOpen(allocator, pathname, posix.O_PATH|posix.O_NONBLOCK|posix.O_CLOEXEC, 0);
+ const fd = try os.posixOpen(allocator, pathname, posix.O_PATH | posix.O_NONBLOCK | posix.O_CLOEXEC, 0);
defer os.close(fd);
var buf: ["/proc/self/fd/-2147483648".len]u8 = undefined;
diff --git a/std/os/test.zig b/std/os/test.zig
index 718d1ce2c8..9e795e8ad2 100644
--- a/std/os/test.zig
+++ b/std/os/test.zig
@@ -6,13 +6,10 @@ const io = std.io;
const a = std.debug.global_allocator;
const builtin = @import("builtin");
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
test "makePath, put some files in it, deleteTree" {
- if (builtin.os == builtin.Os.windows) {
- // TODO implement os.Dir for windows
- // https://github.com/zig-lang/zig/issues/709
- return;
- }
try os.makePath(a, "os_test_tmp/b/c");
try io.writeFile(a, "os_test_tmp/b/c/file.txt", "nonsense");
try io.writeFile(a, "os_test_tmp/b/file2.txt", "blah");
@@ -25,18 +22,44 @@ test "makePath, put some files in it, deleteTree" {
}
test "access file" {
- if (builtin.os == builtin.Os.windows) {
- return;
- }
-
try os.makePath(a, "os_test_tmp");
- if (os.File.access(a, "os_test_tmp/file.txt", os.default_file_mode)) |ok| {
- unreachable;
+ if (os.File.access(a, "os_test_tmp/file.txt")) |ok| {
+ @panic("expected error");
} else |err| {
assert(err == error.NotFound);
}
try io.writeFile(a, "os_test_tmp/file.txt", "");
- assert((try os.File.access(a, "os_test_tmp/file.txt", os.default_file_mode)) == true);
+ try os.File.access(a, "os_test_tmp/file.txt");
try os.deleteTree(a, "os_test_tmp");
}
+
+test "spawn threads" {
+ var shared_ctx: i32 = 1;
+
+ const thread1 = try std.os.spawnThread({}, start1);
+ const thread2 = try std.os.spawnThread(&shared_ctx, start2);
+ const thread3 = try std.os.spawnThread(&shared_ctx, start2);
+ const thread4 = try std.os.spawnThread(&shared_ctx, start2);
+
+ thread1.wait();
+ thread2.wait();
+ thread3.wait();
+ thread4.wait();
+
+ assert(shared_ctx == 4);
+}
+
+fn start1(ctx: void) u8 {
+ return 0;
+}
+
+fn start2(ctx: *i32) u8 {
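+    // Each spawned worker atomically adds 1 to the shared counter; three
+    // workers take the initial value of 1 to the 4 asserted in the test.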
+ _ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ return 0;
+}
+
+test "cpu count" {
+ const cpu_count = try std.os.cpuCount(a);
+ assert(cpu_count >= 1);
+}
diff --git a/std/os/time.zig b/std/os/time.zig
new file mode 100644
index 0000000000..795605d7a9
--- /dev/null
+++ b/std/os/time.zig
@@ -0,0 +1,282 @@
+const std = @import("../index.zig");
+const builtin = @import("builtin");
+const Os = builtin.Os;
+const debug = std.debug;
+
+const windows = std.os.windows;
+const linux = std.os.linux;
+const darwin = std.os.darwin;
+const posix = std.os.posix;
+
+pub const epoch = @import("epoch.zig");
+
+/// Sleep for the specified duration
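+/// e.g. sleep(1, 0) sleeps one second; sleep(0, 500 * 1000 * 1000) sleeps half a second.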
+pub fn sleep(seconds: usize, nanoseconds: usize) void {
+ switch (builtin.os) {
+ Os.linux, Os.macosx, Os.ios => {
+ posixSleep(@intCast(u63, seconds), @intCast(u63, nanoseconds));
+ },
+ Os.windows => {
+ const ns_per_ms = ns_per_s / ms_per_s;
+ const milliseconds = seconds * ms_per_s + nanoseconds / ns_per_ms;
+ windows.Sleep(@intCast(windows.DWORD, milliseconds));
+ },
+ else => @compileError("Unsupported OS"),
+ }
+}
+
+pub fn posixSleep(seconds: u63, nanoseconds: u63) void {
+ var req = posix.timespec{
+ .tv_sec = seconds,
+ .tv_nsec = nanoseconds,
+ };
+ var rem: posix.timespec = undefined;
+ while (true) {
+ const ret_val = posix.nanosleep(&req, &rem);
+ const err = posix.getErrno(ret_val);
+ if (err == 0) return;
+ switch (err) {
+ posix.EFAULT => unreachable,
+ posix.EINVAL => {
+ // Sometimes Darwin returns EINVAL for no reason.
+ // We treat it as a spurious wakeup.
+ return;
+ },
+ posix.EINTR => {
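+                // Interrupted by a signal; retry with the remaining time.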
+ req = rem;
+ continue;
+ },
+ else => return,
+ }
+ }
+}
+
+/// Get the posix timestamp, UTC, in seconds
+pub fn timestamp() u64 {
+ return @divFloor(milliTimestamp(), ms_per_s);
+}
+
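+// Platform dispatch happens at compile time: the switch below selects the
+// implementation for the target OS, so callers simply call milliTimestamp().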
+/// Get the posix timestamp, UTC, in milliseconds
+pub const milliTimestamp = switch (builtin.os) {
+ Os.windows => milliTimestampWindows,
+ Os.linux => milliTimestampPosix,
+ Os.macosx, Os.ios => milliTimestampDarwin,
+ else => @compileError("Unsupported OS"),
+};
+
+fn milliTimestampWindows() u64 {
+    //FILETIME has a granularity of 100 nanoseconds
+ // and uses the NTFS/Windows epoch
+ var ft: windows.FILETIME = undefined;
+ windows.GetSystemTimeAsFileTime(&ft);
+ const hns_per_ms = (ns_per_s / 100) / ms_per_s;
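+    // (1_000_000_000 / 100) / 1000 = 10000 hundred-nanosecond intervals per millisecond.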
+ const epoch_adj = epoch.windows * ms_per_s;
+
+ const ft64 = (u64(ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+ return @divFloor(ft64, hns_per_ms) - -epoch_adj;
+}
+
+fn milliTimestampDarwin() u64 {
+ //Sources suggest MacOS 10.12 has support for
+ // posix clock_gettime.
+ var tv: darwin.timeval = undefined;
+ var err = darwin.gettimeofday(&tv, null);
+ debug.assert(err == 0);
+ const sec_ms = @intCast(u64, tv.tv_sec) * ms_per_s;
+ const usec_ms = @divFloor(@intCast(u64, tv.tv_usec), us_per_s / ms_per_s);
+ return u64(sec_ms) + u64(usec_ms);
+}
+
+fn milliTimestampPosix() u64 {
+ //From what I can tell there's no reason clock_gettime
+ // should ever fail for us with CLOCK_REALTIME,
+ // seccomp aside.
+ var ts: posix.timespec = undefined;
+ const err = posix.clock_gettime(posix.CLOCK_REALTIME, &ts);
+ debug.assert(err == 0);
+ const sec_ms = @intCast(u64, ts.tv_sec) * ms_per_s;
+ const nsec_ms = @divFloor(@intCast(u64, ts.tv_nsec), ns_per_s / ms_per_s);
+ return sec_ms + nsec_ms;
+}
+
+/// Divisions of a second
+pub const ns_per_s = 1000000000;
+pub const us_per_s = 1000000;
+pub const ms_per_s = 1000;
+pub const cs_per_s = 100;
+
+/// Common time divisions
+pub const s_per_min = 60;
+pub const s_per_hour = s_per_min * 60;
+pub const s_per_day = s_per_hour * 24;
+pub const s_per_week = s_per_day * 7;
+
+/// A monotonic high-performance timer.
+/// Timer.start() must be called to initialize the struct, which captures
+/// the counter frequency on windows and darwin, records the resolution,
+/// and gives the user an oportunity to check for the existnece of
+/// monotonic clocks without forcing them to check for error on each read.
+/// .resolution is in nanoseconds on all platforms but .start_time's meaning
+/// depends on the OS. On Windows and Darwin it is a hardware counter
+/// value that requires calculation to convert to a meaninful unit.
+pub const Timer = struct {
+
+ //if we used resolution's value when performing the
+ // performance counter calc on windows/darwin, it would
+ // be less precise
+ frequency: switch (builtin.os) {
+ Os.windows => u64,
+ Os.macosx, Os.ios => darwin.mach_timebase_info_data,
+ else => void,
+ },
+ resolution: u64,
+ start_time: u64,
+
+ //At some point we may change our minds on RAW, but for now we're
+ // sticking with posix standard MONOTONIC. For more information, see:
+ // https://github.com/ziglang/zig/pull/933
+ //
+ //const monotonic_clock_id = switch(builtin.os) {
+ // Os.linux => linux.CLOCK_MONOTONIC_RAW,
+ // else => posix.CLOCK_MONOTONIC,
+ //};
+ const monotonic_clock_id = posix.CLOCK_MONOTONIC;
+ /// Initialize the timer structure.
+    //This gives us an opportunity to grab the counter frequency on Windows.
+ //On Windows: QueryPerformanceCounter will succeed on anything >= XP/2000.
+ //On Posix: CLOCK_MONOTONIC will only fail if the monotonic counter is not
+ // supported, or if the timespec pointer is out of bounds, which should be
+    //  impossible here barring cosmic rays or other such occurrences of
+ // incredibly bad luck.
+ //On Darwin: This cannot fail, as far as I am able to tell.
+ const TimerError = error{
+ TimerUnsupported,
+ Unexpected,
+ };
+ pub fn start() TimerError!Timer {
+ var self: Timer = undefined;
+
+ switch (builtin.os) {
+ Os.windows => {
+ var freq: i64 = undefined;
+ var err = windows.QueryPerformanceFrequency(&freq);
+ if (err == windows.FALSE) return error.TimerUnsupported;
+ self.frequency = @intCast(u64, freq);
+ self.resolution = @divFloor(ns_per_s, self.frequency);
+
+ var start_time: i64 = undefined;
+ err = windows.QueryPerformanceCounter(&start_time);
+ debug.assert(err != windows.FALSE);
+ self.start_time = @intCast(u64, start_time);
+ },
+ Os.linux => {
+ //On Linux, seccomp can do arbitrary things to our ability to call
+ // syscalls, including return any errno value it wants and
+ // inconsistently throwing errors. Since we can't account for
+ // abuses of seccomp in a reasonable way, we'll assume that if
+ // seccomp is going to block us it will at least do so consistently
+ var ts: posix.timespec = undefined;
+ var result = posix.clock_getres(monotonic_clock_id, &ts);
+ var errno = posix.getErrno(result);
+ switch (errno) {
+ 0 => {},
+ posix.EINVAL => return error.TimerUnsupported,
+ else => return std.os.unexpectedErrorPosix(errno),
+ }
+ self.resolution = @intCast(u64, ts.tv_sec) * u64(ns_per_s) + @intCast(u64, ts.tv_nsec);
+
+ result = posix.clock_gettime(monotonic_clock_id, &ts);
+ errno = posix.getErrno(result);
+ if (errno != 0) return std.os.unexpectedErrorPosix(errno);
+ self.start_time = @intCast(u64, ts.tv_sec) * u64(ns_per_s) + @intCast(u64, ts.tv_nsec);
+ },
+ Os.macosx, Os.ios => {
+ darwin.mach_timebase_info(&self.frequency);
+ self.resolution = @divFloor(self.frequency.numer, self.frequency.denom);
+ self.start_time = darwin.mach_absolute_time();
+ },
+ else => @compileError("Unsupported OS"),
+ }
+ return self;
+ }
+
+ /// Reads the timer value since start or the last reset in nanoseconds
+ pub fn read(self: *Timer) u64 {
+ var clock = clockNative() - self.start_time;
+ return switch (builtin.os) {
+ Os.windows => @divFloor(clock * ns_per_s, self.frequency),
+ Os.linux => clock,
+ Os.macosx, Os.ios => @divFloor(clock * self.frequency.numer, self.frequency.denom),
+ else => @compileError("Unsupported OS"),
+ };
+ }
+
+ /// Resets the timer value to 0/now.
+ pub fn reset(self: *Timer) void {
+ self.start_time = clockNative();
+ }
+
+ /// Returns the current value of the timer in nanoseconds, then resets it
+ pub fn lap(self: *Timer) u64 {
+ var now = clockNative();
+ var lap_time = self.read();
+ self.start_time = now;
+ return lap_time;
+ }
+
+ const clockNative = switch (builtin.os) {
+ Os.windows => clockWindows,
+ Os.linux => clockLinux,
+ Os.macosx, Os.ios => clockDarwin,
+ else => @compileError("Unsupported OS"),
+ };
+
+ fn clockWindows() u64 {
+ var result: i64 = undefined;
+ var err = windows.QueryPerformanceCounter(&result);
+ debug.assert(err != windows.FALSE);
+ return @intCast(u64, result);
+ }
+
+ fn clockDarwin() u64 {
+ return darwin.mach_absolute_time();
+ }
+
+ fn clockLinux() u64 {
+ var ts: posix.timespec = undefined;
+ var result = posix.clock_gettime(monotonic_clock_id, &ts);
+ debug.assert(posix.getErrno(result) == 0);
+ return @intCast(u64, ts.tv_sec) * u64(ns_per_s) + @intCast(u64, ts.tv_nsec);
+ }
+};
+
+test "os.time.sleep" {
+ sleep(0, 1);
+}
+
+test "os.time.timestamp" {
+ const ns_per_ms = (ns_per_s / ms_per_s);
+ const margin = 50;
+
+ const time_0 = milliTimestamp();
+ sleep(0, ns_per_ms);
+ const time_1 = milliTimestamp();
+ const interval = time_1 - time_0;
+ debug.assert(interval > 0 and interval < margin);
+}
+
+test "os.time.Timer" {
+ const ns_per_ms = (ns_per_s / ms_per_s);
+ const margin = ns_per_ms * 150;
+
+ var timer = try Timer.start();
+ sleep(0, 10 * ns_per_ms);
+ const time_0 = timer.read();
+ debug.assert(time_0 > 0 and time_0 < margin);
+
+ const time_1 = timer.lap();
+ debug.assert(time_1 >= time_0);
+
+ timer.reset();
+ debug.assert(timer.read() < time_1);
+}
diff --git a/std/os/windows/advapi32.zig b/std/os/windows/advapi32.zig
new file mode 100644
index 0000000000..2f3195475c
--- /dev/null
+++ b/std/os/windows/advapi32.zig
@@ -0,0 +1,35 @@
+use @import("index.zig");
+
+pub const PROV_RSA_FULL = 1;
+
+pub const REGSAM = ACCESS_MASK;
+pub const ACCESS_MASK = DWORD;
+pub const PHKEY = *HKEY;
+pub const HKEY = *HKEY__;
+pub const HKEY__ = extern struct {
+ unused: c_int,
+};
+pub const LSTATUS = LONG;
+
+pub extern "advapi32" stdcallcc fn CryptAcquireContextA(
+ phProv: *HCRYPTPROV,
+ pszContainer: ?LPCSTR,
+ pszProvider: ?LPCSTR,
+ dwProvType: DWORD,
+ dwFlags: DWORD,
+) BOOL;
+
+pub extern "advapi32" stdcallcc fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) BOOL;
+
+pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: [*]BYTE) BOOL;
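+// Illustrative sketch of the usual call sequence (not part of this change;
+// `buf: []u8` is assumed and dwFlags may instead need CRYPT_VERIFYCONTEXT):
+//     var prov: HCRYPTPROV = undefined;
+//     if (CryptAcquireContextA(&prov, null, null, PROV_RSA_FULL, 0) == 0) return error.Unexpected;
+//     defer _ = CryptReleaseContext(prov, 0);
+//     if (CryptGenRandom(prov, @intCast(DWORD, buf.len), buf.ptr) == 0) return error.Unexpected;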
+
+pub extern "advapi32" stdcallcc fn RegOpenKeyExW(hKey: HKEY, lpSubKey: LPCWSTR, ulOptions: DWORD, samDesired: REGSAM, phkResult: *HKEY) LSTATUS;
+
+pub extern "advapi32" stdcallcc fn RegQueryValueExW(hKey: HKEY, lpValueName: LPCWSTR, lpReserved: LPDWORD, lpType: LPDWORD, lpData: LPBYTE, lpcbData: LPDWORD) LSTATUS;
+
+// RtlGenRandom is known as SystemFunction036 under advapi32
+// http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx
+pub extern "advapi32" stdcallcc fn SystemFunction036(output: [*]u8, length: usize) BOOL;
+pub const RtlGenRandom = SystemFunction036;
diff --git a/std/os/windows/error.zig b/std/os/windows/error.zig
index 6a4087ab97..f90945d00e 100644
--- a/std/os/windows/error.zig
+++ b/std/os/windows/error.zig
@@ -1,2379 +1,3567 @@
/// The operation completed successfully.
pub const SUCCESS = 0;
+
/// Incorrect function.
pub const INVALID_FUNCTION = 1;
+
/// The system cannot find the file specified.
pub const FILE_NOT_FOUND = 2;
+
/// The system cannot find the path specified.
pub const PATH_NOT_FOUND = 3;
+
/// The system cannot open the file.
pub const TOO_MANY_OPEN_FILES = 4;
+
/// Access is denied.
pub const ACCESS_DENIED = 5;
+
/// The handle is invalid.
pub const INVALID_HANDLE = 6;
+
/// The storage control blocks were destroyed.
pub const ARENA_TRASHED = 7;
+
/// Not enough storage is available to process this command.
pub const NOT_ENOUGH_MEMORY = 8;
+
/// The storage control block address is invalid.
pub const INVALID_BLOCK = 9;
+
/// The environment is incorrect.
pub const BAD_ENVIRONMENT = 10;
+
/// An attempt was made to load a program with an incorrect format.
pub const BAD_FORMAT = 11;
+
/// The access code is invalid.
pub const INVALID_ACCESS = 12;
+
/// The data is invalid.
pub const INVALID_DATA = 13;
+
/// Not enough storage is available to complete this operation.
pub const OUTOFMEMORY = 14;
+
/// The system cannot find the drive specified.
pub const INVALID_DRIVE = 15;
+
/// The directory cannot be removed.
pub const CURRENT_DIRECTORY = 16;
+
/// The system cannot move the file to a different disk drive.
pub const NOT_SAME_DEVICE = 17;
+
/// There are no more files.
pub const NO_MORE_FILES = 18;
+
/// The media is write protected.
pub const WRITE_PROTECT = 19;
+
/// The system cannot find the device specified.
pub const BAD_UNIT = 20;
+
/// The device is not ready.
pub const NOT_READY = 21;
+
/// The device does not recognize the command.
pub const BAD_COMMAND = 22;
+
/// Data error (cyclic redundancy check).
pub const CRC = 23;
+
/// The program issued a command but the command length is incorrect.
pub const BAD_LENGTH = 24;
+
/// The drive cannot locate a specific area or track on the disk.
pub const SEEK = 25;
+
/// The specified disk or diskette cannot be accessed.
pub const NOT_DOS_DISK = 26;
+
/// The drive cannot find the sector requested.
pub const SECTOR_NOT_FOUND = 27;
+
/// The printer is out of paper.
pub const OUT_OF_PAPER = 28;
+
/// The system cannot write to the specified device.
pub const WRITE_FAULT = 29;
+
/// The system cannot read from the specified device.
pub const READ_FAULT = 30;
+
/// A device attached to the system is not functioning.
pub const GEN_FAILURE = 31;
+
/// The process cannot access the file because it is being used by another process.
pub const SHARING_VIOLATION = 32;
+
/// The process cannot access the file because another process has locked a portion of the file.
pub const LOCK_VIOLATION = 33;
+
/// The wrong diskette is in the drive. Insert %2 (Volume Serial Number: %3) into drive %1.
pub const WRONG_DISK = 34;
+
/// Too many files opened for sharing.
pub const SHARING_BUFFER_EXCEEDED = 36;
+
/// Reached the end of the file.
pub const HANDLE_EOF = 38;
+
/// The disk is full.
pub const HANDLE_DISK_FULL = 39;
+
/// The request is not supported.
pub const NOT_SUPPORTED = 50;
+
/// Windows cannot find the network path. Verify that the network path is correct and the destination computer is not busy or turned off. If Windows still cannot find the network path, contact your network administrator.
pub const REM_NOT_LIST = 51;
+
/// You were not connected because a duplicate name exists on the network. If joining a domain, go to System in Control Panel to change the computer name and try again. If joining a workgroup, choose another workgroup name.
pub const DUP_NAME = 52;
+
/// The network path was not found.
pub const BAD_NETPATH = 53;
+
/// The network is busy.
pub const NETWORK_BUSY = 54;
+
/// The specified network resource or device is no longer available.
pub const DEV_NOT_EXIST = 55;
+
/// The network BIOS command limit has been reached.
pub const TOO_MANY_CMDS = 56;
+
/// A network adapter hardware error occurred.
pub const ADAP_HDW_ERR = 57;
+
/// The specified server cannot perform the requested operation.
pub const BAD_NET_RESP = 58;
+
/// An unexpected network error occurred.
pub const UNEXP_NET_ERR = 59;
+
/// The remote adapter is not compatible.
pub const BAD_REM_ADAP = 60;
+
/// The printer queue is full.
pub const PRINTQ_FULL = 61;
+
/// Space to store the file waiting to be printed is not available on the server.
pub const NO_SPOOL_SPACE = 62;
+
/// Your file waiting to be printed was deleted.
pub const PRINT_CANCELLED = 63;
+
/// The specified network name is no longer available.
pub const NETNAME_DELETED = 64;
+
/// Network access is denied.
pub const NETWORK_ACCESS_DENIED = 65;
+
/// The network resource type is not correct.
pub const BAD_DEV_TYPE = 66;
+
/// The network name cannot be found.
pub const BAD_NET_NAME = 67;
+
/// The name limit for the local computer network adapter card was exceeded.
pub const TOO_MANY_NAMES = 68;
+
/// The network BIOS session limit was exceeded.
pub const TOO_MANY_SESS = 69;
+
/// The remote server has been paused or is in the process of being started.
pub const SHARING_PAUSED = 70;
+
/// No more connections can be made to this remote computer at this time because there are already as many connections as the computer can accept.
pub const REQ_NOT_ACCEP = 71;
+
/// The specified printer or disk device has been paused.
pub const REDIR_PAUSED = 72;
+
/// The file exists.
pub const FILE_EXISTS = 80;
+
/// The directory or file cannot be created.
pub const CANNOT_MAKE = 82;
+
/// Fail on INT 24.
pub const FAIL_I24 = 83;
+
/// Storage to process this request is not available.
pub const OUT_OF_STRUCTURES = 84;
+
/// The local device name is already in use.
pub const ALREADY_ASSIGNED = 85;
+
/// The specified network password is not correct.
pub const INVALID_PASSWORD = 86;
+
/// The parameter is incorrect.
pub const INVALID_PARAMETER = 87;
+
/// A write fault occurred on the network.
pub const NET_WRITE_FAULT = 88;
+
/// The system cannot start another process at this time.
pub const NO_PROC_SLOTS = 89;
+
/// Cannot create another system semaphore.
pub const TOO_MANY_SEMAPHORES = 100;
+
/// The exclusive semaphore is owned by another process.
pub const EXCL_SEM_ALREADY_OWNED = 101;
+
/// The semaphore is set and cannot be closed.
pub const SEM_IS_SET = 102;
+
/// The semaphore cannot be set again.
pub const TOO_MANY_SEM_REQUESTS = 103;
+
/// Cannot request exclusive semaphores at interrupt time.
pub const INVALID_AT_INTERRUPT_TIME = 104;
+
/// The previous ownership of this semaphore has ended.
pub const SEM_OWNER_DIED = 105;
+
/// Insert the diskette for drive %1.
pub const SEM_USER_LIMIT = 106;
+
/// The program stopped because an alternate diskette was not inserted.
pub const DISK_CHANGE = 107;
+
/// The disk is in use or locked by another process.
pub const DRIVE_LOCKED = 108;
+
/// The pipe has been ended.
pub const BROKEN_PIPE = 109;
+
/// The system cannot open the device or file specified.
pub const OPEN_FAILED = 110;
+
/// The file name is too long.
pub const BUFFER_OVERFLOW = 111;
+
/// There is not enough space on the disk.
pub const DISK_FULL = 112;
+
/// No more internal file identifiers available.
pub const NO_MORE_SEARCH_HANDLES = 113;
+
/// The target internal file identifier is incorrect.
pub const INVALID_TARGET_HANDLE = 114;
+
/// The IOCTL call made by the application program is not correct.
pub const INVALID_CATEGORY = 117;
+
/// The verify-on-write switch parameter value is not correct.
pub const INVALID_VERIFY_SWITCH = 118;
+
/// The system does not support the command requested.
pub const BAD_DRIVER_LEVEL = 119;
+
/// This function is not supported on this system.
pub const CALL_NOT_IMPLEMENTED = 120;
+
/// The semaphore timeout period has expired.
pub const SEM_TIMEOUT = 121;
+
/// The data area passed to a system call is too small.
pub const INSUFFICIENT_BUFFER = 122;
+
/// The filename, directory name, or volume label syntax is incorrect.
pub const INVALID_NAME = 123;
+
/// The system call level is not correct.
pub const INVALID_LEVEL = 124;
+
/// The disk has no volume label.
pub const NO_VOLUME_LABEL = 125;
+
/// The specified module could not be found.
pub const MOD_NOT_FOUND = 126;
+
/// The specified procedure could not be found.
pub const PROC_NOT_FOUND = 127;
+
/// There are no child processes to wait for.
pub const WAIT_NO_CHILDREN = 128;
+
/// The %1 application cannot be run in Win32 mode.
pub const CHILD_NOT_COMPLETE = 129;
+
/// Attempt to use a file handle to an open disk partition for an operation other than raw disk I/O.
pub const DIRECT_ACCESS_HANDLE = 130;
+
/// An attempt was made to move the file pointer before the beginning of the file.
pub const NEGATIVE_SEEK = 131;
+
/// The file pointer cannot be set on the specified device or file.
pub const SEEK_ON_DEVICE = 132;
+
/// A JOIN or SUBST command cannot be used for a drive that contains previously joined drives.
pub const IS_JOIN_TARGET = 133;
+
/// An attempt was made to use a JOIN or SUBST command on a drive that has already been joined.
pub const IS_JOINED = 134;
+
/// An attempt was made to use a JOIN or SUBST command on a drive that has already been substituted.
pub const IS_SUBSTED = 135;
+
/// The system tried to delete the JOIN of a drive that is not joined.
pub const NOT_JOINED = 136;
+
/// The system tried to delete the substitution of a drive that is not substituted.
pub const NOT_SUBSTED = 137;
+
/// The system tried to join a drive to a directory on a joined drive.
pub const JOIN_TO_JOIN = 138;
+
/// The system tried to substitute a drive to a directory on a substituted drive.
pub const SUBST_TO_SUBST = 139;
+
/// The system tried to join a drive to a directory on a substituted drive.
pub const JOIN_TO_SUBST = 140;
+
/// The system tried to SUBST a drive to a directory on a joined drive.
pub const SUBST_TO_JOIN = 141;
+
/// The system cannot perform a JOIN or SUBST at this time.
pub const BUSY_DRIVE = 142;
+
/// The system cannot join or substitute a drive to or for a directory on the same drive.
pub const SAME_DRIVE = 143;
+
/// The directory is not a subdirectory of the root directory.
pub const DIR_NOT_ROOT = 144;
+
/// The directory is not empty.
pub const DIR_NOT_EMPTY = 145;
+
/// The path specified is being used in a substitute.
pub const IS_SUBST_PATH = 146;
+
/// Not enough resources are available to process this command.
pub const IS_JOIN_PATH = 147;
+
/// The path specified cannot be used at this time.
pub const PATH_BUSY = 148;
+
/// An attempt was made to join or substitute a drive for which a directory on the drive is the target of a previous substitute.
pub const IS_SUBST_TARGET = 149;
+
/// System trace information was not specified in your CONFIG.SYS file, or tracing is disallowed.
pub const SYSTEM_TRACE = 150;
+
/// The number of specified semaphore events for DosMuxSemWait is not correct.
pub const INVALID_EVENT_COUNT = 151;
+
/// DosMuxSemWait did not execute; too many semaphores are already set.
pub const TOO_MANY_MUXWAITERS = 152;
+
/// The DosMuxSemWait list is not correct.
pub const INVALID_LIST_FORMAT = 153;
+
/// The volume label you entered exceeds the label character limit of the target file system.
pub const LABEL_TOO_LONG = 154;
+
/// Cannot create another thread.
pub const TOO_MANY_TCBS = 155;
+
/// The recipient process has refused the signal.
pub const SIGNAL_REFUSED = 156;
+
/// The segment is already discarded and cannot be locked.
pub const DISCARDED = 157;
+
/// The segment is already unlocked.
pub const NOT_LOCKED = 158;
+
/// The address for the thread ID is not correct.
pub const BAD_THREADID_ADDR = 159;
+
/// One or more arguments are not correct.
pub const BAD_ARGUMENTS = 160;
+
/// The specified path is invalid.
pub const BAD_PATHNAME = 161;
+
/// A signal is already pending.
pub const SIGNAL_PENDING = 162;
+
/// No more threads can be created in the system.
pub const MAX_THRDS_REACHED = 164;
+
/// Unable to lock a region of a file.
pub const LOCK_FAILED = 167;
+
/// The requested resource is in use.
pub const BUSY = 170;
+
/// Device's command support detection is in progress.
pub const DEVICE_SUPPORT_IN_PROGRESS = 171;
+
/// A lock request was not outstanding for the supplied cancel region.
pub const CANCEL_VIOLATION = 173;
+
/// The file system does not support atomic changes to the lock type.
pub const ATOMIC_LOCKS_NOT_SUPPORTED = 174;
+
/// The system detected a segment number that was not correct.
pub const INVALID_SEGMENT_NUMBER = 180;
+
/// The operating system cannot run %1.
pub const INVALID_ORDINAL = 182;
+
/// Cannot create a file when that file already exists.
pub const ALREADY_EXISTS = 183;
+
/// The flag passed is not correct.
pub const INVALID_FLAG_NUMBER = 186;
+
/// The specified system semaphore name was not found.
pub const SEM_NOT_FOUND = 187;
+
/// The operating system cannot run %1.
pub const INVALID_STARTING_CODESEG = 188;
+
/// The operating system cannot run %1.
pub const INVALID_STACKSEG = 189;
+
/// The operating system cannot run %1.
pub const INVALID_MODULETYPE = 190;
+
/// Cannot run %1 in Win32 mode.
pub const INVALID_EXE_SIGNATURE = 191;
+
/// The operating system cannot run %1.
pub const EXE_MARKED_INVALID = 192;
+
/// %1 is not a valid Win32 application.
pub const BAD_EXE_FORMAT = 193;
+
/// The operating system cannot run %1.
pub const ITERATED_DATA_EXCEEDS_64k = 194;
+
/// The operating system cannot run %1.
pub const INVALID_MINALLOCSIZE = 195;
+
/// The operating system cannot run this application program.
pub const DYNLINK_FROM_INVALID_RING = 196;
+
/// The operating system is not presently configured to run this application.
pub const IOPL_NOT_ENABLED = 197;
+
/// The operating system cannot run %1.
pub const INVALID_SEGDPL = 198;
+
/// The operating system cannot run this application program.
pub const AUTODATASEG_EXCEEDS_64k = 199;
+
/// The code segment cannot be greater than or equal to 64K.
pub const RING2SEG_MUST_BE_MOVABLE = 200;
+
/// The operating system cannot run %1.
pub const RELOC_CHAIN_XEEDS_SEGLIM = 201;
+
/// The operating system cannot run %1.
pub const INFLOOP_IN_RELOC_CHAIN = 202;
+
/// The system could not find the environment option that was entered.
pub const ENVVAR_NOT_FOUND = 203;
+
/// No process in the command subtree has a signal handler.
pub const NO_SIGNAL_SENT = 205;
+
/// The filename or extension is too long.
pub const FILENAME_EXCED_RANGE = 206;
+
/// The ring 2 stack is in use.
pub const RING2_STACK_IN_USE = 207;
+
/// The global filename characters, * or ?, are entered incorrectly or too many global filename characters are specified.
pub const META_EXPANSION_TOO_LONG = 208;
+
/// The signal being posted is not correct.
pub const INVALID_SIGNAL_NUMBER = 209;
+
/// The signal handler cannot be set.
pub const THREAD_1_INACTIVE = 210;
+
/// The segment is locked and cannot be reallocated.
pub const LOCKED = 212;
+
/// Too many dynamic-link modules are attached to this program or dynamic-link module.
pub const TOO_MANY_MODULES = 214;
+
/// Cannot nest calls to LoadModule.
pub const NESTING_NOT_ALLOWED = 215;
+
/// This version of %1 is not compatible with the version of Windows you're running. Check your computer's system information and then contact the software publisher.
pub const EXE_MACHINE_TYPE_MISMATCH = 216;
+
/// The image file %1 is signed, unable to modify.
pub const EXE_CANNOT_MODIFY_SIGNED_BINARY = 217;
+
/// The image file %1 is strong signed, unable to modify.
pub const EXE_CANNOT_MODIFY_STRONG_SIGNED_BINARY = 218;
+
/// This file is checked out or locked for editing by another user.
pub const FILE_CHECKED_OUT = 220;
+
/// The file must be checked out before saving changes.
pub const CHECKOUT_REQUIRED = 221;
+
/// The file type being saved or retrieved has been blocked.
pub const BAD_FILE_TYPE = 222;
+
/// The file size exceeds the limit allowed and cannot be saved.
pub const FILE_TOO_LARGE = 223;
+
/// Access Denied. Before opening files in this location, you must first add the web site to your trusted sites list, browse to the web site, and select the option to login automatically.
pub const FORMS_AUTH_REQUIRED = 224;
+
/// Operation did not complete successfully because the file contains a virus or potentially unwanted software.
pub const VIRUS_INFECTED = 225;
+
/// This file contains a virus or potentially unwanted software and cannot be opened. Due to the nature of this virus or potentially unwanted software, the file has been removed from this location.
pub const VIRUS_DELETED = 226;
+
/// The pipe is local.
pub const PIPE_LOCAL = 229;
+
/// The pipe state is invalid.
pub const BAD_PIPE = 230;
+
/// All pipe instances are busy.
pub const PIPE_BUSY = 231;
+
/// The pipe is being closed.
pub const NO_DATA = 232;
+
/// No process is on the other end of the pipe.
pub const PIPE_NOT_CONNECTED = 233;
+
/// More data is available.
pub const MORE_DATA = 234;
+
/// The session was canceled.
pub const VC_DISCONNECTED = 240;
+
/// The specified extended attribute name was invalid.
pub const INVALID_EA_NAME = 254;
+
/// The extended attributes are inconsistent.
pub const EA_LIST_INCONSISTENT = 255;
+
/// The wait operation timed out.
pub const IMEOUT = 258;
+
/// No more data is available.
pub const NO_MORE_ITEMS = 259;
+
/// The copy functions cannot be used.
pub const CANNOT_COPY = 266;
+
/// The directory name is invalid.
pub const DIRECTORY = 267;
+
/// The extended attributes did not fit in the buffer.
pub const EAS_DIDNT_FIT = 275;
+
/// The extended attribute file on the mounted file system is corrupt.
pub const EA_FILE_CORRUPT = 276;
+
/// The extended attribute table file is full.
pub const EA_TABLE_FULL = 277;
+
/// The specified extended attribute handle is invalid.
pub const INVALID_EA_HANDLE = 278;
+
/// The mounted file system does not support extended attributes.
pub const EAS_NOT_SUPPORTED = 282;
+
/// Attempt to release mutex not owned by caller.
pub const NOT_OWNER = 288;
+
/// Too many posts were made to a semaphore.
pub const TOO_MANY_POSTS = 298;
+
/// Only part of a ReadProcessMemory or WriteProcessMemory request was completed.
pub const PARTIAL_COPY = 299;
+
/// The oplock request is denied.
pub const OPLOCK_NOT_GRANTED = 300;
+
/// An invalid oplock acknowledgment was received by the system.
pub const INVALID_OPLOCK_PROTOCOL = 301;
+
/// The volume is too fragmented to complete this operation.
pub const DISK_TOO_FRAGMENTED = 302;
+
/// The file cannot be opened because it is in the process of being deleted.
pub const DELETE_PENDING = 303;
+
/// Short name settings may not be changed on this volume due to the global registry setting.
pub const INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING = 304;
+
/// Short names are not enabled on this volume.
pub const SHORT_NAMES_NOT_ENABLED_ON_VOLUME = 305;
+
/// The security stream for the given volume is in an inconsistent state. Please run CHKDSK on the volume.
pub const SECURITY_STREAM_IS_INCONSISTENT = 306;
+
/// A requested file lock operation cannot be processed due to an invalid byte range.
pub const INVALID_LOCK_RANGE = 307;
+
/// The subsystem needed to support the image type is not present.
pub const IMAGE_SUBSYSTEM_NOT_PRESENT = 308;
+
/// The specified file already has a notification GUID associated with it.
pub const NOTIFICATION_GUID_ALREADY_DEFINED = 309;
+
/// An invalid exception handler routine has been detected.
pub const INVALID_EXCEPTION_HANDLER = 310;
+
/// Duplicate privileges were specified for the token.
pub const DUPLICATE_PRIVILEGES = 311;
+
/// No ranges for the specified operation were able to be processed.
pub const NO_RANGES_PROCESSED = 312;
+
/// Operation is not allowed on a file system internal file.
pub const NOT_ALLOWED_ON_SYSTEM_FILE = 313;
+
/// The physical resources of this disk have been exhausted.
pub const DISK_RESOURCES_EXHAUSTED = 314;
+
/// The token representing the data is invalid.
pub const INVALID_TOKEN = 315;
+
/// The device does not support the command feature.
pub const DEVICE_FEATURE_NOT_SUPPORTED = 316;
+
/// The system cannot find message text for message number 0x%1 in the message file for %2.
pub const MR_MID_NOT_FOUND = 317;
+
/// The scope specified was not found.
pub const SCOPE_NOT_FOUND = 318;
+
/// The Central Access Policy specified is not defined on the target machine.
pub const UNDEFINED_SCOPE = 319;
+
/// The Central Access Policy obtained from Active Directory is invalid.
pub const INVALID_CAP = 320;
+
/// The device is unreachable.
pub const DEVICE_UNREACHABLE = 321;
+
/// The target device has insufficient resources to complete the operation.
pub const DEVICE_NO_RESOURCES = 322;
+
/// A data integrity checksum error occurred. Data in the file stream is corrupt.
pub const DATA_CHECKSUM_ERROR = 323;
+
/// An attempt was made to modify both a KERNEL and normal Extended Attribute (EA) in the same operation.
pub const INTERMIXED_KERNEL_EA_OPERATION = 324;
+
/// Device does not support file-level TRIM.
pub const FILE_LEVEL_TRIM_NOT_SUPPORTED = 326;
+
/// The command specified a data offset that does not align to the device's granularity/alignment.
pub const OFFSET_ALIGNMENT_VIOLATION = 327;
+
/// The command specified an invalid field in its parameter list.
pub const INVALID_FIELD_IN_PARAMETER_LIST = 328;
+
/// An operation is currently in progress with the device.
pub const OPERATION_IN_PROGRESS = 329;
+
/// An attempt was made to send down the command via an invalid path to the target device.
pub const BAD_DEVICE_PATH = 330;
+
/// The command specified a number of descriptors that exceeded the maximum supported by the device.
pub const TOO_MANY_DESCRIPTORS = 331;
+
/// Scrub is disabled on the specified file.
pub const SCRUB_DATA_DISABLED = 332;
+
/// The storage device does not provide redundancy.
pub const NOT_REDUNDANT_STORAGE = 333;
+
/// An operation is not supported on a resident file.
pub const RESIDENT_FILE_NOT_SUPPORTED = 334;
+
/// An operation is not supported on a compressed file.
pub const COMPRESSED_FILE_NOT_SUPPORTED = 335;
+
/// An operation is not supported on a directory.
pub const DIRECTORY_NOT_SUPPORTED = 336;
+
/// The specified copy of the requested data could not be read.
pub const NOT_READ_FROM_COPY = 337;
+
/// No action was taken as a system reboot is required.
pub const FAIL_NOACTION_REBOOT = 350;
+
/// The shutdown operation failed.
pub const FAIL_SHUTDOWN = 351;
+
/// The restart operation failed.
pub const FAIL_RESTART = 352;
+
/// The maximum number of sessions has been reached.
pub const MAX_SESSIONS_REACHED = 353;
+
/// The thread is already in background processing mode.
pub const THREAD_MODE_ALREADY_BACKGROUND = 400;
+
/// The thread is not in background processing mode.
pub const THREAD_MODE_NOT_BACKGROUND = 401;
+
/// The process is already in background processing mode.
pub const PROCESS_MODE_ALREADY_BACKGROUND = 402;
+
/// The process is not in background processing mode.
pub const PROCESS_MODE_NOT_BACKGROUND = 403;
+
/// Attempt to access invalid address.
pub const INVALID_ADDRESS = 487;
+
/// User profile cannot be loaded.
pub const USER_PROFILE_LOAD = 500;
+
/// Arithmetic result exceeded 32 bits.
pub const ARITHMETIC_OVERFLOW = 534;
+
/// There is a process on the other end of the pipe.
pub const PIPE_CONNECTED = 535;
+
/// Waiting for a process to open the other end of the pipe.
pub const PIPE_LISTENING = 536;
+
/// Application verifier has found an error in the current process.
pub const VERIFIER_STOP = 537;
+
/// An error occurred in the ABIOS subsystem.
pub const ABIOS_ERROR = 538;
+
/// A warning occurred in the WX86 subsystem.
pub const WX86_WARNING = 539;
+
/// An error occurred in the WX86 subsystem.
pub const WX86_ERROR = 540;
+
/// An attempt was made to cancel or set a timer that has an associated APC and the subject thread is not the thread that originally set the timer with an associated APC routine.
pub const TIMER_NOT_CANCELED = 541;
+
/// Unwind exception code.
pub const UNWIND = 542;
+
/// An invalid or unaligned stack was encountered during an unwind operation.
pub const BAD_STACK = 543;
+
/// An invalid unwind target was encountered during an unwind operation.
pub const INVALID_UNWIND_TARGET = 544;
+
/// Invalid Object Attributes specified to NtCreatePort or invalid Port Attributes specified to NtConnectPort
pub const INVALID_PORT_ATTRIBUTES = 545;
+
/// Length of message passed to NtRequestPort or NtRequestWaitReplyPort was longer than the maximum message allowed by the port.
pub const PORT_MESSAGE_TOO_LONG = 546;
+
/// An attempt was made to lower a quota limit below the current usage.
pub const INVALID_QUOTA_LOWER = 547;
+
/// An attempt was made to attach to a device that was already attached to another device.
pub const DEVICE_ALREADY_ATTACHED = 548;
+
/// An attempt was made to execute an instruction at an unaligned address and the host system does not support unaligned instruction references.
pub const INSTRUCTION_MISALIGNMENT = 549;
+
/// Profiling not started.
pub const PROFILING_NOT_STARTED = 550;
+
/// Profiling not stopped.
pub const PROFILING_NOT_STOPPED = 551;
+
/// The passed ACL did not contain the minimum required information.
pub const COULD_NOT_INTERPRET = 552;
+
/// The number of active profiling objects is at the maximum and no more may be started.
pub const PROFILING_AT_LIMIT = 553;
+
/// Used to indicate that an operation cannot continue without blocking for I/O.
pub const CANT_WAIT = 554;
+
/// Indicates that a thread attempted to terminate itself by default (called NtTerminateThread with NULL) and it was the last thread in the current process.
pub const CANT_TERMINATE_SELF = 555;
+
/// If an MM error is returned which is not defined in the standard FsRtl filter, it is converted to one of the following errors which is guaranteed to be in the filter. In this case information is lost, however, the filter correctly handles the exception.
pub const UNEXPECTED_MM_CREATE_ERR = 556;
+
/// If an MM error is returned which is not defined in the standard FsRtl filter, it is converted to one of the following errors which is guaranteed to be in the filter. In this case information is lost, however, the filter correctly handles the exception.
pub const UNEXPECTED_MM_MAP_ERROR = 557;
+
/// If an MM error is returned which is not defined in the standard FsRtl filter, it is converted to one of the following errors which is guaranteed to be in the filter. In this case information is lost, however, the filter correctly handles the exception.
pub const UNEXPECTED_MM_EXTEND_ERR = 558;
+
/// A malformed function table was encountered during an unwind operation.
pub const BAD_FUNCTION_TABLE = 559;
+
/// Indicates that an attempt was made to assign protection to a file system file or directory and one of the SIDs in the security descriptor could not be translated into a GUID that could be stored by the file system. This causes the protection attempt to fail, which may cause a file creation attempt to fail.
pub const NO_GUID_TRANSLATION = 560;
+
/// Indicates that an attempt was made to grow an LDT by setting its size, or that the size was not an even number of selectors.
pub const INVALID_LDT_SIZE = 561;
+
/// Indicates that the starting value for the LDT information was not an integral multiple of the selector size.
pub const INVALID_LDT_OFFSET = 563;
+
/// Indicates that the user supplied an invalid descriptor when trying to set up Ldt descriptors.
pub const INVALID_LDT_DESCRIPTOR = 564;
+
/// Indicates a process has too many threads to perform the requested action. For example, assignment of a primary token may only be performed when a process has zero or one threads.
pub const TOO_MANY_THREADS = 565;
+
/// An attempt was made to operate on a thread within a specific process, but the thread specified is not in the process specified.
pub const THREAD_NOT_IN_PROCESS = 566;
+
/// Page file quota was exceeded.
pub const PAGEFILE_QUOTA_EXCEEDED = 567;
+
/// The Netlogon service cannot start because another Netlogon service running in the domain conflicts with the specified role.
pub const LOGON_SERVER_CONFLICT = 568;
+
/// The SAM database on a Windows Server is significantly out of synchronization with the copy on the Domain Controller. A complete synchronization is required.
pub const SYNCHRONIZATION_REQUIRED = 569;
+
/// The NtCreateFile API failed. This error should never be returned to an application, it is a place holder for the Windows Lan Manager Redirector to use in its internal error mapping routines.
pub const NET_OPEN_FAILED = 570;
+
/// {Privilege Failed} The I/O permissions for the process could not be changed.
pub const IO_PRIVILEGE_FAILED = 571;
+
/// {Application Exit by CTRL+C} The application terminated as a result of a CTRL+C.
pub const CONTROL_C_EXIT = 572;
+
/// {Missing System File} The required system file %hs is bad or missing.
pub const MISSING_SYSTEMFILE = 573;
+
/// {Application Error} The exception %s (0x%08lx) occurred in the application at location 0x%08lx.
pub const UNHANDLED_EXCEPTION = 574;
+
/// {Application Error} The application was unable to start correctly (0x%lx). Click OK to close the application.
pub const APP_INIT_FAILURE = 575;
+
/// {Unable to Create Paging File} The creation of the paging file %hs failed (%lx). The requested size was %ld.
pub const PAGEFILE_CREATE_FAILED = 576;
+
/// Windows cannot verify the digital signature for this file. A recent hardware or software change might have installed a file that is signed incorrectly or damaged, or that might be malicious software from an unknown source.
pub const INVALID_IMAGE_HASH = 577;
+
/// {No Paging File Specified} No paging file was specified in the system configuration.
pub const NO_PAGEFILE = 578;
+
/// {EXCEPTION} A real-mode application issued a floating-point instruction and floating-point hardware is not present.
pub const ILLEGAL_FLOAT_CONTEXT = 579;
+
/// An event pair synchronization operation was performed using the thread specific client/server event pair object, but no event pair object was associated with the thread.
pub const NO_EVENT_PAIR = 580;
+
/// A Windows Server has an incorrect configuration.
pub const DOMAIN_CTRLR_CONFIG_ERROR = 581;
+
/// An illegal character was encountered. For a multi-byte character set this includes a lead byte without a succeeding trail byte. For the Unicode character set this includes the characters 0xFFFF and 0xFFFE.
pub const ILLEGAL_CHARACTER = 582;
+
/// The Unicode character is not defined in the Unicode character set installed on the system.
pub const UNDEFINED_CHARACTER = 583;
+
/// The paging file cannot be created on a floppy diskette.
pub const FLOPPY_VOLUME = 584;
+
/// The system BIOS failed to connect a system interrupt to the device or bus for which the device is connected.
pub const BIOS_FAILED_TO_CONNECT_INTERRUPT = 585;
+
/// This operation is only allowed for the Primary Domain Controller of the domain.
pub const BACKUP_CONTROLLER = 586;
+
/// An attempt was made to acquire a mutant such that its maximum count would have been exceeded.
pub const MUTANT_LIMIT_EXCEEDED = 587;
+
/// A volume has been accessed for which a file system driver is required that has not yet been loaded.
pub const FS_DRIVER_REQUIRED = 588;
+
/// {Registry File Failure} The registry cannot load the hive (file): %hs or its log or alternate. It is corrupt, absent, or not writable.
pub const CANNOT_LOAD_REGISTRY_FILE = 589;
+
/// {Unexpected Failure in DebugActiveProcess} An unexpected failure occurred while processing a DebugActiveProcess API request. You may choose OK to terminate the process, or Cancel to ignore the error.
pub const DEBUG_ATTACH_FAILED = 590;
+
/// {Fatal System Error} The %hs system process terminated unexpectedly with a status of 0x%08x (0x%08x 0x%08x). The system has been shut down.
pub const SYSTEM_PROCESS_TERMINATED = 591;
+
/// {Data Not Accepted} The TDI client could not handle the data received during an indication.
pub const DATA_NOT_ACCEPTED = 592;
+
/// NTVDM encountered a hard error.
pub const VDM_HARD_ERROR = 593;
+
/// {Cancel Timeout} The driver %hs failed to complete a cancelled I/O request in the allotted time.
pub const DRIVER_CANCEL_TIMEOUT = 594;
+
/// {Reply Message Mismatch} An attempt was made to reply to an LPC message, but the thread specified by the client ID in the message was not waiting on that message.
pub const REPLY_MESSAGE_MISMATCH = 595;
+
/// {Delayed Write Failed} Windows was unable to save all the data for the file %hs. The data has been lost. This error may be caused by a failure of your computer hardware or network connection. Please try to save this file elsewhere.
pub const LOST_WRITEBEHIND_DATA = 596;
+
/// The parameter(s) passed to the server in the client/server shared memory window were invalid. Too much data may have been put in the shared memory window.
pub const CLIENT_SERVER_PARAMETERS_INVALID = 597;
+
/// The stream is not a tiny stream.
pub const NOT_TINY_STREAM = 598;
+
/// The request must be handled by the stack overflow code.
pub const STACK_OVERFLOW_READ = 599;
+
/// Internal OFS status codes indicating how an allocation operation is handled. Either it is retried after the containing onode is moved or the extent stream is converted to a large stream.
pub const CONVERT_TO_LARGE = 600;
+
/// The attempt to find the object found an object matching by ID on the volume but it is out of the scope of the handle used for the operation.
pub const FOUND_OUT_OF_SCOPE = 601;
+
/// The bucket array must be grown. Retry transaction after doing so.
pub const ALLOCATE_BUCKET = 602;
+
/// The user/kernel marshalling buffer has overflowed.
pub const MARSHALL_OVERFLOW = 603;
+
/// The supplied variant structure contains invalid data.
pub const INVALID_VARIANT = 604;
+
/// The specified buffer contains ill-formed data.
pub const BAD_COMPRESSION_BUFFER = 605;
+
/// {Audit Failed} An attempt to generate a security audit failed.
pub const AUDIT_FAILED = 606;
+
/// The timer resolution was not previously set by the current process.
pub const TIMER_RESOLUTION_NOT_SET = 607;
+
/// There is insufficient account information to log you on.
pub const INSUFFICIENT_LOGON_INFO = 608;
+
/// {Invalid DLL Entrypoint} The dynamic link library %hs is not written correctly. The stack pointer has been left in an inconsistent state. The entrypoint should be declared as WINAPI or STDCALL. Select YES to fail the DLL load. Select NO to continue execution. Selecting NO may cause the application to operate incorrectly.
pub const BAD_DLL_ENTRYPOINT = 609;
+
/// {Invalid Service Callback Entrypoint} The %hs service is not written correctly. The stack pointer has been left in an inconsistent state. The callback entrypoint should be declared as WINAPI or STDCALL. Selecting OK will cause the service to continue operation. However, the service process may operate incorrectly.
pub const BAD_SERVICE_ENTRYPOINT = 610;
+
/// There is an IP address conflict with another system on the network.
pub const IP_ADDRESS_CONFLICT1 = 611;
+
/// There is an IP address conflict with another system on the network.
pub const IP_ADDRESS_CONFLICT2 = 612;
+
/// {Low On Registry Space} The system has reached the maximum size allowed for the system part of the registry. Additional storage requests will be ignored.
pub const REGISTRY_QUOTA_LIMIT = 613;
+
/// A callback return system service cannot be executed when no callback is active.
pub const NO_CALLBACK_ACTIVE = 614;
+
/// The password provided is too short to meet the policy of your user account. Please choose a longer password.
pub const PWD_TOO_SHORT = 615;
+
/// The policy of your user account does not allow you to change passwords too frequently. This is done to prevent users from changing back to a familiar, but potentially discovered, password. If you feel your password has been compromised then please contact your administrator immediately to have a new one assigned.
pub const PWD_TOO_RECENT = 616;
+
/// You have attempted to change your password to one that you have used in the past. The policy of your user account does not allow this. Please select a password that you have not previously used.
pub const PWD_HISTORY_CONFLICT = 617;
+
/// The specified compression format is unsupported.
pub const UNSUPPORTED_COMPRESSION = 618;
+
/// The specified hardware profile configuration is invalid.
pub const INVALID_HW_PROFILE = 619;
+
/// The specified Plug and Play registry device path is invalid.
pub const INVALID_PLUGPLAY_DEVICE_PATH = 620;
+
/// The specified quota list is internally inconsistent with its descriptor.
pub const QUOTA_LIST_INCONSISTENT = 621;
+
/// {Windows Evaluation Notification} The evaluation period for this installation of Windows has expired. This system will shutdown in 1 hour. To restore access to this installation of Windows, please upgrade this installation using a licensed distribution of this product.
pub const EVALUATION_EXPIRATION = 622;
+
/// {Illegal System DLL Relocation} The system DLL %hs was relocated in memory. The application will not run properly. The relocation occurred because the DLL %hs occupied an address range reserved for Windows system DLLs. The vendor supplying the DLL should be contacted for a new DLL.
pub const ILLEGAL_DLL_RELOCATION = 623;
+
/// {DLL Initialization Failed} The application failed to initialize because the window station is shutting down.
pub const DLL_INIT_FAILED_LOGOFF = 624;
+
/// The validation process needs to continue on to the next step.
pub const VALIDATE_CONTINUE = 625;
+
/// There are no more matches for the current index enumeration.
pub const NO_MORE_MATCHES = 626;
+
/// The range could not be added to the range list because of a conflict.
pub const RANGE_LIST_CONFLICT = 627;
+
/// The server process is running under a SID different than that required by client.
pub const SERVER_SID_MISMATCH = 628;
+
/// A group marked use for deny only cannot be enabled.
pub const CANT_ENABLE_DENY_ONLY = 629;
+
/// {EXCEPTION} Multiple floating point faults.
pub const FLOAT_MULTIPLE_FAULTS = 630;
+
/// {EXCEPTION} Multiple floating point traps.
pub const FLOAT_MULTIPLE_TRAPS = 631;
+
/// The requested interface is not supported.
pub const NOINTERFACE = 632;
+
/// {System Standby Failed} The driver %hs does not support standby mode. Updating this driver may allow the system to go to standby mode.
pub const DRIVER_FAILED_SLEEP = 633;
+
/// The system file %1 has become corrupt and has been replaced.
pub const CORRUPT_SYSTEM_FILE = 634;
+
/// {Virtual Memory Minimum Too Low} Your system is low on virtual memory. Windows is increasing the size of your virtual memory paging file. During this process, memory requests for some applications may be denied. For more information, see Help.
pub const COMMITMENT_MINIMUM = 635;
+
/// A device was removed so enumeration must be restarted.
pub const PNP_RESTART_ENUMERATION = 636;
+
/// {Fatal System Error} The system image %s is not properly signed. The file has been replaced with the signed file. The system has been shut down.
pub const SYSTEM_IMAGE_BAD_SIGNATURE = 637;
+
/// Device will not start without a reboot.
pub const PNP_REBOOT_REQUIRED = 638;
+
/// There is not enough power to complete the requested operation.
pub const INSUFFICIENT_POWER = 639;
+
/// ERROR_MULTIPLE_FAULT_VIOLATION
pub const MULTIPLE_FAULT_VIOLATION = 640;
+
/// The system is in the process of shutting down.
pub const SYSTEM_SHUTDOWN = 641;
+
/// An attempt to remove a process's DebugPort was made, but a port was not already associated with the process.
pub const PORT_NOT_SET = 642;
+
/// This version of Windows is not compatible with the behavior version of directory forest, domain or domain controller.
pub const DS_VERSION_CHECK_FAILURE = 643;
+
/// The specified range could not be found in the range list.
pub const RANGE_NOT_FOUND = 644;
+
/// The driver was not loaded because the system is booting into safe mode.
pub const NOT_SAFE_MODE_DRIVER = 646;
+
/// The driver was not loaded because it failed its initialization call.
pub const FAILED_DRIVER_ENTRY = 647;
+
/// The "%hs" encountered an error while applying power or reading the device configuration. This may be caused by a failure of your hardware or by a poor connection.
pub const DEVICE_ENUMERATION_ERROR = 648;
+
/// The create operation failed because the name contained at least one mount point which resolves to a volume to which the specified device object is not attached.
pub const MOUNT_POINT_NOT_RESOLVED = 649;
+
/// The device object parameter is either not a valid device object or is not attached to the volume specified by the file name.
pub const INVALID_DEVICE_OBJECT_PARAMETER = 650;
+
/// A Machine Check Error has occurred. Please check the system eventlog for additional information.
pub const MCA_OCCURED = 651;
+
/// There was an error [%2] processing the driver database.
pub const DRIVER_DATABASE_ERROR = 652;
+
/// System hive size has exceeded its limit.
pub const SYSTEM_HIVE_TOO_LARGE = 653;
+
/// The driver could not be loaded because a previous version of the driver is still in memory.
pub const DRIVER_FAILED_PRIOR_UNLOAD = 654;
+
/// {Volume Shadow Copy Service} Please wait while the Volume Shadow Copy Service prepares volume %hs for hibernation.
pub const VOLSNAP_PREPARE_HIBERNATE = 655;
+
/// The system has failed to hibernate (The error code is %hs). Hibernation will be disabled until the system is restarted.
pub const HIBERNATION_FAILURE = 656;
+
/// The password provided is too long to meet the policy of your user account. Please choose a shorter password.
pub const PWD_TOO_LONG = 657;
+
/// The requested operation could not be completed due to a file system limitation.
pub const FILE_SYSTEM_LIMITATION = 665;
+
/// An assertion failure has occurred.
pub const ASSERTION_FAILURE = 668;
+
/// An error occurred in the ACPI subsystem.
pub const ACPI_ERROR = 669;
+
/// WOW Assertion Error.
pub const WOW_ASSERTION = 670;
+
/// A device is missing in the system BIOS MPS table. This device will not be used. Please contact your system vendor for system BIOS update.
pub const PNP_BAD_MPS_TABLE = 671;
+
/// A translator failed to translate resources.
pub const PNP_TRANSLATION_FAILED = 672;
+
/// An IRQ translator failed to translate resources.
pub const PNP_IRQ_TRANSLATION_FAILED = 673;
+
/// Driver %2 returned invalid ID for a child device (%3).
pub const PNP_INVALID_ID = 674;
+
/// {Kernel Debugger Awakened} the system debugger was awakened by an interrupt.
pub const WAKE_SYSTEM_DEBUGGER = 675;
+
/// {Handles Closed} Handles to objects have been automatically closed as a result of the requested operation.
pub const HANDLES_CLOSED = 676;
+
/// {Too Much Information} The specified access control list (ACL) contained more information than was expected.
pub const EXTRANEOUS_INFORMATION = 677;
+
/// This warning level status indicates that the transaction state already exists for the registry sub-tree, but that a transaction commit was previously aborted. The commit has NOT been completed, but has not been rolled back either (so it may still be committed if desired).
pub const RXACT_COMMIT_NECESSARY = 678;
+
/// {Media Changed} The media may have changed.
pub const MEDIA_CHECK = 679;
+
/// {GUID Substitution} During the translation of a global identifier (GUID) to a Windows security ID (SID), no administratively-defined GUID prefix was found. A substitute prefix was used, which will not compromise system security. However, this may provide a more restrictive access than intended.
pub const GUID_SUBSTITUTION_MADE = 680;
+
/// The create operation stopped after reaching a symbolic link.
pub const STOPPED_ON_SYMLINK = 681;
+
/// A long jump has been executed.
pub const LONGJUMP = 682;
+
/// The Plug and Play query operation was not successful.
pub const PLUGPLAY_QUERY_VETOED = 683;
+
/// A frame consolidation has been executed.
pub const UNWIND_CONSOLIDATE = 684;
+
/// {Registry Hive Recovered} Registry hive (file): %hs was corrupted and it has been recovered. Some data might have been lost.
pub const REGISTRY_HIVE_RECOVERED = 685;
+
/// The application is attempting to run executable code from the module %hs. This may be insecure. An alternative, %hs, is available. Should the application use the secure module %hs?
pub const DLL_MIGHT_BE_INSECURE = 686;
+
/// The application is loading executable code from the module %hs. This is secure, but may be incompatible with previous releases of the operating system. An alternative, %hs, is available. Should the application use the secure module %hs?
pub const DLL_MIGHT_BE_INCOMPATIBLE = 687;
+
/// Debugger did not handle the exception.
pub const DBG_EXCEPTION_NOT_HANDLED = 688;
+
/// Debugger will reply later.
pub const DBG_REPLY_LATER = 689;
+
/// Debugger cannot provide handle.
pub const DBG_UNABLE_TO_PROVIDE_HANDLE = 690;
+
/// Debugger terminated thread.
pub const DBG_TERMINATE_THREAD = 691;
+
/// Debugger terminated process.
pub const DBG_TERMINATE_PROCESS = 692;
+
/// Debugger got control C.
pub const DBG_CONTROL_C = 693;
+
/// Debugger printed exception on control C.
pub const DBG_PRINTEXCEPTION_C = 694;
+
/// Debugger received RIP exception.
pub const DBG_RIPEXCEPTION = 695;
+
/// Debugger received control break.
pub const DBG_CONTROL_BREAK = 696;
+
/// Debugger command communication exception.
pub const DBG_COMMAND_EXCEPTION = 697;
+
/// {Object Exists} An attempt was made to create an object and the object name already existed.
pub const OBJECT_NAME_EXISTS = 698;
+
/// {Thread Suspended} A thread termination occurred while the thread was suspended. The thread was resumed, and termination proceeded.
pub const THREAD_WAS_SUSPENDED = 699;
+
/// {Image Relocated} An image file could not be mapped at the address specified in the image file. Local fixups must be performed on this image.
pub const IMAGE_NOT_AT_BASE = 700;
+
/// This informational level status indicates that a specified registry sub-tree transaction state did not yet exist and had to be created.
pub const RXACT_STATE_CREATED = 701;
+
/// {Segment Load} A virtual DOS machine (VDM) is loading, unloading, or moving an MS-DOS or Win16 program segment image. An exception is raised so a debugger can load, unload or track symbols and breakpoints within these 16-bit segments.
pub const SEGMENT_NOTIFICATION = 702;
+
/// {Invalid Current Directory} The process cannot switch to the startup current directory %hs. Select OK to set current directory to %hs, or select CANCEL to exit.
pub const BAD_CURRENT_DIRECTORY = 703;
+
/// {Redundant Read} To satisfy a read request, the NT fault-tolerant file system successfully read the requested data from a redundant copy. This was done because the file system encountered a failure on a member of the fault-tolerant volume, but was unable to reassign the failing area of the device.
pub const FT_READ_RECOVERY_FROM_BACKUP = 704;
+
/// {Redundant Write} To satisfy a write request, the NT fault-tolerant file system successfully wrote a redundant copy of the information. This was done because the file system encountered a failure on a member of the fault-tolerant volume, but was not able to reassign the failing area of the device.
pub const FT_WRITE_RECOVERY = 705;
+
/// {Machine Type Mismatch} The image file %hs is valid, but is for a machine type other than the current machine. Select OK to continue, or CANCEL to fail the DLL load.
pub const IMAGE_MACHINE_TYPE_MISMATCH = 706;
+
/// {Partial Data Received} The network transport returned partial data to its client. The remaining data will be sent later.
pub const RECEIVE_PARTIAL = 707;
+
/// {Expedited Data Received} The network transport returned data to its client that was marked as expedited by the remote system.
pub const RECEIVE_EXPEDITED = 708;
+
/// {Partial Expedited Data Received} The network transport returned partial data to its client and this data was marked as expedited by the remote system. The remaining data will be sent later.
pub const RECEIVE_PARTIAL_EXPEDITED = 709;
+
/// {TDI Event Done} The TDI indication has completed successfully.
pub const EVENT_DONE = 710;
+
/// {TDI Event Pending} The TDI indication has entered the pending state.
pub const EVENT_PENDING = 711;
+
/// Checking file system on %wZ.
pub const CHECKING_FILE_SYSTEM = 712;
+
/// {Fatal Application Exit} %hs.
pub const FATAL_APP_EXIT = 713;
+
/// The specified registry key is referenced by a predefined handle.
pub const PREDEFINED_HANDLE = 714;
+
/// {Page Unlocked} The page protection of a locked page was changed to 'No Access' and the page was unlocked from memory and from the process.
pub const WAS_UNLOCKED = 715;
+
/// %hs
pub const SERVICE_NOTIFICATION = 716;
+
/// {Page Locked} One of the pages to lock was already locked.
pub const WAS_LOCKED = 717;
+
/// Application popup: %1 : %2
pub const LOG_HARD_ERROR = 718;
+
/// ERROR_ALREADY_WIN32
pub const ALREADY_WIN32 = 719;
+
/// {Machine Type Mismatch} The image file %hs is valid, but is for a machine type other than the current machine.
pub const IMAGE_MACHINE_TYPE_MISMATCH_EXE = 720;
+
/// A yield execution was performed and no thread was available to run.
pub const NO_YIELD_PERFORMED = 721;
+
/// The resumable flag to a timer API was ignored.
pub const TIMER_RESUME_IGNORED = 722;
+
/// The arbiter has deferred arbitration of these resources to its parent.
pub const ARBITRATION_UNHANDLED = 723;
+
/// The inserted CardBus device cannot be started because of a configuration error on "%hs".
pub const CARDBUS_NOT_SUPPORTED = 724;
+
/// The CPUs in this multiprocessor system are not all the same revision level. To use all processors the operating system restricts itself to the features of the least capable processor in the system. Should problems occur with this system, contact the CPU manufacturer to see if this mix of processors is supported.
pub const MP_PROCESSOR_MISMATCH = 725;
+
/// The system was put into hibernation.
pub const HIBERNATED = 726;
+
/// The system was resumed from hibernation.
pub const RESUME_HIBERNATION = 727;
+
/// Windows has detected that the system firmware (BIOS) was updated [previous firmware date = %2, current firmware date %3].
pub const FIRMWARE_UPDATED = 728;
+
/// A device driver is leaking locked I/O pages causing system degradation. The system has automatically enabled tracking code in order to try and catch the culprit.
pub const DRIVERS_LEAKING_LOCKED_PAGES = 729;
+
/// The system has awoken.
pub const WAKE_SYSTEM = 730;
+
/// ERROR_WAIT_1
pub const WAIT_1 = 731;
+
/// ERROR_WAIT_2
pub const WAIT_2 = 732;
+
/// ERROR_WAIT_3
pub const WAIT_3 = 733;
+
/// ERROR_WAIT_63
pub const WAIT_63 = 734;
+
/// ERROR_ABANDONED_WAIT_0
pub const ABANDONED_WAIT_0 = 735;
+
/// ERROR_ABANDONED_WAIT_63
pub const ABANDONED_WAIT_63 = 736;
+
/// ERROR_USER_APC
pub const USER_APC = 737;
+
/// ERROR_KERNEL_APC
pub const KERNEL_APC = 738;
+
/// ERROR_ALERTED
pub const ALERTED = 739;
+
/// The requested operation requires elevation.
pub const ELEVATION_REQUIRED = 740;
+
/// A reparse should be performed by the Object Manager since the name of the file resulted in a symbolic link.
pub const REPARSE = 741;
+
/// An open/create operation completed while an oplock break is underway.
pub const OPLOCK_BREAK_IN_PROGRESS = 742;
+
/// A new volume has been mounted by a file system.
pub const VOLUME_MOUNTED = 743;
+
/// This success level status indicates that the transaction state already exists for the registry sub-tree, but that a transaction commit was previously aborted. The commit has now been completed.
pub const RXACT_COMMITTED = 744;
+
/// This indicates that a notify change request has been completed due to closing the handle which made the notify change request.
pub const NOTIFY_CLEANUP = 745;
+
/// {Connect Failure on Primary Transport} An attempt was made to connect to the remote server %hs on the primary transport, but the connection failed. The computer WAS able to connect on a secondary transport.
pub const PRIMARY_TRANSPORT_CONNECT_FAILED = 746;
+
/// Page fault was a transition fault.
pub const PAGE_FAULT_TRANSITION = 747;
+
/// Page fault was a demand zero fault.
pub const PAGE_FAULT_DEMAND_ZERO = 748;
+
/// Page fault was a copy-on-write fault.
pub const PAGE_FAULT_COPY_ON_WRITE = 749;
+
/// Page fault was a guard page fault.
pub const PAGE_FAULT_GUARD_PAGE = 750;
+
/// Page fault was satisfied by reading from a secondary storage device.
pub const PAGE_FAULT_PAGING_FILE = 751;
+
/// Cached page was locked during operation.
pub const CACHE_PAGE_LOCKED = 752;
+
/// Crash dump exists in paging file.
pub const CRASH_DUMP = 753;
+
/// Specified buffer contains all zeros.
pub const BUFFER_ALL_ZEROS = 754;
+
/// A reparse should be performed by the Object Manager since the name of the file resulted in a symbolic link.
pub const REPARSE_OBJECT = 755;
+
/// The device has succeeded a query-stop and its resource requirements have changed.
pub const RESOURCE_REQUIREMENTS_CHANGED = 756;
+
/// The translator has translated these resources into the global space and no further translations should be performed.
pub const TRANSLATION_COMPLETE = 757;
+
/// A process being terminated has no threads to terminate.
pub const NOTHING_TO_TERMINATE = 758;
+
/// The specified process is not part of a job.
pub const PROCESS_NOT_IN_JOB = 759;
+
/// The specified process is part of a job.
pub const PROCESS_IN_JOB = 760;
+
/// {Volume Shadow Copy Service} The system is now ready for hibernation.
pub const VOLSNAP_HIBERNATE_READY = 761;
+
/// A file system or file system filter driver has successfully completed an FsFilter operation.
pub const FSFILTER_OP_COMPLETED_SUCCESSFULLY = 762;
+
/// The specified interrupt vector was already connected.
pub const INTERRUPT_VECTOR_ALREADY_CONNECTED = 763;
+
/// The specified interrupt vector is still connected.
pub const INTERRUPT_STILL_CONNECTED = 764;
+
/// An operation is blocked waiting for an oplock.
pub const WAIT_FOR_OPLOCK = 765;
+
/// Debugger handled exception.
pub const DBG_EXCEPTION_HANDLED = 766;
+
/// Debugger continued.
pub const DBG_CONTINUE = 767;
+
/// An exception occurred in a user mode callback and the kernel callback frame should be removed.
pub const CALLBACK_POP_STACK = 768;
+
/// Compression is disabled for this volume.
pub const COMPRESSION_DISABLED = 769;
+
/// The data provider cannot fetch backwards through a result set.
pub const CANTFETCHBACKWARDS = 770;
+
/// The data provider cannot scroll backwards through a result set.
pub const CANTSCROLLBACKWARDS = 771;
+
/// The data provider requires that previously fetched data is released before asking for more data.
pub const ROWSNOTRELEASED = 772;
+
/// The data provider was not able to interpret the flags set for a column binding in an accessor.
pub const BAD_ACCESSOR_FLAGS = 773;
+
/// One or more errors occurred while processing the request.
pub const ERRORS_ENCOUNTERED = 774;
+
/// The implementation is not capable of performing the request.
pub const NOT_CAPABLE = 775;
+
/// The client of a component requested an operation which is not valid given the state of the component instance.
pub const REQUEST_OUT_OF_SEQUENCE = 776;
+
/// A version number could not be parsed.
pub const VERSION_PARSE_ERROR = 777;
+
/// The iterator's start position is invalid.
pub const BADSTARTPOSITION = 778;
+
/// The hardware has reported an uncorrectable memory error.
pub const MEMORY_HARDWARE = 779;
+
/// The attempted operation required self healing to be enabled.
pub const DISK_REPAIR_DISABLED = 780;
+
/// The Desktop heap encountered an error while allocating session memory. There is more information in the system event log.
pub const INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE = 781;
+
/// The system power state is transitioning from %2 to %3.
pub const SYSTEM_POWERSTATE_TRANSITION = 782;
+
/// The system power state is transitioning from %2 to %3 but could enter %4.
pub const SYSTEM_POWERSTATE_COMPLEX_TRANSITION = 783;
+
/// A thread is getting dispatched with MCA EXCEPTION because of MCA.
pub const MCA_EXCEPTION = 784;
+
/// Access to %1 is monitored by policy rule %2.
pub const ACCESS_AUDIT_BY_POLICY = 785;
+
/// Access to %1 has been restricted by your Administrator by policy rule %2.
pub const ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY = 786;
+
/// A valid hibernation file has been invalidated and should be abandoned.
pub const ABANDON_HIBERFILE = 787;
+
/// {Delayed Write Failed} Windows was unable to save all the data for the file %hs; the data has been lost. This error may be caused by network connectivity issues. Please try to save this file elsewhere.
pub const LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED = 788;
+
/// {Delayed Write Failed} Windows was unable to save all the data for the file %hs; the data has been lost. This error was returned by the server on which the file exists. Please try to save this file elsewhere.
pub const LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR = 789;
+
/// {Delayed Write Failed} Windows was unable to save all the data for the file %hs; the data has been lost. This error may be caused if the device has been removed or the media is write-protected.
pub const LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR = 790;
+
/// The resources required for this device conflict with the MCFG table.
pub const BAD_MCFG_TABLE = 791;
+
/// The volume repair could not be performed while it is online. Please schedule to take the volume offline so that it can be repaired.
pub const DISK_REPAIR_REDIRECTED = 792;
+
/// The volume repair was not successful.
pub const DISK_REPAIR_UNSUCCESSFUL = 793;
+
/// One of the volume corruption logs is full. Further corruptions that may be detected won't be logged.
pub const CORRUPT_LOG_OVERFULL = 794;
+
/// One of the volume corruption logs is internally corrupted and needs to be recreated. The volume may contain undetected corruptions and must be scanned.
pub const CORRUPT_LOG_CORRUPTED = 795;
+
/// One of the volume corruption logs is unavailable for being operated on.
pub const CORRUPT_LOG_UNAVAILABLE = 796;
+
/// One of the volume corruption logs was deleted while still having corruption records in them. The volume contains detected corruptions and must be scanned.
pub const CORRUPT_LOG_DELETED_FULL = 797;
+
/// One of the volume corruption logs was cleared by chkdsk and no longer contains real corruptions.
pub const CORRUPT_LOG_CLEARED = 798;
+
/// Orphaned files exist on the volume but could not be recovered because no more new names could be created in the recovery directory. Files must be moved from the recovery directory.
pub const ORPHAN_NAME_EXHAUSTED = 799;
+
/// The oplock that was associated with this handle is now associated with a different handle.
pub const OPLOCK_SWITCHED_TO_NEW_HANDLE = 800;
+
/// An oplock of the requested level cannot be granted. An oplock of a lower level may be available.
pub const CANNOT_GRANT_REQUESTED_OPLOCK = 801;
+
/// The operation did not complete successfully because it would cause an oplock to be broken. The caller has requested that existing oplocks not be broken.
pub const CANNOT_BREAK_OPLOCK = 802;
+
/// The handle with which this oplock was associated has been closed. The oplock is now broken.
pub const OPLOCK_HANDLE_CLOSED = 803;
+
/// The specified access control entry (ACE) does not contain a condition.
pub const NO_ACE_CONDITION = 804;
+
/// The specified access control entry (ACE) contains an invalid condition.
pub const INVALID_ACE_CONDITION = 805;
+
/// Access to the specified file handle has been revoked.
pub const FILE_HANDLE_REVOKED = 806;
+
/// An image file was mapped at a different address from the one specified in the image file but fixups will still be automatically performed on the image.
pub const IMAGE_AT_DIFFERENT_BASE = 807;
+
/// Access to the extended attribute was denied.
pub const EA_ACCESS_DENIED = 994;
+
/// The I/O operation has been aborted because of either a thread exit or an application request.
pub const OPERATION_ABORTED = 995;
+
/// Overlapped I/O event is not in a signaled state.
pub const IO_INCOMPLETE = 996;
+
/// Overlapped I/O operation is in progress.
pub const IO_PENDING = 997;
+
/// Invalid access to memory location.
pub const NOACCESS = 998;
+
/// Error performing inpage operation.
pub const SWAPERROR = 999;
+
/// Recursion too deep; the stack overflowed.
pub const STACK_OVERFLOW = 1001;
+
/// The window cannot act on the sent message.
pub const INVALID_MESSAGE = 1002;
+
/// Cannot complete this function.
pub const CAN_NOT_COMPLETE = 1003;
+
/// Invalid flags.
pub const INVALID_FLAGS = 1004;
+
/// The volume does not contain a recognized file system. Please make sure that all required file system drivers are loaded and that the volume is not corrupted.
pub const UNRECOGNIZED_VOLUME = 1005;
+
/// The volume for a file has been externally altered so that the opened file is no longer valid.
pub const FILE_INVALID = 1006;
+
/// The requested operation cannot be performed in full-screen mode.
pub const FULLSCREEN_MODE = 1007;
+
/// An attempt was made to reference a token that does not exist.
pub const NO_TOKEN = 1008;
+
/// The configuration registry database is corrupt.
pub const BADDB = 1009;
+
/// The configuration registry key is invalid.
pub const BADKEY = 1010;
+
/// The configuration registry key could not be opened.
pub const CANTOPEN = 1011;
+
/// The configuration registry key could not be read.
pub const CANTREAD = 1012;
+
/// The configuration registry key could not be written.
pub const CANTWRITE = 1013;
+
/// One of the files in the registry database had to be recovered by use of a log or alternate copy. The recovery was successful.
pub const REGISTRY_RECOVERED = 1014;
+
/// The registry is corrupted. The structure of one of the files containing registry data is corrupted, or the system's memory image of the file is corrupted, or the file could not be recovered because the alternate copy or log was absent or corrupted.
pub const REGISTRY_CORRUPT = 1015;
+
/// An I/O operation initiated by the registry failed unrecoverably. The registry could not read in, or write out, or flush, one of the files that contain the system's image of the registry.
pub const REGISTRY_IO_FAILED = 1016;
+
/// The system has attempted to load or restore a file into the registry, but the specified file is not in a registry file format.
pub const NOT_REGISTRY_FILE = 1017;
+
/// Illegal operation attempted on a registry key that has been marked for deletion.
pub const KEY_DELETED = 1018;
+
/// System could not allocate the required space in a registry log.
pub const NO_LOG_SPACE = 1019;
+
/// Cannot create a symbolic link in a registry key that already has subkeys or values.
pub const KEY_HAS_CHILDREN = 1020;
+
/// Cannot create a stable subkey under a volatile parent key.
pub const CHILD_MUST_BE_VOLATILE = 1021;
+
/// A notify change request is being completed and the information is not being returned in the caller's buffer. The caller now needs to enumerate the files to find the changes.
pub const NOTIFY_ENUM_DIR = 1022;
+
/// A stop control has been sent to a service that other running services are dependent on.
pub const DEPENDENT_SERVICES_RUNNING = 1051;
+
/// The requested control is not valid for this service.
pub const INVALID_SERVICE_CONTROL = 1052;
+
/// The service did not respond to the start or control request in a timely fashion.
pub const SERVICE_REQUEST_TIMEOUT = 1053;
+
/// A thread could not be created for the service.
pub const SERVICE_NO_THREAD = 1054;
+
/// The service database is locked.
pub const SERVICE_DATABASE_LOCKED = 1055;
+
/// An instance of the service is already running.
pub const SERVICE_ALREADY_RUNNING = 1056;
+
/// The account name is invalid or does not exist, or the password is invalid for the account name specified.
pub const INVALID_SERVICE_ACCOUNT = 1057;
+
/// The service cannot be started, either because it is disabled or because it has no enabled devices associated with it.
pub const SERVICE_DISABLED = 1058;
+
/// Circular service dependency was specified.
pub const CIRCULAR_DEPENDENCY = 1059;
+
/// The specified service does not exist as an installed service.
pub const SERVICE_DOES_NOT_EXIST = 1060;
+
/// The service cannot accept control messages at this time.
pub const SERVICE_CANNOT_ACCEPT_CTRL = 1061;
+
/// The service has not been started.
pub const SERVICE_NOT_ACTIVE = 1062;
+
/// The service process could not connect to the service controller.
pub const FAILED_SERVICE_CONTROLLER_CONNECT = 1063;
+
/// An exception occurred in the service when handling the control request.
pub const EXCEPTION_IN_SERVICE = 1064;
+
/// The database specified does not exist.
pub const DATABASE_DOES_NOT_EXIST = 1065;
+
/// The service has returned a service-specific error code.
pub const SERVICE_SPECIFIC_ERROR = 1066;
+
/// The process terminated unexpectedly.
pub const PROCESS_ABORTED = 1067;
+
/// The dependency service or group failed to start.
pub const SERVICE_DEPENDENCY_FAIL = 1068;
+
/// The service did not start due to a logon failure.
pub const SERVICE_LOGON_FAILED = 1069;
+
/// After starting, the service hung in a start-pending state.
pub const SERVICE_START_HANG = 1070;
+
/// The specified service database lock is invalid.
pub const INVALID_SERVICE_LOCK = 1071;
+
/// The specified service has been marked for deletion.
pub const SERVICE_MARKED_FOR_DELETE = 1072;
+
/// The specified service already exists.
pub const SERVICE_EXISTS = 1073;
+
/// The system is currently running with the last-known-good configuration.
pub const ALREADY_RUNNING_LKG = 1074;
+
/// The dependency service does not exist or has been marked for deletion.
pub const SERVICE_DEPENDENCY_DELETED = 1075;
+
/// The current boot has already been accepted for use as the last-known-good control set.
pub const BOOT_ALREADY_ACCEPTED = 1076;
+
/// No attempts to start the service have been made since the last boot.
pub const SERVICE_NEVER_STARTED = 1077;
+
/// The name is already in use as either a service name or a service display name.
pub const DUPLICATE_SERVICE_NAME = 1078;
+
/// The account specified for this service is different from the account specified for other services running in the same process.
pub const DIFFERENT_SERVICE_ACCOUNT = 1079;
+
/// Failure actions can only be set for Win32 services, not for drivers.
pub const CANNOT_DETECT_DRIVER_FAILURE = 1080;
+
/// This service runs in the same process as the service control manager. Therefore, the service control manager cannot take action if this service's process terminates unexpectedly.
pub const CANNOT_DETECT_PROCESS_ABORT = 1081;
+
/// No recovery program has been configured for this service.
pub const NO_RECOVERY_PROGRAM = 1082;
+
/// The executable program that this service is configured to run in does not implement the service.
pub const SERVICE_NOT_IN_EXE = 1083;
+
/// This service cannot be started in Safe Mode.
pub const NOT_SAFEBOOT_SERVICE = 1084;
+
/// The physical end of the tape has been reached.
pub const END_OF_MEDIA = 1100;
+
/// A tape access reached a filemark.
pub const FILEMARK_DETECTED = 1101;
+
/// The beginning of the tape or a partition was encountered.
pub const BEGINNING_OF_MEDIA = 1102;
+
/// A tape access reached the end of a set of files.
pub const SETMARK_DETECTED = 1103;
+
/// No more data is on the tape.
pub const NO_DATA_DETECTED = 1104;
+
/// Tape could not be partitioned.
pub const PARTITION_FAILURE = 1105;
+
/// When accessing a new tape of a multivolume partition, the current block size is incorrect.
pub const INVALID_BLOCK_LENGTH = 1106;
+
/// Tape partition information could not be found when loading a tape.
pub const DEVICE_NOT_PARTITIONED = 1107;
+
/// Unable to lock the media eject mechanism.
pub const UNABLE_TO_LOCK_MEDIA = 1108;
+
/// Unable to unload the media.
pub const UNABLE_TO_UNLOAD_MEDIA = 1109;
+
/// The media in the drive may have changed.
pub const MEDIA_CHANGED = 1110;
+
/// The I/O bus was reset.
pub const BUS_RESET = 1111;
+
/// No media in drive.
pub const NO_MEDIA_IN_DRIVE = 1112;
+
/// No mapping for the Unicode character exists in the target multi-byte code page.
pub const NO_UNICODE_TRANSLATION = 1113;
+
/// A dynamic link library (DLL) initialization routine failed.
pub const DLL_INIT_FAILED = 1114;
+
/// A system shutdown is in progress.
pub const SHUTDOWN_IN_PROGRESS = 1115;
+
/// Unable to abort the system shutdown because no shutdown was in progress.
pub const NO_SHUTDOWN_IN_PROGRESS = 1116;
+
/// The request could not be performed because of an I/O device error.
pub const IO_DEVICE = 1117;
+
/// No serial device was successfully initialized. The serial driver will unload.
pub const SERIAL_NO_DEVICE = 1118;
+
/// Unable to open a device that was sharing an interrupt request (IRQ) with other devices. At least one other device that uses that IRQ was already opened.
pub const IRQ_BUSY = 1119;
+
/// A serial I/O operation was completed by another write to the serial port. (The IOCTL_SERIAL_XOFF_COUNTER reached zero.)
pub const MORE_WRITES = 1120;
+
/// A serial I/O operation completed because the timeout period expired. (The IOCTL_SERIAL_XOFF_COUNTER did not reach zero.)
pub const COUNTER_TIMEOUT = 1121;
+
/// No ID address mark was found on the floppy disk.
pub const FLOPPY_ID_MARK_NOT_FOUND = 1122;
+
/// Mismatch between the floppy disk sector ID field and the floppy disk controller track address.
pub const FLOPPY_WRONG_CYLINDER = 1123;
+
/// The floppy disk controller reported an error that is not recognized by the floppy disk driver.
pub const FLOPPY_UNKNOWN_ERROR = 1124;
+
/// The floppy disk controller returned inconsistent results in its registers.
pub const FLOPPY_BAD_REGISTERS = 1125;
+
/// While accessing the hard disk, a recalibrate operation failed, even after retries.
pub const DISK_RECALIBRATE_FAILED = 1126;
+
/// While accessing the hard disk, a disk operation failed even after retries.
pub const DISK_OPERATION_FAILED = 1127;
+
/// While accessing the hard disk, a disk controller reset was needed, but even that failed.
pub const DISK_RESET_FAILED = 1128;
+
/// Physical end of tape encountered.
pub const EOM_OVERFLOW = 1129;
+
/// Not enough server storage is available to process this command.
pub const NOT_ENOUGH_SERVER_MEMORY = 1130;
+
/// A potential deadlock condition has been detected.
pub const POSSIBLE_DEADLOCK = 1131;
+
/// The base address or the file offset specified does not have the proper alignment.
pub const MAPPED_ALIGNMENT = 1132;
+
/// An attempt to change the system power state was vetoed by another application or driver.
pub const SET_POWER_STATE_VETOED = 1140;
+
/// The system BIOS failed an attempt to change the system power state.
pub const SET_POWER_STATE_FAILED = 1141;
+
/// An attempt was made to create more links on a file than the file system supports.
pub const TOO_MANY_LINKS = 1142;
+
/// The specified program requires a newer version of Windows.
pub const OLD_WIN_VERSION = 1150;
+
/// The specified program is not a Windows or MS-DOS program.
pub const APP_WRONG_OS = 1151;
+
/// Cannot start more than one instance of the specified program.
pub const SINGLE_INSTANCE_APP = 1152;
+
/// The specified program was written for an earlier version of Windows.
pub const RMODE_APP = 1153;
+
/// One of the library files needed to run this application is damaged.
pub const INVALID_DLL = 1154;
+
/// No application is associated with the specified file for this operation.
pub const NO_ASSOCIATION = 1155;
+
/// An error occurred in sending the command to the application.
pub const DDE_FAIL = 1156;
+
/// One of the library files needed to run this application cannot be found.
pub const DLL_NOT_FOUND = 1157;
+
/// The current process has used all of its system allowance of handles for Window Manager objects.
pub const NO_MORE_USER_HANDLES = 1158;
+
/// The message can be used only with synchronous operations.
pub const MESSAGE_SYNC_ONLY = 1159;
+
/// The indicated source element has no media.
pub const SOURCE_ELEMENT_EMPTY = 1160;
+
/// The indicated destination element already contains media.
pub const DESTINATION_ELEMENT_FULL = 1161;
+
/// The indicated element does not exist.
pub const ILLEGAL_ELEMENT_ADDRESS = 1162;
+
/// The indicated element is part of a magazine that is not present.
pub const MAGAZINE_NOT_PRESENT = 1163;
+
/// The indicated device requires reinitialization due to hardware errors.
pub const DEVICE_REINITIALIZATION_NEEDED = 1164;
+
/// The device has indicated that cleaning is required before further operations are attempted.
pub const DEVICE_REQUIRES_CLEANING = 1165;
+
/// The device has indicated that its door is open.
pub const DEVICE_DOOR_OPEN = 1166;
+
/// The device is not connected.
pub const DEVICE_NOT_CONNECTED = 1167;
+
/// Element not found.
pub const NOT_FOUND = 1168;
+
/// There was no match for the specified key in the index.
pub const NO_MATCH = 1169;
+
/// The property set specified does not exist on the object.
pub const SET_NOT_FOUND = 1170;
+
/// The point passed to GetMouseMovePoints is not in the buffer.
pub const POINT_NOT_FOUND = 1171;
+
/// The tracking (workstation) service is not running.
pub const NO_TRACKING_SERVICE = 1172;
+
/// The Volume ID could not be found.
pub const NO_VOLUME_ID = 1173;
+
/// Unable to remove the file to be replaced.
pub const UNABLE_TO_REMOVE_REPLACED = 1175;
+
/// Unable to move the replacement file to the file to be replaced. The file to be replaced has retained its original name.
pub const UNABLE_TO_MOVE_REPLACEMENT = 1176;
+
/// Unable to move the replacement file to the file to be replaced. The file to be replaced has been renamed using the backup name.
pub const UNABLE_TO_MOVE_REPLACEMENT_2 = 1177;
+
/// The volume change journal is being deleted.
pub const JOURNAL_DELETE_IN_PROGRESS = 1178;
+
/// The volume change journal is not active.
pub const JOURNAL_NOT_ACTIVE = 1179;
+
/// A file was found, but it may not be the correct file.
pub const POTENTIAL_FILE_FOUND = 1180;
+
/// The journal entry has been deleted from the journal.
pub const JOURNAL_ENTRY_DELETED = 1181;
+
/// A system shutdown has already been scheduled.
pub const SHUTDOWN_IS_SCHEDULED = 1190;
+
/// The system shutdown cannot be initiated because there are other users logged on to the computer.
pub const SHUTDOWN_USERS_LOGGED_ON = 1191;
+
/// The specified device name is invalid.
pub const BAD_DEVICE = 1200;
+
/// The device is not currently connected but it is a remembered connection.
pub const CONNECTION_UNAVAIL = 1201;
+
/// The local device name has a remembered connection to another network resource.
pub const DEVICE_ALREADY_REMEMBERED = 1202;
+
/// The network path was either typed incorrectly, does not exist, or the network provider is not currently available. Please try retyping the path or contact your network administrator.
pub const NO_NET_OR_BAD_PATH = 1203;
+
/// The specified network provider name is invalid.
pub const BAD_PROVIDER = 1204;
+
/// Unable to open the network connection profile.
pub const CANNOT_OPEN_PROFILE = 1205;
+
/// The network connection profile is corrupted.
pub const BAD_PROFILE = 1206;
+
/// Cannot enumerate a noncontainer.
pub const NOT_CONTAINER = 1207;
+
/// An extended error has occurred.
pub const EXTENDED_ERROR = 1208;
+
/// The format of the specified group name is invalid.
pub const INVALID_GROUPNAME = 1209;
+
/// The format of the specified computer name is invalid.
pub const INVALID_COMPUTERNAME = 1210;
+
/// The format of the specified event name is invalid.
pub const INVALID_EVENTNAME = 1211;
+
/// The format of the specified domain name is invalid.
pub const INVALID_DOMAINNAME = 1212;
+
/// The format of the specified service name is invalid.
pub const INVALID_SERVICENAME = 1213;
+
/// The format of the specified network name is invalid.
pub const INVALID_NETNAME = 1214;
+
/// The format of the specified share name is invalid.
pub const INVALID_SHARENAME = 1215;
+
/// The format of the specified password is invalid.
pub const INVALID_PASSWORDNAME = 1216;
+
/// The format of the specified message name is invalid.
pub const INVALID_MESSAGENAME = 1217;
+
/// The format of the specified message destination is invalid.
pub const INVALID_MESSAGEDEST = 1218;
+
/// Multiple connections to a server or shared resource by the same user, using more than one user name, are not allowed. Disconnect all previous connections to the server or shared resource and try again.
pub const SESSION_CREDENTIAL_CONFLICT = 1219;
+
/// An attempt was made to establish a session to a network server, but there are already too many sessions established to that server.
pub const REMOTE_SESSION_LIMIT_EXCEEDED = 1220;
+
/// The workgroup or domain name is already in use by another computer on the network.
pub const DUP_DOMAINNAME = 1221;
+
/// The network is not present or not started.
pub const NO_NETWORK = 1222;
+
/// The operation was canceled by the user.
pub const CANCELLED = 1223;
+
/// The requested operation cannot be performed on a file with a user-mapped section open.
pub const USER_MAPPED_FILE = 1224;
+
/// The remote computer refused the network connection.
pub const CONNECTION_REFUSED = 1225;
+
/// The network connection was gracefully closed.
pub const GRACEFUL_DISCONNECT = 1226;
+
/// The network transport endpoint already has an address associated with it.
pub const ADDRESS_ALREADY_ASSOCIATED = 1227;
+
/// An address has not yet been associated with the network endpoint.
pub const ADDRESS_NOT_ASSOCIATED = 1228;
+
/// An operation was attempted on a nonexistent network connection.
pub const CONNECTION_INVALID = 1229;
+
/// An invalid operation was attempted on an active network connection.
pub const CONNECTION_ACTIVE = 1230;
+
/// The network location cannot be reached. For information about network troubleshooting, see Windows Help.
pub const NETWORK_UNREACHABLE = 1231;
+
/// The network location cannot be reached. For information about network troubleshooting, see Windows Help.
pub const HOST_UNREACHABLE = 1232;
+
/// The network location cannot be reached. For information about network troubleshooting, see Windows Help.
pub const PROTOCOL_UNREACHABLE = 1233;
+
/// No service is operating at the destination network endpoint on the remote system.
pub const PORT_UNREACHABLE = 1234;
+
/// The request was aborted.
pub const REQUEST_ABORTED = 1235;
+
/// The network connection was aborted by the local system.
pub const CONNECTION_ABORTED = 1236;
+
/// The operation could not be completed. A retry should be performed.
pub const RETRY = 1237;
+
/// A connection to the server could not be made because the limit on the number of concurrent connections for this account has been reached.
pub const CONNECTION_COUNT_LIMIT = 1238;
+
/// Attempting to log in during an unauthorized time of day for this account.
pub const LOGIN_TIME_RESTRICTION = 1239;
+
/// The account is not authorized to log in from this station.
pub const LOGIN_WKSTA_RESTRICTION = 1240;
+
/// The network address could not be used for the operation requested.
pub const INCORRECT_ADDRESS = 1241;
+
/// The service is already registered.
pub const ALREADY_REGISTERED = 1242;
+
/// The specified service does not exist.
pub const SERVICE_NOT_FOUND = 1243;
+
/// The operation being requested was not performed because the user has not been authenticated.
pub const NOT_AUTHENTICATED = 1244;
+
/// The operation being requested was not performed because the user has not logged on to the network. The specified service does not exist.
pub const NOT_LOGGED_ON = 1245;
+
/// Continue with work in progress.
pub const CONTINUE = 1246;
+
/// An attempt was made to perform an initialization operation when initialization has already been completed.
pub const ALREADY_INITIALIZED = 1247;
+
/// No more local devices.
pub const NO_MORE_DEVICES = 1248;
+
/// The specified site does not exist.
pub const NO_SUCH_SITE = 1249;
+
/// A domain controller with the specified name already exists.
pub const DOMAIN_CONTROLLER_EXISTS = 1250;
+
/// This operation is supported only when you are connected to the server.
pub const ONLY_IF_CONNECTED = 1251;
+
/// The group policy framework should call the extension even if there are no changes.
pub const OVERRIDE_NOCHANGES = 1252;
+
/// The specified user does not have a valid profile.
pub const BAD_USER_PROFILE = 1253;
+
/// This operation is not supported on a computer running Windows Server 2003 for Small Business Server.
pub const NOT_SUPPORTED_ON_SBS = 1254;
+
/// The server machine is shutting down.
pub const SERVER_SHUTDOWN_IN_PROGRESS = 1255;
+
/// The remote system is not available. For information about network troubleshooting, see Windows Help.
pub const HOST_DOWN = 1256;
+
/// The security identifier provided is not from an account domain.
pub const NON_ACCOUNT_SID = 1257;
+
/// The security identifier provided does not have a domain component.
pub const NON_DOMAIN_SID = 1258;
+
/// AppHelp dialog canceled thus preventing the application from starting.
pub const APPHELP_BLOCK = 1259;
+
/// This program is blocked by group policy. For more information, contact your system administrator.
pub const ACCESS_DISABLED_BY_POLICY = 1260;
+
/// A program attempted to use an invalid register value. Normally caused by an uninitialized register. This error is Itanium specific.
pub const REG_NAT_CONSUMPTION = 1261;
+
/// The share is currently offline or does not exist.
pub const CSCSHARE_OFFLINE = 1262;
+
/// The Kerberos protocol encountered an error while validating the KDC certificate during smartcard logon. There is more information in the system event log.
pub const PKINIT_FAILURE = 1263;
+
/// The Kerberos protocol encountered an error while attempting to utilize the smartcard subsystem.
pub const SMARTCARD_SUBSYSTEM_FAILURE = 1264;
+
/// The system cannot contact a domain controller to service the authentication request. Please try again later.
pub const DOWNGRADE_DETECTED = 1265;
+
/// The machine is locked and cannot be shut down without the force option.
pub const MACHINE_LOCKED = 1271;
+
/// An application-defined callback gave invalid data when called.
pub const CALLBACK_SUPPLIED_INVALID_DATA = 1273;
+
/// The group policy framework should call the extension in the synchronous foreground policy refresh.
pub const SYNC_FOREGROUND_REFRESH_REQUIRED = 1274;
+
/// This driver has been blocked from loading.
pub const DRIVER_BLOCKED = 1275;
+
/// A dynamic link library (DLL) referenced a module that was neither a DLL nor the process's executable image.
pub const INVALID_IMPORT_OF_NON_DLL = 1276;
+
/// Windows cannot open this program since it has been disabled.
pub const ACCESS_DISABLED_WEBBLADE = 1277;
+
/// Windows cannot open this program because the license enforcement system has been tampered with or become corrupted.
pub const ACCESS_DISABLED_WEBBLADE_TAMPER = 1278;
+
/// A transaction recovery failed.
pub const RECOVERY_FAILURE = 1279;
+
/// The current thread has already been converted to a fiber.
pub const ALREADY_FIBER = 1280;
+
/// The current thread has already been converted from a fiber.
pub const ALREADY_THREAD = 1281;
+
/// The system detected an overrun of a stack-based buffer in this application. This overrun could potentially allow a malicious user to gain control of this application.
pub const STACK_BUFFER_OVERRUN = 1282;
+
/// Data present in one of the parameters is more than the function can operate on.
pub const PARAMETER_QUOTA_EXCEEDED = 1283;
+
/// An attempt to do an operation on a debug object failed because the object is in the process of being deleted.
pub const DEBUGGER_INACTIVE = 1284;
+
/// An attempt to delay-load a .dll or get a function address in a delay-loaded .dll failed.
pub const DELAY_LOAD_FAILED = 1285;
+
/// %1 is a 16-bit application. You do not have permissions to execute 16-bit applications. Check your permissions with your system administrator.
pub const VDM_DISALLOWED = 1286;
+
/// Insufficient information exists to identify the cause of failure.
pub const UNIDENTIFIED_ERROR = 1287;
+
/// The parameter passed to a C runtime function is incorrect.
pub const INVALID_CRUNTIME_PARAMETER = 1288;
+
/// The operation occurred beyond the valid data length of the file.
pub const BEYOND_VDL = 1289;
+
/// The service start failed since one or more services in the same process have an incompatible service SID type setting. A service with restricted service SID type can only coexist in the same process with other services with a restricted SID type. If the service SID type for this service was just configured, the hosting process must be restarted in order to start this service.
/// On Windows Server 2003 and Windows XP, an unrestricted service cannot coexist in the same process with other services. The service with the unrestricted service SID type must be moved to an owned process in order to start this service.
pub const INCOMPATIBLE_SERVICE_SID_TYPE = 1290;
+
/// The process hosting the driver for this device has been terminated.
pub const DRIVER_PROCESS_TERMINATED = 1291;
+
/// An operation attempted to exceed an implementation-defined limit.
pub const IMPLEMENTATION_LIMIT = 1292;
+
/// Either the target process, or the target thread's containing process, is a protected process.
pub const PROCESS_IS_PROTECTED = 1293;
+
/// The service notification client is lagging too far behind the current state of services in the machine.
pub const SERVICE_NOTIFY_CLIENT_LAGGING = 1294;
+
/// The requested file operation failed because the storage quota was exceeded. To free up disk space, move files to a different location or delete unnecessary files. For more information, contact your system administrator.
pub const DISK_QUOTA_EXCEEDED = 1295;
+
/// The requested file operation failed because the storage policy blocks that type of file. For more information, contact your system administrator.
pub const CONTENT_BLOCKED = 1296;
+
/// A privilege that the service requires to function properly does not exist in the service account configuration. You may use the Services Microsoft Management Console (MMC) snap-in (services.msc) and the Local Security Settings MMC snap-in (secpol.msc) to view the service configuration and the account configuration.
pub const INCOMPATIBLE_SERVICE_PRIVILEGE = 1297;
+
/// A thread involved in this operation appears to be unresponsive.
pub const APP_HANG = 1298;
+
/// Indicates a particular Security ID may not be assigned as the label of an object.
pub const INVALID_LABEL = 1299;
+
/// Not all privileges or groups referenced are assigned to the caller.
pub const NOT_ALL_ASSIGNED = 1300;
+
/// Some mapping between account names and security IDs was not done.
pub const SOME_NOT_MAPPED = 1301;
+
/// No system quota limits are specifically set for this account.
pub const NO_QUOTAS_FOR_ACCOUNT = 1302;
+
/// No encryption key is available. A well-known encryption key was returned.
pub const LOCAL_USER_SESSION_KEY = 1303;
+
/// The password is too complex to be converted to a LAN Manager password. The LAN Manager password returned is a NULL string.
pub const NULL_LM_PASSWORD = 1304;
+
/// The revision level is unknown.
pub const UNKNOWN_REVISION = 1305;
+
/// Indicates two revision levels are incompatible.
pub const REVISION_MISMATCH = 1306;
+
/// This security ID may not be assigned as the owner of this object.
pub const INVALID_OWNER = 1307;
+
/// This security ID may not be assigned as the primary group of an object.
pub const INVALID_PRIMARY_GROUP = 1308;
+
/// An attempt has been made to operate on an impersonation token by a thread that is not currently impersonating a client.
pub const NO_IMPERSONATION_TOKEN = 1309;
+
/// The group may not be disabled.
pub const CANT_DISABLE_MANDATORY = 1310;
+
/// There are currently no logon servers available to service the logon request.
pub const NO_LOGON_SERVERS = 1311;
+
/// A specified logon session does not exist. It may already have been terminated.
pub const NO_SUCH_LOGON_SESSION = 1312;
+
/// A specified privilege does not exist.
pub const NO_SUCH_PRIVILEGE = 1313;
+
/// A required privilege is not held by the client.
pub const PRIVILEGE_NOT_HELD = 1314;
+
/// The name provided is not a properly formed account name.
pub const INVALID_ACCOUNT_NAME = 1315;
+
/// The specified account already exists.
pub const USER_EXISTS = 1316;
+
/// The specified account does not exist.
pub const NO_SUCH_USER = 1317;
+
/// The specified group already exists.
pub const GROUP_EXISTS = 1318;
+
/// The specified group does not exist.
pub const NO_SUCH_GROUP = 1319;
+
/// Either the specified user account is already a member of the specified group, or the specified group cannot be deleted because it contains a member.
pub const MEMBER_IN_GROUP = 1320;
+
/// The specified user account is not a member of the specified group account.
pub const MEMBER_NOT_IN_GROUP = 1321;
+
/// This operation is disallowed as it could result in an administration account being disabled, deleted or unable to log on.
pub const LAST_ADMIN = 1322;
+
/// Unable to update the password. The value provided as the current password is incorrect.
pub const WRONG_PASSWORD = 1323;
+
/// Unable to update the password. The value provided for the new password contains values that are not allowed in passwords.
pub const ILL_FORMED_PASSWORD = 1324;
+
/// Unable to update the password. The value provided for the new password does not meet the length, complexity, or history requirements of the domain.
pub const PASSWORD_RESTRICTION = 1325;
+
/// The user name or password is incorrect.
pub const LOGON_FAILURE = 1326;
+
/// Account restrictions are preventing this user from signing in. For example: blank passwords aren't allowed, sign-in times are limited, or a policy restriction has been enforced.
pub const ACCOUNT_RESTRICTION = 1327;
+
/// Your account has time restrictions that keep you from signing in right now.
pub const INVALID_LOGON_HOURS = 1328;
+
/// This user isn't allowed to sign in to this computer.
pub const INVALID_WORKSTATION = 1329;
+
/// The password for this account has expired.
pub const PASSWORD_EXPIRED = 1330;
+
/// This user can't sign in because this account is currently disabled.
pub const ACCOUNT_DISABLED = 1331;
+
/// No mapping between account names and security IDs was done.
pub const NONE_MAPPED = 1332;
+
/// Too many local user identifiers (LUIDs) were requested at one time.
pub const TOO_MANY_LUIDS_REQUESTED = 1333;
+
/// No more local user identifiers (LUIDs) are available.
pub const LUIDS_EXHAUSTED = 1334;
+
/// The subauthority part of a security ID is invalid for this particular use.
pub const INVALID_SUB_AUTHORITY = 1335;
+
/// The access control list (ACL) structure is invalid.
pub const INVALID_ACL = 1336;
+
/// The security ID structure is invalid.
pub const INVALID_SID = 1337;
+
/// The security descriptor structure is invalid.
pub const INVALID_SECURITY_DESCR = 1338;
+
/// The inherited access control list (ACL) or access control entry (ACE) could not be built.
pub const BAD_INHERITANCE_ACL = 1340;
+
/// The server is currently disabled.
pub const SERVER_DISABLED = 1341;
+
/// The server is currently enabled.
pub const SERVER_NOT_DISABLED = 1342;
+
/// The value provided was an invalid value for an identifier authority.
pub const INVALID_ID_AUTHORITY = 1343;
+
/// No more memory is available for security information updates.
pub const ALLOTTED_SPACE_EXCEEDED = 1344;
+
/// The specified attributes are invalid, or incompatible with the attributes for the group as a whole.
pub const INVALID_GROUP_ATTRIBUTES = 1345;
+
/// Either a required impersonation level was not provided, or the provided impersonation level is invalid.
pub const BAD_IMPERSONATION_LEVEL = 1346;
+
/// Cannot open an anonymous level security token.
pub const CANT_OPEN_ANONYMOUS = 1347;
+
/// The validation information class requested was invalid.
pub const BAD_VALIDATION_CLASS = 1348;
+
/// The type of the token is inappropriate for its attempted use.
pub const BAD_TOKEN_TYPE = 1349;
+
/// Unable to perform a security operation on an object that has no associated security.
pub const NO_SECURITY_ON_OBJECT = 1350;
+
/// Configuration information could not be read from the domain controller, either because the machine is unavailable, or access has been denied.
pub const CANT_ACCESS_DOMAIN_INFO = 1351;
+
/// The security account manager (SAM) or local security authority (LSA) server was in the wrong state to perform the security operation.
pub const INVALID_SERVER_STATE = 1352;
+
/// The domain was in the wrong state to perform the security operation.
pub const INVALID_DOMAIN_STATE = 1353;
+
/// This operation is only allowed for the Primary Domain Controller of the domain.
pub const INVALID_DOMAIN_ROLE = 1354;
+
/// The specified domain either does not exist or could not be contacted.
pub const NO_SUCH_DOMAIN = 1355;
+
/// The specified domain already exists.
pub const DOMAIN_EXISTS = 1356;
+
/// An attempt was made to exceed the limit on the number of domains per server.
pub const DOMAIN_LIMIT_EXCEEDED = 1357;
+
/// Unable to complete the requested operation because of either a catastrophic media failure or a data structure corruption on the disk.
pub const INTERNAL_DB_CORRUPTION = 1358;
+
/// An internal error occurred.
pub const INTERNAL_ERROR = 1359;
+
/// Generic access types were contained in an access mask which should already be mapped to nongeneric types.
pub const GENERIC_NOT_MAPPED = 1360;
+
/// A security descriptor is not in the right format (absolute or self-relative).
pub const BAD_DESCRIPTOR_FORMAT = 1361;
+
/// The requested action is restricted for use by logon processes only. The calling process has not registered as a logon process.
pub const NOT_LOGON_PROCESS = 1362;
+
/// Cannot start a new logon session with an ID that is already in use.
pub const LOGON_SESSION_EXISTS = 1363;
+
/// A specified authentication package is unknown.
pub const NO_SUCH_PACKAGE = 1364;
+
/// The logon session is not in a state that is consistent with the requested operation.
pub const BAD_LOGON_SESSION_STATE = 1365;
+
/// The logon session ID is already in use.
pub const LOGON_SESSION_COLLISION = 1366;
+
/// A logon request contained an invalid logon type value.
pub const INVALID_LOGON_TYPE = 1367;
+
/// Unable to impersonate using a named pipe until data has been read from that pipe.
pub const CANNOT_IMPERSONATE = 1368;
+
/// The transaction state of a registry subtree is incompatible with the requested operation.
pub const RXACT_INVALID_STATE = 1369;
+
/// An internal security database corruption has been encountered.
pub const RXACT_COMMIT_FAILURE = 1370;
+
/// Cannot perform this operation on built-in accounts.
pub const SPECIAL_ACCOUNT = 1371;
+
/// Cannot perform this operation on this built-in special group.
pub const SPECIAL_GROUP = 1372;
+
/// Cannot perform this operation on this built-in special user.
pub const SPECIAL_USER = 1373;
+
/// The user cannot be removed from a group because the group is currently the user's primary group.
pub const MEMBERS_PRIMARY_GROUP = 1374;
+
/// The token is already in use as a primary token.
pub const TOKEN_ALREADY_IN_USE = 1375;
+
/// The specified local group does not exist.
pub const NO_SUCH_ALIAS = 1376;
+
/// The specified account name is not a member of the group.
pub const MEMBER_NOT_IN_ALIAS = 1377;
+
/// The specified account name is already a member of the group.
pub const MEMBER_IN_ALIAS = 1378;
+
/// The specified local group already exists.
pub const ALIAS_EXISTS = 1379;
+
/// Logon failure: the user has not been granted the requested logon type at this computer.
pub const LOGON_NOT_GRANTED = 1380;
+
/// The maximum number of secrets that may be stored in a single system has been exceeded.
pub const TOO_MANY_SECRETS = 1381;
+
/// The length of a secret exceeds the maximum length allowed.
pub const SECRET_TOO_LONG = 1382;
+
/// The local security authority database contains an internal inconsistency.
pub const INTERNAL_DB_ERROR = 1383;
+
/// During a logon attempt, the user's security context accumulated too many security IDs.
pub const TOO_MANY_CONTEXT_IDS = 1384;
+
/// Logon failure: the user has not been granted the requested logon type at this computer.
pub const LOGON_TYPE_NOT_GRANTED = 1385;
+
/// A cross-encrypted password is necessary to change a user password.
pub const NT_CROSS_ENCRYPTION_REQUIRED = 1386;
+
/// A member could not be added to or removed from the local group because the member does not exist.
pub const NO_SUCH_MEMBER = 1387;
+
/// A new member could not be added to a local group because the member has the wrong account type.
pub const INVALID_MEMBER = 1388;
+
/// Too many security IDs have been specified.
pub const TOO_MANY_SIDS = 1389;
+
/// A cross-encrypted password is necessary to change this user password.
pub const LM_CROSS_ENCRYPTION_REQUIRED = 1390;
+
/// Indicates an ACL contains no inheritable components.
pub const NO_INHERITANCE = 1391;
+
/// The file or directory is corrupted and unreadable.
pub const FILE_CORRUPT = 1392;
+
/// The disk structure is corrupted and unreadable.
pub const DISK_CORRUPT = 1393;
+
/// There is no user session key for the specified logon session.
pub const NO_USER_SESSION_KEY = 1394;
+
/// The service being accessed is licensed for a particular number of connections. No more connections can be made to the service at this time because there are already as many connections as the service can accept.
pub const LICENSE_QUOTA_EXCEEDED = 1395;
+
/// The target account name is incorrect.
pub const WRONG_TARGET_NAME = 1396;
+
/// Mutual Authentication failed. The server's password is out of date at the domain controller.
pub const MUTUAL_AUTH_FAILED = 1397;
+
/// There is a time and/or date difference between the client and server.
pub const TIME_SKEW = 1398;
+
/// This operation cannot be performed on the current domain.
pub const CURRENT_DOMAIN_NOT_ALLOWED = 1399;
+
/// Invalid window handle.
pub const INVALID_WINDOW_HANDLE = 1400;
+
/// Invalid menu handle.
pub const INVALID_MENU_HANDLE = 1401;
+
/// Invalid cursor handle.
pub const INVALID_CURSOR_HANDLE = 1402;
+
/// Invalid accelerator table handle.
pub const INVALID_ACCEL_HANDLE = 1403;
+
/// Invalid hook handle.
pub const INVALID_HOOK_HANDLE = 1404;
+
/// Invalid handle to a multiple-window position structure.
pub const INVALID_DWP_HANDLE = 1405;
+
/// Cannot create a top-level child window.
pub const TLW_WITH_WSCHILD = 1406;
+
/// Cannot find window class.
pub const CANNOT_FIND_WND_CLASS = 1407;
+
/// Invalid window; it belongs to other thread.
pub const WINDOW_OF_OTHER_THREAD = 1408;
+
/// Hot key is already registered.
pub const HOTKEY_ALREADY_REGISTERED = 1409;
+
/// Class already exists.
pub const CLASS_ALREADY_EXISTS = 1410;
+
/// Class does not exist.
pub const CLASS_DOES_NOT_EXIST = 1411;
+
/// Class still has open windows.
pub const CLASS_HAS_WINDOWS = 1412;
+
/// Invalid index.
pub const INVALID_INDEX = 1413;
+
/// Invalid icon handle.
pub const INVALID_ICON_HANDLE = 1414;
+
/// Using private DIALOG window words.
pub const PRIVATE_DIALOG_INDEX = 1415;
+
/// The list box identifier was not found.
pub const LISTBOX_ID_NOT_FOUND = 1416;
+
/// No wildcards were found.
pub const NO_WILDCARD_CHARACTERS = 1417;
+
/// Thread does not have a clipboard open.
pub const CLIPBOARD_NOT_OPEN = 1418;
+
/// Hot key is not registered.
pub const HOTKEY_NOT_REGISTERED = 1419;
+
/// The window is not a valid dialog window.
pub const WINDOW_NOT_DIALOG = 1420;
+
/// Control ID not found.
pub const CONTROL_ID_NOT_FOUND = 1421;
+
/// Invalid message for a combo box because it does not have an edit control.
pub const INVALID_COMBOBOX_MESSAGE = 1422;
+
/// The window is not a combo box.
pub const WINDOW_NOT_COMBOBOX = 1423;
+
/// Height must be less than 256.
pub const INVALID_EDIT_HEIGHT = 1424;
+
/// Invalid device context (DC) handle.
pub const DC_NOT_FOUND = 1425;
+
/// Invalid hook procedure type.
pub const INVALID_HOOK_FILTER = 1426;
+
/// Invalid hook procedure.
pub const INVALID_FILTER_PROC = 1427;
+
/// Cannot set nonlocal hook without a module handle.
pub const HOOK_NEEDS_HMOD = 1428;
+
/// This hook procedure can only be set globally.
pub const GLOBAL_ONLY_HOOK = 1429;
+
/// The journal hook procedure is already installed.
pub const JOURNAL_HOOK_SET = 1430;
+
/// The hook procedure is not installed.
pub const HOOK_NOT_INSTALLED = 1431;
+
/// Invalid message for single-selection list box.
pub const INVALID_LB_MESSAGE = 1432;
+
/// LB_SETCOUNT sent to non-lazy list box.
pub const SETCOUNT_ON_BAD_LB = 1433;
+
/// This list box does not support tab stops.
pub const LB_WITHOUT_TABSTOPS = 1434;
+
/// Cannot destroy object created by another thread.
pub const DESTROY_OBJECT_OF_OTHER_THREAD = 1435;
+
/// Child windows cannot have menus.
pub const CHILD_WINDOW_MENU = 1436;
+
/// The window does not have a system menu.
pub const NO_SYSTEM_MENU = 1437;
+
/// Invalid message box style.
pub const INVALID_MSGBOX_STYLE = 1438;
+
/// Invalid system-wide (SPI_*) parameter.
pub const INVALID_SPI_VALUE = 1439;
+
/// Screen already locked.
pub const SCREEN_ALREADY_LOCKED = 1440;
+
/// All handles to windows in a multiple-window position structure must have the same parent.
pub const HWNDS_HAVE_DIFF_PARENT = 1441;
+
/// The window is not a child window.
pub const NOT_CHILD_WINDOW = 1442;
+
/// Invalid GW_* command.
pub const INVALID_GW_COMMAND = 1443;
+
/// Invalid thread identifier.
pub const INVALID_THREAD_ID = 1444;
+
/// Cannot process a message from a window that is not a multiple document interface (MDI) window.
pub const NON_MDICHILD_WINDOW = 1445;
+
/// Popup menu already active.
pub const POPUP_ALREADY_ACTIVE = 1446;
+
/// The window does not have scroll bars.
pub const NO_SCROLLBARS = 1447;
+
/// Scroll bar range cannot be greater than MAXLONG.
pub const INVALID_SCROLLBAR_RANGE = 1448;
+
/// Cannot show or remove the window in the way specified.
pub const INVALID_SHOWWIN_COMMAND = 1449;
+
/// Insufficient system resources exist to complete the requested service.
pub const NO_SYSTEM_RESOURCES = 1450;
+
/// Insufficient system resources exist to complete the requested service.
pub const NONPAGED_SYSTEM_RESOURCES = 1451;
+
/// Insufficient system resources exist to complete the requested service.
pub const PAGED_SYSTEM_RESOURCES = 1452;
+
/// Insufficient quota to complete the requested service.
pub const WORKING_SET_QUOTA = 1453;
+
/// Insufficient quota to complete the requested service.
pub const PAGEFILE_QUOTA = 1454;
+
/// The paging file is too small for this operation to complete.
pub const COMMITMENT_LIMIT = 1455;
+
/// A menu item was not found.
pub const MENU_ITEM_NOT_FOUND = 1456;
+
/// Invalid keyboard layout handle.
pub const INVALID_KEYBOARD_HANDLE = 1457;
+
/// Hook type not allowed.
pub const HOOK_TYPE_NOT_ALLOWED = 1458;
+
/// This operation requires an interactive window station.
pub const REQUIRES_INTERACTIVE_WINDOWSTATION = 1459;
+
/// This operation returned because the timeout period expired.
pub const TIMEOUT = 1460;
+
/// Invalid monitor handle.
pub const INVALID_MONITOR_HANDLE = 1461;
+
/// Incorrect size argument.
pub const INCORRECT_SIZE = 1462;
+
/// The symbolic link cannot be followed because its type is disabled.
pub const SYMLINK_CLASS_DISABLED = 1463;
+
/// This application does not support the current operation on symbolic links.
pub const SYMLINK_NOT_SUPPORTED = 1464;
+
/// Windows was unable to parse the requested XML data.
pub const XML_PARSE_ERROR = 1465;
+
/// An error was encountered while processing an XML digital signature.
pub const XMLDSIG_ERROR = 1466;
+
/// This application must be restarted.
pub const RESTART_APPLICATION = 1467;
+
/// The caller made the connection request in the wrong routing compartment.
pub const WRONG_COMPARTMENT = 1468;
+
/// There was an AuthIP failure when attempting to connect to the remote host.
pub const AUTHIP_FAILURE = 1469;
+
/// Insufficient NVRAM resources exist to complete the requested service. A reboot might be required.
pub const NO_NVRAM_RESOURCES = 1470;
+
/// Unable to finish the requested operation because the specified process is not a GUI process.
pub const NOT_GUI_PROCESS = 1471;
+
/// The event log file is corrupted.
pub const EVENTLOG_FILE_CORRUPT = 1500;
+
/// No event log file could be opened, so the event logging service did not start.
pub const EVENTLOG_CANT_START = 1501;
+
/// The event log file is full.
pub const LOG_FILE_FULL = 1502;
+
/// The event log file has changed between read operations.
pub const EVENTLOG_FILE_CHANGED = 1503;
+
/// The specified task name is invalid.
pub const INVALID_TASK_NAME = 1550;
+
/// The specified task index is invalid.
pub const INVALID_TASK_INDEX = 1551;
+
/// The specified thread is already joining a task.
pub const THREAD_ALREADY_IN_TASK = 1552;
+
/// The Windows Installer Service could not be accessed. This can occur if the Windows Installer is not correctly installed. Contact your support personnel for assistance.
pub const INSTALL_SERVICE_FAILURE = 1601;
+
/// User cancelled installation.
pub const INSTALL_USEREXIT = 1602;
+
/// Fatal error during installation.
pub const INSTALL_FAILURE = 1603;
+
/// Installation suspended, incomplete.
pub const INSTALL_SUSPEND = 1604;
+
/// This action is only valid for products that are currently installed.
pub const UNKNOWN_PRODUCT = 1605;
+
/// Feature ID not registered.
pub const UNKNOWN_FEATURE = 1606;
+
/// Component ID not registered.
pub const UNKNOWN_COMPONENT = 1607;
+
/// Unknown property.
pub const UNKNOWN_PROPERTY = 1608;
+
/// Handle is in an invalid state.
pub const INVALID_HANDLE_STATE = 1609;
+
/// The configuration data for this product is corrupt. Contact your support personnel.
pub const BAD_CONFIGURATION = 1610;
+
/// Component qualifier not present.
pub const INDEX_ABSENT = 1611;
+
/// The installation source for this product is not available. Verify that the source exists and that you can access it.
pub const INSTALL_SOURCE_ABSENT = 1612;
+
/// This installation package cannot be installed by the Windows Installer service. You must install a Windows service pack that contains a newer version of the Windows Installer service.
pub const INSTALL_PACKAGE_VERSION = 1613;
+
/// Product is uninstalled.
pub const PRODUCT_UNINSTALLED = 1614;
+
/// SQL query syntax invalid or unsupported.
pub const BAD_QUERY_SYNTAX = 1615;
+
/// Record field does not exist.
pub const INVALID_FIELD = 1616;
+
/// The device has been removed.
pub const DEVICE_REMOVED = 1617;
+
/// Another installation is already in progress. Complete that installation before proceeding with this install.
pub const INSTALL_ALREADY_RUNNING = 1618;
+
/// This installation package could not be opened. Verify that the package exists and that you can access it, or contact the application vendor to verify that this is a valid Windows Installer package.
pub const INSTALL_PACKAGE_OPEN_FAILED = 1619;
+
/// This installation package could not be opened. Contact the application vendor to verify that this is a valid Windows Installer package.
pub const INSTALL_PACKAGE_INVALID = 1620;
+
/// There was an error starting the Windows Installer service user interface. Contact your support personnel.
pub const INSTALL_UI_FAILURE = 1621;
+
/// Error opening installation log file. Verify that the specified log file location exists and that you can write to it.
pub const INSTALL_LOG_FAILURE = 1622;
+
/// The language of this installation package is not supported by your system.
pub const INSTALL_LANGUAGE_UNSUPPORTED = 1623;
+
/// Error applying transforms. Verify that the specified transform paths are valid.
pub const INSTALL_TRANSFORM_FAILURE = 1624;
+
/// This installation is forbidden by system policy. Contact your system administrator.
pub const INSTALL_PACKAGE_REJECTED = 1625;
+
/// Function could not be executed.
pub const FUNCTION_NOT_CALLED = 1626;
+
/// Function failed during execution.
pub const FUNCTION_FAILED = 1627;
+
/// Invalid or unknown table specified.
pub const INVALID_TABLE = 1628;
+
/// Data supplied is of wrong type.
pub const DATATYPE_MISMATCH = 1629;
+
/// Data of this type is not supported.
pub const UNSUPPORTED_TYPE = 1630;
+
/// The Windows Installer service failed to start. Contact your support personnel.
pub const CREATE_FAILED = 1631;
+
/// The Temp folder is on a drive that is full or is inaccessible. Free up space on the drive or verify that you have write permission on the Temp folder.
pub const INSTALL_TEMP_UNWRITABLE = 1632;
+
/// This installation package is not supported by this processor type. Contact your product vendor.
pub const INSTALL_PLATFORM_UNSUPPORTED = 1633;
+
/// Component not used on this computer.
pub const INSTALL_NOTUSED = 1634;
+
/// This update package could not be opened. Verify that the update package exists and that you can access it, or contact the application vendor to verify that this is a valid Windows Installer update package.
pub const PATCH_PACKAGE_OPEN_FAILED = 1635;
+
/// This update package could not be opened. Contact the application vendor to verify that this is a valid Windows Installer update package.
pub const PATCH_PACKAGE_INVALID = 1636;
+
/// This update package cannot be processed by the Windows Installer service. You must install a Windows service pack that contains a newer version of the Windows Installer service.
pub const PATCH_PACKAGE_UNSUPPORTED = 1637;
+
/// Another version of this product is already installed. Installation of this version cannot continue. To configure or remove the existing version of this product, use Add/Remove Programs on the Control Panel.
pub const PRODUCT_VERSION = 1638;
+
/// Invalid command line argument. Consult the Windows Installer SDK for detailed command line help.
pub const INVALID_COMMAND_LINE = 1639;
+
/// Only administrators have permission to add, remove, or configure server software during a Terminal services remote session. If you want to install or configure software on the server, contact your network administrator.
pub const INSTALL_REMOTE_DISALLOWED = 1640;
+
/// The requested operation completed successfully. The system will be restarted so the changes can take effect.
pub const SUCCESS_REBOOT_INITIATED = 1641;
+
/// The upgrade cannot be installed by the Windows Installer service because the program to be upgraded may be missing, or the upgrade may update a different version of the program. Verify that the program to be upgraded exists on your computer and that you have the correct upgrade.
pub const PATCH_TARGET_NOT_FOUND = 1642;
+
/// The update package is not permitted by software restriction policy.
pub const PATCH_PACKAGE_REJECTED = 1643;
+
/// One or more customizations are not permitted by software restriction policy.
pub const INSTALL_TRANSFORM_REJECTED = 1644;
+
/// The Windows Installer does not permit installation from a Remote Desktop Connection.
pub const INSTALL_REMOTE_PROHIBITED = 1645;
+
/// Uninstallation of the update package is not supported.
pub const PATCH_REMOVAL_UNSUPPORTED = 1646;
+
/// The update is not applied to this product.
pub const UNKNOWN_PATCH = 1647;
+
/// No valid sequence could be found for the set of updates.
pub const PATCH_NO_SEQUENCE = 1648;
+
/// Update removal was disallowed by policy.
pub const PATCH_REMOVAL_DISALLOWED = 1649;
+
/// The XML update data is invalid.
pub const INVALID_PATCH_XML = 1650;
+
/// Windows Installer does not permit updating of managed advertised products. At least one feature of the product must be installed before applying the update.
pub const PATCH_MANAGED_ADVERTISED_PRODUCT = 1651;
+
/// The Windows Installer service is not accessible in Safe Mode. Please try again when your computer is not in Safe Mode or you can use System Restore to return your machine to a previous good state.
pub const INSTALL_SERVICE_SAFEBOOT = 1652;
+
/// A fail fast exception occurred. Exception handlers will not be invoked and the process will be terminated immediately.
pub const FAIL_FAST_EXCEPTION = 1653;
+
/// The app that you are trying to run is not supported on this version of Windows.
pub const INSTALL_REJECTED = 1654;
+
/// The string binding is invalid.
pub const RPC_S_INVALID_STRING_BINDING = 1700;
+
/// The binding handle is not the correct type.
pub const RPC_S_WRONG_KIND_OF_BINDING = 1701;
+
/// The binding handle is invalid.
pub const RPC_S_INVALID_BINDING = 1702;
+
/// The RPC protocol sequence is not supported.
pub const RPC_S_PROTSEQ_NOT_SUPPORTED = 1703;
+
/// The RPC protocol sequence is invalid.
pub const RPC_S_INVALID_RPC_PROTSEQ = 1704;
+
/// The string universal unique identifier (UUID) is invalid.
pub const RPC_S_INVALID_STRING_UUID = 1705;
+
/// The endpoint format is invalid.
pub const RPC_S_INVALID_ENDPOINT_FORMAT = 1706;
+
/// The network address is invalid.
pub const RPC_S_INVALID_NET_ADDR = 1707;
+
/// No endpoint was found.
pub const RPC_S_NO_ENDPOINT_FOUND = 1708;
+
/// The timeout value is invalid.
pub const RPC_S_INVALID_TIMEOUT = 1709;
+
/// The object universal unique identifier (UUID) was not found.
pub const RPC_S_OBJECT_NOT_FOUND = 1710;
+
/// The object universal unique identifier (UUID) has already been registered.
pub const RPC_S_ALREADY_REGISTERED = 1711;
+
/// The type universal unique identifier (UUID) has already been registered.
pub const RPC_S_TYPE_ALREADY_REGISTERED = 1712;
+
/// The RPC server is already listening.
pub const RPC_S_ALREADY_LISTENING = 1713;
+
/// No protocol sequences have been registered.
pub const RPC_S_NO_PROTSEQS_REGISTERED = 1714;
+
/// The RPC server is not listening.
pub const RPC_S_NOT_LISTENING = 1715;
+
/// The manager type is unknown.
pub const RPC_S_UNKNOWN_MGR_TYPE = 1716;
+
/// The interface is unknown.
pub const RPC_S_UNKNOWN_IF = 1717;
+
/// There are no bindings.
pub const RPC_S_NO_BINDINGS = 1718;
+
/// There are no protocol sequences.
pub const RPC_S_NO_PROTSEQS = 1719;
+
/// The endpoint cannot be created.
pub const RPC_S_CANT_CREATE_ENDPOINT = 1720;
+
/// Not enough resources are available to complete this operation.
pub const RPC_S_OUT_OF_RESOURCES = 1721;
+
/// The RPC server is unavailable.
pub const RPC_S_SERVER_UNAVAILABLE = 1722;
+
/// The RPC server is too busy to complete this operation.
pub const RPC_S_SERVER_TOO_BUSY = 1723;
+
/// The network options are invalid.
pub const RPC_S_INVALID_NETWORK_OPTIONS = 1724;
+
/// There are no remote procedure calls active on this thread.
pub const RPC_S_NO_CALL_ACTIVE = 1725;
+
/// The remote procedure call failed.
pub const RPC_S_CALL_FAILED = 1726;
+
/// The remote procedure call failed and did not execute.
pub const RPC_S_CALL_FAILED_DNE = 1727;
+
/// A remote procedure call (RPC) protocol error occurred.
pub const RPC_S_PROTOCOL_ERROR = 1728;
+
/// Access to the HTTP proxy is denied.
pub const RPC_S_PROXY_ACCESS_DENIED = 1729;
+
/// The transfer syntax is not supported by the RPC server.
pub const RPC_S_UNSUPPORTED_TRANS_SYN = 1730;
+
/// The universal unique identifier (UUID) type is not supported.
pub const RPC_S_UNSUPPORTED_TYPE = 1732;
+
/// The tag is invalid.
pub const RPC_S_INVALID_TAG = 1733;
+
/// The array bounds are invalid.
pub const RPC_S_INVALID_BOUND = 1734;
+
/// The binding does not contain an entry name.
pub const RPC_S_NO_ENTRY_NAME = 1735;
+
/// The name syntax is invalid.
pub const RPC_S_INVALID_NAME_SYNTAX = 1736;
+
/// The name syntax is not supported.
pub const RPC_S_UNSUPPORTED_NAME_SYNTAX = 1737;
+
/// No network address is available to use to construct a universal unique identifier (UUID).
pub const RPC_S_UUID_NO_ADDRESS = 1739;
+
/// The endpoint is a duplicate.
pub const RPC_S_DUPLICATE_ENDPOINT = 1740;
+
/// The authentication type is unknown.
pub const RPC_S_UNKNOWN_AUTHN_TYPE = 1741;
+
/// The maximum number of calls is too small.
pub const RPC_S_MAX_CALLS_TOO_SMALL = 1742;
+
/// The string is too long.
pub const RPC_S_STRING_TOO_LONG = 1743;
+
/// The RPC protocol sequence was not found.
pub const RPC_S_PROTSEQ_NOT_FOUND = 1744;
+
/// The procedure number is out of range.
pub const RPC_S_PROCNUM_OUT_OF_RANGE = 1745;
+
/// The binding does not contain any authentication information.
pub const RPC_S_BINDING_HAS_NO_AUTH = 1746;
+
/// The authentication service is unknown.
pub const RPC_S_UNKNOWN_AUTHN_SERVICE = 1747;
+
/// The authentication level is unknown.
pub const RPC_S_UNKNOWN_AUTHN_LEVEL = 1748;
+
/// The security context is invalid.
pub const RPC_S_INVALID_AUTH_IDENTITY = 1749;
+
/// The authorization service is unknown.
pub const RPC_S_UNKNOWN_AUTHZ_SERVICE = 1750;
+
/// The entry is invalid.
pub const EPT_S_INVALID_ENTRY = 1751;
+
/// The server endpoint cannot perform the operation.
pub const EPT_S_CANT_PERFORM_OP = 1752;
+
/// There are no more endpoints available from the endpoint mapper.
pub const EPT_S_NOT_REGISTERED = 1753;
+
/// No interfaces have been exported.
pub const RPC_S_NOTHING_TO_EXPORT = 1754;
+
/// The entry name is incomplete.
pub const RPC_S_INCOMPLETE_NAME = 1755;
+
/// The version option is invalid.
pub const RPC_S_INVALID_VERS_OPTION = 1756;
+
/// There are no more members.
pub const RPC_S_NO_MORE_MEMBERS = 1757;
+
/// There is nothing to unexport.
pub const RPC_S_NOT_ALL_OBJS_UNEXPORTED = 1758;
+
/// The interface was not found.
pub const RPC_S_INTERFACE_NOT_FOUND = 1759;
+
/// The entry already exists.
pub const RPC_S_ENTRY_ALREADY_EXISTS = 1760;
+
/// The entry is not found.
pub const RPC_S_ENTRY_NOT_FOUND = 1761;
+
/// The name service is unavailable.
pub const RPC_S_NAME_SERVICE_UNAVAILABLE = 1762;
+
/// The network address family is invalid.
pub const RPC_S_INVALID_NAF_ID = 1763;
+
/// The requested operation is not supported.
pub const RPC_S_CANNOT_SUPPORT = 1764;
+
/// No security context is available to allow impersonation.
pub const RPC_S_NO_CONTEXT_AVAILABLE = 1765;
+
/// An internal error occurred in a remote procedure call (RPC).
pub const RPC_S_INTERNAL_ERROR = 1766;
+
/// The RPC server attempted an integer division by zero.
pub const RPC_S_ZERO_DIVIDE = 1767;
+
/// An addressing error occurred in the RPC server.
pub const RPC_S_ADDRESS_ERROR = 1768;
+
/// A floating-point operation at the RPC server caused a division by zero.
pub const RPC_S_FP_DIV_ZERO = 1769;
+
/// A floating-point underflow occurred at the RPC server.
pub const RPC_S_FP_UNDERFLOW = 1770;
+
/// A floating-point overflow occurred at the RPC server.
pub const RPC_S_FP_OVERFLOW = 1771;
+
/// The list of RPC servers available for the binding of auto handles has been exhausted.
pub const RPC_X_NO_MORE_ENTRIES = 1772;
+
/// Unable to open the character translation table file.
pub const RPC_X_SS_CHAR_TRANS_OPEN_FAIL = 1773;
+
/// The file containing the character translation table has fewer than 512 bytes.
pub const RPC_X_SS_CHAR_TRANS_SHORT_FILE = 1774;
+
/// A null context handle was passed from the client to the host during a remote procedure call.
pub const RPC_X_SS_IN_NULL_CONTEXT = 1775;
+
/// The context handle changed during a remote procedure call.
pub const RPC_X_SS_CONTEXT_DAMAGED = 1777;
+
/// The binding handles passed to a remote procedure call do not match.
pub const RPC_X_SS_HANDLES_MISMATCH = 1778;
+
/// The stub is unable to get the remote procedure call handle.
pub const RPC_X_SS_CANNOT_GET_CALL_HANDLE = 1779;
+
/// A null reference pointer was passed to the stub.
pub const RPC_X_NULL_REF_POINTER = 1780;
+
/// The enumeration value is out of range.
pub const RPC_X_ENUM_VALUE_OUT_OF_RANGE = 1781;
+
/// The byte count is too small.
pub const RPC_X_BYTE_COUNT_TOO_SMALL = 1782;
+
/// The stub received bad data.
pub const RPC_X_BAD_STUB_DATA = 1783;
+
/// The supplied user buffer is not valid for the requested operation.
pub const INVALID_USER_BUFFER = 1784;
+
/// The disk media is not recognized. It may not be formatted.
pub const UNRECOGNIZED_MEDIA = 1785;
+
/// The workstation does not have a trust secret.
pub const NO_TRUST_LSA_SECRET = 1786;
+
/// The security database on the server does not have a computer account for this workstation trust relationship.
pub const NO_TRUST_SAM_ACCOUNT = 1787;
+
/// The trust relationship between the primary domain and the trusted domain failed.
pub const TRUSTED_DOMAIN_FAILURE = 1788;
+
/// The trust relationship between this workstation and the primary domain failed.
pub const TRUSTED_RELATIONSHIP_FAILURE = 1789;
+
/// The network logon failed.
pub const TRUST_FAILURE = 1790;
+
/// A remote procedure call is already in progress for this thread.
pub const RPC_S_CALL_IN_PROGRESS = 1791;
+
/// An attempt was made to logon, but the network logon service was not started.
pub const NETLOGON_NOT_STARTED = 1792;
+
/// The user's account has expired.
pub const ACCOUNT_EXPIRED = 1793;
+
/// The redirector is in use and cannot be unloaded.
pub const REDIRECTOR_HAS_OPEN_HANDLES = 1794;
+
/// The specified printer driver is already installed.
pub const PRINTER_DRIVER_ALREADY_INSTALLED = 1795;
+
/// The specified port is unknown.
pub const UNKNOWN_PORT = 1796;
+
/// The printer driver is unknown.
pub const UNKNOWN_PRINTER_DRIVER = 1797;
+
/// The print processor is unknown.
pub const UNKNOWN_PRINTPROCESSOR = 1798;
+
/// The specified separator file is invalid.
pub const INVALID_SEPARATOR_FILE = 1799;
+
/// The specified priority is invalid.
pub const INVALID_PRIORITY = 1800;
+
/// The printer name is invalid.
pub const INVALID_PRINTER_NAME = 1801;
+
/// The printer already exists.
pub const PRINTER_ALREADY_EXISTS = 1802;
+
/// The printer command is invalid.
pub const INVALID_PRINTER_COMMAND = 1803;
+
/// The specified datatype is invalid.
pub const INVALID_DATATYPE = 1804;
+
/// The environment specified is invalid.
pub const INVALID_ENVIRONMENT = 1805;
+
/// There are no more bindings.
pub const RPC_S_NO_MORE_BINDINGS = 1806;
+
/// The account used is an interdomain trust account. Use your global user account or local user account to access this server.
pub const NOLOGON_INTERDOMAIN_TRUST_ACCOUNT = 1807;
+
/// The account used is a computer account. Use your global user account or local user account to access this server.
pub const NOLOGON_WORKSTATION_TRUST_ACCOUNT = 1808;
+
/// The account used is a server trust account. Use your global user account or local user account to access this server.
pub const NOLOGON_SERVER_TRUST_ACCOUNT = 1809;
+
/// The name or security ID (SID) of the domain specified is inconsistent with the trust information for that domain.
pub const DOMAIN_TRUST_INCONSISTENT = 1810;
+
/// The server is in use and cannot be unloaded.
pub const SERVER_HAS_OPEN_HANDLES = 1811;
+
/// The specified image file did not contain a resource section.
pub const RESOURCE_DATA_NOT_FOUND = 1812;
+
/// The specified resource type cannot be found in the image file.
pub const RESOURCE_TYPE_NOT_FOUND = 1813;
+
/// The specified resource name cannot be found in the image file.
pub const RESOURCE_NAME_NOT_FOUND = 1814;
+
/// The specified resource language ID cannot be found in the image file.
pub const RESOURCE_LANG_NOT_FOUND = 1815;
+
/// Not enough quota is available to process this command.
pub const NOT_ENOUGH_QUOTA = 1816;
+
/// No interfaces have been registered.
pub const RPC_S_NO_INTERFACES = 1817;
+
/// The remote procedure call was cancelled.
pub const RPC_S_CALL_CANCELLED = 1818;
+
/// The binding handle does not contain all required information.
pub const RPC_S_BINDING_INCOMPLETE = 1819;
+
/// A communications failure occurred during a remote procedure call.
pub const RPC_S_COMM_FAILURE = 1820;
+
/// The requested authentication level is not supported.
pub const RPC_S_UNSUPPORTED_AUTHN_LEVEL = 1821;
+
/// No principal name registered.
pub const RPC_S_NO_PRINC_NAME = 1822;
+
/// The error specified is not a valid Windows RPC error code.
pub const RPC_S_NOT_RPC_ERROR = 1823;
+
/// A UUID that is valid only on this computer has been allocated.
pub const RPC_S_UUID_LOCAL_ONLY = 1824;
+
/// A security package specific error occurred.
pub const RPC_S_SEC_PKG_ERROR = 1825;
+
/// Thread is not canceled.
pub const RPC_S_NOT_CANCELLED = 1826;
+
/// Invalid operation on the encoding/decoding handle.
pub const RPC_X_INVALID_ES_ACTION = 1827;
+
/// Incompatible version of the serializing package.
pub const RPC_X_WRONG_ES_VERSION = 1828;
+
/// Incompatible version of the RPC stub.
pub const RPC_X_WRONG_STUB_VERSION = 1829;
+
/// The RPC pipe object is invalid or corrupted.
pub const RPC_X_INVALID_PIPE_OBJECT = 1830;
+
/// An invalid operation was attempted on an RPC pipe object.
pub const RPC_X_WRONG_PIPE_ORDER = 1831;
+
/// Unsupported RPC pipe version.
pub const RPC_X_WRONG_PIPE_VERSION = 1832;
+
/// HTTP proxy server rejected the connection because the cookie authentication failed.
pub const RPC_S_COOKIE_AUTH_FAILED = 1833;
+
/// The group member was not found.
pub const RPC_S_GROUP_MEMBER_NOT_FOUND = 1898;
+
/// The endpoint mapper database entry could not be created.
pub const EPT_S_CANT_CREATE = 1899;
+
/// The object universal unique identifier (UUID) is the nil UUID.
pub const RPC_S_INVALID_OBJECT = 1900;
+
/// The specified time is invalid.
pub const INVALID_TIME = 1901;
+
/// The specified form name is invalid.
pub const INVALID_FORM_NAME = 1902;
+
/// The specified form size is invalid.
pub const INVALID_FORM_SIZE = 1903;
+
/// The specified printer handle is already being waited on.
pub const ALREADY_WAITING = 1904;
+
/// The specified printer has been deleted.
pub const PRINTER_DELETED = 1905;
+
/// The state of the printer is invalid.
pub const INVALID_PRINTER_STATE = 1906;
+
/// The user's password must be changed before signing in.
pub const PASSWORD_MUST_CHANGE = 1907;
+
/// Could not find the domain controller for this domain.
pub const DOMAIN_CONTROLLER_NOT_FOUND = 1908;
+
/// The referenced account is currently locked out and may not be logged on to.
pub const ACCOUNT_LOCKED_OUT = 1909;
+
/// The object exporter specified was not found.
pub const OR_INVALID_OXID = 1910;
+
/// The object specified was not found.
pub const OR_INVALID_OID = 1911;
+
/// The object resolver set specified was not found.
pub const OR_INVALID_SET = 1912;
+
/// Some data remains to be sent in the request buffer.
pub const RPC_S_SEND_INCOMPLETE = 1913;
+
/// Invalid asynchronous remote procedure call handle.
pub const RPC_S_INVALID_ASYNC_HANDLE = 1914;
+
/// Invalid asynchronous RPC call handle for this operation.
pub const RPC_S_INVALID_ASYNC_CALL = 1915;
+
/// The RPC pipe object has already been closed.
pub const RPC_X_PIPE_CLOSED = 1916;
+
/// The RPC call completed before all pipes were processed.
pub const RPC_X_PIPE_DISCIPLINE_ERROR = 1917;
+
/// No more data is available from the RPC pipe.
pub const RPC_X_PIPE_EMPTY = 1918;
+
/// No site name is available for this machine.
pub const NO_SITENAME = 1919;
+
/// The file cannot be accessed by the system.
pub const CANT_ACCESS_FILE = 1920;
+
/// The name of the file cannot be resolved by the system.
pub const CANT_RESOLVE_FILENAME = 1921;
+
/// The entry is not of the expected type.
pub const RPC_S_ENTRY_TYPE_MISMATCH = 1922;
+
/// Not all object UUIDs could be exported to the specified entry.
pub const RPC_S_NOT_ALL_OBJS_EXPORTED = 1923;
+
/// Interface could not be exported to the specified entry.
pub const RPC_S_INTERFACE_NOT_EXPORTED = 1924;
+
/// The specified profile entry could not be added.
pub const RPC_S_PROFILE_NOT_ADDED = 1925;
+
/// The specified profile element could not be added.
pub const RPC_S_PRF_ELT_NOT_ADDED = 1926;
+
/// The specified profile element could not be removed.
pub const RPC_S_PRF_ELT_NOT_REMOVED = 1927;
+
/// The group element could not be added.
pub const RPC_S_GRP_ELT_NOT_ADDED = 1928;
+
/// The group element could not be removed.
pub const RPC_S_GRP_ELT_NOT_REMOVED = 1929;
+
/// The printer driver is not compatible with a policy enabled on your computer that blocks NT 4.0 drivers.
pub const KM_DRIVER_BLOCKED = 1930;
+
/// The context has expired and can no longer be used.
pub const CONTEXT_EXPIRED = 1931;
+
/// The current user's delegated trust creation quota has been exceeded.
pub const PER_USER_TRUST_QUOTA_EXCEEDED = 1932;
+
/// The total delegated trust creation quota has been exceeded.
pub const ALL_USER_TRUST_QUOTA_EXCEEDED = 1933;
+
/// The current user's delegated trust deletion quota has been exceeded.
pub const USER_DELETE_TRUST_QUOTA_EXCEEDED = 1934;
+
/// The computer you are signing into is protected by an authentication firewall. The specified account is not allowed to authenticate to the computer.
pub const AUTHENTICATION_FIREWALL_FAILED = 1935;
+
/// Remote connections to the Print Spooler are blocked by a policy set on your machine.
pub const REMOTE_PRINT_CONNECTIONS_BLOCKED = 1936;
+
/// Authentication failed because NTLM authentication has been disabled.
pub const NTLM_BLOCKED = 1937;
+
/// Logon Failure: EAS policy requires that the user change their password before this operation can be performed.
pub const PASSWORD_CHANGE_REQUIRED = 1938;
+
/// The pixel format is invalid.
pub const INVALID_PIXEL_FORMAT = 2000;
+
/// The specified driver is invalid.
pub const BAD_DRIVER = 2001;
+
/// The window style or class attribute is invalid for this operation.
pub const INVALID_WINDOW_STYLE = 2002;
+
/// The requested metafile operation is not supported.
pub const METAFILE_NOT_SUPPORTED = 2003;
+
/// The requested transformation operation is not supported.
pub const TRANSFORM_NOT_SUPPORTED = 2004;
+
/// The requested clipping operation is not supported.
pub const CLIPPING_NOT_SUPPORTED = 2005;
+
/// The specified color management module is invalid.
pub const INVALID_CMM = 2010;
+
/// The specified color profile is invalid.
pub const INVALID_PROFILE = 2011;
+
/// The specified tag was not found.
pub const TAG_NOT_FOUND = 2012;
+
/// A required tag is not present.
pub const TAG_NOT_PRESENT = 2013;
+
/// The specified tag is already present.
pub const DUPLICATE_TAG = 2014;
+
/// The specified color profile is not associated with the specified device.
pub const PROFILE_NOT_ASSOCIATED_WITH_DEVICE = 2015;
+
/// The specified color profile was not found.
pub const PROFILE_NOT_FOUND = 2016;
+
/// The specified color space is invalid.
pub const INVALID_COLORSPACE = 2017;
+
/// Image Color Management is not enabled.
pub const ICM_NOT_ENABLED = 2018;
+
/// There was an error while deleting the color transform.
pub const DELETING_ICM_XFORM = 2019;
+
/// The specified color transform is invalid.
pub const INVALID_TRANSFORM = 2020;
+
/// The specified transform does not match the bitmap's color space.
pub const COLORSPACE_MISMATCH = 2021;
+
/// The specified named color index is not present in the profile.
pub const INVALID_COLORINDEX = 2022;
+
/// The specified profile is intended for a device of a different type than the specified device.
pub const PROFILE_DOES_NOT_MATCH_DEVICE = 2023;
+
/// The network connection was made successfully, but the user had to be prompted for a password other than the one originally specified.
pub const CONNECTED_OTHER_PASSWORD = 2108;
+
/// The network connection was made successfully using default credentials.
pub const CONNECTED_OTHER_PASSWORD_DEFAULT = 2109;
+
/// The specified username is invalid.
pub const BAD_USERNAME = 2202;
+
/// This network connection does not exist.
pub const NOT_CONNECTED = 2250;
+
/// This network connection has files open or requests pending.
pub const OPEN_FILES = 2401;
+
/// Active connections still exist.
pub const ACTIVE_CONNECTIONS = 2402;
+
/// The device is in use by an active process and cannot be disconnected.
pub const DEVICE_IN_USE = 2404;
+
/// The specified print monitor is unknown.
pub const UNKNOWN_PRINT_MONITOR = 3000;
+
/// The specified printer driver is currently in use.
pub const PRINTER_DRIVER_IN_USE = 3001;
+
/// The spool file was not found.
pub const SPOOL_FILE_NOT_FOUND = 3002;
+
/// A StartDocPrinter call was not issued.
pub const SPL_NO_STARTDOC = 3003;
+
/// An AddJob call was not issued.
pub const SPL_NO_ADDJOB = 3004;
+
/// The specified print processor has already been installed.
pub const PRINT_PROCESSOR_ALREADY_INSTALLED = 3005;
+
/// The specified print monitor has already been installed.
pub const PRINT_MONITOR_ALREADY_INSTALLED = 3006;
+
/// The specified print monitor does not have the required functions.
pub const INVALID_PRINT_MONITOR = 3007;
+
/// The specified print monitor is currently in use.
pub const PRINT_MONITOR_IN_USE = 3008;
+
/// The requested operation is not allowed when there are jobs queued to the printer.
pub const PRINTER_HAS_JOBS_QUEUED = 3009;
+
/// The requested operation is successful. Changes will not be effective until the system is rebooted.
pub const SUCCESS_REBOOT_REQUIRED = 3010;
+
/// The requested operation is successful. Changes will not be effective until the service is restarted.
pub const SUCCESS_RESTART_REQUIRED = 3011;
+
/// No printers were found.
pub const PRINTER_NOT_FOUND = 3012;
+
/// The printer driver is known to be unreliable.
pub const PRINTER_DRIVER_WARNED = 3013;
+
/// The printer driver is known to harm the system.
pub const PRINTER_DRIVER_BLOCKED = 3014;
+
/// The specified printer driver package is currently in use.
pub const PRINTER_DRIVER_PACKAGE_IN_USE = 3015;
+
/// Unable to find a core driver package that is required by the printer driver package.
pub const CORE_DRIVER_PACKAGE_NOT_FOUND = 3016;
+
/// The requested operation failed. A system reboot is required to roll back changes made.
pub const FAIL_REBOOT_REQUIRED = 3017;
+
/// The requested operation failed. A system reboot has been initiated to roll back changes made.
pub const FAIL_REBOOT_INITIATED = 3018;
+
/// The specified printer driver was not found on the system and needs to be downloaded.
pub const PRINTER_DRIVER_DOWNLOAD_NEEDED = 3019;
+
/// The requested print job has failed to print. A print system update requires the job to be resubmitted.
pub const PRINT_JOB_RESTART_REQUIRED = 3020;
+
/// The printer driver does not contain a valid manifest, or contains too many manifests.
pub const INVALID_PRINTER_DRIVER_MANIFEST = 3021;
+
/// The specified printer cannot be shared.
pub const PRINTER_NOT_SHAREABLE = 3022;
+
/// The operation was paused.
pub const REQUEST_PAUSED = 3050;
+
/// Reissue the given operation as a cached IO operation.
pub const IO_REISSUE_AS_CACHED = 3950;
diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig
index aa02c27f39..90ccfaf6c5 100644
--- a/std/os/windows/index.zig
+++ b/std/os/windows/index.zig
@@ -1,140 +1,48 @@
+const std = @import("../../index.zig");
+const assert = std.debug.assert;
+
+pub use @import("advapi32.zig");
+pub use @import("kernel32.zig");
+pub use @import("ole32.zig");
+pub use @import("shell32.zig");
+pub use @import("shlwapi.zig");
+pub use @import("user32.zig");
+
+test "import" {
+ _ = @import("util.zig");
+}
+
pub const ERROR = @import("error.zig");
-pub extern "advapi32" stdcallcc fn CryptAcquireContextA(phProv: &HCRYPTPROV, pszContainer: ?LPCSTR,
- pszProvider: ?LPCSTR, dwProvType: DWORD, dwFlags: DWORD) BOOL;
-
-pub extern "advapi32" stdcallcc fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) BOOL;
-
-pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: &BYTE) BOOL;
-
-
-pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL;
-
-pub extern "kernel32" stdcallcc fn CreateDirectoryA(lpPathName: LPCSTR,
- lpSecurityAttributes: ?&SECURITY_ATTRIBUTES) BOOL;
-
-pub extern "kernel32" stdcallcc fn CreateFileA(lpFileName: LPCSTR, dwDesiredAccess: DWORD,
- dwShareMode: DWORD, lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES, dwCreationDisposition: DWORD,
- dwFlagsAndAttributes: DWORD, hTemplateFile: ?HANDLE) HANDLE;
-
-pub extern "kernel32" stdcallcc fn CreatePipe(hReadPipe: &HANDLE, hWritePipe: &HANDLE,
- lpPipeAttributes: &const SECURITY_ATTRIBUTES, nSize: DWORD) BOOL;
-
-pub extern "kernel32" stdcallcc fn CreateProcessA(lpApplicationName: ?LPCSTR, lpCommandLine: LPSTR,
- lpProcessAttributes: ?&SECURITY_ATTRIBUTES, lpThreadAttributes: ?&SECURITY_ATTRIBUTES, bInheritHandles: BOOL,
- dwCreationFlags: DWORD, lpEnvironment: ?&c_void, lpCurrentDirectory: ?LPCSTR, lpStartupInfo: &STARTUPINFOA,
- lpProcessInformation: &PROCESS_INFORMATION) BOOL;
-
-pub extern "kernel32" stdcallcc fn CreateSymbolicLinkA(lpSymlinkFileName: LPCSTR, lpTargetFileName: LPCSTR,
- dwFlags: DWORD) BOOLEAN;
-
-pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: LPCSTR) BOOL;
-
-pub extern "kernel32" stdcallcc fn ExitProcess(exit_code: UINT) noreturn;
-
-pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: LPCH) BOOL;
-
-pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR;
-
-pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: &DWORD) BOOL;
-
-pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) DWORD;
-
-pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?LPCH;
-
-pub extern "kernel32" stdcallcc fn GetEnvironmentVariableA(lpName: LPCSTR, lpBuffer: LPSTR, nSize: DWORD) DWORD;
-
-pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: &DWORD) BOOL;
-
-pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: &LARGE_INTEGER) BOOL;
-
-pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD;
-
-pub extern "kernel32" stdcallcc fn GetLastError() DWORD;
-
-pub extern "kernel32" stdcallcc fn GetFileInformationByHandleEx(in_hFile: HANDLE,
- in_FileInformationClass: FILE_INFO_BY_HANDLE_CLASS, out_lpFileInformation: &c_void,
- in_dwBufferSize: DWORD) BOOL;
-
-pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(hFile: HANDLE, lpszFilePath: LPSTR,
- cchFilePath: DWORD, dwFlags: DWORD) DWORD;
-
-pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
-
-pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE;
-pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
-pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void, dwBytes: SIZE_T) ?&c_void;
-pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) SIZE_T;
-pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) BOOL;
-pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
-pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
-
-pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE;
-
-pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?&c_void;
-
-pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void) BOOL;
-
-pub extern "kernel32" stdcallcc fn MoveFileExA(lpExistingFileName: LPCSTR, lpNewFileName: LPCSTR,
- dwFlags: DWORD) BOOL;
-
-pub extern "kernel32" stdcallcc fn PathFileExists(pszPath: ?LPCTSTR) BOOL;
-
-pub extern "kernel32" stdcallcc fn ReadFile(in_hFile: HANDLE, out_lpBuffer: &c_void,
- in_nNumberOfBytesToRead: DWORD, out_lpNumberOfBytesRead: &DWORD,
- in_out_lpOverlapped: ?&OVERLAPPED) BOOL;
-
-pub extern "kernel32" stdcallcc fn SetFilePointerEx(in_fFile: HANDLE, in_liDistanceToMove: LARGE_INTEGER,
- out_opt_ldNewFilePointer: ?&LARGE_INTEGER, in_dwMoveMethod: DWORD) BOOL;
-
-pub extern "kernel32" stdcallcc fn SetHandleInformation(hObject: HANDLE, dwMask: DWORD, dwFlags: DWORD) BOOL;
-
-pub extern "kernel32" stdcallcc fn Sleep(dwMilliseconds: DWORD) void;
-
-pub extern "kernel32" stdcallcc fn TerminateProcess(hProcess: HANDLE, uExitCode: UINT) BOOL;
-
-pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) DWORD;
-
-pub extern "kernel32" stdcallcc fn WriteFile(in_hFile: HANDLE, in_lpBuffer: &const c_void,
- in_nNumberOfBytesToWrite: DWORD, out_lpNumberOfBytesWritten: ?&DWORD,
- in_out_lpOverlapped: ?&OVERLAPPED) BOOL;
-
-//TODO: call unicode versions instead of relying on ANSI code page
-pub extern "kernel32" stdcallcc fn LoadLibraryA(lpLibFileName: LPCSTR) ?HMODULE;
-
-pub extern "kernel32" stdcallcc fn FreeLibrary(hModule: HMODULE) BOOL;
-
-pub extern "user32" stdcallcc fn MessageBoxA(hWnd: ?HANDLE, lpText: ?LPCTSTR, lpCaption: ?LPCTSTR, uType: UINT) c_int;
-
-pub const PROV_RSA_FULL = 1;
-
pub const BOOL = c_int;
pub const BOOLEAN = BYTE;
pub const BYTE = u8;
pub const CHAR = u8;
pub const DWORD = u32;
pub const FLOAT = f32;
-pub const HANDLE = &c_void;
+pub const HANDLE = *c_void;
pub const HCRYPTPROV = ULONG_PTR;
-pub const HINSTANCE = &@OpaqueType();
-pub const HMODULE = &@OpaqueType();
+pub const HINSTANCE = *@OpaqueType();
+pub const HMODULE = *@OpaqueType();
pub const INT = c_int;
-pub const LPBYTE = &BYTE;
-pub const LPCH = &CHAR;
-pub const LPCSTR = &const CHAR;
-pub const LPCTSTR = &const TCHAR;
-pub const LPCVOID = &const c_void;
-pub const LPDWORD = &DWORD;
-pub const LPSTR = &CHAR;
+pub const LPBYTE = *BYTE;
+pub const LPCH = *CHAR;
+pub const LPCSTR = [*]const CHAR;
+pub const LPCTSTR = [*]const TCHAR;
+pub const LPCVOID = *const c_void;
+pub const LPDWORD = *DWORD;
+pub const LPSTR = [*]CHAR;
pub const LPTSTR = if (UNICODE) LPWSTR else LPSTR;
-pub const LPVOID = &c_void;
-pub const LPWSTR = &WCHAR;
-pub const PVOID = &c_void;
-pub const PWSTR = &WCHAR;
+pub const LPVOID = *c_void;
+pub const LPWSTR = [*]WCHAR;
+pub const LPCWSTR = [*]const WCHAR;
+pub const PVOID = *c_void;
+pub const PWSTR = [*]WCHAR;
pub const SIZE_T = usize;
pub const TCHAR = if (UNICODE) WCHAR else u8;
pub const UINT = c_uint;
pub const ULONG_PTR = usize;
+pub const DWORD_PTR = ULONG_PTR;
pub const UNICODE = false;
pub const WCHAR = u16;
pub const WORD = u16;
@@ -154,69 +62,72 @@ pub const STD_ERROR_HANDLE = @maxValue(DWORD) - 12 + 1;
pub const INVALID_HANDLE_VALUE = @intToPtr(HANDLE, @maxValue(usize));
+pub const INVALID_FILE_ATTRIBUTES = DWORD(@maxValue(DWORD));
+
pub const OVERLAPPED = extern struct {
Internal: ULONG_PTR,
InternalHigh: ULONG_PTR,
Pointer: PVOID,
hEvent: HANDLE,
};
-pub const LPOVERLAPPED = &OVERLAPPED;
+pub const LPOVERLAPPED = *OVERLAPPED;
pub const MAX_PATH = 260;
// TODO issue #305
pub const FILE_INFO_BY_HANDLE_CLASS = u32;
-pub const FileBasicInfo = 0;
-pub const FileStandardInfo = 1;
-pub const FileNameInfo = 2;
-pub const FileRenameInfo = 3;
-pub const FileDispositionInfo = 4;
-pub const FileAllocationInfo = 5;
-pub const FileEndOfFileInfo = 6;
-pub const FileStreamInfo = 7;
-pub const FileCompressionInfo = 8;
-pub const FileAttributeTagInfo = 9;
-pub const FileIdBothDirectoryInfo = 10;
-pub const FileIdBothDirectoryRestartInfo = 11;
-pub const FileIoPriorityHintInfo = 12;
-pub const FileRemoteProtocolInfo = 13;
-pub const FileFullDirectoryInfo = 14;
-pub const FileFullDirectoryRestartInfo = 15;
-pub const FileStorageInfo = 16;
-pub const FileAlignmentInfo = 17;
-pub const FileIdInfo = 18;
-pub const FileIdExtdDirectoryInfo = 19;
-pub const FileIdExtdDirectoryRestartInfo = 20;
+pub const FileBasicInfo = 0;
+pub const FileStandardInfo = 1;
+pub const FileNameInfo = 2;
+pub const FileRenameInfo = 3;
+pub const FileDispositionInfo = 4;
+pub const FileAllocationInfo = 5;
+pub const FileEndOfFileInfo = 6;
+pub const FileStreamInfo = 7;
+pub const FileCompressionInfo = 8;
+pub const FileAttributeTagInfo = 9;
+pub const FileIdBothDirectoryInfo = 10;
+pub const FileIdBothDirectoryRestartInfo = 11;
+pub const FileIoPriorityHintInfo = 12;
+pub const FileRemoteProtocolInfo = 13;
+pub const FileFullDirectoryInfo = 14;
+pub const FileFullDirectoryRestartInfo = 15;
+pub const FileStorageInfo = 16;
+pub const FileAlignmentInfo = 17;
+pub const FileIdInfo = 18;
+pub const FileIdExtdDirectoryInfo = 19;
+pub const FileIdExtdDirectoryRestartInfo = 20;
pub const FILE_NAME_INFO = extern struct {
FileNameLength: DWORD,
FileName: [1]WCHAR,
};
-
/// Return the normalized drive name. This is the default.
pub const FILE_NAME_NORMALIZED = 0x0;
+
/// Return the opened file name (not normalized).
pub const FILE_NAME_OPENED = 0x8;
/// Return the path with the drive letter. This is the default.
pub const VOLUME_NAME_DOS = 0x0;
+
/// Return the path with a volume GUID path instead of the drive name.
pub const VOLUME_NAME_GUID = 0x1;
+
/// Return the path with no drive information.
pub const VOLUME_NAME_NONE = 0x4;
+
/// Return the path with the volume device path.
pub const VOLUME_NAME_NT = 0x2;
-
pub const SECURITY_ATTRIBUTES = extern struct {
nLength: DWORD,
- lpSecurityDescriptor: ?&c_void,
+ lpSecurityDescriptor: ?*c_void,
bInheritHandle: BOOL,
};
-pub const PSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES;
-pub const LPSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES;
-
+pub const PSECURITY_ATTRIBUTES = *SECURITY_ATTRIBUTES;
+pub const LPSECURITY_ATTRIBUTES = *SECURITY_ATTRIBUTES;
pub const GENERIC_READ = 0x80000000;
pub const GENERIC_WRITE = 0x40000000;
@@ -233,15 +144,25 @@ pub const OPEN_ALWAYS = 4;
pub const OPEN_EXISTING = 3;
pub const TRUNCATE_EXISTING = 5;
-
pub const FILE_ATTRIBUTE_ARCHIVE = 0x20;
+pub const FILE_ATTRIBUTE_COMPRESSED = 0x800;
+pub const FILE_ATTRIBUTE_DEVICE = 0x40;
+pub const FILE_ATTRIBUTE_DIRECTORY = 0x10;
pub const FILE_ATTRIBUTE_ENCRYPTED = 0x4000;
pub const FILE_ATTRIBUTE_HIDDEN = 0x2;
+pub const FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x8000;
pub const FILE_ATTRIBUTE_NORMAL = 0x80;
+pub const FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000;
+pub const FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x20000;
pub const FILE_ATTRIBUTE_OFFLINE = 0x1000;
pub const FILE_ATTRIBUTE_READONLY = 0x1;
+pub const FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x400000;
+pub const FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x40000;
+pub const FILE_ATTRIBUTE_REPARSE_POINT = 0x400;
+pub const FILE_ATTRIBUTE_SPARSE_FILE = 0x200;
pub const FILE_ATTRIBUTE_SYSTEM = 0x4;
pub const FILE_ATTRIBUTE_TEMPORARY = 0x100;
+pub const FILE_ATTRIBUTE_VIRTUAL = 0x10000;
pub const PROCESS_INFORMATION = extern struct {
hProcess: HANDLE,
@@ -310,3 +231,122 @@ pub const FILE_END = 2;
pub const HEAP_CREATE_ENABLE_EXECUTE = 0x00040000;
pub const HEAP_GENERATE_EXCEPTIONS = 0x00000004;
pub const HEAP_NO_SERIALIZE = 0x00000001;
+
+pub const PTHREAD_START_ROUTINE = extern fn (LPVOID) DWORD;
+pub const LPTHREAD_START_ROUTINE = PTHREAD_START_ROUTINE;
+
+pub const WIN32_FIND_DATAA = extern struct {
+ dwFileAttributes: DWORD,
+ ftCreationTime: FILETIME,
+ ftLastAccessTime: FILETIME,
+ ftLastWriteTime: FILETIME,
+ nFileSizeHigh: DWORD,
+ nFileSizeLow: DWORD,
+ dwReserved0: DWORD,
+ dwReserved1: DWORD,
+ cFileName: [260]CHAR,
+ cAlternateFileName: [14]CHAR,
+};
+
+pub const FILETIME = extern struct {
+ dwLowDateTime: DWORD,
+ dwHighDateTime: DWORD,
+};
+
+pub const SYSTEM_INFO = extern struct {
+ anon1: extern union {
+ dwOemId: DWORD,
+ anon2: extern struct {
+ wProcessorArchitecture: WORD,
+ wReserved: WORD,
+ },
+ },
+ dwPageSize: DWORD,
+ lpMinimumApplicationAddress: LPVOID,
+ lpMaximumApplicationAddress: LPVOID,
+ dwActiveProcessorMask: DWORD_PTR,
+ dwNumberOfProcessors: DWORD,
+ dwProcessorType: DWORD,
+ dwAllocationGranularity: DWORD,
+ wProcessorLevel: WORD,
+ wProcessorRevision: WORD,
+};
+
+pub const HRESULT = c_long;
+
+pub const KNOWNFOLDERID = GUID;
+pub const GUID = extern struct {
+ Data1: c_ulong,
+ Data2: c_ushort,
+ Data3: c_ushort,
+ Data4: [8]u8,
+
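+    /// Parses a GUID string of the form "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}"
+    /// (hex digits); asserts if the input does not match that layout.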
+ pub fn parse(str: []const u8) GUID {
+ var guid: GUID = undefined;
+ var index: usize = 0;
+ assert(str[index] == '{');
+ index += 1;
+
+ guid.Data1 = std.fmt.parseUnsigned(c_ulong, str[index..index + 8], 16) catch unreachable;
+ index += 8;
+
+ assert(str[index] == '-');
+ index += 1;
+
+ guid.Data2 = std.fmt.parseUnsigned(c_ushort, str[index..index + 4], 16) catch unreachable;
+ index += 4;
+
+ assert(str[index] == '-');
+ index += 1;
+
+ guid.Data3 = std.fmt.parseUnsigned(c_ushort, str[index..index + 4], 16) catch unreachable;
+ index += 4;
+
+ assert(str[index] == '-');
+ index += 1;
+
+ guid.Data4[0] = std.fmt.parseUnsigned(u8, str[index..index + 2], 16) catch unreachable;
+ index += 2;
+ guid.Data4[1] = std.fmt.parseUnsigned(u8, str[index..index + 2], 16) catch unreachable;
+ index += 2;
+
+ assert(str[index] == '-');
+ index += 1;
+
+ var i: usize = 2;
+ while (i < guid.Data4.len) : (i += 1) {
+ guid.Data4[i] = std.fmt.parseUnsigned(u8, str[index..index + 2], 16) catch unreachable;
+ index += 2;
+ }
+
+ assert(str[index] == '}');
+ index += 1;
+ return guid;
+ }
+};
+
+pub const FOLDERID_LocalAppData = GUID.parse("{F1B32785-6FBA-4FCF-9D55-7B8E7F157091}");
+
+pub const KF_FLAG_DEFAULT = 0;
+pub const KF_FLAG_NO_APPCONTAINER_REDIRECTION = 65536;
+pub const KF_FLAG_CREATE = 32768;
+pub const KF_FLAG_DONT_VERIFY = 16384;
+pub const KF_FLAG_DONT_UNEXPAND = 8192;
+pub const KF_FLAG_NO_ALIAS = 4096;
+pub const KF_FLAG_INIT = 2048;
+pub const KF_FLAG_DEFAULT_PATH = 1024;
+pub const KF_FLAG_NOT_PARENT_RELATIVE = 512;
+pub const KF_FLAG_SIMPLE_IDLIST = 256;
+pub const KF_FLAG_ALIAS_ONLY = -2147483648;
+
+pub const S_OK = 0;
+pub const E_NOTIMPL = @bitCast(c_long, c_ulong(0x80004001));
+pub const E_NOINTERFACE = @bitCast(c_long, c_ulong(0x80004002));
+pub const E_POINTER = @bitCast(c_long, c_ulong(0x80004003));
+pub const E_ABORT = @bitCast(c_long, c_ulong(0x80004004));
+pub const E_FAIL = @bitCast(c_long, c_ulong(0x80004005));
+pub const E_UNEXPECTED = @bitCast(c_long, c_ulong(0x8000FFFF));
+pub const E_ACCESSDENIED = @bitCast(c_long, c_ulong(0x80070005));
+pub const E_HANDLE = @bitCast(c_long, c_ulong(0x80070006));
+pub const E_OUTOFMEMORY = @bitCast(c_long, c_ulong(0x8007000E));
+pub const E_INVALIDARG = @bitCast(c_long, c_ulong(0x80070057));
diff --git a/std/os/windows/kernel32.zig b/std/os/windows/kernel32.zig
new file mode 100644
index 0000000000..fa3473ad05
--- /dev/null
+++ b/std/os/windows/kernel32.zig
@@ -0,0 +1,162 @@
+use @import("index.zig");
+
+pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL;
+
+pub extern "kernel32" stdcallcc fn CreateDirectoryA(
+ lpPathName: LPCSTR,
+ lpSecurityAttributes: ?*SECURITY_ATTRIBUTES,
+) BOOL;
+
+pub extern "kernel32" stdcallcc fn CreateFileA(
+ lpFileName: LPCSTR,
+ dwDesiredAccess: DWORD,
+ dwShareMode: DWORD,
+ lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES,
+ dwCreationDisposition: DWORD,
+ dwFlagsAndAttributes: DWORD,
+ hTemplateFile: ?HANDLE,
+) HANDLE;
+
+pub extern "kernel32" stdcallcc fn CreatePipe(
+ hReadPipe: *HANDLE,
+ hWritePipe: *HANDLE,
+ lpPipeAttributes: *const SECURITY_ATTRIBUTES,
+ nSize: DWORD,
+) BOOL;
+
+pub extern "kernel32" stdcallcc fn CreateProcessA(
+ lpApplicationName: ?LPCSTR,
+ lpCommandLine: LPSTR,
+ lpProcessAttributes: ?*SECURITY_ATTRIBUTES,
+ lpThreadAttributes: ?*SECURITY_ATTRIBUTES,
+ bInheritHandles: BOOL,
+ dwCreationFlags: DWORD,
+ lpEnvironment: ?*c_void,
+ lpCurrentDirectory: ?LPCSTR,
+ lpStartupInfo: *STARTUPINFOA,
+ lpProcessInformation: *PROCESS_INFORMATION,
+) BOOL;
+
+pub extern "kernel32" stdcallcc fn CreateSymbolicLinkA(
+ lpSymlinkFileName: LPCSTR,
+ lpTargetFileName: LPCSTR,
+ dwFlags: DWORD,
+) BOOLEAN;
+
+pub extern "kernel32" stdcallcc fn CreateIoCompletionPort(FileHandle: HANDLE, ExistingCompletionPort: ?HANDLE, CompletionKey: ULONG_PTR, NumberOfConcurrentThreads: DWORD) ?HANDLE;
+
+pub extern "kernel32" stdcallcc fn CreateThread(lpThreadAttributes: ?LPSECURITY_ATTRIBUTES, dwStackSize: SIZE_T, lpStartAddress: LPTHREAD_START_ROUTINE, lpParameter: ?LPVOID, dwCreationFlags: DWORD, lpThreadId: ?LPDWORD) ?HANDLE;
+
+pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: LPCSTR) BOOL;
+
+pub extern "kernel32" stdcallcc fn ExitProcess(exit_code: UINT) noreturn;
+
+pub extern "kernel32" stdcallcc fn FindFirstFileA(lpFileName: LPCSTR, lpFindFileData: *WIN32_FIND_DATAA) HANDLE;
+pub extern "kernel32" stdcallcc fn FindClose(hFindFile: HANDLE) BOOL;
+pub extern "kernel32" stdcallcc fn FindNextFileA(hFindFile: HANDLE, lpFindFileData: *WIN32_FIND_DATAA) BOOL;
+
+pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: [*]u8) BOOL;
+
+pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR;
+
+pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) BOOL;
+
+pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) DWORD;
+
+pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?[*]u8;
+
+pub extern "kernel32" stdcallcc fn GetEnvironmentVariableA(lpName: LPCSTR, lpBuffer: LPSTR, nSize: DWORD) DWORD;
+
+pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: *DWORD) BOOL;
+
+pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL;
+
+pub extern "kernel32" stdcallcc fn GetFileAttributesA(lpFileName: LPCSTR) DWORD;
+
+pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD;
+
+pub extern "kernel32" stdcallcc fn GetLastError() DWORD;
+
+pub extern "kernel32" stdcallcc fn GetFileInformationByHandleEx(
+ in_hFile: HANDLE,
+ in_FileInformationClass: FILE_INFO_BY_HANDLE_CLASS,
+ out_lpFileInformation: *c_void,
+ in_dwBufferSize: DWORD,
+) BOOL;
+
+pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(
+ hFile: HANDLE,
+ lpszFilePath: LPSTR,
+ cchFilePath: DWORD,
+ dwFlags: DWORD,
+) DWORD;
+
+pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
+pub extern "kernel32" stdcallcc fn GetQueuedCompletionStatus(CompletionPort: HANDLE, lpNumberOfBytesTransferred: LPDWORD, lpCompletionKey: *ULONG_PTR, lpOverlapped: *?*OVERLAPPED, dwMilliseconds: DWORD) BOOL;
+
+pub extern "kernel32" stdcallcc fn GetSystemInfo(lpSystemInfo: *SYSTEM_INFO) void;
+pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(*FILETIME) void;
+
+pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE;
+pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
+pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void;
+pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T;
+pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL;
+pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
+pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
+
+pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE;
+
+pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void;
+
+pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL;
+
+pub extern "kernel32" stdcallcc fn MoveFileExA(
+ lpExistingFileName: LPCSTR,
+ lpNewFileName: LPCSTR,
+ dwFlags: DWORD,
+) BOOL;
+
+pub extern "kernel32" stdcallcc fn PostQueuedCompletionStatus(CompletionPort: HANDLE, dwNumberOfBytesTransferred: DWORD, dwCompletionKey: ULONG_PTR, lpOverlapped: ?*OVERLAPPED) BOOL;
+
+pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *LARGE_INTEGER) BOOL;
+
+pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL;
+
+pub extern "kernel32" stdcallcc fn ReadFile(
+ in_hFile: HANDLE,
+ out_lpBuffer: *c_void,
+ in_nNumberOfBytesToRead: DWORD,
+ out_lpNumberOfBytesRead: *DWORD,
+ in_out_lpOverlapped: ?*OVERLAPPED,
+) BOOL;
+
+pub extern "kernel32" stdcallcc fn RemoveDirectoryA(lpPathName: LPCSTR) BOOL;
+
+pub extern "kernel32" stdcallcc fn SetFilePointerEx(
+ in_fFile: HANDLE,
+ in_liDistanceToMove: LARGE_INTEGER,
+ out_opt_ldNewFilePointer: ?*LARGE_INTEGER,
+ in_dwMoveMethod: DWORD,
+) BOOL;
+
+pub extern "kernel32" stdcallcc fn SetHandleInformation(hObject: HANDLE, dwMask: DWORD, dwFlags: DWORD) BOOL;
+
+pub extern "kernel32" stdcallcc fn Sleep(dwMilliseconds: DWORD) void;
+
+pub extern "kernel32" stdcallcc fn TerminateProcess(hProcess: HANDLE, uExitCode: UINT) BOOL;
+
+pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) DWORD;
+
+pub extern "kernel32" stdcallcc fn WriteFile(
+ in_hFile: HANDLE,
+ in_lpBuffer: *const c_void,
+ in_nNumberOfBytesToWrite: DWORD,
+ out_lpNumberOfBytesWritten: ?*DWORD,
+ in_out_lpOverlapped: ?*OVERLAPPED,
+) BOOL;
+
+//TODO: call unicode versions instead of relying on ANSI code page
+pub extern "kernel32" stdcallcc fn LoadLibraryA(lpLibFileName: LPCSTR) ?HMODULE;
+
+pub extern "kernel32" stdcallcc fn FreeLibrary(hModule: HMODULE) BOOL;
diff --git a/std/os/windows/ole32.zig b/std/os/windows/ole32.zig
new file mode 100644
index 0000000000..84d8089d07
--- /dev/null
+++ b/std/os/windows/ole32.zig
@@ -0,0 +1,18 @@
+use @import("index.zig");
+
+pub extern "ole32.dll" stdcallcc fn CoTaskMemFree(pv: LPVOID) void;
+pub extern "ole32.dll" stdcallcc fn CoUninitialize() void;
+pub extern "ole32.dll" stdcallcc fn CoGetCurrentProcess() DWORD;
+pub extern "ole32.dll" stdcallcc fn CoInitializeEx(pvReserved: LPVOID, dwCoInit: DWORD) HRESULT;
+
+pub const COINIT_APARTMENTTHREADED = COINIT.COINIT_APARTMENTTHREADED;
+pub const COINIT_MULTITHREADED = COINIT.COINIT_MULTITHREADED;
+pub const COINIT_DISABLE_OLE1DDE = COINIT.COINIT_DISABLE_OLE1DDE;
+pub const COINIT_SPEED_OVER_MEMORY = COINIT.COINIT_SPEED_OVER_MEMORY;
+pub const COINIT = extern enum {
+ COINIT_APARTMENTTHREADED = 2,
+ COINIT_MULTITHREADED = 0,
+ COINIT_DISABLE_OLE1DDE = 4,
+ COINIT_SPEED_OVER_MEMORY = 8,
+};
diff --git a/std/os/windows/shell32.zig b/std/os/windows/shell32.zig
new file mode 100644
index 0000000000..f10466add3
--- /dev/null
+++ b/std/os/windows/shell32.zig
@@ -0,0 +1,4 @@
+use @import("index.zig");
+
+pub extern "shell32.dll" stdcallcc fn SHGetKnownFolderPath(rfid: *const KNOWNFOLDERID, dwFlags: DWORD, hToken: ?HANDLE, ppszPath: *[*]WCHAR) HRESULT;
+
diff --git a/std/os/windows/shlwapi.zig b/std/os/windows/shlwapi.zig
new file mode 100644
index 0000000000..6bccefaf98
--- /dev/null
+++ b/std/os/windows/shlwapi.zig
@@ -0,0 +1,4 @@
+use @import("index.zig");
+
+pub extern "shlwapi" stdcallcc fn PathFileExistsA(pszPath: ?LPCTSTR) BOOL;
+
diff --git a/std/os/windows/user32.zig b/std/os/windows/user32.zig
new file mode 100644
index 0000000000..37f9f6f3b8
--- /dev/null
+++ b/std/os/windows/user32.zig
@@ -0,0 +1,4 @@
+use @import("index.zig");
+
+pub extern "user32" stdcallcc fn MessageBoxA(hWnd: ?HANDLE, lpText: ?LPCTSTR, lpCaption: ?LPCTSTR, uType: UINT) c_int;
+
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index 5af318b7b0..c9d2c3c3e6 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -7,7 +7,7 @@ const mem = std.mem;
const BufMap = std.BufMap;
const cstr = std.cstr;
-pub const WaitError = error {
+pub const WaitError = error{
WaitAbandoned,
WaitTimeOut,
Unexpected,
@@ -33,7 +33,7 @@ pub fn windowsClose(handle: windows.HANDLE) void {
assert(windows.CloseHandle(handle) != 0);
}
-pub const WriteError = error {
+pub const WriteError = error{
SystemResources,
OperationAborted,
IoPending,
@@ -42,7 +42,7 @@ pub const WriteError = error {
};
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void {
- if (windows.WriteFile(handle, @ptrCast(&const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
+ if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), @intCast(u32, bytes.len), null, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources,
@@ -68,20 +68,23 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool {
const size = @sizeOf(windows.FILE_NAME_INFO);
var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = []u8{0} ** (size + windows.MAX_PATH);
- if (windows.GetFileInformationByHandleEx(handle, windows.FileNameInfo,
- @ptrCast(&c_void, &name_info_bytes[0]), u32(name_info_bytes.len)) == 0)
- {
+ if (windows.GetFileInformationByHandleEx(
+ handle,
+ windows.FileNameInfo,
+ @ptrCast(*c_void, &name_info_bytes[0]),
+ @intCast(u32, name_info_bytes.len),
+ ) == 0) {
return true;
}
- const name_info = @ptrCast(&const windows.FILE_NAME_INFO, &name_info_bytes[0]);
- const name_bytes = name_info_bytes[size..size + usize(name_info.FileNameLength)];
- const name_wide = ([]u16)(name_bytes);
- return mem.indexOf(u16, name_wide, []u16{'m','s','y','s','-'}) != null or
- mem.indexOf(u16, name_wide, []u16{'-','p','t','y'}) != null;
+ const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
+ const name_bytes = name_info_bytes[size .. size + usize(name_info.FileNameLength)];
+ const name_wide = @bytesToSlice(u16, name_bytes);
+ return mem.indexOf(u16, name_wide, []u16{ 'm', 's', 'y', 's', '-' }) != null or
+ mem.indexOf(u16, name_wide, []u16{ '-', 'p', 't', 'y' }) != null;
}
-pub const OpenError = error {
+pub const OpenError = error{
SharingViolation,
PathAlreadyExists,
FileNotFound,
@@ -92,15 +95,18 @@ pub const OpenError = error {
};
/// `file_path` needs to be copied in memory to add a null terminating byte, hence the allocator.
-pub fn windowsOpen(allocator: &mem.Allocator, file_path: []const u8, desired_access: windows.DWORD, share_mode: windows.DWORD,
- creation_disposition: windows.DWORD, flags_and_attrs: windows.DWORD)
- OpenError!windows.HANDLE
-{
+pub fn windowsOpen(
+ allocator: *mem.Allocator,
+ file_path: []const u8,
+ desired_access: windows.DWORD,
+ share_mode: windows.DWORD,
+ creation_disposition: windows.DWORD,
+ flags_and_attrs: windows.DWORD,
+) OpenError!windows.HANDLE {
const path_with_null = try cstr.addNullByte(allocator, file_path);
defer allocator.free(path_with_null);
- const result = windows.CreateFileA(path_with_null.ptr, desired_access, share_mode, null, creation_disposition,
- flags_and_attrs, null);
+ const result = windows.CreateFileA(path_with_null.ptr, desired_access, share_mode, null, creation_disposition, flags_and_attrs, null);
if (result == windows.INVALID_HANDLE_VALUE) {
const err = windows.GetLastError();
@@ -118,7 +124,7 @@ pub fn windowsOpen(allocator: &mem.Allocator, file_path: []const u8, desired_acc
}
/// Caller must free result.
-pub fn createWindowsEnvBlock(allocator: &mem.Allocator, env_map: &const BufMap) ![]u8 {
+pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u8 {
// count bytes needed
const bytes_needed = x: {
var bytes_needed: usize = 1; // 1 for the final null byte
@@ -149,25 +155,104 @@ pub fn createWindowsEnvBlock(allocator: &mem.Allocator, env_map: &const BufMap)
return result;
}
-pub fn windowsLoadDll(allocator: &mem.Allocator, dll_path: []const u8) !windows.HMODULE {
+pub fn windowsLoadDll(allocator: *mem.Allocator, dll_path: []const u8) !windows.HMODULE {
const padded_buff = try cstr.addNullByte(allocator, dll_path);
defer allocator.free(padded_buff);
- return windows.LoadLibraryA(padded_buff.ptr) ?? error.DllNotFound;
+ return windows.LoadLibraryA(padded_buff.ptr) orelse error.DllNotFound;
}
pub fn windowsUnloadDll(hModule: windows.HMODULE) void {
- assert(windows.FreeLibrary(hModule)!= 0);
+ assert(windows.FreeLibrary(hModule) != 0);
}
-
test "InvalidDll" {
- if (builtin.os != builtin.Os.windows) return;
+ if (builtin.os != builtin.Os.windows) return error.SkipZigTest;
const DllName = "asdf.dll";
const allocator = std.debug.global_allocator;
- const handle = os.windowsLoadDll(allocator, DllName) catch |err| {
+ const handle = os.windowsLoadDll(allocator, DllName) catch |err| {
assert(err == error.DllNotFound);
return;
};
}
+pub fn windowsFindFirstFile(
+ allocator: *mem.Allocator,
+ dir_path: []const u8,
+ find_file_data: *windows.WIN32_FIND_DATAA,
+) !windows.HANDLE {
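+    // FindFirstFileA expects a search pattern rather than a bare directory path,
+    // so "\*" and a terminating null byte are appended to dir_path below.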
+ const wild_and_null = []u8{ '\\', '*', 0 };
+ const path_with_wild_and_null = try allocator.alloc(u8, dir_path.len + wild_and_null.len);
+ defer allocator.free(path_with_wild_and_null);
+
+ mem.copy(u8, path_with_wild_and_null, dir_path);
+ mem.copy(u8, path_with_wild_and_null[dir_path.len..], wild_and_null);
+
+ const handle = windows.FindFirstFileA(path_with_wild_and_null.ptr, find_file_data);
+
+ if (handle == windows.INVALID_HANDLE_VALUE) {
+ const err = windows.GetLastError();
+ switch (err) {
+ windows.ERROR.FILE_NOT_FOUND,
+ windows.ERROR.PATH_NOT_FOUND,
+ => return error.PathNotFound,
+ else => return os.unexpectedErrorWindows(err),
+ }
+ }
+
+ return handle;
+}
+
+/// Returns `true` if there was another file, `false` otherwise.
+pub fn windowsFindNextFile(handle: windows.HANDLE, find_file_data: *windows.WIN32_FIND_DATAA) !bool {
+ if (windows.FindNextFileA(handle, find_file_data) == 0) {
+ const err = windows.GetLastError();
+ return switch (err) {
+ windows.ERROR.NO_MORE_FILES => false,
+ else => os.unexpectedErrorWindows(err),
+ };
+ }
+ return true;
+}
+
+pub const WindowsCreateIoCompletionPortError = error{Unexpected};
+
+pub fn windowsCreateIoCompletionPort(file_handle: windows.HANDLE, existing_completion_port: ?windows.HANDLE, completion_key: usize, concurrent_thread_count: windows.DWORD) !windows.HANDLE {
+ const handle = windows.CreateIoCompletionPort(file_handle, existing_completion_port, completion_key, concurrent_thread_count) orelse {
+ const err = windows.GetLastError();
+ switch (err) {
+ else => return os.unexpectedErrorWindows(err),
+ }
+ };
+ return handle;
+}
+
+pub const WindowsPostQueuedCompletionStatusError = error{Unexpected};
+
+pub fn windowsPostQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_transferred_count: windows.DWORD, completion_key: usize, lpOverlapped: ?*windows.OVERLAPPED) WindowsPostQueuedCompletionStatusError!void {
+ if (windows.PostQueuedCompletionStatus(completion_port, bytes_transferred_count, completion_key, lpOverlapped) == 0) {
+ const err = windows.GetLastError();
+ switch (err) {
+ else => return os.unexpectedErrorWindows(err),
+ }
+ }
+}
+
+pub const WindowsWaitResult = error{
+ Normal,
+ Aborted,
+};
+
+pub fn windowsGetQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_transferred_count: *windows.DWORD, lpCompletionKey: *usize, lpOverlapped: *?*windows.OVERLAPPED, dwMilliseconds: windows.DWORD) WindowsWaitResult {
+ if (windows.GetQueuedCompletionStatus(completion_port, bytes_transferred_count, lpCompletionKey, lpOverlapped, dwMilliseconds) == windows.FALSE) {
+ if (std.debug.runtime_safety) {
+ const err = windows.GetLastError();
+ if (err != windows.ERROR.ABANDONED_WAIT_0) {
+ std.debug.warn("err: {}\n", err);
+ }
+ assert(err == windows.ERROR.ABANDONED_WAIT_0);
+ }
+ return WindowsWaitResult.Aborted;
+ }
+ return WindowsWaitResult.Normal;
+}
diff --git a/std/os/zen.zig b/std/os/zen.zig
index 40c2c468c3..b418744e1d 100644
--- a/std/os/zen.zig
+++ b/std/os/zen.zig
@@ -6,26 +6,26 @@ const assert = std.debug.assert;
//////////////////////////
pub const Message = struct {
- sender: MailboxId,
+ sender: MailboxId,
receiver: MailboxId,
code: usize,
args: [5]usize,
payload: ?[]const u8,
- pub fn from(mailbox_id: &const MailboxId) Message {
- return Message {
- .sender = MailboxId.Undefined,
- .receiver = *mailbox_id,
+ pub fn from(mailbox_id: *const MailboxId) Message {
+ return Message{
+ .sender = MailboxId.Undefined,
+ .receiver = mailbox_id.*,
.code = undefined,
.args = undefined,
.payload = null,
};
}
- pub fn to(mailbox_id: &const MailboxId, msg_code: usize, args: ...) Message {
+ pub fn to(mailbox_id: *const MailboxId, msg_code: usize, args: ...) Message {
var message = Message {
.sender = MailboxId.This,
- .receiver = *mailbox_id,
+ .receiver = mailbox_id.*,
.code = msg_code,
.args = undefined,
.payload = null,
@@ -40,14 +40,14 @@ pub const Message = struct {
return message;
}
- pub fn as(self: &const Message, sender: &const MailboxId) Message {
- var message = *self;
- message.sender = *sender;
+ pub fn as(self: *const Message, sender: *const MailboxId) Message {
+ var message = self.*;
+ message.sender = sender.*;
return message;
}
- pub fn withPayload(self: &const Message, payload: []const u8) Message {
- var message = *self;
+ pub fn withPayload(self: *const Message, payload: []const u8) Message {
+ var message = self.*;
message.payload = payload;
return message;
}
@@ -57,27 +57,25 @@ pub const MailboxId = union(enum) {
Undefined,
This,
Kernel,
- Port: u16,
+ Port: u16,
Thread: u16,
};
-
//////////////////////////////////////
//// Ports reserved for servers ////
//////////////////////////////////////
pub const Server = struct {
- pub const Keyboard = MailboxId { .Port = 0 };
- pub const Terminal = MailboxId { .Port = 1 };
+ pub const Keyboard = MailboxId{ .Port = 0 };
+ pub const Terminal = MailboxId{ .Port = 1 };
};
-
////////////////////////
//// POSIX things ////
////////////////////////
// Standard streams.
-pub const STDIN_FILENO = 0;
+pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;
@@ -86,7 +84,7 @@ pub const getErrno = @import("linux/index.zig").getErrno;
use @import("linux/errno.zig");
// TODO: implement this correctly.
-pub fn read(fd: i32, buf: &u8, count: usize) usize {
+pub fn read(fd: i32, buf: *u8, count: usize) usize {
switch (fd) {
STDIN_FILENO => {
var i: usize = 0;
@@ -95,7 +93,7 @@ pub fn read(fd: i32, buf: &u8, count: usize) usize {
// FIXME: we should be certain that we are receiving from Keyboard.
var message = Message.from(MailboxId.This);
- receive(&message);
+            receive(&message);
buf[i] = u8(message.args[0]);
}
@@ -106,7 +104,7 @@ pub fn read(fd: i32, buf: &u8, count: usize) usize {
}
// TODO: implement this correctly.
-pub fn write(fd: i32, buf: &const u8, count: usize) usize {
+pub fn write(fd: i32, buf: *const u8, count: usize) usize {
switch (fd) {
STDOUT_FILENO, STDERR_FILENO => {
send(Message.to(Server.Terminal, 1)
@@ -117,7 +115,6 @@ pub fn write(fd: i32, buf: &const u8, count: usize) usize {
return count;
}
-
///////////////////////////
//// Syscall numbers ////
///////////////////////////
@@ -133,7 +130,6 @@ pub const Syscall = enum(usize) {
createThread = 7,
};
-
////////////////////
//// Syscalls ////
////////////////////
@@ -143,15 +139,15 @@ pub fn exit(status: i32) noreturn {
unreachable;
}
-pub fn send(message: &const Message) void {
+pub fn send(message: *const Message) void {
_ = syscall1(Syscall.send, @ptrToInt(message));
}
-pub fn receive(destination: &Message) void {
+pub fn receive(destination: *Message) void {
_ = syscall1(Syscall.receive, @ptrToInt(destination));
}
-pub fn subscribeIRQ(irq: u8, mailbox_id: &const MailboxId) void {
+pub fn subscribeIRQ(irq: u8, mailbox_id: *const MailboxId) void {
_ = syscall2(Syscall.subscribeIRQ, irq, @ptrToInt(mailbox_id));
}
@@ -167,7 +163,7 @@ pub fn map(v_addr: usize, p_addr: usize, size: usize, writable: bool) bool {
return syscall4(Syscall.map, v_addr, p_addr, size, usize(writable)) != 0;
}
-pub fn createThread(function: fn()void) u16 {
+pub fn createThread(function: fn () void) u16 {
return u16(syscall1(Syscall.createThread, @ptrToInt(function)));
}
@@ -178,66 +174,84 @@ pub fn createThread(function: fn()void) u16 {
inline fn syscall0(number: Syscall) usize {
return asm volatile ("int $0x80"
: [ret] "={eax}" (-> usize)
- : [number] "{eax}" (number));
+ : [number] "{eax}" (number)
+ );
}
inline fn syscall1(number: Syscall, arg1: usize) usize {
return asm volatile ("int $0x80"
: [ret] "={eax}" (-> usize)
: [number] "{eax}" (number),
- [arg1] "{ecx}" (arg1));
+ [arg1] "{ecx}" (arg1)
+ );
}
inline fn syscall2(number: Syscall, arg1: usize, arg2: usize) usize {
return asm volatile ("int $0x80"
: [ret] "={eax}" (-> usize)
: [number] "{eax}" (number),
- [arg1] "{ecx}" (arg1),
- [arg2] "{edx}" (arg2));
+ [arg1] "{ecx}" (arg1),
+ [arg2] "{edx}" (arg2)
+ );
}
inline fn syscall3(number: Syscall, arg1: usize, arg2: usize, arg3: usize) usize {
return asm volatile ("int $0x80"
: [ret] "={eax}" (-> usize)
: [number] "{eax}" (number),
- [arg1] "{ecx}" (arg1),
- [arg2] "{edx}" (arg2),
- [arg3] "{ebx}" (arg3));
+ [arg1] "{ecx}" (arg1),
+ [arg2] "{edx}" (arg2),
+ [arg3] "{ebx}" (arg3)
+ );
}
inline fn syscall4(number: Syscall, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
return asm volatile ("int $0x80"
: [ret] "={eax}" (-> usize)
: [number] "{eax}" (number),
- [arg1] "{ecx}" (arg1),
- [arg2] "{edx}" (arg2),
- [arg3] "{ebx}" (arg3),
- [arg4] "{esi}" (arg4));
+ [arg1] "{ecx}" (arg1),
+ [arg2] "{edx}" (arg2),
+ [arg3] "{ebx}" (arg3),
+ [arg4] "{esi}" (arg4)
+ );
}
-inline fn syscall5(number: Syscall, arg1: usize, arg2: usize, arg3: usize,
- arg4: usize, arg5: usize) usize
-{
+inline fn syscall5(
+ number: Syscall,
+ arg1: usize,
+ arg2: usize,
+ arg3: usize,
+ arg4: usize,
+ arg5: usize,
+) usize {
return asm volatile ("int $0x80"
: [ret] "={eax}" (-> usize)
: [number] "{eax}" (number),
- [arg1] "{ecx}" (arg1),
- [arg2] "{edx}" (arg2),
- [arg3] "{ebx}" (arg3),
- [arg4] "{esi}" (arg4),
- [arg5] "{edi}" (arg5));
+ [arg1] "{ecx}" (arg1),
+ [arg2] "{edx}" (arg2),
+ [arg3] "{ebx}" (arg3),
+ [arg4] "{esi}" (arg4),
+ [arg5] "{edi}" (arg5)
+ );
}
-inline fn syscall6(number: Syscall, arg1: usize, arg2: usize, arg3: usize,
- arg4: usize, arg5: usize, arg6: usize) usize
-{
+inline fn syscall6(
+ number: Syscall,
+ arg1: usize,
+ arg2: usize,
+ arg3: usize,
+ arg4: usize,
+ arg5: usize,
+ arg6: usize,
+) usize {
return asm volatile ("int $0x80"
: [ret] "={eax}" (-> usize)
: [number] "{eax}" (number),
- [arg1] "{ecx}" (arg1),
- [arg2] "{edx}" (arg2),
- [arg3] "{ebx}" (arg3),
- [arg4] "{esi}" (arg4),
- [arg5] "{edi}" (arg5),
- [arg6] "{ebp}" (arg6));
+ [arg1] "{ecx}" (arg1),
+ [arg2] "{edx}" (arg2),
+ [arg3] "{ebx}" (arg3),
+ [arg4] "{esi}" (arg4),
+ [arg5] "{edi}" (arg5),
+ [arg6] "{ebp}" (arg6)
+ );
}
diff --git a/std/rand/index.zig b/std/rand/index.zig
index 6a746fce92..2cbff049ea 100644
--- a/std/rand/index.zig
+++ b/std/rand/index.zig
@@ -19,6 +19,7 @@ const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
const math = std.math;
+const ziggurat = @import("ziggurat.zig");
// When you need fast unbiased random numbers
pub const DefaultPrng = Xoroshiro128;
@@ -27,15 +28,15 @@ pub const DefaultPrng = Xoroshiro128;
pub const DefaultCsprng = Isaac64;
pub const Random = struct {
- fillFn: fn(r: &Random, buf: []u8) void,
+ fillFn: fn (r: *Random, buf: []u8) void,
- /// Read random bytes into the specified buffer until fill.
- pub fn bytes(r: &Random, buf: []u8) void {
+ /// Read random bytes into the specified buffer until full.
+ pub fn bytes(r: *Random, buf: []u8) void {
r.fillFn(r, buf);
}
/// Return a random integer/boolean type.
- pub fn scalar(r: &Random, comptime T: type) T {
+ pub fn scalar(r: *Random, comptime T: type) T {
var rand_bytes: [@sizeOf(T)]u8 = undefined;
r.bytes(rand_bytes[0..]);
@@ -47,28 +48,28 @@ pub const Random = struct {
}
}
- /// Get a random unsigned integer with even distribution between `start`
- /// inclusive and `end` exclusive.
- pub fn range(r: &Random, comptime T: type, start: T, end: T) T {
- assert(start <= end);
+ /// Return a random integer with even distribution between `start`
+ /// inclusive and `end` exclusive. `start` must be less than `end`.
+ pub fn range(r: *Random, comptime T: type, start: T, end: T) T {
+ assert(start < end);
if (T.is_signed) {
const uint = @IntType(false, T.bit_count);
if (start >= 0 and end >= 0) {
- return T(r.range(uint, uint(start), uint(end)));
+ return @intCast(T, r.range(uint, @intCast(uint, start), @intCast(uint, end)));
} else if (start < 0 and end < 0) {
// Can't overflow because the range is over signed ints
return math.negateCast(r.range(uint, math.absCast(end), math.absCast(start)) + 1) catch unreachable;
} else if (start < 0 and end >= 0) {
- const end_uint = uint(end);
+ const end_uint = @intCast(uint, end);
const total_range = math.absCast(start) + end_uint;
const value = r.range(uint, 0, total_range);
const result = if (value < end_uint) x: {
- break :x T(value);
+ break :x @intCast(T, value);
} else if (value == end_uint) x: {
break :x start;
} else x: {
// Can't overflow because the range is over signed ints
- break :x math.negateCast(value - end_uint) catch unreachable;
+ break :x math.negateCast(value - end_uint) catch unreachable;
};
return result;
} else {
@@ -91,7 +92,7 @@ pub const Random = struct {
}
/// Return a floating point value evenly distributed in the range [0, 1).
- pub fn float(r: &Random, comptime T: type) T {
+ pub fn float(r: *Random, comptime T: type) T {
// Generate a uniform value between [1, 2) and scale down to [0, 1).
// Note: The lowest mantissa bit is always set to 0 so we only use half the available range.
switch (T) {
@@ -109,19 +110,32 @@ pub const Random = struct {
}
}
- /// Return a floating point value normally distributed in the range [0, 1].
- pub fn floatNorm(r: &Random, comptime T: type) T {
- // TODO(tiehuis): See https://www.doornik.com/research/ziggurat.pdf
- @compileError("floatNorm is unimplemented");
+ /// Return a floating point value normally distributed with mean = 0, stddev = 1.
+ ///
+ /// To use different parameters, use: floatNorm(...) * desiredStddev + desiredMean.
+ pub fn floatNorm(r: *Random, comptime T: type) T {
+ const value = ziggurat.next_f64(r, ziggurat.NormDist);
+ switch (T) {
+ f32 => return @floatCast(f32, value),
+ f64 => return value,
+ else => @compileError("unknown floating point type"),
+ }
}
- /// Return a exponentially distributed float between (0, @maxValue(f64))
- pub fn floatExp(r: &Random, comptime T: type) T {
- @compileError("floatExp is unimplemented");
+ /// Return an exponentially distributed float with a rate parameter of 1.
+ ///
+ /// To use a different rate parameter, use: floatExp(...) / desiredRate.
+ pub fn floatExp(r: *Random, comptime T: type) T {
+ const value = ziggurat.next_f64(r, ziggurat.ExpDist);
+ switch (T) {
+ f32 => return @floatCast(f32, value),
+ f64 => return value,
+ else => @compileError("unknown floating point type"),
+ }
}
/// Shuffle a slice into a random order.
- pub fn shuffle(r: &Random, comptime T: type, buf: []T) void {
+ pub fn shuffle(r: *Random, comptime T: type, buf: []T) void {
if (buf.len < 2) {
return;
}
@@ -142,10 +156,10 @@ const SplitMix64 = struct {
s: u64,
pub fn init(seed: u64) SplitMix64 {
- return SplitMix64 { .s = seed };
+ return SplitMix64{ .s = seed };
}
- pub fn next(self: &SplitMix64) u64 {
+ pub fn next(self: *SplitMix64) u64 {
self.s +%= 0x9e3779b97f4a7c15;
var z = self.s;
@@ -158,7 +172,7 @@ const SplitMix64 = struct {
test "splitmix64 sequence" {
var r = SplitMix64.init(0xaeecf86f7878dd75);
- const seq = []const u64 {
+ const seq = []const u64{
0x5dbd39db0178eb44,
0xa9900fb66b397da3,
0x5c1a28b1aeebcf5c,
@@ -184,8 +198,8 @@ pub const Pcg = struct {
i: u64,
pub fn init(init_s: u64) Pcg {
- var pcg = Pcg {
- .random = Random { .fillFn = fill },
+ var pcg = Pcg{
+ .random = Random{ .fillFn = fill },
.s = undefined,
.i = undefined,
};
@@ -194,23 +208,23 @@ pub const Pcg = struct {
return pcg;
}
- fn next(self: &Pcg) u32 {
+ fn next(self: *Pcg) u32 {
const l = self.s;
self.s = l *% default_multiplier +% (self.i | 1);
const xor_s = @truncate(u32, ((l >> 18) ^ l) >> 27);
- const rot = u32(l >> 59);
+ const rot = @intCast(u32, l >> 59);
- return (xor_s >> u5(rot)) | (xor_s << u5((0 -% rot) & 31));
+ return (xor_s >> @intCast(u5, rot)) | (xor_s << @intCast(u5, (0 -% rot) & 31));
}
- fn seed(self: &Pcg, init_s: u64) void {
+ fn seed(self: *Pcg, init_s: u64) void {
// Pcg requires 128-bits of seed.
var gen = SplitMix64.init(init_s);
self.seedTwo(gen.next(), gen.next());
}
- fn seedTwo(self: &Pcg, init_s: u64, init_i: u64) void {
+ fn seedTwo(self: *Pcg, init_s: u64, init_i: u64) void {
self.s = 0;
self.i = (init_s << 1) | 1;
self.s = self.s *% default_multiplier +% self.i;
@@ -218,7 +232,7 @@ pub const Pcg = struct {
self.s = self.s *% default_multiplier +% self.i;
}
- fn fill(r: &Random, buf: []u8) void {
+ fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Pcg, "random", r);
var i: usize = 0;
@@ -251,7 +265,7 @@ test "pcg sequence" {
const s1: u64 = 0x84e9c579ef59bbf7;
r.seedTwo(s0, s1);
- const seq = []const u32 {
+ const seq = []const u32{
2881561918,
3063928540,
1199791034,
@@ -274,8 +288,8 @@ pub const Xoroshiro128 = struct {
s: [2]u64,
pub fn init(init_s: u64) Xoroshiro128 {
- var x = Xoroshiro128 {
- .random = Random { .fillFn = fill },
+ var x = Xoroshiro128{
+ .random = Random{ .fillFn = fill },
.s = undefined,
};
@@ -283,7 +297,7 @@ pub const Xoroshiro128 = struct {
return x;
}
- fn next(self: &Xoroshiro128) u64 {
+ fn next(self: *Xoroshiro128) u64 {
const s0 = self.s[0];
var s1 = self.s[1];
const r = s0 +% s1;
@@ -296,19 +310,19 @@ pub const Xoroshiro128 = struct {
}
// Skip 2^64 places ahead in the sequence
- fn jump(self: &Xoroshiro128) void {
+ fn jump(self: *Xoroshiro128) void {
var s0: u64 = 0;
var s1: u64 = 0;
- const table = []const u64 {
+ const table = []const u64{
0xbeac0467eba5facb,
- 0xd86b048b86aa9922
+ 0xd86b048b86aa9922,
};
inline for (table) |entry| {
var b: usize = 0;
while (b < 64) : (b += 1) {
- if ((entry & (u64(1) << u6(b))) != 0) {
+ if ((entry & (u64(1) << @intCast(u6, b))) != 0) {
s0 ^= self.s[0];
s1 ^= self.s[1];
}
@@ -320,7 +334,7 @@ pub const Xoroshiro128 = struct {
self.s[1] = s1;
}
- fn seed(self: &Xoroshiro128, init_s: u64) void {
+ fn seed(self: *Xoroshiro128, init_s: u64) void {
// Xoroshiro requires 128-bits of seed.
var gen = SplitMix64.init(init_s);
@@ -328,7 +342,7 @@ pub const Xoroshiro128 = struct {
self.s[1] = gen.next();
}
- fn fill(r: &Random, buf: []u8) void {
+ fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Xoroshiro128, "random", r);
var i: usize = 0;
@@ -360,7 +374,7 @@ test "xoroshiro sequence" {
r.s[0] = 0xaeecf86f7878dd75;
r.s[1] = 0x01cd153642e72622;
- const seq1 = []const u64 {
+ const seq1 = []const u64{
0xb0ba0da5bb600397,
0x18a08afde614dccc,
0xa2635b956a31b929,
@@ -373,10 +387,9 @@ test "xoroshiro sequence" {
std.debug.assert(s == r.next());
}
-
r.jump();
- const seq2 = []const u64 {
+ const seq2 = []const u64{
0x95344a13556d3e22,
0xb4fb32dafa4d00df,
0xb2011d9ccdcfe2dd,
@@ -407,8 +420,8 @@ pub const Isaac64 = struct {
i: usize,
pub fn init(init_s: u64) Isaac64 {
- var isaac = Isaac64 {
- .random = Random { .fillFn = fill },
+ var isaac = Isaac64{
+ .random = Random{ .fillFn = fill },
.r = undefined,
.m = undefined,
.a = undefined,
@@ -422,7 +435,7 @@ pub const Isaac64 = struct {
return isaac;
}
- fn step(self: &Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: usize) void {
+ fn step(self: *Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: usize) void {
const x = self.m[base + m1];
self.a = mix +% self.m[base + m2];
@@ -433,7 +446,7 @@ pub const Isaac64 = struct {
self.r[self.r.len - 1 - base - m1] = self.b;
}
- fn refill(self: &Isaac64) void {
+ fn refill(self: *Isaac64) void {
const midpoint = self.r.len / 2;
self.c +%= 1;
@@ -442,27 +455,27 @@ pub const Isaac64 = struct {
{
var i: usize = 0;
while (i < midpoint) : (i += 4) {
- self.step( ~(self.a ^ (self.a << 21)), i + 0, 0, midpoint);
- self.step( self.a ^ (self.a >> 5) , i + 1, 0, midpoint);
- self.step( self.a ^ (self.a << 12) , i + 2, 0, midpoint);
- self.step( self.a ^ (self.a >> 33) , i + 3, 0, midpoint);
+ self.step(~(self.a ^ (self.a << 21)), i + 0, 0, midpoint);
+ self.step(self.a ^ (self.a >> 5), i + 1, 0, midpoint);
+ self.step(self.a ^ (self.a << 12), i + 2, 0, midpoint);
+ self.step(self.a ^ (self.a >> 33), i + 3, 0, midpoint);
}
}
{
var i: usize = 0;
while (i < midpoint) : (i += 4) {
- self.step( ~(self.a ^ (self.a << 21)), i + 0, midpoint, 0);
- self.step( self.a ^ (self.a >> 5) , i + 1, midpoint, 0);
- self.step( self.a ^ (self.a << 12) , i + 2, midpoint, 0);
- self.step( self.a ^ (self.a >> 33) , i + 3, midpoint, 0);
+ self.step(~(self.a ^ (self.a << 21)), i + 0, midpoint, 0);
+ self.step(self.a ^ (self.a >> 5), i + 1, midpoint, 0);
+ self.step(self.a ^ (self.a << 12), i + 2, midpoint, 0);
+ self.step(self.a ^ (self.a >> 33), i + 3, midpoint, 0);
}
}
self.i = 0;
}
- fn next(self: &Isaac64) u64 {
+ fn next(self: *Isaac64) u64 {
if (self.i >= self.r.len) {
self.refill();
}
@@ -472,14 +485,14 @@ pub const Isaac64 = struct {
return value;
}
- fn seed(self: &Isaac64, init_s: u64, comptime rounds: usize) void {
+ fn seed(self: *Isaac64, init_s: u64, comptime rounds: usize) void {
// We ignore the multi-pass requirement since we don't currently expose full access to
// seeding the self.m array completely.
mem.set(u64, self.m[0..], 0);
self.m[0] = init_s;
// prescrambled golden ratio constants
- var a = []const u64 {
+ var a = []const u64{
0x647c4677a2884b7c,
0xb9f8b322c73ac862,
0x8c0ea5053d4712a0,
@@ -499,14 +512,30 @@ pub const Isaac64 = struct {
a[x1] +%= self.m[j + x1];
}
- a[0] -%= a[4]; a[5] ^= a[7] >> 9; a[7] +%= a[0];
- a[1] -%= a[5]; a[6] ^= a[0] << 9; a[0] +%= a[1];
- a[2] -%= a[6]; a[7] ^= a[1] >> 23; a[1] +%= a[2];
- a[3] -%= a[7]; a[0] ^= a[2] << 15; a[2] +%= a[3];
- a[4] -%= a[0]; a[1] ^= a[3] >> 14; a[3] +%= a[4];
- a[5] -%= a[1]; a[2] ^= a[4] << 20; a[4] +%= a[5];
- a[6] -%= a[2]; a[3] ^= a[5] >> 17; a[5] +%= a[6];
- a[7] -%= a[3]; a[4] ^= a[6] << 14; a[6] +%= a[7];
+ a[0] -%= a[4];
+ a[5] ^= a[7] >> 9;
+ a[7] +%= a[0];
+ a[1] -%= a[5];
+ a[6] ^= a[0] << 9;
+ a[0] +%= a[1];
+ a[2] -%= a[6];
+ a[7] ^= a[1] >> 23;
+ a[1] +%= a[2];
+ a[3] -%= a[7];
+ a[0] ^= a[2] << 15;
+ a[2] +%= a[3];
+ a[4] -%= a[0];
+ a[1] ^= a[3] >> 14;
+ a[3] +%= a[4];
+ a[5] -%= a[1];
+ a[2] ^= a[4] << 20;
+ a[4] +%= a[5];
+ a[6] -%= a[2];
+ a[3] ^= a[5] >> 17;
+ a[5] +%= a[6];
+ a[7] -%= a[3];
+ a[4] ^= a[6] << 14;
+ a[6] +%= a[7];
comptime var x2: usize = 0;
inline while (x2 < 8) : (x2 += 1) {
@@ -519,10 +548,10 @@ pub const Isaac64 = struct {
self.a = 0;
self.b = 0;
self.c = 0;
- self.i = self.r.len; // trigger refill on first value
+ self.i = self.r.len; // trigger refill on first value
}
- fn fill(r: &Random, buf: []u8) void {
+ fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Isaac64, "random", r);
var i: usize = 0;
@@ -553,7 +582,7 @@ test "isaac64 sequence" {
var r = Isaac64.init(0);
// from reference implementation
- const seq = []const u64 {
+ const seq = []const u64{
0xf67dfba498e4937c,
0x84a5066a9204f380,
0xfee34bd5f5514dbb,
@@ -595,7 +624,7 @@ test "Random float" {
test "Random scalar" {
var prng = DefaultPrng.init(0);
- const s = prng .random.scalar(u64);
+ const s = prng.random.scalar(u64);
}
test "Random bytes" {
@@ -607,8 +636,8 @@ test "Random bytes" {
test "Random shuffle" {
var prng = DefaultPrng.init(0);
- var seq = []const u8 { 0, 1, 2, 3, 4 };
- var seen = []bool {false} ** 5;
+ var seq = []const u8{ 0, 1, 2, 3, 4 };
+ var seen = []bool{false} ** 5;
var i: usize = 0;
while (i < 1000) : (i += 1) {
@@ -625,7 +654,8 @@ test "Random shuffle" {
fn sumArray(s: []const u8) u32 {
var r: u32 = 0;
- for (s) |e| r += e;
+ for (s) |e|
+ r += e;
return r;
}
@@ -634,16 +664,17 @@ test "Random range" {
testRange(&prng.random, -4, 3);
testRange(&prng.random, -4, -1);
testRange(&prng.random, 10, 14);
+ // TODO: test that prng.random.range(1, 1) causes an assertion error
}
-fn testRange(r: &Random, start: i32, end: i32) void {
- const count = usize(end - start);
+fn testRange(r: *Random, start: i32, end: i32) void {
+ const count = @intCast(usize, end - start);
var values_buffer = []bool{false} ** 20;
const values = values_buffer[0..count];
var i: usize = 0;
while (i < count) {
const value = r.range(i32, start, end);
- const index = usize(value - start);
+ const index = @intCast(usize, value - start);
if (!values[index]) {
i += 1;
values[index] = true;
diff --git a/std/rand/ziggurat.zig b/std/rand/ziggurat.zig
new file mode 100644
index 0000000000..f7a1359f17
--- /dev/null
+++ b/std/rand/ziggurat.zig
@@ -0,0 +1,166 @@
+// Implements ZIGNOR [1].
+//
+// [1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to Generate Normal Random Samples*]
+// (https://www.doornik.com/research/ziggurat.pdf). Nuffield College, Oxford.
+//
+// rust/rand was used as a reference.
+//
+// NOTE: This seems interesting, but the reference code is a bit hard to grok:
+// https://sbarral.github.io/etf.
+
+const std = @import("../index.zig");
+const math = std.math;
+const Random = std.rand.Random;
+
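+/// Returns a random f64 sampled from the distribution described by `tables`.
+/// The common case picks a rectangle from the table and accepts immediately;
+/// only edge cases fall back to evaluating the pdf or the `zero_case` handler.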
+pub fn next_f64(random: *Random, comptime tables: *const ZigTable) f64 {
+ while (true) {
+ // We manually construct a float from parts as we can avoid an extra random lookup here by
+ // using the unused exponent for the lookup table entry.
+ const bits = random.scalar(u64);
+ const i = usize(bits & 0xff);
+
+ const u = blk: {
+ if (tables.is_symmetric) {
+ // Generate a value in the range [2, 4) and scale into [-1, 1)
+ const repr = ((0x3ff + 1) << 52) | (bits >> 12);
+ break :blk @bitCast(f64, repr) - 3.0;
+ } else {
+ // Generate a value in the range [1, 2) and scale into (0, 1)
+ const repr = (0x3ff << 52) | (bits >> 12);
+ break :blk @bitCast(f64, repr) - (1.0 - math.f64_epsilon / 2.0);
+ }
+ };
+
+ const x = u * tables.x[i];
+ const test_x = if (tables.is_symmetric) math.fabs(x) else x;
+
+ // equivalent to |u| < tables.x[i+1] / tables.x[i] (or u < tables.x[i+1] / tables.x[i])
+ if (test_x < tables.x[i + 1]) {
+ return x;
+ }
+
+ if (i == 0) {
+ return tables.zero_case(random, u);
+ }
+
+ // equivalent to f1 + DRanU() * (f0 - f1) < 1
+ if (tables.f[i + 1] + (tables.f[i] - tables.f[i + 1]) * random.float(f64) < tables.pdf(x)) {
+ return x;
+ }
+ }
+}
+
+pub const ZigTable = struct {
+ r: f64,
+ x: [257]f64,
+ f: [257]f64,
+
+ // probability density function used as a fallback
+ pdf: fn (f64) f64,
+ // whether the distribution is symmetric
+ is_symmetric: bool,
+ // fallback calculation in the case we are in the 0 block
+ zero_case: fn (*Random, f64) f64,
+};
+
+// zigNorInit
+fn ZigTableGen(
+ comptime is_symmetric: bool,
+ comptime r: f64,
+ comptime v: f64,
+ comptime f: fn (f64) f64,
+ comptime f_inv: fn (f64) f64,
+ comptime zero_case: fn (*Random, f64) f64,
+) ZigTable {
+ var tables: ZigTable = undefined;
+
+ tables.is_symmetric = is_symmetric;
+ tables.r = r;
+ tables.pdf = f;
+ tables.zero_case = zero_case;
+
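+    // Every ziggurat block has the same area `v`; x[1] = r is the outermost
+    // boundary and each remaining boundary is derived from the previous one.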
+ tables.x[0] = v / f(r);
+ tables.x[1] = r;
+
+ for (tables.x[2..256]) |*entry, i| {
+ const last = tables.x[2 + i - 1];
+ entry.* = f_inv(v / last + f(last));
+ }
+ tables.x[256] = 0;
+
+ for (tables.f[0..]) |*entry, i| {
+ entry.* = f(tables.x[i]);
+ }
+
+ return tables;
+}
+
+// N(0, 1)
+pub const NormDist = blk: {
+ @setEvalBranchQuota(30000);
+ break :blk ZigTableGen(true, norm_r, norm_v, norm_f, norm_f_inv, norm_zero_case);
+};
+
+const norm_r = 3.6541528853610088;
+const norm_v = 0.00492867323399;
+
+fn norm_f(x: f64) f64 {
+ return math.exp(-x * x / 2.0);
+}
+fn norm_f_inv(y: f64) f64 {
+ return math.sqrt(-2.0 * math.ln(y));
+}
+fn norm_zero_case(random: *Random, u: f64) f64 {
+ var x: f64 = 1;
+ var y: f64 = 0;
+
+ while (-2.0 * y < x * x) {
+ x = math.ln(random.float(f64)) / norm_r;
+ y = math.ln(random.float(f64));
+ }
+
+ if (u < 0) {
+ return x - norm_r;
+ } else {
+ return norm_r - x;
+ }
+}
+
+test "ziggurant normal dist sanity" {
+ var prng = std.rand.DefaultPrng.init(0);
+ var i: usize = 0;
+ while (i < 1000) : (i += 1) {
+ _ = prng.random.floatNorm(f64);
+ }
+}
+
+// Exp(1)
+pub const ExpDist = blk: {
+ @setEvalBranchQuota(30000);
+ break :blk ZigTableGen(false, exp_r, exp_v, exp_f, exp_f_inv, exp_zero_case);
+};
+
+const exp_r = 7.69711747013104972;
+const exp_v = 0.0039496598225815571993;
+
+fn exp_f(x: f64) f64 {
+ return math.exp(-x);
+}
+fn exp_f_inv(y: f64) f64 {
+ return -math.ln(y);
+}
+fn exp_zero_case(random: *Random, _: f64) f64 {
+ return exp_r - math.ln(random.float(f64));
+}
+
+test "ziggurant exp dist sanity" {
+ var prng = std.rand.DefaultPrng.init(0);
+ var i: usize = 0;
+ while (i < 1000) : (i += 1) {
+ _ = prng.random.floatExp(f64);
+ }
+}
+
+test "ziggurat table gen" {
+ const table = NormDist;
+}
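Not part of the patch: a minimal sketch of how these tables are reached from user code, assuming (as the sanity tests above suggest) that Random.floatNorm and Random.floatExp dispatch to NormDist and ExpDist:

const std = @import("std");

test "ziggurat sampling sketch" {
    var prng = std.rand.DefaultPrng.init(42);
    var sum: f64 = 0;
    var i: usize = 0;
    while (i < 1000) : (i += 1) {
        sum += prng.random.floatNorm(f64); // N(0, 1), assumed to be backed by NormDist
        _ = prng.random.floatExp(f64); // Exp(1), assumed to be backed by ExpDist
    }
    // The sample mean of 1000 standard normal draws should sit well within (-0.5, 0.5).
    std.debug.assert(sum / 1000.0 > -0.5 and sum / 1000.0 < 0.5);
}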
diff --git a/std/segmented_list.zig b/std/segmented_list.zig
new file mode 100644
index 0000000000..6e3f32e9d6
--- /dev/null
+++ b/std/segmented_list.zig
@@ -0,0 +1,389 @@
+const std = @import("index.zig");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+
+// Imagine that `fn at(self: *Self, index: usize) *T` is a customer asking for a box
+// from a warehouse, as if the boxes were stored in a flat array, ordered from 0 to N - 1.
+// But the warehouse actually stores boxes on shelves of increasing power-of-2 sizes.
+// So when the customer requests a box index, we have to translate it to a shelf index
+// and a box index within that shelf. Illustration:
+//
+// customer indexes:
+// shelf 0: 0
+// shelf 1: 1 2
+// shelf 2: 3 4 5 6
+// shelf 3: 7 8 9 10 11 12 13 14
+// shelf 4: 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
+// shelf 5: 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62
+// ...
+//
+// warehouse indexes:
+// shelf 0: 0
+// shelf 1: 0 1
+// shelf 2: 0 1 2 3
+// shelf 3: 0 1 2 3 4 5 6 7
+// shelf 4: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+// shelf 5: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+// ...
+//
+// With this arrangement, here are the equations to get the shelf index and
+// box index based on customer box index:
+//
+// shelf_index = floor(log2(customer_index + 1))
+// shelf_count = ceil(log2(box_count + 1))
+// box_index = customer_index + 1 - 2 ** shelf_index
+// shelf_size = 2 ** shelf_index
+//
+// Now we complicate it a little bit further by adding a preallocated shelf, which must be
+// a power of 2:
+// prealloc=4
+//
+// customer indexes:
+// prealloc: 0 1 2 3
+// shelf 0: 4 5 6 7 8 9 10 11
+// shelf 1: 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
+// shelf 2: 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59
+// ...
+//
+// warehouse indexes:
+// prealloc: 0 1 2 3
+// shelf 0: 0 1 2 3 4 5 6 7
+// shelf 1: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+// shelf 2: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+// ...
+//
+// Now the equations are:
+//
+// shelf_index = floor(log2(customer_index + prealloc)) - log2(prealloc) - 1
+// shelf_count = ceil(log2(box_count + prealloc)) - log2(prealloc) - 1
+// box_index = customer_index + prealloc - 2 ** (log2(prealloc) + 1 + shelf_index)
+// shelf_size = prealloc * 2 ** (shelf_index + 1)
+
+/// This is a stack data structure where pointers to elements have the same lifetime as the data structure
+/// itself, unlike ArrayList where push() invalidates all existing element pointers.
+/// The tradeoff is that elements are not guaranteed to be contiguous. For that, use ArrayList.
+/// Note however that most elements are contiguous, making this data structure cache-friendly.
+///
+/// Because it never has to copy elements from an old location to a new location, it does not require
+/// its elements to be copyable, and it avoids wasting memory when backed by an ArenaAllocator.
+/// Note that the push() and pop() convenience methods perform a copy, but you can instead use
+/// addOne(), at(), setCapacity(), and shrinkCapacity() to avoid copying items.
+///
+/// This data structure has O(1) push and O(1) pop.
+///
+/// It supports preallocated elements, making it especially well suited when the expected maximum
+/// size is small. `prealloc_item_count` must be 0, or a power of 2.
+pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type {
+ return struct {
+ const Self = this;
+ const prealloc_exp = blk: {
+ // we don't use the prealloc_exp constant when prealloc_item_count is 0.
+ assert(prealloc_item_count != 0);
+
+ const value = std.math.log2_int(usize, prealloc_item_count);
+ assert((1 << value) == prealloc_item_count); // prealloc_item_count must be a power of 2
+ break :blk @typeOf(1)(value);
+ };
+ const ShelfIndex = std.math.Log2Int(usize);
+
+ prealloc_segment: [prealloc_item_count]T,
+ dynamic_segments: [][*]T,
+ allocator: *Allocator,
+ len: usize,
+
+ pub const prealloc_count = prealloc_item_count;
+
+ /// Deinitialize with `deinit`
+ pub fn init(allocator: *Allocator) Self {
+ return Self{
+ .allocator = allocator,
+ .len = 0,
+ .prealloc_segment = undefined,
+ .dynamic_segments = [][*]T{},
+ };
+ }
+
+ pub fn deinit(self: *Self) void {
+ self.freeShelves(@intCast(ShelfIndex, self.dynamic_segments.len), 0);
+ self.allocator.free(self.dynamic_segments);
+ self.* = undefined;
+ }
+
+ pub fn at(self: *Self, i: usize) *T {
+ assert(i < self.len);
+ return self.uncheckedAt(i);
+ }
+
+ pub fn count(self: *const Self) usize {
+ return self.len;
+ }
+
+ pub fn push(self: *Self, item: *const T) !void {
+ const new_item_ptr = try self.addOne();
+ new_item_ptr.* = item.*;
+ }
+
+ pub fn pushMany(self: *Self, items: []const T) !void {
+ for (items) |item| {
+ try self.push(item);
+ }
+ }
+
+ pub fn pop(self: *Self) ?T {
+ if (self.len == 0) return null;
+
+ const index = self.len - 1;
+ const result = self.uncheckedAt(index).*;
+ self.len = index;
+ return result;
+ }
+
+ pub fn addOne(self: *Self) !*T {
+ const new_length = self.len + 1;
+ try self.growCapacity(new_length);
+ const result = self.uncheckedAt(self.len);
+ self.len = new_length;
+ return result;
+ }
+
+ /// Grows or shrinks capacity to match usage.
+ pub fn setCapacity(self: *Self, new_capacity: usize) !void {
+ if (new_capacity <= usize(1) << (prealloc_exp + self.dynamic_segments.len)) {
+ return self.shrinkCapacity(new_capacity);
+ } else {
+ return self.growCapacity(new_capacity);
+ }
+ }
+
+ /// Only grows capacity, or retains current capacity
+ pub fn growCapacity(self: *Self, new_capacity: usize) !void {
+ const new_cap_shelf_count = shelfCount(new_capacity);
+ const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len);
+ if (new_cap_shelf_count > old_shelf_count) {
+ self.dynamic_segments = try self.allocator.realloc([*]T, self.dynamic_segments, new_cap_shelf_count);
+ var i = old_shelf_count;
+ errdefer {
+ self.freeShelves(i, old_shelf_count);
+ self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, old_shelf_count);
+ }
+ while (i < new_cap_shelf_count) : (i += 1) {
+ self.dynamic_segments[i] = (try self.allocator.alloc(T, shelfSize(i))).ptr;
+ }
+ }
+ }
+
+ /// Only shrinks capacity or retains current capacity
+ pub fn shrinkCapacity(self: *Self, new_capacity: usize) void {
+ if (new_capacity <= prealloc_item_count) {
+ const len = @intCast(ShelfIndex, self.dynamic_segments.len);
+ self.freeShelves(len, 0);
+ self.allocator.free(self.dynamic_segments);
+ self.dynamic_segments = [][*]T{};
+ return;
+ }
+
+ const new_cap_shelf_count = shelfCount(new_capacity);
+ const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len);
+ assert(new_cap_shelf_count <= old_shelf_count);
+ if (new_cap_shelf_count == old_shelf_count) {
+ return;
+ }
+
+ self.freeShelves(old_shelf_count, new_cap_shelf_count);
+ self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, new_cap_shelf_count);
+ }
+
+ pub fn uncheckedAt(self: *Self, index: usize) *T {
+ if (index < prealloc_item_count) {
+ return &self.prealloc_segment[index];
+ }
+ const shelf_index = shelfIndex(index);
+ const box_index = boxIndex(index, shelf_index);
+ return &self.dynamic_segments[shelf_index][box_index];
+ }
+
+ fn shelfCount(box_count: usize) ShelfIndex {
+ if (prealloc_item_count == 0) {
+ return std.math.log2_int_ceil(usize, box_count + 1);
+ }
+ return std.math.log2_int_ceil(usize, box_count + prealloc_item_count) - prealloc_exp - 1;
+ }
+
+ fn shelfSize(shelf_index: ShelfIndex) usize {
+ if (prealloc_item_count == 0) {
+ return usize(1) << shelf_index;
+ }
+ return usize(1) << (shelf_index + (prealloc_exp + 1));
+ }
+
+ fn shelfIndex(list_index: usize) ShelfIndex {
+ if (prealloc_item_count == 0) {
+ return std.math.log2_int(usize, list_index + 1);
+ }
+ return std.math.log2_int(usize, list_index + prealloc_item_count) - prealloc_exp - 1;
+ }
+
+ fn boxIndex(list_index: usize, shelf_index: ShelfIndex) usize {
+ if (prealloc_item_count == 0) {
+ return (list_index + 1) - (usize(1) << shelf_index);
+ }
+ return list_index + prealloc_item_count - (usize(1) << ((prealloc_exp + 1) + shelf_index));
+ }
+
+ fn freeShelves(self: *Self, from_count: ShelfIndex, to_count: ShelfIndex) void {
+ var i = from_count;
+ while (i != to_count) {
+ i -= 1;
+ self.allocator.free(self.dynamic_segments[i][0..shelfSize(i)]);
+ }
+ }
+
+ pub const Iterator = struct {
+ list: *Self,
+ index: usize,
+ box_index: usize,
+ shelf_index: ShelfIndex,
+ shelf_size: usize,
+
+ pub fn next(it: *Iterator) ?*T {
+ if (it.index >= it.list.len) return null;
+ if (it.index < prealloc_item_count) {
+ const ptr = &it.list.prealloc_segment[it.index];
+ it.index += 1;
+ if (it.index == prealloc_item_count) {
+ it.box_index = 0;
+ it.shelf_index = 0;
+ it.shelf_size = prealloc_item_count * 2;
+ }
+ return ptr;
+ }
+
+ const ptr = &it.list.dynamic_segments[it.shelf_index][it.box_index];
+ it.index += 1;
+ it.box_index += 1;
+ if (it.box_index == it.shelf_size) {
+ it.shelf_index += 1;
+ it.box_index = 0;
+ it.shelf_size *= 2;
+ }
+ return ptr;
+ }
+
+ pub fn prev(it: *Iterator) ?*T {
+ if (it.index == 0) return null;
+
+ it.index -= 1;
+ if (it.index < prealloc_item_count) return &it.list.prealloc_segment[it.index];
+
+ if (it.box_index == 0) {
+ it.shelf_index -= 1;
+ it.shelf_size /= 2;
+ it.box_index = it.shelf_size - 1;
+ } else {
+ it.box_index -= 1;
+ }
+
+ return &it.list.dynamic_segments[it.shelf_index][it.box_index];
+ }
+
+ pub fn peek(it: *Iterator) ?*T {
+ if (it.index >= it.list.len)
+ return null;
+ if (it.index < prealloc_item_count)
+ return &it.list.prealloc_segment[it.index];
+
+ return &it.list.dynamic_segments[it.shelf_index][it.box_index];
+ }
+
+ pub fn set(it: *Iterator, index: usize) void {
+ it.index = index;
+ if (index < prealloc_item_count) return;
+ it.shelf_index = shelfIndex(index);
+ it.box_index = boxIndex(index, it.shelf_index);
+ it.shelf_size = shelfSize(it.shelf_index);
+ }
+ };
+
+ pub fn iterator(self: *Self, start_index: usize) Iterator {
+ var it = Iterator{
+ .list = self,
+ .index = undefined,
+ .shelf_index = undefined,
+ .box_index = undefined,
+ .shelf_size = undefined,
+ };
+ it.set(start_index);
+ return it;
+ }
+ };
+}
+
+test "std.SegmentedList" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+ var a = &da.allocator;
+
+ try testSegmentedList(0, a);
+ try testSegmentedList(1, a);
+ try testSegmentedList(2, a);
+ try testSegmentedList(4, a);
+ try testSegmentedList(8, a);
+ try testSegmentedList(16, a);
+}
+
+fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void {
+ var list = SegmentedList(i32, prealloc).init(allocator);
+ defer list.deinit();
+
+ {
+ var i: usize = 0;
+ while (i < 100) : (i += 1) {
+ try list.push(@intCast(i32, i + 1));
+ assert(list.len == i + 1);
+ }
+ }
+
+ {
+ var i: usize = 0;
+ while (i < 100) : (i += 1) {
+ assert(list.at(i).* == @intCast(i32, i + 1));
+ }
+ }
+
+ {
+ var it = list.iterator(0);
+ var x: i32 = 0;
+ while (it.next()) |item| {
+ x += 1;
+ assert(item.* == x);
+ }
+ assert(x == 100);
+ while (it.prev()) |item| : (x -= 1) {
+ assert(item.* == x);
+ }
+ assert(x == 0);
+ }
+
+ assert(list.pop().? == 100);
+ assert(list.len == 99);
+
+ try list.pushMany([]i32{
+ 1,
+ 2,
+ 3,
+ });
+ assert(list.len == 102);
+ assert(list.pop().? == 3);
+ assert(list.pop().? == 2);
+ assert(list.pop().? == 1);
+ assert(list.len == 99);
+
+ try list.pushMany([]const i32{});
+ assert(list.len == 99);
+
+ var i: i32 = 99;
+ while (list.pop()) |item| : (i -= 1) {
+ assert(item == i);
+ list.shrinkCapacity(list.len);
+ }
+}
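Not part of the patch: a short sketch that works through the prealloc=4 shelf arithmetic documented above and exercises the pointer-stability property the doc comment promises. The std.SegmentedList path is an assumption about how std/index.zig re-exports the type.

const std = @import("std");
const assert = std.debug.assert;

// Assumption: SegmentedList is re-exported from std/index.zig.
const SegmentedList = std.SegmentedList;

test "segmented list sketch" {
    // Worked example of the prealloc=4 equations from the comment block:
    //   customer_index = 10, prealloc = 4
    //   shelf_index = floor(log2(10 + 4)) - log2(4) - 1 = 3 - 2 - 1 = 0
    //   box_index   = 10 + 4 - 2 ** (log2(4) + 1 + 0)   = 14 - 8 = 6
    //   shelf_size  = 4 * 2 ** (0 + 1)                  = 8
    // So customer box 10 lives on shelf 0, in slot 6, and shelf 0 holds 8 boxes.

    var da = std.heap.DirectAllocator.init();
    defer da.deinit();

    var list = SegmentedList(i32, 4).init(&da.allocator);
    defer list.deinit();

    var i: i32 = 0;
    while (i < 20) : (i += 1) {
        try list.push(i);
    }

    // Element pointers stay valid across later pushes, unlike ArrayList.
    const ptr = list.at(10);
    try list.push(999);
    assert(ptr.* == 10);
}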
diff --git a/std/sort.zig b/std/sort.zig
index 0f83df7bb4..f29f51b7cf 100644
--- a/std/sort.zig
+++ b/std/sort.zig
@@ -5,15 +5,18 @@ const math = std.math;
const builtin = @import("builtin");
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required).
-pub fn insertionSort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const T)bool) void {
- {var i: usize = 1; while (i < items.len) : (i += 1) {
- const x = items[i];
- var j: usize = i;
- while (j > 0 and lessThan(x, items[j - 1])) : (j -= 1) {
- items[j] = items[j - 1];
+pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) void {
+ {
+ var i: usize = 1;
+ while (i < items.len) : (i += 1) {
+ const x = items[i];
+ var j: usize = i;
+ while (j > 0 and lessThan(x, items[j - 1])) : (j -= 1) {
+ items[j] = items[j - 1];
+ }
+ items[j] = x;
}
- items[j] = x;
- }}
+ }
}
const Range = struct {
@@ -21,15 +24,17 @@ const Range = struct {
end: usize,
fn init(start: usize, end: usize) Range {
- return Range { .start = start, .end = end };
+ return Range{
+ .start = start,
+ .end = end,
+ };
}
- fn length(self: &const Range) usize {
+ fn length(self: Range) usize {
return self.end - self.start;
}
};
-
const Iterator = struct {
size: usize,
power_of_two: usize,
@@ -42,7 +47,7 @@ const Iterator = struct {
fn init(size2: usize, min_level: usize) Iterator {
const power_of_two = math.floorPowerOfTwo(usize, size2);
const denominator = power_of_two / min_level;
- return Iterator {
+ return Iterator{
.numerator = 0,
.decimal = 0,
.size = size2,
@@ -53,12 +58,12 @@ const Iterator = struct {
};
}
- fn begin(self: &Iterator) void {
+ fn begin(self: *Iterator) void {
self.numerator = 0;
self.decimal = 0;
}
- fn nextRange(self: &Iterator) Range {
+ fn nextRange(self: *Iterator) Range {
const start = self.decimal;
self.decimal += self.decimal_step;
@@ -68,14 +73,17 @@ const Iterator = struct {
self.decimal += 1;
}
- return Range {.start = start, .end = self.decimal};
+ return Range{
+ .start = start,
+ .end = self.decimal,
+ };
}
- fn finished(self: &Iterator) bool {
+ fn finished(self: *Iterator) bool {
return self.decimal >= self.size;
}
- fn nextLevel(self: &Iterator) bool {
+ fn nextLevel(self: *Iterator) bool {
self.decimal_step += self.decimal_step;
self.numerator_step += self.numerator_step;
if (self.numerator_step >= self.denominator) {
@@ -86,7 +94,7 @@ const Iterator = struct {
return (self.decimal_step < self.size);
}
- fn length(self: &Iterator) usize {
+ fn length(self: *Iterator) usize {
return self.decimal_step;
}
};
@@ -100,7 +108,7 @@ const Pull = struct {
/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required).
/// Currently implemented as block sort.
-pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const T)bool) void {
+pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) void {
// Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
var cache: [512]T = undefined;
@@ -123,7 +131,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
// http://pages.ripco.net/~jgamble/nw.html
var iterator = Iterator.init(items.len, 4);
while (!iterator.finished()) {
- var order = []u8{0, 1, 2, 3, 4, 5, 6, 7};
+ var order = []u8{ 0, 1, 2, 3, 4, 5, 6, 7 };
const range = iterator.nextRange();
const sliced_items = items[range.start..];
@@ -149,56 +157,56 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
swap(T, sliced_items, lessThan, &order, 3, 5);
swap(T, sliced_items, lessThan, &order, 3, 4);
},
- 7 => {
- swap(T, sliced_items, lessThan, &order, 1, 2);
- swap(T, sliced_items, lessThan, &order, 3, 4);
- swap(T, sliced_items, lessThan, &order, 5, 6);
- swap(T, sliced_items, lessThan, &order, 0, 2);
- swap(T, sliced_items, lessThan, &order, 3, 5);
- swap(T, sliced_items, lessThan, &order, 4, 6);
- swap(T, sliced_items, lessThan, &order, 0, 1);
- swap(T, sliced_items, lessThan, &order, 4, 5);
- swap(T, sliced_items, lessThan, &order, 2, 6);
- swap(T, sliced_items, lessThan, &order, 0, 4);
- swap(T, sliced_items, lessThan, &order, 1, 5);
- swap(T, sliced_items, lessThan, &order, 0, 3);
- swap(T, sliced_items, lessThan, &order, 2, 5);
- swap(T, sliced_items, lessThan, &order, 1, 3);
- swap(T, sliced_items, lessThan, &order, 2, 4);
- swap(T, sliced_items, lessThan, &order, 2, 3);
- },
- 6 => {
- swap(T, sliced_items, lessThan, &order, 1, 2);
- swap(T, sliced_items, lessThan, &order, 4, 5);
- swap(T, sliced_items, lessThan, &order, 0, 2);
- swap(T, sliced_items, lessThan, &order, 3, 5);
- swap(T, sliced_items, lessThan, &order, 0, 1);
- swap(T, sliced_items, lessThan, &order, 3, 4);
- swap(T, sliced_items, lessThan, &order, 2, 5);
- swap(T, sliced_items, lessThan, &order, 0, 3);
- swap(T, sliced_items, lessThan, &order, 1, 4);
- swap(T, sliced_items, lessThan, &order, 2, 4);
- swap(T, sliced_items, lessThan, &order, 1, 3);
- swap(T, sliced_items, lessThan, &order, 2, 3);
- },
- 5 => {
- swap(T, sliced_items, lessThan, &order, 0, 1);
- swap(T, sliced_items, lessThan, &order, 3, 4);
- swap(T, sliced_items, lessThan, &order, 2, 4);
- swap(T, sliced_items, lessThan, &order, 2, 3);
- swap(T, sliced_items, lessThan, &order, 1, 4);
- swap(T, sliced_items, lessThan, &order, 0, 3);
- swap(T, sliced_items, lessThan, &order, 0, 2);
- swap(T, sliced_items, lessThan, &order, 1, 3);
- swap(T, sliced_items, lessThan, &order, 1, 2);
- },
- 4 => {
- swap(T, sliced_items, lessThan, &order, 0, 1);
- swap(T, sliced_items, lessThan, &order, 2, 3);
- swap(T, sliced_items, lessThan, &order, 0, 2);
- swap(T, sliced_items, lessThan, &order, 1, 3);
- swap(T, sliced_items, lessThan, &order, 1, 2);
- },
+ 7 => {
+ swap(T, sliced_items, lessThan, &order, 1, 2);
+ swap(T, sliced_items, lessThan, &order, 3, 4);
+ swap(T, sliced_items, lessThan, &order, 5, 6);
+ swap(T, sliced_items, lessThan, &order, 0, 2);
+ swap(T, sliced_items, lessThan, &order, 3, 5);
+ swap(T, sliced_items, lessThan, &order, 4, 6);
+ swap(T, sliced_items, lessThan, &order, 0, 1);
+ swap(T, sliced_items, lessThan, &order, 4, 5);
+ swap(T, sliced_items, lessThan, &order, 2, 6);
+ swap(T, sliced_items, lessThan, &order, 0, 4);
+ swap(T, sliced_items, lessThan, &order, 1, 5);
+ swap(T, sliced_items, lessThan, &order, 0, 3);
+ swap(T, sliced_items, lessThan, &order, 2, 5);
+ swap(T, sliced_items, lessThan, &order, 1, 3);
+ swap(T, sliced_items, lessThan, &order, 2, 4);
+ swap(T, sliced_items, lessThan, &order, 2, 3);
+ },
+ 6 => {
+ swap(T, sliced_items, lessThan, &order, 1, 2);
+ swap(T, sliced_items, lessThan, &order, 4, 5);
+ swap(T, sliced_items, lessThan, &order, 0, 2);
+ swap(T, sliced_items, lessThan, &order, 3, 5);
+ swap(T, sliced_items, lessThan, &order, 0, 1);
+ swap(T, sliced_items, lessThan, &order, 3, 4);
+ swap(T, sliced_items, lessThan, &order, 2, 5);
+ swap(T, sliced_items, lessThan, &order, 0, 3);
+ swap(T, sliced_items, lessThan, &order, 1, 4);
+ swap(T, sliced_items, lessThan, &order, 2, 4);
+ swap(T, sliced_items, lessThan, &order, 1, 3);
+ swap(T, sliced_items, lessThan, &order, 2, 3);
+ },
+ 5 => {
+ swap(T, sliced_items, lessThan, &order, 0, 1);
+ swap(T, sliced_items, lessThan, &order, 3, 4);
+ swap(T, sliced_items, lessThan, &order, 2, 4);
+ swap(T, sliced_items, lessThan, &order, 2, 3);
+ swap(T, sliced_items, lessThan, &order, 1, 4);
+ swap(T, sliced_items, lessThan, &order, 0, 3);
+ swap(T, sliced_items, lessThan, &order, 0, 2);
+ swap(T, sliced_items, lessThan, &order, 1, 3);
+ swap(T, sliced_items, lessThan, &order, 1, 2);
+ },
+ 4 => {
+ swap(T, sliced_items, lessThan, &order, 0, 1);
+ swap(T, sliced_items, lessThan, &order, 2, 3);
+ swap(T, sliced_items, lessThan, &order, 0, 2);
+ swap(T, sliced_items, lessThan, &order, 1, 3);
+ swap(T, sliced_items, lessThan, &order, 1, 2);
+ },
else => {},
}
}
@@ -240,7 +248,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
// merge A2 and B2 into the cache
if (lessThan(items[B2.end - 1], items[A2.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the cache
- mem.copy(T, cache[A1.length() + B2.length()..], items[A2.start..A2.end]);
+ mem.copy(T, cache[A1.length() + B2.length() ..], items[A2.start..A2.end]);
mem.copy(T, cache[A1.length()..], items[B2.start..B2.end]);
} else if (lessThan(items[B2.start], items[A2.end - 1])) {
// these two ranges weren't already in order, so merge them into the cache
@@ -248,7 +256,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
} else {
// copy A2 and B2 into the cache in the same order
mem.copy(T, cache[A1.length()..], items[A2.start..A2.end]);
- mem.copy(T, cache[A1.length() + A2.length()..], items[B2.start..B2.end]);
+ mem.copy(T, cache[A1.length() + A2.length() ..], items[B2.start..B2.end]);
}
A2 = Range.init(A2.start, B2.end);
@@ -258,7 +266,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
if (lessThan(cache[B3.end - 1], cache[A3.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the items
- mem.copy(T, items[A1.start + A2.length()..], cache[A3.start..A3.end]);
+ mem.copy(T, items[A1.start + A2.length() ..], cache[A3.start..A3.end]);
mem.copy(T, items[A1.start..], cache[B3.start..B3.end]);
} else if (lessThan(cache[B3.start], cache[A3.end - 1])) {
// these two ranges weren't already in order, so merge them back into the items
@@ -266,14 +274,13 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
} else {
// copy A3 and B3 into the items in the same order
mem.copy(T, items[A1.start..], cache[A3.start..A3.end]);
- mem.copy(T, items[A1.start + A1.length()..], cache[B3.start..B3.end]);
+ mem.copy(T, items[A1.start + A1.length() ..], cache[B3.start..B3.end]);
}
}
// we merged two levels at the same time, so we're done with this level already
// (iterator.nextLevel() is called again at the bottom of this outer merge loop)
_ = iterator.nextLevel();
-
} else {
iterator.begin();
while (!iterator.finished()) {
@@ -301,9 +308,8 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
// 6. merge each A block with any B values that follow, using the cache or the second internal buffer
// 7. sort the second internal buffer if it exists
// 8. redistribute the two internal buffers back into the items
-
var block_size: usize = math.sqrt(iterator.length());
- var buffer_size = iterator.length()/block_size + 1;
+ var buffer_size = iterator.length() / block_size + 1;
// as an optimization, we really only need to pull out the internal buffers once for each level of merges
// after that we can reuse the same buffers over and over, then redistribute it when we're finished with this level
@@ -316,8 +322,18 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
var start: usize = 0;
var pull_index: usize = 0;
var pull = []Pull{
- Pull {.from = 0, .to = 0, .count = 0, .range = Range.init(0, 0),},
- Pull {.from = 0, .to = 0, .count = 0, .range = Range.init(0, 0),},
+ Pull{
+ .from = 0,
+ .to = 0,
+ .count = 0,
+ .range = Range.init(0, 0),
+ },
+ Pull{
+ .from = 0,
+ .to = 0,
+ .count = 0,
+ .range = Range.init(0, 0),
+ },
};
var buffer1 = Range.init(0, 0);
@@ -355,7 +371,10 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
// these values will be pulled out to the start of A
last = A.start;
count = 1;
- while (count < find) : ({last = index; count += 1;}) {
+ while (count < find) : ({
+ last = index;
+ count += 1;
+ }) {
index = findLastForward(T, items, items[last], Range.init(last + 1, A.end), lessThan, find - count);
if (index == A.end) break;
}
@@ -363,7 +382,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
if (count >= buffer_size) {
// keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
- pull[pull_index] = Pull {
+ pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
@@ -398,7 +417,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
} else if (pull_index == 0 and count > buffer1.length()) {
// keep track of the largest buffer we were able to find
buffer1 = Range.init(A.start, A.start + count);
- pull[pull_index] = Pull {
+ pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
@@ -410,7 +429,10 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
// these values will be pulled out to the end of B
last = B.end - 1;
count = 1;
- while (count < find) : ({last = index - 1; count += 1;}) {
+ while (count < find) : ({
+ last = index - 1;
+ count += 1;
+ }) {
index = findFirstBackward(T, items, items[last], Range.init(B.start, last), lessThan, find - count);
if (index == B.start) break;
}
@@ -418,7 +440,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
if (count >= buffer_size) {
                        // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
- pull[pull_index] = Pull {
+ pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
@@ -457,7 +479,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
} else if (pull_index == 0 and count > buffer1.length()) {
// keep track of the largest buffer we were able to find
buffer1 = Range.init(B.end - count, B.end);
- pull[pull_index] = Pull {
+ pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
@@ -496,7 +518,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
// adjust block_size and buffer_size based on the values we were able to pull out
buffer_size = buffer1.length();
- block_size = iterator.length()/buffer_size + 1;
+ block_size = iterator.length() / buffer_size + 1;
// the first buffer NEEDS to be large enough to tag each of the evenly sized A blocks,
// so this was originally here to test the math for adjusting block_size above
@@ -547,7 +569,10 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
// swap the first value of each A block with the value in buffer1
var indexA = buffer1.start;
index = firstA.end;
- while (index < blockA.end) : ({indexA += 1; index += block_size;}) {
+ while (index < blockA.end) : ({
+ indexA += 1;
+ index += block_size;
+ }) {
mem.swap(T, &items[indexA], &items[index]);
}
@@ -606,7 +631,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
if (buffer2.length() > 0 or block_size <= cache.len) {
// copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway
if (block_size <= cache.len) {
- mem.copy(T, cache[0..], items[blockA.start..blockA.start + block_size]);
+ mem.copy(T, cache[0..], items[blockA.start .. blockA.start + block_size]);
} else {
blockSwap(T, items, blockA.start, buffer2.start, block_size);
}
@@ -617,7 +642,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining);
} else {
// we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation
- mem.rotate(T, items[B_split..blockA.start + block_size], blockA.start - B_split);
+ mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split);
}
// update the range for the remaining A blocks, and the range remaining from the B block after it was split
@@ -626,9 +651,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
// if there are no more A blocks remaining, this step is finished!
blockA.start += block_size;
- if (blockA.length() == 0)
- break;
-
+ if (blockA.length() == 0) break;
} else if (blockB.length() < block_size) {
// move the last B block, which is unevenly sized, to before the remaining A blocks, by using a rotation
// the cache is disabled here since it might contain the contents of the previous A block
@@ -709,7 +732,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &cons
}
// merge operation without a buffer
-fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const Range, lessThan: fn(&const T,&const T)bool) void {
+fn mergeInPlace(comptime T: type, items: []T, A_arg: Range, B_arg: Range, lessThan: fn (T, T) bool) void {
if (A_arg.length() == 0 or B_arg.length() == 0) return;
// this just repeatedly binary searches into B and rotates A into position.
@@ -730,8 +753,8 @@ fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const
// again, this is NOT a general-purpose solution – it only works well in this case!
// kind of like how the O(n^2) insertion sort is used in some places
- var A = *A_arg;
- var B = *B_arg;
+ var A = A_arg;
+ var B = B_arg;
while (true) {
// find the first place in B where the first item in A needs to be inserted
@@ -751,7 +774,7 @@ fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const
}
// merge operation using an internal buffer
-fn mergeInternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn(&const T,&const T)bool, buffer: &const Range) void {
+fn mergeInternal(comptime T: type, items: []T, A: Range, B: Range, lessThan: fn (T, T) bool, buffer: Range) void {
// whenever we find a value to add to the final array, swap it with the value that's already in that spot
// when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
var A_count: usize = 0;
@@ -787,9 +810,9 @@ fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_s
// combine a linear search with a binary search to reduce the number of comparisons in situations
// where have some idea as to how many unique values there are and where the next value might be
-fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T,&const T)bool, unique: usize) usize {
+fn findFirstForward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length()/unique, usize(1));
+ const skip = math.max(range.length() / unique, usize(1));
var index = range.start + skip;
while (lessThan(items[index - 1], value)) : (index += skip) {
@@ -801,9 +824,9 @@ fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const
return binaryFirst(T, items, value, Range.init(index - skip, index), lessThan);
}
-fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T,&const T)bool, unique: usize) usize {
+fn findFirstBackward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length()/unique, usize(1));
+ const skip = math.max(range.length() / unique, usize(1));
var index = range.end - skip;
while (index > range.start and !lessThan(items[index - 1], value)) : (index -= skip) {
@@ -815,9 +838,9 @@ fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &cons
return binaryFirst(T, items, value, Range.init(index, index + skip), lessThan);
}
-fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T,&const T)bool, unique: usize) usize {
+fn findLastForward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length()/unique, usize(1));
+ const skip = math.max(range.length() / unique, usize(1));
var index = range.start + skip;
while (!lessThan(value, items[index - 1])) : (index += skip) {
@@ -829,9 +852,9 @@ fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const
return binaryLast(T, items, value, Range.init(index - skip, index), lessThan);
}
-fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T,&const T)bool, unique: usize) usize {
+fn findLastBackward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
- const skip = math.max(range.length()/unique, usize(1));
+ const skip = math.max(range.length() / unique, usize(1));
var index = range.end - skip;
while (index > range.start and lessThan(value, items[index - 1])) : (index -= skip) {
@@ -843,12 +866,12 @@ fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const
return binaryLast(T, items, value, Range.init(index, index + skip), lessThan);
}
-fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T,&const T)bool) usize {
+fn binaryFirst(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
while (start < end) {
- const mid = start + (end - start)/2;
+ const mid = start + (end - start) / 2;
if (lessThan(items[mid], value)) {
start = mid + 1;
} else {
@@ -861,12 +884,12 @@ fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Rang
return start;
}
-fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn(&const T,&const T)bool) usize {
+fn binaryLast(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
while (start < end) {
- const mid = start + (end - start)/2;
+ const mid = start + (end - start) / 2;
if (!lessThan(value, items[mid])) {
start = mid + 1;
} else {
@@ -879,7 +902,7 @@ fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range
return start;
}
-fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, lessThan: fn(&const T,&const T)bool, into: []T) void {
+fn mergeInto(comptime T: type, from: []T, A: Range, B: Range, lessThan: fn (T, T) bool, into: []T) void {
var A_index: usize = A.start;
var B_index: usize = B.start;
const A_last = A.end;
@@ -909,7 +932,7 @@ fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, less
}
}
-fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn(&const T,&const T)bool, cache: []T) void {
+fn mergeExternal(comptime T: type, items: []T, A: Range, B: Range, lessThan: fn (T, T) bool, cache: []T) void {
// A fits into the cache, so use that instead of the internal buffer
var A_index: usize = 0;
var B_index: usize = B.start;
@@ -937,29 +960,32 @@ fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range,
mem.copy(T, items[insert_index..], cache[A_index..A_last]);
}
-fn swap(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const T)bool, order: &[8]u8, x: usize, y: usize) void {
- if (lessThan(items[y], items[x]) or
- ((*order)[x] > (*order)[y] and !lessThan(items[x], items[y])))
- {
+fn swap(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool, order: *[8]u8, x: usize, y: usize) void {
+ if (lessThan(items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(items[x], items[y]))) {
mem.swap(T, &items[x], &items[y]);
- mem.swap(u8, &(*order)[x], &(*order)[y]);
+ mem.swap(u8, &(order.*)[x], &(order.*)[y]);
}
}
-fn i32asc(lhs: &const i32, rhs: &const i32) bool {
- return *lhs < *rhs;
+// Use these to generate a comparator function for a given type. e.g. `sort(u8, slice, asc(u8))`.
+pub fn asc(comptime T: type) fn (T, T) bool {
+ const impl = struct {
+ fn inner(a: T, b: T) bool {
+ return a < b;
+ }
+ };
+
+ return impl.inner;
}
-fn i32desc(lhs: &const i32, rhs: &const i32) bool {
- return *rhs < *lhs;
-}
+pub fn desc(comptime T: type) fn (T, T) bool {
+ const impl = struct {
+ fn inner(a: T, b: T) bool {
+ return a > b;
+ }
+ };
-fn u8asc(lhs: &const u8, rhs: &const u8) bool {
- return *lhs < *rhs;
-}
-
-fn u8desc(lhs: &const u8, rhs: &const u8) bool {
- return *rhs < *lhs;
+ return impl.inner;
}
test "stable sort" {
@@ -967,44 +993,44 @@ test "stable sort" {
comptime testStableSort();
}
fn testStableSort() void {
- var expected = []IdAndValue {
- IdAndValue{.id = 0, .value = 0},
- IdAndValue{.id = 1, .value = 0},
- IdAndValue{.id = 2, .value = 0},
- IdAndValue{.id = 0, .value = 1},
- IdAndValue{.id = 1, .value = 1},
- IdAndValue{.id = 2, .value = 1},
- IdAndValue{.id = 0, .value = 2},
- IdAndValue{.id = 1, .value = 2},
- IdAndValue{.id = 2, .value = 2},
+ var expected = []IdAndValue{
+ IdAndValue{ .id = 0, .value = 0 },
+ IdAndValue{ .id = 1, .value = 0 },
+ IdAndValue{ .id = 2, .value = 0 },
+ IdAndValue{ .id = 0, .value = 1 },
+ IdAndValue{ .id = 1, .value = 1 },
+ IdAndValue{ .id = 2, .value = 1 },
+ IdAndValue{ .id = 0, .value = 2 },
+ IdAndValue{ .id = 1, .value = 2 },
+ IdAndValue{ .id = 2, .value = 2 },
};
- var cases = [][9]IdAndValue {
- []IdAndValue {
- IdAndValue{.id = 0, .value = 0},
- IdAndValue{.id = 0, .value = 1},
- IdAndValue{.id = 0, .value = 2},
- IdAndValue{.id = 1, .value = 0},
- IdAndValue{.id = 1, .value = 1},
- IdAndValue{.id = 1, .value = 2},
- IdAndValue{.id = 2, .value = 0},
- IdAndValue{.id = 2, .value = 1},
- IdAndValue{.id = 2, .value = 2},
+ var cases = [][9]IdAndValue{
+ []IdAndValue{
+ IdAndValue{ .id = 0, .value = 0 },
+ IdAndValue{ .id = 0, .value = 1 },
+ IdAndValue{ .id = 0, .value = 2 },
+ IdAndValue{ .id = 1, .value = 0 },
+ IdAndValue{ .id = 1, .value = 1 },
+ IdAndValue{ .id = 1, .value = 2 },
+ IdAndValue{ .id = 2, .value = 0 },
+ IdAndValue{ .id = 2, .value = 1 },
+ IdAndValue{ .id = 2, .value = 2 },
},
- []IdAndValue {
- IdAndValue{.id = 0, .value = 2},
- IdAndValue{.id = 0, .value = 1},
- IdAndValue{.id = 0, .value = 0},
- IdAndValue{.id = 1, .value = 2},
- IdAndValue{.id = 1, .value = 1},
- IdAndValue{.id = 1, .value = 0},
- IdAndValue{.id = 2, .value = 2},
- IdAndValue{.id = 2, .value = 1},
- IdAndValue{.id = 2, .value = 0},
+ []IdAndValue{
+ IdAndValue{ .id = 0, .value = 2 },
+ IdAndValue{ .id = 0, .value = 1 },
+ IdAndValue{ .id = 0, .value = 0 },
+ IdAndValue{ .id = 1, .value = 2 },
+ IdAndValue{ .id = 1, .value = 1 },
+ IdAndValue{ .id = 1, .value = 0 },
+ IdAndValue{ .id = 2, .value = 2 },
+ IdAndValue{ .id = 2, .value = 1 },
+ IdAndValue{ .id = 2, .value = 0 },
},
};
for (cases) |*case| {
- insertionSort(IdAndValue, (*case)[0..], cmpByValue);
- for (*case) |item, i| {
+ insertionSort(IdAndValue, (case.*)[0..], cmpByValue);
+ for (case.*) |item, i| {
assert(item.id == expected[i].id);
assert(item.value == expected[i].value);
}
@@ -1014,68 +1040,122 @@ const IdAndValue = struct {
id: usize,
value: i32,
};
-fn cmpByValue(a: &const IdAndValue, b: &const IdAndValue) bool {
- return i32asc(a.value, b.value);
+fn cmpByValue(a: IdAndValue, b: IdAndValue) bool {
+ return asc(i32)(a.value, b.value);
}
test "std.sort" {
- const u8cases = [][]const []const u8 {
- [][]const u8{"", ""},
- [][]const u8{"a", "a"},
- [][]const u8{"az", "az"},
- [][]const u8{"za", "az"},
- [][]const u8{"asdf", "adfs"},
- [][]const u8{"one", "eno"},
+ const u8cases = [][]const []const u8{
+ [][]const u8{
+ "",
+ "",
+ },
+ [][]const u8{
+ "a",
+ "a",
+ },
+ [][]const u8{
+ "az",
+ "az",
+ },
+ [][]const u8{
+ "za",
+ "az",
+ },
+ [][]const u8{
+ "asdf",
+ "adfs",
+ },
+ [][]const u8{
+ "one",
+ "eno",
+ },
};
for (u8cases) |case| {
var buf: [8]u8 = undefined;
const slice = buf[0..case[0].len];
mem.copy(u8, slice, case[0]);
- sort(u8, slice, u8asc);
+ sort(u8, slice, asc(u8));
assert(mem.eql(u8, slice, case[1]));
}
- const i32cases = [][]const []const i32 {
- [][]const i32{[]i32{}, []i32{}},
- [][]const i32{[]i32{1}, []i32{1}},
- [][]const i32{[]i32{0, 1}, []i32{0, 1}},
- [][]const i32{[]i32{1, 0}, []i32{0, 1}},
- [][]const i32{[]i32{1, -1, 0}, []i32{-1, 0, 1}},
- [][]const i32{[]i32{2, 1, 3}, []i32{1, 2, 3}},
+ const i32cases = [][]const []const i32{
+ [][]const i32{
+ []i32{},
+ []i32{},
+ },
+ [][]const i32{
+ []i32{1},
+ []i32{1},
+ },
+ [][]const i32{
+ []i32{ 0, 1 },
+ []i32{ 0, 1 },
+ },
+ [][]const i32{
+ []i32{ 1, 0 },
+ []i32{ 0, 1 },
+ },
+ [][]const i32{
+ []i32{ 1, -1, 0 },
+ []i32{ -1, 0, 1 },
+ },
+ [][]const i32{
+ []i32{ 2, 1, 3 },
+ []i32{ 1, 2, 3 },
+ },
};
for (i32cases) |case| {
var buf: [8]i32 = undefined;
const slice = buf[0..case[0].len];
mem.copy(i32, slice, case[0]);
- sort(i32, slice, i32asc);
+ sort(i32, slice, asc(i32));
assert(mem.eql(i32, slice, case[1]));
}
}
test "std.sort descending" {
- const rev_cases = [][]const []const i32 {
- [][]const i32{[]i32{}, []i32{}},
- [][]const i32{[]i32{1}, []i32{1}},
- [][]const i32{[]i32{0, 1}, []i32{1, 0}},
- [][]const i32{[]i32{1, 0}, []i32{1, 0}},
- [][]const i32{[]i32{1, -1, 0}, []i32{1, 0, -1}},
- [][]const i32{[]i32{2, 1, 3}, []i32{3, 2, 1}},
+ const rev_cases = [][]const []const i32{
+ [][]const i32{
+ []i32{},
+ []i32{},
+ },
+ [][]const i32{
+ []i32{1},
+ []i32{1},
+ },
+ [][]const i32{
+ []i32{ 0, 1 },
+ []i32{ 1, 0 },
+ },
+ [][]const i32{
+ []i32{ 1, 0 },
+ []i32{ 1, 0 },
+ },
+ [][]const i32{
+ []i32{ 1, -1, 0 },
+ []i32{ 1, 0, -1 },
+ },
+ [][]const i32{
+ []i32{ 2, 1, 3 },
+ []i32{ 3, 2, 1 },
+ },
};
for (rev_cases) |case| {
var buf: [8]i32 = undefined;
const slice = buf[0..case[0].len];
mem.copy(i32, slice, case[0]);
- sort(i32, slice, i32desc);
+ sort(i32, slice, desc(i32));
assert(mem.eql(i32, slice, case[1]));
}
}
test "another sort case" {
var arr = []i32{ 5, 3, 1, 2, 4 };
- sort(i32, arr[0..], i32asc);
+ sort(i32, arr[0..], asc(i32));
assert(mem.eql(i32, arr, []i32{ 1, 2, 3, 4, 5 }));
}
@@ -1091,7 +1171,7 @@ test "sort fuzz testing" {
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-fn fuzzTest(rng: &std.rand.Random) void {
+fn fuzzTest(rng: *std.rand.Random) void {
const array_size = rng.range(usize, 0, 1000);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var array = fixed_allocator.allocator.alloc(IdAndValue, array_size) catch unreachable;
@@ -1112,7 +1192,7 @@ fn fuzzTest(rng: &std.rand.Random) void {
}
}
-pub fn min(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const T)bool) T {
+pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) T {
var i: usize = 0;
var smallest = items[0];
for (items[1..]) |item| {
@@ -1123,7 +1203,7 @@ pub fn min(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const
return smallest;
}
-pub fn max(comptime T: type, items: []T, lessThan: fn(lhs: &const T, rhs: &const T)bool) T {
+pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) T {
var i: usize = 0;
var biggest = items[0];
for (items[1..]) |item| {
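Not part of the patch: a brief sketch of the new by-value comparator signature in practice, with a hand-written struct comparator (in the same spirit as cmpByValue above) next to the asc() helper. The std.sort.sort and std.sort.asc paths assume the usual re-export from std/index.zig.

const std = @import("std");
const assert = std.debug.assert;

const Entry = struct {
    key: i32,
    payload: u8,
};

// Comparators now take their arguments by value: fn (T, T) bool.
fn entryAsc(a: Entry, b: Entry) bool {
    return a.key < b.key;
}

test "by-value comparator sketch" {
    var items = []Entry{
        Entry{ .key = 3, .payload = 'c' },
        Entry{ .key = 1, .payload = 'a' },
        Entry{ .key = 2, .payload = 'b' },
    };
    std.sort.sort(Entry, items[0..], entryAsc);
    assert(items[0].key == 1 and items[2].key == 3);

    var nums = []i32{ 5, 3, 1, 2, 4 };
    std.sort.sort(i32, nums[0..], std.sort.asc(i32));
    assert(std.mem.eql(i32, nums, []i32{ 1, 2, 3, 4, 5 }));
}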
diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig
index d2b1af76a7..47b57c6c23 100644
--- a/std/special/bootstrap.zig
+++ b/std/special/bootstrap.zig
@@ -5,7 +5,7 @@ const root = @import("@root");
const std = @import("std");
const builtin = @import("builtin");
-var argc_ptr: &usize = undefined;
+var argc_ptr: [*]usize = undefined;
comptime {
const strong_linkage = builtin.GlobalLinkage.Strong;
@@ -21,10 +21,14 @@ comptime {
nakedcc fn _start() noreturn {
switch (builtin.arch) {
builtin.Arch.x86_64 => {
- argc_ptr = asm("lea (%%rsp), %[argc]": [argc] "=r" (-> &usize));
+ argc_ptr = asm ("lea (%%rsp), %[argc]"
+ : [argc] "=r" (-> [*]usize)
+ );
},
builtin.Arch.i386 => {
- argc_ptr = asm("lea (%%esp), %[argc]": [argc] "=r" (-> &usize));
+ argc_ptr = asm ("lea (%%esp), %[argc]"
+ : [argc] "=r" (-> [*]usize)
+ );
},
else => @compileError("unsupported arch"),
}
@@ -39,25 +43,38 @@ extern fn WinMainCRTStartup() noreturn {
std.os.windows.ExitProcess(callMain());
}
+// TODO https://github.com/ziglang/zig/issues/265
fn posixCallMainAndExit() noreturn {
- const argc = *argc_ptr;
- const argv = @ptrCast(&&u8, &argc_ptr[1]);
- const envp = @ptrCast(&?&u8, &argv[argc + 1]);
+ const argc = argc_ptr[0];
+ const argv = @ptrCast([*][*]u8, argc_ptr + 1);
+
+ const envp_optional = @ptrCast([*]?[*]u8, argv + argc + 1);
+ var envp_count: usize = 0;
+ while (envp_optional[envp_count]) |_| : (envp_count += 1) {}
+ const envp = @ptrCast([*][*]u8, envp_optional)[0..envp_count];
+ if (builtin.os == builtin.Os.linux) {
+ const auxv = @ptrCast([*]usize, envp.ptr + envp_count + 1);
+ var i: usize = 0;
+ while (auxv[i] != 0) : (i += 2) {
+ if (auxv[i] < std.os.linux_aux_raw.len) std.os.linux_aux_raw[auxv[i]] = auxv[i + 1];
+ }
+ std.debug.assert(std.os.linux_aux_raw[std.elf.AT_PAGESZ] == std.os.page_size);
+ }
+
std.os.posix.exit(callMainWithArgs(argc, argv, envp));
}
-fn callMainWithArgs(argc: usize, argv: &&u8, envp: &?&u8) u8 {
+fn callMainWithArgs(argc: usize, argv: [*][*]u8, envp: [][*]u8) u8 {
std.os.ArgIteratorPosix.raw = argv[0..argc];
-
- var env_count: usize = 0;
- while (envp[env_count] != null) : (env_count += 1) {}
- std.os.posix_environ_raw = @ptrCast(&&u8, envp)[0..env_count];
-
+ std.os.posix_environ_raw = envp;
return callMain();
}
-extern fn main(c_argc: i32, c_argv: &&u8, c_envp: &?&u8) i32 {
- return callMainWithArgs(usize(c_argc), c_argv, c_envp);
+extern fn main(c_argc: i32, c_argv: [*][*]u8, c_envp: [*]?[*]u8) i32 {
+ var env_count: usize = 0;
+ while (c_envp[env_count] != null) : (env_count += 1) {}
+ const envp = @ptrCast([*][*]u8, c_envp)[0..env_count];
+ return callMainWithArgs(@intCast(usize, c_argc), c_argv, envp);
}
fn callMain() u8 {
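Not part of the patch: the null-terminated-array pattern used by main() above, isolated as a small sketch so the new [*] (unknown-length pointer) syntax is easier to see.

// Count the entries of a NULL-terminated pointer array, then reinterpret it
// as a slice of non-optional pointers; this mirrors the env handling in main().
fn envpToSlice(c_envp: [*]?[*]u8) [][*]u8 {
    var count: usize = 0;
    while (c_envp[count] != null) : (count += 1) {}
    return @ptrCast([*][*]u8, c_envp)[0..count];
}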
diff --git a/std/special/bootstrap_lib.zig b/std/special/bootstrap_lib.zig
index 40b6588838..f029495cb0 100644
--- a/std/special/bootstrap_lib.zig
+++ b/std/special/bootstrap_lib.zig
@@ -1,13 +1,16 @@
// This file is included in the compilation unit when exporting a library on windows.
const std = @import("std");
+const builtin = @import("builtin");
comptime {
- @export("_DllMainCRTStartup", _DllMainCRTStartup);
+ @export("_DllMainCRTStartup", _DllMainCRTStartup, builtin.GlobalLinkage.Strong);
}
-stdcallcc fn _DllMainCRTStartup(hinstDLL: std.os.windows.HINSTANCE, fdwReason: std.os.windows.DWORD,
- lpReserved: std.os.windows.LPVOID) std.os.windows.BOOL
-{
+stdcallcc fn _DllMainCRTStartup(
+ hinstDLL: std.os.windows.HINSTANCE,
+ fdwReason: std.os.windows.DWORD,
+ lpReserved: std.os.windows.LPVOID,
+) std.os.windows.BOOL {
return std.os.windows.TRUE;
}
diff --git a/std/special/build_file_template.zig b/std/special/build_file_template.zig
index 1c06c93cdc..11e9647698 100644
--- a/std/special/build_file_template.zig
+++ b/std/special/build_file_template.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const mode = b.standardReleaseOptions();
const exe = b.addExecutable("YOUR_NAME_HERE", "src/main.zig");
exe.setBuildMode(mode);
diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig
index e1a35f6648..2f073b3e98 100644
--- a/std/special/build_runner.zig
+++ b/std/special/build_runner.zig
@@ -24,19 +24,18 @@ pub fn main() !void {
const allocator = &arena.allocator;
-
// skip my own exe name
_ = arg_it.skip();
- const zig_exe = try unwrapArg(arg_it.next(allocator) ?? {
+ const zig_exe = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected first argument to be path to zig compiler\n");
return error.InvalidArgs;
});
- const build_root = try unwrapArg(arg_it.next(allocator) ?? {
+ const build_root = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected second argument to be build root directory path\n");
return error.InvalidArgs;
});
- const cache_root = try unwrapArg(arg_it.next(allocator) ?? {
+ const cache_root = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected third argument to be cache root directory path\n");
return error.InvalidArgs;
});
@@ -72,7 +71,7 @@ pub fn main() !void {
}
if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| {
const option_name = option_contents[0..name_end];
- const option_value = option_contents[name_end + 1..];
+ const option_value = option_contents[name_end + 1 ..];
if (builder.addUserInputOption(option_name, option_value))
return usageAndErr(&builder, false, try stderr_stream);
} else {
@@ -85,12 +84,12 @@ pub fn main() !void {
} else if (mem.eql(u8, arg, "--help")) {
return usage(&builder, false, try stdout_stream);
} else if (mem.eql(u8, arg, "--prefix")) {
- prefix = try unwrapArg(arg_it.next(allocator) ?? {
+ prefix = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected argument after --prefix\n\n");
return usageAndErr(&builder, false, try stderr_stream);
});
} else if (mem.eql(u8, arg, "--search-prefix")) {
- const search_prefix = try unwrapArg(arg_it.next(allocator) ?? {
+ const search_prefix = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected argument after --search-prefix\n\n");
return usageAndErr(&builder, false, try stderr_stream);
});
@@ -123,14 +122,17 @@ pub fn main() !void {
return usageAndErr(&builder, true, try stderr_stream);
builder.make(targets.toSliceConst()) catch |err| {
- if (err == error.InvalidStepName) {
- return usageAndErr(&builder, true, try stderr_stream);
+ switch (err) {
+ error.InvalidStepName => {
+ return usageAndErr(&builder, true, try stderr_stream);
+ },
+ error.UncleanExit => os.exit(1),
+ else => return err,
}
- return err;
};
}
-fn runBuild(builder: &Builder) error!void {
+fn runBuild(builder: *Builder) error!void {
switch (@typeId(@typeOf(root.build).ReturnType)) {
builtin.TypeId.Void => root.build(builder),
builtin.TypeId.ErrorUnion => try root.build(builder),
@@ -138,7 +140,7 @@ fn runBuild(builder: &Builder) error!void {
}
}
-fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
+fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void {
// run the build script to collect the options
if (!already_ran_build) {
builder.setInstallPrefix(null);
@@ -175,8 +177,7 @@ fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
try out_stream.print(" (none)\n");
} else {
for (builder.available_options_list.toSliceConst()) |option| {
- const name = try fmt.allocPrint(allocator,
- " -D{}=[{}]", option.name, Builder.typeIdName(option.type_id));
+ const name = try fmt.allocPrint(allocator, " -D{}=[{}]", option.name, Builder.typeIdName(option.type_id));
defer allocator.free(name);
try out_stream.print("{s24} {}\n", name, option.description);
}
@@ -197,12 +198,12 @@ fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
);
}
-fn usageAndErr(builder: &Builder, already_ran_build: bool, out_stream: var) error {
+fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: var) error {
usage(builder, already_ran_build, out_stream) catch {};
return error.InvalidArgs;
}
-const UnwrapArgError = error {OutOfMemory};
+const UnwrapArgError = error{OutOfMemory};
fn unwrapArg(arg: UnwrapArgError![]u8) UnwrapArgError![]u8 {
return arg catch |err| {
diff --git a/std/special/builtin.zig b/std/special/builtin.zig
index 9de0aa7679..56e578030b 100644
--- a/std/special/builtin.zig
+++ b/std/special/builtin.zig
@@ -5,7 +5,7 @@ const builtin = @import("builtin");
 // Avoid dragging the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
-pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
if (builtin.is_test) {
@setCold(true);
@import("std").debug.panic("{}", msg);
@@ -14,39 +14,39 @@ pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn
}
}
-export fn memset(dest: ?&u8, c: u8, n: usize) ?&u8 {
+export fn memset(dest: ?[*]u8, c: u8, n: usize) ?[*]u8 {
@setRuntimeSafety(false);
var index: usize = 0;
while (index != n) : (index += 1)
- (??dest)[index] = c;
+ dest.?[index] = c;
return dest;
}
-export fn memcpy(noalias dest: ?&u8, noalias src: ?&const u8, n: usize) ?&u8 {
+export fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) ?[*]u8 {
@setRuntimeSafety(false);
var index: usize = 0;
while (index != n) : (index += 1)
- (??dest)[index] = (??src)[index];
+ dest.?[index] = src.?[index];
return dest;
}
-export fn memmove(dest: ?&u8, src: ?&const u8, n: usize) ?&u8 {
+export fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) ?[*]u8 {
@setRuntimeSafety(false);
if (@ptrToInt(dest) < @ptrToInt(src)) {
var index: usize = 0;
while (index != n) : (index += 1) {
- (??dest)[index] = (??src)[index];
+ dest.?[index] = src.?[index];
}
} else {
var index = n;
while (index != 0) {
index -= 1;
- (??dest)[index] = (??src)[index];
+ dest.?[index] = src.?[index];
}
}
@@ -54,25 +54,75 @@ export fn memmove(dest: ?&u8, src: ?&const u8, n: usize) ?&u8 {
}
comptime {
- if (builtin.mode != builtin.Mode.ReleaseFast and builtin.os != builtin.Os.windows) {
+ if (builtin.mode != builtin.Mode.ReleaseFast and
+ builtin.mode != builtin.Mode.ReleaseSmall and
+ builtin.os != builtin.Os.windows)
+ {
@export("__stack_chk_fail", __stack_chk_fail, builtin.GlobalLinkage.Strong);
}
+ if (builtin.os == builtin.Os.linux and builtin.arch == builtin.Arch.x86_64) {
+ @export("clone", clone, builtin.GlobalLinkage.Strong);
+ }
}
extern fn __stack_chk_fail() noreturn {
@panic("stack smashing detected");
}
+// TODO we should be able to put this directly in std/linux/x86_64.zig, but
+// that causes a segfault in release mode. The workaround is to call it
+// across .o file boundaries. Fix comptime @ptrCast of nakedcc functions.
+nakedcc fn clone() void {
+ asm volatile (
+ \\ xor %%eax,%%eax
+ \\ mov $56,%%al
+ \\ mov %%rdi,%%r11
+ \\ mov %%rdx,%%rdi
+ \\ mov %%r8,%%rdx
+ \\ mov %%r9,%%r8
+ \\ mov 8(%%rsp),%%r10
+ \\ mov %%r11,%%r9
+ \\ and $-16,%%rsi
+ \\ sub $8,%%rsi
+ \\ mov %%rcx,(%%rsi)
+ \\ syscall
+ \\ test %%eax,%%eax
+ \\ jnz 1f
+ \\ xor %%ebp,%%ebp
+ \\ pop %%rdi
+ \\ call *%%r9
+ \\ mov %%eax,%%edi
+ \\ xor %%eax,%%eax
+ \\ mov $60,%%al
+ \\ syscall
+ \\ hlt
+ \\1: ret
+ \\
+ );
+}
+
const math = @import("../math/index.zig");
-export fn fmodf(x: f32, y: f32) f32 { return generic_fmod(f32, x, y); }
-export fn fmod(x: f64, y: f64) f64 { return generic_fmod(f64, x, y); }
+export fn fmodf(x: f32, y: f32) f32 {
+ return generic_fmod(f32, x, y);
+}
+export fn fmod(x: f64, y: f64) f64 {
+ return generic_fmod(f64, x, y);
+}
// TODO add intrinsics for these (and probably the double version too)
// and have the math stuff use the intrinsic. same as @mod and @rem
-export fn floorf(x: f32) f32 { return math.floor(x); }
-export fn ceilf(x: f32) f32 { return math.ceil(x); }
-export fn floor(x: f64) f64 { return math.floor(x); }
-export fn ceil(x: f64) f64 { return math.ceil(x); }
+export fn floorf(x: f32) f32 {
+ return math.floor(x);
+}
+export fn ceilf(x: f32) f32 {
+ return math.ceil(x);
+}
+export fn floor(x: f64) f64 {
+ return math.floor(x);
+}
+export fn ceil(x: f64) f64 {
+ return math.ceil(x);
+}
fn generic_fmod(comptime T: type, x: T, y: T) T {
@setRuntimeSafety(false);
@@ -85,9 +135,9 @@ fn generic_fmod(comptime T: type, x: T, y: T) T {
const mask = if (T == f32) 0xff else 0x7ff;
var ux = @bitCast(uint, x);
var uy = @bitCast(uint, y);
- var ex = i32((ux >> digits) & mask);
- var ey = i32((uy >> digits) & mask);
- const sx = if (T == f32) u32(ux & 0x80000000) else i32(ux >> bits_minus_1);
+ var ex = @intCast(i32, (ux >> digits) & mask);
+ var ey = @intCast(i32, (uy >> digits) & mask);
+ const sx = if (T == f32) @intCast(u32, ux & 0x80000000) else @intCast(i32, ux >> bits_minus_1);
var i: uint = undefined;
if (uy << 1 == 0 or isNan(uint, uy) or ex == mask)
@@ -102,16 +152,22 @@ fn generic_fmod(comptime T: type, x: T, y: T) T {
// normalize x and y
if (ex == 0) {
i = ux << exp_bits;
- while (i >> bits_minus_1 == 0) : (b: {ex -= 1; break :b i <<= 1;}) {}
- ux <<= log2uint(@bitCast(u32, -ex + 1));
+ while (i >> bits_minus_1 == 0) : (b: {
+ ex -= 1;
+ i <<= 1;
+ }) {}
+ ux <<= @intCast(log2uint, @bitCast(u32, -ex + 1));
} else {
ux &= @maxValue(uint) >> exp_bits;
ux |= 1 << digits;
}
if (ey == 0) {
i = uy << exp_bits;
- while (i >> bits_minus_1 == 0) : (b: {ey -= 1; break :b i <<= 1;}) {}
- uy <<= log2uint(@bitCast(u32, -ey + 1));
+ while (i >> bits_minus_1 == 0) : (b: {
+ ey -= 1;
+ i <<= 1;
+ }) {}
+ uy <<= @intCast(log2uint, @bitCast(u32, -ey + 1));
} else {
uy &= @maxValue(uint) >> exp_bits;
uy |= 1 << digits;
@@ -133,25 +189,30 @@ fn generic_fmod(comptime T: type, x: T, y: T) T {
return 0 * x;
ux = i;
}
- while (ux >> digits == 0) : (b: {ux <<= 1; break :b ex -= 1;}) {}
+ while (ux >> digits == 0) : (b: {
+ ux <<= 1;
+ ex -= 1;
+ }) {}
// scale result up
if (ex > 0) {
ux -%= 1 << digits;
ux |= uint(@bitCast(u32, ex)) << digits;
} else {
- ux >>= log2uint(@bitCast(u32, -ex + 1));
+ ux >>= @intCast(log2uint, @bitCast(u32, -ex + 1));
}
if (T == f32) {
ux |= sx;
} else {
- ux |= uint(sx) << bits_minus_1;
+ ux |= @intCast(uint, sx) << bits_minus_1;
}
return @bitCast(T, ux);
}
fn isNan(comptime T: type, bits: T) bool {
- if (T == u32) {
+ if (T == u16) {
+ return (bits & 0x7fff) > 0x7c00;
+ } else if (T == u32) {
return (bits & 0x7fffffff) > 0x7f800000;
} else if (T == u64) {
return (bits & (@maxValue(u64) >> 1)) > (u64(0x7ff) << 52);
@@ -159,3 +220,212 @@ fn isNan(comptime T: type, bits: T) bool {
unreachable;
}
}
+
+// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound
+// behaviour. Most intermediate i32 values are changed to u32 where appropriate but there are
+// potentially some edge cases remaining that are not handled in the same way.
+export fn sqrt(x: f64) f64 {
+ const tiny: f64 = 1.0e-300;
+ const sign: u32 = 0x80000000;
+ const u = @bitCast(u64, x);
+
+ var ix0 = @intCast(u32, u >> 32);
+ var ix1 = @intCast(u32, u & 0xFFFFFFFF);
+
+ // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = nan
+ if (ix0 & 0x7FF00000 == 0x7FF00000) {
+ return x * x + x;
+ }
+
+ // sqrt(+-0) = +-0
+ if (x == 0.0) {
+ return x;
+ }
+ // sqrt(-ve) = snan
+ if (ix0 & sign != 0) {
+ return math.snan(f64);
+ }
+
+ // normalize x
+ var m = @intCast(i32, ix0 >> 20);
+ if (m == 0) {
+ // subnormal
+ while (ix0 == 0) {
+ m -= 21;
+ ix0 |= ix1 >> 11;
+ ix1 <<= 21;
+ }
+
+ // subnormal
+ var i: u32 = 0;
+ while (ix0 & 0x00100000 == 0) : (i += 1) {
+ ix0 <<= 1;
+ }
+ m -= @intCast(i32, i) - 1;
+ ix0 |= ix1 >> @intCast(u5, 32 - i);
+ ix1 <<= @intCast(u5, i);
+ }
+
+ // unbias exponent
+ m -= 1023;
+ ix0 = (ix0 & 0x000FFFFF) | 0x00100000;
+ if (m & 1 != 0) {
+ ix0 += ix0 + (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+ }
+ m >>= 1;
+
+ // sqrt(x) bit by bit
+ ix0 += ix0 + (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+
+ var q: u32 = 0;
+ var q1: u32 = 0;
+ var s0: u32 = 0;
+ var s1: u32 = 0;
+ var r: u32 = 0x00200000;
+ var t: u32 = undefined;
+ var t1: u32 = undefined;
+
+ while (r != 0) {
+ t = s0 +% r;
+ if (t <= ix0) {
+ s0 = t + r;
+ ix0 -= t;
+ q += r;
+ }
+ ix0 = ix0 +% ix0 +% (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+ r >>= 1;
+ }
+
+ r = sign;
+ while (r != 0) {
+        t1 = s1 +% r;
+ t = s0;
+ if (t < ix0 or (t == ix0 and t1 <= ix1)) {
+ s1 = t1 +% r;
+ if (t1 & sign == sign and s1 & sign == 0) {
+ s0 += 1;
+ }
+ ix0 -= t;
+ if (ix1 < t1) {
+ ix0 -= 1;
+ }
+ ix1 = ix1 -% t1;
+ q1 += r;
+ }
+ ix0 = ix0 +% ix0 +% (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+ r >>= 1;
+ }
+
+ // rounding direction
+ if (ix0 | ix1 != 0) {
+ var z = 1.0 - tiny; // raise inexact
+ if (z >= 1.0) {
+ z = 1.0 + tiny;
+ if (q1 == 0xFFFFFFFF) {
+ q1 = 0;
+ q += 1;
+ } else if (z > 1.0) {
+ if (q1 == 0xFFFFFFFE) {
+ q += 1;
+ }
+ q1 += 2;
+ } else {
+ q1 += q1 & 1;
+ }
+ }
+ }
+
+ ix0 = (q >> 1) + 0x3FE00000;
+ ix1 = q1 >> 1;
+ if (q & 1 != 0) {
+ ix1 |= 0x80000000;
+ }
+
+ // NOTE: musl here appears to rely on signed twos-complement wraparound. +% has the same
+ // behaviour at least.
+ var iix0 = @intCast(i32, ix0);
+ iix0 = iix0 +% (m << 20);
+
+ const uz = (@intCast(u64, iix0) << 32) | ix1;
+ return @bitCast(f64, uz);
+}
+
+export fn sqrtf(x: f32) f32 {
+ const tiny: f32 = 1.0e-30;
+ const sign: i32 = @bitCast(i32, u32(0x80000000));
+ var ix: i32 = @bitCast(i32, x);
+
+ if ((ix & 0x7F800000) == 0x7F800000) {
+ return x * x + x; // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = snan
+ }
+
+ // zero
+ if (ix <= 0) {
+ if (ix & ~sign == 0) {
+ return x; // sqrt (+-0) = +-0
+ }
+ if (ix < 0) {
+ return math.snan(f32);
+ }
+ }
+
+ // normalize
+ var m = ix >> 23;
+ if (m == 0) {
+ // subnormal
+ var i: i32 = 0;
+ while (ix & 0x00800000 == 0) : (i += 1) {
+ ix <<= 1;
+ }
+ m -= i - 1;
+ }
+
+ m -= 127; // unbias exponent
+ ix = (ix & 0x007FFFFF) | 0x00800000;
+
+ if (m & 1 != 0) { // odd m, double x to even
+ ix += ix;
+ }
+
+ m >>= 1; // m = [m / 2]
+
+ // sqrt(x) bit by bit
+ ix += ix;
+ var q: i32 = 0; // q = sqrt(x)
+ var s: i32 = 0;
+ var r: i32 = 0x01000000; // r = moving bit right -> left
+
+ while (r != 0) {
+ const t = s + r;
+ if (t <= ix) {
+ s = t + r;
+ ix -= t;
+ q += r;
+ }
+ ix += ix;
+ r >>= 1;
+ }
+
+ // floating add to find rounding direction
+ if (ix != 0) {
+ var z = 1.0 - tiny; // inexact
+ if (z >= 1.0) {
+ z = 1.0 + tiny;
+ if (z > 1.0) {
+ q += 2;
+ } else {
+ if (q & 1 != 0) {
+ q += 1;
+ }
+ }
+ }
+ }
+
+ ix = (q >> 1) + 0x3f000000;
+ ix += m << 23;
+ return @bitCast(f32, ix);
+}
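
The exported sqrt/sqrtf above are ports of musl's bit-by-bit (digit-recurrence) square root: normalize the significand, then decide one result bit per iteration by testing whether the trial term still fits in the running remainder. A minimal Python sketch of just that core loop (illustrative only, not part of the patch; the helper name and standalone framing are mine):

```python
def bit_by_bit_sqrt(radicand: int, bits: int) -> int:
    """Integer digit-recurrence square root, the core of sqrt/sqrtf above.

    Returns floor(sqrt(radicand << bits)) provided radicand < (4 << bits);
    the float code guarantees that by normalizing the significand first.
    One result bit is decided per iteration: r is the candidate bit, s tracks
    twice the partial result, and the remainder is doubled every round.
    """
    ix = radicand
    q = 0                      # partial result
    s = 0                      # 2 * q, maintained incrementally
    r = 1 << bits              # candidate bit, walks from high to low
    while r != 0:
        t = s + r
        if t <= ix:            # does this bit fit in the remainder?
            s = t + r
            ix -= t
            q += r
        ix += ix               # shift the remainder up by one bit
        r >>= 1
    return q

q = bit_by_bit_sqrt(2, 24)     # roughly sqrt(2) scaled by 2**12
assert q * q <= (2 << 24) < (q + 1) * (q + 1)
```

The float wrappers add the exponent halving, the subnormal renormalization, and the `tiny`-based inexact/rounding fixup around this loop.
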
diff --git a/std/special/compiler_rt/comparetf2.zig b/std/special/compiler_rt/comparetf2.zig
index c189e5803b..0912b71bd5 100644
--- a/std/special/compiler_rt/comparetf2.zig
+++ b/std/special/compiler_rt/comparetf2.zig
@@ -1,4 +1,4 @@
-// TODO https://github.com/zig-lang/zig/issues/305
+// TODO https://github.com/ziglang/zig/issues/641
// and then make the return types of some of these functions the enum instead of c_int
const LE_LESS = c_int(-1);
const LE_EQUAL = c_int(0);
@@ -38,28 +38,25 @@ pub extern fn __letf2(a: f128, b: f128) c_int {
// If at least one of a and b is positive, we get the same result comparing
// a and b as signed integers as we would with a floating-point compare.
- return if ((aInt & bInt) >= 0)
- if (aInt < bInt)
- LE_LESS
- else if (aInt == bInt)
- LE_EQUAL
- else
- LE_GREATER
+ return if ((aInt & bInt) >= 0) if (aInt < bInt)
+ LE_LESS
+ else if (aInt == bInt)
+ LE_EQUAL
else
- // Otherwise, both are negative, so we need to flip the sense of the
- // comparison to get the correct result. (This assumes a twos- or ones-
- // complement integer representation; if integers are represented in a
- // sign-magnitude representation, then this flip is incorrect).
- if (aInt > bInt)
- LE_LESS
- else if (aInt == bInt)
- LE_EQUAL
- else
- LE_GREATER
- ;
+ LE_GREATER else
+ // Otherwise, both are negative, so we need to flip the sense of the
+ // comparison to get the correct result. (This assumes a twos- or ones-
+ // complement integer representation; if integers are represented in a
+ // sign-magnitude representation, then this flip is incorrect).
+ if (aInt > bInt)
+ LE_LESS
+ else if (aInt == bInt)
+ LE_EQUAL
+ else
+ LE_GREATER;
}
-// TODO https://github.com/zig-lang/zig/issues/305
+// TODO https://github.com/ziglang/zig/issues/641
// and then make the return types of some of these functions the enum instead of c_int
const GE_LESS = c_int(-1);
const GE_EQUAL = c_int(0);
@@ -76,21 +73,17 @@ pub extern fn __getf2(a: f128, b: f128) c_int {
if (aAbs > infRep or bAbs > infRep) return GE_UNORDERED;
if ((aAbs | bAbs) == 0) return GE_EQUAL;
- return if ((aInt & bInt) >= 0)
- if (aInt < bInt)
- GE_LESS
- else if (aInt == bInt)
- GE_EQUAL
- else
- GE_GREATER
+ return if ((aInt & bInt) >= 0) if (aInt < bInt)
+ GE_LESS
+ else if (aInt == bInt)
+ GE_EQUAL
else
- if (aInt > bInt)
- GE_LESS
- else if (aInt == bInt)
- GE_EQUAL
- else
- GE_GREATER
- ;
+ GE_GREATER else if (aInt > bInt)
+ GE_LESS
+ else if (aInt == bInt)
+ GE_EQUAL
+ else
+ GE_GREATER;
}
pub extern fn __unordtf2(a: f128, b: f128) c_int {
@@ -98,5 +91,5 @@ pub extern fn __unordtf2(a: f128, b: f128) c_int {
const aAbs = @bitCast(rep_t, a) & absMask;
const bAbs = @bitCast(rep_t, b) & absMask;
- return c_int(aAbs > infRep or bAbs > infRep);
+ return @boolToInt(aAbs > infRep or bAbs > infRep);
}
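
__letf2/__getf2 above lean on a classic trick: reinterpret the float bit patterns as signed integers; if at least one operand is non-negative, integer order matches float order, and if both are negative the order flips. A rough Python illustration of the same idea on f64 bit patterns (Python has no f128; the name is mine, and NaNs are assumed filtered out beforehand, as the real code does with the infRep check):

```python
import struct

def le_cmp_f64(a: float, b: float) -> int:
    """The __letf2/__getf2 ordering trick, shown on f64 bit patterns.
    Returns -1, 0 or 1; NaN inputs are assumed to be handled earlier.
    """
    (a_bits,) = struct.unpack("<q", struct.pack("<d", a))  # signed view
    (b_bits,) = struct.unpack("<q", struct.pack("<d", b))

    # +0.0 and -0.0 compare equal even though their bit patterns differ.
    if ((a_bits | b_bits) & 0x7FFFFFFFFFFFFFFF) == 0:
        return 0

    if (a_bits & b_bits) < 0:
        # Both negative: a more negative bit pattern means a larger
        # magnitude, hence a smaller float, so flip the comparison.
        a_bits, b_bits = b_bits, a_bits
    # Otherwise at least one operand is non-negative and signed-integer
    # order already agrees with floating-point order.

    if a_bits < b_bits:
        return -1
    return 0 if a_bits == b_bits else 1

assert le_cmp_f64(-2.0, -1.0) == -1
assert le_cmp_f64(1.5, 1.5) == 0
assert le_cmp_f64(3.0, -7.0) == 1
```
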
diff --git a/std/special/compiler_rt/divti3.zig b/std/special/compiler_rt/divti3.zig
new file mode 100644
index 0000000000..712cccba82
--- /dev/null
+++ b/std/special/compiler_rt/divti3.zig
@@ -0,0 +1,26 @@
+const udivmod = @import("udivmod.zig").udivmod;
+const builtin = @import("builtin");
+const compiler_rt = @import("index.zig");
+
+pub extern fn __divti3(a: i128, b: i128) i128 {
+ @setRuntimeSafety(builtin.is_test);
+
+ const s_a = a >> (i128.bit_count - 1);
+ const s_b = b >> (i128.bit_count - 1);
+
+ const an = (a ^ s_a) -% s_a;
+ const bn = (b ^ s_b) -% s_b;
+
+ const r = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), null);
+ const s = s_a ^ s_b;
+ return (@bitCast(i128, r) ^ s) -% s;
+}
+
+pub extern fn __divti3_windows_x86_64(a: *const i128, b: *const i128) void {
+ @setRuntimeSafety(builtin.is_test);
+ compiler_rt.setXmm0(i128, __divti3(a.*, b.*));
+}
+
+test "import divti3" {
+ _ = @import("divti3_test.zig");
+}
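
__divti3 reduces signed division to the unsigned udivmod by taking a branch-free absolute value with `(x ^ s) -% s`, where s is the arithmetic-shifted sign bit, and then re-applying the combined sign to the quotient the same way. A small Python sketch of just that sign handling (illustrative; Python ints stand in for i128, so wraparound at the i128 overflow edge is not modelled):

```python
def divti3(a: int, b: int) -> int:
    """Sign handling of __divti3 above, with unbounded Python ints."""
    s_a = -1 if a < 0 else 0   # arithmetic shift of the sign bit
    s_b = -1 if b < 0 else 0

    # (x ^ s) - s negates x when s == -1 and leaves it alone when s == 0.
    an = (a ^ s_a) - s_a       # abs(a)
    bn = (b ^ s_b) - s_b       # abs(b)

    q = an // bn               # the unsigned udivmod step in the patch

    s = s_a ^ s_b              # -1 iff exactly one operand was negative
    return (q ^ s) - s         # re-apply the sign

# Truncating division, like C, rather than Python's floor division:
assert divti3(7, -2) == -3
assert divti3(-7, 2) == -3
assert divti3(-7, -2) == 3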
diff --git a/std/special/compiler_rt/divti3_test.zig b/std/special/compiler_rt/divti3_test.zig
new file mode 100644
index 0000000000..eef5a9b812
--- /dev/null
+++ b/std/special/compiler_rt/divti3_test.zig
@@ -0,0 +1,21 @@
+const __divti3 = @import("divti3.zig").__divti3;
+const assert = @import("std").debug.assert;
+
+fn test__divti3(a: i128, b: i128, expected: i128) void {
+ const x = __divti3(a, b);
+ assert(x == expected);
+}
+
+test "divti3" {
+ test__divti3(0, 1, 0);
+ test__divti3(0, -1, 0);
+ test__divti3(2, 1, 2);
+ test__divti3(2, -1, -2);
+ test__divti3(-2, 1, -2);
+ test__divti3(-2, -1, 2);
+
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), 1, @bitCast(i128, u128(0x8 << 124)));
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), -1, @bitCast(i128, u128(0x8 << 124)));
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), -2, @bitCast(i128, u128(0x4 << 124)));
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), 2, @bitCast(i128, u128(0xc << 124)));
+}
diff --git a/std/special/compiler_rt/extendXfYf2.zig b/std/special/compiler_rt/extendXfYf2.zig
new file mode 100644
index 0000000000..099e27b74a
--- /dev/null
+++ b/std/special/compiler_rt/extendXfYf2.zig
@@ -0,0 +1,89 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+pub extern fn __extenddftf2(a: f64) f128 {
+ return extendXfYf2(f128, f64, a);
+}
+
+pub extern fn __extendsftf2(a: f32) f128 {
+ return extendXfYf2(f128, f32, a);
+}
+
+pub extern fn __extendhfsf2(a: u16) f32 {
+ return extendXfYf2(f32, f16, @bitCast(f16, a));
+}
+
+const CHAR_BIT = 8;
+
+inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
+ const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
+ const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
+ const srcSigBits = std.math.floatMantissaBits(src_t);
+ const dstSigBits = std.math.floatMantissaBits(dst_t);
+ const SrcShift = std.math.Log2Int(src_rep_t);
+ const DstShift = std.math.Log2Int(dst_rep_t);
+
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const srcBits = @sizeOf(src_t) * CHAR_BIT;
+ const srcExpBits = srcBits - srcSigBits - 1;
+ const srcInfExp = (1 << srcExpBits) - 1;
+ const srcExpBias = srcInfExp >> 1;
+
+ const srcMinNormal = 1 << srcSigBits;
+ const srcInfinity = srcInfExp << srcSigBits;
+ const srcSignMask = 1 << (srcSigBits + srcExpBits);
+ const srcAbsMask = srcSignMask - 1;
+ const srcQNaN = 1 << (srcSigBits - 1);
+ const srcNaNCode = srcQNaN - 1;
+
+ const dstBits = @sizeOf(dst_t) * CHAR_BIT;
+ const dstExpBits = dstBits - dstSigBits - 1;
+ const dstInfExp = (1 << dstExpBits) - 1;
+ const dstExpBias = dstInfExp >> 1;
+
+ const dstMinNormal: dst_rep_t = dst_rep_t(1) << dstSigBits;
+
+ // Break a into a sign and representation of the absolute value
+ const aRep: src_rep_t = @bitCast(src_rep_t, a);
+ const aAbs: src_rep_t = aRep & srcAbsMask;
+ const sign: src_rep_t = aRep & srcSignMask;
+ var absResult: dst_rep_t = undefined;
+
+ if (aAbs -% srcMinNormal < srcInfinity - srcMinNormal) {
+ // a is a normal number.
+ // Extend to the destination type by shifting the significand and
+ // exponent into the proper position and rebiasing the exponent.
+ absResult = dst_rep_t(aAbs) << (dstSigBits - srcSigBits);
+ absResult += (dstExpBias - srcExpBias) << dstSigBits;
+ } else if (aAbs >= srcInfinity) {
+ // a is NaN or infinity.
+ // Conjure the result by beginning with infinity, then setting the qNaN
+ // bit (if needed) and right-aligning the rest of the trailing NaN
+ // payload field.
+ absResult = dstInfExp << dstSigBits;
+ absResult |= dst_rep_t(aAbs & srcQNaN) << (dstSigBits - srcSigBits);
+ absResult |= dst_rep_t(aAbs & srcNaNCode) << (dstSigBits - srcSigBits);
+ } else if (aAbs != 0) {
+ // a is denormal.
+ // renormalize the significand and clear the leading bit, then insert
+ // the correct adjusted exponent in the destination type.
+ const scale: u32 = @clz(aAbs) - @clz(src_rep_t(srcMinNormal));
+ absResult = dst_rep_t(aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale);
+ absResult ^= dstMinNormal;
+ const resultExponent: u32 = dstExpBias - srcExpBias - scale + 1;
+ absResult |= @intCast(dst_rep_t, resultExponent) << dstSigBits;
+ } else {
+ // a is zero.
+ absResult = 0;
+ }
+
+ // Apply the signbit to (dst_t)abs(a).
+ const result: dst_rep_t align(@alignOf(dst_t)) = absResult | dst_rep_t(sign) << (dstBits - srcBits);
+ return @bitCast(dst_t, result);
+}
+
+test "import extendXfYf2" {
+ _ = @import("extendXfYf2_test.zig");
+}
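
extendXfYf2 widens a float by shifting the significand into the wider mantissa field and adding the difference of the exponent biases, with separate paths for zero, inf/NaN, and denormals. A hedged Python sketch of the f16 to f32 case (the name is mine and the denormal path is deliberately omitted), cross-checked against two vectors from the test file that follows:

```python
def extend_f16_to_f32_bits(h: int) -> int:
    """Zero, normal and inf/NaN paths of the extendXfYf2 scheme for f16 -> f32."""
    SRC_SIG, DST_SIG = 10, 23         # mantissa bits
    SRC_BIAS, DST_BIAS = 15, 127      # exponent biases

    sign = (h >> 15) & 1
    a_abs = h & 0x7FFF

    if a_abs == 0:                    # +-0
        abs_result = 0
    elif a_abs >= 0x7C00:             # inf or NaN: keep the payload aligned
        abs_result = (0xFF << DST_SIG) | ((a_abs & 0x3FF) << (DST_SIG - SRC_SIG))
    elif a_abs < 0x0400:              # denormal input
        raise NotImplementedError("denormal path omitted in this sketch")
    else:                             # normal number
        # Shifting the whole representation moves the exponent field up too,
        # so a single add of the bias difference rebiases it in place.
        abs_result = (a_abs << (DST_SIG - SRC_SIG)) + ((DST_BIAS - SRC_BIAS) << DST_SIG)

    return (sign << 31) | abs_result

# Two vectors from extendXfYf2_test.zig below:
assert extend_f16_to_f32_bits(0x3C01) == 0x3F802000   # 1 + 2**-10
assert extend_f16_to_f32_bits(0x7C00) == 0x7F800000   # +inf
```
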
diff --git a/std/special/compiler_rt/extendXfYf2_test.zig b/std/special/compiler_rt/extendXfYf2_test.zig
new file mode 100644
index 0000000000..9969607011
--- /dev/null
+++ b/std/special/compiler_rt/extendXfYf2_test.zig
@@ -0,0 +1,155 @@
+const __extenddftf2 = @import("extendXfYf2.zig").__extenddftf2;
+const __extendhfsf2 = @import("extendXfYf2.zig").__extendhfsf2;
+const __extendsftf2 = @import("extendXfYf2.zig").__extendsftf2;
+const assert = @import("std").debug.assert;
+
+fn test__extenddftf2(a: f64, expectedHi: u64, expectedLo: u64) void {
+ const x = __extenddftf2(a);
+
+ const rep = @bitCast(u128, x);
+ const hi = @intCast(u64, rep >> 64);
+ const lo = @truncate(u64, rep);
+
+ if (hi == expectedHi and lo == expectedLo)
+ return;
+
+    // test other possible NaN representation (signaling NaN)
+ if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
+ ((hi & 0xffffffffffff) > 0 or lo > 0))
+ {
+ return;
+ }
+ }
+
+ @panic("__extenddftf2 test failure");
+}
+
+fn test__extendhfsf2(a: u16, expected: u32) void {
+ const x = __extendhfsf2(a);
+ const rep = @bitCast(u32, x);
+
+ if (rep == expected) {
+ if (rep & 0x7fffffff > 0x7f800000) {
+ return; // NaN is always unequal.
+ }
+ if (x == @bitCast(f32, expected)) {
+ return;
+ }
+ }
+
+ @panic("__extendhfsf2 test failure");
+}
+
+fn test__extendsftf2(a: f32, expectedHi: u64, expectedLo: u64) void {
+ const x = __extendsftf2(a);
+
+ const rep = @bitCast(u128, x);
+ const hi = @intCast(u64, rep >> 64);
+ const lo = @truncate(u64, rep);
+
+ if (hi == expectedHi and lo == expectedLo)
+ return;
+
+    // test other possible NaN representation (signaling NaN)
+ if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
+ ((hi & 0xffffffffffff) > 0 or lo > 0))
+ {
+ return;
+ }
+ }
+
+ @panic("__extendsftf2 test failure");
+}
+
+test "extenddftf2" {
+ // qNaN
+ test__extenddftf2(makeQNaN64(), 0x7fff800000000000, 0x0);
+
+ // NaN
+ test__extenddftf2(makeNaN64(0x7100000000000), 0x7fff710000000000, 0x0);
+
+ // inf
+ test__extenddftf2(makeInf64(), 0x7fff000000000000, 0x0);
+
+ // zero
+ test__extenddftf2(0.0, 0x0, 0x0);
+
+ test__extenddftf2(0x1.23456789abcdefp+5, 0x400423456789abcd, 0xf000000000000000);
+
+ test__extenddftf2(0x1.edcba987654321fp-9, 0x3ff6edcba9876543, 0x2000000000000000);
+
+ test__extenddftf2(0x1.23456789abcdefp+45, 0x402c23456789abcd, 0xf000000000000000);
+
+ test__extenddftf2(0x1.edcba987654321fp-45, 0x3fd2edcba9876543, 0x2000000000000000);
+}
+
+test "extendhfsf2" {
+ test__extendhfsf2(0x7e00, 0x7fc00000); // qNaN
+ test__extendhfsf2(0x7f00, 0x7fe00000); // sNaN
+ test__extendhfsf2(0x7c01, 0x7f802000); // sNaN
+
+ test__extendhfsf2(0, 0); // 0
+ test__extendhfsf2(0x8000, 0x80000000); // -0
+
+ test__extendhfsf2(0x7c00, 0x7f800000); // inf
+ test__extendhfsf2(0xfc00, 0xff800000); // -inf
+
+ test__extendhfsf2(0x0001, 0x33800000); // denormal (min), 2**-24
+ test__extendhfsf2(0x8001, 0xb3800000); // denormal (min), -2**-24
+
+ test__extendhfsf2(0x03ff, 0x387fc000); // denormal (max), 2**-14 - 2**-24
+ test__extendhfsf2(0x83ff, 0xb87fc000); // denormal (max), -2**-14 + 2**-24
+
+ test__extendhfsf2(0x0400, 0x38800000); // normal (min), 2**-14
+ test__extendhfsf2(0x8400, 0xb8800000); // normal (min), -2**-14
+
+ test__extendhfsf2(0x7bff, 0x477fe000); // normal (max), 65504
+ test__extendhfsf2(0xfbff, 0xc77fe000); // normal (max), -65504
+
+ test__extendhfsf2(0x3c01, 0x3f802000); // normal, 1 + 2**-10
+ test__extendhfsf2(0xbc01, 0xbf802000); // normal, -1 - 2**-10
+
+ test__extendhfsf2(0x3555, 0x3eaaa000); // normal, approx. 1/3
+ test__extendhfsf2(0xb555, 0xbeaaa000); // normal, approx. -1/3
+}
+
+test "extendsftf2" {
+ // qNaN
+ test__extendsftf2(makeQNaN32(), 0x7fff800000000000, 0x0);
+ // NaN
+ test__extendsftf2(makeNaN32(0x410000), 0x7fff820000000000, 0x0);
+ // inf
+ test__extendsftf2(makeInf32(), 0x7fff000000000000, 0x0);
+ // zero
+ test__extendsftf2(0.0, 0x0, 0x0);
+ test__extendsftf2(0x1.23456p+5, 0x4004234560000000, 0x0);
+ test__extendsftf2(0x1.edcbap-9, 0x3ff6edcba0000000, 0x0);
+ test__extendsftf2(0x1.23456p+45, 0x402c234560000000, 0x0);
+ test__extendsftf2(0x1.edcbap-45, 0x3fd2edcba0000000, 0x0);
+}
+
+fn makeQNaN64() f64 {
+ return @bitCast(f64, u64(0x7ff8000000000000));
+}
+
+fn makeInf64() f64 {
+ return @bitCast(f64, u64(0x7ff0000000000000));
+}
+
+fn makeNaN64(rand: u64) f64 {
+ return @bitCast(f64, 0x7ff0000000000000 | (rand & 0xfffffffffffff));
+}
+
+fn makeQNaN32() f32 {
+ return @bitCast(f32, u32(0x7fc00000));
+}
+
+fn makeNaN32(rand: u32) f32 {
+ return @bitCast(f32, 0x7f800000 | (rand & 0x7fffff));
+}
+
+fn makeInf32() f32 {
+ return @bitCast(f32, u32(0x7f800000));
+}
diff --git a/std/special/compiler_rt/fixuint.zig b/std/special/compiler_rt/fixuint.zig
index b01bc48118..55a113b368 100644
--- a/std/special/compiler_rt/fixuint.zig
+++ b/std/special/compiler_rt/fixuint.zig
@@ -1,5 +1,5 @@
const is_test = @import("builtin").is_test;
-const Log2Int = @import("../../math/index.zig").Log2Int;
+const Log2Int = @import("std").math.Log2Int;
pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t {
@setRuntimeSafety(is_test);
@@ -32,28 +32,20 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
const aAbs: rep_t = aRep & absMask;
const sign = if ((aRep & signBit) != 0) i32(-1) else i32(1);
- const exponent = i32(aAbs >> significandBits) - exponentBias;
+ const exponent = @intCast(i32, aAbs >> significandBits) - exponentBias;
const significand: rep_t = (aAbs & significandMask) | implicitBit;
// If either the value or the exponent is negative, the result is zero.
- if (sign == -1 or exponent < 0)
- return 0;
+ if (sign == -1 or exponent < 0) return 0;
// If the value is too large for the integer type, saturate.
- if (c_uint(exponent) >= fixuint_t.bit_count)
- return ~fixuint_t(0);
+ if (@intCast(c_uint, exponent) >= fixuint_t.bit_count) return ~fixuint_t(0);
// If 0 <= exponent < significandBits, right shift to get the result.
// Otherwise, shift left.
if (exponent < significandBits) {
- // TODO this is a workaround for the mysterious "integer cast truncated bits"
- // happening on the next line
- @setRuntimeSafety(false);
- return fixuint_t(significand >> Log2Int(rep_t)(significandBits - exponent));
+ return @intCast(fixuint_t, significand >> @intCast(Log2Int(rep_t), significandBits - exponent));
} else {
- // TODO this is a workaround for the mysterious "integer cast truncated bits"
- // happening on the next line
- @setRuntimeSafety(false);
- return fixuint_t(significand) << Log2Int(fixuint_t)(exponent - significandBits);
+ return @intCast(fixuint_t, significand) << @intCast(Log2Int(fixuint_t), exponent - significandBits);
}
}
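
The reworked fixuint above converts float to unsigned integer by unpacking sign, exponent and significand from the bit pattern, returning 0 for negative or sub-1 inputs, saturating when the exponent exceeds the target width, and otherwise shifting the significand into place. A rough Python rendering of the f64 to u32 case (illustrative; the helper name is mine):

```python
import struct

def fix_to_u32(x: float) -> int:
    """The fixuint scheme above, specialised to f64 -> u32."""
    SIG_BITS = 52
    BIAS = 1023
    (rep,) = struct.unpack("<Q", struct.pack("<d", x))

    sign = rep >> 63
    exponent = ((rep >> SIG_BITS) & 0x7FF) - BIAS
    significand = (rep & ((1 << SIG_BITS) - 1)) | (1 << SIG_BITS)  # implicit bit

    if sign == 1 or exponent < 0:
        return 0                      # negative, or magnitude below 1
    if exponent >= 32:
        return 0xFFFFFFFF             # saturate to the target width
    if exponent < SIG_BITS:
        return significand >> (SIG_BITS - exponent)
    return significand << (exponent - SIG_BITS)   # generic case; unreachable for u32

assert fix_to_u32(3.9) == 3
assert fix_to_u32(-1.0) == 0
assert fix_to_u32(1.0e20) == 0xFFFFFFFF
```
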
diff --git a/std/special/compiler_rt/fixunsdfdi.zig b/std/special/compiler_rt/fixunsdfdi.zig
index 37a8a01a50..1fa7ed758e 100644
--- a/std/special/compiler_rt/fixunsdfdi.zig
+++ b/std/special/compiler_rt/fixunsdfdi.zig
@@ -9,4 +9,3 @@ pub extern fn __fixunsdfdi(a: f64) u64 {
test "import fixunsdfdi" {
_ = @import("fixunsdfdi_test.zig");
}
-
diff --git a/std/special/compiler_rt/fixunsdfdi_test.zig b/std/special/compiler_rt/fixunsdfdi_test.zig
index 3443a4938e..e59d09f8de 100644
--- a/std/special/compiler_rt/fixunsdfdi_test.zig
+++ b/std/special/compiler_rt/fixunsdfdi_test.zig
@@ -1,5 +1,5 @@
const __fixunsdfdi = @import("fixunsdfdi.zig").__fixunsdfdi;
-const assert = @import("../../index.zig").debug.assert;
+const assert = @import("std").debug.assert;
fn test__fixunsdfdi(a: f64, expected: u64) void {
const x = __fixunsdfdi(a);
diff --git a/std/special/compiler_rt/fixunsdfsi.zig b/std/special/compiler_rt/fixunsdfsi.zig
index 0b5aebb7f6..a77cb8df89 100644
--- a/std/special/compiler_rt/fixunsdfsi.zig
+++ b/std/special/compiler_rt/fixunsdfsi.zig
@@ -9,4 +9,3 @@ pub extern fn __fixunsdfsi(a: f64) u32 {
test "import fixunsdfsi" {
_ = @import("fixunsdfsi_test.zig");
}
-
diff --git a/std/special/compiler_rt/fixunsdfsi_test.zig b/std/special/compiler_rt/fixunsdfsi_test.zig
index 3c74bc5f4c..db6e32e23d 100644
--- a/std/special/compiler_rt/fixunsdfsi_test.zig
+++ b/std/special/compiler_rt/fixunsdfsi_test.zig
@@ -1,5 +1,5 @@
const __fixunsdfsi = @import("fixunsdfsi.zig").__fixunsdfsi;
-const assert = @import("../../index.zig").debug.assert;
+const assert = @import("std").debug.assert;
fn test__fixunsdfsi(a: f64, expected: u32) void {
const x = __fixunsdfsi(a);
diff --git a/std/special/compiler_rt/fixunsdfti_test.zig b/std/special/compiler_rt/fixunsdfti_test.zig
index 3cb7687887..7f7b083d19 100644
--- a/std/special/compiler_rt/fixunsdfti_test.zig
+++ b/std/special/compiler_rt/fixunsdfti_test.zig
@@ -1,5 +1,5 @@
const __fixunsdfti = @import("fixunsdfti.zig").__fixunsdfti;
-const assert = @import("../../index.zig").debug.assert;
+const assert = @import("std").debug.assert;
fn test__fixunsdfti(a: f64, expected: u128) void {
const x = __fixunsdfti(a);
@@ -44,4 +44,3 @@ test "fixunsdfti" {
test__fixunsdfti(-0x1.FFFFFFFFFFFFFp+62, 0);
test__fixunsdfti(-0x1.FFFFFFFFFFFFEp+62, 0);
}
-
diff --git a/std/special/compiler_rt/fixunssfdi_test.zig b/std/special/compiler_rt/fixunssfdi_test.zig
index de27323777..e4e6c1736d 100644
--- a/std/special/compiler_rt/fixunssfdi_test.zig
+++ b/std/special/compiler_rt/fixunssfdi_test.zig
@@ -1,5 +1,5 @@
const __fixunssfdi = @import("fixunssfdi.zig").__fixunssfdi;
-const assert = @import("../../index.zig").debug.assert;
+const assert = @import("std").debug.assert;
fn test__fixunssfdi(a: f32, expected: u64) void {
const x = __fixunssfdi(a);
diff --git a/std/special/compiler_rt/fixunssfsi_test.zig b/std/special/compiler_rt/fixunssfsi_test.zig
index 47ed21d4f4..614c648dfe 100644
--- a/std/special/compiler_rt/fixunssfsi_test.zig
+++ b/std/special/compiler_rt/fixunssfsi_test.zig
@@ -1,5 +1,5 @@
const __fixunssfsi = @import("fixunssfsi.zig").__fixunssfsi;
-const assert = @import("../../index.zig").debug.assert;
+const assert = @import("std").debug.assert;
fn test__fixunssfsi(a: f32, expected: u32) void {
const x = __fixunssfsi(a);
diff --git a/std/special/compiler_rt/fixunssfti.zig b/std/special/compiler_rt/fixunssfti.zig
index 0abd73fe76..f0cd788d2e 100644
--- a/std/special/compiler_rt/fixunssfti.zig
+++ b/std/special/compiler_rt/fixunssfti.zig
@@ -9,4 +9,3 @@ pub extern fn __fixunssfti(a: f32) u128 {
test "import fixunssfti" {
_ = @import("fixunssfti_test.zig");
}
-
diff --git a/std/special/compiler_rt/fixunssfti_test.zig b/std/special/compiler_rt/fixunssfti_test.zig
index 3033eb0def..43ad527f53 100644
--- a/std/special/compiler_rt/fixunssfti_test.zig
+++ b/std/special/compiler_rt/fixunssfti_test.zig
@@ -1,5 +1,5 @@
const __fixunssfti = @import("fixunssfti.zig").__fixunssfti;
-const assert = @import("../../index.zig").debug.assert;
+const assert = @import("std").debug.assert;
fn test__fixunssfti(a: f32, expected: u128) void {
const x = __fixunssfti(a);
diff --git a/std/special/compiler_rt/fixunstfdi_test.zig b/std/special/compiler_rt/fixunstfdi_test.zig
index d1f5f6496a..6b1b9b7bd2 100644
--- a/std/special/compiler_rt/fixunstfdi_test.zig
+++ b/std/special/compiler_rt/fixunstfdi_test.zig
@@ -1,5 +1,5 @@
const __fixunstfdi = @import("fixunstfdi.zig").__fixunstfdi;
-const assert = @import("../../index.zig").debug.assert;
+const assert = @import("std").debug.assert;
fn test__fixunstfdi(a: f128, expected: u64) void {
const x = __fixunstfdi(a);
@@ -36,15 +36,14 @@ test "fixunstfdi" {
test__fixunstfdi(-0x1.FFFFFFFFFFFFFp+62, 0);
test__fixunstfdi(-0x1.FFFFFFFFFFFFEp+62, 0);
- // TODO enable these tests when we can parse f128 float literals
- //test__fixunstfdi(0x1.FFFFFFFFFFFFFFFEp+63, 0xFFFFFFFFFFFFFFFF);
- //test__fixunstfdi(0x1.0000000000000002p+63, 0x8000000000000001);
- //test__fixunstfdi(0x1.0000000000000000p+63, 0x8000000000000000);
- //test__fixunstfdi(0x1.FFFFFFFFFFFFFFFCp+62, 0x7FFFFFFFFFFFFFFF);
- //test__fixunstfdi(0x1.FFFFFFFFFFFFFFF8p+62, 0x7FFFFFFFFFFFFFFE);
- //test__fixunstfdi(0x1.p+64, 0xFFFFFFFFFFFFFFFF);
+ test__fixunstfdi(0x1.FFFFFFFFFFFFFFFEp+63, 0xFFFFFFFFFFFFFFFF);
+ test__fixunstfdi(0x1.0000000000000002p+63, 0x8000000000000001);
+ test__fixunstfdi(0x1.0000000000000000p+63, 0x8000000000000000);
+ test__fixunstfdi(0x1.FFFFFFFFFFFFFFFCp+62, 0x7FFFFFFFFFFFFFFF);
+ test__fixunstfdi(0x1.FFFFFFFFFFFFFFF8p+62, 0x7FFFFFFFFFFFFFFE);
+ test__fixunstfdi(0x1.p+64, 0xFFFFFFFFFFFFFFFF);
- //test__fixunstfdi(-0x1.0000000000000000p+63, 0);
- //test__fixunstfdi(-0x1.FFFFFFFFFFFFFFFCp+62, 0);
- //test__fixunstfdi(-0x1.FFFFFFFFFFFFFFF8p+62, 0);
+ test__fixunstfdi(-0x1.0000000000000000p+63, 0);
+ test__fixunstfdi(-0x1.FFFFFFFFFFFFFFFCp+62, 0);
+ test__fixunstfdi(-0x1.FFFFFFFFFFFFFFF8p+62, 0);
}
diff --git a/std/special/compiler_rt/fixunstfsi_test.zig b/std/special/compiler_rt/fixunstfsi_test.zig
index 8bdf36d9d4..f47fcb3c86 100644
--- a/std/special/compiler_rt/fixunstfsi_test.zig
+++ b/std/special/compiler_rt/fixunstfsi_test.zig
@@ -1,5 +1,5 @@
const __fixunstfsi = @import("fixunstfsi.zig").__fixunstfsi;
-const assert = @import("../../index.zig").debug.assert;
+const assert = @import("std").debug.assert;
fn test__fixunstfsi(a: f128, expected: u32) void {
const x = __fixunstfsi(a);
@@ -11,9 +11,9 @@ const inf128 = @bitCast(f128, u128(0x7fff0000000000000000000000000000));
test "fixunstfsi" {
test__fixunstfsi(inf128, 0xffffffff);
test__fixunstfsi(0, 0x0);
- //TODO test__fixunstfsi(0x1.23456789abcdefp+5, 0x24);
+ test__fixunstfsi(0x1.23456789abcdefp+5, 0x24);
test__fixunstfsi(0x1.23456789abcdefp-3, 0x0);
- //TODO test__fixunstfsi(0x1.23456789abcdefp+20, 0x123456);
+ test__fixunstfsi(0x1.23456789abcdefp+20, 0x123456);
test__fixunstfsi(0x1.23456789abcdefp+40, 0xffffffff);
test__fixunstfsi(0x1.23456789abcdefp+256, 0xffffffff);
test__fixunstfsi(-0x1.23456789abcdefp+3, 0x0);
diff --git a/std/special/compiler_rt/fixunstfti.zig b/std/special/compiler_rt/fixunstfti.zig
index fea99eb6e8..cd6178164a 100644
--- a/std/special/compiler_rt/fixunstfti.zig
+++ b/std/special/compiler_rt/fixunstfti.zig
@@ -9,4 +9,3 @@ pub extern fn __fixunstfti(a: f128) u128 {
test "import fixunstfti" {
_ = @import("fixunstfti_test.zig");
}
-
diff --git a/std/special/compiler_rt/fixunstfti_test.zig b/std/special/compiler_rt/fixunstfti_test.zig
index d9eb60e59b..9128ac6c08 100644
--- a/std/special/compiler_rt/fixunstfti_test.zig
+++ b/std/special/compiler_rt/fixunstfti_test.zig
@@ -1,5 +1,5 @@
const __fixunstfti = @import("fixunstfti.zig").__fixunstfti;
-const assert = @import("../../index.zig").debug.assert;
+const assert = @import("std").debug.assert;
fn test__fixunstfti(a: f128, expected: u128) void {
const x = __fixunstfti(a);
diff --git a/std/special/compiler_rt/floattidf.zig b/std/special/compiler_rt/floattidf.zig
new file mode 100644
index 0000000000..2a24c64efe
--- /dev/null
+++ b/std/special/compiler_rt/floattidf.zig
@@ -0,0 +1,69 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const DBL_MANT_DIG = 53;
+
+pub extern fn __floattidf(arg: i128) f64 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var ai = arg;
+ const N: u32 = 128;
+ const si = ai >> @intCast(u7, (N - 1));
+ ai = ((ai ^ si) -% si);
+ var a = @bitCast(u128, ai);
+
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+ if (sd > DBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit DBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit DBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ DBL_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ DBL_MANT_DIG + 2 => {},
+ else => {
+ const shift1_amt = @intCast(i32, sd - (DBL_MANT_DIG + 2));
+ const shift1_amt_u7 = @intCast(u7, shift1_amt);
+
+ const shift2_amt = @intCast(i32, N + (DBL_MANT_DIG + 2)) - sd;
+ const shift2_amt_u7 = @intCast(u7, shift2_amt);
+
+ a = (a >> shift1_amt_u7) | @boolToInt((a & (@intCast(u128, @maxValue(u128)) >> shift2_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
+ if ((a & (u128(1) << DBL_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to DBL_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, DBL_MANT_DIG - sd);
+ // a is now rounded to DBL_MANT_DIG bits
+ }
+
+ const s = @bitCast(u128, arg) >> (128 - 32);
+ const high: u64 = (@intCast(u64, s) & 0x80000000) | // sign
+ (@intCast(u32, (e + 1023)) << 20) | // exponent
+ (@truncate(u32, a >> 32) & 0x000fffff); // mantissa-high
+ const low: u64 = @truncate(u32, a); // mantissa-low
+
+ return @bitCast(f64, low | (high << 32));
+}
+
+test "import floattidf" {
+ _ = @import("floattidf_test.zig");
+}
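
__floattidf, __floattisf, __floattitf and the unsigned variants below all share one rounding scheme, described by the P/Q/R comment: squeeze the integer down to MANT_DIG + 2 bits with every discarded bit OR'd into the sticky bit R, fold P into R, add one, and drop the bottom two bits, which yields round-to-nearest, ties-to-even. A pure-Python model of the f64 case (illustrative only; the function name is mine, and the signed variants simply take the absolute value first and re-apply the sign):

```python
import struct

DBL_MANT_DIG = 53

def u128_to_f64(a: int) -> float:
    """Pure-Python model of the rounding used by __floattidf and friends."""
    if a == 0:
        return 0.0

    sd = a.bit_length()                   # number of significant digits
    e = sd - 1                            # unbiased exponent
    if sd > DBL_MANT_DIG:
        if sd == DBL_MANT_DIG + 1:
            a <<= 1
        elif sd == DBL_MANT_DIG + 2:
            pass
        else:
            shift = sd - (DBL_MANT_DIG + 2)
            sticky = 1 if (a & ((1 << shift) - 1)) != 0 else 0
            a = (a >> shift) | sticky     # keep value, Q and sticky R
        a |= 1 if (a & 4) != 0 else 0     # or P into R (enables ties-to-even)
        a += 1                            # round
        a >>= 2                           # dump Q and R
        if (a & (1 << DBL_MANT_DIG)) != 0:  # rounding carried into a new bit
            a >>= 1
            e += 1
    else:
        a <<= DBL_MANT_DIG - sd           # small values are exact

    bits = ((e + 1023) << 52) | (a & ((1 << 52) - 1))
    return struct.unpack("<d", struct.pack("<Q", bits))[0]

# Agrees with vectors from the floattidf/floatuntidf tests below:
assert u128_to_f64(0x0007FB72EC000000) == float.fromhex("0x1.FEDCBBp+50")
assert u128_to_f64((0x023479FD0E092DD1 << 64) | 11) == float.fromhex("0x1.1A3CFE870496Fp+121")
```
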
diff --git a/std/special/compiler_rt/floattidf_test.zig b/std/special/compiler_rt/floattidf_test.zig
new file mode 100644
index 0000000000..25dc595052
--- /dev/null
+++ b/std/special/compiler_rt/floattidf_test.zig
@@ -0,0 +1,84 @@
+const __floattidf = @import("floattidf.zig").__floattidf;
+const assert = @import("std").debug.assert;
+
+fn test__floattidf(a: i128, expected: f64) void {
+ const x = __floattidf(a);
+ assert(x == expected);
+}
+
+test "floattidf" {
+ test__floattidf(0, 0.0);
+
+ test__floattidf(1, 1.0);
+ test__floattidf(2, 2.0);
+ test__floattidf(20, 20.0);
+ test__floattidf(-1, -1.0);
+ test__floattidf(-2, -2.0);
+ test__floattidf(-20, -20.0);
+
+ test__floattidf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floattidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ test__floattidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ test__floattidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+
+ test__floattidf(make_ti(0x8000008000000000, 0), -0x1.FFFFFEp+126);
+ test__floattidf(make_ti(0x8000000000000800, 0), -0x1.FFFFFFFFFFFFEp+126);
+ test__floattidf(make_ti(0x8000010000000000, 0), -0x1.FFFFFCp+126);
+ test__floattidf(make_ti(0x8000000000001000, 0), -0x1.FFFFFFFFFFFFCp+126);
+
+ test__floattidf(make_ti(0x8000000000000000, 0), -0x1.000000p+127);
+ test__floattidf(make_ti(0x8000000000000001, 0), -0x1.000000p+127);
+
+ test__floattidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floattidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ test__floattidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ test__floattidf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ test__floattidf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ test__floattidf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ test__floattidf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ test__floattidf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ test__floattidf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ test__floattidf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ test__floattidf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ test__floattidf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DA1, 0x1.1A3CFE870496Dp+57);
+ test__floattidf(0x023479FD0E092DB0, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DB8, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DB6, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DBF, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DC1, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DC7, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DC8, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DCF, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DD0, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DD1, 0x1.1A3CFE870496Fp+57);
+ test__floattidf(0x023479FD0E092DD8, 0x1.1A3CFE870496Fp+57);
+ test__floattidf(0x023479FD0E092DDF, 0x1.1A3CFE870496Fp+57);
+ test__floattidf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ test__floattidf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496Dp+121);
+ test__floattidf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496Fp+121);
+ test__floattidf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496Fp+121);
+ test__floattidf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496Fp+121);
+ test__floattidf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+}
+
+fn make_ti(high: u64, low: u64) i128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return @bitCast(i128, result);
+}
diff --git a/std/special/compiler_rt/floattisf.zig b/std/special/compiler_rt/floattisf.zig
new file mode 100644
index 0000000000..4618a86444
--- /dev/null
+++ b/std/special/compiler_rt/floattisf.zig
@@ -0,0 +1,69 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const FLT_MANT_DIG = 24;
+
+pub extern fn __floattisf(arg: i128) f32 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var ai = arg;
+ const N: u32 = 128;
+ const si = ai >> @intCast(u7, (N - 1));
+ ai = ((ai ^ si) -% si);
+ var a = @bitCast(u128, ai);
+
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+
+ if (sd > FLT_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit FLT_MANT_DIG-1 bits to the right of 1
+ // Q = bit FLT_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ FLT_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ FLT_MANT_DIG + 2 => {},
+ else => {
+ const shift1_amt = @intCast(i32, sd - (FLT_MANT_DIG + 2));
+ const shift1_amt_u7 = @intCast(u7, shift1_amt);
+
+ const shift2_amt = @intCast(i32, N + (FLT_MANT_DIG + 2)) - sd;
+ const shift2_amt_u7 = @intCast(u7, shift2_amt);
+
+ a = (a >> shift1_amt_u7) | @boolToInt((a & (@intCast(u128, @maxValue(u128)) >> shift2_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
+ if ((a & (u128(1) << FLT_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to FLT_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, FLT_MANT_DIG - sd);
+ // a is now rounded to FLT_MANT_DIG bits
+ }
+
+ const s = @bitCast(u128, arg) >> (128 - 32);
+ const r = (@intCast(u32, s) & 0x80000000) | // sign
+ (@intCast(u32, (e + 127)) << 23) | // exponent
+ (@truncate(u32, a) & 0x007fffff); // mantissa-high
+
+ return @bitCast(f32, r);
+}
+
+test "import floattisf" {
+ _ = @import("floattisf_test.zig");
+}
diff --git a/std/special/compiler_rt/floattisf_test.zig b/std/special/compiler_rt/floattisf_test.zig
new file mode 100644
index 0000000000..ecb8eac60a
--- /dev/null
+++ b/std/special/compiler_rt/floattisf_test.zig
@@ -0,0 +1,60 @@
+const __floattisf = @import("floattisf.zig").__floattisf;
+const assert = @import("std").debug.assert;
+
+fn test__floattisf(a: i128, expected: f32) void {
+ const x = __floattisf(a);
+ assert(x == expected);
+}
+
+test "floattisf" {
+ test__floattisf(0, 0.0);
+
+ test__floattisf(1, 1.0);
+ test__floattisf(2, 2.0);
+ test__floattisf(-1, -1.0);
+ test__floattisf(-2, -2.0);
+
+ test__floattisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floattisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+
+ test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000008000000000), -0x1.FFFFFEp+62);
+ test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000010000000000), -0x1.FFFFFCp+62);
+
+ test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000000000000000), -0x1.000000p+63);
+ test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000000000000001), -0x1.000000p+63);
+
+ test__floattisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floattisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72EC000000, 0x1.FEDCBCp+50);
+ test__floattisf(0x0007FB72E8000001, 0x1.FEDCBAp+50);
+
+ test__floattisf(0x0007FB72E6000000, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72E7000000, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72E4000001, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72E4000000, 0x1.FEDCB8p+50);
+
+ test__floattisf(make_ti(0x0007FB72E8000000, 0), 0x1.FEDCBAp+114);
+
+ test__floattisf(make_ti(0x0007FB72EA000000, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72EB000000, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72EBFFFFFF, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72EC000000, 0), 0x1.FEDCBCp+114);
+ test__floattisf(make_ti(0x0007FB72E8000001, 0), 0x1.FEDCBAp+114);
+
+ test__floattisf(make_ti(0x0007FB72E6000000, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72E7000000, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72E7FFFFFF, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72E4000001, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72E4000000, 0), 0x1.FEDCB8p+114);
+}
+
+fn make_ti(high: u64, low: u64) i128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return @bitCast(i128, result);
+}
diff --git a/std/special/compiler_rt/floattitf.zig b/std/special/compiler_rt/floattitf.zig
new file mode 100644
index 0000000000..4da2c145fa
--- /dev/null
+++ b/std/special/compiler_rt/floattitf.zig
@@ -0,0 +1,69 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const LDBL_MANT_DIG = 113;
+
+pub extern fn __floattitf(arg: i128) f128 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var ai = arg;
+ const N: u32 = 128;
+ const si = ai >> @intCast(u7, (N - 1));
+ ai = ((ai ^ si) -% si);
+ var a = @bitCast(u128, ai);
+
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+ if (sd > LDBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit LDBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit LDBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ LDBL_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ LDBL_MANT_DIG + 2 => {},
+ else => {
+ const shift1_amt = @intCast(i32, sd - (LDBL_MANT_DIG + 2));
+ const shift1_amt_u7 = @intCast(u7, shift1_amt);
+
+ const shift2_amt = @intCast(i32, N + (LDBL_MANT_DIG + 2)) - sd;
+ const shift2_amt_u7 = @intCast(u7, shift2_amt);
+
+ a = (a >> shift1_amt_u7) | @boolToInt((a & (@intCast(u128, @maxValue(u128)) >> shift2_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
+ if ((a & (u128(1) << LDBL_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to LDBL_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, LDBL_MANT_DIG - sd);
+ // a is now rounded to LDBL_MANT_DIG bits
+ }
+
+ const s = @bitCast(u128, arg) >> (128 - 64);
+ const high: u128 = (@intCast(u64, s) & 0x8000000000000000) | // sign
+ (@intCast(u64, (e + 16383)) << 48) | // exponent
+ (@truncate(u64, a >> 64) & 0x0000ffffffffffff); // mantissa-high
+ const low = @truncate(u64, a); // mantissa-low
+
+ return @bitCast(f128, low | (high << 64));
+}
+
+test "import floattitf" {
+ _ = @import("floattitf_test.zig");
+}
diff --git a/std/special/compiler_rt/floattitf_test.zig b/std/special/compiler_rt/floattitf_test.zig
new file mode 100644
index 0000000000..da2ccc8b35
--- /dev/null
+++ b/std/special/compiler_rt/floattitf_test.zig
@@ -0,0 +1,96 @@
+const __floattitf = @import("floattitf.zig").__floattitf;
+const assert = @import("std").debug.assert;
+
+fn test__floattitf(a: i128, expected: f128) void {
+ const x = __floattitf(a);
+ assert(x == expected);
+}
+
+test "floattitf" {
+ test__floattitf(0, 0.0);
+
+ test__floattitf(1, 1.0);
+ test__floattitf(2, 2.0);
+ test__floattitf(20, 20.0);
+ test__floattitf(-1, -1.0);
+ test__floattitf(-2, -2.0);
+ test__floattitf(-20, -20.0);
+
+ test__floattitf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floattitf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ test__floattitf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ test__floattitf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+
+ test__floattitf(make_ti(0x8000008000000000, 0), -0x1.FFFFFEp+126);
+ test__floattitf(make_ti(0x8000000000000800, 0), -0x1.FFFFFFFFFFFFEp+126);
+ test__floattitf(make_ti(0x8000010000000000, 0), -0x1.FFFFFCp+126);
+ test__floattitf(make_ti(0x8000000000001000, 0), -0x1.FFFFFFFFFFFFCp+126);
+
+ test__floattitf(make_ti(0x8000000000000000, 0), -0x1.000000p+127);
+ test__floattitf(make_ti(0x8000000000000001, 0), -0x1.FFFFFFFFFFFFFFFCp+126);
+
+ test__floattitf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floattitf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ test__floattitf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ test__floattitf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ test__floattitf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ test__floattitf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ test__floattitf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ test__floattitf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ test__floattitf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ test__floattitf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ test__floattitf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ test__floattitf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ test__floattitf(0x023479FD0E092DA1, 0x1.1A3CFE870496D08p+57);
+ test__floattitf(0x023479FD0E092DB0, 0x1.1A3CFE870496D8p+57);
+ test__floattitf(0x023479FD0E092DB8, 0x1.1A3CFE870496DCp+57);
+ test__floattitf(0x023479FD0E092DB6, 0x1.1A3CFE870496DBp+57);
+ test__floattitf(0x023479FD0E092DBF, 0x1.1A3CFE870496DF8p+57);
+ test__floattitf(0x023479FD0E092DC1, 0x1.1A3CFE870496E08p+57);
+ test__floattitf(0x023479FD0E092DC7, 0x1.1A3CFE870496E38p+57);
+ test__floattitf(0x023479FD0E092DC8, 0x1.1A3CFE870496E4p+57);
+ test__floattitf(0x023479FD0E092DCF, 0x1.1A3CFE870496E78p+57);
+ test__floattitf(0x023479FD0E092DD0, 0x1.1A3CFE870496E8p+57);
+ test__floattitf(0x023479FD0E092DD1, 0x1.1A3CFE870496E88p+57);
+ test__floattitf(0x023479FD0E092DD8, 0x1.1A3CFE870496ECp+57);
+ test__floattitf(0x023479FD0E092DDF, 0x1.1A3CFE870496EF8p+57);
+ test__floattitf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ test__floattitf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floattitf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496D08p+121);
+ test__floattitf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496D8p+121);
+ test__floattitf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496DCp+121);
+ test__floattitf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496DBp+121);
+ test__floattitf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496DF8p+121);
+ test__floattitf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496E08p+121);
+ test__floattitf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496E38p+121);
+ test__floattitf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496E4p+121);
+ test__floattitf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496E78p+121);
+ test__floattitf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496E8p+121);
+ test__floattitf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496E88p+121);
+ test__floattitf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496ECp+121);
+ test__floattitf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496EF8p+121);
+ test__floattitf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+
+ test__floattitf(make_ti(0, 0xFFFFFFFFFFFFFFFF), 0x1.FFFFFFFFFFFFFFFEp+63);
+
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC2801), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3000), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC37FF), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4000), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC47FF), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4801), 0x1.23456789ABCDEF0123456789ABC5p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC57FF), 0x1.23456789ABCDEF0123456789ABC5p+124);
+}
+
+fn make_ti(high: u64, low: u64) i128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return @bitCast(i128, result);
+}
diff --git a/std/special/compiler_rt/floatunditf.zig b/std/special/compiler_rt/floatunditf.zig
new file mode 100644
index 0000000000..afc545448a
--- /dev/null
+++ b/std/special/compiler_rt/floatunditf.zig
@@ -0,0 +1,28 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const std = @import("std");
+
+pub extern fn __floatunditf(a: u128) f128 {
+ @setRuntimeSafety(is_test);
+
+ if (a == 0) {
+ return 0;
+ }
+
+ const mantissa_bits = std.math.floatMantissaBits(f128);
+ const exponent_bits = std.math.floatExponentBits(f128);
+ const exponent_bias = (1 << (exponent_bits - 1)) - 1;
+ const implicit_bit = 1 << mantissa_bits;
+
+ const exp = (u128.bit_count - 1) - @clz(a);
+ const shift = mantissa_bits - @intCast(u7, exp);
+
+ var result: u128 align(16) = (a << shift) ^ implicit_bit;
+ result += (@intCast(u128, exp) + exponent_bias) << mantissa_bits;
+
+ return @bitCast(f128, result);
+}
+
+test "import floatunditf" {
+ _ = @import("floatunditf_test.zig");
+}
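
__floatunditf (and __floatunsitf further down) never needs the sticky-bit machinery because the input always fits in f128's 113-bit significand: find the leading bit with clz, shift it onto the implicit-bit position, XOR it away, and add the biased exponent. The same recipe transposed to u64 to f64 so it can be checked with ordinary Python doubles (a sketch; only valid while the input fits in 53 bits, and the name is mine):

```python
import struct

def small_uint_to_f64(a: int) -> float:
    """The __floatunditf/__floatunsitf recipe, specialised to u64 -> f64."""
    MANT_BITS = 52
    if a == 0:
        return 0.0

    exp = a.bit_length() - 1                              # position of the leading 1
    assert exp <= MANT_BITS, "input would need rounding"

    result = (a << (MANT_BITS - exp)) ^ (1 << MANT_BITS)  # drop the implicit bit
    result += (exp + 1023) << MANT_BITS                   # add the biased exponent
    return struct.unpack("<d", struct.pack("<Q", result))[0]

assert small_uint_to_f64(1) == 1.0
assert small_uint_to_f64(0x12345678) == 305419896.0
```
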
diff --git a/std/special/compiler_rt/floatunditf_test.zig b/std/special/compiler_rt/floatunditf_test.zig
new file mode 100644
index 0000000000..8533c75070
--- /dev/null
+++ b/std/special/compiler_rt/floatunditf_test.zig
@@ -0,0 +1,33 @@
+const __floatunditf = @import("floatunditf.zig").__floatunditf;
+const assert = @import("std").debug.assert;
+
+fn test__floatunditf(a: u128, expected_hi: u64, expected_lo: u64) void {
+ const x = __floatunditf(a);
+
+ const x_repr = @bitCast(u128, x);
+ const x_hi = @intCast(u64, x_repr >> 64);
+ const x_lo = @truncate(u64, x_repr);
+
+ if (x_hi == expected_hi and x_lo == expected_lo) {
+ return;
+ }
+ // nan repr
+ else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
+ if ((x_hi & 0x7fff000000000000) == 0x7fff000000000000 and ((x_hi & 0xffffffffffff) > 0 or x_lo > 0)) {
+ return;
+ }
+ }
+
+ @panic("__floatunditf test failure");
+}
+
+test "floatunditf" {
+ test__floatunditf(0xffffffffffffffff, 0x403effffffffffff, 0xfffe000000000000);
+ test__floatunditf(0xfffffffffffffffe, 0x403effffffffffff, 0xfffc000000000000);
+ test__floatunditf(0x8000000000000000, 0x403e000000000000, 0x0);
+ test__floatunditf(0x7fffffffffffffff, 0x403dffffffffffff, 0xfffc000000000000);
+ test__floatunditf(0x123456789abcdef1, 0x403b23456789abcd, 0xef10000000000000);
+ test__floatunditf(0x2, 0x4000000000000000, 0x0);
+ test__floatunditf(0x1, 0x3fff000000000000, 0x0);
+ test__floatunditf(0x0, 0x0, 0x0);
+}
diff --git a/std/special/compiler_rt/floatunsitf.zig b/std/special/compiler_rt/floatunsitf.zig
new file mode 100644
index 0000000000..19a5918bd0
--- /dev/null
+++ b/std/special/compiler_rt/floatunsitf.zig
@@ -0,0 +1,29 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const std = @import("std");
+
+pub extern fn __floatunsitf(a: u64) f128 {
+ @setRuntimeSafety(is_test);
+
+ if (a == 0) {
+ return 0;
+ }
+
+ const mantissa_bits = std.math.floatMantissaBits(f128);
+ const exponent_bits = std.math.floatExponentBits(f128);
+ const exponent_bias = (1 << (exponent_bits - 1)) - 1;
+ const implicit_bit = 1 << mantissa_bits;
+
+ const exp = (u64.bit_count - 1) - @clz(a);
+ const shift = mantissa_bits - @intCast(u7, exp);
+
+ // TODO(#1148): @bitCast alignment error
+ var result align(16) = (@intCast(u128, a) << shift) ^ implicit_bit;
+ result += (@intCast(u128, exp) + exponent_bias) << mantissa_bits;
+
+ return @bitCast(f128, result);
+}
+
+test "import floatunsitf" {
+ _ = @import("floatunsitf_test.zig");
+}
diff --git a/std/special/compiler_rt/floatunsitf_test.zig b/std/special/compiler_rt/floatunsitf_test.zig
new file mode 100644
index 0000000000..06f54cde03
--- /dev/null
+++ b/std/special/compiler_rt/floatunsitf_test.zig
@@ -0,0 +1,29 @@
+const __floatunsitf = @import("floatunsitf.zig").__floatunsitf;
+const assert = @import("std").debug.assert;
+
+fn test__floatunsitf(a: u64, expected_hi: u64, expected_lo: u64) void {
+ const x = __floatunsitf(a);
+
+ const x_repr = @bitCast(u128, x);
+ const x_hi = @intCast(u64, x_repr >> 64);
+ const x_lo = @truncate(u64, x_repr);
+
+ if (x_hi == expected_hi and x_lo == expected_lo) {
+ return;
+ }
+ // nan repr
+ else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
+ if ((x_hi & 0x7fff000000000000) == 0x7fff000000000000 and ((x_hi & 0xffffffffffff) > 0 or x_lo > 0)) {
+ return;
+ }
+ }
+
+ @panic("__floatunsitf test failure");
+}
+
+test "floatunsitf" {
+ test__floatunsitf(0x7fffffff, 0x401dfffffffc0000, 0x0);
+ test__floatunsitf(0, 0x0, 0x0);
+ test__floatunsitf(0xffffffff, 0x401efffffffe0000, 0x0);
+ test__floatunsitf(0x12345678, 0x401b234567800000, 0x0);
+}
diff --git a/std/special/compiler_rt/floatuntidf.zig b/std/special/compiler_rt/floatuntidf.zig
new file mode 100644
index 0000000000..1101733825
--- /dev/null
+++ b/std/special/compiler_rt/floatuntidf.zig
@@ -0,0 +1,60 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const DBL_MANT_DIG = 53;
+
+pub extern fn __floatuntidf(arg: u128) f64 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var a = arg;
+ const N: u32 = @sizeOf(u128) * 8;
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+ if (sd > DBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit DBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit DBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ DBL_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ DBL_MANT_DIG + 2 => {},
+ else => {
+ const shift_amt = @bitCast(i32, N + (DBL_MANT_DIG + 2)) - sd;
+ const shift_amt_u7 = @intCast(u7, shift_amt);
+ a = (a >> @intCast(u7, sd - (DBL_MANT_DIG + 2))) |
+ @boolToInt((a & (u128(@maxValue(u128)) >> shift_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
+ if ((a & (u128(1) << DBL_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to DBL_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, DBL_MANT_DIG - sd);
+ // a is now rounded to DBL_MANT_DIG bits
+ }
+
+ const high: u64 = @bitCast(u32, (e + 1023) << 20) | // exponent
+ (@truncate(u32, a >> 32) & 0x000FFFFF); // mantissa-high
+ const low = @truncate(u32, a); // mantissa-low
+
+ return @bitCast(f64, low | (high << 32));
+}
+
+test "import floatuntidf" {
+ _ = @import("floatuntidf_test.zig");
+}
diff --git a/std/special/compiler_rt/floatuntidf_test.zig b/std/special/compiler_rt/floatuntidf_test.zig
new file mode 100644
index 0000000000..e2c79378e2
--- /dev/null
+++ b/std/special/compiler_rt/floatuntidf_test.zig
@@ -0,0 +1,81 @@
+const __floatuntidf = @import("floatuntidf.zig").__floatuntidf;
+const assert = @import("std").debug.assert;
+
+fn test__floatuntidf(a: u128, expected: f64) void {
+ const x = __floatuntidf(a);
+ assert(x == expected);
+}
+
+test "floatuntidf" {
+ test__floatuntidf(0, 0.0);
+
+ test__floatuntidf(1, 1.0);
+ test__floatuntidf(2, 2.0);
+ test__floatuntidf(20, 20.0);
+
+ test__floatuntidf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floatuntidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ test__floatuntidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ test__floatuntidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+
+ test__floatuntidf(make_ti(0x8000008000000000, 0), 0x1.000001p+127);
+ test__floatuntidf(make_ti(0x8000000000000800, 0), 0x1.0000000000001p+127);
+ test__floatuntidf(make_ti(0x8000010000000000, 0), 0x1.000002p+127);
+ test__floatuntidf(make_ti(0x8000000000001000, 0), 0x1.0000000000002p+127);
+
+ test__floatuntidf(make_ti(0x8000000000000000, 0), 0x1.000000p+127);
+ test__floatuntidf(make_ti(0x8000000000000001, 0), 0x1.0000000000000002p+127);
+
+ test__floatuntidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floatuntidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ test__floatuntidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ test__floatuntidf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ test__floatuntidf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ test__floatuntidf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ test__floatuntidf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ test__floatuntidf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ test__floatuntidf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ test__floatuntidf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ test__floatuntidf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ test__floatuntidf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DA1, 0x1.1A3CFE870496Dp+57);
+ test__floatuntidf(0x023479FD0E092DB0, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DB8, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DB6, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DBF, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DC1, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DC7, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DC8, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DCF, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DD0, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DD1, 0x1.1A3CFE870496Fp+57);
+ test__floatuntidf(0x023479FD0E092DD8, 0x1.1A3CFE870496Fp+57);
+ test__floatuntidf(0x023479FD0E092DDF, 0x1.1A3CFE870496Fp+57);
+ test__floatuntidf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ test__floatuntidf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496Dp+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496Fp+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496Fp+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496Fp+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+}
+
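+// Builds a u128 test operand from its high and low 64-bit halves.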
+fn make_ti(high: u64, low: u64) u128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return result;
+}
diff --git a/std/special/compiler_rt/floatuntisf.zig b/std/special/compiler_rt/floatuntisf.zig
new file mode 100644
index 0000000000..f85c22578e
--- /dev/null
+++ b/std/special/compiler_rt/floatuntisf.zig
@@ -0,0 +1,59 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const FLT_MANT_DIG = 24;
+
+pub extern fn __floatuntisf(arg: u128) f32 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var a = arg;
+ const N: u32 = @sizeOf(u128) * 8;
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+ if (sd > FLT_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit FLT_MANT_DIG-1 bits to the right of 1
+ // Q = bit FLT_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ FLT_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ FLT_MANT_DIG + 2 => {},
+ else => {
+ const shift_amt = @bitCast(i32, N + (FLT_MANT_DIG + 2)) - sd;
+ const shift_amt_u7 = @intCast(u7, shift_amt);
+ a = (a >> @intCast(u7, sd - (FLT_MANT_DIG + 2))) |
+ @boolToInt((a & (u128(@maxValue(u128)) >> shift_amt_u7)) != 0);
+ },
+ }
+ // finish
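+        // Or'ing the lowest kept mantissa bit (P) into the sticky bit R makes the
+        // +1 below round halfway cases to even (round-to-nearest-even).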
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
+ if ((a & (u128(1) << FLT_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to FLT_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, FLT_MANT_DIG - sd);
+ // a is now rounded to FLT_MANT_DIG bits
+ }
+
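+    // Assemble the IEEE-754 single: the 8-bit exponent (biased by 127) lives at
+    // bit 23; the mask keeps only the 23 explicit mantissa bits, dropping the
+    // implicit leading 1.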
+ const high = @bitCast(u32, (e + 127) << 23); // exponent
+ const low = @truncate(u32, a) & 0x007fffff; // mantissa
+
+ return @bitCast(f32, high | low);
+}
+
+test "import floatuntisf" {
+ _ = @import("floatuntisf_test.zig");
+}
diff --git a/std/special/compiler_rt/floatuntisf_test.zig b/std/special/compiler_rt/floatuntisf_test.zig
new file mode 100644
index 0000000000..7f84c1f963
--- /dev/null
+++ b/std/special/compiler_rt/floatuntisf_test.zig
@@ -0,0 +1,72 @@
+const __floatuntisf = @import("floatuntisf.zig").__floatuntisf;
+const assert = @import("std").debug.assert;
+
+fn test__floatuntisf(a: u128, expected: f32) void {
+ const x = __floatuntisf(a);
+ assert(x == expected);
+}
+
+test "floatuntisf" {
+ test__floatuntisf(0, 0.0);
+
+ test__floatuntisf(1, 1.0);
+ test__floatuntisf(2, 2.0);
+ test__floatuntisf(20, 20.0);
+
+ test__floatuntisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floatuntisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+
+ test__floatuntisf(make_ti(0x8000008000000000, 0), 0x1.000001p+127);
+ test__floatuntisf(make_ti(0x8000000000000800, 0), 0x1.0p+127);
+ test__floatuntisf(make_ti(0x8000010000000000, 0), 0x1.000002p+127);
+
+ test__floatuntisf(make_ti(0x8000000000000000, 0), 0x1.000000p+127);
+
+ test__floatuntisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floatuntisf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ test__floatuntisf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+
+ test__floatuntisf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+
+ test__floatuntisf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ test__floatuntisf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ test__floatuntisf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ test__floatuntisf(0xFFFFFFFFFFFFFFFE, 0x1p+64);
+ test__floatuntisf(0xFFFFFFFFFFFFFFFF, 0x1p+64);
+
+ test__floatuntisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floatuntisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72EC000000, 0x1.FEDCBCp+50);
+ test__floatuntisf(0x0007FB72E8000001, 0x1.FEDCBAp+50);
+
+ test__floatuntisf(0x0007FB72E6000000, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72E7000000, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72E4000001, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72E4000000, 0x1.FEDCB8p+50);
+
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCB90000000000001), 0x1.FEDCBAp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBA0000000000000), 0x1.FEDCBAp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBAFFFFFFFFFFFFF), 0x1.FEDCBAp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBB0000000000000), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBB0000000000001), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBBFFFFFFFFFFFFF), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBC0000000000000), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBC0000000000001), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBD0000000000000), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBD0000000000001), 0x1.FEDCBEp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBDFFFFFFFFFFFFF), 0x1.FEDCBEp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBE0000000000000), 0x1.FEDCBEp+76);
+}
+
+fn make_ti(high: u64, low: u64) u128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return result;
+}
diff --git a/std/special/compiler_rt/floatuntitf.zig b/std/special/compiler_rt/floatuntitf.zig
new file mode 100644
index 0000000000..6354c89287
--- /dev/null
+++ b/std/special/compiler_rt/floatuntitf.zig
@@ -0,0 +1,60 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const LDBL_MANT_DIG = 113;
+
+pub extern fn __floatuntitf(arg: u128) f128 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var a = arg;
+ const N: u32 = @sizeOf(u128) * 8;
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+ if (sd > LDBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit LDBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit LDBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ LDBL_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ LDBL_MANT_DIG + 2 => {},
+ else => {
+ const shift_amt = @bitCast(i32, N + (LDBL_MANT_DIG + 2)) - sd;
+ const shift_amt_u7 = @intCast(u7, shift_amt);
+ a = (a >> @intCast(u7, sd - (LDBL_MANT_DIG + 2))) |
+ @boolToInt((a & (u128(@maxValue(u128)) >> shift_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
+ if ((a & (u128(1) << LDBL_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to LDBL_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, LDBL_MANT_DIG - sd);
+ // a is now rounded to LDBL_MANT_DIG bits
+ }
+
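+    // Assemble the IEEE-754 quad: the 15-bit exponent (biased by 16383) lives at
+    // bit 112, i.e. bit 48 of the high 64-bit word, followed by the top 48
+    // mantissa bits; the low word carries the remaining 64 mantissa bits.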
+ const high: u128 = (@intCast(u64, (e + 16383)) << 48) | // exponent
+ (@truncate(u64, a >> 64) & 0x0000ffffffffffff); // mantissa-high
+ const low = @truncate(u64, a); // mantissa-low
+
+ return @bitCast(f128, low | (high << 64));
+}
+
+test "import floatuntitf" {
+ _ = @import("floatuntitf_test.zig");
+}
diff --git a/std/special/compiler_rt/floatuntitf_test.zig b/std/special/compiler_rt/floatuntitf_test.zig
new file mode 100644
index 0000000000..8e67fee108
--- /dev/null
+++ b/std/special/compiler_rt/floatuntitf_test.zig
@@ -0,0 +1,99 @@
+const __floatuntitf = @import("floatuntitf.zig").__floatuntitf;
+const assert = @import("std").debug.assert;
+
+fn test__floatuntitf(a: u128, expected: f128) void {
+ const x = __floatuntitf(a);
+ assert(x == expected);
+}
+
+test "floatuntitf" {
+ test__floatuntitf(0, 0.0);
+
+ test__floatuntitf(1, 1.0);
+ test__floatuntitf(2, 2.0);
+ test__floatuntitf(20, 20.0);
+
+ test__floatuntitf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floatuntitf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ test__floatuntitf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ test__floatuntitf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+ test__floatuntitf(0x7FFFFFFFFFFFFFFF, 0xF.FFFFFFFFFFFFFFEp+59);
+ test__floatuntitf(0xFFFFFFFFFFFFFFFE, 0xF.FFFFFFFFFFFFFFEp+60);
+ test__floatuntitf(0xFFFFFFFFFFFFFFFF, 0xF.FFFFFFFFFFFFFFFp+60);
+
+ test__floatuntitf(0x8000008000000000, 0x8.000008p+60);
+ test__floatuntitf(0x8000000000000800, 0x8.0000000000008p+60);
+ test__floatuntitf(0x8000010000000000, 0x8.00001p+60);
+ test__floatuntitf(0x8000000000001000, 0x8.000000000001p+60);
+
+ test__floatuntitf(0x8000000000000000, 0x8p+60);
+ test__floatuntitf(0x8000000000000001, 0x8.000000000000001p+60);
+
+ test__floatuntitf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floatuntitf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ test__floatuntitf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ test__floatuntitf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ test__floatuntitf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ test__floatuntitf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ test__floatuntitf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ test__floatuntitf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ test__floatuntitf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ test__floatuntitf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ test__floatuntitf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ test__floatuntitf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ test__floatuntitf(0x023479FD0E092DA1, 0x1.1A3CFE870496D08p+57);
+ test__floatuntitf(0x023479FD0E092DB0, 0x1.1A3CFE870496D8p+57);
+ test__floatuntitf(0x023479FD0E092DB8, 0x1.1A3CFE870496DCp+57);
+ test__floatuntitf(0x023479FD0E092DB6, 0x1.1A3CFE870496DBp+57);
+ test__floatuntitf(0x023479FD0E092DBF, 0x1.1A3CFE870496DF8p+57);
+ test__floatuntitf(0x023479FD0E092DC1, 0x1.1A3CFE870496E08p+57);
+ test__floatuntitf(0x023479FD0E092DC7, 0x1.1A3CFE870496E38p+57);
+ test__floatuntitf(0x023479FD0E092DC8, 0x1.1A3CFE870496E4p+57);
+ test__floatuntitf(0x023479FD0E092DCF, 0x1.1A3CFE870496E78p+57);
+ test__floatuntitf(0x023479FD0E092DD0, 0x1.1A3CFE870496E8p+57);
+ test__floatuntitf(0x023479FD0E092DD1, 0x1.1A3CFE870496E88p+57);
+ test__floatuntitf(0x023479FD0E092DD8, 0x1.1A3CFE870496ECp+57);
+ test__floatuntitf(0x023479FD0E092DDF, 0x1.1A3CFE870496EF8p+57);
+ test__floatuntitf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ test__floatuntitf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496D08p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496D8p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496DCp+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496DBp+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496DF8p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496E08p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496E38p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496E4p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496E78p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496E8p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496E88p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496ECp+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496EF8p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+
+ test__floatuntitf(make_ti(0, 0xFFFFFFFFFFFFFFFF), 0x1.FFFFFFFFFFFFFFFEp+63);
+
+ test__floatuntitf(make_ti(0xFFFFFFFFFFFFFFFF, 0x0000000000000000), 0x1.FFFFFFFFFFFFFFFEp+127);
+ test__floatuntitf(make_ti(0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF), 0x1.0000000000000000p+128);
+
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC2801), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3000), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC37FF), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4000), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC47FF), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4801), 0x1.23456789ABCDEF0123456789ABC5p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC57FF), 0x1.23456789ABCDEF0123456789ABC5p+124);
+}
+
+fn make_ti(high: u64, low: u64) u128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return result;
+}
diff --git a/std/special/compiler_rt/index.zig b/std/special/compiler_rt/index.zig
index 81fe1ffec1..54a461d0f1 100644
--- a/std/special/compiler_rt/index.zig
+++ b/std/special/compiler_rt/index.zig
@@ -15,10 +15,31 @@ comptime {
@export("__lttf2", @import("comparetf2.zig").__letf2, linkage);
@export("__netf2", @import("comparetf2.zig").__letf2, linkage);
@export("__gttf2", @import("comparetf2.zig").__getf2, linkage);
+ @export("__gnu_h2f_ieee", @import("extendXfYf2.zig").__extendhfsf2, linkage);
+ @export("__gnu_f2h_ieee", @import("truncXfYf2.zig").__truncsfhf2, linkage);
}
@export("__unordtf2", @import("comparetf2.zig").__unordtf2, linkage);
+ @export("__floattitf", @import("floattitf.zig").__floattitf, linkage);
+ @export("__floattidf", @import("floattidf.zig").__floattidf, linkage);
+ @export("__floattisf", @import("floattisf.zig").__floattisf, linkage);
+
+ @export("__floatunditf", @import("floatunditf.zig").__floatunditf, linkage);
+ @export("__floatunsitf", @import("floatunsitf.zig").__floatunsitf, linkage);
+
+ @export("__floatuntitf", @import("floatuntitf.zig").__floatuntitf, linkage);
+ @export("__floatuntidf", @import("floatuntidf.zig").__floatuntidf, linkage);
+ @export("__floatuntisf", @import("floatuntisf.zig").__floatuntisf, linkage);
+
+ @export("__extenddftf2", @import("extendXfYf2.zig").__extenddftf2, linkage);
+ @export("__extendsftf2", @import("extendXfYf2.zig").__extendsftf2, linkage);
+ @export("__extendhfsf2", @import("extendXfYf2.zig").__extendhfsf2, linkage);
+
+ @export("__truncsfhf2", @import("truncXfYf2.zig").__truncsfhf2, linkage);
+ @export("__trunctfdf2", @import("truncXfYf2.zig").__trunctfdf2, linkage);
+ @export("__trunctfsf2", @import("truncXfYf2.zig").__trunctfsf2, linkage);
+
@export("__fixunssfsi", @import("fixunssfsi.zig").__fixunssfsi, linkage);
@export("__fixunssfdi", @import("fixunssfdi.zig").__fixunssfdi, linkage);
@export("__fixunssfti", @import("fixunssfti.zig").__fixunssfti, linkage);
@@ -32,10 +53,6 @@ comptime {
@export("__fixunstfti", @import("fixunstfti.zig").__fixunstfti, linkage);
@export("__udivmoddi4", @import("udivmoddi4.zig").__udivmoddi4, linkage);
- @export("__udivmodti4", @import("udivmodti4.zig").__udivmodti4, linkage);
-
- @export("__udivti3", @import("udivti3.zig").__udivti3, linkage);
- @export("__umodti3", @import("umodti3.zig").__umodti3, linkage);
@export("__udivsi3", __udivsi3, linkage);
@export("__udivdi3", __udivdi3, linkage);
@@ -62,27 +79,50 @@ comptime {
@export("__chkstk", __chkstk, strong_linkage);
@export("___chkstk_ms", ___chkstk_ms, linkage);
}
+ @export("__divti3", @import("divti3.zig").__divti3_windows_x86_64, linkage);
+ @export("__muloti4", @import("muloti4.zig").__muloti4_windows_x86_64, linkage);
+ @export("__udivti3", @import("udivti3.zig").__udivti3_windows_x86_64, linkage);
+ @export("__udivmodti4", @import("udivmodti4.zig").__udivmodti4_windows_x86_64, linkage);
+ @export("__umodti3", @import("umodti3.zig").__umodti3_windows_x86_64, linkage);
},
else => {},
}
+ } else {
+ @export("__divti3", @import("divti3.zig").__divti3, linkage);
+ @export("__muloti4", @import("muloti4.zig").__muloti4, linkage);
+ @export("__udivti3", @import("udivti3.zig").__udivti3, linkage);
+ @export("__udivmodti4", @import("udivmodti4.zig").__udivmodti4, linkage);
+ @export("__umodti3", @import("umodti3.zig").__umodti3, linkage);
}
}
-const assert = @import("../../index.zig").debug.assert;
+const std = @import("std");
+const assert = std.debug.assert;
const __udivmoddi4 = @import("udivmoddi4.zig").__udivmoddi4;
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
-pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
@setCold(true);
if (is_test) {
- @import("std").debug.panic("{}", msg);
+ std.debug.panic("{}", msg);
} else {
unreachable;
}
}
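+// Helper for the *_windows_x86_64 wrappers exported above: they return their
+// 128-bit result through xmm0, since the Windows x86-64 calling convention
+// handles 128-bit return values differently from the SysV ABI.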
+pub fn setXmm0(comptime T: type, value: T) void {
+ comptime assert(builtin.arch == builtin.Arch.x86_64);
+ const aligned_value: T align(16) = value;
+ asm volatile (
+ \\movaps (%[ptr]), %%xmm0
+ :
+ : [ptr] "r" (&aligned_value)
+ : "xmm0"
+ );
+}
+
extern fn __udivdi3(a: u64, b: u64) u64 {
@setRuntimeSafety(is_test);
return __udivmoddi4(a, b, null);
@@ -144,7 +184,8 @@ fn isArmArch() bool {
builtin.Arch.armebv6t2,
builtin.Arch.armebv5,
builtin.Arch.armebv5te,
- builtin.Arch.armebv4t => true,
+ builtin.Arch.armebv4t,
+ => true,
else => false,
};
}
@@ -159,7 +200,10 @@ nakedcc fn __aeabi_uidivmod() void {
\\ ldr r1, [sp]
\\ add sp, sp, #4
\\ pop { pc }
- ::: "r2", "r1");
+ :
+ :
+ : "r2", "r1"
+ );
}
// _chkstk (_alloca) routine - probe stack between %esp and (%esp-%eax) in 4k increments,
@@ -265,39 +309,40 @@ nakedcc fn ___chkstk_ms() align(4) void {
);
}
-extern fn __udivmodsi4(a: u32, b: u32, rem: &u32) u32 {
+extern fn __udivmodsi4(a: u32, b: u32, rem: *u32) u32 {
@setRuntimeSafety(is_test);
const d = __udivsi3(a, b);
- *rem = u32(i32(a) -% (i32(d) * i32(b)));
+ rem.* = @bitCast(u32, @bitCast(i32, a) -% (@bitCast(i32, d) * @bitCast(i32, b)));
return d;
}
-
extern fn __udivsi3(n: u32, d: u32) u32 {
@setRuntimeSafety(is_test);
const n_uword_bits: c_uint = u32.bit_count;
// special cases
- if (d == 0)
- return 0; // ?!
- if (n == 0)
- return 0;
+ if (d == 0) return 0; // ?!
+ if (n == 0) return 0;
var sr = @bitCast(c_uint, c_int(@clz(d)) - c_int(@clz(n)));
// 0 <= sr <= n_uword_bits - 1 or sr large
- if (sr > n_uword_bits - 1) // d > r
+ if (sr > n_uword_bits - 1) {
+ // d > r
return 0;
- if (sr == n_uword_bits - 1) // d == 1
+ }
+ if (sr == n_uword_bits - 1) {
+ // d == 1
return n;
+ }
sr += 1;
// 1 <= sr <= n_uword_bits - 1
// Not a special case
- var q: u32 = n << u5(n_uword_bits - sr);
- var r: u32 = n >> u5(sr);
+ var q: u32 = n << @intCast(u5, n_uword_bits - sr);
+ var r: u32 = n >> @intCast(u5, sr);
var carry: u32 = 0;
while (sr > 0) : (sr -= 1) {
// r:q = ((r:q) << 1) | carry
- r = (r << 1) | (q >> u5(n_uword_bits - 1));
+ r = (r << 1) | (q >> @intCast(u5, n_uword_bits - 1));
q = (q << 1) | carry;
// carry = 0;
// if (r.all >= d.all)
@@ -305,8 +350,8 @@ extern fn __udivsi3(n: u32, d: u32) u32 {
// r.all -= d.all;
// carry = 1;
// }
- const s = i32(d -% r -% 1) >> u5(n_uword_bits - 1);
- carry = u32(s & 1);
+ const s = @intCast(i32, d -% r -% 1) >> @intCast(u5, n_uword_bits - 1);
+ carry = @intCast(u32, s & 1);
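+        // s is all ones when r >= d and zero otherwise, so the carry update above
+        // and the subtraction below are branch-free.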
r -= d & @bitCast(u32, s);
}
q = (q << 1) | carry;
@@ -327,139 +372,667 @@ fn test_one_umoddi3(a: u64, b: u64, expected_r: u64) void {
}
test "test_udivsi3" {
- const cases = [][3]u32 {
- []u32{0x00000000, 0x00000001, 0x00000000},
- []u32{0x00000000, 0x00000002, 0x00000000},
- []u32{0x00000000, 0x00000003, 0x00000000},
- []u32{0x00000000, 0x00000010, 0x00000000},
- []u32{0x00000000, 0x078644FA, 0x00000000},
- []u32{0x00000000, 0x0747AE14, 0x00000000},
- []u32{0x00000000, 0x7FFFFFFF, 0x00000000},
- []u32{0x00000000, 0x80000000, 0x00000000},
- []u32{0x00000000, 0xFFFFFFFD, 0x00000000},
- []u32{0x00000000, 0xFFFFFFFE, 0x00000000},
- []u32{0x00000000, 0xFFFFFFFF, 0x00000000},
- []u32{0x00000001, 0x00000001, 0x00000001},
- []u32{0x00000001, 0x00000002, 0x00000000},
- []u32{0x00000001, 0x00000003, 0x00000000},
- []u32{0x00000001, 0x00000010, 0x00000000},
- []u32{0x00000001, 0x078644FA, 0x00000000},
- []u32{0x00000001, 0x0747AE14, 0x00000000},
- []u32{0x00000001, 0x7FFFFFFF, 0x00000000},
- []u32{0x00000001, 0x80000000, 0x00000000},
- []u32{0x00000001, 0xFFFFFFFD, 0x00000000},
- []u32{0x00000001, 0xFFFFFFFE, 0x00000000},
- []u32{0x00000001, 0xFFFFFFFF, 0x00000000},
- []u32{0x00000002, 0x00000001, 0x00000002},
- []u32{0x00000002, 0x00000002, 0x00000001},
- []u32{0x00000002, 0x00000003, 0x00000000},
- []u32{0x00000002, 0x00000010, 0x00000000},
- []u32{0x00000002, 0x078644FA, 0x00000000},
- []u32{0x00000002, 0x0747AE14, 0x00000000},
- []u32{0x00000002, 0x7FFFFFFF, 0x00000000},
- []u32{0x00000002, 0x80000000, 0x00000000},
- []u32{0x00000002, 0xFFFFFFFD, 0x00000000},
- []u32{0x00000002, 0xFFFFFFFE, 0x00000000},
- []u32{0x00000002, 0xFFFFFFFF, 0x00000000},
- []u32{0x00000003, 0x00000001, 0x00000003},
- []u32{0x00000003, 0x00000002, 0x00000001},
- []u32{0x00000003, 0x00000003, 0x00000001},
- []u32{0x00000003, 0x00000010, 0x00000000},
- []u32{0x00000003, 0x078644FA, 0x00000000},
- []u32{0x00000003, 0x0747AE14, 0x00000000},
- []u32{0x00000003, 0x7FFFFFFF, 0x00000000},
- []u32{0x00000003, 0x80000000, 0x00000000},
- []u32{0x00000003, 0xFFFFFFFD, 0x00000000},
- []u32{0x00000003, 0xFFFFFFFE, 0x00000000},
- []u32{0x00000003, 0xFFFFFFFF, 0x00000000},
- []u32{0x00000010, 0x00000001, 0x00000010},
- []u32{0x00000010, 0x00000002, 0x00000008},
- []u32{0x00000010, 0x00000003, 0x00000005},
- []u32{0x00000010, 0x00000010, 0x00000001},
- []u32{0x00000010, 0x078644FA, 0x00000000},
- []u32{0x00000010, 0x0747AE14, 0x00000000},
- []u32{0x00000010, 0x7FFFFFFF, 0x00000000},
- []u32{0x00000010, 0x80000000, 0x00000000},
- []u32{0x00000010, 0xFFFFFFFD, 0x00000000},
- []u32{0x00000010, 0xFFFFFFFE, 0x00000000},
- []u32{0x00000010, 0xFFFFFFFF, 0x00000000},
- []u32{0x078644FA, 0x00000001, 0x078644FA},
- []u32{0x078644FA, 0x00000002, 0x03C3227D},
- []u32{0x078644FA, 0x00000003, 0x028216FE},
- []u32{0x078644FA, 0x00000010, 0x0078644F},
- []u32{0x078644FA, 0x078644FA, 0x00000001},
- []u32{0x078644FA, 0x0747AE14, 0x00000001},
- []u32{0x078644FA, 0x7FFFFFFF, 0x00000000},
- []u32{0x078644FA, 0x80000000, 0x00000000},
- []u32{0x078644FA, 0xFFFFFFFD, 0x00000000},
- []u32{0x078644FA, 0xFFFFFFFE, 0x00000000},
- []u32{0x078644FA, 0xFFFFFFFF, 0x00000000},
- []u32{0x0747AE14, 0x00000001, 0x0747AE14},
- []u32{0x0747AE14, 0x00000002, 0x03A3D70A},
- []u32{0x0747AE14, 0x00000003, 0x026D3A06},
- []u32{0x0747AE14, 0x00000010, 0x00747AE1},
- []u32{0x0747AE14, 0x078644FA, 0x00000000},
- []u32{0x0747AE14, 0x0747AE14, 0x00000001},
- []u32{0x0747AE14, 0x7FFFFFFF, 0x00000000},
- []u32{0x0747AE14, 0x80000000, 0x00000000},
- []u32{0x0747AE14, 0xFFFFFFFD, 0x00000000},
- []u32{0x0747AE14, 0xFFFFFFFE, 0x00000000},
- []u32{0x0747AE14, 0xFFFFFFFF, 0x00000000},
- []u32{0x7FFFFFFF, 0x00000001, 0x7FFFFFFF},
- []u32{0x7FFFFFFF, 0x00000002, 0x3FFFFFFF},
- []u32{0x7FFFFFFF, 0x00000003, 0x2AAAAAAA},
- []u32{0x7FFFFFFF, 0x00000010, 0x07FFFFFF},
- []u32{0x7FFFFFFF, 0x078644FA, 0x00000011},
- []u32{0x7FFFFFFF, 0x0747AE14, 0x00000011},
- []u32{0x7FFFFFFF, 0x7FFFFFFF, 0x00000001},
- []u32{0x7FFFFFFF, 0x80000000, 0x00000000},
- []u32{0x7FFFFFFF, 0xFFFFFFFD, 0x00000000},
- []u32{0x7FFFFFFF, 0xFFFFFFFE, 0x00000000},
- []u32{0x7FFFFFFF, 0xFFFFFFFF, 0x00000000},
- []u32{0x80000000, 0x00000001, 0x80000000},
- []u32{0x80000000, 0x00000002, 0x40000000},
- []u32{0x80000000, 0x00000003, 0x2AAAAAAA},
- []u32{0x80000000, 0x00000010, 0x08000000},
- []u32{0x80000000, 0x078644FA, 0x00000011},
- []u32{0x80000000, 0x0747AE14, 0x00000011},
- []u32{0x80000000, 0x7FFFFFFF, 0x00000001},
- []u32{0x80000000, 0x80000000, 0x00000001},
- []u32{0x80000000, 0xFFFFFFFD, 0x00000000},
- []u32{0x80000000, 0xFFFFFFFE, 0x00000000},
- []u32{0x80000000, 0xFFFFFFFF, 0x00000000},
- []u32{0xFFFFFFFD, 0x00000001, 0xFFFFFFFD},
- []u32{0xFFFFFFFD, 0x00000002, 0x7FFFFFFE},
- []u32{0xFFFFFFFD, 0x00000003, 0x55555554},
- []u32{0xFFFFFFFD, 0x00000010, 0x0FFFFFFF},
- []u32{0xFFFFFFFD, 0x078644FA, 0x00000022},
- []u32{0xFFFFFFFD, 0x0747AE14, 0x00000023},
- []u32{0xFFFFFFFD, 0x7FFFFFFF, 0x00000001},
- []u32{0xFFFFFFFD, 0x80000000, 0x00000001},
- []u32{0xFFFFFFFD, 0xFFFFFFFD, 0x00000001},
- []u32{0xFFFFFFFD, 0xFFFFFFFE, 0x00000000},
- []u32{0xFFFFFFFD, 0xFFFFFFFF, 0x00000000},
- []u32{0xFFFFFFFE, 0x00000001, 0xFFFFFFFE},
- []u32{0xFFFFFFFE, 0x00000002, 0x7FFFFFFF},
- []u32{0xFFFFFFFE, 0x00000003, 0x55555554},
- []u32{0xFFFFFFFE, 0x00000010, 0x0FFFFFFF},
- []u32{0xFFFFFFFE, 0x078644FA, 0x00000022},
- []u32{0xFFFFFFFE, 0x0747AE14, 0x00000023},
- []u32{0xFFFFFFFE, 0x7FFFFFFF, 0x00000002},
- []u32{0xFFFFFFFE, 0x80000000, 0x00000001},
- []u32{0xFFFFFFFE, 0xFFFFFFFD, 0x00000001},
- []u32{0xFFFFFFFE, 0xFFFFFFFE, 0x00000001},
- []u32{0xFFFFFFFE, 0xFFFFFFFF, 0x00000000},
- []u32{0xFFFFFFFF, 0x00000001, 0xFFFFFFFF},
- []u32{0xFFFFFFFF, 0x00000002, 0x7FFFFFFF},
- []u32{0xFFFFFFFF, 0x00000003, 0x55555555},
- []u32{0xFFFFFFFF, 0x00000010, 0x0FFFFFFF},
- []u32{0xFFFFFFFF, 0x078644FA, 0x00000022},
- []u32{0xFFFFFFFF, 0x0747AE14, 0x00000023},
- []u32{0xFFFFFFFF, 0x7FFFFFFF, 0x00000002},
- []u32{0xFFFFFFFF, 0x80000000, 0x00000001},
- []u32{0xFFFFFFFF, 0xFFFFFFFD, 0x00000001},
- []u32{0xFFFFFFFF, 0xFFFFFFFE, 0x00000001},
- []u32{0xFFFFFFFF, 0xFFFFFFFF, 0x00000001},
+ const cases = [][3]u32{
+ []u32{
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000000,
+ 0x00000002,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000000,
+ 0x00000003,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000000,
+ 0x00000010,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000000,
+ 0x078644FA,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000000,
+ 0x0747AE14,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000000,
+ 0x7FFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000000,
+ 0x80000000,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000000,
+ 0xFFFFFFFD,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000000,
+ 0xFFFFFFFE,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000000,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000001,
+ 0x00000001,
+ 0x00000001,
+ },
+ []u32{
+ 0x00000001,
+ 0x00000002,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000001,
+ 0x00000003,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000001,
+ 0x00000010,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000001,
+ 0x078644FA,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000001,
+ 0x0747AE14,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000001,
+ 0x7FFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000001,
+ 0x80000000,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000001,
+ 0xFFFFFFFD,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000001,
+ 0xFFFFFFFE,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000001,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000002,
+ 0x00000001,
+ 0x00000002,
+ },
+ []u32{
+ 0x00000002,
+ 0x00000002,
+ 0x00000001,
+ },
+ []u32{
+ 0x00000002,
+ 0x00000003,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000002,
+ 0x00000010,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000002,
+ 0x078644FA,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000002,
+ 0x0747AE14,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000002,
+ 0x7FFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000002,
+ 0x80000000,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000002,
+ 0xFFFFFFFD,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000002,
+ 0xFFFFFFFE,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000002,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000003,
+ 0x00000001,
+ 0x00000003,
+ },
+ []u32{
+ 0x00000003,
+ 0x00000002,
+ 0x00000001,
+ },
+ []u32{
+ 0x00000003,
+ 0x00000003,
+ 0x00000001,
+ },
+ []u32{
+ 0x00000003,
+ 0x00000010,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000003,
+ 0x078644FA,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000003,
+ 0x0747AE14,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000003,
+ 0x7FFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000003,
+ 0x80000000,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000003,
+ 0xFFFFFFFD,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000003,
+ 0xFFFFFFFE,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000003,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000010,
+ 0x00000001,
+ 0x00000010,
+ },
+ []u32{
+ 0x00000010,
+ 0x00000002,
+ 0x00000008,
+ },
+ []u32{
+ 0x00000010,
+ 0x00000003,
+ 0x00000005,
+ },
+ []u32{
+ 0x00000010,
+ 0x00000010,
+ 0x00000001,
+ },
+ []u32{
+ 0x00000010,
+ 0x078644FA,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000010,
+ 0x0747AE14,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000010,
+ 0x7FFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000010,
+ 0x80000000,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000010,
+ 0xFFFFFFFD,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000010,
+ 0xFFFFFFFE,
+ 0x00000000,
+ },
+ []u32{
+ 0x00000010,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x078644FA,
+ 0x00000001,
+ 0x078644FA,
+ },
+ []u32{
+ 0x078644FA,
+ 0x00000002,
+ 0x03C3227D,
+ },
+ []u32{
+ 0x078644FA,
+ 0x00000003,
+ 0x028216FE,
+ },
+ []u32{
+ 0x078644FA,
+ 0x00000010,
+ 0x0078644F,
+ },
+ []u32{
+ 0x078644FA,
+ 0x078644FA,
+ 0x00000001,
+ },
+ []u32{
+ 0x078644FA,
+ 0x0747AE14,
+ 0x00000001,
+ },
+ []u32{
+ 0x078644FA,
+ 0x7FFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x078644FA,
+ 0x80000000,
+ 0x00000000,
+ },
+ []u32{
+ 0x078644FA,
+ 0xFFFFFFFD,
+ 0x00000000,
+ },
+ []u32{
+ 0x078644FA,
+ 0xFFFFFFFE,
+ 0x00000000,
+ },
+ []u32{
+ 0x078644FA,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x0747AE14,
+ 0x00000001,
+ 0x0747AE14,
+ },
+ []u32{
+ 0x0747AE14,
+ 0x00000002,
+ 0x03A3D70A,
+ },
+ []u32{
+ 0x0747AE14,
+ 0x00000003,
+ 0x026D3A06,
+ },
+ []u32{
+ 0x0747AE14,
+ 0x00000010,
+ 0x00747AE1,
+ },
+ []u32{
+ 0x0747AE14,
+ 0x078644FA,
+ 0x00000000,
+ },
+ []u32{
+ 0x0747AE14,
+ 0x0747AE14,
+ 0x00000001,
+ },
+ []u32{
+ 0x0747AE14,
+ 0x7FFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x0747AE14,
+ 0x80000000,
+ 0x00000000,
+ },
+ []u32{
+ 0x0747AE14,
+ 0xFFFFFFFD,
+ 0x00000000,
+ },
+ []u32{
+ 0x0747AE14,
+ 0xFFFFFFFE,
+ 0x00000000,
+ },
+ []u32{
+ 0x0747AE14,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0x00000001,
+ 0x7FFFFFFF,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0x00000002,
+ 0x3FFFFFFF,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0x00000003,
+ 0x2AAAAAAA,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0x00000010,
+ 0x07FFFFFF,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0x078644FA,
+ 0x00000011,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0x0747AE14,
+ 0x00000011,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0x7FFFFFFF,
+ 0x00000001,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0x80000000,
+ 0x00000000,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0xFFFFFFFD,
+ 0x00000000,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0xFFFFFFFE,
+ 0x00000000,
+ },
+ []u32{
+ 0x7FFFFFFF,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0x80000000,
+ 0x00000001,
+ 0x80000000,
+ },
+ []u32{
+ 0x80000000,
+ 0x00000002,
+ 0x40000000,
+ },
+ []u32{
+ 0x80000000,
+ 0x00000003,
+ 0x2AAAAAAA,
+ },
+ []u32{
+ 0x80000000,
+ 0x00000010,
+ 0x08000000,
+ },
+ []u32{
+ 0x80000000,
+ 0x078644FA,
+ 0x00000011,
+ },
+ []u32{
+ 0x80000000,
+ 0x0747AE14,
+ 0x00000011,
+ },
+ []u32{
+ 0x80000000,
+ 0x7FFFFFFF,
+ 0x00000001,
+ },
+ []u32{
+ 0x80000000,
+ 0x80000000,
+ 0x00000001,
+ },
+ []u32{
+ 0x80000000,
+ 0xFFFFFFFD,
+ 0x00000000,
+ },
+ []u32{
+ 0x80000000,
+ 0xFFFFFFFE,
+ 0x00000000,
+ },
+ []u32{
+ 0x80000000,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0x00000001,
+ 0xFFFFFFFD,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0x00000002,
+ 0x7FFFFFFE,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0x00000003,
+ 0x55555554,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0x00000010,
+ 0x0FFFFFFF,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0x078644FA,
+ 0x00000022,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0x0747AE14,
+ 0x00000023,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0x7FFFFFFF,
+ 0x00000001,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0x80000000,
+ 0x00000001,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0xFFFFFFFD,
+ 0x00000001,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0xFFFFFFFE,
+ 0x00000000,
+ },
+ []u32{
+ 0xFFFFFFFD,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0x00000001,
+ 0xFFFFFFFE,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0x00000002,
+ 0x7FFFFFFF,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0x00000003,
+ 0x55555554,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0x00000010,
+ 0x0FFFFFFF,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0x078644FA,
+ 0x00000022,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0x0747AE14,
+ 0x00000023,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0x7FFFFFFF,
+ 0x00000002,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0x80000000,
+ 0x00000001,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0xFFFFFFFD,
+ 0x00000001,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0xFFFFFFFE,
+ 0x00000001,
+ },
+ []u32{
+ 0xFFFFFFFE,
+ 0xFFFFFFFF,
+ 0x00000000,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0x00000001,
+ 0xFFFFFFFF,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0x00000002,
+ 0x7FFFFFFF,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0x00000003,
+ 0x55555555,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0x00000010,
+ 0x0FFFFFFF,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0x078644FA,
+ 0x00000022,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0x0747AE14,
+ 0x00000023,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0x7FFFFFFF,
+ 0x00000002,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0x80000000,
+ 0x00000001,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0xFFFFFFFD,
+ 0x00000001,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0xFFFFFFFE,
+ 0x00000001,
+ },
+ []u32{
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x00000001,
+ },
};
for (cases) |case| {
diff --git a/std/special/compiler_rt/muloti4.zig b/std/special/compiler_rt/muloti4.zig
new file mode 100644
index 0000000000..866077c80c
--- /dev/null
+++ b/std/special/compiler_rt/muloti4.zig
@@ -0,0 +1,55 @@
+const udivmod = @import("udivmod.zig").udivmod;
+const builtin = @import("builtin");
+const compiler_rt = @import("index.zig");
+
+pub extern fn __muloti4(a: i128, b: i128, overflow: *c_int) i128 {
+ @setRuntimeSafety(builtin.is_test);
+
+ const min = @bitCast(i128, u128(1 << (i128.bit_count - 1)));
+ const max = ~min;
+ overflow.* = 0;
+
+ const r = a *% b;
+ if (a == min) {
+ if (b != 0 and b != 1) {
+ overflow.* = 1;
+ }
+ return r;
+ }
+ if (b == min) {
+ if (a != 0 and a != 1) {
+ overflow.* = 1;
+ }
+ return r;
+ }
+
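+    // sa and sb are 0 for non-negative operands and -1 for negative ones;
+    // (x ^ s) -% s negates branchlessly, yielding the absolute values.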
+ const sa = a >> (i128.bit_count - 1);
+ const abs_a = (a ^ sa) -% sa;
+ const sb = b >> (i128.bit_count - 1);
+ const abs_b = (b ^ sb) -% sb;
+
+ if (abs_a < 2 or abs_b < 2) {
+ return r;
+ }
+
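+    // With both magnitudes at least 2, the product overflows exactly when |a|
+    // exceeds the limit for the result's sign: max / |b| for a same-sign product,
+    // |min| / |b| for a mixed-sign one.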
+ if (sa == sb) {
+ if (abs_a > @divFloor(max, abs_b)) {
+ overflow.* = 1;
+ }
+ } else {
+ if (abs_a > @divFloor(min, -abs_b)) {
+ overflow.* = 1;
+ }
+ }
+
+ return r;
+}
+
+pub extern fn __muloti4_windows_x86_64(a: *const i128, b: *const i128, overflow: *c_int) void {
+ @setRuntimeSafety(builtin.is_test);
+ compiler_rt.setXmm0(i128, __muloti4(a.*, b.*, overflow));
+}
+
+test "import muloti4" {
+ _ = @import("muloti4_test.zig");
+}
diff --git a/std/special/compiler_rt/muloti4_test.zig b/std/special/compiler_rt/muloti4_test.zig
new file mode 100644
index 0000000000..6b3671323f
--- /dev/null
+++ b/std/special/compiler_rt/muloti4_test.zig
@@ -0,0 +1,76 @@
+const __muloti4 = @import("muloti4.zig").__muloti4;
+const assert = @import("std").debug.assert;
+
+fn test__muloti4(a: i128, b: i128, expected: i128, expected_overflow: c_int) void {
+ var overflow: c_int = undefined;
+ const x = __muloti4(a, b, &overflow);
+ assert(overflow == expected_overflow and (expected_overflow != 0 or x == expected));
+}
+
+test "muloti4" {
+ test__muloti4(0, 0, 0, 0);
+ test__muloti4(0, 1, 0, 0);
+ test__muloti4(1, 0, 0, 0);
+ test__muloti4(0, 10, 0, 0);
+ test__muloti4(10, 0, 0, 0);
+
+ test__muloti4(0, 81985529216486895, 0, 0);
+ test__muloti4(81985529216486895, 0, 0, 0);
+
+ test__muloti4(0, -1, 0, 0);
+ test__muloti4(-1, 0, 0, 0);
+ test__muloti4(0, -10, 0, 0);
+ test__muloti4(-10, 0, 0, 0);
+ test__muloti4(0, -81985529216486895, 0, 0);
+ test__muloti4(-81985529216486895, 0, 0, 0);
+
+ test__muloti4(3037000499, 3037000499, 9223372030926249001, 0);
+ test__muloti4(-3037000499, 3037000499, -9223372030926249001, 0);
+ test__muloti4(3037000499, -3037000499, -9223372030926249001, 0);
+ test__muloti4(-3037000499, -3037000499, 9223372030926249001, 0);
+
+ test__muloti4(4398046511103, 2097152, 9223372036852678656, 0);
+ test__muloti4(-4398046511103, 2097152, -9223372036852678656, 0);
+ test__muloti4(4398046511103, -2097152, -9223372036852678656, 0);
+ test__muloti4(-4398046511103, -2097152, 9223372036852678656, 0);
+
+ test__muloti4(2097152, 4398046511103, 9223372036852678656, 0);
+ test__muloti4(-2097152, 4398046511103, -9223372036852678656, 0);
+ test__muloti4(2097152, -4398046511103, -9223372036852678656, 0);
+ test__muloti4(-2097152, -4398046511103, 9223372036852678656, 0);
+
+ test__muloti4(@bitCast(i128, u128(0x00000000000000B504F333F9DE5BE000)), @bitCast(i128, u128(0x000000000000000000B504F333F9DE5B)), @bitCast(i128, u128(0x7FFFFFFFFFFFF328DF915DA296E8A000)), 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -2, @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(-2, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -1, @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(-1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0, 0);
+ test__muloti4(0, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 2, @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(2, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), -2, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(-2, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), -1, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(-1, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), 0, 0, 0);
+ test__muloti4(0, @bitCast(i128, u128(0x80000000000000000000000000000000)), 0, 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), 1, @bitCast(i128, u128(0x80000000000000000000000000000000)), 0);
+ test__muloti4(1, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), 2, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(2, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), -2, @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(-2, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), -1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(-1, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), 0, 0, 0);
+ test__muloti4(0, @bitCast(i128, u128(0x80000000000000000000000000000001)), 0, 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), 1, @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(1, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), 2, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(2, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+}
diff --git a/std/special/compiler_rt/truncXfYf2.zig b/std/special/compiler_rt/truncXfYf2.zig
new file mode 100644
index 0000000000..5cb2f61568
--- /dev/null
+++ b/std/special/compiler_rt/truncXfYf2.zig
@@ -0,0 +1,117 @@
+const std = @import("std");
+
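+// __truncsfhf2 returns the f16 result as its raw u16 bit pattern, matching the
+// convention of the __gnu_f2h_ieee alias exported from index.zig.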
+pub extern fn __truncsfhf2(a: f32) u16 {
+ return @bitCast(u16, truncXfYf2(f16, f32, a));
+}
+
+pub extern fn __trunctfsf2(a: f128) f32 {
+ return truncXfYf2(f32, f128, a);
+}
+
+pub extern fn __trunctfdf2(a: f128) f64 {
+ return truncXfYf2(f64, f128, a);
+}
+
+inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
+ const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
+ const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
+ const srcSigBits = std.math.floatMantissaBits(src_t);
+ const dstSigBits = std.math.floatMantissaBits(dst_t);
+ const SrcShift = std.math.Log2Int(src_rep_t);
+ const DstShift = std.math.Log2Int(dst_rep_t);
+
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const srcBits = src_t.bit_count;
+ const srcExpBits = srcBits - srcSigBits - 1;
+ const srcInfExp = (1 << srcExpBits) - 1;
+ const srcExpBias = srcInfExp >> 1;
+
+ const srcMinNormal = 1 << srcSigBits;
+ const srcSignificandMask = srcMinNormal - 1;
+ const srcInfinity = srcInfExp << srcSigBits;
+ const srcSignMask = 1 << (srcSigBits + srcExpBits);
+ const srcAbsMask = srcSignMask - 1;
+ const roundMask = (1 << (srcSigBits - dstSigBits)) - 1;
+ const halfway = 1 << (srcSigBits - dstSigBits - 1);
+ const srcQNaN = 1 << (srcSigBits - 1);
+ const srcNaNCode = srcQNaN - 1;
+
+ const dstBits = dst_t.bit_count;
+ const dstExpBits = dstBits - dstSigBits - 1;
+ const dstInfExp = (1 << dstExpBits) - 1;
+ const dstExpBias = dstInfExp >> 1;
+
+ const underflowExponent = srcExpBias + 1 - dstExpBias;
+ const overflowExponent = srcExpBias + dstInfExp - dstExpBias;
+ const underflow = underflowExponent << srcSigBits;
+ const overflow = overflowExponent << srcSigBits;
+
+ const dstQNaN = 1 << (dstSigBits - 1);
+ const dstNaNCode = dstQNaN - 1;
+
+ // Break a into a sign and representation of the absolute value
+ const aRep: src_rep_t = @bitCast(src_rep_t, a);
+ const aAbs: src_rep_t = aRep & srcAbsMask;
+ const sign: src_rep_t = aRep & srcSignMask;
+ var absResult: dst_rep_t = undefined;
+
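+    // The wrapping subtractions make this comparison true exactly when
+    // underflow <= aAbs < overflow, i.e. when a's exponent lands inside the
+    // destination type's normal range.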
+ if (aAbs -% underflow < aAbs -% overflow) {
+ // The exponent of a is within the range of normal numbers in the
+ // destination format. We can convert by simply right-shifting with
+ // rounding and adjusting the exponent.
+ absResult = @truncate(dst_rep_t, aAbs >> (srcSigBits - dstSigBits));
+ absResult -%= dst_rep_t(srcExpBias - dstExpBias) << dstSigBits;
+
+ const roundBits: src_rep_t = aAbs & roundMask;
+ if (roundBits > halfway) {
+ // Round to nearest
+ absResult += 1;
+ } else if (roundBits == halfway) {
+ // Ties to even
+ absResult += absResult & 1;
+ }
+ } else if (aAbs > srcInfinity) {
+ // a is NaN.
+ // Conjure the result by beginning with infinity, setting the qNaN
+ // bit and inserting the (truncated) trailing NaN field.
+ absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits;
+ absResult |= dstQNaN;
+ absResult |= @intCast(dst_rep_t, ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode);
+ } else if (aAbs >= overflow) {
+ // a overflows to infinity.
+ absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits;
+ } else {
+ // a underflows on conversion to the destination type or is an exact
+ // zero. The result may be a denormal or zero. Extract the exponent
+ // to get the shift amount for the denormalization.
+ const aExp = @intCast(u32, aAbs >> srcSigBits);
+ const shift = @intCast(u32, srcExpBias - dstExpBias - aExp + 1);
+
+ const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal;
+
+ // Right shift by the denormalization amount with sticky.
+ if (shift > srcSigBits) {
+ absResult = 0;
+ } else {
+ const sticky: src_rep_t = significand << @intCast(SrcShift, srcBits - shift);
+ const denormalizedSignificand: src_rep_t = significand >> @intCast(SrcShift, shift) | sticky;
+ absResult = @intCast(dst_rep_t, denormalizedSignificand >> (srcSigBits - dstSigBits));
+ const roundBits: src_rep_t = denormalizedSignificand & roundMask;
+ if (roundBits > halfway) {
+ // Round to nearest
+ absResult += 1;
+ } else if (roundBits == halfway) {
+ // Ties to even
+ absResult += absResult & 1;
+ }
+ }
+ }
+
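+    // Reattach the sign bit, shifted down from the source's sign position to the
+    // destination's, then reinterpret the bits as the destination float type.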
+ const result: dst_rep_t align(@alignOf(dst_t)) = absResult | @truncate(dst_rep_t, sign >> @intCast(SrcShift, srcBits - dstBits));
+ return @bitCast(dst_t, result);
+}
+
+test "import truncXfYf2" {
+ _ = @import("truncXfYf2_test.zig");
+}
diff --git a/std/special/compiler_rt/truncXfYf2_test.zig b/std/special/compiler_rt/truncXfYf2_test.zig
new file mode 100644
index 0000000000..c4bf2db733
--- /dev/null
+++ b/std/special/compiler_rt/truncXfYf2_test.zig
@@ -0,0 +1,134 @@
+const __truncsfhf2 = @import("truncXfYf2.zig").__truncsfhf2;
+
+fn test__truncsfhf2(a: u32, expected: u16) void {
+ const actual = __truncsfhf2(@bitCast(f32, a));
+
+ if (actual == expected) {
+ return;
+ }
+
+ @panic("__truncsfhf2 test failure");
+}
+
+test "truncsfhf2" {
+ test__truncsfhf2(0x7fc00000, 0x7e00); // qNaN
+ test__truncsfhf2(0x7fe00000, 0x7f00); // sNaN
+
+ test__truncsfhf2(0, 0); // 0
+ test__truncsfhf2(0x80000000, 0x8000); // -0
+
+ test__truncsfhf2(0x7f800000, 0x7c00); // inf
+ test__truncsfhf2(0xff800000, 0xfc00); // -inf
+
+ test__truncsfhf2(0x477ff000, 0x7c00); // 65520 -> inf
+ test__truncsfhf2(0xc77ff000, 0xfc00); // -65520 -> -inf
+
+ test__truncsfhf2(0x71cc3892, 0x7c00); // 0x1.987124876876324p+100 -> inf
+ test__truncsfhf2(0xf1cc3892, 0xfc00); // -0x1.987124876876324p+100 -> -inf
+
+ test__truncsfhf2(0x38800000, 0x0400); // normal (min), 2**-14
+ test__truncsfhf2(0xb8800000, 0x8400); // normal (min), -2**-14
+
+ test__truncsfhf2(0x477fe000, 0x7bff); // normal (max), 65504
+ test__truncsfhf2(0xc77fe000, 0xfbff); // normal (max), -65504
+
+ test__truncsfhf2(0x477fe100, 0x7bff); // normal, 65505 -> 65504
+ test__truncsfhf2(0xc77fe100, 0xfbff); // normal, -65505 -> -65504
+
+ test__truncsfhf2(0x477fef00, 0x7bff); // normal, 65519 -> 65504
+ test__truncsfhf2(0xc77fef00, 0xfbff); // normal, -65519 -> -65504
+
+ test__truncsfhf2(0x3f802000, 0x3c01); // normal, 1 + 2**-10
+ test__truncsfhf2(0xbf802000, 0xbc01); // normal, -1 - 2**-10
+
+ test__truncsfhf2(0x3eaaa000, 0x3555); // normal, approx. 1/3
+ test__truncsfhf2(0xbeaaa000, 0xb555); // normal, approx. -1/3
+
+ test__truncsfhf2(0x40490fdb, 0x4248); // normal, 3.1415926535
+ test__truncsfhf2(0xc0490fdb, 0xc248); // normal, -3.1415926535
+
+ test__truncsfhf2(0x45cc3892, 0x6e62); // normal, 0x1.987124876876324p+12
+
+ test__truncsfhf2(0x3f800000, 0x3c00); // normal, 1
+ test__truncsfhf2(0x38800000, 0x0400); // normal, 0x1.0p-14
+
+ test__truncsfhf2(0x33800000, 0x0001); // denormal (min), 2**-24
+ test__truncsfhf2(0xb3800000, 0x8001); // denormal (min), -2**-24
+
+ test__truncsfhf2(0x387fc000, 0x03ff); // denormal (max), 2**-14 - 2**-24
+ test__truncsfhf2(0xb87fc000, 0x83ff); // denormal (max), -2**-14 + 2**-24
+
+ test__truncsfhf2(0x35800000, 0x0010); // denormal, 0x1.0p-20
+ test__truncsfhf2(0x33280000, 0x0001); // denormal, 0x1.5p-25 -> 0x1.0p-24
+ test__truncsfhf2(0x33000000, 0x0000); // 0x1.0p-25 -> zero
+}
+
+const __trunctfsf2 = @import("truncXfYf2.zig").__trunctfsf2;
+
+fn test__trunctfsf2(a: f128, expected: u32) void {
+ const x = __trunctfsf2(a);
+
+ const rep = @bitCast(u32, x);
+ if (rep == expected) {
+ return;
+ }
+    // test other possible NaN representations (signaling NaN)
+ else if (expected == 0x7fc00000) {
+ if ((rep & 0x7f800000) == 0x7f800000 and (rep & 0x7fffff) > 0) {
+ return;
+ }
+ }
+
+ @panic("__trunctfsf2 test failure");
+}
+
+test "trunctfsf2" {
+ // qnan
+ test__trunctfsf2(@bitCast(f128, u128(0x7fff800000000000 << 64)), 0x7fc00000);
+ // nan
+ test__trunctfsf2(@bitCast(f128, u128((0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7fc08000);
+ // inf
+ test__trunctfsf2(@bitCast(f128, u128(0x7fff000000000000 << 64)), 0x7f800000);
+ // zero
+ test__trunctfsf2(0.0, 0x0);
+
+ test__trunctfsf2(0x1.23a2abb4a2ddee355f36789abcdep+5, 0x4211d156);
+ test__trunctfsf2(0x1.e3d3c45bd3abfd98b76a54cc321fp-9, 0x3b71e9e2);
+ test__trunctfsf2(0x1.234eebb5faa678f4488693abcdefp+4534, 0x7f800000);
+ test__trunctfsf2(0x1.edcba9bb8c76a5a43dd21f334634p-435, 0x0);
+}
+
+const __trunctfdf2 = @import("truncXfYf2.zig").__trunctfdf2;
+
+fn test__trunctfdf2(a: f128, expected: u64) void {
+ const x = __trunctfdf2(a);
+
+ const rep = @bitCast(u64, x);
+ if (rep == expected) {
+ return;
+ }
+    // test other possible NaN representations (signaling NaN)
+ else if (expected == 0x7ff8000000000000) {
+ if ((rep & 0x7ff0000000000000) == 0x7ff0000000000000 and (rep & 0xfffffffffffff) > 0) {
+ return;
+ }
+ }
+
+ @panic("__trunctfsf2 test failure");
+}
+
+test "trunctfdf2" {
+ // qnan
+ test__trunctfdf2(@bitCast(f128, u128(0x7fff800000000000 << 64)), 0x7ff8000000000000);
+ // nan
+ test__trunctfdf2(@bitCast(f128, u128((0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7ff8100000000000);
+ // inf
+ test__trunctfdf2(@bitCast(f128, u128(0x7fff000000000000 << 64)), 0x7ff0000000000000);
+ // zero
+ test__trunctfdf2(0.0, 0x0);
+
+ test__trunctfdf2(0x1.af23456789bbaaab347645365cdep+5, 0x404af23456789bbb);
+ test__trunctfdf2(0x1.dedafcff354b6ae9758763545432p-9, 0x3f6dedafcff354b7);
+ test__trunctfdf2(0x1.2f34dd5f437e849b4baab754cdefp+4534, 0x7ff0000000000000);
+ test__trunctfdf2(0x1.edcbff8ad76ab5bf46463233214fp-435, 0x24cedcbff8ad76ab);
+}
diff --git a/std/special/compiler_rt/udivmod.zig b/std/special/compiler_rt/udivmod.zig
index 07eaef583c..e6b4ee0482 100644
--- a/std/special/compiler_rt/udivmod.zig
+++ b/std/special/compiler_rt/udivmod.zig
@@ -1,18 +1,21 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
-const low = switch (builtin.endian) { builtin.Endian.Big => 1, builtin.Endian.Little => 0 };
+const low = switch (builtin.endian) {
+ builtin.Endian.Big => 1,
+ builtin.Endian.Little => 0,
+};
const high = 1 - low;
-pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?&DoubleInt) DoubleInt {
+pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
@setRuntimeSafety(is_test);
const SingleInt = @IntType(false, @divExact(DoubleInt.bit_count, 2));
const SignedDoubleInt = @IntType(true, DoubleInt.bit_count);
- const Log2SingleInt = @import("../../math/index.zig").Log2Int(SingleInt);
+ const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
- const n = *@ptrCast(&const [2]SingleInt, &a); // TODO issue #421
- const d = *@ptrCast(&const [2]SingleInt, &b); // TODO issue #421
+ const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421
+ const d = @ptrCast(*const [2]SingleInt, &b).*; // TODO issue #421
var q: [2]SingleInt = undefined;
var r: [2]SingleInt = undefined;
var sr: c_uint = undefined;
@@ -23,7 +26,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// ---
// 0 X
if (maybe_rem) |rem| {
- *rem = n[low] % d[low];
+ rem.* = n[low] % d[low];
}
return n[low] / d[low];
}
@@ -31,7 +34,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// ---
// K X
if (maybe_rem) |rem| {
- *rem = n[low];
+ rem.* = n[low];
}
return 0;
}
@@ -42,7 +45,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// ---
// 0 0
if (maybe_rem) |rem| {
- *rem = n[high] % d[low];
+ rem.* = n[high] % d[low];
}
return n[high] / d[low];
}
@@ -54,7 +57,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
if (maybe_rem) |rem| {
r[high] = n[high] % d[high];
r[low] = 0;
- *rem = *@ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]); // TODO issue #421
+ rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
return n[high] / d[high];
}
@@ -66,9 +69,9 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
if (maybe_rem) |rem| {
r[low] = n[low];
r[high] = n[high] & (d[high] - 1);
- *rem = *@ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]); // TODO issue #421
+ rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
- return n[high] >> Log2SingleInt(@ctz(d[high]));
+ return n[high] >> @intCast(Log2SingleInt, @ctz(d[high]));
}
// K K
// ---
@@ -77,7 +80,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// 0 <= sr <= SingleInt.bit_count - 2 or sr large
if (sr > SingleInt.bit_count - 2) {
if (maybe_rem) |rem| {
- *rem = a;
+ rem.* = a;
}
return 0;
}
@@ -85,10 +88,10 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// 1 <= sr <= SingleInt.bit_count - 1
// q.all = a << (DoubleInt.bit_count - sr);
q[low] = 0;
- q[high] = n[low] << Log2SingleInt(SingleInt.bit_count - sr);
+ q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
// r.all = a >> sr;
- r[high] = n[high] >> Log2SingleInt(sr);
- r[low] = (n[high] << Log2SingleInt(SingleInt.bit_count - sr)) | (n[low] >> Log2SingleInt(sr));
+ r[high] = n[high] >> @intCast(Log2SingleInt, sr);
+ r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
// d[low] != 0
if (d[high] == 0) {
@@ -98,15 +101,15 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
if ((d[low] & (d[low] - 1)) == 0) {
// d is a power of 2
if (maybe_rem) |rem| {
- *rem = n[low] & (d[low] - 1);
+ rem.* = n[low] & (d[low] - 1);
}
if (d[low] == 1) {
return a;
}
sr = @ctz(d[low]);
- q[high] = n[high] >> Log2SingleInt(sr);
- q[low] = (n[high] << Log2SingleInt(SingleInt.bit_count - sr)) | (n[low] >> Log2SingleInt(sr));
- return *@ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &q[0]); // TODO issue #421
+ q[high] = n[high] >> @intCast(Log2SingleInt, sr);
+ q[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
+ return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
}
// K X
// ---
@@ -123,15 +126,15 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
} else if (sr < SingleInt.bit_count) {
// 2 <= sr <= SingleInt.bit_count - 1
q[low] = 0;
- q[high] = n[low] << Log2SingleInt(SingleInt.bit_count - sr);
- r[high] = n[high] >> Log2SingleInt(sr);
- r[low] = (n[high] << Log2SingleInt(SingleInt.bit_count - sr)) | (n[low] >> Log2SingleInt(sr));
+ q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
+ r[high] = n[high] >> @intCast(Log2SingleInt, sr);
+ r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
// SingleInt.bit_count + 1 <= sr <= DoubleInt.bit_count - 1
- q[low] = n[low] << Log2SingleInt(DoubleInt.bit_count - sr);
- q[high] = (n[high] << Log2SingleInt(DoubleInt.bit_count - sr)) | (n[low] >> Log2SingleInt(sr - SingleInt.bit_count));
+ q[low] = n[low] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr);
+ q[high] = (n[high] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count));
r[high] = 0;
- r[low] = n[high] >> Log2SingleInt(sr - SingleInt.bit_count);
+ r[low] = n[high] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count);
}
} else {
// K X
@@ -141,7 +144,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// 0 <= sr <= SingleInt.bit_count - 1 or sr large
if (sr > SingleInt.bit_count - 1) {
if (maybe_rem) |rem| {
- *rem = a;
+ rem.* = a;
}
return 0;
}
@@ -155,9 +158,9 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
r[high] = 0;
r[low] = n[high];
} else {
- r[high] = n[high] >> Log2SingleInt(sr);
- r[low] = (n[high] << Log2SingleInt(SingleInt.bit_count - sr)) | (n[low] >> Log2SingleInt(sr));
- q[high] = n[low] << Log2SingleInt(SingleInt.bit_count - sr);
+ r[high] = n[high] >> @intCast(Log2SingleInt, sr);
+ r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
+ q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
}
}
}
@@ -170,25 +173,25 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
var r_all: DoubleInt = undefined;
while (sr > 0) : (sr -= 1) {
// r:q = ((r:q) << 1) | carry
- r[high] = (r[high] << 1) | (r[low] >> (SingleInt.bit_count - 1));
- r[low] = (r[low] << 1) | (q[high] >> (SingleInt.bit_count - 1));
- q[high] = (q[high] << 1) | (q[low] >> (SingleInt.bit_count - 1));
- q[low] = (q[low] << 1) | carry;
+ r[high] = (r[high] << 1) | (r[low] >> (SingleInt.bit_count - 1));
+ r[low] = (r[low] << 1) | (q[high] >> (SingleInt.bit_count - 1));
+ q[high] = (q[high] << 1) | (q[low] >> (SingleInt.bit_count - 1));
+ q[low] = (q[low] << 1) | carry;
// carry = 0;
// if (r.all >= b)
// {
// r.all -= b;
// carry = 1;
// }
- r_all = *@ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]); // TODO issue #421
- const s: SignedDoubleInt = SignedDoubleInt(b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
- carry = u32(s & 1);
+ r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
+ const s: SignedDoubleInt = @intCast(SignedDoubleInt, b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
+ carry = @intCast(u32, s & 1);
r_all -= b & @bitCast(DoubleInt, s);
- r = *@ptrCast(&[2]SingleInt, &r_all); // TODO issue #421
+ r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421
}
- const q_all = ((*@ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &q[0])) << 1) | carry; // TODO issue #421
+ const q_all = ((@ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*) << 1) | carry; // TODO issue #421
if (maybe_rem) |rem| {
- *rem = r_all;
+ rem.* = r_all;
}
return q_all;
}
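
Aside from the mechanical pointer-syntax migration (`*rem` → `rem.*`, `&T` → `*T`) and the switch from bare `Type(x)` casts to `@intCast`, the algorithm is unchanged: after the special cases, `udivmod` runs a classic bit-at-a-time restoring division over the double-wide value. A minimal Python sketch of that core loop, under the simplifying assumption that all `bits` dividend bits are shifted in rather than only `sr` of them (names illustrative, not the compiler_rt API):

```python
def udivmod(a: int, b: int, bits: int = 128):
    """Bit-at-a-time restoring division of an unsigned `bits`-wide dividend.

    Returns (quotient, remainder). Illustrative only: the compiler_rt version
    stores the value as a high/low SingleInt pair and pre-shifts by `sr`.
    """
    assert b != 0
    mask = (1 << bits) - 1
    q = a & mask              # dividend starts in q; quotient bits shift in from the right
    r = 0                     # running remainder, always < b after each step
    for _ in range(bits):
        # r:q = (r:q) << 1, moving the top bit of q into r
        r = (r << 1) | (q >> (bits - 1))
        q = (q << 1) & mask
        # if the partial remainder covers the divisor, subtract and record a 1 bit
        if r >= b:
            r -= b
            q |= 1
    return q, r

assert udivmod(1000, 7) == (142, 6)
```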
diff --git a/std/special/compiler_rt/udivmoddi4.zig b/std/special/compiler_rt/udivmoddi4.zig
index 6cc54bb6bf..de86c845e5 100644
--- a/std/special/compiler_rt/udivmoddi4.zig
+++ b/std/special/compiler_rt/udivmoddi4.zig
@@ -1,7 +1,7 @@
const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
-pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?&u64) u64 {
+pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?*u64) u64 {
@setRuntimeSafety(builtin.is_test);
return udivmod(u64, a, b, maybe_rem);
}
diff --git a/std/special/compiler_rt/udivmoddi4_test.zig b/std/special/compiler_rt/udivmoddi4_test.zig
index 324626d3f9..34b9dda1ea 100644
--- a/std/special/compiler_rt/udivmoddi4_test.zig
+++ b/std/special/compiler_rt/udivmoddi4_test.zig
@@ -1,3 +1,5 @@
+// Disable formatting to avoid unnecessary source repository bloat.
+// zig fmt: off
const __udivmoddi4 = @import("udivmoddi4.zig").__udivmoddi4;
const assert = @import("std").debug.assert;
diff --git a/std/special/compiler_rt/udivmodti4.zig b/std/special/compiler_rt/udivmodti4.zig
index 196d067aef..3fa596442f 100644
--- a/std/special/compiler_rt/udivmodti4.zig
+++ b/std/special/compiler_rt/udivmodti4.zig
@@ -1,11 +1,17 @@
const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
+const compiler_rt = @import("index.zig");
-pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?&u128) u128 {
+pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) u128 {
@setRuntimeSafety(builtin.is_test);
return udivmod(u128, a, b, maybe_rem);
}
+pub extern fn __udivmodti4_windows_x86_64(a: *const u128, b: *const u128, maybe_rem: ?*u128) void {
+ @setRuntimeSafety(builtin.is_test);
+ compiler_rt.setXmm0(u128, udivmod(u128, a.*, b.*, maybe_rem));
+}
+
test "import udivmodti4" {
_ = @import("udivmodti4_test.zig");
}
diff --git a/std/special/compiler_rt/udivmodti4_test.zig b/std/special/compiler_rt/udivmodti4_test.zig
index 48d65b43c6..f6b370c26e 100644
--- a/std/special/compiler_rt/udivmodti4_test.zig
+++ b/std/special/compiler_rt/udivmodti4_test.zig
@@ -1,3 +1,5 @@
+// Disable formatting to avoid unnecessary source repository bloat.
+// zig fmt: off
const __udivmodti4 = @import("udivmodti4.zig").__udivmodti4;
const assert = @import("std").debug.assert;
diff --git a/std/special/compiler_rt/udivti3.zig b/std/special/compiler_rt/udivti3.zig
index eaecbac4d2..510e21ac1d 100644
--- a/std/special/compiler_rt/udivti3.zig
+++ b/std/special/compiler_rt/udivti3.zig
@@ -1,7 +1,12 @@
-const __udivmodti4 = @import("udivmodti4.zig").__udivmodti4;
+const udivmodti4 = @import("udivmodti4.zig");
const builtin = @import("builtin");
pub extern fn __udivti3(a: u128, b: u128) u128 {
@setRuntimeSafety(builtin.is_test);
- return __udivmodti4(a, b, null);
+ return udivmodti4.__udivmodti4(a, b, null);
+}
+
+pub extern fn __udivti3_windows_x86_64(a: *const u128, b: *const u128) void {
+ @setRuntimeSafety(builtin.is_test);
+ udivmodti4.__udivmodti4_windows_x86_64(a, b, null);
}
diff --git a/std/special/compiler_rt/umodti3.zig b/std/special/compiler_rt/umodti3.zig
index 26b306efa9..9551e63a6f 100644
--- a/std/special/compiler_rt/umodti3.zig
+++ b/std/special/compiler_rt/umodti3.zig
@@ -1,9 +1,15 @@
-const __udivmodti4 = @import("udivmodti4.zig").__udivmodti4;
+const udivmodti4 = @import("udivmodti4.zig");
const builtin = @import("builtin");
+const compiler_rt = @import("index.zig");
pub extern fn __umodti3(a: u128, b: u128) u128 {
@setRuntimeSafety(builtin.is_test);
var r: u128 = undefined;
- _ = __udivmodti4(a, b, &r);
+ _ = udivmodti4.__udivmodti4(a, b, &r);
return r;
}
+
+pub extern fn __umodti3_windows_x86_64(a: *const u128, b: *const u128) void {
+ @setRuntimeSafety(builtin.is_test);
+ compiler_rt.setXmm0(u128, __umodti3(a.*, b.*));
+}
diff --git a/std/special/panic.zig b/std/special/panic.zig
index 8f933ddd97..ca1caea73c 100644
--- a/std/special/panic.zig
+++ b/std/special/panic.zig
@@ -6,7 +6,7 @@
const builtin = @import("builtin");
const std = @import("std");
-pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
@setCold(true);
switch (builtin.os) {
// TODO: fix panic in zen.
diff --git a/std/special/test_runner.zig b/std/special/test_runner.zig
index 76a54a5018..857739e82d 100644
--- a/std/special/test_runner.zig
+++ b/std/special/test_runner.zig
@@ -5,11 +5,25 @@ const test_fn_list = builtin.__zig_test_fn_slice;
const warn = std.debug.warn;
pub fn main() !void {
+ var ok_count: usize = 0;
+ var skip_count: usize = 0;
for (test_fn_list) |test_fn, i| {
warn("Test {}/{} {}...", i + 1, test_fn_list.len, test_fn.name);
- try test_fn.func();
-
- warn("OK\n");
+ if (test_fn.func()) |_| {
+ ok_count += 1;
+ warn("OK\n");
+ } else |err| switch (err) {
+ error.SkipZigTest => {
+ skip_count += 1;
+ warn("SKIP\n");
+ },
+ else => return err,
+ }
+ }
+ if (ok_count == test_fn_list.len) {
+ warn("All tests passed.\n");
+ } else {
+ warn("{} passed; {} skipped.\n", ok_count, skip_count);
}
}
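
The reworked test runner catches `error.SkipZigTest` so a test counts as skipped instead of failing the whole run, and prints a summary at the end. A rough Python sketch of the same control flow, with a hypothetical `SkipTest` exception standing in for the Zig error:

```python
class SkipTest(Exception):
    """Stand-in for Zig's error.SkipZigTest."""

def run_tests(tests):
    """tests: list of (name, callable). Any exception other than SkipTest fails the run."""
    ok = skipped = 0
    for i, (name, func) in enumerate(tests, 1):
        print(f"Test {i}/{len(tests)} {name}...", end="")
        try:
            func()
        except SkipTest:
            skipped += 1
            print("SKIP")
            continue
        ok += 1
        print("OK")
    if ok == len(tests):
        print("All tests passed.")
    else:
        print(f"{ok} passed; {skipped} skipped.")
```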
diff --git a/std/unicode.zig b/std/unicode.zig
index 356df824f0..8a9d4a9214 100644
--- a/std/unicode.zig
+++ b/std/unicode.zig
@@ -1,5 +1,18 @@
const std = @import("./index.zig");
+const builtin = @import("builtin");
const debug = std.debug;
+const assert = std.debug.assert;
+const mem = std.mem;
+
+/// Returns how many bytes the UTF-8 representation would require
+/// for the given codepoint.
+pub fn utf8CodepointSequenceLength(c: u32) !u3 {
+ if (c < 0x80) return u3(1);
+ if (c < 0x800) return u3(2);
+ if (c < 0x10000) return u3(3);
+ if (c < 0x110000) return u3(4);
+ return error.CodepointTooLarge;
+}
/// Given the first byte of a UTF-8 codepoint,
/// returns a number 1-4 indicating the total length of the codepoint in bytes.
@@ -12,11 +25,48 @@ pub fn utf8ByteSequenceLength(first_byte: u8) !u3 {
return error.Utf8InvalidStartByte;
}
+/// Encodes the given codepoint into a UTF-8 byte sequence.
+/// c: the codepoint.
+/// out: the buffer to write to; its length must be at least utf8CodepointSequenceLength(c).
+/// Errors: if c cannot be encoded in UTF-8.
+/// Returns: the number of bytes written to out.
+pub fn utf8Encode(c: u32, out: []u8) !u3 {
+ const length = try utf8CodepointSequenceLength(c);
+ debug.assert(out.len >= length);
+ switch (length) {
+ // The pattern for each length is the same:
+ // - the initial shift increases by 6 for each extra byte
+ // - every byte after the first keeps only the low 6 bits
+ //   of the shifted value (0b111111 == 63)
+ 1 => out[0] = @intCast(u8, c), // Can just do 0 + codepoint for initial range
+ 2 => {
+ out[0] = @intCast(u8, 0b11000000 | (c >> 6));
+ out[1] = @intCast(u8, 0b10000000 | (c & 0b111111));
+ },
+ 3 => {
+ if (0xd800 <= c and c <= 0xdfff) return error.Utf8CannotEncodeSurrogateHalf;
+ out[0] = @intCast(u8, 0b11100000 | (c >> 12));
+ out[1] = @intCast(u8, 0b10000000 | ((c >> 6) & 0b111111));
+ out[2] = @intCast(u8, 0b10000000 | (c & 0b111111));
+ },
+ 4 => {
+ out[0] = @intCast(u8, 0b11110000 | (c >> 18));
+ out[1] = @intCast(u8, 0b10000000 | ((c >> 12) & 0b111111));
+ out[2] = @intCast(u8, 0b10000000 | ((c >> 6) & 0b111111));
+ out[3] = @intCast(u8, 0b10000000 | (c & 0b111111));
+ },
+ else => unreachable,
+ }
+ return length;
+}
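
For reference, the bit layout the switch above produces for each encoded length, as a minimal Python sketch (same shifts and 6-bit masks; illustrative, not part of the patch):

```python
def utf8_encode(c: int) -> bytes:
    """Encode a codepoint with the same bit layout as utf8Encode above."""
    if c < 0x80:
        return bytes([c])                         # 0xxxxxxx
    if c < 0x800:
        return bytes([0xC0 | (c >> 6),
                      0x80 | (c & 0x3F)])         # 110xxxxx 10xxxxxx
    if c < 0x10000:
        if 0xD800 <= c <= 0xDFFF:
            raise ValueError("surrogate half")
        return bytes([0xE0 | (c >> 12),
                      0x80 | ((c >> 6) & 0x3F),
                      0x80 | (c & 0x3F)])         # 1110xxxx 10xxxxxx 10xxxxxx
    if c < 0x110000:
        return bytes([0xF0 | (c >> 18),
                      0x80 | ((c >> 12) & 0x3F),
                      0x80 | ((c >> 6) & 0x3F),
                      0x80 | (c & 0x3F)])         # 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
    raise ValueError("codepoint too large")

assert utf8_encode(0x20AC) == "€".encode()  # 3 bytes, as in the tests below
```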
+
+const Utf8DecodeError = Utf8Decode2Error || Utf8Decode3Error || Utf8Decode4Error;
+
/// Decodes the UTF-8 codepoint encoded in the given slice of bytes.
/// bytes.len must be equal to utf8ByteSequenceLength(bytes[0]) catch unreachable.
/// If you already know the length at comptime, you can call one of
/// utf8Decode2,utf8Decode3,utf8Decode4 directly instead of this function.
-pub fn utf8Decode(bytes: []const u8) !u32 {
+pub fn utf8Decode(bytes: []const u8) Utf8DecodeError!u32 {
return switch (bytes.len) {
1 => u32(bytes[0]),
2 => utf8Decode2(bytes),
@@ -25,7 +75,12 @@ pub fn utf8Decode(bytes: []const u8) !u32 {
else => unreachable,
};
}
-pub fn utf8Decode2(bytes: []const u8) !u32 {
+
+const Utf8Decode2Error = error{
+ Utf8ExpectedContinuation,
+ Utf8OverlongEncoding,
+};
+pub fn utf8Decode2(bytes: []const u8) Utf8Decode2Error!u32 {
debug.assert(bytes.len == 2);
debug.assert(bytes[0] & 0b11100000 == 0b11000000);
var value: u32 = bytes[0] & 0b00011111;
@@ -38,7 +93,13 @@ pub fn utf8Decode2(bytes: []const u8) !u32 {
return value;
}
-pub fn utf8Decode3(bytes: []const u8) !u32 {
+
+const Utf8Decode3Error = error{
+ Utf8ExpectedContinuation,
+ Utf8OverlongEncoding,
+ Utf8EncodesSurrogateHalf,
+};
+pub fn utf8Decode3(bytes: []const u8) Utf8Decode3Error!u32 {
debug.assert(bytes.len == 3);
debug.assert(bytes[0] & 0b11110000 == 0b11100000);
var value: u32 = bytes[0] & 0b00001111;
@@ -56,7 +117,13 @@ pub fn utf8Decode3(bytes: []const u8) !u32 {
return value;
}
-pub fn utf8Decode4(bytes: []const u8) !u32 {
+
+const Utf8Decode4Error = error{
+ Utf8ExpectedContinuation,
+ Utf8OverlongEncoding,
+ Utf8CodepointTooLarge,
+};
+pub fn utf8Decode4(bytes: []const u8) Utf8Decode4Error!u32 {
debug.assert(bytes.len == 4);
debug.assert(bytes[0] & 0b11111000 == 0b11110000);
var value: u32 = bytes[0] & 0b00000111;
@@ -87,7 +154,9 @@ pub fn utf8ValidateSlice(s: []const u8) bool {
return false;
}
- if (utf8Decode(s[i..i+cp_len])) |_| {} else |_| { return false; }
+ if (utf8Decode(s[i .. i + cp_len])) |_| {} else |_| {
+ return false;
+ }
i += cp_len;
} else |err| {
return false;
@@ -116,9 +185,7 @@ pub const Utf8View = struct {
}
pub fn initUnchecked(s: []const u8) Utf8View {
- return Utf8View {
- .bytes = s,
- };
+ return Utf8View{ .bytes = s };
}
pub fn initComptime(comptime s: []const u8) Utf8View {
@@ -128,12 +195,12 @@ pub const Utf8View = struct {
error.InvalidUtf8 => {
@compileError("invalid utf8");
unreachable;
- }
+ },
}
}
- pub fn iterator(s: &const Utf8View) Utf8Iterator {
- return Utf8Iterator {
+ pub fn iterator(s: *const Utf8View) Utf8Iterator {
+ return Utf8Iterator{
.bytes = s.bytes,
.i = 0,
};
@@ -144,7 +211,7 @@ const Utf8Iterator = struct {
bytes: []const u8,
i: usize,
- pub fn nextCodepointSlice(it: &Utf8Iterator) ?[]const u8 {
+ pub fn nextCodepointSlice(it: *Utf8Iterator) ?[]const u8 {
if (it.i >= it.bytes.len) {
return null;
}
@@ -152,65 +219,128 @@ const Utf8Iterator = struct {
const cp_len = utf8ByteSequenceLength(it.bytes[it.i]) catch unreachable;
it.i += cp_len;
- return it.bytes[it.i-cp_len..it.i];
+ return it.bytes[it.i - cp_len .. it.i];
}
- pub fn nextCodepoint(it: &Utf8Iterator) ?u32 {
- const slice = it.nextCodepointSlice() ?? return null;
+ pub fn nextCodepoint(it: *Utf8Iterator) ?u32 {
+ const slice = it.nextCodepointSlice() orelse return null;
- const r = switch (slice.len) {
- 1 => u32(slice[0]),
- 2 => utf8Decode2(slice),
- 3 => utf8Decode3(slice),
- 4 => utf8Decode4(slice),
+ switch (slice.len) {
+ 1 => return u32(slice[0]),
+ 2 => return utf8Decode2(slice) catch unreachable,
+ 3 => return utf8Decode3(slice) catch unreachable,
+ 4 => return utf8Decode4(slice) catch unreachable,
else => unreachable,
- };
-
- return r catch unreachable;
+ }
}
};
+test "utf8 encode" {
+ comptime testUtf8Encode() catch unreachable;
+ try testUtf8Encode();
+}
+fn testUtf8Encode() !void {
+ // A few examples taken from Wikipedia, a few from elsewhere.
+ var array: [4]u8 = undefined;
+ debug.assert((try utf8Encode(try utf8Decode("€"), array[0..])) == 3);
+ debug.assert(array[0] == 0b11100010);
+ debug.assert(array[1] == 0b10000010);
+ debug.assert(array[2] == 0b10101100);
+
+ debug.assert((try utf8Encode(try utf8Decode("$"), array[0..])) == 1);
+ debug.assert(array[0] == 0b00100100);
+
+ debug.assert((try utf8Encode(try utf8Decode("¢"), array[0..])) == 2);
+ debug.assert(array[0] == 0b11000010);
+ debug.assert(array[1] == 0b10100010);
+
+ debug.assert((try utf8Encode(try utf8Decode("𐍈"), array[0..])) == 4);
+ debug.assert(array[0] == 0b11110000);
+ debug.assert(array[1] == 0b10010000);
+ debug.assert(array[2] == 0b10001101);
+ debug.assert(array[3] == 0b10001000);
+}
+
+test "utf8 encode error" {
+ comptime testUtf8EncodeError();
+ testUtf8EncodeError();
+}
+fn testUtf8EncodeError() void {
+ var array: [4]u8 = undefined;
+ testErrorEncode(0xd800, array[0..], error.Utf8CannotEncodeSurrogateHalf);
+ testErrorEncode(0xdfff, array[0..], error.Utf8CannotEncodeSurrogateHalf);
+ testErrorEncode(0x110000, array[0..], error.CodepointTooLarge);
+ testErrorEncode(0xffffffff, array[0..], error.CodepointTooLarge);
+}
+
+fn testErrorEncode(codePoint: u32, array: []u8, expectedErr: error) void {
+ if (utf8Encode(codePoint, array)) |_| {
+ unreachable;
+ } else |err| {
+ debug.assert(err == expectedErr);
+ }
+}
+
test "utf8 iterator on ascii" {
+ comptime testUtf8IteratorOnAscii();
+ testUtf8IteratorOnAscii();
+}
+fn testUtf8IteratorOnAscii() void {
const s = Utf8View.initComptime("abc");
var it1 = s.iterator();
- debug.assert(std.mem.eql(u8, "a", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "b", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "c", ??it1.nextCodepointSlice()));
+ debug.assert(std.mem.eql(u8, "a", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "b", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "c", it1.nextCodepointSlice().?));
debug.assert(it1.nextCodepointSlice() == null);
var it2 = s.iterator();
- debug.assert(??it2.nextCodepoint() == 'a');
- debug.assert(??it2.nextCodepoint() == 'b');
- debug.assert(??it2.nextCodepoint() == 'c');
+ debug.assert(it2.nextCodepoint().? == 'a');
+ debug.assert(it2.nextCodepoint().? == 'b');
+ debug.assert(it2.nextCodepoint().? == 'c');
debug.assert(it2.nextCodepoint() == null);
}
test "utf8 view bad" {
+ comptime testUtf8ViewBad();
+ testUtf8ViewBad();
+}
+fn testUtf8ViewBad() void {
// Compile-time error.
// const s3 = Utf8View.initComptime("\xfe\xf2");
-
const s = Utf8View.init("hel\xadlo");
- if (s) |_| { unreachable; } else |err| { debug.assert(err == error.InvalidUtf8); }
+ if (s) |_| {
+ unreachable;
+ } else |err| {
+ debug.assert(err == error.InvalidUtf8);
+ }
}
test "utf8 view ok" {
+ comptime testUtf8ViewOk();
+ testUtf8ViewOk();
+}
+fn testUtf8ViewOk() void {
const s = Utf8View.initComptime("東京市");
var it1 = s.iterator();
- debug.assert(std.mem.eql(u8, "東", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "京", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "市", ??it1.nextCodepointSlice()));
+ debug.assert(std.mem.eql(u8, "東", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "京", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "市", it1.nextCodepointSlice().?));
debug.assert(it1.nextCodepointSlice() == null);
var it2 = s.iterator();
- debug.assert(??it2.nextCodepoint() == 0x6771);
- debug.assert(??it2.nextCodepoint() == 0x4eac);
- debug.assert(??it2.nextCodepoint() == 0x5e02);
+ debug.assert(it2.nextCodepoint().? == 0x6771);
+ debug.assert(it2.nextCodepoint().? == 0x4eac);
+ debug.assert(it2.nextCodepoint().? == 0x5e02);
debug.assert(it2.nextCodepoint() == null);
}
test "bad utf8 slice" {
+ comptime testBadUtf8Slice();
+ testBadUtf8Slice();
+}
+fn testBadUtf8Slice() void {
debug.assert(utf8ValidateSlice("abc"));
debug.assert(!utf8ValidateSlice("abc\xc0"));
debug.assert(!utf8ValidateSlice("abc\xc0abc"));
@@ -218,6 +348,10 @@ test "bad utf8 slice" {
}
test "valid utf8" {
+ comptime testValidUtf8();
+ testValidUtf8();
+}
+fn testValidUtf8() void {
testValid("\x00", 0x0);
testValid("\x20", 0x20);
testValid("\x7f", 0x7f);
@@ -233,6 +367,10 @@ test "valid utf8" {
}
test "invalid utf8 continuation bytes" {
+ comptime testInvalidUtf8ContinuationBytes();
+ testInvalidUtf8ContinuationBytes();
+}
+fn testInvalidUtf8ContinuationBytes() void {
// unexpected continuation
testError("\x80", error.Utf8InvalidStartByte);
testError("\xbf", error.Utf8InvalidStartByte);
@@ -261,6 +399,10 @@ test "invalid utf8 continuation bytes" {
}
test "overlong utf8 codepoint" {
+ comptime testOverlongUtf8Codepoint();
+ testOverlongUtf8Codepoint();
+}
+fn testOverlongUtf8Codepoint() void {
testError("\xc0\x80", error.Utf8OverlongEncoding);
testError("\xc1\xbf", error.Utf8OverlongEncoding);
testError("\xe0\x80\x80", error.Utf8OverlongEncoding);
@@ -270,6 +412,10 @@ test "overlong utf8 codepoint" {
}
test "misc invalid utf8" {
+ comptime testMiscInvalidUtf8();
+ testMiscInvalidUtf8();
+}
+fn testMiscInvalidUtf8() void {
// codepoint out of bounds
testError("\xf4\x90\x80\x80", error.Utf8CodepointTooLarge);
testError("\xf7\xbf\xbf\xbf", error.Utf8CodepointTooLarge);
@@ -298,3 +444,89 @@ fn testDecode(bytes: []const u8) !u32 {
debug.assert(bytes.len == length);
return utf8Decode(bytes);
}
+
+// TODO: make this API on top of a non-allocating Utf16LeView
+pub fn utf16leToUtf8(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
+ var result = std.ArrayList(u8).init(allocator);
+ // Optimistically guess that it will all be ASCII.
+ try result.ensureCapacity(utf16le.len);
+
+ const utf16le_as_bytes = @sliceToBytes(utf16le);
+ var i: usize = 0;
+ var out_index: usize = 0;
+ while (i < utf16le_as_bytes.len) : (i += 2) {
+ // decode
+ const c0: u32 = mem.readIntLE(u16, utf16le_as_bytes[i..i + 2]);
+ var codepoint: u32 = undefined;
+ if (c0 & ~u32(0x03ff) == 0xd800) {
+ // surrogate pair
+ i += 2;
+ if (i >= utf16le_as_bytes.len) return error.DanglingSurrogateHalf;
+ const c1: u32 = mem.readIntLE(u16, utf16le_as_bytes[i..i + 2]);
+ if (c1 & ~u32(0x03ff) != 0xdc00) return error.ExpectedSecondSurrogateHalf;
+ codepoint = 0x10000 + (((c0 & 0x03ff) << 10) | (c1 & 0x03ff));
+ } else if (c0 & ~u32(0x03ff) == 0xdc00) {
+ return error.UnexpectedSecondSurrogateHalf;
+ } else {
+ codepoint = c0;
+ }
+
+ // encode
+ const utf8_len = utf8CodepointSequenceLength(codepoint) catch unreachable;
+ try result.resize(result.len + utf8_len);
+ _ = utf8Encode(codepoint, result.items[out_index..]) catch unreachable;
+ out_index += utf8_len;
+ }
+
+ return result.toOwnedSlice();
+}
+
+test "utf16leToUtf8" {
+ var utf16le: [2]u16 = undefined;
+ const utf16le_as_bytes = @sliceToBytes(utf16le[0..]);
+
+ {
+ mem.writeInt(utf16le_as_bytes[0..], u16('A'), builtin.Endian.Little);
+ mem.writeInt(utf16le_as_bytes[2..], u16('a'), builtin.Endian.Little);
+ const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ assert(mem.eql(u8, utf8, "Aa"));
+ }
+
+ {
+ mem.writeInt(utf16le_as_bytes[0..], u16(0x80), builtin.Endian.Little);
+ mem.writeInt(utf16le_as_bytes[2..], u16(0xffff), builtin.Endian.Little);
+ const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ assert(mem.eql(u8, utf8, "\xc2\x80" ++ "\xef\xbf\xbf"));
+ }
+
+ {
+ // the values just outside the surrogate half range
+ mem.writeInt(utf16le_as_bytes[0..], u16(0xd7ff), builtin.Endian.Little);
+ mem.writeInt(utf16le_as_bytes[2..], u16(0xe000), builtin.Endian.Little);
+ const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ assert(mem.eql(u8, utf8, "\xed\x9f\xbf" ++ "\xee\x80\x80"));
+ }
+
+ {
+ // smallest surrogate pair
+ mem.writeInt(utf16le_as_bytes[0..], u16(0xd800), builtin.Endian.Little);
+ mem.writeInt(utf16le_as_bytes[2..], u16(0xdc00), builtin.Endian.Little);
+ const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ assert(mem.eql(u8, utf8, "\xf0\x90\x80\x80"));
+ }
+
+ {
+ // largest surrogate pair
+ mem.writeInt(utf16le_as_bytes[0..], u16(0xdbff), builtin.Endian.Little);
+ mem.writeInt(utf16le_as_bytes[2..], u16(0xdfff), builtin.Endian.Little);
+ const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ assert(mem.eql(u8, utf8, "\xf4\x8f\xbf\xbf"));
+ }
+
+ {
+ mem.writeInt(utf16le_as_bytes[0..], u16(0xdbff), builtin.Endian.Little);
+ mem.writeInt(utf16le_as_bytes[2..], u16(0xdc00), builtin.Endian.Little);
+ const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ assert(mem.eql(u8, utf8, "\xf4\x8f\xb0\x80"));
+ }
+}
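
The new `utf16leToUtf8` reads the input two bytes at a time, folds high/low surrogate pairs back into a single codepoint, and re-encodes with `utf8Encode`, erroring on dangling or out-of-order surrogate halves. A Python sketch of the same decoding rule (illustrative only, not the Zig API):

```python
import struct

def utf16le_to_codepoints(data: bytes):
    """Decode UTF-16LE into codepoints, combining surrogate pairs."""
    units = [struct.unpack_from("<H", data, i)[0] for i in range(0, len(data), 2)]
    i = 0
    while i < len(units):
        c0 = units[i]
        i += 1
        if (c0 & ~0x03FF) == 0xD800:          # high surrogate half
            if i >= len(units):
                raise ValueError("dangling surrogate half")
            c1 = units[i]
            i += 1
            if (c1 & ~0x03FF) != 0xDC00:      # must be followed by a low surrogate
                raise ValueError("expected second surrogate half")
            yield 0x10000 + (((c0 & 0x03FF) << 10) | (c1 & 0x03FF))
        elif (c0 & ~0x03FF) == 0xDC00:
            raise ValueError("unexpected second surrogate half")
        else:
            yield c0

# smallest surrogate pair, as in the test above: D800 DC00 -> U+10000
assert list(utf16le_to_codepoints(b"\x00\xd8\x00\xdc")) == [0x10000]
```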
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index 17a19e6213..95e899fb92 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -1,12 +1,256 @@
const std = @import("../index.zig");
const assert = std.debug.assert;
-const ArrayList = std.ArrayList;
-const Token = std.zig.Token;
+const SegmentedList = std.SegmentedList;
const mem = std.mem;
+const Token = std.zig.Token;
+
+pub const TokenIndex = usize;
+
+pub const Tree = struct {
+ source: []const u8,
+ tokens: TokenList,
+ root_node: *Node.Root,
+ arena_allocator: std.heap.ArenaAllocator,
+ errors: ErrorList,
+
+ pub const TokenList = SegmentedList(Token, 64);
+ pub const ErrorList = SegmentedList(Error, 0);
+
+ pub fn deinit(self: *Tree) void {
+ self.arena_allocator.deinit();
+ }
+
+ pub fn renderError(self: *Tree, parse_error: *Error, stream: var) !void {
+ return parse_error.render(&self.tokens, stream);
+ }
+
+ pub fn tokenSlice(self: *Tree, token_index: TokenIndex) []const u8 {
+ return self.tokenSlicePtr(self.tokens.at(token_index));
+ }
+
+ pub fn tokenSlicePtr(self: *Tree, token: *const Token) []const u8 {
+ return self.source[token.start..token.end];
+ }
+
+ pub const Location = struct {
+ line: usize,
+ column: usize,
+ line_start: usize,
+ line_end: usize,
+ };
+
+ pub fn tokenLocationPtr(self: *Tree, start_index: usize, token: *const Token) Location {
+ var loc = Location{
+ .line = 0,
+ .column = 0,
+ .line_start = start_index,
+ .line_end = self.source.len,
+ };
+ const token_start = token.start;
+ for (self.source[start_index..]) |c, i| {
+ if (i + start_index == token_start) {
+ loc.line_end = i + start_index;
+ while (loc.line_end < self.source.len and self.source[loc.line_end] != '\n') : (loc.line_end += 1) {}
+ return loc;
+ }
+ if (c == '\n') {
+ loc.line += 1;
+ loc.column = 0;
+ loc.line_start = i + 1;
+ } else {
+ loc.column += 1;
+ }
+ }
+ return loc;
+ }
+
+ pub fn tokenLocation(self: *Tree, start_index: usize, token_index: TokenIndex) Location {
+ return self.tokenLocationPtr(start_index, self.tokens.at(token_index));
+ }
+
+ pub fn tokensOnSameLine(self: *Tree, token1_index: TokenIndex, token2_index: TokenIndex) bool {
+ return self.tokensOnSameLinePtr(self.tokens.at(token1_index), self.tokens.at(token2_index));
+ }
+
+ pub fn tokensOnSameLinePtr(self: *Tree, token1: *const Token, token2: *const Token) bool {
+ return mem.indexOfScalar(u8, self.source[token1.end..token2.start], '\n') == null;
+ }
+
+ pub fn dump(self: *Tree) void {
+ self.root_node.base.dump(0);
+ }
+
+ /// Skips over comments
+ pub fn prevToken(self: *Tree, token_index: TokenIndex) TokenIndex {
+ var index = token_index - 1;
+ while (self.tokens.at(index).id == Token.Id.LineComment) {
+ index -= 1;
+ }
+ return index;
+ }
+
+ /// Skips over comments
+ pub fn nextToken(self: *Tree, token_index: TokenIndex) TokenIndex {
+ var index = token_index + 1;
+ while (self.tokens.at(index).id == Token.Id.LineComment) {
+ index += 1;
+ }
+ return index;
+ }
+};
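
`tokenLocationPtr` computes a token's line and column by scanning the source from `start_index`, resetting the column at each newline, and extending `line_end` to the end of the token's line. A minimal Python sketch of that scan (illustrative names; the Zig version counts bytes rather than characters):

```python
def token_location(source: str, start_index: int, token_start: int):
    """Return (line, column, line_start, line_end) for the offset token_start."""
    line = column = 0
    line_start = start_index
    line_end = len(source)
    for i in range(start_index, len(source)):
        if i == token_start:
            # extend line_end to the next newline (or end of source)
            line_end = i
            while line_end < len(source) and source[line_end] != "\n":
                line_end += 1
            return line, column, line_start, line_end
        if source[i] == "\n":
            line += 1
            column = 0
            line_start = i + 1
        else:
            column += 1
    return line, column, line_start, line_end

assert token_location("const x = 1;\nvar y", 0, 13) == (1, 0, 13, 18)
```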
+
+pub const Error = union(enum) {
+ InvalidToken: InvalidToken,
+ ExpectedVarDeclOrFn: ExpectedVarDeclOrFn,
+ ExpectedAggregateKw: ExpectedAggregateKw,
+ UnattachedDocComment: UnattachedDocComment,
+ ExpectedEqOrSemi: ExpectedEqOrSemi,
+ ExpectedSemiOrLBrace: ExpectedSemiOrLBrace,
+ ExpectedColonOrRParen: ExpectedColonOrRParen,
+ ExpectedLabelable: ExpectedLabelable,
+ ExpectedInlinable: ExpectedInlinable,
+ ExpectedAsmOutputReturnOrType: ExpectedAsmOutputReturnOrType,
+ ExpectedCall: ExpectedCall,
+ ExpectedCallOrFnProto: ExpectedCallOrFnProto,
+ ExpectedSliceOrRBracket: ExpectedSliceOrRBracket,
+ ExtraAlignQualifier: ExtraAlignQualifier,
+ ExtraConstQualifier: ExtraConstQualifier,
+ ExtraVolatileQualifier: ExtraVolatileQualifier,
+ ExpectedPrimaryExpr: ExpectedPrimaryExpr,
+ ExpectedToken: ExpectedToken,
+ ExpectedCommaOrEnd: ExpectedCommaOrEnd,
+
+ pub fn render(self: *const Error, tokens: *Tree.TokenList, stream: var) !void {
+ switch (self.*) {
+ // TODO https://github.com/ziglang/zig/issues/683
+ @TagType(Error).InvalidToken => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedVarDeclOrFn => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedAggregateKw => |*x| return x.render(tokens, stream),
+ @TagType(Error).UnattachedDocComment => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedEqOrSemi => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedSemiOrLBrace => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedColonOrRParen => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedLabelable => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedInlinable => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedAsmOutputReturnOrType => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedCall => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedCallOrFnProto => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedSliceOrRBracket => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExtraAlignQualifier => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExtraConstQualifier => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExtraVolatileQualifier => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedPrimaryExpr => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedToken => |*x| return x.render(tokens, stream),
+ @TagType(Error).ExpectedCommaOrEnd => |*x| return x.render(tokens, stream),
+ }
+ }
+
+ pub fn loc(self: *const Error) TokenIndex {
+ switch (self.*) {
+ // TODO https://github.com/ziglang/zig/issues/683
+ @TagType(Error).InvalidToken => |x| return x.token,
+ @TagType(Error).ExpectedVarDeclOrFn => |x| return x.token,
+ @TagType(Error).ExpectedAggregateKw => |x| return x.token,
+ @TagType(Error).UnattachedDocComment => |x| return x.token,
+ @TagType(Error).ExpectedEqOrSemi => |x| return x.token,
+ @TagType(Error).ExpectedSemiOrLBrace => |x| return x.token,
+ @TagType(Error).ExpectedColonOrRParen => |x| return x.token,
+ @TagType(Error).ExpectedLabelable => |x| return x.token,
+ @TagType(Error).ExpectedInlinable => |x| return x.token,
+ @TagType(Error).ExpectedAsmOutputReturnOrType => |x| return x.token,
+ @TagType(Error).ExpectedCall => |x| return x.node.firstToken(),
+ @TagType(Error).ExpectedCallOrFnProto => |x| return x.node.firstToken(),
+ @TagType(Error).ExpectedSliceOrRBracket => |x| return x.token,
+ @TagType(Error).ExtraAlignQualifier => |x| return x.token,
+ @TagType(Error).ExtraConstQualifier => |x| return x.token,
+ @TagType(Error).ExtraVolatileQualifier => |x| return x.token,
+ @TagType(Error).ExpectedPrimaryExpr => |x| return x.token,
+ @TagType(Error).ExpectedToken => |x| return x.token,
+ @TagType(Error).ExpectedCommaOrEnd => |x| return x.token,
+ }
+ }
+
+ pub const InvalidToken = SingleTokenError("Invalid token {}");
+ pub const ExpectedVarDeclOrFn = SingleTokenError("Expected variable declaration or function, found {}");
+ pub const ExpectedAggregateKw = SingleTokenError("Expected " ++ @tagName(Token.Id.Keyword_struct) ++ ", " ++ @tagName(Token.Id.Keyword_union) ++ ", or " ++ @tagName(Token.Id.Keyword_enum) ++ ", found {}");
+ pub const ExpectedEqOrSemi = SingleTokenError("Expected '=' or ';', found {}");
+ pub const ExpectedSemiOrLBrace = SingleTokenError("Expected ';' or '{{', found {}");
+ pub const ExpectedColonOrRParen = SingleTokenError("Expected ':' or ')', found {}");
+ pub const ExpectedLabelable = SingleTokenError("Expected 'while', 'for', 'inline', 'suspend', or '{{', found {}");
+ pub const ExpectedInlinable = SingleTokenError("Expected 'while' or 'for', found {}");
+ pub const ExpectedAsmOutputReturnOrType = SingleTokenError("Expected '->' or " ++ @tagName(Token.Id.Identifier) ++ ", found {}");
+ pub const ExpectedSliceOrRBracket = SingleTokenError("Expected ']' or '..', found {}");
+ pub const ExpectedPrimaryExpr = SingleTokenError("Expected primary expression, found {}");
+
+ pub const UnattachedDocComment = SimpleError("Unattached documentation comment");
+ pub const ExtraAlignQualifier = SimpleError("Extra align qualifier");
+ pub const ExtraConstQualifier = SimpleError("Extra const qualifier");
+ pub const ExtraVolatileQualifier = SimpleError("Extra volatile qualifier");
+
+ pub const ExpectedCall = struct {
+ node: *Node,
+
+ pub fn render(self: *const ExpectedCall, tokens: *Tree.TokenList, stream: var) !void {
+ return stream.print("expected " ++ @tagName(@TagType(Node.SuffixOp.Op).Call) ++ ", found {}", @tagName(self.node.id));
+ }
+ };
+
+ pub const ExpectedCallOrFnProto = struct {
+ node: *Node,
+
+ pub fn render(self: *const ExpectedCallOrFnProto, tokens: *Tree.TokenList, stream: var) !void {
+ return stream.print("expected " ++ @tagName(@TagType(Node.SuffixOp.Op).Call) ++ " or " ++ @tagName(Node.Id.FnProto) ++ ", found {}", @tagName(self.node.id));
+ }
+ };
+
+ pub const ExpectedToken = struct {
+ token: TokenIndex,
+ expected_id: @TagType(Token.Id),
+
+ pub fn render(self: *const ExpectedToken, tokens: *Tree.TokenList, stream: var) !void {
+ const token_name = @tagName(tokens.at(self.token).id);
+ return stream.print("expected {}, found {}", @tagName(self.expected_id), token_name);
+ }
+ };
+
+ pub const ExpectedCommaOrEnd = struct {
+ token: TokenIndex,
+ end_id: @TagType(Token.Id),
+
+ pub fn render(self: *const ExpectedCommaOrEnd, tokens: *Tree.TokenList, stream: var) !void {
+ const token_name = @tagName(tokens.at(self.token).id);
+ return stream.print("expected ',' or {}, found {}", @tagName(self.end_id), token_name);
+ }
+ };
+
+ fn SingleTokenError(comptime msg: []const u8) type {
+ return struct {
+ const ThisError = this;
+
+ token: TokenIndex,
+
+ pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void {
+ const token_name = @tagName(tokens.at(self.token).id);
+ return stream.print(msg, token_name);
+ }
+ };
+ }
+
+ fn SimpleError(comptime msg: []const u8) type {
+ return struct {
+ const ThisError = this;
+
+ token: TokenIndex,
+
+ pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void {
+ return stream.write(msg);
+ }
+ };
+ }
+};
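
`SingleTokenError` and `SimpleError` are comptime functions that return a fresh struct type per error message, so each variant of `Error` carries only a token index and renders itself with its own message. As a loose analogy only (not the Zig mechanism), a Python factory returning a class parameterized by the message looks like this:

```python
def single_token_error(msg: str):
    """Rough analogue of ast.zig's SingleTokenError: a type parameterized by msg."""
    class ThisError:
        def __init__(self, token: int):
            self.token = token

        def render(self, tokens) -> str:
            # format the message with the name of the offending token
            return msg.format(tokens[self.token])

    return ThisError

InvalidToken = single_token_error("Invalid token {}")
print(InvalidToken(3).render(["a", "b", "c", "Keyword_fn"]))  # Invalid token Keyword_fn
```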
pub const Node = struct {
id: Id,
- comment: ?&NodeLineComment,
pub const Id = enum {
// Top level
@@ -35,6 +279,7 @@ pub const Node = struct {
VarType,
ErrorType,
FnProto,
+ PromiseType,
// Primary expressions
IntegerLiteral,
@@ -57,7 +302,7 @@ pub const Node = struct {
Block,
// Misc
- LineComment,
+ DocComment,
SwitchCase,
SwitchElse,
Else,
@@ -67,6 +312,7 @@ pub const Node = struct {
StructField,
UnionTag,
EnumTag,
+ ErrorTag,
AsmInput,
AsmOutput,
AsyncAttribute,
@@ -74,1750 +320,1887 @@ pub const Node = struct {
FieldInitializer,
};
- const IdTypePair = struct {
- id: Id,
- Type: type,
- };
-
- // TODO: When @field exists, we could generate this by iterating over all members of `Id`,
- // and making an array of `IdTypePair { .id = @field(Id, @memberName(Id, i)), .Type = @field(ast, "Node" ++ @memberName(Id, i)) }`
- const idTypeTable = []IdTypePair {
- IdTypePair { .id = Id.Root, .Type = NodeRoot },
- IdTypePair { .id = Id.Use, .Type = NodeUse },
- IdTypePair { .id = Id.TestDecl, .Type = NodeTestDecl },
-
- IdTypePair { .id = Id.VarDecl, .Type = NodeVarDecl },
- IdTypePair { .id = Id.Defer, .Type = NodeDefer },
-
- IdTypePair { .id = Id.InfixOp, .Type = NodeInfixOp },
- IdTypePair { .id = Id.PrefixOp, .Type = NodePrefixOp },
- IdTypePair { .id = Id.SuffixOp, .Type = NodeSuffixOp },
-
- IdTypePair { .id = Id.Switch, .Type = NodeSwitch },
- IdTypePair { .id = Id.While, .Type = NodeWhile },
- IdTypePair { .id = Id.For, .Type = NodeFor },
- IdTypePair { .id = Id.If, .Type = NodeIf },
- IdTypePair { .id = Id.ControlFlowExpression, .Type = NodeControlFlowExpression },
- IdTypePair { .id = Id.Suspend, .Type = NodeSuspend },
-
- IdTypePair { .id = Id.VarType, .Type = NodeVarType },
- IdTypePair { .id = Id.ErrorType, .Type = NodeErrorType },
- IdTypePair { .id = Id.FnProto, .Type = NodeFnProto },
-
- IdTypePair { .id = Id.IntegerLiteral, .Type = NodeIntegerLiteral },
- IdTypePair { .id = Id.FloatLiteral, .Type = NodeFloatLiteral },
- IdTypePair { .id = Id.StringLiteral, .Type = NodeStringLiteral },
- IdTypePair { .id = Id.MultilineStringLiteral, .Type = NodeMultilineStringLiteral },
- IdTypePair { .id = Id.CharLiteral, .Type = NodeCharLiteral },
- IdTypePair { .id = Id.BoolLiteral, .Type = NodeBoolLiteral },
- IdTypePair { .id = Id.NullLiteral, .Type = NodeNullLiteral },
- IdTypePair { .id = Id.UndefinedLiteral, .Type = NodeUndefinedLiteral },
- IdTypePair { .id = Id.ThisLiteral, .Type = NodeThisLiteral },
- IdTypePair { .id = Id.Unreachable, .Type = NodeUnreachable },
- IdTypePair { .id = Id.Identifier, .Type = NodeIdentifier },
- IdTypePair { .id = Id.GroupedExpression, .Type = NodeGroupedExpression },
- IdTypePair { .id = Id.BuiltinCall, .Type = NodeBuiltinCall },
- IdTypePair { .id = Id.ErrorSetDecl, .Type = NodeErrorSetDecl },
- IdTypePair { .id = Id.ContainerDecl, .Type = NodeContainerDecl },
- IdTypePair { .id = Id.Asm, .Type = NodeAsm },
- IdTypePair { .id = Id.Comptime, .Type = NodeComptime },
- IdTypePair { .id = Id.Block, .Type = NodeBlock },
-
- IdTypePair { .id = Id.LineComment, .Type = NodeLineComment },
- IdTypePair { .id = Id.SwitchCase, .Type = NodeSwitchCase },
- IdTypePair { .id = Id.SwitchElse, .Type = NodeSwitchElse },
- IdTypePair { .id = Id.Else, .Type = NodeElse },
- IdTypePair { .id = Id.Payload, .Type = NodePayload },
- IdTypePair { .id = Id.PointerPayload, .Type = NodePointerPayload },
- IdTypePair { .id = Id.PointerIndexPayload, .Type = NodePointerIndexPayload },
- IdTypePair { .id = Id.StructField, .Type = NodeStructField },
- IdTypePair { .id = Id.UnionTag, .Type = NodeUnionTag },
- IdTypePair { .id = Id.EnumTag, .Type = NodeEnumTag },
- IdTypePair { .id = Id.AsmInput, .Type = NodeAsmInput },
- IdTypePair { .id = Id.AsmOutput, .Type = NodeAsmOutput },
- IdTypePair { .id = Id.AsyncAttribute, .Type = NodeAsyncAttribute },
- IdTypePair { .id = Id.ParamDecl, .Type = NodeParamDecl },
- IdTypePair { .id = Id.FieldInitializer, .Type = NodeFieldInitializer },
- };
-
- pub fn IdToType(comptime id: Id) type {
- inline for (idTypeTable) |id_type_pair| {
- if (id == id_type_pair.id)
- return id_type_pair.Type;
+ pub fn cast(base: *Node, comptime T: type) ?*T {
+ if (base.id == comptime typeToId(T)) {
+ return @fieldParentPtr(T, "base", base);
}
+ return null;
+ }
+ pub fn iterate(base: *Node, index: usize) ?*Node {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (base.id == @field(Id, @memberName(Id, i))) {
+ const T = @field(Node, @memberName(Id, i));
+ return @fieldParentPtr(T, "base", base).iterate(index);
+ }
+ }
+ unreachable;
+ }
+
+ pub fn firstToken(base: *Node) TokenIndex {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (base.id == @field(Id, @memberName(Id, i))) {
+ const T = @field(Node, @memberName(Id, i));
+ return @fieldParentPtr(T, "base", base).firstToken();
+ }
+ }
+ unreachable;
+ }
+
+ pub fn lastToken(base: *Node) TokenIndex {
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (base.id == @field(Id, @memberName(Id, i))) {
+ const T = @field(Node, @memberName(Id, i));
+ return @fieldParentPtr(T, "base", base).lastToken();
+ }
+ }
unreachable;
}
pub fn typeToId(comptime T: type) Id {
- inline for (idTypeTable) |id_type_pair| {
- if (T == id_type_pair.Type)
- return id_type_pair.id;
- }
-
- unreachable;
- }
-
- pub fn iterate(base: &Node, index: usize) ?&Node {
- inline for (idTypeTable) |id_type_pair| {
- if (base.id == id_type_pair.id)
- return @fieldParentPtr(id_type_pair.Type, "base", base).iterate(index);
- }
-
- unreachable;
- }
-
- pub fn firstToken(base: &Node) Token {
- inline for (idTypeTable) |id_type_pair| {
- if (base.id == id_type_pair.id)
- return @fieldParentPtr(id_type_pair.Type, "base", base).firstToken();
- }
-
- unreachable;
- }
-
- pub fn lastToken(base: &Node) Token {
- inline for (idTypeTable) |id_type_pair| {
- if (base.id == id_type_pair.id)
- return @fieldParentPtr(id_type_pair.Type, "base", base).lastToken();
- }
-
- unreachable;
- }
-};
-
-pub const NodeRoot = struct {
- base: Node,
- decls: ArrayList(&Node),
- eof_token: Token,
-
- pub fn iterate(self: &NodeRoot, index: usize) ?&Node {
- if (index < self.decls.len) {
- return self.decls.items[self.decls.len - index - 1];
- }
- return null;
- }
-
- pub fn firstToken(self: &NodeRoot) Token {
- return if (self.decls.len == 0) self.eof_token else self.decls.at(0).firstToken();
- }
-
- pub fn lastToken(self: &NodeRoot) Token {
- return if (self.decls.len == 0) self.eof_token else self.decls.at(self.decls.len - 1).lastToken();
- }
-};
-
-pub const NodeVarDecl = struct {
- base: Node,
- visib_token: ?Token,
- name_token: Token,
- eq_token: Token,
- mut_token: Token,
- comptime_token: ?Token,
- extern_export_token: ?Token,
- lib_name: ?&Node,
- type_node: ?&Node,
- align_node: ?&Node,
- init_node: ?&Node,
- semicolon_token: Token,
-
- pub fn iterate(self: &NodeVarDecl, index: usize) ?&Node {
- var i = index;
-
- if (self.type_node) |type_node| {
- if (i < 1) return type_node;
- i -= 1;
- }
-
- if (self.align_node) |align_node| {
- if (i < 1) return align_node;
- i -= 1;
- }
-
- if (self.init_node) |init_node| {
- if (i < 1) return init_node;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeVarDecl) Token {
- if (self.visib_token) |visib_token| return visib_token;
- if (self.comptime_token) |comptime_token| return comptime_token;
- if (self.extern_export_token) |extern_export_token| return extern_export_token;
- assert(self.lib_name == null);
- return self.mut_token;
- }
-
- pub fn lastToken(self: &NodeVarDecl) Token {
- return self.semicolon_token;
- }
-};
-
-pub const NodeUse = struct {
- base: Node,
- visib_token: ?Token,
- expr: &Node,
- semicolon_token: Token,
-
- pub fn iterate(self: &NodeUse, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.expr;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeUse) Token {
- if (self.visib_token) |visib_token| return visib_token;
- return self.expr.firstToken();
- }
-
- pub fn lastToken(self: &NodeUse) Token {
- return self.semicolon_token;
- }
-};
-
-pub const NodeErrorSetDecl = struct {
- base: Node,
- error_token: Token,
- decls: ArrayList(&Node),
- rbrace_token: Token,
-
- pub fn iterate(self: &NodeErrorSetDecl, index: usize) ?&Node {
- var i = index;
-
- if (i < self.decls.len) return self.decls.at(i);
- i -= self.decls.len;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeErrorSetDecl) Token {
- return self.error_token;
- }
-
- pub fn lastToken(self: &NodeErrorSetDecl) Token {
- return self.rbrace_token;
- }
-};
-
-pub const NodeContainerDecl = struct {
- base: Node,
- ltoken: Token,
- layout: Layout,
- kind: Kind,
- init_arg_expr: InitArg,
- fields_and_decls: ArrayList(&Node),
- rbrace_token: Token,
-
- const Layout = enum {
- Auto,
- Extern,
- Packed,
- };
-
- const Kind = enum {
- Struct,
- Enum,
- Union,
- };
-
- const InitArg = union(enum) {
- None,
- Enum,
- Type: &Node,
- };
-
- pub fn iterate(self: &NodeContainerDecl, index: usize) ?&Node {
- var i = index;
-
- switch (self.init_arg_expr) {
- InitArg.Type => |t| {
- if (i < 1) return t;
- i -= 1;
- },
- InitArg.None,
- InitArg.Enum => { }
- }
-
- if (i < self.fields_and_decls.len) return self.fields_and_decls.at(i);
- i -= self.fields_and_decls.len;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeContainerDecl) Token {
- return self.ltoken;
- }
-
- pub fn lastToken(self: &NodeContainerDecl) Token {
- return self.rbrace_token;
- }
-};
-
-pub const NodeStructField = struct {
- base: Node,
- visib_token: ?Token,
- name_token: Token,
- type_expr: &Node,
-
- pub fn iterate(self: &NodeStructField, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.type_expr;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeStructField) Token {
- if (self.visib_token) |visib_token| return visib_token;
- return self.name_token;
- }
-
- pub fn lastToken(self: &NodeStructField) Token {
- return self.type_expr.lastToken();
- }
-};
-
-pub const NodeUnionTag = struct {
- base: Node,
- name_token: Token,
- type_expr: ?&Node,
-
- pub fn iterate(self: &NodeUnionTag, index: usize) ?&Node {
- var i = index;
-
- if (self.type_expr) |type_expr| {
- if (i < 1) return type_expr;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeUnionTag) Token {
- return self.name_token;
- }
-
- pub fn lastToken(self: &NodeUnionTag) Token {
- if (self.type_expr) |type_expr| {
- return type_expr.lastToken();
- }
-
- return self.name_token;
- }
-};
-
-pub const NodeEnumTag = struct {
- base: Node,
- name_token: Token,
- value: ?&Node,
-
- pub fn iterate(self: &NodeEnumTag, index: usize) ?&Node {
- var i = index;
-
- if (self.value) |value| {
- if (i < 1) return value;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeEnumTag) Token {
- return self.name_token;
- }
-
- pub fn lastToken(self: &NodeEnumTag) Token {
- if (self.value) |value| {
- return value.lastToken();
- }
-
- return self.name_token;
- }
-};
-
-pub const NodeIdentifier = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeIdentifier, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeIdentifier) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeIdentifier) Token {
- return self.token;
- }
-};
-
-pub const NodeAsyncAttribute = struct {
- base: Node,
- async_token: Token,
- allocator_type: ?&Node,
- rangle_bracket: ?Token,
-
- pub fn iterate(self: &NodeAsyncAttribute, index: usize) ?&Node {
- var i = index;
-
- if (self.allocator_type) |allocator_type| {
- if (i < 1) return allocator_type;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeAsyncAttribute) Token {
- return self.async_token;
- }
-
- pub fn lastToken(self: &NodeAsyncAttribute) Token {
- if (self.rangle_bracket) |rangle_bracket| {
- return rangle_bracket;
- }
-
- return self.async_token;
- }
-};
-
-pub const NodeFnProto = struct {
- base: Node,
- visib_token: ?Token,
- fn_token: Token,
- name_token: ?Token,
- params: ArrayList(&Node),
- return_type: ReturnType,
- var_args_token: ?Token,
- extern_export_inline_token: ?Token,
- cc_token: ?Token,
- async_attr: ?&NodeAsyncAttribute,
- body_node: ?&Node,
- lib_name: ?&Node, // populated if this is an extern declaration
- align_expr: ?&Node, // populated if align(A) is present
-
- pub const ReturnType = union(enum) {
- Explicit: &Node,
- InferErrorSet: &Node,
- };
-
- pub fn iterate(self: &NodeFnProto, index: usize) ?&Node {
- var i = index;
-
- if (self.body_node) |body_node| {
- if (i < 1) return body_node;
- i -= 1;
- }
-
- switch (self.return_type) {
- // TODO allow this and next prong to share bodies since the types are the same
- ReturnType.Explicit => |node| {
- if (i < 1) return node;
- i -= 1;
- },
- ReturnType.InferErrorSet => |node| {
- if (i < 1) return node;
- i -= 1;
- },
- }
-
- if (self.align_expr) |align_expr| {
- if (i < 1) return align_expr;
- i -= 1;
- }
-
- if (i < self.params.len) return self.params.items[self.params.len - i - 1];
- i -= self.params.len;
-
- if (self.lib_name) |lib_name| {
- if (i < 1) return lib_name;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeFnProto) Token {
- if (self.visib_token) |visib_token| return visib_token;
- if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
- assert(self.lib_name == null);
- if (self.cc_token) |cc_token| return cc_token;
- return self.fn_token;
- }
-
- pub fn lastToken(self: &NodeFnProto) Token {
- if (self.body_node) |body_node| return body_node.lastToken();
- switch (self.return_type) {
- // TODO allow this and next prong to share bodies since the types are the same
- ReturnType.Explicit => |node| return node.lastToken(),
- ReturnType.InferErrorSet => |node| return node.lastToken(),
- }
- }
-};
-
-pub const NodeParamDecl = struct {
- base: Node,
- comptime_token: ?Token,
- noalias_token: ?Token,
- name_token: ?Token,
- type_node: &Node,
- var_args_token: ?Token,
-
- pub fn iterate(self: &NodeParamDecl, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.type_node;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeParamDecl) Token {
- if (self.comptime_token) |comptime_token| return comptime_token;
- if (self.noalias_token) |noalias_token| return noalias_token;
- if (self.name_token) |name_token| return name_token;
- return self.type_node.firstToken();
- }
-
- pub fn lastToken(self: &NodeParamDecl) Token {
- if (self.var_args_token) |var_args_token| return var_args_token;
- return self.type_node.lastToken();
- }
-};
-
-pub const NodeBlock = struct {
- base: Node,
- label: ?Token,
- lbrace: Token,
- statements: ArrayList(&Node),
- rbrace: Token,
-
- pub fn iterate(self: &NodeBlock, index: usize) ?&Node {
- var i = index;
-
- if (i < self.statements.len) return self.statements.items[i];
- i -= self.statements.len;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeBlock) Token {
- if (self.label) |label| {
- return label;
- }
-
- return self.lbrace;
- }
-
- pub fn lastToken(self: &NodeBlock) Token {
- return self.rbrace;
- }
-};
-
-pub const NodeDefer = struct {
- base: Node,
- defer_token: Token,
- kind: Kind,
- expr: &Node,
-
- const Kind = enum {
- Error,
- Unconditional,
- };
-
- pub fn iterate(self: &NodeDefer, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.expr;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeDefer) Token {
- return self.defer_token;
- }
-
- pub fn lastToken(self: &NodeDefer) Token {
- return self.expr.lastToken();
- }
-};
-
-pub const NodeComptime = struct {
- base: Node,
- comptime_token: Token,
- expr: &Node,
-
- pub fn iterate(self: &NodeComptime, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.expr;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeComptime) Token {
- return self.comptime_token;
- }
-
- pub fn lastToken(self: &NodeComptime) Token {
- return self.expr.lastToken();
- }
-};
-
-pub const NodePayload = struct {
- base: Node,
- lpipe: Token,
- error_symbol: &Node,
- rpipe: Token,
-
- pub fn iterate(self: &NodePayload, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.error_symbol;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodePayload) Token {
- return self.lpipe;
- }
-
- pub fn lastToken(self: &NodePayload) Token {
- return self.rpipe;
- }
-};
-
-pub const NodePointerPayload = struct {
- base: Node,
- lpipe: Token,
- ptr_token: ?Token,
- value_symbol: &Node,
- rpipe: Token,
-
- pub fn iterate(self: &NodePointerPayload, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.value_symbol;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodePointerPayload) Token {
- return self.lpipe;
- }
-
- pub fn lastToken(self: &NodePointerPayload) Token {
- return self.rpipe;
- }
-};
-
-pub const NodePointerIndexPayload = struct {
- base: Node,
- lpipe: Token,
- ptr_token: ?Token,
- value_symbol: &Node,
- index_symbol: ?&Node,
- rpipe: Token,
-
- pub fn iterate(self: &NodePointerIndexPayload, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.value_symbol;
- i -= 1;
-
- if (self.index_symbol) |index_symbol| {
- if (i < 1) return index_symbol;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodePointerIndexPayload) Token {
- return self.lpipe;
- }
-
- pub fn lastToken(self: &NodePointerIndexPayload) Token {
- return self.rpipe;
- }
-};
-
-pub const NodeElse = struct {
- base: Node,
- else_token: Token,
- payload: ?&Node,
- body: &Node,
-
- pub fn iterate(self: &NodeElse, index: usize) ?&Node {
- var i = index;
-
- if (self.payload) |payload| {
- if (i < 1) return payload;
- i -= 1;
- }
-
- if (i < 1) return self.body;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeElse) Token {
- return self.else_token;
- }
-
- pub fn lastToken(self: &NodeElse) Token {
- return self.body.lastToken();
- }
-};
-
-pub const NodeSwitch = struct {
- base: Node,
- switch_token: Token,
- expr: &Node,
- cases: ArrayList(&NodeSwitchCase),
- rbrace: Token,
-
- pub fn iterate(self: &NodeSwitch, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.expr;
- i -= 1;
-
- if (i < self.cases.len) return &self.cases.at(i).base;
- i -= self.cases.len;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeSwitch) Token {
- return self.switch_token;
- }
-
- pub fn lastToken(self: &NodeSwitch) Token {
- return self.rbrace;
- }
-};
-
-pub const NodeSwitchCase = struct {
- base: Node,
- items: ArrayList(&Node),
- payload: ?&Node,
- expr: &Node,
-
- pub fn iterate(self: &NodeSwitchCase, index: usize) ?&Node {
- var i = index;
-
- if (i < self.items.len) return self.items.at(i);
- i -= self.items.len;
-
- if (self.payload) |payload| {
- if (i < 1) return payload;
- i -= 1;
- }
-
- if (i < 1) return self.expr;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeSwitchCase) Token {
- return self.items.at(0).firstToken();
- }
-
- pub fn lastToken(self: &NodeSwitchCase) Token {
- return self.expr.lastToken();
- }
-};
-
-pub const NodeSwitchElse = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeSwitchElse, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeSwitchElse) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeSwitchElse) Token {
- return self.token;
- }
-};
-
-pub const NodeWhile = struct {
- base: Node,
- label: ?Token,
- inline_token: ?Token,
- while_token: Token,
- condition: &Node,
- payload: ?&Node,
- continue_expr: ?&Node,
- body: &Node,
- @"else": ?&NodeElse,
-
- pub fn iterate(self: &NodeWhile, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.condition;
- i -= 1;
-
- if (self.payload) |payload| {
- if (i < 1) return payload;
- i -= 1;
- }
-
- if (self.continue_expr) |continue_expr| {
- if (i < 1) return continue_expr;
- i -= 1;
- }
-
- if (i < 1) return self.body;
- i -= 1;
-
- if (self.@"else") |@"else"| {
- if (i < 1) return &@"else".base;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeWhile) Token {
- if (self.label) |label| {
- return label;
- }
-
- if (self.inline_token) |inline_token| {
- return inline_token;
- }
-
- return self.while_token;
- }
-
- pub fn lastToken(self: &NodeWhile) Token {
- if (self.@"else") |@"else"| {
- return @"else".body.lastToken();
- }
-
- return self.body.lastToken();
- }
-};
-
-pub const NodeFor = struct {
- base: Node,
- label: ?Token,
- inline_token: ?Token,
- for_token: Token,
- array_expr: &Node,
- payload: ?&Node,
- body: &Node,
- @"else": ?&NodeElse,
-
- pub fn iterate(self: &NodeFor, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.array_expr;
- i -= 1;
-
- if (self.payload) |payload| {
- if (i < 1) return payload;
- i -= 1;
- }
-
- if (i < 1) return self.body;
- i -= 1;
-
- if (self.@"else") |@"else"| {
- if (i < 1) return &@"else".base;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeFor) Token {
- if (self.label) |label| {
- return label;
- }
-
- if (self.inline_token) |inline_token| {
- return inline_token;
- }
-
- return self.for_token;
- }
-
- pub fn lastToken(self: &NodeFor) Token {
- if (self.@"else") |@"else"| {
- return @"else".body.lastToken();
- }
-
- return self.body.lastToken();
- }
-};
-
-pub const NodeIf = struct {
- base: Node,
- if_token: Token,
- condition: &Node,
- payload: ?&Node,
- body: &Node,
- @"else": ?&NodeElse,
-
- pub fn iterate(self: &NodeIf, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.condition;
- i -= 1;
-
- if (self.payload) |payload| {
- if (i < 1) return payload;
- i -= 1;
- }
-
- if (i < 1) return self.body;
- i -= 1;
-
- if (self.@"else") |@"else"| {
- if (i < 1) return &@"else".base;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeIf) Token {
- return self.if_token;
- }
-
- pub fn lastToken(self: &NodeIf) Token {
- if (self.@"else") |@"else"| {
- return @"else".body.lastToken();
- }
-
- return self.body.lastToken();
- }
-};
-
-pub const NodeInfixOp = struct {
- base: Node,
- op_token: Token,
- lhs: &Node,
- op: InfixOp,
- rhs: &Node,
-
- const InfixOp = union(enum) {
- Add,
- AddWrap,
- ArrayCat,
- ArrayMult,
- Assign,
- AssignBitAnd,
- AssignBitOr,
- AssignBitShiftLeft,
- AssignBitShiftRight,
- AssignBitXor,
- AssignDiv,
- AssignMinus,
- AssignMinusWrap,
- AssignMod,
- AssignPlus,
- AssignPlusWrap,
- AssignTimes,
- AssignTimesWarp,
- BangEqual,
- BitAnd,
- BitOr,
- BitShiftLeft,
- BitShiftRight,
- BitXor,
- BoolAnd,
- BoolOr,
- Catch: ?&Node,
- Div,
- EqualEqual,
- ErrorUnion,
- GreaterOrEqual,
- GreaterThan,
- LessOrEqual,
- LessThan,
- MergeErrorSets,
- Mod,
- Mult,
- MultWrap,
- Period,
- Range,
- Sub,
- SubWrap,
- UnwrapMaybe,
- };
-
- pub fn iterate(self: &NodeInfixOp, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.lhs;
- i -= 1;
-
- switch (self.op) {
- InfixOp.Catch => |maybe_payload| {
- if (maybe_payload) |payload| {
- if (i < 1) return payload;
- i -= 1;
- }
- },
-
- InfixOp.Add,
- InfixOp.AddWrap,
- InfixOp.ArrayCat,
- InfixOp.ArrayMult,
- InfixOp.Assign,
- InfixOp.AssignBitAnd,
- InfixOp.AssignBitOr,
- InfixOp.AssignBitShiftLeft,
- InfixOp.AssignBitShiftRight,
- InfixOp.AssignBitXor,
- InfixOp.AssignDiv,
- InfixOp.AssignMinus,
- InfixOp.AssignMinusWrap,
- InfixOp.AssignMod,
- InfixOp.AssignPlus,
- InfixOp.AssignPlusWrap,
- InfixOp.AssignTimes,
- InfixOp.AssignTimesWarp,
- InfixOp.BangEqual,
- InfixOp.BitAnd,
- InfixOp.BitOr,
- InfixOp.BitShiftLeft,
- InfixOp.BitShiftRight,
- InfixOp.BitXor,
- InfixOp.BoolAnd,
- InfixOp.BoolOr,
- InfixOp.Div,
- InfixOp.EqualEqual,
- InfixOp.ErrorUnion,
- InfixOp.GreaterOrEqual,
- InfixOp.GreaterThan,
- InfixOp.LessOrEqual,
- InfixOp.LessThan,
- InfixOp.MergeErrorSets,
- InfixOp.Mod,
- InfixOp.Mult,
- InfixOp.MultWrap,
- InfixOp.Period,
- InfixOp.Range,
- InfixOp.Sub,
- InfixOp.SubWrap,
- InfixOp.UnwrapMaybe => {},
- }
-
- if (i < 1) return self.rhs;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeInfixOp) Token {
- return self.lhs.firstToken();
- }
-
- pub fn lastToken(self: &NodeInfixOp) Token {
- return self.rhs.lastToken();
- }
-};
-
-pub const NodePrefixOp = struct {
- base: Node,
- op_token: Token,
- op: PrefixOp,
- rhs: &Node,
-
- const PrefixOp = union(enum) {
- AddrOf: AddrOfInfo,
- ArrayType: &Node,
- Await,
- BitNot,
- BoolNot,
- Cancel,
- Deref,
- MaybeType,
- Negation,
- NegationWrap,
- Resume,
- SliceType: AddrOfInfo,
- Try,
- UnwrapMaybe,
- };
-
- const AddrOfInfo = struct {
- align_expr: ?&Node,
- bit_offset_start_token: ?Token,
- bit_offset_end_token: ?Token,
- const_token: ?Token,
- volatile_token: ?Token,
- };
-
- pub fn iterate(self: &NodePrefixOp, index: usize) ?&Node {
- var i = index;
-
- switch (self.op) {
- PrefixOp.SliceType => |addr_of_info| {
- if (addr_of_info.align_expr) |align_expr| {
- if (i < 1) return align_expr;
- i -= 1;
- }
- },
- PrefixOp.AddrOf => |addr_of_info| {
- if (addr_of_info.align_expr) |align_expr| {
- if (i < 1) return align_expr;
- i -= 1;
- }
- },
- PrefixOp.ArrayType => |size_expr| {
- if (i < 1) return size_expr;
- i -= 1;
- },
- PrefixOp.Await,
- PrefixOp.BitNot,
- PrefixOp.BoolNot,
- PrefixOp.Cancel,
- PrefixOp.Deref,
- PrefixOp.MaybeType,
- PrefixOp.Negation,
- PrefixOp.NegationWrap,
- PrefixOp.Try,
- PrefixOp.Resume,
- PrefixOp.UnwrapMaybe => {},
- }
-
- if (i < 1) return self.rhs;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodePrefixOp) Token {
- return self.op_token;
- }
-
- pub fn lastToken(self: &NodePrefixOp) Token {
- return self.rhs.lastToken();
- }
-};
-
-pub const NodeFieldInitializer = struct {
- base: Node,
- period_token: Token,
- name_token: Token,
- expr: &Node,
-
- pub fn iterate(self: &NodeFieldInitializer, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.expr;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeFieldInitializer) Token {
- return self.period_token;
- }
-
- pub fn lastToken(self: &NodeFieldInitializer) Token {
- return self.expr.lastToken();
- }
-};
-
-pub const NodeSuffixOp = struct {
- base: Node,
- lhs: &Node,
- op: SuffixOp,
- rtoken: Token,
-
- const SuffixOp = union(enum) {
- Call: CallInfo,
- ArrayAccess: &Node,
- Slice: SliceRange,
- ArrayInitializer: ArrayList(&Node),
- StructInitializer: ArrayList(&NodeFieldInitializer),
- };
-
- const CallInfo = struct {
- params: ArrayList(&Node),
- async_attr: ?&NodeAsyncAttribute,
- };
-
- const SliceRange = struct {
- start: &Node,
- end: ?&Node,
- };
-
- pub fn iterate(self: &NodeSuffixOp, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.lhs;
- i -= 1;
-
- switch (self.op) {
- SuffixOp.Call => |call_info| {
- if (i < call_info.params.len) return call_info.params.at(i);
- i -= call_info.params.len;
- },
- SuffixOp.ArrayAccess => |index_expr| {
- if (i < 1) return index_expr;
- i -= 1;
- },
- SuffixOp.Slice => |range| {
- if (i < 1) return range.start;
- i -= 1;
-
- if (range.end) |end| {
- if (i < 1) return end;
- i -= 1;
- }
- },
- SuffixOp.ArrayInitializer => |exprs| {
- if (i < exprs.len) return exprs.at(i);
- i -= exprs.len;
- },
- SuffixOp.StructInitializer => |fields| {
- if (i < fields.len) return &fields.at(i).base;
- i -= fields.len;
- },
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeSuffixOp) Token {
- return self.lhs.firstToken();
- }
-
- pub fn lastToken(self: &NodeSuffixOp) Token {
- return self.rtoken;
- }
-};
-
-pub const NodeGroupedExpression = struct {
- base: Node,
- lparen: Token,
- expr: &Node,
- rparen: Token,
-
- pub fn iterate(self: &NodeGroupedExpression, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.expr;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeGroupedExpression) Token {
- return self.lparen;
- }
-
- pub fn lastToken(self: &NodeGroupedExpression) Token {
- return self.rparen;
- }
-};
-
-pub const NodeControlFlowExpression = struct {
- base: Node,
- ltoken: Token,
- kind: Kind,
- rhs: ?&Node,
-
- const Kind = union(enum) {
- Break: ?&Node,
- Continue: ?&Node,
- Return,
- };
-
- pub fn iterate(self: &NodeControlFlowExpression, index: usize) ?&Node {
- var i = index;
-
- switch (self.kind) {
- Kind.Break => |maybe_label| {
- if (maybe_label) |label| {
- if (i < 1) return label;
- i -= 1;
- }
- },
- Kind.Continue => |maybe_label| {
- if (maybe_label) |label| {
- if (i < 1) return label;
- i -= 1;
- }
- },
- Kind.Return => {},
- }
-
- if (self.rhs) |rhs| {
- if (i < 1) return rhs;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeControlFlowExpression) Token {
- return self.ltoken;
- }
-
- pub fn lastToken(self: &NodeControlFlowExpression) Token {
- if (self.rhs) |rhs| {
- return rhs.lastToken();
- }
-
- switch (self.kind) {
- Kind.Break => |maybe_label| {
- if (maybe_label) |label| {
- return label.lastToken();
- }
- },
- Kind.Continue => |maybe_label| {
- if (maybe_label) |label| {
- return label.lastToken();
- }
- },
- Kind.Return => return self.ltoken,
- }
-
- return self.ltoken;
- }
-};
-
-pub const NodeSuspend = struct {
- base: Node,
- suspend_token: Token,
- payload: ?&Node,
- body: ?&Node,
-
- pub fn iterate(self: &NodeSuspend, index: usize) ?&Node {
- var i = index;
-
- if (self.payload) |payload| {
- if (i < 1) return payload;
- i -= 1;
- }
-
- if (self.body) |body| {
- if (i < 1) return body;
- i -= 1;
- }
-
- return null;
- }
-
- pub fn firstToken(self: &NodeSuspend) Token {
- return self.suspend_token;
- }
-
- pub fn lastToken(self: &NodeSuspend) Token {
- if (self.body) |body| {
- return body.lastToken();
- }
-
- if (self.payload) |payload| {
- return payload.lastToken();
- }
-
- return self.suspend_token;
- }
-};
-
-pub const NodeIntegerLiteral = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeIntegerLiteral, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeIntegerLiteral) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeIntegerLiteral) Token {
- return self.token;
- }
-};
-
-pub const NodeFloatLiteral = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeFloatLiteral, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeFloatLiteral) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeFloatLiteral) Token {
- return self.token;
- }
-};
-
-pub const NodeBuiltinCall = struct {
- base: Node,
- builtin_token: Token,
- params: ArrayList(&Node),
- rparen_token: Token,
-
- pub fn iterate(self: &NodeBuiltinCall, index: usize) ?&Node {
- var i = index;
-
- if (i < self.params.len) return self.params.at(i);
- i -= self.params.len;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeBuiltinCall) Token {
- return self.builtin_token;
- }
-
- pub fn lastToken(self: &NodeBuiltinCall) Token {
- return self.rparen_token;
- }
-};
-
-pub const NodeStringLiteral = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeStringLiteral, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeStringLiteral) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeStringLiteral) Token {
- return self.token;
- }
-};
-
-pub const NodeMultilineStringLiteral = struct {
- base: Node,
- tokens: ArrayList(Token),
-
- pub fn iterate(self: &NodeMultilineStringLiteral, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeMultilineStringLiteral) Token {
- return self.tokens.at(0);
- }
-
- pub fn lastToken(self: &NodeMultilineStringLiteral) Token {
- return self.tokens.at(self.tokens.len - 1);
- }
-};
-
-pub const NodeCharLiteral = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeCharLiteral, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeCharLiteral) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeCharLiteral) Token {
- return self.token;
- }
-};
-
-pub const NodeBoolLiteral = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeBoolLiteral, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeBoolLiteral) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeBoolLiteral) Token {
- return self.token;
- }
-};
-
-pub const NodeNullLiteral = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeNullLiteral, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeNullLiteral) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeNullLiteral) Token {
- return self.token;
- }
-};
-
-pub const NodeUndefinedLiteral = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeUndefinedLiteral, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeUndefinedLiteral) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeUndefinedLiteral) Token {
- return self.token;
- }
-};
-
-pub const NodeThisLiteral = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeThisLiteral, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeThisLiteral) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeThisLiteral) Token {
- return self.token;
- }
-};
-
-pub const NodeAsmOutput = struct {
- base: Node,
- symbolic_name: &Node,
- constraint: &Node,
- kind: Kind,
-
- const Kind = union(enum) {
- Variable: &NodeIdentifier,
- Return: &Node
- };
-
- pub fn iterate(self: &NodeAsmOutput, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.symbolic_name;
- i -= 1;
-
- if (i < 1) return self.constraint;
- i -= 1;
-
- switch (self.kind) {
- Kind.Variable => |variable_name| {
- if (i < 1) return &variable_name.base;
- i -= 1;
- },
- Kind.Return => |return_type| {
- if (i < 1) return return_type;
- i -= 1;
+ comptime var i = 0;
+ inline while (i < @memberCount(Id)) : (i += 1) {
+ if (T == @field(Node, @memberName(Id, i))) {
+ return @field(Id, @memberName(Id, i));
}
}
-
- return null;
+ unreachable;
}
- pub fn firstToken(self: &NodeAsmOutput) Token {
- return self.symbolic_name.firstToken();
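+    /// Whether a statement node must be terminated by a semicolon when rendered.
+    /// Walks trailing `else` branches and bodies; constructs whose final body is a
+    /// Block (while/for/if/defer/comptime/suspend) do not require one.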
+ pub fn requireSemiColon(base: *const Node) bool {
+ var n = base;
+ while (true) {
+ switch (n.id) {
+ Id.Root,
+ Id.StructField,
+ Id.UnionTag,
+ Id.EnumTag,
+ Id.ParamDecl,
+ Id.Block,
+ Id.Payload,
+ Id.PointerPayload,
+ Id.PointerIndexPayload,
+ Id.Switch,
+ Id.SwitchCase,
+ Id.SwitchElse,
+ Id.FieldInitializer,
+ Id.DocComment,
+ Id.TestDecl,
+ => return false,
+ Id.While => {
+ const while_node = @fieldParentPtr(While, "base", n);
+ if (while_node.@"else") |@"else"| {
+ n = @"else".base;
+ continue;
+ }
+
+ return while_node.body.id != Id.Block;
+ },
+ Id.For => {
+ const for_node = @fieldParentPtr(For, "base", n);
+ if (for_node.@"else") |@"else"| {
+ n = @"else".base;
+ continue;
+ }
+
+ return for_node.body.id != Id.Block;
+ },
+ Id.If => {
+ const if_node = @fieldParentPtr(If, "base", n);
+ if (if_node.@"else") |@"else"| {
+ n = @"else".base;
+ continue;
+ }
+
+ return if_node.body.id != Id.Block;
+ },
+ Id.Else => {
+ const else_node = @fieldParentPtr(Else, "base", n);
+ n = else_node.body;
+ continue;
+ },
+ Id.Defer => {
+ const defer_node = @fieldParentPtr(Defer, "base", n);
+ return defer_node.expr.id != Id.Block;
+ },
+ Id.Comptime => {
+ const comptime_node = @fieldParentPtr(Comptime, "base", n);
+ return comptime_node.expr.id != Id.Block;
+ },
+ Id.Suspend => {
+ const suspend_node = @fieldParentPtr(Suspend, "base", n);
+ if (suspend_node.body) |body| {
+ return body.id != Id.Block;
+ }
+
+ return true;
+ },
+ else => return true,
+ }
+ }
}
- pub fn lastToken(self: &NodeAsmOutput) Token {
- return switch (self.kind) {
- Kind.Variable => |variable_name| variable_name.lastToken(),
- Kind.Return => |return_type| return_type.lastToken(),
+ pub fn dump(self: *Node, indent: usize) void {
+ {
+ var i: usize = 0;
+ while (i < indent) : (i += 1) {
+ std.debug.warn(" ");
+ }
+ }
+ std.debug.warn("{}\n", @tagName(self.id));
+
+ var child_i: usize = 0;
+ while (self.iterate(child_i)) |child| : (child_i += 1) {
+ child.dump(indent + 2);
+ }
+ }
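+
+    // For example, calling dump with indent 0 on an `If` node prints "If" and then
+    // recurses into the condition, optional payload, body, and else branch,
+    // indenting each child by two more spaces.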
+
+ pub const Root = struct {
+ base: Node,
+ doc_comments: ?*DocComment,
+ decls: DeclList,
+ eof_token: TokenIndex,
+
+ pub const DeclList = SegmentedList(*Node, 4);
+
+ pub fn iterate(self: *Root, index: usize) ?*Node {
+ if (index < self.decls.len) {
+ return self.decls.at(index).*;
+ }
+ return null;
+ }
+
+ pub fn firstToken(self: *Root) TokenIndex {
+ return if (self.decls.len == 0) self.eof_token else (self.decls.at(0).*).firstToken();
+ }
+
+ pub fn lastToken(self: *Root) TokenIndex {
+ return if (self.decls.len == 0) self.eof_token else (self.decls.at(self.decls.len - 1).*).lastToken();
+ }
+ };
+
+ pub const VarDecl = struct {
+ base: Node,
+ doc_comments: ?*DocComment,
+ visib_token: ?TokenIndex,
+ name_token: TokenIndex,
+ eq_token: TokenIndex,
+ mut_token: TokenIndex,
+ comptime_token: ?TokenIndex,
+ extern_export_token: ?TokenIndex,
+ lib_name: ?*Node,
+ type_node: ?*Node,
+ align_node: ?*Node,
+ init_node: ?*Node,
+ semicolon_token: TokenIndex,
+
+ pub fn iterate(self: *VarDecl, index: usize) ?*Node {
+ var i = index;
+
+ if (self.type_node) |type_node| {
+ if (i < 1) return type_node;
+ i -= 1;
+ }
+
+ if (self.align_node) |align_node| {
+ if (i < 1) return align_node;
+ i -= 1;
+ }
+
+ if (self.init_node) |init_node| {
+ if (i < 1) return init_node;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *VarDecl) TokenIndex {
+ if (self.visib_token) |visib_token| return visib_token;
+ if (self.comptime_token) |comptime_token| return comptime_token;
+ if (self.extern_export_token) |extern_export_token| return extern_export_token;
+ assert(self.lib_name == null);
+ return self.mut_token;
+ }
+
+ pub fn lastToken(self: *VarDecl) TokenIndex {
+ return self.semicolon_token;
+ }
+ };
+
+ pub const Use = struct {
+ base: Node,
+ doc_comments: ?*DocComment,
+ visib_token: ?TokenIndex,
+ use_token: TokenIndex,
+ expr: *Node,
+ semicolon_token: TokenIndex,
+
+ pub fn iterate(self: *Use, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.expr;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *Use) TokenIndex {
+ if (self.visib_token) |visib_token| return visib_token;
+ return self.use_token;
+ }
+
+ pub fn lastToken(self: *Use) TokenIndex {
+ return self.semicolon_token;
+ }
+ };
+
+ pub const ErrorSetDecl = struct {
+ base: Node,
+ error_token: TokenIndex,
+ decls: DeclList,
+ rbrace_token: TokenIndex,
+
+ pub const DeclList = SegmentedList(*Node, 2);
+
+ pub fn iterate(self: *ErrorSetDecl, index: usize) ?*Node {
+ var i = index;
+
+ if (i < self.decls.len) return self.decls.at(i).*;
+ i -= self.decls.len;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *ErrorSetDecl) TokenIndex {
+ return self.error_token;
+ }
+
+ pub fn lastToken(self: *ErrorSetDecl) TokenIndex {
+ return self.rbrace_token;
+ }
+ };
+
+ pub const ContainerDecl = struct {
+ base: Node,
+ layout_token: ?TokenIndex,
+ kind_token: TokenIndex,
+ init_arg_expr: InitArg,
+ fields_and_decls: DeclList,
+ lbrace_token: TokenIndex,
+ rbrace_token: TokenIndex,
+
+ pub const DeclList = Root.DeclList;
+
+ const InitArg = union(enum) {
+ None,
+ Enum: ?*Node,
+ Type: *Node,
};
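+        // Rough mapping (sketch): a container with no argument, e.g. `struct { ... }`,
+        // uses InitArg.None; an explicit argument type as in `enum(u8)` uses
+        // InitArg.Type; and the `union(enum)` form uses InitArg.Enum, optionally
+        // carrying an explicit tag type node.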
- }
+
+ pub fn iterate(self: *ContainerDecl, index: usize) ?*Node {
+ var i = index;
+
+ switch (self.init_arg_expr) {
+ InitArg.Type => |t| {
+ if (i < 1) return t;
+ i -= 1;
+ },
+ InitArg.None, InitArg.Enum => {},
+ }
+
+ if (i < self.fields_and_decls.len) return self.fields_and_decls.at(i).*;
+ i -= self.fields_and_decls.len;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *ContainerDecl) TokenIndex {
+ if (self.layout_token) |layout_token| {
+ return layout_token;
+ }
+ return self.kind_token;
+ }
+
+ pub fn lastToken(self: *ContainerDecl) TokenIndex {
+ return self.rbrace_token;
+ }
+ };
+
+ pub const StructField = struct {
+ base: Node,
+ doc_comments: ?*DocComment,
+ visib_token: ?TokenIndex,
+ name_token: TokenIndex,
+ type_expr: *Node,
+
+ pub fn iterate(self: *StructField, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.type_expr;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *StructField) TokenIndex {
+ if (self.visib_token) |visib_token| return visib_token;
+ return self.name_token;
+ }
+
+ pub fn lastToken(self: *StructField) TokenIndex {
+ return self.type_expr.lastToken();
+ }
+ };
+
+ pub const UnionTag = struct {
+ base: Node,
+ doc_comments: ?*DocComment,
+ name_token: TokenIndex,
+ type_expr: ?*Node,
+ value_expr: ?*Node,
+
+ pub fn iterate(self: *UnionTag, index: usize) ?*Node {
+ var i = index;
+
+ if (self.type_expr) |type_expr| {
+ if (i < 1) return type_expr;
+ i -= 1;
+ }
+
+ if (self.value_expr) |value_expr| {
+ if (i < 1) return value_expr;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *UnionTag) TokenIndex {
+ return self.name_token;
+ }
+
+ pub fn lastToken(self: *UnionTag) TokenIndex {
+ if (self.value_expr) |value_expr| {
+ return value_expr.lastToken();
+ }
+ if (self.type_expr) |type_expr| {
+ return type_expr.lastToken();
+ }
+
+ return self.name_token;
+ }
+ };
+
+ pub const EnumTag = struct {
+ base: Node,
+ doc_comments: ?*DocComment,
+ name_token: TokenIndex,
+ value: ?*Node,
+
+ pub fn iterate(self: *EnumTag, index: usize) ?*Node {
+ var i = index;
+
+ if (self.value) |value| {
+ if (i < 1) return value;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *EnumTag) TokenIndex {
+ return self.name_token;
+ }
+
+ pub fn lastToken(self: *EnumTag) TokenIndex {
+ if (self.value) |value| {
+ return value.lastToken();
+ }
+
+ return self.name_token;
+ }
+ };
+
+ pub const ErrorTag = struct {
+ base: Node,
+ doc_comments: ?*DocComment,
+ name_token: TokenIndex,
+
+ pub fn iterate(self: *ErrorTag, index: usize) ?*Node {
+ var i = index;
+
+ if (self.doc_comments) |comments| {
+ if (i < 1) return &comments.base;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *ErrorTag) TokenIndex {
+ return self.name_token;
+ }
+
+ pub fn lastToken(self: *ErrorTag) TokenIndex {
+ return self.name_token;
+ }
+ };
+
+ pub const Identifier = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *Identifier, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *Identifier) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *Identifier) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const AsyncAttribute = struct {
+ base: Node,
+ async_token: TokenIndex,
+ allocator_type: ?*Node,
+ rangle_bracket: ?TokenIndex,
+
+ pub fn iterate(self: *AsyncAttribute, index: usize) ?*Node {
+ var i = index;
+
+ if (self.allocator_type) |allocator_type| {
+ if (i < 1) return allocator_type;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *AsyncAttribute) TokenIndex {
+ return self.async_token;
+ }
+
+ pub fn lastToken(self: *AsyncAttribute) TokenIndex {
+ if (self.rangle_bracket) |rangle_bracket| {
+ return rangle_bracket;
+ }
+
+ return self.async_token;
+ }
+ };
+
+ pub const FnProto = struct {
+ base: Node,
+ doc_comments: ?*DocComment,
+ visib_token: ?TokenIndex,
+ fn_token: TokenIndex,
+ name_token: ?TokenIndex,
+ params: ParamList,
+ return_type: ReturnType,
+ var_args_token: ?TokenIndex,
+ extern_export_inline_token: ?TokenIndex,
+ cc_token: ?TokenIndex,
+ async_attr: ?*AsyncAttribute,
+ body_node: ?*Node,
+ lib_name: ?*Node, // populated if this is an extern declaration
+ align_expr: ?*Node, // populated if align(A) is present
+
+ pub const ParamList = SegmentedList(*Node, 2);
+
+ pub const ReturnType = union(enum) {
+ Explicit: *Node,
+ InferErrorSet: *Node,
+ };
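+
+        // Sketch of the mapping: `fn f() T` produces ReturnType.Explicit pointing
+        // at `T`, while an inferred error set return such as `fn f() !T` produces
+        // ReturnType.InferErrorSet pointing at `T`.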
+
+ pub fn iterate(self: *FnProto, index: usize) ?*Node {
+ var i = index;
+
+ if (self.lib_name) |lib_name| {
+ if (i < 1) return lib_name;
+ i -= 1;
+ }
+
+ if (i < self.params.len) return self.params.at(self.params.len - i - 1).*;
+ i -= self.params.len;
+
+ if (self.align_expr) |align_expr| {
+ if (i < 1) return align_expr;
+ i -= 1;
+ }
+
+ switch (self.return_type) {
+ // TODO allow this and next prong to share bodies since the types are the same
+ ReturnType.Explicit => |node| {
+ if (i < 1) return node;
+ i -= 1;
+ },
+ ReturnType.InferErrorSet => |node| {
+ if (i < 1) return node;
+ i -= 1;
+ },
+ }
+
+ if (self.body_node) |body_node| {
+ if (i < 1) return body_node;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *FnProto) TokenIndex {
+ if (self.visib_token) |visib_token| return visib_token;
+ if (self.async_attr) |async_attr| return async_attr.firstToken();
+ if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
+ assert(self.lib_name == null);
+ if (self.cc_token) |cc_token| return cc_token;
+ return self.fn_token;
+ }
+
+ pub fn lastToken(self: *FnProto) TokenIndex {
+ if (self.body_node) |body_node| return body_node.lastToken();
+ switch (self.return_type) {
+ // TODO allow this and next prong to share bodies since the types are the same
+ ReturnType.Explicit => |node| return node.lastToken(),
+ ReturnType.InferErrorSet => |node| return node.lastToken(),
+ }
+ }
+ };
+
+ pub const PromiseType = struct {
+ base: Node,
+ promise_token: TokenIndex,
+ result: ?Result,
+
+ pub const Result = struct {
+ arrow_token: TokenIndex,
+ return_type: *Node,
+ };
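+
+        // Sketch: a bare `promise` type has result == null, while `promise->T`
+        // fills Result with the arrow token and `T` as the return type node.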
+
+ pub fn iterate(self: *PromiseType, index: usize) ?*Node {
+ var i = index;
+
+ if (self.result) |result| {
+ if (i < 1) return result.return_type;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *PromiseType) TokenIndex {
+ return self.promise_token;
+ }
+
+ pub fn lastToken(self: *PromiseType) TokenIndex {
+ if (self.result) |result| return result.return_type.lastToken();
+ return self.promise_token;
+ }
+ };
+
+ pub const ParamDecl = struct {
+ base: Node,
+ comptime_token: ?TokenIndex,
+ noalias_token: ?TokenIndex,
+ name_token: ?TokenIndex,
+ type_node: *Node,
+ var_args_token: ?TokenIndex,
+
+ pub fn iterate(self: *ParamDecl, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.type_node;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *ParamDecl) TokenIndex {
+ if (self.comptime_token) |comptime_token| return comptime_token;
+ if (self.noalias_token) |noalias_token| return noalias_token;
+ if (self.name_token) |name_token| return name_token;
+ return self.type_node.firstToken();
+ }
+
+ pub fn lastToken(self: *ParamDecl) TokenIndex {
+ if (self.var_args_token) |var_args_token| return var_args_token;
+ return self.type_node.lastToken();
+ }
+ };
+
+ pub const Block = struct {
+ base: Node,
+ label: ?TokenIndex,
+ lbrace: TokenIndex,
+ statements: StatementList,
+ rbrace: TokenIndex,
+
+ pub const StatementList = Root.DeclList;
+
+ pub fn iterate(self: *Block, index: usize) ?*Node {
+ var i = index;
+
+ if (i < self.statements.len) return self.statements.at(i).*;
+ i -= self.statements.len;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *Block) TokenIndex {
+ if (self.label) |label| {
+ return label;
+ }
+
+ return self.lbrace;
+ }
+
+ pub fn lastToken(self: *Block) TokenIndex {
+ return self.rbrace;
+ }
+ };
+
+ pub const Defer = struct {
+ base: Node,
+ defer_token: TokenIndex,
+ expr: *Node,
+
+ pub fn iterate(self: *Defer, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.expr;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *Defer) TokenIndex {
+ return self.defer_token;
+ }
+
+ pub fn lastToken(self: *Defer) TokenIndex {
+ return self.expr.lastToken();
+ }
+ };
+
+ pub const Comptime = struct {
+ base: Node,
+ doc_comments: ?*DocComment,
+ comptime_token: TokenIndex,
+ expr: *Node,
+
+ pub fn iterate(self: *Comptime, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.expr;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *Comptime) TokenIndex {
+ return self.comptime_token;
+ }
+
+ pub fn lastToken(self: *Comptime) TokenIndex {
+ return self.expr.lastToken();
+ }
+ };
+
+ pub const Payload = struct {
+ base: Node,
+ lpipe: TokenIndex,
+ error_symbol: *Node,
+ rpipe: TokenIndex,
+
+ pub fn iterate(self: *Payload, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.error_symbol;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *Payload) TokenIndex {
+ return self.lpipe;
+ }
+
+ pub fn lastToken(self: *Payload) TokenIndex {
+ return self.rpipe;
+ }
+ };
+
+ pub const PointerPayload = struct {
+ base: Node,
+ lpipe: TokenIndex,
+ ptr_token: ?TokenIndex,
+ value_symbol: *Node,
+ rpipe: TokenIndex,
+
+ pub fn iterate(self: *PointerPayload, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.value_symbol;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *PointerPayload) TokenIndex {
+ return self.lpipe;
+ }
+
+ pub fn lastToken(self: *PointerPayload) TokenIndex {
+ return self.rpipe;
+ }
+ };
+
+ pub const PointerIndexPayload = struct {
+ base: Node,
+ lpipe: TokenIndex,
+ ptr_token: ?TokenIndex,
+ value_symbol: *Node,
+ index_symbol: ?*Node,
+ rpipe: TokenIndex,
+
+ pub fn iterate(self: *PointerIndexPayload, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.value_symbol;
+ i -= 1;
+
+ if (self.index_symbol) |index_symbol| {
+ if (i < 1) return index_symbol;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *PointerIndexPayload) TokenIndex {
+ return self.lpipe;
+ }
+
+ pub fn lastToken(self: *PointerIndexPayload) TokenIndex {
+ return self.rpipe;
+ }
+ };
+
+ pub const Else = struct {
+ base: Node,
+ else_token: TokenIndex,
+ payload: ?*Node,
+ body: *Node,
+
+ pub fn iterate(self: *Else, index: usize) ?*Node {
+ var i = index;
+
+ if (self.payload) |payload| {
+ if (i < 1) return payload;
+ i -= 1;
+ }
+
+ if (i < 1) return self.body;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *Else) TokenIndex {
+ return self.else_token;
+ }
+
+ pub fn lastToken(self: *Else) TokenIndex {
+ return self.body.lastToken();
+ }
+ };
+
+ pub const Switch = struct {
+ base: Node,
+ switch_token: TokenIndex,
+ expr: *Node,
+
+        /// Each element must be a SwitchCase node.
+ cases: CaseList,
+ rbrace: TokenIndex,
+
+ pub const CaseList = SegmentedList(*Node, 2);
+
+ pub fn iterate(self: *Switch, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.expr;
+ i -= 1;
+
+ if (i < self.cases.len) return self.cases.at(i).*;
+ i -= self.cases.len;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *Switch) TokenIndex {
+ return self.switch_token;
+ }
+
+ pub fn lastToken(self: *Switch) TokenIndex {
+ return self.rbrace;
+ }
+ };
+
+ pub const SwitchCase = struct {
+ base: Node,
+ items: ItemList,
+ arrow_token: TokenIndex,
+ payload: ?*Node,
+ expr: *Node,
+
+ pub const ItemList = SegmentedList(*Node, 1);
+
+ pub fn iterate(self: *SwitchCase, index: usize) ?*Node {
+ var i = index;
+
+ if (i < self.items.len) return self.items.at(i).*;
+ i -= self.items.len;
+
+ if (self.payload) |payload| {
+ if (i < 1) return payload;
+ i -= 1;
+ }
+
+ if (i < 1) return self.expr;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *SwitchCase) TokenIndex {
+ return (self.items.at(0).*).firstToken();
+ }
+
+ pub fn lastToken(self: *SwitchCase) TokenIndex {
+ return self.expr.lastToken();
+ }
+ };
+
+ pub const SwitchElse = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *SwitchElse, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *SwitchElse) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *SwitchElse) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const While = struct {
+ base: Node,
+ label: ?TokenIndex,
+ inline_token: ?TokenIndex,
+ while_token: TokenIndex,
+ condition: *Node,
+ payload: ?*Node,
+ continue_expr: ?*Node,
+ body: *Node,
+ @"else": ?*Else,
+
+ pub fn iterate(self: *While, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.condition;
+ i -= 1;
+
+ if (self.payload) |payload| {
+ if (i < 1) return payload;
+ i -= 1;
+ }
+
+ if (self.continue_expr) |continue_expr| {
+ if (i < 1) return continue_expr;
+ i -= 1;
+ }
+
+ if (i < 1) return self.body;
+ i -= 1;
+
+ if (self.@"else") |@"else"| {
+ if (i < 1) return &@"else".base;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *While) TokenIndex {
+ if (self.label) |label| {
+ return label;
+ }
+
+ if (self.inline_token) |inline_token| {
+ return inline_token;
+ }
+
+ return self.while_token;
+ }
+
+ pub fn lastToken(self: *While) TokenIndex {
+ if (self.@"else") |@"else"| {
+ return @"else".body.lastToken();
+ }
+
+ return self.body.lastToken();
+ }
+ };
+
+ pub const For = struct {
+ base: Node,
+ label: ?TokenIndex,
+ inline_token: ?TokenIndex,
+ for_token: TokenIndex,
+ array_expr: *Node,
+ payload: ?*Node,
+ body: *Node,
+ @"else": ?*Else,
+
+ pub fn iterate(self: *For, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.array_expr;
+ i -= 1;
+
+ if (self.payload) |payload| {
+ if (i < 1) return payload;
+ i -= 1;
+ }
+
+ if (i < 1) return self.body;
+ i -= 1;
+
+ if (self.@"else") |@"else"| {
+ if (i < 1) return &@"else".base;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *For) TokenIndex {
+ if (self.label) |label| {
+ return label;
+ }
+
+ if (self.inline_token) |inline_token| {
+ return inline_token;
+ }
+
+ return self.for_token;
+ }
+
+ pub fn lastToken(self: *For) TokenIndex {
+ if (self.@"else") |@"else"| {
+ return @"else".body.lastToken();
+ }
+
+ return self.body.lastToken();
+ }
+ };
+
+ pub const If = struct {
+ base: Node,
+ if_token: TokenIndex,
+ condition: *Node,
+ payload: ?*Node,
+ body: *Node,
+ @"else": ?*Else,
+
+ pub fn iterate(self: *If, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.condition;
+ i -= 1;
+
+ if (self.payload) |payload| {
+ if (i < 1) return payload;
+ i -= 1;
+ }
+
+ if (i < 1) return self.body;
+ i -= 1;
+
+ if (self.@"else") |@"else"| {
+ if (i < 1) return &@"else".base;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *If) TokenIndex {
+ return self.if_token;
+ }
+
+ pub fn lastToken(self: *If) TokenIndex {
+ if (self.@"else") |@"else"| {
+ return @"else".body.lastToken();
+ }
+
+ return self.body.lastToken();
+ }
+ };
+
+ pub const InfixOp = struct {
+ base: Node,
+ op_token: TokenIndex,
+ lhs: *Node,
+ op: Op,
+ rhs: *Node,
+
+ pub const Op = union(enum) {
+ Add,
+ AddWrap,
+ ArrayCat,
+ ArrayMult,
+ Assign,
+ AssignBitAnd,
+ AssignBitOr,
+ AssignBitShiftLeft,
+ AssignBitShiftRight,
+ AssignBitXor,
+ AssignDiv,
+ AssignMinus,
+ AssignMinusWrap,
+ AssignMod,
+ AssignPlus,
+ AssignPlusWrap,
+ AssignTimes,
+ AssignTimesWarp,
+ BangEqual,
+ BitAnd,
+ BitOr,
+ BitShiftLeft,
+ BitShiftRight,
+ BitXor,
+ BoolAnd,
+ BoolOr,
+ Catch: ?*Node,
+ Div,
+ EqualEqual,
+ ErrorUnion,
+ GreaterOrEqual,
+ GreaterThan,
+ LessOrEqual,
+ LessThan,
+ MergeErrorSets,
+ Mod,
+ Mult,
+ MultWrap,
+ Period,
+ Range,
+ Sub,
+ SubWrap,
+ UnwrapOptional,
+ };
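+
+        // Sketch: `a + b` maps to Op.Add, `a.b` to Op.Period, and `a catch |err| b`
+        // to Op.Catch carrying the optional error payload node.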
+
+ pub fn iterate(self: *InfixOp, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.lhs;
+ i -= 1;
+
+ switch (self.op) {
+ Op.Catch => |maybe_payload| {
+ if (maybe_payload) |payload| {
+ if (i < 1) return payload;
+ i -= 1;
+ }
+ },
+
+ Op.Add,
+ Op.AddWrap,
+ Op.ArrayCat,
+ Op.ArrayMult,
+ Op.Assign,
+ Op.AssignBitAnd,
+ Op.AssignBitOr,
+ Op.AssignBitShiftLeft,
+ Op.AssignBitShiftRight,
+ Op.AssignBitXor,
+ Op.AssignDiv,
+ Op.AssignMinus,
+ Op.AssignMinusWrap,
+ Op.AssignMod,
+ Op.AssignPlus,
+ Op.AssignPlusWrap,
+ Op.AssignTimes,
+ Op.AssignTimesWarp,
+ Op.BangEqual,
+ Op.BitAnd,
+ Op.BitOr,
+ Op.BitShiftLeft,
+ Op.BitShiftRight,
+ Op.BitXor,
+ Op.BoolAnd,
+ Op.BoolOr,
+ Op.Div,
+ Op.EqualEqual,
+ Op.ErrorUnion,
+ Op.GreaterOrEqual,
+ Op.GreaterThan,
+ Op.LessOrEqual,
+ Op.LessThan,
+ Op.MergeErrorSets,
+ Op.Mod,
+ Op.Mult,
+ Op.MultWrap,
+ Op.Period,
+ Op.Range,
+ Op.Sub,
+ Op.SubWrap,
+ Op.UnwrapOptional,
+ => {},
+ }
+
+ if (i < 1) return self.rhs;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *InfixOp) TokenIndex {
+ return self.lhs.firstToken();
+ }
+
+ pub fn lastToken(self: *InfixOp) TokenIndex {
+ return self.rhs.lastToken();
+ }
+ };
+
+ pub const PrefixOp = struct {
+ base: Node,
+ op_token: TokenIndex,
+ op: Op,
+ rhs: *Node,
+
+ pub const Op = union(enum) {
+ AddressOf,
+ ArrayType: *Node,
+ Await,
+ BitNot,
+ BoolNot,
+ Cancel,
+ OptionalType,
+ Negation,
+ NegationWrap,
+ Resume,
+ PtrType: PtrInfo,
+ SliceType: PtrInfo,
+ Try,
+ };
+
+ pub const PtrInfo = struct {
+ align_info: ?Align,
+ const_token: ?TokenIndex,
+ volatile_token: ?TokenIndex,
+
+ pub const Align = struct {
+ node: *Node,
+ bit_range: ?BitRange,
+
+ pub const BitRange = struct {
+ start: *Node,
+ end: *Node,
+ };
+ };
+ };
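+
+        // Illustrative sketch: for a slice type such as `[]align(4) const u8`,
+        // align_info.node points at the `4` expression and const_token is set;
+        // bit_range is only populated for the bit-offset form of align.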
+
+ pub fn iterate(self: *PrefixOp, index: usize) ?*Node {
+ var i = index;
+
+ switch (self.op) {
+ // TODO https://github.com/ziglang/zig/issues/1107
+ Op.SliceType => |addr_of_info| {
+ if (addr_of_info.align_info) |align_info| {
+ if (i < 1) return align_info.node;
+ i -= 1;
+ }
+ },
+
+ Op.PtrType => |addr_of_info| {
+ if (addr_of_info.align_info) |align_info| {
+ if (i < 1) return align_info.node;
+ i -= 1;
+ }
+ },
+
+ Op.ArrayType => |size_expr| {
+ if (i < 1) return size_expr;
+ i -= 1;
+ },
+
+ Op.AddressOf,
+ Op.Await,
+ Op.BitNot,
+ Op.BoolNot,
+ Op.Cancel,
+ Op.OptionalType,
+ Op.Negation,
+ Op.NegationWrap,
+ Op.Try,
+ Op.Resume,
+ => {},
+ }
+
+ if (i < 1) return self.rhs;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *PrefixOp) TokenIndex {
+ return self.op_token;
+ }
+
+ pub fn lastToken(self: *PrefixOp) TokenIndex {
+ return self.rhs.lastToken();
+ }
+ };
+
+ pub const FieldInitializer = struct {
+ base: Node,
+ period_token: TokenIndex,
+ name_token: TokenIndex,
+ expr: *Node,
+
+ pub fn iterate(self: *FieldInitializer, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.expr;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *FieldInitializer) TokenIndex {
+ return self.period_token;
+ }
+
+ pub fn lastToken(self: *FieldInitializer) TokenIndex {
+ return self.expr.lastToken();
+ }
+ };
+
+ pub const SuffixOp = struct {
+ base: Node,
+ lhs: *Node,
+ op: Op,
+ rtoken: TokenIndex,
+
+ pub const Op = union(enum) {
+ Call: Call,
+ ArrayAccess: *Node,
+ Slice: Slice,
+ ArrayInitializer: InitList,
+ StructInitializer: InitList,
+ Deref,
+ UnwrapOptional,
+
+ pub const InitList = SegmentedList(*Node, 2);
+
+ pub const Call = struct {
+ params: ParamList,
+ async_attr: ?*AsyncAttribute,
+
+ pub const ParamList = SegmentedList(*Node, 2);
+ };
+
+ pub const Slice = struct {
+ start: *Node,
+ end: ?*Node,
+ };
+ };
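+
+        // Rough mapping from syntax to variants: `f(x)` is Call, `a[i]` is
+        // ArrayAccess, `a[0..n]` is Slice, `T{ .x = 1 }` is StructInitializer,
+        // `T{ 1, 2 }` is ArrayInitializer, and Deref and UnwrapOptional cover
+        // the postfix dereference and optional-unwrap operators.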
+
+ pub fn iterate(self: *SuffixOp, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.lhs;
+ i -= 1;
+
+ switch (self.op) {
+ @TagType(Op).Call => |*call_info| {
+ if (i < call_info.params.len) return call_info.params.at(i).*;
+ i -= call_info.params.len;
+ },
+ Op.ArrayAccess => |index_expr| {
+ if (i < 1) return index_expr;
+ i -= 1;
+ },
+ @TagType(Op).Slice => |range| {
+ if (i < 1) return range.start;
+ i -= 1;
+
+ if (range.end) |end| {
+ if (i < 1) return end;
+ i -= 1;
+ }
+ },
+ Op.ArrayInitializer => |*exprs| {
+ if (i < exprs.len) return exprs.at(i).*;
+ i -= exprs.len;
+ },
+ Op.StructInitializer => |*fields| {
+ if (i < fields.len) return fields.at(i).*;
+ i -= fields.len;
+ },
+ Op.UnwrapOptional,
+ Op.Deref,
+ => {},
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *SuffixOp) TokenIndex {
+ switch (self.op) {
+ @TagType(Op).Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(),
+ else => {},
+ }
+ return self.lhs.firstToken();
+ }
+
+ pub fn lastToken(self: *SuffixOp) TokenIndex {
+ return self.rtoken;
+ }
+ };
+
+ pub const GroupedExpression = struct {
+ base: Node,
+ lparen: TokenIndex,
+ expr: *Node,
+ rparen: TokenIndex,
+
+ pub fn iterate(self: *GroupedExpression, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.expr;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *GroupedExpression) TokenIndex {
+ return self.lparen;
+ }
+
+ pub fn lastToken(self: *GroupedExpression) TokenIndex {
+ return self.rparen;
+ }
+ };
+
+ pub const ControlFlowExpression = struct {
+ base: Node,
+ ltoken: TokenIndex,
+ kind: Kind,
+ rhs: ?*Node,
+
+ const Kind = union(enum) {
+ Break: ?*Node,
+ Continue: ?*Node,
+ Return,
+ };
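+
+        // Sketch: `break :blk x` becomes Kind.Break carrying the label node, with
+        // rhs holding `x`; Continue works the same way, and Return never carries
+        // a label.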
+
+ pub fn iterate(self: *ControlFlowExpression, index: usize) ?*Node {
+ var i = index;
+
+ switch (self.kind) {
+ Kind.Break => |maybe_label| {
+ if (maybe_label) |label| {
+ if (i < 1) return label;
+ i -= 1;
+ }
+ },
+ Kind.Continue => |maybe_label| {
+ if (maybe_label) |label| {
+ if (i < 1) return label;
+ i -= 1;
+ }
+ },
+ Kind.Return => {},
+ }
+
+ if (self.rhs) |rhs| {
+ if (i < 1) return rhs;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *ControlFlowExpression) TokenIndex {
+ return self.ltoken;
+ }
+
+ pub fn lastToken(self: *ControlFlowExpression) TokenIndex {
+ if (self.rhs) |rhs| {
+ return rhs.lastToken();
+ }
+
+ switch (self.kind) {
+ Kind.Break => |maybe_label| {
+ if (maybe_label) |label| {
+ return label.lastToken();
+ }
+ },
+ Kind.Continue => |maybe_label| {
+ if (maybe_label) |label| {
+ return label.lastToken();
+ }
+ },
+ Kind.Return => return self.ltoken,
+ }
+
+ return self.ltoken;
+ }
+ };
+
+ pub const Suspend = struct {
+ base: Node,
+ suspend_token: TokenIndex,
+ body: ?*Node,
+
+ pub fn iterate(self: *Suspend, index: usize) ?*Node {
+ var i = index;
+
+ if (self.body) |body| {
+ if (i < 1) return body;
+ i -= 1;
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *Suspend) TokenIndex {
+ return self.suspend_token;
+ }
+
+ pub fn lastToken(self: *Suspend) TokenIndex {
+ if (self.body) |body| {
+ return body.lastToken();
+ }
+
+ return self.suspend_token;
+ }
+ };
+
+ pub const IntegerLiteral = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *IntegerLiteral, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *IntegerLiteral) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *IntegerLiteral) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const FloatLiteral = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *FloatLiteral, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *FloatLiteral) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *FloatLiteral) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const BuiltinCall = struct {
+ base: Node,
+ builtin_token: TokenIndex,
+ params: ParamList,
+ rparen_token: TokenIndex,
+
+ pub const ParamList = SegmentedList(*Node, 2);
+
+ pub fn iterate(self: *BuiltinCall, index: usize) ?*Node {
+ var i = index;
+
+ if (i < self.params.len) return self.params.at(i).*;
+ i -= self.params.len;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *BuiltinCall) TokenIndex {
+ return self.builtin_token;
+ }
+
+ pub fn lastToken(self: *BuiltinCall) TokenIndex {
+ return self.rparen_token;
+ }
+ };
+
+ pub const StringLiteral = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *StringLiteral, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *StringLiteral) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *StringLiteral) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const MultilineStringLiteral = struct {
+ base: Node,
+ lines: LineList,
+
+ pub const LineList = SegmentedList(TokenIndex, 4);
+
+ pub fn iterate(self: *MultilineStringLiteral, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *MultilineStringLiteral) TokenIndex {
+ return self.lines.at(0).*;
+ }
+
+ pub fn lastToken(self: *MultilineStringLiteral) TokenIndex {
+ return self.lines.at(self.lines.len - 1).*;
+ }
+ };
+
+ pub const CharLiteral = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *CharLiteral, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *CharLiteral) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *CharLiteral) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const BoolLiteral = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *BoolLiteral, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *BoolLiteral) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *BoolLiteral) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const NullLiteral = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *NullLiteral, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *NullLiteral) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *NullLiteral) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const UndefinedLiteral = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *UndefinedLiteral, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *UndefinedLiteral) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *UndefinedLiteral) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const ThisLiteral = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *ThisLiteral, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *ThisLiteral) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *ThisLiteral) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const AsmOutput = struct {
+ base: Node,
+ lbracket: TokenIndex,
+ symbolic_name: *Node,
+ constraint: *Node,
+ kind: Kind,
+ rparen: TokenIndex,
+
+ const Kind = union(enum) {
+ Variable: *Identifier,
+ Return: *Node,
+ };
+
+ pub fn iterate(self: *AsmOutput, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.symbolic_name;
+ i -= 1;
+
+ if (i < 1) return self.constraint;
+ i -= 1;
+
+ switch (self.kind) {
+ Kind.Variable => |variable_name| {
+ if (i < 1) return &variable_name.base;
+ i -= 1;
+ },
+ Kind.Return => |return_type| {
+ if (i < 1) return return_type;
+ i -= 1;
+ },
+ }
+
+ return null;
+ }
+
+ pub fn firstToken(self: *AsmOutput) TokenIndex {
+ return self.lbracket;
+ }
+
+ pub fn lastToken(self: *AsmOutput) TokenIndex {
+ return self.rparen;
+ }
+ };
+
+ pub const AsmInput = struct {
+ base: Node,
+ lbracket: TokenIndex,
+ symbolic_name: *Node,
+ constraint: *Node,
+ expr: *Node,
+ rparen: TokenIndex,
+
+ pub fn iterate(self: *AsmInput, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.symbolic_name;
+ i -= 1;
+
+ if (i < 1) return self.constraint;
+ i -= 1;
+
+ if (i < 1) return self.expr;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *AsmInput) TokenIndex {
+ return self.lbracket;
+ }
+
+ pub fn lastToken(self: *AsmInput) TokenIndex {
+ return self.rparen;
+ }
+ };
+
+ pub const Asm = struct {
+ base: Node,
+ asm_token: TokenIndex,
+ volatile_token: ?TokenIndex,
+ template: *Node,
+ outputs: OutputList,
+ inputs: InputList,
+ clobbers: ClobberList,
+ rparen: TokenIndex,
+
+ const OutputList = SegmentedList(*AsmOutput, 2);
+ const InputList = SegmentedList(*AsmInput, 2);
+ const ClobberList = SegmentedList(TokenIndex, 2);
+
+ pub fn iterate(self: *Asm, index: usize) ?*Node {
+ var i = index;
+
+ if (i < self.outputs.len) return &self.outputs.at(index).*.base;
+ i -= self.outputs.len;
+
+ if (i < self.inputs.len) return &self.inputs.at(index).*.base;
+ i -= self.inputs.len;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *Asm) TokenIndex {
+ return self.asm_token;
+ }
+
+ pub fn lastToken(self: *Asm) TokenIndex {
+ return self.rparen;
+ }
+ };
+
+ pub const Unreachable = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *Unreachable, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *Unreachable) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *Unreachable) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const ErrorType = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *ErrorType, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *ErrorType) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *ErrorType) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const VarType = struct {
+ base: Node,
+ token: TokenIndex,
+
+ pub fn iterate(self: *VarType, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *VarType) TokenIndex {
+ return self.token;
+ }
+
+ pub fn lastToken(self: *VarType) TokenIndex {
+ return self.token;
+ }
+ };
+
+ pub const DocComment = struct {
+ base: Node,
+ lines: LineList,
+
+ pub const LineList = SegmentedList(TokenIndex, 4);
+
+ pub fn iterate(self: *DocComment, index: usize) ?*Node {
+ return null;
+ }
+
+ pub fn firstToken(self: *DocComment) TokenIndex {
+ return self.lines.at(0).*;
+ }
+
+ pub fn lastToken(self: *DocComment) TokenIndex {
+ return self.lines.at(self.lines.len - 1).*;
+ }
+ };
+
+ pub const TestDecl = struct {
+ base: Node,
+ doc_comments: ?*DocComment,
+ test_token: TokenIndex,
+ name: *Node,
+ body_node: *Node,
+
+ pub fn iterate(self: *TestDecl, index: usize) ?*Node {
+ var i = index;
+
+ if (i < 1) return self.body_node;
+ i -= 1;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *TestDecl) TokenIndex {
+ return self.test_token;
+ }
+
+ pub fn lastToken(self: *TestDecl) TokenIndex {
+ return self.body_node.lastToken();
+ }
+ };
};
-pub const NodeAsmInput = struct {
- base: Node,
- symbolic_name: &Node,
- constraint: &Node,
- expr: &Node,
-
- pub fn iterate(self: &NodeAsmInput, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.symbolic_name;
- i -= 1;
-
- if (i < 1) return self.constraint;
- i -= 1;
-
- if (i < 1) return self.expr;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeAsmInput) Token {
- return self.symbolic_name.firstToken();
- }
-
- pub fn lastToken(self: &NodeAsmInput) Token {
- return self.expr.lastToken();
- }
-};
-
-pub const NodeAsm = struct {
- base: Node,
- asm_token: Token,
- volatile_token: ?Token,
- template: &Node,
- //tokens: ArrayList(AsmToken),
- outputs: ArrayList(&NodeAsmOutput),
- inputs: ArrayList(&NodeAsmInput),
- cloppers: ArrayList(&Node),
- rparen: Token,
-
- pub fn iterate(self: &NodeAsm, index: usize) ?&Node {
- var i = index;
-
- if (i < self.outputs.len) return &self.outputs.at(index).base;
- i -= self.outputs.len;
-
- if (i < self.inputs.len) return &self.inputs.at(index).base;
- i -= self.inputs.len;
-
- if (i < self.cloppers.len) return self.cloppers.at(index);
- i -= self.cloppers.len;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeAsm) Token {
- return self.asm_token;
- }
-
- pub fn lastToken(self: &NodeAsm) Token {
- return self.rparen;
- }
-};
-
-pub const NodeUnreachable = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeUnreachable, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeUnreachable) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeUnreachable) Token {
- return self.token;
- }
-};
-
-pub const NodeErrorType = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeErrorType, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeErrorType) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeErrorType) Token {
- return self.token;
- }
-};
-
-pub const NodeVarType = struct {
- base: Node,
- token: Token,
-
- pub fn iterate(self: &NodeVarType, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeVarType) Token {
- return self.token;
- }
-
- pub fn lastToken(self: &NodeVarType) Token {
- return self.token;
- }
-};
-
-pub const NodeLineComment = struct {
- base: Node,
- lines: ArrayList(Token),
-
- pub fn iterate(self: &NodeLineComment, index: usize) ?&Node {
- return null;
- }
-
- pub fn firstToken(self: &NodeLineComment) Token {
- return self.lines.at(0);
- }
-
- pub fn lastToken(self: &NodeLineComment) Token {
- return self.lines.at(self.lines.len - 1);
- }
-};
-
-pub const NodeTestDecl = struct {
- base: Node,
- test_token: Token,
- name: &Node,
- body_node: &Node,
-
- pub fn iterate(self: &NodeTestDecl, index: usize) ?&Node {
- var i = index;
-
- if (i < 1) return self.body_node;
- i -= 1;
-
- return null;
- }
-
- pub fn firstToken(self: &NodeTestDecl) Token {
- return self.test_token;
- }
-
- pub fn lastToken(self: &NodeTestDecl) Token {
- return self.body_node.lastToken();
- }
-};
+test "iterate" {
+ var root = Node.Root{
+ .base = Node{ .id = Node.Id.Root },
+ .doc_comments = null,
+ .decls = Node.Root.DeclList.init(std.debug.global_allocator),
+ .eof_token = 0,
+ };
+ var base = &root.base;
+ assert(base.iterate(0) == null);
+}
diff --git a/std/zig/bench.zig b/std/zig/bench.zig
new file mode 100644
index 0000000000..630f6b2233
--- /dev/null
+++ b/std/zig/bench.zig
@@ -0,0 +1,36 @@
+const std = @import("std");
+const mem = std.mem;
+const warn = std.debug.warn;
+const Tokenizer = std.zig.Tokenizer;
+const Parser = std.zig.Parser;
+const io = std.io;
+
+const source = @embedFile("../os/index.zig");
+var fixed_buffer_mem: [10 * 1024 * 1024]u8 = undefined;
+
+pub fn main() !void {
+ var i: usize = 0;
+ var timer = try std.os.time.Timer.start();
+ const start = timer.lap();
+ const iterations = 100;
+ var memory_used: usize = 0;
+ while (i < iterations) : (i += 1) {
+ memory_used += testOnce();
+ }
+ const end = timer.read();
+ memory_used /= iterations;
+ const elapsed_s = @intToFloat(f64, end - start) / std.os.time.ns_per_s;
+ const bytes_per_sec = @intToFloat(f64, source.len * iterations) / elapsed_s;
+ const mb_per_sec = bytes_per_sec / (1024 * 1024);
+
+ var stdout_file = try std.io.getStdOut();
+ const stdout = &std.io.FileOutStream.init(&stdout_file).stream;
+ try stdout.print("{.3} MiB/s, {} KiB used \n", mb_per_sec, memory_used / 1024);
+}
+
+fn testOnce() usize {
+ var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
+ var allocator = &fixed_buf_alloc.allocator;
+ _ = std.zig.parse(allocator, source) catch @panic("parse failure");
+ return fixed_buf_alloc.end_index;
+}
diff --git a/std/zig/index.zig b/std/zig/index.zig
index 32699935d9..da84bc5bb0 100644
--- a/std/zig/index.zig
+++ b/std/zig/index.zig
@@ -1,11 +1,16 @@
const tokenizer = @import("tokenizer.zig");
pub const Token = tokenizer.Token;
pub const Tokenizer = tokenizer.Tokenizer;
-pub const Parser = @import("parser.zig").Parser;
+pub const parse = @import("parse.zig").parse;
+pub const parseStringLiteral = @import("parse_string_literal.zig").parseStringLiteral;
+pub const render = @import("render.zig").render;
pub const ast = @import("ast.zig");
test "std.zig tests" {
- _ = @import("tokenizer.zig");
- _ = @import("parser.zig");
_ = @import("ast.zig");
+ _ = @import("parse.zig");
+ _ = @import("render.zig");
+ _ = @import("tokenizer.zig");
+ _ = @import("parse_string_literal.zig");
}
+
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
new file mode 100644
index 0000000000..fb49d2a2ba
--- /dev/null
+++ b/std/zig/parse.zig
@@ -0,0 +1,3358 @@
+const std = @import("../index.zig");
+const assert = std.debug.assert;
+const mem = std.mem;
+const ast = std.zig.ast;
+const Tokenizer = std.zig.Tokenizer;
+const Token = std.zig.Token;
+const TokenIndex = ast.TokenIndex;
+const Error = ast.Error;
+
+/// Result should be freed with tree.deinit() when there are
+/// no more references to any of the tokens or nodes.
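+///
+/// A minimal usage sketch (hypothetical caller):
+///     var tree = try std.zig.parse(allocator, source);
+///     defer tree.deinit();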
+pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
+ var tree_arena = std.heap.ArenaAllocator.init(allocator);
+ errdefer tree_arena.deinit();
+
+ var stack = std.ArrayList(State).init(allocator);
+ defer stack.deinit();
+
+ const arena = &tree_arena.allocator;
+ const root_node = try arena.create(ast.Node.Root{
+ .base = ast.Node{ .id = ast.Node.Id.Root },
+ .decls = ast.Node.Root.DeclList.init(arena),
+ .doc_comments = null,
+ // initialized when we get the eof token
+ .eof_token = undefined,
+ });
+
+ var tree = ast.Tree{
+ .source = source,
+ .root_node = root_node,
+ .arena_allocator = tree_arena,
+ .tokens = ast.Tree.TokenList.init(arena),
+ .errors = ast.Tree.ErrorList.init(arena),
+ };
+
+ var tokenizer = Tokenizer.init(tree.source);
+ while (true) {
+ const token_ptr = try tree.tokens.addOne();
+ token_ptr.* = tokenizer.next();
+ if (token_ptr.id == Token.Id.Eof) break;
+ }
+ var tok_it = tree.tokens.iterator(0);
+
+ // skip over line comments at the top of the file
+ while (true) {
+ const next_tok = tok_it.peek() orelse break;
+ if (next_tok.id != Token.Id.LineComment) break;
+ _ = tok_it.next();
+ }
+
+ try stack.append(State.TopLevel);
+
+ while (true) {
+        // Popping frees one slot in the stack, so a single append below cannot fail
+        // (hence the "catch unreachable" pushes).
+ const state = stack.pop();
+
+ switch (state) {
+ State.TopLevel => {
+ const comments = try eatDocComments(arena, &tok_it, &tree);
+
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Keyword_test => {
+ stack.append(State.TopLevel) catch unreachable;
+
+ const block = try arena.create(ast.Node.Block{
+ .base = ast.Node{ .id = ast.Node.Id.Block },
+ .label = null,
+ .lbrace = undefined,
+ .statements = ast.Node.Block.StatementList.init(arena),
+ .rbrace = undefined,
+ });
+ const test_node = try arena.create(ast.Node.TestDecl{
+ .base = ast.Node{ .id = ast.Node.Id.TestDecl },
+ .doc_comments = comments,
+ .test_token = token_index,
+ .name = undefined,
+ .body_node = &block.base,
+ });
+ try root_node.decls.push(&test_node.base);
+ try stack.append(State{ .Block = block });
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.LBrace,
+ .ptr = &block.lbrace,
+ },
+ });
+ try stack.append(State{ .StringLiteral = OptionalCtx{ .Required = &test_node.name } });
+ continue;
+ },
+ Token.Id.Eof => {
+ root_node.eof_token = token_index;
+ root_node.doc_comments = comments;
+ return tree;
+ },
+ Token.Id.Keyword_pub => {
+ stack.append(State.TopLevel) catch unreachable;
+ try stack.append(State{
+ .TopLevelExtern = TopLevelDeclCtx{
+ .decls = &root_node.decls,
+ .visib_token = token_index,
+ .extern_export_inline_token = null,
+ .lib_name = null,
+ .comments = comments,
+ },
+ });
+ continue;
+ },
+ Token.Id.Keyword_comptime => {
+ const block = try arena.create(ast.Node.Block{
+ .base = ast.Node{ .id = ast.Node.Id.Block },
+ .label = null,
+ .lbrace = undefined,
+ .statements = ast.Node.Block.StatementList.init(arena),
+ .rbrace = undefined,
+ });
+ const node = try arena.create(ast.Node.Comptime{
+ .base = ast.Node{ .id = ast.Node.Id.Comptime },
+ .comptime_token = token_index,
+ .expr = &block.base,
+ .doc_comments = comments,
+ });
+ try root_node.decls.push(&node.base);
+
+ stack.append(State.TopLevel) catch unreachable;
+ try stack.append(State{ .Block = block });
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.LBrace,
+ .ptr = &block.lbrace,
+ },
+ });
+ continue;
+ },
+ else => {
+ prevToken(&tok_it, &tree);
+ stack.append(State.TopLevel) catch unreachable;
+ try stack.append(State{
+ .TopLevelExtern = TopLevelDeclCtx{
+ .decls = &root_node.decls,
+ .visib_token = null,
+ .extern_export_inline_token = null,
+ .lib_name = null,
+ .comments = comments,
+ },
+ });
+ continue;
+ },
+ }
+ },
+ State.TopLevelExtern => |ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Keyword_export, Token.Id.Keyword_inline => {
+ stack.append(State{
+ .TopLevelDecl = TopLevelDeclCtx{
+ .decls = ctx.decls,
+ .visib_token = ctx.visib_token,
+ .extern_export_inline_token = AnnotatedToken{
+ .index = token_index,
+ .ptr = token_ptr,
+ },
+ .lib_name = null,
+ .comments = ctx.comments,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_extern => {
+ stack.append(State{
+ .TopLevelLibname = TopLevelDeclCtx{
+ .decls = ctx.decls,
+ .visib_token = ctx.visib_token,
+ .extern_export_inline_token = AnnotatedToken{
+ .index = token_index,
+ .ptr = token_ptr,
+ },
+ .lib_name = null,
+ .comments = ctx.comments,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ else => {
+ prevToken(&tok_it, &tree);
+ stack.append(State{ .TopLevelDecl = ctx }) catch unreachable;
+ continue;
+ },
+ }
+ },
+ State.TopLevelLibname => |ctx| {
+ const lib_name = blk: {
+ const lib_name_token = nextToken(&tok_it, &tree);
+ const lib_name_token_index = lib_name_token.index;
+ const lib_name_token_ptr = lib_name_token.ptr;
+ break :blk (try parseStringLiteral(arena, &tok_it, lib_name_token_ptr, lib_name_token_index, &tree)) orelse {
+ prevToken(&tok_it, &tree);
+ break :blk null;
+ };
+ };
+
+ stack.append(State{
+ .TopLevelDecl = TopLevelDeclCtx{
+ .decls = ctx.decls,
+ .visib_token = ctx.visib_token,
+ .extern_export_inline_token = ctx.extern_export_inline_token,
+ .lib_name = lib_name,
+ .comments = ctx.comments,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ State.TopLevelDecl => |ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Keyword_use => {
+ if (ctx.extern_export_inline_token) |annotated_token| {
+ ((try tree.errors.addOne())).* = Error{ .InvalidToken = Error.InvalidToken{ .token = annotated_token.index } };
+ return tree;
+ }
+
+ const node = try arena.create(ast.Node.Use{
+ .base = ast.Node{ .id = ast.Node.Id.Use },
+ .use_token = token_index,
+ .visib_token = ctx.visib_token,
+ .expr = undefined,
+ .semicolon_token = undefined,
+ .doc_comments = ctx.comments,
+ });
+ try ctx.decls.push(&node.base);
+
+ stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.Semicolon,
+ .ptr = &node.semicolon_token,
+ },
+ }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.expr } });
+ continue;
+ },
+ Token.Id.Keyword_var, Token.Id.Keyword_const => {
+ if (ctx.extern_export_inline_token) |annotated_token| {
+ if (annotated_token.ptr.id == Token.Id.Keyword_inline) {
+ ((try tree.errors.addOne())).* = Error{ .InvalidToken = Error.InvalidToken{ .token = annotated_token.index } };
+ return tree;
+ }
+ }
+
+ try stack.append(State{
+ .VarDecl = VarDeclCtx{
+ .comments = ctx.comments,
+ .visib_token = ctx.visib_token,
+ .lib_name = ctx.lib_name,
+ .comptime_token = null,
+ .extern_export_token = if (ctx.extern_export_inline_token) |at| at.index else null,
+ .mut_token = token_index,
+ .list = ctx.decls,
+ },
+ });
+ continue;
+ },
+ Token.Id.Keyword_fn, Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc, Token.Id.Keyword_async => {
+ const fn_proto = try arena.create(ast.Node.FnProto{
+ .base = ast.Node{ .id = ast.Node.Id.FnProto },
+ .doc_comments = ctx.comments,
+ .visib_token = ctx.visib_token,
+ .name_token = null,
+ .fn_token = undefined,
+ .params = ast.Node.FnProto.ParamList.init(arena),
+ .return_type = undefined,
+ .var_args_token = null,
+ .extern_export_inline_token = if (ctx.extern_export_inline_token) |at| at.index else null,
+ .cc_token = null,
+ .async_attr = null,
+ .body_node = null,
+ .lib_name = ctx.lib_name,
+ .align_expr = null,
+ });
+ try ctx.decls.push(&fn_proto.base);
+ stack.append(State{ .FnDef = fn_proto }) catch unreachable;
+ try stack.append(State{ .FnProto = fn_proto });
+
+ switch (token_ptr.id) {
+ Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc => {
+ fn_proto.cc_token = token_index;
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.Keyword_fn,
+ .ptr = &fn_proto.fn_token,
+ },
+ });
+ continue;
+ },
+ Token.Id.Keyword_async => {
+ const async_node = try arena.create(ast.Node.AsyncAttribute{
+ .base = ast.Node{ .id = ast.Node.Id.AsyncAttribute },
+ .async_token = token_index,
+ .allocator_type = null,
+ .rangle_bracket = null,
+ });
+ fn_proto.async_attr = async_node;
+
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.Keyword_fn,
+ .ptr = &fn_proto.fn_token,
+ },
+ });
+ try stack.append(State{ .AsyncAllocator = async_node });
+ continue;
+ },
+ Token.Id.Keyword_fn => {
+ fn_proto.fn_token = token_index;
+ continue;
+ },
+ else => unreachable,
+ }
+ },
+ else => {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedVarDeclOrFn = Error.ExpectedVarDeclOrFn{ .token = token_index } };
+ return tree;
+ },
+ }
+ },
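+ // Inside a struct, pub introduces either a field (when an identifier follows) or a nested declaration.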
+ State.TopLevelExternOrField => |ctx| {
+ if (eatToken(&tok_it, &tree, Token.Id.Identifier)) |identifier| {
+ const node = try arena.create(ast.Node.StructField{
+ .base = ast.Node{ .id = ast.Node.Id.StructField },
+ .doc_comments = ctx.comments,
+ .visib_token = ctx.visib_token,
+ .name_token = identifier,
+ .type_expr = undefined,
+ });
+ const node_ptr = try ctx.container_decl.fields_and_decls.addOne();
+ node_ptr.* = &node.base;
+
+ stack.append(State{ .FieldListCommaOrEnd = ctx.container_decl }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.type_expr } });
+ try stack.append(State{ .ExpectToken = Token.Id.Colon });
+ continue;
+ }
+
+ stack.append(State{ .ContainerDecl = ctx.container_decl }) catch unreachable;
+ try stack.append(State{
+ .TopLevelExtern = TopLevelDeclCtx{
+ .decls = &ctx.container_decl.fields_and_decls,
+ .visib_token = ctx.visib_token,
+ .extern_export_inline_token = null,
+ .lib_name = null,
+ .comments = ctx.comments,
+ },
+ });
+ continue;
+ },
+
+ State.FieldInitValue => |ctx| {
+ const eq_tok = nextToken(&tok_it, &tree);
+ const eq_tok_index = eq_tok.index;
+ const eq_tok_ptr = eq_tok.ptr;
+ if (eq_tok_ptr.id != Token.Id.Equal) {
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+ stack.append(State{ .Expression = ctx }) catch unreachable;
+ continue;
+ },
+
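+ // Expect struct/union/enum, then an optional (init-arg) and a brace-delimited list of fields and declarations.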
+ State.ContainerKind => |ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ const node = try arena.create(ast.Node.ContainerDecl{
+ .base = ast.Node{ .id = ast.Node.Id.ContainerDecl },
+ .layout_token = ctx.layout_token,
+ .kind_token = switch (token_ptr.id) {
+ Token.Id.Keyword_struct, Token.Id.Keyword_union, Token.Id.Keyword_enum => token_index,
+ else => {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedAggregateKw = Error.ExpectedAggregateKw{ .token = token_index } };
+ return tree;
+ },
+ },
+ .init_arg_expr = ast.Node.ContainerDecl.InitArg.None,
+ .fields_and_decls = ast.Node.ContainerDecl.DeclList.init(arena),
+ .lbrace_token = undefined,
+ .rbrace_token = undefined,
+ });
+ ctx.opt_ctx.store(&node.base);
+
+ stack.append(State{ .ContainerDecl = node }) catch unreachable;
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.LBrace,
+ .ptr = &node.lbrace_token,
+ },
+ });
+ try stack.append(State{ .ContainerInitArgStart = node });
+ continue;
+ },
+
+ State.ContainerInitArgStart => |container_decl| {
+ if (eatToken(&tok_it, &tree, Token.Id.LParen) == null) {
+ continue;
+ }
+
+ stack.append(State{ .ExpectToken = Token.Id.RParen }) catch unreachable;
+ try stack.append(State{ .ContainerInitArg = container_decl });
+ continue;
+ },
+
+ State.ContainerInitArg => |container_decl| {
+ const init_arg_token = nextToken(&tok_it, &tree);
+ const init_arg_token_index = init_arg_token.index;
+ const init_arg_token_ptr = init_arg_token.ptr;
+ switch (init_arg_token_ptr.id) {
+ Token.Id.Keyword_enum => {
+ container_decl.init_arg_expr = ast.Node.ContainerDecl.InitArg{ .Enum = null };
+ const lparen_tok = nextToken(&tok_it, &tree);
+ const lparen_tok_index = lparen_tok.index;
+ const lparen_tok_ptr = lparen_tok.ptr;
+ if (lparen_tok_ptr.id == Token.Id.LParen) {
+ try stack.append(State{ .ExpectToken = Token.Id.RParen });
+ try stack.append(State{ .Expression = OptionalCtx{ .RequiredNull = &container_decl.init_arg_expr.Enum } });
+ } else {
+ prevToken(&tok_it, &tree);
+ }
+ },
+ else => {
+ prevToken(&tok_it, &tree);
+ container_decl.init_arg_expr = ast.Node.ContainerDecl.InitArg{ .Type = undefined };
+ stack.append(State{ .Expression = OptionalCtx{ .Required = &container_decl.init_arg_expr.Type } }) catch unreachable;
+ },
+ }
+ continue;
+ },
+
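+ // Container members: a field (shape depends on struct/union/enum), a pub/export declaration, the closing brace, or a plain declaration.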
+ State.ContainerDecl => |container_decl| {
+ const comments = try eatDocComments(arena, &tok_it, &tree);
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Identifier => {
+ switch (tree.tokens.at(container_decl.kind_token).id) {
+ Token.Id.Keyword_struct => {
+ const node = try arena.create(ast.Node.StructField{
+ .base = ast.Node{ .id = ast.Node.Id.StructField },
+ .doc_comments = comments,
+ .visib_token = null,
+ .name_token = token_index,
+ .type_expr = undefined,
+ });
+ const node_ptr = try container_decl.fields_and_decls.addOne();
+ node_ptr.* = &node.base;
+
+ try stack.append(State{ .FieldListCommaOrEnd = container_decl });
+ try stack.append(State{ .TypeExprBegin = OptionalCtx{ .Required = &node.type_expr } });
+ try stack.append(State{ .ExpectToken = Token.Id.Colon });
+ continue;
+ },
+ Token.Id.Keyword_union => {
+ const node = try arena.create(ast.Node.UnionTag{
+ .base = ast.Node{ .id = ast.Node.Id.UnionTag },
+ .name_token = token_index,
+ .type_expr = null,
+ .value_expr = null,
+ .doc_comments = comments,
+ });
+ try container_decl.fields_and_decls.push(&node.base);
+
+ stack.append(State{ .FieldListCommaOrEnd = container_decl }) catch unreachable;
+ try stack.append(State{ .FieldInitValue = OptionalCtx{ .RequiredNull = &node.value_expr } });
+ try stack.append(State{ .TypeExprBegin = OptionalCtx{ .RequiredNull = &node.type_expr } });
+ try stack.append(State{ .IfToken = Token.Id.Colon });
+ continue;
+ },
+ Token.Id.Keyword_enum => {
+ const node = try arena.create(ast.Node.EnumTag{
+ .base = ast.Node{ .id = ast.Node.Id.EnumTag },
+ .name_token = token_index,
+ .value = null,
+ .doc_comments = comments,
+ });
+ try container_decl.fields_and_decls.push(&node.base);
+
+ stack.append(State{ .FieldListCommaOrEnd = container_decl }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .RequiredNull = &node.value } });
+ try stack.append(State{ .IfToken = Token.Id.Equal });
+ continue;
+ },
+ else => unreachable,
+ }
+ },
+ Token.Id.Keyword_pub => {
+ switch (tree.tokens.at(container_decl.kind_token).id) {
+ Token.Id.Keyword_struct => {
+ try stack.append(State{
+ .TopLevelExternOrField = TopLevelExternOrFieldCtx{
+ .visib_token = token_index,
+ .container_decl = container_decl,
+ .comments = comments,
+ },
+ });
+ continue;
+ },
+ else => {
+ stack.append(State{ .ContainerDecl = container_decl }) catch unreachable;
+ try stack.append(State{
+ .TopLevelExtern = TopLevelDeclCtx{
+ .decls = &container_decl.fields_and_decls,
+ .visib_token = token_index,
+ .extern_export_inline_token = null,
+ .lib_name = null,
+ .comments = comments,
+ },
+ });
+ continue;
+ },
+ }
+ },
+ Token.Id.Keyword_export => {
+ stack.append(State{ .ContainerDecl = container_decl }) catch unreachable;
+ try stack.append(State{
+ .TopLevelExtern = TopLevelDeclCtx{
+ .decls = &container_decl.fields_and_decls,
+ .visib_token = token_index,
+ .extern_export_inline_token = null,
+ .lib_name = null,
+ .comments = comments,
+ },
+ });
+ continue;
+ },
+ Token.Id.RBrace => {
+ if (comments != null) {
+ ((try tree.errors.addOne())).* = Error{ .UnattachedDocComment = Error.UnattachedDocComment{ .token = token_index } };
+ return tree;
+ }
+ container_decl.rbrace_token = token_index;
+ continue;
+ },
+ else => {
+ prevToken(&tok_it, &tree);
+ stack.append(State{ .ContainerDecl = container_decl }) catch unreachable;
+ try stack.append(State{
+ .TopLevelExtern = TopLevelDeclCtx{
+ .decls = &container_decl.fields_and_decls,
+ .visib_token = null,
+ .extern_export_inline_token = null,
+ .lib_name = null,
+ .comments = comments,
+ },
+ });
+ continue;
+ },
+ }
+ },
+
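+ // A var/const declaration: name, optional ": type", optional align(...), optional "= init", then a semicolon.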
+ State.VarDecl => |ctx| {
+ const var_decl = try arena.create(ast.Node.VarDecl{
+ .base = ast.Node{ .id = ast.Node.Id.VarDecl },
+ .doc_comments = ctx.comments,
+ .visib_token = ctx.visib_token,
+ .mut_token = ctx.mut_token,
+ .comptime_token = ctx.comptime_token,
+ .extern_export_token = ctx.extern_export_token,
+ .type_node = null,
+ .align_node = null,
+ .init_node = null,
+ .lib_name = ctx.lib_name,
+ // initialized later
+ .name_token = undefined,
+ .eq_token = undefined,
+ .semicolon_token = undefined,
+ });
+ try ctx.list.push(&var_decl.base);
+
+ try stack.append(State{ .VarDeclAlign = var_decl });
+ try stack.append(State{ .TypeExprBegin = OptionalCtx{ .RequiredNull = &var_decl.type_node } });
+ try stack.append(State{ .IfToken = Token.Id.Colon });
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.Identifier,
+ .ptr = &var_decl.name_token,
+ },
+ });
+ continue;
+ },
+ State.VarDeclAlign => |var_decl| {
+ try stack.append(State{ .VarDeclEq = var_decl });
+
+ const next_token = nextToken(&tok_it, &tree);
+ const next_token_index = next_token.index;
+ const next_token_ptr = next_token.ptr;
+ if (next_token_ptr.id == Token.Id.Keyword_align) {
+ try stack.append(State{ .ExpectToken = Token.Id.RParen });
+ try stack.append(State{ .Expression = OptionalCtx{ .RequiredNull = &var_decl.align_node } });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ continue;
+ }
+
+ prevToken(&tok_it, &tree);
+ continue;
+ },
+ State.VarDeclEq => |var_decl| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Equal => {
+ var_decl.eq_token = token_index;
+ stack.append(State{ .VarDeclSemiColon = var_decl }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .RequiredNull = &var_decl.init_node } });
+ continue;
+ },
+ Token.Id.Semicolon => {
+ var_decl.semicolon_token = token_index;
+ continue;
+ },
+ else => {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedEqOrSemi = Error.ExpectedEqOrSemi{ .token = token_index } };
+ return tree;
+ },
+ }
+ },
+
+ State.VarDeclSemiColon => |var_decl| {
+ const semicolon_token = nextToken(&tok_it, &tree);
+
+ if (semicolon_token.ptr.id != Token.Id.Semicolon) {
+ ((try tree.errors.addOne())).* = Error{
+ .ExpectedToken = Error.ExpectedToken{
+ .token = semicolon_token.index,
+ .expected_id = Token.Id.Semicolon,
+ },
+ };
+ return tree;
+ }
+
+ var_decl.semicolon_token = semicolon_token.index;
+
+ if (eatToken(&tok_it, &tree, Token.Id.DocComment)) |doc_comment_token| {
+ const loc = tree.tokenLocation(semicolon_token.ptr.end, doc_comment_token);
+ if (loc.line == 0) {
+ try pushDocComment(arena, doc_comment_token, &var_decl.doc_comments);
+ } else {
+ prevToken(&tok_it, &tree);
+ }
+ }
+ },
+
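+ // After the prototype, a function either gets a block body or is terminated by a semicolon.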
+ State.FnDef => |fn_proto| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.LBrace => {
+ const block = try arena.create(ast.Node.Block{
+ .base = ast.Node{ .id = ast.Node.Id.Block },
+ .label = null,
+ .lbrace = token_index,
+ .statements = ast.Node.Block.StatementList.init(arena),
+ .rbrace = undefined,
+ });
+ fn_proto.body_node = &block.base;
+ stack.append(State{ .Block = block }) catch unreachable;
+ continue;
+ },
+ Token.Id.Semicolon => continue,
+ else => {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedSemiOrLBrace = Error.ExpectedSemiOrLBrace{ .token = token_index } };
+ return tree;
+ },
+ }
+ },
+ State.FnProto => |fn_proto| {
+ stack.append(State{ .FnProtoAlign = fn_proto }) catch unreachable;
+ try stack.append(State{ .ParamDecl = fn_proto });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+
+ if (eatToken(&tok_it, &tree, Token.Id.Identifier)) |name_token| {
+ fn_proto.name_token = name_token;
+ }
+ continue;
+ },
+ State.FnProtoAlign => |fn_proto| {
+ stack.append(State{ .FnProtoReturnType = fn_proto }) catch unreachable;
+
+ if (eatToken(&tok_it, &tree, Token.Id.Keyword_align)) |align_token| {
+ try stack.append(State{ .ExpectToken = Token.Id.RParen });
+ try stack.append(State{ .Expression = OptionalCtx{ .RequiredNull = &fn_proto.align_expr } });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ }
+ continue;
+ },
+ State.FnProtoReturnType => |fn_proto| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Bang => {
+ fn_proto.return_type = ast.Node.FnProto.ReturnType{ .InferErrorSet = undefined };
+ stack.append(State{ .TypeExprBegin = OptionalCtx{ .Required = &fn_proto.return_type.InferErrorSet } }) catch unreachable;
+ continue;
+ },
+ else => {
+ // TODO: this is a special case. Remove this when #760 is fixed
+ if (token_ptr.id == Token.Id.Keyword_error) {
+ if (tok_it.peek().?.id == Token.Id.LBrace) {
+ const error_type_node = try arena.create(ast.Node.ErrorType{
+ .base = ast.Node{ .id = ast.Node.Id.ErrorType },
+ .token = token_index,
+ });
+ fn_proto.return_type = ast.Node.FnProto.ReturnType{ .Explicit = &error_type_node.base };
+ continue;
+ }
+ }
+
+ prevToken(&tok_it, &tree);
+ fn_proto.return_type = ast.Node.FnProto.ReturnType{ .Explicit = undefined };
+ stack.append(State{ .TypeExprBegin = OptionalCtx{ .Required = &fn_proto.return_type.Explicit } }) catch unreachable;
+ continue;
+ },
+ }
+ },
+
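+ // Parameter list: each parameter may start with comptime or noalias, may be named, and is either a type or "..." for varargs.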
+ State.ParamDecl => |fn_proto| {
+ if (eatToken(&tok_it, &tree, Token.Id.RParen)) |_| {
+ continue;
+ }
+ const param_decl = try arena.create(ast.Node.ParamDecl{
+ .base = ast.Node{ .id = ast.Node.Id.ParamDecl },
+ .comptime_token = null,
+ .noalias_token = null,
+ .name_token = null,
+ .type_node = undefined,
+ .var_args_token = null,
+ });
+ try fn_proto.params.push(&param_decl.base);
+
+ stack.append(State{
+ .ParamDeclEnd = ParamDeclEndCtx{
+ .param_decl = param_decl,
+ .fn_proto = fn_proto,
+ },
+ }) catch unreachable;
+ try stack.append(State{ .ParamDeclName = param_decl });
+ try stack.append(State{ .ParamDeclAliasOrComptime = param_decl });
+ continue;
+ },
+ State.ParamDeclAliasOrComptime => |param_decl| {
+ if (eatToken(&tok_it, &tree, Token.Id.Keyword_comptime)) |comptime_token| {
+ param_decl.comptime_token = comptime_token;
+ } else if (eatToken(&tok_it, &tree, Token.Id.Keyword_noalias)) |noalias_token| {
+ param_decl.noalias_token = noalias_token;
+ }
+ continue;
+ },
+ State.ParamDeclName => |param_decl| {
+ // TODO: Here, we eat two tokens in one state. This means that we can't have
+ // comments between these two tokens.
+ if (eatToken(&tok_it, &tree, Token.Id.Identifier)) |ident_token| {
+ if (eatToken(&tok_it, &tree, Token.Id.Colon)) |_| {
+ param_decl.name_token = ident_token;
+ } else {
+ prevToken(&tok_it, &tree);
+ }
+ }
+ continue;
+ },
+ State.ParamDeclEnd => |ctx| {
+ if (eatToken(&tok_it, &tree, Token.Id.Ellipsis3)) |ellipsis3| {
+ ctx.param_decl.var_args_token = ellipsis3;
+ stack.append(State{ .ExpectToken = Token.Id.RParen }) catch unreachable;
+ continue;
+ }
+
+ try stack.append(State{ .ParamDeclComma = ctx.fn_proto });
+ try stack.append(State{ .TypeExprBegin = OptionalCtx{ .Required = &ctx.param_decl.type_node } });
+ continue;
+ },
+ State.ParamDeclComma => |fn_proto| {
+ switch (expectCommaOrEnd(&tok_it, &tree, Token.Id.RParen)) {
+ ExpectCommaOrEndResult.end_token => |t| {
+ if (t == null) {
+ stack.append(State{ .ParamDecl = fn_proto }) catch unreachable;
+ }
+ continue;
+ },
+ ExpectCommaOrEndResult.parse_error => |e| {
+ try tree.errors.push(e);
+ return tree;
+ },
+ }
+ },
+
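+ // A label may prefix a block, while, for, or inline loop; otherwise the identifier is treated as a plain identifier expression.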
+ State.MaybeLabeledExpression => |ctx| {
+ if (eatToken(&tok_it, &tree, Token.Id.Colon)) |_| {
+ stack.append(State{
+ .LabeledExpression = LabelCtx{
+ .label = ctx.label,
+ .opt_ctx = ctx.opt_ctx,
+ },
+ }) catch unreachable;
+ continue;
+ }
+
+ _ = try createToCtxLiteral(arena, ctx.opt_ctx, ast.Node.Identifier, ctx.label);
+ continue;
+ },
+ State.LabeledExpression => |ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.LBrace => {
+ const block = try arena.create(ast.Node.Block{
+ .base = ast.Node{ .id = ast.Node.Id.Block },
+ .label = ctx.label,
+ .lbrace = token_index,
+ .statements = ast.Node.Block.StatementList.init(arena),
+ .rbrace = undefined,
+ });
+ ctx.opt_ctx.store(&block.base);
+ stack.append(State{ .Block = block }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_while => {
+ stack.append(State{
+ .While = LoopCtx{
+ .label = ctx.label,
+ .inline_token = null,
+ .loop_token = token_index,
+ .opt_ctx = ctx.opt_ctx.toRequired(),
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_for => {
+ stack.append(State{
+ .For = LoopCtx{
+ .label = ctx.label,
+ .inline_token = null,
+ .loop_token = token_index,
+ .opt_ctx = ctx.opt_ctx.toRequired(),
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_inline => {
+ stack.append(State{
+ .Inline = InlineCtx{
+ .label = ctx.label,
+ .inline_token = token_index,
+ .opt_ctx = ctx.opt_ctx.toRequired(),
+ },
+ }) catch unreachable;
+ continue;
+ },
+ else => {
+ if (ctx.opt_ctx != OptionalCtx.Optional) {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedLabelable = Error.ExpectedLabelable{ .token = token_index } };
+ return tree;
+ }
+
+ prevToken(&tok_it, &tree);
+ continue;
+ },
+ }
+ },
+ State.Inline => |ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Keyword_while => {
+ stack.append(State{
+ .While = LoopCtx{
+ .inline_token = ctx.inline_token,
+ .label = ctx.label,
+ .loop_token = token_index,
+ .opt_ctx = ctx.opt_ctx.toRequired(),
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_for => {
+ stack.append(State{
+ .For = LoopCtx{
+ .inline_token = ctx.inline_token,
+ .label = ctx.label,
+ .loop_token = token_index,
+ .opt_ctx = ctx.opt_ctx.toRequired(),
+ },
+ }) catch unreachable;
+ continue;
+ },
+ else => {
+ if (ctx.opt_ctx != OptionalCtx.Optional) {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedInlinable = Error.ExpectedInlinable{ .token = token_index } };
+ return tree;
+ }
+
+ prevToken(&tok_it, &tree);
+ continue;
+ },
+ }
+ },
+ State.While => |ctx| {
+ const node = try arena.create(ast.Node.While{
+ .base = ast.Node{ .id = ast.Node.Id.While },
+ .label = ctx.label,
+ .inline_token = ctx.inline_token,
+ .while_token = ctx.loop_token,
+ .condition = undefined,
+ .payload = null,
+ .continue_expr = null,
+ .body = undefined,
+ .@"else" = null,
+ });
+ ctx.opt_ctx.store(&node.base);
+ stack.append(State{ .Else = &node.@"else" }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.body } });
+ try stack.append(State{ .WhileContinueExpr = &node.continue_expr });
+ try stack.append(State{ .IfToken = Token.Id.Colon });
+ try stack.append(State{ .PointerPayload = OptionalCtx{ .Optional = &node.payload } });
+ try stack.append(State{ .ExpectToken = Token.Id.RParen });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.condition } });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ continue;
+ },
+ State.WhileContinueExpr => |dest| {
+ stack.append(State{ .ExpectToken = Token.Id.RParen }) catch unreachable;
+ try stack.append(State{ .AssignmentExpressionBegin = OptionalCtx{ .RequiredNull = dest } });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ continue;
+ },
+ State.For => |ctx| {
+ const node = try arena.create(ast.Node.For{
+ .base = ast.Node{ .id = ast.Node.Id.For },
+ .label = ctx.label,
+ .inline_token = ctx.inline_token,
+ .for_token = ctx.loop_token,
+ .array_expr = undefined,
+ .payload = null,
+ .body = undefined,
+ .@"else" = null,
+ });
+ ctx.opt_ctx.store(&node.base);
+ stack.append(State{ .Else = &node.@"else" }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.body } });
+ try stack.append(State{ .PointerIndexPayload = OptionalCtx{ .Optional = &node.payload } });
+ try stack.append(State{ .ExpectToken = Token.Id.RParen });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.array_expr } });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ continue;
+ },
+ State.Else => |dest| {
+ if (eatToken(&tok_it, &tree, Token.Id.Keyword_else)) |else_token| {
+ const node = try arena.create(ast.Node.Else{
+ .base = ast.Node{ .id = ast.Node.Id.Else },
+ .else_token = else_token,
+ .payload = null,
+ .body = undefined,
+ });
+ dest.* = node;
+
+ stack.append(State{ .Expression = OptionalCtx{ .Required = &node.body } }) catch unreachable;
+ try stack.append(State{ .Payload = OptionalCtx{ .Optional = &node.payload } });
+ continue;
+ } else {
+ continue;
+ }
+ },
+
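+ // A block collects statements until the closing brace.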
+ State.Block => |block| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.RBrace => {
+ block.rbrace = token_index;
+ continue;
+ },
+ else => {
+ prevToken(&tok_it, &tree);
+ stack.append(State{ .Block = block }) catch unreachable;
+
+ try stack.append(State{ .Statement = block });
+ continue;
+ },
+ }
+ },
+ State.Statement => |block| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Keyword_comptime => {
+ stack.append(State{
+ .ComptimeStatement = ComptimeStatementCtx{
+ .comptime_token = token_index,
+ .block = block,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_var, Token.Id.Keyword_const => {
+ stack.append(State{
+ .VarDecl = VarDeclCtx{
+ .comments = null,
+ .visib_token = null,
+ .comptime_token = null,
+ .extern_export_token = null,
+ .lib_name = null,
+ .mut_token = token_index,
+ .list = &block.statements,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_defer, Token.Id.Keyword_errdefer => {
+ const node = try arena.create(ast.Node.Defer{
+ .base = ast.Node{ .id = ast.Node.Id.Defer },
+ .defer_token = token_index,
+ .expr = undefined,
+ });
+ const node_ptr = try block.statements.addOne();
+ node_ptr.* = &node.base;
+
+ stack.append(State{ .Semicolon = node_ptr }) catch unreachable;
+ try stack.append(State{ .AssignmentExpressionBegin = OptionalCtx{ .Required = &node.expr } });
+ continue;
+ },
+ Token.Id.LBrace => {
+ const inner_block = try arena.create(ast.Node.Block{
+ .base = ast.Node{ .id = ast.Node.Id.Block },
+ .label = null,
+ .lbrace = token_index,
+ .statements = ast.Node.Block.StatementList.init(arena),
+ .rbrace = undefined,
+ });
+ try block.statements.push(&inner_block.base);
+
+ stack.append(State{ .Block = inner_block }) catch unreachable;
+ continue;
+ },
+ else => {
+ prevToken(&tok_it, &tree);
+ const statement = try block.statements.addOne();
+ try stack.append(State{ .Semicolon = statement });
+ try stack.append(State{ .AssignmentExpressionBegin = OptionalCtx{ .Required = statement } });
+ continue;
+ },
+ }
+ },
+ State.ComptimeStatement => |ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Keyword_var, Token.Id.Keyword_const => {
+ stack.append(State{
+ .VarDecl = VarDeclCtx{
+ .comments = null,
+ .visib_token = null,
+ .comptime_token = ctx.comptime_token,
+ .extern_export_token = null,
+ .lib_name = null,
+ .mut_token = token_index,
+ .list = &ctx.block.statements,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ else => {
+ prevToken(&tok_it, &tree);
+ prevToken(&tok_it, &tree);
+ const statement = try ctx.block.statements.addOne();
+ try stack.append(State{ .Semicolon = statement });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = statement } });
+ continue;
+ },
+ }
+ },
+ State.Semicolon => |node_ptr| {
+ const node = node_ptr.*;
+ if (node.requireSemiColon()) {
+ stack.append(State{ .ExpectToken = Token.Id.Semicolon }) catch unreachable;
+ continue;
+ }
+ continue;
+ },
+
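+ // Inline assembly operands: each item is [symbolic_name] "constraint" (value), comma separated; outputs may use "-> type" instead of a variable.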
+ State.AsmOutputItems => |items| {
+ const lbracket = nextToken(&tok_it, &tree);
+ const lbracket_index = lbracket.index;
+ const lbracket_ptr = lbracket.ptr;
+ if (lbracket_ptr.id != Token.Id.LBracket) {
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+
+ const node = try arena.create(ast.Node.AsmOutput{
+ .base = ast.Node{ .id = ast.Node.Id.AsmOutput },
+ .lbracket = lbracket_index,
+ .symbolic_name = undefined,
+ .constraint = undefined,
+ .kind = undefined,
+ .rparen = undefined,
+ });
+ try items.push(node);
+
+ stack.append(State{ .AsmOutputItems = items }) catch unreachable;
+ try stack.append(State{ .IfToken = Token.Id.Comma });
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.RParen,
+ .ptr = &node.rparen,
+ },
+ });
+ try stack.append(State{ .AsmOutputReturnOrType = node });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ try stack.append(State{ .StringLiteral = OptionalCtx{ .Required = &node.constraint } });
+ try stack.append(State{ .ExpectToken = Token.Id.RBracket });
+ try stack.append(State{ .Identifier = OptionalCtx{ .Required = &node.symbolic_name } });
+ continue;
+ },
+ State.AsmOutputReturnOrType => |node| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Identifier => {
+ node.kind = ast.Node.AsmOutput.Kind{ .Variable = try createLiteral(arena, ast.Node.Identifier, token_index) };
+ continue;
+ },
+ Token.Id.Arrow => {
+ node.kind = ast.Node.AsmOutput.Kind{ .Return = undefined };
+ try stack.append(State{ .TypeExprBegin = OptionalCtx{ .Required = &node.kind.Return } });
+ continue;
+ },
+ else => {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedAsmOutputReturnOrType = Error.ExpectedAsmOutputReturnOrType{ .token = token_index } };
+ return tree;
+ },
+ }
+ },
+ State.AsmInputItems => |items| {
+ const lbracket = nextToken(&tok_it, &tree);
+ const lbracket_index = lbracket.index;
+ const lbracket_ptr = lbracket.ptr;
+ if (lbracket_ptr.id != Token.Id.LBracket) {
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+
+ const node = try arena.create(ast.Node.AsmInput{
+ .base = ast.Node{ .id = ast.Node.Id.AsmInput },
+ .lbracket = lbracket_index,
+ .symbolic_name = undefined,
+ .constraint = undefined,
+ .expr = undefined,
+ .rparen = undefined,
+ });
+ try items.push(node);
+
+ stack.append(State{ .AsmInputItems = items }) catch unreachable;
+ try stack.append(State{ .IfToken = Token.Id.Comma });
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.RParen,
+ .ptr = &node.rparen,
+ },
+ });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.expr } });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ try stack.append(State{ .StringLiteral = OptionalCtx{ .Required = &node.constraint } });
+ try stack.append(State{ .ExpectToken = Token.Id.RBracket });
+ try stack.append(State{ .Identifier = OptionalCtx{ .Required = &node.symbolic_name } });
+ continue;
+ },
+ State.AsmClobberItems => |items| {
+ while (eatToken(&tok_it, &tree, Token.Id.StringLiteral)) |strlit| {
+ try items.push(strlit);
+ if (eatToken(&tok_it, &tree, Token.Id.Comma) == null)
+ break;
+ }
+ continue;
+ },
+
+ State.ExprListItemOrEnd => |list_state| {
+ if (eatToken(&tok_it, &tree, list_state.end)) |token_index| {
+ (list_state.ptr).* = token_index;
+ continue;
+ }
+
+ stack.append(State{ .ExprListCommaOrEnd = list_state }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = try list_state.list.addOne() } });
+ continue;
+ },
+ State.ExprListCommaOrEnd => |list_state| {
+ switch (expectCommaOrEnd(&tok_it, &tree, list_state.end)) {
+ ExpectCommaOrEndResult.end_token => |maybe_end| if (maybe_end) |end| {
+ (list_state.ptr).* = end;
+ continue;
+ } else {
+ stack.append(State{ .ExprListItemOrEnd = list_state }) catch unreachable;
+ continue;
+ },
+ ExpectCommaOrEndResult.parse_error => |e| {
+ try tree.errors.push(e);
+ return tree;
+ },
+ }
+ },
+ State.FieldInitListItemOrEnd => |list_state| {
+ if (eatToken(&tok_it, &tree, Token.Id.RBrace)) |rbrace| {
+ (list_state.ptr).* = rbrace;
+ continue;
+ }
+
+ const node = try arena.create(ast.Node.FieldInitializer{
+ .base = ast.Node{ .id = ast.Node.Id.FieldInitializer },
+ .period_token = undefined,
+ .name_token = undefined,
+ .expr = undefined,
+ });
+ try list_state.list.push(&node.base);
+
+ stack.append(State{ .FieldInitListCommaOrEnd = list_state }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.expr } });
+ try stack.append(State{ .ExpectToken = Token.Id.Equal });
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.Identifier,
+ .ptr = &node.name_token,
+ },
+ });
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.Period,
+ .ptr = &node.period_token,
+ },
+ });
+ continue;
+ },
+ State.FieldInitListCommaOrEnd => |list_state| {
+ switch (expectCommaOrEnd(&tok_it, &tree, Token.Id.RBrace)) {
+ ExpectCommaOrEndResult.end_token => |maybe_end| if (maybe_end) |end| {
+ (list_state.ptr).* = end;
+ continue;
+ } else {
+ stack.append(State{ .FieldInitListItemOrEnd = list_state }) catch unreachable;
+ continue;
+ },
+ ExpectCommaOrEndResult.parse_error => |e| {
+ try tree.errors.push(e);
+ return tree;
+ },
+ }
+ },
+ State.FieldListCommaOrEnd => |container_decl| {
+ switch (expectCommaOrEnd(&tok_it, &tree, Token.Id.RBrace)) {
+ ExpectCommaOrEndResult.end_token => |maybe_end| if (maybe_end) |end| {
+ container_decl.rbrace_token = end;
+ continue;
+ } else {
+ try stack.append(State{ .ContainerDecl = container_decl });
+ continue;
+ },
+ ExpectCommaOrEndResult.parse_error => |e| {
+ try tree.errors.push(e);
+ return tree;
+ },
+ }
+ },
+ State.ErrorTagListItemOrEnd => |list_state| {
+ if (eatToken(&tok_it, &tree, Token.Id.RBrace)) |rbrace| {
+ (list_state.ptr).* = rbrace;
+ continue;
+ }
+
+ const node_ptr = try list_state.list.addOne();
+
+ try stack.append(State{ .ErrorTagListCommaOrEnd = list_state });
+ try stack.append(State{ .ErrorTag = node_ptr });
+ continue;
+ },
+ State.ErrorTagListCommaOrEnd => |list_state| {
+ switch (expectCommaOrEnd(&tok_it, &tree, Token.Id.RBrace)) {
+ ExpectCommaOrEndResult.end_token => |maybe_end| if (maybe_end) |end| {
+ (list_state.ptr).* = end;
+ continue;
+ } else {
+ stack.append(State{ .ErrorTagListItemOrEnd = list_state }) catch unreachable;
+ continue;
+ },
+ ExpectCommaOrEndResult.parse_error => |e| {
+ try tree.errors.push(e);
+ return tree;
+ },
+ }
+ },
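+ // Switch body: each case is an item list (or else), an optional |payload|, "=>", then an expression.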
+ State.SwitchCaseOrEnd => |list_state| {
+ if (eatToken(&tok_it, &tree, Token.Id.RBrace)) |rbrace| {
+ (list_state.ptr).* = rbrace;
+ continue;
+ }
+
+ const comments = try eatDocComments(arena, &tok_it, &tree);
+ const node = try arena.create(ast.Node.SwitchCase{
+ .base = ast.Node{ .id = ast.Node.Id.SwitchCase },
+ .items = ast.Node.SwitchCase.ItemList.init(arena),
+ .payload = null,
+ .expr = undefined,
+ .arrow_token = undefined,
+ });
+ try list_state.list.push(&node.base);
+ try stack.append(State{ .SwitchCaseCommaOrEnd = list_state });
+ try stack.append(State{ .AssignmentExpressionBegin = OptionalCtx{ .Required = &node.expr } });
+ try stack.append(State{ .PointerPayload = OptionalCtx{ .Optional = &node.payload } });
+ try stack.append(State{ .SwitchCaseFirstItem = node });
+
+ continue;
+ },
+
+ State.SwitchCaseCommaOrEnd => |list_state| {
+ switch (expectCommaOrEnd(&tok_it, &tree, Token.Id.RBrace)) {
+ ExpectCommaOrEndResult.end_token => |maybe_end| if (maybe_end) |end| {
+ (list_state.ptr).* = end;
+ continue;
+ } else {
+ try stack.append(State{ .SwitchCaseOrEnd = list_state });
+ continue;
+ },
+ ExpectCommaOrEndResult.parse_error => |e| {
+ try tree.errors.push(e);
+ return tree;
+ },
+ }
+ },
+
+ State.SwitchCaseFirstItem => |switch_case| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (token_ptr.id == Token.Id.Keyword_else) {
+ const else_node = try arena.create(ast.Node.SwitchElse{
+ .base = ast.Node{ .id = ast.Node.Id.SwitchElse },
+ .token = token_index,
+ });
+ try switch_case.items.push(&else_node.base);
+
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.EqualAngleBracketRight,
+ .ptr = &switch_case.arrow_token,
+ },
+ });
+ continue;
+ } else {
+ prevToken(&tok_it, &tree);
+ stack.append(State{ .SwitchCaseItemCommaOrEnd = switch_case }) catch unreachable;
+ try stack.append(State{ .RangeExpressionBegin = OptionalCtx{ .Required = try switch_case.items.addOne() } });
+ continue;
+ }
+ },
+ State.SwitchCaseItemOrEnd => |switch_case| {
+ const token = nextToken(&tok_it, &tree);
+ if (token.ptr.id == Token.Id.EqualAngleBracketRight) {
+ switch_case.arrow_token = token.index;
+ continue;
+ } else {
+ prevToken(&tok_it, &tree);
+ stack.append(State{ .SwitchCaseItemCommaOrEnd = switch_case }) catch unreachable;
+ try stack.append(State{ .RangeExpressionBegin = OptionalCtx{ .Required = try switch_case.items.addOne() } });
+ continue;
+ }
+ },
+ State.SwitchCaseItemCommaOrEnd => |switch_case| {
+ switch (expectCommaOrEnd(&tok_it, &tree, Token.Id.EqualAngleBracketRight)) {
+ ExpectCommaOrEndResult.end_token => |end_token| {
+ if (end_token) |t| {
+ switch_case.arrow_token = t;
+ } else {
+ stack.append(State{ .SwitchCaseItemOrEnd = switch_case }) catch unreachable;
+ }
+ continue;
+ },
+ ExpectCommaOrEndResult.parse_error => |e| {
+ try tree.errors.push(e);
+ return tree;
+ },
+ }
+ continue;
+ },
+
+ State.SuspendBody => |suspend_node| {
+ const token = nextToken(&tok_it, &tree);
+ switch (token.ptr.id) {
+ Token.Id.Semicolon => {
+ prevToken(&tok_it, &tree);
+ continue;
+ },
+ Token.Id.LBrace => {
+ prevToken(&tok_it, &tree);
+ try stack.append(State{ .AssignmentExpressionBegin = OptionalCtx{ .RequiredNull = &suspend_node.body } });
+ continue;
+ },
+ else => {
+ ((try tree.errors.addOne())).* = Error{ .InvalidToken = Error.InvalidToken{ .token = token.index } };
+ },
+ }
+ },
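+ // async<allocator-type> syntax: an optional angle-bracketed allocator type expression after the async keyword.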
+ State.AsyncAllocator => |async_node| {
+ if (eatToken(&tok_it, &tree, Token.Id.AngleBracketLeft) == null) {
+ continue;
+ }
+
+ async_node.rangle_bracket = TokenIndex(0);
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.AngleBracketRight,
+ .ptr = &async_node.rangle_bracket.?,
+ },
+ });
+ try stack.append(State{ .TypeExprBegin = OptionalCtx{ .RequiredNull = &async_node.allocator_type } });
+ continue;
+ },
+ State.AsyncEnd => |ctx| {
+ const node = ctx.ctx.get() orelse continue;
+
+ switch (node.id) {
+ ast.Node.Id.FnProto => {
+ const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", node);
+ fn_proto.async_attr = ctx.attribute;
+ continue;
+ },
+ ast.Node.Id.SuffixOp => {
+ const suffix_op = @fieldParentPtr(ast.Node.SuffixOp, "base", node);
+ if (suffix_op.op == @TagType(ast.Node.SuffixOp.Op).Call) {
+ suffix_op.op.Call.async_attr = ctx.attribute;
+ continue;
+ }
+
+ ((try tree.errors.addOne())).* = Error{ .ExpectedCall = Error.ExpectedCall{ .node = node } };
+ return tree;
+ },
+ else => {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedCallOrFnProto = Error.ExpectedCallOrFnProto{ .node = node } };
+ return tree;
+ },
+ }
+ },
+
+ State.ExternType => |ctx| {
+ if (eatToken(&tok_it, &tree, Token.Id.Keyword_fn)) |fn_token| {
+ const fn_proto = try arena.create(ast.Node.FnProto{
+ .base = ast.Node{ .id = ast.Node.Id.FnProto },
+ .doc_comments = ctx.comments,
+ .visib_token = null,
+ .name_token = null,
+ .fn_token = fn_token,
+ .params = ast.Node.FnProto.ParamList.init(arena),
+ .return_type = undefined,
+ .var_args_token = null,
+ .extern_export_inline_token = ctx.extern_token,
+ .cc_token = null,
+ .async_attr = null,
+ .body_node = null,
+ .lib_name = null,
+ .align_expr = null,
+ });
+ ctx.opt_ctx.store(&fn_proto.base);
+ stack.append(State{ .FnProto = fn_proto }) catch unreachable;
+ continue;
+ }
+
+ stack.append(State{
+ .ContainerKind = ContainerKindCtx{
+ .opt_ctx = ctx.opt_ctx,
+ .layout_token = ctx.extern_token,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ State.SliceOrArrayAccess => |node| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Ellipsis2 => {
+ const start = node.op.ArrayAccess;
+ node.op = ast.Node.SuffixOp.Op{
+ .Slice = ast.Node.SuffixOp.Op.Slice{
+ .start = start,
+ .end = null,
+ },
+ };
+
+ stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.RBracket,
+ .ptr = &node.rtoken,
+ },
+ }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Optional = &node.op.Slice.end } });
+ continue;
+ },
+ Token.Id.RBracket => {
+ node.rtoken = token_index;
+ continue;
+ },
+ else => {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedSliceOrRBracket = Error.ExpectedSliceOrRBracket{ .token = token_index } };
+ return tree;
+ },
+ }
+ },
+ State.SliceOrArrayType => |node| {
+ if (eatToken(&tok_it, &tree, Token.Id.RBracket)) |_| {
+ node.op = ast.Node.PrefixOp.Op{
+ .SliceType = ast.Node.PrefixOp.PtrInfo{
+ .align_info = null,
+ .const_token = null,
+ .volatile_token = null,
+ },
+ };
+ stack.append(State{ .TypeExprBegin = OptionalCtx{ .Required = &node.rhs } }) catch unreachable;
+ try stack.append(State{ .PtrTypeModifiers = &node.op.SliceType });
+ continue;
+ }
+
+ node.op = ast.Node.PrefixOp.Op{ .ArrayType = undefined };
+ stack.append(State{ .TypeExprBegin = OptionalCtx{ .Required = &node.rhs } }) catch unreachable;
+ try stack.append(State{ .ExpectToken = Token.Id.RBracket });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.op.ArrayType } });
+ continue;
+ },
+
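+ // Pointer/slice type qualifiers: align(expr[:start:end]), const, and volatile, each allowed at most once.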
+ State.PtrTypeModifiers => |addr_of_info| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Keyword_align => {
+ stack.append(state) catch unreachable;
+ if (addr_of_info.align_info != null) {
+ ((try tree.errors.addOne())).* = Error{ .ExtraAlignQualifier = Error.ExtraAlignQualifier{ .token = token_index } };
+ return tree;
+ }
+ addr_of_info.align_info = ast.Node.PrefixOp.PtrInfo.Align{
+ .node = undefined,
+ .bit_range = null,
+ };
+ // TODO https://github.com/ziglang/zig/issues/1022
+ const align_info = &addr_of_info.align_info.?;
+
+ try stack.append(State{ .AlignBitRange = align_info });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &align_info.node } });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ continue;
+ },
+ Token.Id.Keyword_const => {
+ stack.append(state) catch unreachable;
+ if (addr_of_info.const_token != null) {
+ ((try tree.errors.addOne())).* = Error{ .ExtraConstQualifier = Error.ExtraConstQualifier{ .token = token_index } };
+ return tree;
+ }
+ addr_of_info.const_token = token_index;
+ continue;
+ },
+ Token.Id.Keyword_volatile => {
+ stack.append(state) catch unreachable;
+ if (addr_of_info.volatile_token != null) {
+ ((try tree.errors.addOne())).* = Error{ .ExtraVolatileQualifier = Error.ExtraVolatileQualifier{ .token = token_index } };
+ return tree;
+ }
+ addr_of_info.volatile_token = token_index;
+ continue;
+ },
+ else => {
+ prevToken(&tok_it, &tree);
+ continue;
+ },
+ }
+ },
+
+ State.AlignBitRange => |align_info| {
+ const token = nextToken(&tok_it, &tree);
+ switch (token.ptr.id) {
+ Token.Id.Colon => {
+ align_info.bit_range = ast.Node.PrefixOp.PtrInfo.Align.BitRange(undefined);
+ const bit_range = &align_info.bit_range.?;
+
+ try stack.append(State{ .ExpectToken = Token.Id.RParen });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &bit_range.end } });
+ try stack.append(State{ .ExpectToken = Token.Id.Colon });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &bit_range.start } });
+ continue;
+ },
+ Token.Id.RParen => continue,
+ else => {
+ (try tree.errors.addOne()).* = Error{
+ .ExpectedColonOrRParen = Error.ExpectedColonOrRParen{ .token = token.index },
+ };
+ return tree;
+ },
+ }
+ },
+
+ State.Payload => |opt_ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (token_ptr.id != Token.Id.Pipe) {
+ if (opt_ctx != OptionalCtx.Optional) {
+ ((try tree.errors.addOne())).* = Error{
+ .ExpectedToken = Error.ExpectedToken{
+ .token = token_index,
+ .expected_id = Token.Id.Pipe,
+ },
+ };
+ return tree;
+ }
+
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+
+ const node = try arena.create(ast.Node.Payload{
+ .base = ast.Node{ .id = ast.Node.Id.Payload },
+ .lpipe = token_index,
+ .error_symbol = undefined,
+ .rpipe = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.Pipe,
+ .ptr = &node.rpipe,
+ },
+ }) catch unreachable;
+ try stack.append(State{ .Identifier = OptionalCtx{ .Required = &node.error_symbol } });
+ continue;
+ },
+ State.PointerPayload => |opt_ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (token_ptr.id != Token.Id.Pipe) {
+ if (opt_ctx != OptionalCtx.Optional) {
+ ((try tree.errors.addOne())).* = Error{
+ .ExpectedToken = Error.ExpectedToken{
+ .token = token_index,
+ .expected_id = Token.Id.Pipe,
+ },
+ };
+ return tree;
+ }
+
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+
+ const node = try arena.create(ast.Node.PointerPayload{
+ .base = ast.Node{ .id = ast.Node.Id.PointerPayload },
+ .lpipe = token_index,
+ .ptr_token = null,
+ .value_symbol = undefined,
+ .rpipe = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.Pipe,
+ .ptr = &node.rpipe,
+ },
+ });
+ try stack.append(State{ .Identifier = OptionalCtx{ .Required = &node.value_symbol } });
+ try stack.append(State{
+ .OptionalTokenSave = OptionalTokenSave{
+ .id = Token.Id.Asterisk,
+ .ptr = &node.ptr_token,
+ },
+ });
+ continue;
+ },
+ State.PointerIndexPayload => |opt_ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (token_ptr.id != Token.Id.Pipe) {
+ if (opt_ctx != OptionalCtx.Optional) {
+ ((try tree.errors.addOne())).* = Error{
+ .ExpectedToken = Error.ExpectedToken{
+ .token = token_index,
+ .expected_id = Token.Id.Pipe,
+ },
+ };
+ return tree;
+ }
+
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+
+ const node = try arena.create(ast.Node.PointerIndexPayload{
+ .base = ast.Node{ .id = ast.Node.Id.PointerIndexPayload },
+ .lpipe = token_index,
+ .ptr_token = null,
+ .value_symbol = undefined,
+ .index_symbol = null,
+ .rpipe = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.Pipe,
+ .ptr = &node.rpipe,
+ },
+ }) catch unreachable;
+ try stack.append(State{ .Identifier = OptionalCtx{ .RequiredNull = &node.index_symbol } });
+ try stack.append(State{ .IfToken = Token.Id.Comma });
+ try stack.append(State{ .Identifier = OptionalCtx{ .Required = &node.value_symbol } });
+ try stack.append(State{
+ .OptionalTokenSave = OptionalTokenSave{
+ .id = Token.Id.Asterisk,
+ .ptr = &node.ptr_token,
+ },
+ });
+ continue;
+ },
+
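+ // Expression entry point: return/break/continue, the try/cancel/resume prefix operators, block expressions, or the binary operator chain below.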
+ State.Expression => |opt_ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Keyword_return, Token.Id.Keyword_break, Token.Id.Keyword_continue => {
+ const node = try arena.create(ast.Node.ControlFlowExpression{
+ .base = ast.Node{ .id = ast.Node.Id.ControlFlowExpression },
+ .ltoken = token_index,
+ .kind = undefined,
+ .rhs = null,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{ .Expression = OptionalCtx{ .Optional = &node.rhs } }) catch unreachable;
+
+ switch (token_ptr.id) {
+ Token.Id.Keyword_break => {
+ node.kind = ast.Node.ControlFlowExpression.Kind{ .Break = null };
+ try stack.append(State{ .Identifier = OptionalCtx{ .RequiredNull = &node.kind.Break } });
+ try stack.append(State{ .IfToken = Token.Id.Colon });
+ },
+ Token.Id.Keyword_continue => {
+ node.kind = ast.Node.ControlFlowExpression.Kind{ .Continue = null };
+ try stack.append(State{ .Identifier = OptionalCtx{ .RequiredNull = &node.kind.Continue } });
+ try stack.append(State{ .IfToken = Token.Id.Colon });
+ },
+ Token.Id.Keyword_return => {
+ node.kind = ast.Node.ControlFlowExpression.Kind.Return;
+ },
+ else => unreachable,
+ }
+ continue;
+ },
+ Token.Id.Keyword_try, Token.Id.Keyword_cancel, Token.Id.Keyword_resume => {
+ const node = try arena.create(ast.Node.PrefixOp{
+ .base = ast.Node{ .id = ast.Node.Id.PrefixOp },
+ .op_token = token_index,
+ .op = switch (token_ptr.id) {
+ Token.Id.Keyword_try => ast.Node.PrefixOp.Op{ .Try = void{} },
+ Token.Id.Keyword_cancel => ast.Node.PrefixOp.Op{ .Cancel = void{} },
+ Token.Id.Keyword_resume => ast.Node.PrefixOp.Op{ .Resume = void{} },
+ else => unreachable,
+ },
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{ .Expression = OptionalCtx{ .Required = &node.rhs } }) catch unreachable;
+ continue;
+ },
+ else => {
+ if (!try parseBlockExpr(&stack, arena, opt_ctx, token_ptr, token_index)) {
+ prevToken(&tok_it, &tree);
+ stack.append(State{ .UnwrapExpressionBegin = opt_ctx }) catch unreachable;
+ }
+ continue;
+ },
+ }
+ },
+ State.RangeExpressionBegin => |opt_ctx| {
+ stack.append(State{ .RangeExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .Expression = opt_ctx });
+ continue;
+ },
+ State.RangeExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ if (eatToken(&tok_it, &tree, Token.Id.Ellipsis3)) |ellipsis3| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = ellipsis3,
+ .op = ast.Node.InfixOp.Op.Range,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .Expression = OptionalCtx{ .Required = &node.rhs } }) catch unreachable;
+ continue;
+ }
+ },
+ State.AssignmentExpressionBegin => |opt_ctx| {
+ stack.append(State{ .AssignmentExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .Expression = opt_ctx });
+ continue;
+ },
+
+ State.AssignmentExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (tokenIdToAssignment(token_ptr.id)) |ass_id| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = token_index,
+ .op = ass_id,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .AssignmentExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ } else {
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+ },
+
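+ // The Begin/End pairs below implement binary operator precedence: each level parses the next tighter level, then loops while its own operator follows (catch/orelse, or, and, comparisons, |, ^, &, shifts, additive, multiplicative).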
+ State.UnwrapExpressionBegin => |opt_ctx| {
+ stack.append(State{ .UnwrapExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .BoolOrExpressionBegin = opt_ctx });
+ continue;
+ },
+
+ State.UnwrapExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (tokenIdToUnwrapExpr(token_ptr.id)) |unwrap_id| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = token_index,
+ .op = unwrap_id,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{ .UnwrapExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.rhs } });
+
+ if (node.op == ast.Node.InfixOp.Op.Catch) {
+ try stack.append(State{ .Payload = OptionalCtx{ .Optional = &node.op.Catch } });
+ }
+ continue;
+ } else {
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+ },
+
+ State.BoolOrExpressionBegin => |opt_ctx| {
+ stack.append(State{ .BoolOrExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .BoolAndExpressionBegin = opt_ctx });
+ continue;
+ },
+
+ State.BoolOrExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ if (eatToken(&tok_it, &tree, Token.Id.Keyword_or)) |or_token| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = or_token,
+ .op = ast.Node.InfixOp.Op.BoolOr,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .BoolOrExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .BoolAndExpressionBegin = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ }
+ },
+
+ State.BoolAndExpressionBegin => |opt_ctx| {
+ stack.append(State{ .BoolAndExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .ComparisonExpressionBegin = opt_ctx });
+ continue;
+ },
+
+ State.BoolAndExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ if (eatToken(&tok_it, &tree, Token.Id.Keyword_and)) |and_token| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = and_token,
+ .op = ast.Node.InfixOp.Op.BoolAnd,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .BoolAndExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .ComparisonExpressionBegin = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ }
+ },
+
+ State.ComparisonExpressionBegin => |opt_ctx| {
+ stack.append(State{ .ComparisonExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .BinaryOrExpressionBegin = opt_ctx });
+ continue;
+ },
+
+ State.ComparisonExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (tokenIdToComparison(token_ptr.id)) |comp_id| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = token_index,
+ .op = comp_id,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .ComparisonExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .BinaryOrExpressionBegin = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ } else {
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+ },
+
+ State.BinaryOrExpressionBegin => |opt_ctx| {
+ stack.append(State{ .BinaryOrExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .BinaryXorExpressionBegin = opt_ctx });
+ continue;
+ },
+
+ State.BinaryOrExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ if (eatToken(&tok_it, &tree, Token.Id.Pipe)) |pipe| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = pipe,
+ .op = ast.Node.InfixOp.Op.BitOr,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .BinaryOrExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .BinaryXorExpressionBegin = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ }
+ },
+
+ State.BinaryXorExpressionBegin => |opt_ctx| {
+ stack.append(State{ .BinaryXorExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .BinaryAndExpressionBegin = opt_ctx });
+ continue;
+ },
+
+ State.BinaryXorExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ if (eatToken(&tok_it, &tree, Token.Id.Caret)) |caret| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = caret,
+ .op = ast.Node.InfixOp.Op.BitXor,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .BinaryXorExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .BinaryAndExpressionBegin = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ }
+ },
+
+ State.BinaryAndExpressionBegin => |opt_ctx| {
+ stack.append(State{ .BinaryAndExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .BitShiftExpressionBegin = opt_ctx });
+ continue;
+ },
+
+ State.BinaryAndExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ if (eatToken(&tok_it, &tree, Token.Id.Ampersand)) |ampersand| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = ampersand,
+ .op = ast.Node.InfixOp.Op.BitAnd,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .BinaryAndExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .BitShiftExpressionBegin = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ }
+ },
+
+ State.BitShiftExpressionBegin => |opt_ctx| {
+ stack.append(State{ .BitShiftExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .AdditionExpressionBegin = opt_ctx });
+ continue;
+ },
+
+ State.BitShiftExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (tokenIdToBitShift(token_ptr.id)) |bitshift_id| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = token_index,
+ .op = bitshift_id,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .BitShiftExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .AdditionExpressionBegin = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ } else {
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+ },
+
+ State.AdditionExpressionBegin => |opt_ctx| {
+ stack.append(State{ .AdditionExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .MultiplyExpressionBegin = opt_ctx });
+ continue;
+ },
+
+ State.AdditionExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (tokenIdToAddition(token_ptr.id)) |add_id| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = token_index,
+ .op = add_id,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .AdditionExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .MultiplyExpressionBegin = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ } else {
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+ },
+
+ State.MultiplyExpressionBegin => |opt_ctx| {
+ stack.append(State{ .MultiplyExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .CurlySuffixExpressionBegin = opt_ctx });
+ continue;
+ },
+
+ State.MultiplyExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (tokenIdToMultiply(token_ptr.id)) |mult_id| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = token_index,
+ .op = mult_id,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .MultiplyExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .CurlySuffixExpressionBegin = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ } else {
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+ },
+
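+            // Curly suffix: IfToken consumes the `{` if one follows the type expression
+            // (otherwise it discards the pending End state). In End, a leading `.` marks
+            // a struct initializer; anything else is parsed as an array initializer.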
+ State.CurlySuffixExpressionBegin => |opt_ctx| {
+ stack.append(State{ .CurlySuffixExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .IfToken = Token.Id.LBrace });
+ try stack.append(State{ .TypeExprBegin = opt_ctx });
+ continue;
+ },
+
+ State.CurlySuffixExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ if (tok_it.peek().?.id == Token.Id.Period) {
+ const node = try arena.create(ast.Node.SuffixOp{
+ .base = ast.Node{ .id = ast.Node.Id.SuffixOp },
+ .lhs = lhs,
+ .op = ast.Node.SuffixOp.Op{ .StructInitializer = ast.Node.SuffixOp.Op.InitList.init(arena) },
+ .rtoken = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{ .CurlySuffixExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .IfToken = Token.Id.LBrace });
+ try stack.append(State{
+ .FieldInitListItemOrEnd = ListSave(@typeOf(node.op.StructInitializer)){
+ .list = &node.op.StructInitializer,
+ .ptr = &node.rtoken,
+ },
+ });
+ continue;
+ }
+
+ const node = try arena.create(ast.Node.SuffixOp{
+ .base = ast.Node{ .id = ast.Node.Id.SuffixOp },
+ .lhs = lhs,
+ .op = ast.Node.SuffixOp.Op{ .ArrayInitializer = ast.Node.SuffixOp.Op.InitList.init(arena) },
+ .rtoken = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .CurlySuffixExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .IfToken = Token.Id.LBrace });
+ try stack.append(State{
+ .ExprListItemOrEnd = ExprListCtx{
+ .list = &node.op.ArrayInitializer,
+ .end = Token.Id.RBrace,
+ .ptr = &node.rtoken,
+ },
+ });
+ continue;
+ },
+
+ State.TypeExprBegin => |opt_ctx| {
+ stack.append(State{ .TypeExprEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .PrefixOpExpression = opt_ctx });
+ continue;
+ },
+
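+            // A `!` between the error set and the payload type builds an ErrorUnion
+            // infix node.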
+ State.TypeExprEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ if (eatToken(&tok_it, &tree, Token.Id.Bang)) |bang| {
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = bang,
+ .op = ast.Node.InfixOp.Op.ErrorUnion,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .TypeExprEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .PrefixOpExpression = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ }
+ },
+
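+            // Prefix operators (!, ~, -, -%, &, *, ?, await, try) wrap the following
+            // type expression; pointer types additionally collect align/const/volatile
+            // modifiers via PtrTypeModifiers.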
+ State.PrefixOpExpression => |opt_ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (tokenIdToPrefixOp(token_ptr.id)) |prefix_id| {
+ var node = try arena.create(ast.Node.PrefixOp{
+ .base = ast.Node{ .id = ast.Node.Id.PrefixOp },
+ .op_token = token_index,
+ .op = prefix_id,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ // Treat '**' token as two pointer types
+ if (token_ptr.id == Token.Id.AsteriskAsterisk) {
+ const child = try arena.create(ast.Node.PrefixOp{
+ .base = ast.Node{ .id = ast.Node.Id.PrefixOp },
+ .op_token = token_index,
+ .op = prefix_id,
+ .rhs = undefined,
+ });
+ node.rhs = &child.base;
+ node = child;
+ }
+
+ stack.append(State{ .TypeExprBegin = OptionalCtx{ .Required = &node.rhs } }) catch unreachable;
+ if (node.op == ast.Node.PrefixOp.Op.PtrType) {
+ try stack.append(State{ .PtrTypeModifiers = &node.op.PtrType });
+ }
+ continue;
+ } else {
+ prevToken(&tok_it, &tree);
+ stack.append(State{ .SuffixOpExpressionBegin = opt_ctx }) catch unreachable;
+ continue;
+ }
+ },
+
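+            // An `async` keyword here creates an AsyncAttribute (with an optional
+            // allocator) and pushes an AsyncEnd state so the attribute can be attached
+            // to the call expression parsed by the suffix chain.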
+ State.SuffixOpExpressionBegin => |opt_ctx| {
+ if (eatToken(&tok_it, &tree, Token.Id.Keyword_async)) |async_token| {
+ const async_node = try arena.create(ast.Node.AsyncAttribute{
+ .base = ast.Node{ .id = ast.Node.Id.AsyncAttribute },
+ .async_token = async_token,
+ .allocator_type = null,
+ .rangle_bracket = null,
+ });
+ stack.append(State{
+ .AsyncEnd = AsyncEndCtx{
+ .ctx = opt_ctx,
+ .attribute = async_node,
+ },
+ }) catch unreachable;
+ try stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() });
+ try stack.append(State{ .PrimaryExpression = opt_ctx.toRequired() });
+ try stack.append(State{ .AsyncAllocator = async_node });
+ continue;
+ }
+
+ stack.append(State{ .SuffixOpExpressionEnd = opt_ctx }) catch unreachable;
+ try stack.append(State{ .PrimaryExpression = opt_ctx });
+ continue;
+ },
+
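+            // Suffix operators: `(` starts a call, `[` an index or slice, and `.` is
+            // either `.*` (deref), `.?` (unwrap optional), or a field access.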
+ State.SuffixOpExpressionEnd => |opt_ctx| {
+ const lhs = opt_ctx.get() orelse continue;
+
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.LParen => {
+ const node = try arena.create(ast.Node.SuffixOp{
+ .base = ast.Node{ .id = ast.Node.Id.SuffixOp },
+ .lhs = lhs,
+ .op = ast.Node.SuffixOp.Op{
+ .Call = ast.Node.SuffixOp.Op.Call{
+ .params = ast.Node.SuffixOp.Op.Call.ParamList.init(arena),
+ .async_attr = null,
+ },
+ },
+ .rtoken = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{
+ .ExprListItemOrEnd = ExprListCtx{
+ .list = &node.op.Call.params,
+ .end = Token.Id.RParen,
+ .ptr = &node.rtoken,
+ },
+ });
+ continue;
+ },
+ Token.Id.LBracket => {
+ const node = try arena.create(ast.Node.SuffixOp{
+ .base = ast.Node{ .id = ast.Node.Id.SuffixOp },
+ .lhs = lhs,
+ .op = ast.Node.SuffixOp.Op{ .ArrayAccess = undefined },
+ .rtoken = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .SliceOrArrayAccess = node });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.op.ArrayAccess } });
+ continue;
+ },
+ Token.Id.Period => {
+ if (eatToken(&tok_it, &tree, Token.Id.Asterisk)) |asterisk_token| {
+ const node = try arena.create(ast.Node.SuffixOp{
+ .base = ast.Node{ .id = ast.Node.Id.SuffixOp },
+ .lhs = lhs,
+ .op = ast.Node.SuffixOp.Op.Deref,
+ .rtoken = asterisk_token,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ continue;
+ }
+ if (eatToken(&tok_it, &tree, Token.Id.QuestionMark)) |question_token| {
+ const node = try arena.create(ast.Node.SuffixOp{
+ .base = ast.Node{ .id = ast.Node.Id.SuffixOp },
+ .lhs = lhs,
+ .op = ast.Node.SuffixOp.Op.UnwrapOptional,
+ .rtoken = question_token,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ continue;
+ }
+ const node = try arena.create(ast.Node.InfixOp{
+ .base = ast.Node{ .id = ast.Node.Id.InfixOp },
+ .lhs = lhs,
+ .op_token = token_index,
+ .op = ast.Node.InfixOp.Op.Period,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ try stack.append(State{ .Identifier = OptionalCtx{ .Required = &node.rhs } });
+ continue;
+ },
+ else => {
+ prevToken(&tok_it, &tree);
+ continue;
+ },
+ }
+ },
+
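+            // Primary expressions: literals, grouped `(...)` expressions, builtin
+            // calls, slice/array types, error sets, container declarations, fn
+            // prototypes, asm blocks, labeled/inline expressions, and the block-like
+            // expressions handled by parseBlockExpr.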
+ State.PrimaryExpression => |opt_ctx| {
+ const token = nextToken(&tok_it, &tree);
+ switch (token.ptr.id) {
+ Token.Id.IntegerLiteral => {
+ _ = try createToCtxLiteral(arena, opt_ctx, ast.Node.IntegerLiteral, token.index);
+ continue;
+ },
+ Token.Id.FloatLiteral => {
+ _ = try createToCtxLiteral(arena, opt_ctx, ast.Node.FloatLiteral, token.index);
+ continue;
+ },
+ Token.Id.CharLiteral => {
+ _ = try createToCtxLiteral(arena, opt_ctx, ast.Node.CharLiteral, token.index);
+ continue;
+ },
+ Token.Id.Keyword_undefined => {
+ _ = try createToCtxLiteral(arena, opt_ctx, ast.Node.UndefinedLiteral, token.index);
+ continue;
+ },
+ Token.Id.Keyword_true, Token.Id.Keyword_false => {
+ _ = try createToCtxLiteral(arena, opt_ctx, ast.Node.BoolLiteral, token.index);
+ continue;
+ },
+ Token.Id.Keyword_null => {
+ _ = try createToCtxLiteral(arena, opt_ctx, ast.Node.NullLiteral, token.index);
+ continue;
+ },
+ Token.Id.Keyword_this => {
+ _ = try createToCtxLiteral(arena, opt_ctx, ast.Node.ThisLiteral, token.index);
+ continue;
+ },
+ Token.Id.Keyword_var => {
+ _ = try createToCtxLiteral(arena, opt_ctx, ast.Node.VarType, token.index);
+ continue;
+ },
+ Token.Id.Keyword_unreachable => {
+ _ = try createToCtxLiteral(arena, opt_ctx, ast.Node.Unreachable, token.index);
+ continue;
+ },
+ Token.Id.Keyword_promise => {
+ const node = try arena.create(ast.Node.PromiseType{
+ .base = ast.Node{ .id = ast.Node.Id.PromiseType },
+ .promise_token = token.index,
+ .result = null,
+ });
+ opt_ctx.store(&node.base);
+ const next_token = nextToken(&tok_it, &tree);
+ const next_token_index = next_token.index;
+ const next_token_ptr = next_token.ptr;
+ if (next_token_ptr.id != Token.Id.Arrow) {
+ prevToken(&tok_it, &tree);
+ continue;
+ }
+ node.result = ast.Node.PromiseType.Result{
+ .arrow_token = next_token_index,
+ .return_type = undefined,
+ };
+ const return_type_ptr = &node.result.?.return_type;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = return_type_ptr } });
+ continue;
+ },
+ Token.Id.StringLiteral, Token.Id.MultilineStringLiteralLine => {
+ opt_ctx.store((try parseStringLiteral(arena, &tok_it, token.ptr, token.index, &tree)) orelse unreachable);
+ continue;
+ },
+ Token.Id.LParen => {
+ const node = try arena.create(ast.Node.GroupedExpression{
+ .base = ast.Node{ .id = ast.Node.Id.GroupedExpression },
+ .lparen = token.index,
+ .expr = undefined,
+ .rparen = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.RParen,
+ .ptr = &node.rparen,
+ },
+ }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.expr } });
+ continue;
+ },
+ Token.Id.Builtin => {
+ const node = try arena.create(ast.Node.BuiltinCall{
+ .base = ast.Node{ .id = ast.Node.Id.BuiltinCall },
+ .builtin_token = token.index,
+ .params = ast.Node.BuiltinCall.ParamList.init(arena),
+ .rparen_token = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{
+ .ExprListItemOrEnd = ExprListCtx{
+ .list = &node.params,
+ .end = Token.Id.RParen,
+ .ptr = &node.rparen_token,
+ },
+ }) catch unreachable;
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ continue;
+ },
+ Token.Id.LBracket => {
+ const node = try arena.create(ast.Node.PrefixOp{
+ .base = ast.Node{ .id = ast.Node.Id.PrefixOp },
+ .op_token = token.index,
+ .op = undefined,
+ .rhs = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{ .SliceOrArrayType = node }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_error => {
+ stack.append(State{
+ .ErrorTypeOrSetDecl = ErrorTypeOrSetDeclCtx{
+ .error_token = token.index,
+ .opt_ctx = opt_ctx,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_packed => {
+ stack.append(State{
+ .ContainerKind = ContainerKindCtx{
+ .opt_ctx = opt_ctx,
+ .layout_token = token.index,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_extern => {
+ stack.append(State{
+ .ExternType = ExternTypeCtx{
+ .opt_ctx = opt_ctx,
+ .extern_token = token.index,
+ .comments = null,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_struct, Token.Id.Keyword_union, Token.Id.Keyword_enum => {
+ prevToken(&tok_it, &tree);
+ stack.append(State{
+ .ContainerKind = ContainerKindCtx{
+ .opt_ctx = opt_ctx,
+ .layout_token = null,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Identifier => {
+ stack.append(State{
+ .MaybeLabeledExpression = MaybeLabeledExpressionCtx{
+ .label = token.index,
+ .opt_ctx = opt_ctx,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_fn => {
+ const fn_proto = try arena.create(ast.Node.FnProto{
+ .base = ast.Node{ .id = ast.Node.Id.FnProto },
+ .doc_comments = null,
+ .visib_token = null,
+ .name_token = null,
+ .fn_token = token.index,
+ .params = ast.Node.FnProto.ParamList.init(arena),
+ .return_type = undefined,
+ .var_args_token = null,
+ .extern_export_inline_token = null,
+ .cc_token = null,
+ .async_attr = null,
+ .body_node = null,
+ .lib_name = null,
+ .align_expr = null,
+ });
+ opt_ctx.store(&fn_proto.base);
+ stack.append(State{ .FnProto = fn_proto }) catch unreachable;
+ continue;
+ },
+ Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc => {
+ const fn_proto = try arena.create(ast.Node.FnProto{
+ .base = ast.Node{ .id = ast.Node.Id.FnProto },
+ .doc_comments = null,
+ .visib_token = null,
+ .name_token = null,
+ .fn_token = undefined,
+ .params = ast.Node.FnProto.ParamList.init(arena),
+ .return_type = undefined,
+ .var_args_token = null,
+ .extern_export_inline_token = null,
+ .cc_token = token.index,
+ .async_attr = null,
+ .body_node = null,
+ .lib_name = null,
+ .align_expr = null,
+ });
+ opt_ctx.store(&fn_proto.base);
+ stack.append(State{ .FnProto = fn_proto }) catch unreachable;
+ try stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.Keyword_fn,
+ .ptr = &fn_proto.fn_token,
+ },
+ });
+ continue;
+ },
+ Token.Id.Keyword_asm => {
+ const node = try arena.create(ast.Node.Asm{
+ .base = ast.Node{ .id = ast.Node.Id.Asm },
+ .asm_token = token.index,
+ .volatile_token = null,
+ .template = undefined,
+ .outputs = ast.Node.Asm.OutputList.init(arena),
+ .inputs = ast.Node.Asm.InputList.init(arena),
+ .clobbers = ast.Node.Asm.ClobberList.init(arena),
+ .rparen = undefined,
+ });
+ opt_ctx.store(&node.base);
+
+ stack.append(State{
+ .ExpectTokenSave = ExpectTokenSave{
+ .id = Token.Id.RParen,
+ .ptr = &node.rparen,
+ },
+ }) catch unreachable;
+ try stack.append(State{ .AsmClobberItems = &node.clobbers });
+ try stack.append(State{ .IfToken = Token.Id.Colon });
+ try stack.append(State{ .AsmInputItems = &node.inputs });
+ try stack.append(State{ .IfToken = Token.Id.Colon });
+ try stack.append(State{ .AsmOutputItems = &node.outputs });
+ try stack.append(State{ .IfToken = Token.Id.Colon });
+ try stack.append(State{ .StringLiteral = OptionalCtx{ .Required = &node.template } });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ try stack.append(State{
+ .OptionalTokenSave = OptionalTokenSave{
+ .id = Token.Id.Keyword_volatile,
+ .ptr = &node.volatile_token,
+ },
+ });
+ },
+ Token.Id.Keyword_inline => {
+ stack.append(State{
+ .Inline = InlineCtx{
+ .label = null,
+ .inline_token = token.index,
+ .opt_ctx = opt_ctx,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ else => {
+ if (!try parseBlockExpr(&stack, arena, opt_ctx, token.ptr, token.index)) {
+ prevToken(&tok_it, &tree);
+ if (opt_ctx != OptionalCtx.Optional) {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedPrimaryExpr = Error.ExpectedPrimaryExpr{ .token = token.index } };
+ return tree;
+ }
+ }
+ continue;
+ },
+ }
+ },
+
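+            // `error` followed by `{` starts an error set declaration; a bare `error`
+            // becomes the error type literal.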
+ State.ErrorTypeOrSetDecl => |ctx| {
+ if (eatToken(&tok_it, &tree, Token.Id.LBrace) == null) {
+ _ = try createToCtxLiteral(arena, ctx.opt_ctx, ast.Node.ErrorType, ctx.error_token);
+ continue;
+ }
+
+ const node = try arena.create(ast.Node.ErrorSetDecl{
+ .base = ast.Node{ .id = ast.Node.Id.ErrorSetDecl },
+ .error_token = ctx.error_token,
+ .decls = ast.Node.ErrorSetDecl.DeclList.init(arena),
+ .rbrace_token = undefined,
+ });
+ ctx.opt_ctx.store(&node.base);
+
+ stack.append(State{
+ .ErrorTagListItemOrEnd = ListSave(@typeOf(node.decls)){
+ .list = &node.decls,
+ .ptr = &node.rbrace_token,
+ },
+ }) catch unreachable;
+ continue;
+ },
+ State.StringLiteral => |opt_ctx| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ opt_ctx.store((try parseStringLiteral(arena, &tok_it, token_ptr, token_index, &tree)) orelse {
+ prevToken(&tok_it, &tree);
+ if (opt_ctx != OptionalCtx.Optional) {
+ ((try tree.errors.addOne())).* = Error{ .ExpectedPrimaryExpr = Error.ExpectedPrimaryExpr{ .token = token_index } };
+ return tree;
+ }
+
+ continue;
+ });
+ },
+
+ State.Identifier => |opt_ctx| {
+ if (eatToken(&tok_it, &tree, Token.Id.Identifier)) |ident_token| {
+ _ = try createToCtxLiteral(arena, opt_ctx, ast.Node.Identifier, ident_token);
+ continue;
+ }
+
+ if (opt_ctx != OptionalCtx.Optional) {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ ((try tree.errors.addOne())).* = Error{
+ .ExpectedToken = Error.ExpectedToken{
+ .token = token_index,
+ .expected_id = Token.Id.Identifier,
+ },
+ };
+ return tree;
+ }
+ },
+
+ State.ErrorTag => |node_ptr| {
+ const comments = try eatDocComments(arena, &tok_it, &tree);
+ const ident_token = nextToken(&tok_it, &tree);
+ const ident_token_index = ident_token.index;
+ const ident_token_ptr = ident_token.ptr;
+ if (ident_token_ptr.id != Token.Id.Identifier) {
+ ((try tree.errors.addOne())).* = Error{
+ .ExpectedToken = Error.ExpectedToken{
+ .token = ident_token_index,
+ .expected_id = Token.Id.Identifier,
+ },
+ };
+ return tree;
+ }
+
+ const node = try arena.create(ast.Node.ErrorTag{
+ .base = ast.Node{ .id = ast.Node.Id.ErrorTag },
+ .doc_comments = comments,
+ .name_token = ident_token_index,
+ });
+ node_ptr.* = &node.base;
+ continue;
+ },
+
+ State.ExpectToken => |token_id| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (token_ptr.id != token_id) {
+ ((try tree.errors.addOne())).* = Error{
+ .ExpectedToken = Error.ExpectedToken{
+ .token = token_index,
+ .expected_id = token_id,
+ },
+ };
+ return tree;
+ }
+ continue;
+ },
+ State.ExpectTokenSave => |expect_token_save| {
+ const token = nextToken(&tok_it, &tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ if (token_ptr.id != expect_token_save.id) {
+ ((try tree.errors.addOne())).* = Error{
+ .ExpectedToken = Error.ExpectedToken{
+ .token = token_index,
+ .expected_id = expect_token_save.id,
+ },
+ };
+ return tree;
+ }
+ expect_token_save.ptr.* = token_index;
+ continue;
+ },
+ State.IfToken => |token_id| {
+ if (eatToken(&tok_it, &tree, token_id)) |_| {
+ continue;
+ }
+
+ _ = stack.pop();
+ continue;
+ },
+ State.IfTokenSave => |if_token_save| {
+ if (eatToken(&tok_it, &tree, if_token_save.id)) |token_index| {
+ (if_token_save.ptr).* = token_index;
+ continue;
+ }
+
+ _ = stack.pop();
+ continue;
+ },
+ State.OptionalTokenSave => |optional_token_save| {
+ if (eatToken(&tok_it, &tree, optional_token_save.id)) |token_index| {
+ (optional_token_save.ptr).* = token_index;
+ continue;
+ }
+
+ continue;
+ },
+ }
+ }
+}
+
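+// A token paired with its index in the token list, as returned by nextToken.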
+const AnnotatedToken = struct {
+ ptr: *Token,
+ index: TokenIndex,
+};
+
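+// Payload structs carried by the State variants declared below.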
+const TopLevelDeclCtx = struct {
+ decls: *ast.Node.Root.DeclList,
+ visib_token: ?TokenIndex,
+ extern_export_inline_token: ?AnnotatedToken,
+ lib_name: ?*ast.Node,
+ comments: ?*ast.Node.DocComment,
+};
+
+const VarDeclCtx = struct {
+ mut_token: TokenIndex,
+ visib_token: ?TokenIndex,
+ comptime_token: ?TokenIndex,
+ extern_export_token: ?TokenIndex,
+ lib_name: ?*ast.Node,
+ list: *ast.Node.Root.DeclList,
+ comments: ?*ast.Node.DocComment,
+};
+
+const TopLevelExternOrFieldCtx = struct {
+ visib_token: TokenIndex,
+ container_decl: *ast.Node.ContainerDecl,
+ comments: ?*ast.Node.DocComment,
+};
+
+const ExternTypeCtx = struct {
+ opt_ctx: OptionalCtx,
+ extern_token: TokenIndex,
+ comments: ?*ast.Node.DocComment,
+};
+
+const ContainerKindCtx = struct {
+ opt_ctx: OptionalCtx,
+ layout_token: ?TokenIndex,
+};
+
+const ExpectTokenSave = struct {
+ id: @TagType(Token.Id),
+ ptr: *TokenIndex,
+};
+
+const OptionalTokenSave = struct {
+ id: @TagType(Token.Id),
+ ptr: *?TokenIndex,
+};
+
+const ExprListCtx = struct {
+ list: *ast.Node.SuffixOp.Op.InitList,
+ end: Token.Id,
+ ptr: *TokenIndex,
+};
+
+fn ListSave(comptime List: type) type {
+ return struct {
+ list: *List,
+ ptr: *TokenIndex,
+ };
+}
+
+const MaybeLabeledExpressionCtx = struct {
+ label: TokenIndex,
+ opt_ctx: OptionalCtx,
+};
+
+const LabelCtx = struct {
+ label: ?TokenIndex,
+ opt_ctx: OptionalCtx,
+};
+
+const InlineCtx = struct {
+ label: ?TokenIndex,
+ inline_token: ?TokenIndex,
+ opt_ctx: OptionalCtx,
+};
+
+const LoopCtx = struct {
+ label: ?TokenIndex,
+ inline_token: ?TokenIndex,
+ loop_token: TokenIndex,
+ opt_ctx: OptionalCtx,
+};
+
+const AsyncEndCtx = struct {
+ ctx: OptionalCtx,
+ attribute: *ast.Node.AsyncAttribute,
+};
+
+const ErrorTypeOrSetDeclCtx = struct {
+ opt_ctx: OptionalCtx,
+ error_token: TokenIndex,
+};
+
+const ParamDeclEndCtx = struct {
+ fn_proto: *ast.Node.FnProto,
+ param_decl: *ast.Node.ParamDecl,
+};
+
+const ComptimeStatementCtx = struct {
+ comptime_token: TokenIndex,
+ block: *ast.Node.Block,
+};
+
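+// Destination for a parsed node: Optional means the result may legitimately be
+// absent, RequiredNull stores a required result through an optional pointer, and
+// Required stores it through a plain pointer.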
+const OptionalCtx = union(enum) {
+ Optional: *?*ast.Node,
+ RequiredNull: *?*ast.Node,
+ Required: **ast.Node,
+
+ pub fn store(self: *const OptionalCtx, value: *ast.Node) void {
+ switch (self.*) {
+ OptionalCtx.Optional => |ptr| ptr.* = value,
+ OptionalCtx.RequiredNull => |ptr| ptr.* = value,
+ OptionalCtx.Required => |ptr| ptr.* = value,
+ }
+ }
+
+ pub fn get(self: *const OptionalCtx) ?*ast.Node {
+ switch (self.*) {
+ OptionalCtx.Optional => |ptr| return ptr.*,
+ OptionalCtx.RequiredNull => |ptr| return ptr.*.?,
+ OptionalCtx.Required => |ptr| return ptr.*,
+ }
+ }
+
+ pub fn toRequired(self: *const OptionalCtx) OptionalCtx {
+ switch (self.*) {
+ OptionalCtx.Optional => |ptr| {
+ return OptionalCtx{ .RequiredNull = ptr };
+ },
+ OptionalCtx.RequiredNull => |ptr| return self.*,
+ OptionalCtx.Required => |ptr| return self.*,
+ }
+ }
+};
+
+const AddCommentsCtx = struct {
+ node_ptr: **ast.Node,
+ comments: ?*ast.Node.DocComment,
+};
+
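+// The parser is an explicit stack machine: the main loop pops a State, and each
+// variant names the next parse step together with the node or list it fills in.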
+const State = union(enum) {
+ TopLevel,
+ TopLevelExtern: TopLevelDeclCtx,
+ TopLevelLibname: TopLevelDeclCtx,
+ TopLevelDecl: TopLevelDeclCtx,
+ TopLevelExternOrField: TopLevelExternOrFieldCtx,
+
+ ContainerKind: ContainerKindCtx,
+ ContainerInitArgStart: *ast.Node.ContainerDecl,
+ ContainerInitArg: *ast.Node.ContainerDecl,
+ ContainerDecl: *ast.Node.ContainerDecl,
+
+ VarDecl: VarDeclCtx,
+ VarDeclAlign: *ast.Node.VarDecl,
+ VarDeclEq: *ast.Node.VarDecl,
+ VarDeclSemiColon: *ast.Node.VarDecl,
+
+ FnDef: *ast.Node.FnProto,
+ FnProto: *ast.Node.FnProto,
+ FnProtoAlign: *ast.Node.FnProto,
+ FnProtoReturnType: *ast.Node.FnProto,
+
+ ParamDecl: *ast.Node.FnProto,
+ ParamDeclAliasOrComptime: *ast.Node.ParamDecl,
+ ParamDeclName: *ast.Node.ParamDecl,
+ ParamDeclEnd: ParamDeclEndCtx,
+ ParamDeclComma: *ast.Node.FnProto,
+
+ MaybeLabeledExpression: MaybeLabeledExpressionCtx,
+ LabeledExpression: LabelCtx,
+ Inline: InlineCtx,
+ While: LoopCtx,
+ WhileContinueExpr: *?*ast.Node,
+ For: LoopCtx,
+ Else: *?*ast.Node.Else,
+
+ Block: *ast.Node.Block,
+ Statement: *ast.Node.Block,
+ ComptimeStatement: ComptimeStatementCtx,
+ Semicolon: **ast.Node,
+
+ AsmOutputItems: *ast.Node.Asm.OutputList,
+ AsmOutputReturnOrType: *ast.Node.AsmOutput,
+ AsmInputItems: *ast.Node.Asm.InputList,
+ AsmClobberItems: *ast.Node.Asm.ClobberList,
+
+ ExprListItemOrEnd: ExprListCtx,
+ ExprListCommaOrEnd: ExprListCtx,
+ FieldInitListItemOrEnd: ListSave(ast.Node.SuffixOp.Op.InitList),
+ FieldInitListCommaOrEnd: ListSave(ast.Node.SuffixOp.Op.InitList),
+ FieldListCommaOrEnd: *ast.Node.ContainerDecl,
+ FieldInitValue: OptionalCtx,
+ ErrorTagListItemOrEnd: ListSave(ast.Node.ErrorSetDecl.DeclList),
+ ErrorTagListCommaOrEnd: ListSave(ast.Node.ErrorSetDecl.DeclList),
+ SwitchCaseOrEnd: ListSave(ast.Node.Switch.CaseList),
+ SwitchCaseCommaOrEnd: ListSave(ast.Node.Switch.CaseList),
+ SwitchCaseFirstItem: *ast.Node.SwitchCase,
+ SwitchCaseItemCommaOrEnd: *ast.Node.SwitchCase,
+ SwitchCaseItemOrEnd: *ast.Node.SwitchCase,
+
+ SuspendBody: *ast.Node.Suspend,
+ AsyncAllocator: *ast.Node.AsyncAttribute,
+ AsyncEnd: AsyncEndCtx,
+
+ ExternType: ExternTypeCtx,
+ SliceOrArrayAccess: *ast.Node.SuffixOp,
+ SliceOrArrayType: *ast.Node.PrefixOp,
+ PtrTypeModifiers: *ast.Node.PrefixOp.PtrInfo,
+ AlignBitRange: *ast.Node.PrefixOp.PtrInfo.Align,
+
+ Payload: OptionalCtx,
+ PointerPayload: OptionalCtx,
+ PointerIndexPayload: OptionalCtx,
+
+ Expression: OptionalCtx,
+ RangeExpressionBegin: OptionalCtx,
+ RangeExpressionEnd: OptionalCtx,
+ AssignmentExpressionBegin: OptionalCtx,
+ AssignmentExpressionEnd: OptionalCtx,
+ UnwrapExpressionBegin: OptionalCtx,
+ UnwrapExpressionEnd: OptionalCtx,
+ BoolOrExpressionBegin: OptionalCtx,
+ BoolOrExpressionEnd: OptionalCtx,
+ BoolAndExpressionBegin: OptionalCtx,
+ BoolAndExpressionEnd: OptionalCtx,
+ ComparisonExpressionBegin: OptionalCtx,
+ ComparisonExpressionEnd: OptionalCtx,
+ BinaryOrExpressionBegin: OptionalCtx,
+ BinaryOrExpressionEnd: OptionalCtx,
+ BinaryXorExpressionBegin: OptionalCtx,
+ BinaryXorExpressionEnd: OptionalCtx,
+ BinaryAndExpressionBegin: OptionalCtx,
+ BinaryAndExpressionEnd: OptionalCtx,
+ BitShiftExpressionBegin: OptionalCtx,
+ BitShiftExpressionEnd: OptionalCtx,
+ AdditionExpressionBegin: OptionalCtx,
+ AdditionExpressionEnd: OptionalCtx,
+ MultiplyExpressionBegin: OptionalCtx,
+ MultiplyExpressionEnd: OptionalCtx,
+ CurlySuffixExpressionBegin: OptionalCtx,
+ CurlySuffixExpressionEnd: OptionalCtx,
+ TypeExprBegin: OptionalCtx,
+ TypeExprEnd: OptionalCtx,
+ PrefixOpExpression: OptionalCtx,
+ SuffixOpExpressionBegin: OptionalCtx,
+ SuffixOpExpressionEnd: OptionalCtx,
+ PrimaryExpression: OptionalCtx,
+
+ ErrorTypeOrSetDecl: ErrorTypeOrSetDeclCtx,
+ StringLiteral: OptionalCtx,
+ Identifier: OptionalCtx,
+ ErrorTag: **ast.Node,
+
+ IfToken: @TagType(Token.Id),
+ IfTokenSave: ExpectTokenSave,
+ ExpectToken: @TagType(Token.Id),
+ ExpectTokenSave: ExpectTokenSave,
+ OptionalTokenSave: OptionalTokenSave,
+};
+
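+// Appends one doc-comment line to the DocComment node in `result`, creating the
+// node on first use.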
+fn pushDocComment(arena: *mem.Allocator, line_comment: TokenIndex, result: *?*ast.Node.DocComment) !void {
+ const node = blk: {
+ if (result.*) |comment_node| {
+ break :blk comment_node;
+ } else {
+ const comment_node = try arena.create(ast.Node.DocComment{
+ .base = ast.Node{ .id = ast.Node.Id.DocComment },
+ .lines = ast.Node.DocComment.LineList.init(arena),
+ });
+ result.* = comment_node;
+ break :blk comment_node;
+ }
+ };
+ try node.lines.push(line_comment);
+}
+
+fn eatDocComments(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) !?*ast.Node.DocComment {
+ var result: ?*ast.Node.DocComment = null;
+ while (true) {
+ if (eatToken(tok_it, tree, Token.Id.DocComment)) |line_comment| {
+ try pushDocComment(arena, line_comment, &result);
+ continue;
+ }
+ break;
+ }
+ return result;
+}
+
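+// Builds a StringLiteral node, or a MultilineStringLiteral node that collects every
+// consecutive multiline-string line; returns null for any other token id.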
+fn parseStringLiteral(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterator, token_ptr: *const Token, token_index: TokenIndex, tree: *ast.Tree) !?*ast.Node {
+ switch (token_ptr.id) {
+ Token.Id.StringLiteral => {
+ return &(try createLiteral(arena, ast.Node.StringLiteral, token_index)).base;
+ },
+ Token.Id.MultilineStringLiteralLine => {
+ const node = try arena.create(ast.Node.MultilineStringLiteral{
+ .base = ast.Node{ .id = ast.Node.Id.MultilineStringLiteral },
+ .lines = ast.Node.MultilineStringLiteral.LineList.init(arena),
+ });
+ try node.lines.push(token_index);
+ while (true) {
+ const multiline_str = nextToken(tok_it, tree);
+ const multiline_str_index = multiline_str.index;
+ const multiline_str_ptr = multiline_str.ptr;
+ if (multiline_str_ptr.id != Token.Id.MultilineStringLiteralLine) {
+ prevToken(tok_it, tree);
+ break;
+ }
+
+ try node.lines.push(multiline_str_index);
+ }
+
+ return &node.base;
+ },
+ // TODO: We shouldn't need a cast, but:
+ // zig: /home/jc/Documents/zig/src/ir.cpp:7962: TypeTableEntry* ir_resolve_peer_types(IrAnalyze*, AstNode*, IrInstruction**, size_t): Assertion `err_set_type != nullptr' failed.
+ else => return (?*ast.Node)(null),
+ }
+}
+
+fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *const OptionalCtx, token_ptr: *const Token, token_index: TokenIndex) !bool {
+ switch (token_ptr.id) {
+ Token.Id.Keyword_suspend => {
+ const node = try arena.create(ast.Node.Suspend{
+ .base = ast.Node{ .id = ast.Node.Id.Suspend },
+ .suspend_token = token_index,
+ .body = null,
+ });
+ ctx.store(&node.base);
+
+ stack.append(State{ .SuspendBody = node }) catch unreachable;
+ return true;
+ },
+ Token.Id.Keyword_if => {
+ const node = try arena.create(ast.Node.If{
+ .base = ast.Node{ .id = ast.Node.Id.If },
+ .if_token = token_index,
+ .condition = undefined,
+ .payload = null,
+ .body = undefined,
+ .@"else" = null,
+ });
+ ctx.store(&node.base);
+
+ stack.append(State{ .Else = &node.@"else" }) catch unreachable;
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.body } });
+ try stack.append(State{ .PointerPayload = OptionalCtx{ .Optional = &node.payload } });
+ try stack.append(State{ .ExpectToken = Token.Id.RParen });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.condition } });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ return true;
+ },
+ Token.Id.Keyword_while => {
+ stack.append(State{
+ .While = LoopCtx{
+ .label = null,
+ .inline_token = null,
+ .loop_token = token_index,
+ .opt_ctx = ctx.*,
+ },
+ }) catch unreachable;
+ return true;
+ },
+ Token.Id.Keyword_for => {
+ stack.append(State{
+ .For = LoopCtx{
+ .label = null,
+ .inline_token = null,
+ .loop_token = token_index,
+ .opt_ctx = ctx.*,
+ },
+ }) catch unreachable;
+ return true;
+ },
+ Token.Id.Keyword_switch => {
+ const node = try arena.create(ast.Node.Switch{
+ .base = ast.Node{ .id = ast.Node.Id.Switch },
+ .switch_token = token_index,
+ .expr = undefined,
+ .cases = ast.Node.Switch.CaseList.init(arena),
+ .rbrace = undefined,
+ });
+ ctx.store(&node.base);
+
+ stack.append(State{
+ .SwitchCaseOrEnd = ListSave(@typeOf(node.cases)){
+ .list = &node.cases,
+ .ptr = &node.rbrace,
+ },
+ }) catch unreachable;
+ try stack.append(State{ .ExpectToken = Token.Id.LBrace });
+ try stack.append(State{ .ExpectToken = Token.Id.RParen });
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.expr } });
+ try stack.append(State{ .ExpectToken = Token.Id.LParen });
+ return true;
+ },
+ Token.Id.Keyword_comptime => {
+ const node = try arena.create(ast.Node.Comptime{
+ .base = ast.Node{ .id = ast.Node.Id.Comptime },
+ .comptime_token = token_index,
+ .expr = undefined,
+ .doc_comments = null,
+ });
+ ctx.store(&node.base);
+
+ try stack.append(State{ .Expression = OptionalCtx{ .Required = &node.expr } });
+ return true;
+ },
+ Token.Id.LBrace => {
+ const block = try arena.create(ast.Node.Block{
+ .base = ast.Node{ .id = ast.Node.Id.Block },
+ .label = null,
+ .lbrace = token_index,
+ .statements = ast.Node.Block.StatementList.init(arena),
+ .rbrace = undefined,
+ });
+ ctx.store(&block.base);
+ stack.append(State{ .Block = block }) catch unreachable;
+ return true;
+ },
+ else => {
+ return false;
+ },
+ }
+}
+
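+// Result of looking for the token after a list item: a comma means keep parsing
+// items (end_token = null), the end token finishes the list, and anything else is a
+// parse error.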
+const ExpectCommaOrEndResult = union(enum) {
+ end_token: ?TokenIndex,
+ parse_error: Error,
+};
+
+fn expectCommaOrEnd(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, end: @TagType(Token.Id)) ExpectCommaOrEndResult {
+ const token = nextToken(tok_it, tree);
+ const token_index = token.index;
+ const token_ptr = token.ptr;
+ switch (token_ptr.id) {
+ Token.Id.Comma => return ExpectCommaOrEndResult{ .end_token = null },
+ else => {
+ if (end == token_ptr.id) {
+ return ExpectCommaOrEndResult{ .end_token = token_index };
+ }
+
+ return ExpectCommaOrEndResult{
+ .parse_error = Error{
+ .ExpectedCommaOrEnd = Error.ExpectedCommaOrEnd{
+ .token = token_index,
+ .end_id = end,
+ },
+ },
+ };
+ },
+ }
+}
+
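+// The tokenIdTo* helpers map an operator token to the matching InfixOp/PrefixOp tag,
+// returning null when the token is not an operator at that precedence level.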
+fn tokenIdToAssignment(id: *const Token.Id) ?ast.Node.InfixOp.Op {
+ // TODO: We have to cast all cases because of this:
+ // error: expected type '?InfixOp', found '?@TagType(InfixOp)'
+ return switch (id.*) {
+ Token.Id.AmpersandEqual => ast.Node.InfixOp.Op{ .AssignBitAnd = {} },
+ Token.Id.AngleBracketAngleBracketLeftEqual => ast.Node.InfixOp.Op{ .AssignBitShiftLeft = {} },
+ Token.Id.AngleBracketAngleBracketRightEqual => ast.Node.InfixOp.Op{ .AssignBitShiftRight = {} },
+ Token.Id.AsteriskEqual => ast.Node.InfixOp.Op{ .AssignTimes = {} },
+ Token.Id.AsteriskPercentEqual => ast.Node.InfixOp.Op{ .AssignTimesWarp = {} },
+ Token.Id.CaretEqual => ast.Node.InfixOp.Op{ .AssignBitXor = {} },
+ Token.Id.Equal => ast.Node.InfixOp.Op{ .Assign = {} },
+ Token.Id.MinusEqual => ast.Node.InfixOp.Op{ .AssignMinus = {} },
+ Token.Id.MinusPercentEqual => ast.Node.InfixOp.Op{ .AssignMinusWrap = {} },
+ Token.Id.PercentEqual => ast.Node.InfixOp.Op{ .AssignMod = {} },
+ Token.Id.PipeEqual => ast.Node.InfixOp.Op{ .AssignBitOr = {} },
+ Token.Id.PlusEqual => ast.Node.InfixOp.Op{ .AssignPlus = {} },
+ Token.Id.PlusPercentEqual => ast.Node.InfixOp.Op{ .AssignPlusWrap = {} },
+ Token.Id.SlashEqual => ast.Node.InfixOp.Op{ .AssignDiv = {} },
+ else => null,
+ };
+}
+
+fn tokenIdToUnwrapExpr(id: @TagType(Token.Id)) ?ast.Node.InfixOp.Op {
+ return switch (id) {
+ Token.Id.Keyword_catch => ast.Node.InfixOp.Op{ .Catch = null },
+ Token.Id.Keyword_orelse => ast.Node.InfixOp.Op{ .UnwrapOptional = void{} },
+ else => null,
+ };
+}
+
+fn tokenIdToComparison(id: @TagType(Token.Id)) ?ast.Node.InfixOp.Op {
+ return switch (id) {
+ Token.Id.BangEqual => ast.Node.InfixOp.Op{ .BangEqual = void{} },
+ Token.Id.EqualEqual => ast.Node.InfixOp.Op{ .EqualEqual = void{} },
+ Token.Id.AngleBracketLeft => ast.Node.InfixOp.Op{ .LessThan = void{} },
+ Token.Id.AngleBracketLeftEqual => ast.Node.InfixOp.Op{ .LessOrEqual = void{} },
+ Token.Id.AngleBracketRight => ast.Node.InfixOp.Op{ .GreaterThan = void{} },
+ Token.Id.AngleBracketRightEqual => ast.Node.InfixOp.Op{ .GreaterOrEqual = void{} },
+ else => null,
+ };
+}
+
+fn tokenIdToBitShift(id: @TagType(Token.Id)) ?ast.Node.InfixOp.Op {
+ return switch (id) {
+ Token.Id.AngleBracketAngleBracketLeft => ast.Node.InfixOp.Op{ .BitShiftLeft = void{} },
+ Token.Id.AngleBracketAngleBracketRight => ast.Node.InfixOp.Op{ .BitShiftRight = void{} },
+ else => null,
+ };
+}
+
+fn tokenIdToAddition(id: @TagType(Token.Id)) ?ast.Node.InfixOp.Op {
+ return switch (id) {
+ Token.Id.Minus => ast.Node.InfixOp.Op{ .Sub = void{} },
+ Token.Id.MinusPercent => ast.Node.InfixOp.Op{ .SubWrap = void{} },
+ Token.Id.Plus => ast.Node.InfixOp.Op{ .Add = void{} },
+ Token.Id.PlusPercent => ast.Node.InfixOp.Op{ .AddWrap = void{} },
+ Token.Id.PlusPlus => ast.Node.InfixOp.Op{ .ArrayCat = void{} },
+ else => null,
+ };
+}
+
+fn tokenIdToMultiply(id: @TagType(Token.Id)) ?ast.Node.InfixOp.Op {
+ return switch (id) {
+ Token.Id.Slash => ast.Node.InfixOp.Op{ .Div = void{} },
+ Token.Id.Asterisk => ast.Node.InfixOp.Op{ .Mult = void{} },
+ Token.Id.AsteriskAsterisk => ast.Node.InfixOp.Op{ .ArrayMult = void{} },
+ Token.Id.AsteriskPercent => ast.Node.InfixOp.Op{ .MultWrap = void{} },
+ Token.Id.Percent => ast.Node.InfixOp.Op{ .Mod = void{} },
+ Token.Id.PipePipe => ast.Node.InfixOp.Op{ .MergeErrorSets = void{} },
+ else => null,
+ };
+}
+
+fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op {
+ return switch (id) {
+ Token.Id.Bang => ast.Node.PrefixOp.Op{ .BoolNot = void{} },
+ Token.Id.Tilde => ast.Node.PrefixOp.Op{ .BitNot = void{} },
+ Token.Id.Minus => ast.Node.PrefixOp.Op{ .Negation = void{} },
+ Token.Id.MinusPercent => ast.Node.PrefixOp.Op{ .NegationWrap = void{} },
+ Token.Id.Ampersand => ast.Node.PrefixOp.Op{ .AddressOf = void{} },
+ Token.Id.Asterisk, Token.Id.AsteriskAsterisk, Token.Id.BracketStarBracket => ast.Node.PrefixOp.Op{
+ .PtrType = ast.Node.PrefixOp.PtrInfo{
+ .align_info = null,
+ .const_token = null,
+ .volatile_token = null,
+ },
+ },
+ Token.Id.QuestionMark => ast.Node.PrefixOp.Op{ .OptionalType = void{} },
+ Token.Id.Keyword_await => ast.Node.PrefixOp.Op{ .Await = void{} },
+ Token.Id.Keyword_try => ast.Node.PrefixOp.Op{ .Try = void{} },
+ else => null,
+ };
+}
+
+fn createLiteral(arena: *mem.Allocator, comptime T: type, token_index: TokenIndex) !*T {
+ return arena.create(T{
+ .base = ast.Node{ .id = ast.Node.typeToId(T) },
+ .token = token_index,
+ });
+}
+
+fn createToCtxLiteral(arena: *mem.Allocator, opt_ctx: *const OptionalCtx, comptime T: type, token_index: TokenIndex) !*T {
+ const node = try createLiteral(arena, T, token_index);
+ opt_ctx.store(&node.base);
+
+ return node;
+}
+
+fn eatToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, id: @TagType(Token.Id)) ?TokenIndex {
+ const token = tok_it.peek().?;
+
+ if (token.id == id) {
+ return nextToken(tok_it, tree).index;
+ }
+
+ return null;
+}
+
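+// Consumes and returns the next token, then skips any line comments that follow, so
+// the iterator never rests on a LineComment; prevToken steps back over them likewise.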
+fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedToken {
+ const result = AnnotatedToken{
+ .index = tok_it.index,
+ .ptr = tok_it.next().?,
+ };
+ assert(result.ptr.id != Token.Id.LineComment);
+
+ while (true) {
+ const next_tok = tok_it.peek() orelse return result;
+ if (next_tok.id != Token.Id.LineComment) return result;
+ _ = tok_it.next();
+ }
+}
+
+fn prevToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) void {
+ while (true) {
+ const prev_tok = tok_it.prev() orelse return;
+ if (prev_tok.id == Token.Id.LineComment) continue;
+ return;
+ }
+}
+
+test "std.zig.parser" {
+ _ = @import("parser_test.zig");
+}
diff --git a/std/zig/parse_string_literal.zig b/std/zig/parse_string_literal.zig
new file mode 100644
index 0000000000..00c92a7651
--- /dev/null
+++ b/std/zig/parse_string_literal.zig
@@ -0,0 +1,76 @@
+const std = @import("../index.zig");
+const assert = std.debug.assert;
+
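+// Two-state escape decoder: Start copies bytes until a backslash or the closing
+// quote; Backslash translates the escape that follows (\x, \u and \U are still TODO).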
+const State = enum {
+ Start,
+ Backslash,
+};
+
+pub const ParseStringLiteralError = error{
+ OutOfMemory,
+
+    /// When this is returned, bad_index is set to the position of the offending character.
+ InvalidCharacter,
+};
+
+/// caller owns returned memory
+pub fn parseStringLiteral(
+ allocator: *std.mem.Allocator,
+ bytes: []const u8,
+ bad_index: *usize, // populated if error.InvalidCharacter is returned
+) ParseStringLiteralError![]u8 {
+ const first_index = if (bytes[0] == 'c') usize(2) else usize(1);
+ assert(bytes[bytes.len - 1] == '"');
+
+ var list = std.ArrayList(u8).init(allocator);
+ errdefer list.deinit();
+
+ const slice = bytes[first_index..];
+ try list.ensureCapacity(slice.len - 1);
+
+ var state = State.Start;
+ for (slice) |b, index| {
+ switch (state) {
+ State.Start => switch (b) {
+ '\\' => state = State.Backslash,
+ '\n' => {
+ bad_index.* = index;
+ return error.InvalidCharacter;
+ },
+ '"' => return list.toOwnedSlice(),
+ else => try list.append(b),
+ },
+ State.Backslash => switch (b) {
+ 'x' => @panic("TODO"),
+ 'u' => @panic("TODO"),
+ 'U' => @panic("TODO"),
+ 'n' => {
+ try list.append('\n');
+ state = State.Start;
+ },
+ 'r' => {
+ try list.append('\r');
+ state = State.Start;
+ },
+ '\\' => {
+ try list.append('\\');
+ state = State.Start;
+ },
+ 't' => {
+ try list.append('\t');
+ state = State.Start;
+ },
+ '"' => {
+ try list.append('"');
+ state = State.Start;
+ },
+ else => {
+ bad_index.* = index;
+ return error.InvalidCharacter;
+ },
+ },
+ else => unreachable,
+ }
+ }
+ unreachable;
+}
diff --git a/std/zig/parser.zig b/std/zig/parser.zig
deleted file mode 100644
index c0708581ea..0000000000
--- a/std/zig/parser.zig
+++ /dev/null
@@ -1,5265 +0,0 @@
-const std = @import("../index.zig");
-const assert = std.debug.assert;
-const ArrayList = std.ArrayList;
-const mem = std.mem;
-const ast = std.zig.ast;
-const Tokenizer = std.zig.Tokenizer;
-const Token = std.zig.Token;
-const builtin = @import("builtin");
-const io = std.io;
-
-// TODO when we make parse errors into error types instead of printing directly,
-// get rid of this
-const warn = std.debug.warn;
-
-pub const Parser = struct {
- util_allocator: &mem.Allocator,
- tokenizer: &Tokenizer,
- put_back_tokens: [2]Token,
- put_back_count: usize,
- source_file_name: []const u8,
- pending_line_comment_node: ?&ast.NodeLineComment,
-
- pub const Tree = struct {
- root_node: &ast.NodeRoot,
- arena_allocator: std.heap.ArenaAllocator,
-
- pub fn deinit(self: &Tree) void {
- self.arena_allocator.deinit();
- }
- };
-
- // This memory contents are used only during a function call. It's used to repurpose memory;
- // we reuse the same bytes for the stack data structure used by parsing, tree rendering, and
- // source rendering.
- const utility_bytes_align = @alignOf( union { a: RenderAstFrame, b: State, c: RenderState } );
- utility_bytes: []align(utility_bytes_align) u8,
-
- /// allocator must outlive the returned Parser and all the parse trees you create with it.
- pub fn init(tokenizer: &Tokenizer, allocator: &mem.Allocator, source_file_name: []const u8) Parser {
- return Parser {
- .util_allocator = allocator,
- .tokenizer = tokenizer,
- .put_back_tokens = undefined,
- .put_back_count = 0,
- .source_file_name = source_file_name,
- .utility_bytes = []align(utility_bytes_align) u8{},
- .pending_line_comment_node = null,
- };
- }
-
- pub fn deinit(self: &Parser) void {
- self.util_allocator.free(self.utility_bytes);
- }
-
- const TopLevelDeclCtx = struct {
- decls: &ArrayList(&ast.Node),
- visib_token: ?Token,
- extern_export_inline_token: ?Token,
- lib_name: ?&ast.Node,
- };
-
- const VarDeclCtx = struct {
- mut_token: Token,
- visib_token: ?Token,
- comptime_token: ?Token,
- extern_export_token: ?Token,
- lib_name: ?&ast.Node,
- list: &ArrayList(&ast.Node),
- };
-
- const TopLevelExternOrFieldCtx = struct {
- visib_token: Token,
- container_decl: &ast.NodeContainerDecl,
- };
-
- const ExternTypeCtx = struct {
- opt_ctx: OptionalCtx,
- extern_token: Token,
- };
-
- const ContainerKindCtx = struct {
- opt_ctx: OptionalCtx,
- ltoken: Token,
- layout: ast.NodeContainerDecl.Layout,
- };
-
- const ExpectTokenSave = struct {
- id: Token.Id,
- ptr: &Token,
- };
-
- const OptionalTokenSave = struct {
- id: Token.Id,
- ptr: &?Token,
- };
-
- const ExprListCtx = struct {
- list: &ArrayList(&ast.Node),
- end: Token.Id,
- ptr: &Token,
- };
-
- fn ListSave(comptime T: type) type {
- return struct {
- list: &ArrayList(T),
- ptr: &Token,
- };
- }
-
- const MaybeLabeledExpressionCtx = struct {
- label: Token,
- opt_ctx: OptionalCtx,
- };
-
- const LabelCtx = struct {
- label: ?Token,
- opt_ctx: OptionalCtx,
- };
-
- const InlineCtx = struct {
- label: ?Token,
- inline_token: ?Token,
- opt_ctx: OptionalCtx,
- };
-
- const LoopCtx = struct {
- label: ?Token,
- inline_token: ?Token,
- loop_token: Token,
- opt_ctx: OptionalCtx,
- };
-
- const AsyncEndCtx = struct {
- ctx: OptionalCtx,
- attribute: &ast.NodeAsyncAttribute,
- };
-
- const ErrorTypeOrSetDeclCtx = struct {
- opt_ctx: OptionalCtx,
- error_token: Token,
- };
-
- const ParamDeclEndCtx = struct {
- fn_proto: &ast.NodeFnProto,
- param_decl: &ast.NodeParamDecl,
- };
-
- const ComptimeStatementCtx = struct {
- comptime_token: Token,
- block: &ast.NodeBlock,
- };
-
- const OptionalCtx = union(enum) {
- Optional: &?&ast.Node,
- RequiredNull: &?&ast.Node,
- Required: &&ast.Node,
-
- pub fn store(self: &const OptionalCtx, value: &ast.Node) void {
- switch (*self) {
- OptionalCtx.Optional => |ptr| *ptr = value,
- OptionalCtx.RequiredNull => |ptr| *ptr = value,
- OptionalCtx.Required => |ptr| *ptr = value,
- }
- }
-
- pub fn get(self: &const OptionalCtx) ?&ast.Node {
- switch (*self) {
- OptionalCtx.Optional => |ptr| return *ptr,
- OptionalCtx.RequiredNull => |ptr| return ??*ptr,
- OptionalCtx.Required => |ptr| return *ptr,
- }
- }
-
- pub fn toRequired(self: &const OptionalCtx) OptionalCtx {
- switch (*self) {
- OptionalCtx.Optional => |ptr| {
- return OptionalCtx { .RequiredNull = ptr };
- },
- OptionalCtx.RequiredNull => |ptr| return *self,
- OptionalCtx.Required => |ptr| return *self,
- }
- }
- };
-
- const State = union(enum) {
- TopLevel,
- TopLevelExtern: TopLevelDeclCtx,
- TopLevelLibname: TopLevelDeclCtx,
- TopLevelDecl: TopLevelDeclCtx,
- TopLevelExternOrField: TopLevelExternOrFieldCtx,
-
- ContainerKind: ContainerKindCtx,
- ContainerInitArgStart: &ast.NodeContainerDecl,
- ContainerInitArg: &ast.NodeContainerDecl,
- ContainerDecl: &ast.NodeContainerDecl,
-
- VarDecl: VarDeclCtx,
- VarDeclAlign: &ast.NodeVarDecl,
- VarDeclEq: &ast.NodeVarDecl,
-
- FnDef: &ast.NodeFnProto,
- FnProto: &ast.NodeFnProto,
- FnProtoAlign: &ast.NodeFnProto,
- FnProtoReturnType: &ast.NodeFnProto,
-
- ParamDecl: &ast.NodeFnProto,
- ParamDeclAliasOrComptime: &ast.NodeParamDecl,
- ParamDeclName: &ast.NodeParamDecl,
- ParamDeclEnd: ParamDeclEndCtx,
- ParamDeclComma: &ast.NodeFnProto,
-
- MaybeLabeledExpression: MaybeLabeledExpressionCtx,
- LabeledExpression: LabelCtx,
- Inline: InlineCtx,
- While: LoopCtx,
- WhileContinueExpr: &?&ast.Node,
- For: LoopCtx,
- Else: &?&ast.NodeElse,
-
- Block: &ast.NodeBlock,
- Statement: &ast.NodeBlock,
- ComptimeStatement: ComptimeStatementCtx,
- Semicolon: &&ast.Node,
-
- AsmOutputItems: &ArrayList(&ast.NodeAsmOutput),
- AsmOutputReturnOrType: &ast.NodeAsmOutput,
- AsmInputItems: &ArrayList(&ast.NodeAsmInput),
- AsmClopperItems: &ArrayList(&ast.Node),
-
- ExprListItemOrEnd: ExprListCtx,
- ExprListCommaOrEnd: ExprListCtx,
- FieldInitListItemOrEnd: ListSave(&ast.NodeFieldInitializer),
- FieldInitListCommaOrEnd: ListSave(&ast.NodeFieldInitializer),
- FieldListCommaOrEnd: &ast.NodeContainerDecl,
- IdentifierListItemOrEnd: ListSave(&ast.Node),
- IdentifierListCommaOrEnd: ListSave(&ast.Node),
- SwitchCaseOrEnd: ListSave(&ast.NodeSwitchCase),
- SwitchCaseCommaOrEnd: ListSave(&ast.NodeSwitchCase),
- SwitchCaseFirstItem: &ArrayList(&ast.Node),
- SwitchCaseItem: &ArrayList(&ast.Node),
- SwitchCaseItemCommaOrEnd: &ArrayList(&ast.Node),
-
- SuspendBody: &ast.NodeSuspend,
- AsyncAllocator: &ast.NodeAsyncAttribute,
- AsyncEnd: AsyncEndCtx,
-
- ExternType: ExternTypeCtx,
- SliceOrArrayAccess: &ast.NodeSuffixOp,
- SliceOrArrayType: &ast.NodePrefixOp,
- AddrOfModifiers: &ast.NodePrefixOp.AddrOfInfo,
-
- Payload: OptionalCtx,
- PointerPayload: OptionalCtx,
- PointerIndexPayload: OptionalCtx,
-
- Expression: OptionalCtx,
- RangeExpressionBegin: OptionalCtx,
- RangeExpressionEnd: OptionalCtx,
- AssignmentExpressionBegin: OptionalCtx,
- AssignmentExpressionEnd: OptionalCtx,
- UnwrapExpressionBegin: OptionalCtx,
- UnwrapExpressionEnd: OptionalCtx,
- BoolOrExpressionBegin: OptionalCtx,
- BoolOrExpressionEnd: OptionalCtx,
- BoolAndExpressionBegin: OptionalCtx,
- BoolAndExpressionEnd: OptionalCtx,
- ComparisonExpressionBegin: OptionalCtx,
- ComparisonExpressionEnd: OptionalCtx,
- BinaryOrExpressionBegin: OptionalCtx,
- BinaryOrExpressionEnd: OptionalCtx,
- BinaryXorExpressionBegin: OptionalCtx,
- BinaryXorExpressionEnd: OptionalCtx,
- BinaryAndExpressionBegin: OptionalCtx,
- BinaryAndExpressionEnd: OptionalCtx,
- BitShiftExpressionBegin: OptionalCtx,
- BitShiftExpressionEnd: OptionalCtx,
- AdditionExpressionBegin: OptionalCtx,
- AdditionExpressionEnd: OptionalCtx,
- MultiplyExpressionBegin: OptionalCtx,
- MultiplyExpressionEnd: OptionalCtx,
- CurlySuffixExpressionBegin: OptionalCtx,
- CurlySuffixExpressionEnd: OptionalCtx,
- TypeExprBegin: OptionalCtx,
- TypeExprEnd: OptionalCtx,
- PrefixOpExpression: OptionalCtx,
- SuffixOpExpressionBegin: OptionalCtx,
- SuffixOpExpressionEnd: OptionalCtx,
- PrimaryExpression: OptionalCtx,
-
- ErrorTypeOrSetDecl: ErrorTypeOrSetDeclCtx,
- StringLiteral: OptionalCtx,
- Identifier: OptionalCtx,
-
-
- IfToken: @TagType(Token.Id),
- IfTokenSave: ExpectTokenSave,
- ExpectToken: @TagType(Token.Id),
- ExpectTokenSave: ExpectTokenSave,
- OptionalTokenSave: OptionalTokenSave,
- };
-
- /// Returns an AST tree, allocated with the parser's allocator.
- /// Result should be freed with tree.deinit() when there are
- /// no more references to any AST nodes of the tree.
- pub fn parse(self: &Parser) !Tree {
- var stack = self.initUtilityArrayList(State);
- defer self.deinitUtilityArrayList(stack);
-
- var arena_allocator = std.heap.ArenaAllocator.init(self.util_allocator);
- errdefer arena_allocator.deinit();
-
- const arena = &arena_allocator.allocator;
- const root_node = try self.createNode(arena, ast.NodeRoot,
- ast.NodeRoot {
- .base = undefined,
- .decls = ArrayList(&ast.Node).init(arena),
- // initialized when we get the eof token
- .eof_token = undefined,
- }
- );
-
- try stack.append(State.TopLevel);
-
- while (true) {
- //{
- // const token = self.getNextToken();
- // warn("{} ", @tagName(token.id));
- // self.putBackToken(token);
- // var i: usize = stack.len;
- // while (i != 0) {
- // i -= 1;
- // warn("{} ", @tagName(stack.items[i]));
- // }
- // warn("\n");
- //}
-
- // look for line comments
- while (true) {
- if (self.eatToken(Token.Id.LineComment)) |line_comment| {
- const node = blk: {
- if (self.pending_line_comment_node) |comment_node| {
- break :blk comment_node;
- } else {
- const comment_node = try arena.create(ast.NodeLineComment);
- *comment_node = ast.NodeLineComment {
- .base = ast.Node {
- .id = ast.Node.Id.LineComment,
- .comment = null,
- },
- .lines = ArrayList(Token).init(arena),
- };
- self.pending_line_comment_node = comment_node;
- break :blk comment_node;
- }
- };
- try node.lines.append(line_comment);
- continue;
- }
- break;
- }
-
- // This gives us 1 free append that can't fail
- const state = stack.pop();
-
- switch (state) {
- State.TopLevel => {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Keyword_test => {
- stack.append(State.TopLevel) catch unreachable;
-
- const block = try self.createNode(arena, ast.NodeBlock,
- ast.NodeBlock {
- .base = undefined,
- .label = null,
- .lbrace = undefined,
- .statements = ArrayList(&ast.Node).init(arena),
- .rbrace = undefined,
- }
- );
- const test_node = try self.createAttachNode(arena, &root_node.decls, ast.NodeTestDecl,
- ast.NodeTestDecl {
- .base = undefined,
- .test_token = token,
- .name = undefined,
- .body_node = &block.base,
- }
- );
- stack.append(State { .Block = block }) catch unreachable;
- try stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.LBrace,
- .ptr = &block.rbrace,
- }
- });
- try stack.append(State { .StringLiteral = OptionalCtx { .Required = &test_node.name } });
- continue;
- },
- Token.Id.Eof => {
- root_node.eof_token = token;
- return Tree {.root_node = root_node, .arena_allocator = arena_allocator};
- },
- Token.Id.Keyword_pub => {
- stack.append(State.TopLevel) catch unreachable;
- try stack.append(State {
- .TopLevelExtern = TopLevelDeclCtx {
- .decls = &root_node.decls,
- .visib_token = token,
- .extern_export_inline_token = null,
- .lib_name = null,
- }
- });
- continue;
- },
- Token.Id.Keyword_comptime => {
- const block = try self.createNode(arena, ast.NodeBlock,
- ast.NodeBlock {
- .base = undefined,
- .label = null,
- .lbrace = undefined,
- .statements = ArrayList(&ast.Node).init(arena),
- .rbrace = undefined,
- }
- );
- const node = try self.createAttachNode(arena, &root_node.decls, ast.NodeComptime,
- ast.NodeComptime {
- .base = undefined,
- .comptime_token = token,
- .expr = &block.base,
- }
- );
- stack.append(State.TopLevel) catch unreachable;
- try stack.append(State { .Block = block });
- try stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.LBrace,
- .ptr = &block.rbrace,
- }
- });
- continue;
- },
- else => {
- self.putBackToken(token);
- stack.append(State.TopLevel) catch unreachable;
- try stack.append(State {
- .TopLevelExtern = TopLevelDeclCtx {
- .decls = &root_node.decls,
- .visib_token = null,
- .extern_export_inline_token = null,
- .lib_name = null,
- }
- });
- continue;
- },
- }
- },
- State.TopLevelExtern => |ctx| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Keyword_export, Token.Id.Keyword_inline => {
- stack.append(State {
- .TopLevelDecl = TopLevelDeclCtx {
- .decls = ctx.decls,
- .visib_token = ctx.visib_token,
- .extern_export_inline_token = token,
- .lib_name = null,
- },
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_extern => {
- stack.append(State {
- .TopLevelLibname = TopLevelDeclCtx {
- .decls = ctx.decls,
- .visib_token = ctx.visib_token,
- .extern_export_inline_token = token,
- .lib_name = null,
- },
- }) catch unreachable;
- continue;
- },
- else => {
- self.putBackToken(token);
- stack.append(State { .TopLevelDecl = ctx }) catch unreachable;
- continue;
- }
- }
- },
- State.TopLevelLibname => |ctx| {
- const lib_name = blk: {
- const lib_name_token = self.getNextToken();
- break :blk (try self.parseStringLiteral(arena, lib_name_token)) ?? {
- self.putBackToken(lib_name_token);
- break :blk null;
- };
- };
-
- stack.append(State {
- .TopLevelDecl = TopLevelDeclCtx {
- .decls = ctx.decls,
- .visib_token = ctx.visib_token,
- .extern_export_inline_token = ctx.extern_export_inline_token,
- .lib_name = lib_name,
- },
- }) catch unreachable;
- continue;
- },
- State.TopLevelDecl => |ctx| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Keyword_use => {
- if (ctx.extern_export_inline_token != null) {
- return self.parseError(token, "Invalid token {}", @tagName((??ctx.extern_export_inline_token).id));
- }
-
- const node = try self.createAttachNode(arena, ctx.decls, ast.NodeUse,
- ast.NodeUse {
- .base = undefined,
- .visib_token = ctx.visib_token,
- .expr = undefined,
- .semicolon_token = undefined,
- }
- );
- stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Semicolon,
- .ptr = &node.semicolon_token,
- }
- }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.expr } });
- continue;
- },
- Token.Id.Keyword_var, Token.Id.Keyword_const => {
- if (ctx.extern_export_inline_token) |extern_export_inline_token| {
- if (extern_export_inline_token.id == Token.Id.Keyword_inline) {
- return self.parseError(token, "Invalid token {}", @tagName(extern_export_inline_token.id));
- }
- }
-
- stack.append(State {
- .VarDecl = VarDeclCtx {
- .visib_token = ctx.visib_token,
- .lib_name = ctx.lib_name,
- .comptime_token = null,
- .extern_export_token = ctx.extern_export_inline_token,
- .mut_token = token,
- .list = ctx.decls
- }
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_fn, Token.Id.Keyword_nakedcc,
- Token.Id.Keyword_stdcallcc, Token.Id.Keyword_async => {
- const fn_proto = try self.createAttachNode(arena, ctx.decls, ast.NodeFnProto,
- ast.NodeFnProto {
- .base = undefined,
- .visib_token = ctx.visib_token,
- .name_token = null,
- .fn_token = undefined,
- .params = ArrayList(&ast.Node).init(arena),
- .return_type = undefined,
- .var_args_token = null,
- .extern_export_inline_token = ctx.extern_export_inline_token,
- .cc_token = null,
- .async_attr = null,
- .body_node = null,
- .lib_name = ctx.lib_name,
- .align_expr = null,
- }
- );
- stack.append(State { .FnDef = fn_proto }) catch unreachable;
- try stack.append(State { .FnProto = fn_proto });
-
- switch (token.id) {
- Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc => {
- fn_proto.cc_token = token;
- try stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Keyword_fn,
- .ptr = &fn_proto.fn_token,
- }
- });
- continue;
- },
- Token.Id.Keyword_async => {
- const async_node = try self.createNode(arena, ast.NodeAsyncAttribute,
- ast.NodeAsyncAttribute {
- .base = undefined,
- .async_token = token,
- .allocator_type = null,
- .rangle_bracket = null,
- }
- );
- fn_proto.async_attr = async_node;
-
- try stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Keyword_fn,
- .ptr = &fn_proto.fn_token,
- }
- });
- try stack.append(State { .AsyncAllocator = async_node });
- continue;
- },
- Token.Id.Keyword_fn => {
- fn_proto.fn_token = token;
- continue;
- },
- else => unreachable,
- }
- },
- else => {
- return self.parseError(token, "expected variable declaration or function, found {}", @tagName(token.id));
- },
- }
- },
- State.TopLevelExternOrField => |ctx| {
- if (self.eatToken(Token.Id.Identifier)) |identifier| {
- std.debug.assert(ctx.container_decl.kind == ast.NodeContainerDecl.Kind.Struct);
- const node = try self.createAttachNode(arena, &ctx.container_decl.fields_and_decls, ast.NodeStructField,
- ast.NodeStructField {
- .base = undefined,
- .visib_token = ctx.visib_token,
- .name_token = identifier,
- .type_expr = undefined,
- }
- );
-
- stack.append(State { .FieldListCommaOrEnd = ctx.container_decl }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.type_expr } });
- try stack.append(State { .ExpectToken = Token.Id.Colon });
- continue;
- }
-
- stack.append(State{ .ContainerDecl = ctx.container_decl }) catch unreachable;
- try stack.append(State {
- .TopLevelExtern = TopLevelDeclCtx {
- .decls = &ctx.container_decl.fields_and_decls,
- .visib_token = ctx.visib_token,
- .extern_export_inline_token = null,
- .lib_name = null,
- }
- });
- continue;
- },
-
-
- State.ContainerKind => |ctx| {
- const token = self.getNextToken();
- const node = try self.createToCtxNode(arena, ctx.opt_ctx, ast.NodeContainerDecl,
- ast.NodeContainerDecl {
- .base = undefined,
- .ltoken = ctx.ltoken,
- .layout = ctx.layout,
- .kind = switch (token.id) {
- Token.Id.Keyword_struct => ast.NodeContainerDecl.Kind.Struct,
- Token.Id.Keyword_union => ast.NodeContainerDecl.Kind.Union,
- Token.Id.Keyword_enum => ast.NodeContainerDecl.Kind.Enum,
- else => {
- return self.parseError(token, "expected {}, {} or {}, found {}",
- @tagName(Token.Id.Keyword_struct),
- @tagName(Token.Id.Keyword_union),
- @tagName(Token.Id.Keyword_enum),
- @tagName(token.id));
- },
- },
- .init_arg_expr = ast.NodeContainerDecl.InitArg.None,
- .fields_and_decls = ArrayList(&ast.Node).init(arena),
- .rbrace_token = undefined,
- }
- );
-
- stack.append(State { .ContainerDecl = node }) catch unreachable;
- try stack.append(State { .ExpectToken = Token.Id.LBrace });
- try stack.append(State { .ContainerInitArgStart = node });
- continue;
- },
-
- State.ContainerInitArgStart => |container_decl| {
- if (self.eatToken(Token.Id.LParen) == null) {
- continue;
- }
-
- stack.append(State { .ExpectToken = Token.Id.RParen }) catch unreachable;
- try stack.append(State { .ContainerInitArg = container_decl });
- continue;
- },
-
- State.ContainerInitArg => |container_decl| {
- const init_arg_token = self.getNextToken();
- switch (init_arg_token.id) {
- Token.Id.Keyword_enum => {
- container_decl.init_arg_expr = ast.NodeContainerDecl.InitArg.Enum;
- },
- else => {
- self.putBackToken(init_arg_token);
- container_decl.init_arg_expr = ast.NodeContainerDecl.InitArg { .Type = undefined };
- stack.append(State { .Expression = OptionalCtx { .Required = &container_decl.init_arg_expr.Type } }) catch unreachable;
- },
- }
- continue;
- },
- State.ContainerDecl => |container_decl| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Identifier => {
- switch (container_decl.kind) {
- ast.NodeContainerDecl.Kind.Struct => {
- const node = try self.createAttachNode(arena, &container_decl.fields_and_decls, ast.NodeStructField,
- ast.NodeStructField {
- .base = undefined,
- .visib_token = null,
- .name_token = token,
- .type_expr = undefined,
- }
- );
-
- stack.append(State { .FieldListCommaOrEnd = container_decl }) catch unreachable;
- try stack.append(State { .TypeExprBegin = OptionalCtx { .Required = &node.type_expr } });
- try stack.append(State { .ExpectToken = Token.Id.Colon });
- continue;
- },
- ast.NodeContainerDecl.Kind.Union => {
- const node = try self.createAttachNode(arena, &container_decl.fields_and_decls, ast.NodeUnionTag,
- ast.NodeUnionTag {
- .base = undefined,
- .name_token = token,
- .type_expr = null,
- }
- );
-
- stack.append(State { .FieldListCommaOrEnd = container_decl }) catch unreachable;
- try stack.append(State { .TypeExprBegin = OptionalCtx { .RequiredNull = &node.type_expr } });
- try stack.append(State { .IfToken = Token.Id.Colon });
- continue;
- },
- ast.NodeContainerDecl.Kind.Enum => {
- const node = try self.createAttachNode(arena, &container_decl.fields_and_decls, ast.NodeEnumTag,
- ast.NodeEnumTag {
- .base = undefined,
- .name_token = token,
- .value = null,
- }
- );
-
- stack.append(State { .FieldListCommaOrEnd = container_decl }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .RequiredNull = &node.value } });
- try stack.append(State { .IfToken = Token.Id.Equal });
- continue;
- },
- }
- },
- Token.Id.Keyword_pub => {
- switch (container_decl.kind) {
- ast.NodeContainerDecl.Kind.Struct => {
- try stack.append(State {
- .TopLevelExternOrField = TopLevelExternOrFieldCtx {
- .visib_token = token,
- .container_decl = container_decl,
- }
- });
- continue;
- },
- else => {
- stack.append(State{ .ContainerDecl = container_decl }) catch unreachable;
- try stack.append(State {
- .TopLevelExtern = TopLevelDeclCtx {
- .decls = &container_decl.fields_and_decls,
- .visib_token = token,
- .extern_export_inline_token = null,
- .lib_name = null,
- }
- });
- continue;
- }
- }
- },
- Token.Id.Keyword_export => {
- stack.append(State{ .ContainerDecl = container_decl }) catch unreachable;
- try stack.append(State {
- .TopLevelExtern = TopLevelDeclCtx {
- .decls = &container_decl.fields_and_decls,
- .visib_token = token,
- .extern_export_inline_token = null,
- .lib_name = null,
- }
- });
- continue;
- },
- Token.Id.RBrace => {
- container_decl.rbrace_token = token;
- continue;
- },
- else => {
- self.putBackToken(token);
- stack.append(State{ .ContainerDecl = container_decl }) catch unreachable;
- try stack.append(State {
- .TopLevelExtern = TopLevelDeclCtx {
- .decls = &container_decl.fields_and_decls,
- .visib_token = null,
- .extern_export_inline_token = null,
- .lib_name = null,
- }
- });
- continue;
- }
- }
- },
-
-
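-                // Variable declarations: `var`/`const` name, an optional `: type`,
-                // an optional `align(expr)`, then either `= init;` or a bare `;`.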
- State.VarDecl => |ctx| {
- const var_decl = try self.createAttachNode(arena, ctx.list, ast.NodeVarDecl,
- ast.NodeVarDecl {
- .base = undefined,
- .visib_token = ctx.visib_token,
- .mut_token = ctx.mut_token,
- .comptime_token = ctx.comptime_token,
- .extern_export_token = ctx.extern_export_token,
- .type_node = null,
- .align_node = null,
- .init_node = null,
- .lib_name = ctx.lib_name,
- // initialized later
- .name_token = undefined,
- .eq_token = undefined,
- .semicolon_token = undefined,
- }
- );
-
- stack.append(State { .VarDeclAlign = var_decl }) catch unreachable;
- try stack.append(State { .TypeExprBegin = OptionalCtx { .RequiredNull = &var_decl.type_node} });
- try stack.append(State { .IfToken = Token.Id.Colon });
- try stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Identifier,
- .ptr = &var_decl.name_token,
- }
- });
- continue;
- },
- State.VarDeclAlign => |var_decl| {
- stack.append(State { .VarDeclEq = var_decl }) catch unreachable;
-
- const next_token = self.getNextToken();
- if (next_token.id == Token.Id.Keyword_align) {
- try stack.append(State { .ExpectToken = Token.Id.RParen });
- try stack.append(State { .Expression = OptionalCtx { .RequiredNull = &var_decl.align_node} });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- continue;
- }
-
- self.putBackToken(next_token);
- continue;
- },
- State.VarDeclEq => |var_decl| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Equal => {
- var_decl.eq_token = token;
- stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Semicolon,
- .ptr = &var_decl.semicolon_token,
- },
- }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .RequiredNull = &var_decl.init_node } });
- continue;
- },
- Token.Id.Semicolon => {
- var_decl.semicolon_token = token;
- continue;
- },
- else => {
- return self.parseError(token, "expected '=' or ';', found {}", @tagName(token.id));
- }
- }
- },
-
-
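-                // Function prototypes and definitions: optional name, parameter list,
-                // optional `align(...)`, the return type, and then a `{...}` body or `;`.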
- State.FnDef => |fn_proto| {
- const token = self.getNextToken();
-                    switch (token.id) {
- Token.Id.LBrace => {
- const block = try self.createNode(arena, ast.NodeBlock,
- ast.NodeBlock {
- .base = undefined,
- .label = null,
- .lbrace = token,
- .statements = ArrayList(&ast.Node).init(arena),
- .rbrace = undefined,
- }
- );
- fn_proto.body_node = &block.base;
- stack.append(State { .Block = block }) catch unreachable;
- continue;
- },
- Token.Id.Semicolon => continue,
- else => {
- return self.parseError(token, "expected ';' or '{{', found {}", @tagName(token.id));
- },
- }
- },
- State.FnProto => |fn_proto| {
- stack.append(State { .FnProtoAlign = fn_proto }) catch unreachable;
- try stack.append(State { .ParamDecl = fn_proto });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
-
- if (self.eatToken(Token.Id.Identifier)) |name_token| {
- fn_proto.name_token = name_token;
- }
- continue;
- },
- State.FnProtoAlign => |fn_proto| {
- stack.append(State { .FnProtoReturnType = fn_proto }) catch unreachable;
-
- if (self.eatToken(Token.Id.Keyword_align)) |align_token| {
- try stack.append(State { .ExpectToken = Token.Id.RParen });
- try stack.append(State { .Expression = OptionalCtx { .RequiredNull = &fn_proto.align_expr } });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- }
- continue;
- },
- State.FnProtoReturnType => |fn_proto| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Bang => {
- fn_proto.return_type = ast.NodeFnProto.ReturnType { .InferErrorSet = undefined };
- stack.append(State {
- .TypeExprBegin = OptionalCtx { .Required = &fn_proto.return_type.InferErrorSet },
- }) catch unreachable;
- continue;
- },
- else => {
- // TODO: this is a special case. Remove this when #760 is fixed
- if (token.id == Token.Id.Keyword_error) {
- if (self.isPeekToken(Token.Id.LBrace)) {
- fn_proto.return_type = ast.NodeFnProto.ReturnType {
- .Explicit = &(try self.createLiteral(arena, ast.NodeErrorType, token)).base
- };
- continue;
- }
- }
-
- self.putBackToken(token);
- fn_proto.return_type = ast.NodeFnProto.ReturnType { .Explicit = undefined };
- stack.append(State { .TypeExprBegin = OptionalCtx { .Required = &fn_proto.return_type.Explicit }, }) catch unreachable;
- continue;
- },
- }
- },
-
-
- State.ParamDecl => |fn_proto| {
- if (self.eatToken(Token.Id.RParen)) |_| {
- continue;
- }
- const param_decl = try self.createAttachNode(arena, &fn_proto.params, ast.NodeParamDecl,
- ast.NodeParamDecl {
- .base = undefined,
- .comptime_token = null,
- .noalias_token = null,
- .name_token = null,
- .type_node = undefined,
- .var_args_token = null,
- },
- );
-
- stack.append(State {
- .ParamDeclEnd = ParamDeclEndCtx {
- .param_decl = param_decl,
- .fn_proto = fn_proto,
- }
- }) catch unreachable;
- try stack.append(State { .ParamDeclName = param_decl });
- try stack.append(State { .ParamDeclAliasOrComptime = param_decl });
- continue;
- },
- State.ParamDeclAliasOrComptime => |param_decl| {
- if (self.eatToken(Token.Id.Keyword_comptime)) |comptime_token| {
- param_decl.comptime_token = comptime_token;
- } else if (self.eatToken(Token.Id.Keyword_noalias)) |noalias_token| {
- param_decl.noalias_token = noalias_token;
- }
- continue;
- },
- State.ParamDeclName => |param_decl| {
- // TODO: Here, we eat two tokens in one state. This means that we can't have
- // comments between these two tokens.
- if (self.eatToken(Token.Id.Identifier)) |ident_token| {
- if (self.eatToken(Token.Id.Colon)) |_| {
- param_decl.name_token = ident_token;
- } else {
- self.putBackToken(ident_token);
- }
- }
- continue;
- },
- State.ParamDeclEnd => |ctx| {
- if (self.eatToken(Token.Id.Ellipsis3)) |ellipsis3| {
- ctx.param_decl.var_args_token = ellipsis3;
- stack.append(State { .ExpectToken = Token.Id.RParen }) catch unreachable;
- continue;
- }
-
- try stack.append(State { .ParamDeclComma = ctx.fn_proto });
- try stack.append(State {
- .TypeExprBegin = OptionalCtx { .Required = &ctx.param_decl.type_node }
- });
- continue;
- },
- State.ParamDeclComma => |fn_proto| {
- if ((try self.expectCommaOrEnd(Token.Id.RParen)) == null) {
- stack.append(State { .ParamDecl = fn_proto }) catch unreachable;
- }
- continue;
- },
-
- State.MaybeLabeledExpression => |ctx| {
- if (self.eatToken(Token.Id.Colon)) |_| {
- stack.append(State {
- .LabeledExpression = LabelCtx {
- .label = ctx.label,
- .opt_ctx = ctx.opt_ctx,
- }
- }) catch unreachable;
- continue;
- }
-
- _ = try self.createToCtxLiteral(arena, ctx.opt_ctx, ast.NodeIdentifier, ctx.label);
- continue;
- },
- State.LabeledExpression => |ctx| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.LBrace => {
- const block = try self.createToCtxNode(arena, ctx.opt_ctx, ast.NodeBlock,
- ast.NodeBlock {
- .base = undefined,
- .label = ctx.label,
- .lbrace = token,
- .statements = ArrayList(&ast.Node).init(arena),
- .rbrace = undefined,
- }
- );
- stack.append(State { .Block = block }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_while => {
- stack.append(State {
- .While = LoopCtx {
- .label = ctx.label,
- .inline_token = null,
- .loop_token = token,
- .opt_ctx = ctx.opt_ctx.toRequired(),
- }
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_for => {
- stack.append(State {
- .For = LoopCtx {
- .label = ctx.label,
- .inline_token = null,
- .loop_token = token,
- .opt_ctx = ctx.opt_ctx.toRequired(),
- }
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_inline => {
- stack.append(State {
- .Inline = InlineCtx {
- .label = ctx.label,
- .inline_token = token,
- .opt_ctx = ctx.opt_ctx.toRequired(),
- }
- }) catch unreachable;
- continue;
- },
- else => {
- if (ctx.opt_ctx != OptionalCtx.Optional) {
- return self.parseError(token, "expected 'while', 'for', 'inline' or '{{', found {}", @tagName(token.id));
- }
-
- self.putBackToken(token);
- continue;
- },
- }
- },
- State.Inline => |ctx| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Keyword_while => {
- stack.append(State {
- .While = LoopCtx {
- .inline_token = ctx.inline_token,
- .label = ctx.label,
- .loop_token = token,
- .opt_ctx = ctx.opt_ctx.toRequired(),
- }
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_for => {
- stack.append(State {
- .For = LoopCtx {
- .inline_token = ctx.inline_token,
- .label = ctx.label,
- .loop_token = token,
- .opt_ctx = ctx.opt_ctx.toRequired(),
- }
- }) catch unreachable;
- continue;
- },
- else => {
- if (ctx.opt_ctx != OptionalCtx.Optional) {
- return self.parseError(token, "expected 'while' or 'for', found {}", @tagName(token.id));
- }
-
- self.putBackToken(token);
- continue;
- },
- }
- },
- State.While => |ctx| {
- const node = try self.createToCtxNode(arena, ctx.opt_ctx, ast.NodeWhile,
- ast.NodeWhile {
- .base = undefined,
- .label = ctx.label,
- .inline_token = ctx.inline_token,
- .while_token = ctx.loop_token,
- .condition = undefined,
- .payload = null,
- .continue_expr = null,
- .body = undefined,
- .@"else" = null,
- }
- );
- stack.append(State { .Else = &node.@"else" }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.body } });
- try stack.append(State { .WhileContinueExpr = &node.continue_expr });
- try stack.append(State { .IfToken = Token.Id.Colon });
- try stack.append(State { .PointerPayload = OptionalCtx { .Optional = &node.payload } });
- try stack.append(State { .ExpectToken = Token.Id.RParen });
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.condition } });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- continue;
- },
- State.WhileContinueExpr => |dest| {
- stack.append(State { .ExpectToken = Token.Id.RParen }) catch unreachable;
- try stack.append(State { .AssignmentExpressionBegin = OptionalCtx { .RequiredNull = dest } });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- continue;
- },
- State.For => |ctx| {
- const node = try self.createToCtxNode(arena, ctx.opt_ctx, ast.NodeFor,
- ast.NodeFor {
- .base = undefined,
- .label = ctx.label,
- .inline_token = ctx.inline_token,
- .for_token = ctx.loop_token,
- .array_expr = undefined,
- .payload = null,
- .body = undefined,
- .@"else" = null,
- }
- );
- stack.append(State { .Else = &node.@"else" }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.body } });
- try stack.append(State { .PointerIndexPayload = OptionalCtx { .Optional = &node.payload } });
- try stack.append(State { .ExpectToken = Token.Id.RParen });
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.array_expr } });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- continue;
- },
- State.Else => |dest| {
- if (self.eatToken(Token.Id.Keyword_else)) |else_token| {
- const node = try self.createNode(arena, ast.NodeElse,
- ast.NodeElse {
- .base = undefined,
- .else_token = else_token,
- .payload = null,
- .body = undefined,
- }
- );
- *dest = node;
-
- stack.append(State { .Expression = OptionalCtx { .Required = &node.body } }) catch unreachable;
- try stack.append(State { .Payload = OptionalCtx { .Optional = &node.payload } });
- continue;
- } else {
- continue;
- }
- },
-
-
- State.Block => |block| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.RBrace => {
- block.rbrace = token;
- continue;
- },
- else => {
- self.putBackToken(token);
- stack.append(State { .Block = block }) catch unreachable;
- try stack.append(State { .Statement = block });
- continue;
- },
- }
- },
- State.Statement => |block| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Keyword_comptime => {
- stack.append(State {
- .ComptimeStatement = ComptimeStatementCtx {
- .comptime_token = token,
- .block = block,
- }
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_var, Token.Id.Keyword_const => {
- stack.append(State {
- .VarDecl = VarDeclCtx {
- .visib_token = null,
- .comptime_token = null,
- .extern_export_token = null,
- .lib_name = null,
- .mut_token = token,
- .list = &block.statements,
- }
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_defer, Token.Id.Keyword_errdefer => {
- const node = try self.createAttachNode(arena, &block.statements, ast.NodeDefer,
- ast.NodeDefer {
- .base = undefined,
- .defer_token = token,
- .kind = switch (token.id) {
- Token.Id.Keyword_defer => ast.NodeDefer.Kind.Unconditional,
- Token.Id.Keyword_errdefer => ast.NodeDefer.Kind.Error,
- else => unreachable,
- },
- .expr = undefined,
- }
- );
- stack.append(State { .Semicolon = &&node.base }) catch unreachable;
- try stack.append(State { .AssignmentExpressionBegin = OptionalCtx{ .Required = &node.expr } });
- continue;
- },
- Token.Id.LBrace => {
- const inner_block = try self.createAttachNode(arena, &block.statements, ast.NodeBlock,
- ast.NodeBlock {
- .base = undefined,
- .label = null,
- .lbrace = token,
- .statements = ArrayList(&ast.Node).init(arena),
- .rbrace = undefined,
- }
- );
- stack.append(State { .Block = inner_block }) catch unreachable;
- continue;
- },
- else => {
- self.putBackToken(token);
-                            const statement = try block.statements.addOne();
-                            stack.append(State { .Semicolon = statement }) catch unreachable;
-                            try stack.append(State { .AssignmentExpressionBegin = OptionalCtx{ .Required = statement } });
- continue;
- }
- }
- },
- State.ComptimeStatement => |ctx| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Keyword_var, Token.Id.Keyword_const => {
- stack.append(State {
- .VarDecl = VarDeclCtx {
- .visib_token = null,
- .comptime_token = ctx.comptime_token,
- .extern_export_token = null,
- .lib_name = null,
- .mut_token = token,
- .list = &ctx.block.statements,
- }
- }) catch unreachable;
- continue;
- },
- else => {
- self.putBackToken(token);
- self.putBackToken(ctx.comptime_token);
-                            const statement = try ctx.block.statements.addOne();
-                            stack.append(State { .Semicolon = statement }) catch unreachable;
-                            try stack.append(State { .Expression = OptionalCtx { .Required = statement } });
- continue;
- }
- }
- },
- State.Semicolon => |node_ptr| {
- const node = *node_ptr;
- if (requireSemiColon(node)) {
- stack.append(State { .ExpectToken = Token.Id.Semicolon }) catch unreachable;
- continue;
- }
- continue;
- },
-
-
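-                // Inline assembly operands: `[name] "constraint" (...)` entries for the
-                // output and input lists, followed by a trailing list of string literals.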
- State.AsmOutputItems => |items| {
- const lbracket = self.getNextToken();
- if (lbracket.id != Token.Id.LBracket) {
- self.putBackToken(lbracket);
- continue;
- }
-
- const node = try self.createNode(arena, ast.NodeAsmOutput,
- ast.NodeAsmOutput {
- .base = undefined,
- .symbolic_name = undefined,
- .constraint = undefined,
- .kind = undefined,
- }
- );
- try items.append(node);
-
- stack.append(State { .AsmOutputItems = items }) catch unreachable;
- try stack.append(State { .IfToken = Token.Id.Comma });
- try stack.append(State { .ExpectToken = Token.Id.RParen });
- try stack.append(State { .AsmOutputReturnOrType = node });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- try stack.append(State { .StringLiteral = OptionalCtx { .Required = &node.constraint } });
- try stack.append(State { .ExpectToken = Token.Id.RBracket });
- try stack.append(State { .Identifier = OptionalCtx { .Required = &node.symbolic_name } });
- continue;
- },
- State.AsmOutputReturnOrType => |node| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Identifier => {
- node.kind = ast.NodeAsmOutput.Kind { .Variable = try self.createLiteral(arena, ast.NodeIdentifier, token) };
- continue;
- },
- Token.Id.Arrow => {
- node.kind = ast.NodeAsmOutput.Kind { .Return = undefined };
- try stack.append(State { .TypeExprBegin = OptionalCtx { .Required = &node.kind.Return } });
- continue;
- },
- else => {
- return self.parseError(token, "expected '->' or {}, found {}",
- @tagName(Token.Id.Identifier),
- @tagName(token.id));
- },
- }
- },
- State.AsmInputItems => |items| {
- const lbracket = self.getNextToken();
- if (lbracket.id != Token.Id.LBracket) {
- self.putBackToken(lbracket);
- continue;
- }
-
- const node = try self.createNode(arena, ast.NodeAsmInput,
- ast.NodeAsmInput {
- .base = undefined,
- .symbolic_name = undefined,
- .constraint = undefined,
- .expr = undefined,
- }
- );
- try items.append(node);
-
- stack.append(State { .AsmInputItems = items }) catch unreachable;
- try stack.append(State { .IfToken = Token.Id.Comma });
- try stack.append(State { .ExpectToken = Token.Id.RParen });
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.expr } });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- try stack.append(State { .StringLiteral = OptionalCtx { .Required = &node.constraint } });
- try stack.append(State { .ExpectToken = Token.Id.RBracket });
- try stack.append(State { .Identifier = OptionalCtx { .Required = &node.symbolic_name } });
- continue;
- },
- State.AsmClopperItems => |items| {
- stack.append(State { .AsmClopperItems = items }) catch unreachable;
- try stack.append(State { .IfToken = Token.Id.Comma });
- try stack.append(State { .StringLiteral = OptionalCtx { .Required = try items.addOne() } });
- continue;
- },
-
-
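-                // Comma-separated list helpers: each *ItemOrEnd / *CommaOrEnd pair parses
-                // one element, then expects either a `,` or the token that closes the list,
-                // saving that closing token when the list ends.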
- State.ExprListItemOrEnd => |list_state| {
- if (self.eatToken(list_state.end)) |token| {
- *list_state.ptr = token;
- continue;
- }
-
- stack.append(State { .ExprListCommaOrEnd = list_state }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .Required = try list_state.list.addOne() } });
- continue;
- },
- State.ExprListCommaOrEnd => |list_state| {
- if (try self.expectCommaOrEnd(list_state.end)) |end| {
- *list_state.ptr = end;
- continue;
- } else {
- stack.append(State { .ExprListItemOrEnd = list_state }) catch unreachable;
- continue;
- }
- },
- State.FieldInitListItemOrEnd => |list_state| {
- if (self.eatToken(Token.Id.RBrace)) |rbrace| {
- *list_state.ptr = rbrace;
- continue;
- }
-
- const node = try self.createNode(arena, ast.NodeFieldInitializer,
- ast.NodeFieldInitializer {
- .base = undefined,
- .period_token = undefined,
- .name_token = undefined,
- .expr = undefined,
- }
- );
- try list_state.list.append(node);
-
- stack.append(State { .FieldInitListCommaOrEnd = list_state }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx{ .Required = &node.expr } });
- try stack.append(State { .ExpectToken = Token.Id.Equal });
- try stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Identifier,
- .ptr = &node.name_token,
- }
- });
- try stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Period,
- .ptr = &node.period_token,
- }
- });
- continue;
- },
- State.FieldInitListCommaOrEnd => |list_state| {
- if (try self.expectCommaOrEnd(Token.Id.RBrace)) |end| {
- *list_state.ptr = end;
- continue;
- } else {
- stack.append(State { .FieldInitListItemOrEnd = list_state }) catch unreachable;
- continue;
- }
- },
- State.FieldListCommaOrEnd => |container_decl| {
- if (try self.expectCommaOrEnd(Token.Id.RBrace)) |end| {
- container_decl.rbrace_token = end;
- continue;
- } else {
- stack.append(State { .ContainerDecl = container_decl }) catch unreachable;
- continue;
- }
- },
- State.IdentifierListItemOrEnd => |list_state| {
- if (self.eatToken(Token.Id.RBrace)) |rbrace| {
- *list_state.ptr = rbrace;
- continue;
- }
-
- stack.append(State { .IdentifierListCommaOrEnd = list_state }) catch unreachable;
- try stack.append(State { .Identifier = OptionalCtx { .Required = try list_state.list.addOne() } });
- continue;
- },
- State.IdentifierListCommaOrEnd => |list_state| {
- if (try self.expectCommaOrEnd(Token.Id.RBrace)) |end| {
- *list_state.ptr = end;
- continue;
- } else {
- stack.append(State { .IdentifierListItemOrEnd = list_state }) catch unreachable;
- continue;
- }
- },
- State.SwitchCaseOrEnd => |list_state| {
- if (self.eatToken(Token.Id.RBrace)) |rbrace| {
- *list_state.ptr = rbrace;
- continue;
- }
-
- const node = try self.createNode(arena, ast.NodeSwitchCase,
- ast.NodeSwitchCase {
- .base = undefined,
- .items = ArrayList(&ast.Node).init(arena),
- .payload = null,
- .expr = undefined,
- }
- );
- try list_state.list.append(node);
- stack.append(State { .SwitchCaseCommaOrEnd = list_state }) catch unreachable;
- try stack.append(State { .AssignmentExpressionBegin = OptionalCtx { .Required = &node.expr } });
- try stack.append(State { .PointerPayload = OptionalCtx { .Optional = &node.payload } });
- try stack.append(State { .SwitchCaseFirstItem = &node.items });
- continue;
- },
- State.SwitchCaseCommaOrEnd => |list_state| {
- if (try self.expectCommaOrEnd(Token.Id.RBrace)) |end| {
- *list_state.ptr = end;
- continue;
- } else {
- stack.append(State { .SwitchCaseOrEnd = list_state }) catch unreachable;
- continue;
- }
- },
- State.SwitchCaseFirstItem => |case_items| {
- const token = self.getNextToken();
- if (token.id == Token.Id.Keyword_else) {
- const else_node = try self.createAttachNode(arena, case_items, ast.NodeSwitchElse,
- ast.NodeSwitchElse {
- .base = undefined,
- .token = token,
- }
- );
- try stack.append(State { .ExpectToken = Token.Id.EqualAngleBracketRight });
- continue;
- } else {
- self.putBackToken(token);
- try stack.append(State { .SwitchCaseItem = case_items });
- continue;
- }
- },
- State.SwitchCaseItem => |case_items| {
- stack.append(State { .SwitchCaseItemCommaOrEnd = case_items }) catch unreachable;
- try stack.append(State { .RangeExpressionBegin = OptionalCtx { .Required = try case_items.addOne() } });
- },
- State.SwitchCaseItemCommaOrEnd => |case_items| {
- if ((try self.expectCommaOrEnd(Token.Id.EqualAngleBracketRight)) == null) {
- stack.append(State { .SwitchCaseItem = case_items }) catch unreachable;
- }
- continue;
- },
-
-
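-                // Async-related states: suspend bodies, the optional `async<allocator>`
-                // attribute, and attaching that attribute to a call or fn proto.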
- State.SuspendBody => |suspend_node| {
- if (suspend_node.payload != null) {
- try stack.append(State { .AssignmentExpressionBegin = OptionalCtx { .RequiredNull = &suspend_node.body } });
- }
- continue;
- },
- State.AsyncAllocator => |async_node| {
- if (self.eatToken(Token.Id.AngleBracketLeft) == null) {
- continue;
- }
-
- async_node.rangle_bracket = Token(undefined);
- try stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.AngleBracketRight,
- .ptr = &??async_node.rangle_bracket,
- }
- });
- try stack.append(State { .TypeExprBegin = OptionalCtx { .RequiredNull = &async_node.allocator_type } });
- continue;
- },
- State.AsyncEnd => |ctx| {
- const node = ctx.ctx.get() ?? continue;
-
- switch (node.id) {
- ast.Node.Id.FnProto => {
- const fn_proto = @fieldParentPtr(ast.NodeFnProto, "base", node);
- fn_proto.async_attr = ctx.attribute;
- continue;
- },
- ast.Node.Id.SuffixOp => {
- const suffix_op = @fieldParentPtr(ast.NodeSuffixOp, "base", node);
- if (suffix_op.op == ast.NodeSuffixOp.SuffixOp.Call) {
- suffix_op.op.Call.async_attr = ctx.attribute;
- continue;
- }
-
- return self.parseError(node.firstToken(), "expected {}, found {}.",
- @tagName(ast.NodeSuffixOp.SuffixOp.Call),
- @tagName(suffix_op.op));
- },
- else => {
- return self.parseError(node.firstToken(), "expected {} or {}, found {}.",
- @tagName(ast.NodeSuffixOp.SuffixOp.Call),
- @tagName(ast.Node.Id.FnProto),
- @tagName(node.id));
- }
- }
- },
-
-
- State.ExternType => |ctx| {
- if (self.eatToken(Token.Id.Keyword_fn)) |fn_token| {
- const fn_proto = try self.createToCtxNode(arena, ctx.opt_ctx, ast.NodeFnProto,
- ast.NodeFnProto {
- .base = undefined,
- .visib_token = null,
- .name_token = null,
- .fn_token = fn_token,
- .params = ArrayList(&ast.Node).init(arena),
- .return_type = undefined,
- .var_args_token = null,
- .extern_export_inline_token = ctx.extern_token,
- .cc_token = null,
- .async_attr = null,
- .body_node = null,
- .lib_name = null,
- .align_expr = null,
- }
- );
- stack.append(State { .FnProto = fn_proto }) catch unreachable;
- continue;
- }
-
- stack.append(State {
- .ContainerKind = ContainerKindCtx {
- .opt_ctx = ctx.opt_ctx,
- .ltoken = ctx.extern_token,
- .layout = ast.NodeContainerDecl.Layout.Extern,
- },
- }) catch unreachable;
- continue;
- },
- State.SliceOrArrayAccess => |node| {
- var token = self.getNextToken();
- switch (token.id) {
- Token.Id.Ellipsis2 => {
- const start = node.op.ArrayAccess;
- node.op = ast.NodeSuffixOp.SuffixOp {
- .Slice = ast.NodeSuffixOp.SliceRange {
- .start = start,
- .end = null,
- }
- };
-
- stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.RBracket,
- .ptr = &node.rtoken,
- }
- }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .Optional = &node.op.Slice.end } });
- continue;
- },
- Token.Id.RBracket => {
- node.rtoken = token;
- continue;
- },
- else => {
- return self.parseError(token, "expected ']' or '..', found {}", @tagName(token.id));
- }
- }
- },
- State.SliceOrArrayType => |node| {
- if (self.eatToken(Token.Id.RBracket)) |_| {
- node.op = ast.NodePrefixOp.PrefixOp {
- .SliceType = ast.NodePrefixOp.AddrOfInfo {
- .align_expr = null,
- .bit_offset_start_token = null,
- .bit_offset_end_token = null,
- .const_token = null,
- .volatile_token = null,
- }
- };
- stack.append(State { .TypeExprBegin = OptionalCtx { .Required = &node.rhs } }) catch unreachable;
- try stack.append(State { .AddrOfModifiers = &node.op.SliceType });
- continue;
- }
-
- node.op = ast.NodePrefixOp.PrefixOp { .ArrayType = undefined };
- stack.append(State { .TypeExprBegin = OptionalCtx { .Required = &node.rhs } }) catch unreachable;
- try stack.append(State { .ExpectToken = Token.Id.RBracket });
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.op.ArrayType } });
- continue;
- },
- State.AddrOfModifiers => |addr_of_info| {
- var token = self.getNextToken();
- switch (token.id) {
- Token.Id.Keyword_align => {
- stack.append(state) catch unreachable;
- if (addr_of_info.align_expr != null) {
- return self.parseError(token, "multiple align qualifiers");
- }
- try stack.append(State { .ExpectToken = Token.Id.RParen });
- try stack.append(State { .Expression = OptionalCtx { .RequiredNull = &addr_of_info.align_expr} });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- continue;
- },
- Token.Id.Keyword_const => {
- stack.append(state) catch unreachable;
- if (addr_of_info.const_token != null) {
- return self.parseError(token, "duplicate qualifier: const");
- }
- addr_of_info.const_token = token;
- continue;
- },
- Token.Id.Keyword_volatile => {
- stack.append(state) catch unreachable;
- if (addr_of_info.volatile_token != null) {
- return self.parseError(token, "duplicate qualifier: volatile");
- }
- addr_of_info.volatile_token = token;
- continue;
- },
- else => {
- self.putBackToken(token);
- continue;
- },
- }
- },
-
-
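-                // Capture payloads between pipes: `|err|`, `|*value|`, and `|*value, index|`.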
- State.Payload => |opt_ctx| {
- const token = self.getNextToken();
- if (token.id != Token.Id.Pipe) {
- if (opt_ctx != OptionalCtx.Optional) {
- return self.parseError(token, "expected {}, found {}.",
- @tagName(Token.Id.Pipe),
- @tagName(token.id));
- }
-
- self.putBackToken(token);
- continue;
- }
-
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodePayload,
- ast.NodePayload {
- .base = undefined,
- .lpipe = token,
- .error_symbol = undefined,
- .rpipe = undefined
- }
- );
-
- stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Pipe,
- .ptr = &node.rpipe,
- }
- }) catch unreachable;
- try stack.append(State { .Identifier = OptionalCtx { .Required = &node.error_symbol } });
- continue;
- },
- State.PointerPayload => |opt_ctx| {
- const token = self.getNextToken();
- if (token.id != Token.Id.Pipe) {
- if (opt_ctx != OptionalCtx.Optional) {
- return self.parseError(token, "expected {}, found {}.",
- @tagName(Token.Id.Pipe),
- @tagName(token.id));
- }
-
- self.putBackToken(token);
- continue;
- }
-
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodePointerPayload,
- ast.NodePointerPayload {
- .base = undefined,
- .lpipe = token,
- .ptr_token = null,
- .value_symbol = undefined,
- .rpipe = undefined
- }
- );
-
- stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Pipe,
- .ptr = &node.rpipe,
- }
- }) catch unreachable;
- try stack.append(State { .Identifier = OptionalCtx { .Required = &node.value_symbol } });
- try stack.append(State {
- .OptionalTokenSave = OptionalTokenSave {
- .id = Token.Id.Asterisk,
- .ptr = &node.ptr_token,
- }
- });
- continue;
- },
- State.PointerIndexPayload => |opt_ctx| {
- const token = self.getNextToken();
- if (token.id != Token.Id.Pipe) {
- if (opt_ctx != OptionalCtx.Optional) {
- return self.parseError(token, "expected {}, found {}.",
- @tagName(Token.Id.Pipe),
- @tagName(token.id));
- }
-
- self.putBackToken(token);
- continue;
- }
-
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodePointerIndexPayload,
- ast.NodePointerIndexPayload {
- .base = undefined,
- .lpipe = token,
- .ptr_token = null,
- .value_symbol = undefined,
- .index_symbol = null,
- .rpipe = undefined
- }
- );
-
- stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Pipe,
- .ptr = &node.rpipe,
- }
- }) catch unreachable;
- try stack.append(State { .Identifier = OptionalCtx { .RequiredNull = &node.index_symbol } });
- try stack.append(State { .IfToken = Token.Id.Comma });
- try stack.append(State { .Identifier = OptionalCtx { .Required = &node.value_symbol } });
- try stack.append(State {
- .OptionalTokenSave = OptionalTokenSave {
- .id = Token.Id.Asterisk,
- .ptr = &node.ptr_token,
- }
- });
- continue;
- },
-
-
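-                // Expression parsing: each precedence level has a Begin state that descends
-                // into the next tighter level, and an End state that, once a left-hand side
-                // exists, folds any trailing operator into an infix/suffix node and repeats.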
- State.Expression => |opt_ctx| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.Keyword_return, Token.Id.Keyword_break, Token.Id.Keyword_continue => {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeControlFlowExpression,
- ast.NodeControlFlowExpression {
- .base = undefined,
- .ltoken = token,
- .kind = undefined,
- .rhs = null,
- }
- );
-
- stack.append(State { .Expression = OptionalCtx { .Optional = &node.rhs } }) catch unreachable;
-
- switch (token.id) {
- Token.Id.Keyword_break => {
- node.kind = ast.NodeControlFlowExpression.Kind { .Break = null };
- try stack.append(State { .Identifier = OptionalCtx { .RequiredNull = &node.kind.Break } });
- try stack.append(State { .IfToken = Token.Id.Colon });
- },
- Token.Id.Keyword_continue => {
- node.kind = ast.NodeControlFlowExpression.Kind { .Continue = null };
- try stack.append(State { .Identifier = OptionalCtx { .RequiredNull = &node.kind.Continue } });
- try stack.append(State { .IfToken = Token.Id.Colon });
- },
- Token.Id.Keyword_return => {
- node.kind = ast.NodeControlFlowExpression.Kind.Return;
- },
- else => unreachable,
- }
- continue;
- },
- Token.Id.Keyword_try, Token.Id.Keyword_cancel, Token.Id.Keyword_resume => {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodePrefixOp,
- ast.NodePrefixOp {
- .base = undefined,
- .op_token = token,
- .op = switch (token.id) {
- Token.Id.Keyword_try => ast.NodePrefixOp.PrefixOp { .Try = void{} },
- Token.Id.Keyword_cancel => ast.NodePrefixOp.PrefixOp { .Cancel = void{} },
- Token.Id.Keyword_resume => ast.NodePrefixOp.PrefixOp { .Resume = void{} },
- else => unreachable,
- },
- .rhs = undefined,
- }
- );
-
- stack.append(State { .Expression = OptionalCtx { .Required = &node.rhs } }) catch unreachable;
- continue;
- },
- else => {
- if (!try self.parseBlockExpr(&stack, arena, opt_ctx, token)) {
- self.putBackToken(token);
- stack.append(State { .UnwrapExpressionBegin = opt_ctx }) catch unreachable;
- }
- continue;
- }
- }
- },
- State.RangeExpressionBegin => |opt_ctx| {
- stack.append(State { .RangeExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .Expression = opt_ctx });
- continue;
- },
- State.RangeExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- if (self.eatToken(Token.Id.Ellipsis3)) |ellipsis3| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = ellipsis3,
- .op = ast.NodeInfixOp.InfixOp.Range,
- .rhs = undefined,
- }
- );
- stack.append(State { .Expression = OptionalCtx { .Required = &node.rhs } }) catch unreachable;
- continue;
- }
- },
- State.AssignmentExpressionBegin => |opt_ctx| {
- stack.append(State { .AssignmentExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .Expression = opt_ctx });
- continue;
- },
-
- State.AssignmentExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- const token = self.getNextToken();
- if (tokenIdToAssignment(token.id)) |ass_id| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = token,
- .op = ass_id,
- .rhs = undefined,
- }
- );
- stack.append(State { .AssignmentExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.rhs } });
- continue;
- } else {
- self.putBackToken(token);
- continue;
- }
- },
-
- State.UnwrapExpressionBegin => |opt_ctx| {
- stack.append(State { .UnwrapExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .BoolOrExpressionBegin = opt_ctx });
- continue;
- },
-
- State.UnwrapExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- const token = self.getNextToken();
- if (tokenIdToUnwrapExpr(token.id)) |unwrap_id| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = token,
- .op = unwrap_id,
- .rhs = undefined,
- }
- );
-
- stack.append(State { .UnwrapExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.rhs } });
-
- if (node.op == ast.NodeInfixOp.InfixOp.Catch) {
- try stack.append(State { .Payload = OptionalCtx { .Optional = &node.op.Catch } });
- }
- continue;
- } else {
- self.putBackToken(token);
- continue;
- }
- },
-
- State.BoolOrExpressionBegin => |opt_ctx| {
- stack.append(State { .BoolOrExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .BoolAndExpressionBegin = opt_ctx });
- continue;
- },
-
- State.BoolOrExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- if (self.eatToken(Token.Id.Keyword_or)) |or_token| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = or_token,
- .op = ast.NodeInfixOp.InfixOp.BoolOr,
- .rhs = undefined,
- }
- );
- stack.append(State { .BoolOrExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .BoolAndExpressionBegin = OptionalCtx { .Required = &node.rhs } });
- continue;
- }
- },
-
- State.BoolAndExpressionBegin => |opt_ctx| {
- stack.append(State { .BoolAndExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .ComparisonExpressionBegin = opt_ctx });
- continue;
- },
-
- State.BoolAndExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- if (self.eatToken(Token.Id.Keyword_and)) |and_token| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = and_token,
- .op = ast.NodeInfixOp.InfixOp.BoolAnd,
- .rhs = undefined,
- }
- );
- stack.append(State { .BoolAndExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .ComparisonExpressionBegin = OptionalCtx { .Required = &node.rhs } });
- continue;
- }
- },
-
- State.ComparisonExpressionBegin => |opt_ctx| {
- stack.append(State { .ComparisonExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .BinaryOrExpressionBegin = opt_ctx });
- continue;
- },
-
- State.ComparisonExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- const token = self.getNextToken();
- if (tokenIdToComparison(token.id)) |comp_id| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = token,
- .op = comp_id,
- .rhs = undefined,
- }
- );
- stack.append(State { .ComparisonExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .BinaryOrExpressionBegin = OptionalCtx { .Required = &node.rhs } });
- continue;
- } else {
- self.putBackToken(token);
- continue;
- }
- },
-
- State.BinaryOrExpressionBegin => |opt_ctx| {
- stack.append(State { .BinaryOrExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .BinaryXorExpressionBegin = opt_ctx });
- continue;
- },
-
- State.BinaryOrExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- if (self.eatToken(Token.Id.Pipe)) |pipe| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = pipe,
- .op = ast.NodeInfixOp.InfixOp.BitOr,
- .rhs = undefined,
- }
- );
- stack.append(State { .BinaryOrExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .BinaryXorExpressionBegin = OptionalCtx { .Required = &node.rhs } });
- continue;
- }
- },
-
- State.BinaryXorExpressionBegin => |opt_ctx| {
- stack.append(State { .BinaryXorExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .BinaryAndExpressionBegin = opt_ctx });
- continue;
- },
-
- State.BinaryXorExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- if (self.eatToken(Token.Id.Caret)) |caret| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = caret,
- .op = ast.NodeInfixOp.InfixOp.BitXor,
- .rhs = undefined,
- }
- );
- stack.append(State { .BinaryXorExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .BinaryAndExpressionBegin = OptionalCtx { .Required = &node.rhs } });
- continue;
- }
- },
-
- State.BinaryAndExpressionBegin => |opt_ctx| {
- stack.append(State { .BinaryAndExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .BitShiftExpressionBegin = opt_ctx });
- continue;
- },
-
- State.BinaryAndExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- if (self.eatToken(Token.Id.Ampersand)) |ampersand| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = ampersand,
- .op = ast.NodeInfixOp.InfixOp.BitAnd,
- .rhs = undefined,
- }
- );
- stack.append(State { .BinaryAndExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .BitShiftExpressionBegin = OptionalCtx { .Required = &node.rhs } });
- continue;
- }
- },
-
- State.BitShiftExpressionBegin => |opt_ctx| {
- stack.append(State { .BitShiftExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .AdditionExpressionBegin = opt_ctx });
- continue;
- },
-
- State.BitShiftExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- const token = self.getNextToken();
- if (tokenIdToBitShift(token.id)) |bitshift_id| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = token,
- .op = bitshift_id,
- .rhs = undefined,
- }
- );
- stack.append(State { .BitShiftExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .AdditionExpressionBegin = OptionalCtx { .Required = &node.rhs } });
- continue;
- } else {
- self.putBackToken(token);
- continue;
- }
- },
-
- State.AdditionExpressionBegin => |opt_ctx| {
- stack.append(State { .AdditionExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .MultiplyExpressionBegin = opt_ctx });
- continue;
- },
-
- State.AdditionExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- const token = self.getNextToken();
- if (tokenIdToAddition(token.id)) |add_id| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = token,
- .op = add_id,
- .rhs = undefined,
- }
- );
- stack.append(State { .AdditionExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .MultiplyExpressionBegin = OptionalCtx { .Required = &node.rhs } });
- continue;
- } else {
- self.putBackToken(token);
- continue;
- }
- },
-
- State.MultiplyExpressionBegin => |opt_ctx| {
- stack.append(State { .MultiplyExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .CurlySuffixExpressionBegin = opt_ctx });
- continue;
- },
-
- State.MultiplyExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- const token = self.getNextToken();
- if (tokenIdToMultiply(token.id)) |mult_id| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = token,
- .op = mult_id,
- .rhs = undefined,
- }
- );
- stack.append(State { .MultiplyExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .CurlySuffixExpressionBegin = OptionalCtx { .Required = &node.rhs } });
- continue;
- } else {
- self.putBackToken(token);
- continue;
- }
- },
-
- State.CurlySuffixExpressionBegin => |opt_ctx| {
- stack.append(State { .CurlySuffixExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .IfToken = Token.Id.LBrace });
- try stack.append(State { .TypeExprBegin = opt_ctx });
- continue;
- },
-
- State.CurlySuffixExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- if (self.isPeekToken(Token.Id.Period)) {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeSuffixOp,
- ast.NodeSuffixOp {
- .base = undefined,
- .lhs = lhs,
- .op = ast.NodeSuffixOp.SuffixOp {
- .StructInitializer = ArrayList(&ast.NodeFieldInitializer).init(arena),
- },
- .rtoken = undefined,
- }
- );
- stack.append(State { .CurlySuffixExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .IfToken = Token.Id.LBrace });
- try stack.append(State {
- .FieldInitListItemOrEnd = ListSave(&ast.NodeFieldInitializer) {
- .list = &node.op.StructInitializer,
- .ptr = &node.rtoken,
- }
- });
- continue;
- }
-
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeSuffixOp,
- ast.NodeSuffixOp {
- .base = undefined,
- .lhs = lhs,
- .op = ast.NodeSuffixOp.SuffixOp {
- .ArrayInitializer = ArrayList(&ast.Node).init(arena),
- },
- .rtoken = undefined,
- }
- );
- stack.append(State { .CurlySuffixExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .IfToken = Token.Id.LBrace });
- try stack.append(State {
- .ExprListItemOrEnd = ExprListCtx {
- .list = &node.op.ArrayInitializer,
- .end = Token.Id.RBrace,
- .ptr = &node.rtoken,
- }
- });
- continue;
- },
-
- State.TypeExprBegin => |opt_ctx| {
- stack.append(State { .TypeExprEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .PrefixOpExpression = opt_ctx });
- continue;
- },
-
- State.TypeExprEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- if (self.eatToken(Token.Id.Bang)) |bang| {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = bang,
- .op = ast.NodeInfixOp.InfixOp.ErrorUnion,
- .rhs = undefined,
- }
- );
- stack.append(State { .TypeExprEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .PrefixOpExpression = OptionalCtx { .Required = &node.rhs } });
- continue;
- }
- },
-
- State.PrefixOpExpression => |opt_ctx| {
- const token = self.getNextToken();
- if (tokenIdToPrefixOp(token.id)) |prefix_id| {
- var node = try self.createToCtxNode(arena, opt_ctx, ast.NodePrefixOp,
- ast.NodePrefixOp {
- .base = undefined,
- .op_token = token,
- .op = prefix_id,
- .rhs = undefined,
- }
- );
-
- // Treat '**' token as two derefs
- if (token.id == Token.Id.AsteriskAsterisk) {
- const child = try self.createNode(arena, ast.NodePrefixOp,
- ast.NodePrefixOp {
- .base = undefined,
- .op_token = token,
- .op = prefix_id,
- .rhs = undefined,
- }
- );
- node.rhs = &child.base;
- node = child;
- }
-
- stack.append(State { .TypeExprBegin = OptionalCtx { .Required = &node.rhs } }) catch unreachable;
- if (node.op == ast.NodePrefixOp.PrefixOp.AddrOf) {
- try stack.append(State { .AddrOfModifiers = &node.op.AddrOf });
- }
- continue;
- } else {
- self.putBackToken(token);
- stack.append(State { .SuffixOpExpressionBegin = opt_ctx }) catch unreachable;
- continue;
- }
- },
-
- State.SuffixOpExpressionBegin => |opt_ctx| {
- if (self.eatToken(Token.Id.Keyword_async)) |async_token| {
- const async_node = try self.createNode(arena, ast.NodeAsyncAttribute,
- ast.NodeAsyncAttribute {
- .base = undefined,
- .async_token = async_token,
- .allocator_type = null,
- .rangle_bracket = null,
- }
- );
- stack.append(State {
- .AsyncEnd = AsyncEndCtx {
- .ctx = opt_ctx,
- .attribute = async_node,
- }
- }) catch unreachable;
- try stack.append(State { .SuffixOpExpressionEnd = opt_ctx.toRequired() });
- try stack.append(State { .PrimaryExpression = opt_ctx.toRequired() });
- try stack.append(State { .AsyncAllocator = async_node });
- continue;
- }
-
- stack.append(State { .SuffixOpExpressionEnd = opt_ctx }) catch unreachable;
- try stack.append(State { .PrimaryExpression = opt_ctx });
- continue;
- },
-
- State.SuffixOpExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
-
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.LParen => {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeSuffixOp,
- ast.NodeSuffixOp {
- .base = undefined,
- .lhs = lhs,
- .op = ast.NodeSuffixOp.SuffixOp {
- .Call = ast.NodeSuffixOp.CallInfo {
- .params = ArrayList(&ast.Node).init(arena),
- .async_attr = null,
- }
- },
- .rtoken = undefined,
- }
- );
- stack.append(State { .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State {
- .ExprListItemOrEnd = ExprListCtx {
- .list = &node.op.Call.params,
- .end = Token.Id.RParen,
- .ptr = &node.rtoken,
- }
- });
- continue;
- },
- Token.Id.LBracket => {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeSuffixOp,
- ast.NodeSuffixOp {
- .base = undefined,
- .lhs = lhs,
- .op = ast.NodeSuffixOp.SuffixOp {
- .ArrayAccess = undefined,
- },
- .rtoken = undefined
- }
- );
- stack.append(State { .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .SliceOrArrayAccess = node });
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.op.ArrayAccess }});
- continue;
- },
- Token.Id.Period => {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeInfixOp,
- ast.NodeInfixOp {
- .base = undefined,
- .lhs = lhs,
- .op_token = token,
- .op = ast.NodeInfixOp.InfixOp.Period,
- .rhs = undefined,
- }
- );
- stack.append(State { .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
- try stack.append(State { .Identifier = OptionalCtx { .Required = &node.rhs } });
- continue;
- },
- else => {
- self.putBackToken(token);
- continue;
- },
- }
- },
-
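-                // Primary expressions: literals, grouped expressions, builtin calls,
-                // container and error-set types, fn prototypes, labeled blocks and loops,
-                // and inline assembly.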
- State.PrimaryExpression => |opt_ctx| {
- const token = self.getNextToken();
- switch (token.id) {
- Token.Id.IntegerLiteral => {
-                            // NOTE: assumes `ast.NodeIntegerLiteral`, by analogy with the literal nodes below.
-                            _ = try self.createToCtxLiteral(arena, opt_ctx, ast.NodeIntegerLiteral, token);
- continue;
- },
- Token.Id.FloatLiteral => {
- _ = try self.createToCtxLiteral(arena, opt_ctx, ast.NodeFloatLiteral, token);
- continue;
- },
- Token.Id.CharLiteral => {
- _ = try self.createToCtxLiteral(arena, opt_ctx, ast.NodeCharLiteral, token);
- continue;
- },
- Token.Id.Keyword_undefined => {
- _ = try self.createToCtxLiteral(arena, opt_ctx, ast.NodeUndefinedLiteral, token);
- continue;
- },
- Token.Id.Keyword_true, Token.Id.Keyword_false => {
- _ = try self.createToCtxLiteral(arena, opt_ctx, ast.NodeBoolLiteral, token);
- continue;
- },
- Token.Id.Keyword_null => {
- _ = try self.createToCtxLiteral(arena, opt_ctx, ast.NodeNullLiteral, token);
- continue;
- },
- Token.Id.Keyword_this => {
- _ = try self.createToCtxLiteral(arena, opt_ctx, ast.NodeThisLiteral, token);
- continue;
- },
- Token.Id.Keyword_var => {
- _ = try self.createToCtxLiteral(arena, opt_ctx, ast.NodeVarType, token);
- continue;
- },
- Token.Id.Keyword_unreachable => {
- _ = try self.createToCtxLiteral(arena, opt_ctx, ast.NodeUnreachable, token);
- continue;
- },
- Token.Id.StringLiteral, Token.Id.MultilineStringLiteralLine => {
- opt_ctx.store((try self.parseStringLiteral(arena, token)) ?? unreachable);
- continue;
- },
- Token.Id.LParen => {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeGroupedExpression,
- ast.NodeGroupedExpression {
- .base = undefined,
- .lparen = token,
- .expr = undefined,
- .rparen = undefined,
- }
- );
- stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.RParen,
- .ptr = &node.rparen,
- }
- }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.expr } });
- continue;
- },
- Token.Id.Builtin => {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeBuiltinCall,
- ast.NodeBuiltinCall {
- .base = undefined,
- .builtin_token = token,
- .params = ArrayList(&ast.Node).init(arena),
- .rparen_token = undefined,
- }
- );
- stack.append(State {
- .ExprListItemOrEnd = ExprListCtx {
- .list = &node.params,
- .end = Token.Id.RParen,
- .ptr = &node.rparen_token,
- }
- }) catch unreachable;
- try stack.append(State { .ExpectToken = Token.Id.LParen, });
- continue;
- },
- Token.Id.LBracket => {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodePrefixOp,
- ast.NodePrefixOp {
- .base = undefined,
- .op_token = token,
- .op = undefined,
- .rhs = undefined,
- }
- );
- stack.append(State { .SliceOrArrayType = node }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_error => {
- stack.append(State {
- .ErrorTypeOrSetDecl = ErrorTypeOrSetDeclCtx {
- .error_token = token,
- .opt_ctx = opt_ctx
- }
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_packed => {
- stack.append(State {
- .ContainerKind = ContainerKindCtx {
- .opt_ctx = opt_ctx,
- .ltoken = token,
- .layout = ast.NodeContainerDecl.Layout.Packed,
- },
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_extern => {
- stack.append(State {
- .ExternType = ExternTypeCtx {
- .opt_ctx = opt_ctx,
- .extern_token = token,
- },
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_struct, Token.Id.Keyword_union, Token.Id.Keyword_enum => {
- self.putBackToken(token);
- stack.append(State {
- .ContainerKind = ContainerKindCtx {
- .opt_ctx = opt_ctx,
- .ltoken = token,
- .layout = ast.NodeContainerDecl.Layout.Auto,
- },
- }) catch unreachable;
- continue;
- },
- Token.Id.Identifier => {
- stack.append(State {
- .MaybeLabeledExpression = MaybeLabeledExpressionCtx {
- .label = token,
- .opt_ctx = opt_ctx
- }
- }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_fn => {
- const fn_proto = try self.createToCtxNode(arena, opt_ctx, ast.NodeFnProto,
- ast.NodeFnProto {
- .base = undefined,
- .visib_token = null,
- .name_token = null,
- .fn_token = token,
- .params = ArrayList(&ast.Node).init(arena),
- .return_type = undefined,
- .var_args_token = null,
- .extern_export_inline_token = null,
- .cc_token = null,
- .async_attr = null,
- .body_node = null,
- .lib_name = null,
- .align_expr = null,
- }
- );
- stack.append(State { .FnProto = fn_proto }) catch unreachable;
- continue;
- },
- Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc => {
- const fn_proto = try self.createToCtxNode(arena, opt_ctx, ast.NodeFnProto,
- ast.NodeFnProto {
- .base = undefined,
- .visib_token = null,
- .name_token = null,
- .fn_token = undefined,
- .params = ArrayList(&ast.Node).init(arena),
- .return_type = undefined,
- .var_args_token = null,
- .extern_export_inline_token = null,
- .cc_token = token,
- .async_attr = null,
- .body_node = null,
- .lib_name = null,
- .align_expr = null,
- }
- );
- stack.append(State { .FnProto = fn_proto }) catch unreachable;
- try stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.Keyword_fn,
- .ptr = &fn_proto.fn_token
- }
- });
- continue;
- },
- Token.Id.Keyword_asm => {
- const node = try self.createToCtxNode(arena, opt_ctx, ast.NodeAsm,
- ast.NodeAsm {
- .base = undefined,
- .asm_token = token,
- .volatile_token = null,
- .template = undefined,
- //.tokens = ArrayList(ast.NodeAsm.AsmToken).init(arena),
- .outputs = ArrayList(&ast.NodeAsmOutput).init(arena),
- .inputs = ArrayList(&ast.NodeAsmInput).init(arena),
- .cloppers = ArrayList(&ast.Node).init(arena),
- .rparen = undefined,
- }
- );
- stack.append(State {
- .ExpectTokenSave = ExpectTokenSave {
- .id = Token.Id.RParen,
- .ptr = &node.rparen,
- }
- }) catch unreachable;
- try stack.append(State { .AsmClopperItems = &node.cloppers });
- try stack.append(State { .IfToken = Token.Id.Colon });
- try stack.append(State { .AsmInputItems = &node.inputs });
- try stack.append(State { .IfToken = Token.Id.Colon });
- try stack.append(State { .AsmOutputItems = &node.outputs });
- try stack.append(State { .IfToken = Token.Id.Colon });
- try stack.append(State { .StringLiteral = OptionalCtx { .Required = &node.template } });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- try stack.append(State {
- .OptionalTokenSave = OptionalTokenSave {
- .id = Token.Id.Keyword_volatile,
- .ptr = &node.volatile_token,
- }
- });
- },
- Token.Id.Keyword_inline => {
- stack.append(State {
- .Inline = InlineCtx {
- .label = null,
- .inline_token = token,
- .opt_ctx = opt_ctx,
- }
- }) catch unreachable;
- continue;
- },
- else => {
- if (!try self.parseBlockExpr(&stack, arena, opt_ctx, token)) {
- self.putBackToken(token);
- if (opt_ctx != OptionalCtx.Optional) {
- return self.parseError(token, "expected primary expression, found {}", @tagName(token.id));
- }
- }
- continue;
- }
- }
- },
-
-
- State.ErrorTypeOrSetDecl => |ctx| {
- if (self.eatToken(Token.Id.LBrace) == null) {
- _ = try self.createToCtxLiteral(arena, ctx.opt_ctx, ast.NodeErrorType, ctx.error_token);
- continue;
- }
-
- const node = try self.createToCtxNode(arena, ctx.opt_ctx, ast.NodeErrorSetDecl,
- ast.NodeErrorSetDecl {
- .base = undefined,
- .error_token = ctx.error_token,
- .decls = ArrayList(&ast.Node).init(arena),
- .rbrace_token = undefined,
- }
- );
-
- stack.append(State {
- .IdentifierListItemOrEnd = ListSave(&ast.Node) {
- .list = &node.decls,
- .ptr = &node.rbrace_token,
- }
- }) catch unreachable;
- continue;
- },
- State.StringLiteral => |opt_ctx| {
- const token = self.getNextToken();
- opt_ctx.store(
- (try self.parseStringLiteral(arena, token)) ?? {
- self.putBackToken(token);
- if (opt_ctx != OptionalCtx.Optional) {
- return self.parseError(token, "expected primary expression, found {}", @tagName(token.id));
- }
-
- continue;
- }
- );
- },
- State.Identifier => |opt_ctx| {
- if (self.eatToken(Token.Id.Identifier)) |ident_token| {
- _ = try self.createToCtxLiteral(arena, opt_ctx, ast.NodeIdentifier, ident_token);
- continue;
- }
-
- if (opt_ctx != OptionalCtx.Optional) {
- const token = self.getNextToken();
- return self.parseError(token, "expected identifier, found {}", @tagName(token.id));
- }
- },
-
-
- State.ExpectToken => |token_id| {
- _ = try self.expectToken(token_id);
- continue;
- },
- State.ExpectTokenSave => |expect_token_save| {
- *expect_token_save.ptr = try self.expectToken(expect_token_save.id);
- continue;
- },
- State.IfToken => |token_id| {
- if (self.eatToken(token_id)) |_| {
- continue;
- }
-
- _ = stack.pop();
- continue;
- },
- State.IfTokenSave => |if_token_save| {
- if (self.eatToken(if_token_save.id)) |token| {
- *if_token_save.ptr = token;
- continue;
- }
-
- _ = stack.pop();
- continue;
- },
- State.OptionalTokenSave => |optional_token_save| {
- if (self.eatToken(optional_token_save.id)) |token| {
- *optional_token_save.ptr = token;
- continue;
- }
-
- continue;
- },
- }
- }
- }
-
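- // Decides whether `node`, used as a statement, needs a trailing semicolon.
- // Block-bodied constructs (and their else branches) do not; everything else does.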
- fn requireSemiColon(node: &const ast.Node) bool {
- var n = node;
- while (true) {
- switch (n.id) {
- ast.Node.Id.Root,
- ast.Node.Id.StructField,
- ast.Node.Id.UnionTag,
- ast.Node.Id.EnumTag,
- ast.Node.Id.ParamDecl,
- ast.Node.Id.Block,
- ast.Node.Id.Payload,
- ast.Node.Id.PointerPayload,
- ast.Node.Id.PointerIndexPayload,
- ast.Node.Id.Switch,
- ast.Node.Id.SwitchCase,
- ast.Node.Id.SwitchElse,
- ast.Node.Id.FieldInitializer,
- ast.Node.Id.LineComment,
- ast.Node.Id.TestDecl => return false,
- ast.Node.Id.While => {
- const while_node = @fieldParentPtr(ast.NodeWhile, "base", n);
- if (while_node.@"else") |@"else"| {
- n = @"else".base;
- continue;
- }
-
- return while_node.body.id != ast.Node.Id.Block;
- },
- ast.Node.Id.For => {
- const for_node = @fieldParentPtr(ast.NodeFor, "base", n);
- if (for_node.@"else") |@"else"| {
- n = @"else".base;
- continue;
- }
-
- return for_node.body.id != ast.Node.Id.Block;
- },
- ast.Node.Id.If => {
- const if_node = @fieldParentPtr(ast.NodeIf, "base", n);
- if (if_node.@"else") |@"else"| {
- n = @"else".base;
- continue;
- }
-
- return if_node.body.id != ast.Node.Id.Block;
- },
- ast.Node.Id.Else => {
- const else_node = @fieldParentPtr(ast.NodeElse, "base", n);
- n = else_node.body;
- continue;
- },
- ast.Node.Id.Defer => {
- const defer_node = @fieldParentPtr(ast.NodeDefer, "base", n);
- return defer_node.expr.id != ast.Node.Id.Block;
- },
- ast.Node.Id.Comptime => {
- const comptime_node = @fieldParentPtr(ast.NodeComptime, "base", n);
- return comptime_node.expr.id != ast.Node.Id.Block;
- },
- ast.Node.Id.Suspend => {
- const suspend_node = @fieldParentPtr(ast.NodeSuspend, "base", n);
- if (suspend_node.body) |body| {
- return body.id != ast.Node.Id.Block;
- }
-
- return true;
- },
- else => return true,
- }
- }
- }
-
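- // Turns `token` into a string literal node: a NodeStringLiteral, or a
- // NodeMultilineStringLiteral collecting consecutive multiline lines.
- // Returns null for any other token.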
- fn parseStringLiteral(self: &Parser, arena: &mem.Allocator, token: &const Token) !?&ast.Node {
- switch (token.id) {
- Token.Id.StringLiteral => {
- return &(try self.createLiteral(arena, ast.NodeStringLiteral, token)).base;
- },
- Token.Id.MultilineStringLiteralLine => {
- const node = try self.createNode(arena, ast.NodeMultilineStringLiteral,
- ast.NodeMultilineStringLiteral {
- .base = undefined,
- .tokens = ArrayList(Token).init(arena),
- }
- );
- try node.tokens.append(token);
- while (true) {
- const multiline_str = self.getNextToken();
- if (multiline_str.id != Token.Id.MultilineStringLiteralLine) {
- self.putBackToken(multiline_str);
- break;
- }
-
- try node.tokens.append(multiline_str);
- }
-
- return &node.base;
- },
- // TODO: We shouldn't need a cast, but:
- // zig: /home/jc/Documents/zig/src/ir.cpp:7962: TypeTableEntry* ir_resolve_peer_types(IrAnalyze*, AstNode*, IrInstruction**, size_t): Assertion `err_set_type != nullptr' failed.
- else => return (?&ast.Node)(null),
- }
- }
-
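- // Pushes the states needed to parse a block-level expression (suspend, if,
- // while, for, switch, comptime, or a `{ ... }` block). Returns true if `token`
- // starts one, false so the caller can handle the token itself.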
- fn parseBlockExpr(self: &Parser, stack: &ArrayList(State), arena: &mem.Allocator, ctx: &const OptionalCtx, token: &const Token) !bool {
- switch (token.id) {
- Token.Id.Keyword_suspend => {
- const node = try self.createToCtxNode(arena, ctx, ast.NodeSuspend,
- ast.NodeSuspend {
- .base = undefined,
- .suspend_token = *token,
- .payload = null,
- .body = null,
- }
- );
-
- stack.append(State { .SuspendBody = node }) catch unreachable;
- try stack.append(State { .Payload = OptionalCtx { .Optional = &node.payload } });
- return true;
- },
- Token.Id.Keyword_if => {
- const node = try self.createToCtxNode(arena, ctx, ast.NodeIf,
- ast.NodeIf {
- .base = undefined,
- .if_token = *token,
- .condition = undefined,
- .payload = null,
- .body = undefined,
- .@"else" = null,
- }
- );
-
- stack.append(State { .Else = &node.@"else" }) catch unreachable;
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.body } });
- try stack.append(State { .PointerPayload = OptionalCtx { .Optional = &node.payload } });
- try stack.append(State { .ExpectToken = Token.Id.RParen });
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.condition } });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- return true;
- },
- Token.Id.Keyword_while => {
- stack.append(State {
- .While = LoopCtx {
- .label = null,
- .inline_token = null,
- .loop_token = *token,
- .opt_ctx = *ctx,
- }
- }) catch unreachable;
- return true;
- },
- Token.Id.Keyword_for => {
- stack.append(State {
- .For = LoopCtx {
- .label = null,
- .inline_token = null,
- .loop_token = *token,
- .opt_ctx = *ctx,
- }
- }) catch unreachable;
- return true;
- },
- Token.Id.Keyword_switch => {
- const node = try self.createToCtxNode(arena, ctx, ast.NodeSwitch,
- ast.NodeSwitch {
- .base = undefined,
- .switch_token = *token,
- .expr = undefined,
- .cases = ArrayList(&ast.NodeSwitchCase).init(arena),
- .rbrace = undefined,
- }
- );
-
- stack.append(State {
- .SwitchCaseOrEnd = ListSave(&ast.NodeSwitchCase) {
- .list = &node.cases,
- .ptr = &node.rbrace,
- },
- }) catch unreachable;
- try stack.append(State { .ExpectToken = Token.Id.LBrace });
- try stack.append(State { .ExpectToken = Token.Id.RParen });
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.expr } });
- try stack.append(State { .ExpectToken = Token.Id.LParen });
- return true;
- },
- Token.Id.Keyword_comptime => {
- const node = try self.createToCtxNode(arena, ctx, ast.NodeComptime,
- ast.NodeComptime {
- .base = undefined,
- .comptime_token = *token,
- .expr = undefined,
- }
- );
- try stack.append(State { .Expression = OptionalCtx { .Required = &node.expr } });
- return true;
- },
- Token.Id.LBrace => {
- const block = try self.createToCtxNode(arena, ctx, ast.NodeBlock,
- ast.NodeBlock {
- .base = undefined,
- .label = null,
- .lbrace = *token,
- .statements = ArrayList(&ast.Node).init(arena),
- .rbrace = undefined,
- }
- );
- stack.append(State { .Block = block }) catch unreachable;
- return true;
- },
- else => {
- return false;
- }
- }
- }
-
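- // Consumes a ',' (returning null) or the expected end token (returning it);
- // anything else is a parse error.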
- fn expectCommaOrEnd(self: &Parser, end: @TagType(Token.Id)) !?Token {
- var token = self.getNextToken();
- switch (token.id) {
- Token.Id.Comma => return null,
- else => {
- if (end == token.id) {
- return token;
- }
-
- return self.parseError(token, "expected ',' or {}, found {}", @tagName(end), @tagName(token.id));
- },
- }
- }
-
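- // The tokenIdTo* helpers below map token ids to the matching
- // ast.NodeInfixOp / ast.NodePrefixOp tags, or null when the token is not
- // that kind of operator.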
- fn tokenIdToAssignment(id: &const Token.Id) ?ast.NodeInfixOp.InfixOp {
- // TODO: We have to cast all cases because of this:
- // error: expected type '?InfixOp', found '?@TagType(InfixOp)'
- return switch (*id) {
- Token.Id.AmpersandEqual => ast.NodeInfixOp.InfixOp { .AssignBitAnd = void{} },
- Token.Id.AngleBracketAngleBracketLeftEqual => ast.NodeInfixOp.InfixOp { .AssignBitShiftLeft = void{} },
- Token.Id.AngleBracketAngleBracketRightEqual => ast.NodeInfixOp.InfixOp { .AssignBitShiftRight = void{} },
- Token.Id.AsteriskEqual => ast.NodeInfixOp.InfixOp { .AssignTimes = void{} },
- Token.Id.AsteriskPercentEqual => ast.NodeInfixOp.InfixOp { .AssignTimesWarp = void{} },
- Token.Id.CaretEqual => ast.NodeInfixOp.InfixOp { .AssignBitXor = void{} },
- Token.Id.Equal => ast.NodeInfixOp.InfixOp { .Assign = void{} },
- Token.Id.MinusEqual => ast.NodeInfixOp.InfixOp { .AssignMinus = void{} },
- Token.Id.MinusPercentEqual => ast.NodeInfixOp.InfixOp { .AssignMinusWrap = void{} },
- Token.Id.PercentEqual => ast.NodeInfixOp.InfixOp { .AssignMod = void{} },
- Token.Id.PipeEqual => ast.NodeInfixOp.InfixOp { .AssignBitOr = void{} },
- Token.Id.PlusEqual => ast.NodeInfixOp.InfixOp { .AssignPlus = void{} },
- Token.Id.PlusPercentEqual => ast.NodeInfixOp.InfixOp { .AssignPlusWrap = void{} },
- Token.Id.SlashEqual => ast.NodeInfixOp.InfixOp { .AssignDiv = void{} },
- else => null,
- };
- }
-
- fn tokenIdToUnwrapExpr(id: @TagType(Token.Id)) ?ast.NodeInfixOp.InfixOp {
- return switch (id) {
- Token.Id.Keyword_catch => ast.NodeInfixOp.InfixOp { .Catch = null },
- Token.Id.QuestionMarkQuestionMark => ast.NodeInfixOp.InfixOp { .UnwrapMaybe = void{} },
- else => null,
- };
- }
-
- fn tokenIdToComparison(id: @TagType(Token.Id)) ?ast.NodeInfixOp.InfixOp {
- return switch (id) {
- Token.Id.BangEqual => ast.NodeInfixOp.InfixOp { .BangEqual = void{} },
- Token.Id.EqualEqual => ast.NodeInfixOp.InfixOp { .EqualEqual = void{} },
- Token.Id.AngleBracketLeft => ast.NodeInfixOp.InfixOp { .LessThan = void{} },
- Token.Id.AngleBracketLeftEqual => ast.NodeInfixOp.InfixOp { .LessOrEqual = void{} },
- Token.Id.AngleBracketRight => ast.NodeInfixOp.InfixOp { .GreaterThan = void{} },
- Token.Id.AngleBracketRightEqual => ast.NodeInfixOp.InfixOp { .GreaterOrEqual = void{} },
- else => null,
- };
- }
-
- fn tokenIdToBitShift(id: @TagType(Token.Id)) ?ast.NodeInfixOp.InfixOp {
- return switch (id) {
- Token.Id.AngleBracketAngleBracketLeft => ast.NodeInfixOp.InfixOp { .BitShiftLeft = void{} },
- Token.Id.AngleBracketAngleBracketRight => ast.NodeInfixOp.InfixOp { .BitShiftRight = void{} },
- else => null,
- };
- }
-
- fn tokenIdToAddition(id: @TagType(Token.Id)) ?ast.NodeInfixOp.InfixOp {
- return switch (id) {
- Token.Id.Minus => ast.NodeInfixOp.InfixOp { .Sub = void{} },
- Token.Id.MinusPercent => ast.NodeInfixOp.InfixOp { .SubWrap = void{} },
- Token.Id.Plus => ast.NodeInfixOp.InfixOp { .Add = void{} },
- Token.Id.PlusPercent => ast.NodeInfixOp.InfixOp { .AddWrap = void{} },
- Token.Id.PlusPlus => ast.NodeInfixOp.InfixOp { .ArrayCat = void{} },
- else => null,
- };
- }
-
- fn tokenIdToMultiply(id: @TagType(Token.Id)) ?ast.NodeInfixOp.InfixOp {
- return switch (id) {
- Token.Id.Slash => ast.NodeInfixOp.InfixOp { .Div = void{} },
- Token.Id.Asterisk => ast.NodeInfixOp.InfixOp { .Mult = void{} },
- Token.Id.AsteriskAsterisk => ast.NodeInfixOp.InfixOp { .ArrayMult = void{} },
- Token.Id.AsteriskPercent => ast.NodeInfixOp.InfixOp { .MultWrap = void{} },
- Token.Id.Percent => ast.NodeInfixOp.InfixOp { .Mod = void{} },
- Token.Id.PipePipe => ast.NodeInfixOp.InfixOp { .MergeErrorSets = void{} },
- else => null,
- };
- }
-
- fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.NodePrefixOp.PrefixOp {
- return switch (id) {
- Token.Id.Bang => ast.NodePrefixOp.PrefixOp { .BoolNot = void{} },
- Token.Id.Tilde => ast.NodePrefixOp.PrefixOp { .BitNot = void{} },
- Token.Id.Minus => ast.NodePrefixOp.PrefixOp { .Negation = void{} },
- Token.Id.MinusPercent => ast.NodePrefixOp.PrefixOp { .NegationWrap = void{} },
- Token.Id.Asterisk, Token.Id.AsteriskAsterisk => ast.NodePrefixOp.PrefixOp { .Deref = void{} },
- Token.Id.Ampersand => ast.NodePrefixOp.PrefixOp {
- .AddrOf = ast.NodePrefixOp.AddrOfInfo {
- .align_expr = null,
- .bit_offset_start_token = null,
- .bit_offset_end_token = null,
- .const_token = null,
- .volatile_token = null,
- },
- },
- Token.Id.QuestionMark => ast.NodePrefixOp.PrefixOp { .MaybeType = void{} },
- Token.Id.QuestionMarkQuestionMark => ast.NodePrefixOp.PrefixOp { .UnwrapMaybe = void{} },
- Token.Id.Keyword_await => ast.NodePrefixOp.PrefixOp { .Await = void{} },
- Token.Id.Keyword_try => ast.NodePrefixOp.PrefixOp { .Try = void{} },
- else => null,
- };
- }
-
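- // Allocates a T in the arena, copies `init_to` into it, and fills in the base
- // with the node id plus any pending line comment. The create* variants below
- // also append the node to a list or store it into an OptionalCtx.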
- fn createNode(self: &Parser, arena: &mem.Allocator, comptime T: type, init_to: &const T) !&T {
- const node = try arena.create(T);
- *node = *init_to;
- node.base = blk: {
- const id = ast.Node.typeToId(T);
- if (self.pending_line_comment_node) |comment_node| {
- self.pending_line_comment_node = null;
- break :blk ast.Node {.id = id, .comment = comment_node};
- }
- break :blk ast.Node {.id = id, .comment = null };
- };
-
- return node;
- }
-
- fn createAttachNode(self: &Parser, arena: &mem.Allocator, list: &ArrayList(&ast.Node), comptime T: type, init_to: &const T) !&T {
- const node = try self.createNode(arena, T, init_to);
- try list.append(&node.base);
-
- return node;
- }
-
- fn createToCtxNode(self: &Parser, arena: &mem.Allocator, opt_ctx: &const OptionalCtx, comptime T: type, init_to: &const T) !&T {
- const node = try self.createNode(arena, T, init_to);
- opt_ctx.store(&node.base);
-
- return node;
- }
-
- fn createLiteral(self: &Parser, arena: &mem.Allocator, comptime T: type, token: &const Token) !&T {
- return self.createNode(arena, T,
- T {
- .base = undefined,
- .token = *token,
- }
- );
- }
-
- fn createToCtxLiteral(self: &Parser, arena: &mem.Allocator, opt_ctx: &const OptionalCtx, comptime T: type, token: &const Token) !&T {
- const node = try self.createLiteral(arena, T, token);
- opt_ctx.store(&node.base);
-
- return node;
- }
-
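- // Prints a diagnostic with file, line and column, echoes the offending source
- // line with a tilde underline, and returns error.ParseError.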
- fn parseError(self: &Parser, token: &const Token, comptime fmt: []const u8, args: ...) (error{ParseError}) {
- const loc = self.tokenizer.getTokenLocation(0, token);
- warn("{}:{}:{}: error: " ++ fmt ++ "\n", self.source_file_name, loc.line + 1, loc.column + 1, args);
- warn("{}\n", self.tokenizer.buffer[loc.line_start..loc.line_end]);
- {
- var i: usize = 0;
- while (i < loc.column) : (i += 1) {
- warn(" ");
- }
- }
- {
- const caret_count = token.end - token.start;
- var i: usize = 0;
- while (i < caret_count) : (i += 1) {
- warn("~");
- }
- }
- warn("\n");
- return error.ParseError;
- }
-
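- // Token stream helpers: expectToken errors on mismatch, eatToken consumes a
- // token only when it matches, and putBackToken/getNextToken implement a small
- // pushback buffer in front of the tokenizer.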
- fn expectToken(self: &Parser, id: @TagType(Token.Id)) !Token {
- const token = self.getNextToken();
- if (token.id != id) {
- return self.parseError(token, "expected {}, found {}", @tagName(id), @tagName(token.id));
- }
- return token;
- }
-
- fn eatToken(self: &Parser, id: @TagType(Token.Id)) ?Token {
- if (self.isPeekToken(id)) {
- return self.getNextToken();
- }
- return null;
- }
-
- fn putBackToken(self: &Parser, token: &const Token) void {
- self.put_back_tokens[self.put_back_count] = *token;
- self.put_back_count += 1;
- }
-
- fn getNextToken(self: &Parser) Token {
- if (self.put_back_count != 0) {
- const put_back_index = self.put_back_count - 1;
- const put_back_token = self.put_back_tokens[put_back_index];
- self.put_back_count = put_back_index;
- return put_back_token;
- } else {
- return self.tokenizer.next();
- }
- }
-
- fn isPeekToken(self: &Parser, id: @TagType(Token.Id)) bool {
- const token = self.getNextToken();
- defer self.putBackToken(token);
- return id == token.id;
- }
-
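- // renderAst dumps the node ids of the tree, indented by depth, using an
- // explicit stack of RenderAstFrame entries instead of recursion.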
- const RenderAstFrame = struct {
- node: &ast.Node,
- indent: usize,
- };
-
- pub fn renderAst(self: &Parser, stream: var, root_node: &ast.NodeRoot) !void {
- var stack = self.initUtilityArrayList(RenderAstFrame);
- defer self.deinitUtilityArrayList(stack);
-
- try stack.append(RenderAstFrame {
- .node = &root_node.base,
- .indent = 0,
- });
-
- while (stack.popOrNull()) |frame| {
- {
- var i: usize = 0;
- while (i < frame.indent) : (i += 1) {
- try stream.print(" ");
- }
- }
- try stream.print("{}\n", @tagName(frame.node.id));
- var child_i: usize = 0;
- while (frame.node.iterate(child_i)) |child| : (child_i += 1) {
- try stack.append(RenderAstFrame {
- .node = child,
- .indent = frame.indent + 2,
- });
- }
- }
- }
-
- const RenderState = union(enum) {
- TopLevelDecl: &ast.Node,
- ParamDecl: &ast.Node,
- Text: []const u8,
- Expression: &ast.Node,
- VarDecl: &ast.NodeVarDecl,
- Statement: &ast.Node,
- FieldInitializer: &ast.NodeFieldInitializer,
- PrintIndent,
- Indent: usize,
- };
-
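- // renderSource pretty-prints the tree back to source text. Work is driven by
- // a stack of RenderState entries pushed in reverse order; the Indent and
- // PrintIndent states manage the current indentation level.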
- pub fn renderSource(self: &Parser, stream: var, root_node: &ast.NodeRoot) !void {
- var stack = self.initUtilityArrayList(RenderState);
- defer self.deinitUtilityArrayList(stack);
-
- {
- try stack.append(RenderState { .Text = "\n"});
-
- var i = root_node.decls.len;
- while (i != 0) {
- i -= 1;
- const decl = root_node.decls.items[i];
- try stack.append(RenderState {.TopLevelDecl = decl});
- if (i != 0) {
- try stack.append(RenderState {
- .Text = blk: {
- const prev_node = root_node.decls.at(i - 1);
- const loc = self.tokenizer.getTokenLocation(prev_node.lastToken().end, decl.firstToken());
- if (loc.line >= 2) {
- break :blk "\n\n";
- }
- break :blk "\n";
- },
- });
- }
- }
- }
-
- const indent_delta = 4;
- var indent: usize = 0;
- while (stack.popOrNull()) |state| {
- switch (state) {
- RenderState.TopLevelDecl => |decl| {
- switch (decl.id) {
- ast.Node.Id.FnProto => {
- const fn_proto = @fieldParentPtr(ast.NodeFnProto, "base", decl);
-
- if (fn_proto.body_node) |body_node| {
- stack.append(RenderState { .Expression = body_node}) catch unreachable;
- try stack.append(RenderState { .Text = " "});
- } else {
- stack.append(RenderState { .Text = ";" }) catch unreachable;
- }
-
- try stack.append(RenderState { .Expression = decl });
- },
- ast.Node.Id.Use => {
- const use_decl = @fieldParentPtr(ast.NodeUse, "base", decl);
- if (use_decl.visib_token) |visib_token| {
- try stream.print("{} ", self.tokenizer.getTokenSlice(visib_token));
- }
- try stream.print("use ");
- try stack.append(RenderState { .Text = ";" });
- try stack.append(RenderState { .Expression = use_decl.expr });
- },
- ast.Node.Id.VarDecl => {
- const var_decl = @fieldParentPtr(ast.NodeVarDecl, "base", decl);
- try stack.append(RenderState { .VarDecl = var_decl});
- },
- ast.Node.Id.TestDecl => {
- const test_decl = @fieldParentPtr(ast.NodeTestDecl, "base", decl);
- try stream.print("test ");
- try stack.append(RenderState { .Expression = test_decl.body_node });
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Expression = test_decl.name });
- },
- ast.Node.Id.StructField => {
- const field = @fieldParentPtr(ast.NodeStructField, "base", decl);
- if (field.visib_token) |visib_token| {
- try stream.print("{} ", self.tokenizer.getTokenSlice(visib_token));
- }
- try stream.print("{}: ", self.tokenizer.getTokenSlice(field.name_token));
- try stack.append(RenderState { .Expression = field.type_expr});
- },
- ast.Node.Id.UnionTag => {
- const tag = @fieldParentPtr(ast.NodeUnionTag, "base", decl);
- try stream.print("{}", self.tokenizer.getTokenSlice(tag.name_token));
-
- if (tag.type_expr) |type_expr| {
- try stream.print(": ");
- try stack.append(RenderState { .Expression = type_expr});
- }
- },
- ast.Node.Id.EnumTag => {
- const tag = @fieldParentPtr(ast.NodeEnumTag, "base", decl);
- try stream.print("{}", self.tokenizer.getTokenSlice(tag.name_token));
-
- if (tag.value) |value| {
- try stream.print(" = ");
- try stack.append(RenderState { .Expression = value});
- }
- },
- ast.Node.Id.Comptime => {
- if (requireSemiColon(decl)) {
- try stack.append(RenderState { .Text = ";" });
- }
- try stack.append(RenderState { .Expression = decl });
- },
- else => unreachable,
- }
- },
-
- RenderState.FieldInitializer => |field_init| {
- try stream.print(".{}", self.tokenizer.getTokenSlice(field_init.name_token));
- try stream.print(" = ");
- try stack.append(RenderState { .Expression = field_init.expr });
- },
-
- RenderState.VarDecl => |var_decl| {
- try stack.append(RenderState { .Text = ";" });
- if (var_decl.init_node) |init_node| {
- try stack.append(RenderState { .Expression = init_node });
- try stack.append(RenderState { .Text = " = " });
- }
- if (var_decl.align_node) |align_node| {
- try stack.append(RenderState { .Text = ")" });
- try stack.append(RenderState { .Expression = align_node });
- try stack.append(RenderState { .Text = " align(" });
- }
- if (var_decl.type_node) |type_node| {
- try stack.append(RenderState { .Expression = type_node });
- try stack.append(RenderState { .Text = ": " });
- }
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(var_decl.name_token) });
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(var_decl.mut_token) });
-
- if (var_decl.comptime_token) |comptime_token| {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(comptime_token) });
- }
-
- if (var_decl.extern_export_token) |extern_export_token| {
- if (var_decl.lib_name != null) {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Expression = ??var_decl.lib_name });
- }
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(extern_export_token) });
- }
-
- if (var_decl.visib_token) |visib_token| {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(visib_token) });
- }
- },
-
- RenderState.ParamDecl => |base| {
- const param_decl = @fieldParentPtr(ast.NodeParamDecl, "base", base);
- if (param_decl.comptime_token) |comptime_token| {
- try stream.print("{} ", self.tokenizer.getTokenSlice(comptime_token));
- }
- if (param_decl.noalias_token) |noalias_token| {
- try stream.print("{} ", self.tokenizer.getTokenSlice(noalias_token));
- }
- if (param_decl.name_token) |name_token| {
- try stream.print("{}: ", self.tokenizer.getTokenSlice(name_token));
- }
- if (param_decl.var_args_token) |var_args_token| {
- try stream.print("{}", self.tokenizer.getTokenSlice(var_args_token));
- } else {
- try stack.append(RenderState { .Expression = param_decl.type_node});
- }
- },
- RenderState.Text => |bytes| {
- try stream.write(bytes);
- },
- RenderState.Expression => |base| switch (base.id) {
- ast.Node.Id.Identifier => {
- const identifier = @fieldParentPtr(ast.NodeIdentifier, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(identifier.token));
- },
- ast.Node.Id.Block => {
- const block = @fieldParentPtr(ast.NodeBlock, "base", base);
- if (block.label) |label| {
- try stream.print("{}: ", self.tokenizer.getTokenSlice(label));
- }
-
- if (block.statements.len == 0) {
- try stream.write("{}");
- } else {
- try stream.write("{");
- try stack.append(RenderState { .Text = "}"});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent});
- try stack.append(RenderState { .Text = "\n"});
- var i = block.statements.len;
- while (i != 0) {
- i -= 1;
- const statement_node = block.statements.items[i];
- try stack.append(RenderState { .Statement = statement_node});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent + indent_delta});
- try stack.append(RenderState {
- .Text = blk: {
- if (i != 0) {
- const prev_node = block.statements.items[i - 1];
- const loc = self.tokenizer.getTokenLocation(prev_node.lastToken().end, statement_node.firstToken());
- if (loc.line >= 2) {
- break :blk "\n\n";
- }
- }
- break :blk "\n";
- },
- });
- }
- }
- },
- ast.Node.Id.Defer => {
- const defer_node = @fieldParentPtr(ast.NodeDefer, "base", base);
- try stream.print("{} ", self.tokenizer.getTokenSlice(defer_node.defer_token));
- try stack.append(RenderState { .Expression = defer_node.expr });
- },
- ast.Node.Id.Comptime => {
- const comptime_node = @fieldParentPtr(ast.NodeComptime, "base", base);
- try stream.print("{} ", self.tokenizer.getTokenSlice(comptime_node.comptime_token));
- try stack.append(RenderState { .Expression = comptime_node.expr });
- },
- ast.Node.Id.AsyncAttribute => {
- const async_attr = @fieldParentPtr(ast.NodeAsyncAttribute, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(async_attr.async_token));
-
- if (async_attr.allocator_type) |allocator_type| {
- try stack.append(RenderState { .Text = ">" });
- try stack.append(RenderState { .Expression = allocator_type });
- try stack.append(RenderState { .Text = "<" });
- }
- },
- ast.Node.Id.Suspend => {
- const suspend_node = @fieldParentPtr(ast.NodeSuspend, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(suspend_node.suspend_token));
-
- if (suspend_node.body) |body| {
- try stack.append(RenderState { .Expression = body });
- try stack.append(RenderState { .Text = " " });
- }
-
- if (suspend_node.payload) |payload| {
- try stack.append(RenderState { .Expression = payload });
- try stack.append(RenderState { .Text = " " });
- }
- },
- ast.Node.Id.InfixOp => {
- const prefix_op_node = @fieldParentPtr(ast.NodeInfixOp, "base", base);
- try stack.append(RenderState { .Expression = prefix_op_node.rhs });
-
- if (prefix_op_node.op == ast.NodeInfixOp.InfixOp.Catch) {
- if (prefix_op_node.op.Catch) |payload| {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Expression = payload });
- }
- try stack.append(RenderState { .Text = " catch " });
- } else {
- const text = switch (prefix_op_node.op) {
- ast.NodeInfixOp.InfixOp.Add => " + ",
- ast.NodeInfixOp.InfixOp.AddWrap => " +% ",
- ast.NodeInfixOp.InfixOp.ArrayCat => " ++ ",
- ast.NodeInfixOp.InfixOp.ArrayMult => " ** ",
- ast.NodeInfixOp.InfixOp.Assign => " = ",
- ast.NodeInfixOp.InfixOp.AssignBitAnd => " &= ",
- ast.NodeInfixOp.InfixOp.AssignBitOr => " |= ",
- ast.NodeInfixOp.InfixOp.AssignBitShiftLeft => " <<= ",
- ast.NodeInfixOp.InfixOp.AssignBitShiftRight => " >>= ",
- ast.NodeInfixOp.InfixOp.AssignBitXor => " ^= ",
- ast.NodeInfixOp.InfixOp.AssignDiv => " /= ",
- ast.NodeInfixOp.InfixOp.AssignMinus => " -= ",
- ast.NodeInfixOp.InfixOp.AssignMinusWrap => " -%= ",
- ast.NodeInfixOp.InfixOp.AssignMod => " %= ",
- ast.NodeInfixOp.InfixOp.AssignPlus => " += ",
- ast.NodeInfixOp.InfixOp.AssignPlusWrap => " +%= ",
- ast.NodeInfixOp.InfixOp.AssignTimes => " *= ",
- ast.NodeInfixOp.InfixOp.AssignTimesWarp => " *%= ",
- ast.NodeInfixOp.InfixOp.BangEqual => " != ",
- ast.NodeInfixOp.InfixOp.BitAnd => " & ",
- ast.NodeInfixOp.InfixOp.BitOr => " | ",
- ast.NodeInfixOp.InfixOp.BitShiftLeft => " << ",
- ast.NodeInfixOp.InfixOp.BitShiftRight => " >> ",
- ast.NodeInfixOp.InfixOp.BitXor => " ^ ",
- ast.NodeInfixOp.InfixOp.BoolAnd => " and ",
- ast.NodeInfixOp.InfixOp.BoolOr => " or ",
- ast.NodeInfixOp.InfixOp.Div => " / ",
- ast.NodeInfixOp.InfixOp.EqualEqual => " == ",
- ast.NodeInfixOp.InfixOp.ErrorUnion => "!",
- ast.NodeInfixOp.InfixOp.GreaterOrEqual => " >= ",
- ast.NodeInfixOp.InfixOp.GreaterThan => " > ",
- ast.NodeInfixOp.InfixOp.LessOrEqual => " <= ",
- ast.NodeInfixOp.InfixOp.LessThan => " < ",
- ast.NodeInfixOp.InfixOp.MergeErrorSets => " || ",
- ast.NodeInfixOp.InfixOp.Mod => " % ",
- ast.NodeInfixOp.InfixOp.Mult => " * ",
- ast.NodeInfixOp.InfixOp.MultWrap => " *% ",
- ast.NodeInfixOp.InfixOp.Period => ".",
- ast.NodeInfixOp.InfixOp.Sub => " - ",
- ast.NodeInfixOp.InfixOp.SubWrap => " -% ",
- ast.NodeInfixOp.InfixOp.UnwrapMaybe => " ?? ",
- ast.NodeInfixOp.InfixOp.Range => " ... ",
- ast.NodeInfixOp.InfixOp.Catch => unreachable,
- };
-
- try stack.append(RenderState { .Text = text });
- }
- try stack.append(RenderState { .Expression = prefix_op_node.lhs });
- },
- ast.Node.Id.PrefixOp => {
- const prefix_op_node = @fieldParentPtr(ast.NodePrefixOp, "base", base);
- try stack.append(RenderState { .Expression = prefix_op_node.rhs });
- switch (prefix_op_node.op) {
- ast.NodePrefixOp.PrefixOp.AddrOf => |addr_of_info| {
- try stream.write("&");
- if (addr_of_info.volatile_token != null) {
- try stack.append(RenderState { .Text = "volatile "});
- }
- if (addr_of_info.const_token != null) {
- try stack.append(RenderState { .Text = "const "});
- }
- if (addr_of_info.align_expr) |align_expr| {
- try stream.print("align(");
- try stack.append(RenderState { .Text = ") "});
- try stack.append(RenderState { .Expression = align_expr});
- }
- },
- ast.NodePrefixOp.PrefixOp.SliceType => |addr_of_info| {
- try stream.write("[]");
- if (addr_of_info.volatile_token != null) {
- try stack.append(RenderState { .Text = "volatile "});
- }
- if (addr_of_info.const_token != null) {
- try stack.append(RenderState { .Text = "const "});
- }
- if (addr_of_info.align_expr) |align_expr| {
- try stream.print("align(");
- try stack.append(RenderState { .Text = ") "});
- try stack.append(RenderState { .Expression = align_expr});
- }
- },
- ast.NodePrefixOp.PrefixOp.ArrayType => |array_index| {
- try stack.append(RenderState { .Text = "]"});
- try stack.append(RenderState { .Expression = array_index});
- try stack.append(RenderState { .Text = "["});
- },
- ast.NodePrefixOp.PrefixOp.BitNot => try stream.write("~"),
- ast.NodePrefixOp.PrefixOp.BoolNot => try stream.write("!"),
- ast.NodePrefixOp.PrefixOp.Deref => try stream.write("*"),
- ast.NodePrefixOp.PrefixOp.Negation => try stream.write("-"),
- ast.NodePrefixOp.PrefixOp.NegationWrap => try stream.write("-%"),
- ast.NodePrefixOp.PrefixOp.Try => try stream.write("try "),
- ast.NodePrefixOp.PrefixOp.UnwrapMaybe => try stream.write("??"),
- ast.NodePrefixOp.PrefixOp.MaybeType => try stream.write("?"),
- ast.NodePrefixOp.PrefixOp.Await => try stream.write("await "),
- ast.NodePrefixOp.PrefixOp.Cancel => try stream.write("cancel "),
- ast.NodePrefixOp.PrefixOp.Resume => try stream.write("resume "),
- }
- },
- ast.Node.Id.SuffixOp => {
- const suffix_op = @fieldParentPtr(ast.NodeSuffixOp, "base", base);
-
- switch (suffix_op.op) {
- ast.NodeSuffixOp.SuffixOp.Call => |call_info| {
- try stack.append(RenderState { .Text = ")"});
- var i = call_info.params.len;
- while (i != 0) {
- i -= 1;
- const param_node = call_info.params.at(i);
- try stack.append(RenderState { .Expression = param_node});
- if (i != 0) {
- try stack.append(RenderState { .Text = ", " });
- }
- }
- try stack.append(RenderState { .Text = "("});
- try stack.append(RenderState { .Expression = suffix_op.lhs });
-
- if (call_info.async_attr) |async_attr| {
- try stack.append(RenderState { .Text = " "});
- try stack.append(RenderState { .Expression = &async_attr.base });
- }
- },
- ast.NodeSuffixOp.SuffixOp.ArrayAccess => |index_expr| {
- try stack.append(RenderState { .Text = "]"});
- try stack.append(RenderState { .Expression = index_expr});
- try stack.append(RenderState { .Text = "["});
- try stack.append(RenderState { .Expression = suffix_op.lhs });
- },
- ast.NodeSuffixOp.SuffixOp.Slice => |range| {
- try stack.append(RenderState { .Text = "]"});
- if (range.end) |end| {
- try stack.append(RenderState { .Expression = end});
- }
- try stack.append(RenderState { .Text = ".."});
- try stack.append(RenderState { .Expression = range.start});
- try stack.append(RenderState { .Text = "["});
- try stack.append(RenderState { .Expression = suffix_op.lhs });
- },
- ast.NodeSuffixOp.SuffixOp.StructInitializer => |field_inits| {
- if (field_inits.len == 0) {
- try stack.append(RenderState { .Text = "{}" });
- try stack.append(RenderState { .Expression = suffix_op.lhs });
- continue;
- }
- try stack.append(RenderState { .Text = "}"});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent });
- var i = field_inits.len;
- while (i != 0) {
- i -= 1;
- const field_init = field_inits.at(i);
- try stack.append(RenderState { .Text = ",\n" });
- try stack.append(RenderState { .FieldInitializer = field_init });
- try stack.append(RenderState.PrintIndent);
- }
- try stack.append(RenderState { .Indent = indent + indent_delta });
- try stack.append(RenderState { .Text = " {\n"});
- try stack.append(RenderState { .Expression = suffix_op.lhs });
- },
- ast.NodeSuffixOp.SuffixOp.ArrayInitializer => |exprs| {
- if (exprs.len == 0) {
- try stack.append(RenderState { .Text = "{}" });
- try stack.append(RenderState { .Expression = suffix_op.lhs });
- continue;
- }
- try stack.append(RenderState { .Text = "}"});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent });
- var i = exprs.len;
- while (i != 0) {
- i -= 1;
- const expr = exprs.at(i);
- try stack.append(RenderState { .Text = ",\n" });
- try stack.append(RenderState { .Expression = expr });
- try stack.append(RenderState.PrintIndent);
- }
- try stack.append(RenderState { .Indent = indent + indent_delta });
- try stack.append(RenderState { .Text = " {\n"});
- try stack.append(RenderState { .Expression = suffix_op.lhs });
- },
- }
- },
- ast.Node.Id.ControlFlowExpression => {
- const flow_expr = @fieldParentPtr(ast.NodeControlFlowExpression, "base", base);
-
- if (flow_expr.rhs) |rhs| {
- try stack.append(RenderState { .Expression = rhs });
- try stack.append(RenderState { .Text = " " });
- }
-
- switch (flow_expr.kind) {
- ast.NodeControlFlowExpression.Kind.Break => |maybe_label| {
- try stream.print("break");
- if (maybe_label) |label| {
- try stream.print(" :");
- try stack.append(RenderState { .Expression = label });
- }
- },
- ast.NodeControlFlowExpression.Kind.Continue => |maybe_label| {
- try stream.print("continue");
- if (maybe_label) |label| {
- try stream.print(" :");
- try stack.append(RenderState { .Expression = label });
- }
- },
- ast.NodeControlFlowExpression.Kind.Return => {
- try stream.print("return");
- },
-
- }
- },
- ast.Node.Id.Payload => {
- const payload = @fieldParentPtr(ast.NodePayload, "base", base);
- try stack.append(RenderState { .Text = "|"});
- try stack.append(RenderState { .Expression = payload.error_symbol });
- try stack.append(RenderState { .Text = "|"});
- },
- ast.Node.Id.PointerPayload => {
- const payload = @fieldParentPtr(ast.NodePointerPayload, "base", base);
- try stack.append(RenderState { .Text = "|"});
- try stack.append(RenderState { .Expression = payload.value_symbol });
-
- if (payload.ptr_token) |ptr_token| {
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(ptr_token) });
- }
-
- try stack.append(RenderState { .Text = "|"});
- },
- ast.Node.Id.PointerIndexPayload => {
- const payload = @fieldParentPtr(ast.NodePointerIndexPayload, "base", base);
- try stack.append(RenderState { .Text = "|"});
-
- if (payload.index_symbol) |index_symbol| {
- try stack.append(RenderState { .Expression = index_symbol });
- try stack.append(RenderState { .Text = ", "});
- }
-
- try stack.append(RenderState { .Expression = payload.value_symbol });
-
- if (payload.ptr_token) |ptr_token| {
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(ptr_token) });
- }
-
- try stack.append(RenderState { .Text = "|"});
- },
- ast.Node.Id.GroupedExpression => {
- const grouped_expr = @fieldParentPtr(ast.NodeGroupedExpression, "base", base);
- try stack.append(RenderState { .Text = ")"});
- try stack.append(RenderState { .Expression = grouped_expr.expr });
- try stack.append(RenderState { .Text = "("});
- },
- ast.Node.Id.FieldInitializer => {
- const field_init = @fieldParentPtr(ast.NodeFieldInitializer, "base", base);
- try stream.print(".{} = ", self.tokenizer.getTokenSlice(field_init.name_token));
- try stack.append(RenderState { .Expression = field_init.expr });
- },
- ast.Node.Id.IntegerLiteral => {
- const integer_literal = @fieldParentPtr(ast.NodeIntegerLiteral, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(integer_literal.token));
- },
- ast.Node.Id.FloatLiteral => {
- const float_literal = @fieldParentPtr(ast.NodeFloatLiteral, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(float_literal.token));
- },
- ast.Node.Id.StringLiteral => {
- const string_literal = @fieldParentPtr(ast.NodeStringLiteral, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(string_literal.token));
- },
- ast.Node.Id.CharLiteral => {
- const char_literal = @fieldParentPtr(ast.NodeCharLiteral, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(char_literal.token));
- },
- ast.Node.Id.BoolLiteral => {
- const bool_literal = @fieldParentPtr(ast.NodeBoolLiteral, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(bool_literal.token));
- },
- ast.Node.Id.NullLiteral => {
- const null_literal = @fieldParentPtr(ast.NodeNullLiteral, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(null_literal.token));
- },
- ast.Node.Id.ThisLiteral => {
- const this_literal = @fieldParentPtr(ast.NodeThisLiteral, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(this_literal.token));
- },
- ast.Node.Id.Unreachable => {
- const unreachable_node = @fieldParentPtr(ast.NodeUnreachable, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(unreachable_node.token));
- },
- ast.Node.Id.ErrorType => {
- const error_type = @fieldParentPtr(ast.NodeErrorType, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(error_type.token));
- },
- ast.Node.Id.VarType => {
- const var_type = @fieldParentPtr(ast.NodeVarType, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(var_type.token));
- },
- ast.Node.Id.ContainerDecl => {
- const container_decl = @fieldParentPtr(ast.NodeContainerDecl, "base", base);
-
- switch (container_decl.layout) {
- ast.NodeContainerDecl.Layout.Packed => try stream.print("packed "),
- ast.NodeContainerDecl.Layout.Extern => try stream.print("extern "),
- ast.NodeContainerDecl.Layout.Auto => { },
- }
-
- switch (container_decl.kind) {
- ast.NodeContainerDecl.Kind.Struct => try stream.print("struct"),
- ast.NodeContainerDecl.Kind.Enum => try stream.print("enum"),
- ast.NodeContainerDecl.Kind.Union => try stream.print("union"),
- }
-
- try stack.append(RenderState { .Text = "}"});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent });
- try stack.append(RenderState { .Text = "\n"});
-
- const fields_and_decls = container_decl.fields_and_decls.toSliceConst();
- var i = fields_and_decls.len;
- while (i != 0) {
- i -= 1;
- const node = fields_and_decls[i];
- switch (node.id) {
- ast.Node.Id.StructField,
- ast.Node.Id.UnionTag,
- ast.Node.Id.EnumTag => {
- try stack.append(RenderState { .Text = "," });
- },
- else => { }
- }
- try stack.append(RenderState { .TopLevelDecl = node});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState {
- .Text = blk: {
- if (i != 0) {
- const prev_node = fields_and_decls[i - 1];
- const loc = self.tokenizer.getTokenLocation(prev_node.lastToken().end, node.firstToken());
- if (loc.line >= 2) {
- break :blk "\n\n";
- }
- }
- break :blk "\n";
- },
- });
- }
- try stack.append(RenderState { .Indent = indent + indent_delta});
- try stack.append(RenderState { .Text = "{"});
-
- switch (container_decl.init_arg_expr) {
- ast.NodeContainerDecl.InitArg.None => try stack.append(RenderState { .Text = " "}),
- ast.NodeContainerDecl.InitArg.Enum => try stack.append(RenderState { .Text = "(enum) "}),
- ast.NodeContainerDecl.InitArg.Type => |type_expr| {
- try stack.append(RenderState { .Text = ") "});
- try stack.append(RenderState { .Expression = type_expr});
- try stack.append(RenderState { .Text = "("});
- },
- }
- },
- ast.Node.Id.ErrorSetDecl => {
- const err_set_decl = @fieldParentPtr(ast.NodeErrorSetDecl, "base", base);
- try stream.print("error ");
-
- try stack.append(RenderState { .Text = "}"});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent });
- try stack.append(RenderState { .Text = "\n"});
-
- const decls = err_set_decl.decls.toSliceConst();
- var i = decls.len;
- while (i != 0) {
- i -= 1;
- const node = decls[i];
- try stack.append(RenderState { .Text = "," });
- try stack.append(RenderState { .Expression = node });
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState {
- .Text = blk: {
- if (i != 0) {
- const prev_node = decls[i - 1];
- const loc = self.tokenizer.getTokenLocation(prev_node.lastToken().end, node.firstToken());
- if (loc.line >= 2) {
- break :blk "\n\n";
- }
- }
- break :blk "\n";
- },
- });
- }
- try stack.append(RenderState { .Indent = indent + indent_delta});
- try stack.append(RenderState { .Text = "{"});
- },
- ast.Node.Id.MultilineStringLiteral => {
- const multiline_str_literal = @fieldParentPtr(ast.NodeMultilineStringLiteral, "base", base);
- try stream.print("\n");
-
- var i: usize = 0;
- while (i < multiline_str_literal.tokens.len) : (i += 1) {
- const t = multiline_str_literal.tokens.at(i);
- try stream.writeByteNTimes(' ', indent + indent_delta);
- try stream.print("{}", self.tokenizer.getTokenSlice(t));
- }
- try stream.writeByteNTimes(' ', indent + indent_delta);
- },
- ast.Node.Id.UndefinedLiteral => {
- const undefined_literal = @fieldParentPtr(ast.NodeUndefinedLiteral, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(undefined_literal.token));
- },
- ast.Node.Id.BuiltinCall => {
- const builtin_call = @fieldParentPtr(ast.NodeBuiltinCall, "base", base);
- try stream.print("{}(", self.tokenizer.getTokenSlice(builtin_call.builtin_token));
- try stack.append(RenderState { .Text = ")"});
- var i = builtin_call.params.len;
- while (i != 0) {
- i -= 1;
- const param_node = builtin_call.params.at(i);
- try stack.append(RenderState { .Expression = param_node});
- if (i != 0) {
- try stack.append(RenderState { .Text = ", " });
- }
- }
- },
- ast.Node.Id.FnProto => {
- const fn_proto = @fieldParentPtr(ast.NodeFnProto, "base", base);
-
- switch (fn_proto.return_type) {
- ast.NodeFnProto.ReturnType.Explicit => |node| {
- try stack.append(RenderState { .Expression = node});
- },
- ast.NodeFnProto.ReturnType.InferErrorSet => |node| {
- try stack.append(RenderState { .Expression = node});
- try stack.append(RenderState { .Text = "!"});
- },
- }
-
- if (fn_proto.align_expr) |align_expr| {
- try stack.append(RenderState { .Text = ") " });
- try stack.append(RenderState { .Expression = align_expr});
- try stack.append(RenderState { .Text = "align(" });
- }
-
- try stack.append(RenderState { .Text = ") " });
- var i = fn_proto.params.len;
- while (i != 0) {
- i -= 1;
- const param_decl_node = fn_proto.params.items[i];
- try stack.append(RenderState { .ParamDecl = param_decl_node});
- if (i != 0) {
- try stack.append(RenderState { .Text = ", " });
- }
- }
-
- try stack.append(RenderState { .Text = "(" });
- if (fn_proto.name_token) |name_token| {
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(name_token) });
- try stack.append(RenderState { .Text = " " });
- }
-
- try stack.append(RenderState { .Text = "fn" });
-
- if (fn_proto.async_attr) |async_attr| {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Expression = &async_attr.base });
- }
-
- if (fn_proto.cc_token) |cc_token| {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(cc_token) });
- }
-
- if (fn_proto.lib_name) |lib_name| {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Expression = lib_name });
- }
- if (fn_proto.extern_export_inline_token) |extern_export_inline_token| {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(extern_export_inline_token) });
- }
-
- if (fn_proto.visib_token) |visib_token| {
- assert(visib_token.id == Token.Id.Keyword_pub or visib_token.id == Token.Id.Keyword_export);
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(visib_token) });
- }
- },
- ast.Node.Id.LineComment => @panic("TODO render line comment in an expression"),
- ast.Node.Id.Switch => {
- const switch_node = @fieldParentPtr(ast.NodeSwitch, "base", base);
- try stream.print("{} (", self.tokenizer.getTokenSlice(switch_node.switch_token));
-
- try stack.append(RenderState { .Text = "}"});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent });
- try stack.append(RenderState { .Text = "\n"});
-
- const cases = switch_node.cases.toSliceConst();
- var i = cases.len;
- while (i != 0) {
- i -= 1;
- const node = cases[i];
- try stack.append(RenderState { .Text = ","});
- try stack.append(RenderState { .Expression = &node.base});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState {
- .Text = blk: {
- if (i != 0) {
- const prev_node = cases[i - 1];
- const loc = self.tokenizer.getTokenLocation(prev_node.lastToken().end, node.firstToken());
- if (loc.line >= 2) {
- break :blk "\n\n";
- }
- }
- break :blk "\n";
- },
- });
- }
- try stack.append(RenderState { .Indent = indent + indent_delta});
- try stack.append(RenderState { .Text = ") {"});
- try stack.append(RenderState { .Expression = switch_node.expr });
- },
- ast.Node.Id.SwitchCase => {
- const switch_case = @fieldParentPtr(ast.NodeSwitchCase, "base", base);
-
- try stack.append(RenderState { .Expression = switch_case.expr });
- if (switch_case.payload) |payload| {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Expression = payload });
- }
- try stack.append(RenderState { .Text = " => "});
-
- const items = switch_case.items.toSliceConst();
- var i = items.len;
- while (i != 0) {
- i -= 1;
- try stack.append(RenderState { .Expression = items[i] });
-
- if (i != 0) {
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Text = ",\n" });
- }
- }
- },
- ast.Node.Id.SwitchElse => {
- const switch_else = @fieldParentPtr(ast.NodeSwitchElse, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(switch_else.token));
- },
- ast.Node.Id.Else => {
- const else_node = @fieldParentPtr(ast.NodeElse, "base", base);
- try stream.print("{}", self.tokenizer.getTokenSlice(else_node.else_token));
-
- switch (else_node.body.id) {
- ast.Node.Id.Block, ast.Node.Id.If,
- ast.Node.Id.For, ast.Node.Id.While,
- ast.Node.Id.Switch => {
- try stream.print(" ");
- try stack.append(RenderState { .Expression = else_node.body });
- },
- else => {
- try stack.append(RenderState { .Indent = indent });
- try stack.append(RenderState { .Expression = else_node.body });
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent + indent_delta });
- try stack.append(RenderState { .Text = "\n" });
- }
- }
-
- if (else_node.payload) |payload| {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Expression = payload });
- }
- },
- ast.Node.Id.While => {
- const while_node = @fieldParentPtr(ast.NodeWhile, "base", base);
- if (while_node.label) |label| {
- try stream.print("{}: ", self.tokenizer.getTokenSlice(label));
- }
-
- if (while_node.inline_token) |inline_token| {
- try stream.print("{} ", self.tokenizer.getTokenSlice(inline_token));
- }
-
- try stream.print("{} ", self.tokenizer.getTokenSlice(while_node.while_token));
-
- if (while_node.@"else") |@"else"| {
- try stack.append(RenderState { .Expression = &@"else".base });
-
- if (while_node.body.id == ast.Node.Id.Block) {
- try stack.append(RenderState { .Text = " " });
- } else {
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Text = "\n" });
- }
- }
-
- if (while_node.body.id == ast.Node.Id.Block) {
- try stack.append(RenderState { .Expression = while_node.body });
- try stack.append(RenderState { .Text = " " });
- } else {
- try stack.append(RenderState { .Indent = indent });
- try stack.append(RenderState { .Expression = while_node.body });
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent + indent_delta });
- try stack.append(RenderState { .Text = "\n" });
- }
-
- if (while_node.continue_expr) |continue_expr| {
- try stack.append(RenderState { .Text = ")" });
- try stack.append(RenderState { .Expression = continue_expr });
- try stack.append(RenderState { .Text = ": (" });
- try stack.append(RenderState { .Text = " " });
- }
-
- if (while_node.payload) |payload| {
- try stack.append(RenderState { .Expression = payload });
- try stack.append(RenderState { .Text = " " });
- }
-
- try stack.append(RenderState { .Text = ")" });
- try stack.append(RenderState { .Expression = while_node.condition });
- try stack.append(RenderState { .Text = "(" });
- },
- ast.Node.Id.For => {
- const for_node = @fieldParentPtr(ast.NodeFor, "base", base);
- if (for_node.label) |label| {
- try stream.print("{}: ", self.tokenizer.getTokenSlice(label));
- }
-
- if (for_node.inline_token) |inline_token| {
- try stream.print("{} ", self.tokenizer.getTokenSlice(inline_token));
- }
-
- try stream.print("{} ", self.tokenizer.getTokenSlice(for_node.for_token));
-
- if (for_node.@"else") |@"else"| {
- try stack.append(RenderState { .Expression = &@"else".base });
-
- if (for_node.body.id == ast.Node.Id.Block) {
- try stack.append(RenderState { .Text = " " });
- } else {
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Text = "\n" });
- }
- }
-
- if (for_node.body.id == ast.Node.Id.Block) {
- try stack.append(RenderState { .Expression = for_node.body });
- try stack.append(RenderState { .Text = " " });
- } else {
- try stack.append(RenderState { .Indent = indent });
- try stack.append(RenderState { .Expression = for_node.body });
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent + indent_delta });
- try stack.append(RenderState { .Text = "\n" });
- }
-
- if (for_node.payload) |payload| {
- try stack.append(RenderState { .Expression = payload });
- try stack.append(RenderState { .Text = " " });
- }
-
- try stack.append(RenderState { .Text = ")" });
- try stack.append(RenderState { .Expression = for_node.array_expr });
- try stack.append(RenderState { .Text = "(" });
- },
- ast.Node.Id.If => {
- const if_node = @fieldParentPtr(ast.NodeIf, "base", base);
- try stream.print("{} ", self.tokenizer.getTokenSlice(if_node.if_token));
-
- switch (if_node.body.id) {
- ast.Node.Id.Block, ast.Node.Id.If,
- ast.Node.Id.For, ast.Node.Id.While,
- ast.Node.Id.Switch => {
- if (if_node.@"else") |@"else"| {
- try stack.append(RenderState { .Expression = &@"else".base });
-
- if (if_node.body.id == ast.Node.Id.Block) {
- try stack.append(RenderState { .Text = " " });
- } else {
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Text = "\n" });
- }
- }
- },
- else => {
- if (if_node.@"else") |@"else"| {
- try stack.append(RenderState { .Expression = @"else".body });
-
- if (@"else".payload) |payload| {
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Expression = payload });
- }
-
- try stack.append(RenderState { .Text = " " });
- try stack.append(RenderState { .Text = self.tokenizer.getTokenSlice(@"else".else_token) });
- try stack.append(RenderState { .Text = " " });
- }
- }
- }
-
- try stack.append(RenderState { .Expression = if_node.body });
- try stack.append(RenderState { .Text = " " });
-
- if (if_node.payload) |payload| {
- try stack.append(RenderState { .Expression = payload });
- try stack.append(RenderState { .Text = " " });
- }
-
- try stack.append(RenderState { .Text = ")" });
- try stack.append(RenderState { .Expression = if_node.condition });
- try stack.append(RenderState { .Text = "(" });
- },
- ast.Node.Id.Asm => {
- const asm_node = @fieldParentPtr(ast.NodeAsm, "base", base);
- try stream.print("{} ", self.tokenizer.getTokenSlice(asm_node.asm_token));
-
- if (asm_node.volatile_token) |volatile_token| {
- try stream.print("{} ", self.tokenizer.getTokenSlice(volatile_token));
- }
-
- try stack.append(RenderState { .Indent = indent });
- try stack.append(RenderState { .Text = ")" });
- {
- const cloppers = asm_node.cloppers.toSliceConst();
- var i = cloppers.len;
- while (i != 0) {
- i -= 1;
- try stack.append(RenderState { .Expression = cloppers[i] });
-
- if (i != 0) {
- try stack.append(RenderState { .Text = ", " });
- }
- }
- }
- try stack.append(RenderState { .Text = ": " });
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent + indent_delta });
- try stack.append(RenderState { .Text = "\n" });
- {
- const inputs = asm_node.inputs.toSliceConst();
- var i = inputs.len;
- while (i != 0) {
- i -= 1;
- const node = inputs[i];
- try stack.append(RenderState { .Expression = &node.base});
-
- if (i != 0) {
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState {
- .Text = blk: {
- const prev_node = inputs[i - 1];
- const loc = self.tokenizer.getTokenLocation(prev_node.lastToken().end, node.firstToken());
- if (loc.line >= 2) {
- break :blk "\n\n";
- }
- break :blk "\n";
- },
- });
- try stack.append(RenderState { .Text = "," });
- }
- }
- }
- try stack.append(RenderState { .Indent = indent + indent_delta + 2});
- try stack.append(RenderState { .Text = ": "});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent + indent_delta});
- try stack.append(RenderState { .Text = "\n" });
- {
- const outputs = asm_node.outputs.toSliceConst();
- var i = outputs.len;
- while (i != 0) {
- i -= 1;
- const node = outputs[i];
- try stack.append(RenderState { .Expression = &node.base});
-
- if (i != 0) {
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState {
- .Text = blk: {
- const prev_node = outputs[i - 1];
- const loc = self.tokenizer.getTokenLocation(prev_node.lastToken().end, node.firstToken());
- if (loc.line >= 2) {
- break :blk "\n\n";
- }
- break :blk "\n";
- },
- });
- try stack.append(RenderState { .Text = "," });
- }
- }
- }
- try stack.append(RenderState { .Indent = indent + indent_delta + 2});
- try stack.append(RenderState { .Text = ": "});
- try stack.append(RenderState.PrintIndent);
- try stack.append(RenderState { .Indent = indent + indent_delta});
- try stack.append(RenderState { .Text = "\n" });
- try stack.append(RenderState { .Expression = asm_node.template });
- try stack.append(RenderState { .Text = "(" });
- },
- ast.Node.Id.AsmInput => {
- const asm_input = @fieldParentPtr(ast.NodeAsmInput, "base", base);
-
- try stack.append(RenderState { .Text = ")"});
- try stack.append(RenderState { .Expression = asm_input.expr});
- try stack.append(RenderState { .Text = " ("});
- try stack.append(RenderState { .Expression = asm_input.constraint });
- try stack.append(RenderState { .Text = "] "});
- try stack.append(RenderState { .Expression = asm_input.symbolic_name });
- try stack.append(RenderState { .Text = "["});
- },
- ast.Node.Id.AsmOutput => {
- const asm_output = @fieldParentPtr(ast.NodeAsmOutput, "base", base);
-
- try stack.append(RenderState { .Text = ")"});
- switch (asm_output.kind) {
- ast.NodeAsmOutput.Kind.Variable => |variable_name| {
- try stack.append(RenderState { .Expression = &variable_name.base});
- },
- ast.NodeAsmOutput.Kind.Return => |return_type| {
- try stack.append(RenderState { .Expression = return_type});
- try stack.append(RenderState { .Text = "-> "});
- },
- }
- try stack.append(RenderState { .Text = " ("});
- try stack.append(RenderState { .Expression = asm_output.constraint });
- try stack.append(RenderState { .Text = "] "});
- try stack.append(RenderState { .Expression = asm_output.symbolic_name });
- try stack.append(RenderState { .Text = "["});
- },
-
- ast.Node.Id.StructField,
- ast.Node.Id.UnionTag,
- ast.Node.Id.EnumTag,
- ast.Node.Id.Root,
- ast.Node.Id.VarDecl,
- ast.Node.Id.Use,
- ast.Node.Id.TestDecl,
- ast.Node.Id.ParamDecl => unreachable,
- },
- RenderState.Statement => |base| {
- if (base.comment) |comment| {
- for (comment.lines.toSliceConst()) |line_token| {
- try stream.print("{}\n", self.tokenizer.getTokenSlice(line_token));
- try stream.writeByteNTimes(' ', indent);
- }
- }
- switch (base.id) {
- ast.Node.Id.VarDecl => {
- const var_decl = @fieldParentPtr(ast.NodeVarDecl, "base", base);
- try stack.append(RenderState { .VarDecl = var_decl});
- },
- else => {
- if (requireSemiColon(base)) {
- try stack.append(RenderState { .Text = ";" });
- }
- try stack.append(RenderState { .Expression = base });
- },
- }
- },
- RenderState.Indent => |new_indent| indent = new_indent,
- RenderState.PrintIndent => try stream.writeByteNTimes(' ', indent),
- }
- }
- }
-
- fn initUtilityArrayList(self: &Parser, comptime T: type) ArrayList(T) {
- const new_byte_count = self.utility_bytes.len - self.utility_bytes.len % @sizeOf(T);
- self.utility_bytes = self.util_allocator.alignedShrink(u8, utility_bytes_align, self.utility_bytes, new_byte_count);
- const typed_slice = ([]T)(self.utility_bytes);
- return ArrayList(T) {
- .allocator = self.util_allocator,
- .items = typed_slice,
- .len = 0,
- };
- }
-
- fn deinitUtilityArrayList(self: &Parser, list: var) void {
- self.utility_bytes = ([]align(utility_bytes_align) u8)(list.items);
- }
-
-};
-
-var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-
-fn testParse(source: []const u8, allocator: &mem.Allocator) ![]u8 {
- var tokenizer = Tokenizer.init(source);
- var parser = Parser.init(&tokenizer, allocator, "(memory buffer)");
- defer parser.deinit();
-
- var tree = try parser.parse();
- defer tree.deinit();
-
- var buffer = try std.Buffer.initSize(allocator, 0);
- errdefer buffer.deinit();
-
- var buffer_out_stream = io.BufferOutStream.init(&buffer);
- try parser.renderSource(&buffer_out_stream.stream, tree.root_node);
- return buffer.toOwnedSlice();
-}
-
-fn testCanonical(source: []const u8) !void {
- const needed_alloc_count = x: {
- // Try it once with unlimited memory, make sure it works
- var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, @maxValue(usize));
- const result_source = try testParse(source, &failing_allocator.allocator);
- if (!mem.eql(u8, result_source, source)) {
- warn("\n====== expected this output: =========\n");
- warn("{}", source);
- warn("\n======== instead found this: =========\n");
- warn("{}", result_source);
- warn("\n======================================\n");
- return error.TestFailed;
- }
- failing_allocator.allocator.free(result_source);
- break :x failing_allocator.index;
- };
-
- var fail_index: usize = 0;
- while (fail_index < needed_alloc_count) : (fail_index += 1) {
- var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
- if (testParse(source, &failing_allocator.allocator)) |_| {
- return error.NondeterministicMemoryUsage;
- } else |err| switch (err) {
- error.OutOfMemory => {
- if (failing_allocator.allocated_bytes != failing_allocator.freed_bytes) {
- warn("\nfail_index: {}/{}\nallocated bytes: {}\nfreed bytes: {}\nallocations: {}\ndeallocations: {}\n",
- fail_index, needed_alloc_count,
- failing_allocator.allocated_bytes, failing_allocator.freed_bytes,
- failing_allocator.index, failing_allocator.deallocations);
- return error.MemoryLeakDetected;
- }
- },
- error.ParseError => @panic("test failed"),
- }
- }
-}
-
-test "zig fmt: get stdout or fail" {
- try testCanonical(
- \\const std = @import("std");
- \\
- \\pub fn main() !void {
- \\ // If this program is run without stdout attached, exit with an error.
- \\ // another comment
- \\ var stdout_file = try std.io.getStdOut;
- \\}
- \\
- );
-}
-
-test "zig fmt: preserve spacing" {
- try testCanonical(
- \\const std = @import("std");
- \\
- \\pub fn main() !void {
- \\ var stdout_file = try std.io.getStdOut;
- \\ var stdout_file = try std.io.getStdOut;
- \\
- \\ var stdout_file = try std.io.getStdOut;
- \\ var stdout_file = try std.io.getStdOut;
- \\}
- \\
- );
-}
-
-test "zig fmt: return types" {
- try testCanonical(
- \\pub fn main() !void {}
- \\pub fn main() var {}
- \\pub fn main() i32 {}
- \\
- );
-}
-
-test "zig fmt: imports" {
- try testCanonical(
- \\const std = @import("std");
- \\const std = @import();
- \\
- );
-}
-
-test "zig fmt: global declarations" {
- try testCanonical(
- \\const a = b;
- \\pub const a = b;
- \\var a = b;
- \\pub var a = b;
- \\const a: i32 = b;
- \\pub const a: i32 = b;
- \\var a: i32 = b;
- \\pub var a: i32 = b;
- \\extern const a: i32 = b;
- \\pub extern const a: i32 = b;
- \\extern var a: i32 = b;
- \\pub extern var a: i32 = b;
- \\extern "a" const a: i32 = b;
- \\pub extern "a" const a: i32 = b;
- \\extern "a" var a: i32 = b;
- \\pub extern "a" var a: i32 = b;
- \\
- );
-}
-
-test "zig fmt: extern declaration" {
- try testCanonical(
- \\extern var foo: c_int;
- \\
- );
-}
-
-test "zig fmt: alignment" {
- try testCanonical(
- \\var foo: c_int align(1);
- \\
- );
-}
-
-test "zig fmt: C main" {
- try testCanonical(
- \\fn main(argc: c_int, argv: &&u8) c_int {
- \\ const a = b;
- \\}
- \\
- );
-}
-
-test "zig fmt: return" {
- try testCanonical(
- \\fn foo(argc: c_int, argv: &&u8) c_int {
- \\ return 0;
- \\}
- \\
- \\fn bar() void {
- \\ return;
- \\}
- \\
- );
-}
-
-test "zig fmt: pointer attributes" {
- try testCanonical(
- \\extern fn f1(s: &align(&u8) u8) c_int;
- \\extern fn f2(s: &&align(1) &const &volatile u8) c_int;
- \\extern fn f3(s: &align(1) const &align(1) volatile &const volatile u8) c_int;
- \\extern fn f4(s: &align(1) const volatile u8) c_int;
- \\
- );
-}
-
-test "zig fmt: slice attributes" {
- try testCanonical(
- \\extern fn f1(s: &align(&u8) u8) c_int;
- \\extern fn f2(s: &&align(1) &const &volatile u8) c_int;
- \\extern fn f3(s: &align(1) const &align(1) volatile &const volatile u8) c_int;
- \\extern fn f4(s: &align(1) const volatile u8) c_int;
- \\
- );
-}
-
-test "zig fmt: test declaration" {
- try testCanonical(
- \\test "test name" {
- \\ const a = 1;
- \\ var b = 1;
- \\}
- \\
- );
-}
-
-test "zig fmt: infix operators" {
- try testCanonical(
- \\test "infix operators" {
- \\ var i = undefined;
- \\ i = 2;
- \\ i *= 2;
- \\ i |= 2;
- \\ i ^= 2;
- \\ i <<= 2;
- \\ i >>= 2;
- \\ i &= 2;
- \\ i *= 2;
- \\ i *%= 2;
- \\ i -= 2;
- \\ i -%= 2;
- \\ i += 2;
- \\ i +%= 2;
- \\ i /= 2;
- \\ i %= 2;
- \\ _ = i == i;
- \\ _ = i != i;
- \\ _ = i != i;
- \\ _ = i.i;
- \\ _ = i || i;
- \\ _ = i!i;
- \\ _ = i ** i;
- \\ _ = i ++ i;
- \\ _ = i ?? i;
- \\ _ = i % i;
- \\ _ = i / i;
- \\ _ = i *% i;
- \\ _ = i * i;
- \\ _ = i -% i;
- \\ _ = i - i;
- \\ _ = i +% i;
- \\ _ = i + i;
- \\ _ = i << i;
- \\ _ = i >> i;
- \\ _ = i & i;
- \\ _ = i ^ i;
- \\ _ = i | i;
- \\ _ = i >= i;
- \\ _ = i <= i;
- \\ _ = i > i;
- \\ _ = i < i;
- \\ _ = i and i;
- \\ _ = i or i;
- \\}
- \\
- );
-}
-
-test "zig fmt: precedence" {
- try testCanonical(
- \\test "precedence" {
- \\ a!b();
- \\ (a!b)();
- \\ !a!b;
- \\ !(a!b);
- \\ !a{};
- \\ !(a{});
- \\ a + b{};
- \\ (a + b){};
- \\ a << b + c;
- \\ (a << b) + c;
- \\ a & b << c;
- \\ (a & b) << c;
- \\ a ^ b & c;
- \\ (a ^ b) & c;
- \\ a | b ^ c;
- \\ (a | b) ^ c;
- \\ a == b | c;
- \\ (a == b) | c;
- \\ a and b == c;
- \\ (a and b) == c;
- \\ a or b and c;
- \\ (a or b) and c;
- \\ (a or b) and c;
- \\}
- \\
- );
-}
-
-test "zig fmt: prefix operators" {
- try testCanonical(
- \\test "prefix operators" {
- \\ try return --%~??!*&0;
- \\}
- \\
- );
-}
-
-test "zig fmt: call expression" {
- try testCanonical(
- \\test "test calls" {
- \\ a();
- \\ a(1);
- \\ a(1, 2);
- \\ a(1, 2) + a(1, 2);
- \\}
- \\
- );
-}
-
-test "zig fmt: var args" {
- try testCanonical(
- \\fn print(args: ...) void {}
- \\
- );
-}
-
-test "zig fmt: var type" {
- try testCanonical(
- \\fn print(args: var) var {}
- \\const Var = var;
- \\const i: var = 0;
- \\
- );
-}
-
-test "zig fmt: functions" {
- try testCanonical(
- \\extern fn puts(s: &const u8) c_int;
- \\extern "c" fn puts(s: &const u8) c_int;
- \\export fn puts(s: &const u8) c_int;
- \\inline fn puts(s: &const u8) c_int;
- \\pub extern fn puts(s: &const u8) c_int;
- \\pub extern "c" fn puts(s: &const u8) c_int;
- \\pub export fn puts(s: &const u8) c_int;
- \\pub inline fn puts(s: &const u8) c_int;
- \\pub extern fn puts(s: &const u8) align(2 + 2) c_int;
- \\pub extern "c" fn puts(s: &const u8) align(2 + 2) c_int;
- \\pub export fn puts(s: &const u8) align(2 + 2) c_int;
- \\pub inline fn puts(s: &const u8) align(2 + 2) c_int;
- \\
- );
-}
-
-test "zig fmt: multiline string" {
- try testCanonical(
- \\const s =
- \\ \\ something
- \\ \\ something else
- \\ ;
- \\
- );
-}
-
-test "zig fmt: values" {
- try testCanonical(
- \\test "values" {
- \\ 1;
- \\ 1.0;
- \\ "string";
- \\ c"cstring";
- \\ 'c';
- \\ true;
- \\ false;
- \\ null;
- \\ undefined;
- \\ error;
- \\ this;
- \\ unreachable;
- \\}
- \\
- );
-}
-
-test "zig fmt: indexing" {
- try testCanonical(
- \\test "test index" {
- \\ a[0];
- \\ a[0 + 5];
- \\ a[0..];
- \\ a[0..5];
- \\ a[a[0]];
- \\ a[a[0..]];
- \\ a[a[0..5]];
- \\ a[a[0]..];
- \\ a[a[0..5]..];
- \\ a[a[0]..a[0]];
- \\ a[a[0..5]..a[0]];
- \\ a[a[0..5]..a[0..5]];
- \\}
- \\
- );
-}
-
-test "zig fmt: struct declaration" {
- try testCanonical(
- \\const S = struct {
- \\ const Self = this;
- \\ f1: u8,
- \\ pub f3: u8,
- \\
- \\ fn method(self: &Self) Self {
- \\ return *self;
- \\ }
- \\
- \\ f2: u8,
- \\};
- \\
- \\const Ps = packed struct {
- \\ a: u8,
- \\ pub b: u8,
- \\
- \\ c: u8,
- \\};
- \\
- \\const Es = extern struct {
- \\ a: u8,
- \\ pub b: u8,
- \\
- \\ c: u8,
- \\};
- \\
- );
-}
-
-test "zig fmt: enum declaration" {
- try testCanonical(
- \\const E = enum {
- \\ Ok,
- \\ SomethingElse = 0,
- \\};
- \\
- \\const E2 = enum(u8) {
- \\ Ok,
- \\ SomethingElse = 255,
- \\ SomethingThird,
- \\};
- \\
- \\const Ee = extern enum {
- \\ Ok,
- \\ SomethingElse,
- \\ SomethingThird,
- \\};
- \\
- \\const Ep = packed enum {
- \\ Ok,
- \\ SomethingElse,
- \\ SomethingThird,
- \\};
- \\
- );
-}
-
-test "zig fmt: union declaration" {
- try testCanonical(
- \\const U = union {
- \\ Int: u8,
- \\ Float: f32,
- \\ None,
- \\ Bool: bool,
- \\};
- \\
- \\const Ue = union(enum) {
- \\ Int: u8,
- \\ Float: f32,
- \\ None,
- \\ Bool: bool,
- \\};
- \\
- \\const E = enum {
- \\ Int,
- \\ Float,
- \\ None,
- \\ Bool,
- \\};
- \\
- \\const Ue2 = union(E) {
- \\ Int: u8,
- \\ Float: f32,
- \\ None,
- \\ Bool: bool,
- \\};
- \\
- \\const Eu = extern union {
- \\ Int: u8,
- \\ Float: f32,
- \\ None,
- \\ Bool: bool,
- \\};
- \\
- );
-}
-
-test "zig fmt: error set declaration" {
- try testCanonical(
- \\const E = error {
- \\ A,
- \\ B,
- \\
- \\ C,
- \\};
- \\
- );
-}
-
-test "zig fmt: arrays" {
- try testCanonical(
- \\test "test array" {
- \\ const a: [2]u8 = [2]u8 {
- \\ 1,
- \\ 2,
- \\ };
- \\ const a: [2]u8 = []u8 {
- \\ 1,
- \\ 2,
- \\ };
- \\ const a: [0]u8 = []u8{};
- \\}
- \\
- );
-}
-
-test "zig fmt: container initializers" {
- try testCanonical(
- \\const a1 = []u8{};
- \\const a2 = []u8 {
- \\ 1,
- \\ 2,
- \\ 3,
- \\ 4,
- \\};
- \\const s1 = S{};
- \\const s2 = S {
- \\ .a = 1,
- \\ .b = 2,
- \\};
- \\
- );
-}
-
-test "zig fmt: catch" {
- try testCanonical(
- \\test "catch" {
- \\ const a: error!u8 = 0;
- \\ _ = a catch return;
- \\ _ = a catch |err| return;
- \\}
- \\
- );
-}
-
-test "zig fmt: blocks" {
- try testCanonical(
- \\test "blocks" {
- \\ {
- \\ const a = 0;
- \\ const b = 0;
- \\ }
- \\
- \\ blk: {
- \\ const a = 0;
- \\ const b = 0;
- \\ }
- \\
- \\ const r = blk: {
- \\ const a = 0;
- \\ const b = 0;
- \\ };
- \\}
- \\
- );
-}
-
-test "zig fmt: switch" {
- try testCanonical(
- \\test "switch" {
- \\ switch (0) {
- \\ 0 => {},
- \\ 1 => unreachable,
- \\ 2,
- \\ 3 => {},
- \\ 4 ... 7 => {},
- \\ 1 + 4 * 3 + 22 => {},
- \\ else => {
- \\ const a = 1;
- \\ const b = a;
- \\ },
- \\ }
- \\
- \\ const res = switch (0) {
- \\ 0 => 0,
- \\ 1 => 2,
- \\ 1 => a = 4,
- \\ else => 4,
- \\ };
- \\
- \\ const Union = union(enum) {
- \\ Int: i64,
- \\ Float: f64,
- \\ };
- \\
- \\ const u = Union {
- \\ .Int = 0,
- \\ };
- \\ switch (u) {
- \\ Union.Int => |int| {},
- \\ Union.Float => |*float| unreachable,
- \\ }
- \\}
- \\
- );
-}
-
-test "zig fmt: while" {
- try testCanonical(
- \\test "while" {
- \\ while (10 < 1) {
- \\ unreachable;
- \\ }
- \\
- \\ while (10 < 1)
- \\ unreachable;
- \\
- \\ var i: usize = 0;
- \\ while (i < 10) : (i += 1) {
- \\ continue;
- \\ }
- \\
- \\ i = 0;
- \\ while (i < 10) : (i += 1)
- \\ continue;
- \\
- \\ i = 0;
- \\ var j: usize = 0;
- \\ while (i < 10) : ({
- \\ i += 1;
- \\ j += 1;
- \\ }) {
- \\ continue;
- \\ }
- \\
- \\ var a: ?u8 = 2;
- \\ while (a) |v| : (a = null) {
- \\ continue;
- \\ }
- \\
- \\ while (a) |v| : (a = null)
- \\ unreachable;
- \\
- \\ label: while (10 < 0) {
- \\ unreachable;
- \\ }
- \\
- \\ const res = while (0 < 10) {
- \\ break 7;
- \\ } else {
- \\ unreachable;
- \\ };
- \\
- \\ const res = while (0 < 10)
- \\ break 7
- \\ else
- \\ unreachable;
- \\
- \\ var a: error!u8 = 0;
- \\ while (a) |v| {
- \\ a = error.Err;
- \\ } else |err| {
- \\ i = 1;
- \\ }
- \\
- \\ comptime var k: usize = 0;
- \\ inline while (i < 10) : (i += 1)
- \\ j += 2;
- \\}
- \\
- );
-}
-
-test "zig fmt: for" {
- try testCanonical(
- \\test "for" {
- \\ const a = []u8 {
- \\ 1,
- \\ 2,
- \\ 3,
- \\ };
- \\ for (a) |v| {
- \\ continue;
- \\ }
- \\
- \\ for (a) |v|
- \\ continue;
- \\
- \\ for (a) |*v|
- \\ continue;
- \\
- \\ for (a) |v, i| {
- \\ continue;
- \\ }
- \\
- \\ for (a) |v, i|
- \\ continue;
- \\
- \\ const res = for (a) |v, i| {
- \\ break v;
- \\ } else {
- \\ unreachable;
- \\ };
- \\
- \\ var num: usize = 0;
- \\ inline for (a) |v, i| {
- \\ num += v;
- \\ num += i;
- \\ }
- \\}
- \\
- );
-}
-
-test "zig fmt: if" {
- try testCanonical(
- \\test "if" {
- \\ if (10 < 0) {
- \\ unreachable;
- \\ }
- \\
- \\ if (10 < 0) unreachable;
- \\
- \\ if (10 < 0) {
- \\ unreachable;
- \\ } else {
- \\ const a = 20;
- \\ }
- \\
- \\ if (10 < 0) {
- \\ unreachable;
- \\ } else if (5 < 0) {
- \\ unreachable;
- \\ } else {
- \\ const a = 20;
- \\ }
- \\
- \\ const is_world_broken = if (10 < 0) true else false;
- \\ const some_number = 1 + if (10 < 0) 2 else 3;
- \\
- \\ const a: ?u8 = 10;
- \\ const b: ?u8 = null;
- \\ if (a) |v| {
- \\ const some = v;
- \\ } else if (b) |*v| {
- \\ unreachable;
- \\ } else {
- \\ const some = 10;
- \\ }
- \\
- \\ const non_null_a = if (a) |v| v else 0;
- \\
- \\ const a_err: error!u8 = 0;
- \\ if (a_err) |v| {
- \\ const p = v;
- \\ } else |err| {
- \\ unreachable;
- \\ }
- \\}
- \\
- );
-}
-
-test "zig fmt: defer" {
- try testCanonical(
- \\test "defer" {
- \\ var i: usize = 0;
- \\ defer i = 1;
- \\ defer {
- \\ i += 2;
- \\ i *= i;
- \\ }
- \\
- \\ errdefer i += 3;
- \\ errdefer {
- \\ i += 2;
- \\ i /= i;
- \\ }
- \\}
- \\
- );
-}
-
-test "zig fmt: comptime" {
- try testCanonical(
- \\fn a() u8 {
- \\ return 5;
- \\}
- \\
- \\fn b(comptime i: u8) u8 {
- \\ return i;
- \\}
- \\
- \\const av = comptime a();
- \\const av2 = comptime blk: {
- \\ var res = a();
- \\ res *= b(2);
- \\ break :blk res;
- \\};
- \\
- \\comptime {
- \\ _ = a();
- \\}
- \\
- \\test "comptime" {
- \\ const av3 = comptime a();
- \\ const av4 = comptime blk: {
- \\ var res = a();
- \\ res *= a();
- \\ break :blk res;
- \\ };
- \\
- \\ comptime var i = 0;
- \\ comptime {
- \\ i = a();
- \\ i += b(i);
- \\ }
- \\}
- \\
- );
-}
-
-test "zig fmt: fn type" {
- try testCanonical(
- \\fn a(i: u8) u8 {
- \\ return i + 1;
- \\}
- \\
- \\const a: fn(u8) u8 = undefined;
- \\const b: extern fn(u8) u8 = undefined;
- \\const c: nakedcc fn(u8) u8 = undefined;
- \\const ap: fn(u8) u8 = a;
- \\
- );
-}
-
-test "zig fmt: inline asm" {
- try testCanonical(
- \\pub fn syscall1(number: usize, arg1: usize) usize {
- \\ return asm volatile ("syscall"
- \\ : [ret] "={rax}" (-> usize)
- \\ : [number] "{rax}" (number),
- \\ [arg1] "{rdi}" (arg1)
- \\ : "rcx", "r11");
- \\}
- \\
- );
-}
-
-test "zig fmt: coroutines" {
- try testCanonical(
- \\async fn simpleAsyncFn() void {
- \\ const a = async a.b();
- \\ x += 1;
- \\ suspend;
- \\ x += 1;
- \\ suspend |p| {}
- \\ const p = async simpleAsyncFn() catch unreachable;
- \\ await p;
- \\}
- \\
- \\test "coroutine suspend, resume, cancel" {
- \\ const p = try async testAsyncSeq();
- \\ resume p;
- \\ cancel p;
- \\}
- \\
- );
-}
-
-test "zig fmt: Block after if" {
- try testCanonical(
- \\test "Block after if" {
- \\ if (true) {
- \\ const a = 0;
- \\ }
- \\
- \\ {
- \\ const a = 0;
- \\ }
- \\}
- \\
- );
-}
-
-test "zig fmt: use" {
- try testCanonical(
- \\use @import("std");
- \\pub use @import("std");
- \\
- );
-}
-
-test "zig fmt: string identifier" {
- try testCanonical(
- \\const @"a b" = @"c d".@"e f";
- \\fn @"g h"() void {}
- \\
- );
-}
-
-test "zig fmt: error return" {
- try testCanonical(
- \\fn err() error {
- \\ call();
- \\ return error.InvalidArgs;
- \\}
- \\
- );
-}
-
-test "zig fmt: struct literals with fields on each line" {
- try testCanonical(
- \\var self = BufSet {
- \\ .hash_map = BufSetHashMap.init(a),
- \\};
- \\
- );
-}
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
new file mode 100644
index 0000000000..582bffdf3d
--- /dev/null
+++ b/std/zig/parser_test.zig
@@ -0,0 +1,1945 @@
+test "zig fmt: preserve space between async fn definitions" {
+ try testCanonical(
+ \\async fn a() void {}
+ \\
+ \\async fn b() void {}
+ \\
+ );
+}
+
+test "zig fmt: comment to disable/enable zig fmt first" {
+ try testCanonical(
+ \\// Test trailing comma syntax
+ \\// zig fmt: off
+ \\
+ \\const struct_trailing_comma = struct { x: i32, y: i32, };
+ );
+}
+
+test "zig fmt: comment to disable/enable zig fmt" {
+ try testTransform(
+ \\const a = b;
+ \\// zig fmt: off
+ \\const c = d;
+ \\// zig fmt: on
+ \\const e = f;
+ ,
+ \\const a = b;
+ \\// zig fmt: off
+ \\const c = d;
+ \\// zig fmt: on
+ \\const e = f;
+ \\
+ );
+}
+
+test "zig fmt: pointer of unknown length" {
+ try testCanonical(
+ \\fn foo(ptr: [*]u8) void {}
+ \\
+ );
+}
+
+test "zig fmt: spaces around slice operator" {
+ try testCanonical(
+ \\var a = b[c..d];
+ \\var a = b[c + 1 .. d];
+ \\var a = b[c + 1 ..];
+ \\var a = b[c .. d + 1];
+ \\var a = b[c.a..d.e];
+ \\
+ );
+}
+
+test "zig fmt: async call in if condition" {
+ try testCanonical(
+ \\comptime {
+ \\ if (async b()) {
+ \\ a();
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: 2nd arg multiline string" {
+ try testCanonical(
+ \\comptime {
+ \\ cases.addAsm("hello world linux x86_64",
+ \\ \\.text
+ \\ , "Hello, world!\n");
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: if condition wraps" {
+ try testTransform(
+ \\comptime {
+ \\ if (cond and
+ \\ cond) {
+ \\ return x;
+ \\ }
+ \\ while (cond and
+ \\ cond) {
+ \\ return x;
+ \\ }
+ \\ if (a == b and
+ \\ c) {
+ \\ a = b;
+ \\ }
+ \\ while (a == b and
+ \\ c) {
+ \\ a = b;
+ \\ }
+ \\ if ((cond and
+ \\ cond)) {
+ \\ return x;
+ \\ }
+ \\ while ((cond and
+ \\ cond)) {
+ \\ return x;
+ \\ }
+ \\ var a = if (a) |*f| x: {
+ \\ break :x &a.b;
+ \\ } else |err| err;
+ \\}
+ ,
+ \\comptime {
+ \\ if (cond and
+ \\ cond)
+ \\ {
+ \\ return x;
+ \\ }
+ \\ while (cond and
+ \\ cond)
+ \\ {
+ \\ return x;
+ \\ }
+ \\ if (a == b and
+ \\ c)
+ \\ {
+ \\ a = b;
+ \\ }
+ \\ while (a == b and
+ \\ c)
+ \\ {
+ \\ a = b;
+ \\ }
+ \\ if ((cond and
+ \\ cond))
+ \\ {
+ \\ return x;
+ \\ }
+ \\ while ((cond and
+ \\ cond))
+ \\ {
+ \\ return x;
+ \\ }
+ \\ var a = if (a) |*f| x: {
+ \\ break :x &a.b;
+ \\ } else |err| err;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: if condition has line break but must not wrap" {
+ try testCanonical(
+ \\comptime {
+ \\ if (self.user_input_options.put(name, UserInputOption{
+ \\ .name = name,
+ \\ .used = false,
+ \\ }) catch unreachable) |*prev_value| {
+ \\ foo();
+ \\ bar();
+ \\ }
+ \\ if (put(
+ \\ a,
+ \\ b,
+ \\ )) {
+ \\ foo();
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: same-line doc comment on variable declaration" {
+ try testTransform(
+ \\pub const MAP_ANONYMOUS = 0x1000; /// allocated from memory, swap space
+ \\pub const MAP_FILE = 0x0000; /// map from file (default)
+ \\
+ \\pub const EMEDIUMTYPE = 124; /// Wrong medium type
+ \\
+ \\// nameserver query return codes
+ \\pub const ENSROK = 0; /// DNS server returned answer with no data
+ ,
+ \\/// allocated from memory, swap space
+ \\pub const MAP_ANONYMOUS = 0x1000;
+ \\/// map from file (default)
+ \\pub const MAP_FILE = 0x0000;
+ \\
+ \\/// Wrong medium type
+ \\pub const EMEDIUMTYPE = 124;
+ \\
+ \\// nameserver query return codes
+ \\/// DNS server returned answer with no data
+ \\pub const ENSROK = 0;
+ \\
+ );
+}
+
+test "zig fmt: if-else with comment before else" {
+ try testCanonical(
+ \\comptime {
+ \\ // cexp(finite|nan +- i inf|nan) = nan + i nan
+ \\ if ((hx & 0x7fffffff) != 0x7f800000) {
+ \\ return Complex(f32).new(y - y, y - y);
+ \\ } // cexp(-inf +- i inf|nan) = 0 + i0
+ \\ else if (hx & 0x80000000 != 0) {
+ \\ return Complex(f32).new(0, 0);
+ \\ } // cexp(+inf +- i inf|nan) = inf + i nan
+ \\ else {
+ \\ return Complex(f32).new(x, y - y);
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: respect line breaks in if-else" {
+ try testCanonical(
+ \\comptime {
+ \\ return if (cond) a else b;
+ \\ return if (cond)
+ \\ a
+ \\ else
+ \\ b;
+ \\ return if (cond)
+ \\ a
+ \\ else if (cond)
+ \\ b
+ \\ else
+ \\ c;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: respect line breaks after infix operators" {
+ try testCanonical(
+ \\comptime {
+ \\ self.crc =
+ \\ lookup_tables[0][p[7]] ^
+ \\ lookup_tables[1][p[6]] ^
+ \\ lookup_tables[2][p[5]] ^
+ \\ lookup_tables[3][p[4]] ^
+ \\ lookup_tables[4][@truncate(u8, self.crc >> 24)] ^
+ \\ lookup_tables[5][@truncate(u8, self.crc >> 16)] ^
+ \\ lookup_tables[6][@truncate(u8, self.crc >> 8)] ^
+ \\ lookup_tables[7][@truncate(u8, self.crc >> 0)];
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: fn decl with trailing comma" {
+ try testTransform(
+ \\fn foo(a: i32, b: i32,) void {}
+ ,
+ \\fn foo(
+ \\ a: i32,
+ \\ b: i32,
+ \\) void {}
+ \\
+ );
+}
+
+test "zig fmt: enum decl with no trailing comma" {
+ try testTransform(
+ \\const StrLitKind = enum {Normal, C};
+ ,
+ \\const StrLitKind = enum {
+ \\ Normal,
+ \\ C,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: switch comment before prong" {
+ try testCanonical(
+ \\comptime {
+ \\ switch (a) {
+ \\ // hi
+ \\ 0 => {},
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: struct literal no trailing comma" {
+ try testTransform(
+ \\const a = foo{ .x = 1, .y = 2 };
+ \\const a = foo{ .x = 1,
+ \\ .y = 2 };
+ ,
+ \\const a = foo{ .x = 1, .y = 2 };
+ \\const a = foo{
+ \\ .x = 1,
+ \\ .y = 2,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: array literal with hint" {
+ try testTransform(
+ \\const a = []u8{
+ \\ 1, 2, //
+ \\ 3,
+ \\ 4,
+ \\ 5,
+ \\ 6,
+ \\ 7 };
+ \\const a = []u8{
+ \\ 1, 2, //
+ \\ 3,
+ \\ 4,
+ \\ 5,
+ \\ 6,
+ \\ 7, 8 };
+ \\const a = []u8{
+ \\ 1, 2, //
+ \\ 3,
+ \\ 4,
+ \\ 5,
+ \\ 6, // blah
+ \\ 7, 8 };
+ \\const a = []u8{
+ \\ 1, 2, //
+ \\ 3, //
+ \\ 4,
+ \\ 5,
+ \\ 6,
+ \\ 7 };
+ \\const a = []u8{
+ \\ 1,
+ \\ 2,
+ \\ 3, 4, //
+ \\ 5, 6, //
+ \\ 7, 8, //
+ \\};
+ ,
+ \\const a = []u8{
+ \\ 1, 2,
+ \\ 3, 4,
+ \\ 5, 6,
+ \\ 7,
+ \\};
+ \\const a = []u8{
+ \\ 1, 2,
+ \\ 3, 4,
+ \\ 5, 6,
+ \\ 7, 8,
+ \\};
+ \\const a = []u8{
+ \\ 1, 2,
+ \\ 3, 4,
+ \\ 5, 6, // blah
+ \\ 7, 8,
+ \\};
+ \\const a = []u8{
+ \\ 1, 2,
+ \\ 3, //
+ \\ 4,
+ \\ 5, 6,
+ \\ 7,
+ \\};
+ \\const a = []u8{
+ \\ 1,
+ \\ 2,
+ \\ 3,
+ \\ 4,
+ \\ 5,
+ \\ 6,
+ \\ 7,
+ \\ 8,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: multiline string with backslash at end of line" {
+ try testCanonical(
+ \\comptime {
+ \\ err(
+ \\ \\\
+ \\ );
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: multiline string parameter in fn call with trailing comma" {
+ try testCanonical(
+ \\fn foo() void {
+ \\ try stdout.print(
+ \\ \\ZIG_CMAKE_BINARY_DIR {}
+ \\ \\ZIG_C_HEADER_FILES {}
+ \\ \\ZIG_DIA_GUIDS_LIB {}
+ \\ \\
+ \\ ,
+ \\ std.cstr.toSliceConst(c.ZIG_CMAKE_BINARY_DIR),
+ \\ std.cstr.toSliceConst(c.ZIG_CXX_COMPILER),
+ \\ std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB),
+ \\ );
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: trailing comma on fn call" {
+ try testCanonical(
+ \\comptime {
+ \\ var module = try Module.create(
+ \\ allocator,
+ \\ zig_lib_dir,
+ \\ full_cache_dir,
+ \\ );
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: empty block with only comment" {
+ try testCanonical(
+ \\comptime {
+ \\ {
+ \\ // comment
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: no trailing comma on struct decl" {
+ try testTransform(
+ \\const RoundParam = struct {
+ \\ k: usize, s: u32, t: u32
+ \\};
+ ,
+ \\const RoundParam = struct {
+ \\ k: usize,
+ \\ s: u32,
+ \\ t: u32,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: simple asm" {
+ try testTransform(
+ \\comptime {
+ \\ asm volatile (
+ \\ \\.globl aoeu;
+ \\ \\.type aoeu, @function;
+ \\ \\.set aoeu, derp;
+ \\ );
+ \\
+ \\ asm ("not real assembly"
+ \\ :[a] "x" (x),);
+ \\ asm ("not real assembly"
+ \\ :[a] "x" (->i32),:[a] "x" (1),);
+ \\ asm ("still not real assembly"
+ \\ :::"a","b",);
+ \\}
+ ,
+ \\comptime {
+ \\ asm volatile (
+ \\ \\.globl aoeu;
+ \\ \\.type aoeu, @function;
+ \\ \\.set aoeu, derp;
+ \\ );
+ \\
+ \\ asm ("not real assembly"
+ \\ : [a] "x" (x)
+ \\ );
+ \\ asm ("not real assembly"
+ \\ : [a] "x" (-> i32)
+ \\ : [a] "x" (1)
+ \\ );
+ \\ asm ("still not real assembly"
+ \\ :
+ \\ :
+ \\ : "a", "b"
+ \\ );
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: nested struct literal with one item" {
+ try testCanonical(
+ \\const a = foo{
+ \\ .item = bar{ .a = b },
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: switch cases trailing comma" {
+ try testTransform(
+ \\fn switch_cases(x: i32) void {
+ \\ switch (x) {
+ \\ 1,2,3 => {},
+ \\ 4,5, => {},
+ \\ 6... 8, => {},
+ \\ else => {},
+ \\ }
+ \\}
+ ,
+ \\fn switch_cases(x: i32) void {
+ \\ switch (x) {
+ \\ 1, 2, 3 => {},
+ \\ 4,
+ \\ 5,
+ \\ => {},
+ \\ 6...8 => {},
+ \\ else => {},
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: slice align" {
+ try testCanonical(
+ \\const A = struct {
+ \\ items: []align(A) T,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: add trailing comma to array literal" {
+ try testTransform(
+ \\comptime {
+ \\ return []u16{'m', 's', 'y', 's', '-' // hi
+ \\ };
+ \\ return []u16{'m', 's', 'y', 's',
+ \\ '-'};
+ \\ return []u16{'m', 's', 'y', 's', '-'};
+ \\}
+ ,
+ \\comptime {
+ \\ return []u16{
+ \\ 'm', 's', 'y', 's', '-', // hi
+ \\ };
+ \\ return []u16{
+ \\ 'm', 's', 'y', 's',
+ \\ '-',
+ \\ };
+ \\ return []u16{ 'm', 's', 'y', 's', '-' };
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: first thing in file is line comment" {
+ try testCanonical(
+ \\// Introspection and determination of system libraries needed by zig.
+ \\
+ \\// Introspection and determination of system libraries needed by zig.
+ \\
+ \\const std = @import("std");
+ \\
+ );
+}
+
+test "zig fmt: line comment after doc comment" {
+ try testCanonical(
+ \\/// doc comment
+ \\// line comment
+ \\fn foo() void {}
+ \\
+ );
+}
+
+test "zig fmt: float literal with exponent" {
+ try testCanonical(
+ \\test "bit field alignment" {
+ \\ assert(@typeOf(&blah.b) == *align(1:3:6) const u3);
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: float literal with exponent" {
+ try testCanonical(
+ \\test "aoeu" {
+ \\ switch (state) {
+ \\ TermState.Start => switch (c) {
+ \\ '\x1b' => state = TermState.Escape,
+ \\ else => try out.writeByte(c),
+ \\ },
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: float literal with exponent" {
+ try testCanonical(
+ \\pub const f64_true_min = 4.94065645841246544177e-324;
+ \\const threshold = 0x1.a827999fcef32p+1022;
+ \\
+ );
+}
+
+test "zig fmt: if-else end of comptime" {
+ try testCanonical(
+ \\comptime {
+ \\ if (a) {
+ \\ b();
+ \\ } else {
+ \\ b();
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: nested blocks" {
+ try testCanonical(
+ \\comptime {
+ \\ {
+ \\ {
+ \\ {
+ \\ a();
+ \\ }
+ \\ }
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: block with same line comment after end brace" {
+ try testCanonical(
+ \\comptime {
+ \\ {
+ \\ b();
+ \\ } // comment
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: statements with comment between" {
+ try testCanonical(
+ \\comptime {
+ \\ a = b;
+ \\ // comment
+ \\ a = b;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: statements with empty line between" {
+ try testCanonical(
+ \\comptime {
+ \\ a = b;
+ \\
+ \\ a = b;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: ptr deref operator and unwrap optional operator" {
+ try testCanonical(
+ \\const a = b.*;
+ \\const a = b.?;
+ \\
+ );
+}
+
+test "zig fmt: comment after if before another if" {
+ try testCanonical(
+ \\test "aoeu" {
+ \\ // comment
+ \\ if (x) {
+ \\ bar();
+ \\ }
+ \\}
+ \\
+ \\test "aoeu" {
+ \\ if (x) {
+ \\ foo();
+ \\ }
+ \\ // comment
+ \\ if (x) {
+ \\ bar();
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: line comment between if block and else keyword" {
+ try testCanonical(
+ \\test "aoeu" {
+ \\ // cexp(finite|nan +- i inf|nan) = nan + i nan
+ \\ if ((hx & 0x7fffffff) != 0x7f800000) {
+ \\ return Complex(f32).new(y - y, y - y);
+ \\ }
+ \\ // cexp(-inf +- i inf|nan) = 0 + i0
+ \\ else if (hx & 0x80000000 != 0) {
+ \\ return Complex(f32).new(0, 0);
+ \\ }
+ \\ // cexp(+inf +- i inf|nan) = inf + i nan
+ \\ // another comment
+ \\ else {
+ \\ return Complex(f32).new(x, y - y);
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: same line comments in expression" {
+ try testCanonical(
+ \\test "aoeu" {
+ \\ const x = ( // a
+ \\ 0 // b
+ \\ ); // c
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: add comma on last switch prong" {
+ try testTransform(
+ \\test "aoeu" {
+ \\switch (self.init_arg_expr) {
+ \\ InitArg.Type => |t| { },
+ \\ InitArg.None,
+ \\ InitArg.Enum => { }
+ \\}
+ \\ switch (self.init_arg_expr) {
+ \\ InitArg.Type => |t| { },
+ \\ InitArg.None,
+ \\ InitArg.Enum => { }//line comment
+ \\ }
+ \\}
+ ,
+ \\test "aoeu" {
+ \\ switch (self.init_arg_expr) {
+ \\ InitArg.Type => |t| {},
+ \\ InitArg.None, InitArg.Enum => {},
+ \\ }
+ \\ switch (self.init_arg_expr) {
+ \\ InitArg.Type => |t| {},
+ \\ InitArg.None, InitArg.Enum => {}, //line comment
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: same-line comment after a statement" {
+ try testCanonical(
+ \\test "" {
+ \\ a = b;
+ \\ debug.assert(H.digest_size <= H.block_size); // HMAC makes this assumption
+ \\ a = b;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: same-line comment after var decl in struct" {
+ try testCanonical(
+ \\pub const vfs_cap_data = extern struct {
+ \\ const Data = struct {}; // when on disk.
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: same-line comment after field decl" {
+ try testCanonical(
+ \\pub const dirent = extern struct {
+ \\ d_name: u8,
+ \\ d_name: u8, // comment 1
+ \\ d_name: u8,
+ \\ d_name: u8, // comment 2
+ \\ d_name: u8,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: same-line comment after switch prong" {
+ try testCanonical(
+ \\test "" {
+ \\ switch (err) {
+ \\ error.PathAlreadyExists => {}, // comment 2
+ \\ else => return err, // comment 1
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: same-line comment after non-block if expression" {
+ try testCanonical(
+ \\comptime {
+ \\ if (sr > n_uword_bits - 1) // d > r
+ \\ return 0;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: same-line comment on comptime expression" {
+ try testCanonical(
+ \\test "" {
+ \\ comptime assert(@typeId(T) == builtin.TypeId.Int); // must pass an integer to absInt
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: switch with empty body" {
+ try testCanonical(
+ \\test "" {
+ \\ foo() catch |err| switch (err) {};
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: line comments in struct initializer" {
+ try testCanonical(
+ \\fn foo() void {
+ \\ return Self{
+ \\ .a = b,
+ \\
+ \\ // Initialize these two fields to buffer_size so that
+ \\ // in `readFn` we treat the state as being able to read
+ \\ .start_index = buffer_size,
+ \\ .end_index = buffer_size,
+ \\
+ \\ // middle
+ \\
+ \\ .a = b,
+ \\
+ \\ // end
+ \\ };
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: doc comments before struct field" {
+ try testCanonical(
+ \\pub const Allocator = struct {
+ \\ /// Allocate byte_count bytes and return them in a slice, with the
+ \\ /// slice's pointer aligned at least to alignment bytes.
+ \\ allocFn: fn () void,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: error set declaration" {
+ try testCanonical(
+ \\const E = error{
+ \\ A,
+ \\ B,
+ \\
+ \\ C,
+ \\};
+ \\
+ \\const Error = error{
+ \\ /// no more memory
+ \\ OutOfMemory,
+ \\};
+ \\
+ \\const Error = error{
+ \\ /// no more memory
+ \\ OutOfMemory,
+ \\
+ \\ /// another
+ \\ Another,
+ \\
+ \\ // end
+ \\};
+ \\
+ \\const Error = error{OutOfMemory};
+ \\const Error = error{};
+ \\
+ );
+}
+
+test "zig fmt: union(enum(u32)) with assigned enum values" {
+ try testCanonical(
+ \\const MultipleChoice = union(enum(u32)) {
+ \\ A = 20,
+ \\ B = 40,
+ \\ C = 60,
+ \\ D = 1000,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: resume from suspend block" {
+ try testCanonical(
+ \\fn foo() void {
+ \\ suspend {
+ \\ resume @handle();
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: comments before error set decl" {
+ try testCanonical(
+ \\const UnexpectedError = error{
+ \\ /// The Operating System returned an undocumented error code.
+ \\ Unexpected,
+ \\ // another
+ \\ Another,
+ \\
+ \\ // in between
+ \\
+ \\ // at end
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: comments before switch prong" {
+ try testCanonical(
+ \\test "" {
+ \\ switch (err) {
+ \\ error.PathAlreadyExists => continue,
+ \\
+ \\ // comment 1
+ \\
+ \\ // comment 2
+ \\ else => return err,
+ \\ // at end
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: comments before var decl in struct" {
+ try testCanonical(
+ \\pub const vfs_cap_data = extern struct {
+ \\ // All of these are mandated as little endian
+ \\ // when on disk.
+ \\ const Data = struct {
+ \\ permitted: u32,
+ \\ inheritable: u32,
+ \\ };
+ \\
+ \\ // in between
+ \\
+ \\ /// All of these are mandated as little endian
+ \\ /// when on disk.
+ \\ const Data = struct {
+ \\ permitted: u32,
+ \\ inheritable: u32,
+ \\ };
+ \\
+ \\ // at end
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: array literal with 1 item on 1 line" {
+ try testCanonical(
+ \\var s = []const u64{0} ** 25;
+ \\
+ );
+}
+
+test "zig fmt: comments before global variables" {
+ try testCanonical(
+ \\/// Foo copies keys and values before they go into the map, and
+ \\/// frees them when they get removed.
+ \\pub const Foo = struct {};
+ \\
+ );
+}
+
+test "zig fmt: comments in statements" {
+ try testCanonical(
+ \\test "std" {
+ \\ // statement comment
+ \\ _ = @import("foo/bar.zig");
+ \\
+ \\ // middle
+ \\ // middle2
+ \\
+ \\ // end
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: comments before test decl" {
+ try testCanonical(
+ \\/// top level doc comment
+ \\test "hi" {}
+ \\
+ \\// top level normal comment
+ \\test "hi" {}
+ \\
+ \\// middle
+ \\
+ \\// end
+ \\
+ );
+}
+
+test "zig fmt: preserve spacing" {
+ try testCanonical(
+ \\const std = @import("std");
+ \\
+ \\pub fn main() !void {
+ \\ var stdout_file = try std.io.getStdOut;
+ \\ var stdout_file = try std.io.getStdOut;
+ \\
+ \\ var stdout_file = try std.io.getStdOut;
+ \\ var stdout_file = try std.io.getStdOut;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: return types" {
+ try testCanonical(
+ \\pub fn main() !void {}
+ \\pub fn main() var {}
+ \\pub fn main() i32 {}
+ \\
+ );
+}
+
+test "zig fmt: imports" {
+ try testCanonical(
+ \\const std = @import("std");
+ \\const std = @import();
+ \\
+ );
+}
+
+test "zig fmt: global declarations" {
+ try testCanonical(
+ \\const a = b;
+ \\pub const a = b;
+ \\var a = b;
+ \\pub var a = b;
+ \\const a: i32 = b;
+ \\pub const a: i32 = b;
+ \\var a: i32 = b;
+ \\pub var a: i32 = b;
+ \\extern const a: i32 = b;
+ \\pub extern const a: i32 = b;
+ \\extern var a: i32 = b;
+ \\pub extern var a: i32 = b;
+ \\extern "a" const a: i32 = b;
+ \\pub extern "a" const a: i32 = b;
+ \\extern "a" var a: i32 = b;
+ \\pub extern "a" var a: i32 = b;
+ \\
+ );
+}
+
+test "zig fmt: extern declaration" {
+ try testCanonical(
+ \\extern var foo: c_int;
+ \\
+ );
+}
+
+test "zig fmt: alignment" {
+ try testCanonical(
+ \\var foo: c_int align(1);
+ \\
+ );
+}
+
+test "zig fmt: C main" {
+ try testCanonical(
+ \\fn main(argc: c_int, argv: **u8) c_int {
+ \\ const a = b;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: return" {
+ try testCanonical(
+ \\fn foo(argc: c_int, argv: **u8) c_int {
+ \\ return 0;
+ \\}
+ \\
+ \\fn bar() void {
+ \\ return;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: pointer attributes" {
+ try testCanonical(
+ \\extern fn f1(s: *align(*u8) u8) c_int;
+ \\extern fn f2(s: **align(1) *const *volatile u8) c_int;
+ \\extern fn f3(s: *align(1) const *align(1) volatile *const volatile u8) c_int;
+ \\extern fn f4(s: *align(1) const volatile u8) c_int;
+ \\
+ );
+}
+
+test "zig fmt: slice attributes" {
+ try testCanonical(
+ \\extern fn f1(s: *align(*u8) u8) c_int;
+ \\extern fn f2(s: **align(1) *const *volatile u8) c_int;
+ \\extern fn f3(s: *align(1) const *align(1) volatile *const volatile u8) c_int;
+ \\extern fn f4(s: *align(1) const volatile u8) c_int;
+ \\
+ );
+}
+
+test "zig fmt: test declaration" {
+ try testCanonical(
+ \\test "test name" {
+ \\ const a = 1;
+ \\ var b = 1;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: infix operators" {
+ try testCanonical(
+ \\test "infix operators" {
+ \\ var i = undefined;
+ \\ i = 2;
+ \\ i *= 2;
+ \\ i |= 2;
+ \\ i ^= 2;
+ \\ i <<= 2;
+ \\ i >>= 2;
+ \\ i &= 2;
+ \\ i *= 2;
+ \\ i *%= 2;
+ \\ i -= 2;
+ \\ i -%= 2;
+ \\ i += 2;
+ \\ i +%= 2;
+ \\ i /= 2;
+ \\ i %= 2;
+ \\ _ = i == i;
+ \\ _ = i != i;
+ \\ _ = i != i;
+ \\ _ = i.i;
+ \\ _ = i || i;
+ \\ _ = i!i;
+ \\ _ = i ** i;
+ \\ _ = i ++ i;
+ \\ _ = i orelse i;
+ \\ _ = i % i;
+ \\ _ = i / i;
+ \\ _ = i *% i;
+ \\ _ = i * i;
+ \\ _ = i -% i;
+ \\ _ = i - i;
+ \\ _ = i +% i;
+ \\ _ = i + i;
+ \\ _ = i << i;
+ \\ _ = i >> i;
+ \\ _ = i & i;
+ \\ _ = i ^ i;
+ \\ _ = i | i;
+ \\ _ = i >= i;
+ \\ _ = i <= i;
+ \\ _ = i > i;
+ \\ _ = i < i;
+ \\ _ = i and i;
+ \\ _ = i or i;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: precedence" {
+ try testCanonical(
+ \\test "precedence" {
+ \\ a!b();
+ \\ (a!b)();
+ \\ !a!b;
+ \\ !(a!b);
+ \\ !a{};
+ \\ !(a{});
+ \\ a + b{};
+ \\ (a + b){};
+ \\ a << b + c;
+ \\ (a << b) + c;
+ \\ a & b << c;
+ \\ (a & b) << c;
+ \\ a ^ b & c;
+ \\ (a ^ b) & c;
+ \\ a | b ^ c;
+ \\ (a | b) ^ c;
+ \\ a == b | c;
+ \\ (a == b) | c;
+ \\ a and b == c;
+ \\ (a and b) == c;
+ \\ a or b and c;
+ \\ (a or b) and c;
+ \\ (a or b) and c;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: prefix operators" {
+ try testCanonical(
+ \\test "prefix operators" {
+ \\ try return --%~!*&0;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: call expression" {
+ try testCanonical(
+ \\test "test calls" {
+ \\ a();
+ \\ a(1);
+ \\ a(1, 2);
+ \\ a(1, 2) + a(1, 2);
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: var args" {
+ try testCanonical(
+ \\fn print(args: ...) void {}
+ \\
+ );
+}
+
+test "zig fmt: var type" {
+ try testCanonical(
+ \\fn print(args: var) var {}
+ \\const Var = var;
+ \\const i: var = 0;
+ \\
+ );
+}
+
+test "zig fmt: functions" {
+ try testCanonical(
+ \\extern fn puts(s: *const u8) c_int;
+ \\extern "c" fn puts(s: *const u8) c_int;
+ \\export fn puts(s: *const u8) c_int;
+ \\inline fn puts(s: *const u8) c_int;
+ \\pub extern fn puts(s: *const u8) c_int;
+ \\pub extern "c" fn puts(s: *const u8) c_int;
+ \\pub export fn puts(s: *const u8) c_int;
+ \\pub inline fn puts(s: *const u8) c_int;
+ \\pub extern fn puts(s: *const u8) align(2 + 2) c_int;
+ \\pub extern "c" fn puts(s: *const u8) align(2 + 2) c_int;
+ \\pub export fn puts(s: *const u8) align(2 + 2) c_int;
+ \\pub inline fn puts(s: *const u8) align(2 + 2) c_int;
+ \\
+ );
+}
+
+test "zig fmt: multiline string" {
+ try testCanonical(
+ \\test "" {
+ \\ const s1 =
+ \\ \\one
+ \\ \\two)
+ \\ \\three
+ \\ ;
+ \\ const s2 =
+ \\ c\\one
+ \\ c\\two)
+ \\ c\\three
+ \\ ;
+ \\ const s3 = // hi
+ \\ \\one
+ \\ \\two)
+ \\ \\three
+ \\ ;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: values" {
+ try testCanonical(
+ \\test "values" {
+ \\ 1;
+ \\ 1.0;
+ \\ "string";
+ \\ c"cstring";
+ \\ 'c';
+ \\ true;
+ \\ false;
+ \\ null;
+ \\ undefined;
+ \\ error;
+ \\ this;
+ \\ unreachable;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: indexing" {
+ try testCanonical(
+ \\test "test index" {
+ \\ a[0];
+ \\ a[0 + 5];
+ \\ a[0..];
+ \\ a[0..5];
+ \\ a[a[0]];
+ \\ a[a[0..]];
+ \\ a[a[0..5]];
+ \\ a[a[0]..];
+ \\ a[a[0..5]..];
+ \\ a[a[0]..a[0]];
+ \\ a[a[0..5]..a[0]];
+ \\ a[a[0..5]..a[0..5]];
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: struct declaration" {
+ try testCanonical(
+ \\const S = struct {
+ \\ const Self = this;
+ \\ f1: u8,
+ \\ pub f3: u8,
+ \\
+ \\ fn method(self: *Self) Self {
+ \\ return self.*;
+ \\ }
+ \\
+ \\ f2: u8,
+ \\};
+ \\
+ \\const Ps = packed struct {
+ \\ a: u8,
+ \\ pub b: u8,
+ \\
+ \\ c: u8,
+ \\};
+ \\
+ \\const Es = extern struct {
+ \\ a: u8,
+ \\ pub b: u8,
+ \\
+ \\ c: u8,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: enum declaration" {
+ try testCanonical(
+ \\const E = enum {
+ \\ Ok,
+ \\ SomethingElse = 0,
+ \\};
+ \\
+ \\const E2 = enum(u8) {
+ \\ Ok,
+ \\ SomethingElse = 255,
+ \\ SomethingThird,
+ \\};
+ \\
+ \\const Ee = extern enum {
+ \\ Ok,
+ \\ SomethingElse,
+ \\ SomethingThird,
+ \\};
+ \\
+ \\const Ep = packed enum {
+ \\ Ok,
+ \\ SomethingElse,
+ \\ SomethingThird,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: union declaration" {
+ try testCanonical(
+ \\const U = union {
+ \\ Int: u8,
+ \\ Float: f32,
+ \\ None,
+ \\ Bool: bool,
+ \\};
+ \\
+ \\const Ue = union(enum) {
+ \\ Int: u8,
+ \\ Float: f32,
+ \\ None,
+ \\ Bool: bool,
+ \\};
+ \\
+ \\const E = enum {
+ \\ Int,
+ \\ Float,
+ \\ None,
+ \\ Bool,
+ \\};
+ \\
+ \\const Ue2 = union(E) {
+ \\ Int: u8,
+ \\ Float: f32,
+ \\ None,
+ \\ Bool: bool,
+ \\};
+ \\
+ \\const Eu = extern union {
+ \\ Int: u8,
+ \\ Float: f32,
+ \\ None,
+ \\ Bool: bool,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: arrays" {
+ try testCanonical(
+ \\test "test array" {
+ \\ const a: [2]u8 = [2]u8{
+ \\ 1,
+ \\ 2,
+ \\ };
+ \\ const a: [2]u8 = []u8{
+ \\ 1,
+ \\ 2,
+ \\ };
+ \\ const a: [0]u8 = []u8{};
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: container initializers" {
+ try testCanonical(
+ \\const a0 = []u8{};
+ \\const a1 = []u8{1};
+ \\const a2 = []u8{
+ \\ 1,
+ \\ 2,
+ \\ 3,
+ \\ 4,
+ \\};
+ \\const s0 = S{};
+ \\const s1 = S{ .a = 1 };
+ \\const s2 = S{
+ \\ .a = 1,
+ \\ .b = 2,
+ \\};
+ \\
+ );
+}
+
+test "zig fmt: catch" {
+ try testCanonical(
+ \\test "catch" {
+ \\ const a: error!u8 = 0;
+ \\ _ = a catch return;
+ \\ _ = a catch |err| return;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: blocks" {
+ try testCanonical(
+ \\test "blocks" {
+ \\ {
+ \\ const a = 0;
+ \\ const b = 0;
+ \\ }
+ \\
+ \\ blk: {
+ \\ const a = 0;
+ \\ const b = 0;
+ \\ }
+ \\
+ \\ const r = blk: {
+ \\ const a = 0;
+ \\ const b = 0;
+ \\ };
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: switch" {
+ try testCanonical(
+ \\test "switch" {
+ \\ switch (0) {
+ \\ 0 => {},
+ \\ 1 => unreachable,
+ \\ 2, 3 => {},
+ \\ 4...7 => {},
+ \\ 1 + 4 * 3 + 22 => {},
+ \\ else => {
+ \\ const a = 1;
+ \\ const b = a;
+ \\ },
+ \\ }
+ \\
+ \\ const res = switch (0) {
+ \\ 0 => 0,
+ \\ 1 => 2,
+ \\ 1 => a = 4,
+ \\ else => 4,
+ \\ };
+ \\
+ \\ const Union = union(enum) {
+ \\ Int: i64,
+ \\ Float: f64,
+ \\ };
+ \\
+ \\ switch (u) {
+ \\ Union.Int => |int| {},
+ \\ Union.Float => |*float| unreachable,
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: while" {
+ try testCanonical(
+ \\test "while" {
+ \\ while (10 < 1) {
+ \\ unreachable;
+ \\ }
+ \\
+ \\ while (10 < 1)
+ \\ unreachable;
+ \\
+ \\ var i: usize = 0;
+ \\ while (i < 10) : (i += 1) {
+ \\ continue;
+ \\ }
+ \\
+ \\ i = 0;
+ \\ while (i < 10) : (i += 1)
+ \\ continue;
+ \\
+ \\ i = 0;
+ \\ var j: usize = 0;
+ \\ while (i < 10) : ({
+ \\ i += 1;
+ \\ j += 1;
+ \\ }) {
+ \\ continue;
+ \\ }
+ \\
+ \\ var a: ?u8 = 2;
+ \\ while (a) |v| : (a = null) {
+ \\ continue;
+ \\ }
+ \\
+ \\ while (a) |v| : (a = null)
+ \\ unreachable;
+ \\
+ \\ label: while (10 < 0) {
+ \\ unreachable;
+ \\ }
+ \\
+ \\ const res = while (0 < 10) {
+ \\ break 7;
+ \\ } else {
+ \\ unreachable;
+ \\ };
+ \\
+ \\ const res = while (0 < 10)
+ \\ break 7
+ \\ else
+ \\ unreachable;
+ \\
+ \\ var a: error!u8 = 0;
+ \\ while (a) |v| {
+ \\ a = error.Err;
+ \\ } else |err| {
+ \\ i = 1;
+ \\ }
+ \\
+ \\ comptime var k: usize = 0;
+ \\ inline while (i < 10) : (i += 1)
+ \\ j += 2;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: for" {
+ try testCanonical(
+ \\test "for" {
+ \\ for (a) |v| {
+ \\ continue;
+ \\ }
+ \\
+ \\ for (a) |v|
+ \\ continue;
+ \\
+ \\ for (a) |*v|
+ \\ continue;
+ \\
+ \\ for (a) |v, i| {
+ \\ continue;
+ \\ }
+ \\
+ \\ for (a) |v, i|
+ \\ continue;
+ \\
+ \\ const res = for (a) |v, i| {
+ \\ break v;
+ \\ } else {
+ \\ unreachable;
+ \\ };
+ \\
+ \\ var num: usize = 0;
+ \\ inline for (a) |v, i| {
+ \\ num += v;
+ \\ num += i;
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: if" {
+ try testCanonical(
+ \\test "if" {
+ \\ if (10 < 0) {
+ \\ unreachable;
+ \\ }
+ \\
+ \\ if (10 < 0) unreachable;
+ \\
+ \\ if (10 < 0) {
+ \\ unreachable;
+ \\ } else {
+ \\ const a = 20;
+ \\ }
+ \\
+ \\ if (10 < 0) {
+ \\ unreachable;
+ \\ } else if (5 < 0) {
+ \\ unreachable;
+ \\ } else {
+ \\ const a = 20;
+ \\ }
+ \\
+ \\ const is_world_broken = if (10 < 0) true else false;
+ \\ const some_number = 1 + if (10 < 0) 2 else 3;
+ \\
+ \\ const a: ?u8 = 10;
+ \\ const b: ?u8 = null;
+ \\ if (a) |v| {
+ \\ const some = v;
+ \\ } else if (b) |*v| {
+ \\ unreachable;
+ \\ } else {
+ \\ const some = 10;
+ \\ }
+ \\
+ \\ const non_null_a = if (a) |v| v else 0;
+ \\
+ \\ const a_err: error!u8 = 0;
+ \\ if (a_err) |v| {
+ \\ const p = v;
+ \\ } else |err| {
+ \\ unreachable;
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: defer" {
+ try testCanonical(
+ \\test "defer" {
+ \\ var i: usize = 0;
+ \\ defer i = 1;
+ \\ defer {
+ \\ i += 2;
+ \\ i *= i;
+ \\ }
+ \\
+ \\ errdefer i += 3;
+ \\ errdefer {
+ \\ i += 2;
+ \\ i /= i;
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: comptime" {
+ try testCanonical(
+ \\fn a() u8 {
+ \\ return 5;
+ \\}
+ \\
+ \\fn b(comptime i: u8) u8 {
+ \\ return i;
+ \\}
+ \\
+ \\const av = comptime a();
+ \\const av2 = comptime blk: {
+ \\ var res = a();
+ \\ res *= b(2);
+ \\ break :blk res;
+ \\};
+ \\
+ \\comptime {
+ \\ _ = a();
+ \\}
+ \\
+ \\test "comptime" {
+ \\ const av3 = comptime a();
+ \\ const av4 = comptime blk: {
+ \\ var res = a();
+ \\ res *= a();
+ \\ break :blk res;
+ \\ };
+ \\
+ \\ comptime var i = 0;
+ \\ comptime {
+ \\ i = a();
+ \\ i += b(i);
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: fn type" {
+ try testCanonical(
+ \\fn a(i: u8) u8 {
+ \\ return i + 1;
+ \\}
+ \\
+ \\const a: fn (u8) u8 = undefined;
+ \\const b: extern fn (u8) u8 = undefined;
+ \\const c: nakedcc fn (u8) u8 = undefined;
+ \\const ap: fn (u8) u8 = a;
+ \\
+ );
+}
+
+test "zig fmt: inline asm" {
+ try testCanonical(
+ \\pub fn syscall1(number: usize, arg1: usize) usize {
+ \\ return asm volatile ("syscall"
+ \\ : [ret] "={rax}" (-> usize)
+ \\ : [number] "{rax}" (number),
+ \\ [arg1] "{rdi}" (arg1)
+ \\ : "rcx", "r11"
+ \\ );
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: coroutines" {
+ try testCanonical(
+ \\async fn simpleAsyncFn() void {
+ \\ const a = async a.b();
+ \\ x += 1;
+ \\ suspend;
+ \\ x += 1;
+ \\ suspend;
+ \\ const p: promise->void = async simpleAsyncFn() catch unreachable;
+ \\ await p;
+ \\}
+ \\
+ \\test "coroutine suspend, resume, cancel" {
+ \\ const p: promise = try async testAsyncSeq();
+ \\ resume p;
+ \\ cancel p;
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: Block after if" {
+ try testCanonical(
+ \\test "Block after if" {
+ \\ if (true) {
+ \\ const a = 0;
+ \\ }
+ \\
+ \\ {
+ \\ const a = 0;
+ \\ }
+ \\}
+ \\
+ );
+}
+
+test "zig fmt: use" {
+ try testCanonical(
+ \\use @import("std");
+ \\pub use @import("std");
+ \\
+ );
+}
+
+test "zig fmt: string identifier" {
+ try testCanonical(
+ \\const @"a b" = @"c d".@"e f";
+ \\fn @"g h"() void {}
+ \\
+ );
+}
+
+test "zig fmt: error return" {
+ try testCanonical(
+ \\fn err() error {
+ \\ call();
+ \\ return error.InvalidArgs;
+ \\}
+ \\
+ );
+}
+
+const std = @import("std");
+const mem = std.mem;
+const warn = std.debug.warn;
+const io = std.io;
+
+var fixed_buffer_mem: [100 * 1024]u8 = undefined;
+
+fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 {
+ var stderr_file = try io.getStdErr();
+ var stderr = &io.FileOutStream.init(&stderr_file).stream;
+
+ var tree = try std.zig.parse(allocator, source);
+ defer tree.deinit();
+
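+    // Report each parse error to stderr: location, message, the offending source line,
+    // and a run of '~' markers underneath the bad token.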
+ var error_it = tree.errors.iterator(0);
+ while (error_it.next()) |parse_error| {
+ const token = tree.tokens.at(parse_error.loc());
+ const loc = tree.tokenLocation(0, parse_error.loc());
+ try stderr.print("(memory buffer):{}:{}: error: ", loc.line + 1, loc.column + 1);
+ try tree.renderError(parse_error, stderr);
+ try stderr.print("\n{}\n", source[loc.line_start..loc.line_end]);
+ {
+ var i: usize = 0;
+ while (i < loc.column) : (i += 1) {
+ try stderr.write(" ");
+ }
+ }
+ {
+ const caret_count = token.end - token.start;
+ var i: usize = 0;
+ while (i < caret_count) : (i += 1) {
+ try stderr.write("~");
+ }
+ }
+ try stderr.write("\n");
+ }
+ if (tree.errors.len != 0) {
+ return error.ParseError;
+ }
+
+ var buffer = try std.Buffer.initSize(allocator, 0);
+ errdefer buffer.deinit();
+
+ var buffer_out_stream = io.BufferOutStream.init(&buffer);
+ anything_changed.* = try std.zig.render(allocator, &buffer_out_stream.stream, &tree);
+ return buffer.toOwnedSlice();
+}
+
+fn testTransform(source: []const u8, expected_source: []const u8) !void {
+ const needed_alloc_count = x: {
+        // Try it once with unlimited memory to make sure it works
+ var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
+ var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, @maxValue(usize));
+ var anything_changed: bool = undefined;
+ const result_source = try testParse(source, &failing_allocator.allocator, &anything_changed);
+ if (!mem.eql(u8, result_source, expected_source)) {
+ warn("\n====== expected this output: =========\n");
+ warn("{}", expected_source);
+ warn("\n======== instead found this: =========\n");
+ warn("{}", result_source);
+ warn("\n======================================\n");
+ return error.TestFailed;
+ }
+ const changes_expected = source.ptr != expected_source.ptr;
+ if (anything_changed != changes_expected) {
+ warn("std.zig.render returned {} instead of {}\n", anything_changed, changes_expected);
+ return error.TestFailed;
+ }
+ std.debug.assert(anything_changed == changes_expected);
+ failing_allocator.allocator.free(result_source);
+ break :x failing_allocator.index;
+ };
+
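+    // Then retry with the allocator failing at each allocation index in turn, verifying
+    // that every out-of-memory path frees everything it allocated.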
+ var fail_index: usize = 0;
+ while (fail_index < needed_alloc_count) : (fail_index += 1) {
+ var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
+ var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
+ var anything_changed: bool = undefined;
+ if (testParse(source, &failing_allocator.allocator, &anything_changed)) |_| {
+ return error.NondeterministicMemoryUsage;
+ } else |err| switch (err) {
+ error.OutOfMemory => {
+ if (failing_allocator.allocated_bytes != failing_allocator.freed_bytes) {
+ warn(
+ "\nfail_index: {}/{}\nallocated bytes: {}\nfreed bytes: {}\nallocations: {}\ndeallocations: {}\n",
+ fail_index,
+ needed_alloc_count,
+ failing_allocator.allocated_bytes,
+ failing_allocator.freed_bytes,
+ failing_allocator.index,
+ failing_allocator.deallocations,
+ );
+ return error.MemoryLeakDetected;
+ }
+ },
+ error.ParseError => @panic("test failed"),
+ else => @panic("test failed"),
+ }
+ }
+}
+
+fn testCanonical(source: []const u8) !void {
+ return testTransform(source, source);
+}
diff --git a/std/zig/render.zig b/std/zig/render.zig
new file mode 100644
index 0000000000..868902a0d1
--- /dev/null
+++ b/std/zig/render.zig
@@ -0,0 +1,2015 @@
+const std = @import("../index.zig");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+const mem = std.mem;
+const ast = std.zig.ast;
+const Token = std.zig.Token;
+
+const indent_delta = 4;
+
+pub const Error = error{
+ /// Ran out of memory allocating call stack frames to complete rendering.
+ OutOfMemory,
+};
+
+/// Returns whether anything changed
+pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@typeOf(stream).Child.Error || Error)!bool {
+ comptime assert(@typeId(@typeOf(stream)) == builtin.TypeId.Pointer);
+
+ var anything_changed: bool = false;
+
+ // make a passthrough stream that checks whether something changed
+ const MyStream = struct {
+ const MyStream = this;
+ const StreamError = @typeOf(stream).Child.Error;
+ const Stream = std.io.OutStream(StreamError);
+
+ anything_changed_ptr: *bool,
+ child_stream: @typeOf(stream),
+ stream: Stream,
+ source_index: usize,
+ source: []const u8,
+
+ fn write(iface_stream: *Stream, bytes: []const u8) StreamError!void {
+ const self = @fieldParentPtr(MyStream, "stream", iface_stream);
+
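+            // Compare the bytes about to be written with the original source; any mismatch
+            // (or writing past the end of the source) means the rendered output changed.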
+ if (!self.anything_changed_ptr.*) {
+ const end = self.source_index + bytes.len;
+ if (end > self.source.len) {
+ self.anything_changed_ptr.* = true;
+ } else {
+ const src_slice = self.source[self.source_index..end];
+ self.source_index += bytes.len;
+ if (!mem.eql(u8, bytes, src_slice)) {
+ self.anything_changed_ptr.* = true;
+ }
+ }
+ }
+
+ try self.child_stream.write(bytes);
+ }
+ };
+ var my_stream = MyStream{
+ .stream = MyStream.Stream{ .writeFn = MyStream.write },
+ .child_stream = stream,
+ .anything_changed_ptr = &anything_changed,
+ .source_index = 0,
+ .source = tree.source,
+ };
+
+ try renderRoot(allocator, &my_stream.stream, tree);
+
+ return anything_changed;
+}
+
+fn renderRoot(
+ allocator: *mem.Allocator,
+ stream: var,
+ tree: *ast.Tree,
+) (@typeOf(stream).Child.Error || Error)!void {
+ // render all the line comments at the beginning of the file
+ var tok_it = tree.tokens.iterator(0);
+ while (tok_it.next()) |token| {
+ if (token.id != Token.Id.LineComment) break;
+ try stream.print("{}\n", mem.trimRight(u8, tree.tokenSlicePtr(token), " "));
+ if (tok_it.peek()) |next_token| {
+ const loc = tree.tokenLocationPtr(token.end, next_token);
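+            // the source had at least one blank line after this comment; keep exactly one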
+ if (loc.line >= 2) {
+ try stream.writeByte('\n');
+ }
+ }
+ }
+
+ var start_col: usize = 0;
+ var it = tree.root_node.decls.iterator(0);
+ while (true) {
+ var decl = (it.next() orelse return).*;
+ // look for zig fmt: off comment
+ var start_token_index = decl.firstToken();
+ zig_fmt_loop: while (start_token_index != 0) {
+ start_token_index -= 1;
+ const start_token = tree.tokens.at(start_token_index);
+ switch (start_token.id) {
+ Token.Id.LineComment => {},
+ Token.Id.DocComment => continue,
+ else => break,
+ }
+ if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(start_token)[2..], " "), "zig fmt: off")) {
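+                // "zig fmt: off" found: emit the source verbatim until a matching
+                // "zig fmt: on" or EOF, then skip the declarations covered by that region.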
+ var end_token_index = start_token_index;
+ while (true) {
+ end_token_index += 1;
+ const end_token = tree.tokens.at(end_token_index);
+ switch (end_token.id) {
+ Token.Id.LineComment => {},
+ Token.Id.Eof => {
+ const start = tree.tokens.at(start_token_index + 1).start;
+ try stream.write(tree.source[start..]);
+ return;
+ },
+ else => continue,
+ }
+ if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(end_token)[2..], " "), "zig fmt: on")) {
+ const start = tree.tokens.at(start_token_index + 1).start;
+ try stream.print("{}\n", tree.source[start..end_token.end]);
+ while (tree.tokens.at(decl.firstToken()).start < end_token.end) {
+ decl = (it.next() orelse return).*;
+ }
+ break :zig_fmt_loop;
+ }
+ }
+ }
+ }
+
+ try renderTopLevelDecl(allocator, stream, tree, 0, &start_col, decl);
+ if (it.peek()) |next_decl| {
+ try renderExtraNewline(tree, stream, &start_col, next_decl.*);
+ }
+ }
+}
+
+fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *ast.Node) !void {
+ const first_token = node.firstToken();
+ var prev_token = first_token;
+ while (tree.tokens.at(prev_token - 1).id == Token.Id.DocComment) {
+ prev_token -= 1;
+ }
+ const prev_token_end = tree.tokens.at(prev_token - 1).end;
+ const loc = tree.tokenLocation(prev_token_end, first_token);
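+    // collapse any run of blank lines between declarations down to a single blank line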
+ if (loc.line >= 2) {
+ try stream.writeByte('\n');
+ start_col.* = 0;
+ }
+}
+
+fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@typeOf(stream).Child.Error || Error)!void {
+ switch (decl.id) {
+ ast.Node.Id.FnProto => {
+ const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
+
+ try renderDocComments(tree, stream, fn_proto, indent, start_col);
+
+ if (fn_proto.body_node) |body_node| {
+ try renderExpression(allocator, stream, tree, indent, start_col, decl, Space.Space);
+ try renderExpression(allocator, stream, tree, indent, start_col, body_node, Space.Newline);
+ } else {
+ try renderExpression(allocator, stream, tree, indent, start_col, decl, Space.None);
+ try renderToken(tree, stream, tree.nextToken(decl.lastToken()), indent, start_col, Space.Newline);
+ }
+ },
+
+ ast.Node.Id.Use => {
+ const use_decl = @fieldParentPtr(ast.Node.Use, "base", decl);
+
+ if (use_decl.visib_token) |visib_token| {
+ try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
+ }
+ try renderToken(tree, stream, use_decl.use_token, indent, start_col, Space.Space); // use
+ try renderExpression(allocator, stream, tree, indent, start_col, use_decl.expr, Space.None);
+ try renderToken(tree, stream, use_decl.semicolon_token, indent, start_col, Space.Newline); // ;
+ },
+
+ ast.Node.Id.VarDecl => {
+ const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", decl);
+
+ try renderDocComments(tree, stream, var_decl, indent, start_col);
+ try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl);
+ },
+
+ ast.Node.Id.TestDecl => {
+ const test_decl = @fieldParentPtr(ast.Node.TestDecl, "base", decl);
+
+ try renderDocComments(tree, stream, test_decl, indent, start_col);
+ try renderToken(tree, stream, test_decl.test_token, indent, start_col, Space.Space);
+ try renderExpression(allocator, stream, tree, indent, start_col, test_decl.name, Space.Space);
+ try renderExpression(allocator, stream, tree, indent, start_col, test_decl.body_node, Space.Newline);
+ },
+
+ ast.Node.Id.StructField => {
+ const field = @fieldParentPtr(ast.Node.StructField, "base", decl);
+
+ try renderDocComments(tree, stream, field, indent, start_col);
+ if (field.visib_token) |visib_token| {
+ try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
+ }
+ try renderToken(tree, stream, field.name_token, indent, start_col, Space.None); // name
+ try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, Space.Space); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr, Space.Comma); // type,
+ },
+
+ ast.Node.Id.UnionTag => {
+ const tag = @fieldParentPtr(ast.Node.UnionTag, "base", decl);
+
+ try renderDocComments(tree, stream, tag, indent, start_col);
+
+ if (tag.type_expr == null and tag.value_expr == null) {
+ return renderToken(tree, stream, tag.name_token, indent, start_col, Space.Comma); // name,
+ }
+
+ if (tag.type_expr == null) {
+ try renderToken(tree, stream, tag.name_token, indent, start_col, Space.Space); // name
+ } else {
+ try renderToken(tree, stream, tag.name_token, indent, start_col, Space.None); // name
+ }
+
+ if (tag.type_expr) |type_expr| {
+ try renderToken(tree, stream, tree.nextToken(tag.name_token), indent, start_col, Space.Space); // :
+
+ if (tag.value_expr == null) {
+ try renderExpression(allocator, stream, tree, indent, start_col, type_expr, Space.Comma); // type,
+ return;
+ } else {
+ try renderExpression(allocator, stream, tree, indent, start_col, type_expr, Space.Space); // type
+ }
+ }
+
+ const value_expr = tag.value_expr.?;
+ try renderToken(tree, stream, tree.prevToken(value_expr.firstToken()), indent, start_col, Space.Space); // =
+ try renderExpression(allocator, stream, tree, indent, start_col, value_expr, Space.Comma); // value,
+ },
+
+ ast.Node.Id.EnumTag => {
+ const tag = @fieldParentPtr(ast.Node.EnumTag, "base", decl);
+
+ try renderDocComments(tree, stream, tag, indent, start_col);
+
+ if (tag.value) |value| {
+ try renderToken(tree, stream, tag.name_token, indent, start_col, Space.Space); // name
+
+ try renderToken(tree, stream, tree.nextToken(tag.name_token), indent, start_col, Space.Space); // =
+ try renderExpression(allocator, stream, tree, indent, start_col, value, Space.Comma);
+ } else {
+ try renderToken(tree, stream, tag.name_token, indent, start_col, Space.Comma); // name
+ }
+ },
+
+ ast.Node.Id.Comptime => {
+ assert(!decl.requireSemiColon());
+ try renderExpression(allocator, stream, tree, indent, start_col, decl, Space.Newline);
+ },
+ else => unreachable,
+ }
+}
+
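+/// Renders an expression node; `space` determines what is emitted after the
+/// expression's last token (nothing, a space, a newline, a trailing comma, ...).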
+fn renderExpression(
+ allocator: *mem.Allocator,
+ stream: var,
+ tree: *ast.Tree,
+ indent: usize,
+ start_col: *usize,
+ base: *ast.Node,
+ space: Space,
+) (@typeOf(stream).Child.Error || Error)!void {
+ switch (base.id) {
+ ast.Node.Id.Identifier => {
+ const identifier = @fieldParentPtr(ast.Node.Identifier, "base", base);
+ return renderToken(tree, stream, identifier.token, indent, start_col, space);
+ },
+ ast.Node.Id.Block => {
+ const block = @fieldParentPtr(ast.Node.Block, "base", base);
+
+ if (block.label) |label| {
+ try renderToken(tree, stream, label, indent, start_col, Space.None);
+ try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space);
+ }
+
+ if (block.statements.len == 0) {
+ try renderToken(tree, stream, block.lbrace, indent + indent_delta, start_col, Space.None);
+ return renderToken(tree, stream, block.rbrace, indent, start_col, space);
+ } else {
+ const block_indent = indent + indent_delta;
+ try renderToken(tree, stream, block.lbrace, block_indent, start_col, Space.Newline);
+
+ var it = block.statements.iterator(0);
+ while (it.next()) |statement| {
+ try stream.writeByteNTimes(' ', block_indent);
+ try renderStatement(allocator, stream, tree, block_indent, start_col, statement.*);
+
+ if (it.peek()) |next_statement| {
+ try renderExtraNewline(tree, stream, start_col, next_statement.*);
+ }
+ }
+
+ try stream.writeByteNTimes(' ', indent);
+ return renderToken(tree, stream, block.rbrace, indent, start_col, space);
+ }
+ },
+ ast.Node.Id.Defer => {
+ const defer_node = @fieldParentPtr(ast.Node.Defer, "base", base);
+
+ try renderToken(tree, stream, defer_node.defer_token, indent, start_col, Space.Space);
+ return renderExpression(allocator, stream, tree, indent, start_col, defer_node.expr, space);
+ },
+ ast.Node.Id.Comptime => {
+ const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", base);
+
+ try renderToken(tree, stream, comptime_node.comptime_token, indent, start_col, Space.Space);
+ return renderExpression(allocator, stream, tree, indent, start_col, comptime_node.expr, space);
+ },
+
+ ast.Node.Id.AsyncAttribute => {
+ const async_attr = @fieldParentPtr(ast.Node.AsyncAttribute, "base", base);
+
+ if (async_attr.allocator_type) |allocator_type| {
+ try renderToken(tree, stream, async_attr.async_token, indent, start_col, Space.None); // async
+
+ try renderToken(tree, stream, tree.nextToken(async_attr.async_token), indent, start_col, Space.None); // <
+ try renderExpression(allocator, stream, tree, indent, start_col, allocator_type, Space.None); // allocator
+ return renderToken(tree, stream, tree.nextToken(allocator_type.lastToken()), indent, start_col, space); // >
+ } else {
+ return renderToken(tree, stream, async_attr.async_token, indent, start_col, space); // async
+ }
+ },
+
+ ast.Node.Id.Suspend => {
+ const suspend_node = @fieldParentPtr(ast.Node.Suspend, "base", base);
+
+ if (suspend_node.body) |body| {
+ try renderToken(tree, stream, suspend_node.suspend_token, indent, start_col, Space.Space);
+ return renderExpression(allocator, stream, tree, indent, start_col, body, space);
+ } else {
+ return renderToken(tree, stream, suspend_node.suspend_token, indent, start_col, space);
+ }
+ },
+
+ ast.Node.Id.InfixOp => {
+ const infix_op_node = @fieldParentPtr(ast.Node.InfixOp, "base", base);
+
+ const op_token = tree.tokens.at(infix_op_node.op_token);
+ const op_space = switch (infix_op_node.op) {
+ ast.Node.InfixOp.Op.Period, ast.Node.InfixOp.Op.ErrorUnion, ast.Node.InfixOp.Op.Range => Space.None,
+ else => Space.Space,
+ };
+ try renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.lhs, op_space);
+
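+            // if the operator ends a line in the source, keep the line break and
+            // indent the rhs one level deeper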
+ const after_op_space = blk: {
+                const loc = tree.tokenLocation(op_token.end, tree.nextToken(infix_op_node.op_token));
+ break :blk if (loc.line == 0) op_space else Space.Newline;
+ };
+
+ try renderToken(tree, stream, infix_op_node.op_token, indent, start_col, after_op_space);
+ if (after_op_space == Space.Newline) {
+ try stream.writeByteNTimes(' ', indent + indent_delta);
+ start_col.* = indent + indent_delta;
+ }
+
+ switch (infix_op_node.op) {
+ ast.Node.InfixOp.Op.Catch => |maybe_payload| if (maybe_payload) |payload| {
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ },
+ else => {},
+ }
+
+ return renderExpression(allocator, stream, tree, indent, start_col, infix_op_node.rhs, space);
+ },
+
+ ast.Node.Id.PrefixOp => {
+ const prefix_op_node = @fieldParentPtr(ast.Node.PrefixOp, "base", base);
+
+ switch (prefix_op_node.op) {
+ ast.Node.PrefixOp.Op.PtrType => |ptr_info| {
+ const star_offset = switch (tree.tokens.at(prefix_op_node.op_token).id) {
+ Token.Id.AsteriskAsterisk => usize(1),
+ else => usize(0),
+ };
+ try renderTokenOffset(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None, star_offset); // *
+ if (ptr_info.align_info) |align_info| {
+ const lparen_token = tree.prevToken(align_info.node.firstToken());
+ const align_token = tree.prevToken(lparen_token);
+
+ try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align
+ try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
+
+ try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None);
+
+ if (align_info.bit_range) |bit_range| {
+ const colon1 = tree.prevToken(bit_range.start.firstToken());
+ const colon2 = tree.prevToken(bit_range.end.firstToken());
+
+ try renderToken(tree, stream, colon1, indent, start_col, Space.None); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None);
+ try renderToken(tree, stream, colon2, indent, start_col, Space.None); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None);
+
+ const rparen_token = tree.nextToken(bit_range.end.lastToken());
+ try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ } else {
+ const rparen_token = tree.nextToken(align_info.node.lastToken());
+ try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ }
+ }
+ if (ptr_info.const_token) |const_token| {
+ try renderToken(tree, stream, const_token, indent, start_col, Space.Space); // const
+ }
+ if (ptr_info.volatile_token) |volatile_token| {
+ try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile
+ }
+ },
+
+ ast.Node.PrefixOp.Op.SliceType => |ptr_info| {
+ try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None); // [
+ try renderToken(tree, stream, tree.nextToken(prefix_op_node.op_token), indent, start_col, Space.None); // ]
+
+ if (ptr_info.align_info) |align_info| {
+ const lparen_token = tree.prevToken(align_info.node.firstToken());
+ const align_token = tree.prevToken(lparen_token);
+
+ try renderToken(tree, stream, align_token, indent, start_col, Space.None); // align
+ try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
+
+ try renderExpression(allocator, stream, tree, indent, start_col, align_info.node, Space.None);
+
+ if (align_info.bit_range) |bit_range| {
+ const colon1 = tree.prevToken(bit_range.start.firstToken());
+ const colon2 = tree.prevToken(bit_range.end.firstToken());
+
+ try renderToken(tree, stream, colon1, indent, start_col, Space.None); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, bit_range.start, Space.None);
+ try renderToken(tree, stream, colon2, indent, start_col, Space.None); // :
+ try renderExpression(allocator, stream, tree, indent, start_col, bit_range.end, Space.None);
+
+ const rparen_token = tree.nextToken(bit_range.end.lastToken());
+ try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ } else {
+ const rparen_token = tree.nextToken(align_info.node.lastToken());
+ try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
+ }
+ }
+ if (ptr_info.const_token) |const_token| {
+ try renderToken(tree, stream, const_token, indent, start_col, Space.Space);
+ }
+ if (ptr_info.volatile_token) |volatile_token| {
+ try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space);
+ }
+ },
+
+ ast.Node.PrefixOp.Op.ArrayType => |array_index| {
+ try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None); // [
+ try renderExpression(allocator, stream, tree, indent, start_col, array_index, Space.None);
+ try renderToken(tree, stream, tree.nextToken(array_index.lastToken()), indent, start_col, Space.None); // ]
+ },
+ ast.Node.PrefixOp.Op.BitNot,
+ ast.Node.PrefixOp.Op.BoolNot,
+ ast.Node.PrefixOp.Op.Negation,
+ ast.Node.PrefixOp.Op.NegationWrap,
+ ast.Node.PrefixOp.Op.OptionalType,
+ ast.Node.PrefixOp.Op.AddressOf,
+ => {
+ try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None);
+ },
+
+ ast.Node.PrefixOp.Op.Try,
+ ast.Node.PrefixOp.Op.Await,
+ ast.Node.PrefixOp.Op.Cancel,
+ ast.Node.PrefixOp.Op.Resume,
+ => {
+ try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.Space);
+ },
+ }
+
+ return renderExpression(allocator, stream, tree, indent, start_col, prefix_op_node.rhs, space);
+ },
+
+ ast.Node.Id.SuffixOp => {
+ const suffix_op = @fieldParentPtr(ast.Node.SuffixOp, "base", base);
+
+ switch (suffix_op.op) {
+ @TagType(ast.Node.SuffixOp.Op).Call => |*call_info| {
+ if (call_info.async_attr) |async_attr| {
+ try renderExpression(allocator, stream, tree, indent, start_col, &async_attr.base, Space.Space);
+ }
+
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+
+ const lparen = tree.nextToken(suffix_op.lhs.lastToken());
+
+ if (call_info.params.len == 0) {
+ try renderToken(tree, stream, lparen, indent, start_col, Space.None);
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ }
+
+ const src_has_trailing_comma = blk: {
+ const maybe_comma = tree.prevToken(suffix_op.rtoken);
+ break :blk tree.tokens.at(maybe_comma).id == Token.Id.Comma;
+ };
+
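+                    // a trailing comma in the source keeps the call wrapped, one
+                    // argument per line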
+ if (src_has_trailing_comma) {
+ const new_indent = indent + indent_delta;
+ try renderToken(tree, stream, lparen, new_indent, start_col, Space.Newline);
+
+ var it = call_info.params.iterator(0);
+ while (true) {
+ const param_node = it.next().?;
+
+ const param_node_new_indent = if (param_node.*.id == ast.Node.Id.MultilineStringLiteral) blk: {
+ break :blk indent;
+ } else blk: {
+ try stream.writeByteNTimes(' ', new_indent);
+ break :blk new_indent;
+ };
+
+ if (it.peek()) |next_node| {
+ try renderExpression(allocator, stream, tree, param_node_new_indent, start_col, param_node.*, Space.None);
+ const comma = tree.nextToken(param_node.*.lastToken());
+ try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); // ,
+ try renderExtraNewline(tree, stream, start_col, next_node.*);
+ } else {
+ try renderExpression(allocator, stream, tree, param_node_new_indent, start_col, param_node.*, Space.Comma);
+ try stream.writeByteNTimes(' ', indent);
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ }
+ }
+ }
+
+ try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+
+ var it = call_info.params.iterator(0);
+ while (it.next()) |param_node| {
+ try renderExpression(allocator, stream, tree, indent, start_col, param_node.*, Space.None);
+
+ if (it.peek() != null) {
+ const comma = tree.nextToken(param_node.*.lastToken());
+ try renderToken(tree, stream, comma, indent, start_col, Space.Space);
+ }
+ }
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ },
+
+ ast.Node.SuffixOp.Op.ArrayAccess => |index_expr| {
+ const lbracket = tree.prevToken(index_expr.firstToken());
+ const rbracket = tree.nextToken(index_expr.lastToken());
+
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+ try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [
+ try renderExpression(allocator, stream, tree, indent, start_col, index_expr, Space.None);
+ return renderToken(tree, stream, rbracket, indent, start_col, space); // ]
+ },
+
+ ast.Node.SuffixOp.Op.Deref, ast.Node.SuffixOp.Op.UnwrapOptional => {
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+ try renderToken(tree, stream, tree.prevToken(suffix_op.rtoken), indent, start_col, Space.None); // .
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // * or ?
+ },
+
+ @TagType(ast.Node.SuffixOp.Op).Slice => |range| {
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+
+ const lbracket = tree.prevToken(range.start.firstToken());
+ const dotdot = tree.nextToken(range.start.lastToken());
+
+ const after_start_space_bool = nodeCausesSliceOpSpace(range.start) or
+ (if (range.end) |end| nodeCausesSliceOpSpace(end) else false);
+ const after_start_space = if (after_start_space_bool) Space.Space else Space.None;
+ const after_op_space = if (range.end != null) after_start_space else Space.None;
+
+ try renderToken(tree, stream, lbracket, indent, start_col, Space.None); // [
+ try renderExpression(allocator, stream, tree, indent, start_col, range.start, after_start_space);
+ try renderToken(tree, stream, dotdot, indent, start_col, after_op_space); // ..
+ if (range.end) |end| {
+ try renderExpression(allocator, stream, tree, indent, start_col, end, Space.None);
+ }
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // ]
+ },
+
+ ast.Node.SuffixOp.Op.StructInitializer => |*field_inits| {
+ const lbrace = tree.nextToken(suffix_op.lhs.lastToken());
+
+ if (field_inits.len == 0) {
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.None);
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ }
+
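+                    // a single field initializer stays on one line, unless it is
+                    // itself a struct initializer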
+ if (field_inits.len == 1) blk: {
+ const field_init = field_inits.at(0).*.cast(ast.Node.FieldInitializer).?;
+
+ if (field_init.expr.cast(ast.Node.SuffixOp)) |nested_suffix_op| {
+ if (nested_suffix_op.op == ast.Node.SuffixOp.Op.StructInitializer) {
+ break :blk;
+ }
+ }
+
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.Space);
+ try renderExpression(allocator, stream, tree, indent, start_col, &field_init.base, Space.Space);
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ }
+
+ const src_has_trailing_comma = blk: {
+ const maybe_comma = tree.prevToken(suffix_op.rtoken);
+ break :blk tree.tokens.at(maybe_comma).id == Token.Id.Comma;
+ };
+
+ const src_same_line = blk: {
+ const loc = tree.tokenLocation(tree.tokens.at(lbrace).end, suffix_op.rtoken);
+ break :blk loc.line == 0;
+ };
+
+ if (!src_has_trailing_comma and src_same_line) {
+ // render all on one line, no trailing comma
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.Space);
+
+ var it = field_inits.iterator(0);
+ while (it.next()) |field_init| {
+ if (it.peek() != null) {
+ try renderExpression(allocator, stream, tree, indent, start_col, field_init.*, Space.None);
+
+ const comma = tree.nextToken(field_init.*.lastToken());
+ try renderToken(tree, stream, comma, indent, start_col, Space.Space);
+ } else {
+ try renderExpression(allocator, stream, tree, indent, start_col, field_init.*, Space.Space);
+ }
+ }
+
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ }
+
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.Newline);
+
+ const new_indent = indent + indent_delta;
+
+ var it = field_inits.iterator(0);
+ while (it.next()) |field_init| {
+ try stream.writeByteNTimes(' ', new_indent);
+
+ if (it.peek()) |next_field_init| {
+ try renderExpression(allocator, stream, tree, new_indent, start_col, field_init.*, Space.None);
+
+ const comma = tree.nextToken(field_init.*.lastToken());
+ try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline);
+
+ try renderExtraNewline(tree, stream, start_col, next_field_init.*);
+ } else {
+ try renderExpression(allocator, stream, tree, new_indent, start_col, field_init.*, Space.Comma);
+ }
+ }
+
+ try stream.writeByteNTimes(' ', indent);
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ },
+
+ ast.Node.SuffixOp.Op.ArrayInitializer => |*exprs| {
+ const lbrace = tree.nextToken(suffix_op.lhs.lastToken());
+
+ if (exprs.len == 0) {
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.None);
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ }
+ if (exprs.len == 1) {
+ const expr = exprs.at(0).*;
+
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.None);
+ try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None);
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ }
+
+ try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
+
+ // scan to find row size
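+                    // (the element count of the first source line; null means the
+                    // whole initializer stays on one line)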
+ const maybe_row_size: ?usize = blk: {
+ var count: usize = 1;
+ var it = exprs.iterator(0);
+ while (true) {
+ const expr = it.next().?.*;
+ if (it.peek()) |next_expr| {
+ const expr_last_token = expr.*.lastToken() + 1;
+ const loc = tree.tokenLocation(tree.tokens.at(expr_last_token).end, next_expr.*.firstToken());
+ if (loc.line != 0) break :blk count;
+ count += 1;
+ } else {
+ const expr_last_token = expr.*.lastToken();
+ const loc = tree.tokenLocation(tree.tokens.at(expr_last_token).end, suffix_op.rtoken);
+ if (loc.line == 0) {
+ // all on one line
+ const src_has_trailing_comma = trailblk: {
+ const maybe_comma = tree.prevToken(suffix_op.rtoken);
+ break :trailblk tree.tokens.at(maybe_comma).id == Token.Id.Comma;
+ };
+ if (src_has_trailing_comma) {
+ break :blk 1; // force row size 1
+ } else {
+ break :blk null; // no newlines
+ }
+ }
+ break :blk count;
+ }
+ }
+ };
+
+ if (maybe_row_size) |row_size| {
+ const new_indent = indent + indent_delta;
+ try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline);
+ try stream.writeByteNTimes(' ', new_indent);
+
+ var it = exprs.iterator(0);
+ var i: usize = 1;
+ while (it.next()) |expr| {
+ if (it.peek()) |next_expr| {
+ try renderExpression(allocator, stream, tree, new_indent, start_col, expr.*, Space.None);
+
+ const comma = tree.nextToken(expr.*.lastToken());
+
+ if (i != row_size) {
+ try renderToken(tree, stream, comma, new_indent, start_col, Space.Space); // ,
+ i += 1;
+ continue;
+ }
+ i = 1;
+
+ try renderToken(tree, stream, comma, new_indent, start_col, Space.Newline); // ,
+
+ try renderExtraNewline(tree, stream, start_col, next_expr.*);
+ try stream.writeByteNTimes(' ', new_indent);
+ } else {
+ try renderExpression(allocator, stream, tree, new_indent, start_col, expr.*, Space.Comma); // ,
+ }
+ }
+ try stream.writeByteNTimes(' ', indent);
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ } else {
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.Space);
+ var it = exprs.iterator(0);
+ while (it.next()) |expr| {
+ if (it.peek()) |next_expr| {
+ try renderExpression(allocator, stream, tree, indent, start_col, expr.*, Space.None);
+ const comma = tree.nextToken(expr.*.lastToken());
+ try renderToken(tree, stream, comma, indent, start_col, Space.Space); // ,
+ } else {
+ try renderExpression(allocator, stream, tree, indent, start_col, expr.*, Space.Space);
+ }
+ }
+
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space);
+ }
+ },
+ }
+ },
+
+ ast.Node.Id.ControlFlowExpression => {
+ const flow_expr = @fieldParentPtr(ast.Node.ControlFlowExpression, "base", base);
+
+ switch (flow_expr.kind) {
+ ast.Node.ControlFlowExpression.Kind.Break => |maybe_label| {
+ if (maybe_label == null and flow_expr.rhs == null) {
+ return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space); // break
+ }
+
+ try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space); // break
+ if (maybe_label) |label| {
+ const colon = tree.nextToken(flow_expr.ltoken);
+ try renderToken(tree, stream, colon, indent, start_col, Space.None); // :
+
+ if (flow_expr.rhs == null) {
+ return renderExpression(allocator, stream, tree, indent, start_col, label, space); // label
+ }
+ try renderExpression(allocator, stream, tree, indent, start_col, label, Space.Space); // label
+ }
+ },
+ ast.Node.ControlFlowExpression.Kind.Continue => |maybe_label| {
+ assert(flow_expr.rhs == null);
+
+ if (maybe_label == null and flow_expr.rhs == null) {
+ return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space); // continue
+ }
+
+ try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space); // continue
+ if (maybe_label) |label| {
+ const colon = tree.nextToken(flow_expr.ltoken);
+ try renderToken(tree, stream, colon, indent, start_col, Space.None); // :
+
+ return renderExpression(allocator, stream, tree, indent, start_col, label, space);
+ }
+ },
+ ast.Node.ControlFlowExpression.Kind.Return => {
+ if (flow_expr.rhs == null) {
+ return renderToken(tree, stream, flow_expr.ltoken, indent, start_col, space);
+ }
+ try renderToken(tree, stream, flow_expr.ltoken, indent, start_col, Space.Space);
+ },
+ }
+
+ return renderExpression(allocator, stream, tree, indent, start_col, flow_expr.rhs.?, space);
+ },
+
+ ast.Node.Id.Payload => {
+ const payload = @fieldParentPtr(ast.Node.Payload, "base", base);
+
+ try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None);
+ try renderExpression(allocator, stream, tree, indent, start_col, payload.error_symbol, Space.None);
+ return renderToken(tree, stream, payload.rpipe, indent, start_col, space);
+ },
+
+ ast.Node.Id.PointerPayload => {
+ const payload = @fieldParentPtr(ast.Node.PointerPayload, "base", base);
+
+ try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None);
+ if (payload.ptr_token) |ptr_token| {
+ try renderToken(tree, stream, ptr_token, indent, start_col, Space.None);
+ }
+ try renderExpression(allocator, stream, tree, indent, start_col, payload.value_symbol, Space.None);
+ return renderToken(tree, stream, payload.rpipe, indent, start_col, space);
+ },
+
+ ast.Node.Id.PointerIndexPayload => {
+ const payload = @fieldParentPtr(ast.Node.PointerIndexPayload, "base", base);
+
+ try renderToken(tree, stream, payload.lpipe, indent, start_col, Space.None);
+ if (payload.ptr_token) |ptr_token| {
+ try renderToken(tree, stream, ptr_token, indent, start_col, Space.None);
+ }
+ try renderExpression(allocator, stream, tree, indent, start_col, payload.value_symbol, Space.None);
+
+ if (payload.index_symbol) |index_symbol| {
+ const comma = tree.nextToken(payload.value_symbol.lastToken());
+
+ try renderToken(tree, stream, comma, indent, start_col, Space.Space);
+ try renderExpression(allocator, stream, tree, indent, start_col, index_symbol, Space.None);
+ }
+
+ return renderToken(tree, stream, payload.rpipe, indent, start_col, space);
+ },
+
+ ast.Node.Id.GroupedExpression => {
+ const grouped_expr = @fieldParentPtr(ast.Node.GroupedExpression, "base", base);
+
+ try renderToken(tree, stream, grouped_expr.lparen, indent, start_col, Space.None);
+ try renderExpression(allocator, stream, tree, indent, start_col, grouped_expr.expr, Space.None);
+ return renderToken(tree, stream, grouped_expr.rparen, indent, start_col, space);
+ },
+
+ ast.Node.Id.FieldInitializer => {
+ const field_init = @fieldParentPtr(ast.Node.FieldInitializer, "base", base);
+
+ try renderToken(tree, stream, field_init.period_token, indent, start_col, Space.None); // .
+ try renderToken(tree, stream, field_init.name_token, indent, start_col, Space.Space); // name
+ try renderToken(tree, stream, tree.nextToken(field_init.name_token), indent, start_col, Space.Space); // =
+ return renderExpression(allocator, stream, tree, indent, start_col, field_init.expr, space);
+ },
+
+ ast.Node.Id.IntegerLiteral => {
+ const integer_literal = @fieldParentPtr(ast.Node.IntegerLiteral, "base", base);
+ return renderToken(tree, stream, integer_literal.token, indent, start_col, space);
+ },
+ ast.Node.Id.FloatLiteral => {
+ const float_literal = @fieldParentPtr(ast.Node.FloatLiteral, "base", base);
+ return renderToken(tree, stream, float_literal.token, indent, start_col, space);
+ },
+ ast.Node.Id.StringLiteral => {
+ const string_literal = @fieldParentPtr(ast.Node.StringLiteral, "base", base);
+ return renderToken(tree, stream, string_literal.token, indent, start_col, space);
+ },
+ ast.Node.Id.CharLiteral => {
+ const char_literal = @fieldParentPtr(ast.Node.CharLiteral, "base", base);
+ return renderToken(tree, stream, char_literal.token, indent, start_col, space);
+ },
+ ast.Node.Id.BoolLiteral => {
+            const bool_literal = @fieldParentPtr(ast.Node.BoolLiteral, "base", base);
+ return renderToken(tree, stream, bool_literal.token, indent, start_col, space);
+ },
+ ast.Node.Id.NullLiteral => {
+ const null_literal = @fieldParentPtr(ast.Node.NullLiteral, "base", base);
+ return renderToken(tree, stream, null_literal.token, indent, start_col, space);
+ },
+ ast.Node.Id.ThisLiteral => {
+ const this_literal = @fieldParentPtr(ast.Node.ThisLiteral, "base", base);
+ return renderToken(tree, stream, this_literal.token, indent, start_col, space);
+ },
+ ast.Node.Id.Unreachable => {
+ const unreachable_node = @fieldParentPtr(ast.Node.Unreachable, "base", base);
+ return renderToken(tree, stream, unreachable_node.token, indent, start_col, space);
+ },
+ ast.Node.Id.ErrorType => {
+ const error_type = @fieldParentPtr(ast.Node.ErrorType, "base", base);
+ return renderToken(tree, stream, error_type.token, indent, start_col, space);
+ },
+ ast.Node.Id.VarType => {
+ const var_type = @fieldParentPtr(ast.Node.VarType, "base", base);
+ return renderToken(tree, stream, var_type.token, indent, start_col, space);
+ },
+ ast.Node.Id.ContainerDecl => {
+ const container_decl = @fieldParentPtr(ast.Node.ContainerDecl, "base", base);
+
+ if (container_decl.layout_token) |layout_token| {
+ try renderToken(tree, stream, layout_token, indent, start_col, Space.Space);
+ }
+
+ switch (container_decl.init_arg_expr) {
+ ast.Node.ContainerDecl.InitArg.None => {
+ try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.Space); // union
+ },
+ ast.Node.ContainerDecl.InitArg.Enum => |enum_tag_type| {
+ try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.None); // union
+
+ const lparen = tree.nextToken(container_decl.kind_token);
+ const enum_token = tree.nextToken(lparen);
+
+ try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+ try renderToken(tree, stream, enum_token, indent, start_col, Space.None); // enum
+
+ if (enum_tag_type) |expr| {
+ try renderToken(tree, stream, tree.nextToken(enum_token), indent, start_col, Space.None); // (
+ try renderExpression(allocator, stream, tree, indent, start_col, expr, Space.None);
+
+ const rparen = tree.nextToken(expr.lastToken());
+ try renderToken(tree, stream, rparen, indent, start_col, Space.None); // )
+ try renderToken(tree, stream, tree.nextToken(rparen), indent, start_col, Space.Space); // )
+ } else {
+ try renderToken(tree, stream, tree.nextToken(enum_token), indent, start_col, Space.Space); // )
+ }
+ },
+ ast.Node.ContainerDecl.InitArg.Type => |type_expr| {
+ try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.None); // union
+
+ const lparen = tree.nextToken(container_decl.kind_token);
+ const rparen = tree.nextToken(type_expr.lastToken());
+
+ try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+ try renderExpression(allocator, stream, tree, indent, start_col, type_expr, Space.None);
+ try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
+ },
+ }
+
+ if (container_decl.fields_and_decls.len == 0) {
+ try renderToken(tree, stream, container_decl.lbrace_token, indent + indent_delta, start_col, Space.None); // {
+ return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // }
+ } else {
+ const new_indent = indent + indent_delta;
+ try renderToken(tree, stream, container_decl.lbrace_token, new_indent, start_col, Space.Newline); // {
+
+ var it = container_decl.fields_and_decls.iterator(0);
+ while (it.next()) |decl| {
+ try stream.writeByteNTimes(' ', new_indent);
+ try renderTopLevelDecl(allocator, stream, tree, new_indent, start_col, decl.*);
+
+ if (it.peek()) |next_decl| {
+ try renderExtraNewline(tree, stream, start_col, next_decl.*);
+ }
+ }
+
+ try stream.writeByteNTimes(' ', indent);
+ return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // }
+ }
+ },
+
+ ast.Node.Id.ErrorSetDecl => {
+ const err_set_decl = @fieldParentPtr(ast.Node.ErrorSetDecl, "base", base);
+
+ const lbrace = tree.nextToken(err_set_decl.error_token);
+
+ if (err_set_decl.decls.len == 0) {
+ try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None);
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.None);
+ return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space);
+ }
+
+ if (err_set_decl.decls.len == 1) blk: {
+ const node = err_set_decl.decls.at(0).*;
+
+                // if there are any doc comments or same-line comments,
+                // don't try to put it all on one line
+ if (node.cast(ast.Node.ErrorTag)) |tag| {
+ if (tag.doc_comments != null) break :blk;
+ } else {
+ break :blk;
+ }
+
+ try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None); // error
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.None); // {
+ try renderExpression(allocator, stream, tree, indent, start_col, node, Space.None);
+ return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); // }
+ }
+
+ try renderToken(tree, stream, err_set_decl.error_token, indent, start_col, Space.None); // error
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.Newline); // {
+ const new_indent = indent + indent_delta;
+
+ var it = err_set_decl.decls.iterator(0);
+ while (it.next()) |node| {
+ try stream.writeByteNTimes(' ', new_indent);
+
+ if (it.peek()) |next_node| {
+ try renderExpression(allocator, stream, tree, new_indent, start_col, node.*, Space.None);
+ try renderToken(tree, stream, tree.nextToken(node.*.lastToken()), new_indent, start_col, Space.Newline); // ,
+
+ try renderExtraNewline(tree, stream, start_col, next_node.*);
+ } else {
+ try renderExpression(allocator, stream, tree, new_indent, start_col, node.*, Space.Comma);
+ }
+ }
+
+ try stream.writeByteNTimes(' ', indent);
+ return renderToken(tree, stream, err_set_decl.rbrace_token, indent, start_col, space); // }
+ },
+
+ ast.Node.Id.ErrorTag => {
+ const tag = @fieldParentPtr(ast.Node.ErrorTag, "base", base);
+
+ try renderDocComments(tree, stream, tag, indent, start_col);
+ return renderToken(tree, stream, tag.name_token, indent, start_col, space); // name
+ },
+
+ ast.Node.Id.MultilineStringLiteral => {
+ const multiline_str_literal = @fieldParentPtr(ast.Node.MultilineStringLiteral, "base", base);
+
+ var skip_first_indent = true;
+ if (tree.tokens.at(multiline_str_literal.firstToken() - 1).id != Token.Id.LineComment) {
+ try stream.print("\n");
+ skip_first_indent = false;
+ }
+
+ var i: usize = 0;
+ while (i < multiline_str_literal.lines.len) : (i += 1) {
+ const t = multiline_str_literal.lines.at(i).*;
+ if (!skip_first_indent) {
+ try stream.writeByteNTimes(' ', indent + indent_delta);
+ }
+ try renderToken(tree, stream, t, indent, start_col, Space.None);
+ skip_first_indent = false;
+ }
+ try stream.writeByteNTimes(' ', indent);
+ },
+ ast.Node.Id.UndefinedLiteral => {
+ const undefined_literal = @fieldParentPtr(ast.Node.UndefinedLiteral, "base", base);
+ return renderToken(tree, stream, undefined_literal.token, indent, start_col, space);
+ },
+
+ ast.Node.Id.BuiltinCall => {
+ const builtin_call = @fieldParentPtr(ast.Node.BuiltinCall, "base", base);
+
+ try renderToken(tree, stream, builtin_call.builtin_token, indent, start_col, Space.None); // @name
+ try renderToken(tree, stream, tree.nextToken(builtin_call.builtin_token), indent, start_col, Space.None); // (
+
+ var it = builtin_call.params.iterator(0);
+ while (it.next()) |param_node| {
+ try renderExpression(allocator, stream, tree, indent, start_col, param_node.*, Space.None);
+
+ if (it.peek() != null) {
+ const comma_token = tree.nextToken(param_node.*.lastToken());
+ try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // ,
+ }
+ }
+ return renderToken(tree, stream, builtin_call.rparen_token, indent, start_col, space); // )
+ },
+
+ ast.Node.Id.FnProto => {
+ const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", base);
+
+ if (fn_proto.visib_token) |visib_token_index| {
+ const visib_token = tree.tokens.at(visib_token_index);
+ assert(visib_token.id == Token.Id.Keyword_pub or visib_token.id == Token.Id.Keyword_export);
+
+                try renderToken(tree, stream, visib_token_index, indent, start_col, Space.Space); // pub or export
+ }
+
+ if (fn_proto.extern_export_inline_token) |extern_export_inline_token| {
+ try renderToken(tree, stream, extern_export_inline_token, indent, start_col, Space.Space); // extern/export
+ }
+
+ if (fn_proto.lib_name) |lib_name| {
+ try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space);
+ }
+
+ if (fn_proto.cc_token) |cc_token| {
+ try renderToken(tree, stream, cc_token, indent, start_col, Space.Space); // stdcallcc
+ }
+
+ if (fn_proto.async_attr) |async_attr| {
+ try renderExpression(allocator, stream, tree, indent, start_col, &async_attr.base, Space.Space);
+ }
+
+ const lparen = if (fn_proto.name_token) |name_token| blk: {
+ try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn
+ try renderToken(tree, stream, name_token, indent, start_col, Space.None); // name
+ break :blk tree.nextToken(name_token);
+ } else blk: {
+ try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn
+ break :blk tree.nextToken(fn_proto.fn_token);
+ };
+
+ const rparen = tree.prevToken(switch (fn_proto.return_type) {
+ ast.Node.FnProto.ReturnType.Explicit => |node| node.firstToken(),
+ ast.Node.FnProto.ReturnType.InferErrorSet => |node| tree.prevToken(node.firstToken()),
+ });
+
+ const src_params_trailing_comma = blk: {
+ const maybe_comma = tree.prevToken(rparen);
+ break :blk tree.tokens.at(maybe_comma).id == Token.Id.Comma;
+ };
+ const src_params_same_line = blk: {
+ const loc = tree.tokenLocation(tree.tokens.at(lparen).end, rparen);
+ break :blk loc.line == 0;
+ };
+
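+            // parameters stay on one line unless the source wrapped them or used a
+            // trailing comma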
+ if (!src_params_trailing_comma and src_params_same_line) {
+ try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+
+ // render all on one line, no trailing comma
+ var it = fn_proto.params.iterator(0);
+ while (it.next()) |param_decl_node| {
+ try renderParamDecl(allocator, stream, tree, indent, start_col, param_decl_node.*, Space.None);
+
+ if (it.peek() != null) {
+ const comma = tree.nextToken(param_decl_node.*.lastToken());
+ try renderToken(tree, stream, comma, indent, start_col, Space.Space); // ,
+ }
+ }
+ } else {
+ // one param per line
+ const new_indent = indent + indent_delta;
+ try renderToken(tree, stream, lparen, new_indent, start_col, Space.Newline); // (
+
+ var it = fn_proto.params.iterator(0);
+ while (it.next()) |param_decl_node| {
+ try stream.writeByteNTimes(' ', new_indent);
+ try renderParamDecl(allocator, stream, tree, indent, start_col, param_decl_node.*, Space.Comma);
+ }
+ try stream.writeByteNTimes(' ', indent);
+ }
+
+ try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
+
+ if (fn_proto.align_expr) |align_expr| {
+ const align_rparen = tree.nextToken(align_expr.lastToken());
+ const align_lparen = tree.prevToken(align_expr.firstToken());
+ const align_kw = tree.prevToken(align_lparen);
+
+ try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
+ try renderToken(tree, stream, align_lparen, indent, start_col, Space.None); // (
+ try renderExpression(allocator, stream, tree, indent, start_col, align_expr, Space.None);
+ try renderToken(tree, stream, align_rparen, indent, start_col, Space.Space); // )
+ }
+
+ switch (fn_proto.return_type) {
+ ast.Node.FnProto.ReturnType.Explicit => |node| {
+ return renderExpression(allocator, stream, tree, indent, start_col, node, space);
+ },
+ ast.Node.FnProto.ReturnType.InferErrorSet => |node| {
+ try renderToken(tree, stream, tree.prevToken(node.firstToken()), indent, start_col, Space.None); // !
+ return renderExpression(allocator, stream, tree, indent, start_col, node, space);
+ },
+ }
+ },
+
+ ast.Node.Id.PromiseType => {
+ const promise_type = @fieldParentPtr(ast.Node.PromiseType, "base", base);
+
+ if (promise_type.result) |result| {
+ try renderToken(tree, stream, promise_type.promise_token, indent, start_col, Space.None); // promise
+ try renderToken(tree, stream, result.arrow_token, indent, start_col, Space.None); // ->
+ return renderExpression(allocator, stream, tree, indent, start_col, result.return_type, space);
+ } else {
+ return renderToken(tree, stream, promise_type.promise_token, indent, start_col, space); // promise
+ }
+ },
+
+ ast.Node.Id.DocComment => unreachable, // doc comments are attached to nodes
+
+ ast.Node.Id.Switch => {
+ const switch_node = @fieldParentPtr(ast.Node.Switch, "base", base);
+
+ try renderToken(tree, stream, switch_node.switch_token, indent, start_col, Space.Space); // switch
+ try renderToken(tree, stream, tree.nextToken(switch_node.switch_token), indent, start_col, Space.None); // (
+
+ const rparen = tree.nextToken(switch_node.expr.lastToken());
+ const lbrace = tree.nextToken(rparen);
+
+ if (switch_node.cases.len == 0) {
+ try renderExpression(allocator, stream, tree, indent, start_col, switch_node.expr, Space.None);
+ try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
+ try renderToken(tree, stream, lbrace, indent, start_col, Space.None); // {
+ return renderToken(tree, stream, switch_node.rbrace, indent, start_col, space); // }
+ }
+
+ try renderExpression(allocator, stream, tree, indent, start_col, switch_node.expr, Space.None);
+
+ const new_indent = indent + indent_delta;
+
+ try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
+ try renderToken(tree, stream, lbrace, new_indent, start_col, Space.Newline); // {
+
+ var it = switch_node.cases.iterator(0);
+ while (it.next()) |node| {
+ try stream.writeByteNTimes(' ', new_indent);
+ try renderExpression(allocator, stream, tree, new_indent, start_col, node.*, Space.Comma);
+
+ if (it.peek()) |next_node| {
+ try renderExtraNewline(tree, stream, start_col, next_node.*);
+ }
+ }
+
+ try stream.writeByteNTimes(' ', indent);
+ return renderToken(tree, stream, switch_node.rbrace, indent, start_col, space); // }
+ },
+
+ ast.Node.Id.SwitchCase => {
+ const switch_case = @fieldParentPtr(ast.Node.SwitchCase, "base", base);
+
+ assert(switch_case.items.len != 0);
+ const src_has_trailing_comma = blk: {
+ const last_node = switch_case.items.at(switch_case.items.len - 1).*;
+ const maybe_comma = tree.nextToken(last_node.lastToken());
+ break :blk tree.tokens.at(maybe_comma).id == Token.Id.Comma;
+ };
+
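+            // multiple items with a trailing comma render one per line; otherwise
+            // all items share the arrow's line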
+ if (switch_case.items.len == 1 or !src_has_trailing_comma) {
+ var it = switch_case.items.iterator(0);
+ while (it.next()) |node| {
+ if (it.peek()) |next_node| {
+ try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.None);
+
+ const comma_token = tree.nextToken(node.*.lastToken());
+ try renderToken(tree, stream, comma_token, indent, start_col, Space.Space); // ,
+ try renderExtraNewline(tree, stream, start_col, next_node.*);
+ } else {
+ try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.Space);
+ }
+ }
+ } else {
+ var it = switch_case.items.iterator(0);
+ while (true) {
+ const node = it.next().?;
+ if (it.peek()) |next_node| {
+ try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.None);
+
+ const comma_token = tree.nextToken(node.*.lastToken());
+ try renderToken(tree, stream, comma_token, indent, start_col, Space.Newline); // ,
+ try renderExtraNewline(tree, stream, start_col, next_node.*);
+ try stream.writeByteNTimes(' ', indent);
+ } else {
+ try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.Comma);
+ try stream.writeByteNTimes(' ', indent);
+ break;
+ }
+ }
+ }
+
+ try renderToken(tree, stream, switch_case.arrow_token, indent, start_col, Space.Space); // =>
+
+ if (switch_case.payload) |payload| {
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ }
+
+ return renderExpression(allocator, stream, tree, indent, start_col, switch_case.expr, space);
+ },
+ ast.Node.Id.SwitchElse => {
+ const switch_else = @fieldParentPtr(ast.Node.SwitchElse, "base", base);
+ return renderToken(tree, stream, switch_else.token, indent, start_col, space);
+ },
+ ast.Node.Id.Else => {
+ const else_node = @fieldParentPtr(ast.Node.Else, "base", base);
+
+ const body_is_block = nodeIsBlock(else_node.body);
+ const same_line = body_is_block or tree.tokensOnSameLine(else_node.else_token, else_node.body.lastToken());
+
+ const after_else_space = if (same_line or else_node.payload != null) Space.Space else Space.Newline;
+ try renderToken(tree, stream, else_node.else_token, indent, start_col, after_else_space);
+
+ if (else_node.payload) |payload| {
+ const payload_space = if (same_line) Space.Space else Space.Newline;
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, payload_space);
+ }
+
+ if (same_line) {
+ return renderExpression(allocator, stream, tree, indent, start_col, else_node.body, space);
+ }
+
+ try stream.writeByteNTimes(' ', indent + indent_delta);
+ start_col.* = indent + indent_delta;
+ return renderExpression(allocator, stream, tree, indent, start_col, else_node.body, space);
+ },
+
+ ast.Node.Id.While => {
+ const while_node = @fieldParentPtr(ast.Node.While, "base", base);
+
+ if (while_node.label) |label| {
+ try renderToken(tree, stream, label, indent, start_col, Space.None); // label
+ try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space); // :
+ }
+
+ if (while_node.inline_token) |inline_token| {
+ try renderToken(tree, stream, inline_token, indent, start_col, Space.Space); // inline
+ }
+
+ try renderToken(tree, stream, while_node.while_token, indent, start_col, Space.Space); // while
+ try renderToken(tree, stream, tree.nextToken(while_node.while_token), indent, start_col, Space.None); // (
+ try renderExpression(allocator, stream, tree, indent, start_col, while_node.condition, Space.None);
+
+ const cond_rparen = tree.nextToken(while_node.condition.lastToken());
+
+ const body_is_block = nodeIsBlock(while_node.body);
+
+ var block_start_space: Space = undefined;
+ var after_body_space: Space = undefined;
+
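+            // blocks open on the same line, single-line bodies stay inline, and
+            // anything else wraps onto its own indented line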
+ if (body_is_block) {
+ block_start_space = Space.BlockStart;
+ after_body_space = if (while_node.@"else" == null) space else Space.SpaceOrOutdent;
+ } else if (tree.tokensOnSameLine(cond_rparen, while_node.body.lastToken())) {
+ block_start_space = Space.Space;
+ after_body_space = if (while_node.@"else" == null) space else Space.Space;
+ } else {
+ block_start_space = Space.Newline;
+ after_body_space = if (while_node.@"else" == null) space else Space.Newline;
+ }
+
+ {
+ const rparen_space = if (while_node.payload != null or while_node.continue_expr != null) Space.Space else block_start_space;
+ try renderToken(tree, stream, cond_rparen, indent, start_col, rparen_space); // )
+ }
+
+ if (while_node.payload) |payload| {
+ const payload_space = if (while_node.continue_expr != null) Space.Space else block_start_space;
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, payload_space);
+ }
+
+ if (while_node.continue_expr) |continue_expr| {
+ const rparen = tree.nextToken(continue_expr.lastToken());
+ const lparen = tree.prevToken(continue_expr.firstToken());
+ const colon = tree.prevToken(lparen);
+
+ try renderToken(tree, stream, colon, indent, start_col, Space.Space); // :
+ try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+
+ try renderExpression(allocator, stream, tree, indent, start_col, continue_expr, Space.None);
+
+ try renderToken(tree, stream, rparen, indent, start_col, block_start_space); // )
+ }
+
+ var new_indent = indent;
+ if (block_start_space == Space.Newline) {
+ new_indent += indent_delta;
+ try stream.writeByteNTimes(' ', new_indent);
+ start_col.* = new_indent;
+ }
+
+ try renderExpression(allocator, stream, tree, indent, start_col, while_node.body, after_body_space);
+
+ if (while_node.@"else") |@"else"| {
+ if (after_body_space == Space.Newline) {
+ try stream.writeByteNTimes(' ', indent);
+ start_col.* = indent;
+ }
+ return renderExpression(allocator, stream, tree, indent, start_col, &@"else".base, space);
+ }
+ },
+
+ ast.Node.Id.For => {
+ const for_node = @fieldParentPtr(ast.Node.For, "base", base);
+
+ if (for_node.label) |label| {
+ try renderToken(tree, stream, label, indent, start_col, Space.None); // label
+ try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space); // :
+ }
+
+ if (for_node.inline_token) |inline_token| {
+ try renderToken(tree, stream, inline_token, indent, start_col, Space.Space); // inline
+ }
+
+ try renderToken(tree, stream, for_node.for_token, indent, start_col, Space.Space); // for
+ try renderToken(tree, stream, tree.nextToken(for_node.for_token), indent, start_col, Space.None); // (
+ try renderExpression(allocator, stream, tree, indent, start_col, for_node.array_expr, Space.None);
+
+ const rparen = tree.nextToken(for_node.array_expr.lastToken());
+ const rparen_space = if (for_node.payload != null or
+ for_node.body.id == ast.Node.Id.Block) Space.Space else Space.Newline;
+ try renderToken(tree, stream, rparen, indent, start_col, rparen_space); // )
+
+ if (for_node.payload) |payload| {
+ const payload_space = if (for_node.body.id == ast.Node.Id.Block) Space.Space else Space.Newline;
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, payload_space);
+ }
+
+ const body_space = blk: {
+ if (for_node.@"else" != null) {
+ if (for_node.body.id == ast.Node.Id.Block) {
+ break :blk Space.Space;
+ } else {
+ break :blk Space.Newline;
+ }
+ } else {
+ break :blk space;
+ }
+ };
+ if (for_node.body.id == ast.Node.Id.Block) {
+ try renderExpression(allocator, stream, tree, indent, start_col, for_node.body, body_space);
+ } else {
+ try stream.writeByteNTimes(' ', indent + indent_delta);
+ try renderExpression(allocator, stream, tree, indent, start_col, for_node.body, body_space);
+ }
+
+ if (for_node.@"else") |@"else"| {
+ if (for_node.body.id != ast.Node.Id.Block) {
+ try stream.writeByteNTimes(' ', indent);
+ }
+
+ return renderExpression(allocator, stream, tree, indent, start_col, &@"else".base, space);
+ }
+ },
+
+ ast.Node.Id.If => {
+ const if_node = @fieldParentPtr(ast.Node.If, "base", base);
+
+ const lparen = tree.prevToken(if_node.condition.firstToken());
+ const rparen = tree.nextToken(if_node.condition.lastToken());
+
+ try renderToken(tree, stream, if_node.if_token, indent, start_col, Space.Space); // if
+ try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+
+ try renderExpression(allocator, stream, tree, indent, start_col, if_node.condition, Space.None); // condition
+
+ const body_is_block = nodeIsBlock(if_node.body);
+
+ if (body_is_block) {
+ const after_rparen_space = if (if_node.payload == null) Space.BlockStart else Space.Space;
+ try renderToken(tree, stream, rparen, indent, start_col, after_rparen_space); // )
+
+ if (if_node.payload) |payload| {
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.BlockStart); // |x|
+ }
+
+ if (if_node.@"else") |@"else"| {
+ try renderExpression(allocator, stream, tree, indent, start_col, if_node.body, Space.SpaceOrOutdent);
+ return renderExpression(allocator, stream, tree, indent, start_col, &@"else".base, space);
+ } else {
+ return renderExpression(allocator, stream, tree, indent, start_col, if_node.body, space);
+ }
+ }
+
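+            // non-block body: mirror whether the source kept it on the same line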
+ const src_has_newline = !tree.tokensOnSameLine(rparen, if_node.body.lastToken());
+
+ if (src_has_newline) {
+ const after_rparen_space = if (if_node.payload == null) Space.Newline else Space.Space;
+ try renderToken(tree, stream, rparen, indent, start_col, after_rparen_space); // )
+
+ if (if_node.payload) |payload| {
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Newline);
+ }
+
+ const new_indent = indent + indent_delta;
+ try stream.writeByteNTimes(' ', new_indent);
+
+ if (if_node.@"else") |@"else"| {
+ const else_is_block = nodeIsBlock(@"else".body);
+ try renderExpression(allocator, stream, tree, new_indent, start_col, if_node.body, Space.Newline);
+ try stream.writeByteNTimes(' ', indent);
+
+ if (else_is_block) {
+ try renderToken(tree, stream, @"else".else_token, indent, start_col, Space.Space); // else
+
+ if (@"else".payload) |payload| {
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ }
+
+ return renderExpression(allocator, stream, tree, indent, start_col, @"else".body, space);
+ } else {
+ const after_else_space = if (@"else".payload == null) Space.Newline else Space.Space;
+ try renderToken(tree, stream, @"else".else_token, indent, start_col, after_else_space); // else
+
+ if (@"else".payload) |payload| {
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Newline);
+ }
+ try stream.writeByteNTimes(' ', new_indent);
+
+ return renderExpression(allocator, stream, tree, new_indent, start_col, @"else".body, space);
+ }
+ } else {
+ return renderExpression(allocator, stream, tree, new_indent, start_col, if_node.body, space);
+ }
+ }
+
+ try renderToken(tree, stream, rparen, indent, start_col, Space.Space); // )
+
+ if (if_node.payload) |payload| {
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ }
+
+ if (if_node.@"else") |@"else"| {
+ try renderExpression(allocator, stream, tree, indent, start_col, if_node.body, Space.Space);
+ try renderToken(tree, stream, @"else".else_token, indent, start_col, Space.Space);
+
+ if (@"else".payload) |payload| {
+ try renderExpression(allocator, stream, tree, indent, start_col, payload, Space.Space);
+ }
+
+ return renderExpression(allocator, stream, tree, indent, start_col, @"else".body, space);
+ } else {
+ return renderExpression(allocator, stream, tree, indent, start_col, if_node.body, space);
+ }
+ },
+
+ ast.Node.Id.Asm => {
+ const asm_node = @fieldParentPtr(ast.Node.Asm, "base", base);
+
+ try renderToken(tree, stream, asm_node.asm_token, indent, start_col, Space.Space); // asm
+
+ if (asm_node.volatile_token) |volatile_token| {
+ try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile
+ try renderToken(tree, stream, tree.nextToken(volatile_token), indent, start_col, Space.None); // (
+ } else {
+ try renderToken(tree, stream, tree.nextToken(asm_node.asm_token), indent, start_col, Space.None); // (
+ }
+
+ if (asm_node.outputs.len == 0 and asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) {
+ try renderExpression(allocator, stream, tree, indent, start_col, asm_node.template, Space.None);
+ return renderToken(tree, stream, asm_node.rparen, indent, start_col, space);
+ }
+
+ try renderExpression(allocator, stream, tree, indent, start_col, asm_node.template, Space.Newline);
+
+ const indent_once = indent + indent_delta;
+ try stream.writeByteNTimes(' ', indent_once);
+
+ const colon1 = tree.nextToken(asm_node.template.lastToken());
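+            // output/input operands get two extra columns of indentation, matching
+            // the width of the leading ": "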
+ const indent_extra = indent_once + 2;
+
+ const colon2 = if (asm_node.outputs.len == 0) blk: {
+ try renderToken(tree, stream, colon1, indent, start_col, Space.Newline); // :
+ try stream.writeByteNTimes(' ', indent_once);
+
+ break :blk tree.nextToken(colon1);
+ } else blk: {
+ try renderToken(tree, stream, colon1, indent, start_col, Space.Space); // :
+
+ var it = asm_node.outputs.iterator(0);
+ while (true) {
+ const asm_output = it.next().?;
+ const node = &(asm_output.*).base;
+
+ if (it.peek()) |next_asm_output| {
+ try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.None);
+ const next_node = &(next_asm_output.*).base;
+
+ const comma = tree.prevToken(next_asm_output.*.firstToken());
+ try renderToken(tree, stream, comma, indent_extra, start_col, Space.Newline); // ,
+ try renderExtraNewline(tree, stream, start_col, next_node);
+
+ try stream.writeByteNTimes(' ', indent_extra);
+ } else if (asm_node.inputs.len == 0 and asm_node.clobbers.len == 0) {
+ try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.Newline);
+ try stream.writeByteNTimes(' ', indent);
+ return renderToken(tree, stream, asm_node.rparen, indent, start_col, space);
+ } else {
+ try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.Newline);
+ try stream.writeByteNTimes(' ', indent_once);
+ const comma_or_colon = tree.nextToken(node.lastToken());
+ break :blk switch (tree.tokens.at(comma_or_colon).id) {
+ Token.Id.Comma => tree.nextToken(comma_or_colon),
+ else => comma_or_colon,
+ };
+ }
+ }
+ };
+
+ const colon3 = if (asm_node.inputs.len == 0) blk: {
+ try renderToken(tree, stream, colon2, indent, start_col, Space.Newline); // :
+ try stream.writeByteNTimes(' ', indent_once);
+
+ break :blk tree.nextToken(colon2);
+ } else blk: {
+ try renderToken(tree, stream, colon2, indent, start_col, Space.Space); // :
+
+ var it = asm_node.inputs.iterator(0);
+ while (true) {
+ const asm_input = it.next().?;
+ const node = &(asm_input.*).base;
+
+ if (it.peek()) |next_asm_input| {
+ try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.None);
+ const next_node = &(next_asm_input.*).base;
+
+ const comma = tree.prevToken(next_asm_input.*.firstToken());
+ try renderToken(tree, stream, comma, indent_extra, start_col, Space.Newline); // ,
+ try renderExtraNewline(tree, stream, start_col, next_node);
+
+ try stream.writeByteNTimes(' ', indent_extra);
+ } else if (asm_node.clobbers.len == 0) {
+ try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.Newline);
+ try stream.writeByteNTimes(' ', indent);
+ return renderToken(tree, stream, asm_node.rparen, indent, start_col, space); // )
+ } else {
+ try renderExpression(allocator, stream, tree, indent_extra, start_col, node, Space.Newline);
+ try stream.writeByteNTimes(' ', indent_once);
+ const comma_or_colon = tree.nextToken(node.lastToken());
+ break :blk switch (tree.tokens.at(comma_or_colon).id) {
+ Token.Id.Comma => tree.nextToken(comma_or_colon),
+ else => comma_or_colon,
+ };
+ }
+ }
+ };
+
+ try renderToken(tree, stream, colon3, indent, start_col, Space.Space); // :
+
+ var it = asm_node.clobbers.iterator(0);
+ while (true) {
+ const clobber_token = it.next().?;
+
+ if (it.peek() == null) {
+ try renderToken(tree, stream, clobber_token.*, indent_once, start_col, Space.Newline);
+ try stream.writeByteNTimes(' ', indent);
+ return renderToken(tree, stream, asm_node.rparen, indent, start_col, space);
+ } else {
+ try renderToken(tree, stream, clobber_token.*, indent_once, start_col, Space.None);
+ const comma = tree.nextToken(clobber_token.*);
+ try renderToken(tree, stream, comma, indent_once, start_col, Space.Space); // ,
+ }
+ }
+ },
+
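+ // An asm input is rendered as: [symbolic_name] "constraint" (expression)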
+ ast.Node.Id.AsmInput => {
+ const asm_input = @fieldParentPtr(ast.Node.AsmInput, "base", base);
+
+ try stream.write("[");
+ try renderExpression(allocator, stream, tree, indent, start_col, asm_input.symbolic_name, Space.None);
+ try stream.write("] ");
+ try renderExpression(allocator, stream, tree, indent, start_col, asm_input.constraint, Space.None);
+ try stream.write(" (");
+ try renderExpression(allocator, stream, tree, indent, start_col, asm_input.expr, Space.None);
+ return renderToken(tree, stream, asm_input.lastToken(), indent, start_col, space); // )
+ },
+
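+ // An asm output is rendered as: [symbolic_name] "constraint" (variable or -> return_type)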
+ ast.Node.Id.AsmOutput => {
+ const asm_output = @fieldParentPtr(ast.Node.AsmOutput, "base", base);
+
+ try stream.write("[");
+ try renderExpression(allocator, stream, tree, indent, start_col, asm_output.symbolic_name, Space.None);
+ try stream.write("] ");
+ try renderExpression(allocator, stream, tree, indent, start_col, asm_output.constraint, Space.None);
+ try stream.write(" (");
+
+ switch (asm_output.kind) {
+ ast.Node.AsmOutput.Kind.Variable => |variable_name| {
+ try renderExpression(allocator, stream, tree, indent, start_col, &variable_name.base, Space.None);
+ },
+ ast.Node.AsmOutput.Kind.Return => |return_type| {
+ try stream.write("-> ");
+ try renderExpression(allocator, stream, tree, indent, start_col, return_type, Space.None);
+ },
+ }
+
+ return renderToken(tree, stream, asm_output.lastToken(), indent, start_col, space); // )
+ },
+
+ ast.Node.Id.StructField,
+ ast.Node.Id.UnionTag,
+ ast.Node.Id.EnumTag,
+ ast.Node.Id.Root,
+ ast.Node.Id.VarDecl,
+ ast.Node.Id.Use,
+ ast.Node.Id.TestDecl,
+ ast.Node.Id.ParamDecl,
+ => unreachable,
+ }
+}
+
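+/// Renders a full variable declaration: the optional pub, extern/export, and comptime
+/// prefixes, the var/const keyword, name, type, align clause, initializer, and the
+/// trailing semicolon followed by a newline.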
+fn renderVarDecl(
+ allocator: *mem.Allocator,
+ stream: var,
+ tree: *ast.Tree,
+ indent: usize,
+ start_col: *usize,
+ var_decl: *ast.Node.VarDecl,
+) (@typeOf(stream).Child.Error || Error)!void {
+ if (var_decl.visib_token) |visib_token| {
+ try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
+ }
+
+ if (var_decl.extern_export_token) |extern_export_token| {
+ try renderToken(tree, stream, extern_export_token, indent, start_col, Space.Space); // extern
+
+ if (var_decl.lib_name) |lib_name| {
+ try renderExpression(allocator, stream, tree, indent, start_col, lib_name, Space.Space); // "lib"
+ }
+ }
+
+ if (var_decl.comptime_token) |comptime_token| {
+ try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space); // comptime
+ }
+
+ try renderToken(tree, stream, var_decl.mut_token, indent, start_col, Space.Space); // var
+
+ const name_space = if (var_decl.type_node == null and (var_decl.align_node != null or
+ var_decl.init_node != null)) Space.Space else Space.None;
+ try renderToken(tree, stream, var_decl.name_token, indent, start_col, name_space);
+
+ if (var_decl.type_node) |type_node| {
+ try renderToken(tree, stream, tree.nextToken(var_decl.name_token), indent, start_col, Space.Space);
+ const s = if (var_decl.align_node != null or var_decl.init_node != null) Space.Space else Space.None;
+ try renderExpression(allocator, stream, tree, indent, start_col, type_node, s);
+ }
+
+ if (var_decl.align_node) |align_node| {
+ const lparen = tree.prevToken(align_node.firstToken());
+ const align_kw = tree.prevToken(lparen);
+ const rparen = tree.nextToken(align_node.lastToken());
+ try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
+ try renderToken(tree, stream, lparen, indent, start_col, Space.None); // (
+ try renderExpression(allocator, stream, tree, indent, start_col, align_node, Space.None);
+ const s = if (var_decl.init_node != null) Space.Space else Space.None;
+ try renderToken(tree, stream, rparen, indent, start_col, s); // )
+ }
+
+ if (var_decl.init_node) |init_node| {
+ const s = if (init_node.id == ast.Node.Id.MultilineStringLiteral) Space.None else Space.Space;
+ try renderToken(tree, stream, var_decl.eq_token, indent, start_col, s); // =
+ try renderExpression(allocator, stream, tree, indent, start_col, init_node, Space.None);
+ }
+
+ try renderToken(tree, stream, var_decl.semicolon_token, indent, start_col, Space.Newline);
+}
+
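+/// Renders a single function parameter: optional comptime/noalias, the parameter name
+/// and colon if present, and either the type expression or the varargs token.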
+fn renderParamDecl(
+ allocator: *mem.Allocator,
+ stream: var,
+ tree: *ast.Tree,
+ indent: usize,
+ start_col: *usize,
+ base: *ast.Node,
+ space: Space,
+) (@typeOf(stream).Child.Error || Error)!void {
+ const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", base);
+
+ if (param_decl.comptime_token) |comptime_token| {
+ try renderToken(tree, stream, comptime_token, indent, start_col, Space.Space);
+ }
+ if (param_decl.noalias_token) |noalias_token| {
+ try renderToken(tree, stream, noalias_token, indent, start_col, Space.Space);
+ }
+ if (param_decl.name_token) |name_token| {
+ try renderToken(tree, stream, name_token, indent, start_col, Space.None);
+ try renderToken(tree, stream, tree.nextToken(name_token), indent, start_col, Space.Space); // :
+ }
+ if (param_decl.var_args_token) |var_args_token| {
+ try renderToken(tree, stream, var_args_token, indent, start_col, space);
+ } else {
+ try renderExpression(allocator, stream, tree, indent, start_col, param_decl.type_node, space);
+ }
+}
+
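+/// Renders one statement inside a block: variable declarations go through renderVarDecl;
+/// any other expression is rendered and, when required, followed by its semicolon.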
+fn renderStatement(
+ allocator: *mem.Allocator,
+ stream: var,
+ tree: *ast.Tree,
+ indent: usize,
+ start_col: *usize,
+ base: *ast.Node,
+) (@typeOf(stream).Child.Error || Error)!void {
+ switch (base.id) {
+ ast.Node.Id.VarDecl => {
+ const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", base);
+ try renderVarDecl(allocator, stream, tree, indent, start_col, var_decl);
+ },
+ else => {
+ if (base.requireSemiColon()) {
+ try renderExpression(allocator, stream, tree, indent, start_col, base, Space.None);
+
+ const semicolon_index = tree.nextToken(base.lastToken());
+ assert(tree.tokens.at(semicolon_index).id == Token.Id.Semicolon);
+ try renderToken(tree, stream, semicolon_index, indent, start_col, Space.Newline);
+ } else {
+ try renderExpression(allocator, stream, tree, indent, start_col, base, Space.Newline);
+ }
+ },
+ }
+}
+
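+/// Controls what gets emitted after a token or expression is rendered.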
+const Space = enum {
+ None,
+ Newline,
+ Comma,
+ Space,
+ SpaceOrOutdent,
+ NoNewline,
+ NoComment,
+ BlockStart,
+};
+
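+/// Writes the token text (skipping the first token_skip_bytes bytes), then emits the
+/// requested spacing, keeping any line comments that follow the token on the same line
+/// and re-indenting whatever comes after them.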
+fn renderTokenOffset(
+ tree: *ast.Tree,
+ stream: var,
+ token_index: ast.TokenIndex,
+ indent: usize,
+ start_col: *usize,
+ space: Space,
+ token_skip_bytes: usize,
+) (@typeOf(stream).Child.Error || Error)!void {
+ if (space == Space.BlockStart) {
+ if (start_col.* < indent + indent_delta)
+ return renderToken(tree, stream, token_index, indent, start_col, Space.Space);
+ try renderToken(tree, stream, token_index, indent, start_col, Space.Newline);
+ try stream.writeByteNTimes(' ', indent);
+ start_col.* = indent;
+ return;
+ }
+
+ var token = tree.tokens.at(token_index);
+ try stream.write(mem.trimRight(u8, tree.tokenSlicePtr(token)[token_skip_bytes..], " "));
+
+ if (space == Space.NoComment)
+ return;
+
+ var next_token = tree.tokens.at(token_index + 1);
+
+ if (space == Space.Comma) switch (next_token.id) {
+ Token.Id.Comma => return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline),
+ Token.Id.LineComment => {
+ try stream.write(", ");
+ return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline);
+ },
+ else => {
+ if (tree.tokens.at(token_index + 2).id == Token.Id.MultilineStringLiteralLine) {
+ try stream.write(",");
+ return;
+ } else {
+ try stream.write(",\n");
+ start_col.* = 0;
+ return;
+ }
+ },
+ };
+
+ // Skip over same line doc comments
+ var offset: usize = 1;
+ if (next_token.id == Token.Id.DocComment) {
+ const loc = tree.tokenLocationPtr(token.end, next_token);
+ if (loc.line == 0) {
+ offset += 1;
+ next_token = tree.tokens.at(token_index + offset);
+ }
+ }
+
+ if (next_token.id != Token.Id.LineComment) blk: {
+ switch (space) {
+ Space.None, Space.NoNewline => return,
+ Space.Newline => {
+ if (next_token.id == Token.Id.MultilineStringLiteralLine) {
+ return;
+ } else {
+ try stream.write("\n");
+ start_col.* = 0;
+ return;
+ }
+ },
+ Space.Space, Space.SpaceOrOutdent => {
+ if (next_token.id == Token.Id.MultilineStringLiteralLine)
+ return;
+ try stream.writeByte(' ');
+ return;
+ },
+ Space.NoComment, Space.Comma, Space.BlockStart => unreachable,
+ }
+ }
+
+ const comment_is_empty = mem.trimRight(u8, tree.tokenSlicePtr(next_token), " ").len == 2;
+ if (comment_is_empty) {
+ switch (space) {
+ Space.Newline => {
+ try stream.writeByte('\n');
+ start_col.* = 0;
+ return;
+ },
+ else => {},
+ }
+ }
+
+ var loc = tree.tokenLocationPtr(token.end, next_token);
+ if (loc.line == 0) {
+ try stream.print(" {}", mem.trimRight(u8, tree.tokenSlicePtr(next_token), " "));
+ offset = 2;
+ token = next_token;
+ next_token = tree.tokens.at(token_index + offset);
+ if (next_token.id != Token.Id.LineComment) {
+ switch (space) {
+ Space.None, Space.Space => {
+ try stream.writeByte('\n');
+ const after_comment_token = tree.tokens.at(token_index + offset);
+ const next_line_indent = switch (after_comment_token.id) {
+ Token.Id.RParen, Token.Id.RBrace, Token.Id.RBracket => indent,
+ else => indent + indent_delta,
+ };
+ try stream.writeByteNTimes(' ', next_line_indent);
+ start_col.* = next_line_indent;
+ },
+ Space.SpaceOrOutdent => {
+ try stream.writeByte('\n');
+ try stream.writeByteNTimes(' ', indent);
+ start_col.* = indent;
+ },
+ Space.Newline => {
+ if (next_token.id == Token.Id.MultilineStringLiteralLine) {
+ return;
+ } else {
+ try stream.write("\n");
+ start_col.* = 0;
+ return;
+ }
+ },
+ Space.NoNewline => {},
+ Space.NoComment, Space.Comma, Space.BlockStart => unreachable,
+ }
+ return;
+ }
+ loc = tree.tokenLocationPtr(token.end, next_token);
+ }
+
+ while (true) {
+ assert(loc.line != 0);
+ const newline_count = if (loc.line == 1) u8(1) else u8(2);
+ try stream.writeByteNTimes('\n', newline_count);
+ try stream.writeByteNTimes(' ', indent);
+ try stream.write(mem.trimRight(u8, tree.tokenSlicePtr(next_token), " "));
+
+ offset += 1;
+ token = next_token;
+ next_token = tree.tokens.at(token_index + offset);
+ if (next_token.id != Token.Id.LineComment) {
+ switch (space) {
+ Space.Newline => {
+ if (next_token.id == Token.Id.MultilineStringLiteralLine) {
+ return;
+ } else {
+ try stream.write("\n");
+ start_col.* = 0;
+ return;
+ }
+ },
+ Space.None, Space.Space => {
+ try stream.writeByte('\n');
+
+ const after_comment_token = tree.tokens.at(token_index + offset);
+ const next_line_indent = switch (after_comment_token.id) {
+ Token.Id.RParen, Token.Id.RBrace, Token.Id.RBracket => indent - indent_delta,
+ else => indent,
+ };
+ try stream.writeByteNTimes(' ', next_line_indent);
+ start_col.* = next_line_indent;
+ },
+ Space.SpaceOrOutdent => {
+ try stream.writeByte('\n');
+ try stream.writeByteNTimes(' ', indent);
+ start_col.* = indent;
+ },
+ Space.NoNewline => {},
+ Space.NoComment, Space.Comma, Space.BlockStart => unreachable,
+ }
+ return;
+ }
+ loc = tree.tokenLocationPtr(token.end, next_token);
+ }
+}
+
+fn renderToken(
+ tree: *ast.Tree,
+ stream: var,
+ token_index: ast.TokenIndex,
+ indent: usize,
+ start_col: *usize,
+ space: Space,
+) (@typeOf(stream).Child.Error || Error)!void {
+ return renderTokenOffset(tree, stream, token_index, indent, start_col, space, 0);
+}
+
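+/// Renders the doc comment lines attached to a node, one per line, each followed by
+/// the indentation for the declaration that comes after them.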
+fn renderDocComments(
+ tree: *ast.Tree,
+ stream: var,
+ node: var,
+ indent: usize,
+ start_col: *usize,
+) (@typeOf(stream).Child.Error || Error)!void {
+ const comment = node.doc_comments orelse return;
+ var it = comment.lines.iterator(0);
+ const first_token = node.firstToken();
+ while (it.next()) |line_token_index| {
+ if (line_token_index.* < first_token) {
+ try renderToken(tree, stream, line_token_index.*, indent, start_col, Space.Newline);
+ try stream.writeByteNTimes(' ', indent);
+ } else {
+ try renderToken(tree, stream, line_token_index.*, indent, start_col, Space.NoComment);
+ try stream.write("\n");
+ try stream.writeByteNTimes(' ', indent);
+ }
+ }
+}
+
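+/// True for nodes that render as blocks: block, if, for, while, and switch expressions.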
+fn nodeIsBlock(base: *const ast.Node) bool {
+ return switch (base.id) {
+ ast.Node.Id.Block,
+ ast.Node.Id.If,
+ ast.Node.Id.For,
+ ast.Node.Id.While,
+ ast.Node.Id.Switch,
+ => true,
+ else => false,
+ };
+}
+
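+/// True when an infix operator inside a slice bound should be padded with spaces;
+/// field access (a.b) is the only infix operator rendered without them.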
+fn nodeCausesSliceOpSpace(base: *ast.Node) bool {
+ const infix_op = base.cast(ast.Node.InfixOp) orelse return false;
+ return switch (infix_op.op) {
+ ast.Node.InfixOp.Op.Period => false,
+ else => true,
+ };
+}
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index a2c4def9e0..3c7ab1f0a8 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -6,61 +6,64 @@ pub const Token = struct {
start: usize,
end: usize,
- const KeywordId = struct {
+ pub const Keyword = struct {
bytes: []const u8,
id: Id,
};
- const keywords = []KeywordId {
- KeywordId{.bytes="align", .id = Id.Keyword_align},
- KeywordId{.bytes="and", .id = Id.Keyword_and},
- KeywordId{.bytes="asm", .id = Id.Keyword_asm},
- KeywordId{.bytes="async", .id = Id.Keyword_async},
- KeywordId{.bytes="await", .id = Id.Keyword_await},
- KeywordId{.bytes="break", .id = Id.Keyword_break},
- KeywordId{.bytes="catch", .id = Id.Keyword_catch},
- KeywordId{.bytes="cancel", .id = Id.Keyword_cancel},
- KeywordId{.bytes="comptime", .id = Id.Keyword_comptime},
- KeywordId{.bytes="const", .id = Id.Keyword_const},
- KeywordId{.bytes="continue", .id = Id.Keyword_continue},
- KeywordId{.bytes="defer", .id = Id.Keyword_defer},
- KeywordId{.bytes="else", .id = Id.Keyword_else},
- KeywordId{.bytes="enum", .id = Id.Keyword_enum},
- KeywordId{.bytes="errdefer", .id = Id.Keyword_errdefer},
- KeywordId{.bytes="error", .id = Id.Keyword_error},
- KeywordId{.bytes="export", .id = Id.Keyword_export},
- KeywordId{.bytes="extern", .id = Id.Keyword_extern},
- KeywordId{.bytes="false", .id = Id.Keyword_false},
- KeywordId{.bytes="fn", .id = Id.Keyword_fn},
- KeywordId{.bytes="for", .id = Id.Keyword_for},
- KeywordId{.bytes="if", .id = Id.Keyword_if},
- KeywordId{.bytes="inline", .id = Id.Keyword_inline},
- KeywordId{.bytes="nakedcc", .id = Id.Keyword_nakedcc},
- KeywordId{.bytes="noalias", .id = Id.Keyword_noalias},
- KeywordId{.bytes="null", .id = Id.Keyword_null},
- KeywordId{.bytes="or", .id = Id.Keyword_or},
- KeywordId{.bytes="packed", .id = Id.Keyword_packed},
- KeywordId{.bytes="pub", .id = Id.Keyword_pub},
- KeywordId{.bytes="resume", .id = Id.Keyword_resume},
- KeywordId{.bytes="return", .id = Id.Keyword_return},
- KeywordId{.bytes="section", .id = Id.Keyword_section},
- KeywordId{.bytes="stdcallcc", .id = Id.Keyword_stdcallcc},
- KeywordId{.bytes="struct", .id = Id.Keyword_struct},
- KeywordId{.bytes="suspend", .id = Id.Keyword_suspend},
- KeywordId{.bytes="switch", .id = Id.Keyword_switch},
- KeywordId{.bytes="test", .id = Id.Keyword_test},
- KeywordId{.bytes="this", .id = Id.Keyword_this},
- KeywordId{.bytes="true", .id = Id.Keyword_true},
- KeywordId{.bytes="try", .id = Id.Keyword_try},
- KeywordId{.bytes="undefined", .id = Id.Keyword_undefined},
- KeywordId{.bytes="union", .id = Id.Keyword_union},
- KeywordId{.bytes="unreachable", .id = Id.Keyword_unreachable},
- KeywordId{.bytes="use", .id = Id.Keyword_use},
- KeywordId{.bytes="var", .id = Id.Keyword_var},
- KeywordId{.bytes="volatile", .id = Id.Keyword_volatile},
- KeywordId{.bytes="while", .id = Id.Keyword_while},
+ pub const keywords = []Keyword{
+ Keyword{ .bytes = "align", .id = Id.Keyword_align },
+ Keyword{ .bytes = "and", .id = Id.Keyword_and },
+ Keyword{ .bytes = "asm", .id = Id.Keyword_asm },
+ Keyword{ .bytes = "async", .id = Id.Keyword_async },
+ Keyword{ .bytes = "await", .id = Id.Keyword_await },
+ Keyword{ .bytes = "break", .id = Id.Keyword_break },
+ Keyword{ .bytes = "catch", .id = Id.Keyword_catch },
+ Keyword{ .bytes = "cancel", .id = Id.Keyword_cancel },
+ Keyword{ .bytes = "comptime", .id = Id.Keyword_comptime },
+ Keyword{ .bytes = "const", .id = Id.Keyword_const },
+ Keyword{ .bytes = "continue", .id = Id.Keyword_continue },
+ Keyword{ .bytes = "defer", .id = Id.Keyword_defer },
+ Keyword{ .bytes = "else", .id = Id.Keyword_else },
+ Keyword{ .bytes = "enum", .id = Id.Keyword_enum },
+ Keyword{ .bytes = "errdefer", .id = Id.Keyword_errdefer },
+ Keyword{ .bytes = "error", .id = Id.Keyword_error },
+ Keyword{ .bytes = "export", .id = Id.Keyword_export },
+ Keyword{ .bytes = "extern", .id = Id.Keyword_extern },
+ Keyword{ .bytes = "false", .id = Id.Keyword_false },
+ Keyword{ .bytes = "fn", .id = Id.Keyword_fn },
+ Keyword{ .bytes = "for", .id = Id.Keyword_for },
+ Keyword{ .bytes = "if", .id = Id.Keyword_if },
+ Keyword{ .bytes = "inline", .id = Id.Keyword_inline },
+ Keyword{ .bytes = "nakedcc", .id = Id.Keyword_nakedcc },
+ Keyword{ .bytes = "noalias", .id = Id.Keyword_noalias },
+ Keyword{ .bytes = "null", .id = Id.Keyword_null },
+ Keyword{ .bytes = "or", .id = Id.Keyword_or },
+ Keyword{ .bytes = "orelse", .id = Id.Keyword_orelse },
+ Keyword{ .bytes = "packed", .id = Id.Keyword_packed },
+ Keyword{ .bytes = "promise", .id = Id.Keyword_promise },
+ Keyword{ .bytes = "pub", .id = Id.Keyword_pub },
+ Keyword{ .bytes = "resume", .id = Id.Keyword_resume },
+ Keyword{ .bytes = "return", .id = Id.Keyword_return },
+ Keyword{ .bytes = "section", .id = Id.Keyword_section },
+ Keyword{ .bytes = "stdcallcc", .id = Id.Keyword_stdcallcc },
+ Keyword{ .bytes = "struct", .id = Id.Keyword_struct },
+ Keyword{ .bytes = "suspend", .id = Id.Keyword_suspend },
+ Keyword{ .bytes = "switch", .id = Id.Keyword_switch },
+ Keyword{ .bytes = "test", .id = Id.Keyword_test },
+ Keyword{ .bytes = "this", .id = Id.Keyword_this },
+ Keyword{ .bytes = "true", .id = Id.Keyword_true },
+ Keyword{ .bytes = "try", .id = Id.Keyword_try },
+ Keyword{ .bytes = "undefined", .id = Id.Keyword_undefined },
+ Keyword{ .bytes = "union", .id = Id.Keyword_union },
+ Keyword{ .bytes = "unreachable", .id = Id.Keyword_unreachable },
+ Keyword{ .bytes = "use", .id = Id.Keyword_use },
+ Keyword{ .bytes = "var", .id = Id.Keyword_var },
+ Keyword{ .bytes = "volatile", .id = Id.Keyword_volatile },
+ Keyword{ .bytes = "while", .id = Id.Keyword_while },
};
+ // TODO perfect hash at comptime
fn getKeyword(bytes: []const u8) ?Id {
for (keywords) |kw| {
if (mem.eql(u8, kw.bytes, bytes)) {
@@ -70,7 +73,11 @@ pub const Token = struct {
return null;
}
- const StrLitKind = enum {Normal, C};
+ /// TODO remove this enum
+ const StrLitKind = enum {
+ Normal,
+ C,
+ };
pub const Id = union(enum) {
Invalid,
@@ -124,7 +131,6 @@ pub const Token = struct {
Ampersand,
AmpersandEqual,
QuestionMark,
- QuestionMarkQuestionMark,
AngleBracketLeft,
AngleBracketLeftEqual,
AngleBracketAngleBracketLeft,
@@ -137,6 +143,8 @@ pub const Token = struct {
IntegerLiteral,
FloatLiteral,
LineComment,
+ DocComment,
+ BracketStarBracket,
Keyword_align,
Keyword_and,
Keyword_asm,
@@ -164,7 +172,9 @@ pub const Token = struct {
Keyword_noalias,
Keyword_null,
Keyword_or,
+ Keyword_orelse,
Keyword_packed,
+ Keyword_promise,
Keyword_pub,
Keyword_resume,
Keyword_return,
@@ -192,44 +202,13 @@ pub const Tokenizer = struct {
index: usize,
pending_invalid_token: ?Token,
- pub const Location = struct {
- line: usize,
- column: usize,
- line_start: usize,
- line_end: usize,
- };
-
- pub fn getTokenLocation(self: &Tokenizer, start_index: usize, token: &const Token) Location {
- var loc = Location {
- .line = 0,
- .column = 0,
- .line_start = start_index,
- .line_end = self.buffer.len,
- };
- for (self.buffer[start_index..]) |c, i| {
- if (i + start_index == token.start) {
- loc.line_end = i + start_index;
- while (loc.line_end < self.buffer.len and self.buffer[loc.line_end] != '\n') : (loc.line_end += 1) {}
- return loc;
- }
- if (c == '\n') {
- loc.line += 1;
- loc.column = 0;
- loc.line_start = i + 1;
- } else {
- loc.column += 1;
- }
- }
- return loc;
- }
-
/// For debugging purposes
- pub fn dump(self: &Tokenizer, token: &const Token) void {
+ pub fn dump(self: *Tokenizer, token: *const Token) void {
std.debug.warn("{} \"{}\"\n", @tagName(token.id), self.buffer[token.start..token.end]);
}
pub fn init(buffer: []const u8) Tokenizer {
- return Tokenizer {
+ return Tokenizer{
.buffer = buffer,
.index = 0,
.pending_invalid_token = null,
@@ -244,9 +223,10 @@ pub const Tokenizer = struct {
StringLiteral,
StringLiteralBackslash,
MultilineStringLiteralLine,
- MultilineStringLiteralLineBackslash,
CharLiteral,
CharLiteralBackslash,
+ CharLiteralEscape1,
+ CharLiteralEscape2,
CharLiteralEnd,
Backslash,
Equal,
@@ -257,18 +237,25 @@ pub const Tokenizer = struct {
Asterisk,
AsteriskPercent,
Slash,
+ LineCommentStart,
LineComment,
+ DocCommentStart,
+ DocComment,
Zero,
IntegerLiteral,
IntegerLiteralWithRadix,
+ IntegerLiteralWithRadixHex,
NumberDot,
+ NumberDotHex,
FloatFraction,
+ FloatFractionHex,
FloatExponentUnsigned,
+ FloatExponentUnsignedHex,
FloatExponentNumber,
+ FloatExponentNumberHex,
Ampersand,
Caret,
Percent,
- QuestionMark,
Plus,
PlusPercent,
AngleBracketLeft,
@@ -278,16 +265,18 @@ pub const Tokenizer = struct {
Period,
Period2,
SawAtSign,
+ LBracket,
+ LBracketStar,
};
- pub fn next(self: &Tokenizer) Token {
+ pub fn next(self: *Tokenizer) Token {
if (self.pending_invalid_token) |token| {
self.pending_invalid_token = null;
return token;
}
const start_index = self.index;
var state = State.Start;
- var result = Token {
+ var result = Token{
.id = Token.Id.Eof,
.start = self.index,
.end = undefined,
@@ -308,7 +297,7 @@ pub const Tokenizer = struct {
},
'"' => {
state = State.StringLiteral;
- result.id = Token.Id { .StringLiteral = Token.StrLitKind.Normal };
+ result.id = Token.Id{ .StringLiteral = Token.StrLitKind.Normal };
},
'\'' => {
state = State.CharLiteral;
@@ -340,9 +329,7 @@ pub const Tokenizer = struct {
break;
},
'[' => {
- result.id = Token.Id.LBracket;
- self.index += 1;
- break;
+ state = State.LBracket;
},
']' => {
result.id = Token.Id.RBracket;
@@ -359,6 +346,11 @@ pub const Tokenizer = struct {
self.index += 1;
break;
},
+ '?' => {
+ result.id = Token.Id.QuestionMark;
+ self.index += 1;
+ break;
+ },
':' => {
result.id = Token.Id.Colon;
self.index += 1;
@@ -373,9 +365,6 @@ pub const Tokenizer = struct {
'+' => {
state = State.Plus;
},
- '?' => {
- state = State.QuestionMark;
- },
'<' => {
state = State.AngleBracketLeft;
},
@@ -387,7 +376,7 @@ pub const Tokenizer = struct {
},
'\\' => {
state = State.Backslash;
- result.id = Token.Id { .MultilineStringLiteralLine = Token.StrLitKind.Normal };
+ result.id = Token.Id{ .MultilineStringLiteralLine = Token.StrLitKind.Normal };
},
'{' => {
result.id = Token.Id.LBrace;
@@ -444,6 +433,28 @@ pub const Tokenizer = struct {
},
},
+ State.LBracket => switch (c) {
+ '*' => {
+ state = State.LBracketStar;
+ },
+ else => {
+ result.id = Token.Id.LBracket;
+ break;
+ },
+ },
+
+ State.LBracketStar => switch (c) {
+ ']' => {
+ result.id = Token.Id.BracketStarBracket;
+ self.index += 1;
+ break;
+ },
+ else => {
+ result.id = Token.Id.Invalid;
+ break;
+ },
+ },
+
State.Ampersand => switch (c) {
'=' => {
result.id = Token.Id.AmpersandEqual;
@@ -473,7 +484,7 @@ pub const Tokenizer = struct {
else => {
result.id = Token.Id.Asterisk;
break;
- }
+ },
},
State.AsteriskPercent => switch (c) {
@@ -485,18 +496,6 @@ pub const Tokenizer = struct {
else => {
result.id = Token.Id.AsteriskPercent;
break;
- }
- },
-
- State.QuestionMark => switch (c) {
- '?' => {
- result.id = Token.Id.QuestionMarkQuestionMark;
- self.index += 1;
- break;
- },
- else => {
- result.id = Token.Id.QuestionMark;
- break;
},
},
@@ -553,7 +552,7 @@ pub const Tokenizer = struct {
else => {
result.id = Token.Id.Caret;
break;
- }
+ },
},
State.Identifier => switch (c) {
@@ -578,11 +577,11 @@ pub const Tokenizer = struct {
State.C => switch (c) {
'\\' => {
state = State.Backslash;
- result.id = Token.Id { .MultilineStringLiteralLine = Token.StrLitKind.C };
+ result.id = Token.Id{ .MultilineStringLiteralLine = Token.StrLitKind.C };
},
'"' => {
state = State.StringLiteral;
- result.id = Token.Id { .StringLiteral = Token.StrLitKind.C };
+ result.id = Token.Id{ .StringLiteral = Token.StrLitKind.C };
},
'a'...'z', 'A'...'Z', '_', '0'...'9' => {
state = State.Identifier;
@@ -623,7 +622,7 @@ pub const Tokenizer = struct {
}
state = State.CharLiteralEnd;
- }
+ },
},
State.CharLiteralBackslash => switch (c) {
@@ -631,11 +630,34 @@ pub const Tokenizer = struct {
result.id = Token.Id.Invalid;
break;
},
+ 'x' => {
+ state = State.CharLiteralEscape1;
+ },
else => {
state = State.CharLiteralEnd;
},
},
+ State.CharLiteralEscape1 => switch (c) {
+ '0'...'9', 'a'...'z', 'A'...'F' => {
+ state = State.CharLiteralEscape2;
+ },
+ else => {
+ result.id = Token.Id.Invalid;
+ break;
+ },
+ },
+
+ State.CharLiteralEscape2 => switch (c) {
+ '0'...'9', 'a'...'z', 'A'...'F' => {
+ state = State.CharLiteralEnd;
+ },
+ else => {
+ result.id = Token.Id.Invalid;
+ break;
+ },
+ },
+
State.CharLiteralEnd => switch (c) {
'\'' => {
result.id = Token.Id.CharLiteral;
@@ -649,9 +671,6 @@ pub const Tokenizer = struct {
},
State.MultilineStringLiteralLine => switch (c) {
- '\\' => {
- state = State.MultilineStringLiteralLineBackslash;
- },
'\n' => {
self.index += 1;
break;
@@ -659,13 +678,6 @@ pub const Tokenizer = struct {
else => self.checkLiteralCharacter(),
},
- State.MultilineStringLiteralLineBackslash => switch (c) {
- '\n' => break, // Look for this error later.
- else => {
- state = State.MultilineStringLiteralLine;
- },
- },
-
State.Bang => switch (c) {
'=' => {
result.id = Token.Id.BangEqual;
@@ -741,7 +753,7 @@ pub const Tokenizer = struct {
else => {
result.id = Token.Id.MinusPercent;
break;
- }
+ },
},
State.AngleBracketLeft => switch (c) {
@@ -822,8 +834,8 @@ pub const Tokenizer = struct {
State.Slash => switch (c) {
'/' => {
+ state = State.LineCommentStart;
result.id = Token.Id.LineComment;
- state = State.LineComment;
},
'=' => {
result.id = Token.Id.SlashEqual;
@@ -835,14 +847,41 @@ pub const Tokenizer = struct {
break;
},
},
- State.LineComment => switch (c) {
+ State.LineCommentStart => switch (c) {
+ '/' => {
+ state = State.DocCommentStart;
+ },
+ '\n' => break,
+ else => {
+ state = State.LineComment;
+ self.checkLiteralCharacter();
+ },
+ },
+ State.DocCommentStart => switch (c) {
+ '/' => {
+ state = State.LineComment;
+ },
+ '\n' => {
+ result.id = Token.Id.DocComment;
+ break;
+ },
+ else => {
+ state = State.DocComment;
+ result.id = Token.Id.DocComment;
+ self.checkLiteralCharacter();
+ },
+ },
+ State.LineComment, State.DocComment => switch (c) {
'\n' => break,
else => self.checkLiteralCharacter(),
},
State.Zero => switch (c) {
- 'b', 'o', 'x' => {
+ 'b', 'o' => {
state = State.IntegerLiteralWithRadix;
},
+ 'x' => {
+ state = State.IntegerLiteralWithRadixHex;
+ },
else => {
// reinterpret as a normal number
self.index -= 1;
@@ -863,8 +902,15 @@ pub const Tokenizer = struct {
'.' => {
state = State.NumberDot;
},
+ '0'...'9' => {},
+ else => break,
+ },
+ State.IntegerLiteralWithRadixHex => switch (c) {
+ '.' => {
+ state = State.NumberDotHex;
+ },
'p', 'P' => {
- state = State.FloatExponentUnsigned;
+ state = State.FloatExponentUnsignedHex;
},
'0'...'9', 'a'...'f', 'A'...'F' => {},
else => break,
@@ -881,10 +927,29 @@ pub const Tokenizer = struct {
state = State.FloatFraction;
},
},
+ State.NumberDotHex => switch (c) {
+ '.' => {
+ self.index -= 1;
+ state = State.Start;
+ break;
+ },
+ else => {
+ self.index -= 1;
+ result.id = Token.Id.FloatLiteral;
+ state = State.FloatFractionHex;
+ },
+ },
State.FloatFraction => switch (c) {
- 'p', 'P' => {
+ 'e', 'E' => {
state = State.FloatExponentUnsigned;
},
+ '0'...'9' => {},
+ else => break,
+ },
+ State.FloatFractionHex => switch (c) {
+ 'p', 'P' => {
+ state = State.FloatExponentUnsignedHex;
+ },
'0'...'9', 'a'...'f', 'A'...'F' => {},
else => break,
},
@@ -896,9 +961,23 @@ pub const Tokenizer = struct {
// reinterpret as a normal exponent number
self.index -= 1;
state = State.FloatExponentNumber;
- }
+ },
+ },
+ State.FloatExponentUnsignedHex => switch (c) {
+ '+', '-' => {
+ state = State.FloatExponentNumberHex;
+ },
+ else => {
+ // reinterpret as a normal exponent number
+ self.index -= 1;
+ state = State.FloatExponentNumberHex;
+ },
},
State.FloatExponentNumber => switch (c) {
+ '0'...'9' => {},
+ else => break,
+ },
+ State.FloatExponentNumberHex => switch (c) {
'0'...'9', 'a'...'f', 'A'...'F' => {},
else => break,
},
@@ -909,30 +988,42 @@ pub const Tokenizer = struct {
State.C,
State.IntegerLiteral,
State.IntegerLiteralWithRadix,
+ State.IntegerLiteralWithRadixHex,
State.FloatFraction,
+ State.FloatFractionHex,
State.FloatExponentNumber,
+ State.FloatExponentNumberHex,
State.StringLiteral, // find this error later
State.MultilineStringLiteralLine,
- State.Builtin => {},
+ State.Builtin,
+ => {},
State.Identifier => {
if (Token.getKeyword(self.buffer[result.start..self.index])) |id| {
result.id = id;
}
},
- State.LineComment => {
- result.id = Token.Id.Eof;
+ State.LineCommentStart, State.LineComment => {
+ result.id = Token.Id.LineComment;
+ },
+ State.DocComment, State.DocCommentStart => {
+ result.id = Token.Id.DocComment;
},
State.NumberDot,
+ State.NumberDotHex,
State.FloatExponentUnsigned,
+ State.FloatExponentUnsignedHex,
State.SawAtSign,
State.Backslash,
- State.MultilineStringLiteralLineBackslash,
State.CharLiteral,
State.CharLiteralBackslash,
+ State.CharLiteralEscape1,
+ State.CharLiteralEscape2,
State.CharLiteralEnd,
- State.StringLiteralBackslash => {
+ State.StringLiteralBackslash,
+ State.LBracketStar,
+ => {
result.id = Token.Id.Invalid;
},
@@ -948,6 +1039,9 @@ pub const Tokenizer = struct {
State.Slash => {
result.id = Token.Id.Slash;
},
+ State.LBracket => {
+ result.id = Token.Id.LBracket;
+ },
State.Zero => {
result.id = Token.Id.IntegerLiteral;
},
@@ -981,9 +1075,6 @@ pub const Tokenizer = struct {
State.Plus => {
result.id = Token.Id.Plus;
},
- State.QuestionMark => {
- result.id = Token.Id.QuestionMark;
- },
State.Percent => {
result.id = Token.Id.Percent;
},
@@ -1013,22 +1104,18 @@ pub const Tokenizer = struct {
return result;
}
- pub fn getTokenSlice(self: &const Tokenizer, token: &const Token) []const u8 {
- return self.buffer[token.start..token.end];
- }
-
- fn checkLiteralCharacter(self: &Tokenizer) void {
+ fn checkLiteralCharacter(self: *Tokenizer) void {
if (self.pending_invalid_token != null) return;
const invalid_length = self.getInvalidCharacterLength();
if (invalid_length == 0) return;
- self.pending_invalid_token = Token {
+ self.pending_invalid_token = Token{
.id = Token.Id.Invalid,
.start = self.index,
.end = self.index + invalid_length,
};
}
- fn getInvalidCharacterLength(self: &Tokenizer) u3 {
+ fn getInvalidCharacterLength(self: *Tokenizer) u3 {
const c0 = self.buffer[self.index];
if (c0 < 0x80) {
if (c0 < 0x20 or c0 == 0x7f) {
@@ -1042,9 +1129,9 @@ pub const Tokenizer = struct {
// check utf8-encoded character.
const length = std.unicode.utf8ByteSequenceLength(c0) catch return 1;
if (self.index + length > self.buffer.len) {
- return u3(self.buffer.len - self.index);
+ return @intCast(u3, self.buffer.len - self.index);
}
- const bytes = self.buffer[self.index..self.index + length];
+ const bytes = self.buffer[self.index .. self.index + length];
switch (length) {
2 => {
const value = std.unicode.utf8Decode2(bytes) catch return length;
@@ -1066,84 +1153,147 @@ pub const Tokenizer = struct {
}
};
-
-
test "tokenizer" {
- testTokenize("test", []Token.Id {
- Token.Id.Keyword_test,
+ testTokenize("test", []Token.Id{Token.Id.Keyword_test});
+}
+
+test "tokenizer - unknown length pointer" {
+ testTokenize(
+ \\[*]u8
+ , []Token.Id{
+ Token.Id.BracketStarBracket,
+ Token.Id.Identifier,
+ });
+}
+
+test "tokenizer - char literal with hex escape" {
+ testTokenize(
+ \\'\x1b'
+ , []Token.Id{Token.Id.CharLiteral});
+}
+
+test "tokenizer - float literal e exponent" {
+ testTokenize("a = 4.94065645841246544177e-324;\n", []Token.Id{
+ Token.Id.Identifier,
+ Token.Id.Equal,
+ Token.Id.FloatLiteral,
+ Token.Id.Semicolon,
+ });
+}
+
+test "tokenizer - float literal p exponent" {
+ testTokenize("a = 0x1.a827999fcef32p+1022;\n", []Token.Id{
+ Token.Id.Identifier,
+ Token.Id.Equal,
+ Token.Id.FloatLiteral,
+ Token.Id.Semicolon,
});
}
test "tokenizer - chars" {
- testTokenize("'c'", []Token.Id {Token.Id.CharLiteral});
+ testTokenize("'c'", []Token.Id{Token.Id.CharLiteral});
}
test "tokenizer - invalid token characters" {
testTokenize("#", []Token.Id{Token.Id.Invalid});
testTokenize("`", []Token.Id{Token.Id.Invalid});
- testTokenize("'c", []Token.Id {Token.Id.Invalid});
- testTokenize("'", []Token.Id {Token.Id.Invalid});
- testTokenize("''", []Token.Id {Token.Id.Invalid, Token.Id.Invalid});
+ testTokenize("'c", []Token.Id{Token.Id.Invalid});
+ testTokenize("'", []Token.Id{Token.Id.Invalid});
+ testTokenize("''", []Token.Id{ Token.Id.Invalid, Token.Id.Invalid });
}
test "tokenizer - invalid literal/comment characters" {
- testTokenize("\"\x00\"", []Token.Id {
- Token.Id { .StringLiteral = Token.StrLitKind.Normal },
+ testTokenize("\"\x00\"", []Token.Id{
+ Token.Id{ .StringLiteral = Token.StrLitKind.Normal },
Token.Id.Invalid,
});
- testTokenize("//\x00", []Token.Id {
+ testTokenize("//\x00", []Token.Id{
+ Token.Id.LineComment,
Token.Id.Invalid,
});
- testTokenize("//\x1f", []Token.Id {
+ testTokenize("//\x1f", []Token.Id{
+ Token.Id.LineComment,
Token.Id.Invalid,
});
- testTokenize("//\x7f", []Token.Id {
+ testTokenize("//\x7f", []Token.Id{
+ Token.Id.LineComment,
Token.Id.Invalid,
});
}
test "tokenizer - utf8" {
- testTokenize("//\xc2\x80", []Token.Id{});
- testTokenize("//\xf4\x8f\xbf\xbf", []Token.Id{});
+ testTokenize("//\xc2\x80", []Token.Id{Token.Id.LineComment});
+ testTokenize("//\xf4\x8f\xbf\xbf", []Token.Id{Token.Id.LineComment});
}
test "tokenizer - invalid utf8" {
- testTokenize("//\x80", []Token.Id{Token.Id.Invalid});
- testTokenize("//\xbf", []Token.Id{Token.Id.Invalid});
- testTokenize("//\xf8", []Token.Id{Token.Id.Invalid});
- testTokenize("//\xff", []Token.Id{Token.Id.Invalid});
- testTokenize("//\xc2\xc0", []Token.Id{Token.Id.Invalid});
- testTokenize("//\xe0", []Token.Id{Token.Id.Invalid});
- testTokenize("//\xf0", []Token.Id{Token.Id.Invalid});
- testTokenize("//\xf0\x90\x80\xc0", []Token.Id{Token.Id.Invalid});
+ testTokenize("//\x80", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
+ testTokenize("//\xbf", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
+ testTokenize("//\xf8", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
+ testTokenize("//\xff", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
+ testTokenize("//\xc2\xc0", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
+ testTokenize("//\xe0", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
+ testTokenize("//\xf0", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
+ testTokenize("//\xf0\x90\x80\xc0", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
}
test "tokenizer - illegal unicode codepoints" {
// unicode newline characters. U+0085, U+2028, U+2029
- testTokenize("//\xc2\x84", []Token.Id{});
- testTokenize("//\xc2\x85", []Token.Id{Token.Id.Invalid});
- testTokenize("//\xc2\x86", []Token.Id{});
- testTokenize("//\xe2\x80\xa7", []Token.Id{});
- testTokenize("//\xe2\x80\xa8", []Token.Id{Token.Id.Invalid});
- testTokenize("//\xe2\x80\xa9", []Token.Id{Token.Id.Invalid});
- testTokenize("//\xe2\x80\xaa", []Token.Id{});
+ testTokenize("//\xc2\x84", []Token.Id{Token.Id.LineComment});
+ testTokenize("//\xc2\x85", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
+ testTokenize("//\xc2\x86", []Token.Id{Token.Id.LineComment});
+ testTokenize("//\xe2\x80\xa7", []Token.Id{Token.Id.LineComment});
+ testTokenize("//\xe2\x80\xa8", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
+ testTokenize("//\xe2\x80\xa9", []Token.Id{
+ Token.Id.LineComment,
+ Token.Id.Invalid,
+ });
+ testTokenize("//\xe2\x80\xaa", []Token.Id{Token.Id.LineComment});
}
test "tokenizer - string identifier and builtin fns" {
testTokenize(
\\const @"if" = @import("std");
- ,
- []Token.Id{
- Token.Id.Keyword_const,
- Token.Id.Identifier,
- Token.Id.Equal,
- Token.Id.Builtin,
- Token.Id.LParen,
- Token.Id {.StringLiteral = Token.StrLitKind.Normal},
- Token.Id.RParen,
- Token.Id.Semicolon,
- }
- );
+ , []Token.Id{
+ Token.Id.Keyword_const,
+ Token.Id.Identifier,
+ Token.Id.Equal,
+ Token.Id.Builtin,
+ Token.Id.LParen,
+ Token.Id{ .StringLiteral = Token.StrLitKind.Normal },
+ Token.Id.RParen,
+ Token.Id.Semicolon,
+ });
}
test "tokenizer - pipe and then invalid" {
@@ -1153,14 +1303,42 @@ test "tokenizer - pipe and then invalid" {
});
}
+test "tokenizer - line comment and doc comment" {
+ testTokenize("//", []Token.Id{Token.Id.LineComment});
+ testTokenize("// a / b", []Token.Id{Token.Id.LineComment});
+ testTokenize("// /", []Token.Id{Token.Id.LineComment});
+ testTokenize("/// a", []Token.Id{Token.Id.DocComment});
+ testTokenize("///", []Token.Id{Token.Id.DocComment});
+ testTokenize("////", []Token.Id{Token.Id.LineComment});
+}
+
+test "tokenizer - line comment followed by identifier" {
+ testTokenize(
+ \\ Unexpected,
+ \\ // another
+ \\ Another,
+ , []Token.Id{
+ Token.Id.Identifier,
+ Token.Id.Comma,
+ Token.Id.LineComment,
+ Token.Id.Identifier,
+ Token.Id.Comma,
+ });
+}
+
fn testTokenize(source: []const u8, expected_tokens: []const Token.Id) void {
var tokenizer = Tokenizer.init(source);
for (expected_tokens) |expected_token_id| {
const token = tokenizer.next();
- std.debug.assert(@TagType(Token.Id)(token.id) == @TagType(Token.Id)(expected_token_id));
+ if (@TagType(Token.Id)(token.id) != @TagType(Token.Id)(expected_token_id)) {
+ std.debug.panic("expected {}, found {}\n", @tagName(@TagType(Token.Id)(expected_token_id)), @tagName(@TagType(Token.Id)(token.id)));
+ }
switch (expected_token_id) {
Token.Id.StringLiteral => |expected_kind| {
- std.debug.assert(expected_kind == switch (token.id) { Token.Id.StringLiteral => |kind| kind, else => unreachable });
+ std.debug.assert(expected_kind == switch (token.id) {
+ Token.Id.StringLiteral => |kind| kind,
+ else => unreachable,
+ });
},
else => {},
}
diff --git a/test/assemble_and_link.zig b/test/assemble_and_link.zig
index 2593f3306a..8c727e87b5 100644
--- a/test/assemble_and_link.zig
+++ b/test/assemble_and_link.zig
@@ -1,7 +1,7 @@
const builtin = @import("builtin");
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompareOutputContext) void {
+pub fn addCases(cases: *tests.CompareOutputContext) void {
if (builtin.os == builtin.Os.linux and builtin.arch == builtin.Arch.x86_64) {
cases.addAsm("hello world linux x86_64",
\\.text
diff --git a/test/behavior.zig b/test/behavior.zig
index de39b20dad..e993d7e0dc 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -8,12 +8,18 @@ comptime {
_ = @import("cases/atomics.zig");
_ = @import("cases/bitcast.zig");
_ = @import("cases/bool.zig");
+ _ = @import("cases/bugs/1111.zig");
+ _ = @import("cases/bugs/1230.zig");
_ = @import("cases/bugs/394.zig");
_ = @import("cases/bugs/655.zig");
_ = @import("cases/bugs/656.zig");
_ = @import("cases/bugs/828.zig");
+ _ = @import("cases/bugs/920.zig");
+ _ = @import("cases/byval_arg_var.zig");
+ _ = @import("cases/cancel.zig");
_ = @import("cases/cast.zig");
_ = @import("cases/const_slice_child.zig");
+ _ = @import("cases/coroutine_await_struct.zig");
_ = @import("cases/coroutines.zig");
_ = @import("cases/defer.zig");
_ = @import("cases/enum.zig");
@@ -22,6 +28,7 @@ comptime {
_ = @import("cases/eval.zig");
_ = @import("cases/field_parent_ptr.zig");
_ = @import("cases/fn.zig");
+ _ = @import("cases/fn_in_struct_in_comptime.zig");
_ = @import("cases/for.zig");
_ = @import("cases/generics.zig");
_ = @import("cases/if.zig");
@@ -29,9 +36,14 @@ comptime {
_ = @import("cases/incomplete_struct_param_tld.zig");
_ = @import("cases/ir_block_deps.zig");
_ = @import("cases/math.zig");
+ _ = @import("cases/merge_error_sets.zig");
_ = @import("cases/misc.zig");
_ = @import("cases/namespace_depends_on_compile_var/index.zig");
+ _ = @import("cases/new_stack_call.zig");
_ = @import("cases/null.zig");
+ _ = @import("cases/optional.zig");
+ _ = @import("cases/pointers.zig");
+ _ = @import("cases/popcount.zig");
_ = @import("cases/pub_enum/index.zig");
_ = @import("cases/ref_var_in_if_after_if_2nd_switch_prong.zig");
_ = @import("cases/reflection.zig");
@@ -46,9 +58,12 @@ comptime {
_ = @import("cases/syntax.zig");
_ = @import("cases/this.zig");
_ = @import("cases/try.zig");
+ _ = @import("cases/type_info.zig");
_ = @import("cases/undefined.zig");
+ _ = @import("cases/underscore.zig");
_ = @import("cases/union.zig");
_ = @import("cases/var_args.zig");
_ = @import("cases/void.zig");
_ = @import("cases/while.zig");
+ _ = @import("cases/widening.zig");
}
diff --git a/test/build_examples.zig b/test/build_examples.zig
index a3b44b9136..79192c3e9a 100644
--- a/test/build_examples.zig
+++ b/test/build_examples.zig
@@ -2,20 +2,30 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");
const is_windows = builtin.os == builtin.Os.windows;
-pub fn addCases(cases: &tests.BuildExamplesContext) void {
+pub fn addCases(cases: *tests.BuildExamplesContext) void {
cases.add("example/hello_world/hello.zig");
cases.addC("example/hello_world/hello_libc.zig");
cases.add("example/cat/main.zig");
cases.add("example/guess_number/main.zig");
if (!is_windows) {
// TODO get this test passing on windows
- // See https://github.com/zig-lang/zig/issues/538
+ // See https://github.com/ziglang/zig/issues/538
cases.addBuildFile("example/shared_library/build.zig");
cases.addBuildFile("example/mix_o_files/build.zig");
}
- cases.addBuildFile("test/standalone/issue_339/build.zig");
+ if (builtin.os != builtin.Os.macosx) {
+ // TODO https://github.com/ziglang/zig/issues/1126
+ cases.addBuildFile("test/standalone/issue_339/build.zig");
+ }
cases.addBuildFile("test/standalone/issue_794/build.zig");
cases.addBuildFile("test/standalone/pkg_import/build.zig");
cases.addBuildFile("test/standalone/use_alias/build.zig");
cases.addBuildFile("test/standalone/brace_expansion/build.zig");
+ if (false) {
+ // TODO this test is disabled because it is failing on the CI server's Linux. When this is fixed,
+ // enable it for at least Linux.
+ // TODO hook up the DynLib API for windows using LoadLibraryA
+ // TODO figure out how to make this work on darwin - probably libSystem has dlopen/dlsym in it
+ cases.addBuildFile("test/standalone/load_dynamic_library/build.zig");
+ }
}
diff --git a/test/cases/align.zig b/test/cases/align.zig
index ad3a66a2e0..64f0788efc 100644
--- a/test/cases/align.zig
+++ b/test/cases/align.zig
@@ -5,34 +5,34 @@ var foo: u8 align(4) = 100;
test "global variable alignment" {
assert(@typeOf(&foo).alignment == 4);
- assert(@typeOf(&foo) == &align(4) u8);
- const slice = (&foo)[0..1];
+ assert(@typeOf(&foo) == *align(4) u8);
+ const slice = (*[1]u8)(&foo)[0..];
assert(@typeOf(slice) == []align(4) u8);
}
-fn derp() align(@sizeOf(usize) * 2) i32 { return 1234; }
+fn derp() align(@sizeOf(usize) * 2) i32 {
+ return 1234;
+}
fn noop1() align(1) void {}
fn noop4() align(4) void {}
test "function alignment" {
assert(derp() == 1234);
- assert(@typeOf(noop1) == fn() align(1) void);
- assert(@typeOf(noop4) == fn() align(4) void);
+ assert(@typeOf(noop1) == fn () align(1) void);
+ assert(@typeOf(noop4) == fn () align(4) void);
noop1();
noop4();
}
-
var baz: packed struct {
a: u32,
b: u32,
} = undefined;
test "packed struct alignment" {
- assert(@typeOf(&baz.b) == &align(1) u32);
+ assert(@typeOf(&baz.b) == *align(1) u32);
}
-
const blah: packed struct {
a: u3,
b: u3,
@@ -40,11 +40,11 @@ const blah: packed struct {
} = undefined;
test "bit field alignment" {
- assert(@typeOf(&blah.b) == &align(1:3:6) const u3);
+ assert(@typeOf(&blah.b) == *align(1:3:6) const u3);
}
test "default alignment allows unspecified in type syntax" {
- assert(&u32 == &align(@alignOf(u32)) u32);
+ assert(*u32 == *align(@alignOf(u32)) u32);
}
test "implicitly decreasing pointer alignment" {
@@ -53,30 +53,44 @@ test "implicitly decreasing pointer alignment" {
assert(addUnaligned(&a, &b) == 7);
}
-fn addUnaligned(a: &align(1) const u32, b: &align(1) const u32) u32 { return *a + *b; }
+fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
+ return a.* + b.*;
+}
test "implicitly decreasing slice alignment" {
const a: u32 align(4) = 3;
const b: u32 align(8) = 4;
- assert(addUnalignedSlice((&a)[0..1], (&b)[0..1]) == 7);
+ assert(addUnalignedSlice((*[1]u32)(&a)[0..], (*[1]u32)(&b)[0..]) == 7);
+}
+fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
+ return a[0] + b[0];
}
-fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 { return a[0] + b[0]; }
test "specifying alignment allows pointer cast" {
testBytesAlign(0x33);
}
fn testBytesAlign(b: u8) void {
- var bytes align(4) = []u8{b, b, b, b};
- const ptr = @ptrCast(&u32, &bytes[0]);
- assert(*ptr == 0x33333333);
+ var bytes align(4) = []u8{
+ b,
+ b,
+ b,
+ b,
+ };
+ const ptr = @ptrCast(*u32, &bytes[0]);
+ assert(ptr.* == 0x33333333);
}
test "specifying alignment allows slice cast" {
testBytesAlignSlice(0x33);
}
fn testBytesAlignSlice(b: u8) void {
- var bytes align(4) = []u8{b, b, b, b};
- const slice = ([]u32)(bytes[0..]);
+ var bytes align(4) = []u8{
+ b,
+ b,
+ b,
+ b,
+ };
+ const slice: []u32 = @bytesToSlice(u32, bytes[0..]);
assert(slice[0] == 0x33333333);
}
@@ -85,15 +99,18 @@ test "@alignCast pointers" {
expectsOnly1(&x);
assert(x == 2);
}
-fn expectsOnly1(x: &align(1) u32) void {
+fn expectsOnly1(x: *align(1) u32) void {
expects4(@alignCast(4, x));
}
-fn expects4(x: &align(4) u32) void {
- *x += 1;
+fn expects4(x: *align(4) u32) void {
+ x.* += 1;
}
test "@alignCast slices" {
- var array align(4) = []u32{1, 1};
+ var array align(4) = []u32{
+ 1,
+ 1,
+ };
const slice = array[0..];
sliceExpectsOnly1(slice);
assert(slice[0] == 2);
@@ -105,7 +122,6 @@ fn sliceExpects4(slice: []align(4) u32) void {
slice[0] += 1;
}
-
test "implicitly decreasing fn alignment" {
testImplicitlyDecreaseFnAlign(alignedSmall, 1234);
testImplicitlyDecreaseFnAlign(alignedBig, 5678);
@@ -115,21 +131,25 @@ fn testImplicitlyDecreaseFnAlign(ptr: fn () align(1) i32, answer: i32) void {
assert(ptr() == answer);
}
-fn alignedSmall() align(8) i32 { return 1234; }
-fn alignedBig() align(16) i32 { return 5678; }
-
+fn alignedSmall() align(8) i32 {
+ return 1234;
+}
+fn alignedBig() align(16) i32 {
+ return 5678;
+}
test "@alignCast functions" {
assert(fnExpectsOnly1(simple4) == 0x19);
}
-fn fnExpectsOnly1(ptr: fn()align(1) i32) i32 {
+fn fnExpectsOnly1(ptr: fn () align(1) i32) i32 {
return fnExpects4(@alignCast(4, ptr));
}
-fn fnExpects4(ptr: fn()align(4) i32) i32 {
+fn fnExpects4(ptr: fn () align(4) i32) i32 {
return ptr();
}
-fn simple4() align(4) i32 { return 0x19; }
-
+fn simple4() align(4) i32 {
+ return 0x19;
+}
test "generic function with align param" {
assert(whyWouldYouEverDoThis(1) == 0x1);
@@ -137,52 +157,53 @@ test "generic function with align param" {
assert(whyWouldYouEverDoThis(8) == 0x1);
}
-fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 { return 0x1; }
-
+fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 {
+ return 0x1;
+}
test "@ptrCast preserves alignment of bigger source" {
var x: u32 align(16) = 1234;
- const ptr = @ptrCast(&u8, &x);
- assert(@typeOf(ptr) == &align(16) u8);
+ const ptr = @ptrCast(*u8, &x);
+ assert(@typeOf(ptr) == *align(16) u8);
}
-
-test "compile-time known array index has best alignment possible" {
+test "runtime known array index has best alignment possible" {
// take full advantage of over-alignment
- var array align(4) = []u8 {1, 2, 3, 4};
- assert(@typeOf(&array[0]) == &align(4) u8);
- assert(@typeOf(&array[1]) == &u8);
- assert(@typeOf(&array[2]) == &align(2) u8);
- assert(@typeOf(&array[3]) == &u8);
+ var array align(4) = []u8{ 1, 2, 3, 4 };
+ assert(@typeOf(&array[0]) == *align(4) u8);
+ assert(@typeOf(&array[1]) == *u8);
+ assert(@typeOf(&array[2]) == *align(2) u8);
+ assert(@typeOf(&array[3]) == *u8);
// because align is too small but we still figure out to use 2
- var bigger align(2) = []u64{1, 2, 3, 4};
- assert(@typeOf(&bigger[0]) == &align(2) u64);
- assert(@typeOf(&bigger[1]) == &align(2) u64);
- assert(@typeOf(&bigger[2]) == &align(2) u64);
- assert(@typeOf(&bigger[3]) == &align(2) u64);
+ var bigger align(2) = []u64{ 1, 2, 3, 4 };
+ assert(@typeOf(&bigger[0]) == *align(2) u64);
+ assert(@typeOf(&bigger[1]) == *align(2) u64);
+ assert(@typeOf(&bigger[2]) == *align(2) u64);
+ assert(@typeOf(&bigger[3]) == *align(2) u64);
// because pointer is align 2 and u32 align % 2 == 0 we can assume align 2
- var smaller align(2) = []u32{1, 2, 3, 4};
- testIndex(&smaller[0], 0, &align(2) u32);
- testIndex(&smaller[0], 1, &align(2) u32);
- testIndex(&smaller[0], 2, &align(2) u32);
- testIndex(&smaller[0], 3, &align(2) u32);
+ var smaller align(2) = []u32{ 1, 2, 3, 4 };
+ comptime assert(@typeOf(smaller[0..]) == []align(2) u32);
+ comptime assert(@typeOf(smaller[0..].ptr) == [*]align(2) u32);
+ testIndex(smaller[0..].ptr, 0, *align(2) u32);
+ testIndex(smaller[0..].ptr, 1, *align(2) u32);
+ testIndex(smaller[0..].ptr, 2, *align(2) u32);
+ testIndex(smaller[0..].ptr, 3, *align(2) u32);
// has to use ABI alignment because index known at runtime only
- testIndex2(&array[0], 0, &u8);
- testIndex2(&array[0], 1, &u8);
- testIndex2(&array[0], 2, &u8);
- testIndex2(&array[0], 3, &u8);
+ testIndex2(array[0..].ptr, 0, *u8);
+ testIndex2(array[0..].ptr, 1, *u8);
+ testIndex2(array[0..].ptr, 2, *u8);
+ testIndex2(array[0..].ptr, 3, *u8);
}
-fn testIndex(smaller: &align(2) u32, index: usize, comptime T: type) void {
- assert(@typeOf(&smaller[index]) == T);
+fn testIndex(smaller: [*]align(2) u32, index: usize, comptime T: type) void {
+ comptime assert(@typeOf(&smaller[index]) == T);
}
-fn testIndex2(ptr: &align(4) u8, index: usize, comptime T: type) void {
- assert(@typeOf(&ptr[index]) == T);
+fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) void {
+ comptime assert(@typeOf(&ptr[index]) == T);
}
-
test "alignstack" {
assert(fnWithAlignedStack() == 1234);
}
diff --git a/test/cases/alignof.zig b/test/cases/alignof.zig
index 27b95c7fdc..130a2a5b44 100644
--- a/test/cases/alignof.zig
+++ b/test/cases/alignof.zig
@@ -1,7 +1,11 @@
const assert = @import("std").debug.assert;
const builtin = @import("builtin");
-const Foo = struct { x: u32, y: u32, z: u32, };
+const Foo = struct {
+ x: u32,
+ y: u32,
+ z: u32,
+};
test "@alignOf(T) before referencing T" {
comptime assert(@alignOf(Foo) != @maxValue(usize));
diff --git a/test/cases/array.zig b/test/cases/array.zig
index 577161dd16..b72491bcc0 100644
--- a/test/cases/array.zig
+++ b/test/cases/array.zig
@@ -2,9 +2,9 @@ const assert = @import("std").debug.assert;
const mem = @import("std").mem;
test "arrays" {
- var array : [5]u32 = undefined;
+ var array: [5]u32 = undefined;
- var i : u32 = 0;
+ var i: u32 = 0;
while (i < 5) {
array[i] = i + 1;
i = array[i];
@@ -34,24 +34,41 @@ test "void arrays" {
}
test "array literal" {
- const hex_mult = []u16{4096, 256, 16, 1};
+ const hex_mult = []u16{
+ 4096,
+ 256,
+ 16,
+ 1,
+ };
assert(hex_mult.len == 4);
assert(hex_mult[1] == 256);
}
test "array dot len const expr" {
- assert(comptime x: {break :x some_array.len == 4;});
+ assert(comptime x: {
+ break :x some_array.len == 4;
+ });
}
const ArrayDotLenConstExpr = struct {
y: [some_array.len]u8,
};
-const some_array = []u8 {0, 1, 2, 3};
-
+const some_array = []u8{
+ 0,
+ 1,
+ 2,
+ 3,
+};
test "nested arrays" {
- const array_of_strings = [][]const u8 {"hello", "this", "is", "my", "thing"};
+ const array_of_strings = [][]const u8{
+ "hello",
+ "this",
+ "is",
+ "my",
+ "thing",
+ };
for (array_of_strings) |s, i| {
if (i == 0) assert(mem.eql(u8, s, "hello"));
if (i == 1) assert(mem.eql(u8, s, "this"));
@@ -61,7 +78,6 @@ test "nested arrays" {
}
}
-
var s_array: [8]Sub = undefined;
const Sub = struct {
b: u8,
@@ -70,7 +86,7 @@ const Str = struct {
a: []Sub,
};
test "set global var array via slice embedded in struct" {
- var s = Str { .a = s_array[0..]};
+ var s = Str{ .a = s_array[0..] };
s.a[0].b = 1;
s.a[1].b = 2;
@@ -82,7 +98,10 @@ test "set global var array via slice embedded in struct" {
}
test "array literal with specified size" {
- var array = [2]u8{1, 2};
+ var array = [2]u8{
+ 1,
+ 2,
+ };
assert(array[0] == 1);
assert(array[1] == 2);
}
@@ -96,3 +115,48 @@ test "array len property" {
var x: [5]i32 = undefined;
assert(@typeOf(x).len == 5);
}
+
+test "array len field" {
+ var arr = [4]u8{ 0, 0, 0, 0 };
+ var ptr = &arr;
+ assert(arr.len == 4);
+ comptime assert(arr.len == 4);
+ assert(ptr.len == 4);
+ comptime assert(ptr.len == 4);
+}
+
+test "single-item pointer to array indexing and slicing" {
+ testSingleItemPtrArrayIndexSlice();
+ comptime testSingleItemPtrArrayIndexSlice();
+}
+
+fn testSingleItemPtrArrayIndexSlice() void {
+ var array = "aaaa";
+ doSomeMangling(&array);
+ assert(mem.eql(u8, "azya", array));
+}
+
+fn doSomeMangling(array: *[4]u8) void {
+ array[1] = 'z';
+ array[2..3][0] = 'y';
+}
+
+test "implicit cast single-item pointer" {
+ testImplicitCastSingleItemPtr();
+ comptime testImplicitCastSingleItemPtr();
+}
+
+fn testImplicitCastSingleItemPtr() void {
+ var byte: u8 = 100;
+ const slice = (*[1]u8)(&byte)[0..];
+ slice[0] += 1;
+ assert(byte == 101);
+}
+
+fn testArrayByValAtComptime(b: [2]u8) u8 {
+ return b[0];
+}
+
+test "comptime evaluating function that takes array by value" {
+ const arr = []u8{ 0, 1 };
+ _ = comptime testArrayByValAtComptime(arr);
+ _ = comptime testArrayByValAtComptime(arr);
+}
diff --git a/test/cases/atomics.zig b/test/cases/atomics.zig
index e8e81b76e6..67c9ab3dd1 100644
--- a/test/cases/atomics.zig
+++ b/test/cases/atomics.zig
@@ -1,12 +1,24 @@
-const assert = @import("std").debug.assert;
+const std = @import("std");
+const assert = std.debug.assert;
const builtin = @import("builtin");
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
test "cmpxchg" {
var x: i32 = 1234;
- while (!@cmpxchg(&x, 1234, 5678, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) {}
+ if (@cmpxchgWeak(i32, &x, 99, 5678, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ assert(x1 == 1234);
+ } else {
+ @panic("cmpxchg should have failed");
+ }
+
+ while (@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ assert(x1 == 1234);
+ }
assert(x == 5678);
+
+ assert(@cmpxchgStrong(i32, &x, 5678, 42, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null);
+ assert(x == 42);
}
test "fence" {
@@ -15,13 +27,45 @@ test "fence" {
x = 5678;
}
-test "atomicrmw" {
+test "atomicrmw and atomicload" {
var data: u8 = 200;
testAtomicRmw(&data);
assert(data == 42);
+ testAtomicLoad(&data);
}
-fn testAtomicRmw(ptr: &u8) void {
+fn testAtomicRmw(ptr: *u8) void {
const prev_value = @atomicRmw(u8, ptr, AtomicRmwOp.Xchg, 42, AtomicOrder.SeqCst);
assert(prev_value == 200);
+ comptime {
+ var x: i32 = 1234;
+ const y: i32 = 12345;
+ assert(@atomicLoad(i32, &x, AtomicOrder.SeqCst) == 1234);
+ assert(@atomicLoad(i32, &y, AtomicOrder.SeqCst) == 12345);
+ }
+}
+
+fn testAtomicLoad(ptr: *u8) void {
+ const x = @atomicLoad(u8, ptr, AtomicOrder.SeqCst);
+ assert(x == 42);
+}
+
+test "cmpxchg with ptr" {
+ var data1: i32 = 1234;
+ var data2: i32 = 5678;
+ var data3: i32 = 9101;
+ var x: *i32 = &data1;
+ if (@cmpxchgWeak(*i32, &x, &data2, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ assert(x1 == &data1);
+ } else {
+ @panic("cmpxchg should have failed");
+ }
+
+ while (@cmpxchgWeak(*i32, &x, &data1, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ assert(x1 == &data1);
+ }
+ assert(x == &data3);
+
+ assert(@cmpxchgStrong(*i32, &x, &data3, &data2, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null);
+ assert(x == &data2);
}
diff --git a/test/cases/bitcast.zig b/test/cases/bitcast.zig
index f1f2ccd672..878140954a 100644
--- a/test/cases/bitcast.zig
+++ b/test/cases/bitcast.zig
@@ -10,5 +10,9 @@ fn testBitCast_i32_u32() void {
assert(conv2(@maxValue(u32)) == -1);
}
-fn conv(x: i32) u32 { return @bitCast(u32, x); }
-fn conv2(x: u32) i32 { return @bitCast(i32, x); }
+fn conv(x: i32) u32 {
+ return @bitCast(u32, x);
+}
+fn conv2(x: u32) i32 {
+ return @bitCast(i32, x);
+}
diff --git a/test/cases/bool.zig b/test/cases/bool.zig
index 07d30454ee..3e4ac9c1cf 100644
--- a/test/cases/bool.zig
+++ b/test/cases/bool.zig
@@ -8,14 +8,14 @@ test "bool literals" {
test "cast bool to int" {
const t = true;
const f = false;
- assert(i32(t) == i32(1));
- assert(i32(f) == i32(0));
+ assert(@boolToInt(t) == u32(1));
+ assert(@boolToInt(f) == u32(0));
nonConstCastBoolToInt(t, f);
}
fn nonConstCastBoolToInt(t: bool, f: bool) void {
- assert(i32(t) == i32(1));
- assert(i32(f) == i32(0));
+ assert(@boolToInt(t) == u32(1));
+ assert(@boolToInt(f) == u32(0));
}
test "bool cmp" {
diff --git a/test/cases/bugs/1111.zig b/test/cases/bugs/1111.zig
new file mode 100644
index 0000000000..f62107f9a3
--- /dev/null
+++ b/test/cases/bugs/1111.zig
@@ -0,0 +1,12 @@
+const Foo = extern enum {
+ Bar = -1,
+};
+
+test "issue 1111 fixed" {
+ const v = Foo.Bar;
+
+ switch (v) {
+ Foo.Bar => return,
+ else => return,
+ }
+}
diff --git a/test/cases/bugs/1230.zig b/test/cases/bugs/1230.zig
new file mode 100644
index 0000000000..b782a77f0b
--- /dev/null
+++ b/test/cases/bugs/1230.zig
@@ -0,0 +1,14 @@
+const assert = @import("std").debug.assert;
+
+const S = extern struct {
+ x: i32,
+};
+
+extern fn ret_struct() S {
+ return S{ .x = 42 };
+}
+
+test "extern return small struct (bug 1230)" {
+ const s = ret_struct();
+ assert(s.x == 42);
+}
diff --git a/test/cases/bugs/394.zig b/test/cases/bugs/394.zig
index 071619d59c..b0afec2357 100644
--- a/test/cases/bugs/394.zig
+++ b/test/cases/bugs/394.zig
@@ -1,9 +1,18 @@
-const E = union(enum) { A: [9]u8, B: u64, };
-const S = struct { x: u8, y: E, };
+const E = union(enum) {
+ A: [9]u8,
+ B: u64,
+};
+const S = struct {
+ x: u8,
+ y: E,
+};
const assert = @import("std").debug.assert;
test "bug 394 fixed" {
- const x = S { .x = 3, .y = E {.B = 1 } };
+ const x = S{
+ .x = 3,
+ .y = E{ .B = 1 },
+ };
assert(x.x == 3);
}
diff --git a/test/cases/bugs/655.zig b/test/cases/bugs/655.zig
index e6a275004c..50374d4e6d 100644
--- a/test/cases/bugs/655.zig
+++ b/test/cases/bugs/655.zig
@@ -3,10 +3,10 @@ const other_file = @import("655_other_file.zig");
test "function with &const parameter with type dereferenced by namespace" {
const x: other_file.Integer = 1234;
- comptime std.debug.assert(@typeOf(&x) == &const other_file.Integer);
+ comptime std.debug.assert(@typeOf(&x) == *const other_file.Integer);
foo(x);
}
-fn foo(x: &const other_file.Integer) void {
- std.debug.assert(*x == 1234);
+fn foo(x: *const other_file.Integer) void {
+ std.debug.assert(x.* == 1234);
}
diff --git a/test/cases/bugs/656.zig b/test/cases/bugs/656.zig
index ce3eec8046..f93f0ac4d5 100644
--- a/test/cases/bugs/656.zig
+++ b/test/cases/bugs/656.zig
@@ -9,17 +9,18 @@ const Value = struct {
align_expr: ?u32,
};
-test "nullable if after an if in a switch prong of a switch with 2 prongs in an else" {
+test "optional if after an if in a switch prong of a switch with 2 prongs in an else" {
foo(false, true);
}
fn foo(a: bool, b: bool) void {
- var prefix_op = PrefixOp { .AddrOf = Value { .align_expr = 1234 } };
- if (a) {
- } else {
+ var prefix_op = PrefixOp{
+ .AddrOf = Value{ .align_expr = 1234 },
+ };
+ if (a) {} else {
switch (prefix_op) {
PrefixOp.AddrOf => |addr_of_info| {
- if (b) { }
+ if (b) {}
if (addr_of_info.align_expr) |align_expr| {
assert(align_expr == 1234);
}
diff --git a/test/cases/bugs/828.zig b/test/cases/bugs/828.zig
index c46548cb7a..50ae0fd279 100644
--- a/test/cases/bugs/828.zig
+++ b/test/cases/bugs/828.zig
@@ -1,31 +1,27 @@
const CountBy = struct {
a: usize,
-
- const One = CountBy {
- .a = 1,
- };
-
- pub fn counter(self: &const CountBy) Counter {
- return Counter {
- .i = 0,
- };
+
+ const One = CountBy{ .a = 1 };
+
+ pub fn counter(self: *const CountBy) Counter {
+ return Counter{ .i = 0 };
}
};
const Counter = struct {
i: usize,
-
- pub fn count(self: &Counter) bool {
+
+ pub fn count(self: *Counter) bool {
self.i += 1;
return self.i <= 10;
}
};
-fn constCount(comptime cb: &const CountBy, comptime unused: u32) void {
+fn constCount(comptime cb: *const CountBy, comptime unused: u32) void {
comptime {
var cnt = cb.counter();
- if(cnt.i != 0) @compileError("Counter instance reused!");
- while(cnt.count()){}
+ if (cnt.i != 0) @compileError("Counter instance reused!");
+ while (cnt.count()) {}
}
}
diff --git a/test/cases/bugs/920.zig b/test/cases/bugs/920.zig
new file mode 100644
index 0000000000..2903f05a29
--- /dev/null
+++ b/test/cases/bugs/920.zig
@@ -0,0 +1,65 @@
+const std = @import("std");
+const math = std.math;
+const Random = std.rand.Random;
+
+const ZigTable = struct {
+ r: f64,
+ x: [257]f64,
+ f: [257]f64,
+
+ pdf: fn (f64) f64,
+ is_symmetric: bool,
+ zero_case: fn (*Random, f64) f64,
+};
+
+fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, comptime zero_case: fn (*Random, f64) f64) ZigTable {
+ var tables: ZigTable = undefined;
+
+ tables.is_symmetric = is_symmetric;
+ tables.r = r;
+ tables.pdf = f;
+ tables.zero_case = zero_case;
+
+ tables.x[0] = v / f(r);
+ tables.x[1] = r;
+
+ for (tables.x[2..256]) |*entry, i| {
+ const last = tables.x[2 + i - 1];
+ entry.* = f_inv(v / last + f(last));
+ }
+ tables.x[256] = 0;
+
+ for (tables.f[0..]) |*entry, i| {
+ entry.* = f(tables.x[i]);
+ }
+
+ return tables;
+}
+
+const norm_r = 3.6541528853610088;
+const norm_v = 0.00492867323399;
+
+fn norm_f(x: f64) f64 {
+ return math.exp(-x * x / 2.0);
+}
+fn norm_f_inv(y: f64) f64 {
+ return math.sqrt(-2.0 * math.ln(y));
+}
+fn norm_zero_case(random: *Random, u: f64) f64 {
+ return 0.0;
+}
+
+const NormalDist = blk: {
+ @setEvalBranchQuota(30000);
+ break :blk ZigTableGen(true, norm_r, norm_v, norm_f, norm_f_inv, norm_zero_case);
+};
+
+test "bug 920 fixed" {
+ const NormalDist1 = blk: {
+ break :blk ZigTableGen(true, norm_r, norm_v, norm_f, norm_f_inv, norm_zero_case);
+ };
+
+ for (NormalDist1.f) |_, i| {
+ std.debug.assert(NormalDist1.f[i] == NormalDist.f[i]);
+ }
+}
diff --git a/test/cases/byval_arg_var.zig b/test/cases/byval_arg_var.zig
new file mode 100644
index 0000000000..826b9cc9e5
--- /dev/null
+++ b/test/cases/byval_arg_var.zig
@@ -0,0 +1,27 @@
+const std = @import("std");
+
+var result: []const u8 = "wrong";
+
+test "aoeu" {
+ start();
+ blowUpStack(10);
+
+ std.debug.assert(std.mem.eql(u8, result, "string literal"));
+}
+
+fn start() void {
+ foo("string literal");
+}
+
+fn foo(x: var) void {
+ bar(x);
+}
+
+fn bar(x: var) void {
+ result = x;
+}
+
+fn blowUpStack(x: u32) void {
+ if (x == 0) return;
+ blowUpStack(x - 1);
+}
diff --git a/test/cases/cancel.zig b/test/cases/cancel.zig
new file mode 100644
index 0000000000..c0f74fd34f
--- /dev/null
+++ b/test/cases/cancel.zig
@@ -0,0 +1,92 @@
+const std = @import("std");
+
+var defer_f1: bool = false;
+var defer_f2: bool = false;
+var defer_f3: bool = false;
+
+test "cancel forwards" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const p = async<&da.allocator> f1() catch unreachable;
+ cancel p;
+ std.debug.assert(defer_f1);
+ std.debug.assert(defer_f2);
+ std.debug.assert(defer_f3);
+}
+
+async fn f1() void {
+ defer {
+ defer_f1 = true;
+ }
+ await (async f2() catch unreachable);
+}
+
+async fn f2() void {
+ defer {
+ defer_f2 = true;
+ }
+ await (async f3() catch unreachable);
+}
+
+async fn f3() void {
+ defer {
+ defer_f3 = true;
+ }
+ suspend;
+}
+
+var defer_b1: bool = false;
+var defer_b2: bool = false;
+var defer_b3: bool = false;
+var defer_b4: bool = false;
+
+test "cancel backwards" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const p = async<&da.allocator> b1() catch unreachable;
+ cancel p;
+ std.debug.assert(defer_b1);
+ std.debug.assert(defer_b2);
+ std.debug.assert(defer_b3);
+ std.debug.assert(defer_b4);
+}
+
+async fn b1() void {
+ defer {
+ defer_b1 = true;
+ }
+ await (async b2() catch unreachable);
+}
+
+var b4_handle: promise = undefined;
+
+async fn b2() void {
+ const b3_handle = async b3() catch unreachable;
+ resume b4_handle;
+ cancel b4_handle;
+ defer {
+ defer_b2 = true;
+ }
+ const value = await b3_handle;
+ @panic("unreachable");
+}
+
+async fn b3() i32 {
+ defer {
+ defer_b3 = true;
+ }
+ await (async b4() catch unreachable);
+ return 1234;
+}
+
+async fn b4() void {
+ defer {
+ defer_b4 = true;
+ }
+ suspend {
+ b4_handle = @handle();
+ }
+ suspend;
+}
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index 024ece0055..63cc6313e1 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -1,23 +1,24 @@
-const assert = @import("std").debug.assert;
-const mem = @import("std").mem;
+const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
test "int to ptr cast" {
const x = usize(13);
- const y = @intToPtr(&u8, x);
+ const y = @intToPtr(*u8, x);
const z = @ptrToInt(y);
assert(z == 13);
}
test "integer literal to pointer cast" {
- const vga_mem = @intToPtr(&u16, 0xB8000);
+ const vga_mem = @intToPtr(*u16, 0xB8000);
assert(@ptrToInt(vga_mem) == 0xB8000);
}
test "pointer reinterpret const float to int" {
const float: f64 = 5.99999999999994648725e-01;
const float_ptr = &float;
- const int_ptr = @ptrCast(&const i32, float_ptr);
- const int_val = *int_ptr;
+ const int_ptr = @ptrCast(*const i32, float_ptr);
+ const int_val = int_ptr.*;
assert(int_val == 858993411);
}
@@ -28,26 +29,26 @@ test "implicitly cast a pointer to a const pointer of it" {
assert(x == 2);
}
-fn funcWithConstPtrPtr(x: &const &i32) void {
- **x += 1;
+fn funcWithConstPtrPtr(x: *const *i32) void {
+ x.*.* += 1;
}
test "implicitly cast a container to a const pointer of it" {
- const z = Struct(void) { .x = void{} };
+ const z = Struct(void){ .x = void{} };
assert(0 == @sizeOf(@typeOf(z)));
assert(void{} == Struct(void).pointer(z).x);
assert(void{} == Struct(void).pointer(&z).x);
assert(void{} == Struct(void).maybePointer(z).x);
assert(void{} == Struct(void).maybePointer(&z).x);
assert(void{} == Struct(void).maybePointer(null).x);
- const s = Struct(u8) { .x = 42 };
+ const s = Struct(u8){ .x = 42 };
assert(0 != @sizeOf(@typeOf(s)));
assert(42 == Struct(u8).pointer(s).x);
assert(42 == Struct(u8).pointer(&s).x);
assert(42 == Struct(u8).maybePointer(s).x);
assert(42 == Struct(u8).maybePointer(&s).x);
assert(0 == Struct(u8).maybePointer(null).x);
- const u = Union { .x = 42 };
+ const u = Union{ .x = 42 };
assert(42 == Union.pointer(u).x);
assert(42 == Union.pointer(&u).x);
assert(42 == Union.maybePointer(u).x);
@@ -66,13 +67,13 @@ fn Struct(comptime T: type) type {
const Self = this;
x: T,
- fn pointer(self: &const Self) Self {
- return *self;
+ fn pointer(self: *const Self) Self {
+ return self.*;
}
- fn maybePointer(self: ?&const Self) Self {
- const none = Self { .x = if (T == void) void{} else 0 };
- return *(self ?? &none);
+ fn maybePointer(self: ?*const Self) Self {
+ const none = Self{ .x = if (T == void) void{} else 0 };
+ return (self orelse &none).*;
}
};
}
@@ -80,13 +81,13 @@ fn Struct(comptime T: type) type {
const Union = union {
x: u8,
- fn pointer(self: &const Union) Union {
- return *self;
+ fn pointer(self: *const Union) Union {
+ return self.*;
}
- fn maybePointer(self: ?&const Union) Union {
- const none = Union { .x = 0 };
- return *(self ?? &none);
+ fn maybePointer(self: ?*const Union) Union {
+ const none = Union{ .x = 0 };
+ return (self orelse &none).*;
}
};
@@ -94,12 +95,12 @@ const Enum = enum {
None,
Some,
- fn pointer(self: &const Enum) Enum {
- return *self;
+ fn pointer(self: *const Enum) Enum {
+ return self.*;
}
- fn maybePointer(self: ?&const Enum) Enum {
- return *(self ?? &Enum.None);
+ fn maybePointer(self: ?*const Enum) Enum {
+ return (self orelse &Enum.None).*;
}
};
@@ -107,20 +108,20 @@ test "implicitly cast indirect pointer to maybe-indirect pointer" {
const S = struct {
const Self = this;
x: u8,
- fn constConst(p: &const &const Self) u8 {
- return (*p).x;
+ fn constConst(p: *const *const Self) u8 {
+ return p.*.x;
}
- fn maybeConstConst(p: ?&const &const Self) u8 {
- return (*??p).x;
+ fn maybeConstConst(p: ?*const *const Self) u8 {
+ return p.?.*.x;
}
- fn constConstConst(p: &const &const &const Self) u8 {
- return (**p).x;
+ fn constConstConst(p: *const *const *const Self) u8 {
+ return p.*.*.x;
}
- fn maybeConstConstConst(p: ?&const &const &const Self) u8 {
- return (**??p).x;
+ fn maybeConstConstConst(p: ?*const *const *const Self) u8 {
+ return p.?.*.*.x;
}
};
- const s = S { .x = 42 };
+ const s = S{ .x = 42 };
const p = &s;
const q = &p;
const r = &q;
@@ -139,8 +140,8 @@ test "explicit cast from integer to error type" {
comptime testCastIntToErr(error.ItBroke);
}
fn testCastIntToErr(err: error) void {
- const x = usize(err);
- const y = error(x);
+ const x = @errorToInt(err);
+ const y = @intToError(x);
assert(error.ItBroke == y);
}
@@ -154,7 +155,6 @@ fn boolToStr(b: bool) []const u8 {
return if (b) "true" else "false";
}
-
test "peer resolve array and const slice" {
testPeerResolveArrayConstSlice(true);
comptime testPeerResolveArrayConstSlice(true);
@@ -167,67 +167,66 @@ fn testPeerResolveArrayConstSlice(b: bool) void {
}
test "integer literal to &const int" {
- const x: &const i32 = 3;
- assert(*x == 3);
+ const x: *const i32 = 3;
+ assert(x.* == 3);
}
test "string literal to &const []const u8" {
- const x: &const []const u8 = "hello";
- assert(mem.eql(u8, *x, "hello"));
+ const x: *const []const u8 = "hello";
+ assert(mem.eql(u8, x.*, "hello"));
}
test "implicitly cast from T to error!?T" {
- castToMaybeTypeError(1);
- comptime castToMaybeTypeError(1);
+ castToOptionalTypeError(1);
+ comptime castToOptionalTypeError(1);
}
const A = struct {
a: i32,
};
-fn castToMaybeTypeError(z: i32) void {
+fn castToOptionalTypeError(z: i32) void {
const x = i32(1);
const y: error!?i32 = x;
- assert(??(try y) == 1);
+ assert((try y).? == 1);
const f = z;
const g: error!?i32 = f;
const a = A{ .a = z };
const b: error!?A = a;
- assert((??(b catch unreachable)).a == 1);
+ assert((b catch unreachable).?.a == 1);
}
test "implicitly cast from int to error!?T" {
- implicitIntLitToMaybe();
- comptime implicitIntLitToMaybe();
+ implicitIntLitToOptional();
+ comptime implicitIntLitToOptional();
}
-fn implicitIntLitToMaybe() void {
+fn implicitIntLitToOptional() void {
const f: ?i32 = 1;
const g: error!?i32 = 1;
}
-
test "return null from fn() error!?&T" {
- const a = returnNullFromMaybeTypeErrorRef();
- const b = returnNullLitFromMaybeTypeErrorRef();
+ const a = returnNullFromOptionalTypeErrorRef();
+ const b = returnNullLitFromOptionalTypeErrorRef();
assert((try a) == null and (try b) == null);
}
-fn returnNullFromMaybeTypeErrorRef() error!?&A {
- const a: ?&A = null;
+fn returnNullFromOptionalTypeErrorRef() error!?*A {
+ const a: ?*A = null;
return a;
}
-fn returnNullLitFromMaybeTypeErrorRef() error!?&A {
+fn returnNullLitFromOptionalTypeErrorRef() error!?*A {
return null;
}
test "peer type resolution: ?T and T" {
- assert(??peerTypeTAndMaybeT(true, false) == 0);
- assert(??peerTypeTAndMaybeT(false, false) == 3);
+ assert(peerTypeTAndOptionalT(true, false).? == 0);
+ assert(peerTypeTAndOptionalT(false, false).? == 3);
comptime {
- assert(??peerTypeTAndMaybeT(true, false) == 0);
- assert(??peerTypeTAndMaybeT(false, false) == 3);
+ assert(peerTypeTAndOptionalT(true, false).? == 0);
+ assert(peerTypeTAndOptionalT(false, false).? == 3);
}
}
-fn peerTypeTAndMaybeT(c: bool, b: bool) ?usize {
+fn peerTypeTAndOptionalT(c: bool, b: bool) ?usize {
if (c) {
return if (b) null else usize(0);
}
@@ -235,7 +234,6 @@ fn peerTypeTAndMaybeT(c: bool, b: bool) ?usize {
return usize(3);
}
-
test "peer type resolution: [0]u8 and []const u8" {
assert(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
assert(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
@@ -246,22 +244,21 @@ test "peer type resolution: [0]u8 and []const u8" {
}
fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
if (a) {
- return []const u8 {};
+ return []const u8{};
}
return slice[0..1];
}
test "implicitly cast from [N]T to ?[]const T" {
- assert(mem.eql(u8, ??castToMaybeSlice(), "hi"));
- comptime assert(mem.eql(u8, ??castToMaybeSlice(), "hi"));
+ assert(mem.eql(u8, castToOptionalSlice().?, "hi"));
+ comptime assert(mem.eql(u8, castToOptionalSlice().?, "hi"));
}
-fn castToMaybeSlice() ?[]const u8 {
+fn castToOptionalSlice() ?[]const u8 {
return "hi";
}
-
test "implicitly cast from [0]T to error![]T" {
testCastZeroArrayToErrSliceMut();
comptime testCastZeroArrayToErrSliceMut();
@@ -316,25 +313,15 @@ test "implicit cast from &const [N]T to []const T" {
fn testCastConstArrayRefToConstSlice() void {
const blah = "aoeu";
const const_array_ref = &blah;
- assert(@typeOf(const_array_ref) == &const [4]u8);
+ assert(@typeOf(const_array_ref) == *const [4]u8);
const slice: []const u8 = const_array_ref;
assert(mem.eql(u8, slice, "aoeu"));
}
-test "var args implicitly casts by value arg to const ref" {
- foo("hello");
-}
-
-fn foo(args: ...) void {
- assert(@typeOf(args[0]) == &const [5]u8);
-}
-
-
test "peer type resolution: error and [N]T" {
// TODO: implicit error!T to error!U where T can implicitly cast to U
//assert(mem.eql(u8, try testPeerErrorAndArray(0), "OK"));
//comptime assert(mem.eql(u8, try testPeerErrorAndArray(0), "OK"));
-
assert(mem.eql(u8, try testPeerErrorAndArray2(1), "OKK"));
comptime assert(mem.eql(u8, try testPeerErrorAndArray2(1), "OKK"));
}
@@ -353,11 +340,26 @@ fn testPeerErrorAndArray2(x: u8) error![]const u8 {
};
}
-test "explicit cast float number literal to integer if no fraction component" {
+test "@floatToInt" {
+ testFloatToInts();
+ comptime testFloatToInts();
+}
+
+fn testFloatToInts() void {
const x = i32(1e4);
assert(x == 10000);
- const y = i32(f32(1e4));
+ const y = @floatToInt(i32, f32(1e4));
assert(y == 10000);
+ expectFloatToInt(f16, 255.1, u8, 255);
+ expectFloatToInt(f16, 127.2, i8, 127);
+ expectFloatToInt(f16, -128.2, i8, -128);
+ expectFloatToInt(f32, 255.1, u8, 255);
+ expectFloatToInt(f32, 127.2, i8, 127);
+ expectFloatToInt(f32, -128.2, i8, -128);
+}
+
+fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) void {
+ assert(@floatToInt(I, f) == i);
}
test "cast u128 to f128 and back" {
@@ -378,10 +380,108 @@ fn cast128Float(x: u128) f128 {
}
test "const slice widen cast" {
- const bytes align(4) = []u8{0x12, 0x12, 0x12, 0x12};
+ const bytes align(4) = []u8{
+ 0x12,
+ 0x12,
+ 0x12,
+ 0x12,
+ };
- const u32_value = ([]const u32)(bytes[0..])[0];
+ const u32_value = @bytesToSlice(u32, bytes[0..])[0];
assert(u32_value == 0x12121212);
assert(@bitCast(u32, bytes) == 0x12121212);
}
+
+test "single-item pointer of array to slice and to unknown length pointer" {
+ testCastPtrOfArrayToSliceAndPtr();
+ comptime testCastPtrOfArrayToSliceAndPtr();
+}
+
+fn testCastPtrOfArrayToSliceAndPtr() void {
+ var array = "ao" ++ "eu"; // TODO https://github.com/ziglang/zig/issues/1076
+ const x: [*]u8 = &array;
+ x[0] += 1;
+ assert(mem.eql(u8, array[0..], "boeu"));
+ const y: []u8 = &array;
+ y[0] += 1;
+ assert(mem.eql(u8, array[0..], "coeu"));
+}
+
+test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
+ const window_name = [1][*]const u8{c"window name"};
+ const x: [*]const ?[*]const u8 = &window_name;
+ assert(mem.eql(u8, std.cstr.toSliceConst(x[0].?), "window name"));
+}
+
+test "@intCast comptime_int" {
+ const result = @intCast(i32, 1234);
+ assert(@typeOf(result) == i32);
+ assert(result == 1234);
+}
+
+test "@floatCast comptime_int and comptime_float" {
+ {
+ const result = @floatCast(f16, 1234);
+ assert(@typeOf(result) == f16);
+ assert(result == 1234.0);
+ }
+ {
+ const result = @floatCast(f16, 1234.0);
+ assert(@typeOf(result) == f16);
+ assert(result == 1234.0);
+ }
+ {
+ const result = @floatCast(f32, 1234);
+ assert(@typeOf(result) == f32);
+ assert(result == 1234.0);
+ }
+ {
+ const result = @floatCast(f32, 1234.0);
+ assert(@typeOf(result) == f32);
+ assert(result == 1234.0);
+ }
+}
+
+test "comptime_int @intToFloat" {
+ {
+ const result = @intToFloat(f16, 1234);
+ assert(@typeOf(result) == f16);
+ assert(result == 1234.0);
+ }
+ {
+ const result = @intToFloat(f32, 1234);
+ assert(@typeOf(result) == f32);
+ assert(result == 1234.0);
+ }
+}
+
+test "@bytesToSlice keeps pointer alignment" {
+ var bytes = []u8{ 0x01, 0x02, 0x03, 0x04 };
+ const numbers = @bytesToSlice(u32, bytes[0..]);
+ comptime assert(@typeOf(numbers) == []align(@alignOf(@typeOf(bytes))) u32);
+}
+
+test "@intCast i32 to u7" {
+ var x: u128 = @maxValue(u128);
+ var y: i32 = 120;
+ var z = x >> @intCast(u7, y);
+ assert(z == 0xff);
+}
+
+test "implicit cast undefined to optional" {
+ assert(MakeType(void).getNull() == null);
+ assert(MakeType(void).getNonNull() != null);
+}
+
+fn MakeType(comptime T: type) type {
+ return struct {
+ fn getNull() ?T {
+ return null;
+ }
+
+ fn getNonNull() ?T {
+ return T(undefined);
+ }
+ };
+}
diff --git a/test/cases/const_slice_child.zig b/test/cases/const_slice_child.zig
index 456b115234..07d02d5df0 100644
--- a/test/cases/const_slice_child.zig
+++ b/test/cases/const_slice_child.zig
@@ -1,15 +1,16 @@
const debug = @import("std").debug;
const assert = debug.assert;
-var argv: &const &const u8 = undefined;
+var argv: [*]const [*]const u8 = undefined;
test "const slice child" {
- const strs = ([]&const u8) {
+ const strs = ([][*]const u8){
c"one",
c"two",
c"three",
};
- argv = &strs[0];
+ // TODO this should implicitly cast
+ argv = @ptrCast([*]const [*]const u8, &strs);
bar(strs.len);
}
@@ -29,7 +30,7 @@ fn bar(argc: usize) void {
foo(args);
}
-fn strlen(ptr: &const u8) usize {
+fn strlen(ptr: [*]const u8) usize {
var count: usize = 0;
while (ptr[count] != 0) : (count += 1) {}
return count;
diff --git a/test/cases/coroutine_await_struct.zig b/test/cases/coroutine_await_struct.zig
new file mode 100644
index 0000000000..79168715d8
--- /dev/null
+++ b/test/cases/coroutine_await_struct.zig
@@ -0,0 +1,47 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+
+const Foo = struct {
+ x: i32,
+};
+
+var await_a_promise: promise = undefined;
+var await_final_result = Foo{ .x = 0 };
+
+test "coroutine await struct" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ await_seq('a');
+ const p = async<&da.allocator> await_amain() catch unreachable;
+ await_seq('f');
+ resume await_a_promise;
+ await_seq('i');
+ assert(await_final_result.x == 1234);
+ assert(std.mem.eql(u8, await_points, "abcdefghi"));
+}
+async fn await_amain() void {
+ await_seq('b');
+ const p = async await_another() catch unreachable;
+ await_seq('e');
+ await_final_result = await p;
+ await_seq('h');
+}
+async fn await_another() Foo {
+ await_seq('c');
+ suspend {
+ await_seq('d');
+ await_a_promise = @handle();
+ }
+ await_seq('g');
+ return Foo{ .x = 1234 };
+}
+
+var await_points = []u8{0} ** "abcdefghi".len;
+var await_seq_index: usize = 0;
+
+fn await_seq(c: u8) void {
+ await_points[await_seq_index] = c;
+ await_seq_index += 1;
+}
diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig
index 6d28b98c9d..bd6b6abf6f 100644
--- a/test/cases/coroutines.zig
+++ b/test/cases/coroutines.zig
@@ -5,12 +5,14 @@ const assert = std.debug.assert;
var x: i32 = 1;
test "create a coroutine and cancel it" {
- const p = try async simpleAsyncFn();
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const p = try async<&da.allocator> simpleAsyncFn();
comptime assert(@typeOf(p) == promise->void);
cancel p;
assert(x == 2);
}
-
async fn simpleAsyncFn() void {
x += 1;
suspend;
@@ -18,8 +20,11 @@ async fn simpleAsyncFn() void {
}
test "coroutine suspend, resume, cancel" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
seq('a');
- const p = try async testAsyncSeq();
+ const p = try async<&da.allocator> testAsyncSeq();
seq('c');
resume p;
seq('f');
@@ -28,7 +33,6 @@ test "coroutine suspend, resume, cancel" {
assert(std.mem.eql(u8, points, "abcdefg"));
}
-
async fn testAsyncSeq() void {
defer seq('e');
@@ -45,7 +49,10 @@ fn seq(c: u8) void {
}
test "coroutine suspend with block" {
- const p = try async testSuspendBlock();
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const p = try async<&da.allocator> testSuspendBlock();
std.debug.assert(!result);
resume a_promise;
std.debug.assert(result);
@@ -54,12 +61,16 @@ test "coroutine suspend with block" {
var a_promise: promise = undefined;
var result = false;
-
async fn testSuspendBlock() void {
- suspend |p| {
- comptime assert(@typeOf(p) == promise->void);
- a_promise = p;
+ suspend {
+ comptime assert(@typeOf(@handle()) == promise->void);
+ a_promise = @handle();
}
+
+ // Test to make sure that @handle() works as advertised (issue #1296)
+ //var our_handle: promise = @handle();
+ assert(a_promise == @handle());
+
result = true;
}
@@ -67,15 +78,17 @@ var await_a_promise: promise = undefined;
var await_final_result: i32 = 0;
test "coroutine await" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
await_seq('a');
- const p = async await_amain() catch unreachable;
+ const p = async<&da.allocator> await_amain() catch unreachable;
await_seq('f');
resume await_a_promise;
await_seq('i');
assert(await_final_result == 1234);
assert(std.mem.eql(u8, await_points, "abcdefghi"));
}
-
async fn await_amain() void {
await_seq('b');
const p = async await_another() catch unreachable;
@@ -83,12 +96,11 @@ async fn await_amain() void {
await_final_result = await p;
await_seq('h');
}
-
async fn await_another() i32 {
await_seq('c');
- suspend |p| {
+ suspend {
await_seq('d');
- await_a_promise = p;
+ await_a_promise = @handle();
}
await_seq('g');
return 1234;
@@ -102,25 +114,25 @@ fn await_seq(c: u8) void {
await_seq_index += 1;
}
-
var early_final_result: i32 = 0;
test "coroutine await early return" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
early_seq('a');
- const p = async early_amain() catch unreachable;
+ const p = async<&da.allocator> early_amain() catch @panic("out of memory");
early_seq('f');
assert(early_final_result == 1234);
assert(std.mem.eql(u8, early_points, "abcdef"));
}
-
async fn early_amain() void {
early_seq('b');
- const p = async early_another() catch unreachable;
+ const p = async early_another() catch @panic("out of memory");
early_seq('d');
early_final_result = await p;
early_seq('e');
}
-
async fn early_another() i32 {
early_seq('c');
return 1234;
@@ -142,7 +154,6 @@ test "coro allocation failure" {
error.OutOfMemory => {},
}
}
-
async fn asyncFuncThatNeverGetsRun() void {
@panic("coro frame allocation should fail");
}
@@ -155,7 +166,9 @@ test "async function with dot syntax" {
suspend;
}
};
- const p = try async S.foo();
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+ const p = try async<&da.allocator> S.foo();
cancel p;
assert(S.y == 2);
}
@@ -163,29 +176,29 @@ test "async function with dot syntax" {
test "async fn pointer in a struct field" {
var data: i32 = 1;
const Foo = struct {
- bar: async<&std.mem.Allocator> fn(&i32) void,
+ bar: async<*std.mem.Allocator> fn (*i32) void,
};
- var foo = Foo {
- .bar = simpleAsyncFn2,
- };
- const p = (async foo.bar(&data)) catch unreachable;
+ var foo = Foo{ .bar = simpleAsyncFn2 };
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+ const p = (async<&da.allocator> foo.bar(&data)) catch unreachable;
assert(data == 2);
cancel p;
assert(data == 4);
}
-
-async<&std.mem.Allocator> fn simpleAsyncFn2(y: &i32) void {
- defer *y += 2;
- *y += 1;
+async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void {
+ defer y.* += 2;
+ y.* += 1;
suspend;
}
test "async fn with inferred error set" {
- const p = (async failing()) catch unreachable;
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+ const p = (async<&da.allocator> failing()) catch unreachable;
resume p;
cancel p;
}
-
async fn failing() !void {
suspend;
return error.Fail;
@@ -194,7 +207,9 @@ async fn failing() !void {
test "error return trace across suspend points - early return" {
const p = nonFailing();
resume p;
- const p2 = try async printTrace(p);
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+ const p2 = try async<&da.allocator> printTrace(p);
cancel p2;
}
@@ -205,22 +220,39 @@ test "error return trace across suspend points - async return" {
cancel p2;
}
-fn nonFailing() promise->error!void {
+// TODO https://github.com/ziglang/zig/issues/760
+fn nonFailing() (promise->error!void) {
return async suspendThenFail() catch unreachable;
}
-
async fn suspendThenFail() error!void {
suspend;
return error.Fail;
}
-
async fn printTrace(p: promise->error!void) void {
(await p) catch |e| {
std.debug.assert(e == error.Fail);
if (@errorReturnTrace()) |trace| {
assert(trace.index == 1);
- } else if (builtin.mode != builtin.Mode.ReleaseFast) {
- @panic("expected return trace");
+ } else switch (builtin.mode) {
+ builtin.Mode.Debug, builtin.Mode.ReleaseSafe => @panic("expected return trace"),
+ builtin.Mode.ReleaseFast, builtin.Mode.ReleaseSmall => {},
}
};
}
+
+test "break from suspend" {
+ var buf: [500]u8 = undefined;
+ var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
+ var my_result: i32 = 1;
+ const p = try async testBreakFromSuspend(&my_result);
+ cancel p;
+ std.debug.assert(my_result == 2);
+}
+async fn testBreakFromSuspend(my_result: *i32) void {
+ suspend {
+ resume @handle();
+ }
+ my_result.* += 1;
+ suspend;
+ my_result.* += 1;
+}
diff --git a/test/cases/defer.zig b/test/cases/defer.zig
index a989af18c2..7d4d1bc3d8 100644
--- a/test/cases/defer.zig
+++ b/test/cases/defer.zig
@@ -5,9 +5,18 @@ var index: usize = undefined;
fn runSomeErrorDefers(x: bool) !bool {
index = 0;
- defer {result[index] = 'a'; index += 1;}
- errdefer {result[index] = 'b'; index += 1;}
- defer {result[index] = 'c'; index += 1;}
+ defer {
+ result[index] = 'a';
+ index += 1;
+ }
+ errdefer {
+ result[index] = 'b';
+ index += 1;
+ }
+ defer {
+ result[index] = 'c';
+ index += 1;
+ }
return if (x) x else error.FalseNotAllowed;
}
@@ -41,3 +50,29 @@ fn testBreakContInDefer(x: usize) void {
assert(i == 5);
}
}
+
+test "defer and labeled break" {
+ var i = usize(0);
+
+ blk: {
+ defer i += 1;
+ break :blk;
+ }
+
+ assert(i == 1);
+}
+
+test "errdefer does not apply to fn inside fn" {
+ if (testNestedFnErrDefer()) |_| @panic("expected error") else |e| assert(e == error.Bad);
+}
+
+fn testNestedFnErrDefer() error!void {
+ var a: i32 = 0;
+ errdefer a += 1;
+ const S = struct {
+ fn baz() error {
+ return error.Bad;
+ }
+ };
+ return S.baz();
+}
diff --git a/test/cases/enum.zig b/test/cases/enum.zig
index 644c989b04..50edfda536 100644
--- a/test/cases/enum.zig
+++ b/test/cases/enum.zig
@@ -2,8 +2,13 @@ const assert = @import("std").debug.assert;
const mem = @import("std").mem;
test "enum type" {
- const foo1 = Foo{ .One = 13};
- const foo2 = Foo{. Two = Point { .x = 1234, .y = 5678, }};
+ const foo1 = Foo{ .One = 13 };
+ const foo2 = Foo{
+ .Two = Point{
+ .x = 1234,
+ .y = 5678,
+ },
+ };
const bar = Bar.B;
assert(bar == Bar.B);
@@ -41,26 +46,25 @@ const Bar = enum {
};
fn returnAnInt(x: i32) Foo {
- return Foo { .One = x };
+ return Foo{ .One = x };
}
-
test "constant enum with payload" {
- var empty = AnEnumWithPayload {.Empty = {}};
- var full = AnEnumWithPayload {.Full = 13};
+ var empty = AnEnumWithPayload{ .Empty = {} };
+ var full = AnEnumWithPayload{ .Full = 13 };
shouldBeEmpty(empty);
shouldBeNotEmpty(full);
}
-fn shouldBeEmpty(x: &const AnEnumWithPayload) void {
- switch (*x) {
+fn shouldBeEmpty(x: *const AnEnumWithPayload) void {
+ switch (x.*) {
AnEnumWithPayload.Empty => {},
else => unreachable,
}
}
-fn shouldBeNotEmpty(x: &const AnEnumWithPayload) void {
- switch (*x) {
+fn shouldBeNotEmpty(x: *const AnEnumWithPayload) void {
+ switch (x.*) {
AnEnumWithPayload.Empty => unreachable,
else => {},
}
@@ -71,8 +75,6 @@ const AnEnumWithPayload = union(enum) {
Full: i32,
};
-
-
const Number = enum {
Zero,
One,
@@ -90,15 +92,14 @@ test "enum to int" {
}
fn shouldEqual(n: Number, expected: u3) void {
- assert(u3(n) == expected);
+ assert(@enumToInt(n) == expected);
}
-
test "int to enum" {
testIntToEnumEval(3);
}
fn testIntToEnumEval(x: i32) void {
- assert(IntToEnumNumber(u3(x)) == IntToEnumNumber.Three);
+ assert(@intToEnum(IntToEnumNumber, @intCast(u3, x)) == IntToEnumNumber.Three);
}
const IntToEnumNumber = enum {
Zero,
@@ -108,7 +109,6 @@ const IntToEnumNumber = enum {
Four,
};
-
test "@tagName" {
assert(mem.eql(u8, testEnumTagNameBare(BareNumber.Three), "Three"));
comptime assert(mem.eql(u8, testEnumTagNameBare(BareNumber.Three), "Three"));
@@ -124,7 +124,6 @@ const BareNumber = enum {
Three,
};
-
test "enum alignment" {
comptime {
assert(@alignOf(AlignTestEnum) >= @alignOf([9]u8));
@@ -137,47 +136,529 @@ const AlignTestEnum = union(enum) {
B: u64,
};
-const ValueCount1 = enum { I0 };
-const ValueCount2 = enum { I0, I1 };
+const ValueCount1 = enum {
+ I0,
+};
+const ValueCount2 = enum {
+ I0,
+ I1,
+};
const ValueCount256 = enum {
- I0, I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13, I14, I15,
- I16, I17, I18, I19, I20, I21, I22, I23, I24, I25, I26, I27, I28, I29, I30, I31,
- I32, I33, I34, I35, I36, I37, I38, I39, I40, I41, I42, I43, I44, I45, I46, I47,
- I48, I49, I50, I51, I52, I53, I54, I55, I56, I57, I58, I59, I60, I61, I62, I63,
- I64, I65, I66, I67, I68, I69, I70, I71, I72, I73, I74, I75, I76, I77, I78, I79,
- I80, I81, I82, I83, I84, I85, I86, I87, I88, I89, I90, I91, I92, I93, I94, I95,
- I96, I97, I98, I99, I100, I101, I102, I103, I104, I105, I106, I107, I108, I109,
- I110, I111, I112, I113, I114, I115, I116, I117, I118, I119, I120, I121, I122, I123,
- I124, I125, I126, I127, I128, I129, I130, I131, I132, I133, I134, I135, I136, I137,
- I138, I139, I140, I141, I142, I143, I144, I145, I146, I147, I148, I149, I150, I151,
- I152, I153, I154, I155, I156, I157, I158, I159, I160, I161, I162, I163, I164, I165,
- I166, I167, I168, I169, I170, I171, I172, I173, I174, I175, I176, I177, I178, I179,
- I180, I181, I182, I183, I184, I185, I186, I187, I188, I189, I190, I191, I192, I193,
- I194, I195, I196, I197, I198, I199, I200, I201, I202, I203, I204, I205, I206, I207,
- I208, I209, I210, I211, I212, I213, I214, I215, I216, I217, I218, I219, I220, I221,
- I222, I223, I224, I225, I226, I227, I228, I229, I230, I231, I232, I233, I234, I235,
- I236, I237, I238, I239, I240, I241, I242, I243, I244, I245, I246, I247, I248, I249,
- I250, I251, I252, I253, I254, I255
+ I0,
+ I1,
+ I2,
+ I3,
+ I4,
+ I5,
+ I6,
+ I7,
+ I8,
+ I9,
+ I10,
+ I11,
+ I12,
+ I13,
+ I14,
+ I15,
+ I16,
+ I17,
+ I18,
+ I19,
+ I20,
+ I21,
+ I22,
+ I23,
+ I24,
+ I25,
+ I26,
+ I27,
+ I28,
+ I29,
+ I30,
+ I31,
+ I32,
+ I33,
+ I34,
+ I35,
+ I36,
+ I37,
+ I38,
+ I39,
+ I40,
+ I41,
+ I42,
+ I43,
+ I44,
+ I45,
+ I46,
+ I47,
+ I48,
+ I49,
+ I50,
+ I51,
+ I52,
+ I53,
+ I54,
+ I55,
+ I56,
+ I57,
+ I58,
+ I59,
+ I60,
+ I61,
+ I62,
+ I63,
+ I64,
+ I65,
+ I66,
+ I67,
+ I68,
+ I69,
+ I70,
+ I71,
+ I72,
+ I73,
+ I74,
+ I75,
+ I76,
+ I77,
+ I78,
+ I79,
+ I80,
+ I81,
+ I82,
+ I83,
+ I84,
+ I85,
+ I86,
+ I87,
+ I88,
+ I89,
+ I90,
+ I91,
+ I92,
+ I93,
+ I94,
+ I95,
+ I96,
+ I97,
+ I98,
+ I99,
+ I100,
+ I101,
+ I102,
+ I103,
+ I104,
+ I105,
+ I106,
+ I107,
+ I108,
+ I109,
+ I110,
+ I111,
+ I112,
+ I113,
+ I114,
+ I115,
+ I116,
+ I117,
+ I118,
+ I119,
+ I120,
+ I121,
+ I122,
+ I123,
+ I124,
+ I125,
+ I126,
+ I127,
+ I128,
+ I129,
+ I130,
+ I131,
+ I132,
+ I133,
+ I134,
+ I135,
+ I136,
+ I137,
+ I138,
+ I139,
+ I140,
+ I141,
+ I142,
+ I143,
+ I144,
+ I145,
+ I146,
+ I147,
+ I148,
+ I149,
+ I150,
+ I151,
+ I152,
+ I153,
+ I154,
+ I155,
+ I156,
+ I157,
+ I158,
+ I159,
+ I160,
+ I161,
+ I162,
+ I163,
+ I164,
+ I165,
+ I166,
+ I167,
+ I168,
+ I169,
+ I170,
+ I171,
+ I172,
+ I173,
+ I174,
+ I175,
+ I176,
+ I177,
+ I178,
+ I179,
+ I180,
+ I181,
+ I182,
+ I183,
+ I184,
+ I185,
+ I186,
+ I187,
+ I188,
+ I189,
+ I190,
+ I191,
+ I192,
+ I193,
+ I194,
+ I195,
+ I196,
+ I197,
+ I198,
+ I199,
+ I200,
+ I201,
+ I202,
+ I203,
+ I204,
+ I205,
+ I206,
+ I207,
+ I208,
+ I209,
+ I210,
+ I211,
+ I212,
+ I213,
+ I214,
+ I215,
+ I216,
+ I217,
+ I218,
+ I219,
+ I220,
+ I221,
+ I222,
+ I223,
+ I224,
+ I225,
+ I226,
+ I227,
+ I228,
+ I229,
+ I230,
+ I231,
+ I232,
+ I233,
+ I234,
+ I235,
+ I236,
+ I237,
+ I238,
+ I239,
+ I240,
+ I241,
+ I242,
+ I243,
+ I244,
+ I245,
+ I246,
+ I247,
+ I248,
+ I249,
+ I250,
+ I251,
+ I252,
+ I253,
+ I254,
+ I255,
};
const ValueCount257 = enum {
- I0, I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13, I14, I15,
- I16, I17, I18, I19, I20, I21, I22, I23, I24, I25, I26, I27, I28, I29, I30, I31,
- I32, I33, I34, I35, I36, I37, I38, I39, I40, I41, I42, I43, I44, I45, I46, I47,
- I48, I49, I50, I51, I52, I53, I54, I55, I56, I57, I58, I59, I60, I61, I62, I63,
- I64, I65, I66, I67, I68, I69, I70, I71, I72, I73, I74, I75, I76, I77, I78, I79,
- I80, I81, I82, I83, I84, I85, I86, I87, I88, I89, I90, I91, I92, I93, I94, I95,
- I96, I97, I98, I99, I100, I101, I102, I103, I104, I105, I106, I107, I108, I109,
- I110, I111, I112, I113, I114, I115, I116, I117, I118, I119, I120, I121, I122, I123,
- I124, I125, I126, I127, I128, I129, I130, I131, I132, I133, I134, I135, I136, I137,
- I138, I139, I140, I141, I142, I143, I144, I145, I146, I147, I148, I149, I150, I151,
- I152, I153, I154, I155, I156, I157, I158, I159, I160, I161, I162, I163, I164, I165,
- I166, I167, I168, I169, I170, I171, I172, I173, I174, I175, I176, I177, I178, I179,
- I180, I181, I182, I183, I184, I185, I186, I187, I188, I189, I190, I191, I192, I193,
- I194, I195, I196, I197, I198, I199, I200, I201, I202, I203, I204, I205, I206, I207,
- I208, I209, I210, I211, I212, I213, I214, I215, I216, I217, I218, I219, I220, I221,
- I222, I223, I224, I225, I226, I227, I228, I229, I230, I231, I232, I233, I234, I235,
- I236, I237, I238, I239, I240, I241, I242, I243, I244, I245, I246, I247, I248, I249,
- I250, I251, I252, I253, I254, I255, I256
+ I0,
+ I1,
+ I2,
+ I3,
+ I4,
+ I5,
+ I6,
+ I7,
+ I8,
+ I9,
+ I10,
+ I11,
+ I12,
+ I13,
+ I14,
+ I15,
+ I16,
+ I17,
+ I18,
+ I19,
+ I20,
+ I21,
+ I22,
+ I23,
+ I24,
+ I25,
+ I26,
+ I27,
+ I28,
+ I29,
+ I30,
+ I31,
+ I32,
+ I33,
+ I34,
+ I35,
+ I36,
+ I37,
+ I38,
+ I39,
+ I40,
+ I41,
+ I42,
+ I43,
+ I44,
+ I45,
+ I46,
+ I47,
+ I48,
+ I49,
+ I50,
+ I51,
+ I52,
+ I53,
+ I54,
+ I55,
+ I56,
+ I57,
+ I58,
+ I59,
+ I60,
+ I61,
+ I62,
+ I63,
+ I64,
+ I65,
+ I66,
+ I67,
+ I68,
+ I69,
+ I70,
+ I71,
+ I72,
+ I73,
+ I74,
+ I75,
+ I76,
+ I77,
+ I78,
+ I79,
+ I80,
+ I81,
+ I82,
+ I83,
+ I84,
+ I85,
+ I86,
+ I87,
+ I88,
+ I89,
+ I90,
+ I91,
+ I92,
+ I93,
+ I94,
+ I95,
+ I96,
+ I97,
+ I98,
+ I99,
+ I100,
+ I101,
+ I102,
+ I103,
+ I104,
+ I105,
+ I106,
+ I107,
+ I108,
+ I109,
+ I110,
+ I111,
+ I112,
+ I113,
+ I114,
+ I115,
+ I116,
+ I117,
+ I118,
+ I119,
+ I120,
+ I121,
+ I122,
+ I123,
+ I124,
+ I125,
+ I126,
+ I127,
+ I128,
+ I129,
+ I130,
+ I131,
+ I132,
+ I133,
+ I134,
+ I135,
+ I136,
+ I137,
+ I138,
+ I139,
+ I140,
+ I141,
+ I142,
+ I143,
+ I144,
+ I145,
+ I146,
+ I147,
+ I148,
+ I149,
+ I150,
+ I151,
+ I152,
+ I153,
+ I154,
+ I155,
+ I156,
+ I157,
+ I158,
+ I159,
+ I160,
+ I161,
+ I162,
+ I163,
+ I164,
+ I165,
+ I166,
+ I167,
+ I168,
+ I169,
+ I170,
+ I171,
+ I172,
+ I173,
+ I174,
+ I175,
+ I176,
+ I177,
+ I178,
+ I179,
+ I180,
+ I181,
+ I182,
+ I183,
+ I184,
+ I185,
+ I186,
+ I187,
+ I188,
+ I189,
+ I190,
+ I191,
+ I192,
+ I193,
+ I194,
+ I195,
+ I196,
+ I197,
+ I198,
+ I199,
+ I200,
+ I201,
+ I202,
+ I203,
+ I204,
+ I205,
+ I206,
+ I207,
+ I208,
+ I209,
+ I210,
+ I211,
+ I212,
+ I213,
+ I214,
+ I215,
+ I216,
+ I217,
+ I218,
+ I219,
+ I220,
+ I221,
+ I222,
+ I223,
+ I224,
+ I225,
+ I226,
+ I227,
+ I228,
+ I229,
+ I230,
+ I231,
+ I232,
+ I233,
+ I234,
+ I235,
+ I236,
+ I237,
+ I238,
+ I239,
+ I240,
+ I241,
+ I242,
+ I243,
+ I244,
+ I245,
+ I246,
+ I247,
+ I248,
+ I249,
+ I250,
+ I251,
+ I252,
+ I253,
+ I254,
+ I255,
+ I256,
};
test "enum sizes" {
@@ -189,11 +670,11 @@ test "enum sizes" {
}
}
-const Small2 = enum (u2) {
+const Small2 = enum(u2) {
One,
Two,
};
-const Small = enum (u2) {
+const Small = enum(u2) {
One,
Two,
Three,
@@ -213,8 +694,7 @@ test "set enum tag type" {
}
}
-
-const A = enum (u3) {
+const A = enum(u3) {
One,
Two,
Three,
@@ -225,7 +705,7 @@ const A = enum (u3) {
Four2,
};
-const B = enum (u3) {
+const B = enum(u3) {
One3,
Two3,
Three3,
@@ -236,7 +716,7 @@ const B = enum (u3) {
Four23,
};
-const C = enum (u2) {
+const C = enum(u2) {
One4,
Two4,
Three4,
@@ -249,7 +729,7 @@ const BitFieldOfEnums = packed struct {
c: C,
};
-const bit_field_1 = BitFieldOfEnums {
+const bit_field_1 = BitFieldOfEnums{
.a = A.Two,
.b = B.Three3,
.c = C.Four4,
@@ -270,15 +750,15 @@ test "bit field access with enum fields" {
assert(data.b == B.Four3);
}
-fn getA(data: &const BitFieldOfEnums) A {
+fn getA(data: *const BitFieldOfEnums) A {
return data.a;
}
-fn getB(data: &const BitFieldOfEnums) B {
+fn getB(data: *const BitFieldOfEnums) B {
return data.b;
}
-fn getC(data: &const BitFieldOfEnums) C {
+fn getC(data: *const BitFieldOfEnums) C {
return data.c;
}
@@ -288,7 +768,7 @@ test "casting enum to its tag type" {
}
fn testCastEnumToTagType(value: Small2) void {
- assert(u2(value) == 1);
+ assert(@enumToInt(value) == 1);
}
const MultipleChoice = enum(u32) {
@@ -304,7 +784,7 @@ test "enum with specified tag values" {
}
fn testEnumWithSpecifiedTagValues(x: MultipleChoice) void {
- assert(u32(x) == 60);
+ assert(@enumToInt(x) == 60);
assert(1234 == switch (x) {
MultipleChoice.A => 1,
MultipleChoice.B => 2,
@@ -331,7 +811,7 @@ test "enum with specified and unspecified tag values" {
}
fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) void {
- assert(u32(x) == 1000);
+ assert(@enumToInt(x) == 1000);
assert(1234 == switch (x) {
MultipleChoice2.A => 1,
MultipleChoice2.B => 2,
@@ -346,8 +826,8 @@ fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) void {
}
test "cast integer literal to enum" {
- assert(MultipleChoice2(0) == MultipleChoice2.Unspecified1);
- assert(MultipleChoice2(40) == MultipleChoice2.B);
+ assert(@intToEnum(MultipleChoice2, 0) == MultipleChoice2.Unspecified1);
+ assert(@intToEnum(MultipleChoice2, 40) == MultipleChoice2.B);
}
const EnumWithOneMember = enum {
@@ -385,10 +865,30 @@ const EnumWithTagValues = enum(u4) {
D = 1 << 3,
};
test "enum with tag values don't require parens" {
- assert(u4(EnumWithTagValues.C) == 0b0100);
+ assert(@enumToInt(EnumWithTagValues.C) == 0b0100);
}
test "enum with 1 field but explicit tag type should still have the tag type" {
- const Enum = enum(u8) { B = 2 };
+ const Enum = enum(u8) {
+ B = 2,
+ };
comptime @import("std").debug.assert(@sizeOf(Enum) == @sizeOf(u8));
}
+
+test "empty extern enum with members" {
+ const E = extern enum {
+ A,
+ B,
+ C,
+ };
+ assert(@sizeOf(E) == @sizeOf(c_int));
+}
+
+test "aoeu" {
+ const LocalFoo = enum {
+ A = 1,
+ B = 0,
+ };
+ var b = LocalFoo.B;
+ assert(mem.eql(u8, @tagName(b), "B"));
+}
diff --git a/test/cases/enum_with_members.zig b/test/cases/enum_with_members.zig
index 0c2ae1c383..18174186a9 100644
--- a/test/cases/enum_with_members.zig
+++ b/test/cases/enum_with_members.zig
@@ -6,8 +6,8 @@ const ET = union(enum) {
SINT: i32,
UINT: u32,
- pub fn print(a: &const ET, buf: []u8) error!usize {
- return switch (*a) {
+ pub fn print(a: *const ET, buf: []u8) error!usize {
+ return switch (a.*) {
ET.SINT => |x| fmt.formatIntBuf(buf, x, 10, false, 0),
ET.UINT => |x| fmt.formatIntBuf(buf, x, 10, false, 0),
};
@@ -15,8 +15,8 @@ const ET = union(enum) {
};
test "enum with members" {
- const a = ET { .SINT = -42 };
- const b = ET { .UINT = 42 };
+ const a = ET{ .SINT = -42 };
+ const b = ET{ .UINT = 42 };
var buf: [20]u8 = undefined;
assert((a.print(buf[0..]) catch unreachable) == 3);
diff --git a/test/cases/error.zig b/test/cases/error.zig
index e64bf02c91..45971fd40d 100644
--- a/test/cases/error.zig
+++ b/test/cases/error.zig
@@ -30,14 +30,12 @@ test "@errorName" {
assert(mem.eql(u8, @errorName(error.ALongerErrorName), "ALongerErrorName"));
}
-
test "error values" {
- const a = i32(error.err1);
- const b = i32(error.err2);
+ const a = @errorToInt(error.err1);
+ const b = @errorToInt(error.err2);
assert(a != b);
}
-
test "redefinition of error values allowed" {
shouldBeNotEqual(error.AnError, error.SecondError);
}
@@ -45,7 +43,6 @@ fn shouldBeNotEqual(a: error, b: error) void {
if (a == b) unreachable;
}
-
test "error binary operator" {
const a = errBinaryOperatorG(true) catch 3;
const b = errBinaryOperatorG(false) catch 3;
@@ -56,20 +53,20 @@ fn errBinaryOperatorG(x: bool) error!isize {
return if (x) error.ItBroke else isize(10);
}
-
test "unwrap simple value from error" {
const i = unwrapSimpleValueFromErrorDo() catch unreachable;
assert(i == 13);
}
-fn unwrapSimpleValueFromErrorDo() error!isize { return 13; }
-
+fn unwrapSimpleValueFromErrorDo() error!isize {
+ return 13;
+}
test "error return in assignment" {
doErrReturnInAssignment() catch unreachable;
}
fn doErrReturnInAssignment() error!void {
- var x : i32 = undefined;
+ var x: i32 = undefined;
x = try makeANonErr();
}
@@ -95,7 +92,10 @@ test "error set type " {
comptime testErrorSetType();
}
-const MyErrSet = error {OutOfMemory, FileNotFound};
+const MyErrSet = error{
+ OutOfMemory,
+ FileNotFound,
+};
fn testErrorSetType() void {
assert(@memberCount(MyErrSet) == 2);
@@ -109,18 +109,23 @@ fn testErrorSetType() void {
}
}
-
test "explicit error set cast" {
testExplicitErrorSetCast(Set1.A);
comptime testExplicitErrorSetCast(Set1.A);
}
-const Set1 = error{A, B};
-const Set2 = error{A, C};
+const Set1 = error{
+ A,
+ B,
+};
+const Set2 = error{
+ A,
+ C,
+};
fn testExplicitErrorSetCast(set1: Set1) void {
- var x = Set2(set1);
- var y = Set1(x);
+ var x = @errSetCast(Set2, set1);
+ var y = @errSetCast(Set1, x);
assert(y == error.A);
}
@@ -129,24 +134,27 @@ test "comptime test error for empty error set" {
comptime testComptimeTestErrorEmptySet(1234);
}
-const EmptyErrorSet = error {};
+const EmptyErrorSet = error{};
fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) void {
if (x) |v| assert(v == 1234) else |err| @compileError("bad");
}
-test "syntax: nullable operator in front of error union operator" {
+test "syntax: optional operator in front of error union operator" {
comptime {
assert(?error!i32 == ?(error!i32));
}
}
test "comptime err to int of error set with only 1 possible value" {
- testErrToIntWithOnePossibleValue(error.A, u32(error.A));
- comptime testErrToIntWithOnePossibleValue(error.A, u32(error.A));
+ testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
+ comptime testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
}
-fn testErrToIntWithOnePossibleValue(x: error{A}, comptime value: u32) void {
- if (u32(x) != value) {
+fn testErrToIntWithOnePossibleValue(
+ x: error{A},
+ comptime value: u32,
+) void {
+ if (@errorToInt(x) != value) {
@compileError("bad");
}
}
@@ -175,3 +183,63 @@ fn baz_1() !i32 {
fn quux_1() !i32 {
return error.C;
}
+
+test "error: fn returning empty error set can be passed as fn returning any error" {
+ entry();
+ comptime entry();
+}
+
+fn entry() void {
+ foo2(bar2);
+}
+
+fn foo2(f: fn () error!void) void {
+ const x = f();
+}
+
+fn bar2() (error{}!void) {}
+
+test "error: Zero sized error set returned with value payload crash" {
+ _ = foo3(0);
+ _ = comptime foo3(0);
+}
+
+const Error = error{};
+fn foo3(b: usize) Error!usize {
+ return b;
+}
+
+test "error: Infer error set from literals" {
+ _ = nullLiteral("n") catch |err| handleErrors(err);
+ _ = floatLiteral("n") catch |err| handleErrors(err);
+ _ = intLiteral("n") catch |err| handleErrors(err);
+ _ = comptime nullLiteral("n") catch |err| handleErrors(err);
+ _ = comptime floatLiteral("n") catch |err| handleErrors(err);
+ _ = comptime intLiteral("n") catch |err| handleErrors(err);
+}
+
+fn handleErrors(err: var) noreturn {
+ switch (err) {
+ error.T => {},
+ }
+
+ unreachable;
+}
+
+fn nullLiteral(str: []const u8) !?i64 {
+ if (str[0] == 'n') return null;
+
+ return error.T;
+}
+
+fn floatLiteral(str: []const u8) !?f64 {
+ if (str[0] == 'n') return 1.0;
+
+ return error.T;
+}
+
+fn intLiteral(str: []const u8) !?i64 {
+ if (str[0] == 'n') return 1;
+
+ return error.T;
+}
diff --git a/test/cases/eval.zig b/test/cases/eval.zig
index d6f7afe864..9da475994d 100644
--- a/test/cases/eval.zig
+++ b/test/cases/eval.zig
@@ -5,16 +5,14 @@ const builtin = @import("builtin");
test "compile time recursion" {
assert(some_data.len == 21);
}
-var some_data: [usize(fibonacci(7))]u8 = undefined;
+var some_data: [@intCast(usize, fibonacci(7))]u8 = undefined;
fn fibonacci(x: i32) i32 {
if (x <= 1) return 1;
return fibonacci(x - 1) + fibonacci(x - 2);
}
-
-
fn unwrapAndAddOne(blah: ?i32) i32 {
- return ??blah + 1;
+ return blah.? + 1;
}
const should_be_1235 = unwrapAndAddOne(1234);
test "static add one" {
@@ -40,13 +38,13 @@ test "inline variable gets result of const if" {
assert(gimme1or2(false) == 2);
}
-
test "static function evaluation" {
assert(statically_added_number == 3);
}
const statically_added_number = staticAdd(1, 2);
-fn staticAdd(a: i32, b: i32) i32 { return a + b; }
-
+fn staticAdd(a: i32, b: i32) i32 {
+ return a + b;
+}
test "const expr eval on single expr blocks" {
assert(constExprEvalOnSingleExprBlocksFn(1, true) == 3);
@@ -64,9 +62,6 @@ fn constExprEvalOnSingleExprBlocksFn(x: i32, b: bool) i32 {
return result;
}
-
-
-
test "statically initialized list" {
assert(static_point_list[0].x == 1);
assert(static_point_list[0].y == 2);
@@ -77,15 +72,17 @@ const Point = struct {
x: i32,
y: i32,
};
-const static_point_list = []Point { makePoint(1, 2), makePoint(3, 4) };
+const static_point_list = []Point{
+ makePoint(1, 2),
+ makePoint(3, 4),
+};
fn makePoint(x: i32, y: i32) Point {
- return Point {
+ return Point{
.x = x,
.y = y,
};
}
-
test "static eval list init" {
assert(static_vec3.data[2] == 1.0);
assert(vec3(0.0, 0.0, 3.0).data[2] == 3.0);
@@ -95,18 +92,18 @@ pub const Vec3 = struct {
data: [3]f32,
};
pub fn vec3(x: f32, y: f32, z: f32) Vec3 {
- return Vec3 {
- .data = []f32 { x, y, z, },
- };
+ return Vec3{ .data = []f32{
+ x,
+ y,
+ z,
+ } };
}
-
test "constant expressions" {
- var array : [array_size]u8 = undefined;
+ var array: [array_size]u8 = undefined;
assert(@sizeOf(@typeOf(array)) == 20);
}
-const array_size : u8 = 20;
-
+const array_size: u8 = 20;
test "constant struct with negation" {
assert(vertices[0].x == -0.6);
@@ -118,13 +115,30 @@ const Vertex = struct {
g: f32,
b: f32,
};
-const vertices = []Vertex {
- Vertex { .x = -0.6, .y = -0.4, .r = 1.0, .g = 0.0, .b = 0.0 },
- Vertex { .x = 0.6, .y = -0.4, .r = 0.0, .g = 1.0, .b = 0.0 },
- Vertex { .x = 0.0, .y = 0.6, .r = 0.0, .g = 0.0, .b = 1.0 },
+const vertices = []Vertex{
+ Vertex{
+ .x = -0.6,
+ .y = -0.4,
+ .r = 1.0,
+ .g = 0.0,
+ .b = 0.0,
+ },
+ Vertex{
+ .x = 0.6,
+ .y = -0.4,
+ .r = 0.0,
+ .g = 1.0,
+ .b = 0.0,
+ },
+ Vertex{
+ .x = 0.0,
+ .y = 0.6,
+ .r = 0.0,
+ .g = 0.0,
+ .b = 1.0,
+ },
};
-
test "statically initialized struct" {
st_init_str_foo.x += 1;
assert(st_init_str_foo.x == 14);
@@ -133,15 +147,21 @@ const StInitStrFoo = struct {
x: i32,
y: bool,
};
-var st_init_str_foo = StInitStrFoo { .x = 13, .y = true, };
-
+var st_init_str_foo = StInitStrFoo{
+ .x = 13,
+ .y = true,
+};
test "statically initalized array literal" {
- const y : [4]u8 = st_init_arr_lit_x;
+ const y: [4]u8 = st_init_arr_lit_x;
assert(y[3] == 4);
}
-const st_init_arr_lit_x = []u8{1,2,3,4};
-
+const st_init_arr_lit_x = []u8{
+ 1,
+ 2,
+ 3,
+ 4,
+};
test "const slice" {
comptime {
@@ -195,17 +215,32 @@ test "inlined block and runtime block phi" {
const CmdFn = struct {
name: []const u8,
- func: fn(i32) i32,
+ func: fn (i32) i32,
};
const cmd_fns = []CmdFn{
- CmdFn {.name = "one", .func = one},
- CmdFn {.name = "two", .func = two},
- CmdFn {.name = "three", .func = three},
+ CmdFn{
+ .name = "one",
+ .func = one,
+ },
+ CmdFn{
+ .name = "two",
+ .func = two,
+ },
+ CmdFn{
+ .name = "three",
+ .func = three,
+ },
};
-fn one(value: i32) i32 { return value + 1; }
-fn two(value: i32) i32 { return value + 2; }
-fn three(value: i32) i32 { return value + 3; }
+fn one(value: i32) i32 {
+ return value + 1;
+}
+fn two(value: i32) i32 {
+ return value + 2;
+}
+fn three(value: i32) i32 {
+ return value + 3;
+}
fn performFn(comptime prefix_char: u8, start_value: i32) i32 {
var result: i32 = start_value;
@@ -229,7 +264,7 @@ test "eval @setRuntimeSafety at compile-time" {
assert(result == 1234);
}
-fn fnWithSetRuntimeSafety() i32{
+fn fnWithSetRuntimeSafety() i32 {
@setRuntimeSafety(true);
return 1234;
}
@@ -244,16 +279,15 @@ fn fnWithFloatMode() f32 {
return 1234.0;
}
-
const SimpleStruct = struct {
field: i32,
- fn method(self: &const SimpleStruct) i32 {
+ fn method(self: *const SimpleStruct) i32 {
return self.field + 3;
}
};
-var simple_struct = SimpleStruct{ .field = 1234, };
+var simple_struct = SimpleStruct{ .field = 1234 };
const bound_fn = simple_struct.method;
@@ -261,8 +295,6 @@ test "call method on bound fn referring to var instance" {
assert(bound_fn() == 1237);
}
-
-
test "ptr to local array argument at comptime" {
comptime {
var bytes: [10]u8 = undefined;
@@ -277,7 +309,6 @@ fn modifySomeBytes(bytes: []u8) void {
bytes[9] = 'b';
}
-
test "comparisons 0 <= uint and 0 > uint should be comptime" {
testCompTimeUIntComparisons(1234);
}
@@ -296,8 +327,6 @@ fn testCompTimeUIntComparisons(x: u32) void {
}
}
-
-
test "const ptr to variable data changes at runtime" {
assert(foo_ref.name[0] == 'a');
foo_ref.name = "b";
@@ -308,11 +337,9 @@ const Foo = struct {
name: []const u8,
};
-var foo_contents = Foo { .name = "a", };
+var foo_contents = Foo{ .name = "a" };
const foo_ref = &foo_contents;
-
-
test "create global array with for loop" {
assert(global_array[5] == 5 * 5);
assert(global_array[9] == 9 * 9);
@@ -321,7 +348,7 @@ test "create global array with for loop" {
const global_array = x: {
var result: [10]usize = undefined;
for (result) |*item, index| {
- *item = index * index;
+ item.* = index * index;
}
break :x result;
};
@@ -329,7 +356,7 @@ const global_array = x: {
test "compile-time downcast when the bits fit" {
comptime {
const spartan_count: u16 = 255;
- const byte = u8(spartan_count);
+ const byte = @intCast(u8, spartan_count);
assert(byte == 255);
}
}
@@ -340,7 +367,7 @@ test "const global shares pointer with other same one" {
assertEqualPtrs(&hi1[0], &hi2[0]);
comptime assert(&hi1[0] == &hi2[0]);
}
-fn assertEqualPtrs(ptr1: &const u8, ptr2: &const u8) void {
+fn assertEqualPtrs(ptr1: *const u8, ptr2: *const u8) void {
assert(ptr1 == ptr2);
}
@@ -379,7 +406,7 @@ test "f128 at compile time is lossy" {
pub fn TypeWithCompTimeSlice(comptime field_name: []const u8) type {
return struct {
- pub const Node = struct { };
+ pub const Node = struct {};
};
}
@@ -391,9 +418,9 @@ test "string literal used as comptime slice is memoized" {
}
test "comptime slice of undefined pointer of length 0" {
- const slice1 = (&i32)(undefined)[0..0];
+ const slice1 = ([*]i32)(undefined)[0..0];
assert(slice1.len == 0);
- const slice2 = (&i32)(undefined)[100..100];
+ const slice2 = ([*]i32)(undefined)[100..100];
assert(slice2.len == 0);
}
@@ -401,10 +428,10 @@ fn copyWithPartialInline(s: []u32, b: []u8) void {
comptime var i: usize = 0;
inline while (i < 4) : (i += 1) {
s[i] = 0;
- s[i] |= u32(b[i*4+0]) << 24;
- s[i] |= u32(b[i*4+1]) << 16;
- s[i] |= u32(b[i*4+2]) << 8;
- s[i] |= u32(b[i*4+3]) << 0;
+ s[i] |= u32(b[i * 4 + 0]) << 24;
+ s[i] |= u32(b[i * 4 + 1]) << 16;
+ s[i] |= u32(b[i * 4 + 2]) << 8;
+ s[i] |= u32(b[i * 4 + 3]) << 0;
}
}
@@ -413,7 +440,7 @@ test "binary math operator in partially inlined function" {
var b: [16]u8 = undefined;
for (b) |*r, i|
- *r = u8(i + 1);
+ r.* = @intCast(u8, i + 1);
copyWithPartialInline(s[0..], b[0..]);
assert(s[0] == 0x1020304);
@@ -422,7 +449,6 @@ test "binary math operator in partially inlined function" {
assert(s[3] == 0xd0e0f10);
}
-
test "comptime function with the same args is memoized" {
comptime {
assert(MakeType(i32) == MakeType(i32));
@@ -446,15 +472,15 @@ test "comptime function with mutable pointer is not memoized" {
}
}
-fn increment(value: &i32) void {
- *value += 1;
+fn increment(value: *i32) void {
+ value.* += 1;
}
fn generateTable(comptime T: type) [1010]T {
- var res : [1010]T = undefined;
- var i : usize = 0;
+ var res: [1010]T = undefined;
+ var i: usize = 0;
while (i < 1010) : (i += 1) {
- res[i] = T(i);
+ res[i] = @intCast(T, i);
}
return res;
}
@@ -482,7 +508,7 @@ test "comptime slice of slice preserves comptime var" {
test "comptime slice of pointer preserves comptime var" {
comptime {
var buff: [10]u8 = undefined;
- var a = &buff[0];
+ var a = buff[0..].ptr;
a[0..1][0] = 1;
assert(buff[0..][0..][0] == 1);
}
@@ -491,14 +517,13 @@ test "comptime slice of pointer preserves comptime var" {
const SingleFieldStruct = struct {
x: i32,
- fn read_x(self: &const SingleFieldStruct) i32 {
+ fn read_x(self: *const SingleFieldStruct) i32 {
return self.x;
}
};
test "const ptr to comptime mutable data is not memoized" {
-
comptime {
- var foo = SingleFieldStruct {.x = 1};
+ var foo = SingleFieldStruct{ .x = 1 };
assert(foo.read_x() == 1);
foo.x = 2;
assert(foo.read_x() == 2);
@@ -513,3 +538,117 @@ test "array concat of slices gives slice" {
assert(std.mem.eql(u8, c, "aoeuasdf"));
}
}
+
+test "comptime shlWithOverflow" {
+ const ct_shifted: u64 = comptime amt: {
+ var amt = u64(0);
+ _ = @shlWithOverflow(u64, ~u64(0), 16, &amt);
+ break :amt amt;
+ };
+
+ const rt_shifted: u64 = amt: {
+ var amt = u64(0);
+ _ = @shlWithOverflow(u64, ~u64(0), 16, &amt);
+ break :amt amt;
+ };
+
+ assert(ct_shifted == rt_shifted);
+}
+
+test "runtime 128 bit integer division" {
+ var a: u128 = 152313999999999991610955792383;
+ var b: u128 = 10000000000000000000;
+ var c = a / b;
+ assert(c == 15231399999);
+}
+
+pub const Info = struct {
+ version: u8,
+};
+
+pub const diamond_info = Info{ .version = 0 };
+
+test "comptime modification of const struct field" {
+ comptime {
+ var res = diamond_info;
+ res.version = 1;
+ assert(diamond_info.version == 0);
+ assert(res.version == 1);
+ }
+}
+
+test "pointer to type" {
+ comptime {
+ var T: type = i32;
+ assert(T == i32);
+ var ptr = &T;
+ assert(@typeOf(ptr) == *type);
+ ptr.* = f32;
+ assert(T == f32);
+ assert(*T == *f32);
+ }
+}
+
+test "slice of type" {
+ comptime {
+ var types_array = []type{ i32, f64, type };
+ for (types_array) |T, i| {
+ switch (i) {
+ 0 => assert(T == i32),
+ 1 => assert(T == f64),
+ 2 => assert(T == type),
+ else => unreachable,
+ }
+ }
+ for (types_array[0..]) |T, i| {
+ switch (i) {
+ 0 => assert(T == i32),
+ 1 => assert(T == f64),
+ 2 => assert(T == type),
+ else => unreachable,
+ }
+ }
+ }
+}
+
+const Wrapper = struct {
+ T: type,
+};
+
+fn wrap(comptime T: type) Wrapper {
+ return Wrapper{ .T = T };
+}
+
+test "function which returns struct with type field causes implicit comptime" {
+ const ty = wrap(i32).T;
+ assert(ty == i32);
+}
+
+test "call method with comptime pass-by-non-copying-value self parameter" {
+ const S = struct {
+ a: u8,
+
+ fn b(comptime s: this) u8 {
+ return s.a;
+ }
+ };
+
+ const s = S{ .a = 2 };
+ var b = s.b();
+ assert(b == 2);
+}
+
+test "@tagName of @typeId" {
+ const str = @tagName(@typeId(u8));
+ assert(std.mem.eql(u8, str, "Int"));
+}
+
+test "setting backward branch quota just before a generic fn call" {
+ @setEvalBranchQuota(1001);
+ loopNTimes(1001);
+}
+
+fn loopNTimes(comptime n: usize) void {
+ comptime var i = 0;
+ inline while (i < n) : (i += 1) {}
+}
diff --git a/test/cases/field_parent_ptr.zig b/test/cases/field_parent_ptr.zig
index 2e519098cc..00d4e0f367 100644
--- a/test/cases/field_parent_ptr.zig
+++ b/test/cases/field_parent_ptr.zig
@@ -17,14 +17,14 @@ const Foo = struct {
d: i32,
};
-const foo = Foo {
+const foo = Foo{
.a = true,
.b = 0.123,
.c = 1234,
.d = -10,
};
-fn testParentFieldPtr(c: &const i32) void {
+fn testParentFieldPtr(c: *const i32) void {
assert(c == &foo.c);
const base = @fieldParentPtr(Foo, "c", c);
@@ -32,7 +32,7 @@ fn testParentFieldPtr(c: &const i32) void {
assert(&base.c == c);
}
-fn testParentFieldPtrFirst(a: &const bool) void {
+fn testParentFieldPtrFirst(a: *const bool) void {
assert(a == &foo.a);
const base = @fieldParentPtr(Foo, "a", a);
diff --git a/test/cases/fn.zig b/test/cases/fn.zig
index 5388deac10..47f7d5e688 100644
--- a/test/cases/fn.zig
+++ b/test/cases/fn.zig
@@ -7,7 +7,6 @@ fn testParamsAdd(a: i32, b: i32) i32 {
return a + b;
}
-
test "local variables" {
testLocVars(2);
}
@@ -16,7 +15,6 @@ fn testLocVars(b: i32) void {
if (a + b != 3) unreachable;
}
-
test "void parameters" {
voidFun(1, void{}, 2, {});
}
@@ -27,9 +25,8 @@ fn voidFun(a: i32, b: void, c: i32, d: void) void {
return vv;
}
-
test "mutable local variables" {
- var zero : i32 = 0;
+ var zero: i32 = 0;
assert(zero == 0);
var i = i32(0);
@@ -41,7 +38,7 @@ test "mutable local variables" {
test "separate block scopes" {
{
- const no_conflict : i32 = 5;
+ const no_conflict: i32 = 5;
assert(no_conflict == 5);
}
@@ -56,8 +53,7 @@ test "call function with empty string" {
acceptsString("");
}
-fn acceptsString(foo: []u8) void { }
-
+fn acceptsString(foo: []u8) void {}
fn @"weird function name"() i32 {
return 1234;
@@ -70,31 +66,43 @@ test "implicit cast function unreachable return" {
wantsFnWithVoid(fnWithUnreachable);
}
-fn wantsFnWithVoid(f: fn() void) void { }
+fn wantsFnWithVoid(f: fn () void) void {}
fn fnWithUnreachable() noreturn {
unreachable;
}
-
test "function pointers" {
- const fns = []@typeOf(fn1) { fn1, fn2, fn3, fn4, };
+ const fns = []@typeOf(fn1){
+ fn1,
+ fn2,
+ fn3,
+ fn4,
+ };
for (fns) |f, i| {
- assert(f() == u32(i) + 5);
+ assert(f() == @intCast(u32, i) + 5);
}
}
-fn fn1() u32 {return 5;}
-fn fn2() u32 {return 6;}
-fn fn3() u32 {return 7;}
-fn fn4() u32 {return 8;}
-
+fn fn1() u32 {
+ return 5;
+}
+fn fn2() u32 {
+ return 6;
+}
+fn fn3() u32 {
+ return 7;
+}
+fn fn4() u32 {
+ return 8;
+}
test "inline function call" {
assert(@inlineCall(add, 3, 9) == 12);
}
-fn add(a: i32, b: i32) i32 { return a + b; }
-
+fn add(a: i32, b: i32) i32 {
+ return a + b;
+}
test "number literal as an argument" {
numberLiteralArg(3);
@@ -110,4 +118,61 @@ test "assign inline fn to const variable" {
a();
}
-inline fn inlineFn() void { }
+inline fn inlineFn() void {}
+
+test "pass by non-copying value" {
+ assert(addPointCoords(Point{ .x = 1, .y = 2 }) == 3);
+}
+
+const Point = struct {
+ x: i32,
+ y: i32,
+};
+
+fn addPointCoords(pt: Point) i32 {
+ return pt.x + pt.y;
+}
+
+test "pass by non-copying value through var arg" {
+ assert(addPointCoordsVar(Point{ .x = 1, .y = 2 }) == 3);
+}
+
+fn addPointCoordsVar(pt: var) i32 {
+ comptime assert(@typeOf(pt) == Point);
+ return pt.x + pt.y;
+}
+
+test "pass by non-copying value as method" {
+ var pt = Point2{ .x = 1, .y = 2 };
+ assert(pt.addPointCoords() == 3);
+}
+
+const Point2 = struct {
+ x: i32,
+ y: i32,
+
+ fn addPointCoords(self: Point2) i32 {
+ return self.x + self.y;
+ }
+};
+
+test "pass by non-copying value as method, which is generic" {
+ var pt = Point3{ .x = 1, .y = 2 };
+ assert(pt.addPointCoords(i32) == 3);
+}
+
+const Point3 = struct {
+ x: i32,
+ y: i32,
+
+ fn addPointCoords(self: Point3, comptime T: type) i32 {
+ return self.x + self.y;
+ }
+};
+
+test "pass by non-copying value as method, at comptime" {
+ comptime {
+ var pt = Point2{ .x = 1, .y = 2 };
+ assert(pt.addPointCoords() == 3);
+ }
+}
diff --git a/test/cases/fn_in_struct_in_comptime.zig b/test/cases/fn_in_struct_in_comptime.zig
new file mode 100644
index 0000000000..fabb57e9cb
--- /dev/null
+++ b/test/cases/fn_in_struct_in_comptime.zig
@@ -0,0 +1,17 @@
+const assert = @import("std").debug.assert;
+
+fn get_foo() fn (*u8) usize {
+ comptime {
+ return struct {
+ fn func(ptr: *u8) usize {
+ var u = @ptrToInt(ptr);
+ return u;
+ }
+ }.func;
+ }
+}
+
+test "define a function in an anonymous struct in comptime" {
+ const foo = get_foo();
+ assert(foo(@intToPtr(*u8, 12345)) == 12345);
+}
diff --git a/test/cases/for.zig b/test/cases/for.zig
index 7bb0d7c9fa..59d90c1b85 100644
--- a/test/cases/for.zig
+++ b/test/cases/for.zig
@@ -3,8 +3,14 @@ const assert = std.debug.assert;
const mem = std.mem;
test "continue in for loop" {
- const array = []i32 {1, 2, 3, 4, 5};
- var sum : i32 = 0;
+ const array = []i32{
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ };
+ var sum: i32 = 0;
for (array) |x| {
sum += x;
if (x < 3) {
@@ -24,23 +30,23 @@ test "for loop with pointer elem var" {
}
fn mangleString(s: []u8) void {
for (s) |*c| {
- *c += 1;
+ c.* += 1;
}
}
test "basic for loop" {
- const expected_result = []u8{9, 8, 7, 6, 0, 1, 2, 3, 9, 8, 7, 6, 0, 1, 2, 3 };
+ const expected_result = []u8{ 9, 8, 7, 6, 0, 1, 2, 3, 9, 8, 7, 6, 0, 1, 2, 3 };
var buffer: [expected_result.len]u8 = undefined;
var buf_index: usize = 0;
- const array = []u8 {9, 8, 7, 6};
+ const array = []u8{ 9, 8, 7, 6 };
for (array) |item| {
buffer[buf_index] = item;
buf_index += 1;
}
for (array) |item, index| {
- buffer[buf_index] = u8(index);
+ buffer[buf_index] = @intCast(u8, index);
buf_index += 1;
}
const unknown_size: []const u8 = array;
@@ -49,7 +55,7 @@ test "basic for loop" {
buf_index += 1;
}
for (unknown_size) |item, index| {
- buffer[buf_index] = u8(index);
+ buffer[buf_index] = @intCast(u8, index);
buf_index += 1;
}
@@ -65,7 +71,8 @@ fn testBreakOuter() void {
var array = "aoeu";
var count: usize = 0;
outer: for (array) |_| {
- for (array) |_2| { // TODO shouldn't get error for redeclaring "_"
+ // TODO shouldn't get error for redeclaring "_"
+ for (array) |_2| {
count += 1;
break :outer;
}
@@ -82,7 +89,8 @@ fn testContinueOuter() void {
var array = "aoeu";
var counter: usize = 0;
outer: for (array) |_| {
- for (array) |_2| { // TODO shouldn't get error for redeclaring "_"
+ // TODO shouldn't get error for redeclaring "_"
+ for (array) |_2| {
counter += 1;
continue :outer;
}
diff --git a/test/cases/generics.zig b/test/cases/generics.zig
index 19b4a598d8..52aa013989 100644
--- a/test/cases/generics.zig
+++ b/test/cases/generics.zig
@@ -37,7 +37,6 @@ test "fn with comptime args" {
assert(sameButWithFloats(0.43, 0.49) == 0.49);
}
-
test "var params" {
assert(max_i32(12, 34) == 34);
assert(max_f64(1.2, 3.4) == 3.4);
@@ -60,7 +59,6 @@ fn max_f64(a: f64, b: f64) f64 {
return max_var(a, b);
}
-
pub fn List(comptime T: type) type {
return SmallList(T, 8);
}
@@ -82,10 +80,15 @@ test "function with return type type" {
assert(list2.prealloc_items.len == 8);
}
-
test "generic struct" {
- var a1 = GenNode(i32) {.value = 13, .next = null,};
- var b1 = GenNode(bool) {.value = true, .next = null,};
+ var a1 = GenNode(i32){
+ .value = 13,
+ .next = null,
+ };
+ var b1 = GenNode(bool){
+ .value = true,
+ .next = null,
+ };
assert(a1.value == 13);
assert(a1.value == a1.getVal());
assert(b1.getVal());
@@ -93,8 +96,10 @@ test "generic struct" {
fn GenNode(comptime T: type) type {
return struct {
value: T,
- next: ?&GenNode(T),
- fn getVal(n: &const GenNode(T)) T { return n.value; }
+ next: ?*GenNode(T),
+ fn getVal(n: *const GenNode(T)) T {
+ return n.value;
+ }
};
}
@@ -107,7 +112,6 @@ fn GenericDataThing(comptime count: isize) type {
};
}
-
test "use generic param in generic param" {
assert(aGenericFn(i32, 3, 4) == 7);
}
@@ -115,21 +119,31 @@ fn aGenericFn(comptime T: type, comptime a: T, b: T) T {
return a + b;
}
-
test "generic fn with implicit cast" {
- assert(getFirstByte(u8, []u8 {13}) == 13);
- assert(getFirstByte(u16, []u16 {0, 13}) == 0);
+ assert(getFirstByte(u8, []u8{13}) == 13);
+ assert(getFirstByte(u16, []u16{
+ 0,
+ 13,
+ }) == 0);
+}
+fn getByte(ptr: ?*const u8) u8 {
+ return ptr.?.*;
}
-fn getByte(ptr: ?&const u8) u8 {return *??ptr;}
fn getFirstByte(comptime T: type, mem: []const T) u8 {
- return getByte(@ptrCast(&const u8, &mem[0]));
+ return getByte(@ptrCast(*const u8, &mem[0]));
}
+const foos = []fn (var) bool{
+ foo1,
+ foo2,
+};
-const foos = []fn(var) bool { foo1, foo2 };
-
-fn foo1(arg: var) bool { return arg; }
-fn foo2(arg: var) bool { return !arg; }
+fn foo1(arg: var) bool {
+ return arg;
+}
+fn foo2(arg: var) bool {
+ return !arg;
+}
test "array of generic fns" {
assert(foos[0](true));
diff --git a/test/cases/if.zig b/test/cases/if.zig
index 2caae7448c..808936bfa5 100644
--- a/test/cases/if.zig
+++ b/test/cases/if.zig
@@ -23,7 +23,6 @@ fn firstEqlThird(a: i32, b: i32, c: i32) void {
}
}
-
test "else if expression" {
assert(elseIfExpressionF(1) == 1);
}
diff --git a/test/cases/import/a_namespace.zig b/test/cases/import/a_namespace.zig
index 5cf906cf91..042f1867a5 100644
--- a/test/cases/import/a_namespace.zig
+++ b/test/cases/import/a_namespace.zig
@@ -1 +1,3 @@
-pub fn foo() i32 { return 1234; }
+pub fn foo() i32 {
+ return 1234;
+}
diff --git a/test/cases/incomplete_struct_param_tld.zig b/test/cases/incomplete_struct_param_tld.zig
index a907ca748a..552d6ef185 100644
--- a/test/cases/incomplete_struct_param_tld.zig
+++ b/test/cases/incomplete_struct_param_tld.zig
@@ -11,21 +11,19 @@ const B = struct {
const C = struct {
x: i32,
- fn d(c: &const C) i32 {
+ fn d(c: *const C) i32 {
return c.x;
}
};
-fn foo(a: &const A) i32 {
+fn foo(a: *const A) i32 {
return a.b.c.d();
}
test "incomplete struct param top level declaration" {
- const a = A {
- .b = B {
- .c = C {
- .x = 13,
- },
+ const a = A{
+ .b = B{
+ .c = C{ .x = 13 },
},
};
assert(foo(a) == 13);
diff --git a/test/cases/ir_block_deps.zig b/test/cases/ir_block_deps.zig
index 202df19f62..c017eca508 100644
--- a/test/cases/ir_block_deps.zig
+++ b/test/cases/ir_block_deps.zig
@@ -11,7 +11,9 @@ fn foo(id: u64) !i32 {
};
}
-fn getErrInt() error!i32 { return 0; }
+fn getErrInt() error!i32 {
+ return 0;
+}
test "ir block deps" {
assert((foo(1) catch unreachable) == 0);
diff --git a/test/cases/math.zig b/test/cases/math.zig
index 574aa39bb1..195ada15dd 100644
--- a/test/cases/math.zig
+++ b/test/cases/math.zig
@@ -6,15 +6,20 @@ test "division" {
}
fn testDivision() void {
assert(div(u32, 13, 3) == 4);
+ assert(div(f16, 1.0, 2.0) == 0.5);
assert(div(f32, 1.0, 2.0) == 0.5);
assert(divExact(u32, 55, 11) == 5);
assert(divExact(i32, -55, 11) == -5);
+ assert(divExact(f16, 55.0, 11.0) == 5.0);
+ assert(divExact(f16, -55.0, 11.0) == -5.0);
assert(divExact(f32, 55.0, 11.0) == 5.0);
assert(divExact(f32, -55.0, 11.0) == -5.0);
assert(divFloor(i32, 5, 3) == 1);
assert(divFloor(i32, -5, 3) == -2);
+ assert(divFloor(f16, 5.0, 3.0) == 1.0);
+ assert(divFloor(f16, -5.0, 3.0) == -2.0);
assert(divFloor(f32, 5.0, 3.0) == 1.0);
assert(divFloor(f32, -5.0, 3.0) == -2.0);
assert(divFloor(i32, -0x80000000, -2) == 0x40000000);
@@ -24,30 +29,35 @@ fn testDivision() void {
assert(divTrunc(i32, 5, 3) == 1);
assert(divTrunc(i32, -5, 3) == -1);
+ assert(divTrunc(f16, 5.0, 3.0) == 1.0);
+ assert(divTrunc(f16, -5.0, 3.0) == -1.0);
assert(divTrunc(f32, 5.0, 3.0) == 1.0);
assert(divTrunc(f32, -5.0, 3.0) == -1.0);
+ assert(divTrunc(f64, 5.0, 3.0) == 1.0);
+ assert(divTrunc(f64, -5.0, 3.0) == -1.0);
comptime {
assert(
- 1194735857077236777412821811143690633098347576 %
- 508740759824825164163191790951174292733114988 ==
- 177254337427586449086438229241342047632117600);
- assert(@rem(-1194735857077236777412821811143690633098347576,
- 508740759824825164163191790951174292733114988) ==
- -177254337427586449086438229241342047632117600);
- assert(1194735857077236777412821811143690633098347576 /
- 508740759824825164163191790951174292733114988 ==
- 2);
- assert(@divTrunc(-1194735857077236777412821811143690633098347576,
- 508740759824825164163191790951174292733114988) ==
- -2);
- assert(@divTrunc(1194735857077236777412821811143690633098347576,
- -508740759824825164163191790951174292733114988) ==
- -2);
- assert(@divTrunc(-1194735857077236777412821811143690633098347576,
- -508740759824825164163191790951174292733114988) ==
- 2);
- assert(4126227191251978491697987544882340798050766755606969681711 % 10 == 1);
+ 1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600,
+ );
+ assert(
+ @rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600,
+ );
+ assert(
+ 1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2,
+ );
+ assert(
+ @divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2,
+ );
+ assert(
+ @divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2,
+ );
+ assert(
+ @divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2,
+ );
+ assert(
+ 4126227191251978491697987544882340798050766755606969681711 % 10 == 1,
+ );
}
}
fn div(comptime T: type, a: T, b: T) T {
@@ -114,18 +124,28 @@ fn ctz(x: var) usize {
test "assignment operators" {
var i: u32 = 0;
- i += 5; assert(i == 5);
- i -= 2; assert(i == 3);
- i *= 20; assert(i == 60);
- i /= 3; assert(i == 20);
- i %= 11; assert(i == 9);
- i <<= 1; assert(i == 18);
- i >>= 2; assert(i == 4);
+ i += 5;
+ assert(i == 5);
+ i -= 2;
+ assert(i == 3);
+ i *= 20;
+ assert(i == 60);
+ i /= 3;
+ assert(i == 20);
+ i %= 11;
+ assert(i == 9);
+ i <<= 1;
+ assert(i == 18);
+ i >>= 2;
+ assert(i == 4);
i = 6;
- i &= 5; assert(i == 4);
- i ^= 6; assert(i == 2);
+ i &= 5;
+ assert(i == 4);
+ i ^= 6;
+ assert(i == 2);
i = 6;
- i |= 3; assert(i == 7);
+ i |= 3;
+ assert(i == 7);
}
test "three expr in a row" {
@@ -138,7 +158,7 @@ fn testThreeExprInARow(f: bool, t: bool) void {
assertFalse(1 | 2 | 4 != 7);
assertFalse(3 ^ 6 ^ 8 != 13);
assertFalse(7 & 14 & 28 != 4);
- assertFalse(9 << 1 << 2 != 9 << 3);
+ assertFalse(9 << 1 << 2 != 9 << 3);
assertFalse(90 >> 1 >> 2 != 90 >> 3);
assertFalse(100 - 1 + 1000 != 1099);
assertFalse(5 * 4 / 2 % 3 != 1);
@@ -150,7 +170,6 @@ fn assertFalse(b: bool) void {
assert(!b);
}
-
test "const number literal" {
const one = 1;
const eleven = ten + one;
@@ -159,8 +178,6 @@ test "const number literal" {
}
const ten = 10;
-
-
test "unsigned wrapping" {
testUnsignedWrappingEval(@maxValue(u32));
comptime testUnsignedWrappingEval(@maxValue(u32));
@@ -203,7 +220,7 @@ fn test_u64_div() void {
assert(result.remainder == 100663296);
}
fn divWithResult(a: u64, b: u64) DivResult {
- return DivResult {
+ return DivResult{
.quotient = a / b,
.remainder = a % b,
};
@@ -214,8 +231,12 @@ const DivResult = struct {
};
test "binary not" {
- assert(comptime x: {break :x ~u16(0b1010101010101010) == 0b0101010101010101;});
- assert(comptime x: {break :x ~u64(2147483647) == 18446744071562067968;});
+ assert(comptime x: {
+ break :x ~u16(0b1010101010101010) == 0b0101010101010101;
+ });
+ assert(comptime x: {
+ break :x ~u64(2147483647) == 18446744071562067968;
+ });
testBinaryNot(0b1010101010101010);
}
@@ -275,6 +296,14 @@ test "quad hex float literal parsing in range" {
const d = 0x1.edcbff8ad76ab5bf46463233214fp-435;
}
+test "quad hex float literal parsing accurate" {
+ const a: f128 = 0x1.1111222233334444555566667777p+0;
+
+ // implied 1 is dropped, with an exponent of 0 (0x3fff) after biasing.
+ const expected: u128 = 0x3fff1111222233334444555566667777;
+ assert(@bitCast(u128, a) == expected);
+}
+
test "hex float literal within range" {
const a = 0x1.0p16383;
const b = 0x0.1p16387;
@@ -317,38 +346,55 @@ fn testShrExact(x: u8) void {
assert(shifted == 0b00101101);
}
-test "big number addition" {
+test "comptime_int addition" {
comptime {
- assert(
- 35361831660712422535336160538497375248 +
- 101752735581729509668353361206450473702 ==
- 137114567242441932203689521744947848950);
- assert(
- 594491908217841670578297176641415611445982232488944558774612 +
- 390603545391089362063884922208143568023166603618446395589768 ==
- 985095453608931032642182098849559179469148836107390954364380);
+ assert(35361831660712422535336160538497375248 + 101752735581729509668353361206450473702 == 137114567242441932203689521744947848950);
+ assert(594491908217841670578297176641415611445982232488944558774612 + 390603545391089362063884922208143568023166603618446395589768 == 985095453608931032642182098849559179469148836107390954364380);
}
}
-test "big number multiplication" {
+test "comptime_int multiplication" {
comptime {
assert(
- 45960427431263824329884196484953148229 *
- 128339149605334697009938835852565949723 ==
- 5898522172026096622534201617172456926982464453350084962781392314016180490567);
+ 45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567,
+ );
assert(
- 594491908217841670578297176641415611445982232488944558774612 *
- 390603545391089362063884922208143568023166603618446395589768 ==
- 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016);
+ 594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016,
+ );
}
}
-test "big number shifting" {
+test "comptime_int shifting" {
comptime {
assert((u128(1) << 127) == 0x80000000000000000000000000000000);
}
}
+test "comptime_int multi-limb shift and mask" {
+ comptime {
+ var a = 0xefffffffa0000001eeeeeeefaaaaaaab;
+
+ assert(u32(a & 0xffffffff) == 0xaaaaaaab);
+ a >>= 32;
+ assert(u32(a & 0xffffffff) == 0xeeeeeeef);
+ a >>= 32;
+ assert(u32(a & 0xffffffff) == 0xa0000001);
+ a >>= 32;
+ assert(u32(a & 0xffffffff) == 0xefffffff);
+ a >>= 32;
+
+ assert(a == 0);
+ }
+}
+
+test "comptime_int multi-limb partial shift right" {
+ comptime {
+ var a = 0x1ffffffffeeeeeeee;
+ a >>= 16;
+ assert(a == 0x1ffffffffeeee);
+ }
+}
+
test "xor" {
test_xor();
comptime test_xor();
@@ -362,7 +408,7 @@ fn test_xor() void {
assert(0xFF ^ 0xFF == 0x00);
}
-test "big number xor" {
+test "comptime_int xor" {
comptime {
assert(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0x00000000000000000000000000000000 == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
assert(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0x0000000000000000FFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
@@ -380,7 +426,9 @@ test "f128" {
comptime test_f128();
}
-fn make_f128(x: f128) f128 { return x; }
+fn make_f128(x: f128) f128 {
+ return x;
+}
fn test_f128() void {
assert(@sizeOf(f128) == 16);
@@ -402,3 +450,48 @@ test "comptime float rem int" {
assert(x == 1.0);
}
}
+
+test "remainder division" {
+ comptime remdiv(f16);
+ comptime remdiv(f32);
+ comptime remdiv(f64);
+ comptime remdiv(f128);
+ remdiv(f16);
+ remdiv(f64);
+ remdiv(f128);
+}
+
+fn remdiv(comptime T: type) void {
+ assert(T(1) == T(1) % T(2));
+ assert(T(1) == T(7) % T(3));
+}
+
+test "@sqrt" {
+ testSqrt(f64, 12.0);
+ comptime testSqrt(f64, 12.0);
+ testSqrt(f32, 13.0);
+ comptime testSqrt(f32, 13.0);
+ testSqrt(f16, 13.0);
+ comptime testSqrt(f16, 13.0);
+
+ const x = 14.0;
+ const y = x * x;
+ const z = @sqrt(@typeOf(y), y);
+ comptime assert(z == x);
+}
+
+fn testSqrt(comptime T: type, x: T) void {
+ assert(@sqrt(T, x * x) == x);
+}
+
+test "comptime_int param and return" {
+ const a = comptimeAdd(35361831660712422535336160538497375248, 101752735581729509668353361206450473702);
+ assert(a == 137114567242441932203689521744947848950);
+
+ const b = comptimeAdd(594491908217841670578297176641415611445982232488944558774612, 390603545391089362063884922208143568023166603618446395589768);
+ assert(b == 985095453608931032642182098849559179469148836107390954364380);
+}
+
+fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int {
+ return a + b;
+}
diff --git a/test/cases/merge_error_sets.zig b/test/cases/merge_error_sets.zig
new file mode 100644
index 0000000000..189bd16a4d
--- /dev/null
+++ b/test/cases/merge_error_sets.zig
@@ -0,0 +1,21 @@
+const A = error{
+ PathNotFound,
+ NotDir,
+};
+const B = error{OutOfMemory};
+
+const C = A || B;
+
+fn foo() C!void {
+ return error.NotDir;
+}
+
+test "merge error sets" {
+ if (foo()) {
+ @panic("unexpected");
+ } else |err| switch (err) {
+ error.OutOfMemory => @panic("unexpected"),
+ error.PathNotFound => @panic("unexpected"),
+ error.NotDir => {},
+ }
+}
diff --git a/test/cases/misc.zig b/test/cases/misc.zig
index 95a9a46bff..1c0189571b 100644
--- a/test/cases/misc.zig
+++ b/test/cases/misc.zig
@@ -4,6 +4,7 @@ const cstr = @import("std").cstr;
const builtin = @import("builtin");
// normal comment
+
/// this is a documentation comment
/// doc comment line 2
fn emptyFunctionWithComments() void {}
@@ -16,8 +17,7 @@ comptime {
@export("disabledExternFn", disabledExternFn, builtin.GlobalLinkage.Internal);
}
-extern fn disabledExternFn() void {
-}
+extern fn disabledExternFn() void {}
test "call disabled extern fn" {
disabledExternFn();
@@ -53,15 +53,11 @@ test "@IntType builtin" {
}
test "floating point primitive bit counts" {
+ assert(f16.bit_count == 16);
assert(f32.bit_count == 32);
assert(f64.bit_count == 64);
}
-const u1 = @IntType(false, 1);
-const u63 = @IntType(false, 63);
-const i1 = @IntType(true, 1);
-const i63 = @IntType(true, 63);
-
test "@minValue and @maxValue" {
assert(@maxValue(u1) == 1);
assert(@maxValue(u8) == 255);
@@ -110,17 +106,29 @@ fn testShortCircuit(f: bool, t: bool) void {
var hit_3 = f;
var hit_4 = f;
- if (t or x: {assert(f); break :x f;}) {
+ if (t or x: {
+ assert(f);
+ break :x f;
+ }) {
hit_1 = t;
}
- if (f or x: { hit_2 = t; break :x f; }) {
+ if (f or x: {
+ hit_2 = t;
+ break :x f;
+ }) {
assert(f);
}
- if (t and x: { hit_3 = t; break :x f; }) {
+ if (t and x: {
+ hit_3 = t;
+ break :x f;
+ }) {
assert(f);
}
- if (f and x: {assert(f); break :x f;}) {
+ if (f and x: {
+ assert(f);
+ break :x f;
+ }) {
assert(f);
} else {
hit_4 = t;
@@ -146,8 +154,8 @@ test "return string from function" {
assert(mem.eql(u8, first4KeysOfHomeRow(), "aoeu"));
}
-const g1 : i32 = 1233 + 1;
-var g2 : i32 = 0;
+const g1: i32 = 1233 + 1;
+var g2: i32 = 0;
test "global variables" {
assert(g2 == 0);
@@ -155,24 +163,25 @@ test "global variables" {
assert(g2 == 1234);
}
-
test "memcpy and memset intrinsics" {
- var foo : [20]u8 = undefined;
- var bar : [20]u8 = undefined;
+ var foo: [20]u8 = undefined;
+ var bar: [20]u8 = undefined;
- @memset(&foo[0], 'A', foo.len);
- @memcpy(&bar[0], &foo[0], bar.len);
+ @memset(foo[0..].ptr, 'A', foo.len);
+ @memcpy(bar[0..].ptr, foo[0..].ptr, bar.len);
if (bar[11] != 'A') unreachable;
}
test "builtin static eval" {
- const x : i32 = comptime x: {break :x 1 + 2 + 3;};
+ const x: i32 = comptime x: {
+ break :x 1 + 2 + 3;
+ };
assert(x == comptime 6);
}
test "slicing" {
- var array : [20]i32 = undefined;
+ var array: [20]i32 = undefined;
array[5] = 1234;
@@ -181,21 +190,21 @@ test "slicing" {
if (slice.len != 5) unreachable;
const ptr = &slice[0];
- if (ptr[0] != 1234) unreachable;
+ if (ptr.* != 1234) unreachable;
var slice_rest = array[10..];
if (slice_rest.len != 10) unreachable;
}
-
test "constant equal function pointers" {
const alias = emptyFn;
- assert(comptime x: {break :x emptyFn == alias;});
+ assert(comptime x: {
+ break :x emptyFn == alias;
+ });
}
fn emptyFn() void {}
-
test "hex escape" {
assert(mem.eql(u8, "\x68\x65\x6c\x6c\x6f", "hello"));
}
@@ -238,35 +247,32 @@ test "multiline C string" {
assert(cstr.cmp(s1, s2) == 0);
}
-
test "type equality" {
- assert(&const u8 != &u8);
+ assert(*const u8 != *u8);
}
-
const global_a: i32 = 1234;
-const global_b: &const i32 = &global_a;
-const global_c: &const f32 = @ptrCast(&const f32, global_b);
+const global_b: *const i32 = &global_a;
+const global_c: *const f32 = @ptrCast(*const f32, global_b);
test "compile time global reinterpret" {
- const d = @ptrCast(&const i32, global_c);
- assert(*d == 1234);
+ const d = @ptrCast(*const i32, global_c);
+ assert(d.* == 1234);
}
test "explicit cast maybe pointers" {
- const a: ?&i32 = undefined;
- const b: ?&f32 = @ptrCast(?&f32, a);
+ const a: ?*i32 = undefined;
+ const b: ?*f32 = @ptrCast(?*f32, a);
}
test "generic malloc free" {
const a = memAlloc(u8, 10) catch unreachable;
memFree(u8, a);
}
-var some_mem : [100]u8 = undefined;
+var some_mem: [100]u8 = undefined;
fn memAlloc(comptime T: type, n: usize) error![]T {
- return @ptrCast(&T, &some_mem[0])[0..n];
+ return @ptrCast([*]T, &some_mem[0])[0..n];
}
-fn memFree(comptime T: type, memory: []T) void { }
-
+fn memFree(comptime T: type, memory: []T) void {}
test "cast undefined" {
const array: [100]u8 = undefined;
@@ -275,32 +281,35 @@ test "cast undefined" {
}
fn testCastUndefined(x: []const u8) void {}
-
test "cast small unsigned to larger signed" {
assert(castSmallUnsignedToLargerSigned1(200) == i16(200));
assert(castSmallUnsignedToLargerSigned2(9999) == i64(9999));
}
-fn castSmallUnsignedToLargerSigned1(x: u8) i16 { return x; }
-fn castSmallUnsignedToLargerSigned2(x: u16) i64 { return x; }
-
+fn castSmallUnsignedToLargerSigned1(x: u8) i16 {
+ return x;
+}
+fn castSmallUnsignedToLargerSigned2(x: u16) i64 {
+ return x;
+}
test "implicit cast after unreachable" {
assert(outer() == 1234);
}
-fn inner() i32 { return 1234; }
+fn inner() i32 {
+ return 1234;
+}
fn outer() i64 {
return inner();
}
-
test "pointer dereferencing" {
var x = i32(3);
const y = &x;
- *y += 1;
+ y.* += 1;
assert(x == 4);
- assert(*y == 4);
+ assert(y.* == 4);
}
test "call result of if else expression" {
@@ -310,9 +319,12 @@ test "call result of if else expression" {
fn f2(x: bool) []const u8 {
return (if (x) fA else fB)();
}
-fn fA() []const u8 { return "a"; }
-fn fB() []const u8 { return "b"; }
-
+fn fA() []const u8 {
+ return "a";
+}
+fn fB() []const u8 {
+ return "b";
+}
test "const expression eval handling of variables" {
var x = true;
@@ -321,8 +333,6 @@ test "const expression eval handling of variables" {
}
}
-
-
test "constant enum initialization with differing sizes" {
test3_1(test3_foo);
test3_2(test3_bar);
@@ -336,10 +346,15 @@ const Test3Point = struct {
x: i32,
y: i32,
};
-const test3_foo = Test3Foo { .Three = Test3Point {.x = 3, .y = 4}};
-const test3_bar = Test3Foo { .Two = 13};
-fn test3_1(f: &const Test3Foo) void {
- switch (*f) {
+const test3_foo = Test3Foo{
+ .Three = Test3Point{
+ .x = 3,
+ .y = 4,
+ },
+};
+const test3_bar = Test3Foo{ .Two = 13 };
+fn test3_1(f: *const Test3Foo) void {
+ switch (f.*) {
Test3Foo.Three => |pt| {
assert(pt.x == 3);
assert(pt.y == 4);
@@ -347,8 +362,8 @@ fn test3_1(f: &const Test3Foo) void {
else => unreachable,
}
}
-fn test3_2(f: &const Test3Foo) void {
- switch (*f) {
+fn test3_2(f: *const Test3Foo) void {
+ switch (f.*) {
Test3Foo.Two => |x| {
assert(x == 13);
},
@@ -356,58 +371,61 @@ fn test3_2(f: &const Test3Foo) void {
}
}
-
test "character literals" {
assert('\'' == single_quote);
}
const single_quote = '\'';
-
-
test "take address of parameter" {
testTakeAddressOfParameter(12.34);
}
fn testTakeAddressOfParameter(f: f32) void {
const f_ptr = &f;
- assert(*f_ptr == 12.34);
+ assert(f_ptr.* == 12.34);
}
-
test "pointer comparison" {
const a = ([]const u8)("a");
const b = &a;
assert(ptrEql(b, b));
}
-fn ptrEql(a: &const []const u8, b: &const []const u8) bool {
+fn ptrEql(a: *const []const u8, b: *const []const u8) bool {
return a == b;
}
-
test "C string concatenation" {
const a = c"OK" ++ c" IT " ++ c"WORKED";
const b = c"OK IT WORKED";
const len = cstr.len(b);
const len_with_null = len + 1;
- {var i: u32 = 0; while (i < len_with_null) : (i += 1) {
- assert(a[i] == b[i]);
- }}
+ {
+ var i: u32 = 0;
+ while (i < len_with_null) : (i += 1) {
+ assert(a[i] == b[i]);
+ }
+ }
assert(a[len] == 0);
assert(b[len] == 0);
}
test "cast slice to u8 slice" {
assert(@sizeOf(i32) == 4);
- var big_thing_array = []i32{1, 2, 3, 4};
+ var big_thing_array = []i32{
+ 1,
+ 2,
+ 3,
+ 4,
+ };
const big_thing_slice: []i32 = big_thing_array[0..];
- const bytes = ([]u8)(big_thing_slice);
+ const bytes = @sliceToBytes(big_thing_slice);
assert(bytes.len == 4 * 4);
bytes[4] = 0;
bytes[5] = 0;
bytes[6] = 0;
bytes[7] = 0;
assert(big_thing_slice[1] == 0);
- const big_thing_again = ([]align(1) i32)(bytes);
+ const big_thing_again = @bytesToSlice(i32, bytes);
assert(big_thing_again[2] == 3);
big_thing_again[2] = -1;
assert(bytes[8] == @maxValue(u8));
@@ -421,39 +439,48 @@ test "pointer to void return type" {
}
fn testPointerToVoidReturnType() error!void {
const a = testPointerToVoidReturnType2();
- return *a;
+ return a.*;
}
const test_pointer_to_void_return_type_x = void{};
-fn testPointerToVoidReturnType2() &const void {
+fn testPointerToVoidReturnType2() *const void {
return &test_pointer_to_void_return_type_x;
}
-
test "non const ptr to aliased type" {
const int = i32;
- assert(?&int == ?&i32);
+ assert(?*int == ?*i32);
}
-
-
test "array 2D const double ptr" {
- const rect_2d_vertexes = [][1]f32 {
+ const rect_2d_vertexes = [][1]f32{
[]f32{1.0},
[]f32{2.0},
};
testArray2DConstDoublePtr(&rect_2d_vertexes[0][0]);
}
-fn testArray2DConstDoublePtr(ptr: &const f32) void {
- assert(ptr[0] == 1.0);
- assert(ptr[1] == 2.0);
+fn testArray2DConstDoublePtr(ptr: *const f32) void {
+ const ptr2 = @ptrCast([*]const f32, ptr);
+ assert(ptr2[0] == 1.0);
+ assert(ptr2[1] == 2.0);
}
const Tid = builtin.TypeId;
-const AStruct = struct { x: i32, };
-const AnEnum = enum { One, Two, };
-const AUnionEnum = union(enum) { One: i32, Two: void, };
-const AUnion = union { One: void, Two: void };
+const AStruct = struct {
+ x: i32,
+};
+const AnEnum = enum {
+ One,
+ Two,
+};
+const AUnionEnum = union(enum) {
+ One: i32,
+ Two: void,
+};
+const AUnion = union {
+ One: void,
+ Two: void,
+};
test "@typeId" {
comptime {
@@ -467,40 +494,33 @@ test "@typeId" {
assert(@typeId(u64) == Tid.Int);
assert(@typeId(f32) == Tid.Float);
assert(@typeId(f64) == Tid.Float);
- assert(@typeId(&f32) == Tid.Pointer);
+ assert(@typeId(*f32) == Tid.Pointer);
assert(@typeId([2]u8) == Tid.Array);
assert(@typeId(AStruct) == Tid.Struct);
- assert(@typeId(@typeOf(1)) == Tid.IntLiteral);
- assert(@typeId(@typeOf(1.0)) == Tid.FloatLiteral);
- assert(@typeId(@typeOf(undefined)) == Tid.UndefinedLiteral);
- assert(@typeId(@typeOf(null)) == Tid.NullLiteral);
- assert(@typeId(?i32) == Tid.Nullable);
+ assert(@typeId(@typeOf(1)) == Tid.ComptimeInt);
+ assert(@typeId(@typeOf(1.0)) == Tid.ComptimeFloat);
+ assert(@typeId(@typeOf(undefined)) == Tid.Undefined);
+ assert(@typeId(@typeOf(null)) == Tid.Null);
+ assert(@typeId(?i32) == Tid.Optional);
assert(@typeId(error!i32) == Tid.ErrorUnion);
assert(@typeId(error) == Tid.ErrorSet);
assert(@typeId(AnEnum) == Tid.Enum);
assert(@typeId(@typeOf(AUnionEnum.One)) == Tid.Enum);
assert(@typeId(AUnionEnum) == Tid.Union);
assert(@typeId(AUnion) == Tid.Union);
- assert(@typeId(fn()void) == Tid.Fn);
+ assert(@typeId(fn () void) == Tid.Fn);
assert(@typeId(@typeOf(builtin)) == Tid.Namespace);
- assert(@typeId(@typeOf(x: {break :x this;})) == Tid.Block);
+ assert(@typeId(@typeOf(x: {
+ break :x this;
+ })) == Tid.Block);
// TODO bound fn
// TODO arg tuple
// TODO opaque
}
}
-test "@canImplicitCast" {
- comptime {
- assert(@canImplicitCast(i64, i32(3)));
- assert(!@canImplicitCast(i32, f32(1.234)));
- assert(@canImplicitCast([]const u8, "aoeu"));
- }
-}
-
test "@typeName" {
- const Struct = struct {
- };
+ const Struct = struct {};
const Union = union {
unused: u8,
};
@@ -509,8 +529,8 @@ test "@typeName" {
};
comptime {
assert(mem.eql(u8, @typeName(i64), "i64"));
- assert(mem.eql(u8, @typeName(&usize), "&usize"));
- // https://github.com/zig-lang/zig/issues/675
+ assert(mem.eql(u8, @typeName(*usize), "*usize"));
+ // https://github.com/ziglang/zig/issues/675
assert(mem.eql(u8, @typeName(TypeFromFn(u8)), "TypeFromFn(u8)"));
assert(mem.eql(u8, @typeName(Struct), "Struct"));
assert(mem.eql(u8, @typeName(Union), "Union"));
@@ -524,15 +544,20 @@ fn TypeFromFn(comptime T: type) type {
test "volatile load and store" {
var number: i32 = 1234;
- const ptr = (&volatile i32)(&number);
- *ptr += 1;
- assert(*ptr == 1235);
+ const ptr = (*volatile i32)(&number);
+ ptr.* += 1;
+ assert(ptr.* == 1235);
}
test "slice string literal has type []const u8" {
comptime {
assert(@typeOf("aoeu"[0..]) == []const u8);
- const array = []i32{1, 2, 3, 4};
+ const array = []i32{
+ 1,
+ 2,
+ 3,
+ 4,
+ };
assert(@typeOf(array[0..]) == []const i32);
}
}
@@ -543,37 +568,36 @@ test "global variable initialized to global variable array element" {
const GDTEntry = struct {
field: i32,
};
-var gdt = []GDTEntry {
- GDTEntry {.field = 1},
- GDTEntry {.field = 2},
+var gdt = []GDTEntry{
+ GDTEntry{ .field = 1 },
+ GDTEntry{ .field = 2 },
};
var global_ptr = &gdt[0];
-
// can't really run this test but we can make sure it has no compile error
// and generates code
-const vram = @intToPtr(&volatile u8, 0x20000000)[0..0x8000];
+const vram = @intToPtr([*]volatile u8, 0x20000000)[0..0x8000];
export fn writeToVRam() void {
vram[0] = 'X';
}
test "pointer child field" {
- assert((&u32).Child == u32);
+ assert((*u32).Child == u32);
}
const OpaqueA = @OpaqueType();
const OpaqueB = @OpaqueType();
test "@OpaqueType" {
- assert(&OpaqueA != &OpaqueB);
+ assert(*OpaqueA != *OpaqueB);
assert(mem.eql(u8, @typeName(OpaqueA), "OpaqueA"));
assert(mem.eql(u8, @typeName(OpaqueB), "OpaqueB"));
}
test "variable is allowed to be a pointer to an opaque type" {
var x: i32 = 1234;
- _ = hereIsAnOpaqueType(@ptrCast(&OpaqueA, &x));
+ _ = hereIsAnOpaqueType(@ptrCast(*OpaqueA, &x));
}
-fn hereIsAnOpaqueType(ptr: &OpaqueA) &OpaqueA {
+fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
var a = ptr;
return a;
}
@@ -584,7 +608,7 @@ test "comptime if inside runtime while which unconditionally breaks" {
}
fn testComptimeIfInsideRuntimeWhileWhichUnconditionallyBreaks(cond: bool) void {
while (cond) {
- if (false) { }
+ if (false) {}
break;
}
}
@@ -607,7 +631,7 @@ fn testStructInFn() void {
kind: BlockKind,
};
- var block = Block { .kind = 1234 };
+ var block = Block{ .kind = 1234 };
block.kind += 1;
@@ -617,7 +641,9 @@ fn testStructInFn() void {
fn fnThatClosesOverLocalConst() type {
const c = 1;
return struct {
- fn g() i32 { return c; }
+ fn g() i32 {
+ return c;
+ }
};
}
@@ -635,22 +661,27 @@ fn thisIsAColdFn() void {
@setCold(true);
}
-
-const PackedStruct = packed struct { a: u8, b: u8, };
-const PackedUnion = packed union { a: u8, b: u32, };
-const PackedEnum = packed enum { A, B, };
+const PackedStruct = packed struct {
+ a: u8,
+ b: u8,
+};
+const PackedUnion = packed union {
+ a: u8,
+ b: u32,
+};
+const PackedEnum = packed enum {
+ A,
+ B,
+};
test "packed struct, enum, union parameters in extern function" {
- testPackedStuff(
- PackedStruct{.a = 1, .b = 2},
- PackedUnion{.a = 1},
- PackedEnum.A,
- );
-}
-
-export fn testPackedStuff(a: &const PackedStruct, b: &const PackedUnion, c: PackedEnum) void {
+ testPackedStuff(PackedStruct{
+ .a = 1,
+ .b = 2,
+ }, PackedUnion{ .a = 1 }, PackedEnum.A);
}
+export fn testPackedStuff(a: *const PackedStruct, b: *const PackedUnion, c: PackedEnum) void {}
test "slicing zero length array" {
const s1 = ""[0..];
@@ -661,9 +692,13 @@ test "slicing zero length array" {
assert(mem.eql(u32, s2, []u32{}));
}
-
-const addr1 = @ptrCast(&const u8, emptyFn);
+const addr1 = @ptrCast(*const u8, emptyFn);
test "comptime cast fn to ptr" {
- const addr2 = @ptrCast(&const u8, emptyFn);
+ const addr2 = @ptrCast(*const u8, emptyFn);
comptime assert(addr1 == addr2);
}
+
+test "equality compare fn ptrs" {
+ var a = emptyFn;
+ assert(a == a);
+}
diff --git a/test/cases/namespace_depends_on_compile_var/index.zig b/test/cases/namespace_depends_on_compile_var/index.zig
index 95209dcef3..ccc49d9367 100644
--- a/test/cases/namespace_depends_on_compile_var/index.zig
+++ b/test/cases/namespace_depends_on_compile_var/index.zig
@@ -8,7 +8,7 @@ test "namespace depends on compile var" {
assert(!some_namespace.a_bool);
}
}
-const some_namespace = switch(builtin.os) {
+const some_namespace = switch (builtin.os) {
builtin.Os.linux => @import("a.zig"),
else => @import("b.zig"),
};
diff --git a/test/cases/new_stack_call.zig b/test/cases/new_stack_call.zig
new file mode 100644
index 0000000000..5912550d54
--- /dev/null
+++ b/test/cases/new_stack_call.zig
@@ -0,0 +1,26 @@
+const std = @import("std");
+const assert = std.debug.assert;
+
+var new_stack_bytes: [1024]u8 = undefined;
+
+test "calling a function with a new stack" {
+ const arg = 1234;
+
+ const a = @newStackCall(new_stack_bytes[0..512], targetFunction, arg);
+ const b = @newStackCall(new_stack_bytes[512..], targetFunction, arg);
+ _ = targetFunction(arg);
+
+ assert(arg == 1234);
+ assert(a < b);
+}
+
+fn targetFunction(x: i32) usize {
+ assert(x == 1234);
+
+ var local_variable: i32 = 42;
+ const ptr = &local_variable;
+ ptr.* += 1;
+
+ assert(local_variable == 43);
+ return @ptrToInt(ptr);
+}
diff --git a/test/cases/null.zig b/test/cases/null.zig
index 35d72b729c..c86dd34b06 100644
--- a/test/cases/null.zig
+++ b/test/cases/null.zig
@@ -1,7 +1,7 @@
const assert = @import("std").debug.assert;
-test "nullable type" {
- const x : ?bool = true;
+test "optional type" {
+ const x: ?bool = true;
if (x) |y| {
if (y) {
@@ -13,15 +13,15 @@ test "nullable type" {
unreachable;
}
- const next_x : ?i32 = null;
+ const next_x: ?i32 = null;
- const z = next_x ?? 1234;
+ const z = next_x orelse 1234;
assert(z == 1234);
- const final_x : ?i32 = 13;
+ const final_x: ?i32 = 13;
- const num = final_x ?? unreachable;
+ const num = final_x orelse unreachable;
assert(num == 13);
}
@@ -30,42 +30,43 @@ test "test maybe object and get a pointer to the inner value" {
var maybe_bool: ?bool = true;
if (maybe_bool) |*b| {
- *b = false;
+ b.* = false;
}
- assert(??maybe_bool == false);
+ assert(maybe_bool.? == false);
}
-
test "rhs maybe unwrap return" {
const x: ?bool = true;
- const y = x ?? return;
+ const y = x orelse return;
}
-
test "maybe return" {
maybeReturnImpl();
comptime maybeReturnImpl();
}
fn maybeReturnImpl() void {
- assert(??foo(1235));
- if (foo(null) != null)
- unreachable;
- assert(!??foo(1234));
+ assert(foo(1235).?);
+ if (foo(null) != null) unreachable;
+ assert(!foo(1234).?);
}
fn foo(x: ?i32) ?bool {
- const value = x ?? return null;
+ const value = x orelse return null;
return value > 1234;
}
-
test "if var maybe pointer" {
- assert(shouldBeAPlus1(Particle {.a = 14, .b = 1, .c = 1, .d = 1}) == 15);
+ assert(shouldBeAPlus1(Particle{
+ .a = 14,
+ .b = 1,
+ .c = 1,
+ .d = 1,
+ }) == 15);
}
-fn shouldBeAPlus1(p: &const Particle) u64 {
- var maybe_particle: ?Particle = *p;
+fn shouldBeAPlus1(p: *const Particle) u64 {
+ var maybe_particle: ?Particle = p.*;
if (maybe_particle) |*particle| {
particle.a += 1;
}
@@ -81,7 +82,6 @@ const Particle = struct {
d: u64,
};
-
test "null literal outside function" {
const is_null = here_is_a_null_literal.context == null;
assert(is_null);
@@ -92,10 +92,7 @@ test "null literal outside function" {
const SillyStruct = struct {
context: ?i32,
};
-const here_is_a_null_literal = SillyStruct {
- .context = null,
-};
-
+const here_is_a_null_literal = SillyStruct{ .context = null };
test "test null runtime" {
testTestNullRuntime(null);
@@ -105,12 +102,12 @@ fn testTestNullRuntime(x: ?i32) void {
assert(!(x != null));
}
-test "nullable void" {
- nullableVoidImpl();
- comptime nullableVoidImpl();
+test "optional void" {
+ optionalVoidImpl();
+ comptime optionalVoidImpl();
}
-fn nullableVoidImpl() void {
+fn optionalVoidImpl() void {
assert(bar(null) == null);
assert(bar({}) != null);
}
@@ -123,21 +120,19 @@ fn bar(x: ?void) ?void {
}
}
-
-
-const StructWithNullable = struct {
+const StructWithOptional = struct {
field: ?i32,
};
-var struct_with_nullable: StructWithNullable = undefined;
+var struct_with_optional: StructWithOptional = undefined;
-test "unwrap nullable which is field of global var" {
- struct_with_nullable.field = null;
- if (struct_with_nullable.field) |payload| {
+test "unwrap optional which is field of global var" {
+ struct_with_optional.field = null;
+ if (struct_with_optional.field) |payload| {
unreachable;
}
- struct_with_nullable.field = 1234;
- if (struct_with_nullable.field) |payload| {
+ struct_with_optional.field = 1234;
+ if (struct_with_optional.field) |payload| {
assert(payload == 1234);
} else {
unreachable;
@@ -145,6 +140,17 @@ test "unwrap nullable which is field of global var" {
}
test "null with default unwrap" {
- const x: i32 = null ?? 1;
+ const x: i32 = null orelse 1;
assert(x == 1);
}
+
+test "optional types" {
+ comptime {
+ const opt_type_struct = StructWithOptionalType{ .t = u8 };
+ assert(opt_type_struct.t != null and opt_type_struct.t.? == u8);
+ }
+}
+
+const StructWithOptionalType = struct {
+ t: ?type,
+};
diff --git a/test/cases/optional.zig b/test/cases/optional.zig
new file mode 100644
index 0000000000..d43682bbec
--- /dev/null
+++ b/test/cases/optional.zig
@@ -0,0 +1,30 @@
+const assert = @import("std").debug.assert;
+
+pub const EmptyStruct = struct {};
+
+test "optional pointer to size zero struct" {
+ var e = EmptyStruct{};
+ var o: ?*EmptyStruct = &e;
+ assert(o != null);
+}
+
+test "equality compare nullable pointers" {
+ testNullPtrsEql();
+ comptime testNullPtrsEql();
+}
+
+fn testNullPtrsEql() void {
+ var number: i32 = 1234;
+
+ var x: ?*i32 = null;
+ var y: ?*i32 = null;
+ assert(x == y);
+ y = &number;
+ assert(x != y);
+ assert(x != &number);
+ assert(&number != x);
+ x = &number;
+ assert(x == y);
+ assert(x == &number);
+ assert(&number == x);
+}
diff --git a/test/cases/pointers.zig b/test/cases/pointers.zig
new file mode 100644
index 0000000000..47afb60a2e
--- /dev/null
+++ b/test/cases/pointers.zig
@@ -0,0 +1,44 @@
+const std = @import("std");
+const assert = std.debug.assert;
+
+test "dereference pointer" {
+ comptime testDerefPtr();
+ testDerefPtr();
+}
+
+fn testDerefPtr() void {
+ var x: i32 = 1234;
+ var y = &x;
+ y.* += 1;
+ assert(x == 1235);
+}
+
+test "pointer arithmetic" {
+ var ptr = c"abcd";
+
+ assert(ptr[0] == 'a');
+ ptr += 1;
+ assert(ptr[0] == 'b');
+ ptr += 1;
+ assert(ptr[0] == 'c');
+ ptr += 1;
+ assert(ptr[0] == 'd');
+ ptr += 1;
+ assert(ptr[0] == 0);
+ ptr -= 1;
+ assert(ptr[0] == 'd');
+ ptr -= 1;
+ assert(ptr[0] == 'c');
+ ptr -= 1;
+ assert(ptr[0] == 'b');
+ ptr -= 1;
+ assert(ptr[0] == 'a');
+}
+
+test "double pointer parsing" {
+ comptime assert(PtrOf(PtrOf(i32)) == **i32);
+}
+
+fn PtrOf(comptime T: type) type {
+ return *T;
+}
diff --git a/test/cases/popcount.zig b/test/cases/popcount.zig
new file mode 100644
index 0000000000..7dc7f28c0e
--- /dev/null
+++ b/test/cases/popcount.zig
@@ -0,0 +1,24 @@
+const assert = @import("std").debug.assert;
+
+test "@popCount" {
+ comptime testPopCount();
+ testPopCount();
+}
+
+fn testPopCount() void {
+ {
+ var x: u32 = 0xaa;
+ assert(@popCount(x) == 4);
+ }
+ {
+ var x: u32 = 0xaaaaaaaa;
+ assert(@popCount(x) == 16);
+ }
+ {
+ var x: i16 = -1;
+ assert(@popCount(x) == 16);
+ }
+ comptime {
+ assert(@popCount(0b11111111000110001100010000100001000011000011100101010001) == 24);
+ }
+}
diff --git a/test/cases/ref_var_in_if_after_if_2nd_switch_prong.zig b/test/cases/ref_var_in_if_after_if_2nd_switch_prong.zig
index 76cff3731a..3c94bb0d49 100644
--- a/test/cases/ref_var_in_if_after_if_2nd_switch_prong.zig
+++ b/test/cases/ref_var_in_if_after_if_2nd_switch_prong.zig
@@ -23,7 +23,7 @@ fn foo(c: bool, k: Num, c2: bool, b: []const u8) void {
if (c) {
const output_path = b;
- if (c2) { }
+ if (c2) {}
a(output_path);
}
diff --git a/test/cases/reflection.zig b/test/cases/reflection.zig
index 18a766d9fc..3d3af3c889 100644
--- a/test/cases/reflection.zig
+++ b/test/cases/reflection.zig
@@ -1,10 +1,11 @@
const assert = @import("std").debug.assert;
const mem = @import("std").mem;
+const reflection = this;
-test "reflection: array, pointer, nullable, error union type child" {
+test "reflection: array, pointer, optional, error union type child" {
comptime {
assert(([10]u8).Child == u8);
- assert((&u8).Child == u8);
+ assert((*u8).Child == u8);
assert((error!u8).Payload == u8);
assert((?u8).Child == u8);
}
@@ -22,7 +23,9 @@ test "reflection: function return type, var args, and param types" {
}
}
-fn dummy(a: bool, b: i32, c: f32) i32 { return 1234; }
+fn dummy(a: bool, b: i32, c: f32) i32 {
+ return 1234;
+}
fn dummy_varargs(args: ...) void {}
test "reflection: struct member types and names" {
@@ -53,10 +56,32 @@ test "reflection: enum member types and names" {
assert(mem.eql(u8, @memberName(Bar, 2), "Three"));
assert(mem.eql(u8, @memberName(Bar, 3), "Four"));
}
+}
+test "reflection: @field" {
+ var f = Foo{
+ .one = 42,
+ .two = true,
+ .three = void{},
+ };
+
+ assert(f.one == f.one);
+ assert(@field(f, "o" ++ "ne") == f.one);
+ assert(@field(f, "t" ++ "wo") == f.two);
+ assert(@field(f, "th" ++ "ree") == f.three);
+ assert(@field(Foo, "const" ++ "ant") == Foo.constant);
+ assert(@field(Bar, "O" ++ "ne") == Bar.One);
+ assert(@field(Bar, "T" ++ "wo") == Bar.Two);
+ assert(@field(Bar, "Th" ++ "ree") == Bar.Three);
+ assert(@field(Bar, "F" ++ "our") == Bar.Four);
+ assert(@field(reflection, "dum" ++ "my")(true, 1, 2) == dummy(true, 1, 2));
+ @field(f, "o" ++ "ne") = 4;
+ assert(f.one == 4);
}
const Foo = struct {
+ const constant = 52;
+
one: i32,
two: bool,
three: void,
diff --git a/test/cases/slice.zig b/test/cases/slice.zig
index ea708ba3b5..b4b43bdd19 100644
--- a/test/cases/slice.zig
+++ b/test/cases/slice.zig
@@ -1,7 +1,7 @@
const assert = @import("std").debug.assert;
const mem = @import("std").mem;
-const x = @intToPtr(&i32, 0x1000)[0..0x500];
+const x = @intToPtr([*]i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
assert(@ptrToInt(x.ptr) == 0x1000);
@@ -18,7 +18,11 @@ test "slice child property" {
}
test "runtime safety lets us slice from len..len" {
- var an_array = []u8{1, 2, 3};
+ var an_array = []u8{
+ 1,
+ 2,
+ 3,
+ };
assert(mem.eql(u8, sliceFromLenToLen(an_array[0..], 3, 3), ""));
}
@@ -27,7 +31,7 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
}
test "implicitly cast array of size 0 to slice" {
- var msg = []u8 {};
+ var msg = []u8{};
assertLenIsZero(msg);
}
diff --git a/test/cases/struct.zig b/test/cases/struct.zig
index c3df97678b..20d46999d5 100644
--- a/test/cases/struct.zig
+++ b/test/cases/struct.zig
@@ -2,9 +2,11 @@ const assert = @import("std").debug.assert;
const builtin = @import("builtin");
const StructWithNoFields = struct {
- fn add(a: i32, b: i32) i32 { return a + b; }
+ fn add(a: i32, b: i32) i32 {
+ return a + b;
+ }
};
-const empty_global_instance = StructWithNoFields {};
+const empty_global_instance = StructWithNoFields{};
test "call struct static method" {
const result = StructWithNoFields.add(3, 4);
@@ -25,7 +27,7 @@ test "invake static method in global scope" {
}
test "void struct fields" {
- const foo = VoidStructFieldsFoo {
+ const foo = VoidStructFieldsFoo{
.a = void{},
.b = 1,
.c = void{},
@@ -34,15 +36,14 @@ test "void struct fields" {
assert(@sizeOf(VoidStructFieldsFoo) == 4);
}
const VoidStructFieldsFoo = struct {
- a : void,
- b : i32,
- c : void,
+ a: void,
+ b: i32,
+ c: void,
};
-
test "structs" {
var foo: StructFoo = undefined;
- @memset(@ptrCast(&u8, &foo), 0, @sizeOf(StructFoo));
+ @memset(@ptrCast([*]u8, &foo), 0, @sizeOf(StructFoo));
foo.a += 1;
foo.b = foo.a == 1;
testFoo(foo);
@@ -50,21 +51,20 @@ test "structs" {
assert(foo.c == 100);
}
const StructFoo = struct {
- a : i32,
- b : bool,
- c : f32,
+ a: i32,
+ b: bool,
+ c: f32,
};
-fn testFoo(foo: &const StructFoo) void {
+fn testFoo(foo: *const StructFoo) void {
assert(foo.b);
}
-fn testMutation(foo: &StructFoo) void {
+fn testMutation(foo: *StructFoo) void {
foo.c = 100;
}
-
const Node = struct {
val: Val,
- next: &Node,
+ next: *Node,
};
const Val = struct {
@@ -72,10 +72,10 @@ const Val = struct {
};
test "struct point to self" {
- var root : Node = undefined;
+ var root: Node = undefined;
root.val.x = 1;
- var node : Node = undefined;
+ var node: Node = undefined;
node.next = &root;
node.val.x = 2;
@@ -85,8 +85,8 @@ test "struct point to self" {
}
test "struct byval assign" {
- var foo1 : StructFoo = undefined;
- var foo2 : StructFoo = undefined;
+ var foo1: StructFoo = undefined;
+ var foo2: StructFoo = undefined;
foo1.a = 1234;
foo2.a = 0;
@@ -96,51 +96,52 @@ test "struct byval assign" {
}
fn structInitializer() void {
- const val = Val { .x = 42 };
+ const val = Val{ .x = 42 };
assert(val.x == 42);
}
-
test "fn call of struct field" {
- assert(callStructField(Foo {.ptr = aFunc,}) == 13);
+ assert(callStructField(Foo{ .ptr = aFunc }) == 13);
}
const Foo = struct {
- ptr: fn() i32,
+ ptr: fn () i32,
};
-fn aFunc() i32 { return 13; }
+fn aFunc() i32 {
+ return 13;
+}
-fn callStructField(foo: &const Foo) i32 {
+fn callStructField(foo: *const Foo) i32 {
return foo.ptr();
}
-
test "store member function in variable" {
- const instance = MemberFnTestFoo { .x = 1234, };
+ const instance = MemberFnTestFoo{ .x = 1234 };
const memberFn = MemberFnTestFoo.member;
const result = memberFn(instance);
assert(result == 1234);
}
const MemberFnTestFoo = struct {
x: i32,
- fn member(foo: &const MemberFnTestFoo) i32 { return foo.x; }
+ fn member(foo: *const MemberFnTestFoo) i32 {
+ return foo.x;
+ }
};
-
test "call member function directly" {
- const instance = MemberFnTestFoo { .x = 1234, };
+ const instance = MemberFnTestFoo{ .x = 1234 };
const result = MemberFnTestFoo.member(instance);
assert(result == 1234);
}
test "member functions" {
- const r = MemberFnRand {.seed = 1234};
+ const r = MemberFnRand{ .seed = 1234 };
assert(r.getSeed() == 1234);
}
const MemberFnRand = struct {
seed: u32,
- pub fn getSeed(r: &const MemberFnRand) u32 {
+ pub fn getSeed(r: *const MemberFnRand) u32 {
return r.seed;
}
};
@@ -154,7 +155,7 @@ const Bar = struct {
y: i32,
};
fn makeBar(x: i32, y: i32) Bar {
- return Bar {
+ return Bar{
.x = x,
.y = y,
};
@@ -165,22 +166,21 @@ test "empty struct method call" {
assert(es.method() == 1234);
}
const EmptyStruct = struct {
- fn method(es: &const EmptyStruct) i32 {
+ fn method(es: *const EmptyStruct) i32 {
return 1234;
}
};
-
test "return empty struct from fn" {
_ = testReturnEmptyStructFromFn();
}
const EmptyStruct2 = struct {};
fn testReturnEmptyStructFromFn() EmptyStruct2 {
- return EmptyStruct2 {};
+ return EmptyStruct2{};
}
test "pass slice of empty struct to fn" {
- assert(testPassSliceOfEmptyStructToFn([]EmptyStruct2{ EmptyStruct2{} }) == 1);
+ assert(testPassSliceOfEmptyStructToFn([]EmptyStruct2{EmptyStruct2{}}) == 1);
}
fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
return slice.len;
@@ -192,7 +192,7 @@ const APackedStruct = packed struct {
};
test "packed struct" {
- var foo = APackedStruct {
+ var foo = APackedStruct{
.x = 1,
.y = 2,
};
@@ -201,14 +201,13 @@ test "packed struct" {
assert(four == 4);
}
-
const BitField1 = packed struct {
a: u3,
b: u3,
c: u2,
};
-const bit_field_1 = BitField1 {
+const bit_field_1 = BitField1{
.a = 1,
.b = 2,
.c = 3,
@@ -229,19 +228,18 @@ test "bit field access" {
assert(data.b == 3);
}
-fn getA(data: &const BitField1) u3 {
+fn getA(data: *const BitField1) u3 {
return data.a;
}
-fn getB(data: &const BitField1) u3 {
+fn getB(data: *const BitField1) u3 {
return data.b;
}
-fn getC(data: &const BitField1) u2 {
+fn getC(data: *const BitField1) u2 {
return data.c;
}
-const u24 = @IntType(false, 24);
const Foo24Bits = packed struct {
field: u24,
};
@@ -258,7 +256,7 @@ test "packed struct 24bits" {
assert(@sizeOf(Foo96Bits) == 12);
}
- var value = Foo96Bits {
+ var value = Foo96Bits{
.a = 0,
.b = 0,
.c = 0,
@@ -303,7 +301,7 @@ test "packed array 24bits" {
var bytes = []u8{0} ** (@sizeOf(FooArray24Bits) + 1);
bytes[bytes.len - 1] = 0xaa;
- const ptr = &([]FooArray24Bits)(bytes[0..bytes.len - 1])[0];
+ const ptr = &@bytesToSlice(FooArray24Bits, bytes[0 .. bytes.len - 1])[0];
assert(ptr.a == 0);
assert(ptr.b[0].field == 0);
assert(ptr.b[1].field == 0);
@@ -352,7 +350,7 @@ test "aligned array of packed struct" {
}
var bytes = []u8{0xbb} ** @sizeOf(FooArrayOfAligned);
- const ptr = &([]FooArrayOfAligned)(bytes[0..bytes.len])[0];
+ const ptr = &@bytesToSlice(FooArrayOfAligned, bytes[0..bytes.len])[0];
assert(ptr.a[0].a == 0xbb);
assert(ptr.a[0].b == 0xbb);
@@ -360,16 +358,20 @@ test "aligned array of packed struct" {
assert(ptr.a[1].b == 0xbb);
}
-
-
test "runtime struct initialization of bitfield" {
- const s1 = Nibbles { .x = x1, .y = x1 };
- const s2 = Nibbles { .x = u4(x2), .y = u4(x2) };
+ const s1 = Nibbles{
+ .x = x1,
+ .y = x1,
+ };
+ const s2 = Nibbles{
+ .x = @intCast(u4, x2),
+ .y = @intCast(u4, x2),
+ };
assert(s1.x == x1);
assert(s1.y == x1);
- assert(s2.x == u4(x2));
- assert(s2.y == u4(x2));
+ assert(s2.x == @intCast(u4, x2));
+ assert(s2.y == @intCast(u4, x2));
}
var x1 = u4(1);
@@ -393,8 +395,8 @@ const Bitfields = packed struct {
test "native bit field understands endianness" {
var all: u64 = 0x7765443322221111;
var bytes: [8]u8 = undefined;
- @memcpy(&bytes[0], @ptrCast(&u8, &all), 8);
- var bitfields = *@ptrCast(&Bitfields, &bytes[0]);
+ @memcpy(bytes[0..].ptr, @ptrCast([*]u8, &all), 8);
+ var bitfields = @ptrCast(*Bitfields, bytes[0..].ptr).*;
assert(bitfields.f1 == 0x1111);
assert(bitfields.f2 == 0x2222);
@@ -412,9 +414,26 @@ test "align 1 field before self referential align 8 field as slice return type"
const Expr = union(enum) {
Literal: u8,
- Question: &Expr,
+ Question: *Expr,
};
fn alloc(comptime T: type) []T {
return []T{};
}
+
+test "call method with mutable reference to struct with no fields" {
+ const S = struct {
+ fn doC(s: *const this) bool {
+ return true;
+ }
+ fn do(s: *this) bool {
+ return true;
+ }
+ };
+
+ var s = S{};
+ assert(S.doC(&s));
+ assert(s.doC());
+ assert(S.do(&s));
+ assert(s.do());
+}
diff --git a/test/cases/struct_contains_null_ptr_itself.zig b/test/cases/struct_contains_null_ptr_itself.zig
index 5864ef4038..21175974b3 100644
--- a/test/cases/struct_contains_null_ptr_itself.zig
+++ b/test/cases/struct_contains_null_ptr_itself.zig
@@ -2,13 +2,13 @@ const std = @import("std");
const assert = std.debug.assert;
test "struct contains null pointer which contains original struct" {
- var x: ?&NodeLineComment = null;
+ var x: ?*NodeLineComment = null;
assert(x == null);
}
pub const Node = struct {
id: Id,
- comment: ?&NodeLineComment,
+ comment: ?*NodeLineComment,
pub const Id = enum {
Root,
@@ -19,4 +19,3 @@ pub const Node = struct {
pub const NodeLineComment = struct {
base: Node,
};
-
diff --git a/test/cases/struct_contains_slice_of_itself.zig b/test/cases/struct_contains_slice_of_itself.zig
index 45ec56c1e2..07987ae32b 100644
--- a/test/cases/struct_contains_slice_of_itself.zig
+++ b/test/cases/struct_contains_slice_of_itself.zig
@@ -7,30 +7,30 @@ const Node = struct {
test "struct contains slice of itself" {
var other_nodes = []Node{
- Node {
+ Node{
.payload = 31,
.children = []Node{},
},
- Node {
+ Node{
.payload = 32,
.children = []Node{},
},
};
- var nodes = []Node {
- Node {
+ var nodes = []Node{
+ Node{
.payload = 1,
.children = []Node{},
},
- Node {
+ Node{
.payload = 2,
.children = []Node{},
},
- Node {
+ Node{
.payload = 3,
.children = other_nodes[0..],
},
};
- const root = Node {
+ const root = Node{
.payload = 1234,
.children = nodes[0..],
};
diff --git a/test/cases/switch.zig b/test/cases/switch.zig
index a0ac646160..c6a4b60f09 100644
--- a/test/cases/switch.zig
+++ b/test/cases/switch.zig
@@ -6,7 +6,7 @@ test "switch with numbers" {
fn testSwitchWithNumbers(x: u32) void {
const result = switch (x) {
- 1, 2, 3, 4 ... 8 => false,
+ 1, 2, 3, 4...8 => false,
13 => true,
else => false,
};
@@ -22,9 +22,9 @@ test "switch with all ranges" {
fn testSwitchWithAllRanges(x: u32, y: u32) u32 {
return switch (x) {
- 0 ... 100 => 1,
- 101 ... 200 => 2,
- 201 ... 300 => 3,
+ 0...100 => 1,
+ 101...200 => 2,
+ 201...300 => 3,
else => y,
};
}
@@ -61,7 +61,6 @@ fn nonConstSwitchOnEnum(fruit: Fruit) void {
}
}
-
test "switch statement" {
nonConstSwitch(SwitchStatmentFoo.C);
}
@@ -81,19 +80,18 @@ const SwitchStatmentFoo = enum {
D,
};
-
test "switch prong with variable" {
- switchProngWithVarFn(SwitchProngWithVarEnum { .One = 13});
- switchProngWithVarFn(SwitchProngWithVarEnum { .Two = 13.0});
- switchProngWithVarFn(SwitchProngWithVarEnum { .Meh = {}});
+ switchProngWithVarFn(SwitchProngWithVarEnum{ .One = 13 });
+ switchProngWithVarFn(SwitchProngWithVarEnum{ .Two = 13.0 });
+ switchProngWithVarFn(SwitchProngWithVarEnum{ .Meh = {} });
}
const SwitchProngWithVarEnum = union(enum) {
One: i32,
Two: f32,
Meh: void,
};
-fn switchProngWithVarFn(a: &const SwitchProngWithVarEnum) void {
- switch(*a) {
+fn switchProngWithVarFn(a: *const SwitchProngWithVarEnum) void {
+ switch (a.*) {
SwitchProngWithVarEnum.One => |x| {
assert(x == 13);
},
@@ -112,9 +110,9 @@ test "switch on enum using pointer capture" {
}
fn testSwitchEnumPtrCapture() void {
- var value = SwitchProngWithVarEnum { .One = 1234 };
+ var value = SwitchProngWithVarEnum{ .One = 1234 };
switch (value) {
- SwitchProngWithVarEnum.One => |*x| *x += 1,
+ SwitchProngWithVarEnum.One => |*x| x.* += 1,
else => unreachable,
}
switch (value) {
@@ -135,14 +133,13 @@ fn returnsFive() i32 {
return 5;
}
-
const Number = union(enum) {
One: u64,
Two: u8,
Three: f32,
};
-const number = Number { .Three = 1.23 };
+const number = Number{ .Three = 1.23 };
fn returnsFalse() bool {
switch (number) {
@@ -196,11 +193,11 @@ fn testSwitchHandleAllCasesExhaustive(x: u2) u2 {
fn testSwitchHandleAllCasesRange(x: u8) u8 {
return switch (x) {
- 0 ... 100 => u8(0),
- 101 ... 200 => 1,
+ 0...100 => u8(0),
+ 101...200 => 1,
201, 203 => 2,
202 => 4,
- 204 ... 255 => 3,
+ 204...255 => 3,
};
}
diff --git a/test/cases/switch_prong_err_enum.zig b/test/cases/switch_prong_err_enum.zig
index 136e8834e6..f060ac2c57 100644
--- a/test/cases/switch_prong_err_enum.zig
+++ b/test/cases/switch_prong_err_enum.zig
@@ -14,14 +14,16 @@ const FormValue = union(enum) {
fn doThing(form_id: u64) error!FormValue {
return switch (form_id) {
- 17 => FormValue { .Address = try readOnce() },
+ 17 => FormValue{ .Address = try readOnce() },
else => error.InvalidDebugInfo,
};
}
test "switch prong returns error enum" {
switch (doThing(17) catch unreachable) {
- FormValue.Address => |payload| { assert(payload == 1); },
+ FormValue.Address => |payload| {
+ assert(payload == 1);
+ },
else => unreachable,
}
assert(read_count == 1);
diff --git a/test/cases/switch_prong_implicit_cast.zig b/test/cases/switch_prong_implicit_cast.zig
index 335feeef43..56d37e290f 100644
--- a/test/cases/switch_prong_implicit_cast.zig
+++ b/test/cases/switch_prong_implicit_cast.zig
@@ -7,8 +7,8 @@ const FormValue = union(enum) {
fn foo(id: u64) !FormValue {
return switch (id) {
- 2 => FormValue { .Two = true },
- 1 => FormValue { .One = {} },
+ 2 => FormValue{ .Two = true },
+ 1 => FormValue{ .One = {} },
else => return error.Whatever,
};
}
diff --git a/test/cases/syntax.zig b/test/cases/syntax.zig
index 6c851c0ff3..b497b060c4 100644
--- a/test/cases/syntax.zig
+++ b/test/cases/syntax.zig
@@ -1,12 +1,11 @@
// Test trailing comma syntax
+// zig fmt: off
const struct_trailing_comma = struct { x: i32, y: i32, };
const struct_no_comma = struct { x: i32, y: i32 };
-const struct_no_comma_void_type = struct { x: i32, y };
const struct_fn_no_comma = struct { fn m() void {} y: i32 };
const enum_no_comma = enum { A, B };
-const enum_no_comma_type = enum { A, B: i32 };
fn container_init() void {
const S = struct { x: i32, y: i32 };
@@ -36,16 +35,11 @@ fn switch_prongs(x: i32) void {
const fn_no_comma = fn(i32, i32)void;
const fn_trailing_comma = fn(i32, i32,)void;
-const fn_vararg_trailing_comma = fn(i32, i32, ...,)void;
fn fn_calls() void {
fn add(x: i32, y: i32,) i32 { x + y };
_ = add(1, 2);
_ = add(1, 2,);
-
- fn swallow(x: ...,) void {};
- _ = swallow(1,2,3,);
- _ = swallow();
}
fn asm_lists() void {
diff --git a/test/cases/this.zig b/test/cases/this.zig
index 8ed5e1ae1a..ba51d0ac90 100644
--- a/test/cases/this.zig
+++ b/test/cases/this.zig
@@ -8,7 +8,7 @@ fn Point(comptime T: type) type {
x: T,
y: T,
- fn addOne(self: &Self) void {
+ fn addOne(self: *Self) void {
self.x += 1;
self.y += 1;
}
@@ -29,7 +29,7 @@ test "this refer to module call private fn" {
}
test "this refer to container" {
- var pt = Point(i32) {
+ var pt = Point(i32){
.x = 12,
.y = 34,
};
diff --git a/test/cases/try.zig b/test/cases/try.zig
index 4a0425e22e..cf5fa5862a 100644
--- a/test/cases/try.zig
+++ b/test/cases/try.zig
@@ -3,13 +3,10 @@ const assert = @import("std").debug.assert;
test "try on error union" {
tryOnErrorUnionImpl();
comptime tryOnErrorUnionImpl();
-
}
fn tryOnErrorUnionImpl() void {
- const x = if (returnsTen()) |val|
- val + 1
- else |err| switch (err) {
+ const x = if (returnsTen()) |val| val + 1 else |err| switch (err) {
error.ItBroke, error.NoMem => 1,
error.CrappedOut => i32(2),
else => unreachable,
diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig
new file mode 100644
index 0000000000..b8fc4cf14e
--- /dev/null
+++ b/test/cases/type_info.zig
@@ -0,0 +1,259 @@
+const assert = @import("std").debug.assert;
+const mem = @import("std").mem;
+const TypeInfo = @import("builtin").TypeInfo;
+const TypeId = @import("builtin").TypeId;
+
+test "type info: tag type, void info" {
+ testBasic();
+ comptime testBasic();
+}
+
+fn testBasic() void {
+ assert(@TagType(TypeInfo) == TypeId);
+ const void_info = @typeInfo(void);
+ assert(TypeId(void_info) == TypeId.Void);
+ assert(void_info.Void == {});
+}
+
+test "type info: integer, floating point type info" {
+ testIntFloat();
+ comptime testIntFloat();
+}
+
+fn testIntFloat() void {
+ const u8_info = @typeInfo(u8);
+ assert(TypeId(u8_info) == TypeId.Int);
+ assert(!u8_info.Int.is_signed);
+ assert(u8_info.Int.bits == 8);
+
+ const f64_info = @typeInfo(f64);
+ assert(TypeId(f64_info) == TypeId.Float);
+ assert(f64_info.Float.bits == 64);
+}
+
+test "type info: pointer type info" {
+ testPointer();
+ comptime testPointer();
+}
+
+fn testPointer() void {
+ const u32_ptr_info = @typeInfo(*u32);
+ assert(TypeId(u32_ptr_info) == TypeId.Pointer);
+ assert(u32_ptr_info.Pointer.size == TypeInfo.Pointer.Size.One);
+ assert(u32_ptr_info.Pointer.is_const == false);
+ assert(u32_ptr_info.Pointer.is_volatile == false);
+ assert(u32_ptr_info.Pointer.alignment == @alignOf(u32));
+ assert(u32_ptr_info.Pointer.child == u32);
+}
+
+test "type info: unknown length pointer type info" {
+ testUnknownLenPtr();
+ comptime testUnknownLenPtr();
+}
+
+fn testUnknownLenPtr() void {
+ const u32_ptr_info = @typeInfo([*]const volatile f64);
+ assert(TypeId(u32_ptr_info) == TypeId.Pointer);
+ assert(u32_ptr_info.Pointer.size == TypeInfo.Pointer.Size.Many);
+ assert(u32_ptr_info.Pointer.is_const == true);
+ assert(u32_ptr_info.Pointer.is_volatile == true);
+ assert(u32_ptr_info.Pointer.alignment == @alignOf(f64));
+ assert(u32_ptr_info.Pointer.child == f64);
+}
+
+test "type info: slice type info" {
+ testSlice();
+ comptime testSlice();
+}
+
+fn testSlice() void {
+ const u32_slice_info = @typeInfo([]u32);
+ assert(TypeId(u32_slice_info) == TypeId.Pointer);
+ assert(u32_slice_info.Pointer.size == TypeInfo.Pointer.Size.Slice);
+ assert(u32_slice_info.Pointer.is_const == false);
+ assert(u32_slice_info.Pointer.is_volatile == false);
+ assert(u32_slice_info.Pointer.alignment == 4);
+ assert(u32_slice_info.Pointer.child == u32);
+}
+
+test "type info: array type info" {
+ testArray();
+ comptime testArray();
+}
+
+fn testArray() void {
+ const arr_info = @typeInfo([42]bool);
+ assert(TypeId(arr_info) == TypeId.Array);
+ assert(arr_info.Array.len == 42);
+ assert(arr_info.Array.child == bool);
+}
+
+test "type info: optional type info" {
+ testOptional();
+ comptime testOptional();
+}
+
+fn testOptional() void {
+ const null_info = @typeInfo(?void);
+ assert(TypeId(null_info) == TypeId.Optional);
+ assert(null_info.Optional.child == void);
+}
+
+test "type info: promise info" {
+ testPromise();
+ comptime testPromise();
+}
+
+fn testPromise() void {
+ const null_promise_info = @typeInfo(promise);
+ assert(TypeId(null_promise_info) == TypeId.Promise);
+ assert(null_promise_info.Promise.child == null);
+
+ const promise_info = @typeInfo(promise->usize);
+ assert(TypeId(promise_info) == TypeId.Promise);
+ assert(promise_info.Promise.child.? == usize);
+}
+
+test "type info: error set, error union info" {
+ testErrorSet();
+ comptime testErrorSet();
+}
+
+fn testErrorSet() void {
+ const TestErrorSet = error{
+ First,
+ Second,
+ Third,
+ };
+
+ const error_set_info = @typeInfo(TestErrorSet);
+ assert(TypeId(error_set_info) == TypeId.ErrorSet);
+ assert(error_set_info.ErrorSet.errors.len == 3);
+ assert(mem.eql(u8, error_set_info.ErrorSet.errors[0].name, "First"));
+ assert(error_set_info.ErrorSet.errors[2].value == @errorToInt(TestErrorSet.Third));
+
+ const error_union_info = @typeInfo(TestErrorSet!usize);
+ assert(TypeId(error_union_info) == TypeId.ErrorUnion);
+ assert(error_union_info.ErrorUnion.error_set == TestErrorSet);
+ assert(error_union_info.ErrorUnion.payload == usize);
+}
+
+test "type info: enum info" {
+ testEnum();
+ comptime testEnum();
+}
+
+fn testEnum() void {
+ const Os = @import("builtin").Os;
+
+ const os_info = @typeInfo(Os);
+ assert(TypeId(os_info) == TypeId.Enum);
+ assert(os_info.Enum.layout == TypeInfo.ContainerLayout.Auto);
+ assert(os_info.Enum.fields.len == 32);
+ assert(mem.eql(u8, os_info.Enum.fields[1].name, "ananas"));
+ assert(os_info.Enum.fields[10].value == 10);
+ assert(os_info.Enum.tag_type == u5);
+ assert(os_info.Enum.defs.len == 0);
+}
+
+test "type info: union info" {
+ testUnion();
+ comptime testUnion();
+}
+
+fn testUnion() void {
+ const typeinfo_info = @typeInfo(TypeInfo);
+ assert(TypeId(typeinfo_info) == TypeId.Union);
+ assert(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto);
+ assert(typeinfo_info.Union.tag_type.? == TypeId);
+ assert(typeinfo_info.Union.fields.len == 25);
+ assert(typeinfo_info.Union.fields[4].enum_field != null);
+ assert(typeinfo_info.Union.fields[4].enum_field.?.value == 4);
+ assert(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int));
+ assert(typeinfo_info.Union.defs.len == 20);
+
+ const TestNoTagUnion = union {
+ Foo: void,
+ Bar: u32,
+ };
+
+ const notag_union_info = @typeInfo(TestNoTagUnion);
+ assert(TypeId(notag_union_info) == TypeId.Union);
+ assert(notag_union_info.Union.tag_type == null);
+ assert(notag_union_info.Union.layout == TypeInfo.ContainerLayout.Auto);
+ assert(notag_union_info.Union.fields.len == 2);
+ assert(notag_union_info.Union.fields[0].enum_field == null);
+ assert(notag_union_info.Union.fields[1].field_type == u32);
+
+ const TestExternUnion = extern union {
+ foo: *c_void,
+ };
+
+ const extern_union_info = @typeInfo(TestExternUnion);
+ assert(extern_union_info.Union.layout == TypeInfo.ContainerLayout.Extern);
+ assert(extern_union_info.Union.tag_type == null);
+ assert(extern_union_info.Union.fields[0].enum_field == null);
+ assert(extern_union_info.Union.fields[0].field_type == *c_void);
+}
+
+test "type info: struct info" {
+ testStruct();
+ comptime testStruct();
+}
+
+fn testStruct() void {
+ const struct_info = @typeInfo(TestStruct);
+ assert(TypeId(struct_info) == TypeId.Struct);
+ assert(struct_info.Struct.layout == TypeInfo.ContainerLayout.Packed);
+ assert(struct_info.Struct.fields.len == 3);
+ assert(struct_info.Struct.fields[1].offset == null);
+ assert(struct_info.Struct.fields[2].field_type == *TestStruct);
+ assert(struct_info.Struct.defs.len == 2);
+ assert(struct_info.Struct.defs[0].is_pub);
+ assert(!struct_info.Struct.defs[0].data.Fn.is_extern);
+ assert(struct_info.Struct.defs[0].data.Fn.lib_name == null);
+ assert(struct_info.Struct.defs[0].data.Fn.return_type == void);
+ assert(struct_info.Struct.defs[0].data.Fn.fn_type == fn (*const TestStruct) void);
+}
+
+const TestStruct = packed struct {
+ const Self = this;
+
+ fieldA: usize,
+ fieldB: void,
+ fieldC: *Self,
+
+ pub fn foo(self: *const Self) void {}
+};
+
+test "type info: function type info" {
+ testFunction();
+ comptime testFunction();
+}
+
+fn testFunction() void {
+ const fn_info = @typeInfo(@typeOf(foo));
+ assert(TypeId(fn_info) == TypeId.Fn);
+ assert(fn_info.Fn.calling_convention == TypeInfo.CallingConvention.Unspecified);
+ assert(fn_info.Fn.is_generic);
+ assert(fn_info.Fn.args.len == 2);
+ assert(fn_info.Fn.is_var_args);
+ assert(fn_info.Fn.return_type == null);
+ assert(fn_info.Fn.async_allocator_type == null);
+
+ const test_instance: TestStruct = undefined;
+ const bound_fn_info = @typeInfo(@typeOf(test_instance.foo));
+ assert(TypeId(bound_fn_info) == TypeId.BoundFn);
+ assert(bound_fn_info.BoundFn.args[0].arg_type.? == *const TestStruct);
+}
+
+fn foo(comptime a: usize, b: bool, args: ...) usize {
+ return 0;
+}
+
+test "typeInfo with comptime parameter in struct fn def" {
+ const S = struct {
+ pub fn func(comptime x: f32) void {}
+ };
+ comptime var info = @typeInfo(S);
+}
diff --git a/test/cases/undefined.zig b/test/cases/undefined.zig
index bc81f9cf84..83c620d211 100644
--- a/test/cases/undefined.zig
+++ b/test/cases/undefined.zig
@@ -27,12 +27,12 @@ test "init static array to undefined" {
const Foo = struct {
x: i32,
- fn setFooXMethod(foo: &Foo) void {
+ fn setFooXMethod(foo: *Foo) void {
foo.x = 3;
}
};
-fn setFooX(foo: &Foo) void {
+fn setFooX(foo: *Foo) void {
foo.x = 2;
}
@@ -63,6 +63,6 @@ test "assign undefined to struct with method" {
}
test "type name of undefined" {
- const x = undefined;
- assert(mem.eql(u8, @typeName(@typeOf(x)), "(undefined)"));
+ const x = undefined;
+ assert(mem.eql(u8, @typeName(@typeOf(x)), "(undefined)"));
}
diff --git a/test/cases/underscore.zig b/test/cases/underscore.zig
new file mode 100644
index 0000000000..44451e3723
--- /dev/null
+++ b/test/cases/underscore.zig
@@ -0,0 +1,28 @@
+const std = @import("std");
+const assert = std.debug.assert;
+
+test "ignore lval with underscore" {
+ _ = false;
+}
+
+test "ignore lval with underscore (for loop)" {
+ for ([]void{}) |_, i| {
+ for ([]void{}) |_, j| {
+ break;
+ }
+ break;
+ }
+}
+
+test "ignore lval with underscore (while loop)" {
+ while (optionalReturnError()) |_| {
+ while (optionalReturnError()) |_| {
+ break;
+ } else |_| { }
+ break;
+ } else |_| { }
+}
+
+fn optionalReturnError() !?u32 {
+ return error.optionalReturnError;
+}
diff --git a/test/cases/union.zig b/test/cases/union.zig
index dc2a7c3414..08969e64fe 100644
--- a/test/cases/union.zig
+++ b/test/cases/union.zig
@@ -10,41 +10,54 @@ const Agg = struct {
val2: Value,
};
-const v1 = Value { .Int = 1234 };
-const v2 = Value { .Array = []u8{3} ** 9 };
+const v1 = Value{ .Int = 1234 };
+const v2 = Value{ .Array = []u8{3} ** 9 };
-const err = (error!Agg)(Agg {
+const err = (error!Agg)(Agg{
.val1 = v1,
.val2 = v2,
});
-const array = []Value { v1, v2, v1, v2};
-
+const array = []Value{
+ v1,
+ v2,
+ v1,
+ v2,
+};
test "unions embedded in aggregate types" {
switch (array[1]) {
Value.Array => |arr| assert(arr[4] == 3),
else => unreachable,
}
- switch((err catch unreachable).val1) {
+ switch ((err catch unreachable).val1) {
Value.Int => |x| assert(x == 1234),
else => unreachable,
}
}
-
const Foo = union {
float: f64,
int: i32,
};
test "basic unions" {
- var foo = Foo { .int = 1 };
+ var foo = Foo{ .int = 1 };
assert(foo.int == 1);
- foo = Foo {.float = 12.34};
+ foo = Foo{ .float = 12.34 };
assert(foo.float == 12.34);
}
+test "comptime union field access" {
+ comptime {
+ var foo = Foo{ .int = 0 };
+ assert(foo.int == 0);
+
+ foo = Foo{ .float = 42.42 };
+ assert(foo.float == 42.42);
+ }
+}
+
test "init union with runtime value" {
var foo: Foo = undefined;
@@ -55,12 +68,12 @@ test "init union with runtime value" {
assert(foo.int == 42);
}
-fn setFloat(foo: &Foo, x: f64) void {
- *foo = Foo { .float = x };
+fn setFloat(foo: *Foo, x: f64) void {
+ foo.* = Foo{ .float = x };
}
-fn setInt(foo: &Foo, x: i32) void {
- *foo = Foo { .int = x };
+fn setInt(foo: *Foo, x: i32) void {
+ foo.* = Foo{ .int = x };
}
const FooExtern = extern union {
@@ -69,13 +82,12 @@ const FooExtern = extern union {
};
test "basic extern unions" {
- var foo = FooExtern { .int = 1 };
+ var foo = FooExtern{ .int = 1 };
assert(foo.int == 1);
foo.float = 12.34;
assert(foo.float == 12.34);
}
-
const Letter = enum {
A,
B,
@@ -93,12 +105,12 @@ test "union with specified enum tag" {
}
fn doTest() void {
- assert(bar(Payload {.A = 1234}) == -10);
+ assert(bar(Payload{ .A = 1234 }) == -10);
}
-fn bar(value: &const Payload) i32 {
- assert(Letter(*value) == Letter.A);
- return switch (*value) {
+fn bar(value: *const Payload) i32 {
+ assert(Letter(value.*) == Letter.A);
+ return switch (value.*) {
Payload.A => |x| return x - 1244,
Payload.B => |x| if (x == 12.34) i32(20) else 21,
Payload.C => |x| if (x) i32(30) else 31,
@@ -114,7 +126,7 @@ const MultipleChoice = union(enum(u32)) {
test "simple union(enum(u32))" {
var x = MultipleChoice.C;
assert(x == MultipleChoice.C);
- assert(u32(@TagType(MultipleChoice)(x)) == 60);
+ assert(@enumToInt(@TagType(MultipleChoice)(x)) == 60);
}
const MultipleChoice2 = union(enum(u32)) {
@@ -131,13 +143,13 @@ const MultipleChoice2 = union(enum(u32)) {
test "union(enum(u32)) with specified and unspecified tag values" {
comptime assert(@TagType(@TagType(MultipleChoice2)) == u32);
- testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2 {.C = 123});
- comptime testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2 { .C = 123} );
+ testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
+ comptime testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
}
-fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: &const MultipleChoice2) void {
- assert(u32(@TagType(MultipleChoice2)(*x)) == 60);
- assert(1123 == switch (*x) {
+fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: *const MultipleChoice2) void {
+ assert(@enumToInt(@TagType(MultipleChoice2)(x.*)) == 60);
+ assert(1123 == switch (x.*) {
MultipleChoice2.A => 1,
MultipleChoice2.B => 2,
MultipleChoice2.C => |v| i32(1000) + v,
@@ -150,18 +162,17 @@ fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: &const MultipleChoice2) void
});
}
-
const ExternPtrOrInt = extern union {
- ptr: &u8,
- int: u64
+ ptr: *u8,
+ int: u64,
};
test "extern union size" {
comptime assert(@sizeOf(ExternPtrOrInt) == 8);
}
const PackedPtrOrInt = packed union {
- ptr: &u8,
- int: u64
+ ptr: *u8,
+ int: u64,
};
test "extern union size" {
comptime assert(@sizeOf(PackedPtrOrInt) == 8);
@@ -174,8 +185,16 @@ test "union with only 1 field which is void should be zero bits" {
comptime assert(@sizeOf(ZeroBits) == 0);
}
-const TheTag = enum {A, B, C};
-const TheUnion = union(TheTag) { A: i32, B: i32, C: i32 };
+const TheTag = enum {
+ A,
+ B,
+ C,
+};
+const TheUnion = union(TheTag) {
+ A: i32,
+ B: i32,
+ C: i32,
+};
test "union field access gives the enum values" {
assert(TheUnion.A == TheTag.A);
assert(TheUnion.B == TheTag.B);
@@ -183,20 +202,28 @@ test "union field access gives the enum values" {
}
test "cast union to tag type of union" {
- testCastUnionToTagType(TheUnion {.B = 1234});
- comptime testCastUnionToTagType(TheUnion {.B = 1234});
+ testCastUnionToTagType(TheUnion{ .B = 1234 });
+ comptime testCastUnionToTagType(TheUnion{ .B = 1234 });
}
-fn testCastUnionToTagType(x: &const TheUnion) void {
- assert(TheTag(*x) == TheTag.B);
+fn testCastUnionToTagType(x: *const TheUnion) void {
+ assert(TheTag(x.*) == TheTag.B);
}
test "cast tag type of union to union" {
var x: Value2 = Letter2.B;
assert(Letter2(x) == Letter2.B);
}
-const Letter2 = enum { A, B, C };
-const Value2 = union(Letter2) { A: i32, B, C, };
+const Letter2 = enum {
+ A,
+ B,
+ C,
+};
+const Value2 = union(Letter2) {
+ A: i32,
+ B,
+ C,
+};
test "implicit cast union to its tag type" {
var x: Value2 = Letter2.B;
@@ -216,20 +243,17 @@ const TheUnion2 = union(enum) {
Item2: i32,
};
-fn assertIsTheUnion2Item1(value: &const TheUnion2) void {
- assert(*value == TheUnion2.Item1);
+fn assertIsTheUnion2Item1(value: *const TheUnion2) void {
+ assert(value.* == TheUnion2.Item1);
}
-
pub const PackThis = union(enum) {
Invalid: bool,
StringLiteral: u2,
};
test "constant packed union" {
- testConstPackedUnion([]PackThis {
- PackThis { .StringLiteral = 1 },
- });
+ testConstPackedUnion([]PackThis{PackThis{ .StringLiteral = 1 }});
}
fn testConstPackedUnion(expected_tokens: []const PackThis) void {
@@ -242,7 +266,7 @@ test "switch on union with only 1 field" {
switch (r) {
PartialInst.Compiled => {
var z: PartialInstWithPayload = undefined;
- z = PartialInstWithPayload { .Compiled = 1234 };
+ z = PartialInstWithPayload{ .Compiled = 1234 };
switch (z) {
PartialInstWithPayload.Compiled => |x| {
assert(x == 1234);
@@ -262,3 +286,28 @@ const PartialInstWithPayload = union(enum) {
Compiled: i32,
};
+test "access a member of tagged union with conflicting enum tag name" {
+ const Bar = union(enum) {
+ A: A,
+ B: B,
+
+ const A = u8;
+ const B = void;
+ };
+
+ comptime assert(Bar.A == u8);
+}
+
+test "tagged union initialization with runtime void" {
+ assert(testTaggedUnionInit({}));
+}
+
+const TaggedUnionWithAVoid = union(enum) {
+ A,
+ B: i32,
+};
+
+fn testTaggedUnionInit(x: var) bool {
+ const y = TaggedUnionWithAVoid{ .A = x };
+ return @TagType(TaggedUnionWithAVoid)(y) == TaggedUnionWithAVoid.A;
+}
diff --git a/test/cases/var_args.zig b/test/cases/var_args.zig
index cead9eb8bf..3eb6e30448 100644
--- a/test/cases/var_args.zig
+++ b/test/cases/var_args.zig
@@ -2,9 +2,12 @@ const assert = @import("std").debug.assert;
fn add(args: ...) i32 {
var sum = i32(0);
- {comptime var i: usize = 0; inline while (i < args.len) : (i += 1) {
- sum += args[i];
- }}
+ {
+ comptime var i: usize = 0;
+ inline while (i < args.len) : (i += 1) {
+ sum += args[i];
+ }
+ }
return sum;
}
@@ -55,31 +58,23 @@ fn extraFn(extra: u32, args: ...) usize {
return args.len;
}
+const foos = []fn (...) bool{
+ foo1,
+ foo2,
+};
-const foos = []fn(...) bool { foo1, foo2 };
-
-fn foo1(args: ...) bool { return true; }
-fn foo2(args: ...) bool { return false; }
+fn foo1(args: ...) bool {
+ return true;
+}
+fn foo2(args: ...) bool {
+ return false;
+}
test "array of var args functions" {
assert(foos[0]());
assert(!foos[1]());
}
-
-test "pass array and slice of same array to var args should have same pointers" {
- const array = "hi";
- const slice: []const u8 = array;
- return assertSlicePtrsEql(array, slice);
-}
-
-fn assertSlicePtrsEql(args: ...) void {
- const s1 = ([]const u8)(args[0]);
- const s2 = args[1];
- assert(s1.ptr == s2.ptr);
-}
-
-
test "pass zero length array to var args param" {
doNothingWithFirstArg("");
}
diff --git a/test/cases/void.zig b/test/cases/void.zig
index f4d72209e4..7121ac664b 100644
--- a/test/cases/void.zig
+++ b/test/cases/void.zig
@@ -8,7 +8,7 @@ const Foo = struct {
test "compare void with void compile time known" {
comptime {
- const foo = Foo {
+ const foo = Foo{
.a = {},
.b = 1,
.c = {},
@@ -16,3 +16,15 @@ test "compare void with void compile time known" {
assert(foo.a == {});
}
}
+
+test "iterate over a void slice" {
+ var j: usize = 0;
+ for (times(10)) |_, i| {
+ assert(i == j);
+ j += 1;
+ }
+}
+
+fn times(n: usize) []const void {
+ return ([*]void)(undefined)[0..n];
+}
diff --git a/test/cases/while.zig b/test/cases/while.zig
index 33d5a5623a..fe53522ea6 100644
--- a/test/cases/while.zig
+++ b/test/cases/while.zig
@@ -1,7 +1,7 @@
const assert = @import("std").debug.assert;
test "while loop" {
- var i : i32 = 0;
+ var i: i32 = 0;
while (i < 4) {
i += 1;
}
@@ -35,7 +35,7 @@ test "continue and break" {
}
var continue_and_break_counter: i32 = 0;
fn runContinueAndBreakTest() void {
- var i : i32 = 0;
+ var i: i32 = 0;
while (true) {
continue_and_break_counter += 2;
i += 1;
@@ -58,10 +58,13 @@ fn returnWithImplicitCastFromWhileLoopTest() error!void {
test "while with continue expression" {
var sum: i32 = 0;
- {var i: i32 = 0; while (i < 10) : (i += 1) {
- if (i == 5) continue;
- sum += i;
- }}
+ {
+ var i: i32 = 0;
+ while (i < 10) : (i += 1) {
+ if (i == 5) continue;
+ sum += i;
+ }
+ }
assert(sum == 40);
}
@@ -78,7 +81,7 @@ test "while with else" {
assert(got_else == 1);
}
-test "while with nullable as condition" {
+test "while with optional as condition" {
numbers_left = 10;
var sum: i32 = 0;
while (getNumberOrNull()) |value| {
@@ -87,7 +90,7 @@ test "while with nullable as condition" {
assert(sum == 45);
}
-test "while with nullable as condition with else" {
+test "while with optional as condition with else" {
numbers_left = 10;
var sum: i32 = 0;
var got_else: i32 = 0;
@@ -117,61 +120,63 @@ test "while with error union condition" {
var numbers_left: i32 = undefined;
fn getNumberOrErr() error!i32 {
- return if (numbers_left == 0)
- error.OutOfNumbers
- else x: {
+ return if (numbers_left == 0) error.OutOfNumbers else x: {
numbers_left -= 1;
break :x numbers_left;
};
}
fn getNumberOrNull() ?i32 {
- return if (numbers_left == 0)
- null
- else x: {
+ return if (numbers_left == 0) null else x: {
numbers_left -= 1;
break :x numbers_left;
};
}
-test "while on nullable with else result follow else prong" {
+test "while on optional with else result follow else prong" {
const result = while (returnNull()) |value| {
break value;
- } else i32(2);
+ } else
+ i32(2);
assert(result == 2);
}
-test "while on nullable with else result follow break prong" {
- const result = while (returnMaybe(10)) |value| {
+test "while on optional with else result follow break prong" {
+ const result = while (returnOptional(10)) |value| {
break value;
- } else i32(2);
+ } else
+ i32(2);
assert(result == 10);
}
test "while on error union with else result follow else prong" {
const result = while (returnError()) |value| {
break value;
- } else |err| i32(2);
+ } else |err|
+ i32(2);
assert(result == 2);
}
test "while on error union with else result follow break prong" {
const result = while (returnSuccess(10)) |value| {
break value;
- } else |err| i32(2);
+ } else |err|
+ i32(2);
assert(result == 10);
}
test "while on bool with else result follow else prong" {
const result = while (returnFalse()) {
break i32(10);
- } else i32(2);
+ } else
+ i32(2);
assert(result == 2);
}
test "while on bool with else result follow break prong" {
const result = while (returnTrue()) {
break i32(10);
- } else i32(2);
+ } else
+ i32(2);
assert(result == 10);
}
@@ -202,9 +207,21 @@ fn testContinueOuter() void {
}
}
-fn returnNull() ?i32 { return null; }
-fn returnMaybe(x: i32) ?i32 { return x; }
-fn returnError() error!i32 { return error.YouWantedAnError; }
-fn returnSuccess(x: i32) error!i32 { return x; }
-fn returnFalse() bool { return false; }
-fn returnTrue() bool { return true; }
+fn returnNull() ?i32 {
+ return null;
+}
+fn returnOptional(x: i32) ?i32 {
+ return x;
+}
+fn returnError() error!i32 {
+ return error.YouWantedAnError;
+}
+fn returnSuccess(x: i32) error!i32 {
+ return x;
+}
+fn returnFalse() bool {
+ return false;
+}
+fn returnTrue() bool {
+ return true;
+}
diff --git a/test/cases/widening.zig b/test/cases/widening.zig
new file mode 100644
index 0000000000..cf6ab4ca0f
--- /dev/null
+++ b/test/cases/widening.zig
@@ -0,0 +1,27 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
+
+test "integer widening" {
+ var a: u8 = 250;
+ var b: u16 = a;
+ var c: u32 = b;
+ var d: u64 = c;
+ var e: u64 = d;
+ var f: u128 = e;
+ assert(f == a);
+}
+
+test "implicit unsigned integer to signed integer" {
+ var a: u8 = 250;
+ var b: i16 = a;
+ assert(b == 250);
+}
+
+test "float widening" {
+ var a: f16 = 12.34;
+ var b: f32 = a;
+ var c: f64 = b;
+ var d: f128 = c;
+ assert(d == a);
+}
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 9595bf8259..a18a78b419 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -3,10 +3,10 @@ const std = @import("std");
const os = std.os;
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompareOutputContext) void {
+pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.addC("hello world with libc",
\\const c = @cImport(@cInclude("stdio.h"));
- \\export fn main(argc: c_int, argv: &&u8) c_int {
+ \\export fn main(argc: c_int, argv: [*][*]u8) c_int {
\\ _ = c.puts(c"Hello, world!");
\\ return 0;
\\}
@@ -131,7 +131,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\const is_windows = builtin.os == builtin.Os.windows;
\\const c = @cImport({
\\ if (is_windows) {
- \\ // See https://github.com/zig-lang/zig/issues/515
+ \\ // See https://github.com/ziglang/zig/issues/515
\\ @cDefine("_NO_CRT_STDIO_INLINE", "1");
\\ @cInclude("io.h");
\\ @cInclude("fcntl.h");
@@ -139,7 +139,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ @cInclude("stdio.h");
\\});
\\
- \\export fn main(argc: c_int, argv: &&u8) c_int {
+ \\export fn main(argc: c_int, argv: [*][*]u8) c_int {
\\ if (is_windows) {
\\ // we want actual \n, not \r\n
\\ _ = c._setmode(1, c._O_BINARY);
@@ -284,12 +284,12 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
cases.addC("expose function pointer to C land",
\\const c = @cImport(@cInclude("stdlib.h"));
\\
- \\export fn compare_fn(a: ?&const c_void, b: ?&const c_void) c_int {
- \\ const a_int = @ptrCast(&align(1) const i32, a ?? unreachable);
- \\ const b_int = @ptrCast(&align(1) const i32, b ?? unreachable);
- \\ if (*a_int < *b_int) {
+ \\export fn compare_fn(a: ?*const c_void, b: ?*const c_void) c_int {
+ \\ const a_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), a));
+ \\ const b_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), b));
+ \\ if (a_int.* < b_int.*) {
\\ return -1;
- \\ } else if (*a_int > *b_int) {
+ \\ } else if (a_int.* > b_int.*) {
\\ return 1;
\\ } else {
\\ return 0;
@@ -297,9 +297,9 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\}
\\
\\export fn main() c_int {
- \\ var array = []u32 { 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 };
+ \\ var array = []u32{ 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 };
\\
- \\ c.qsort(@ptrCast(&c_void, &array[0]), c_ulong(array.len), @sizeOf(i32), compare_fn);
+ \\ c.qsort(@ptrCast(?*c_void, array[0..].ptr), @intCast(c_ulong, array.len), @sizeOf(i32), compare_fn);
\\
\\ for (array) |item, i| {
\\ if (item != i) {
@@ -316,7 +316,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\const is_windows = builtin.os == builtin.Os.windows;
\\const c = @cImport({
\\ if (is_windows) {
- \\ // See https://github.com/zig-lang/zig/issues/515
+ \\ // See https://github.com/ziglang/zig/issues/515
\\ @cDefine("_NO_CRT_STDIO_INLINE", "1");
\\ @cInclude("io.h");
\\ @cInclude("fcntl.h");
@@ -324,15 +324,15 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ @cInclude("stdio.h");
\\});
\\
- \\export fn main(argc: c_int, argv: &&u8) c_int {
+ \\export fn main(argc: c_int, argv: [*][*]u8) c_int {
\\ if (is_windows) {
\\ // we want actual \n, not \r\n
\\ _ = c._setmode(1, c._O_BINARY);
\\ }
\\ const small: f32 = 3.25;
\\ const x: f64 = small;
- \\ const y = i32(x);
- \\ const z = f64(y);
+ \\ const y = @floatToInt(i32, x);
+ \\ const z = @intToFloat(f64, y);
\\ _ = c.printf(c"%.2f\n%d\n%.2f\n%.2f\n", x, y, z, f64(-0.4));
\\ return 0;
\\}
@@ -344,13 +344,13 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\const Foo = struct {
\\ field1: Bar,
\\
- \\ fn method(a: &const Foo) bool { return true; }
+ \\ fn method(a: *const Foo) bool { return true; }
\\};
\\
\\const Bar = struct {
\\ field2: i32,
\\
- \\ fn method(b: &const Bar) bool { return true; }
+ \\ fn method(b: *const Bar) bool { return true; }
\\};
\\
\\pub fn main() void {
@@ -475,7 +475,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\
);
- tc.setCommandLineArgs([][]const u8 {
+ tc.setCommandLineArgs([][]const u8{
"first arg",
"'a' 'b' \\",
"bare",
@@ -516,7 +516,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\
);
- tc.setCommandLineArgs([][]const u8 {
+ tc.setCommandLineArgs([][]const u8{
"first arg",
"'a' 'b' \\",
"bare",
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index b22816a9a8..56b2c51d74 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,25 +1,484 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompileErrorContext) void {
- cases.add("assign inline fn to non-comptime var",
+pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "@handle() called outside of function definition",
+ \\var handle_undef: promise = undefined;
+ \\var handle_dummy: promise = @handle();
+ \\export fn entry() bool {
+ \\ return handle_undef == handle_dummy;
+ \\}
+ ,
+ ".tmp_source.zig:2:29: error: @handle() called outside of function definition",
+ );
+
+ cases.add(
+ "@handle() in non-async function",
+ \\export fn entry() bool {
+ \\ var handle_undef: promise = undefined;
+ \\ return handle_undef == @handle();
+ \\}
+ ,
+ ".tmp_source.zig:3:28: error: @handle() in non-async function",
+ );
+
+ cases.add(
+ "`_` is not a declarable symbol",
+ \\export fn f1() usize {
+ \\ var _: usize = 2;
+ \\ return _;
+ \\}
+ ,
+ ".tmp_source.zig:2:5: error: `_` is not a declarable symbol",
+ ".tmp_source.zig:3:12: error: use of undeclared identifier '_'",
+ );
+
+ cases.add(
+ "`_` should not be usable inside for",
+ \\export fn returns() void {
+ \\ for ([]void{}) |_, i| {
+ \\ for ([]void{}) |_, j| {
+ \\ return _;
+ \\ }
+ \\ }
+ \\}
+ ,
+ ".tmp_source.zig:4:20: error: use of undeclared identifier '_'",
+ );
+
+ cases.add(
+ "`_` should not be usable inside while",
+ \\export fn returns() void {
+ \\ while (optionalReturn()) |_| {
+ \\ while (optionalReturn()) |_| {
+ \\ return _;
+ \\ }
+ \\ }
+ \\}
+ \\fn optionalReturn() ?u32 {
+ \\ return 1;
+ \\}
+ ,
+ ".tmp_source.zig:4:20: error: use of undeclared identifier '_'",
+ );
+
+ cases.add(
+ "`_` should not be usable inside while else",
+ \\export fn returns() void {
+ \\ while (optionalReturnError()) |_| {
+ \\ while (optionalReturnError()) |_| {
+ \\ return;
+ \\ } else |_| {
+ \\ if (_ == error.optionalReturnError) return;
+ \\ }
+ \\ }
+ \\}
+ \\fn optionalReturnError() !?u32 {
+ \\ return error.optionalReturnError;
+ \\}
+ ,
+ ".tmp_source.zig:6:17: error: use of undeclared identifier '_'",
+ );
+
+ cases.add(
+ "while loop body expression ignored",
+ \\fn returns() usize {
+ \\ return 2;
+ \\}
+ \\export fn f1() void {
+ \\ while (true) returns();
+ \\}
+ \\export fn f2() void {
+ \\ var x: ?i32 = null;
+ \\ while (x) |_| returns();
+ \\}
+ \\export fn f3() void {
+ \\ var x: error!i32 = error.Bad;
+ \\ while (x) |_| returns() else |_| unreachable;
+ \\}
+ ,
+ ".tmp_source.zig:5:25: error: expression value is ignored",
+ ".tmp_source.zig:9:26: error: expression value is ignored",
+ ".tmp_source.zig:13:26: error: expression value is ignored",
+ );
+
+ cases.add(
+ "missing parameter name of generic function",
+ \\fn dump(var) void {}
+ \\export fn entry() void {
+ \\ var a: u8 = 9;
+ \\ dump(a);
+ \\}
+ ,
+ ".tmp_source.zig:1:9: error: missing parameter name",
+ );
+
+ cases.add(
+ "non-inline for loop on a type that requires comptime",
+ \\const Foo = struct {
+ \\ name: []const u8,
+ \\ T: type,
+ \\};
+ \\export fn entry() void {
+ \\ const xx: [2]Foo = undefined;
+ \\ for (xx) |f| {}
+ \\}
+ ,
+ ".tmp_source.zig:7:15: error: variable of type 'Foo' must be const or comptime",
+ );
+
+ cases.add(
+ "generic fn as parameter without comptime keyword",
+ \\fn f(_: fn (var) void) void {}
+ \\fn g(_: var) void {}
+ \\export fn entry() void {
+ \\ f(g);
+ \\}
+ ,
+ ".tmp_source.zig:1:9: error: parameter of type 'fn(var)var' must be declared comptime",
+ );
+
+ cases.add(
+ "optional pointer to void in extern struct",
+ \\comptime {
+ \\ _ = @IntType(false, @maxValue(u32) + 1);
+ \\}
+ ,
+ ".tmp_source.zig:2:40: error: integer value 4294967296 cannot be implicitly casted to type 'u32'",
+ );
+
+ cases.add(
+ "optional pointer to void in extern struct",
+ \\const Foo = extern struct {
+ \\ x: ?*const void,
+ \\};
+ \\const Bar = extern struct {
+ \\ foo: Foo,
+ \\ y: i32,
+ \\};
+ \\export fn entry(bar: *Bar) void {}
+ ,
+ ".tmp_source.zig:2:5: error: extern structs cannot contain fields of type '?*const void'",
+ );
+
+ cases.add(
+ "use of comptime-known undefined function value",
+ \\const Cmd = struct {
+ \\ exec: fn () void,
+ \\};
+ \\export fn entry() void {
+ \\ const command = Cmd{ .exec = undefined };
+ \\ command.exec();
+ \\}
+ ,
+ ".tmp_source.zig:6:12: error: use of undefined value",
+ );
+
+ cases.add(
+ "bad @alignCast at comptime",
+ \\comptime {
+ \\ const ptr = @intToPtr(*i32, 0x1);
+ \\ const aligned = @alignCast(4, ptr);
+ \\}
+ ,
+ ".tmp_source.zig:3:35: error: pointer address 0x1 is not aligned to 4 bytes",
+ );
+
+ cases.add(
+ "@ptrToInt on *void",
+ \\export fn entry() bool {
+ \\ return @ptrToInt(&{}) == @ptrToInt(&{});
+ \\}
+ ,
+ ".tmp_source.zig:2:23: error: pointer to size 0 type has no address",
+ );
+
+ cases.add(
+ "@popCount - non-integer",
+ \\export fn entry(x: f32) u32 {
+ \\ return @popCount(x);
+ \\}
+ ,
+ ".tmp_source.zig:2:22: error: expected integer type, found 'f32'",
+ );
+
+ cases.add(
+ "@popCount - negative comptime_int",
+ \\comptime {
+ \\ _ = @popCount(-1);
+ \\}
+ ,
+ ".tmp_source.zig:2:9: error: @popCount on negative comptime_int value -1",
+ );
+
+ cases.addCase(x: {
+ const tc = cases.create(
+ "wrong same named struct",
+ \\const a = @import("a.zig");
+ \\const b = @import("b.zig");
+ \\
+ \\export fn entry() void {
+ \\ var a1: a.Foo = undefined;
+ \\ bar(&a1);
+ \\}
+ \\
+ \\fn bar(x: *b.Foo) void {}
+ ,
+ ".tmp_source.zig:6:10: error: expected type '*Foo', found '*Foo'",
+ ".tmp_source.zig:6:10: note: pointer type child 'Foo' cannot cast into pointer type child 'Foo'",
+ "a.zig:1:17: note: Foo declared here",
+ "b.zig:1:17: note: Foo declared here",
+ );
+
+ tc.addSourceFile("a.zig",
+ \\pub const Foo = struct {
+ \\ x: i32,
+ \\};
+ );
+
+ tc.addSourceFile("b.zig",
+ \\pub const Foo = struct {
+ \\ z: f64,
+ \\};
+ );
+
+ break :x tc;
+ });
+
+ cases.add(
+ "enum field value references enum",
+ \\pub const Foo = extern enum {
+ \\ A = Foo.B,
+ \\ C = D,
+ \\};
+ \\export fn entry() void {
+ \\ var s: Foo = Foo.E;
+ \\}
+ ,
+ ".tmp_source.zig:1:17: error: 'Foo' depends on itself",
+ );
+
+ cases.add(
+ "@floatToInt comptime safety",
+ \\comptime {
+ \\ _ = @floatToInt(i8, f32(-129.1));
+ \\}
+ \\comptime {
+ \\ _ = @floatToInt(u8, f32(-1.1));
+ \\}
+ \\comptime {
+ \\ _ = @floatToInt(u8, f32(256.1));
+ \\}
+ ,
+ ".tmp_source.zig:2:9: error: integer value '-129' cannot be stored in type 'i8'",
+ ".tmp_source.zig:5:9: error: integer value '-1' cannot be stored in type 'u8'",
+ ".tmp_source.zig:8:9: error: integer value '256' cannot be stored in type 'u8'",
+ );
+
+ cases.add(
+ "use c_void as return type of fn ptr",
+ \\export fn entry() void {
+ \\ const a: fn () c_void = undefined;
+ \\}
+ ,
+ ".tmp_source.zig:2:20: error: return type cannot be opaque",
+ );
+
+ cases.add(
+ "non int passed to @intToFloat",
+ \\export fn entry() void {
+ \\ const x = @intToFloat(f32, 1.1);
+ \\}
+ ,
+ ".tmp_source.zig:2:32: error: expected int type, found 'comptime_float'",
+ );
+
+ cases.add(
+ "use implicit casts to assign null to non-nullable pointer",
+ \\export fn entry() void {
+ \\ var x: i32 = 1234;
+ \\ var p: *i32 = &x;
+ \\ var pp: *?*i32 = &p;
+ \\ pp.* = null;
+ \\ var y = p.*;
+ \\}
+ ,
+ ".tmp_source.zig:4:23: error: expected type '*?*i32', found '**i32'",
+ );
+
+ cases.add(
+ "attempted implicit cast from T to [*]const T",
+ \\export fn entry() void {
+ \\ const x: [*]const bool = true;
+ \\}
+ ,
+ ".tmp_source.zig:2:30: error: expected type '[*]const bool', found 'bool'",
+ );
+
+ cases.add(
+ "dereference unknown length pointer",
+ \\export fn entry(x: [*]i32) i32 {
+ \\ return x.*;
+ \\}
+ ,
+ ".tmp_source.zig:2:13: error: index syntax required for unknown-length pointer type '[*]i32'",
+ );
+
+ cases.add(
+ "field access of unknown length pointer",
+ \\const Foo = extern struct {
+ \\ a: i32,
+ \\};
+ \\
+ \\export fn entry(foo: [*]Foo) void {
+ \\ foo.a += 1;
+ \\}
+ ,
+ ".tmp_source.zig:6:8: error: type '[*]Foo' does not support field access",
+ );
+
+ cases.add(
+ "unknown length pointer to opaque",
+ \\export const T = [*]@OpaqueType();
+ ,
+ ".tmp_source.zig:1:18: error: unknown-length pointer to opaque",
+ );
+
+ cases.add(
+ "error when evaluating return type",
+ \\const Foo = struct {
+ \\ map: i32(i32),
+ \\
+ \\ fn init() Foo {
+ \\ return undefined;
+ \\ }
+ \\};
+ \\export fn entry() void {
+ \\ var rule_set = try Foo.init();
+ \\}
+ ,
+ ".tmp_source.zig:2:13: error: expected type 'i32', found 'type'",
+ );
+
+ cases.add(
+ "slicing single-item pointer",
+ \\export fn entry(ptr: *i32) void {
+ \\ const slice = ptr[0..2];
+ \\}
+ ,
+ ".tmp_source.zig:2:22: error: slice of single-item pointer",
+ );
+
+ cases.add(
+ "indexing single-item pointer",
+ \\export fn entry(ptr: *i32) i32 {
+ \\ return ptr[1];
+ \\}
+ ,
+ ".tmp_source.zig:2:15: error: index of single-item pointer",
+ );
+
+ cases.add(
+ "nested error set mismatch",
+ \\const NextError = error{NextError};
+ \\const OtherError = error{OutOfMemory};
+ \\
+ \\export fn entry() void {
+ \\ const a: ?NextError!i32 = foo();
+ \\}
+ \\
+ \\fn foo() ?OtherError!i32 {
+ \\ return null;
+ \\}
+ ,
+ ".tmp_source.zig:5:34: error: expected type '?NextError!i32', found '?OtherError!i32'",
+ ".tmp_source.zig:5:34: note: optional type child 'OtherError!i32' cannot cast into optional type child 'NextError!i32'",
+ ".tmp_source.zig:5:34: note: error set 'OtherError' cannot cast into error set 'NextError'",
+ ".tmp_source.zig:2:26: note: 'error.OutOfMemory' not a member of destination error set",
+ );
+
+ cases.add(
+ "invalid deref on switch target",
+ \\comptime {
+ \\ var tile = Tile.Empty;
+ \\ switch (tile.*) {
+ \\ Tile.Empty => {},
+ \\ Tile.Filled => {},
+ \\ }
+ \\}
+ \\const Tile = enum {
+ \\ Empty,
+ \\ Filled,
+ \\};
+ ,
+ ".tmp_source.zig:3:17: error: invalid deref on switch target",
+ );
+
+ cases.add(
+ "invalid field access in comptime",
+ \\comptime { var x = doesnt_exist.whatever; }
+ ,
+ ".tmp_source.zig:1:20: error: use of undeclared identifier 'doesnt_exist'",
+ );
+
+ cases.add(
+ "suspend inside suspend block",
+ \\const std = @import("std",);
+ \\
+ \\export fn entry() void {
+ \\ var buf: [500]u8 = undefined;
+ \\ var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
+ \\ const p = (async foo()) catch unreachable;
+ \\ cancel p;
+ \\}
+ \\
+ \\async fn foo() void {
+ \\ suspend {
+ \\ suspend {
+ \\ }
+ \\ }
+ \\}
+ ,
+ ".tmp_source.zig:12:9: error: cannot suspend inside suspend block",
+ ".tmp_source.zig:11:5: note: other suspend block here",
+ );
+
+ cases.add(
+ "assign inline fn to non-comptime var",
\\export fn entry() void {
\\ var a = b;
\\}
\\inline fn b() void { }
,
".tmp_source.zig:2:5: error: functions marked inline must be stored in const or comptime var",
- ".tmp_source.zig:4:8: note: declared here");
+ ".tmp_source.zig:4:8: note: declared here",
+ );
- cases.add("wrong type passed to @panic",
+ cases.add(
+ "wrong type passed to @panic",
\\export fn entry() void {
\\ var e = error.Foo;
\\ @panic(e);
\\}
,
- ".tmp_source.zig:3:12: error: expected type '[]const u8', found 'error{Foo}'");
+ ".tmp_source.zig:3:12: error: expected type '[]const u8', found 'error{Foo}'",
+ );
-
- cases.add("@tagName used on union with no associated enum tag",
+ cases.add(
+ "@tagName used on union with no associated enum tag",
\\const FloatInt = extern union {
\\ Float: f32,
\\ Int: i32,
@@ -30,10 +489,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:7:19: error: union has no associated enum",
- ".tmp_source.zig:1:18: note: declared here");
+ ".tmp_source.zig:1:18: note: declared here",
+ );
- cases.add("returning error from void async function",
- \\const std = @import("std");
+ cases.add(
+ "returning error from void async function",
+ \\const std = @import("std",);
\\export fn entry() void {
\\ const p = async amain() catch unreachable;
\\}
@@ -41,32 +502,40 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ return error.ShouldBeCompileError;
\\}
,
- ".tmp_source.zig:6:17: error: expected type 'void', found 'error{ShouldBeCompileError}'");
+ ".tmp_source.zig:6:17: error: expected type 'void', found 'error{ShouldBeCompileError}'",
+ );
- cases.add("var not allowed in structs",
+ cases.add(
+ "var not allowed in structs",
\\export fn entry() void {
\\ var s = (struct{v: var}){.v=i32(10)};
\\}
,
- ".tmp_source.zig:2:23: error: invalid token: 'var'");
+ ".tmp_source.zig:2:23: error: invalid token: 'var'",
+ );
- cases.add("@ptrCast discards const qualifier",
+ cases.add(
+ "@ptrCast discards const qualifier",
\\export fn entry() void {
\\ const x: i32 = 1234;
- \\ const y = @ptrCast(&i32, &x);
+ \\ const y = @ptrCast(*i32, &x);
\\}
,
- ".tmp_source.zig:3:15: error: cast discards const qualifier");
+ ".tmp_source.zig:3:15: error: cast discards const qualifier",
+ );
- cases.add("comptime slice of undefined pointer non-zero len",
+ cases.add(
+ "comptime slice of undefined pointer non-zero len",
\\export fn entry() void {
- \\ const slice = (&i32)(undefined)[0..1];
+ \\ const slice = ([*]i32)(undefined)[0..1];
\\}
,
- ".tmp_source.zig:2:36: error: non-zero length slice of undefined pointer");
+ ".tmp_source.zig:2:38: error: non-zero length slice of undefined pointer",
+ );
- cases.add("type checking function pointers",
- \\fn a(b: fn (&const u8) void) void {
+ cases.add(
+ "type checking function pointers",
+ \\fn a(b: fn (*const u8) void) void {
\\ b('a');
\\}
\\fn c(d: u8) void {
@@ -76,9 +545,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ a(c);
\\}
,
- ".tmp_source.zig:8:7: error: expected type 'fn(&const u8) void', found 'fn(u8) void'");
+ ".tmp_source.zig:8:7: error: expected type 'fn(*const u8) void', found 'fn(u8) void'",
+ );
- cases.add("no else prong on switch on global error set",
+ cases.add(
+ "no else prong on switch on global error set",
\\export fn entry() void {
\\ foo(error.A);
\\}
@@ -88,18 +559,22 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ }
\\}
,
- ".tmp_source.zig:5:5: error: else prong required when switching on type 'error'");
+ ".tmp_source.zig:5:5: error: else prong required when switching on type 'error'",
+ );
- cases.add("inferred error set with no returned error",
+ cases.add(
+ "inferred error set with no returned error",
\\export fn entry() void {
\\ foo() catch unreachable;
\\}
\\fn foo() !void {
\\}
,
- ".tmp_source.zig:4:11: error: function with inferred error set must return at least one possible error");
+ ".tmp_source.zig:4:11: error: function with inferred error set must return at least one possible error",
+ );
- cases.add("error not handled in switch",
+ cases.add(
+ "error not handled in switch",
\\export fn entry() void {
\\ foo(452) catch |err| switch (err) {
\\ error.Foo => {},
@@ -115,9 +590,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:2:26: error: error.Baz not handled in switch",
- ".tmp_source.zig:2:26: error: error.Bar not handled in switch");
+ ".tmp_source.zig:2:26: error: error.Bar not handled in switch",
+ );
- cases.add("duplicate error in switch",
+ cases.add(
+ "duplicate error in switch",
\\export fn entry() void {
\\ foo(452) catch |err| switch (err) {
\\ error.Foo => {},
@@ -135,9 +612,25 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:5:14: error: duplicate switch value: '@typeOf(foo).ReturnType.ErrorSet.Foo'",
- ".tmp_source.zig:3:14: note: other value is here");
+ ".tmp_source.zig:3:14: note: other value is here",
+ );
- cases.add("range operator in switch used on error set",
+ cases.add("invalid cast from integral type to enum",
+ \\const E = enum(usize) { One, Two };
+ \\
+ \\export fn entry() void {
+ \\ foo(1);
+ \\}
+ \\
+ \\fn foo(x: usize) void {
+ \\ switch (x) {
+ \\ E.One => {},
+ \\ }
+ \\}
+ , ".tmp_source.zig:9:10: error: expected type 'usize', found 'E'");
+
+ cases.add(
+ "range operator in switch used on error set",
\\export fn entry() void {
\\ try foo(452) catch |err| switch (err) {
\\ error.A ... error.B => {},
@@ -152,31 +645,39 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ }
\\}
,
- ".tmp_source.zig:3:17: error: operator not allowed for errors");
+ ".tmp_source.zig:3:17: error: operator not allowed for errors",
+ );
- cases.add("inferring error set of function pointer",
+ cases.add(
+ "inferring error set of function pointer",
\\comptime {
\\ const z: ?fn()!void = null;
\\}
,
- ".tmp_source.zig:2:15: error: inferring error set of return type valid only for function definitions");
+ ".tmp_source.zig:2:15: error: inferring error set of return type valid only for function definitions",
+ );
- cases.add("access non-existent member of error set",
+ cases.add(
+ "access non-existent member of error set",
\\const Foo = error{A};
\\comptime {
\\ const z = Foo.Bar;
\\}
,
- ".tmp_source.zig:3:18: error: no error named 'Bar' in 'Foo'");
+ ".tmp_source.zig:3:18: error: no error named 'Bar' in 'Foo'",
+ );
- cases.add("error union operator with non error set LHS",
+ cases.add(
+ "error union operator with non error set LHS",
\\comptime {
\\ const z = i32!i32;
\\}
,
- ".tmp_source.zig:2:15: error: expected error set type, found type 'i32'");
+ ".tmp_source.zig:2:15: error: expected error set type, found type 'i32'",
+ );
- cases.add("error equality but sets have no common members",
+ cases.add(
+ "error equality but sets have no common members",
\\const Set1 = error{A, C};
\\const Set2 = error{B, D};
\\export fn entry() void {
@@ -188,26 +689,32 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ }
\\}
,
- ".tmp_source.zig:7:11: error: error sets 'Set1' and 'Set2' have no common errors");
+ ".tmp_source.zig:7:11: error: error sets 'Set1' and 'Set2' have no common errors",
+ );
- cases.add("only equality binary operator allowed for error sets",
+ cases.add(
+ "only equality binary operator allowed for error sets",
\\comptime {
\\ const z = error.A > error.B;
\\}
,
- ".tmp_source.zig:2:23: error: operator not allowed for errors");
+ ".tmp_source.zig:2:23: error: operator not allowed for errors",
+ );
- cases.add("explicit error set cast known at comptime violates error sets",
+ cases.add(
+ "explicit error set cast known at comptime violates error sets",
\\const Set1 = error {A, B};
\\const Set2 = error {A, C};
\\comptime {
\\ var x = Set1.B;
- \\ var y = Set2(x);
+ \\ var y = @errSetCast(Set2, x);
\\}
,
- ".tmp_source.zig:5:17: error: error.B not a member of error set 'Set2'");
+ ".tmp_source.zig:5:13: error: error.B not a member of error set 'Set2'",
+ );
- cases.add("cast error union of global error set to error union of smaller error set",
+ cases.add(
+ "cast error union of global error set to error union of smaller error set",
\\const SmallErrorSet = error{A};
\\export fn entry() void {
\\ var x: SmallErrorSet!i32 = foo();
@@ -216,10 +723,13 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ return error.B;
\\}
,
- ".tmp_source.zig:3:35: error: expected 'SmallErrorSet!i32', found 'error!i32'",
- ".tmp_source.zig:3:35: note: unable to cast global error set into smaller set");
+ ".tmp_source.zig:3:35: error: expected type 'SmallErrorSet!i32', found 'error!i32'",
+ ".tmp_source.zig:3:35: note: error set 'error' cannot cast into error set 'SmallErrorSet'",
+ ".tmp_source.zig:3:35: note: cannot cast global error set into smaller set",
+ );
- cases.add("cast global error set to error set",
+ cases.add(
+ "cast global error set to error set",
\\const SmallErrorSet = error{A};
\\export fn entry() void {
\\ var x: SmallErrorSet = foo();
@@ -228,10 +738,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ return error.B;
\\}
,
- ".tmp_source.zig:3:31: error: expected 'SmallErrorSet', found 'error'",
- ".tmp_source.zig:3:31: note: unable to cast global error set into smaller set");
+ ".tmp_source.zig:3:31: error: expected type 'SmallErrorSet', found 'error'",
+ ".tmp_source.zig:3:31: note: cannot cast global error set into smaller set",
+ );
- cases.add("recursive inferred error set",
+ cases.add(
+ "recursive inferred error set",
\\export fn entry() void {
\\ foo() catch unreachable;
\\}
@@ -239,9 +751,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ try foo();
\\}
,
- ".tmp_source.zig:5:5: error: cannot resolve inferred error set '@typeOf(foo).ReturnType.ErrorSet': function 'foo' not fully analyzed yet");
+ ".tmp_source.zig:5:5: error: cannot resolve inferred error set '@typeOf(foo).ReturnType.ErrorSet': function 'foo' not fully analyzed yet",
+ );
- cases.add("implicit cast of error set not a subset",
+ cases.add(
+ "implicit cast of error set not a subset",
\\const Set1 = error{A, B};
\\const Set2 = error{A, C};
\\export fn entry() void {
@@ -251,36 +765,53 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var x: Set2 = set1;
\\}
,
- ".tmp_source.zig:7:19: error: expected 'Set2', found 'Set1'",
- ".tmp_source.zig:1:23: note: 'error.B' not a member of destination error set");
+ ".tmp_source.zig:7:19: error: expected type 'Set2', found 'Set1'",
+ ".tmp_source.zig:1:23: note: 'error.B' not a member of destination error set",
+ );
- cases.add("int to err global invalid number",
- \\const Set1 = error{A, B};
+ cases.add(
+ "int to err global invalid number",
+ \\const Set1 = error{
+ \\ A,
+ \\ B,
+ \\};
\\comptime {
- \\ var x: usize = 3;
- \\ var y = error(x);
+ \\ var x: u16 = 3;
+ \\ var y = @intToError(x);
\\}
,
- ".tmp_source.zig:4:18: error: integer value 3 represents no error");
+ ".tmp_source.zig:7:13: error: integer value 3 represents no error",
+ );
- cases.add("int to err non global invalid number",
- \\const Set1 = error{A, B};
- \\const Set2 = error{A, C};
+ cases.add(
+ "int to err non global invalid number",
+ \\const Set1 = error{
+ \\ A,
+ \\ B,
+ \\};
+ \\const Set2 = error{
+ \\ A,
+ \\ C,
+ \\};
\\comptime {
- \\ var x = usize(Set1.B);
- \\ var y = Set2(x);
+ \\ var x = @errorToInt(Set1.B);
+ \\ var y = @errSetCast(Set2, @intToError(x));
\\}
,
- ".tmp_source.zig:5:17: error: integer value 2 represents no error in 'Set2'");
+ ".tmp_source.zig:11:13: error: error.B not a member of error set 'Set2'",
+ );
- cases.add("@memberCount of error",
+ cases.add(
+ "@memberCount of error",
\\comptime {
\\ _ = @memberCount(error);
\\}
,
- ".tmp_source.zig:2:9: error: global error set member count not available at comptime");
+ ".tmp_source.zig:2:9: error: global error set member count not available at comptime",
+ );
- cases.add("duplicate error value in error set",
+ cases.add(
+ "duplicate error value in error set",
\\const Foo = error {
\\ Bar,
\\ Bar,
@@ -290,22 +821,30 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:3:5: error: duplicate error: 'Bar'",
- ".tmp_source.zig:2:5: note: other error here");
+ ".tmp_source.zig:2:5: note: other error here",
+ );
- cases.add("cast negative integer literal to usize",
+ cases.add(
+ "cast negative integer literal to usize",
\\export fn entry() void {
\\ const x = usize(-10);
\\}
- , ".tmp_source.zig:2:21: error: cannot cast negative value -10 to unsigned integer type 'usize'");
+ ,
+ ".tmp_source.zig:2:21: error: cannot cast negative value -10 to unsigned integer type 'usize'",
+ );
- cases.add("use invalid number literal as array index",
+ cases.add(
+ "use invalid number literal as array index",
\\var v = 25;
\\export fn entry() void {
\\ var arr: [v]u8 = undefined;
\\}
- , ".tmp_source.zig:1:1: error: unable to infer variable type");
+ ,
+ ".tmp_source.zig:1:1: error: unable to infer variable type",
+ );
- cases.add("duplicate struct field",
+ cases.add(
+ "duplicate struct field",
\\const Foo = struct {
\\ Bar: i32,
\\ Bar: usize,
@@ -315,9 +854,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:3:5: error: duplicate struct field: 'Bar'",
- ".tmp_source.zig:2:5: note: other field here");
+ ".tmp_source.zig:2:5: note: other field here",
+ );
- cases.add("duplicate union field",
+ cases.add(
+ "duplicate union field",
\\const Foo = union {
\\ Bar: i32,
\\ Bar: usize,
@@ -327,9 +868,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:3:5: error: duplicate union field: 'Bar'",
- ".tmp_source.zig:2:5: note: other field here");
+ ".tmp_source.zig:2:5: note: other field here",
+ );
- cases.add("duplicate enum field",
+ cases.add(
+ "duplicate enum field",
\\const Foo = enum {
\\ Bar,
\\ Bar,
@@ -340,77 +883,108 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:3:5: error: duplicate enum field: 'Bar'",
- ".tmp_source.zig:2:5: note: other field here");
+ ".tmp_source.zig:2:5: note: other field here",
+ );
- cases.add("calling function with naked calling convention",
+ cases.add(
+ "calling function with naked calling convention",
\\export fn entry() void {
\\ foo();
\\}
\\nakedcc fn foo() void { }
,
".tmp_source.zig:2:5: error: unable to call function with naked calling convention",
- ".tmp_source.zig:4:9: note: declared here");
+ ".tmp_source.zig:4:9: note: declared here",
+ );
- cases.add("function with invalid return type",
+ cases.add(
+ "function with invalid return type",
\\export fn foo() boid {}
- , ".tmp_source.zig:1:17: error: use of undeclared identifier 'boid'");
+ ,
+ ".tmp_source.zig:1:17: error: use of undeclared identifier 'boid'",
+ );
- cases.add("function with non-extern non-packed enum parameter",
+ cases.add(
+ "function with non-extern non-packed enum parameter",
\\const Foo = enum { A, B, C };
\\export fn entry(foo: Foo) void { }
- , ".tmp_source.zig:2:22: error: parameter of type 'Foo' not allowed in function with calling convention 'ccc'");
+ ,
+ ".tmp_source.zig:2:22: error: parameter of type 'Foo' not allowed in function with calling convention 'ccc'",
+ );
- cases.add("function with non-extern non-packed struct parameter",
+ cases.add(
+ "function with non-extern non-packed struct parameter",
\\const Foo = struct {
\\ A: i32,
\\ B: f32,
\\ C: bool,
\\};
\\export fn entry(foo: Foo) void { }
- , ".tmp_source.zig:6:22: error: parameter of type 'Foo' not allowed in function with calling convention 'ccc'");
+ ,
+ ".tmp_source.zig:6:22: error: parameter of type 'Foo' not allowed in function with calling convention 'ccc'",
+ );
- cases.add("function with non-extern non-packed union parameter",
+ cases.add(
+ "function with non-extern non-packed union parameter",
\\const Foo = union {
\\ A: i32,
\\ B: f32,
\\ C: bool,
\\};
\\export fn entry(foo: Foo) void { }
- , ".tmp_source.zig:6:22: error: parameter of type 'Foo' not allowed in function with calling convention 'ccc'");
+ ,
+ ".tmp_source.zig:6:22: error: parameter of type 'Foo' not allowed in function with calling convention 'ccc'",
+ );
- cases.add("switch on enum with 1 field with no prongs",
+ cases.add(
+ "switch on enum with 1 field with no prongs",
\\const Foo = enum { M };
\\
\\export fn entry() void {
\\ var f = Foo.M;
\\ switch (f) {}
\\}
- , ".tmp_source.zig:5:5: error: enumeration value 'Foo.M' not handled in switch");
+ ,
+ ".tmp_source.zig:5:5: error: enumeration value 'Foo.M' not handled in switch",
+ );
- cases.add("shift by negative comptime integer",
+ cases.add(
+ "shift by negative comptime integer",
\\comptime {
\\ var a = 1 >> -1;
\\}
- , ".tmp_source.zig:2:18: error: shift by negative value -1");
+ ,
+ ".tmp_source.zig:2:18: error: shift by negative value -1",
+ );
- cases.add("@panic called at compile time",
+ cases.add(
+ "@panic called at compile time",
\\export fn entry() void {
\\ comptime {
- \\ @panic("aoeu");
+ \\ @panic("aoeu",);
\\ }
\\}
- , ".tmp_source.zig:3:9: error: encountered @panic at compile-time");
+ ,
+ ".tmp_source.zig:3:9: error: encountered @panic at compile-time",
+ );
- cases.add("wrong return type for main",
+ cases.add(
+ "wrong return type for main",
\\pub fn main() f32 { }
- , "error: expected return type of main to be 'u8', 'noreturn', 'void', or '!void'");
+ ,
+ "error: expected return type of main to be 'u8', 'noreturn', 'void', or '!void'",
+ );
- cases.add("double ?? on main return value",
+ cases.add(
+ "double ?? on main return value",
\\pub fn main() ??void {
\\}
- , "error: expected return type of main to be 'u8', 'noreturn', 'void', or '!void'");
+ ,
+ "error: expected return type of main to be 'u8', 'noreturn', 'void', or '!void'",
+ );
- cases.add("bad identifier in function with struct defined inside function which references local const",
+ cases.add(
+ "bad identifier in function with struct defined inside function which references local const",
\\export fn entry() void {
\\ const BlockKind = u32;
\\
@@ -420,9 +994,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\ bogus;
\\}
- , ".tmp_source.zig:8:5: error: use of undeclared identifier 'bogus'");
+ ,
+ ".tmp_source.zig:8:5: error: use of undeclared identifier 'bogus'",
+ );
- cases.add("labeled break not found",
+ cases.add(
+ "labeled break not found",
\\export fn entry() void {
\\ blah: while (true) {
\\ while (true) {
@@ -430,9 +1007,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ }
\\ }
\\}
- , ".tmp_source.zig:4:13: error: label not found: 'outer'");
+ ,
+ ".tmp_source.zig:4:13: error: label not found: 'outer'",
+ );
- cases.add("labeled continue not found",
+ cases.add(
+ "labeled continue not found",
\\export fn entry() void {
\\ var i: usize = 0;
\\ blah: while (i < 10) : (i += 1) {
@@ -441,400 +1021,554 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ }
\\ }
\\}
- , ".tmp_source.zig:5:13: error: labeled loop not found: 'outer'");
+ ,
+ ".tmp_source.zig:5:13: error: labeled loop not found: 'outer'",
+ );
- cases.add("attempt to use 0 bit type in extern fn",
- \\extern fn foo(ptr: extern fn(&void) void) void;
+ cases.add(
+ "attempt to use 0 bit type in extern fn",
+ \\extern fn foo(ptr: extern fn(*void) void) void;
\\
\\export fn entry() void {
\\ foo(bar);
\\}
\\
- \\extern fn bar(x: &void) void { }
- , ".tmp_source.zig:7:18: error: parameter of type '&void' has 0 bits; not allowed in function with calling convention 'ccc'");
+ \\extern fn bar(x: *void) void { }
+ ,
+ ".tmp_source.zig:7:18: error: parameter of type '*void' has 0 bits; not allowed in function with calling convention 'ccc'",
+ );
- cases.add("implicit semicolon - block statement",
+ cases.add(
+ "implicit semicolon - block statement",
\\export fn entry() void {
\\ {}
\\ var good = {};
\\ ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - block expr",
+ cases.add(
+ "implicit semicolon - block expr",
\\export fn entry() void {
\\ _ = {};
\\ var good = {};
\\ _ = {}
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - comptime statement",
+ cases.add(
+ "implicit semicolon - comptime statement",
\\export fn entry() void {
\\ comptime {}
\\ var good = {};
\\ comptime ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - comptime expression",
+ cases.add(
+ "implicit semicolon - comptime expression",
\\export fn entry() void {
\\ _ = comptime {};
\\ var good = {};
\\ _ = comptime {}
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - defer",
+ cases.add(
+ "implicit semicolon - defer",
\\export fn entry() void {
\\ defer {}
\\ var good = {};
\\ defer ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - if statement",
+ cases.add(
+ "implicit semicolon - if statement",
\\export fn entry() void {
\\ if(true) {}
\\ var good = {};
\\ if(true) ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - if expression",
+ cases.add(
+ "implicit semicolon - if expression",
\\export fn entry() void {
\\ _ = if(true) {};
\\ var good = {};
\\ _ = if(true) {}
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - if-else statement",
+ cases.add(
+ "implicit semicolon - if-else statement",
\\export fn entry() void {
\\ if(true) {} else {}
\\ var good = {};
\\ if(true) ({}) else ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - if-else expression",
+ cases.add(
+ "implicit semicolon - if-else expression",
\\export fn entry() void {
\\ _ = if(true) {} else {};
\\ var good = {};
\\ _ = if(true) {} else {}
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - if-else-if statement",
+ cases.add(
+ "implicit semicolon - if-else-if statement",
\\export fn entry() void {
\\ if(true) {} else if(true) {}
\\ var good = {};
\\ if(true) ({}) else if(true) ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - if-else-if expression",
+ cases.add(
+ "implicit semicolon - if-else-if expression",
\\export fn entry() void {
\\ _ = if(true) {} else if(true) {};
\\ var good = {};
\\ _ = if(true) {} else if(true) {}
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - if-else-if-else statement",
+ cases.add(
+ "implicit semicolon - if-else-if-else statement",
\\export fn entry() void {
\\ if(true) {} else if(true) {} else {}
\\ var good = {};
\\ if(true) ({}) else if(true) ({}) else ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - if-else-if-else expression",
+ cases.add(
+ "implicit semicolon - if-else-if-else expression",
\\export fn entry() void {
\\ _ = if(true) {} else if(true) {} else {};
\\ var good = {};
\\ _ = if(true) {} else if(true) {} else {}
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - test statement",
+ cases.add(
+ "implicit semicolon - test statement",
\\export fn entry() void {
\\ if (foo()) |_| {}
\\ var good = {};
\\ if (foo()) |_| ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - test expression",
+ cases.add(
+ "implicit semicolon - test expression",
\\export fn entry() void {
\\ _ = if (foo()) |_| {};
\\ var good = {};
\\ _ = if (foo()) |_| {}
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - while statement",
+ cases.add(
+ "implicit semicolon - while statement",
\\export fn entry() void {
\\ while(true) {}
\\ var good = {};
\\ while(true) ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - while expression",
+ cases.add(
+ "implicit semicolon - while expression",
\\export fn entry() void {
\\ _ = while(true) {};
\\ var good = {};
\\ _ = while(true) {}
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - while-continue statement",
+ cases.add(
+ "implicit semicolon - while-continue statement",
\\export fn entry() void {
\\ while(true):({}) {}
\\ var good = {};
\\ while(true):({}) ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - while-continue expression",
+ cases.add(
+ "implicit semicolon - while-continue expression",
\\export fn entry() void {
\\ _ = while(true):({}) {};
\\ var good = {};
\\ _ = while(true):({}) {}
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - for statement",
+ cases.add(
+ "implicit semicolon - for statement",
\\export fn entry() void {
\\ for(foo()) {}
\\ var good = {};
\\ for(foo()) ({})
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("implicit semicolon - for expression",
+ cases.add(
+ "implicit semicolon - for expression",
\\export fn entry() void {
\\ _ = for(foo()) {};
\\ var good = {};
\\ _ = for(foo()) {}
\\ var bad = {};
\\}
- , ".tmp_source.zig:5:5: error: expected token ';', found 'var'");
+ ,
+ ".tmp_source.zig:5:5: error: expected token ';', found 'var'",
+ );
- cases.add("multiple function definitions",
+ cases.add(
+ "multiple function definitions",
\\fn a() void {}
\\fn a() void {}
\\export fn entry() void { a(); }
- , ".tmp_source.zig:2:1: error: redefinition of 'a'");
+ ,
+ ".tmp_source.zig:2:1: error: redefinition of 'a'",
+ );
- cases.add("unreachable with return",
+ cases.add(
+ "unreachable with return",
\\fn a() noreturn {return;}
\\export fn entry() void { a(); }
- , ".tmp_source.zig:1:18: error: expected type 'noreturn', found 'void'");
+ ,
+ ".tmp_source.zig:1:18: error: expected type 'noreturn', found 'void'",
+ );
- cases.add("control reaches end of non-void function",
+ cases.add(
+ "control reaches end of non-void function",
\\fn a() i32 {}
\\export fn entry() void { _ = a(); }
- , ".tmp_source.zig:1:12: error: expected type 'i32', found 'void'");
+ ,
+ ".tmp_source.zig:1:12: error: expected type 'i32', found 'void'",
+ );
- cases.add("undefined function call",
+ cases.add(
+ "undefined function call",
\\export fn a() void {
\\ b();
\\}
- , ".tmp_source.zig:2:5: error: use of undeclared identifier 'b'");
+ ,
+ ".tmp_source.zig:2:5: error: use of undeclared identifier 'b'",
+ );
- cases.add("wrong number of arguments",
+ cases.add(
+ "wrong number of arguments",
\\export fn a() void {
\\ b(1);
\\}
\\fn b(a: i32, b: i32, c: i32) void { }
- , ".tmp_source.zig:2:6: error: expected 3 arguments, found 1");
+ ,
+ ".tmp_source.zig:2:6: error: expected 3 arguments, found 1",
+ );
- cases.add("invalid type",
+ cases.add(
+ "invalid type",
\\fn a() bogus {}
\\export fn entry() void { _ = a(); }
- , ".tmp_source.zig:1:8: error: use of undeclared identifier 'bogus'");
+ ,
+ ".tmp_source.zig:1:8: error: use of undeclared identifier 'bogus'",
+ );
- cases.add("pointer to noreturn",
- \\fn a() &noreturn {}
+ cases.add(
+ "pointer to noreturn",
+ \\fn a() *noreturn {}
\\export fn entry() void { _ = a(); }
- , ".tmp_source.zig:1:9: error: pointer to noreturn not allowed");
+ ,
+ ".tmp_source.zig:1:8: error: pointer to noreturn not allowed",
+ );
- cases.add("unreachable code",
+ cases.add(
+ "unreachable code",
\\export fn a() void {
\\ return;
\\ b();
\\}
\\
\\fn b() void {}
- , ".tmp_source.zig:3:5: error: unreachable code");
+ ,
+ ".tmp_source.zig:3:5: error: unreachable code",
+ );
- cases.add("bad import",
- \\const bogus = @import("bogus-does-not-exist.zig");
+ cases.add(
+ "bad import",
+ \\const bogus = @import("bogus-does-not-exist.zig",);
\\export fn entry() void { bogus.bogo(); }
- , ".tmp_source.zig:1:15: error: unable to find 'bogus-does-not-exist.zig'");
+ ,
+ ".tmp_source.zig:1:15: error: unable to find 'bogus-does-not-exist.zig'",
+ );
- cases.add("undeclared identifier",
+ cases.add(
+ "undeclared identifier",
\\export fn a() void {
\\ return
\\ b +
\\ c;
\\}
,
- ".tmp_source.zig:3:5: error: use of undeclared identifier 'b'",
- ".tmp_source.zig:4:5: error: use of undeclared identifier 'c'");
+ ".tmp_source.zig:3:5: error: use of undeclared identifier 'b'",
+ ".tmp_source.zig:4:5: error: use of undeclared identifier 'c'",
+ );
- cases.add("parameter redeclaration",
+ cases.add(
+ "parameter redeclaration",
\\fn f(a : i32, a : i32) void {
\\}
\\export fn entry() void { f(1, 2); }
- , ".tmp_source.zig:1:15: error: redeclaration of variable 'a'");
+ ,
+ ".tmp_source.zig:1:15: error: redeclaration of variable 'a'",
+ );
- cases.add("local variable redeclaration",
+ cases.add(
+ "local variable redeclaration",
\\export fn f() void {
\\ const a : i32 = 0;
\\ const a = 0;
\\}
- , ".tmp_source.zig:3:5: error: redeclaration of variable 'a'");
+ ,
+ ".tmp_source.zig:3:5: error: redeclaration of variable 'a'",
+ );
- cases.add("local variable redeclares parameter",
+ cases.add(
+ "local variable redeclares parameter",
\\fn f(a : i32) void {
\\ const a = 0;
\\}
\\export fn entry() void { f(1); }
- , ".tmp_source.zig:2:5: error: redeclaration of variable 'a'");
+ ,
+ ".tmp_source.zig:2:5: error: redeclaration of variable 'a'",
+ );
- cases.add("variable has wrong type",
+ cases.add(
+ "variable has wrong type",
\\export fn f() i32 {
\\ const a = c"a";
\\ return a;
\\}
- , ".tmp_source.zig:3:12: error: expected type 'i32', found '&const u8'");
+ ,
+ ".tmp_source.zig:3:12: error: expected type 'i32', found '[*]const u8'",
+ );
- cases.add("if condition is bool, not int",
+ cases.add(
+ "if condition is bool, not int",
\\export fn f() void {
\\ if (0) {}
\\}
- , ".tmp_source.zig:2:9: error: integer value 0 cannot be implicitly casted to type 'bool'");
+ ,
+ ".tmp_source.zig:2:9: error: integer value 0 cannot be implicitly casted to type 'bool'",
+ );
- cases.add("assign unreachable",
+ cases.add(
+ "assign unreachable",
\\export fn f() void {
\\ const a = return;
\\}
- , ".tmp_source.zig:2:5: error: unreachable code");
+ ,
+ ".tmp_source.zig:2:5: error: unreachable code",
+ );
- cases.add("unreachable variable",
+ cases.add(
+ "unreachable variable",
\\export fn f() void {
\\ const a: noreturn = {};
\\}
- , ".tmp_source.zig:2:14: error: variable of type 'noreturn' not allowed");
+ ,
+ ".tmp_source.zig:2:14: error: variable of type 'noreturn' not allowed",
+ );
- cases.add("unreachable parameter",
+ cases.add(
+ "unreachable parameter",
\\fn f(a: noreturn) void {}
\\export fn entry() void { f(); }
- , ".tmp_source.zig:1:9: error: parameter of type 'noreturn' not allowed");
+ ,
+ ".tmp_source.zig:1:9: error: parameter of type 'noreturn' not allowed",
+ );
- cases.add("bad assignment target",
+ cases.add(
+ "bad assignment target",
\\export fn f() void {
\\ 3 = 3;
\\}
- , ".tmp_source.zig:2:7: error: cannot assign to constant");
+ ,
+ ".tmp_source.zig:2:7: error: cannot assign to constant",
+ );
- cases.add("assign to constant variable",
+ cases.add(
+ "assign to constant variable",
\\export fn f() void {
\\ const a = 3;
\\ a = 4;
\\}
- , ".tmp_source.zig:3:7: error: cannot assign to constant");
+ ,
+ ".tmp_source.zig:3:7: error: cannot assign to constant",
+ );
- cases.add("use of undeclared identifier",
+ cases.add(
+ "use of undeclared identifier",
\\export fn f() void {
\\ b = 3;
\\}
- , ".tmp_source.zig:2:5: error: use of undeclared identifier 'b'");
+ ,
+ ".tmp_source.zig:2:5: error: use of undeclared identifier 'b'",
+ );
- cases.add("const is a statement, not an expression",
+ cases.add(
+ "const is a statement, not an expression",
\\export fn f() void {
\\ (const a = 0);
\\}
- , ".tmp_source.zig:2:6: error: invalid token: 'const'");
+ ,
+ ".tmp_source.zig:2:6: error: invalid token: 'const'",
+ );
- cases.add("array access of undeclared identifier",
+ cases.add(
+ "array access of undeclared identifier",
\\export fn f() void {
\\ i[i] = i[i];
\\}
- , ".tmp_source.zig:2:5: error: use of undeclared identifier 'i'",
- ".tmp_source.zig:2:12: error: use of undeclared identifier 'i'");
+ ,
+ ".tmp_source.zig:2:5: error: use of undeclared identifier 'i'",
+ ".tmp_source.zig:2:12: error: use of undeclared identifier 'i'",
+ );
- cases.add("array access of non array",
+ cases.add(
+ "array access of non array",
\\export fn f() void {
\\ var bad : bool = undefined;
\\ bad[bad] = bad[bad];
\\}
- , ".tmp_source.zig:3:8: error: array access of non-array type 'bool'",
- ".tmp_source.zig:3:19: error: array access of non-array type 'bool'");
+ ,
+ ".tmp_source.zig:3:8: error: array access of non-array type 'bool'",
+ ".tmp_source.zig:3:19: error: array access of non-array type 'bool'",
+ );
- cases.add("array access with non integer index",
+ cases.add(
+ "array access with non integer index",
\\export fn f() void {
\\ var array = "aoeu";
\\ var bad = false;
\\ array[bad] = array[bad];
\\}
- , ".tmp_source.zig:4:11: error: expected type 'usize', found 'bool'",
- ".tmp_source.zig:4:24: error: expected type 'usize', found 'bool'");
+ ,
+ ".tmp_source.zig:4:11: error: expected type 'usize', found 'bool'",
+ ".tmp_source.zig:4:24: error: expected type 'usize', found 'bool'",
+ );
- cases.add("write to const global variable",
+ cases.add(
+ "write to const global variable",
\\const x : i32 = 99;
\\fn f() void {
\\ x = 1;
\\}
\\export fn entry() void { f(); }
- , ".tmp_source.zig:3:7: error: cannot assign to constant");
+ ,
+ ".tmp_source.zig:3:7: error: cannot assign to constant",
+ );
-
- cases.add("missing else clause",
+ cases.add(
+ "missing else clause",
\\fn f(b: bool) void {
\\ const x : i32 = if (b) h: { break :h 1; };
\\ const y = if (b) h: { break :h i32(1); };
\\}
\\export fn entry() void { f(true); }
- , ".tmp_source.zig:2:42: error: integer value 1 cannot be implicitly casted to type 'void'",
- ".tmp_source.zig:3:15: error: incompatible types: 'i32' and 'void'");
+ ,
+ ".tmp_source.zig:2:42: error: integer value 1 cannot be implicitly casted to type 'void'",
+ ".tmp_source.zig:3:15: error: incompatible types: 'i32' and 'void'",
+ );
- cases.add("direct struct loop",
+ cases.add(
+ "direct struct loop",
\\const A = struct { a : A, };
\\export fn entry() usize { return @sizeOf(A); }
- , ".tmp_source.zig:1:11: error: struct 'A' contains itself");
+ ,
+ ".tmp_source.zig:1:11: error: struct 'A' contains itself",
+ );
- cases.add("indirect struct loop",
+ cases.add(
+ "indirect struct loop",
\\const A = struct { b : B, };
\\const B = struct { c : C, };
\\const C = struct { a : A, };
\\export fn entry() usize { return @sizeOf(A); }
- , ".tmp_source.zig:1:11: error: struct 'A' contains itself");
+ ,
+ ".tmp_source.zig:1:11: error: struct 'A' contains itself",
+ );
- cases.add("invalid struct field",
+ cases.add(
+ "invalid struct field",
\\const A = struct { x : i32, };
\\export fn f() void {
\\ var a : A = undefined;
@@ -842,27 +1576,37 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ const y = a.bar;
\\}
,
- ".tmp_source.zig:4:6: error: no member named 'foo' in struct 'A'",
- ".tmp_source.zig:5:16: error: no member named 'bar' in struct 'A'");
+ ".tmp_source.zig:4:6: error: no member named 'foo' in struct 'A'",
+ ".tmp_source.zig:5:16: error: no member named 'bar' in struct 'A'",
+ );
- cases.add("redefinition of struct",
+ cases.add(
+ "redefinition of struct",
\\const A = struct { x : i32, };
\\const A = struct { y : i32, };
- , ".tmp_source.zig:2:1: error: redefinition of 'A'");
+ ,
+ ".tmp_source.zig:2:1: error: redefinition of 'A'",
+ );
- cases.add("redefinition of enums",
+ cases.add(
+ "redefinition of enums",
\\const A = enum {};
\\const A = enum {};
- , ".tmp_source.zig:2:1: error: redefinition of 'A'");
+ ,
+ ".tmp_source.zig:2:1: error: redefinition of 'A'",
+ );
- cases.add("redefinition of global variables",
+ cases.add(
+ "redefinition of global variables",
\\var a : i32 = 1;
\\var a : i32 = 2;
,
- ".tmp_source.zig:2:1: error: redefinition of 'a'",
- ".tmp_source.zig:1:1: note: previous definition is here");
+ ".tmp_source.zig:2:1: error: redefinition of 'a'",
+ ".tmp_source.zig:1:1: note: previous definition is here",
+ );
- cases.add("duplicate field in struct value expression",
+ cases.add(
+ "duplicate field in struct value expression",
\\const A = struct {
\\ x : i32,
\\ y : i32,
@@ -876,9 +1620,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ .z = 4,
\\ };
\\}
- , ".tmp_source.zig:11:9: error: duplicate field");
+ ,
+ ".tmp_source.zig:11:9: error: duplicate field",
+ );
- cases.add("missing field in struct value expression",
+ cases.add(
+ "missing field in struct value expression",
\\const A = struct {
\\ x : i32,
\\ y : i32,
@@ -892,9 +1639,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ .y = 2,
\\ };
\\}
- , ".tmp_source.zig:9:17: error: missing field: 'x'");
+ ,
+ ".tmp_source.zig:9:17: error: missing field: 'x'",
+ );
- cases.add("invalid field in struct value expression",
+ cases.add(
+ "invalid field in struct value expression",
\\const A = struct {
\\ x : i32,
\\ y : i32,
@@ -907,66 +1657,95 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ .foo = 42,
\\ };
\\}
- , ".tmp_source.zig:10:9: error: no member named 'foo' in struct 'A'");
+ ,
+ ".tmp_source.zig:10:9: error: no member named 'foo' in struct 'A'",
+ );
- cases.add("invalid break expression",
+ cases.add(
+ "invalid break expression",
\\export fn f() void {
\\ break;
\\}
- , ".tmp_source.zig:2:5: error: break expression outside loop");
+ ,
+ ".tmp_source.zig:2:5: error: break expression outside loop",
+ );
- cases.add("invalid continue expression",
+ cases.add(
+ "invalid continue expression",
\\export fn f() void {
\\ continue;
\\}
- , ".tmp_source.zig:2:5: error: continue expression outside loop");
+ ,
+ ".tmp_source.zig:2:5: error: continue expression outside loop",
+ );
- cases.add("invalid maybe type",
+ cases.add(
+ "invalid maybe type",
\\export fn f() void {
\\ if (true) |x| { }
\\}
- , ".tmp_source.zig:2:9: error: expected nullable type, found 'bool'");
+ ,
+ ".tmp_source.zig:2:9: error: expected optional type, found 'bool'",
+ );
- cases.add("cast unreachable",
+ cases.add(
+ "cast unreachable",
\\fn f() i32 {
\\ return i32(return 1);
\\}
\\export fn entry() void { _ = f(); }
- , ".tmp_source.zig:2:15: error: unreachable code");
+ ,
+ ".tmp_source.zig:2:15: error: unreachable code",
+ );
- cases.add("invalid builtin fn",
+ cases.add(
+ "invalid builtin fn",
\\fn f() @bogus(foo) {
\\}
\\export fn entry() void { _ = f(); }
- , ".tmp_source.zig:1:8: error: invalid builtin function: 'bogus'");
+ ,
+ ".tmp_source.zig:1:8: error: invalid builtin function: 'bogus'",
+ );
- cases.add("top level decl dependency loop",
+ cases.add(
+ "top level decl dependency loop",
\\const a : @typeOf(b) = 0;
\\const b : @typeOf(a) = 0;
\\export fn entry() void {
\\ const c = a + b;
\\}
- , ".tmp_source.zig:1:1: error: 'a' depends on itself");
+ ,
+ ".tmp_source.zig:1:1: error: 'a' depends on itself",
+ );
- cases.add("noalias on non pointer param",
+ cases.add(
+ "noalias on non pointer param",
\\fn f(noalias x: i32) void {}
\\export fn entry() void { f(1234); }
- , ".tmp_source.zig:1:6: error: noalias on non-pointer parameter");
+ ,
+ ".tmp_source.zig:1:6: error: noalias on non-pointer parameter",
+ );
- cases.add("struct init syntax for array",
+ cases.add(
+ "struct init syntax for array",
\\const foo = []u16{.x = 1024,};
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
- , ".tmp_source.zig:1:18: error: type '[]u16' does not support struct initialization syntax");
+ ,
+ ".tmp_source.zig:1:18: error: type '[]u16' does not support struct initialization syntax",
+ );
- cases.add("type variables must be constant",
+ cases.add(
+ "type variables must be constant",
\\var foo = u8;
\\export fn entry() foo {
\\ return 1;
\\}
- , ".tmp_source.zig:1:1: error: variable of type 'type' must be constant");
+ ,
+ ".tmp_source.zig:1:1: error: variable of type 'type' must be constant",
+ );
-
- cases.add("variables shadowing types",
+ cases.add(
+ "variables shadowing types",
\\const Foo = struct {};
\\const Bar = struct {};
\\
@@ -978,12 +1757,14 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ f(1234);
\\}
,
- ".tmp_source.zig:4:6: error: redefinition of 'Foo'",
- ".tmp_source.zig:1:1: note: previous definition is here",
- ".tmp_source.zig:5:5: error: redefinition of 'Bar'",
- ".tmp_source.zig:2:1: note: previous definition is here");
+ ".tmp_source.zig:4:6: error: redefinition of 'Foo'",
+ ".tmp_source.zig:1:1: note: previous definition is here",
+ ".tmp_source.zig:5:5: error: redefinition of 'Bar'",
+ ".tmp_source.zig:2:1: note: previous definition is here",
+ );
- cases.add("switch expression - missing enumeration prong",
+ cases.add(
+ "switch expression - missing enumeration prong",
\\const Number = enum {
\\ One,
\\ Two,
@@ -999,9 +1780,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(f)); }
- , ".tmp_source.zig:8:5: error: enumeration value 'Number.Four' not handled in switch");
+ ,
+ ".tmp_source.zig:8:5: error: enumeration value 'Number.Four' not handled in switch",
+ );
- cases.add("switch expression - duplicate enumeration prong",
+ cases.add(
+ "switch expression - duplicate enumeration prong",
\\const Number = enum {
\\ One,
\\ Two,
@@ -1019,10 +1803,13 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(f)); }
- , ".tmp_source.zig:13:15: error: duplicate switch value",
- ".tmp_source.zig:10:15: note: other value is here");
+ ,
+ ".tmp_source.zig:13:15: error: duplicate switch value",
+ ".tmp_source.zig:10:15: note: other value is here",
+ );
- cases.add("switch expression - duplicate enumeration prong when else present",
+ cases.add(
+ "switch expression - duplicate enumeration prong when else present",
\\const Number = enum {
\\ One,
\\ Two,
@@ -1041,10 +1828,13 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(f)); }
- , ".tmp_source.zig:13:15: error: duplicate switch value",
- ".tmp_source.zig:10:15: note: other value is here");
+ ,
+ ".tmp_source.zig:13:15: error: duplicate switch value",
+ ".tmp_source.zig:10:15: note: other value is here",
+ );
- cases.add("switch expression - multiple else prongs",
+ cases.add(
+ "switch expression - multiple else prongs",
\\fn f(x: u32) void {
\\ const value: bool = switch (x) {
\\ 1234 => false,
@@ -1055,9 +1845,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\export fn entry() void {
\\ f(1234);
\\}
- , ".tmp_source.zig:5:9: error: multiple else prongs in switch expression");
+ ,
+ ".tmp_source.zig:5:9: error: multiple else prongs in switch expression",
+ );
- cases.add("switch expression - non exhaustive integer prongs",
+ cases.add(
+ "switch expression - non exhaustive integer prongs",
\\fn foo(x: u8) void {
\\ switch (x) {
\\ 0 => {},
@@ -1065,9 +1858,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:2:5: error: switch must handle all possibilities");
+ ".tmp_source.zig:2:5: error: switch must handle all possibilities",
+ );
- cases.add("switch expression - duplicate or overlapping integer value",
+ cases.add(
+ "switch expression - duplicate or overlapping integer value",
\\fn foo(x: u8) u8 {
\\ return switch (x) {
\\ 0 ... 100 => u8(0),
@@ -1079,10 +1874,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
".tmp_source.zig:6:9: error: duplicate switch value",
- ".tmp_source.zig:5:14: note: previous value is here");
+ ".tmp_source.zig:5:14: note: previous value is here",
+ );
- cases.add("switch expression - switch on pointer type with no else",
- \\fn foo(x: &u8) void {
+ cases.add(
+ "switch expression - switch on pointer type with no else",
+ \\fn foo(x: *u8) void {
\\ switch (x) {
\\ &y => {},
\\ }
@@ -1090,62 +1887,97 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\const y: u8 = 100;
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:2:5: error: else prong required when switching on type '&u8'");
+ ".tmp_source.zig:2:5: error: else prong required when switching on type '*u8'",
+ );
- cases.add("global variable initializer must be constant expression",
+ cases.add(
+ "global variable initializer must be constant expression",
\\extern fn foo() i32;
\\const x = foo();
\\export fn entry() i32 { return x; }
- , ".tmp_source.zig:2:11: error: unable to evaluate constant expression");
+ ,
+ ".tmp_source.zig:2:11: error: unable to evaluate constant expression",
+ );
- cases.add("array concatenation with wrong type",
+ cases.add(
+ "array concatenation with wrong type",
\\const src = "aoeu";
\\const derp = usize(1234);
\\const a = derp ++ "foo";
\\
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
- , ".tmp_source.zig:3:11: error: expected array or C string literal, found 'usize'");
+ ,
+ ".tmp_source.zig:3:11: error: expected array or C string literal, found 'usize'",
+ );
- cases.add("non compile time array concatenation",
+ cases.add(
+ "non compile time array concatenation",
\\fn f() []u8 {
\\ return s ++ "foo";
\\}
\\var s: [10]u8 = undefined;
\\export fn entry() usize { return @sizeOf(@typeOf(f)); }
- , ".tmp_source.zig:2:12: error: unable to evaluate constant expression");
+ ,
+ ".tmp_source.zig:2:12: error: unable to evaluate constant expression",
+ );
- cases.add("@cImport with bogus include",
+ cases.add(
+ "@cImport with bogus include",
\\const c = @cImport(@cInclude("bogus.h"));
\\export fn entry() usize { return @sizeOf(@typeOf(c.bogo)); }
- , ".tmp_source.zig:1:11: error: C import failed",
- ".h:1:10: note: 'bogus.h' file not found");
+ ,
+ ".tmp_source.zig:1:11: error: C import failed",
+ ".h:1:10: note: 'bogus.h' file not found",
+ );
- cases.add("address of number literal",
+ cases.add(
+ "address of number literal",
\\const x = 3;
\\const y = &x;
- \\fn foo() &const i32 { return y; }
+ \\fn foo() *const i32 { return y; }
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
- , ".tmp_source.zig:3:30: error: expected type '&const i32', found '&const (integer literal)'");
+ ,
+ ".tmp_source.zig:3:30: error: expected type '*const i32', found '*const comptime_int'",
+ );
- cases.add("integer overflow error",
+ cases.add(
+ "integer overflow error",
\\const x : u8 = 300;
\\export fn entry() usize { return @sizeOf(@typeOf(x)); }
- , ".tmp_source.zig:1:16: error: integer value 300 cannot be implicitly casted to type 'u8'");
+ ,
+ ".tmp_source.zig:1:16: error: integer value 300 cannot be implicitly casted to type 'u8'",
+ );
- cases.add("incompatible number literals",
+ cases.add(
+ "invalid shift amount error",
+ \\const x : u8 = 2;
+ \\fn f() u16 {
+ \\ return x << 8;
+ \\}
+ \\export fn entry() u16 { return f(); }
+ ,
+ ".tmp_source.zig:3:14: error: RHS of shift is too large for LHS type",
+ ".tmp_source.zig:3:17: note: value 8 cannot fit into type u3",
+ );
+
+ cases.add(
+ "incompatible number literals",
\\const x = 2 == 2.0;
\\export fn entry() usize { return @sizeOf(@typeOf(x)); }
- , ".tmp_source.zig:1:11: error: integer value 2 cannot be implicitly casted to type '(float literal)'");
+ ,
+ ".tmp_source.zig:1:11: error: integer value 2 cannot be implicitly casted to type 'comptime_float'",
+ );
- cases.add("missing function call param",
+ cases.add(
+ "missing function call param",
\\const Foo = struct {
\\ a: i32,
\\ b: i32,
\\
- \\ fn member_a(foo: &const Foo) i32 {
+ \\ fn member_a(foo: *const Foo) i32 {
\\ return foo.a;
\\ }
- \\ fn member_b(foo: &const Foo) i32 {
+ \\ fn member_b(foo: *const Foo) i32 {
\\ return foo.b;
\\ }
\\};
@@ -1156,63 +1988,78 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ Foo.member_b,
\\};
\\
- \\fn f(foo: &const Foo, index: usize) void {
+ \\fn f(foo: *const Foo, index: usize) void {
\\ const result = members[index]();
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(f)); }
- , ".tmp_source.zig:20:34: error: expected 1 arguments, found 0");
+ ,
+ ".tmp_source.zig:20:34: error: expected 1 arguments, found 0",
+ );
- cases.add("missing function name and param name",
+ cases.add(
+ "missing function name and param name",
\\fn () void {}
\\fn f(i32) void {}
\\export fn entry() usize { return @sizeOf(@typeOf(f)); }
,
- ".tmp_source.zig:1:1: error: missing function name",
- ".tmp_source.zig:2:6: error: missing parameter name");
+ ".tmp_source.zig:1:1: error: missing function name",
+ ".tmp_source.zig:2:6: error: missing parameter name",
+ );
- cases.add("wrong function type",
+ cases.add(
+ "wrong function type",
\\const fns = []fn() void { a, b, c };
\\fn a() i32 {return 0;}
\\fn b() i32 {return 1;}
\\fn c() i32 {return 2;}
\\export fn entry() usize { return @sizeOf(@typeOf(fns)); }
- , ".tmp_source.zig:1:27: error: expected type 'fn() void', found 'fn() i32'");
+ ,
+ ".tmp_source.zig:1:27: error: expected type 'fn() void', found 'fn() i32'",
+ );
- cases.add("extern function pointer mismatch",
+ cases.add(
+ "extern function pointer mismatch",
\\const fns = [](fn(i32)i32) { a, b, c };
\\pub fn a(x: i32) i32 {return x + 0;}
\\pub fn b(x: i32) i32 {return x + 1;}
\\export fn c(x: i32) i32 {return x + 2;}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(fns)); }
- , ".tmp_source.zig:1:36: error: expected type 'fn(i32) i32', found 'extern fn(i32) i32'");
+ ,
+ ".tmp_source.zig:1:36: error: expected type 'fn(i32) i32', found 'extern fn(i32) i32'",
+ );
-
- cases.add("implicit cast from f64 to f32",
+ cases.add(
+ "implicit cast from f64 to f32",
\\const x : f64 = 1.0;
\\const y : f32 = x;
\\
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
- , ".tmp_source.zig:2:17: error: expected type 'f32', found 'f64'");
+ ,
+ ".tmp_source.zig:2:17: error: expected type 'f32', found 'f64'",
+ );
-
- cases.add("colliding invalid top level functions",
+ cases.add(
+ "colliding invalid top level functions",
\\fn func() bogus {}
\\fn func() bogus {}
\\export fn entry() usize { return @sizeOf(@typeOf(func)); }
,
- ".tmp_source.zig:2:1: error: redefinition of 'func'",
- ".tmp_source.zig:1:11: error: use of undeclared identifier 'bogus'");
+ ".tmp_source.zig:2:1: error: redefinition of 'func'",
+ ".tmp_source.zig:1:11: error: use of undeclared identifier 'bogus'",
+ );
-
- cases.add("bogus compile var",
+ cases.add(
+ "bogus compile var",
\\const x = @import("builtin").bogus;
\\export fn entry() usize { return @sizeOf(@typeOf(x)); }
- , ".tmp_source.zig:1:29: error: no member named 'bogus' in '");
+ ,
+ ".tmp_source.zig:1:29: error: no member named 'bogus' in '",
+ );
-
- cases.add("non constant expression in array size outside function",
+ cases.add(
+ "non constant expression in array size outside function",
\\const Foo = struct {
\\ y: [get()]u8,
\\};
@@ -1221,22 +2068,25 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(Foo)); }
,
- ".tmp_source.zig:5:25: error: unable to evaluate constant expression",
- ".tmp_source.zig:2:12: note: called from here",
- ".tmp_source.zig:2:8: note: called from here");
+ ".tmp_source.zig:5:25: error: unable to evaluate constant expression",
+ ".tmp_source.zig:2:12: note: called from here",
+ ".tmp_source.zig:2:8: note: called from here",
+ );
-
- cases.add("addition with non numbers",
+ cases.add(
+ "addition with non numbers",
\\const Foo = struct {
\\ field: i32,
\\};
\\const x = Foo {.field = 1} + Foo {.field = 2};
\\
\\export fn entry() usize { return @sizeOf(@typeOf(x)); }
- , ".tmp_source.zig:4:28: error: invalid operands to binary expression: 'Foo' and 'Foo'");
+ ,
+ ".tmp_source.zig:4:28: error: invalid operands to binary expression: 'Foo' and 'Foo'",
+ );
-
- cases.add("division by zero",
+ cases.add(
+ "division by zero",
\\const lit_int_x = 1 / 0;
\\const lit_float_x = 1.0 / 0.0;
\\const int_x = u32(1) / u32(0);
@@ -1247,49 +2097,65 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\export fn entry3() usize { return @sizeOf(@typeOf(int_x)); }
\\export fn entry4() usize { return @sizeOf(@typeOf(float_x)); }
,
- ".tmp_source.zig:1:21: error: division by zero",
- ".tmp_source.zig:2:25: error: division by zero",
- ".tmp_source.zig:3:22: error: division by zero",
- ".tmp_source.zig:4:26: error: division by zero");
+ ".tmp_source.zig:1:21: error: division by zero",
+ ".tmp_source.zig:2:25: error: division by zero",
+ ".tmp_source.zig:3:22: error: division by zero",
+ ".tmp_source.zig:4:26: error: division by zero",
+ );
-
- cases.add("normal string with newline",
+ cases.add(
+ "normal string with newline",
\\const foo = "a
\\b";
\\
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
- , ".tmp_source.zig:1:13: error: newline not allowed in string literal");
+ ,
+ ".tmp_source.zig:1:13: error: newline not allowed in string literal",
+ );
- cases.add("invalid comparison for function pointers",
+ cases.add(
+ "invalid comparison for function pointers",
\\fn foo() void {}
\\const invalid = foo > foo;
\\
\\export fn entry() usize { return @sizeOf(@typeOf(invalid)); }
- , ".tmp_source.zig:2:21: error: operator not allowed for type 'fn() void'");
+ ,
+ ".tmp_source.zig:2:21: error: operator not allowed for type 'fn() void'",
+ );
- cases.add("generic function instance with non-constant expression",
+ cases.add(
+ "generic function instance with non-constant expression",
\\fn foo(comptime x: i32, y: i32) i32 { return x + y; }
\\fn test1(a: i32, b: i32) i32 {
\\ return foo(a, b);
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(test1)); }
- , ".tmp_source.zig:3:16: error: unable to evaluate constant expression");
+ ,
+ ".tmp_source.zig:3:16: error: unable to evaluate constant expression",
+ );
- cases.add("assign null to non-nullable pointer",
- \\const a: &u8 = null;
+ cases.add(
+ "assign null to non-optional pointer",
+ \\const a: *u8 = null;
\\
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
- , ".tmp_source.zig:1:16: error: expected type '&u8', found '(null)'");
+ ,
+ ".tmp_source.zig:1:16: error: expected type '*u8', found '(null)'",
+ );
- cases.add("indexing an array of size zero",
+ cases.add(
+ "indexing an array of size zero",
\\const array = []u8{};
\\export fn foo() void {
\\ const pointer = &array[0];
\\}
- , ".tmp_source.zig:3:27: error: index 0 outside array of size 0");
+ ,
+ ".tmp_source.zig:3:27: error: index 0 outside array of size 0",
+ );
- cases.add("compile time division by zero",
+ cases.add(
+ "compile time division by zero",
\\const y = foo(0);
\\fn foo(x: u32) u32 {
\\ return 1 / x;
@@ -1297,17 +2163,431 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
,
- ".tmp_source.zig:3:14: error: division by zero",
- ".tmp_source.zig:1:14: note: called from here");
+ ".tmp_source.zig:3:14: error: division by zero",
+ ".tmp_source.zig:1:14: note: called from here",
+ );
- cases.add("branch on undefined value",
+ cases.add(
+ "branch on undefined value",
\\const x = if (undefined) true else false;
\\
\\export fn entry() usize { return @sizeOf(@typeOf(x)); }
- , ".tmp_source.zig:1:15: error: use of undefined value");
+ ,
+ ".tmp_source.zig:1:15: error: use of undefined value",
+ );
+ cases.add(
+ "div on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a / a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
- cases.add("endless loop in function evaluation",
+ cases.add(
+ "div assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a /= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "mod on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a % a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "mod assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a %= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "add on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a + a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "add assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a += a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "add wrap on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a +% a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "add wrap assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a +%= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "sub on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a - a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "sub assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a -= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "sub wrap on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a -% a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "sub wrap assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a -%= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "mult on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a * a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "mult assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a *= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "mult wrap on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a *% a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "mult wrap assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a *%= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "shift left on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a << 2;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "shift left assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a <<= 2;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "shift right on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a >> 2;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "shift left assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a >>= 2;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin and on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a & a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin and assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a &= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin or on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a | a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin or assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a |= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin xor on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a ^ a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin xor assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a ^= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "equal on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a == a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "not equal on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a != a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "greater than on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a > a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "greater than equal on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a >= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "less than on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a < a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "less than equal on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a <= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "and on undefined value",
+ \\comptime {
+ \\ var a: bool = undefined;
+ \\ _ = a and a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "or on undefined value",
+ \\comptime {
+ \\ var a: bool = undefined;
+ \\ _ = a or a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "negate on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = -a;
+ \\}
+ ,
+ ".tmp_source.zig:3:10: error: use of undefined value",
+ );
+
+ cases.add(
+ "negate wrap on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = -%a;
+ \\}
+ ,
+ ".tmp_source.zig:3:11: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin not on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = ~a;
+ \\}
+ ,
+ ".tmp_source.zig:3:10: error: use of undefined value",
+ );
+
+ cases.add(
+ "bool not on undefined value",
+ \\comptime {
+ \\ var a: bool = undefined;
+ \\ _ = !a;
+ \\}
+ ,
+ ".tmp_source.zig:3:10: error: use of undefined value",
+ );
+
+ cases.add(
+ "orelse on undefined value",
+ \\comptime {
+ \\ var a: ?bool = undefined;
+ \\ _ = a orelse false;
+ \\}
+ ,
+ ".tmp_source.zig:3:11: error: use of undefined value",
+ );
+
+ cases.add(
+ "catch on undefined value",
+ \\comptime {
+ \\ var a: error!bool = undefined;
+ \\ _ = a catch |err| false;
+ \\}
+ ,
+ ".tmp_source.zig:3:11: error: use of undefined value",
+ );
+
+ cases.add(
+ "deref on undefined value",
+ \\comptime {
+ \\ var a: *u8 = undefined;
+ \\ _ = a.*;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "endless loop in function evaluation",
\\const seventh_fib_number = fibbonaci(7);
\\fn fibbonaci(x: i32) i32 {
\\ return fibbonaci(x - 1) + fibbonaci(x - 2);
@@ -1315,16 +2595,22 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(seventh_fib_number)); }
,
- ".tmp_source.zig:3:21: error: evaluation exceeded 1000 backwards branches",
- ".tmp_source.zig:3:21: note: called from here");
+ ".tmp_source.zig:3:21: error: evaluation exceeded 1000 backwards branches",
+ ".tmp_source.zig:3:21: note: called from here",
+ );
- cases.add("@embedFile with bogus file",
- \\const resource = @embedFile("bogus.txt");
+ cases.add(
+ "@embedFile with bogus file",
+ \\const resource = @embedFile("bogus.txt",);
\\
\\export fn entry() usize { return @sizeOf(@typeOf(resource)); }
- , ".tmp_source.zig:1:29: error: unable to find '", "bogus.txt'");
+ ,
+ ".tmp_source.zig:1:29: error: unable to find '",
+ "bogus.txt'",
+ );
- cases.add("non-const expression in struct literal outside function",
+ cases.add(
+ "non-const expression in struct literal outside function",
\\const Foo = struct {
\\ x: i32,
\\};
@@ -1332,9 +2618,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\extern fn get_it() i32;
\\
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
- , ".tmp_source.zig:4:21: error: unable to evaluate constant expression");
+ ,
+ ".tmp_source.zig:4:21: error: unable to evaluate constant expression",
+ );
- cases.add("non-const expression function call with struct return value outside function",
+ cases.add(
+ "non-const expression function call with struct return value outside function",
\\const Foo = struct {
\\ x: i32,
\\};
@@ -1347,19 +2636,24 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
,
- ".tmp_source.zig:6:24: error: unable to evaluate constant expression",
- ".tmp_source.zig:4:17: note: called from here");
+ ".tmp_source.zig:6:24: error: unable to evaluate constant expression",
+ ".tmp_source.zig:4:17: note: called from here",
+ );
- cases.add("undeclared identifier error should mark fn as impure",
+ cases.add(
+ "undeclared identifier error should mark fn as impure",
\\export fn foo() void {
\\ test_a_thing();
\\}
\\fn test_a_thing() void {
\\ bad_fn_call();
\\}
- , ".tmp_source.zig:5:5: error: use of undeclared identifier 'bad_fn_call'");
+ ,
+ ".tmp_source.zig:5:5: error: use of undeclared identifier 'bad_fn_call'",
+ );
- cases.add("illegal comparison of types",
+ cases.add(
+ "illegal comparison of types",
\\fn bad_eql_1(a: []u8, b: []u8) bool {
\\ return a == b;
\\}
@@ -1367,17 +2661,19 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ One: void,
\\ Two: i32,
\\};
- \\fn bad_eql_2(a: &const EnumWithData, b: &const EnumWithData) bool {
- \\ return *a == *b;
+ \\fn bad_eql_2(a: *const EnumWithData, b: *const EnumWithData) bool {
+ \\ return a.* == b.*;
\\}
\\
\\export fn entry1() usize { return @sizeOf(@typeOf(bad_eql_1)); }
\\export fn entry2() usize { return @sizeOf(@typeOf(bad_eql_2)); }
,
- ".tmp_source.zig:2:14: error: operator not allowed for type '[]u8'",
- ".tmp_source.zig:9:15: error: operator not allowed for type 'EnumWithData'");
+ ".tmp_source.zig:2:14: error: operator not allowed for type '[]u8'",
+ ".tmp_source.zig:9:16: error: operator not allowed for type 'EnumWithData'",
+ );
- cases.add("non-const switch number literal",
+ cases.add(
+ "non-const switch number literal",
\\export fn foo() void {
\\ const x = switch (bar()) {
\\ 1, 2 => 1,
@@ -1388,25 +2684,34 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\fn bar() i32 {
\\ return 2;
\\}
- , ".tmp_source.zig:2:15: error: unable to infer expression type");
+ ,
+ ".tmp_source.zig:2:15: error: unable to infer expression type",
+ );
- cases.add("atomic orderings of cmpxchg - failure stricter than success",
+ cases.add(
+ "atomic orderings of cmpxchg - failure stricter than success",
\\const AtomicOrder = @import("builtin").AtomicOrder;
\\export fn f() void {
\\ var x: i32 = 1234;
- \\ while (!@cmpxchg(&x, 1234, 5678, AtomicOrder.Monotonic, AtomicOrder.SeqCst)) {}
+ \\ while (!@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.Monotonic, AtomicOrder.SeqCst)) {}
\\}
- , ".tmp_source.zig:4:72: error: failure atomic ordering must be no stricter than success");
+ ,
+ ".tmp_source.zig:4:81: error: failure atomic ordering must be no stricter than success",
+ );
- cases.add("atomic orderings of cmpxchg - success Monotonic or stricter",
+ cases.add(
+ "atomic orderings of cmpxchg - success Monotonic or stricter",
\\const AtomicOrder = @import("builtin").AtomicOrder;
\\export fn f() void {
\\ var x: i32 = 1234;
- \\ while (!@cmpxchg(&x, 1234, 5678, AtomicOrder.Unordered, AtomicOrder.Unordered)) {}
+ \\ while (!@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.Unordered, AtomicOrder.Unordered)) {}
\\}
- , ".tmp_source.zig:4:49: error: success atomic ordering must be Monotonic or stricter");
+ ,
+ ".tmp_source.zig:4:58: error: success atomic ordering must be Monotonic or stricter",
+ );
- cases.add("negation overflow in function evaluation",
+ cases.add(
+ "negation overflow in function evaluation",
\\const y = neg(-128);
\\fn neg(x: i8) i8 {
\\ return -x;
@@ -1414,10 +2719,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
,
- ".tmp_source.zig:3:12: error: negation caused overflow",
- ".tmp_source.zig:1:14: note: called from here");
+ ".tmp_source.zig:3:12: error: negation caused overflow",
+ ".tmp_source.zig:1:14: note: called from here",
+ );
- cases.add("add overflow in function evaluation",
+ cases.add(
+ "add overflow in function evaluation",
\\const y = add(65530, 10);
\\fn add(a: u16, b: u16) u16 {
\\ return a + b;
@@ -1425,11 +2732,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
,
- ".tmp_source.zig:3:14: error: operation caused overflow",
- ".tmp_source.zig:1:14: note: called from here");
+ ".tmp_source.zig:3:14: error: operation caused overflow",
+ ".tmp_source.zig:1:14: note: called from here",
+ );
-
- cases.add("sub overflow in function evaluation",
+ cases.add(
+ "sub overflow in function evaluation",
\\const y = sub(10, 20);
\\fn sub(a: u16, b: u16) u16 {
\\ return a - b;
@@ -1437,10 +2745,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
,
- ".tmp_source.zig:3:14: error: operation caused overflow",
- ".tmp_source.zig:1:14: note: called from here");
+ ".tmp_source.zig:3:14: error: operation caused overflow",
+ ".tmp_source.zig:1:14: note: called from here",
+ );
- cases.add("mul overflow in function evaluation",
+ cases.add(
+ "mul overflow in function evaluation",
\\const y = mul(300, 6000);
\\fn mul(a: u16, b: u16) u16 {
\\ return a * b;
@@ -1448,58 +2758,78 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
,
- ".tmp_source.zig:3:14: error: operation caused overflow",
- ".tmp_source.zig:1:14: note: called from here");
+ ".tmp_source.zig:3:14: error: operation caused overflow",
+ ".tmp_source.zig:1:14: note: called from here",
+ );
- cases.add("truncate sign mismatch",
+ cases.add(
+ "truncate sign mismatch",
\\fn f() i8 {
\\ const x: u32 = 10;
\\ return @truncate(i8, x);
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(f)); }
- , ".tmp_source.zig:3:26: error: expected signed integer type, found 'u32'");
+ ,
+ ".tmp_source.zig:3:26: error: expected signed integer type, found 'u32'",
+ );
- cases.add("try in function with non error return type",
+ cases.add(
+ "try in function with non error return type",
\\export fn f() void {
\\ try something();
\\}
\\fn something() error!void { }
,
- ".tmp_source.zig:2:5: error: expected type 'void', found 'error'");
+ ".tmp_source.zig:2:5: error: expected type 'void', found 'error'",
+ );
- cases.add("invalid pointer for var type",
+ cases.add(
+ "invalid pointer for var type",
\\extern fn ext() usize;
\\var bytes: [ext()]u8 = undefined;
\\export fn f() void {
\\ for (bytes) |*b, i| {
- \\ *b = u8(i);
+ \\ b.* = u8(i);
\\ }
\\}
- , ".tmp_source.zig:2:13: error: unable to evaluate constant expression");
+ ,
+ ".tmp_source.zig:2:13: error: unable to evaluate constant expression",
+ );
- cases.add("export function with comptime parameter",
+ cases.add(
+ "export function with comptime parameter",
\\export fn foo(comptime x: i32, y: i32) i32{
\\ return x + y;
\\}
- , ".tmp_source.zig:1:15: error: comptime parameter not allowed in function with calling convention 'ccc'");
+ ,
+ ".tmp_source.zig:1:15: error: comptime parameter not allowed in function with calling convention 'ccc'",
+ );
- cases.add("extern function with comptime parameter",
+ cases.add(
+ "extern function with comptime parameter",
\\extern fn foo(comptime x: i32, y: i32) i32;
\\fn f() i32 {
\\ return foo(1, 2);
\\}
\\export fn entry() usize { return @sizeOf(@typeOf(f)); }
- , ".tmp_source.zig:1:15: error: comptime parameter not allowed in function with calling convention 'ccc'");
+ ,
+ ".tmp_source.zig:1:15: error: comptime parameter not allowed in function with calling convention 'ccc'",
+ );
- cases.add("convert fixed size array to slice with invalid size",
+ cases.add(
+ "convert fixed size array to slice with invalid size",
\\export fn f() void {
\\ var array: [5]u8 = undefined;
- \\ var foo = ([]const u32)(array)[0];
+ \\ var foo = @bytesToSlice(u32, array)[0];
\\}
- , ".tmp_source.zig:3:28: error: unable to convert [5]u8 to []const u32: size mismatch");
+ ,
+ ".tmp_source.zig:3:15: error: unable to convert [5]u8 to []align(1) const u32: size mismatch",
+ ".tmp_source.zig:3:29: note: u32 has size 4; remaining bytes: 1",
+ );
- cases.add("non-pure function returns type",
+ cases.add(
+ "non-pure function returns type",
\\var a: u32 = 0;
\\pub fn List(comptime T: type) type {
\\ a += 1;
@@ -1518,56 +2848,77 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var list: List(i32) = undefined;
\\ list.length = 10;
\\}
- , ".tmp_source.zig:3:7: error: unable to evaluate constant expression",
- ".tmp_source.zig:16:19: note: called from here");
+ ,
+ ".tmp_source.zig:3:7: error: unable to evaluate constant expression",
+ ".tmp_source.zig:16:19: note: called from here",
+ );
- cases.add("bogus method call on slice",
+ cases.add(
+ "bogus method call on slice",
\\var self = "aoeu";
\\fn f(m: []const u8) void {
\\ m.copy(u8, self[0..], m);
\\}
\\export fn entry() usize { return @sizeOf(@typeOf(f)); }
- , ".tmp_source.zig:3:6: error: no member named 'copy' in '[]const u8'");
+ ,
+ ".tmp_source.zig:3:6: error: no member named 'copy' in '[]const u8'",
+ );
- cases.add("wrong number of arguments for method fn call",
+ cases.add(
+ "wrong number of arguments for method fn call",
\\const Foo = struct {
- \\ fn method(self: &const Foo, a: i32) void {}
+ \\ fn method(self: *const Foo, a: i32) void {}
\\};
- \\fn f(foo: &const Foo) void {
+ \\fn f(foo: *const Foo) void {
\\
\\ foo.method(1, 2);
\\}
\\export fn entry() usize { return @sizeOf(@typeOf(f)); }
- , ".tmp_source.zig:6:15: error: expected 2 arguments, found 3");
+ ,
+ ".tmp_source.zig:6:15: error: expected 2 arguments, found 3",
+ );
- cases.add("assign through constant pointer",
+ cases.add(
+ "assign through constant pointer",
\\export fn f() void {
\\ var cstr = c"Hat";
\\ cstr[0] = 'W';
\\}
- , ".tmp_source.zig:3:11: error: cannot assign to constant");
+ ,
+ ".tmp_source.zig:3:11: error: cannot assign to constant",
+ );
- cases.add("assign through constant slice",
+ cases.add(
+ "assign through constant slice",
\\export fn f() void {
\\ var cstr: []const u8 = "Hat";
\\ cstr[0] = 'W';
\\}
- , ".tmp_source.zig:3:11: error: cannot assign to constant");
+ ,
+ ".tmp_source.zig:3:11: error: cannot assign to constant",
+ );
- cases.add("main function with bogus args type",
+ cases.add(
+ "main function with bogus args type",
\\pub fn main(args: [][]bogus) !void {}
- , ".tmp_source.zig:1:23: error: use of undeclared identifier 'bogus'");
+ ,
+ ".tmp_source.zig:1:23: error: use of undeclared identifier 'bogus'",
+ );
- cases.add("for loop missing element param",
+ cases.add(
+ "for loop missing element param",
\\fn foo(blah: []u8) void {
\\ for (blah) { }
\\}
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
- , ".tmp_source.zig:2:5: error: for loop expression missing element parameter");
+ ,
+ ".tmp_source.zig:2:5: error: for loop expression missing element parameter",
+ );
- cases.add("misspelled type with pointer only reference",
+ cases.add(
+ "misspelled type with pointer only reference",
\\const JasonHM = u8;
- \\const JasonList = &JsonNode;
+ \\const JasonList = *JsonNode;
\\
\\const JsonOA = union(enum) {
\\ JSONArray: JsonList,
@@ -1596,9 +2947,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
- , ".tmp_source.zig:5:16: error: use of undeclared identifier 'JsonList'");
+ ,
+ ".tmp_source.zig:5:16: error: use of undeclared identifier 'JsonList'",
+ );
- cases.add("method call with first arg type primitive",
+ cases.add(
+ "method call with first arg type primitive",
\\const Foo = struct {
\\ x: i32,
\\
@@ -1614,14 +2968,17 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\ derp.init();
\\}
- , ".tmp_source.zig:14:5: error: expected type 'i32', found '&const Foo'");
+ ,
+ ".tmp_source.zig:14:5: error: expected type 'i32', found 'Foo'",
+ );
- cases.add("method call with first arg type wrong container",
+ cases.add(
+ "method call with first arg type wrong container",
\\pub const List = struct {
\\ len: usize,
- \\ allocator: &Allocator,
+ \\ allocator: *Allocator,
\\
- \\ pub fn init(allocator: &Allocator) List {
+ \\ pub fn init(allocator: *Allocator) List {
\\ return List {
\\ .len = 0,
\\ .allocator = allocator,
@@ -1641,26 +2998,33 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var x = List.init(&global_allocator);
\\ x.init();
\\}
- , ".tmp_source.zig:23:5: error: expected type '&Allocator', found '&List'");
+ ,
+ ".tmp_source.zig:23:5: error: expected type '*Allocator', found '*List'",
+ );
- cases.add("binary not on number literal",
+ cases.add(
+ "binary not on number literal",
\\const TINY_QUANTUM_SHIFT = 4;
\\const TINY_QUANTUM_SIZE = 1 << TINY_QUANTUM_SHIFT;
\\var block_aligned_stuff: usize = (4 + TINY_QUANTUM_SIZE) & ~(TINY_QUANTUM_SIZE - 1);
\\
\\export fn entry() usize { return @sizeOf(@typeOf(block_aligned_stuff)); }
- , ".tmp_source.zig:3:60: error: unable to perform binary not operation on type '(integer literal)'");
+ ,
+ ".tmp_source.zig:3:60: error: unable to perform binary not operation on type 'comptime_int'",
+ );
cases.addCase(x: {
- const tc = cases.create("multiple files with private function error",
- \\const foo = @import("foo.zig");
+ const tc = cases.create(
+ "multiple files with private function error",
+ \\const foo = @import("foo.zig",);
\\
\\export fn callPrivFunction() void {
\\ foo.privateFunction();
\\}
,
".tmp_source.zig:4:8: error: 'privateFunction' is private",
- "foo.zig:1:1: note: declared here");
+ "foo.zig:1:1: note: declared here",
+ );
tc.addSourceFile("foo.zig",
\\fn privateFunction() void { }
@@ -1669,14 +3033,18 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
break :x tc;
});
- cases.add("container init with non-type",
+ cases.add(
+ "container init with non-type",
\\const zero: i32 = 0;
\\const a = zero{1};
\\
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
- , ".tmp_source.zig:2:11: error: expected type, found 'i32'");
+ ,
+ ".tmp_source.zig:2:11: error: expected type, found 'i32'",
+ );
- cases.add("assign to constant field",
+ cases.add(
+ "assign to constant field",
\\const Foo = struct {
\\ field: i32,
\\};
@@ -1684,15 +3052,18 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ const f = Foo {.field = 1234,};
\\ f.field = 0;
\\}
- , ".tmp_source.zig:6:13: error: cannot assign to constant");
+ ,
+ ".tmp_source.zig:6:13: error: cannot assign to constant",
+ );
- cases.add("return from defer expression",
+ cases.add(
+ "return from defer expression",
\\pub fn testTrickyDefer() !void {
\\ defer canFail() catch {};
\\
\\ defer try canFail();
\\
- \\ const a = maybeInt() ?? return;
+ \\ const a = maybeInt() orelse return;
\\}
\\
\\fn canFail() error!void { }
@@ -1702,9 +3073,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(testTrickyDefer)); }
- , ".tmp_source.zig:4:11: error: cannot return from defer expression");
+ ,
+ ".tmp_source.zig:4:11: error: cannot return from defer expression",
+ );
- cases.add("attempt to access var args out of bounds",
+ cases.add(
+ "attempt to access var args out of bounds",
\\fn add(args: ...) i32 {
\\ return args[0] + args[1];
\\}
@@ -1715,10 +3089,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:2:26: error: index 1 outside argument list of size 1",
- ".tmp_source.zig:6:15: note: called from here");
+ ".tmp_source.zig:2:26: error: index 1 outside argument list of size 1",
+ ".tmp_source.zig:6:15: note: called from here",
+ );
- cases.add("pass integer literal to var args",
+ cases.add(
+ "pass integer literal to var args",
\\fn add(args: ...) i32 {
\\ var sum = i32(0);
\\ {comptime var i: usize = 0; inline while (i < args.len) : (i += 1) {
@@ -1732,32 +3108,44 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(bar)); }
- , ".tmp_source.zig:10:16: error: compiler bug: integer and float literals in var args function must be casted");
+ ,
+ ".tmp_source.zig:10:16: error: compiler bug: integer and float literals in var args function must be casted",
+ );
- cases.add("assign too big number to u16",
+ cases.add(
+ "assign too big number to u16",
\\export fn foo() void {
\\ var vga_mem: u16 = 0xB8000;
\\}
- , ".tmp_source.zig:2:24: error: integer value 753664 cannot be implicitly casted to type 'u16'");
+ ,
+ ".tmp_source.zig:2:24: error: integer value 753664 cannot be implicitly casted to type 'u16'",
+ );
- cases.add("global variable alignment non power of 2",
+ cases.add(
+ "global variable alignment non power of 2",
\\const some_data: [100]u8 align(3) = undefined;
\\export fn entry() usize { return @sizeOf(@typeOf(some_data)); }
- , ".tmp_source.zig:1:32: error: alignment value 3 is not a power of 2");
+ ,
+ ".tmp_source.zig:1:32: error: alignment value 3 is not a power of 2",
+ );
- cases.add("function alignment non power of 2",
+ cases.add(
+ "function alignment non power of 2",
\\extern fn foo() align(3) void;
\\export fn entry() void { return foo(); }
- , ".tmp_source.zig:1:23: error: alignment value 3 is not a power of 2");
+ ,
+ ".tmp_source.zig:1:23: error: alignment value 3 is not a power of 2",
+ );
- cases.add("compile log",
+ cases.add(
+ "compile log",
\\export fn foo() void {
- \\ comptime bar(12, "hi");
+ \\ comptime bar(12, "hi",);
\\}
\\fn bar(a: i32, b: []const u8) void {
- \\ @compileLog("begin");
+ \\ @compileLog("begin",);
\\ @compileLog("a", a, "b", b);
- \\ @compileLog("end");
+ \\ @compileLog("end",);
\\}
,
".tmp_source.zig:5:5: error: found compile log statement",
@@ -1765,27 +3153,32 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
".tmp_source.zig:6:5: error: found compile log statement",
".tmp_source.zig:2:17: note: called from here",
".tmp_source.zig:7:5: error: found compile log statement",
- ".tmp_source.zig:2:17: note: called from here");
+ ".tmp_source.zig:2:17: note: called from here",
+ );
- cases.add("casting bit offset pointer to regular pointer",
+ cases.add(
+ "casting bit offset pointer to regular pointer",
\\const BitField = packed struct {
\\ a: u3,
\\ b: u3,
\\ c: u2,
\\};
\\
- \\fn foo(bit_field: &const BitField) u3 {
+ \\fn foo(bit_field: *const BitField) u3 {
\\ return bar(&bit_field.b);
\\}
\\
- \\fn bar(x: &const u3) u3 {
- \\ return *x;
+ \\fn bar(x: *const u3) u3 {
+ \\ return x.*;
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
- , ".tmp_source.zig:8:26: error: expected type '&const u3', found '&align(1:3:6) const u3'");
+ ,
+ ".tmp_source.zig:8:26: error: expected type '*const u3', found '*align(1:3:6) const u3'",
+ );
- cases.add("referring to a struct that is invalid",
+ cases.add(
+ "referring to a struct that is invalid",
\\const UsbDeviceRequest = struct {
\\ Type: u8,
\\};
@@ -1798,10 +3191,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ if (!ok) unreachable;
\\}
,
- ".tmp_source.zig:10:14: error: unable to evaluate constant expression",
- ".tmp_source.zig:6:20: note: called from here");
+ ".tmp_source.zig:10:14: error: unable to evaluate constant expression",
+ ".tmp_source.zig:6:20: note: called from here",
+ );
- cases.add("control flow uses comptime var at runtime",
+ cases.add(
+ "control flow uses comptime var at runtime",
\\export fn foo() void {
\\ comptime var i = 0;
\\ while (i < 5) : (i += 1) {
@@ -1811,88 +3206,118 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\fn bar() void { }
,
- ".tmp_source.zig:3:5: error: control flow attempts to use compile-time variable at runtime",
- ".tmp_source.zig:3:24: note: compile-time variable assigned here");
+ ".tmp_source.zig:3:5: error: control flow attempts to use compile-time variable at runtime",
+ ".tmp_source.zig:3:24: note: compile-time variable assigned here",
+ );
- cases.add("ignored return value",
+ cases.add(
+ "ignored return value",
\\export fn foo() void {
\\ bar();
\\}
\\fn bar() i32 { return 0; }
- , ".tmp_source.zig:2:8: error: expression value is ignored");
+ ,
+ ".tmp_source.zig:2:8: error: expression value is ignored",
+ );
- cases.add("ignored assert-err-ok return value",
+ cases.add(
+ "ignored assert-err-ok return value",
\\export fn foo() void {
\\ bar() catch unreachable;
\\}
\\fn bar() error!i32 { return 0; }
- , ".tmp_source.zig:2:11: error: expression value is ignored");
+ ,
+ ".tmp_source.zig:2:11: error: expression value is ignored",
+ );
- cases.add("ignored statement value",
+ cases.add(
+ "ignored statement value",
\\export fn foo() void {
\\ 1;
\\}
- , ".tmp_source.zig:2:5: error: expression value is ignored");
+ ,
+ ".tmp_source.zig:2:5: error: expression value is ignored",
+ );
- cases.add("ignored comptime statement value",
+ cases.add(
+ "ignored comptime statement value",
\\export fn foo() void {
\\ comptime {1;}
\\}
- , ".tmp_source.zig:2:15: error: expression value is ignored");
+ ,
+ ".tmp_source.zig:2:15: error: expression value is ignored",
+ );
- cases.add("ignored comptime value",
+ cases.add(
+ "ignored comptime value",
\\export fn foo() void {
\\ comptime 1;
\\}
- , ".tmp_source.zig:2:5: error: expression value is ignored");
+ ,
+ ".tmp_source.zig:2:5: error: expression value is ignored",
+ );
- cases.add("ignored defered statement value",
+ cases.add(
+ "ignored defered statement value",
\\export fn foo() void {
\\ defer {1;}
\\}
- , ".tmp_source.zig:2:12: error: expression value is ignored");
+ ,
+ ".tmp_source.zig:2:12: error: expression value is ignored",
+ );
- cases.add("ignored defered function call",
+ cases.add(
+ "ignored defered function call",
\\export fn foo() void {
\\ defer bar();
\\}
\\fn bar() error!i32 { return 0; }
- , ".tmp_source.zig:2:14: error: expression value is ignored");
+ ,
+ ".tmp_source.zig:2:14: error: expression value is ignored",
+ );
- cases.add("dereference an array",
+ cases.add(
+ "dereference an array",
\\var s_buffer: [10]u8 = undefined;
\\pub fn pass(in: []u8) []u8 {
\\ var out = &s_buffer;
- \\ *out[0] = in[0];
- \\ return (*out)[0..1];
+ \\ out.*.* = in[0];
+ \\ return out.*[0..1];
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(pass)); }
- , ".tmp_source.zig:4:5: error: attempt to dereference non pointer type '[10]u8'");
+ ,
+ ".tmp_source.zig:4:10: error: attempt to dereference non pointer type '[10]u8'",
+ );
- cases.add("pass const ptr to mutable ptr fn",
+ cases.add(
+ "pass const ptr to mutable ptr fn",
\\fn foo() bool {
- \\ const a = ([]const u8)("a");
+ \\ const a = ([]const u8)("a",);
\\ const b = &a;
\\ return ptrEql(b, b);
\\}
- \\fn ptrEql(a: &[]const u8, b: &[]const u8) bool {
+ \\fn ptrEql(a: *[]const u8, b: *[]const u8) bool {
\\ return true;
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
- , ".tmp_source.zig:4:19: error: expected type '&[]const u8', found '&const []const u8'");
+ ,
+ ".tmp_source.zig:4:19: error: expected type '*[]const u8', found '*const []const u8'",
+ );
cases.addCase(x: {
- const tc = cases.create("export collision",
- \\const foo = @import("foo.zig");
+ const tc = cases.create(
+ "export collision",
+ \\const foo = @import("foo.zig",);
\\
\\export fn bar() usize {
\\ return foo.baz;
\\}
,
"foo.zig:1:8: error: exported symbol collision: 'bar'",
- ".tmp_source.zig:3:8: note: other symbol here");
+ ".tmp_source.zig:3:8: note: other symbol here",
+ );
tc.addSourceFile("foo.zig",
\\export fn bar() void {}
@@ -1902,35 +3327,28 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
break :x tc;
});
- cases.add("pass non-copyable type by value to function",
- \\const Point = struct { x: i32, y: i32, };
- \\fn foo(p: Point) void { }
- \\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
- , ".tmp_source.zig:2:11: error: type 'Point' is not copyable; cannot pass by value");
-
- cases.add("implicit cast from array to mutable slice",
+ cases.add(
+ "implicit cast from array to mutable slice",
\\var global_array: [10]i32 = undefined;
\\fn foo(param: []i32) void {}
\\export fn entry() void {
\\ foo(global_array);
\\}
- , ".tmp_source.zig:4:9: error: expected type '[]i32', found '[10]i32'");
+ ,
+ ".tmp_source.zig:4:9: error: expected type '[]i32', found '[10]i32'",
+ );
- cases.add("ptrcast to non-pointer",
- \\export fn entry(a: &i32) usize {
+ cases.add(
+ "ptrcast to non-pointer",
+ \\export fn entry(a: *i32) usize {
\\ return @ptrCast(usize, a);
\\}
- , ".tmp_source.zig:2:21: error: expected pointer, found 'usize'");
+ ,
+ ".tmp_source.zig:2:21: error: expected pointer, found 'usize'",
+ );
- cases.add("too many error values to cast to small integer",
- \\const Error = error { A, B, C, D, E, F, G, H };
- \\fn foo(e: Error) u2 {
- \\ return u2(e);
- \\}
- \\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
- , ".tmp_source.zig:3:14: error: too many error values to fit in 'u2'");
-
- cases.add("asm at compile time",
+ cases.add(
+ "asm at compile time",
\\comptime {
\\ doSomeAsm();
\\}
@@ -1942,48 +3360,66 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ \\.set aoeu, derp;
\\ );
\\}
- , ".tmp_source.zig:6:5: error: unable to evaluate constant expression");
+ ,
+ ".tmp_source.zig:6:5: error: unable to evaluate constant expression",
+ );
- cases.add("invalid member of builtin enum",
- \\const builtin = @import("builtin");
+ cases.add(
+ "invalid member of builtin enum",
+ \\const builtin = @import("builtin",);
\\export fn entry() void {
\\ const foo = builtin.Arch.x86;
\\}
- , ".tmp_source.zig:3:29: error: container 'Arch' has no member called 'x86'");
+ ,
+ ".tmp_source.zig:3:29: error: container 'Arch' has no member called 'x86'",
+ );
- cases.add("int to ptr of 0 bits",
+ cases.add(
+ "int to ptr of 0 bits",
\\export fn foo() void {
\\ var x: usize = 0x1000;
- \\ var y: &void = @intToPtr(&void, x);
+ \\ var y: *void = @intToPtr(*void, x);
\\}
- , ".tmp_source.zig:3:31: error: type '&void' has 0 bits and cannot store information");
+ ,
+ ".tmp_source.zig:3:30: error: type '*void' has 0 bits and cannot store information",
+ );
- cases.add("@fieldParentPtr - non struct",
+ cases.add(
+ "@fieldParentPtr - non struct",
\\const Foo = i32;
- \\export fn foo(a: &i32) &Foo {
+ \\export fn foo(a: *i32) *Foo {
\\ return @fieldParentPtr(Foo, "a", a);
\\}
- , ".tmp_source.zig:3:28: error: expected struct type, found 'i32'");
+ ,
+ ".tmp_source.zig:3:28: error: expected struct type, found 'i32'",
+ );
- cases.add("@fieldParentPtr - bad field name",
+ cases.add(
+ "@fieldParentPtr - bad field name",
\\const Foo = extern struct {
\\ derp: i32,
\\};
- \\export fn foo(a: &i32) &Foo {
+ \\export fn foo(a: *i32) *Foo {
\\ return @fieldParentPtr(Foo, "a", a);
\\}
- , ".tmp_source.zig:5:33: error: struct 'Foo' has no field 'a'");
+ ,
+ ".tmp_source.zig:5:33: error: struct 'Foo' has no field 'a'",
+ );
- cases.add("@fieldParentPtr - field pointer is not pointer",
+ cases.add(
+ "@fieldParentPtr - field pointer is not pointer",
\\const Foo = extern struct {
\\ a: i32,
\\};
- \\export fn foo(a: i32) &Foo {
+ \\export fn foo(a: i32) *Foo {
\\ return @fieldParentPtr(Foo, "a", a);
\\}
- , ".tmp_source.zig:5:38: error: expected pointer, found 'i32'");
+ ,
+ ".tmp_source.zig:5:38: error: expected pointer, found 'i32'",
+ );
- cases.add("@fieldParentPtr - comptime field ptr not based on struct",
+ cases.add(
+ "@fieldParentPtr - comptime field ptr not based on struct",
\\const Foo = struct {
\\ a: i32,
\\ b: i32,
@@ -1991,12 +3427,15 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\const foo = Foo { .a = 1, .b = 2, };
\\
\\comptime {
- \\ const field_ptr = @intToPtr(&i32, 0x1234);
+ \\ const field_ptr = @intToPtr(*i32, 0x1234);
\\ const another_foo_ptr = @fieldParentPtr(Foo, "b", field_ptr);
\\}
- , ".tmp_source.zig:9:55: error: pointer value not based on parent struct");
+ ,
+ ".tmp_source.zig:9:55: error: pointer value not based on parent struct",
+ );
- cases.add("@fieldParentPtr - comptime wrong field index",
+ cases.add(
+ "@fieldParentPtr - comptime wrong field index",
\\const Foo = struct {
\\ a: i32,
\\ b: i32,
@@ -2006,76 +3445,100 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\comptime {
\\ const another_foo_ptr = @fieldParentPtr(Foo, "b", &foo.a);
\\}
- , ".tmp_source.zig:8:29: error: field 'b' has index 1 but pointer value is index 0 of struct 'Foo'");
+ ,
+ ".tmp_source.zig:8:29: error: field 'b' has index 1 but pointer value is index 0 of struct 'Foo'",
+ );
- cases.add("@offsetOf - non struct",
+ cases.add(
+ "@offsetOf - non struct",
\\const Foo = i32;
\\export fn foo() usize {
- \\ return @offsetOf(Foo, "a");
+ \\ return @offsetOf(Foo, "a",);
\\}
- , ".tmp_source.zig:3:22: error: expected struct type, found 'i32'");
+ ,
+ ".tmp_source.zig:3:22: error: expected struct type, found 'i32'",
+ );
- cases.add("@offsetOf - bad field name",
+ cases.add(
+ "@offsetOf - bad field name",
\\const Foo = struct {
\\ derp: i32,
\\};
\\export fn foo() usize {
- \\ return @offsetOf(Foo, "a");
+ \\ return @offsetOf(Foo, "a",);
\\}
- , ".tmp_source.zig:5:27: error: struct 'Foo' has no field 'a'");
+ ,
+ ".tmp_source.zig:5:27: error: struct 'Foo' has no field 'a'",
+ );
- cases.addExe("missing main fn in executable",
+ cases.addExe(
+ "missing main fn in executable",
\\
- , "error: no member named 'main' in '");
+ ,
+ "error: no member named 'main' in '",
+ );
- cases.addExe("private main fn",
+ cases.addExe(
+ "private main fn",
\\fn main() void {}
,
"error: 'main' is private",
- ".tmp_source.zig:1:1: note: declared here");
+ ".tmp_source.zig:1:1: note: declared here",
+ );
- cases.add("setting a section on an extern variable",
+ cases.add(
+ "setting a section on an extern variable",
\\extern var foo: i32 section(".text2");
\\export fn entry() i32 {
\\ return foo;
\\}
,
- ".tmp_source.zig:1:29: error: cannot set section of external variable 'foo'");
+ ".tmp_source.zig:1:29: error: cannot set section of external variable 'foo'",
+ );
- cases.add("setting a section on a local variable",
+ cases.add(
+ "setting a section on a local variable",
\\export fn entry() i32 {
\\ var foo: i32 section(".text2") = 1234;
\\ return foo;
\\}
,
- ".tmp_source.zig:2:26: error: cannot set section of local variable 'foo'");
+ ".tmp_source.zig:2:26: error: cannot set section of local variable 'foo'",
+ );
- cases.add("setting a section on an extern fn",
+ cases.add(
+ "setting a section on an extern fn",
\\extern fn foo() section(".text2") void;
\\export fn entry() void {
\\ foo();
\\}
,
- ".tmp_source.zig:1:25: error: cannot set section of external function 'foo'");
+ ".tmp_source.zig:1:25: error: cannot set section of external function 'foo'",
+ );
- cases.add("returning address of local variable - simple",
- \\export fn foo() &i32 {
+ cases.add(
+ "returning address of local variable - simple",
+ \\export fn foo() *i32 {
\\ var a: i32 = undefined;
\\ return &a;
\\}
,
- ".tmp_source.zig:3:13: error: function returns address of local variable");
+ ".tmp_source.zig:3:13: error: function returns address of local variable",
+ );
- cases.add("returning address of local variable - phi",
- \\export fn foo(c: bool) &i32 {
+ cases.add(
+ "returning address of local variable - phi",
+ \\export fn foo(c: bool) *i32 {
\\ var a: i32 = undefined;
\\ var b: i32 = undefined;
\\ return if (c) &a else &b;
\\}
,
- ".tmp_source.zig:4:12: error: function returns address of local variable");
+ ".tmp_source.zig:4:12: error: function returns address of local variable",
+ );
- cases.add("inner struct member shadowing outer struct member",
+ cases.add(
+ "inner struct member shadowing outer struct member",
\\fn A() type {
\\ return struct {
\\ b: B(),
@@ -2097,57 +3560,71 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:9:17: error: redefinition of 'Self'",
- ".tmp_source.zig:5:9: note: previous definition is here");
+ ".tmp_source.zig:5:9: note: previous definition is here",
+ );
- cases.add("while expected bool, got nullable",
+ cases.add(
+ "while expected bool, got optional",
\\export fn foo() void {
\\ while (bar()) {}
\\}
\\fn bar() ?i32 { return 1; }
,
- ".tmp_source.zig:2:15: error: expected type 'bool', found '?i32'");
+ ".tmp_source.zig:2:15: error: expected type 'bool', found '?i32'",
+ );
- cases.add("while expected bool, got error union",
+ cases.add(
+ "while expected bool, got error union",
\\export fn foo() void {
\\ while (bar()) {}
\\}
\\fn bar() error!i32 { return 1; }
,
- ".tmp_source.zig:2:15: error: expected type 'bool', found 'error!i32'");
+ ".tmp_source.zig:2:15: error: expected type 'bool', found 'error!i32'",
+ );
- cases.add("while expected nullable, got bool",
+ cases.add(
+ "while expected optional, got bool",
\\export fn foo() void {
\\ while (bar()) |x| {}
\\}
\\fn bar() bool { return true; }
,
- ".tmp_source.zig:2:15: error: expected nullable type, found 'bool'");
+ ".tmp_source.zig:2:15: error: expected optional type, found 'bool'",
+ );
- cases.add("while expected nullable, got error union",
+ cases.add(
+ "while expected optional, got error union",
\\export fn foo() void {
\\ while (bar()) |x| {}
\\}
\\fn bar() error!i32 { return 1; }
,
- ".tmp_source.zig:2:15: error: expected nullable type, found 'error!i32'");
+ ".tmp_source.zig:2:15: error: expected optional type, found 'error!i32'",
+ );
- cases.add("while expected error union, got bool",
+ cases.add(
+ "while expected error union, got bool",
\\export fn foo() void {
\\ while (bar()) |x| {} else |err| {}
\\}
\\fn bar() bool { return true; }
,
- ".tmp_source.zig:2:15: error: expected error union type, found 'bool'");
+ ".tmp_source.zig:2:15: error: expected error union type, found 'bool'",
+ );
- cases.add("while expected error union, got nullable",
+ cases.add(
+ "while expected error union, got optional",
\\export fn foo() void {
\\ while (bar()) |x| {} else |err| {}
\\}
\\fn bar() ?i32 { return 1; }
,
- ".tmp_source.zig:2:15: error: expected error union type, found '?i32'");
+ ".tmp_source.zig:2:15: error: expected error union type, found '?i32'",
+ );
- cases.add("inline fn calls itself indirectly",
+ cases.add(
+ "inline fn calls itself indirectly",
\\export fn foo() void {
\\ bar();
\\}
@@ -2161,91 +3638,113 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\extern fn quux() void;
,
- ".tmp_source.zig:4:8: error: unable to inline function");
+ ".tmp_source.zig:4:8: error: unable to inline function",
+ );
- cases.add("save reference to inline function",
+ cases.add(
+ "save reference to inline function",
\\export fn foo() void {
\\ quux(@ptrToInt(bar));
\\}
\\inline fn bar() void { }
\\extern fn quux(usize) void;
,
- ".tmp_source.zig:4:8: error: unable to inline function");
+ ".tmp_source.zig:4:8: error: unable to inline function",
+ );
- cases.add("signed integer division",
+ cases.add(
+ "signed integer division",
\\export fn foo(a: i32, b: i32) i32 {
\\ return a / b;
\\}
,
- ".tmp_source.zig:2:14: error: division with 'i32' and 'i32': signed integers must use @divTrunc, @divFloor, or @divExact");
+ ".tmp_source.zig:2:14: error: division with 'i32' and 'i32': signed integers must use @divTrunc, @divFloor, or @divExact",
+ );
- cases.add("signed integer remainder division",
+ cases.add(
+ "signed integer remainder division",
\\export fn foo(a: i32, b: i32) i32 {
\\ return a % b;
\\}
,
- ".tmp_source.zig:2:14: error: remainder division with 'i32' and 'i32': signed integers and floats must use @rem or @mod");
+ ".tmp_source.zig:2:14: error: remainder division with 'i32' and 'i32': signed integers and floats must use @rem or @mod",
+ );
- cases.add("cast negative value to unsigned integer",
+ cases.add(
+ "cast negative value to unsigned integer",
\\comptime {
\\ const value: i32 = -1;
- \\ const unsigned = u32(value);
+ \\ const unsigned = @intCast(u32, value);
\\}
,
- ".tmp_source.zig:3:25: error: attempt to cast negative value to unsigned integer");
+ ".tmp_source.zig:3:22: error: attempt to cast negative value to unsigned integer",
+ );
- cases.add("compile-time division by zero",
+ cases.add(
+ "compile-time division by zero",
\\comptime {
\\ const a: i32 = 1;
\\ const b: i32 = 0;
\\ const c = a / b;
\\}
,
- ".tmp_source.zig:4:17: error: division by zero");
+ ".tmp_source.zig:4:17: error: division by zero",
+ );
- cases.add("compile-time remainder division by zero",
+ cases.add(
+ "compile-time remainder division by zero",
\\comptime {
\\ const a: i32 = 1;
\\ const b: i32 = 0;
\\ const c = a % b;
\\}
,
- ".tmp_source.zig:4:17: error: division by zero");
+ ".tmp_source.zig:4:17: error: division by zero",
+ );
- cases.add("compile-time integer cast truncates bits",
+ cases.add(
+ "compile-time integer cast truncates bits",
\\comptime {
\\ const spartan_count: u16 = 300;
- \\ const byte = u8(spartan_count);
+ \\ const byte = @intCast(u8, spartan_count);
\\}
,
- ".tmp_source.zig:3:20: error: cast from 'u16' to 'u8' truncates bits");
+ ".tmp_source.zig:3:18: error: cast from 'u16' to 'u8' truncates bits",
+ );
- cases.add("@setRuntimeSafety twice for same scope",
+ cases.add(
+ "@setRuntimeSafety twice for same scope",
\\export fn foo() void {
\\ @setRuntimeSafety(false);
\\ @setRuntimeSafety(false);
\\}
,
".tmp_source.zig:3:5: error: runtime safety set twice for same scope",
- ".tmp_source.zig:2:5: note: first set here");
+ ".tmp_source.zig:2:5: note: first set here",
+ );
- cases.add("@setFloatMode twice for same scope",
+ cases.add(
+ "@setFloatMode twice for same scope",
\\export fn foo() void {
\\ @setFloatMode(this, @import("builtin").FloatMode.Optimized);
\\ @setFloatMode(this, @import("builtin").FloatMode.Optimized);
\\}
,
".tmp_source.zig:3:5: error: float mode set twice for same scope",
- ".tmp_source.zig:2:5: note: first set here");
+ ".tmp_source.zig:2:5: note: first set here",
+ );
- cases.add("array access of type",
+ cases.add(
+ "array access of type",
\\export fn foo() void {
\\ var b: u8[40] = undefined;
\\}
,
- ".tmp_source.zig:2:14: error: array access of non-array type 'type'");
+ ".tmp_source.zig:2:14: error: array access of non-array type 'type'",
+ );
- cases.add("cannot break out of defer expression",
+ cases.add(
+ "cannot break out of defer expression",
\\export fn foo() void {
\\ while (true) {
\\ defer {
@@ -2254,9 +3753,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ }
\\}
,
- ".tmp_source.zig:4:13: error: cannot break out of defer expression");
+ ".tmp_source.zig:4:13: error: cannot break out of defer expression",
+ );
- cases.add("cannot continue out of defer expression",
+ cases.add(
+ "cannot continue out of defer expression",
\\export fn foo() void {
\\ while (true) {
\\ defer {
@@ -2265,9 +3766,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ }
\\}
,
- ".tmp_source.zig:4:13: error: cannot continue out of defer expression");
+ ".tmp_source.zig:4:13: error: cannot continue out of defer expression",
+ );
- cases.add("calling a var args function only known at runtime",
+ cases.add(
+ "calling a var args function only known at runtime",
\\var foos = []fn(...) void { foo1, foo2 };
\\
\\fn foo1(args: ...) void {}
@@ -2277,9 +3780,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ foos[0]();
\\}
,
- ".tmp_source.zig:7:9: error: calling a generic function requires compile-time known function value");
+ ".tmp_source.zig:7:9: error: calling a generic function requires compile-time known function value",
+ );
- cases.add("calling a generic function only known at runtime",
+ cases.add(
+ "calling a generic function only known at runtime",
\\var foos = []fn(var) void { foo1, foo2 };
\\
\\fn foo1(arg: var) void {}
@@ -2289,10 +3794,12 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ foos[0](true);
\\}
,
- ".tmp_source.zig:7:9: error: calling a generic function requires compile-time known function value");
+ ".tmp_source.zig:7:9: error: calling a generic function requires compile-time known function value",
+ );
- cases.add("@compileError shows traceback of references that caused it",
- \\const foo = @compileError("aoeu");
+ cases.add(
+ "@compileError shows traceback of references that caused it",
+ \\const foo = @compileError("aoeu",);
\\
\\const bar = baz + foo;
\\const baz = 1;
@@ -2303,9 +3810,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
,
".tmp_source.zig:1:13: error: aoeu",
".tmp_source.zig:3:19: note: referenced here",
- ".tmp_source.zig:7:12: note: referenced here");
+ ".tmp_source.zig:7:12: note: referenced here",
+ );
- cases.add("instantiating an undefined value for an invalid struct that contains itself",
+ cases.add(
+ "instantiating an undefined value for an invalid struct that contains itself",
\\const Foo = struct {
\\ x: Foo,
\\};
@@ -2316,73 +3825,93 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ return @sizeOf(@typeOf(foo.x));
\\}
,
- ".tmp_source.zig:1:13: error: struct 'Foo' contains itself");
+ ".tmp_source.zig:1:13: error: struct 'Foo' contains itself",
+ );
- cases.add("float literal too large error",
+ cases.add(
+ "float literal too large error",
\\comptime {
\\ const a = 0x1.0p16384;
\\}
,
- ".tmp_source.zig:2:15: error: float literal out of range of any type");
+ ".tmp_source.zig:2:15: error: float literal out of range of any type",
+ );
- cases.add("float literal too small error (denormal)",
+ cases.add(
+ "float literal too small error (denormal)",
\\comptime {
\\ const a = 0x1.0p-16384;
\\}
,
- ".tmp_source.zig:2:15: error: float literal out of range of any type");
+ ".tmp_source.zig:2:15: error: float literal out of range of any type",
+ );
- cases.add("explicit cast float literal to integer when there is a fraction component",
+ cases.add(
+ "explicit cast float literal to integer when there is a fraction component",
\\export fn entry() i32 {
\\ return i32(12.34);
\\}
,
- ".tmp_source.zig:2:16: error: fractional component prevents float value 12.340000 from being casted to type 'i32'");
+ ".tmp_source.zig:2:16: error: fractional component prevents float value 12.340000 from being casted to type 'i32'",
+ );
- cases.add("non pointer given to @ptrToInt",
+ cases.add(
+ "non pointer given to @ptrToInt",
\\export fn entry(x: i32) usize {
\\ return @ptrToInt(x);
\\}
,
- ".tmp_source.zig:2:22: error: expected pointer, found 'i32'");
+ ".tmp_source.zig:2:22: error: expected pointer, found 'i32'",
+ );
- cases.add("@shlExact shifts out 1 bits",
+ cases.add(
+ "@shlExact shifts out 1 bits",
\\comptime {
\\ const x = @shlExact(u8(0b01010101), 2);
\\}
,
- ".tmp_source.zig:2:15: error: operation caused overflow");
+ ".tmp_source.zig:2:15: error: operation caused overflow",
+ );
- cases.add("@shrExact shifts out 1 bits",
+ cases.add(
+ "@shrExact shifts out 1 bits",
\\comptime {
\\ const x = @shrExact(u8(0b10101010), 2);
\\}
,
- ".tmp_source.zig:2:15: error: exact shift shifted out 1 bits");
+ ".tmp_source.zig:2:15: error: exact shift shifted out 1 bits",
+ );
- cases.add("shifting without int type or comptime known",
+ cases.add(
+ "shifting without int type or comptime known",
\\export fn entry(x: u8) u8 {
\\ return 0x11 << x;
\\}
,
- ".tmp_source.zig:2:17: error: LHS of shift must be an integer type, or RHS must be compile-time known");
+ ".tmp_source.zig:2:17: error: LHS of shift must be an integer type, or RHS must be compile-time known",
+ );
- cases.add("shifting RHS is log2 of LHS int bit width",
+ cases.add(
+ "shifting RHS is log2 of LHS int bit width",
\\export fn entry(x: u8, y: u8) u8 {
\\ return x << y;
\\}
,
- ".tmp_source.zig:2:17: error: expected type 'u3', found 'u8'");
+ ".tmp_source.zig:2:17: error: expected type 'u3', found 'u8'",
+ );
- cases.add("globally shadowing a primitive type",
+ cases.add(
+ "globally shadowing a primitive type",
\\const u16 = @intType(false, 8);
\\export fn entry() void {
\\ const a: u16 = 300;
\\}
,
- ".tmp_source.zig:1:1: error: declaration shadows type 'u16'");
+ ".tmp_source.zig:1:1: error: declaration shadows type 'u16'",
+ );
- cases.add("implicitly increasing pointer alignment",
+ cases.add(
+ "implicitly increasing pointer alignment",
\\const Foo = packed struct {
\\ a: u8,
\\ b: u32,
@@ -2393,13 +3922,15 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ bar(&foo.b);
\\}
\\
- \\fn bar(x: &u32) void {
- \\ *x += 1;
+ \\fn bar(x: *u32) void {
+ \\ x.* += 1;
\\}
,
- ".tmp_source.zig:8:13: error: expected type '&u32', found '&align(1) u32'");
+ ".tmp_source.zig:8:13: error: expected type '*u32', found '*align(1) u32'",
+ );
- cases.add("implicitly increasing slice alignment",
+ cases.add(
+ "implicitly increasing slice alignment",
\\const Foo = packed struct {
\\ a: u8,
\\ b: u32,
@@ -2408,44 +3939,42 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\export fn entry() void {
\\ var foo = Foo { .a = 1, .b = 10 };
\\ foo.b += 1;
- \\ bar((&foo.b)[0..1]);
+ \\ bar((*[1]u32)(&foo.b)[0..]);
\\}
\\
\\fn bar(x: []u32) void {
\\ x[0] += 1;
\\}
,
- ".tmp_source.zig:9:17: error: expected type '[]u32', found '[]align(1) u32'");
+ ".tmp_source.zig:9:18: error: cast increases pointer alignment",
+ ".tmp_source.zig:9:23: note: '*align(1) u32' has alignment 1",
+ ".tmp_source.zig:9:18: note: '*[1]u32' has alignment 4",
+ );
- cases.add("increase pointer alignment in @ptrCast",
+ cases.add(
+ "increase pointer alignment in @ptrCast",
\\export fn entry() u32 {
\\ var bytes: [4]u8 = []u8{0x01, 0x02, 0x03, 0x04};
- \\ const ptr = @ptrCast(&u32, &bytes[0]);
- \\ return *ptr;
+ \\ const ptr = @ptrCast(*u32, &bytes[0]);
+ \\ return ptr.*;
\\}
,
".tmp_source.zig:3:17: error: cast increases pointer alignment",
- ".tmp_source.zig:3:38: note: '&u8' has alignment 1",
- ".tmp_source.zig:3:27: note: '&u32' has alignment 4");
+ ".tmp_source.zig:3:38: note: '*u8' has alignment 1",
+ ".tmp_source.zig:3:26: note: '*u32' has alignment 4",
+ );
- cases.add("increase pointer alignment in slice resize",
- \\export fn entry() u32 {
- \\ var bytes = []u8{0x01, 0x02, 0x03, 0x04};
- \\ return ([]u32)(bytes[0..])[0];
- \\}
- ,
- ".tmp_source.zig:3:19: error: cast increases pointer alignment",
- ".tmp_source.zig:3:19: note: '[]u8' has alignment 1",
- ".tmp_source.zig:3:19: note: '[]u32' has alignment 4");
-
- cases.add("@alignCast expects pointer or slice",
+ cases.add(
+ "@alignCast expects pointer or slice",
\\export fn entry() void {
\\ @alignCast(4, u32(3));
\\}
,
- ".tmp_source.zig:2:22: error: expected pointer or slice, found 'u32'");
+ ".tmp_source.zig:2:22: error: expected pointer or slice, found 'u32'",
+ );
- cases.add("passing an under-aligned function pointer",
+ cases.add(
+ "passing an under-aligned function pointer",
\\export fn entry() void {
\\ testImplicitlyDecreaseFnAlign(alignedSmall, 1234);
\\}
@@ -2454,26 +3983,32 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\fn alignedSmall() align(4) i32 { return 1234; }
,
- ".tmp_source.zig:2:35: error: expected type 'fn() align(8) i32', found 'fn() align(4) i32'");
+ ".tmp_source.zig:2:35: error: expected type 'fn() align(8) i32', found 'fn() align(4) i32'",
+ );
- cases.add("passing a not-aligned-enough pointer to cmpxchg",
+ cases.add(
+ "passing a not-aligned-enough pointer to cmpxchg",
\\const AtomicOrder = @import("builtin").AtomicOrder;
\\export fn entry() bool {
\\ var x: i32 align(1) = 1234;
- \\ while (!@cmpxchg(&x, 1234, 5678, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) {}
+ \\ while (!@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) {}
\\ return x == 5678;
\\}
,
- ".tmp_source.zig:4:23: error: expected pointer alignment of at least 4, found 1");
+ ".tmp_source.zig:4:32: error: expected type '*i32', found '*align(1) i32'",
+ );
- cases.add("wrong size to an array literal",
+ cases.add(
+ "wrong size to an array literal",
\\comptime {
\\ const array = [2]u8{1, 2, 3};
\\}
,
- ".tmp_source.zig:2:24: error: expected [2]u8 literal, found [3]u8 literal");
+ ".tmp_source.zig:2:24: error: expected [2]u8 literal, found [3]u8 literal",
+ );
- cases.add("@setEvalBranchQuota in non-root comptime execution context",
+ cases.add(
+ "@setEvalBranchQuota in non-root comptime execution context",
\\comptime {
\\ foo();
\\}
@@ -2483,72 +4018,82 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
,
".tmp_source.zig:5:5: error: @setEvalBranchQuota must be called from the top of the comptime stack",
".tmp_source.zig:2:8: note: called from here",
- ".tmp_source.zig:1:10: note: called from here");
+ ".tmp_source.zig:1:10: note: called from here",
+ );
- cases.add("wrong pointer implicitly casted to pointer to @OpaqueType()",
+ cases.add(
+ "wrong pointer implicitly casted to pointer to @OpaqueType()",
\\const Derp = @OpaqueType();
- \\extern fn bar(d: &Derp) void;
+ \\extern fn bar(d: *Derp) void;
\\export fn foo() void {
\\ var x = u8(1);
- \\ bar(@ptrCast(&c_void, &x));
+ \\ bar(@ptrCast(*c_void, &x));
\\}
,
- ".tmp_source.zig:5:9: error: expected type '&Derp', found '&c_void'");
+ ".tmp_source.zig:5:9: error: expected type '*Derp', found '*c_void'",
+ );
- cases.add("non-const variables of things that require const variables",
+ cases.add(
+ "non-const variables of things that require const variables",
\\const Opaque = @OpaqueType();
\\
- \\export fn entry(opaque: &Opaque) void {
+ \\export fn entry(opaque: *Opaque) void {
\\ var m2 = &2;
- \\ const y: u32 = *m2;
+ \\ const y: u32 = m2.*;
\\
\\ var a = undefined;
\\ var b = 1;
\\ var c = 1.0;
\\ var d = this;
\\ var e = null;
- \\ var f = *opaque;
+ \\ var f = opaque.*;
\\ var g = i32;
- \\ var h = @import("std");
+ \\ var h = @import("std",);
\\ var i = (Foo {}).bar;
\\
\\ var z: noreturn = return;
\\}
\\
\\const Foo = struct {
- \\ fn bar(self: &const Foo) void {}
+ \\ fn bar(self: *const Foo) void {}
\\};
,
- ".tmp_source.zig:4:4: error: variable of type '&const (integer literal)' must be const or comptime",
+ ".tmp_source.zig:4:4: error: variable of type '*comptime_int' must be const or comptime",
".tmp_source.zig:7:4: error: variable of type '(undefined)' must be const or comptime",
- ".tmp_source.zig:8:4: error: variable of type '(integer literal)' must be const or comptime",
- ".tmp_source.zig:9:4: error: variable of type '(float literal)' must be const or comptime",
+ ".tmp_source.zig:8:4: error: variable of type 'comptime_int' must be const or comptime",
+ ".tmp_source.zig:9:4: error: variable of type 'comptime_float' must be const or comptime",
".tmp_source.zig:10:4: error: variable of type '(block)' must be const or comptime",
".tmp_source.zig:11:4: error: variable of type '(null)' must be const or comptime",
- ".tmp_source.zig:12:4: error: variable of type 'Opaque' must be const or comptime",
+ ".tmp_source.zig:12:4: error: variable of type 'Opaque' not allowed",
".tmp_source.zig:13:4: error: variable of type 'type' must be const or comptime",
".tmp_source.zig:14:4: error: variable of type '(namespace)' must be const or comptime",
- ".tmp_source.zig:15:4: error: variable of type '(bound fn(&const Foo) void)' must be const or comptime",
- ".tmp_source.zig:17:4: error: unreachable code");
+ ".tmp_source.zig:15:4: error: variable of type '(bound fn(*const Foo) void)' must be const or comptime",
+ ".tmp_source.zig:17:4: error: unreachable code",
+ );
- cases.add("wrong types given to atomic order args in cmpxchg",
+ cases.add(
+ "wrong types given to atomic order args in cmpxchg",
\\export fn entry() void {
\\ var x: i32 = 1234;
- \\ while (!@cmpxchg(&x, 1234, 5678, u32(1234), u32(1234))) {}
+ \\ while (!@cmpxchgWeak(i32, &x, 1234, 5678, u32(1234), u32(1234))) {}
\\}
,
- ".tmp_source.zig:3:41: error: expected type 'AtomicOrder', found 'u32'");
+ ".tmp_source.zig:3:50: error: expected type 'AtomicOrder', found 'u32'",
+ );
- cases.add("wrong types given to @export",
+ cases.add(
+ "wrong types given to @export",
\\extern fn entry() void { }
\\comptime {
\\ @export("entry", entry, u32(1234));
\\}
,
- ".tmp_source.zig:3:32: error: expected type 'GlobalLinkage', found 'u32'");
+ ".tmp_source.zig:3:32: error: expected type 'GlobalLinkage', found 'u32'",
+ );
- cases.add("struct with invalid field",
- \\const std = @import("std");
+ cases.add(
+ "struct with invalid field",
+ \\const std = @import("std",);
\\const Allocator = std.mem.Allocator;
\\const ArrayList = std.ArrayList;
\\
@@ -2572,23 +4117,29 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ };
\\}
,
- ".tmp_source.zig:14:17: error: use of undeclared identifier 'HeaderValue'");
+ ".tmp_source.zig:14:17: error: use of undeclared identifier 'HeaderValue'",
+ );
- cases.add("@setAlignStack outside function",
+ cases.add(
+ "@setAlignStack outside function",
\\comptime {
\\ @setAlignStack(16);
\\}
,
- ".tmp_source.zig:2:5: error: @setAlignStack outside function");
+ ".tmp_source.zig:2:5: error: @setAlignStack outside function",
+ );
- cases.add("@setAlignStack in naked function",
+ cases.add(
+ "@setAlignStack in naked function",
\\export nakedcc fn entry() void {
\\ @setAlignStack(16);
\\}
,
- ".tmp_source.zig:2:5: error: @setAlignStack in naked function");
+ ".tmp_source.zig:2:5: error: @setAlignStack in naked function",
+ );
- cases.add("@setAlignStack in inline function",
+ cases.add(
+ "@setAlignStack in inline function",
\\export fn entry() void {
\\ foo();
\\}
@@ -2596,25 +4147,31 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ @setAlignStack(16);
\\}
,
- ".tmp_source.zig:5:5: error: @setAlignStack in inline function");
+ ".tmp_source.zig:5:5: error: @setAlignStack in inline function",
+ );
- cases.add("@setAlignStack set twice",
+ cases.add(
+ "@setAlignStack set twice",
\\export fn entry() void {
\\ @setAlignStack(16);
\\ @setAlignStack(16);
\\}
,
".tmp_source.zig:3:5: error: alignstack set twice",
- ".tmp_source.zig:2:5: note: first set here");
+ ".tmp_source.zig:2:5: note: first set here",
+ );
- cases.add("@setAlignStack too big",
+ cases.add(
+ "@setAlignStack too big",
\\export fn entry() void {
\\ @setAlignStack(511 + 1);
\\}
,
- ".tmp_source.zig:2:5: error: attempt to @setAlignStack(512); maximum is 256");
+ ".tmp_source.zig:2:5: error: attempt to @setAlignStack(512); maximum is 256",
+ );
- cases.add("storing runtime value in compile time variable then using it",
+ cases.add(
+ "storing runtime value in compile time variable then using it",
\\const Mode = @import("builtin").Mode;
\\
\\fn Free(comptime filename: []const u8) TestCase {
@@ -2657,134 +4214,164 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ }
\\}
,
- ".tmp_source.zig:37:16: error: cannot store runtime value in compile time variable");
+ ".tmp_source.zig:37:16: error: cannot store runtime value in compile time variable",
+ );
- cases.add("field access of opaque type",
+ cases.add(
+ "field access of opaque type",
\\const MyType = @OpaqueType();
\\
\\export fn entry() bool {
\\ var x: i32 = 1;
- \\ return bar(@ptrCast(&MyType, &x));
+ \\ return bar(@ptrCast(*MyType, &x));
\\}
\\
- \\fn bar(x: &MyType) bool {
+ \\fn bar(x: *MyType) bool {
\\ return x.blah;
\\}
,
- ".tmp_source.zig:9:13: error: type '&MyType' does not support field access");
+ ".tmp_source.zig:9:13: error: type '*MyType' does not support field access",
+ );
- cases.add("carriage return special case",
+ cases.add(
+ "carriage return special case",
"fn test() bool {\r\n" ++
- " true\r\n" ++
- "}\r\n"
- ,
- ".tmp_source.zig:1:17: error: invalid carriage return, only '\\n' line endings are supported");
+ " true\r\n" ++
+ "}\r\n",
+ ".tmp_source.zig:1:17: error: invalid carriage return, only '\\n' line endings are supported",
+ );
- cases.add("non-printable invalid character",
- "\xff\xfe" ++
- \\fn test() bool {\r
- \\ true\r
- \\}
- ,
- ".tmp_source.zig:1:1: error: invalid character: '\\xff'");
+ cases.add(
+ "non-printable invalid character",
+ "\xff\xfe" ++
+ \\fn test() bool {\r
+ \\ true\r
+ \\}
+ ,
+ ".tmp_source.zig:1:1: error: invalid character: '\\xff'",
+ );
- cases.add("non-printable invalid character with escape alternative",
+ cases.add(
+ "non-printable invalid character with escape alternative",
"fn test() bool {\n" ++
- "\ttrue\n" ++
- "}\n"
- ,
- ".tmp_source.zig:2:1: error: invalid character: '\\t'");
+ "\ttrue\n" ++
+ "}\n",
+ ".tmp_source.zig:2:1: error: invalid character: '\\t'",
+ );
- cases.add("@ArgType given non function parameter",
+ cases.add(
+ "@ArgType given non function parameter",
\\comptime {
\\ _ = @ArgType(i32, 3);
\\}
,
- ".tmp_source.zig:2:18: error: expected function, found 'i32'");
+ ".tmp_source.zig:2:18: error: expected function, found 'i32'",
+ );
- cases.add("@ArgType arg index out of bounds",
+ cases.add(
+ "@ArgType arg index out of bounds",
\\comptime {
\\ _ = @ArgType(@typeOf(add), 2);
\\}
\\fn add(a: i32, b: i32) i32 { return a + b; }
,
- ".tmp_source.zig:2:32: error: arg index 2 out of bounds; 'fn(i32, i32) i32' has 2 arguments");
+ ".tmp_source.zig:2:32: error: arg index 2 out of bounds; 'fn(i32, i32) i32' has 2 arguments",
+ );
- cases.add("@memberType on unsupported type",
+ cases.add(
+ "@memberType on unsupported type",
\\comptime {
\\ _ = @memberType(i32, 0);
\\}
,
- ".tmp_source.zig:2:21: error: type 'i32' does not support @memberType");
+ ".tmp_source.zig:2:21: error: type 'i32' does not support @memberType",
+ );
- cases.add("@memberType on enum",
+ cases.add(
+ "@memberType on enum",
\\comptime {
\\ _ = @memberType(Foo, 0);
\\}
\\const Foo = enum {A,};
,
- ".tmp_source.zig:2:21: error: type 'Foo' does not support @memberType");
+ ".tmp_source.zig:2:21: error: type 'Foo' does not support @memberType",
+ );
- cases.add("@memberType struct out of bounds",
+ cases.add(
+ "@memberType struct out of bounds",
\\comptime {
\\ _ = @memberType(Foo, 0);
\\}
\\const Foo = struct {};
,
- ".tmp_source.zig:2:26: error: member index 0 out of bounds; 'Foo' has 0 members");
+ ".tmp_source.zig:2:26: error: member index 0 out of bounds; 'Foo' has 0 members",
+ );
- cases.add("@memberType union out of bounds",
+ cases.add(
+ "@memberType union out of bounds",
\\comptime {
\\ _ = @memberType(Foo, 1);
\\}
\\const Foo = union {A: void,};
,
- ".tmp_source.zig:2:26: error: member index 1 out of bounds; 'Foo' has 1 members");
+ ".tmp_source.zig:2:26: error: member index 1 out of bounds; 'Foo' has 1 members",
+ );
- cases.add("@memberName on unsupported type",
+ cases.add(
+ "@memberName on unsupported type",
\\comptime {
\\ _ = @memberName(i32, 0);
\\}
,
- ".tmp_source.zig:2:21: error: type 'i32' does not support @memberName");
+ ".tmp_source.zig:2:21: error: type 'i32' does not support @memberName",
+ );
- cases.add("@memberName struct out of bounds",
+ cases.add(
+ "@memberName struct out of bounds",
\\comptime {
\\ _ = @memberName(Foo, 0);
\\}
\\const Foo = struct {};
,
- ".tmp_source.zig:2:26: error: member index 0 out of bounds; 'Foo' has 0 members");
+ ".tmp_source.zig:2:26: error: member index 0 out of bounds; 'Foo' has 0 members",
+ );
- cases.add("@memberName enum out of bounds",
+ cases.add(
+ "@memberName enum out of bounds",
\\comptime {
\\ _ = @memberName(Foo, 1);
\\}
\\const Foo = enum {A,};
,
- ".tmp_source.zig:2:26: error: member index 1 out of bounds; 'Foo' has 1 members");
+ ".tmp_source.zig:2:26: error: member index 1 out of bounds; 'Foo' has 1 members",
+ );
- cases.add("@memberName union out of bounds",
+ cases.add(
+ "@memberName union out of bounds",
\\comptime {
\\ _ = @memberName(Foo, 1);
\\}
\\const Foo = union {A:i32,};
,
- ".tmp_source.zig:2:26: error: member index 1 out of bounds; 'Foo' has 1 members");
+ ".tmp_source.zig:2:26: error: member index 1 out of bounds; 'Foo' has 1 members",
+ );
- cases.add("calling var args extern function, passing array instead of pointer",
+ cases.add(
+ "calling var args extern function, passing array instead of pointer",
\\export fn entry() void {
- \\ foo("hello");
+ \\ foo("hello",);
\\}
- \\pub extern fn foo(format: &const u8, ...) void;
+ \\pub extern fn foo(format: *const u8, ...) void;
,
- ".tmp_source.zig:2:9: error: expected type '&const u8', found '[5]u8'");
+ ".tmp_source.zig:2:9: error: expected type '*const u8', found '[5]u8'",
+ );
- cases.add("constant inside comptime function has compile error",
+ cases.add(
+ "constant inside comptime function has compile error",
\\const ContextAllocator = MemoryPool(usize);
\\
\\pub fn MemoryPool(comptime T: type) type {
- \\ const free_list_t = @compileError("aoeu");
+ \\ const free_list_t = @compileError("aoeu",);
\\
\\ return struct {
\\ free_list: free_list_t,
@@ -2797,9 +4384,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
,
".tmp_source.zig:4:25: error: aoeu",
".tmp_source.zig:1:36: note: called from here",
- ".tmp_source.zig:12:20: note: referenced here");
+ ".tmp_source.zig:12:20: note: referenced here",
+ );
- cases.add("specify enum tag type that is too small",
+ cases.add(
+ "specify enum tag type that is too small",
\\const Small = enum (u2) {
\\ One,
\\ Two,
@@ -2812,9 +4401,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var x = Small.One;
\\}
,
- ".tmp_source.zig:1:20: error: 'u2' too small to hold all bits; must be at least 'u3'");
+ ".tmp_source.zig:1:20: error: 'u2' too small to hold all bits; must be at least 'u3'",
+ );
- cases.add("specify non-integer enum tag type",
+ cases.add(
+ "specify non-integer enum tag type",
\\const Small = enum (f32) {
\\ One,
\\ Two,
@@ -2825,9 +4416,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var x = Small.One;
\\}
,
- ".tmp_source.zig:1:20: error: expected integer, found 'f32'");
+ ".tmp_source.zig:1:20: error: expected integer, found 'f32'",
+ );
- cases.add("implicitly casting enum to tag type",
+ cases.add(
+ "implicitly casting enum to tag type",
\\const Small = enum(u2) {
\\ One,
\\ Two,
@@ -2839,23 +4432,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var x: u2 = Small.Two;
\\}
,
- ".tmp_source.zig:9:22: error: expected type 'u2', found 'Small'");
+ ".tmp_source.zig:9:22: error: expected type 'u2', found 'Small'",
+ );
- cases.add("explicitly casting enum to non tag type",
- \\const Small = enum(u2) {
- \\ One,
- \\ Two,
- \\ Three,
- \\ Four,
- \\};
- \\
- \\export fn entry() void {
- \\ var x = u3(Small.Two);
- \\}
- ,
- ".tmp_source.zig:9:15: error: enum to integer cast to 'u3' instead of its tag type, 'u2'");
-
- cases.add("explicitly casting non tag type to enum",
+ cases.add(
+ "explicitly casting non tag type to enum",
\\const Small = enum(u2) {
\\ One,
\\ Two,
@@ -2865,12 +4446,14 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() void {
\\ var y = u3(3);
- \\ var x = Small(y);
+ \\ var x = @intToEnum(Small, y);
\\}
,
- ".tmp_source.zig:10:18: error: integer to enum cast from 'u3' instead of its tag type, 'u2'");
+ ".tmp_source.zig:10:31: error: expected type 'u2', found 'u3'",
+ );
- cases.add("non unsigned integer enum tag type",
+ cases.add(
+ "non unsigned integer enum tag type",
\\const Small = enum(i2) {
\\ One,
\\ Two,
@@ -2882,9 +4465,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var y = Small.Two;
\\}
,
- ".tmp_source.zig:1:19: error: expected unsigned integer, found 'i2'");
+ ".tmp_source.zig:1:19: error: expected unsigned integer, found 'i2'",
+ );
- cases.add("struct fields with value assignments",
+ cases.add(
+ "struct fields with value assignments",
\\const MultipleChoice = struct {
\\ A: i32 = 20,
\\};
@@ -2892,9 +4477,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var x: MultipleChoice = undefined;
\\}
,
- ".tmp_source.zig:2:14: error: enums, not structs, support field assignment");
+ ".tmp_source.zig:2:14: error: enums, not structs, support field assignment",
+ );
- cases.add("union fields with value assignments",
+ cases.add(
+ "union fields with value assignments",
\\const MultipleChoice = union {
\\ A: i32 = 20,
\\};
@@ -2903,25 +4490,31 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:2:14: error: non-enum union field assignment",
- ".tmp_source.zig:1:24: note: consider 'union(enum)' here");
+ ".tmp_source.zig:1:24: note: consider 'union(enum)' here",
+ );
- cases.add("enum with 0 fields",
+ cases.add(
+ "enum with 0 fields",
\\const Foo = enum {};
\\export fn entry() usize {
\\ return @sizeOf(Foo);
\\}
,
- ".tmp_source.zig:1:13: error: enums must have 1 or more fields");
+ ".tmp_source.zig:1:13: error: enums must have 1 or more fields",
+ );
- cases.add("union with 0 fields",
+ cases.add(
+ "union with 0 fields",
\\const Foo = union {};
\\export fn entry() usize {
\\ return @sizeOf(Foo);
\\}
,
- ".tmp_source.zig:1:13: error: unions must have 1 or more fields");
+ ".tmp_source.zig:1:13: error: unions must have 1 or more fields",
+ );
- cases.add("enum value already taken",
+ cases.add(
+ "enum value already taken",
\\const MultipleChoice = enum(u32) {
\\ A = 20,
\\ B = 40,
@@ -2934,9 +4527,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:6:9: error: enum tag value 60 already taken",
- ".tmp_source.zig:4:9: note: other occurrence here");
+ ".tmp_source.zig:4:9: note: other occurrence here",
+ );
- cases.add("union with specified enum omits field",
+ cases.add(
+ "union with specified enum omits field",
\\const Letter = enum {
\\ A,
\\ B,
@@ -2951,9 +4546,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:6:17: error: enum field missing: 'C'",
- ".tmp_source.zig:4:5: note: declared here");
+ ".tmp_source.zig:4:5: note: declared here",
+ );
- cases.add("@TagType when union has no attached enum",
+ cases.add(
+ "@TagType when union has no attached enum",
\\const Foo = union {
\\ A: i32,
\\};
@@ -2962,9 +4559,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:5:24: error: union 'Foo' has no tag",
- ".tmp_source.zig:1:13: note: consider 'union(enum)' here");
+ ".tmp_source.zig:1:13: note: consider 'union(enum)' here",
+ );
- cases.add("non-integer tag type to automatic union enum",
+ cases.add(
+ "non-integer tag type to automatic union enum",
\\const Foo = union(enum(f32)) {
\\ A: i32,
\\};
@@ -2972,9 +4571,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ const x = @TagType(Foo);
\\}
,
- ".tmp_source.zig:1:23: error: expected integer tag type, found 'f32'");
+ ".tmp_source.zig:1:23: error: expected integer tag type, found 'f32'",
+ );
- cases.add("non-enum tag type passed to union",
+ cases.add(
+ "non-enum tag type passed to union",
\\const Foo = union(u32) {
\\ A: i32,
\\};
@@ -2982,9 +4583,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ const x = @TagType(Foo);
\\}
,
- ".tmp_source.zig:1:18: error: expected enum tag type, found 'u32'");
+ ".tmp_source.zig:1:18: error: expected enum tag type, found 'u32'",
+ );
- cases.add("union auto-enum value already taken",
+ cases.add(
+ "union auto-enum value already taken",
\\const MultipleChoice = union(enum(u32)) {
\\ A = 20,
\\ B = 40,
@@ -2997,9 +4600,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:6:9: error: enum tag value 60 already taken",
- ".tmp_source.zig:4:9: note: other occurrence here");
+ ".tmp_source.zig:4:9: note: other occurrence here",
+ );
- cases.add("union enum field does not match enum",
+ cases.add(
+ "union enum field does not match enum",
\\const Letter = enum {
\\ A,
\\ B,
@@ -3016,9 +4621,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:10:5: error: enum field not found: 'D'",
- ".tmp_source.zig:1:16: note: enum declared here");
+ ".tmp_source.zig:1:16: note: enum declared here",
+ );
- cases.add("field type supplied in an enum",
+ cases.add(
+ "field type supplied in an enum",
\\const Letter = enum {
\\ A: void,
\\ B,
@@ -3029,9 +4636,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:2:8: error: structs and unions, not enums, support field types",
- ".tmp_source.zig:1:16: note: consider 'union(enum)' here");
+ ".tmp_source.zig:1:16: note: consider 'union(enum)' here",
+ );
- cases.add("struct field missing type",
+ cases.add(
+ "struct field missing type",
\\const Letter = struct {
\\ A,
\\};
@@ -3039,9 +4648,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var a = Letter { .A = {} };
\\}
,
- ".tmp_source.zig:2:5: error: struct field missing type");
+ ".tmp_source.zig:2:5: error: struct field missing type",
+ );
- cases.add("extern union field missing type",
+ cases.add(
+ "extern union field missing type",
\\const Letter = extern union {
\\ A,
\\};
@@ -3049,9 +4660,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var a = Letter { .A = {} };
\\}
,
- ".tmp_source.zig:2:5: error: union field missing type");
+ ".tmp_source.zig:2:5: error: union field missing type",
+ );
- cases.add("extern union given enum tag type",
+ cases.add(
+ "extern union given enum tag type",
\\const Letter = enum {
\\ A,
\\ B,
@@ -3066,9 +4679,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var a = Payload { .A = 1234 };
\\}
,
- ".tmp_source.zig:6:29: error: extern union does not support enum tag type");
+ ".tmp_source.zig:6:29: error: extern union does not support enum tag type",
+ );
- cases.add("packed union given enum tag type",
+ cases.add(
+ "packed union given enum tag type",
\\const Letter = enum {
\\ A,
\\ B,
@@ -3083,9 +4698,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ var a = Payload { .A = 1234 };
\\}
,
- ".tmp_source.zig:6:29: error: packed union does not support enum tag type");
+ ".tmp_source.zig:6:29: error: packed union does not support enum tag type",
+ );
- cases.add("switch on union with no attached enum",
+ cases.add(
+ "switch on union with no attached enum",
\\const Payload = union {
\\ A: i32,
\\ B: f64,
@@ -3095,29 +4712,33 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ const a = Payload { .A = 1234 };
\\ foo(a);
\\}
- \\fn foo(a: &const Payload) void {
- \\ switch (*a) {
+ \\fn foo(a: *const Payload) void {
+ \\ switch (a.*) {
\\ Payload.A => {},
\\ else => unreachable,
\\ }
\\}
,
- ".tmp_source.zig:11:13: error: switch on union which has no attached enum",
- ".tmp_source.zig:1:17: note: consider 'union(enum)' here");
+ ".tmp_source.zig:11:14: error: switch on union which has no attached enum",
+ ".tmp_source.zig:1:17: note: consider 'union(enum)' here",
+ );
- cases.add("enum in field count range but not matching tag",
+ cases.add(
+ "enum in field count range but not matching tag",
\\const Foo = enum(u32) {
\\ A = 10,
\\ B = 11,
\\};
\\export fn entry() void {
- \\ var x = Foo(0);
+ \\ var x = @intToEnum(Foo, 0);
\\}
,
- ".tmp_source.zig:6:16: error: enum 'Foo' has no tag matching integer value 0",
- ".tmp_source.zig:1:13: note: 'Foo' declared here");
+ ".tmp_source.zig:6:13: error: enum 'Foo' has no tag matching integer value 0",
+ ".tmp_source.zig:1:13: note: 'Foo' declared here",
+ );
- cases.add("comptime cast enum to union but field has payload",
+ cases.add(
+ "comptime cast enum to union but field has payload",
\\const Letter = enum { A, B, C };
\\const Value = union(Letter) {
\\ A: i32,
@@ -3129,9 +4750,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:8:26: error: cast to union 'Value' must initialize 'i32' field 'A'",
- ".tmp_source.zig:3:5: note: field 'A' declared here");
+ ".tmp_source.zig:3:5: note: field 'A' declared here",
+ );
- cases.add("runtime cast to union which has non-void fields",
+ cases.add(
+ "runtime cast to union which has non-void fields",
\\const Letter = enum { A, B, C };
\\const Value = union(Letter) {
\\ A: i32,
@@ -3146,27 +4769,52 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
,
".tmp_source.zig:11:20: error: runtime cast to union 'Value' which has non-void fields",
- ".tmp_source.zig:3:5: note: field 'A' has type 'i32'");
+ ".tmp_source.zig:3:5: note: field 'A' has type 'i32'",
+ );
- cases.add("self-referencing function pointer field",
- \\const S = struct {
- \\ f: fn(_: S) void,
- \\};
- \\fn f(_: S) void {
- \\}
- \\export fn entry() void {
- \\ var _ = S { .f = f };
- \\}
- ,
- ".tmp_source.zig:4:9: error: type 'S' is not copyable; cannot pass by value");
-
- cases.add("taking offset of void field in struct",
+ cases.add(
+ "taking offset of void field in struct",
\\const Empty = struct {
\\ val: void,
\\};
\\export fn foo() void {
- \\ const fieldOffset = @offsetOf(Empty, "val");
+ \\ const fieldOffset = @offsetOf(Empty, "val",);
\\}
,
- ".tmp_source.zig:5:42: error: zero-bit field 'val' in struct 'Empty' has no offset");
+ ".tmp_source.zig:5:42: error: zero-bit field 'val' in struct 'Empty' has no offset",
+ );
+
+ cases.add(
+ "invalid union field access in comptime",
+ \\const Foo = union {
+ \\ Bar: u8,
+ \\ Baz: void,
+ \\};
+ \\comptime {
+ \\ var foo = Foo {.Baz = {}};
+ \\ const bar_val = foo.Bar;
+ \\}
+ ,
+ ".tmp_source.zig:7:24: error: accessing union field 'Bar' while field 'Baz' is set",
+ );
+
+ cases.add(
+ "getting return type of generic function",
+ \\fn generic(a: var) void {}
+ \\comptime {
+ \\ _ = @typeOf(generic).ReturnType;
+ \\}
+ ,
+ ".tmp_source.zig:3:25: error: ReturnType has not been resolved because 'fn(var)var' is generic",
+ );
+
+ cases.add(
+ "getting @ArgType of generic function",
+ \\fn generic(a: var) void {}
+ \\comptime {
+ \\ _ = @ArgType(@typeOf(generic), 0);
+ \\}
+ ,
+ ".tmp_source.zig:3:36: error: @ArgType could not resolve the type of arg 0 because 'fn(var)var' is generic",
+ );
}
diff --git a/test/gen_h.zig b/test/gen_h.zig
index 30d168cf2c..b3aaa263d6 100644
--- a/test/gen_h.zig
+++ b/test/gen_h.zig
@@ -1,6 +1,6 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.GenHContext) void {
+pub fn addCases(cases: *tests.GenHContext) void {
cases.add("declare enum",
\\const Foo = extern enum { A, B, C };
\\export fn entry(foo: Foo) void { }
@@ -54,7 +54,7 @@ pub fn addCases(cases: &tests.GenHContext) void {
cases.add("declare opaque type",
\\export const Foo = @OpaqueType();
\\
- \\export fn entry(foo: ?&Foo) void { }
+ \\export fn entry(foo: ?*Foo) void { }
,
\\struct Foo;
\\
@@ -64,7 +64,7 @@ pub fn addCases(cases: &tests.GenHContext) void {
cases.add("array field-type",
\\const Foo = extern struct {
\\ A: [2]i32,
- \\ B: [4]&u32,
+ \\ B: [4]*u32,
\\};
\\export fn entry(foo: Foo, bar: [3]u8) void { }
,
@@ -77,4 +77,50 @@ pub fn addCases(cases: &tests.GenHContext) void {
\\
);
+ cases.add("ptr to zig struct",
+ \\const S = struct {
+ \\ a: u8,
+ \\};
+ \\
+ \\export fn a(s: *S) u8 {
+ \\ return s.a;
+ \\}
+
+ ,
+ \\struct S;
+ \\TEST_EXPORT uint8_t a(struct S * s);
+ \\
+ );
+
+ cases.add("ptr to zig union",
+ \\const U = union(enum) {
+ \\ A: u8,
+ \\ B: u16,
+ \\};
+ \\
+ \\export fn a(s: *U) u8 {
+ \\ return s.A;
+ \\}
+
+ ,
+ \\union U;
+ \\TEST_EXPORT uint8_t a(union U * s);
+ \\
+ );
+
+ cases.add("ptr to zig enum",
+ \\const E = enum(u8) {
+ \\ A,
+ \\ B,
+ \\};
+ \\
+ \\export fn a(s: *E) u8 {
+ \\ return @enumToInt(s.*);
+ \\}
+
+ ,
+ \\enum E;
+ \\TEST_EXPORT uint8_t a(enum E * s);
+ \\
+ );
}
diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig
index 1fea6347ab..3d58dfe748 100644
--- a/test/runtime_safety.zig
+++ b/test/runtime_safety.zig
@@ -1,8 +1,65 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompareOutputContext) void {
+pub fn addCases(cases: *tests.CompareOutputContext) void {
+ cases.addRuntimeSafety("@intToEnum - no matching tag value",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\const Foo = enum {
+ \\ A,
+ \\ B,
+ \\ C,
+ \\};
+ \\pub fn main() void {
+ \\ baz(bar(3));
+ \\}
+ \\fn bar(a: u2) Foo {
+ \\ return @intToEnum(Foo, a);
+ \\}
+ \\fn baz(a: Foo) void {}
+ );
+
+ cases.addRuntimeSafety("@floatToInt cannot fit - negative to unsigned",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ baz(bar(-1.1));
+ \\}
+ \\fn bar(a: f32) u8 {
+ \\ return @floatToInt(u8, a);
+ \\}
+ \\fn baz(a: u8) void { }
+ );
+
+ cases.addRuntimeSafety("@floatToInt cannot fit - negative out of range",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ baz(bar(-129.1));
+ \\}
+ \\fn bar(a: f32) i8 {
+ \\ return @floatToInt(i8, a);
+ \\}
+ \\fn baz(a: i8) void { }
+ );
+
+ cases.addRuntimeSafety("@floatToInt cannot fit - positive out of range",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ baz(bar(256.2));
+ \\}
+ \\fn bar(a: f32) u8 {
+ \\ return @floatToInt(u8, a);
+ \\}
+ \\fn baz(a: u8) void { }
+ );
+
cases.addRuntimeSafety("calling panic",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
@@ -11,7 +68,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("out of bounds slice access",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
@@ -25,7 +82,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("integer addition overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -38,7 +95,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("integer subtraction overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -51,7 +108,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("integer multiplication overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -64,7 +121,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("integer negation overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -77,7 +134,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("signed integer division overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -90,7 +147,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("signed shift left overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -103,7 +160,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("unsigned shift left overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -116,7 +173,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("signed shift right overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -129,7 +186,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("unsigned shift right overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -142,7 +199,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("integer division by zero",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
@@ -154,7 +211,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("exact division failure",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -167,7 +224,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("cast []u8 to bigger slice of wrong size",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -175,12 +232,12 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ if (x.len == 0) return error.Whatever;
\\}
\\fn widenSlice(slice: []align(1) const u8) []align(1) const i32 {
- \\ return ([]align(1) const i32)(slice);
+ \\ return @bytesToSlice(i32, slice);
\\}
);
cases.addRuntimeSafety("value does not fit in shortening cast",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -188,12 +245,12 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ if (x == 0) return error.Whatever;
\\}
\\fn shorten_cast(x: i32) i8 {
- \\ return i8(x);
+ \\ return @intCast(i8, x);
\\}
);
cases.addRuntimeSafety("signed integer not fitting in cast to unsigned integer",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -201,12 +258,12 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ if (x == 0) return error.Whatever;
\\}
\\fn unsigned_cast(x: i32) u32 {
- \\ return u32(x);
+ \\ return @intCast(u32, x);
\\}
);
cases.addRuntimeSafety("unwrap error",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ if (@import("std").mem.eql(u8, message, "attempt to unwrap error: Whatever")) {
\\ @import("std").os.exit(126); // good
\\ }
@@ -221,19 +278,19 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("cast integer to global error and no code matches",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
\\ _ = bar(9999);
\\}
- \\fn bar(x: u32) error {
- \\ return error(x);
+ \\fn bar(x: u16) error {
+ \\ return @intToError(x);
\\}
);
- cases.addRuntimeSafety("cast integer to non-global error set and no match",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ cases.addRuntimeSafety("@errSetCast error not present in destination",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\const Set1 = error{A, B};
@@ -242,28 +299,28 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ _ = foo(Set1.B);
\\}
\\fn foo(set1: Set1) Set2 {
- \\ return Set2(set1);
+ \\ return @errSetCast(Set2, set1);
\\}
);
cases.addRuntimeSafety("@alignCast misaligned",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
\\ var array align(4) = []u32{0x11111111, 0x11111111};
- \\ const bytes = ([]u8)(array[0..]);
+ \\ const bytes = @sliceToBytes(array[0..]);
\\ if (foo(bytes) != 0x11111111) return error.Wrong;
\\}
\\fn foo(bytes: []u8) u32 {
\\ const slice4 = bytes[1..5];
- \\ const int_slice = ([]u32)(@alignCast(4, slice4));
+ \\ const int_slice = @bytesToSlice(u32, @alignCast(4, slice4));
\\ return int_slice[0];
\\}
);
cases.addRuntimeSafety("bad union field access",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\
@@ -277,7 +334,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ bar(&f);
\\}
\\
- \\fn bar(f: &Foo) void {
+ \\fn bar(f: *Foo) void {
\\ f.float = 12.34;
\\}
);
@@ -287,7 +344,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
cases.addRuntimeSafety("error return trace across suspend points",
\\const std = @import("std");
\\
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ std.os.exit(126);
\\}
\\
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
new file mode 100644
index 0000000000..fdc3d49145
--- /dev/null
+++ b/test/stage2/compare_output.zig
@@ -0,0 +1,25 @@
+const std = @import("std");
+const TestContext = @import("../../src-self-hosted/test.zig").TestContext;
+
+pub fn addCases(ctx: *TestContext) !void {
+ // hello world
+ try ctx.testCompareOutputLibC(
+ \\extern fn puts([*]const u8) void;
+ \\export fn main() c_int {
+ \\ puts(c"Hello, world!");
+ \\ return 0;
+ \\}
+ , "Hello, world!" ++ std.cstr.line_sep);
+
+ // function calling another function
+ try ctx.testCompareOutputLibC(
+ \\extern fn puts(s: [*]const u8) void;
+ \\export fn main() c_int {
+ \\ return foo(c"OK");
+ \\}
+ \\fn foo(s: [*]const u8) c_int {
+ \\ puts(s);
+ \\ return 0;
+ \\}
+ , "OK" ++ std.cstr.line_sep);
+}
diff --git a/test/stage2/compile_errors.zig b/test/stage2/compile_errors.zig
new file mode 100644
index 0000000000..2cecd78653
--- /dev/null
+++ b/test/stage2/compile_errors.zig
@@ -0,0 +1,30 @@
+const TestContext = @import("../../src-self-hosted/test.zig").TestContext;
+
+pub fn addCases(ctx: *TestContext) !void {
+ try ctx.testCompileError(
+ \\export fn entry() void {}
+ \\export fn entry() void {}
+ , "1.zig", 2, 8, "exported symbol collision: 'entry'");
+
+ try ctx.testCompileError(
+ \\fn() void {}
+ , "1.zig", 1, 1, "missing function name");
+
+ try ctx.testCompileError(
+ \\comptime {
+ \\ return;
+ \\}
+ , "1.zig", 2, 5, "return expression outside function definition");
+
+ try ctx.testCompileError(
+ \\export fn entry() void {
+ \\ defer return;
+ \\}
+ , "1.zig", 2, 11, "cannot return from defer expression");
+
+ try ctx.testCompileError(
+ \\export fn entry() c_int {
+ \\ return 36893488147419103232;
+ \\}
+ , "1.zig", 2, 12, "integer value '36893488147419103232' cannot be stored in type 'c_int'");
+}
diff --git a/test/standalone/brace_expansion/build.zig b/test/standalone/brace_expansion/build.zig
index 7752f599df..64f3c08583 100644
--- a/test/standalone/brace_expansion/build.zig
+++ b/test/standalone/brace_expansion/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const main = b.addTest("main.zig");
main.setBuildMode(b.standardReleaseOptions());
diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig
index d4d86ae12c..ccb4f6dd45 100644
--- a/test/standalone/brace_expansion/main.zig
+++ b/test/standalone/brace_expansion/main.zig
@@ -14,9 +14,9 @@ const Token = union(enum) {
Eof,
};
-var global_allocator: &mem.Allocator = undefined;
+var global_allocator: *mem.Allocator = undefined;
-fn tokenize(input:[] const u8) !ArrayList(Token) {
+fn tokenize(input: []const u8) !ArrayList(Token) {
const State = enum {
Start,
Word,
@@ -41,7 +41,7 @@ fn tokenize(input:[] const u8) !ArrayList(Token) {
State.Word => switch (b) {
'a'...'z', 'A'...'Z' => {},
'{', '}', ',' => {
- try token_list.append(Token { .Word = input[tok_begin..i] });
+ try token_list.append(Token{ .Word = input[tok_begin..i] });
switch (b) {
'{' => try token_list.append(Token.OpenBrace),
'}' => try token_list.append(Token.CloseBrace),
@@ -56,7 +56,7 @@ fn tokenize(input:[] const u8) !ArrayList(Token) {
}
switch (state) {
State.Start => {},
- State.Word => try token_list.append(Token {.Word = input[tok_begin..] }),
+ State.Word => try token_list.append(Token{ .Word = input[tok_begin..] }),
}
try token_list.append(Token.Eof);
return token_list;
@@ -68,24 +68,24 @@ const Node = union(enum) {
Combine: []Node,
};
-const ParseError = error {
+const ParseError = error{
InvalidInput,
OutOfMemory,
};
-fn parse(tokens: &const ArrayList(Token), token_index: &usize) ParseError!Node {
- const first_token = tokens.items[*token_index];
- *token_index += 1;
+fn parse(tokens: *const ArrayList(Token), token_index: *usize) ParseError!Node {
+ const first_token = tokens.items[token_index.*];
+ token_index.* += 1;
const result_node = switch (first_token) {
- Token.Word => |word| Node { .Scalar = word },
+ Token.Word => |word| Node{ .Scalar = word },
Token.OpenBrace => blk: {
var list = ArrayList(Node).init(global_allocator);
while (true) {
try list.append(try parse(tokens, token_index));
- const token = tokens.items[*token_index];
- *token_index += 1;
+ const token = tokens.items[token_index.*];
+ token_index.* += 1;
switch (token) {
Token.CloseBrace => break,
@@ -93,23 +93,23 @@ fn parse(tokens: &const ArrayList(Token), token_index: &usize) ParseError!Node {
else => return error.InvalidInput,
}
}
- break :blk Node { .List = list };
+ break :blk Node{ .List = list };
},
else => return error.InvalidInput,
};
- switch (tokens.items[*token_index]) {
+ switch (tokens.items[token_index.*]) {
Token.Word, Token.OpenBrace => {
const pair = try global_allocator.alloc(Node, 2);
pair[0] = result_node;
pair[1] = try parse(tokens, token_index);
- return Node { .Combine = pair };
+ return Node{ .Combine = pair };
},
else => return result_node,
}
}
-fn expandString(input: []const u8, output: &Buffer) !void {
+fn expandString(input: []const u8, output: *Buffer) !void {
const tokens = try tokenize(input);
if (tokens.len == 1) {
return output.resize(0);
@@ -137,13 +137,11 @@ fn expandString(input: []const u8, output: &Buffer) !void {
}
}
-const ExpandNodeError = error {
- OutOfMemory,
-};
+const ExpandNodeError = error{OutOfMemory};
-fn expandNode(node: &const Node, output: &ArrayList(Buffer)) ExpandNodeError!void {
+fn expandNode(node: *const Node, output: *ArrayList(Buffer)) ExpandNodeError!void {
assert(output.len == 0);
- switch (*node) {
+ switch (node.*) {
Node.Scalar => |scalar| {
try output.append(try Buffer.init(global_allocator, scalar));
},
diff --git a/test/standalone/issue_339/build.zig b/test/standalone/issue_339/build.zig
index f3ab327006..733b3729c1 100644
--- a/test/standalone/issue_339/build.zig
+++ b/test/standalone/issue_339/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const obj = b.addObject("test", "test.zig");
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_339/test.zig b/test/standalone/issue_339/test.zig
index f65b9f734e..f4068dcfac 100644
--- a/test/standalone/issue_339/test.zig
+++ b/test/standalone/issue_339/test.zig
@@ -1,5 +1,8 @@
const StackTrace = @import("builtin").StackTrace;
-pub fn panic(msg: []const u8, stack_trace: ?&StackTrace) noreturn { @breakpoint(); while (true) {} }
+pub fn panic(msg: []const u8, stack_trace: ?*StackTrace) noreturn {
+ @breakpoint();
+ while (true) {}
+}
fn bar() error!void {}
diff --git a/test/standalone/issue_794/build.zig b/test/standalone/issue_794/build.zig
index 4f5dcd7ff4..06c37a83a3 100644
--- a/test/standalone/issue_794/build.zig
+++ b/test/standalone/issue_794/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const test_artifact = b.addTest("main.zig");
test_artifact.addIncludeDir("a_directory");
diff --git a/test/standalone/load_dynamic_library/add.zig b/test/standalone/load_dynamic_library/add.zig
new file mode 100644
index 0000000000..a04ec1544d
--- /dev/null
+++ b/test/standalone/load_dynamic_library/add.zig
@@ -0,0 +1,3 @@
+export fn add(a: i32, b: i32) i32 {
+ return a + b;
+}
diff --git a/test/standalone/load_dynamic_library/build.zig b/test/standalone/load_dynamic_library/build.zig
new file mode 100644
index 0000000000..2d47a893f2
--- /dev/null
+++ b/test/standalone/load_dynamic_library/build.zig
@@ -0,0 +1,21 @@
+const Builder = @import("std").build.Builder;
+
+pub fn build(b: *Builder) void {
+ const opts = b.standardReleaseOptions();
+
+ const lib = b.addSharedLibrary("add", "add.zig", b.version(1, 0, 0));
+ lib.setBuildMode(opts);
+
+ const main = b.addExecutable("main", "main.zig");
+ main.setBuildMode(opts);
+
+ const run = b.addCommand(".", b.env_map, [][]const u8{
+ main.getOutputPath(),
+ lib.getOutputPath(),
+ });
+ run.step.dependOn(&lib.step);
+ run.step.dependOn(&main.step);
+
+ const test_step = b.step("test", "Test the program");
+ test_step.dependOn(&run.step);
+}
diff --git a/test/standalone/load_dynamic_library/main.zig b/test/standalone/load_dynamic_library/main.zig
new file mode 100644
index 0000000000..4c45ad6fde
--- /dev/null
+++ b/test/standalone/load_dynamic_library/main.zig
@@ -0,0 +1,17 @@
+const std = @import("std");
+
+pub fn main() !void {
+ const args = try std.os.argsAlloc(std.debug.global_allocator);
+ defer std.os.argsFree(std.debug.global_allocator, args);
+
+ const dynlib_name = args[1];
+
+ var lib = try std.DynLib.open(std.debug.global_allocator, dynlib_name);
+ defer lib.close();
+
+ const addr = lib.lookup("add") orelse return error.SymbolNotFound;
+ const addFn = @intToPtr(extern fn (i32, i32) i32, addr);
+
+ const result = addFn(12, 34);
+ std.debug.assert(result == 46);
+}
diff --git a/test/standalone/pkg_import/build.zig b/test/standalone/pkg_import/build.zig
index bb9416d3c4..e0b3885dc3 100644
--- a/test/standalone/pkg_import/build.zig
+++ b/test/standalone/pkg_import/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const exe = b.addExecutable("test", "test.zig");
exe.addPackagePath("my_pkg", "pkg.zig");
diff --git a/test/standalone/pkg_import/pkg.zig b/test/standalone/pkg_import/pkg.zig
index abb977a2ef..19ab525b81 100644
--- a/test/standalone/pkg_import/pkg.zig
+++ b/test/standalone/pkg_import/pkg.zig
@@ -1 +1,3 @@
-pub fn add(a: i32, b: i32) i32 { return a + b; }
+pub fn add(a: i32, b: i32) i32 {
+ return a + b;
+}
diff --git a/test/standalone/use_alias/build.zig b/test/standalone/use_alias/build.zig
index ecbba297d8..c700d43db9 100644
--- a/test/standalone/use_alias/build.zig
+++ b/test/standalone/use_alias/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
b.addCIncludePath(".");
const main = b.addTest("main.zig");
diff --git a/test/standalone/use_alias/main.zig b/test/standalone/use_alias/main.zig
index 40cab9ad8a..873393cef7 100644
--- a/test/standalone/use_alias/main.zig
+++ b/test/standalone/use_alias/main.zig
@@ -2,7 +2,7 @@ const c = @import("c.zig");
const assert = @import("std").debug.assert;
test "symbol exists" {
- var foo = c.Foo {
+ var foo = c.Foo{
.a = 1,
.b = 1,
};
diff --git a/test/tests.zig b/test/tests.zig
index 19a4f82b74..aa5eed17ee 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -27,18 +27,18 @@ const TestTarget = struct {
environ: builtin.Environ,
};
-const test_targets = []TestTarget {
- TestTarget {
+const test_targets = []TestTarget{
+ TestTarget{
.os = builtin.Os.linux,
.arch = builtin.Arch.x86_64,
.environ = builtin.Environ.gnu,
},
- TestTarget {
+ TestTarget{
.os = builtin.Os.macosx,
.arch = builtin.Arch.x86_64,
.environ = builtin.Environ.unknown,
},
- TestTarget {
+ TestTarget{
.os = builtin.Os.windows,
.arch = builtin.Arch.x86_64,
.environ = builtin.Environ.msvc,
@@ -47,120 +47,117 @@ const test_targets = []TestTarget {
const max_stdout_size = 1 * 1024 * 1024; // 1 MB
-pub fn addCompareOutputTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- *cases = CompareOutputContext {
+pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+ const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-compare-output", "Run the compare output tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ .modes = modes,
+ }) catch unreachable;
compare_output.addCases(cases);
return cases.step;
}
-pub fn addRuntimeSafetyTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- *cases = CompareOutputContext {
+pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+ const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-runtime-safety", "Run the runtime safety tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ .modes = modes,
+ }) catch unreachable;
runtime_safety.addCases(cases);
return cases.step;
}
-pub fn addCompileErrorTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
- const cases = b.allocator.create(CompileErrorContext) catch unreachable;
- *cases = CompileErrorContext {
+pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+ const cases = b.allocator.create(CompileErrorContext{
.b = b,
.step = b.step("test-compile-errors", "Run the compile error tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ .modes = modes,
+ }) catch unreachable;
compile_errors.addCases(cases);
return cases.step;
}
-pub fn addBuildExampleTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
- const cases = b.allocator.create(BuildExamplesContext) catch unreachable;
- *cases = BuildExamplesContext {
+pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+ const cases = b.allocator.create(BuildExamplesContext{
.b = b,
.step = b.step("test-build-examples", "Build the examples"),
.test_index = 0,
.test_filter = test_filter,
- };
+ .modes = modes,
+ }) catch unreachable;
build_examples.addCases(cases);
return cases.step;
}
-pub fn addAssembleAndLinkTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- *cases = CompareOutputContext {
+pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+ const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-asm-link", "Run the assemble and link tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ .modes = modes,
+ }) catch unreachable;
assemble_and_link.addCases(cases);
return cases.step;
}
-pub fn addTranslateCTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
- const cases = b.allocator.create(TranslateCContext) catch unreachable;
- *cases = TranslateCContext {
+pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
+ const cases = b.allocator.create(TranslateCContext{
.b = b,
.step = b.step("test-translate-c", "Run the C transation tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
translate_c.addCases(cases);
return cases.step;
}
-pub fn addGenHTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
- const cases = b.allocator.create(GenHContext) catch unreachable;
- *cases = GenHContext {
+pub fn addGenHTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
+ const cases = b.allocator.create(GenHContext{
.b = b,
.step = b.step("test-gen-h", "Run the C header file generation tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
gen_h.addCases(cases);
return cases.step;
}
-
-pub fn addPkgTests(b: &build.Builder, test_filter: ?[]const u8, root_src: []const u8,
- name:[] const u8, desc: []const u8, with_lldb: bool) &build.Step
-{
+pub fn addPkgTests(b: *build.Builder, test_filter: ?[]const u8, root_src: []const u8, name: []const u8, desc: []const u8, modes: []const Mode) *build.Step {
const step = b.step(b.fmt("test-{}", name), desc);
for (test_targets) |test_target| {
const is_native = (test_target.os == builtin.os and test_target.arch == builtin.arch);
- for ([]Mode{Mode.Debug, Mode.ReleaseSafe, Mode.ReleaseFast}) |mode| {
- for ([]bool{false, true}) |link_libc| {
+ for (modes) |mode| {
+ for ([]bool{
+ false,
+ true,
+ }) |link_libc| {
if (link_libc and !is_native) {
// don't assume we have a cross-compiling libc set up
continue;
}
const these_tests = b.addTest(root_src);
- these_tests.setNamePrefix(b.fmt("{}-{}-{}-{}-{} ", name, @tagName(test_target.os),
- @tagName(test_target.arch), @tagName(mode), if (link_libc) "c" else "bare"));
+ these_tests.setNamePrefix(b.fmt("{}-{}-{}-{}-{} ", name, @tagName(test_target.os), @tagName(test_target.arch), @tagName(mode), if (link_libc) "c" else "bare"));
these_tests.setFilter(test_filter);
these_tests.setBuildMode(mode);
if (!is_native) {
@@ -169,10 +166,6 @@ pub fn addPkgTests(b: &build.Builder, test_filter: ?[]const u8, root_src: []cons
if (link_libc) {
these_tests.linkSystemLibrary("c");
}
- if (with_lldb) {
- these_tests.setExecCmd([]?[]const u8{
- "lldb", null, "-o", "run", "-o", "bt", "-o", "exit"});
- }
step.dependOn(&these_tests.step);
}
}
@@ -181,10 +174,11 @@ pub fn addPkgTests(b: &build.Builder, test_filter: ?[]const u8, root_src: []cons
}
pub const CompareOutputContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
+ modes: []const Mode,
const Special = enum {
None,
@@ -205,34 +199,30 @@ pub const CompareOutputContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
- self.sources.append(SourceFile {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
+ self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn setCommandLineArgs(self: &TestCase, args: []const []const u8) void {
+ pub fn setCommandLineArgs(self: *TestCase, args: []const []const u8) void {
self.cli_args = args;
}
};
const RunCompareOutputStep = struct {
step: build.Step,
- context: &CompareOutputContext,
+ context: *CompareOutputContext,
exe_path: []const u8,
name: []const u8,
expected_output: []const u8,
test_index: usize,
cli_args: []const []const u8,
- pub fn create(context: &CompareOutputContext, exe_path: []const u8,
- name: []const u8, expected_output: []const u8,
- cli_args: []const []const u8) &RunCompareOutputStep
- {
+ pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, cli_args: []const []const u8) *RunCompareOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(RunCompareOutputStep) catch unreachable;
- *ptr = RunCompareOutputStep {
+ const ptr = allocator.create(RunCompareOutputStep{
.context = context,
.exe_path = exe_path,
.name = name,
@@ -240,12 +230,12 @@ pub const CompareOutputContext = struct {
.test_index = context.test_index,
.step = build.Step.init("RunCompareOutput", allocator, make),
.cli_args = cli_args,
- };
+ }) catch unreachable;
context.test_index += 1;
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(RunCompareOutputStep, "step", step);
const b = self.context.b;
@@ -258,7 +248,7 @@ pub const CompareOutputContext = struct {
args.append(arg) catch unreachable;
}
- warn("Test {}/{} {}...", self.test_index+1, self.context.test_index, self.name);
+ warn("Test {}/{} {}...", self.test_index + 1, self.context.test_index, self.name);
const child = os.ChildProcess.init(args.toSliceConst(), b.allocator) catch unreachable;
defer child.deinit();
@@ -273,8 +263,8 @@ pub const CompareOutputContext = struct {
var stdout = Buffer.initNull(b.allocator);
var stderr = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
stdout_file_in_stream.stream.readAllBuffer(&stdout, max_stdout_size) catch unreachable;
stderr_file_in_stream.stream.readAllBuffer(&stderr, max_stdout_size) catch unreachable;
@@ -295,7 +285,6 @@ pub const CompareOutputContext = struct {
},
}
-
if (!mem.eql(u8, self.expected_output, stdout.toSliceConst())) {
warn(
\\
@@ -313,34 +302,32 @@ pub const CompareOutputContext = struct {
const RuntimeSafetyRunStep = struct {
step: build.Step,
- context: &CompareOutputContext,
+ context: *CompareOutputContext,
exe_path: []const u8,
name: []const u8,
test_index: usize,
- pub fn create(context: &CompareOutputContext, exe_path: []const u8,
- name: []const u8) &RuntimeSafetyRunStep
- {
+ pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8) *RuntimeSafetyRunStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(RuntimeSafetyRunStep) catch unreachable;
- *ptr = RuntimeSafetyRunStep {
+ const ptr = allocator.create(RuntimeSafetyRunStep{
.context = context,
.exe_path = exe_path,
.name = name,
.test_index = context.test_index,
.step = build.Step.init("RuntimeSafetyRun", allocator, make),
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(RuntimeSafetyRunStep, "step", step);
const b = self.context.b;
const full_exe_path = b.pathFromRoot(self.exe_path);
- warn("Test {}/{} {}...", self.test_index+1, self.context.test_index, self.name);
+ warn("Test {}/{} {}...", self.test_index + 1, self.context.test_index, self.name);
const child = os.ChildProcess.init([][]u8{full_exe_path}, b.allocator) catch unreachable;
defer child.deinit();
@@ -358,19 +345,16 @@ pub const CompareOutputContext = struct {
switch (term) {
Term.Exited => |code| {
if (code != expected_exit_code) {
- warn("\nProgram expected to exit with code {} " ++
- "but exited with code {}\n", expected_exit_code, code);
+ warn("\nProgram expected to exit with code {} " ++ "but exited with code {}\n", expected_exit_code, code);
return error.TestFailed;
}
},
Term.Signal => |sig| {
- warn("\nProgram expected to exit with code {} " ++
- "but instead signaled {}\n", expected_exit_code, sig);
+ warn("\nProgram expected to exit with code {} " ++ "but instead signaled {}\n", expected_exit_code, sig);
return error.TestFailed;
},
else => {
- warn("\nProgram expected to exit with code {}" ++
- " but exited in an unexpected way\n", expected_exit_code);
+ warn("\nProgram expected to exit with code {}" ++ " but exited in an unexpected way\n", expected_exit_code);
return error.TestFailed;
},
}
@@ -379,10 +363,8 @@ pub const CompareOutputContext = struct {
}
};
- pub fn createExtra(self: &CompareOutputContext, name: []const u8, source: []const u8,
- expected_output: []const u8, special: Special) TestCase
- {
- var tc = TestCase {
+ pub fn createExtra(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8, special: Special) TestCase {
+ var tc = TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_output = expected_output,
@@ -395,34 +377,32 @@ pub const CompareOutputContext = struct {
return tc;
}
- pub fn create(self: &CompareOutputContext, name: []const u8, source: []const u8,
- expected_output: []const u8) TestCase
- {
+ pub fn create(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) TestCase {
return createExtra(self, name, source, expected_output, Special.None);
}
- pub fn addC(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ pub fn addC(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
var tc = self.create(name, source, expected_output);
tc.link_libc = true;
self.addCase(tc);
}
- pub fn add(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ pub fn add(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
const tc = self.create(name, source, expected_output);
self.addCase(tc);
}
- pub fn addAsm(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ pub fn addAsm(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
const tc = self.createExtra(name, source, expected_output, Special.Asm);
self.addCase(tc);
}
- pub fn addRuntimeSafety(self: &CompareOutputContext, name: []const u8, source: []const u8) void {
+ pub fn addRuntimeSafety(self: *CompareOutputContext, name: []const u8, source: []const u8) void {
const tc = self.createExtra(name, source, undefined, Special.RuntimeSafety);
self.addCase(tc);
}
- pub fn addCase(self: &CompareOutputContext, case: &const TestCase) void {
+ pub fn addCase(self: *CompareOutputContext, case: *const TestCase) void {
const b = self.b;
const root_src = os.path.join(b.allocator, b.cache_root, case.sources.items[0].filename) catch unreachable;
@@ -431,8 +411,7 @@ pub const CompareOutputContext = struct {
Special.Asm => {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "assemble-and-link {}", case.name) catch unreachable;
if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null)
- return;
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
const exe = b.addExecutable("test", null);
@@ -444,19 +423,16 @@ pub const CompareOutputContext = struct {
exe.step.dependOn(&write_src.step);
}
- const run_and_cmp_output = RunCompareOutputStep.create(self, exe.getOutputPath(), annotated_case_name,
- case.expected_output, case.cli_args);
+ const run_and_cmp_output = RunCompareOutputStep.create(self, exe.getOutputPath(), annotated_case_name, case.expected_output, case.cli_args);
run_and_cmp_output.step.dependOn(&exe.step);
self.step.dependOn(&run_and_cmp_output.step);
},
Special.None => {
- for ([]Mode{Mode.Debug, Mode.ReleaseSafe, Mode.ReleaseFast}) |mode| {
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "{} {} ({})",
- "compare-output", case.name, @tagName(mode)) catch unreachable;
+ for (self.modes) |mode| {
+ const annotated_case_name = fmt.allocPrint(self.b.allocator, "{} {} ({})", "compare-output", case.name, @tagName(mode)) catch unreachable;
if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null)
- continue;
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
}
const exe = b.addExecutable("test", root_src);
@@ -471,8 +447,7 @@ pub const CompareOutputContext = struct {
exe.step.dependOn(&write_src.step);
}
- const run_and_cmp_output = RunCompareOutputStep.create(self, exe.getOutputPath(),
- annotated_case_name, case.expected_output, case.cli_args);
+ const run_and_cmp_output = RunCompareOutputStep.create(self, exe.getOutputPath(), annotated_case_name, case.expected_output, case.cli_args);
run_and_cmp_output.step.dependOn(&exe.step);
self.step.dependOn(&run_and_cmp_output.step);
@@ -481,8 +456,7 @@ pub const CompareOutputContext = struct {
Special.RuntimeSafety => {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "safety {}", case.name) catch unreachable;
if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null)
- return;
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
const exe = b.addExecutable("test", root_src);
@@ -506,10 +480,11 @@ pub const CompareOutputContext = struct {
};
pub const CompileErrorContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
+ modes: []const Mode,
const TestCase = struct {
name: []const u8,
@@ -523,44 +498,42 @@ pub const CompileErrorContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
- self.sources.append(SourceFile {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
+ self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn addExpectedError(self: &TestCase, text: []const u8) void {
+ pub fn addExpectedError(self: *TestCase, text: []const u8) void {
self.expected_errors.append(text) catch unreachable;
}
};
const CompileCmpOutputStep = struct {
step: build.Step,
- context: &CompileErrorContext,
+ context: *CompileErrorContext,
name: []const u8,
test_index: usize,
- case: &const TestCase,
+ case: *const TestCase,
build_mode: Mode,
- pub fn create(context: &CompileErrorContext, name: []const u8,
- case: &const TestCase, build_mode: Mode) &CompileCmpOutputStep
- {
+ pub fn create(context: *CompileErrorContext, name: []const u8, case: *const TestCase, build_mode: Mode) *CompileCmpOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(CompileCmpOutputStep) catch unreachable;
- *ptr = CompileCmpOutputStep {
+ const ptr = allocator.create(CompileCmpOutputStep{
.step = build.Step.init("CompileCmpOutput", allocator, make),
.context = context,
.name = name,
.test_index = context.test_index,
.case = case,
.build_mode = build_mode,
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(CompileCmpOutputStep, "step", step);
const b = self.context.b;
@@ -583,9 +556,10 @@ pub const CompileErrorContext = struct {
Mode.Debug => {},
Mode.ReleaseSafe => zig_args.append("--release-safe") catch unreachable,
Mode.ReleaseFast => zig_args.append("--release-fast") catch unreachable,
+ Mode.ReleaseSmall => zig_args.append("--release-small") catch unreachable,
}
- warn("Test {}/{} {}...", self.test_index+1, self.context.test_index, self.name);
+ warn("Test {}/{} {}...", self.test_index + 1, self.context.test_index, self.name);
if (b.verbose) {
printInvocation(zig_args.toSliceConst());
@@ -604,8 +578,8 @@ pub const CompileErrorContext = struct {
var stdout_buf = Buffer.initNull(b.allocator);
var stderr_buf = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
stdout_file_in_stream.stream.readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable;
stderr_file_in_stream.stream.readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable;
@@ -625,7 +599,6 @@ pub const CompileErrorContext = struct {
},
}
-
const stdout = stdout_buf.toSliceConst();
const stderr = stderr_buf.toSliceConst();
@@ -665,17 +638,15 @@ pub const CompileErrorContext = struct {
warn("\n");
}
- pub fn create(self: &CompileErrorContext, name: []const u8, source: []const u8,
- expected_lines: ...) &TestCase
- {
- const tc = self.b.allocator.create(TestCase) catch unreachable;
- *tc = TestCase {
+ pub fn create(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
+ const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_errors = ArrayList([]const u8).init(self.b.allocator),
.link_libc = false,
.is_exe = false,
- };
+ }) catch unreachable;
+
tc.addSourceFile(".tmp_source.zig", source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
@@ -684,32 +655,30 @@ pub const CompileErrorContext = struct {
return tc;
}
- pub fn addC(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addC(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
var tc = self.create(name, source, expected_lines);
tc.link_libc = true;
self.addCase(tc);
}
- pub fn addExe(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addExe(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
var tc = self.create(name, source, expected_lines);
tc.is_exe = true;
self.addCase(tc);
}
- pub fn add(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn add(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(name, source, expected_lines);
self.addCase(tc);
}
- pub fn addCase(self: &CompileErrorContext, case: &const TestCase) void {
+ pub fn addCase(self: *CompileErrorContext, case: *const TestCase) void {
const b = self.b;
- for ([]Mode{Mode.Debug, Mode.ReleaseSafe, Mode.ReleaseFast}) |mode| {
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "compile-error {} ({})",
- case.name, @tagName(mode)) catch unreachable;
+ for (self.modes) |mode| {
+ const annotated_case_name = fmt.allocPrint(self.b.allocator, "compile-error {} ({})", case.name, @tagName(mode)) catch unreachable;
if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null)
- continue;
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
}
const compile_and_cmp_errors = CompileCmpOutputStep.create(self, annotated_case_name, case, mode);
@@ -725,26 +694,26 @@ pub const CompileErrorContext = struct {
};
pub const BuildExamplesContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
+ modes: []const Mode,
- pub fn addC(self: &BuildExamplesContext, root_src: []const u8) void {
+ pub fn addC(self: *BuildExamplesContext, root_src: []const u8) void {
self.addAllArgs(root_src, true);
}
- pub fn add(self: &BuildExamplesContext, root_src: []const u8) void {
+ pub fn add(self: *BuildExamplesContext, root_src: []const u8) void {
self.addAllArgs(root_src, false);
}
- pub fn addBuildFile(self: &BuildExamplesContext, build_file: []const u8) void {
+ pub fn addBuildFile(self: *BuildExamplesContext, build_file: []const u8) void {
const b = self.b;
const annotated_case_name = b.fmt("build {} (Debug)", build_file);
if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null)
- return;
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
var zig_args = ArrayList([]const u8).init(b.allocator);
@@ -769,15 +738,13 @@ pub const BuildExamplesContext = struct {
self.step.dependOn(&log_step.step);
}
- pub fn addAllArgs(self: &BuildExamplesContext, root_src: []const u8, link_libc: bool) void {
+ pub fn addAllArgs(self: *BuildExamplesContext, root_src: []const u8, link_libc: bool) void {
const b = self.b;
- for ([]Mode{Mode.Debug, Mode.ReleaseSafe, Mode.ReleaseFast}) |mode| {
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "build {} ({})",
- root_src, @tagName(mode)) catch unreachable;
+ for (self.modes) |mode| {
+ const annotated_case_name = fmt.allocPrint(self.b.allocator, "build {} ({})", root_src, @tagName(mode)) catch unreachable;
if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null)
- continue;
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
}
const exe = b.addExecutable("test", root_src);
@@ -795,8 +762,8 @@ pub const BuildExamplesContext = struct {
};
pub const TranslateCContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -811,40 +778,40 @@ pub const TranslateCContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
- self.sources.append(SourceFile {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
+ self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn addExpectedLine(self: &TestCase, text: []const u8) void {
+ pub fn addExpectedLine(self: *TestCase, text: []const u8) void {
self.expected_lines.append(text) catch unreachable;
}
};
const TranslateCCmpOutputStep = struct {
step: build.Step,
- context: &TranslateCContext,
+ context: *TranslateCContext,
name: []const u8,
test_index: usize,
- case: &const TestCase,
+ case: *const TestCase,
- pub fn create(context: &TranslateCContext, name: []const u8, case: &const TestCase) &TranslateCCmpOutputStep {
+ pub fn create(context: *TranslateCContext, name: []const u8, case: *const TestCase) *TranslateCCmpOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(TranslateCCmpOutputStep) catch unreachable;
- *ptr = TranslateCCmpOutputStep {
+ const ptr = allocator.create(TranslateCCmpOutputStep{
.step = build.Step.init("ParseCCmpOutput", allocator, make),
.context = context,
.name = name,
.test_index = context.test_index,
.case = case,
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(TranslateCCmpOutputStep, "step", step);
const b = self.context.b;
@@ -856,7 +823,7 @@ pub const TranslateCContext = struct {
zig_args.append("translate-c") catch unreachable;
zig_args.append(b.pathFromRoot(root_src)) catch unreachable;
- warn("Test {}/{} {}...", self.test_index+1, self.context.test_index, self.name);
+ warn("Test {}/{} {}...", self.test_index + 1, self.context.test_index, self.name);
if (b.verbose) {
printInvocation(zig_args.toSliceConst());
@@ -875,8 +842,8 @@ pub const TranslateCContext = struct {
var stdout_buf = Buffer.initNull(b.allocator);
var stderr_buf = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
stdout_file_in_stream.stream.readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable;
stderr_file_in_stream.stream.readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable;
@@ -938,16 +905,14 @@ pub const TranslateCContext = struct {
warn("\n");
}
- pub fn create(self: &TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8,
- source: []const u8, expected_lines: ...) &TestCase
- {
- const tc = self.b.allocator.create(TestCase) catch unreachable;
- *tc = TestCase {
+ pub fn create(self: *TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
+ const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_lines = ArrayList([]const u8).init(self.b.allocator),
.allow_warnings = allow_warnings,
- };
+ }) catch unreachable;
+
tc.addSourceFile(filename, source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
@@ -956,28 +921,27 @@ pub const TranslateCContext = struct {
return tc;
}
- pub fn add(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn add(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(false, "source.h", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addC(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addC(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(false, "source.c", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addAllowWarnings(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addAllowWarnings(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(true, "source.h", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addCase(self: &TranslateCContext, case: &const TestCase) void {
+ pub fn addCase(self: *TranslateCContext, case: *const TestCase) void {
const b = self.b;
const annotated_case_name = fmt.allocPrint(self.b.allocator, "translate-c {}", case.name) catch unreachable;
if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null)
- return;
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
const translate_c_and_cmp = TranslateCCmpOutputStep.create(self, annotated_case_name, case);
@@ -992,8 +956,8 @@ pub const TranslateCContext = struct {
};
pub const GenHContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -1007,46 +971,46 @@ pub const GenHContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
- self.sources.append(SourceFile {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
+ self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn addExpectedLine(self: &TestCase, text: []const u8) void {
+ pub fn addExpectedLine(self: *TestCase, text: []const u8) void {
self.expected_lines.append(text) catch unreachable;
}
};
const GenHCmpOutputStep = struct {
step: build.Step,
- context: &GenHContext,
+ context: *GenHContext,
h_path: []const u8,
name: []const u8,
test_index: usize,
- case: &const TestCase,
+ case: *const TestCase,
- pub fn create(context: &GenHContext, h_path: []const u8, name: []const u8, case: &const TestCase) &GenHCmpOutputStep {
+ pub fn create(context: *GenHContext, h_path: []const u8, name: []const u8, case: *const TestCase) *GenHCmpOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(GenHCmpOutputStep) catch unreachable;
- *ptr = GenHCmpOutputStep {
+ const ptr = allocator.create(GenHCmpOutputStep{
.step = build.Step.init("ParseCCmpOutput", allocator, make),
.context = context,
.h_path = h_path,
.name = name,
.test_index = context.test_index,
.case = case,
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(GenHCmpOutputStep, "step", step);
const b = self.context.b;
- warn("Test {}/{} {}...", self.test_index+1, self.context.test_index, self.name);
+ warn("Test {}/{} {}...", self.test_index + 1, self.context.test_index, self.name);
const full_h_path = b.pathFromRoot(self.h_path);
const actual_h = try io.readFileAlloc(b.allocator, full_h_path);
@@ -1075,15 +1039,13 @@ pub const GenHContext = struct {
warn("\n");
}
- pub fn create(self: &GenHContext, filename: []const u8, name: []const u8,
- source: []const u8, expected_lines: ...) &TestCase
- {
- const tc = self.b.allocator.create(TestCase) catch unreachable;
- *tc = TestCase {
+ pub fn create(self: *GenHContext, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
+ const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_lines = ArrayList([]const u8).init(self.b.allocator),
- };
+ }) catch unreachable;
+
tc.addSourceFile(filename, source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
@@ -1092,20 +1054,19 @@ pub const GenHContext = struct {
return tc;
}
- pub fn add(self: &GenHContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn add(self: *GenHContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create("test.zig", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addCase(self: &GenHContext, case: &const TestCase) void {
+ pub fn addCase(self: *GenHContext, case: *const TestCase) void {
const b = self.b;
const root_src = os.path.join(b.allocator, b.cache_root, case.sources.items[0].filename) catch unreachable;
const mode = builtin.Mode.Debug;
const annotated_case_name = fmt.allocPrint(self.b.allocator, "gen-h {} ({})", case.name, @tagName(mode)) catch unreachable;
if (self.test_filter) |filter| {
- if (mem.indexOf(u8, annotated_case_name, filter) == null)
- return;
+ if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
const obj = b.addObject("test", root_src);
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 9a69c2b03e..b31e515aa2 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -1,6 +1,72 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.TranslateCContext) void {
+pub fn addCases(cases: *tests.TranslateCContext) void {
+ cases.add("for loop with var init but empty body",
+ \\void foo(void) {
+ \\ for (int x = 0; x < 10; x++);
+ \\}
+ ,
+ \\pub fn foo() void {
+ \\ {
+ \\ var x: c_int = 0;
+ \\ while (x < 10) : (x += 1) {}
+ \\ }
+ \\}
+ );
+
+ cases.add("do while with empty body",
+ \\void foo(void) {
+ \\ do ; while (1);
+ \\}
+ , // TODO this should be if (!(1 != 0)) break
+ \\pub fn foo() void {
+ \\ while (true) {
+ \\ if (!1) break;
+ \\ }
+ \\}
+ );
+
+ cases.add("for with empty body",
+ \\void foo(void) {
+ \\ for (;;);
+ \\}
+ ,
+ \\pub fn foo() void {
+ \\ while (true) {}
+ \\}
+ );
+
+ cases.add("while with empty body",
+ \\void foo(void) {
+ \\ while (1);
+ \\}
+ ,
+ \\pub fn foo() void {
+ \\ while (1 != 0) {}
+ \\}
+ );
+
+ cases.add("double define struct",
+ \\typedef struct Bar Bar;
+ \\typedef struct Foo Foo;
+ \\
+ \\struct Foo {
+ \\ Foo *a;
+ \\};
+ \\
+ \\struct Bar {
+ \\ Foo *a;
+ \\};
+ ,
+ \\pub const struct_Foo = extern struct {
+ \\ a: ?[*]Foo,
+ \\};
+ \\pub const Foo = struct_Foo;
+ \\pub const struct_Bar = extern struct {
+ \\ a: ?[*]Foo,
+ \\};
+ );
+
cases.addAllowWarnings("simple data types",
\\#include <stdint.h>
\\int foo(char a, unsigned char b, signed char c);
@@ -53,10 +119,32 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\pub const Foo = enum_Foo;
);
+ cases.add("enums",
+ \\enum Foo {
+ \\ FooA = 2,
+ \\ FooB = 5,
+ \\ Foo1,
+ \\};
+ ,
+ \\pub const enum_Foo = extern enum {
+ \\ A = 2,
+ \\ B = 5,
+ \\ @"1" = 6,
+ \\};
+ ,
+ \\pub const FooA = enum_Foo.A;
+ ,
+ \\pub const FooB = enum_Foo.B;
+ ,
+ \\pub const Foo1 = enum_Foo.@"1";
+ ,
+ \\pub const Foo = enum_Foo;
+ );
+
cases.add("restrict -> noalias",
\\void foo(void *restrict bar, void *restrict);
,
- \\pub extern fn foo(noalias bar: ?&c_void, noalias arg1: ?&c_void) void;
+ \\pub extern fn foo(noalias bar: ?*c_void, noalias arg1: ?*c_void) void;
);
cases.add("simple struct",
@@ -67,7 +155,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\const struct_Foo = extern struct {
\\ x: c_int,
- \\ y: ?&u8,
+ \\ y: ?[*]u8,
\\};
,
\\pub const Foo = struct_Foo;
@@ -98,7 +186,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub const BarB = enum_Bar.B;
,
- \\pub extern fn func(a: ?&struct_Foo, b: ?&(?&enum_Bar)) void;
+ \\pub extern fn func(a: ?[*]struct_Foo, b: ?[*](?[*]enum_Bar)) void;
,
\\pub const Foo = struct_Foo;
,
@@ -108,7 +196,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
cases.add("constant size array",
\\void func(int array[20]);
,
- \\pub extern fn func(array: ?&c_int) void;
+ \\pub extern fn func(array: ?[*]c_int) void;
);
cases.add("self referential struct with function pointer",
@@ -117,7 +205,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\};
,
\\pub const struct_Foo = extern struct {
- \\ derp: ?extern fn(?&struct_Foo) void,
+ \\ derp: ?extern fn(?[*]struct_Foo) void,
\\};
,
\\pub const Foo = struct_Foo;
@@ -129,7 +217,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub const struct_Foo = @OpaqueType();
,
- \\pub extern fn some_func(foo: ?&struct_Foo, x: c_int) ?&struct_Foo;
+ \\pub extern fn some_func(foo: ?*struct_Foo, x: c_int) ?*struct_Foo;
,
\\pub const Foo = struct_Foo;
);
@@ -176,11 +264,11 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\};
,
\\pub const struct_Bar = extern struct {
- \\ next: ?&struct_Foo,
+ \\ next: ?[*]struct_Foo,
\\};
,
\\pub const struct_Foo = extern struct {
- \\ next: ?&struct_Bar,
+ \\ next: ?[*]struct_Bar,
\\};
);
@@ -190,7 +278,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub const Foo = c_void;
,
- \\pub extern fn fun(a: ?&Foo) Foo;
+ \\pub extern fn fun(a: ?*Foo) Foo;
);
cases.add("generate inline func for #define global extern fn",
@@ -203,13 +291,13 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\pub extern var fn_ptr: ?extern fn() void;
,
\\pub inline fn foo() void {
- \\ return (??fn_ptr)();
+ \\ return fn_ptr.?();
\\}
,
\\pub extern var fn_ptr2: ?extern fn(c_int, f32) u8;
,
\\pub inline fn bar(arg0: c_int, arg1: f32) u8 {
- \\ return (??fn_ptr2)(arg0, arg1);
+ \\ return fn_ptr2.?(arg0, arg1);
\\}
);
@@ -462,7 +550,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 6;
\\}
,
- \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ if ((a != 0) and (b != 0)) return 0;
\\ if ((b != 0) and (c != null)) return 1;
\\ if ((a != 0) and (c != null)) return 2;
@@ -564,8 +652,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\pub const struct_Foo = extern struct {
\\ field: c_int,
\\};
- \\pub export fn read_field(foo: ?&struct_Foo) c_int {
- \\ return (??foo).field;
+ \\pub export fn read_field(foo: ?[*]struct_Foo) c_int {
+ \\ return foo.?.field;
\\}
);
@@ -595,7 +683,6 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\}
);
-
cases.addC("c style cast",
\\int float_to_int(float a) {
\\ return (int)a;
@@ -611,8 +698,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return x;
\\}
,
- \\pub export fn foo(x: ?&c_ushort) ?&c_void {
- \\ return @ptrCast(?&c_void, x);
+ \\pub export fn foo(x: ?[*]c_ushort) ?*c_void {
+ \\ return @ptrCast(?*c_void, x);
\\}
);
@@ -632,7 +719,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 0;
\\}
,
- \\pub export fn foo() ?&c_int {
+ \\pub export fn foo() ?[*]c_int {
\\ return null;
\\}
);
@@ -677,43 +764,43 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ var a: c_int = 0;
\\ a += x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) + 1);
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* + 1);
+ \\ break :x _ref.*;
\\ };
\\ a -= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) - 1);
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* - 1);
+ \\ break :x _ref.*;
\\ };
\\ a *= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) * 1);
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* * 1);
+ \\ break :x _ref.*;
\\ };
\\ a &= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) & 1);
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* & 1);
+ \\ break :x _ref.*;
\\ };
\\ a |= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) | 1);
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* | 1);
+ \\ break :x _ref.*;
\\ };
\\ a ^= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) ^ 1);
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* ^ 1);
+ \\ break :x _ref.*;
\\ };
\\ a >>= @import("std").math.Log2Int(c_int)(x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) >> @import("std").math.Log2Int(c_int)(1));
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* >> @import("std").math.Log2Int(c_int)(1));
+ \\ break :x _ref.*;
\\ });
\\ a <<= @import("std").math.Log2Int(c_int)(x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) << @import("std").math.Log2Int(c_int)(1));
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* << @import("std").math.Log2Int(c_int)(1));
+ \\ break :x _ref.*;
\\ });
\\}
);
@@ -735,43 +822,43 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ var a: c_uint = c_uint(0);
\\ a +%= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) +% c_uint(1));
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* +% c_uint(1));
+ \\ break :x _ref.*;
\\ };
\\ a -%= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) -% c_uint(1));
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* -% c_uint(1));
+ \\ break :x _ref.*;
\\ };
\\ a *%= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) *% c_uint(1));
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* *% c_uint(1));
+ \\ break :x _ref.*;
\\ };
\\ a &= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) & c_uint(1));
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* & c_uint(1));
+ \\ break :x _ref.*;
\\ };
\\ a |= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) | c_uint(1));
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* | c_uint(1));
+ \\ break :x _ref.*;
\\ };
\\ a ^= x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) ^ c_uint(1));
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* ^ c_uint(1));
+ \\ break :x _ref.*;
\\ };
\\ a >>= @import("std").math.Log2Int(c_uint)(x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) >> @import("std").math.Log2Int(c_uint)(1));
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* >> @import("std").math.Log2Int(c_uint)(1));
+ \\ break :x _ref.*;
\\ });
\\ a <<= @import("std").math.Log2Int(c_uint)(x: {
\\ const _ref = &a;
- \\ (*_ref) = ((*_ref) << @import("std").math.Log2Int(c_uint)(1));
- \\ break :x *_ref;
+ \\ _ref.* = (_ref.* << @import("std").math.Log2Int(c_uint)(1));
+ \\ break :x _ref.*;
\\ });
\\}
);
@@ -810,26 +897,26 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ u -%= 1;
\\ i = x: {
\\ const _ref = &i;
- \\ const _tmp = *_ref;
- \\ (*_ref) += 1;
+ \\ const _tmp = _ref.*;
+ \\ _ref.* += 1;
\\ break :x _tmp;
\\ };
\\ i = x: {
\\ const _ref = &i;
- \\ const _tmp = *_ref;
- \\ (*_ref) -= 1;
+ \\ const _tmp = _ref.*;
+ \\ _ref.* -= 1;
\\ break :x _tmp;
\\ };
\\ u = x: {
\\ const _ref = &u;
- \\ const _tmp = *_ref;
- \\ (*_ref) +%= 1;
+ \\ const _tmp = _ref.*;
+ \\ _ref.* +%= 1;
\\ break :x _tmp;
\\ };
\\ u = x: {
\\ const _ref = &u;
- \\ const _tmp = *_ref;
- \\ (*_ref) -%= 1;
+ \\ const _tmp = _ref.*;
+ \\ _ref.* -%= 1;
\\ break :x _tmp;
\\ };
\\}
@@ -858,23 +945,23 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ u -%= 1;
\\ i = x: {
\\ const _ref = &i;
- \\ (*_ref) += 1;
- \\ break :x *_ref;
+ \\ _ref.* += 1;
+ \\ break :x _ref.*;
\\ };
\\ i = x: {
\\ const _ref = &i;
- \\ (*_ref) -= 1;
- \\ break :x *_ref;
+ \\ _ref.* -= 1;
+ \\ break :x _ref.*;
\\ };
\\ u = x: {
\\ const _ref = &u;
- \\ (*_ref) +%= 1;
- \\ break :x *_ref;
+ \\ _ref.* +%= 1;
+ \\ break :x _ref.*;
\\ };
\\ u = x: {
\\ const _ref = &u;
- \\ (*_ref) -%= 1;
- \\ break :x *_ref;
+ \\ _ref.* -%= 1;
+ \\ break :x _ref.*;
\\ };
\\}
);
@@ -927,11 +1014,11 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\pub export fn bar() void {
\\ var f: ?extern fn() void = foo;
\\ var b: ?extern fn() c_int = baz;
- \\ (??f)();
- \\ (??f)();
+ \\ f.?();
+ \\ f.?();
\\ foo();
- \\ _ = (??b)();
- \\ _ = (??b)();
+ \\ _ = b.?();
+ \\ _ = b.?();
\\ _ = baz();
\\}
);
@@ -941,8 +1028,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ *x = 1;
\\}
,
- \\pub export fn foo(x: ?&c_int) void {
- \\ (*??x) = 1;
+ \\pub export fn foo(x: ?[*]c_int) void {
+ \\ x.?.* = 1;
\\}
);
@@ -969,8 +1056,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub fn foo() c_int {
\\ var x: c_int = 1234;
- \\ var ptr: ?&c_int = &x;
- \\ return *??ptr;
+ \\ var ptr: ?[*]c_int = &x;
+ \\ return ptr.?.*;
\\}
);
@@ -979,7 +1066,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return "bar";
\\}
,
- \\pub fn foo() ?&const u8 {
+ \\pub fn foo() ?[*]const u8 {
\\ return c"bar";
\\}
);
@@ -1077,7 +1164,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\pub const glClearPFN = PFNGLCLEARPROC;
,
\\pub inline fn glClearUnion(arg0: GLbitfield) void {
- \\ return (??glProcs.gl.Clear)(arg0);
+ \\ return glProcs.gl.Clear.?(arg0);
\\}
,
\\pub const OpenGLProcs = union_OpenGLProcs;
@@ -1108,8 +1195,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return (float *)a;
\\}
,
- \\fn ptrcast(a: ?&c_int) ?&f32 {
- \\ return @ptrCast(?&f32, a);
+ \\fn ptrcast(a: ?[*]c_int) ?[*]f32 {
+ \\ return @ptrCast(?[*]f32, a);
\\}
);
@@ -1131,7 +1218,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return !c;
\\}
,
- \\pub fn foo(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub fn foo(a: c_int, b: f32, c: ?*c_void) c_int {
\\ return !(a == 0);
\\ return !(a != 0);
\\ return !(b != 0);
@@ -1152,7 +1239,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
cases.add("const ptr initializer",
\\static const char *v0 = "0.0.0";
,
- \\pub var v0: ?&const u8 = c"0.0.0";
+ \\pub var v0: ?[*]const u8 = c"0.0.0";
);
cases.add("static incomplete array inside function",
@@ -1161,14 +1248,14 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\}
,
\\pub fn foo() void {
- \\ const v2: &const u8 = c"2.2.2";
+ \\ const v2: [*]const u8 = c"2.2.2";
\\}
);
cases.add("macro pointer cast",
\\#define NRF_GPIO ((NRF_GPIO_Type *) NRF_GPIO_BASE)
,
- \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast(&NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr(&NRF_GPIO_Type, NRF_GPIO_BASE) else (&NRF_GPIO_Type)(NRF_GPIO_BASE);
+ \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast([*]NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr([*]NRF_GPIO_Type, NRF_GPIO_BASE) else ([*]NRF_GPIO_Type)(NRF_GPIO_BASE);
);
cases.add("if on none bool",
@@ -1189,7 +1276,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ B,
\\ C,
\\};
- \\pub fn if_none_bool(a: c_int, b: f32, c: ?&c_void, d: enum_SomeEnum) c_int {
+ \\pub fn if_none_bool(a: c_int, b: f32, c: ?*c_void, d: enum_SomeEnum) c_int {
\\ if (a != 0) return 0;
\\ if (b != 0) return 1;
\\ if (c != null) return 2;
@@ -1206,7 +1293,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 3;
\\}
,
- \\pub fn while_none_bool(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub fn while_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ while (a != 0) return 0;
\\ while (b != 0) return 1;
\\ while (c != null) return 2;
@@ -1222,7 +1309,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 3;
\\}
,
- \\pub fn for_none_bool(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub fn for_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ while (a != 0) return 0;
\\ while (b != 0) return 1;
\\ while (c != null) return 2;
@@ -1246,29 +1333,29 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ }
\\}
,
- \\pub fn switch_fn(i: c_int) c_int {
- \\ var res: c_int = 0;
- \\ __switch: {
- \\ __case_2: {
- \\ __default: {
- \\ __case_1: {
- \\ __case_0: {
- \\ switch (i) {
- \\ 0 => break :__case_0,
- \\ 1 => break :__case_1,
- \\ else => break :__default,
- \\ 2 => break :__case_2,
- \\ }
- \\ }
- \\ res = 1;
- \\ }
- \\ res = 2;
- \\ }
- \\ res = (3 * i);
- \\ break :__switch;
- \\ }
- \\ res = 5;
- \\ }
- \\}
+ \\pub fn switch_fn(i: c_int) c_int {
+ \\ var res: c_int = 0;
+ \\ __switch: {
+ \\ __case_2: {
+ \\ __default: {
+ \\ __case_1: {
+ \\ __case_0: {
+ \\ switch (i) {
+ \\ 0 => break :__case_0,
+ \\ 1 => break :__case_1,
+ \\ else => break :__default,
+ \\ 2 => break :__case_2,
+ \\ }
+ \\ }
+ \\ res = 1;
+ \\ }
+ \\ res = 2;
+ \\ }
+ \\ res = (3 * i);
+ \\ break :__switch;
+ \\ }
+ \\ res = 5;
+ \\ }
+ \\}
);
}