diff --git a/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
similarity index 100%
rename from CODE_OF_CONDUCT.md
rename to .github/CODE_OF_CONDUCT.md
diff --git a/CONTRIBUTING.md b/.github/CONTRIBUTING.md
similarity index 100%
rename from CONTRIBUTING.md
rename to .github/CONTRIBUTING.md
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000..fc5f33e21f
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,7 @@
+contact_links:
+ - name: Language Proposal
+ about: Propose to improve the Zig language
+ url: https://github.com/ziglang/zig/wiki/Language-Proposals
+ - name: Question
+ about: Please use one of the community spaces for questions or general discussions.
+ url: https://github.com/ziglang/zig/wiki/Community
diff --git a/.github/ISSUE_TEMPLATE/proposal.yml b/.github/ISSUE_TEMPLATE/proposal.yml
deleted file mode 100644
index 83e30d2ef4..0000000000
--- a/.github/ISSUE_TEMPLATE/proposal.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-name: Language Proposal
-description: Propose to improve the Zig language
-labels: ["proposal"]
-body:
- - type: markdown
- attributes:
- value: |
- Thank you for your interest in improving the Zig language. However, we are
- not accepting new proposals to change the language at this time.
- - type: checkboxes
- id: trash
- attributes:
- label: Please do not file a proposal to change the language
- options:
- - label: "I understand, thank you. I will not submit a new proposal at this time"
- required: true
diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml
deleted file mode 100644
index 1e57fc76d4..0000000000
--- a/.github/ISSUE_TEMPLATE/question.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: Question
-description: Ask a Zig-related question
-labels: ["question"]
-body:
- - type: markdown
- attributes:
- value: |
- Welcome! There are a bunch of great places to ask Zig-related questions.
- Please take a look at
- [The Community Wiki Page](https://github.com/ziglang/zig/wiki/Community) and
- find a comfy place to ask questions. You will find plenty of helpful people in
- these spaces. However, this issue tracker is not for questions. It is for
- more actionable items such as bug reports and enhancements.
- - type: checkboxes
- id: trash
- attributes:
- label: Please do not open a question issue on the bug tracker
- options:
- - label: "I understand, thank you. I will take my question to one of the community spaces instead"
- required: true
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e12e040fe4..b28748fba4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -12,7 +12,7 @@ if(NOT CMAKE_BUILD_TYPE)
endif()
if(NOT CMAKE_INSTALL_PREFIX)
- set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}" CACHE STRING
+ set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/stage1" CACHE STRING
"Directory to install zig to" FORCE)
endif()
@@ -63,7 +63,7 @@ if("${ZIG_VERSION}" STREQUAL "")
endif()
endif()
endif()
-message("Configuring zig version ${ZIG_VERSION}")
+message(STATUS "Configuring zig version ${ZIG_VERSION}")
set(ZIG_STATIC off CACHE BOOL "Attempt to build a static zig executable (not compatible with glibc)")
set(ZIG_STATIC_LLVM off CACHE BOOL "Prefer linking against static LLVM libraries")
@@ -398,6 +398,9 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/debug.zig"
"${CMAKE_SOURCE_DIR}/lib/std/dwarf.zig"
"${CMAKE_SOURCE_DIR}/lib/std/dwarf/AT.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/dwarf/ATE.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/dwarf/FORM.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/dwarf/LANG.zig"
"${CMAKE_SOURCE_DIR}/lib/std/dwarf/OP.zig"
"${CMAKE_SOURCE_DIR}/lib/std/dwarf/TAG.zig"
"${CMAKE_SOURCE_DIR}/lib/std/elf.zig"
@@ -441,9 +444,8 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/math.zig"
"${CMAKE_SOURCE_DIR}/lib/std/math/big.zig"
"${CMAKE_SOURCE_DIR}/lib/std/math/big/int.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/math/floor.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/math/float.zig"
"${CMAKE_SOURCE_DIR}/lib/std/math/frexp.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/math/inf.zig"
"${CMAKE_SOURCE_DIR}/lib/std/math/isinf.zig"
"${CMAKE_SOURCE_DIR}/lib/std/math/isnan.zig"
"${CMAKE_SOURCE_DIR}/lib/std/math/ln.zig"
@@ -460,12 +462,14 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/meta/trait.zig"
"${CMAKE_SOURCE_DIR}/lib/std/multi_array_list.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/os/darwin.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux/errno/generic.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux/x86_64.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux/io_uring.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/linux/x86_64.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/os/posix_spawn.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/windows.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/windows/ntstatus.zig"
"${CMAKE_SOURCE_DIR}/lib/std/os/windows/win32error.zig"
@@ -474,74 +478,180 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/process.zig"
"${CMAKE_SOURCE_DIR}/lib/std/rand.zig"
"${CMAKE_SOURCE_DIR}/lib/std/sort.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/absv.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/addXf3.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/atomics.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/bswap.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/clear_cache.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/cmp.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/compareXf2.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/count0bits.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/divdf3.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/divsf3.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/divtf3.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/divti3.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/extendXfYf2.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixdfdi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixdfsi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixdfti.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixint.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixsfdi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixsfsi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixsfti.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixtfdi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixtfsi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixtfti.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixuint.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixunsdfdi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixunsdfsi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixunsdfti.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixunssfdi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixunssfsi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixunssfti.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixunstfdi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixunstfsi.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/fixunstfti.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatXisf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatdidf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatditf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatsiXf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floattidf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floattitf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatundidf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatundisf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatunditf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatunsidf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatunsisf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatunsitf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatuntidf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatuntisf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/floatuntitf.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/int.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/modti3.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/mulXf3.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/muldi3.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/mulo.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/multi3.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/negXf2.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/negXi2.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/negv.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/os_version_check.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/parity.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/popcount.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/shift.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/stack_probe.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/truncXfYf2.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/udivmod.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/udivmodti4.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/udivti3.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt/umodti3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/absv.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/absvdi2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/absvsi2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/absvti2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/adddf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addo.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addsf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addtf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addxf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/arm.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/atomics.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/aulldiv.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/aullrem.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/bswap.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/ceil.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/clear_cache.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmp.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmptf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/common.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/comparef.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cos.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/count0bits.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/divdf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/divsf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/divtf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/divti3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/divxf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/emutls.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/exp.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/exp2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extenddftf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extenddfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhfsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhftf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsfdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsftf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendxftf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fabs.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/float_to_int.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatditf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunditf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floor.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fma.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fmax.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fmin.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fmod.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/gedf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/gesf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/getf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/gexf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/int.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/int_to_float.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/log.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/log10.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/log2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/modti3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/muldf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/muldi3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulo.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulsf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/multf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/multi3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulxf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/negXi2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/negv.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/os_version_check.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/parity.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/popcount.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/rem_pio2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/rem_pio2_large.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/rem_pio2f.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/round.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/shift.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/sin.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/sincos.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/sqrt.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/stack_probe.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subo.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subsf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subdf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subtf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subxf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/negsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/negdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/negtf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/negxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/tan.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trig.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunc.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncdfhf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncdfsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncsfhf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfhf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfhf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmod.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmodti4.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivti3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/umodti3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/unorddf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/unordsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/unordtf2.zig"
"${CMAKE_SOURCE_DIR}/lib/std/start.zig"
"${CMAKE_SOURCE_DIR}/lib/std/std.zig"
"${CMAKE_SOURCE_DIR}/lib/std/target.zig"
@@ -561,16 +671,16 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/target/wasm.zig"
"${CMAKE_SOURCE_DIR}/lib/std/target/x86.zig"
"${CMAKE_SOURCE_DIR}/lib/std/Thread.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/Thread/AutoResetEvent.zig"
"${CMAKE_SOURCE_DIR}/lib/std/Thread/Futex.zig"
"${CMAKE_SOURCE_DIR}/lib/std/Thread/Mutex.zig"
"${CMAKE_SOURCE_DIR}/lib/std/Thread/ResetEvent.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/Thread/StaticResetEvent.zig"
"${CMAKE_SOURCE_DIR}/lib/std/time.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/treap.zig"
"${CMAKE_SOURCE_DIR}/lib/std/unicode.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/Ast.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/CrossTarget.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/zig/c_builtins.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/parse.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/render.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/string_literal.zig"
@@ -597,14 +707,22 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/arch/aarch64/Emit.zig"
"${CMAKE_SOURCE_DIR}/src/arch/aarch64/Mir.zig"
"${CMAKE_SOURCE_DIR}/src/arch/aarch64/bits.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/aarch64/abi.zig"
"${CMAKE_SOURCE_DIR}/src/arch/arm/CodeGen.zig"
"${CMAKE_SOURCE_DIR}/src/arch/arm/Emit.zig"
"${CMAKE_SOURCE_DIR}/src/arch/arm/Mir.zig"
"${CMAKE_SOURCE_DIR}/src/arch/arm/bits.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/arm/abi.zig"
"${CMAKE_SOURCE_DIR}/src/arch/riscv64/CodeGen.zig"
"${CMAKE_SOURCE_DIR}/src/arch/riscv64/Emit.zig"
"${CMAKE_SOURCE_DIR}/src/arch/riscv64/Mir.zig"
"${CMAKE_SOURCE_DIR}/src/arch/riscv64/bits.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/riscv64/abi.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/sparc64/CodeGen.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/sparc64/Emit.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/sparc64/Mir.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/sparc64/bits.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/sparc64/abi.zig"
"${CMAKE_SOURCE_DIR}/src/arch/wasm/CodeGen.zig"
"${CMAKE_SOURCE_DIR}/src/arch/wasm/Emit.zig"
"${CMAKE_SOURCE_DIR}/src/arch/wasm/Mir.zig"
@@ -612,6 +730,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/arch/x86_64/Emit.zig"
"${CMAKE_SOURCE_DIR}/src/arch/x86_64/Mir.zig"
"${CMAKE_SOURCE_DIR}/src/arch/x86_64/bits.zig"
+ "${CMAKE_SOURCE_DIR}/src/arch/x86_64/abi.zig"
"${CMAKE_SOURCE_DIR}/src/clang.zig"
"${CMAKE_SOURCE_DIR}/src/clang_options.zig"
"${CMAKE_SOURCE_DIR}/src/clang_options_data.zig"
@@ -655,6 +774,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/print_env.zig"
"${CMAKE_SOURCE_DIR}/src/print_targets.zig"
"${CMAKE_SOURCE_DIR}/src/print_zir.zig"
+ "${CMAKE_SOURCE_DIR}/src/register_manager.zig"
"${CMAKE_SOURCE_DIR}/src/stage1.zig"
"${CMAKE_SOURCE_DIR}/src/target.zig"
"${CMAKE_SOURCE_DIR}/src/tracy.zig"
@@ -791,6 +911,9 @@ add_library(opt_c_util STATIC ${OPTIMIZED_C_SOURCES})
set_target_properties(opt_c_util PROPERTIES
COMPILE_FLAGS "${OPTIMIZED_C_FLAGS}"
)
+target_include_directories(opt_c_util PRIVATE
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e-prebuilt"
+)
add_library(zigstage1 STATIC ${STAGE1_SOURCES})
set_target_properties(zigstage1 PROPERTIES
@@ -834,7 +957,7 @@ else()
set(ZIG1_RELEASE_ARG -OReleaseFast --strip)
endif()
if(ZIG_SINGLE_THREADED)
- set(ZIG1_SINGLE_THREADED_ARG "--single-threaded")
+ set(ZIG1_SINGLE_THREADED_ARG "-fsingle-threaded")
else()
set(ZIG1_SINGLE_THREADED_ARG "")
endif()
@@ -852,7 +975,7 @@ set(BUILD_ZIG1_ARGS
-lc
--pkg-begin build_options "${ZIG_CONFIG_ZIG_OUT}"
--pkg-end
- --pkg-begin compiler_rt "${CMAKE_SOURCE_DIR}/lib/std/special/compiler_rt.zig"
+ --pkg-begin compiler_rt "${CMAKE_SOURCE_DIR}/lib/compiler_rt.zig"
--pkg-end
)
diff --git a/README.md b/README.md
index cd8b4b4e96..3ffd544e98 100644
--- a/README.md
+++ b/README.md
@@ -9,8 +9,8 @@ A general-purpose programming language and toolchain for maintaining
* [Download & Documentation](https://ziglang.org/download)
* [Chapter 0 - Getting Started | ZigLearn.org](https://ziglearn.org/)
* [Community](https://github.com/ziglang/zig/wiki/Community)
- * [Contributing](https://github.com/ziglang/zig/blob/master/CONTRIBUTING.md)
- * [Code of Conduct](https://github.com/ziglang/zig/blob/master/CODE_OF_CONDUCT.md)
+ * [Contributing](https://github.com/ziglang/zig/blob/master/.github/CONTRIBUTING.md)
+ * [Code of Conduct](https://github.com/ziglang/zig/blob/master/.github/CODE_OF_CONDUCT.md)
* [Frequently Asked Questions](https://github.com/ziglang/zig/wiki/FAQ)
* [Community Projects](https://github.com/ziglang/zig/wiki/Community-Projects)
diff --git a/build.zig b/build.zig
index f2d154c702..0ca2e0d7d7 100644
--- a/build.zig
+++ b/build.zig
@@ -19,7 +19,7 @@ pub fn build(b: *Builder) !void {
const single_threaded = b.option(bool, "single-threaded", "Build artifacts that run in single threaded mode");
const use_zig_libcxx = b.option(bool, "use-zig-libcxx", "If libc++ is needed, use zig's bundled version, don't try to integrate with the system") orelse false;
- var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
+ const docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
docgen_exe.single_threaded = single_threaded;
const rel_zig_exe = try fs.path.relative(b.allocator, b.build_root, b.zig_exe);
@@ -27,7 +27,7 @@ pub fn build(b: *Builder) !void {
b.allocator,
&[_][]const u8{ b.cache_root, "langref.html" },
) catch unreachable;
- var docgen_cmd = docgen_exe.run();
+ const docgen_cmd = docgen_exe.run();
docgen_cmd.addArgs(&[_][]const u8{
rel_zig_exe,
"doc" ++ fs.path.sep_str ++ "langref.html.in",
@@ -40,10 +40,10 @@ pub fn build(b: *Builder) !void {
const toolchain_step = b.step("test-toolchain", "Run the tests for the toolchain");
- var test_stage2 = b.addTest("src/test.zig");
- test_stage2.setBuildMode(mode);
- test_stage2.addPackagePath("test_cases", "test/cases.zig");
- test_stage2.single_threaded = single_threaded;
+ var test_cases = b.addTest("src/test.zig");
+ test_cases.setBuildMode(mode);
+ test_cases.addPackagePath("test_cases", "test/cases.zig");
+ test_cases.single_threaded = single_threaded;
const fmt_build_zig = b.addFmt(&[_][]const u8{"build.zig"});
@@ -54,12 +54,14 @@ pub fn build(b: *Builder) !void {
const skip_release_safe = b.option(bool, "skip-release-safe", "Main test suite skips release-safe builds") orelse skip_release;
const skip_non_native = b.option(bool, "skip-non-native", "Main test suite skips non-native builds") orelse false;
const skip_libc = b.option(bool, "skip-libc", "Main test suite skips tests that link libc") orelse false;
- const skip_compile_errors = b.option(bool, "skip-compile-errors", "Main test suite skips compile error tests") orelse false;
+ const skip_single_threaded = b.option(bool, "skip-single-threaded", "Main test suite skips tests that are single-threaded") orelse false;
+ const skip_stage1 = b.option(bool, "skip-stage1", "Main test suite skips stage1 compile error tests") orelse false;
const skip_run_translated_c = b.option(bool, "skip-run-translated-c", "Main test suite skips run-translated-c tests") orelse false;
const skip_stage2_tests = b.option(bool, "skip-stage2-tests", "Main test suite skips self-hosted compiler tests") orelse false;
const skip_install_lib_files = b.option(bool, "skip-install-lib-files", "Do not copy lib/ files to installation prefix") orelse false;
const only_install_lib_files = b.option(bool, "lib-files-only", "Only install library files") orelse false;
+
const is_stage1 = b.option(bool, "stage1", "Build the stage1 compiler, put stage2 behind a feature flag") orelse false;
const omit_stage2 = b.option(bool, "omit-stage2", "Do not include stage2 behind a feature flag inside stage1") orelse false;
const static_llvm = b.option(bool, "static-llvm", "Disable integration with system-installed LLVM, Clang, LLD, and libc++") orelse false;
@@ -128,6 +130,8 @@ pub fn build(b: *Builder) !void {
const force_gpa = b.option(bool, "force-gpa", "Force the compiler to use GeneralPurposeAllocator") orelse false;
const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse enable_llvm;
const strip = b.option(bool, "strip", "Omit debug information") orelse false;
+ const use_zig0 = b.option(bool, "zig0", "Bootstrap using zig0") orelse false;
+ const value_tracing = b.option(bool, "value-tracing", "Enable extra state tracking to help troubleshoot bugs in the compiler (using the std.debug.Trace API)") orelse false;
const mem_leak_frames: u32 = b.option(u32, "mem-leak-frames", "How many stack frames to print when a memory leak occurs. Tests get 2x this amount.") orelse blk: {
if (strip) break :blk @as(u32, 0);
@@ -135,23 +139,29 @@ pub fn build(b: *Builder) !void {
break :blk 4;
};
- const main_file = if (is_stage1) "src/stage1.zig" else "src/main.zig";
+ const main_file: ?[]const u8 = mf: {
+ if (!is_stage1) break :mf "src/main.zig";
+ if (use_zig0) break :mf null;
+ break :mf "src/stage1.zig";
+ };
- var exe = b.addExecutable("zig", main_file);
+ const exe = b.addExecutable("zig", main_file);
exe.strip = strip;
+ exe.build_id = b.option(bool, "build-id", "Include a build id note") orelse false;
exe.install();
exe.setBuildMode(mode);
exe.setTarget(target);
if (!skip_stage2_tests) {
toolchain_step.dependOn(&exe.step);
}
+
b.default_step.dependOn(&exe.step);
exe.single_threaded = single_threaded;
if (target.isWindows() and target.getAbi() == .gnu) {
// LTO is currently broken on mingw, this can be removed when it's fixed.
exe.want_lto = false;
- test_stage2.want_lto = false;
+ test_cases.want_lto = false;
}
const exe_options = b.addOptions();
@@ -166,59 +176,9 @@ pub fn build(b: *Builder) !void {
exe_options.addOption(bool, "llvm_has_arc", llvm_has_arc);
exe_options.addOption(bool, "force_gpa", force_gpa);
- if (enable_llvm) {
- const cmake_cfg = if (static_llvm) null else findAndParseConfigH(b, config_h_path_option);
-
- if (is_stage1) {
- exe.addIncludePath("src");
- exe.addIncludePath("deps/SoftFloat-3e/source/include");
-
- test_stage2.addIncludePath("src");
- test_stage2.addIncludePath("deps/SoftFloat-3e/source/include");
- // This is intentionally a dummy path. stage1.zig tries to @import("compiler_rt") in case
- // of being built by cmake. But when built by zig it's gonna get a compiler_rt so that
- // is pointless.
- exe.addPackagePath("compiler_rt", "src/empty.zig");
- exe.defineCMacro("ZIG_LINK_MODE", "Static");
- test_stage2.defineCMacro("ZIG_LINK_MODE", "Static");
-
- const softfloat = b.addStaticLibrary("softfloat", null);
- softfloat.setBuildMode(.ReleaseFast);
- softfloat.setTarget(target);
- softfloat.addIncludePath("deps/SoftFloat-3e-prebuilt");
- softfloat.addIncludePath("deps/SoftFloat-3e/source/8086");
- softfloat.addIncludePath("deps/SoftFloat-3e/source/include");
- softfloat.addCSourceFiles(&softfloat_sources, &[_][]const u8{ "-std=c99", "-O3" });
- softfloat.single_threaded = single_threaded;
-
- exe.linkLibrary(softfloat);
- test_stage2.linkLibrary(softfloat);
-
- exe.addCSourceFiles(&stage1_sources, &exe_cflags);
- exe.addCSourceFiles(&optimized_c_sources, &[_][]const u8{ "-std=c99", "-O3" });
-
- test_stage2.addCSourceFiles(&stage1_sources, &exe_cflags);
- test_stage2.addCSourceFiles(&optimized_c_sources, &[_][]const u8{ "-std=c99", "-O3" });
- }
- if (cmake_cfg) |cfg| {
- // Inside this code path, we have to coordinate with system packaged LLVM, Clang, and LLD.
- // That means we also have to rely on stage1 compiled c++ files. We parse config.h to find
- // the information passed on to us from cmake.
- if (cfg.cmake_prefix_path.len > 0) {
- b.addSearchPrefix(cfg.cmake_prefix_path);
- }
-
- try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
- try addCmakeCfgOptionsToExe(b, cfg, test_stage2, use_zig_libcxx);
- } else {
- // Here we are -Denable-llvm but no cmake integration.
- try addStaticLlvmOptionsToExe(exe);
- try addStaticLlvmOptionsToExe(test_stage2);
- }
- }
if (link_libc) {
exe.linkLibC();
- test_stage2.linkLibC();
+ test_cases.linkLibC();
}
const is_debug = mode == .Debug;
@@ -227,6 +187,10 @@ pub fn build(b: *Builder) !void {
const opt_version_string = b.option([]const u8, "version-string", "Override Zig version string. Default is to find out with git.");
const version = if (opt_version_string) |version| version else v: {
+ if (!std.process.can_spawn) {
+ std.debug.print("error: version info cannot be retrieved from git. Zig version must be provided using -Dversion-string\n", .{});
+ std.process.exit(1);
+ }
const version_string = b.fmt("{d}.{d}.{d}", .{ zig_version.major, zig_version.minor, zig_version.patch });
var code: u8 = undefined;
@@ -276,6 +240,112 @@ pub fn build(b: *Builder) !void {
};
exe_options.addOption([:0]const u8, "version", try b.allocator.dupeZ(u8, version));
+ if (enable_llvm) {
+ const cmake_cfg = if (static_llvm) null else findAndParseConfigH(b, config_h_path_option);
+
+ if (is_stage1) {
+ const softfloat = b.addStaticLibrary("softfloat", null);
+ softfloat.setBuildMode(.ReleaseFast);
+ softfloat.setTarget(target);
+ softfloat.addIncludePath("deps/SoftFloat-3e-prebuilt");
+ softfloat.addIncludePath("deps/SoftFloat-3e/source/8086");
+ softfloat.addIncludePath("deps/SoftFloat-3e/source/include");
+ softfloat.addCSourceFiles(&softfloat_sources, &[_][]const u8{ "-std=c99", "-O3" });
+ softfloat.single_threaded = single_threaded;
+
+ const zig0 = b.addExecutable("zig0", null);
+ zig0.addCSourceFiles(&.{"src/stage1/zig0.cpp"}, &exe_cflags);
+ zig0.addIncludePath("zig-cache/tmp"); // for config.h
+ zig0.defineCMacro("ZIG_VERSION_MAJOR", b.fmt("{d}", .{zig_version.major}));
+ zig0.defineCMacro("ZIG_VERSION_MINOR", b.fmt("{d}", .{zig_version.minor}));
+ zig0.defineCMacro("ZIG_VERSION_PATCH", b.fmt("{d}", .{zig_version.patch}));
+ zig0.defineCMacro("ZIG_VERSION_STRING", b.fmt("\"{s}\"", .{version}));
+
+ for ([_]*std.build.LibExeObjStep{ zig0, exe, test_cases }) |artifact| {
+ artifact.addIncludePath("src");
+ artifact.addIncludePath("deps/SoftFloat-3e/source/include");
+ artifact.addIncludePath("deps/SoftFloat-3e-prebuilt");
+
+ artifact.defineCMacro("ZIG_LINK_MODE", "Static");
+
+ artifact.addCSourceFiles(&stage1_sources, &exe_cflags);
+ artifact.addCSourceFiles(&optimized_c_sources, &[_][]const u8{ "-std=c99", "-O3" });
+
+ artifact.linkLibrary(softfloat);
+ artifact.linkLibCpp();
+ }
+
+ try addStaticLlvmOptionsToExe(zig0);
+
+ const zig1_obj_ext = target.getObjectFormat().fileExt(target.getCpuArch());
+ const zig1_obj_path = b.pathJoin(&.{ "zig-cache", "tmp", b.fmt("zig1{s}", .{zig1_obj_ext}) });
+ const zig1_compiler_rt_path = b.pathJoin(&.{ b.pathFromRoot("lib"), "std", "special", "compiler_rt.zig" });
+
+ const zig1_obj = zig0.run();
+ zig1_obj.addArgs(&.{
+ "src/stage1.zig",
+ "-target",
+ try target.zigTriple(b.allocator),
+ "-mcpu=baseline",
+ "--name",
+ "zig1",
+ "--zig-lib-dir",
+ b.pathFromRoot("lib"),
+ b.fmt("-femit-bin={s}", .{b.pathFromRoot(zig1_obj_path)}),
+ "-fcompiler-rt",
+ "-lc",
+ });
+ {
+ zig1_obj.addArgs(&.{ "--pkg-begin", "build_options" });
+ zig1_obj.addFileSourceArg(exe_options.getSource());
+ zig1_obj.addArgs(&.{ "--pkg-end", "--pkg-begin", "compiler_rt", zig1_compiler_rt_path, "--pkg-end" });
+ }
+ switch (mode) {
+ .Debug => {},
+ .ReleaseFast => {
+ zig1_obj.addArg("-OReleaseFast");
+ zig1_obj.addArg("--strip");
+ },
+ .ReleaseSafe => {
+ zig1_obj.addArg("-OReleaseSafe");
+ zig1_obj.addArg("--strip");
+ },
+ .ReleaseSmall => {
+ zig1_obj.addArg("-OReleaseSmall");
+ zig1_obj.addArg("--strip");
+ },
+ }
+ if (single_threaded orelse false) {
+ zig1_obj.addArg("-fsingle-threaded");
+ }
+
+ if (use_zig0) {
+ exe.step.dependOn(&zig1_obj.step);
+ exe.addObjectFile(zig1_obj_path);
+ }
+
+ // This is intentionally a dummy path. stage1.zig tries to @import("compiler_rt") in case
+ // of being built by cmake. But when built by zig it's gonna get a compiler_rt so that
+ // is pointless.
+ exe.addPackagePath("compiler_rt", "src/empty.zig");
+ }
+ if (cmake_cfg) |cfg| {
+ // Inside this code path, we have to coordinate with system packaged LLVM, Clang, and LLD.
+ // That means we also have to rely on stage1 compiled c++ files. We parse config.h to find
+ // the information passed on to us from cmake.
+ if (cfg.cmake_prefix_path.len > 0) {
+ b.addSearchPrefix(cfg.cmake_prefix_path);
+ }
+
+ try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
+ try addCmakeCfgOptionsToExe(b, cfg, test_cases, use_zig_libcxx);
+ } else {
+ // Here we are -Denable-llvm but no cmake integration.
+ try addStaticLlvmOptionsToExe(exe);
+ try addStaticLlvmOptionsToExe(test_cases);
+ }
+ }
+
const semver = try std.SemanticVersion.parse(version);
exe_options.addOption(std.SemanticVersion, "semver", semver);
@@ -284,6 +354,7 @@ pub fn build(b: *Builder) !void {
exe_options.addOption(bool, "enable_tracy", tracy != null);
exe_options.addOption(bool, "enable_tracy_callstack", tracy_callstack);
exe_options.addOption(bool, "enable_tracy_allocation", tracy_allocation);
+ exe_options.addOption(bool, "value_tracing", value_tracing);
exe_options.addOption(bool, "is_stage1", is_stage1);
exe_options.addOption(bool, "omit_stage2", omit_stage2);
if (tracy) |tracy_path| {
@@ -313,34 +384,37 @@ pub fn build(b: *Builder) !void {
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
- const test_stage2_options = b.addOptions();
- test_stage2.addOptions("build_options", test_stage2_options);
+ const test_cases_options = b.addOptions();
+ test_cases.addOptions("build_options", test_cases_options);
- test_stage2_options.addOption(bool, "enable_logging", enable_logging);
- test_stage2_options.addOption(bool, "enable_link_snapshots", enable_link_snapshots);
- test_stage2_options.addOption(bool, "skip_non_native", skip_non_native);
- test_stage2_options.addOption(bool, "skip_compile_errors", skip_compile_errors);
- test_stage2_options.addOption(bool, "is_stage1", is_stage1);
- test_stage2_options.addOption(bool, "omit_stage2", omit_stage2);
- test_stage2_options.addOption(bool, "have_llvm", enable_llvm);
- test_stage2_options.addOption(bool, "llvm_has_m68k", llvm_has_m68k);
- test_stage2_options.addOption(bool, "llvm_has_csky", llvm_has_csky);
- test_stage2_options.addOption(bool, "llvm_has_ve", llvm_has_ve);
- test_stage2_options.addOption(bool, "llvm_has_arc", llvm_has_arc);
- test_stage2_options.addOption(bool, "enable_qemu", b.enable_qemu);
- test_stage2_options.addOption(bool, "enable_wine", b.enable_wine);
- test_stage2_options.addOption(bool, "enable_wasmtime", b.enable_wasmtime);
- test_stage2_options.addOption(bool, "enable_rosetta", b.enable_rosetta);
- test_stage2_options.addOption(bool, "enable_darling", b.enable_darling);
- test_stage2_options.addOption(u32, "mem_leak_frames", mem_leak_frames * 2);
- test_stage2_options.addOption(?[]const u8, "glibc_runtimes_dir", b.glibc_runtimes_dir);
- test_stage2_options.addOption([:0]const u8, "version", try b.allocator.dupeZ(u8, version));
- test_stage2_options.addOption(std.SemanticVersion, "semver", semver);
+ test_cases_options.addOption(bool, "enable_logging", enable_logging);
+ test_cases_options.addOption(bool, "enable_link_snapshots", enable_link_snapshots);
+ test_cases_options.addOption(bool, "skip_non_native", skip_non_native);
+ test_cases_options.addOption(bool, "skip_stage1", skip_stage1);
+ test_cases_options.addOption(bool, "is_stage1", is_stage1);
+ test_cases_options.addOption(bool, "omit_stage2", omit_stage2);
+ test_cases_options.addOption(bool, "have_llvm", enable_llvm);
+ test_cases_options.addOption(bool, "llvm_has_m68k", llvm_has_m68k);
+ test_cases_options.addOption(bool, "llvm_has_csky", llvm_has_csky);
+ test_cases_options.addOption(bool, "llvm_has_ve", llvm_has_ve);
+ test_cases_options.addOption(bool, "llvm_has_arc", llvm_has_arc);
+ test_cases_options.addOption(bool, "force_gpa", force_gpa);
+ test_cases_options.addOption(bool, "enable_qemu", b.enable_qemu);
+ test_cases_options.addOption(bool, "enable_wine", b.enable_wine);
+ test_cases_options.addOption(bool, "enable_wasmtime", b.enable_wasmtime);
+ test_cases_options.addOption(bool, "enable_rosetta", b.enable_rosetta);
+ test_cases_options.addOption(bool, "enable_darling", b.enable_darling);
+ test_cases_options.addOption(u32, "mem_leak_frames", mem_leak_frames * 2);
+ test_cases_options.addOption(bool, "value_tracing", value_tracing);
+ test_cases_options.addOption(?[]const u8, "glibc_runtimes_dir", b.glibc_runtimes_dir);
+ test_cases_options.addOption([:0]const u8, "version", try b.allocator.dupeZ(u8, version));
+ test_cases_options.addOption(std.SemanticVersion, "semver", semver);
+ test_cases_options.addOption(?[]const u8, "test_filter", test_filter);
- const test_stage2_step = b.step("test-stage2", "Run the stage2 compiler tests");
- test_stage2_step.dependOn(&test_stage2.step);
+ const test_cases_step = b.step("test-cases", "Run the main compiler test cases");
+ test_cases_step.dependOn(&test_cases.step);
if (!skip_stage2_tests) {
- toolchain_step.dependOn(test_stage2_step);
+ toolchain_step.dependOn(test_cases_step);
}
var chosen_modes: [4]builtin.Mode = undefined;
@@ -375,41 +449,50 @@ pub fn build(b: *Builder) !void {
"behavior",
"Run the behavior tests",
modes,
- false, // skip_single_threaded
+ skip_single_threaded,
skip_non_native,
skip_libc,
+ skip_stage1,
+ omit_stage2,
+ is_stage1,
));
toolchain_step.dependOn(tests.addPkgTests(
b,
test_filter,
- "lib/std/special/compiler_rt.zig",
+ "lib/compiler_rt.zig",
"compiler-rt",
"Run the compiler_rt tests",
modes,
true, // skip_single_threaded
skip_non_native,
true, // skip_libc
+ skip_stage1,
+ omit_stage2 or true, // TODO get these all passing
+ is_stage1,
));
toolchain_step.dependOn(tests.addPkgTests(
b,
test_filter,
- "lib/std/special/c.zig",
- "minilibc",
- "Run the mini libc tests",
+ "lib/c.zig",
+ "universal-libc",
+ "Run the universal libc tests",
modes,
true, // skip_single_threaded
skip_non_native,
true, // skip_libc
+ skip_stage1,
+ omit_stage2 or true, // TODO get these all passing
+ is_stage1,
));
toolchain_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addStandaloneTests(b, test_filter, modes, skip_non_native, enable_macos_sdk, target));
+ toolchain_step.dependOn(tests.addLinkTests(b, test_filter, modes, enable_macos_sdk));
toolchain_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addCliTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
- toolchain_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addTranslateCTests(b, test_filter));
if (!skip_run_translated_c) {
toolchain_step.dependOn(tests.addRunTranslatedCTests(b, test_filter, target));
@@ -424,9 +507,12 @@ pub fn build(b: *Builder) !void {
"std",
"Run the standard library tests",
modes,
- false,
+ skip_single_threaded,
skip_non_native,
skip_libc,
+ skip_stage1,
+ omit_stage2 or true, // TODO get these all passing
+ is_stage1,
);
const test_step = b.step("test", "Run all the tests");
@@ -499,9 +585,7 @@ fn addCmakeCfgOptionsToExe(
}
}
-fn addStaticLlvmOptionsToExe(
- exe: *std.build.LibExeObjStep,
-) !void {
+fn addStaticLlvmOptionsToExe(exe: *std.build.LibExeObjStep) !void {
// Adds the Zig C++ sources which both stage1 and stage2 need.
//
// We need this because otherwise zig_clang_cc1_main.cpp ends up pulling
@@ -542,11 +626,14 @@ fn addCxxKnownPath(
errtxt: ?[]const u8,
need_cpp_includes: bool,
) !void {
+ if (!std.process.can_spawn)
+ return error.RequiredLibraryNotFound;
const path_padded = try b.exec(&[_][]const u8{
ctx.cxx_compiler,
b.fmt("-print-file-name={s}", .{objname}),
});
- const path_unpadded = mem.tokenize(u8, path_padded, "\r\n").next().?;
+ var tokenizer = mem.tokenize(u8, path_padded, "\r\n");
+ const path_unpadded = tokenizer.next().?;
if (mem.eql(u8, path_unpadded, objname)) {
if (errtxt) |msg| {
std.debug.print("{s}", .{msg});
@@ -688,15 +775,19 @@ fn toNativePathSep(b: *Builder, s: []const u8) []u8 {
const softfloat_sources = [_][]const u8{
"deps/SoftFloat-3e/source/8086/f128M_isSignalingNaN.c",
+ "deps/SoftFloat-3e/source/8086/extF80M_isSignalingNaN.c",
"deps/SoftFloat-3e/source/8086/s_commonNaNToF128M.c",
+ "deps/SoftFloat-3e/source/8086/s_commonNaNToExtF80M.c",
"deps/SoftFloat-3e/source/8086/s_commonNaNToF16UI.c",
"deps/SoftFloat-3e/source/8086/s_commonNaNToF32UI.c",
"deps/SoftFloat-3e/source/8086/s_commonNaNToF64UI.c",
"deps/SoftFloat-3e/source/8086/s_f128MToCommonNaN.c",
+ "deps/SoftFloat-3e/source/8086/s_extF80MToCommonNaN.c",
"deps/SoftFloat-3e/source/8086/s_f16UIToCommonNaN.c",
"deps/SoftFloat-3e/source/8086/s_f32UIToCommonNaN.c",
"deps/SoftFloat-3e/source/8086/s_f64UIToCommonNaN.c",
"deps/SoftFloat-3e/source/8086/s_propagateNaNF128M.c",
+ "deps/SoftFloat-3e/source/8086/s_propagateNaNExtF80M.c",
"deps/SoftFloat-3e/source/8086/s_propagateNaNF16UI.c",
"deps/SoftFloat-3e/source/8086/softfloat_raiseFlags.c",
"deps/SoftFloat-3e/source/f128M_add.c",
@@ -716,6 +807,7 @@ const softfloat_sources = [_][]const u8{
"deps/SoftFloat-3e/source/f128M_to_f16.c",
"deps/SoftFloat-3e/source/f128M_to_f32.c",
"deps/SoftFloat-3e/source/f128M_to_f64.c",
+ "deps/SoftFloat-3e/source/f128M_to_extF80M.c",
"deps/SoftFloat-3e/source/f128M_to_i32.c",
"deps/SoftFloat-3e/source/f128M_to_i32_r_minMag.c",
"deps/SoftFloat-3e/source/f128M_to_i64.c",
@@ -724,6 +816,20 @@ const softfloat_sources = [_][]const u8{
"deps/SoftFloat-3e/source/f128M_to_ui32_r_minMag.c",
"deps/SoftFloat-3e/source/f128M_to_ui64.c",
"deps/SoftFloat-3e/source/f128M_to_ui64_r_minMag.c",
+ "deps/SoftFloat-3e/source/extF80M_add.c",
+ "deps/SoftFloat-3e/source/extF80M_div.c",
+ "deps/SoftFloat-3e/source/extF80M_eq.c",
+ "deps/SoftFloat-3e/source/extF80M_le.c",
+ "deps/SoftFloat-3e/source/extF80M_lt.c",
+ "deps/SoftFloat-3e/source/extF80M_mul.c",
+ "deps/SoftFloat-3e/source/extF80M_rem.c",
+ "deps/SoftFloat-3e/source/extF80M_roundToInt.c",
+ "deps/SoftFloat-3e/source/extF80M_sqrt.c",
+ "deps/SoftFloat-3e/source/extF80M_sub.c",
+ "deps/SoftFloat-3e/source/extF80M_to_f16.c",
+ "deps/SoftFloat-3e/source/extF80M_to_f32.c",
+ "deps/SoftFloat-3e/source/extF80M_to_f64.c",
+ "deps/SoftFloat-3e/source/extF80M_to_f128M.c",
"deps/SoftFloat-3e/source/f16_add.c",
"deps/SoftFloat-3e/source/f16_div.c",
"deps/SoftFloat-3e/source/f16_eq.c",
@@ -735,9 +841,12 @@ const softfloat_sources = [_][]const u8{
"deps/SoftFloat-3e/source/f16_roundToInt.c",
"deps/SoftFloat-3e/source/f16_sqrt.c",
"deps/SoftFloat-3e/source/f16_sub.c",
+ "deps/SoftFloat-3e/source/f16_to_extF80M.c",
"deps/SoftFloat-3e/source/f16_to_f128M.c",
"deps/SoftFloat-3e/source/f16_to_f64.c",
+ "deps/SoftFloat-3e/source/f32_to_extF80M.c",
"deps/SoftFloat-3e/source/f32_to_f128M.c",
+ "deps/SoftFloat-3e/source/f64_to_extF80M.c",
"deps/SoftFloat-3e/source/f64_to_f128M.c",
"deps/SoftFloat-3e/source/f64_to_f16.c",
"deps/SoftFloat-3e/source/i32_to_f128M.c",
@@ -745,6 +854,7 @@ const softfloat_sources = [_][]const u8{
"deps/SoftFloat-3e/source/s_addCarryM.c",
"deps/SoftFloat-3e/source/s_addComplCarryM.c",
"deps/SoftFloat-3e/source/s_addF128M.c",
+ "deps/SoftFloat-3e/source/s_addExtF80M.c",
"deps/SoftFloat-3e/source/s_addM.c",
"deps/SoftFloat-3e/source/s_addMagsF16.c",
"deps/SoftFloat-3e/source/s_addMagsF32.c",
@@ -755,12 +865,14 @@ const softfloat_sources = [_][]const u8{
"deps/SoftFloat-3e/source/s_approxRecip_1Ks.c",
"deps/SoftFloat-3e/source/s_compare128M.c",
"deps/SoftFloat-3e/source/s_compare96M.c",
+ "deps/SoftFloat-3e/source/s_compareNonnormExtF80M.c",
"deps/SoftFloat-3e/source/s_countLeadingZeros16.c",
"deps/SoftFloat-3e/source/s_countLeadingZeros32.c",
"deps/SoftFloat-3e/source/s_countLeadingZeros64.c",
"deps/SoftFloat-3e/source/s_countLeadingZeros8.c",
"deps/SoftFloat-3e/source/s_eq128.c",
"deps/SoftFloat-3e/source/s_invalidF128M.c",
+ "deps/SoftFloat-3e/source/s_invalidExtF80M.c",
"deps/SoftFloat-3e/source/s_isNaNF128M.c",
"deps/SoftFloat-3e/source/s_le128.c",
"deps/SoftFloat-3e/source/s_lt128.c",
@@ -771,7 +883,9 @@ const softfloat_sources = [_][]const u8{
"deps/SoftFloat-3e/source/s_mulAddF32.c",
"deps/SoftFloat-3e/source/s_mulAddF64.c",
"deps/SoftFloat-3e/source/s_negXM.c",
+ "deps/SoftFloat-3e/source/s_normExtF80SigM.c",
"deps/SoftFloat-3e/source/s_normRoundPackMToF128M.c",
+ "deps/SoftFloat-3e/source/s_normRoundPackMToExtF80M.c",
"deps/SoftFloat-3e/source/s_normRoundPackToF16.c",
"deps/SoftFloat-3e/source/s_normRoundPackToF32.c",
"deps/SoftFloat-3e/source/s_normRoundPackToF64.c",
@@ -782,6 +896,7 @@ const softfloat_sources = [_][]const u8{
"deps/SoftFloat-3e/source/s_remStepMBy32.c",
"deps/SoftFloat-3e/source/s_roundMToI64.c",
"deps/SoftFloat-3e/source/s_roundMToUI64.c",
+ "deps/SoftFloat-3e/source/s_roundPackMToExtF80M.c",
"deps/SoftFloat-3e/source/s_roundPackMToF128M.c",
"deps/SoftFloat-3e/source/s_roundPackToF16.c",
"deps/SoftFloat-3e/source/s_roundPackToF32.c",
@@ -810,9 +925,12 @@ const softfloat_sources = [_][]const u8{
"deps/SoftFloat-3e/source/s_subMagsF32.c",
"deps/SoftFloat-3e/source/s_subMagsF64.c",
"deps/SoftFloat-3e/source/s_tryPropagateNaNF128M.c",
+ "deps/SoftFloat-3e/source/s_tryPropagateNaNExtF80M.c",
"deps/SoftFloat-3e/source/softfloat_state.c",
"deps/SoftFloat-3e/source/ui32_to_f128M.c",
"deps/SoftFloat-3e/source/ui64_to_f128M.c",
+ "deps/SoftFloat-3e/source/ui32_to_extF80M.c",
+ "deps/SoftFloat-3e/source/ui64_to_extF80M.c",
};
const stage1_sources = [_][]const u8{
diff --git a/ci/azure/build.zig b/ci/azure/build.zig
new file mode 100644
index 0000000000..12197fdf07
--- /dev/null
+++ b/ci/azure/build.zig
@@ -0,0 +1,980 @@
+const std = @import("std");
+const builtin = std.builtin;
+const Builder = std.build.Builder;
+const BufMap = std.BufMap;
+const mem = std.mem;
+const ArrayList = std.ArrayList;
+const io = std.io;
+const fs = std.fs;
+const InstallDirectoryOptions = std.build.InstallDirectoryOptions;
+const assert = std.debug.assert;
+
+const zig_version = std.builtin.Version{ .major = 0, .minor = 10, .patch = 0 };
+
+pub fn build(b: *Builder) !void {
+ b.setPreferredReleaseMode(.ReleaseFast);
+ const mode = b.standardReleaseOptions();
+ const target = b.standardTargetOptions(.{});
+ const single_threaded = b.option(bool, "single-threaded", "Build artifacts that run in single threaded mode");
+ const use_zig_libcxx = b.option(bool, "use-zig-libcxx", "If libc++ is needed, use zig's bundled version, don't try to integrate with the system") orelse false;
+
+ const docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
+ docgen_exe.single_threaded = single_threaded;
+
+ const rel_zig_exe = try fs.path.relative(b.allocator, b.build_root, b.zig_exe);
+ const langref_out_path = fs.path.join(
+ b.allocator,
+ &[_][]const u8{ b.cache_root, "langref.html" },
+ ) catch unreachable;
+ const docgen_cmd = docgen_exe.run();
+ docgen_cmd.addArgs(&[_][]const u8{
+ rel_zig_exe,
+ "doc" ++ fs.path.sep_str ++ "langref.html.in",
+ langref_out_path,
+ });
+ docgen_cmd.step.dependOn(&docgen_exe.step);
+
+ const docs_step = b.step("docs", "Build documentation");
+ docs_step.dependOn(&docgen_cmd.step);
+
+ const is_stage1 = b.option(bool, "stage1", "Build the stage1 compiler, put stage2 behind a feature flag") orelse false;
+ const omit_stage2 = b.option(bool, "omit-stage2", "Do not include stage2 behind a feature flag inside stage1") orelse false;
+ const static_llvm = b.option(bool, "static-llvm", "Disable integration with system-installed LLVM, Clang, LLD, and libc++") orelse false;
+ const enable_llvm = b.option(bool, "enable-llvm", "Build self-hosted compiler with LLVM backend enabled") orelse (is_stage1 or static_llvm);
+ const llvm_has_m68k = b.option(
+ bool,
+ "llvm-has-m68k",
+ "Whether LLVM has the experimental target m68k enabled",
+ ) orelse false;
+ const llvm_has_csky = b.option(
+ bool,
+ "llvm-has-csky",
+ "Whether LLVM has the experimental target csky enabled",
+ ) orelse false;
+ const llvm_has_ve = b.option(
+ bool,
+ "llvm-has-ve",
+ "Whether LLVM has the experimental target ve enabled",
+ ) orelse false;
+ const llvm_has_arc = b.option(
+ bool,
+ "llvm-has-arc",
+ "Whether LLVM has the experimental target arc enabled",
+ ) orelse false;
+ const config_h_path_option = b.option([]const u8, "config_h", "Path to the generated config.h");
+
+ b.installDirectory(InstallDirectoryOptions{
+ .source_dir = "lib",
+ .install_dir = .lib,
+ .install_subdir = "zig",
+ .exclude_extensions = &[_][]const u8{
+ // exclude files from lib/std/compress/
+ ".gz",
+ ".z.0",
+ ".z.9",
+ "rfc1951.txt",
+ "rfc1952.txt",
+ // exclude files from lib/std/compress/deflate/testdata
+ ".expect",
+ ".expect-noinput",
+ ".golden",
+ ".input",
+ "compress-e.txt",
+ "compress-gettysburg.txt",
+ "compress-pi.txt",
+ "rfc1951.txt",
+ // exclude files from lib/std/tz/
+ ".tzif",
+ // others
+ "README.md",
+ },
+ .blank_extensions = &[_][]const u8{
+ "test.zig",
+ },
+ });
+
+ const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source");
+ const tracy_callstack = b.option(bool, "tracy-callstack", "Include callstack information with Tracy data. Does nothing if -Dtracy is not provided") orelse false;
+ const tracy_allocation = b.option(bool, "tracy-allocation", "Include allocation information with Tracy data. Does nothing if -Dtracy is not provided") orelse false;
+ const force_gpa = b.option(bool, "force-gpa", "Force the compiler to use GeneralPurposeAllocator") orelse false;
+ const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse enable_llvm;
+ const strip = b.option(bool, "strip", "Omit debug information") orelse false;
+ const value_tracing = b.option(bool, "value-tracing", "Enable extra state tracking to help troubleshoot bugs in the compiler (using the std.debug.Trace API)") orelse false;
+
+ const mem_leak_frames: u32 = b.option(u32, "mem-leak-frames", "How many stack frames to print when a memory leak occurs. Tests get 2x this amount.") orelse blk: {
+ if (strip) break :blk @as(u32, 0);
+ if (mode != .Debug) break :blk 0;
+ break :blk 4;
+ };
+
+ const main_file: ?[]const u8 = if (is_stage1) null else "src/main.zig";
+
+ const exe = b.addExecutable("zig", main_file);
+ exe.strip = strip;
+ exe.install();
+ exe.setBuildMode(mode);
+ exe.setTarget(target);
+
+ b.default_step.dependOn(&exe.step);
+ exe.single_threaded = single_threaded;
+
+ if (target.isWindows() and target.getAbi() == .gnu) {
+ // LTO is currently broken on mingw, this can be removed when it's fixed.
+ exe.want_lto = false;
+ }
+
+ const exe_options = b.addOptions();
+ exe.addOptions("build_options", exe_options);
+
+ exe_options.addOption(u32, "mem_leak_frames", mem_leak_frames);
+ exe_options.addOption(bool, "skip_non_native", false);
+ exe_options.addOption(bool, "have_llvm", enable_llvm);
+ exe_options.addOption(bool, "llvm_has_m68k", llvm_has_m68k);
+ exe_options.addOption(bool, "llvm_has_csky", llvm_has_csky);
+ exe_options.addOption(bool, "llvm_has_ve", llvm_has_ve);
+ exe_options.addOption(bool, "llvm_has_arc", llvm_has_arc);
+ exe_options.addOption(bool, "force_gpa", force_gpa);
+
+ if (link_libc) {
+ exe.linkLibC();
+ }
+
+ const is_debug = mode == .Debug;
+ const enable_logging = b.option(bool, "log", "Enable debug logging with --debug-log") orelse is_debug;
+ const enable_link_snapshots = b.option(bool, "link-snapshot", "Whether to enable linker state snapshots") orelse false;
+
+ const opt_version_string = b.option([]const u8, "version-string", "Override Zig version string. Default is to find out with git.");
+ const version = if (opt_version_string) |version| version else v: {
+ const version_string = b.fmt("{d}.{d}.{d}", .{ zig_version.major, zig_version.minor, zig_version.patch });
+
+ var code: u8 = undefined;
+ const git_describe_untrimmed = b.execAllowFail(&[_][]const u8{
+ "git", "-C", b.build_root, "describe", "--match", "*.*.*", "--tags",
+ }, &code, .Ignore) catch {
+ break :v version_string;
+ };
+ const git_describe = mem.trim(u8, git_describe_untrimmed, " \n\r");
+
+ switch (mem.count(u8, git_describe, "-")) {
+ 0 => {
+ // Tagged release version (e.g. 0.9.0).
+ if (!mem.eql(u8, git_describe, version_string)) {
+ std.debug.print("Zig version '{s}' does not match Git tag '{s}'\n", .{ version_string, git_describe });
+ std.process.exit(1);
+ }
+ break :v version_string;
+ },
+ 2 => {
+ // Untagged development build (e.g. 0.9.0-dev.2025+ecf0050a9).
+ var it = mem.split(u8, git_describe, "-");
+ const tagged_ancestor = it.next() orelse unreachable;
+ const commit_height = it.next() orelse unreachable;
+ const commit_id = it.next() orelse unreachable;
+
+ const ancestor_ver = try std.builtin.Version.parse(tagged_ancestor);
+ if (zig_version.order(ancestor_ver) != .gt) {
+ std.debug.print("Zig version '{}' must be greater than tagged ancestor '{}'\n", .{ zig_version, ancestor_ver });
+ std.process.exit(1);
+ }
+
+ // Check that the commit hash is prefixed with a 'g' (a Git convention).
+ if (commit_id.len < 1 or commit_id[0] != 'g') {
+ std.debug.print("Unexpected `git describe` output: {s}\n", .{git_describe});
+ break :v version_string;
+ }
+
+ // The version is reformatted in accordance with the https://semver.org specification.
+ break :v b.fmt("{s}-dev.{s}+{s}", .{ version_string, commit_height, commit_id[1..] });
+ },
+ else => {
+ std.debug.print("Unexpected `git describe` output: {s}\n", .{git_describe});
+ break :v version_string;
+ },
+ }
+ };
+ exe_options.addOption([:0]const u8, "version", try b.allocator.dupeZ(u8, version));
+
+ if (enable_llvm) {
+ const cmake_cfg = if (static_llvm) null else findAndParseConfigH(b, config_h_path_option);
+
+ if (is_stage1) {
+ const softfloat = b.addStaticLibrary("softfloat", null);
+ softfloat.setBuildMode(.ReleaseFast);
+ softfloat.setTarget(target);
+ softfloat.addIncludeDir("deps/SoftFloat-3e-prebuilt");
+ softfloat.addIncludeDir("deps/SoftFloat-3e/source/8086");
+ softfloat.addIncludeDir("deps/SoftFloat-3e/source/include");
+ softfloat.addCSourceFiles(&softfloat_sources, &[_][]const u8{ "-std=c99", "-O3" });
+ softfloat.single_threaded = single_threaded;
+
+ const zig0 = b.addExecutable("zig0", null);
+ zig0.addCSourceFiles(&.{"src/stage1/zig0.cpp"}, &exe_cflags);
+ zig0.addIncludeDir("zig-cache/tmp"); // for config.h
+ zig0.defineCMacro("ZIG_VERSION_MAJOR", b.fmt("{d}", .{zig_version.major}));
+ zig0.defineCMacro("ZIG_VERSION_MINOR", b.fmt("{d}", .{zig_version.minor}));
+ zig0.defineCMacro("ZIG_VERSION_PATCH", b.fmt("{d}", .{zig_version.patch}));
+ zig0.defineCMacro("ZIG_VERSION_STRING", b.fmt("\"{s}\"", .{version}));
+
+ for ([_]*std.build.LibExeObjStep{ zig0, exe }) |artifact| {
+ artifact.addIncludeDir("src");
+ artifact.addIncludeDir("deps/SoftFloat-3e/source/include");
+ artifact.addIncludeDir("deps/SoftFloat-3e-prebuilt");
+
+ artifact.defineCMacro("ZIG_LINK_MODE", "Static");
+
+ artifact.addCSourceFiles(&stage1_sources, &exe_cflags);
+ artifact.addCSourceFiles(&optimized_c_sources, &[_][]const u8{ "-std=c99", "-O3" });
+
+ artifact.linkLibrary(softfloat);
+ artifact.linkLibCpp();
+ }
+
+ try addStaticLlvmOptionsToExe(zig0);
+
+ const zig1_obj_ext = target.getObjectFormat().fileExt(target.getCpuArch());
+ const zig1_obj_path = b.pathJoin(&.{ "zig-cache", "tmp", b.fmt("zig1{s}", .{zig1_obj_ext}) });
+ const zig1_compiler_rt_path = b.pathJoin(&.{ b.pathFromRoot("lib"), "std", "special", "compiler_rt.zig" });
+
+ const zig1_obj = zig0.run();
+ zig1_obj.addArgs(&.{
+ "src/stage1.zig",
+ "-target",
+ try target.zigTriple(b.allocator),
+ "-mcpu=baseline",
+ "--name",
+ "zig1",
+ "--zig-lib-dir",
+ b.pathFromRoot("lib"),
+ b.fmt("-femit-bin={s}", .{b.pathFromRoot(zig1_obj_path)}),
+ "-fcompiler-rt",
+ "-lc",
+ });
+ {
+ zig1_obj.addArgs(&.{ "--pkg-begin", "build_options" });
+ zig1_obj.addFileSourceArg(exe_options.getSource());
+ zig1_obj.addArgs(&.{ "--pkg-end", "--pkg-begin", "compiler_rt", zig1_compiler_rt_path, "--pkg-end" });
+ }
+ switch (mode) {
+ .Debug => {},
+ .ReleaseFast => {
+ zig1_obj.addArg("-OReleaseFast");
+ zig1_obj.addArg("--strip");
+ },
+ .ReleaseSafe => {
+ zig1_obj.addArg("-OReleaseSafe");
+ zig1_obj.addArg("--strip");
+ },
+ .ReleaseSmall => {
+ zig1_obj.addArg("-OReleaseSmall");
+ zig1_obj.addArg("--strip");
+ },
+ }
+ if (single_threaded orelse false) {
+ zig1_obj.addArg("-fsingle-threaded");
+ }
+
+ exe.step.dependOn(&zig1_obj.step);
+ exe.addObjectFile(zig1_obj_path);
+
+ // This is intentionally a dummy path. stage1.zig tries to @import("compiler_rt") in case
+ // of being built by cmake. But when built by zig it's gonna get a compiler_rt so that
+ // is pointless.
+ exe.addPackagePath("compiler_rt", "src/empty.zig");
+ }
+ if (cmake_cfg) |cfg| {
+ // Inside this code path, we have to coordinate with system packaged LLVM, Clang, and LLD.
+ // That means we also have to rely on stage1 compiled c++ files. We parse config.h to find
+ // the information passed on to us from cmake.
+ if (cfg.cmake_prefix_path.len > 0) {
+ b.addSearchPrefix(cfg.cmake_prefix_path);
+ }
+
+ try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
+ } else {
+ // Here we are -Denable-llvm but no cmake integration.
+ try addStaticLlvmOptionsToExe(exe);
+ }
+ }
+
+ const semver = try std.SemanticVersion.parse(version);
+ exe_options.addOption(std.SemanticVersion, "semver", semver);
+
+ exe_options.addOption(bool, "enable_logging", enable_logging);
+ exe_options.addOption(bool, "enable_link_snapshots", enable_link_snapshots);
+ exe_options.addOption(bool, "enable_tracy", tracy != null);
+ exe_options.addOption(bool, "enable_tracy_callstack", tracy_callstack);
+ exe_options.addOption(bool, "enable_tracy_allocation", tracy_allocation);
+ exe_options.addOption(bool, "value_tracing", value_tracing);
+ exe_options.addOption(bool, "is_stage1", is_stage1);
+ exe_options.addOption(bool, "omit_stage2", omit_stage2);
+ if (tracy) |tracy_path| {
+ const client_cpp = fs.path.join(
+ b.allocator,
+ &[_][]const u8{ tracy_path, "TracyClient.cpp" },
+ ) catch unreachable;
+
+ // On mingw, we need to opt into windows 7+ to get some features required by tracy.
+ const tracy_c_flags: []const []const u8 = if (target.isWindows() and target.getAbi() == .gnu)
+ &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined", "-D_WIN32_WINNT=0x601" }
+ else
+ &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" };
+
+ exe.addIncludeDir(tracy_path);
+ exe.addCSourceFile(client_cpp, tracy_c_flags);
+ if (!enable_llvm) {
+ exe.linkSystemLibraryName("c++");
+ }
+ exe.linkLibC();
+
+ if (target.isWindows()) {
+ exe.linkSystemLibrary("dbghelp");
+ exe.linkSystemLibrary("ws2_32");
+ }
+ }
+}
+
+const exe_cflags = [_][]const u8{
+ "-std=c++14",
+ "-D__STDC_CONSTANT_MACROS",
+ "-D__STDC_FORMAT_MACROS",
+ "-D__STDC_LIMIT_MACROS",
+ "-D_GNU_SOURCE",
+ "-fvisibility-inlines-hidden",
+ "-fno-exceptions",
+ "-fno-rtti",
+ "-Werror=type-limits",
+ "-Wno-missing-braces",
+ "-Wno-comment",
+};
+
+fn addCmakeCfgOptionsToExe(
+ b: *Builder,
+ cfg: CMakeConfig,
+ exe: *std.build.LibExeObjStep,
+ use_zig_libcxx: bool,
+) !void {
+ exe.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{
+ cfg.cmake_binary_dir,
+ "zigcpp",
+ b.fmt("{s}{s}{s}", .{ exe.target.libPrefix(), "zigcpp", exe.target.staticLibSuffix() }),
+ }) catch unreachable);
+ assert(cfg.lld_include_dir.len != 0);
+ exe.addIncludeDir(cfg.lld_include_dir);
+ addCMakeLibraryList(exe, cfg.clang_libraries);
+ addCMakeLibraryList(exe, cfg.lld_libraries);
+ addCMakeLibraryList(exe, cfg.llvm_libraries);
+
+ if (use_zig_libcxx) {
+ exe.linkLibCpp();
+ } else {
+ const need_cpp_includes = true;
+
+ // System -lc++ must be used because in this code path we are attempting to link
+ // against system-provided LLVM, Clang, LLD.
+ if (exe.target.getOsTag() == .linux) {
+ // First we try to static link against gcc libstdc++. If that doesn't work,
+ // we fall back to -lc++ and cross our fingers.
+ addCxxKnownPath(b, cfg, exe, "libstdc++.a", "", need_cpp_includes) catch |err| switch (err) {
+ error.RequiredLibraryNotFound => {
+ exe.linkSystemLibrary("c++");
+ },
+ else => |e| return e,
+ };
+ exe.linkSystemLibrary("unwind");
+ } else if (exe.target.isFreeBSD()) {
+ try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
+ exe.linkSystemLibrary("pthread");
+ } else if (exe.target.getOsTag() == .openbsd) {
+ try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
+ try addCxxKnownPath(b, cfg, exe, "libc++abi.a", null, need_cpp_includes);
+ } else if (exe.target.isDarwin()) {
+ exe.linkSystemLibrary("c++");
+ }
+ }
+
+ if (cfg.dia_guids_lib.len != 0) {
+ exe.addObjectFile(cfg.dia_guids_lib);
+ }
+}
+
+fn addStaticLlvmOptionsToExe(
+ exe: *std.build.LibExeObjStep,
+) !void {
+ // Adds the Zig C++ sources which both stage1 and stage2 need.
+ //
+ // We need this because otherwise zig_clang_cc1_main.cpp ends up pulling
+ // in a dependency on llvm::cfg::UpdateZig Language Reference
Next, a {#link|public function|Functions#}, {#syntax#}pub fn{#endsyntax#}, named {#syntax#}main{#endsyntax#} @@ -535,8 +535,8 @@ const Timestamp = struct { {#header_close#} {#header_open|Top-Level Doc Comments#}
User documentation that doesn't belong to whatever - immediately follows it, like package-level documentation, goes - in top-level doc comments. A top-level doc comment is one that + immediately follows it, like container level documentation, goes + in top level doc comments. A top level doc comment is one that begins with two slashes and an exclamation point: {#syntax#}//!{#endsyntax#}.
{#code_begin|syntax|tldoc_comments#} @@ -672,8 +672,8 @@ pub fn main() void {uintptr_tuintptr_t, size_tThe shell output shown above displays two lines after the zig test command. These lines are @@ -1295,13 +1295,39 @@ test "expectError demo" { A variable is a unit of {#link|Memory#} storage.
- Variables are never allowed to shadow identifiers from an outer scope. -
-It is generally preferable to use {#syntax#}const{#endsyntax#} rather than {#syntax#}var{#endsyntax#} when declaring a variable. This causes less work for both humans and computers to do when reading code, and creates more optimization opportunities.
+ + {#header_open|Identifiers#} ++ Variable identifiers are never allowed to shadow identifiers from an outer scope. +
++ Identifiers must start with an alphabetic character or underscore and may be followed + by any number of alphanumeric characters or underscores. + They must not overlap with any keywords. See {#link|Keyword Reference#}. +
++ If a name that does not fit these requirements is needed, such as for linking with external libraries, the {#syntax#}@""{#endsyntax#} syntax may be used. +
+ {#code_begin|syntax#} +const @"identifier with spaces in it" = 0xff; +const @"1SmallStep4Man" = 112358; + +const c = @import("std").c; +pub extern "c" fn @"error"() void; +pub extern "c" fn @"fstat$INODE64"(fd: c.fd_t, buf: *c.Stat) c_int; + +const Color = enum { + red, + @"really red", +}; +const color: Color = .@"really red"; + {#code_end#} + {#header_close#} + {#header_open|Container Level Variables#}Container level variables have static lifetime and are order-independent and lazily analyzed. @@ -1486,7 +1512,7 @@ fn divide(a: i32, b: i32) i32 {
Operators such as {#syntax#}+{#endsyntax#} and {#syntax#}-{#endsyntax#} cause undefined behavior on - integer overflow. Alternative operators are provided for wrapping and saturating arithmetic on all targets. + integer overflow. Alternative operators are provided for wrapping and saturating arithmetic on all targets. {#syntax#}+%{#endsyntax#} and {#syntax#}-%{#endsyntax#} perform wrapping arithmetic while {#syntax#}+|{#endsyntax#} and {#syntax#}-|{#endsyntax#} perform saturating arithmetic.
@@ -2489,51 +2515,51 @@ test "null terminated array" { {#header_open|Vectors#}- A vector is a group of booleans, {#link|Integers#}, {#link|Floats#}, or {#link|Pointers#} which are operated on - in parallel using SIMD instructions. Vector types are created with the builtin function {#link|@Type#}, - or using the shorthand function {#syntax#}std.meta.Vector{#endsyntax#}. + A vector is a group of booleans, {#link|Integers#}, {#link|Floats#}, or + {#link|Pointers#} which are operated on in parallel, using SIMD instructions if possible. + Vector types are created with the builtin function {#link|@Vector#}.
- Vectors support the same builtin operators as their underlying base types. These operations are performed - element-wise, and return a vector of the same length as the input vectors. This includes: + Vectors support the same builtin operators as their underlying base types. + These operations are performed element-wise, and return a vector of the same length + as the input vectors. This includes:
- It is prohibited to use a math operator on a mixture of scalars (individual numbers) and vectors. - Zig provides the {#link|@splat#} builtin to easily convert from scalars to vectors, and it supports {#link|@reduce#} - and array indexing syntax to convert from vectors to scalars. Vectors also support assignment to and from - fixed-length arrays with comptime known length. + It is prohibited to use a math operator on a mixture of scalars (individual numbers) + and vectors. Zig provides the {#link|@splat#} builtin to easily convert from scalars + to vectors, and it supports {#link|@reduce#} and array indexing syntax to convert + from vectors to scalars. Vectors also support assignment to and from fixed-length + arrays with comptime known length.
For rearranging elements within and between vectors, Zig provides the {#link|@shuffle#} and {#link|@select#} functions.
- Operations on vectors shorter than the target machine's native SIMD size will typically compile to single SIMD - instructions, while vectors longer than the target machine's native SIMD size will compile to multiple SIMD - instructions. If a given operation doesn't have SIMD support on the target architecture, the compiler will default - to operating on each vector element one at a time. Zig supports any comptime-known vector length up to 2^32-1, - although small powers of two (2-64) are most typical. Note that excessively long vector lengths (e.g. 2^20) may + Operations on vectors shorter than the target machine's native SIMD size will typically compile to single SIMD + instructions, while vectors longer than the target machine's native SIMD size will compile to multiple SIMD + instructions. If a given operation doesn't have SIMD support on the target architecture, the compiler will default + to operating on each vector element one at a time. Zig supports any comptime-known vector length up to 2^32-1, + although small powers of two (2-64) are most typical. Note that excessively long vector lengths (e.g. 2^20) may result in compiler crashes on current versions of Zig.
{#code_begin|test|vector_example#} const std = @import("std"); -const Vector = std.meta.Vector; const expectEqual = std.testing.expectEqual; test "Basic vector usage" { - // Vectors have a compile-time known length and base type, - // and can be assigned to using array literal syntax - const a: Vector(4, i32) = [_]i32{ 1, 2, 3, 4 }; - const b: Vector(4, i32) = [_]i32{ 5, 6, 7, 8 }; + // Vectors have a compile-time known length and base type. + const a = @Vector(4, i32){ 1, 2, 3, 4 }; + const b = @Vector(4, i32){ 5, 6, 7, 8 }; - // Math operations take place element-wise + // Math operations take place element-wise. const c = a + b; // Individual vector elements can be accessed using array indexing syntax. @@ -2546,19 +2572,19 @@ test "Basic vector usage" { test "Conversion between vectors, arrays, and slices" { // Vectors and fixed-length arrays can be automatically assigned back and forth var arr1: [4]f32 = [_]f32{ 1.1, 3.2, 4.5, 5.6 }; - var vec: Vector(4, f32) = arr1; + var vec: @Vector(4, f32) = arr1; var arr2: [4]f32 = vec; try expectEqual(arr1, arr2); // You can also assign from a slice with comptime-known length to a vector using .* - const vec2: Vector(2, f32) = arr1[1..3].*; + const vec2: @Vector(2, f32) = arr1[1..3].*; var slice: []const f32 = &arr1; var offset: u32 = 1; // To extract a comptime-known length from a runtime-known offset, // first extract a new slice from the starting offset, then an array of // comptime known length - const vec3: Vector(2, f32) = slice[offset..][0..2].*; + const vec3: @Vector(2, f32) = slice[offset..][0..2].*; try expectEqual(slice[offset], vec2[0]); try expectEqual(slice[offset + 1], vec2[1]); try expectEqual(vec2, vec3); @@ -2569,7 +2595,7 @@ test "Conversion between vectors, arrays, and slices" { TODO consider suggesting std.MultiArrayList {#see_also|@splat|@shuffle|@select|@reduce#} - + {#header_close#} {#header_open|Pointers#} @@ -2987,8 +3013,8 @@ test "null terminated slice" { } {#code_end#}- Sentinel-terminated 
slices can also be created using a variation of the slice syntax - {#syntax#}data[start..end :x]{#endsyntax#}, where {#syntax#}data{#endsyntax#} is a many-item pointer, + Sentinel-terminated slices can also be created using a variation of the slice syntax + {#syntax#}data[start..end :x]{#endsyntax#}, where {#syntax#}data{#endsyntax#} is a many-item pointer, array or slice and {#syntax#}x{#endsyntax#} is the sentinel value.
{#code_begin|test|null_terminated_slicing#} @@ -3005,7 +3031,7 @@ test "null terminated slicing" { } {#code_end#}- Sentinel-terminated slicing asserts that the element in the sentinel position of the backing data is + Sentinel-terminated slicing asserts that the element in the sentinel position of the backing data is actually the sentinel value. If this is not the case, safety-protected {#link|Undefined Behavior#} results.
{#code_begin|test_safety|sentinel mismatch#} @@ -3014,10 +3040,10 @@ const expect = std.testing.expect; test "sentinel mismatch" { var array = [_]u8{ 3, 2, 1, 0 }; - - // Creating a sentinel-terminated slice from the array with a length of 2 - // will result in the value `1` occupying the sentinel element position. - // This does not match the indicated sentinel value of `0` and will lead + + // Creating a sentinel-terminated slice from the array with a length of 2 + // will result in the value `1` occupying the sentinel element position. + // This does not match the indicated sentinel value of `0` and will lead // to a runtime panic. var runtime_length: usize = 2; const slice = array[0..runtime_length :0]; @@ -3165,7 +3191,7 @@ test "linked list" { .last = &node, .len = 1, }; - + // When using a pointer to a struct, fields can be accessed directly, // without explicitly dereferencing the pointer. // So you can do @@ -3428,6 +3454,8 @@ test "aligned struct fields" {(anonymous struct at file.zig:7:38).The fields are implicitly named using numbers starting from 0. Because their names are integers, - the {#syntax#}@"0"{#endsyntax#} syntax must be used to access them. Names inside {#syntax#}@""{#endsyntax#} are always recognised as identifiers. + the {#syntax#}@"0"{#endsyntax#} syntax must be used to access them. Names inside {#syntax#}@""{#endsyntax#} are always recognised as {#link|identifiers|Identifiers#}.
Like arrays, tuples have a .len field, can be indexed and work with the ++ and ** operators. They can also be iterated over with {#link|inline for#}. @@ -3986,7 +4014,7 @@ test "labeled break from labeled block expression" { {#see_also|Labeled while|Labeled for#} {#header_open|Shadowing#} -
Identifiers are never allowed to "hide" other identifiers by using the same name:
+{#link|Identifiers#} are never allowed to "hide" other identifiers by using the same name:
{#code_begin|test_err|local shadows declaration#} const pi = 3.14; @@ -3998,8 +4026,8 @@ test "inside test block" { } {#code_end#}- Because of this, when you read Zig code you can always rely on an identifier to consistently mean - the same thing within the scope it is defined. Note that you can, however, use the same name if + Because of this, when you read Zig code you can always rely on an identifier to consistently mean + the same thing within the scope it is defined. Note that you can, however, use the same name if the scopes are separate:
{#code_begin|test|test_scopes#} @@ -4037,7 +4065,7 @@ test "switch simple" { 1, 2, 3 => 0, // Ranges can be specified using the ... syntax. These are inclusive - // both ends. + // of both ends. 5...100 => 1, // Branches can be arbitrarily complex. @@ -4809,7 +4837,7 @@ test "errdefer unwinding" { {#header_open|Basics#} {#code_begin|test|test_unreachable#} -// unreachable is used to assert that control flow will never happen upon a +// unreachable is used to assert that control flow will never reach a // particular location: test "basic math" { const x = 1; @@ -5309,6 +5337,179 @@ fn createFoo(param: i32) !Foo { is covered. The deallocation code is always directly following the allocation code. {#header_close#} + {#header_open|Common errdefer Slip-Ups#} ++ It should be noted that {#syntax#}errdefer{#endsyntax#} statements only last until the end of the block + they are written in, and therefore are not run if an error is returned outside of that block: +
+ {#code_begin|test_err|1 tests leaked memory#} +const std = @import("std"); +const Allocator = std.mem.Allocator; + +const Foo = struct { + data: u32, +}; + +fn tryToAllocateFoo(allocator: Allocator) !*Foo { + return allocator.create(Foo); +} + +fn deallocateFoo(allocator: Allocator, foo: *Foo) void { + allocator.destroy(foo); +} + +fn getFooData() !u32 { + return 666; +} + +fn createFoo(allocator: Allocator, param: i32) !*Foo { + const foo = getFoo: { + var foo = try tryToAllocateFoo(allocator); + errdefer deallocateFoo(allocator, foo); // Only lasts until the end of getFoo + + // Calls deallocateFoo on error + foo.data = try getFooData(); + + break :getFoo foo; + }; + + // Outside of the scope of the errdefer, so + // deallocateFoo will not be called here + if (param > 1337) return error.InvalidParam; + + return foo; +} + +test "createFoo" { + try std.testing.expectError(error.InvalidParam, createFoo(std.testing.allocator, 2468)); +} + {#code_end#} ++ To ensure that {#syntax#}deallocateFoo{#endsyntax#} is properly called + when returning an error, you must add an {#syntax#}errdefer{#endsyntax#} outside of the block: + {#code_begin|test|test_errdefer_block#} +const std = @import("std"); +const Allocator = std.mem.Allocator; + +const Foo = struct { + data: u32, +}; + +fn tryToAllocateFoo(allocator: Allocator) !*Foo { + return allocator.create(Foo); +} + +fn deallocateFoo(allocator: Allocator, foo: *Foo) void { + allocator.destroy(foo); +} + +fn getFooData() !u32 { + return 666; +} + +fn createFoo(allocator: Allocator, param: i32) !*Foo { + const foo = getFoo: { + var foo = try tryToAllocateFoo(allocator); + errdefer deallocateFoo(allocator, foo); + + foo.data = try getFooData(); + + break :getFoo foo; + }; + // This lasts for the rest of the function + errdefer deallocateFoo(allocator, foo); + + // Error is now properly handled by errdefer + if (param > 1337) return error.InvalidParam; + + return foo; +} + +test "createFoo" { + try 
std.testing.expectError(error.InvalidParam, createFoo(std.testing.allocator, 2468)); +} + {#code_end#} +
+ The fact that errdefers only last for the block they are declared in is + especially important when using loops: +
+ {#code_begin|test_err|3 errors were logged#} +const std = @import("std"); +const Allocator = std.mem.Allocator; + +const Foo = struct { + data: *u32 +}; + +fn getData() !u32 { + return 666; +} + +fn genFoos(allocator: Allocator, num: usize) ![]Foo { + var foos = try allocator.alloc(Foo, num); + errdefer allocator.free(foos); + + for(foos) |*foo, i| { + foo.data = try allocator.create(u32); + // This errdefer does not last between iterations + errdefer allocator.destroy(foo.data); + + // The data for the first 3 foos will be leaked + if(i >= 3) return error.TooManyFoos; + + foo.data.* = try getData(); + } + + return foos; +} + +test "genFoos" { + try std.testing.expectError(error.TooManyFoos, genFoos(std.testing.allocator, 5)); +} + {#code_end#} ++ Special care must be taken with code that allocates in a loop + to make sure that no memory is leaked when returning an error: +
+ {#code_begin|test|test_errdefer_loop#} +const std = @import("std"); +const Allocator = std.mem.Allocator; + +const Foo = struct { + data: *u32 +}; + +fn getData() !u32 { + return 666; +} + +fn genFoos(allocator: Allocator, num: usize) ![]Foo { + var foos = try allocator.alloc(Foo, num); + errdefer allocator.free(foos); + + // Used to track how many foos have been initialized + // (including their data being allocated) + var num_allocated: usize = 0; + errdefer for(foos[0..num_allocated]) |foo| { + allocator.destroy(foo.data); + }; + for(foos) |*foo, i| { + foo.data = try allocator.create(u32); + num_allocated += 1; + + if(i >= 3) return error.TooManyFoos; + + foo.data.* = try getData(); + } + + return foos; +} + +test "genFoos" { + try std.testing.expectError(error.TooManyFoos, genFoos(std.testing.allocator, 5)); +} + {#code_end#} + {#header_close#}A couple of other tidbits about error handling:
@@ -6777,8 +6978,7 @@ test "variable values" { generic data structure.- Here is an example of a generic {#syntax#}List{#endsyntax#} data structure, that we will instantiate with - the type {#syntax#}i32{#endsyntax#}. In Zig we refer to the type as {#syntax#}List(i32){#endsyntax#}. + Here is an example of a generic {#syntax#}List{#endsyntax#} data structure.
{#code_begin|syntax#} fn List(comptime T: type) type { @@ -6787,27 +6987,46 @@ fn List(comptime T: type) type { len: usize, }; } - {#code_end#} -- That's it. It's a function that returns an anonymous {#syntax#}struct{#endsyntax#}. For the purposes of error messages - and debugging, Zig infers the name {#syntax#}"List(i32)"{#endsyntax#} from the function name and parameters invoked when creating - the anonymous struct. -
-- To keep the language small and uniform, all aggregate types in Zig are anonymous. To give a type - a name, we assign it to a constant: -
- {#code_begin|syntax#} -const Node = struct { - next: *Node, - name: []u8, + +// The generic List data structure can be instantiated by passing in a type: +var buffer: [10]i32 = undefined; +var list = List(i32){ + .items = &buffer, + .len = 0, }; {#code_end#}- This works because all top level declarations are order-independent, and as long as there isn't - an actual infinite regression, values can refer to themselves, directly or indirectly. In this case, - {#syntax#}Node{#endsyntax#} refers to itself as a pointer, which is not actually an infinite regression, so - it works fine. + That's it. It's a function that returns an anonymous {#syntax#}struct{#endsyntax#}. + To keep the language small and uniform, all aggregate types in Zig are anonymous. + For the purposes of error messages and debugging, Zig infers the name + {#syntax#}"List(i32)"{#endsyntax#} from the function name and parameters invoked when creating + the anonymous struct. +
++ To explicitly give a type a name, we assign it to a constant. +
+ {#code_begin|syntax#} +const Node = struct { + next: ?*Node, + name: []const u8, +}; + +var node_a = Node{ + .next = null, + .name = &"Node A", +}; + +var node_b = Node{ + .next = &node_a, + .name = &"Node B", +}; + {#code_end#} ++ In this example, the {#syntax#}Node{#endsyntax#} struct refers to itself. + This works because all top level declarations are order-independent. + As long as the compiler can determine the size of the struct, it is free to refer to itself. + In this case, {#syntax#}Node{#endsyntax#} refers to itself as a pointer, which has a + well-defined size at compile time, so it works fine.
{#header_close#} {#header_open|Case Study: print in Zig#} @@ -7220,10 +7439,10 @@ test "global assembly" { provided explicitly by the caller, and it can be suspended and resumed any number of times.- The code following the {#syntax#}async{#endsyntax#} callsite runs immediately after the async - function first suspends. When the return value of the async function is needed, - the calling code can {#syntax#}await{#endsyntax#} on the async function frame. - This will suspend the calling code until the async function completes, at which point + The code following the {#syntax#}async{#endsyntax#} callsite runs immediately after the async + function first suspends. When the return value of the async function is needed, + the calling code can {#syntax#}await{#endsyntax#} on the async function frame. + This will suspend the calling code until the async function completes, at which point execution resumes just after the {#syntax#}await{#endsyntax#} callsite.
@@ -7333,8 +7552,8 @@ fn testResumeFromSuspend(my_result: *i32) void { in standard code.
- However, it is possible to have an {#syntax#}async{#endsyntax#} call - without a matching {#syntax#}await{#endsyntax#}. Upon completion of the async function, + However, it is possible to have an {#syntax#}async{#endsyntax#} call + without a matching {#syntax#}await{#endsyntax#}. Upon completion of the async function, execution would continue at the most recent {#syntax#}async{#endsyntax#} callsite or {#syntax#}resume{#endsyntax#} callsite, and the return value of the async function would be lost.
@@ -7371,8 +7590,8 @@ fn func() void {{#syntax#}await{#endsyntax#} is a suspend point, and takes as an operand anything that - coerces to {#syntax#}anyframe->T{#endsyntax#}. Calling {#syntax#}await{#endsyntax#} on - the frame of an async function will cause execution to continue at the + coerces to {#syntax#}anyframe->T{#endsyntax#}. Calling {#syntax#}await{#endsyntax#} on + the frame of an async function will cause execution to continue at the {#syntax#}await{#endsyntax#} callsite once the target function completes.
@@ -7614,7 +7833,7 @@ fn readFile(allocator: Allocator, filename: []const u8) ![]u8 { for the current target to match the C ABI. When the child type of a pointer has this alignment, the alignment can be omitted from the type.
-{#syntax#}const expect = @import("std").debug.assert;
+ {#syntax#}const assert = @import("std").debug.assert;
comptime {
assert(*u32 == *align(@alignOf(u32)) u32);
}{#endsyntax#}
@@ -7807,7 +8026,7 @@ fn func(y: *i32) void {
only rounds once, and is thus more accurate.
- Supports Floats and Vectors of floats.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
@@ -8297,8 +8516,8 @@ fn internalName() callconv(.C) void {}
{#code_begin|obj#}
export fn foo() void {}
{#code_end#}
- Note that even when using {#syntax#}export{#endsyntax#}, {#syntax#}@"foo"{#endsyntax#} syntax can
- be used to choose any string for the symbol name:
+ Note that even when using {#syntax#}export{#endsyntax#}, the {#syntax#}@"foo"{#endsyntax#} syntax for
+ {#link|identifiers|Identifiers#} can be used to choose any string for the symbol name:
{#code_begin|obj#}
export fn @"A function name that is a complete sentence."() void {}
{#code_end#}
@@ -8447,7 +8666,7 @@ fn func() void {
{#header_close#}
{#header_open|@frameSize#}
- {#syntax#}@frameSize() usize{#endsyntax#}
+ {#syntax#}@frameSize(func: anytype) usize{#endsyntax#}
This is the same as {#syntax#}@sizeOf(@Frame(func)){#endsyntax#}, where {#syntax#}func{#endsyntax#}
may be runtime-known.
@@ -8597,7 +8816,9 @@ test "integer cast panic" {
{#header_open|@intToPtr#}
{#syntax#}@intToPtr(comptime DestType: type, address: usize) DestType{#endsyntax#}
- Converts an integer to a {#link|pointer|Pointers#}. To convert the other way, use {#link|@ptrToInt#}.
+ Converts an integer to a {#link|pointer|Pointers#}. To convert the other way, use {#link|@ptrToInt#}. Casting an address of 0 to a destination type
+ which is not {#link|optional|Optional Pointers#} and does not have the {#syntax#}allowzero{#endsyntax#} attribute will result in a
+ {#link|Pointer Cast Invalid Null#} panic when runtime safety checks are enabled.
If the destination pointer type does not allow address zero and {#syntax#}address{#endsyntax#}
@@ -8711,7 +8932,8 @@ test "@wasmMemoryGrow" {
{#syntax#}@mod(numerator: T, denominator: T) T{#endsyntax#}
Modulus division. For unsigned integers this is the same as
- {#syntax#}numerator % denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator > 0{#endsyntax#}.
+ {#syntax#}numerator % denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator > 0{#endsyntax#}, otherwise the
+ operation will result in a {#link|Remainder Division by Zero#} when runtime safety checks are enabled.
{#syntax#}@panic(message: []const u8) noreturn{#endsyntax#}
@@ -8836,7 +9058,8 @@ pub const PrefetchOptions = struct {
{#syntax#}@rem(numerator: T, denominator: T) T{#endsyntax#}
Remainder division. For unsigned integers this is the same as - {#syntax#}numerator % denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator > 0{#endsyntax#}. + {#syntax#}numerator % denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator > 0{#endsyntax#}, otherwise the + operation will result in a {#link|Remainder Division by Zero#} when runtime safety checks are enabled.
{#syntax#}@select(comptime T: type, pred: std.meta.Vector(len, bool), a: std.meta.Vector(len, T), b: std.meta.Vector(len, T)) std.meta.Vector(len, T){#endsyntax#}
+ {#syntax#}@select(comptime T: type, pred: @Vector(len, bool), a: @Vector(len, T), b: @Vector(len, T)) @Vector(len, T){#endsyntax#}
Selects values element-wise from {#syntax#}a{#endsyntax#} or {#syntax#}b{#endsyntax#} based on {#syntax#}pred{#endsyntax#}. If {#syntax#}pred[i]{#endsyntax#} is {#syntax#}true{#endsyntax#}, the corresponding element in the result will be {#syntax#}a[i]{#endsyntax#} and otherwise {#syntax#}b[i]{#endsyntax#}.
@@ -8878,14 +9101,14 @@ pub const PrefetchOptions = struct { {#header_close#} {#header_open|@setCold#} -{#syntax#}@setCold(is_cold: bool){#endsyntax#}
+ {#syntax#}@setCold(comptime is_cold: bool){#endsyntax#}
Tells the optimizer that a function is rarely called.
{#header_close#} {#header_open|@setEvalBranchQuota#} -{#syntax#}@setEvalBranchQuota(new_quota: u32){#endsyntax#}
+ {#syntax#}@setEvalBranchQuota(comptime new_quota: u32){#endsyntax#}
Changes the maximum number of backwards branches that compile-time code execution can use before giving up and making a compile error. @@ -8920,7 +9143,7 @@ test "foo" { {#header_close#} {#header_open|@setFloatMode#} -
{#syntax#}@setFloatMode(mode: @import("std").builtin.FloatMode){#endsyntax#}
+ {#syntax#}@setFloatMode(comptime mode: @import("std").builtin.FloatMode){#endsyntax#}
Sets the floating point mode of the current scope. Possible values are:
@@ -8955,7 +9178,7 @@ pub const FloatMode = enum { {#header_close#} {#header_open|@setRuntimeSafety#} -{#syntax#}@setRuntimeSafety(safety_on: bool) void{#endsyntax#}
+ {#syntax#}@setRuntimeSafety(comptime safety_on: bool) void{#endsyntax#}
Sets whether runtime safety checks are enabled for the scope that contains the function call.
@@ -8997,8 +9220,8 @@ test "@setRuntimeSafety" { any bits that disagree with the resultant sign bit are shifted out.- The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits. - This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior. + The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(T).Int.bits){#endsyntax#} bits. + This is because {#syntax#}shift_amt >= @typeInfo(T).Int.bits{#endsyntax#} is undefined behavior.
{#see_also|@shrExact|@shlWithOverflow#} {#header_close#} @@ -9011,12 +9234,12 @@ test "@setRuntimeSafety" { If no overflow or underflow occurs, returns {#syntax#}false{#endsyntax#}.- The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits. - This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior. + The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(T).Int.bits){#endsyntax#} bits. + This is because {#syntax#}shift_amt >= @typeInfo(T).Int.bits{#endsyntax#} is undefined behavior.
{#see_also|@shlExact|@shrExact#} {#header_close#} - + {#header_open|@shrExact#}{#syntax#}@shrExact(value: T, shift_amt: Log2T) T{#endsyntax#}
@@ -9024,14 +9247,14 @@ test "@setRuntimeSafety" { that the shift will not shift any 1 bits out.
- The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits. - This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior. + The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(T).Int.bits){#endsyntax#} bits. + This is because {#syntax#}shift_amt >= @typeInfo(T).Int.bits{#endsyntax#} is undefined behavior.
{#see_also|@shlExact|@shlWithOverflow#} {#header_close#} {#header_open|@shuffle#} -{#syntax#}@shuffle(comptime E: type, a: std.meta.Vector(a_len, E), b: std.meta.Vector(b_len, E), comptime mask: std.meta.Vector(mask_len, i32)) std.meta.Vector(mask_len, E){#endsyntax#}
+ {#syntax#}@shuffle(comptime E: type, a: @Vector(a_len, E), b: @Vector(b_len, E), comptime mask: @Vector(mask_len, i32)) @Vector(mask_len, E){#endsyntax#}
Constructs a new {#link|vector|Vectors#} by selecting elements from {#syntax#}a{#endsyntax#} and {#syntax#}b{#endsyntax#} based on {#syntax#}mask{#endsyntax#}. @@ -9066,22 +9289,21 @@ test "@setRuntimeSafety" {
{#code_begin|test|vector_shuffle#} const std = @import("std"); -const Vector = std.meta.Vector; const expect = std.testing.expect; test "vector @shuffle" { - const a: Vector(7, u8) = [_]u8{ 'o', 'l', 'h', 'e', 'r', 'z', 'w' }; - const b: Vector(4, u8) = [_]u8{ 'w', 'd', '!', 'x' }; + const a = @Vector(7, u8){ 'o', 'l', 'h', 'e', 'r', 'z', 'w' }; + const b = @Vector(4, u8){ 'w', 'd', '!', 'x' }; // To shuffle within a single vector, pass undefined as the second argument. // Notice that we can re-order, duplicate, or omit elements of the input vector - const mask1: Vector(5, i32) = [_]i32{ 2, 3, 1, 1, 0 }; - const res1: Vector(5, u8) = @shuffle(u8, a, undefined, mask1); + const mask1 = @Vector(5, i32){ 2, 3, 1, 1, 0 }; + const res1: @Vector(5, u8) = @shuffle(u8, a, undefined, mask1); try expect(std.mem.eql(u8, &@as([5]u8, res1), "hello")); // Combining two vectors - const mask2: Vector(6, i32) = [_]i32{ -1, 0, 4, 1, -2, -3 }; - const res2: Vector(6, u8) = @shuffle(u8, a, b, mask2); + const mask2 = @Vector(6, i32){ -1, 0, 4, 1, -2, -3 }; + const res2: @Vector(6, u8) = @shuffle(u8, a, b, mask2); try expect(std.mem.eql(u8, &@as([6]u8, res2), "world!")); } {#code_end#} @@ -9108,7 +9330,7 @@ test "vector @shuffle" { {#header_close#} {#header_open|@splat#} -{#syntax#}@splat(comptime len: u32, scalar: anytype) std.meta.Vector(len, @TypeOf(scalar)){#endsyntax#}
+ {#syntax#}@splat(comptime len: u32, scalar: anytype) @Vector(len, @TypeOf(scalar)){#endsyntax#}
Produces a vector of length {#syntax#}len{#endsyntax#} where each element is the value {#syntax#}scalar{#endsyntax#}: @@ -9120,7 +9342,7 @@ const expect = std.testing.expect; test "vector @splat" { const scalar: u32 = 5; const result = @splat(4, scalar); - comptime try expect(@TypeOf(result) == std.meta.Vector(4, u32)); + comptime try expect(@TypeOf(result) == @Vector(4, u32)); try expect(std.mem.eql(u32, &@as([4]u32, result), &[_]u32{ 5, 5, 5, 5 })); } {#code_end#} @@ -9132,22 +9354,23 @@ test "vector @splat" { {#header_close#} {#header_open|@reduce#} -
{#syntax#}@reduce(comptime op: std.builtin.ReduceOp, value: anytype) std.meta.Child(value){#endsyntax#}
+ {#syntax#}@reduce(comptime op: std.builtin.ReduceOp, value: anytype) E{#endsyntax#}
- Transforms a {#link|vector|Vectors#} into a scalar value by performing a
- sequential horizontal reduction of its elements using the specified operator {#syntax#}op{#endsyntax#}.
+ Transforms a {#link|vector|Vectors#} into a scalar value (of type E)
+ by performing a sequential horizontal reduction of its elements using the
+ specified operator {#syntax#}op{#endsyntax#}.
Not every operator is available for every vector element type:
Note that {#syntax#}.Add{#endsyntax#} and {#syntax#}.Mul{#endsyntax#} @@ -9160,10 +9383,10 @@ const std = @import("std"); const expect = std.testing.expect; test "vector @reduce" { - const value: std.meta.Vector(4, i32) = [_]i32{ 1, -1, 1, -1 }; + const value = @Vector(4, i32){ 1, -1, 1, -1 }; const result = value > @splat(4, @as(i32, 0)); // result is { true, false, true, false }; - comptime try expect(@TypeOf(result) == std.meta.Vector(4, bool)); + comptime try expect(@TypeOf(result) == @Vector(4, bool)); const is_all_true = @reduce(.And, result); comptime try expect(@TypeOf(is_all_true) == bool); try expect(is_all_true == false); @@ -9217,6 +9440,7 @@ fn doTheTest() !void { some float operations are not yet implemented for all float types.
{#header_close#} + {#header_open|@cos#}{#syntax#}@cos(value: anytype) @TypeOf(value){#endsyntax#}
@@ -9228,6 +9452,19 @@ fn doTheTest() !void { some float operations are not yet implemented for all float types.
{#header_close#} + + {#header_open|@tan#} +{#syntax#}@tan(value: anytype) @TypeOf(value){#endsyntax#}
+ + Tangent trigonometric function on a floating point number. + Uses a dedicated hardware instruction when available. +
++ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types. +
+ {#header_close#} + {#header_open|@exp#}{#syntax#}@exp(value: anytype) @TypeOf(value){#endsyntax#}
@@ -9347,7 +9584,7 @@ fn doTheTest() !void { If no overflow or underflow occurs, returns {#syntax#}false{#endsyntax#}.
{#header_close#} - + {#header_open|@tagName#}{#syntax#}@tagName(value: anytype) [:0]const u8{#endsyntax#}
@@ -9418,7 +9655,7 @@ test "integer truncation" { {#header_close#} {#header_open|@Type#} -
{#syntax#}@Type(comptime info: std.builtin.TypeInfo) type{#endsyntax#}
+ {#syntax#}@Type(comptime info: std.builtin.Type) type{#endsyntax#}
This function is the inverse of {#link|@typeInfo#}. It reifies type information into a {#syntax#}type{#endsyntax#}. @@ -9460,14 +9697,19 @@ test "integer truncation" { {#header_close#} {#header_open|@typeInfo#} -
{#syntax#}@typeInfo(comptime T: type) std.builtin.TypeInfo{#endsyntax#}
+ {#syntax#}@typeInfo(comptime T: type) std.builtin.Type{#endsyntax#}
Provides type reflection.
- For {#link|structs|struct#}, {#link|unions|union#}, {#link|enums|enum#}, and - {#link|error sets|Error Set Type#}, the fields are guaranteed to be in the same - order as declared. For declarations, the order is unspecified. + Type information of {#link|structs|struct#}, {#link|unions|union#}, {#link|enums|enum#}, and + {#link|error sets|Error Set Type#} has fields which are guaranteed to be in the same + order as appearance in the source file. +
++ Type information of {#link|structs|struct#}, {#link|unions|union#}, {#link|enums|enum#}, and + {#link|opaques|opaque#} has declarations, which are also guaranteed to be in the same + order as appearance in the source file.
{#header_close#} @@ -9476,8 +9718,9 @@ test "integer truncation" {This function returns the string representation of a type, as an array. It is equivalent to a string literal of the type name. + The returned type name is fully qualified with the parent namespace included + as part of the type name with a series of dots.
- {#header_close#} {#header_open|@TypeOf#} @@ -9517,6 +9760,12 @@ fn foo(comptime T: type, ptr: *T) T { {#syntax#}@unionInit{#endsyntax#} forwards its {#link|result location|Result Location Semantics#} to {#syntax#}init_expr{#endsyntax#}. {#header_close#} + + + {#header_open|@Vector#} +{#syntax#}@Vector(len: comptime_int, Element: type) type{#endsyntax#}
+ Creates {#link|Vectors#}.
+ {#header_close#} {#header_close#} {#header_open|Build Mode#} @@ -10352,7 +10601,7 @@ pub fn main() !void {String literals such as {#syntax#}"foo"{#endsyntax#} are in the global constant data section. This is why it is an error to pass a string literal to a mutable slice, like this:
- {#code_begin|test_err|expected type '[]u8'#} + {#code_begin|test_err|cannot cast pointer to array literal to slice type '[]u8'#} fn foo(s: []u8) void { _ = s; } @@ -11044,7 +11293,7 @@ pub fn main() !void { var preopens = PreopenList.init(gpa); defer preopens.deinit(); - try preopens.populate(); + try preopens.populate(null); for (preopens.asSlice()) |preopen, i| { std.debug.print("{}: {}\n", .{ i, preopen }); @@ -11143,7 +11392,7 @@ Architectures: riscv32 riscv64 sparc - sparcv9 + sparc64 sparcel s390x thumb @@ -11294,7 +11543,7 @@ Available libcs: s390x-linux-gnu s390x-linux-musl sparc-linux-gnu - sparcv9-linux-gnu + sparc64-linux-gnu wasm32-freestanding-musl wasm32-wasi-musl x86_64-linux-gnu @@ -11720,17 +11969,6 @@ fn readU32Be() u32 {}{#syntax#}false{#endsyntax#}
- {#syntax#}fn{#endsyntax#}
@@ -11806,17 +12044,6 @@ fn readU32Be() u32 {}
{#syntax#}null{#endsyntax#}
- {#syntax#}or{#endsyntax#}
@@ -11955,17 +12182,6 @@ fn readU32Be() u32 {}
{#syntax#}true{#endsyntax#}
- {#syntax#}try{#endsyntax#}
@@ -11979,17 +12195,6 @@ fn readU32Be() u32 {}
{#syntax#}undefined{#endsyntax#}
- {#syntax#}union{#endsyntax#}
diff --git a/lib/std/special/build_runner.zig b/lib/build_runner.zig
similarity index 94%
rename from lib/std/special/build_runner.zig
rename to lib/build_runner.zig
index 2a64861cf9..523723ddf2 100644
--- a/lib/std/special/build_runner.zig
+++ b/lib/build_runner.zig
@@ -24,19 +24,19 @@ pub fn main() !void {
var arg_idx: usize = 1;
const zig_exe = nextArg(args, &arg_idx) orelse {
- std.debug.print("Expected first argument to be path to zig compiler\n", .{});
+ std.debug.print("Expected path to zig compiler\n", .{});
return error.InvalidArgs;
};
const build_root = nextArg(args, &arg_idx) orelse {
- std.debug.print("Expected second argument to be build root directory path\n", .{});
+ std.debug.print("Expected build root directory path\n", .{});
return error.InvalidArgs;
};
const cache_root = nextArg(args, &arg_idx) orelse {
- std.debug.print("Expected third argument to be cache root directory path\n", .{});
+ std.debug.print("Expected cache root directory path\n", .{});
return error.InvalidArgs;
};
const global_cache_root = nextArg(args, &arg_idx) orelse {
- std.debug.print("Expected third argument to be global cache root directory path\n", .{});
+ std.debug.print("Expected global cache root directory path\n", .{});
return error.InvalidArgs;
};
@@ -147,10 +147,6 @@ pub fn main() !void {
std.debug.print("Expected argument after --glibc-runtimes\n\n", .{});
return usageAndErr(builder, false, stderr_stream);
};
- } else if (mem.eql(u8, arg, "--verbose-tokenize")) {
- builder.verbose_tokenize = true;
- } else if (mem.eql(u8, arg, "--verbose-ast")) {
- builder.verbose_ast = true;
} else if (mem.eql(u8, arg, "--verbose-link")) {
builder.verbose_link = true;
} else if (mem.eql(u8, arg, "--verbose-air")) {
@@ -185,6 +181,10 @@ pub fn main() !void {
builder.enable_darling = true;
} else if (mem.eql(u8, arg, "-fno-darling")) {
builder.enable_darling = false;
+ } else if (mem.eql(u8, arg, "-fstage1")) {
+ builder.use_stage1 = true;
+ } else if (mem.eql(u8, arg, "-fno-stage1")) {
+ builder.use_stage1 = false;
} else if (mem.eql(u8, arg, "--")) {
builder.args = argsRest(args, arg_idx);
break;
@@ -306,12 +306,13 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void
try out_stream.writeAll(
\\
\\Advanced Options:
+ \\ -fstage1 Force using bootstrap compiler as the codegen backend
+ \\ -fno-stage1 Prevent using bootstrap compiler as the codegen backend
\\ --build-file [file] Override path to build.zig
- \\ --cache-dir [path] Override path to zig cache directory
+ \\ --cache-dir [path] Override path to local Zig cache directory
+ \\ --global-cache-dir [path] Override path to global Zig cache directory
\\ --zig-lib-dir [arg] Override path to Zig lib directory
\\ --debug-log [scope] Enable debugging the compiler
- \\ --verbose-tokenize Enable compiler debug output for tokenization
- \\ --verbose-ast Enable compiler debug output for parsing into an AST
\\ --verbose-link Enable compiler debug output for linking
\\ --verbose-air Enable compiler debug output for Zig AIR
\\ --verbose-llvm-ir Enable compiler debug output for LLVM IR
diff --git a/lib/std/special/c_stage1.zig b/lib/c.zig
similarity index 57%
rename from lib/std/special/c_stage1.zig
rename to lib/c.zig
index 3ae93c2bdb..9df1b3fb01 100644
--- a/lib/std/special/c_stage1.zig
+++ b/lib/c.zig
@@ -1,11 +1,17 @@
+//! This is Zig's multi-target implementation of libc.
+//! When builtin.link_libc is true, we need to export all the functions and
+//! provide an entire C API.
+//! Otherwise, only the functions which LLVM generates calls to need to be generated,
+//! such as memcpy, memset, and some math functions.
+
const std = @import("std");
const builtin = @import("builtin");
-const maxInt = std.math.maxInt;
+const math = std.math;
const isNan = std.math.isNan;
+const maxInt = std.math.maxInt;
+const native_os = builtin.os.tag;
const native_arch = builtin.cpu.arch;
const native_abi = builtin.abi;
-const native_os = builtin.os.tag;
-const long_double_is_f128 = builtin.target.longDoubleIsF128();
const is_wasm = switch (native_arch) {
.wasm32, .wasm64 => true,
@@ -19,10 +25,23 @@ const is_freestanding = switch (native_os) {
.freestanding => true,
else => false,
};
+
comptime {
if (is_freestanding and is_wasm and builtin.link_libc) {
@export(wasm_start, .{ .name = "_start", .linkage = .Strong });
}
+
+ if (native_os == .linux) {
+ @export(clone, .{ .name = "clone" });
+ }
+
+ @export(memset, .{ .name = "memset", .linkage = .Strong });
+ @export(__memset, .{ .name = "__memset", .linkage = .Strong });
+ @export(memcpy, .{ .name = "memcpy", .linkage = .Strong });
+ @export(memmove, .{ .name = "memmove", .linkage = .Strong });
+ @export(memcmp, .{ .name = "memcmp", .linkage = .Strong });
+ @export(bcmp, .{ .name = "bcmp", .linkage = .Strong });
+
if (builtin.link_libc) {
@export(strcmp, .{ .name = "strcmp", .linkage = .Strong });
@export(strncmp, .{ .name = "strncmp", .linkage = .Strong });
@@ -37,13 +56,137 @@ comptime {
}
}
-var _fltused: c_int = 1;
+// Avoid dragging in the runtime safety mechanisms into this .o file,
+// unless we're trying to test this file.
+pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
+ @setCold(true);
+ _ = error_return_trace;
+ if (builtin.is_test) {
+ std.debug.panic("{s}", .{msg});
+ }
+ if (native_os != .freestanding and native_os != .other) {
+ std.os.abort();
+ }
+ while (true) {}
+}
extern fn main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
fn wasm_start() callconv(.C) void {
_ = main(0, undefined);
}
+fn memset(dest: ?[*]u8, c: u8, len: usize) callconv(.C) ?[*]u8 {
+ @setRuntimeSafety(false);
+
+ if (len != 0) {
+ var d = dest.?;
+ var n = len;
+ while (true) {
+ d[0] = c;
+ n -= 1;
+ if (n == 0) break;
+ d += 1;
+ }
+ }
+
+ return dest;
+}
+
+fn __memset(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
+ if (dest_n < n)
+ @panic("buffer overflow");
+ return memset(dest, c, n);
+}
+
+fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(.C) ?[*]u8 {
+ @setRuntimeSafety(false);
+
+ if (len != 0) {
+ var d = dest.?;
+ var s = src.?;
+ var n = len;
+ while (true) {
+ d[0] = s[0];
+ n -= 1;
+ if (n == 0) break;
+ d += 1;
+ s += 1;
+ }
+ }
+
+ return dest;
+}
+
+fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
+ @setRuntimeSafety(false);
+
+ if (@ptrToInt(dest) < @ptrToInt(src)) {
+ var index: usize = 0;
+ while (index != n) : (index += 1) {
+ dest.?[index] = src.?[index];
+ }
+ } else {
+ var index = n;
+ while (index != 0) {
+ index -= 1;
+ dest.?[index] = src.?[index];
+ }
+ }
+
+ return dest;
+}
+
+fn memcmp(vl: ?[*]const u8, vr: ?[*]const u8, n: usize) callconv(.C) c_int {
+ @setRuntimeSafety(false);
+
+ var index: usize = 0;
+ while (index != n) : (index += 1) {
+ const compare_val = @bitCast(i8, vl.?[index] -% vr.?[index]);
+ if (compare_val != 0) {
+ return compare_val;
+ }
+ }
+
+ return 0;
+}
+
+test "memcmp" {
+ const base_arr = &[_]u8{ 1, 1, 1 };
+ const arr1 = &[_]u8{ 1, 1, 1 };
+ const arr2 = &[_]u8{ 1, 0, 1 };
+ const arr3 = &[_]u8{ 1, 2, 1 };
+
+ try std.testing.expect(memcmp(base_arr[0..], arr1[0..], base_arr.len) == 0);
+ try std.testing.expect(memcmp(base_arr[0..], arr2[0..], base_arr.len) > 0);
+ try std.testing.expect(memcmp(base_arr[0..], arr3[0..], base_arr.len) < 0);
+}
+
+fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.C) c_int {
+ @setRuntimeSafety(false);
+
+ var index: usize = 0;
+ while (index != n) : (index += 1) {
+ if (vl[index] != vr[index]) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+test "bcmp" {
+ const base_arr = &[_]u8{ 1, 1, 1 };
+ const arr1 = &[_]u8{ 1, 1, 1 };
+ const arr2 = &[_]u8{ 1, 0, 1 };
+ const arr3 = &[_]u8{ 1, 2, 1 };
+
+ try std.testing.expect(bcmp(base_arr[0..], arr1[0..], base_arr.len) == 0);
+ try std.testing.expect(bcmp(base_arr[0..], arr2[0..], base_arr.len) != 0);
+ try std.testing.expect(bcmp(base_arr[0..], arr3[0..], base_arr.len) != 0);
+}
+
+var _fltused: c_int = 1;
+
fn strcpy(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
var i: usize = 0;
while (src[i] != 0) : (i += 1) {
@@ -161,106 +304,6 @@ test "strncmp" {
try std.testing.expect(strncmp("\xff", "\x02", 1) == 253);
}
-export fn memset(dest: ?[*]u8, c: u8, n: usize) callconv(.C) ?[*]u8 {
- @setRuntimeSafety(false);
-
- var index: usize = 0;
- while (index != n) : (index += 1)
- dest.?[index] = c;
-
- return dest;
-}
-
-export fn __memset(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
- if (dest_n < n)
- @panic("buffer overflow");
- return memset(dest, c, n);
-}
-
-export fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
- @setRuntimeSafety(false);
-
- var index: usize = 0;
- while (index != n) : (index += 1)
- dest.?[index] = src.?[index];
-
- return dest;
-}
-
-export fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
- @setRuntimeSafety(false);
-
- if (@ptrToInt(dest) < @ptrToInt(src)) {
- var index: usize = 0;
- while (index != n) : (index += 1) {
- dest.?[index] = src.?[index];
- }
- } else {
- var index = n;
- while (index != 0) {
- index -= 1;
- dest.?[index] = src.?[index];
- }
- }
-
- return dest;
-}
-
-export fn memcmp(vl: ?[*]const u8, vr: ?[*]const u8, n: usize) callconv(.C) c_int {
- @setRuntimeSafety(false);
-
- var index: usize = 0;
- while (index != n) : (index += 1) {
- const compare_val = @bitCast(i8, vl.?[index] -% vr.?[index]);
- if (compare_val != 0) {
- return compare_val;
- }
- }
-
- return 0;
-}
-
-test "memcmp" {
- const base_arr = &[_]u8{ 1, 1, 1 };
- const arr1 = &[_]u8{ 1, 1, 1 };
- const arr2 = &[_]u8{ 1, 0, 1 };
- const arr3 = &[_]u8{ 1, 2, 1 };
-
- try std.testing.expect(memcmp(base_arr[0..], arr1[0..], base_arr.len) == 0);
- try std.testing.expect(memcmp(base_arr[0..], arr2[0..], base_arr.len) > 0);
- try std.testing.expect(memcmp(base_arr[0..], arr3[0..], base_arr.len) < 0);
-}
-
-export fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.C) c_int {
- @setRuntimeSafety(false);
-
- var index: usize = 0;
- while (index != n) : (index += 1) {
- if (vl[index] != vr[index]) {
- return 1;
- }
- }
-
- return 0;
-}
-
-test "bcmp" {
- const base_arr = &[_]u8{ 1, 1, 1 };
- const arr1 = &[_]u8{ 1, 1, 1 };
- const arr2 = &[_]u8{ 1, 0, 1 };
- const arr3 = &[_]u8{ 1, 2, 1 };
-
- try std.testing.expect(bcmp(base_arr[0..], arr1[0..], base_arr.len) == 0);
- try std.testing.expect(bcmp(base_arr[0..], arr2[0..], base_arr.len) != 0);
- try std.testing.expect(bcmp(base_arr[0..], arr3[0..], base_arr.len) != 0);
-}
-
-comptime {
- if (native_os == .linux) {
- @export(clone, .{ .name = "clone" });
- }
-}
-
// TODO we should be able to put this directly in std/linux/x86_64.zig but
// it causes a segfault in release mode. this is a workaround of calling it
// across .o file boundaries. fix comptime @ptrCast of nakedcc functions.
@@ -582,7 +625,7 @@ fn clone() callconv(.Naked) void {
\\ sc
);
},
- .sparcv9 => {
+ .sparc64 => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// i0, i1, i2, i3, i4, i5, sp
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)
@@ -629,568 +672,3 @@ fn clone() callconv(.Naked) void {
else => @compileError("Implement clone() for this arch."),
}
}
-
-const math = std.math;
-
-export fn fmodf(x: f32, y: f32) f32 {
- return generic_fmod(f32, x, y);
-}
-export fn fmod(x: f64, y: f64) f64 {
- return generic_fmod(f64, x, y);
-}
-
-export fn ceilf(x: f32) f32 {
- return math.ceil(x);
-}
-export fn ceil(x: f64) f64 {
- return math.ceil(x);
-}
-export fn ceill(x: c_longdouble) c_longdouble {
- if (!long_double_is_f128) {
- @panic("TODO implement this");
- }
- return math.ceil(x);
-}
-
-export fn fmaf(a: f32, b: f32, c: f32) f32 {
- return math.fma(f32, a, b, c);
-}
-
-export fn fma(a: f64, b: f64, c: f64) f64 {
- return math.fma(f64, a, b, c);
-}
-export fn fmal(a: c_longdouble, b: c_longdouble, c: c_longdouble) c_longdouble {
- if (!long_double_is_f128) {
- @panic("TODO implement this");
- }
- return math.fma(c_longdouble, a, b, c);
-}
-
-export fn sin(a: f64) f64 {
- return math.sin(a);
-}
-
-export fn sinf(a: f32) f32 {
- return math.sin(a);
-}
-
-export fn cos(a: f64) f64 {
- return math.cos(a);
-}
-
-export fn cosf(a: f32) f32 {
- return math.cos(a);
-}
-
-export fn sincos(a: f64, r_sin: *f64, r_cos: *f64) void {
- r_sin.* = math.sin(a);
- r_cos.* = math.cos(a);
-}
-
-export fn sincosf(a: f32, r_sin: *f32, r_cos: *f32) void {
- r_sin.* = math.sin(a);
- r_cos.* = math.cos(a);
-}
-
-export fn exp(a: f64) f64 {
- return math.exp(a);
-}
-
-export fn expf(a: f32) f32 {
- return math.exp(a);
-}
-
-export fn exp2(a: f64) f64 {
- return math.exp2(a);
-}
-
-export fn exp2f(a: f32) f32 {
- return math.exp2(a);
-}
-
-export fn log(a: f64) f64 {
- return math.ln(a);
-}
-
-export fn logf(a: f32) f32 {
- return math.ln(a);
-}
-
-export fn log2(a: f64) f64 {
- return math.log2(a);
-}
-
-export fn log2f(a: f32) f32 {
- return math.log2(a);
-}
-
-export fn log10(a: f64) f64 {
- return math.log10(a);
-}
-
-export fn log10f(a: f32) f32 {
- return math.log10(a);
-}
-
-export fn fabs(a: f64) f64 {
- return math.fabs(a);
-}
-
-export fn fabsf(a: f32) f32 {
- return math.fabs(a);
-}
-
-export fn trunc(a: f64) f64 {
- return math.trunc(a);
-}
-
-export fn truncf(a: f32) f32 {
- return math.trunc(a);
-}
-
-export fn truncl(a: c_longdouble) c_longdouble {
- if (!long_double_is_f128) {
- @panic("TODO implement this");
- }
- return math.trunc(a);
-}
-
-export fn round(a: f64) f64 {
- return math.round(a);
-}
-
-export fn roundf(a: f32) f32 {
- return math.round(a);
-}
-
-fn generic_fmod(comptime T: type, x: T, y: T) T {
- @setRuntimeSafety(false);
-
- const bits = @typeInfo(T).Float.bits;
- const uint = std.meta.Int(.unsigned, bits);
- const log2uint = math.Log2Int(uint);
- const digits = if (T == f32) 23 else 52;
- const exp_bits = if (T == f32) 9 else 12;
- const bits_minus_1 = bits - 1;
- const mask = if (T == f32) 0xff else 0x7ff;
- var ux = @bitCast(uint, x);
- var uy = @bitCast(uint, y);
- var ex = @intCast(i32, (ux >> digits) & mask);
- var ey = @intCast(i32, (uy >> digits) & mask);
- const sx = if (T == f32) @intCast(u32, ux & 0x80000000) else @intCast(i32, ux >> bits_minus_1);
- var i: uint = undefined;
-
- if (uy << 1 == 0 or isNan(@bitCast(T, uy)) or ex == mask)
- return (x * y) / (x * y);
-
- if (ux << 1 <= uy << 1) {
- if (ux << 1 == uy << 1)
- return 0 * x;
- return x;
- }
-
- // normalize x and y
- if (ex == 0) {
- i = ux << exp_bits;
- while (i >> bits_minus_1 == 0) : ({
- ex -= 1;
- i <<= 1;
- }) {}
- ux <<= @intCast(log2uint, @bitCast(u32, -ex + 1));
- } else {
- ux &= maxInt(uint) >> exp_bits;
- ux |= 1 << digits;
- }
- if (ey == 0) {
- i = uy << exp_bits;
- while (i >> bits_minus_1 == 0) : ({
- ey -= 1;
- i <<= 1;
- }) {}
- uy <<= @intCast(log2uint, @bitCast(u32, -ey + 1));
- } else {
- uy &= maxInt(uint) >> exp_bits;
- uy |= 1 << digits;
- }
-
- // x mod y
- while (ex > ey) : (ex -= 1) {
- i = ux -% uy;
- if (i >> bits_minus_1 == 0) {
- if (i == 0)
- return 0 * x;
- ux = i;
- }
- ux <<= 1;
- }
- i = ux -% uy;
- if (i >> bits_minus_1 == 0) {
- if (i == 0)
- return 0 * x;
- ux = i;
- }
- while (ux >> digits == 0) : ({
- ux <<= 1;
- ex -= 1;
- }) {}
-
- // scale result up
- if (ex > 0) {
- ux -%= 1 << digits;
- ux |= @as(uint, @bitCast(u32, ex)) << digits;
- } else {
- ux >>= @intCast(log2uint, @bitCast(u32, -ex + 1));
- }
- if (T == f32) {
- ux |= sx;
- } else {
- ux |= @intCast(uint, sx) << bits_minus_1;
- }
- return @bitCast(T, ux);
-}
-
-test "fmod, fmodf" {
- inline for ([_]type{ f32, f64 }) |T| {
- const nan_val = math.nan(T);
- const inf_val = math.inf(T);
-
- try std.testing.expect(isNan(generic_fmod(T, nan_val, 1.0)));
- try std.testing.expect(isNan(generic_fmod(T, 1.0, nan_val)));
- try std.testing.expect(isNan(generic_fmod(T, inf_val, 1.0)));
- try std.testing.expect(isNan(generic_fmod(T, 0.0, 0.0)));
- try std.testing.expect(isNan(generic_fmod(T, 1.0, 0.0)));
-
- try std.testing.expectEqual(@as(T, 0.0), generic_fmod(T, 0.0, 2.0));
- try std.testing.expectEqual(@as(T, -0.0), generic_fmod(T, -0.0, 2.0));
-
- try std.testing.expectEqual(@as(T, -2.0), generic_fmod(T, -32.0, 10.0));
- try std.testing.expectEqual(@as(T, -2.0), generic_fmod(T, -32.0, -10.0));
- try std.testing.expectEqual(@as(T, 2.0), generic_fmod(T, 32.0, 10.0));
- try std.testing.expectEqual(@as(T, 2.0), generic_fmod(T, 32.0, -10.0));
- }
-}
-
-fn generic_fmin(comptime T: type, x: T, y: T) T {
- if (isNan(x))
- return y;
- if (isNan(y))
- return x;
- return if (x < y) x else y;
-}
-
-export fn fminf(x: f32, y: f32) callconv(.C) f32 {
- return generic_fmin(f32, x, y);
-}
-
-export fn fmin(x: f64, y: f64) callconv(.C) f64 {
- return generic_fmin(f64, x, y);
-}
-
-test "fmin, fminf" {
- inline for ([_]type{ f32, f64 }) |T| {
- const nan_val = math.nan(T);
-
- try std.testing.expect(isNan(generic_fmin(T, nan_val, nan_val)));
- try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, nan_val, 1.0));
- try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, 1.0, nan_val));
-
- try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, 1.0, 10.0));
- try std.testing.expectEqual(@as(T, -1.0), generic_fmin(T, 1.0, -1.0));
- }
-}
-
-fn generic_fmax(comptime T: type, x: T, y: T) T {
- if (isNan(x))
- return y;
- if (isNan(y))
- return x;
- return if (x < y) y else x;
-}
-
-export fn fmaxf(x: f32, y: f32) callconv(.C) f32 {
- return generic_fmax(f32, x, y);
-}
-
-export fn fmax(x: f64, y: f64) callconv(.C) f64 {
- return generic_fmax(f64, x, y);
-}
-
-test "fmax, fmaxf" {
- inline for ([_]type{ f32, f64 }) |T| {
- const nan_val = math.nan(T);
-
- try std.testing.expect(isNan(generic_fmax(T, nan_val, nan_val)));
- try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, nan_val, 1.0));
- try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, 1.0, nan_val));
-
- try std.testing.expectEqual(@as(T, 10.0), generic_fmax(T, 1.0, 10.0));
- try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, 1.0, -1.0));
- }
-}
-
-// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound
-// behaviour. Most intermediate i32 values are changed to u32 where appropriate but there are
-// potentially some edge cases remaining that are not handled in the same way.
-export fn sqrt(x: f64) f64 {
- const tiny: f64 = 1.0e-300;
- const sign: u32 = 0x80000000;
- const u = @bitCast(u64, x);
-
- var ix0 = @intCast(u32, u >> 32);
- var ix1 = @intCast(u32, u & 0xFFFFFFFF);
-
- // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = nan
- if (ix0 & 0x7FF00000 == 0x7FF00000) {
- return x * x + x;
- }
-
- // sqrt(+-0) = +-0
- if (x == 0.0) {
- return x;
- }
- // sqrt(-ve) = snan
- if (ix0 & sign != 0) {
- return math.snan(f64);
- }
-
- // normalize x
- var m = @intCast(i32, ix0 >> 20);
- if (m == 0) {
- // subnormal
- while (ix0 == 0) {
- m -= 21;
- ix0 |= ix1 >> 11;
- ix1 <<= 21;
- }
-
- // subnormal
- var i: u32 = 0;
- while (ix0 & 0x00100000 == 0) : (i += 1) {
- ix0 <<= 1;
- }
- m -= @intCast(i32, i) - 1;
- ix0 |= ix1 >> @intCast(u5, 32 - i);
- ix1 <<= @intCast(u5, i);
- }
-
- // unbias exponent
- m -= 1023;
- ix0 = (ix0 & 0x000FFFFF) | 0x00100000;
- if (m & 1 != 0) {
- ix0 += ix0 + (ix1 >> 31);
- ix1 = ix1 +% ix1;
- }
- m >>= 1;
-
- // sqrt(x) bit by bit
- ix0 += ix0 + (ix1 >> 31);
- ix1 = ix1 +% ix1;
-
- var q: u32 = 0;
- var q1: u32 = 0;
- var s0: u32 = 0;
- var s1: u32 = 0;
- var r: u32 = 0x00200000;
- var t: u32 = undefined;
- var t1: u32 = undefined;
-
- while (r != 0) {
- t = s0 +% r;
- if (t <= ix0) {
- s0 = t + r;
- ix0 -= t;
- q += r;
- }
- ix0 = ix0 +% ix0 +% (ix1 >> 31);
- ix1 = ix1 +% ix1;
- r >>= 1;
- }
-
- r = sign;
- while (r != 0) {
- t1 = s1 +% r;
- t = s0;
- if (t < ix0 or (t == ix0 and t1 <= ix1)) {
- s1 = t1 +% r;
- if (t1 & sign == sign and s1 & sign == 0) {
- s0 += 1;
- }
- ix0 -= t;
- if (ix1 < t1) {
- ix0 -= 1;
- }
- ix1 = ix1 -% t1;
- q1 += r;
- }
- ix0 = ix0 +% ix0 +% (ix1 >> 31);
- ix1 = ix1 +% ix1;
- r >>= 1;
- }
-
- // rounding direction
- if (ix0 | ix1 != 0) {
- var z = 1.0 - tiny; // raise inexact
- if (z >= 1.0) {
- z = 1.0 + tiny;
- if (q1 == 0xFFFFFFFF) {
- q1 = 0;
- q += 1;
- } else if (z > 1.0) {
- if (q1 == 0xFFFFFFFE) {
- q += 1;
- }
- q1 += 2;
- } else {
- q1 += q1 & 1;
- }
- }
- }
-
- ix0 = (q >> 1) + 0x3FE00000;
- ix1 = q1 >> 1;
- if (q & 1 != 0) {
- ix1 |= 0x80000000;
- }
-
- // NOTE: musl here appears to rely on signed twos-complement wraparound. +% has the same
- // behaviour at least.
- var iix0 = @intCast(i32, ix0);
- iix0 = iix0 +% (m << 20);
-
- const uz = (@intCast(u64, iix0) << 32) | ix1;
- return @bitCast(f64, uz);
-}
-
-test "sqrt" {
- const V = [_]f64{
- 0.0,
- 4.089288054930154,
- 7.538757127071935,
- 8.97780793672623,
- 5.304443821913729,
- 5.682408965311888,
- 0.5846878579110049,
- 3.650338664297043,
- 0.3178091951800732,
- 7.1505232436382835,
- 3.6589165881946464,
- };
-
- // Note that @sqrt will either generate the sqrt opcode (if supported by the
- // target ISA) or a call to `sqrtf` otherwise.
- for (V) |val|
- try std.testing.expectEqual(@sqrt(val), sqrt(val));
-}
-
-test "sqrt special" {
- try std.testing.expect(std.math.isPositiveInf(sqrt(std.math.inf(f64))));
- try std.testing.expect(sqrt(0.0) == 0.0);
- try std.testing.expect(sqrt(-0.0) == -0.0);
- try std.testing.expect(isNan(sqrt(-1.0)));
- try std.testing.expect(isNan(sqrt(std.math.nan(f64))));
-}
-
-export fn sqrtf(x: f32) f32 {
- const tiny: f32 = 1.0e-30;
- const sign: i32 = @bitCast(i32, @as(u32, 0x80000000));
- var ix: i32 = @bitCast(i32, x);
-
- if ((ix & 0x7F800000) == 0x7F800000) {
- return x * x + x; // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = snan
- }
-
- // zero
- if (ix <= 0) {
- if (ix & ~sign == 0) {
- return x; // sqrt (+-0) = +-0
- }
- if (ix < 0) {
- return math.snan(f32);
- }
- }
-
- // normalize
- var m = ix >> 23;
- if (m == 0) {
- // subnormal
- var i: i32 = 0;
- while (ix & 0x00800000 == 0) : (i += 1) {
- ix <<= 1;
- }
- m -= i - 1;
- }
-
- m -= 127; // unbias exponent
- ix = (ix & 0x007FFFFF) | 0x00800000;
-
- if (m & 1 != 0) { // odd m, double x to even
- ix += ix;
- }
-
- m >>= 1; // m = [m / 2]
-
- // sqrt(x) bit by bit
- ix += ix;
- var q: i32 = 0; // q = sqrt(x)
- var s: i32 = 0;
- var r: i32 = 0x01000000; // r = moving bit right -> left
-
- while (r != 0) {
- const t = s + r;
- if (t <= ix) {
- s = t + r;
- ix -= t;
- q += r;
- }
- ix += ix;
- r >>= 1;
- }
-
- // floating add to find rounding direction
- if (ix != 0) {
- var z = 1.0 - tiny; // inexact
- if (z >= 1.0) {
- z = 1.0 + tiny;
- if (z > 1.0) {
- q += 2;
- } else {
- if (q & 1 != 0) {
- q += 1;
- }
- }
- }
- }
-
- ix = (q >> 1) + 0x3f000000;
- ix += m << 23;
- return @bitCast(f32, ix);
-}
-
-test "sqrtf" {
- const V = [_]f32{
- 0.0,
- 4.089288054930154,
- 7.538757127071935,
- 8.97780793672623,
- 5.304443821913729,
- 5.682408965311888,
- 0.5846878579110049,
- 3.650338664297043,
- 0.3178091951800732,
- 7.1505232436382835,
- 3.6589165881946464,
- };
-
- // Note that @sqrt will either generate the sqrt opcode (if supported by the
- // target ISA) or a call to `sqrtf` otherwise.
- for (V) |val|
- try std.testing.expectEqual(@sqrt(val), sqrtf(val));
-}
-
-test "sqrtf special" {
- try std.testing.expect(std.math.isPositiveInf(sqrtf(std.math.inf(f32))));
- try std.testing.expect(sqrtf(0.0) == 0.0);
- try std.testing.expect(sqrtf(-0.0) == -0.0);
- try std.testing.expect(isNan(sqrtf(-1.0)));
- try std.testing.expect(isNan(sqrtf(std.math.nan(f32))));
-}
diff --git a/lib/compiler_rt.zig b/lib/compiler_rt.zig
new file mode 100644
index 0000000000..105c5ed7ad
--- /dev/null
+++ b/lib/compiler_rt.zig
@@ -0,0 +1,187 @@
+pub const panic = @import("compiler_rt/common.zig").panic;
+
+comptime {
+ _ = @import("compiler_rt/atomics.zig");
+
+ _ = @import("compiler_rt/addf3.zig");
+ _ = @import("compiler_rt/addsf3.zig");
+ _ = @import("compiler_rt/adddf3.zig");
+ _ = @import("compiler_rt/addtf3.zig");
+ _ = @import("compiler_rt/addxf3.zig");
+
+ _ = @import("compiler_rt/subsf3.zig");
+ _ = @import("compiler_rt/subdf3.zig");
+ _ = @import("compiler_rt/subtf3.zig");
+ _ = @import("compiler_rt/subxf3.zig");
+
+ _ = @import("compiler_rt/mulf3.zig");
+ _ = @import("compiler_rt/muldf3.zig");
+ _ = @import("compiler_rt/mulsf3.zig");
+ _ = @import("compiler_rt/multf3.zig");
+ _ = @import("compiler_rt/mulxf3.zig");
+
+ _ = @import("compiler_rt/negsf2.zig");
+ _ = @import("compiler_rt/negdf2.zig");
+ _ = @import("compiler_rt/negtf2.zig");
+ _ = @import("compiler_rt/negxf2.zig");
+
+ _ = @import("compiler_rt/comparef.zig");
+ _ = @import("compiler_rt/cmpsf2.zig");
+ _ = @import("compiler_rt/cmpdf2.zig");
+ _ = @import("compiler_rt/cmptf2.zig");
+ _ = @import("compiler_rt/cmpxf2.zig");
+ _ = @import("compiler_rt/gesf2.zig");
+ _ = @import("compiler_rt/gedf2.zig");
+ _ = @import("compiler_rt/getf2.zig");
+ _ = @import("compiler_rt/gexf2.zig");
+ _ = @import("compiler_rt/unordsf2.zig");
+ _ = @import("compiler_rt/unorddf2.zig");
+ _ = @import("compiler_rt/unordtf2.zig");
+
+ _ = @import("compiler_rt/extendf.zig");
+ _ = @import("compiler_rt/extenddftf2.zig");
+ _ = @import("compiler_rt/extenddfxf2.zig");
+ _ = @import("compiler_rt/extendhfsf2.zig");
+ _ = @import("compiler_rt/extendhftf2.zig");
+ _ = @import("compiler_rt/extendhfxf2.zig");
+ _ = @import("compiler_rt/extendsfdf2.zig");
+ _ = @import("compiler_rt/extendsftf2.zig");
+ _ = @import("compiler_rt/extendsfxf2.zig");
+ _ = @import("compiler_rt/extendxftf2.zig");
+
+ _ = @import("compiler_rt/truncf.zig");
+ _ = @import("compiler_rt/truncsfhf2.zig");
+ _ = @import("compiler_rt/truncdfhf2.zig");
+ _ = @import("compiler_rt/truncdfsf2.zig");
+ _ = @import("compiler_rt/trunctfhf2.zig");
+ _ = @import("compiler_rt/trunctfsf2.zig");
+ _ = @import("compiler_rt/trunctfdf2.zig");
+ _ = @import("compiler_rt/trunctfxf2.zig");
+ _ = @import("compiler_rt/truncxfhf2.zig");
+ _ = @import("compiler_rt/truncxfsf2.zig");
+ _ = @import("compiler_rt/truncxfdf2.zig");
+
+ _ = @import("compiler_rt/divtf3.zig");
+ _ = @import("compiler_rt/divsf3.zig");
+ _ = @import("compiler_rt/divdf3.zig");
+ _ = @import("compiler_rt/divxf3.zig");
+ _ = @import("compiler_rt/sin.zig");
+ _ = @import("compiler_rt/cos.zig");
+ _ = @import("compiler_rt/sincos.zig");
+ _ = @import("compiler_rt/ceil.zig");
+ _ = @import("compiler_rt/exp.zig");
+ _ = @import("compiler_rt/exp2.zig");
+ _ = @import("compiler_rt/fabs.zig");
+ _ = @import("compiler_rt/floor.zig");
+ _ = @import("compiler_rt/fma.zig");
+ _ = @import("compiler_rt/fmax.zig");
+ _ = @import("compiler_rt/fmin.zig");
+ _ = @import("compiler_rt/fmod.zig");
+ _ = @import("compiler_rt/log.zig");
+ _ = @import("compiler_rt/log10.zig");
+ _ = @import("compiler_rt/log2.zig");
+ _ = @import("compiler_rt/round.zig");
+ _ = @import("compiler_rt/sqrt.zig");
+ _ = @import("compiler_rt/tan.zig");
+ _ = @import("compiler_rt/trunc.zig");
+ _ = @import("compiler_rt/stack_probe.zig");
+ _ = @import("compiler_rt/divti3.zig");
+ _ = @import("compiler_rt/modti3.zig");
+ _ = @import("compiler_rt/multi3.zig");
+ _ = @import("compiler_rt/udivti3.zig");
+ _ = @import("compiler_rt/udivmodti4.zig");
+ _ = @import("compiler_rt/umodti3.zig");
+
+ _ = @import("compiler_rt/int_to_float.zig");
+ _ = @import("compiler_rt/floatsihf.zig");
+ _ = @import("compiler_rt/floatsisf.zig");
+ _ = @import("compiler_rt/floatsidf.zig");
+ _ = @import("compiler_rt/floatsitf.zig");
+ _ = @import("compiler_rt/floatsixf.zig");
+ _ = @import("compiler_rt/floatdihf.zig");
+ _ = @import("compiler_rt/floatdisf.zig");
+ _ = @import("compiler_rt/floatdidf.zig");
+ _ = @import("compiler_rt/floatditf.zig");
+ _ = @import("compiler_rt/floatdixf.zig");
+ _ = @import("compiler_rt/floattihf.zig");
+ _ = @import("compiler_rt/floattisf.zig");
+ _ = @import("compiler_rt/floattidf.zig");
+ _ = @import("compiler_rt/floattitf.zig");
+ _ = @import("compiler_rt/floattixf.zig");
+ _ = @import("compiler_rt/floatundihf.zig");
+ _ = @import("compiler_rt/floatundisf.zig");
+ _ = @import("compiler_rt/floatundidf.zig");
+ _ = @import("compiler_rt/floatunditf.zig");
+ _ = @import("compiler_rt/floatundixf.zig");
+ _ = @import("compiler_rt/floatunsihf.zig");
+ _ = @import("compiler_rt/floatunsisf.zig");
+ _ = @import("compiler_rt/floatunsidf.zig");
+ _ = @import("compiler_rt/floatunsitf.zig");
+ _ = @import("compiler_rt/floatunsixf.zig");
+ _ = @import("compiler_rt/floatuntihf.zig");
+ _ = @import("compiler_rt/floatuntisf.zig");
+ _ = @import("compiler_rt/floatuntidf.zig");
+ _ = @import("compiler_rt/floatuntitf.zig");
+ _ = @import("compiler_rt/floatuntixf.zig");
+
+ _ = @import("compiler_rt/float_to_int.zig");
+ _ = @import("compiler_rt/fixhfsi.zig");
+ _ = @import("compiler_rt/fixhfdi.zig");
+ _ = @import("compiler_rt/fixhfti.zig");
+ _ = @import("compiler_rt/fixsfsi.zig");
+ _ = @import("compiler_rt/fixsfdi.zig");
+ _ = @import("compiler_rt/fixsfti.zig");
+ _ = @import("compiler_rt/fixdfsi.zig");
+ _ = @import("compiler_rt/fixdfdi.zig");
+ _ = @import("compiler_rt/fixdfti.zig");
+ _ = @import("compiler_rt/fixtfsi.zig");
+ _ = @import("compiler_rt/fixtfdi.zig");
+ _ = @import("compiler_rt/fixtfti.zig");
+ _ = @import("compiler_rt/fixxfsi.zig");
+ _ = @import("compiler_rt/fixxfdi.zig");
+ _ = @import("compiler_rt/fixxfti.zig");
+ _ = @import("compiler_rt/fixunshfsi.zig");
+ _ = @import("compiler_rt/fixunshfdi.zig");
+ _ = @import("compiler_rt/fixunshfti.zig");
+ _ = @import("compiler_rt/fixunssfsi.zig");
+ _ = @import("compiler_rt/fixunssfdi.zig");
+ _ = @import("compiler_rt/fixunssfti.zig");
+ _ = @import("compiler_rt/fixunsdfsi.zig");
+ _ = @import("compiler_rt/fixunsdfdi.zig");
+ _ = @import("compiler_rt/fixunsdfti.zig");
+ _ = @import("compiler_rt/fixunstfsi.zig");
+ _ = @import("compiler_rt/fixunstfdi.zig");
+ _ = @import("compiler_rt/fixunstfti.zig");
+ _ = @import("compiler_rt/fixunsxfsi.zig");
+ _ = @import("compiler_rt/fixunsxfdi.zig");
+ _ = @import("compiler_rt/fixunsxfti.zig");
+
+ _ = @import("compiler_rt/count0bits.zig");
+ _ = @import("compiler_rt/parity.zig");
+ _ = @import("compiler_rt/popcount.zig");
+ _ = @import("compiler_rt/bswap.zig");
+ _ = @import("compiler_rt/int.zig");
+ _ = @import("compiler_rt/shift.zig");
+
+ _ = @import("compiler_rt/negXi2.zig");
+
+ _ = @import("compiler_rt/muldi3.zig");
+
+ _ = @import("compiler_rt/absv.zig");
+ _ = @import("compiler_rt/absvsi2.zig");
+ _ = @import("compiler_rt/absvdi2.zig");
+ _ = @import("compiler_rt/absvti2.zig");
+
+ _ = @import("compiler_rt/negv.zig");
+ _ = @import("compiler_rt/addo.zig");
+ _ = @import("compiler_rt/subo.zig");
+ _ = @import("compiler_rt/mulo.zig");
+ _ = @import("compiler_rt/cmp.zig");
+
+ _ = @import("compiler_rt/os_version_check.zig");
+ _ = @import("compiler_rt/emutls.zig");
+ _ = @import("compiler_rt/arm.zig");
+ _ = @import("compiler_rt/aulldiv.zig");
+ _ = @import("compiler_rt/aullrem.zig");
+ _ = @import("compiler_rt/clear_cache.zig");
+}
diff --git a/lib/compiler_rt/README.md b/lib/compiler_rt/README.md
new file mode 100644
index 0000000000..cf1bf29522
--- /dev/null
+++ b/lib/compiler_rt/README.md
@@ -0,0 +1,267 @@
+If hardware lacks basic or specialized functionality, compiler-rt adds such functionality
+for basic arithmetic(s).
+One such example is 64-bit integer multiplication on 32-bit x86.
+
+Goals:
+1. zig as linker for object files produced by other compilers
+ => `function compatibility` to compiler-rt and libgcc for same-named functions
+ * compatibility conflict between compiler-rt and libgcc: prefer compiler-rt
+2. `symbol-level compatibility` low-priority compared to emitted calls by llvm
+ * symbol-level compatibility: libgcc even lower priority
+3. add zig-specific language runtime features, see #7265
+ * example: arbitrary bit width integer arithmetic
+ * lower to call those functions for e.g. multiplying two i12345 numbers together
+ * proper naming + documentation for standardizing (allow languages to follow our example)
+
+Current status (tracking libgcc documentation):
+- Integer library routines => almost implemented
+- Soft float library routines => only f80 routines missing
+- Decimal float library routines => unimplemented (~120 functions)
+- Fixed-point fractional library routines => unimplemented (~300 functions)
+- Exception handling routines => unclear, if supported (~32+x undocumented functions)
+- Miscellaneous routines => unclear, if supported (cache control and stack function)
+- No zig-specific language runtime features in compiler-rt yet
+
+This library is automatically built as-needed for the compilation target and
+then statically linked and therefore is a transparent dependency for the
+programmer.
+For details see `../compiler_rt.zig`.
+
+The routines in this folder are listed below.
+Routines are annotated as `type source routine // description`, with `routine`
+being the name used in aforementioned `compiler_rt.zig`.
+`dev` means deviating from compiler_rt, `port` ported, `source` is the
+information source for the implementation, `none` means unimplemented.
+Some examples for the naming convention are:
+- dev source name_routine, name_routine2 various implementations for performance, simplicity etc
+- port llvm compiler-rt library routines from [LLVM](http://compiler-rt.llvm.org/)
+ * LLVM emits library calls to compiler-rt, if the hardware lacks functionality
+- port musl libc routines from [musl](https://musl.libc.org/)
+If the library or information source is uncommon, use the entry `other` for `source`.
+Please do not break the search by inserting entries in a format other than `impl space source`.
+
+Bugs should be solved by trying to duplicate the bug upstream, if possible.
+ * If the bug exists upstream, get it fixed upstream and port the fix downstream to Zig.
+ * If the bug only exists in Zig, use the corresponding C code and debug
+ both implementations side by side to figure out what is wrong.
+
+## Integer library routines
+
+#### Integer Bit operations
+- dev HackersDelight __clzsi2 // count leading zeros
+- dev HackersDelight __clzdi2 // count leading zeros
+- dev HackersDelight __clzti2 // count leading zeros
+- dev HackersDelight __ctzsi2 // count trailing zeros
+- dev HackersDelight __ctzdi2 // count trailing zeros
+- dev HackersDelight __ctzti2 // count trailing zeros
+- dev __ctzsi2 __ffssi2 // find least significant 1 bit
+- dev __ctzsi2 __ffsdi2 // find least significant 1 bit
+- dev __ctzsi2 __ffsti2 // find least significant 1 bit
+- dev BitTwiddlingHacks __paritysi2 // bit parity
+- dev BitTwiddlingHacks __paritydi2 // bit parity
+- dev BitTwiddlingHacks __parityti2 // bit parity
+- dev TAOCP __popcountsi2 // bit population
+- dev TAOCP __popcountdi2 // bit population
+- dev TAOCP __popcountti2 // bit population
+- dev other __bswapsi2 // a byteswapped
+- dev other __bswapdi2 // a byteswapped
+- dev other __bswapti2 // a byteswapped
+
+#### Integer Comparison
+- port llvm __cmpsi2 // (a<b)=>output=0, (a==b)=>output=1, (a>b)=>output=2
+- port llvm __cmpdi2
+- port llvm __cmpti2
+- port llvm __ucmpsi2 // (a<b)=>output=0, (a==b)=>output=1, (a>b)=>output=2
+- port llvm __ucmpdi2
+- port llvm __ucmpti2
+
+#### Integer Arithmetic
+- none none __ashlsi3 // a << b unused in llvm, missing (e.g. used by rl78)
+- port llvm __ashldi3 // a << b
+- port llvm __ashlti3 // a << b
+- none none __ashrsi3 // a >> b arithmetic (sign fill) missing (e.g. used by rl78)
+- port llvm __ashrdi3 // a >> b arithmetic (sign fill)
+- port llvm __ashrti3 // a >> b arithmetic (sign fill)
+- none none __lshrsi3 // a >> b logical (zero fill) missing (e.g. used by rl78)
+- port llvm __lshrdi3 // a >> b logical (zero fill)
+- port llvm __lshrti3 // a >> b logical (zero fill)
+- port llvm __negdi2 // -a symbol-level compatibility: libgcc
+- port llvm __negti2 // -a unnecessary: unused in backends
+- port llvm __mulsi3 // a * b signed
+- port llvm __muldi3 // a * b signed
+- port llvm __multi3 // a * b signed
+- port llvm __divsi3 // a / b signed
+- port llvm __divdi3 // a / b signed
+- port llvm __divti3 // a / b signed
+- port llvm __udivsi3 // a / b unsigned
+- port llvm __udivdi3 // a / b unsigned
+- port llvm __udivti3 // a / b unsigned
+- port llvm __modsi3 // a % b signed
+- port llvm __moddi3 // a % b signed
+- port llvm __modti3 // a % b signed
+- port llvm __umodsi3 // a % b unsigned
+- port llvm __umoddi3 // a % b unsigned
+- port llvm __umodti3 // a % b unsigned
+- port llvm __udivmoddi4 // a / b, rem.* = a % b unsigned
+- port llvm __udivmodti4 // a / b, rem.* = a % b unsigned
+- port llvm __udivmodsi4 // a / b, rem.* = a % b unsigned
+- port llvm __divmodsi4 // a / b, rem.* = a % b signed, ARM
+
+#### Integer Arithmetic with trapping overflow
+- dev BitTwiddlingHacks __absvsi2 // abs(a)
+- dev BitTwiddlingHacks __absvdi2 // abs(a)
+- dev BitTwiddlingHacks __absvti2 // abs(a)
+- port llvm __negvsi2 // -a symbol-level compatibility: libgcc
+- port llvm __negvdi2 // -a unnecessary: unused in backends
+- port llvm __negvti2 // -a
+- TODO upstreaming __addvsi3..__mulvti3 after testing panics works
+- dev HackersDelight __addvsi3 // a + b
+- dev HackersDelight __addvdi3 // a + b
+- dev HackersDelight __addvti3 // a + b
+- dev HackersDelight __subvsi3 // a - b
+- dev HackersDelight __subvdi3 // a - b
+- dev HackersDelight __subvti3 // a - b
+- dev HackersDelight __mulvsi3 // a * b
+- dev HackersDelight __mulvdi3 // a * b
+- dev HackersDelight __mulvti3 // a * b
+
+#### Integer Arithmetic which returns if overflow (would be faster without pointer)
+- dev HackersDelight __addosi4 // a + b, overflow=>ov.*=1 else 0
+- dev HackersDelight __addodi4 // (completeness + performance, llvm does not use them)
+- dev HackersDelight __addoti4 //
+- dev HackersDelight __subosi4 // a - b, overflow=>ov.*=1 else 0
+- dev HackersDelight __subodi4 // (completeness + performance, llvm does not use them)
+- dev HackersDelight __suboti4 //
+- dev HackersDelight __mulosi4 // a * b, overflow=>ov.*=1 else 0
+- dev HackersDelight __mulodi4 // (required by llvm)
+- dev HackersDelight __muloti4 //
+
+## Float library routines
+
+#### Float Conversion
+- todo todo __extendsfdf2 // extend a f32 => f64
+- todo todo __extendsftf2 // extend a f32 => f128
+- dev llvm __extendsfxf2 // extend a f32 => f80
+- todo todo __extenddftf2 // extend a f64 => f128
+- dev llvm __extenddfxf2 // extend a f64 => f80
+- todo todo __truncdfsf2 // truncate a to narrower mode of return type, rounding towards zero
+- todo todo __trunctfdf2 //
+- todo todo __trunctfsf2 //
+- dev llvm __truncxfsf2 //
+- dev llvm __truncxfdf2 //
+- todo todo __fixsfsi // convert a to i32, rounding towards zero
+- todo todo __fixdfsi //
+- todo todo __fixtfsi //
+- todo todo __fixxfsi //
+- todo todo __fixsfdi // convert a to i64, rounding towards zero
+- todo todo __fixdfdi //
+- todo todo __fixtfdi //
+- todo todo __fixxfdi //
+- todo todo __fixsfti // convert a to i128, rounding towards zero
+- todo todo __fixdfti //
+- todo todo __fixtfti //
+- todo todo __fixxfti //
+
+- __fixunssfsi // convert to u32, rounding towards zero. negative values become 0.
+- __fixunsdfsi //
+- __fixunstfsi //
+- __fixunsxfsi //
+- __fixunssfdi // convert to u64, rounding towards zero. negative values become 0.
+- __fixunsdfdi //
+- __fixunstfdi //
+- __fixunsxfdi //
+- __fixunssfti // convert to u128, rounding towards zero. negative values become 0.
+- __fixunsdfti //
+- __fixunstfti //
+- __fixunsxfti //
+
+- __floatsisf // convert i32 to floating point
+- __floatsidf //
+- __floatsitf //
+- __floatsixf //
+- __floatdisf // convert i64 to floating point
+- __floatdidf //
+- __floatditf //
+- __floatdixf //
+- __floattisf // convert i128 to floating point
+- __floattidf //
+- __floattixf //
+
+- __floatunsisf // convert u32 to floating point
+- __floatunsidf //
+- __floatunsitf //
+- __floatunsixf //
+- __floatundisf // convert u64 to floating point
+- __floatundidf //
+- __floatunditf //
+- __floatundixf //
+- __floatuntisf // convert u128 to floating point
+- __floatuntidf //
+- __floatuntitf //
+- __floatuntixf //
+
+#### Float Comparison
+- __cmpsf2 // return (a<b)=>-1,(a==b)=>0,(a>b)=>1,NaN=>1 don't rely on this
+- __cmpdf2 // exported from __lesf2, __ledf2, __letf2 (below)
+- __cmptf2 //
+- __unordsf2 // (input==NaN) => out!=0 else out=0,
+- __unorddf2 // __only reliable for (input!=Nan)__
+- __unordtf2 //
+- __eqsf2 // (a!=NaN) and (b!=Nan) and (a==b) => output=0
+- __eqdf2 //
+- __eqtf2 //
+- __nesf2 // (a==NaN) or (b==Nan) or (a!=b) => output!=0
+- __nedf2 //
+- __netf2 //
+- __gesf2 // (a!=Nan) and (b!=Nan) and (a>=b) => output>=0
+- __gedf2 //
+- __getf2 //
+- __ltsf2 // (a!=Nan) and (b!=Nan) and (a<b) => output<0
+- __ltdf2 //
+- __lttf2 //
+- __lesf2 // (a!=Nan) and (b!=Nan) and (a<=b) => output<=0
+- __ledf2 //
+- __letf2 //
+- __gtsf2 // (a!=Nan) and (b!=Nan) and (a>b) => output>0
+- __gtdf2 //
+- __gttf2 //
+
+#### Float Arithmetic
+- __addsf3 // a + b f32
+- __adddf3 // a + b f64
+- __addtf3 // a + b f128
+- __addxf3 // a + b f80
+- __aeabi_fadd // a + b f32 ARM: AAPCS
+- __aeabi_dadd // a + b f64 ARM: AAPCS
+- __subsf3 // a - b
+- __subdf3 // a - b
+- __subtf3 // a - b
+- __subxf3 // a - b f80
+- __aeabi_fsub // a - b f32 ARM: AAPCS
+- __aeabi_dsub // a - b f64 ARM: AAPCS
+- __mulsf3 // a * b
+- __muldf3 // a * b
+- __multf3 // a * b
+- __mulxf3 // a * b
+- __divsf3 // a / b
+- __divdf3 // a / b
+- __divtf3 // a / b
+- __divxf3 // a / b
+- __negsf2 // -a symbol-level compatibility: libgcc uses this for the rl78
+- __negdf2 // -a unnecessary: can be lowered directly to a xor
+- __negtf2 // -a
+- __negxf2 // -a
+
+#### Floating point raised to integer power
+- __powisf2 // unclear, if supported a ^ b
+- __powidf2 //
+- __powitf2 //
+- __powixf2 //
+- __mulsc3 // unsupported (a+ib) * (c+id)
+- __muldc3 //
+- __multc3 //
+- __mulxc3 //
+- __divsc3 // unsupported (a+ib) / (c+id)
+- __divdc3 //
+- __divtc3 //
+- __divxc3 //
diff --git a/lib/std/special/compiler_rt/absv.zig b/lib/compiler_rt/absv.zig
similarity index 58%
rename from lib/std/special/compiler_rt/absv.zig
rename to lib/compiler_rt/absv.zig
index f14497daf2..8910a4a6b9 100644
--- a/lib/std/special/compiler_rt/absv.zig
+++ b/lib/compiler_rt/absv.zig
@@ -1,8 +1,6 @@
-// absv - absolute oVerflow
-// * @panic, if value can not be represented
-// - absvXi4_generic for unoptimized version
-
-inline fn absvXi(comptime ST: type, a: ST) ST {
+/// absv - absolute oVerflow
+/// * @panic if value can not be represented
+pub inline fn absv(comptime ST: type, a: ST) ST {
const UT = switch (ST) {
i32 => u32,
i64 => u64,
@@ -21,18 +19,6 @@ inline fn absvXi(comptime ST: type, a: ST) ST {
return x;
}
-pub fn __absvsi2(a: i32) callconv(.C) i32 {
- return absvXi(i32, a);
-}
-
-pub fn __absvdi2(a: i64) callconv(.C) i64 {
- return absvXi(i64, a);
-}
-
-pub fn __absvti2(a: i128) callconv(.C) i128 {
- return absvXi(i128, a);
-}
-
test {
_ = @import("absvsi2_test.zig");
_ = @import("absvdi2_test.zig");
diff --git a/lib/compiler_rt/absvdi2.zig b/lib/compiler_rt/absvdi2.zig
new file mode 100644
index 0000000000..7ebf561ae5
--- /dev/null
+++ b/lib/compiler_rt/absvdi2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const absv = @import("./absv.zig").absv;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__absvdi2, .{ .name = "__absvdi2", .linkage = common.linkage });
+}
+
+pub fn __absvdi2(a: i64) callconv(.C) i64 {
+ return absv(i64, a);
+}
diff --git a/lib/std/special/compiler_rt/absvdi2_test.zig b/lib/compiler_rt/absvdi2_test.zig
similarity index 93%
rename from lib/std/special/compiler_rt/absvdi2_test.zig
rename to lib/compiler_rt/absvdi2_test.zig
index 4aa73513ee..e861ef0ff3 100644
--- a/lib/std/special/compiler_rt/absvdi2_test.zig
+++ b/lib/compiler_rt/absvdi2_test.zig
@@ -1,8 +1,9 @@
-const absv = @import("absv.zig");
const testing = @import("std").testing;
+const __absvdi2 = @import("absvdi2.zig").__absvdi2;
+
fn test__absvdi2(a: i64, expected: i64) !void {
- var result = absv.__absvdi2(a);
+ var result = __absvdi2(a);
try testing.expectEqual(expected, result);
}
diff --git a/lib/compiler_rt/absvsi2.zig b/lib/compiler_rt/absvsi2.zig
new file mode 100644
index 0000000000..664925f8f9
--- /dev/null
+++ b/lib/compiler_rt/absvsi2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const absv = @import("./absv.zig").absv;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__absvsi2, .{ .name = "__absvsi2", .linkage = common.linkage });
+}
+
+pub fn __absvsi2(a: i32) callconv(.C) i32 {
+ return absv(i32, a);
+}
diff --git a/lib/std/special/compiler_rt/absvsi2_test.zig b/lib/compiler_rt/absvsi2_test.zig
similarity index 91%
rename from lib/std/special/compiler_rt/absvsi2_test.zig
rename to lib/compiler_rt/absvsi2_test.zig
index 2cc3dbf991..9c74ebee67 100644
--- a/lib/std/special/compiler_rt/absvsi2_test.zig
+++ b/lib/compiler_rt/absvsi2_test.zig
@@ -1,8 +1,9 @@
-const absv = @import("absv.zig");
const testing = @import("std").testing;
+const __absvsi2 = @import("absvsi2.zig").__absvsi2;
+
fn test__absvsi2(a: i32, expected: i32) !void {
- var result = absv.__absvsi2(a);
+ var result = __absvsi2(a);
try testing.expectEqual(expected, result);
}
diff --git a/lib/compiler_rt/absvti2.zig b/lib/compiler_rt/absvti2.zig
new file mode 100644
index 0000000000..f7d0f796b0
--- /dev/null
+++ b/lib/compiler_rt/absvti2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const absv = @import("./absv.zig").absv;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__absvti2, .{ .name = "__absvti2", .linkage = common.linkage });
+}
+
+pub fn __absvti2(a: i128) callconv(.C) i128 {
+ return absv(i128, a);
+}
diff --git a/lib/std/special/compiler_rt/absvti2_test.zig b/lib/compiler_rt/absvti2_test.zig
similarity index 94%
rename from lib/std/special/compiler_rt/absvti2_test.zig
rename to lib/compiler_rt/absvti2_test.zig
index 5b4deb3640..fbed961775 100644
--- a/lib/std/special/compiler_rt/absvti2_test.zig
+++ b/lib/compiler_rt/absvti2_test.zig
@@ -1,8 +1,9 @@
-const absv = @import("absv.zig");
const testing = @import("std").testing;
+const __absvti2 = @import("absvti2.zig").__absvti2;
+
fn test__absvti2(a: i128, expected: i128) !void {
- var result = absv.__absvti2(a);
+ var result = __absvti2(a);
try testing.expectEqual(expected, result);
}
diff --git a/lib/compiler_rt/adddf3.zig b/lib/compiler_rt/adddf3.zig
new file mode 100644
index 0000000000..1b511f78a4
--- /dev/null
+++ b/lib/compiler_rt/adddf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = common.linkage });
+ } else {
+ @export(__adddf3, .{ .name = "__adddf3", .linkage = common.linkage });
+ }
+}
+
+fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
+ return addf3(f64, a, b);
+}
+
+fn __aeabi_dadd(a: f64, b: f64) callconv(.AAPCS) f64 {
+ return addf3(f64, a, b);
+}
diff --git a/lib/std/special/compiler_rt/addXf3.zig b/lib/compiler_rt/addf3.zig
similarity index 57%
rename from lib/std/special/compiler_rt/addXf3.zig
rename to lib/compiler_rt/addf3.zig
index 4c74110310..7f2e368121 100644
--- a/lib/std/special/compiler_rt/addXf3.zig
+++ b/lib/compiler_rt/addf3.zig
@@ -1,98 +1,37 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/lib/builtins/fp_add_impl.inc
-
const std = @import("std");
-const builtin = @import("builtin");
-const compiler_rt = @import("../compiler_rt.zig");
+const math = std.math;
+const common = @import("./common.zig");
+const normalize = common.normalize;
-pub fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
- return addXf3(f32, a, b);
-}
-
-pub fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
- return addXf3(f64, a, b);
-}
-
-pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
- return addXf3(f128, a, b);
-}
-
-pub fn __subsf3(a: f32, b: f32) callconv(.C) f32 {
- const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31));
- return addXf3(f32, a, neg_b);
-}
-
-pub fn __subdf3(a: f64, b: f64) callconv(.C) f64 {
- const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
- return addXf3(f64, a, neg_b);
-}
-
-pub fn __subtf3(a: f128, b: f128) callconv(.C) f128 {
- const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (@as(u128, 1) << 127));
- return addXf3(f128, a, neg_b);
-}
-
-pub fn __aeabi_fadd(a: f32, b: f32) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __addsf3, .{ a, b });
-}
-
-pub fn __aeabi_dadd(a: f64, b: f64) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __adddf3, .{ a, b });
-}
-
-pub fn __aeabi_fsub(a: f32, b: f32) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __subsf3, .{ a, b });
-}
-
-pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __subdf3, .{ a, b });
-}
-
-// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
-fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
- const bits = @typeInfo(T).Float.bits;
- const Z = std.meta.Int(.unsigned, bits);
- const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
- const significandBits = std.math.floatMantissaBits(T);
- const implicitBit = @as(Z, 1) << significandBits;
-
- const shift = @clz(std.meta.Int(.unsigned, bits), significand.*) - @clz(Z, implicitBit);
- significand.* <<= @intCast(S, shift);
- return 1 - shift;
-}
-
-// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
-fn addXf3(comptime T: type, a: T, b: T) T {
+/// Ported from:
+///
+/// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/lib/builtins/fp_add_impl.inc
+pub inline fn addf3(comptime T: type, a: T, b: T) T {
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(.unsigned, bits);
const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
const typeWidth = bits;
- const significandBits = std.math.floatMantissaBits(T);
- const exponentBits = std.math.floatExponentBits(T);
+ const significandBits = math.floatMantissaBits(T);
+ const fractionalBits = math.floatFractionalBits(T);
+ const exponentBits = math.floatExponentBits(T);
const signBit = (@as(Z, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
- const implicitBit = (@as(Z, 1) << significandBits);
- const quietBit = implicitBit >> 1;
- const significandMask = implicitBit - 1;
+ const integerBit = (@as(Z, 1) << fractionalBits);
+ const quietBit = integerBit >> 1;
+ const significandMask = (@as(Z, 1) << significandBits) - 1;
const absMask = signBit - 1;
- const exponentMask = absMask ^ significandMask;
- const qnanRep = exponentMask | quietBit;
+ const qnanRep = @bitCast(Z, math.nan(T)) | quietBit;
var aRep = @bitCast(Z, a);
var bRep = @bitCast(Z, b);
const aAbs = aRep & absMask;
const bAbs = bRep & absMask;
- const infRep = @bitCast(Z, std.math.inf(T));
+ const infRep = @bitCast(Z, math.inf(T));
// Detect if a or b is zero, infinity, or NaN.
if (aAbs -% @as(Z, 1) >= infRep - @as(Z, 1) or
@@ -157,12 +96,12 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// implicit significand bit. (If we fell through from the denormal path it
// was already set by normalize( ), but setting it twice won't hurt
// anything.)
- aSignificand = (aSignificand | implicitBit) << 3;
- bSignificand = (bSignificand | implicitBit) << 3;
+ aSignificand = (aSignificand | integerBit) << 3;
+ bSignificand = (bSignificand | integerBit) << 3;
// Shift the significand of b by the difference in exponents, with a sticky
// bottom bit to get rounding correct.
- const @"align" = @intCast(Z, aExponent - bExponent);
+ const @"align" = @intCast(u32, aExponent - bExponent);
if (@"align" != 0) {
if (@"align" < typeWidth) {
const sticky = if (bSignificand << @intCast(S, typeWidth - @"align") != 0) @as(Z, 1) else 0;
@@ -178,8 +117,8 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// If partial cancellation occured, we need to left-shift the result
// and adjust the exponent:
- if (aSignificand < implicitBit << 3) {
- const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(.unsigned, bits), implicitBit << 3));
+ if (aSignificand < integerBit << 3) {
+ const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(.unsigned, bits), integerBit << 3));
aSignificand <<= @intCast(S, shift);
aExponent -= shift;
}
@@ -188,7 +127,7 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// If the addition carried up, we need to right-shift the result and
// adjust the exponent:
- if (aSignificand & (implicitBit << 4) != 0) {
+ if (aSignificand & (integerBit << 4) != 0) {
const sticky = aSignificand & 1;
aSignificand = aSignificand >> 1 | sticky;
aExponent += 1;
@@ -199,18 +138,16 @@ fn addXf3(comptime T: type, a: T, b: T) T {
if (aExponent >= maxExponent) return @bitCast(T, infRep | resultSign);
if (aExponent <= 0) {
- // Result is denormal before rounding; the exponent is zero and we
- // need to shift the significand.
- const shift = @intCast(Z, 1 - aExponent);
- const sticky = if (aSignificand << @intCast(S, typeWidth - shift) != 0) @as(Z, 1) else 0;
- aSignificand = aSignificand >> @intCast(S, shift | sticky);
- aExponent = 0;
+ // Result is denormal; the exponent and round/sticky bits are zero.
+ // All we need to do is shift the significand and apply the correct sign.
+ aSignificand >>= @intCast(S, 4 - aExponent);
+ return @bitCast(T, resultSign | aSignificand);
}
// Low three bits are round, guard, and sticky.
const roundGuardSticky = aSignificand & 0x7;
- // Shift the significand into place, and mask off the implicit bit.
+ // Shift the significand into place, and mask off the integer bit, if it's implicit.
var result = (aSignificand >> 3) & significandMask;
// Insert the exponent and sign.
@@ -222,9 +159,14 @@ fn addXf3(comptime T: type, a: T, b: T) T {
if (roundGuardSticky > 0x4) result += 1;
if (roundGuardSticky == 0x4) result += result & 1;
+ // Restore any explicit integer bit, if it was rounded off
+ if (significandBits != fractionalBits) {
+ if ((result >> significandBits) != 0) result |= integerBit;
+ }
+
return @bitCast(T, result);
}
test {
- _ = @import("addXf3_test.zig");
+ _ = @import("addf3_test.zig");
}
diff --git a/lib/compiler_rt/addf3_test.zig b/lib/compiler_rt/addf3_test.zig
new file mode 100644
index 0000000000..1df87a889f
--- /dev/null
+++ b/lib/compiler_rt/addf3_test.zig
@@ -0,0 +1,156 @@
+// Ported from:
+//
+// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/test/builtins/Unit/addtf3_test.c
+// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/test/builtins/Unit/subtf3_test.c
+
+const std = @import("std");
+const math = std.math;
+const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
+
+const __addtf3 = @import("addtf3.zig").__addtf3;
+const __addxf3 = @import("addxf3.zig").__addxf3;
+const __subtf3 = @import("subtf3.zig").__subtf3;
+
+fn test__addtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
+ const x = __addtf3(a, b);
+
+ const rep = @bitCast(u128, x);
+ const hi = @intCast(u64, rep >> 64);
+ const lo = @truncate(u64, rep);
+
+ if (hi == expected_hi and lo == expected_lo) {
+ return;
+ }
+ // test other possible NaN representation (signal NaN)
+ else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
+ if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
+ ((hi & 0xffffffffffff) > 0 or lo > 0))
+ {
+ return;
+ }
+ }
+
+ return error.TestFailed;
+}
+
+test "addtf3" {
+ try test__addtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
+
+ // NaN + any = NaN
+ try test__addtf3(@bitCast(f128, (@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
+
+ // inf + inf = inf
+ try test__addtf3(math.inf(f128), math.inf(f128), 0x7fff000000000000, 0x0);
+
+ // inf + any = inf
+ try test__addtf3(math.inf(f128), 0x1.2335653452436234723489432abcdefp+5, 0x7fff000000000000, 0x0);
+
+ // any + any
+ try test__addtf3(0x1.23456734245345543849abcdefp+5, 0x1.edcba52449872455634654321fp-1, 0x40042afc95c8b579, 0x61e58dd6c51eb77c);
+ try test__addtf3(0x1.edcba52449872455634654321fp-1, 0x1.23456734245345543849abcdefp+5, 0x40042afc95c8b579, 0x61e58dd6c51eb77c);
+}
+
+fn test__subtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
+ const x = __subtf3(a, b);
+
+ const rep = @bitCast(u128, x);
+ const hi = @intCast(u64, rep >> 64);
+ const lo = @truncate(u64, rep);
+
+ if (hi == expected_hi and lo == expected_lo) {
+ return;
+ }
+ // test other possible NaN representation (signal NaN)
+ else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
+ if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
+ ((hi & 0xffffffffffff) > 0 or lo > 0))
+ {
+ return;
+ }
+ }
+
+ return error.TestFailed;
+}
+
+test "subtf3" {
+ // qNaN - any = qNaN
+ try test__subtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
+
+ // NaN + any = NaN
+ try test__subtf3(@bitCast(f128, (@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
+
+ // inf - any = inf
+ try test__subtf3(math.inf(f128), 0x1.23456789abcdefp+5, 0x7fff000000000000, 0x0);
+
+ // any + any
+ try test__subtf3(0x1.234567829a3bcdef5678ade36734p+5, 0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x40041b8af1915166, 0xa44a7bca780a166c);
+ try test__subtf3(0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x1.234567829a3bcdef5678ade36734p+5, 0xc0041b8af1915166, 0xa44a7bca780a166c);
+}
+
+const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1)));
+
+fn test__addxf3(a: f80, b: f80, expected: u80) !void {
+ const x = __addxf3(a, b);
+ const rep = @bitCast(u80, x);
+
+ if (rep == expected)
+ return;
+
+ if (math.isNan(@bitCast(f80, expected)) and math.isNan(x))
+ return; // We don't currently test NaN payload propagation
+
+ return error.TestFailed;
+}
+
+test "addxf3" {
+ // NaN + any = NaN
+ try test__addxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
+ try test__addxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
+
+ // any + NaN = NaN
+ try test__addxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80));
+ try test__addxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80));
+
+ // NaN + inf = NaN
+ try test__addxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80));
+
+ // inf + NaN = NaN
+ try test__addxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80));
+
+ // inf + inf = inf
+ try test__addxf3(math.inf(f80), math.inf(f80), @bitCast(u80, math.inf(f80)));
+
+ // inf + -inf = NaN
+ try test__addxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, qnan80));
+
+ // -inf + inf = NaN
+ try test__addxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, qnan80));
+
+ // inf + any = inf
+ try test__addxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80)));
+
+ // any + inf = inf
+ try test__addxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @bitCast(u80, math.inf(f80)));
+
+ // any + any
+ try test__addxf3(0x1.23456789abcdp+5, 0x1.dcba987654321p+5, 0x4005_BFFFFFFFFFFFC400);
+ try test__addxf3(0x1.23456734245345543849abcdefp+5, 0x1.edcba52449872455634654321fp-1, 0x4004_957E_4AE4_5ABC_B0F3);
+ try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x1.0p-63, 0x3FFF_FFFFFFFFFFFFFFFF); // exact
+ try test__addxf3(0x1.ffff_ffff_ffff_fffep+0, 0x0.0p0, 0x3FFF_FFFFFFFFFFFFFFFF); // exact
+ try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x1.4p-63, 0x3FFF_FFFFFFFFFFFFFFFF); // round down
+ try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x1.8p-63, 0x4000_8000000000000000); // round up to even
+ try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x1.cp-63, 0x4000_8000000000000000); // round up
+ try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x2.0p-63, 0x4000_8000000000000000); // exact
+ try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x2.1p-63, 0x4000_8000000000000000); // round down
+ try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x3.0p-63, 0x4000_8000000000000000); // round down to even
+ try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x3.1p-63, 0x4000_8000000000000001); // round up
+ try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x4.0p-63, 0x4000_8000000000000001); // exact
+
+ try test__addxf3(0x1.0fff_ffff_ffff_fffep+0, 0x1.0p-63, 0x3FFF_8800000000000000); // exact
+ try test__addxf3(0x1.0fff_ffff_ffff_fffep+0, 0x1.7p-63, 0x3FFF_8800000000000000); // round down
+ try test__addxf3(0x1.0fff_ffff_ffff_fffep+0, 0x1.8p-63, 0x3FFF_8800000000000000); // round down to even
+ try test__addxf3(0x1.0fff_ffff_ffff_fffep+0, 0x1.9p-63, 0x3FFF_8800000000000001); // round up
+ try test__addxf3(0x1.0fff_ffff_ffff_fffep+0, 0x2.0p-63, 0x3FFF_8800000000000001); // exact
+ try test__addxf3(0x0.ffff_ffff_ffff_fffcp-16382, 0x0.0000_0000_0000_0002p-16382, 0x0000_7FFFFFFFFFFFFFFF); // exact
+ try test__addxf3(0x0.1fff_ffff_ffff_fffcp-16382, 0x0.0000_0000_0000_0002p-16382, 0x0000_0FFFFFFFFFFFFFFF); // exact
+}
diff --git a/lib/compiler_rt/addo.zig b/lib/compiler_rt/addo.zig
new file mode 100644
index 0000000000..d14fe36710
--- /dev/null
+++ b/lib/compiler_rt/addo.zig
@@ -0,0 +1,48 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+pub const panic = @import("common.zig").panic;
+
+comptime {
+ @export(__addosi4, .{ .name = "__addosi4", .linkage = linkage });
+ @export(__addodi4, .{ .name = "__addodi4", .linkage = linkage });
+ @export(__addoti4, .{ .name = "__addoti4", .linkage = linkage });
+}
+
+// addo - add overflow
+// * return a+%b.
+// * return if a+b overflows => 1 else => 0
+// - addoXi4_generic as default
+
+inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
+ @setRuntimeSafety(builtin.is_test);
+ overflow.* = 0;
+ var sum: ST = a +% b;
+ // Hackers Delight: section Overflow Detection, subsection Signed Add/Subtract
+ // Let sum = a +% b == a + b + carry == wraparound addition.
+ // Overflow in a+b+carry occurs, iff a and b have opposite signs
+ // and the sign of a+b+carry is the same as a (or equivalently b).
+ // Slower routine: res = ~(a ^ b) & (sum ^ a)
+ // Faster routine: res = (sum ^ a) & (sum ^ b)
+ // Overflow occurred, iff (res < 0)
+ if (((sum ^ a) & (sum ^ b)) < 0)
+ overflow.* = 1;
+ return sum;
+}
+
+pub fn __addosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
+ return addoXi4_generic(i32, a, b, overflow);
+}
+pub fn __addodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
+ return addoXi4_generic(i64, a, b, overflow);
+}
+pub fn __addoti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
+ return addoXi4_generic(i128, a, b, overflow);
+}
+
+test {
+ _ = @import("addosi4_test.zig");
+ _ = @import("addodi4_test.zig");
+ _ = @import("addoti4_test.zig");
+}
diff --git a/lib/compiler_rt/addodi4_test.zig b/lib/compiler_rt/addodi4_test.zig
new file mode 100644
index 0000000000..f70a80a5b2
--- /dev/null
+++ b/lib/compiler_rt/addodi4_test.zig
@@ -0,0 +1,77 @@
+const addv = @import("addo.zig");
+const std = @import("std");
+const testing = std.testing;
+const math = std.math;
+
+fn test__addodi4(a: i64, b: i64) !void {
+ var result_ov: c_int = undefined;
+ var expected_ov: c_int = undefined;
+ var result = addv.__addodi4(a, b, &result_ov);
+ var expected: i64 = simple_addodi4(a, b, &expected_ov);
+ try testing.expectEqual(expected, result);
+ try testing.expectEqual(expected_ov, result_ov);
+}
+
+fn simple_addodi4(a: i64, b: i64, overflow: *c_int) i64 {
+ overflow.* = 0;
+ const min: i64 = math.minInt(i64);
+ const max: i64 = math.maxInt(i64);
+ if (((a > 0) and (b > max - a)) or
+ ((a < 0) and (b < min - a)))
+ overflow.* = 1;
+ return a +% b;
+}
+
+test "addodi4" {
+ const min: i64 = math.minInt(i64);
+ const max: i64 = math.maxInt(i64);
+ var i: i64 = 1;
+ while (i < max) : (i *|= 2) {
+ try test__addodi4(i, i);
+ try test__addodi4(-i, -i);
+ try test__addodi4(i, -i);
+ try test__addodi4(-i, i);
+ }
+
+ // edge cases
+ // 0 + 0 = 0
+ // MIN + MIN overflow
+ // MAX + MAX overflow
+ // 0 + MIN MIN
+ // 0 + MAX MAX
+ // MIN + 0 MIN
+ // MAX + 0 MAX
+ // MIN + MAX -1
+ // MAX + MIN -1
+ try test__addodi4(0, 0);
+ try test__addodi4(min, min);
+ try test__addodi4(max, max);
+ try test__addodi4(0, min);
+ try test__addodi4(0, max);
+ try test__addodi4(min, 0);
+ try test__addodi4(max, 0);
+ try test__addodi4(min, max);
+ try test__addodi4(max, min);
+
+ // derived edge cases
+ // MIN+1 + MIN overflow
+ // MAX-1 + MAX overflow
+ // 1 + MIN = MIN+1
+ // -1 + MIN overflow
+ // -1 + MAX = MAX-1
+ // +1 + MAX overflow
+ // MIN + 1 = MIN+1
+ // MIN + -1 overflow
+ // MAX + 1 overflow
+ // MAX + -1 = MAX-1
+ try test__addodi4(min + 1, min);
+ try test__addodi4(max - 1, max);
+ try test__addodi4(1, min);
+ try test__addodi4(-1, min);
+ try test__addodi4(-1, max);
+ try test__addodi4(1, max);
+ try test__addodi4(min, 1);
+ try test__addodi4(min, -1);
+ try test__addodi4(max, -1);
+ try test__addodi4(max, 1);
+}
diff --git a/lib/compiler_rt/addosi4_test.zig b/lib/compiler_rt/addosi4_test.zig
new file mode 100644
index 0000000000..a8f81d70d1
--- /dev/null
+++ b/lib/compiler_rt/addosi4_test.zig
@@ -0,0 +1,78 @@
+const addv = @import("addo.zig");
+const testing = @import("std").testing;
+
+fn test__addosi4(a: i32, b: i32) !void {
+ var result_ov: c_int = undefined;
+ var expected_ov: c_int = undefined;
+ var result = addv.__addosi4(a, b, &result_ov);
+ var expected: i32 = simple_addosi4(a, b, &expected_ov);
+ try testing.expectEqual(expected, result);
+ try testing.expectEqual(expected_ov, result_ov);
+}
+
+fn simple_addosi4(a: i32, b: i32, overflow: *c_int) i32 {
+ overflow.* = 0;
+ const min: i32 = -2147483648;
+ const max: i32 = 2147483647;
+ if (((a > 0) and (b > max - a)) or
+ ((a < 0) and (b < min - a)))
+ overflow.* = 1;
+ return a +% b;
+}
+
+test "addosi4" {
+ // -2^31 <= i32 <= 2^31-1
+ // 2^31 = 2147483648
+ // 2^31-1 = 2147483647
+ const min: i32 = -2147483648;
+ const max: i32 = 2147483647;
+ var i: i32 = 1;
+ while (i < max) : (i *|= 2) {
+ try test__addosi4(i, i);
+ try test__addosi4(-i, -i);
+ try test__addosi4(i, -i);
+ try test__addosi4(-i, i);
+ }
+
+ // edge cases
+ // 0 + 0 = 0
+ // MIN + MIN overflow
+ // MAX + MAX overflow
+ // 0 + MIN MIN
+ // 0 + MAX MAX
+ // MIN + 0 MIN
+ // MAX + 0 MAX
+ // MIN + MAX -1
+ // MAX + MIN -1
+ try test__addosi4(0, 0);
+ try test__addosi4(min, min);
+ try test__addosi4(max, max);
+ try test__addosi4(0, min);
+ try test__addosi4(0, max);
+ try test__addosi4(min, 0);
+ try test__addosi4(max, 0);
+ try test__addosi4(min, max);
+ try test__addosi4(max, min);
+
+ // derived edge cases
+ // MIN+1 + MIN overflow
+ // MAX-1 + MAX overflow
+ // 1 + MIN = MIN+1
+ // -1 + MIN overflow
+ // -1 + MAX = MAX-1
+ // +1 + MAX overflow
+ // MIN + 1 = MIN+1
+ // MIN + -1 overflow
+ // MAX + 1 overflow
+ // MAX + -1 = MAX-1
+ try test__addosi4(min + 1, min);
+ try test__addosi4(max - 1, max);
+ try test__addosi4(1, min);
+ try test__addosi4(-1, min);
+ try test__addosi4(-1, max);
+ try test__addosi4(1, max);
+ try test__addosi4(min, 1);
+ try test__addosi4(min, -1);
+ try test__addosi4(max, -1);
+ try test__addosi4(max, 1);
+}
diff --git a/lib/compiler_rt/addoti4_test.zig b/lib/compiler_rt/addoti4_test.zig
new file mode 100644
index 0000000000..dd0f4e3d3c
--- /dev/null
+++ b/lib/compiler_rt/addoti4_test.zig
@@ -0,0 +1,77 @@
+const addv = @import("addo.zig");
+const std = @import("std");
+const testing = std.testing;
+const math = std.math;
+
+fn test__addoti4(a: i128, b: i128) !void {
+ var result_ov: c_int = undefined;
+ var expected_ov: c_int = undefined;
+ var result = addv.__addoti4(a, b, &result_ov);
+ var expected: i128 = simple_addoti4(a, b, &expected_ov);
+ try testing.expectEqual(expected, result);
+ try testing.expectEqual(expected_ov, result_ov);
+}
+
+fn simple_addoti4(a: i128, b: i128, overflow: *c_int) i128 {
+ overflow.* = 0;
+ const min: i128 = math.minInt(i128);
+ const max: i128 = math.maxInt(i128);
+ if (((a > 0) and (b > max - a)) or
+ ((a < 0) and (b < min - a)))
+ overflow.* = 1;
+ return a +% b;
+}
+
+test "addoti4" {
+ const min: i128 = math.minInt(i128);
+ const max: i128 = math.maxInt(i128);
+ var i: i128 = 1;
+ while (i < max) : (i *|= 2) {
+ try test__addoti4(i, i);
+ try test__addoti4(-i, -i);
+ try test__addoti4(i, -i);
+ try test__addoti4(-i, i);
+ }
+
+ // edge cases
+ // 0 + 0 = 0
+ // MIN + MIN overflow
+ // MAX + MAX overflow
+ // 0 + MIN MIN
+ // 0 + MAX MAX
+ // MIN + 0 MIN
+ // MAX + 0 MAX
+ // MIN + MAX -1
+ // MAX + MIN -1
+ try test__addoti4(0, 0);
+ try test__addoti4(min, min);
+ try test__addoti4(max, max);
+ try test__addoti4(0, min);
+ try test__addoti4(0, max);
+ try test__addoti4(min, 0);
+ try test__addoti4(max, 0);
+ try test__addoti4(min, max);
+ try test__addoti4(max, min);
+
+ // derived edge cases
+ // MIN+1 + MIN overflow
+ // MAX-1 + MAX overflow
+ // 1 + MIN = MIN+1
+ // -1 + MIN overflow
+ // -1 + MAX = MAX-1
+ // +1 + MAX overflow
+ // MIN + 1 = MIN+1
+ // MIN + -1 overflow
+ // MAX + 1 overflow
+ // MAX + -1 = MAX-1
+ try test__addoti4(min + 1, min);
+ try test__addoti4(max - 1, max);
+ try test__addoti4(1, min);
+ try test__addoti4(-1, min);
+ try test__addoti4(-1, max);
+ try test__addoti4(1, max);
+ try test__addoti4(min, 1);
+ try test__addoti4(min, -1);
+ try test__addoti4(max, -1);
+ try test__addoti4(max, 1);
+}
diff --git a/lib/compiler_rt/addsf3.zig b/lib/compiler_rt/addsf3.zig
new file mode 100644
index 0000000000..83f8285371
--- /dev/null
+++ b/lib/compiler_rt/addsf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = common.linkage });
+ } else {
+ @export(__addsf3, .{ .name = "__addsf3", .linkage = common.linkage });
+ }
+}
+
+fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
+ return addf3(f32, a, b);
+}
+
+fn __aeabi_fadd(a: f32, b: f32) callconv(.AAPCS) f32 {
+ return addf3(f32, a, b);
+}
diff --git a/lib/compiler_rt/addtf3.zig b/lib/compiler_rt/addtf3.zig
new file mode 100644
index 0000000000..2a22493ded
--- /dev/null
+++ b/lib/compiler_rt/addtf3.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__addkf3, .{ .name = "__addkf3", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_add, .{ .name = "_Qp_add", .linkage = common.linkage });
+ } else {
+ @export(__addtf3, .{ .name = "__addtf3", .linkage = common.linkage });
+ }
+}
+
+pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
+ return addf3(f128, a, b);
+}
+
+fn __addkf3(a: f128, b: f128) callconv(.C) f128 {
+ return addf3(f128, a, b);
+}
+
+fn _Qp_add(c: *f128, a: *f128, b: *f128) callconv(.C) void {
+ c.* = addf3(f128, a.*, b.*);
+}
diff --git a/lib/compiler_rt/addxf3.zig b/lib/compiler_rt/addxf3.zig
new file mode 100644
index 0000000000..72cf955632
--- /dev/null
+++ b/lib/compiler_rt/addxf3.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__addxf3, .{ .name = "__addxf3", .linkage = common.linkage });
+}
+
+pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
+ return addf3(f80, a, b);
+}
diff --git a/lib/compiler_rt/arm.zig b/lib/compiler_rt/arm.zig
new file mode 100644
index 0000000000..145d3992f7
--- /dev/null
+++ b/lib/compiler_rt/arm.zig
@@ -0,0 +1,188 @@
+// ARM specific builtins
+const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (!builtin.is_test) {
+ if (arch.isARM() or arch.isThumb()) {
+ @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = common.linkage });
+ @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = common.linkage });
+ @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = common.linkage });
+
+ @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = common.linkage });
+ @export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = common.linkage });
+
+ @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = common.linkage });
+ @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = common.linkage });
+
+ @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = common.linkage });
+ @export(__aeabi_memcpy4, .{ .name = "__aeabi_memcpy4", .linkage = common.linkage });
+ @export(__aeabi_memcpy8, .{ .name = "__aeabi_memcpy8", .linkage = common.linkage });
+
+ @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = common.linkage });
+ @export(__aeabi_memmove4, .{ .name = "__aeabi_memmove4", .linkage = common.linkage });
+ @export(__aeabi_memmove8, .{ .name = "__aeabi_memmove8", .linkage = common.linkage });
+
+ @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = common.linkage });
+ @export(__aeabi_memset4, .{ .name = "__aeabi_memset4", .linkage = common.linkage });
+ @export(__aeabi_memset8, .{ .name = "__aeabi_memset8", .linkage = common.linkage });
+
+ @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = common.linkage });
+ @export(__aeabi_memclr4, .{ .name = "__aeabi_memclr4", .linkage = common.linkage });
+ @export(__aeabi_memclr8, .{ .name = "__aeabi_memclr8", .linkage = common.linkage });
+
+ if (builtin.os.tag == .linux) {
+ @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = common.linkage });
+ }
+ }
+ }
+}
+
+const __divmodsi4 = @import("int.zig").__divmodsi4;
+const __udivmodsi4 = @import("int.zig").__udivmodsi4;
+const __divmoddi4 = @import("int.zig").__divmoddi4;
+const __udivmoddi4 = @import("int.zig").__udivmoddi4;
+
+extern fn memset(dest: ?[*]u8, c: u8, n: usize) ?[*]u8;
+extern fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) ?[*]u8;
+extern fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) ?[*]u8;
+
+pub fn __aeabi_memcpy(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memcpy(dest, src, n);
+}
+pub fn __aeabi_memcpy4(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memcpy(dest, src, n);
+}
+pub fn __aeabi_memcpy8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memcpy(dest, src, n);
+}
+
+pub fn __aeabi_memmove(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memmove(dest, src, n);
+}
+pub fn __aeabi_memmove4(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memmove(dest, src, n);
+}
+pub fn __aeabi_memmove8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memmove(dest, src, n);
+}
+
+pub fn __aeabi_memset(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+    // This is identical to the standard `memset` definition but with the last
+ // two arguments swapped
+ _ = memset(dest, c, n);
+}
+pub fn __aeabi_memset4(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memset(dest, c, n);
+}
+pub fn __aeabi_memset8(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memset(dest, c, n);
+}
+
+pub fn __aeabi_memclr(dest: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memset(dest, 0, n);
+}
+pub fn __aeabi_memclr4(dest: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memset(dest, 0, n);
+}
+pub fn __aeabi_memclr8(dest: [*]u8, n: usize) callconv(.AAPCS) void {
+ @setRuntimeSafety(false);
+ _ = memset(dest, 0, n);
+}
+
+// Dummy functions to avoid errors during the linking phase
+pub fn __aeabi_unwind_cpp_pr0() callconv(.AAPCS) void {}
+pub fn __aeabi_unwind_cpp_pr1() callconv(.AAPCS) void {}
+pub fn __aeabi_unwind_cpp_pr2() callconv(.AAPCS) void {}
+
+// This function can only clobber r0 according to the ABI
+pub fn __aeabi_read_tp() callconv(.Naked) void {
+ @setRuntimeSafety(false);
+ asm volatile (
+ \\ mrc p15, 0, r0, c13, c0, 3
+ \\ bx lr
+ );
+ unreachable;
+}
+
+// The following functions are wrapped in an asm block to ensure the required
+// calling convention is always respected
+
+pub fn __aeabi_uidivmod() callconv(.Naked) void {
+ @setRuntimeSafety(false);
+ // Divide r0 by r1; the quotient goes in r0, the remainder in r1
+ asm volatile (
+ \\ push {lr}
+ \\ sub sp, #4
+ \\ mov r2, sp
+ \\ bl __udivmodsi4
+ \\ ldr r1, [sp]
+ \\ add sp, #4
+ \\ pop {pc}
+ ::: "memory");
+ unreachable;
+}
+
+pub fn __aeabi_uldivmod() callconv(.Naked) void {
+ @setRuntimeSafety(false);
+ // Divide r1:r0 by r3:r2; the quotient goes in r1:r0, the remainder in r3:r2
+ asm volatile (
+ \\ push {r4, lr}
+ \\ sub sp, #16
+ \\ add r4, sp, #8
+ \\ str r4, [sp]
+ \\ bl __udivmoddi4
+ \\ ldr r2, [sp, #8]
+ \\ ldr r3, [sp, #12]
+ \\ add sp, #16
+ \\ pop {r4, pc}
+ ::: "memory");
+ unreachable;
+}
+
+pub fn __aeabi_idivmod() callconv(.Naked) void {
+ @setRuntimeSafety(false);
+ // Divide r0 by r1; the quotient goes in r0, the remainder in r1
+ asm volatile (
+ \\ push {lr}
+ \\ sub sp, #4
+ \\ mov r2, sp
+ \\ bl __divmodsi4
+ \\ ldr r1, [sp]
+ \\ add sp, #4
+ \\ pop {pc}
+ ::: "memory");
+ unreachable;
+}
+
+pub fn __aeabi_ldivmod() callconv(.Naked) void {
+ @setRuntimeSafety(false);
+ // Divide r1:r0 by r3:r2; the quotient goes in r1:r0, the remainder in r3:r2
+ asm volatile (
+ \\ push {r4, lr}
+ \\ sub sp, #16
+ \\ add r4, sp, #8
+ \\ str r4, [sp]
+ \\ bl __divmoddi4
+ \\ ldr r2, [sp, #8]
+ \\ ldr r3, [sp, #12]
+ \\ add sp, #16
+ \\ pop {r4, pc}
+ ::: "memory");
+ unreachable;
+}
diff --git a/lib/std/special/compiler_rt/ashldi3_test.zig b/lib/compiler_rt/ashldi3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/ashldi3_test.zig
rename to lib/compiler_rt/ashldi3_test.zig
diff --git a/lib/std/special/compiler_rt/ashlti3_test.zig b/lib/compiler_rt/ashlti3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/ashlti3_test.zig
rename to lib/compiler_rt/ashlti3_test.zig
diff --git a/lib/std/special/compiler_rt/ashrdi3_test.zig b/lib/compiler_rt/ashrdi3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/ashrdi3_test.zig
rename to lib/compiler_rt/ashrdi3_test.zig
diff --git a/lib/std/special/compiler_rt/ashrti3_test.zig b/lib/compiler_rt/ashrti3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/ashrti3_test.zig
rename to lib/compiler_rt/ashrti3_test.zig
diff --git a/lib/std/special/compiler_rt/atomics.zig b/lib/compiler_rt/atomics.zig
similarity index 90%
rename from lib/std/special/compiler_rt/atomics.zig
rename to lib/compiler_rt/atomics.zig
index 7727d7af3d..6935a858aa 100644
--- a/lib/std/special/compiler_rt/atomics.zig
+++ b/lib/compiler_rt/atomics.zig
@@ -1,8 +1,9 @@
const std = @import("std");
const builtin = @import("builtin");
-const arch = builtin.cpu.arch;
-
+const cpu = builtin.cpu;
+const arch = cpu.arch;
const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+pub const panic = @import("common.zig").panic;
// This parameter is true iff the target architecture supports the bare minimum
// to implement the atomic load/store intrinsics.
@@ -24,6 +25,11 @@ const supports_atomic_ops = switch (arch) {
// load/store atomically.
// Objects bigger than this threshold require the use of a lock.
const largest_atomic_size = switch (arch) {
+    // On SPARC systems that lack CAS and/or swap instructions, the only
+ // available atomic operation is a test-and-set (`ldstub`), so we force
+ // every atomic memory access to go through the lock.
+ .sparc, .sparcel => if (cpu.features.featureSetHas(.hasleoncasa)) @sizeOf(usize) else 0,
+
// XXX: On x86/x86_64 we could check the presence of cmpxchg8b/cmpxchg16b
// and set this parameter accordingly.
else => @sizeOf(usize),
@@ -36,20 +42,44 @@ const SpinlockTable = struct {
const max_spinlocks = 64;
const Spinlock = struct {
+ // SPARC ldstub instruction will write a 255 into the memory location.
+ // We'll use that as a sign that the lock is currently held.
+ // See also: Section B.7 in SPARCv8 spec & A.29 in SPARCv9 spec.
+ const sparc_lock: type = enum(u8) { Unlocked = 0, Locked = 255 };
+ const other_lock: type = enum(usize) { Unlocked = 0, Locked };
+
// Prevent false sharing by providing enough padding between two
// consecutive spinlock elements
- v: enum(usize) { Unlocked = 0, Locked } align(cache_line_size) = .Unlocked,
+ v: if (arch.isSPARC()) sparc_lock else other_lock align(cache_line_size) = .Unlocked,
fn acquire(self: *@This()) void {
while (true) {
- switch (@atomicRmw(@TypeOf(self.v), &self.v, .Xchg, .Locked, .Acquire)) {
+ const flag = if (comptime arch.isSPARC()) flag: {
+ break :flag asm volatile ("ldstub [%[addr]], %[flag]"
+ : [flag] "=r" (-> @TypeOf(self.v)),
+ : [addr] "r" (&self.v),
+ : "memory"
+ );
+ } else flag: {
+ break :flag @atomicRmw(@TypeOf(self.v), &self.v, .Xchg, .Locked, .Acquire);
+ };
+
+ switch (flag) {
.Unlocked => break,
.Locked => {},
}
}
}
fn release(self: *@This()) void {
- @atomicStore(@TypeOf(self.v), &self.v, .Unlocked, .Release);
+ if (comptime arch.isSPARC()) {
+ _ = asm volatile ("clrb [%[addr]]"
+ :
+ : [addr] "r" (&self.v),
+ : "memory"
+ );
+ } else {
+ @atomicStore(@TypeOf(self.v), &self.v, .Unlocked, .Release);
+ }
}
};
diff --git a/lib/std/special/compiler_rt/aulldiv.zig b/lib/compiler_rt/aulldiv.zig
similarity index 80%
rename from lib/std/special/compiler_rt/aulldiv.zig
rename to lib/compiler_rt/aulldiv.zig
index 7709e17e63..d9517c6d10 100644
--- a/lib/std/special/compiler_rt/aulldiv.zig
+++ b/lib/compiler_rt/aulldiv.zig
@@ -1,7 +1,20 @@
+const std = @import("std");
const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const abi = builtin.abi;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (arch == .i386 and abi == .msvc) {
+ // Don't let LLVM apply the stdcall name mangling on those MSVC builtins
+ @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = common.linkage });
+ @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = common.linkage });
+ }
+}
pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
- @setRuntimeSafety(builtin.is_test);
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);
diff --git a/lib/std/special/compiler_rt/aullrem.zig b/lib/compiler_rt/aullrem.zig
similarity index 81%
rename from lib/std/special/compiler_rt/aullrem.zig
rename to lib/compiler_rt/aullrem.zig
index dbd52cd377..43821eb9d3 100644
--- a/lib/std/special/compiler_rt/aullrem.zig
+++ b/lib/compiler_rt/aullrem.zig
@@ -1,7 +1,20 @@
+const std = @import("std");
const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const abi = builtin.abi;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (arch == .i386 and abi == .msvc) {
+ // Don't let LLVM apply the stdcall name mangling on those MSVC builtins
+ @export(_allrem, .{ .name = "\x01__allrem", .linkage = common.linkage });
+ @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = common.linkage });
+ }
+}
pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
- @setRuntimeSafety(builtin.is_test);
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);
diff --git a/lib/std/special/compiler_rt/bswap.zig b/lib/compiler_rt/bswap.zig
similarity index 89%
rename from lib/std/special/compiler_rt/bswap.zig
rename to lib/compiler_rt/bswap.zig
index f1d2138811..9f7d2cb879 100644
--- a/lib/std/special/compiler_rt/bswap.zig
+++ b/lib/compiler_rt/bswap.zig
@@ -1,5 +1,14 @@
const std = @import("std");
const builtin = @import("builtin");
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__bswapsi2, .{ .name = "__bswapsi2", .linkage = common.linkage });
+ @export(__bswapdi2, .{ .name = "__bswapdi2", .linkage = common.linkage });
+ @export(__bswapti2, .{ .name = "__bswapti2", .linkage = common.linkage });
+}
// bswap - byteswap
// - bswapXi2 for unoptimized big and little endian
@@ -12,7 +21,6 @@ const builtin = @import("builtin");
// 00 00 00 ff << 3*8 (rightmost byte)
inline fn bswapXi2(comptime T: type, a: T) T {
- @setRuntimeSafety(builtin.is_test);
switch (@bitSizeOf(T)) {
32 => {
// zig fmt: off
diff --git a/lib/std/special/compiler_rt/bswapdi2_test.zig b/lib/compiler_rt/bswapdi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/bswapdi2_test.zig
rename to lib/compiler_rt/bswapdi2_test.zig
diff --git a/lib/std/special/compiler_rt/bswapsi2_test.zig b/lib/compiler_rt/bswapsi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/bswapsi2_test.zig
rename to lib/compiler_rt/bswapsi2_test.zig
diff --git a/lib/std/special/compiler_rt/bswapti2_test.zig b/lib/compiler_rt/bswapti2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/bswapti2_test.zig
rename to lib/compiler_rt/bswapti2_test.zig
diff --git a/lib/compiler_rt/ceil.zig b/lib/compiler_rt/ceil.zig
new file mode 100644
index 0000000000..406f61fbb9
--- /dev/null
+++ b/lib/compiler_rt/ceil.zig
@@ -0,0 +1,180 @@
+//! Ported from musl, which is MIT licensed.
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/ceilf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/ceil.c
+
+const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const math = std.math;
+const expect = std.testing.expect;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__ceilh, .{ .name = "__ceilh", .linkage = common.linkage });
+ @export(ceilf, .{ .name = "ceilf", .linkage = common.linkage });
+ @export(ceil, .{ .name = "ceil", .linkage = common.linkage });
+ @export(__ceilx, .{ .name = "__ceilx", .linkage = common.linkage });
+ const ceilq_sym_name = if (common.want_ppc_abi) "ceilf128" else "ceilq";
+ @export(ceilq, .{ .name = ceilq_sym_name, .linkage = common.linkage });
+ @export(ceill, .{ .name = "ceill", .linkage = common.linkage });
+}
+
+pub fn __ceilh(x: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, ceilf(x));
+}
+
+pub fn ceilf(x: f32) callconv(.C) f32 {
+ var u = @bitCast(u32, x);
+ var e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
+ var m: u32 = undefined;
+
+ // TODO: Shouldn't need this explicit check.
+ if (x == 0.0) {
+ return x;
+ }
+
+ if (e >= 23) {
+ return x;
+ } else if (e >= 0) {
+ m = @as(u32, 0x007FFFFF) >> @intCast(u5, e);
+ if (u & m == 0) {
+ return x;
+ }
+ math.doNotOptimizeAway(x + 0x1.0p120);
+ if (u >> 31 == 0) {
+ u += m;
+ }
+ u &= ~m;
+ return @bitCast(f32, u);
+ } else {
+ math.doNotOptimizeAway(x + 0x1.0p120);
+ if (u >> 31 != 0) {
+ return -0.0;
+ } else {
+ return 1.0;
+ }
+ }
+}
+
+pub fn ceil(x: f64) callconv(.C) f64 {
+ const f64_toint = 1.0 / math.floatEps(f64);
+
+ const u = @bitCast(u64, x);
+ const e = (u >> 52) & 0x7FF;
+ var y: f64 = undefined;
+
+ if (e >= 0x3FF + 52 or x == 0) {
+ return x;
+ }
+
+ if (u >> 63 != 0) {
+ y = x - f64_toint + f64_toint - x;
+ } else {
+ y = x + f64_toint - f64_toint - x;
+ }
+
+ if (e <= 0x3FF - 1) {
+ math.doNotOptimizeAway(y);
+ if (u >> 63 != 0) {
+ return -0.0;
+ } else {
+ return 1.0;
+ }
+ } else if (y < 0) {
+ return x + y + 1;
+ } else {
+ return x + y;
+ }
+}
+
+pub fn __ceilx(x: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, ceilq(x));
+}
+
+pub fn ceilq(x: f128) callconv(.C) f128 {
+ const f128_toint = 1.0 / math.floatEps(f128);
+
+ const u = @bitCast(u128, x);
+ const e = (u >> 112) & 0x7FFF;
+ var y: f128 = undefined;
+
+ if (e >= 0x3FFF + 112 or x == 0) return x;
+
+ if (u >> 127 != 0) {
+ y = x - f128_toint + f128_toint - x;
+ } else {
+ y = x + f128_toint - f128_toint - x;
+ }
+
+ if (e <= 0x3FFF - 1) {
+ math.doNotOptimizeAway(y);
+ if (u >> 127 != 0) {
+ return -0.0;
+ } else {
+ return 1.0;
+ }
+ } else if (y < 0) {
+ return x + y + 1;
+ } else {
+ return x + y;
+ }
+}
+
+pub fn ceill(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __ceilh(x),
+ 32 => return ceilf(x),
+ 64 => return ceil(x),
+ 80 => return __ceilx(x),
+ 128 => return ceilq(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+test "ceil32" {
+ try expect(ceilf(1.3) == 2.0);
+ try expect(ceilf(-1.3) == -1.0);
+ try expect(ceilf(0.2) == 1.0);
+}
+
+test "ceil64" {
+ try expect(ceil(1.3) == 2.0);
+ try expect(ceil(-1.3) == -1.0);
+ try expect(ceil(0.2) == 1.0);
+}
+
+test "ceil128" {
+ try expect(ceilq(1.3) == 2.0);
+ try expect(ceilq(-1.3) == -1.0);
+ try expect(ceilq(0.2) == 1.0);
+}
+
+test "ceil32.special" {
+ try expect(ceilf(0.0) == 0.0);
+ try expect(ceilf(-0.0) == -0.0);
+ try expect(math.isPositiveInf(ceilf(math.inf(f32))));
+ try expect(math.isNegativeInf(ceilf(-math.inf(f32))));
+ try expect(math.isNan(ceilf(math.nan(f32))));
+}
+
+test "ceil64.special" {
+ try expect(ceil(0.0) == 0.0);
+ try expect(ceil(-0.0) == -0.0);
+ try expect(math.isPositiveInf(ceil(math.inf(f64))));
+ try expect(math.isNegativeInf(ceil(-math.inf(f64))));
+ try expect(math.isNan(ceil(math.nan(f64))));
+}
+
+test "ceil128.special" {
+ try expect(ceilq(0.0) == 0.0);
+ try expect(ceilq(-0.0) == -0.0);
+ try expect(math.isPositiveInf(ceilq(math.inf(f128))));
+ try expect(math.isNegativeInf(ceilq(-math.inf(f128))));
+ try expect(math.isNan(ceilq(math.nan(f128))));
+}
diff --git a/lib/std/special/compiler_rt/clear_cache.zig b/lib/compiler_rt/clear_cache.zig
similarity index 96%
rename from lib/std/special/compiler_rt/clear_cache.zig
rename to lib/compiler_rt/clear_cache.zig
index d6ce02249e..b21606814c 100644
--- a/lib/std/special/compiler_rt/clear_cache.zig
+++ b/lib/compiler_rt/clear_cache.zig
@@ -2,6 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const os = builtin.os.tag;
+pub const panic = @import("common.zig").panic;
// Ported from llvm-project d32170dbd5b0d54436537b6b75beaf44324e0c28
@@ -10,7 +11,13 @@ const os = builtin.os.tag;
// It is expected to invalidate the instruction cache for the
// specified range.
-pub fn clear_cache(start: usize, end: usize) callconv(.C) void {
+comptime {
+ if (builtin.zig_backend != .stage2_llvm) {
+ _ = clear_cache;
+ }
+}
+
+fn clear_cache(start: usize, end: usize) callconv(.C) void {
const x86 = switch (arch) {
.i386, .x86_64 => true,
else => false,
@@ -36,7 +43,7 @@ pub fn clear_cache(start: usize, end: usize) callconv(.C) void {
else => false,
};
const sparc = switch (arch) {
- .sparc, .sparcv9, .sparcel => true,
+ .sparc, .sparc64, .sparcel => true,
else => false,
};
const apple = switch (os) {
diff --git a/lib/std/special/compiler_rt/clzdi2_test.zig b/lib/compiler_rt/clzdi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/clzdi2_test.zig
rename to lib/compiler_rt/clzdi2_test.zig
diff --git a/lib/std/special/compiler_rt/clzsi2_test.zig b/lib/compiler_rt/clzsi2_test.zig
similarity index 92%
rename from lib/std/special/compiler_rt/clzsi2_test.zig
rename to lib/compiler_rt/clzsi2_test.zig
index 7d07b3e9c1..ef64e24fe1 100644
--- a/lib/std/special/compiler_rt/clzsi2_test.zig
+++ b/lib/compiler_rt/clzsi2_test.zig
@@ -1,14 +1,26 @@
+const builtin = @import("builtin");
const clz = @import("count0bits.zig");
const testing = @import("std").testing;
fn test__clzsi2(a: u32, expected: i32) !void {
- // XXX At high optimization levels this test may be horribly miscompiled if
- // one of the naked implementations is selected.
- var nakedClzsi2 = clz.__clzsi2;
- var actualClzsi2 = @ptrCast(fn (a: i32) callconv(.C) i32, nakedClzsi2);
- var x = @bitCast(i32, a);
- var result = actualClzsi2(x);
- try testing.expectEqual(expected, result);
+ // stage1 and stage2 diverge on function pointer semantics
+ switch (builtin.zig_backend) {
+ .stage1 => {
+ // Use of `var` here is working around a stage1 bug.
+ var nakedClzsi2 = clz.__clzsi2;
+ var actualClzsi2 = @ptrCast(fn (a: i32) callconv(.C) i32, nakedClzsi2);
+ var x = @bitCast(i32, a);
+ var result = actualClzsi2(x);
+ try testing.expectEqual(expected, result);
+ },
+ else => {
+ const nakedClzsi2 = clz.__clzsi2;
+ const actualClzsi2 = @ptrCast(*const fn (a: i32) callconv(.C) i32, &nakedClzsi2);
+ const x = @bitCast(i32, a);
+ const result = actualClzsi2(x);
+ try testing.expectEqual(expected, result);
+ },
+ }
}
test "clzsi2" {
diff --git a/lib/std/special/compiler_rt/clzti2_test.zig b/lib/compiler_rt/clzti2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/clzti2_test.zig
rename to lib/compiler_rt/clzti2_test.zig
diff --git a/lib/std/special/compiler_rt/cmp.zig b/lib/compiler_rt/cmp.zig
similarity index 68%
rename from lib/std/special/compiler_rt/cmp.zig
rename to lib/compiler_rt/cmp.zig
index 9eb4227527..8ff2c38cd4 100644
--- a/lib/std/special/compiler_rt/cmp.zig
+++ b/lib/compiler_rt/cmp.zig
@@ -1,5 +1,18 @@
const std = @import("std");
const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__cmpsi2, .{ .name = "__cmpsi2", .linkage = common.linkage });
+ @export(__cmpdi2, .{ .name = "__cmpdi2", .linkage = common.linkage });
+ @export(__cmpti2, .{ .name = "__cmpti2", .linkage = common.linkage });
+ @export(__ucmpsi2, .{ .name = "__ucmpsi2", .linkage = common.linkage });
+ @export(__ucmpdi2, .{ .name = "__ucmpdi2", .linkage = common.linkage });
+ @export(__ucmpti2, .{ .name = "__ucmpti2", .linkage = common.linkage });
+}
// cmp - signed compare
// - cmpXi2_generic for unoptimized little and big endian
@@ -12,7 +25,6 @@ const builtin = @import("builtin");
// a > b => 2
inline fn XcmpXi2(comptime T: type, a: T, b: T) i32 {
- @setRuntimeSafety(builtin.is_test);
var cmp1: i32 = 0;
var cmp2: i32 = 0;
if (a > b)
diff --git a/lib/compiler_rt/cmpdf2.zig b/lib/compiler_rt/cmpdf2.zig
new file mode 100644
index 0000000000..67dbcd8b4d
--- /dev/null
+++ b/lib/compiler_rt/cmpdf2.zig
@@ -0,0 +1,68 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = common.linkage });
+ @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = common.linkage });
+ @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = common.linkage });
+ } else {
+ @export(__eqdf2, .{ .name = "__eqdf2", .linkage = common.linkage });
+ @export(__nedf2, .{ .name = "__nedf2", .linkage = common.linkage });
+ @export(__ledf2, .{ .name = "__ledf2", .linkage = common.linkage });
+ @export(__cmpdf2, .{ .name = "__cmpdf2", .linkage = common.linkage });
+ @export(__ltdf2, .{ .name = "__ltdf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__ledf2`, `__eqdf2`, `__nedf2`, `__cmpdf2`,
+/// and `__ltdf2`.
+fn __cmpdf2(a: f64, b: f64) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f64, comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+pub fn __ledf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
+/// to have the same return value.
+pub fn __eqdf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
+/// to have the same return value.
+pub fn __nedf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+pub fn __ltdf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) == .Equal);
+}
+
+fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) == .Less);
+}
+
+fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) != .Greater);
+}
diff --git a/lib/std/special/compiler_rt/cmpdi2_test.zig b/lib/compiler_rt/cmpdi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/cmpdi2_test.zig
rename to lib/compiler_rt/cmpdi2_test.zig
diff --git a/lib/compiler_rt/cmpsf2.zig b/lib/compiler_rt/cmpsf2.zig
new file mode 100644
index 0000000000..1ac40ef6e2
--- /dev/null
+++ b/lib/compiler_rt/cmpsf2.zig
@@ -0,0 +1,68 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = common.linkage });
+ @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = common.linkage });
+ @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = common.linkage });
+ } else {
+ @export(__eqsf2, .{ .name = "__eqsf2", .linkage = common.linkage });
+ @export(__nesf2, .{ .name = "__nesf2", .linkage = common.linkage });
+ @export(__lesf2, .{ .name = "__lesf2", .linkage = common.linkage });
+ @export(__cmpsf2, .{ .name = "__cmpsf2", .linkage = common.linkage });
+ @export(__ltsf2, .{ .name = "__ltsf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__lesf2`, `__eqsf2`, `__nesf2`, `__cmpsf2`,
+/// and `__ltsf2`.
+fn __cmpsf2(a: f32, b: f32) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f32, comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+pub fn __lesf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
+/// to have the same return value.
+pub fn __eqsf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
+/// to have the same return value.
+pub fn __nesf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+pub fn __ltsf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Equal);
+}
+
+fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Less);
+}
+
+fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) != .Greater);
+}
diff --git a/lib/std/special/compiler_rt/cmpsi2_test.zig b/lib/compiler_rt/cmpsi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/cmpsi2_test.zig
rename to lib/compiler_rt/cmpsi2_test.zig
diff --git a/lib/compiler_rt/cmptf2.zig b/lib/compiler_rt/cmptf2.zig
new file mode 100644
index 0000000000..00263f943a
--- /dev/null
+++ b/lib/compiler_rt/cmptf2.zig
@@ -0,0 +1,122 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__eqkf2, .{ .name = "__eqkf2", .linkage = common.linkage });
+ @export(__nekf2, .{ .name = "__nekf2", .linkage = common.linkage });
+ @export(__ltkf2, .{ .name = "__ltkf2", .linkage = common.linkage });
+ @export(__lekf2, .{ .name = "__lekf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = common.linkage });
+ @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = common.linkage });
+ @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = common.linkage });
+ @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = common.linkage });
+ @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = common.linkage });
+ @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = common.linkage });
+ @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = common.linkage });
+ } else {
+ @export(__eqtf2, .{ .name = "__eqtf2", .linkage = common.linkage });
+ @export(__netf2, .{ .name = "__netf2", .linkage = common.linkage });
+ @export(__letf2, .{ .name = "__letf2", .linkage = common.linkage });
+ @export(__cmptf2, .{ .name = "__cmptf2", .linkage = common.linkage });
+ @export(__lttf2, .{ .name = "__lttf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__letf2`, `__eqtf2`, `__netf2`, `__cmptf2`,
+/// and `__lttf2`.
+fn __cmptf2(a: f128, b: f128) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f128, comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+fn __letf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
+/// to have the same return value.
+fn __eqtf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
+/// to have the same return value.
+fn __netf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+fn __lttf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __eqkf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __nekf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __ltkf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __lekf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+const SparcFCMP = enum(i32) {
+ Equal = 0,
+ Less = 1,
+ Greater = 2,
+ Unordered = 3,
+};
+
+fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f128, SparcFCMP, a.*, b.*));
+}
+
+fn _Qp_feq(a: *const f128, b: *const f128) callconv(.C) bool {
+ return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Equal;
+}
+
+fn _Qp_fne(a: *const f128, b: *const f128) callconv(.C) bool {
+ return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) != .Equal;
+}
+
+fn _Qp_flt(a: *const f128, b: *const f128) callconv(.C) bool {
+ return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Less;
+}
+
+fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.C) bool {
+ return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Greater;
+}
+
+fn _Qp_fge(a: *const f128, b: *const f128) callconv(.C) bool {
+ return switch (@intToEnum(SparcFCMP, _Qp_cmp(a, b))) {
+ .Equal, .Greater => true,
+ .Less, .Unordered => false,
+ };
+}
+
+fn _Qp_fle(a: *const f128, b: *const f128) callconv(.C) bool {
+ return switch (@intToEnum(SparcFCMP, _Qp_cmp(a, b))) {
+ .Equal, .Less => true,
+ .Greater, .Unordered => false,
+ };
+}
diff --git a/lib/std/special/compiler_rt/cmpti2_test.zig b/lib/compiler_rt/cmpti2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/cmpti2_test.zig
rename to lib/compiler_rt/cmpti2_test.zig
diff --git a/lib/compiler_rt/cmpxf2.zig b/lib/compiler_rt/cmpxf2.zig
new file mode 100644
index 0000000000..7286316f99
--- /dev/null
+++ b/lib/compiler_rt/cmpxf2.zig
@@ -0,0 +1,50 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__eqxf2, .{ .name = "__eqxf2", .linkage = common.linkage });
+ @export(__nexf2, .{ .name = "__nexf2", .linkage = common.linkage });
+ @export(__lexf2, .{ .name = "__lexf2", .linkage = common.linkage });
+ @export(__cmpxf2, .{ .name = "__cmpxf2", .linkage = common.linkage });
+ @export(__ltxf2, .{ .name = "__ltxf2", .linkage = common.linkage });
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__lexf2`, `__eqxf2`, `__nexf2`, `__cmpxf2`,
+/// and `__ltxf2`.
+fn __cmpxf2(a: f80, b: f80) callconv(.C) i32 {
+ return @enumToInt(comparef.cmp_f80(comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+fn __lexf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
+/// to have the same return value.
+fn __eqxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
+/// to have the same return value.
+fn __nexf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+fn __ltxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
diff --git a/lib/compiler_rt/common.zig b/lib/compiler_rt/common.zig
new file mode 100644
index 0000000000..538b237e5e
--- /dev/null
+++ b/lib/compiler_rt/common.zig
@@ -0,0 +1,202 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+pub const want_aeabi = switch (builtin.abi) {
+ .eabi,
+ .eabihf,
+ .musleabi,
+ .musleabihf,
+ .gnueabi,
+ .gnueabihf,
+ => switch (builtin.cpu.arch) {
+ .arm, .armeb, .thumb, .thumbeb => true,
+ else => false,
+ },
+ else => false,
+};
+pub const want_ppc_abi = builtin.cpu.arch.isPPC() or builtin.cpu.arch.isPPC64();
+
+/// This governs whether to use these symbol names for f16/f32 conversions
+/// rather than the standard names:
+/// * __gnu_f2h_ieee
+/// * __gnu_h2f_ieee
+/// Known correct configurations:
+/// x86_64-freestanding-none => true
+/// x86_64-linux-none => true
+/// x86_64-linux-gnu => true
+/// x86_64-linux-musl => true
+/// x86_64-linux-eabi => true
+/// arm-linux-musleabihf => true
+/// arm-linux-gnueabihf => true
+/// arm-linux-eabihf => false
+/// wasm32-wasi-musl => false
+/// wasm32-freestanding-none => false
+/// x86_64-windows-gnu => true
+/// x86_64-windows-msvc => true
+/// any-macos-any => false
+pub const gnu_f16_abi = switch (builtin.cpu.arch) {
+ .wasm32, .wasm64 => false,
+
+ .arm, .armeb, .thumb, .thumbeb => switch (builtin.abi) {
+ .eabi, .eabihf => false,
+ else => true,
+ },
+
+ else => !builtin.os.tag.isDarwin(),
+};
+
+pub const want_sparc_abi = builtin.cpu.arch.isSPARC();
+
+// Avoid dragging in the runtime safety mechanisms into this .o file,
+// unless we're trying to test compiler-rt.
+pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
+ _ = error_return_trace;
+ if (builtin.is_test) {
+ @setCold(true);
+ std.debug.panic("{s}", .{msg});
+ } else {
+ unreachable;
+ }
+}
+
+/// AArch64 is the only ABI (at the moment) to support f16 arguments without the
+/// need for extending them to wider fp types.
+/// TODO remove this; do this type selection in the language rather than
+/// here in compiler-rt.
+pub const F16T = if (builtin.cpu.arch.isAARCH64()) f16 else u16;
+
+pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
+ switch (Z) {
+ u16 => {
+ // 16x16 --> 32 bit multiply
+ const product = @as(u32, a) * @as(u32, b);
+ hi.* = @intCast(u16, product >> 16);
+ lo.* = @truncate(u16, product);
+ },
+ u32 => {
+ // 32x32 --> 64 bit multiply
+ const product = @as(u64, a) * @as(u64, b);
+ hi.* = @truncate(u32, product >> 32);
+ lo.* = @truncate(u32, product);
+ },
+ u64 => {
+ const S = struct {
+ fn loWord(x: u64) u64 {
+ return @truncate(u32, x);
+ }
+ fn hiWord(x: u64) u64 {
+ return @truncate(u32, x >> 32);
+ }
+ };
+ // 64x64 -> 128 wide multiply for platforms that don't have such an operation;
+ // many 64-bit platforms have this operation, but they tend to have hardware
+ // floating-point, so we don't bother with a special case for them here.
+ // Each of the component 32x32 -> 64 products
+ const plolo: u64 = S.loWord(a) * S.loWord(b);
+ const plohi: u64 = S.loWord(a) * S.hiWord(b);
+ const philo: u64 = S.hiWord(a) * S.loWord(b);
+ const phihi: u64 = S.hiWord(a) * S.hiWord(b);
+ // Sum terms that contribute to lo in a way that allows us to get the carry
+ const r0: u64 = S.loWord(plolo);
+ const r1: u64 = S.hiWord(plolo) +% S.loWord(plohi) +% S.loWord(philo);
+ lo.* = r0 +% (r1 << 32);
+ // Sum terms contributing to hi with the carry from lo
+ hi.* = S.hiWord(plohi) +% S.hiWord(philo) +% S.hiWord(r1) +% phihi;
+ },
+ u128 => {
+ const Word_LoMask = @as(u64, 0x00000000ffffffff);
+ const Word_HiMask = @as(u64, 0xffffffff00000000);
+ const Word_FullMask = @as(u64, 0xffffffffffffffff);
+ const S = struct {
+ fn Word_1(x: u128) u64 {
+ return @truncate(u32, x >> 96);
+ }
+ fn Word_2(x: u128) u64 {
+ return @truncate(u32, x >> 64);
+ }
+ fn Word_3(x: u128) u64 {
+ return @truncate(u32, x >> 32);
+ }
+ fn Word_4(x: u128) u64 {
+ return @truncate(u32, x);
+ }
+ };
+ // 128x128 -> 256 wide multiply for platforms that don't have such an operation;
+ // many 64-bit platforms have this operation, but they tend to have hardware
+ // floating-point, so we don't bother with a special case for them here.
+
+ const product11: u64 = S.Word_1(a) * S.Word_1(b);
+ const product12: u64 = S.Word_1(a) * S.Word_2(b);
+ const product13: u64 = S.Word_1(a) * S.Word_3(b);
+ const product14: u64 = S.Word_1(a) * S.Word_4(b);
+ const product21: u64 = S.Word_2(a) * S.Word_1(b);
+ const product22: u64 = S.Word_2(a) * S.Word_2(b);
+ const product23: u64 = S.Word_2(a) * S.Word_3(b);
+ const product24: u64 = S.Word_2(a) * S.Word_4(b);
+ const product31: u64 = S.Word_3(a) * S.Word_1(b);
+ const product32: u64 = S.Word_3(a) * S.Word_2(b);
+ const product33: u64 = S.Word_3(a) * S.Word_3(b);
+ const product34: u64 = S.Word_3(a) * S.Word_4(b);
+ const product41: u64 = S.Word_4(a) * S.Word_1(b);
+ const product42: u64 = S.Word_4(a) * S.Word_2(b);
+ const product43: u64 = S.Word_4(a) * S.Word_3(b);
+ const product44: u64 = S.Word_4(a) * S.Word_4(b);
+
+ const sum0: u128 = @as(u128, product44);
+ const sum1: u128 = @as(u128, product34) +%
+ @as(u128, product43);
+ const sum2: u128 = @as(u128, product24) +%
+ @as(u128, product33) +%
+ @as(u128, product42);
+ const sum3: u128 = @as(u128, product14) +%
+ @as(u128, product23) +%
+ @as(u128, product32) +%
+ @as(u128, product41);
+ const sum4: u128 = @as(u128, product13) +%
+ @as(u128, product22) +%
+ @as(u128, product31);
+ const sum5: u128 = @as(u128, product12) +%
+ @as(u128, product21);
+ const sum6: u128 = @as(u128, product11);
+
+ const r0: u128 = (sum0 & Word_FullMask) +%
+ ((sum1 & Word_LoMask) << 32);
+ const r1: u128 = (sum0 >> 64) +%
+ ((sum1 >> 32) & Word_FullMask) +%
+ (sum2 & Word_FullMask) +%
+ ((sum3 << 32) & Word_HiMask);
+
+ lo.* = r0 +% (r1 << 64);
+ hi.* = (r1 >> 64) +%
+ (sum1 >> 96) +%
+ (sum2 >> 64) +%
+ (sum3 >> 32) +%
+ sum4 +%
+ (sum5 << 32) +%
+ (sum6 << 64);
+ },
+ else => @compileError("unsupported"),
+ }
+}
+
+pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
+ const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
+ const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T);
+
+ const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
+ significand.* <<= @intCast(std.math.Log2Int(Z), shift);
+ return @as(i32, 1) - shift;
+}
+
+pub inline fn fneg(a: anytype) @TypeOf(a) {
+ const F = @TypeOf(a);
+ const bits = @typeInfo(F).Float.bits;
+ const U = @Type(.{ .Int = .{
+ .signedness = .unsigned,
+ .bits = bits,
+ } });
+ const sign_bit_mask = @as(U, 1) << (bits - 1);
+ const negated = @bitCast(U, a) ^ sign_bit_mask;
+ return @bitCast(F, negated);
+}
diff --git a/lib/std/special/compiler_rt/comparedf2_test.zig b/lib/compiler_rt/comparedf2_test.zig
similarity index 74%
rename from lib/std/special/compiler_rt/comparedf2_test.zig
rename to lib/compiler_rt/comparedf2_test.zig
index a80297ffbf..a77718e57c 100644
--- a/lib/std/special/compiler_rt/comparedf2_test.zig
+++ b/lib/compiler_rt/comparedf2_test.zig
@@ -6,7 +6,15 @@ const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
-const comparedf2 = @import("compareXf2.zig");
+const __eqdf2 = @import("./cmpdf2.zig").__eqdf2;
+const __ledf2 = @import("./cmpdf2.zig").__ledf2;
+const __ltdf2 = @import("./cmpdf2.zig").__ltdf2;
+const __nedf2 = @import("./cmpdf2.zig").__nedf2;
+
+const __gedf2 = @import("./gedf2.zig").__gedf2;
+const __gtdf2 = @import("./gedf2.zig").__gtdf2;
+
+const __unorddf2 = @import("./unorddf2.zig").__unorddf2;
const TestVector = struct {
a: f64,
@@ -21,25 +29,25 @@ const TestVector = struct {
};
fn test__cmpdf2(vector: TestVector) bool {
- if (comparedf2.__eqdf2(vector.a, vector.b) != vector.eqReference) {
+ if (__eqdf2(vector.a, vector.b) != vector.eqReference) {
return false;
}
- if (comparedf2.__gedf2(vector.a, vector.b) != vector.geReference) {
+ if (__gedf2(vector.a, vector.b) != vector.geReference) {
return false;
}
- if (comparedf2.__gtdf2(vector.a, vector.b) != vector.gtReference) {
+ if (__gtdf2(vector.a, vector.b) != vector.gtReference) {
return false;
}
- if (comparedf2.__ledf2(vector.a, vector.b) != vector.leReference) {
+ if (__ledf2(vector.a, vector.b) != vector.leReference) {
return false;
}
- if (comparedf2.__ltdf2(vector.a, vector.b) != vector.ltReference) {
+ if (__ltdf2(vector.a, vector.b) != vector.ltReference) {
return false;
}
- if (comparedf2.__nedf2(vector.a, vector.b) != vector.neReference) {
+ if (__nedf2(vector.a, vector.b) != vector.neReference) {
return false;
}
- if (comparedf2.__unorddf2(vector.a, vector.b) != vector.unReference) {
+ if (__unorddf2(vector.a, vector.b) != vector.unReference) {
return false;
}
return true;
diff --git a/lib/compiler_rt/comparef.zig b/lib/compiler_rt/comparef.zig
new file mode 100644
index 0000000000..1fb6d2dfa0
--- /dev/null
+++ b/lib/compiler_rt/comparef.zig
@@ -0,0 +1,118 @@
+const std = @import("std");
+
+pub const LE = enum(i32) {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+
+ const Unordered: LE = .Greater;
+};
+
+pub const GE = enum(i32) {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+
+ const Unordered: GE = .Less;
+};
+
+pub inline fn cmpf2(comptime T: type, comptime RT: type, a: T, b: T) RT {
+ const bits = @typeInfo(T).Float.bits;
+ const srep_t = std.meta.Int(.signed, bits);
+ const rep_t = std.meta.Int(.unsigned, bits);
+
+ const significandBits = std.math.floatMantissaBits(T);
+ const exponentBits = std.math.floatExponentBits(T);
+ const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
+ const absMask = signBit - 1;
+ const infT = comptime std.math.inf(T);
+ const infRep = @bitCast(rep_t, infT);
+
+ const aInt = @bitCast(srep_t, a);
+ const bInt = @bitCast(srep_t, b);
+ const aAbs = @bitCast(rep_t, aInt) & absMask;
+ const bAbs = @bitCast(rep_t, bInt) & absMask;
+
+ // If either a or b is NaN, they are unordered.
+ if (aAbs > infRep or bAbs > infRep) return RT.Unordered;
+
+ // If a and b are both zeros, they are equal.
+ if ((aAbs | bAbs) == 0) return .Equal;
+
+ // If at least one of a and b is positive, we get the same result comparing
+ // a and b as signed integers as we would with a floating-point compare.
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt) {
+ return .Less;
+ } else if (aInt == bInt) {
+ return .Equal;
+ } else return .Greater;
+ } else {
+ // Otherwise, both are negative, so we need to flip the sense of the
+ // comparison to get the correct result. (This assumes a twos- or ones-
+ // complement integer representation; if integers are represented in a
+ // sign-magnitude representation, then this flip is incorrect).
+ if (aInt > bInt) {
+ return .Less;
+ } else if (aInt == bInt) {
+ return .Equal;
+ } else return .Greater;
+ }
+}
+
+pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
+ const a_rep = std.math.break_f80(a);
+ const b_rep = std.math.break_f80(b);
+ const sig_bits = std.math.floatMantissaBits(f80);
+ const int_bit = 0x8000000000000000;
+ const sign_bit = 0x8000;
+ const special_exp = 0x7FFF;
+
+ // If either a or b is NaN, they are unordered.
+ if ((a_rep.exp & special_exp == special_exp and a_rep.fraction ^ int_bit != 0) or
+ (b_rep.exp & special_exp == special_exp and b_rep.fraction ^ int_bit != 0))
+ return RT.Unordered;
+
+ // If a and b are both zeros, they are equal.
+ if ((a_rep.fraction | b_rep.fraction) | ((a_rep.exp | b_rep.exp) & special_exp) == 0)
+ return .Equal;
+
+ if (@boolToInt(a_rep.exp == b_rep.exp) & @boolToInt(a_rep.fraction == b_rep.fraction) != 0) {
+ return .Equal;
+ } else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) {
+ // signs are different
+ if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) {
+ return .Less;
+ } else {
+ return .Greater;
+ }
+ } else {
+ const a_fraction = a_rep.fraction | (@as(u80, a_rep.exp) << sig_bits);
+ const b_fraction = b_rep.fraction | (@as(u80, b_rep.exp) << sig_bits);
+ if (a_fraction < b_fraction) {
+ return .Less;
+ } else {
+ return .Greater;
+ }
+ }
+}
+
+pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
+ const rep_t = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
+
+ const significandBits = std.math.floatMantissaBits(T);
+ const exponentBits = std.math.floatExponentBits(T);
+ const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
+ const absMask = signBit - 1;
+ const infRep = @bitCast(rep_t, std.math.inf(T));
+
+ const aAbs: rep_t = @bitCast(rep_t, a) & absMask;
+ const bAbs: rep_t = @bitCast(rep_t, b) & absMask;
+
+ return @boolToInt(aAbs > infRep or bAbs > infRep);
+}
+
+test {
+ _ = @import("comparesf2_test.zig");
+ _ = @import("comparedf2_test.zig");
+}
diff --git a/lib/std/special/compiler_rt/comparesf2_test.zig b/lib/compiler_rt/comparesf2_test.zig
similarity index 73%
rename from lib/std/special/compiler_rt/comparesf2_test.zig
rename to lib/compiler_rt/comparesf2_test.zig
index 8bc2c67956..b2fafd38dd 100644
--- a/lib/std/special/compiler_rt/comparesf2_test.zig
+++ b/lib/compiler_rt/comparesf2_test.zig
@@ -6,7 +6,15 @@ const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
-const comparesf2 = @import("compareXf2.zig");
+const __eqsf2 = @import("./cmpsf2.zig").__eqsf2;
+const __lesf2 = @import("./cmpsf2.zig").__lesf2;
+const __ltsf2 = @import("./cmpsf2.zig").__ltsf2;
+const __nesf2 = @import("./cmpsf2.zig").__nesf2;
+
+const __gesf2 = @import("./gesf2.zig").__gesf2;
+const __gtsf2 = @import("./gesf2.zig").__gtsf2;
+
+const __unordsf2 = @import("./unordsf2.zig").__unordsf2;
const TestVector = struct {
a: f32,
@@ -21,25 +29,25 @@ const TestVector = struct {
};
fn test__cmpsf2(vector: TestVector) bool {
- if (comparesf2.__eqsf2(vector.a, vector.b) != vector.eqReference) {
+ if (__eqsf2(vector.a, vector.b) != vector.eqReference) {
return false;
}
- if (comparesf2.__gesf2(vector.a, vector.b) != vector.geReference) {
+ if (__gesf2(vector.a, vector.b) != vector.geReference) {
return false;
}
- if (comparesf2.__gtsf2(vector.a, vector.b) != vector.gtReference) {
+ if (__gtsf2(vector.a, vector.b) != vector.gtReference) {
return false;
}
- if (comparesf2.__lesf2(vector.a, vector.b) != vector.leReference) {
+ if (__lesf2(vector.a, vector.b) != vector.leReference) {
return false;
}
- if (comparesf2.__ltsf2(vector.a, vector.b) != vector.ltReference) {
+ if (__ltsf2(vector.a, vector.b) != vector.ltReference) {
return false;
}
- if (comparesf2.__nesf2(vector.a, vector.b) != vector.neReference) {
+ if (__nesf2(vector.a, vector.b) != vector.neReference) {
return false;
}
- if (comparesf2.__unordsf2(vector.a, vector.b) != vector.unReference) {
+ if (__unordsf2(vector.a, vector.b) != vector.unReference) {
return false;
}
return true;
diff --git a/lib/compiler_rt/cos.zig b/lib/compiler_rt/cos.zig
new file mode 100644
index 0000000000..311d927168
--- /dev/null
+++ b/lib/compiler_rt/cos.zig
@@ -0,0 +1,170 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const math = std.math;
+const expect = std.testing.expect;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+const trig = @import("trig.zig");
+const rem_pio2 = @import("rem_pio2.zig").rem_pio2;
+const rem_pio2f = @import("rem_pio2f.zig").rem_pio2f;
+
+comptime {
+ @export(__cosh, .{ .name = "__cosh", .linkage = common.linkage });
+ @export(cosf, .{ .name = "cosf", .linkage = common.linkage });
+ @export(cos, .{ .name = "cos", .linkage = common.linkage });
+ @export(__cosx, .{ .name = "__cosx", .linkage = common.linkage });
+ const cosq_sym_name = if (common.want_ppc_abi) "cosf128" else "cosq";
+ @export(cosq, .{ .name = cosq_sym_name, .linkage = common.linkage });
+ @export(cosl, .{ .name = "cosl", .linkage = common.linkage });
+}
+
+pub fn __cosh(a: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, cosf(a));
+}
+
+pub fn cosf(x: f32) callconv(.C) f32 {
+ // Small multiples of pi/2 rounded to double precision.
+ const c1pio2: f64 = 1.0 * math.pi / 2.0; // 0x3FF921FB, 0x54442D18
+ const c2pio2: f64 = 2.0 * math.pi / 2.0; // 0x400921FB, 0x54442D18
+ const c3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2
+ const c4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18
+
+ var ix = @bitCast(u32, x);
+ const sign = ix >> 31 != 0;
+ ix &= 0x7fffffff;
+
+ if (ix <= 0x3f490fda) { // |x| ~<= pi/4
+ if (ix < 0x39800000) { // |x| < 2**-12
+ // raise inexact if x != 0
+ math.doNotOptimizeAway(x + 0x1p120);
+ return 1.0;
+ }
+ return trig.__cosdf(x);
+ }
+ if (ix <= 0x407b53d1) { // |x| ~<= 5*pi/4
+ if (ix > 0x4016cbe3) { // |x| ~> 3*pi/4
+ return -trig.__cosdf(if (sign) x + c2pio2 else x - c2pio2);
+ } else {
+ if (sign) {
+ return trig.__sindf(x + c1pio2);
+ } else {
+ return trig.__sindf(c1pio2 - x);
+ }
+ }
+ }
+ if (ix <= 0x40e231d5) { // |x| ~<= 9*pi/4
+ if (ix > 0x40afeddf) { // |x| ~> 7*pi/4
+ return trig.__cosdf(if (sign) x + c4pio2 else x - c4pio2);
+ } else {
+ if (sign) {
+ return trig.__sindf(-x - c3pio2);
+ } else {
+ return trig.__sindf(x - c3pio2);
+ }
+ }
+ }
+
+ // cos(Inf or NaN) is NaN
+ if (ix >= 0x7f800000) {
+ return x - x;
+ }
+
+ var y: f64 = undefined;
+ const n = rem_pio2f(x, &y);
+ return switch (n & 3) {
+ 0 => trig.__cosdf(y),
+ 1 => trig.__sindf(-y),
+ 2 => -trig.__cosdf(y),
+ else => trig.__sindf(y),
+ };
+}
+
+pub fn cos(x: f64) callconv(.C) f64 {
+ var ix = @bitCast(u64, x) >> 32;
+ ix &= 0x7fffffff;
+
+ // |x| ~< pi/4
+ if (ix <= 0x3fe921fb) {
+ if (ix < 0x3e46a09e) { // |x| < 2**-27 * sqrt(2)
+ // raise inexact if x!=0
+ math.doNotOptimizeAway(x + 0x1p120);
+ return 1.0;
+ }
+ return trig.__cos(x, 0);
+ }
+
+ // cos(Inf or NaN) is NaN
+ if (ix >= 0x7ff00000) {
+ return x - x;
+ }
+
+ var y: [2]f64 = undefined;
+ const n = rem_pio2(x, &y);
+ return switch (n & 3) {
+ 0 => trig.__cos(y[0], y[1]),
+ 1 => -trig.__sin(y[0], y[1], 1),
+ 2 => -trig.__cos(y[0], y[1]),
+ else => trig.__sin(y[0], y[1], 1),
+ };
+}
+
+pub fn __cosx(a: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, cosq(a));
+}
+
+pub fn cosq(a: f128) callconv(.C) f128 {
+ // TODO: more correct implementation
+ return cos(@floatCast(f64, a));
+}
+
+pub fn cosl(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __cosh(x),
+ 32 => return cosf(x),
+ 64 => return cos(x),
+ 80 => return __cosx(x),
+ 128 => return cosq(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+test "cos32" {
+ const epsilon = 0.00001;
+
+ try expect(math.approxEqAbs(f32, cosf(0.0), 1.0, epsilon));
+ try expect(math.approxEqAbs(f32, cosf(0.2), 0.980067, epsilon));
+ try expect(math.approxEqAbs(f32, cosf(0.8923), 0.627623, epsilon));
+ try expect(math.approxEqAbs(f32, cosf(1.5), 0.070737, epsilon));
+ try expect(math.approxEqAbs(f32, cosf(-1.5), 0.070737, epsilon));
+ try expect(math.approxEqAbs(f32, cosf(37.45), 0.969132, epsilon));
+ try expect(math.approxEqAbs(f32, cosf(89.123), 0.400798, epsilon));
+}
+
+test "cos64" {
+ const epsilon = 0.000001;
+
+ try expect(math.approxEqAbs(f64, cos(0.0), 1.0, epsilon));
+ try expect(math.approxEqAbs(f64, cos(0.2), 0.980067, epsilon));
+ try expect(math.approxEqAbs(f64, cos(0.8923), 0.627623, epsilon));
+ try expect(math.approxEqAbs(f64, cos(1.5), 0.070737, epsilon));
+ try expect(math.approxEqAbs(f64, cos(-1.5), 0.070737, epsilon));
+ try expect(math.approxEqAbs(f64, cos(37.45), 0.969132, epsilon));
+ try expect(math.approxEqAbs(f64, cos(89.123), 0.40080, epsilon));
+}
+
+test "cos32.special" {
+ try expect(math.isNan(cosf(math.inf(f32))));
+ try expect(math.isNan(cosf(-math.inf(f32))));
+ try expect(math.isNan(cosf(math.nan(f32))));
+}
+
+test "cos64.special" {
+ try expect(math.isNan(cos(math.inf(f64))));
+ try expect(math.isNan(cos(-math.inf(f64))));
+ try expect(math.isNan(cos(math.nan(f64))));
+}
diff --git a/lib/std/special/compiler_rt/count0bits.zig b/lib/compiler_rt/count0bits.zig
similarity index 89%
rename from lib/std/special/compiler_rt/count0bits.zig
rename to lib/compiler_rt/count0bits.zig
index 1f6d28ae0b..d763e5c8a3 100644
--- a/lib/std/special/compiler_rt/count0bits.zig
+++ b/lib/compiler_rt/count0bits.zig
@@ -1,5 +1,21 @@
const std = @import("std");
const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__clzsi2, .{ .name = "__clzsi2", .linkage = common.linkage });
+ @export(__clzdi2, .{ .name = "__clzdi2", .linkage = common.linkage });
+ @export(__clzti2, .{ .name = "__clzti2", .linkage = common.linkage });
+ @export(__ctzsi2, .{ .name = "__ctzsi2", .linkage = common.linkage });
+ @export(__ctzdi2, .{ .name = "__ctzdi2", .linkage = common.linkage });
+ @export(__ctzti2, .{ .name = "__ctzti2", .linkage = common.linkage });
+ @export(__ffssi2, .{ .name = "__ffssi2", .linkage = common.linkage });
+ @export(__ffsdi2, .{ .name = "__ffsdi2", .linkage = common.linkage });
+ @export(__ffsti2, .{ .name = "__ffsti2", .linkage = common.linkage });
+}
// clz - count leading zeroes
// - clzXi2 for unoptimized little and big endian
@@ -15,8 +31,6 @@ const builtin = @import("builtin");
// - ffsXi2 for unoptimized little and big endian
inline fn clzXi2(comptime T: type, a: T) i32 {
- @setRuntimeSafety(builtin.is_test);
-
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
@@ -154,8 +168,6 @@ pub fn __clzti2(a: i128) callconv(.C) i32 {
}
inline fn ctzXi2(comptime T: type, a: T) i32 {
- @setRuntimeSafety(builtin.is_test);
-
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
@@ -191,8 +203,6 @@ pub fn __ctzti2(a: i128) callconv(.C) i32 {
}
inline fn ffsXi2(comptime T: type, a: T) i32 {
- @setRuntimeSafety(builtin.is_test);
-
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
diff --git a/lib/std/special/compiler_rt/ctzdi2_test.zig b/lib/compiler_rt/ctzdi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/ctzdi2_test.zig
rename to lib/compiler_rt/ctzdi2_test.zig
diff --git a/lib/std/special/compiler_rt/ctzsi2_test.zig b/lib/compiler_rt/ctzsi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/ctzsi2_test.zig
rename to lib/compiler_rt/ctzsi2_test.zig
diff --git a/lib/std/special/compiler_rt/ctzti2_test.zig b/lib/compiler_rt/ctzti2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/ctzti2_test.zig
rename to lib/compiler_rt/ctzti2_test.zig
diff --git a/lib/std/special/compiler_rt/divdf3.zig b/lib/compiler_rt/divdf3.zig
similarity index 60%
rename from lib/std/special/compiler_rt/divdf3.zig
rename to lib/compiler_rt/divdf3.zig
index 2148902de2..dd22f4836c 100644
--- a/lib/std/special/compiler_rt/divdf3.zig
+++ b/lib/compiler_rt/divdf3.zig
@@ -1,12 +1,35 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divdf3.c
+//! Ported from:
+//!
+//! https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divdf3.c
const std = @import("std");
const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const is_test = builtin.is_test;
+const common = @import("common.zig");
+
+const normalize = common.normalize;
+const wideMultiply = common.wideMultiply;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = common.linkage });
+ } else {
+ @export(__divdf3, .{ .name = "__divdf3", .linkage = common.linkage });
+ }
+}
pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
- @setRuntimeSafety(builtin.is_test);
+ return div(a, b);
+}
+
+fn __aeabi_ddiv(a: f64, b: f64) callconv(.AAPCS) f64 {
+ return div(a, b);
+}
+
+inline fn div(a: f64, b: f64) f64 {
const Z = std.meta.Int(.unsigned, 64);
const SignedZ = std.meta.Int(.signed, 64);
@@ -202,131 +225,6 @@ pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
}
}
-pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
- @setRuntimeSafety(builtin.is_test);
- switch (Z) {
- u32 => {
- // 32x32 --> 64 bit multiply
- const product = @as(u64, a) * @as(u64, b);
- hi.* = @truncate(u32, product >> 32);
- lo.* = @truncate(u32, product);
- },
- u64 => {
- const S = struct {
- fn loWord(x: u64) u64 {
- return @truncate(u32, x);
- }
- fn hiWord(x: u64) u64 {
- return @truncate(u32, x >> 32);
- }
- };
- // 64x64 -> 128 wide multiply for platforms that don't have such an operation;
- // many 64-bit platforms have this operation, but they tend to have hardware
- // floating-point, so we don't bother with a special case for them here.
- // Each of the component 32x32 -> 64 products
- const plolo: u64 = S.loWord(a) * S.loWord(b);
- const plohi: u64 = S.loWord(a) * S.hiWord(b);
- const philo: u64 = S.hiWord(a) * S.loWord(b);
- const phihi: u64 = S.hiWord(a) * S.hiWord(b);
- // Sum terms that contribute to lo in a way that allows us to get the carry
- const r0: u64 = S.loWord(plolo);
- const r1: u64 = S.hiWord(plolo) +% S.loWord(plohi) +% S.loWord(philo);
- lo.* = r0 +% (r1 << 32);
- // Sum terms contributing to hi with the carry from lo
- hi.* = S.hiWord(plohi) +% S.hiWord(philo) +% S.hiWord(r1) +% phihi;
- },
- u128 => {
- const Word_LoMask = @as(u64, 0x00000000ffffffff);
- const Word_HiMask = @as(u64, 0xffffffff00000000);
- const Word_FullMask = @as(u64, 0xffffffffffffffff);
- const S = struct {
- fn Word_1(x: u128) u64 {
- return @truncate(u32, x >> 96);
- }
- fn Word_2(x: u128) u64 {
- return @truncate(u32, x >> 64);
- }
- fn Word_3(x: u128) u64 {
- return @truncate(u32, x >> 32);
- }
- fn Word_4(x: u128) u64 {
- return @truncate(u32, x);
- }
- };
- // 128x128 -> 256 wide multiply for platforms that don't have such an operation;
- // many 64-bit platforms have this operation, but they tend to have hardware
- // floating-point, so we don't bother with a special case for them here.
-
- const product11: u64 = S.Word_1(a) * S.Word_1(b);
- const product12: u64 = S.Word_1(a) * S.Word_2(b);
- const product13: u64 = S.Word_1(a) * S.Word_3(b);
- const product14: u64 = S.Word_1(a) * S.Word_4(b);
- const product21: u64 = S.Word_2(a) * S.Word_1(b);
- const product22: u64 = S.Word_2(a) * S.Word_2(b);
- const product23: u64 = S.Word_2(a) * S.Word_3(b);
- const product24: u64 = S.Word_2(a) * S.Word_4(b);
- const product31: u64 = S.Word_3(a) * S.Word_1(b);
- const product32: u64 = S.Word_3(a) * S.Word_2(b);
- const product33: u64 = S.Word_3(a) * S.Word_3(b);
- const product34: u64 = S.Word_3(a) * S.Word_4(b);
- const product41: u64 = S.Word_4(a) * S.Word_1(b);
- const product42: u64 = S.Word_4(a) * S.Word_2(b);
- const product43: u64 = S.Word_4(a) * S.Word_3(b);
- const product44: u64 = S.Word_4(a) * S.Word_4(b);
-
- const sum0: u128 = @as(u128, product44);
- const sum1: u128 = @as(u128, product34) +%
- @as(u128, product43);
- const sum2: u128 = @as(u128, product24) +%
- @as(u128, product33) +%
- @as(u128, product42);
- const sum3: u128 = @as(u128, product14) +%
- @as(u128, product23) +%
- @as(u128, product32) +%
- @as(u128, product41);
- const sum4: u128 = @as(u128, product13) +%
- @as(u128, product22) +%
- @as(u128, product31);
- const sum5: u128 = @as(u128, product12) +%
- @as(u128, product21);
- const sum6: u128 = @as(u128, product11);
-
- const r0: u128 = (sum0 & Word_FullMask) +%
- ((sum1 & Word_LoMask) << 32);
- const r1: u128 = (sum0 >> 64) +%
- ((sum1 >> 32) & Word_FullMask) +%
- (sum2 & Word_FullMask) +%
- ((sum3 << 32) & Word_HiMask);
-
- lo.* = r0 +% (r1 << 64);
- hi.* = (r1 >> 64) +%
- (sum1 >> 96) +%
- (sum2 >> 64) +%
- (sum3 >> 32) +%
- sum4 +%
- (sum5 << 32) +%
- (sum6 << 64);
- },
- else => @compileError("unsupported"),
- }
-}
-
-pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
- @setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
- const significandBits = std.math.floatMantissaBits(T);
- const implicitBit = @as(Z, 1) << significandBits;
-
- const shift = @clz(Z, significand.*) - @clz(Z, implicitBit);
- significand.* <<= @intCast(std.math.Log2Int(Z), shift);
- return 1 - shift;
-}
-
-pub fn __aeabi_ddiv(a: f64, b: f64) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __divdf3, .{ a, b });
-}
-
test {
_ = @import("divdf3_test.zig");
}
diff --git a/lib/std/special/compiler_rt/divdf3_test.zig b/lib/compiler_rt/divdf3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/divdf3_test.zig
rename to lib/compiler_rt/divdf3_test.zig
diff --git a/lib/std/special/compiler_rt/divsf3.zig b/lib/compiler_rt/divsf3.zig
similarity index 90%
rename from lib/std/special/compiler_rt/divsf3.zig
rename to lib/compiler_rt/divsf3.zig
index 5e7dc7bb44..13565f9b64 100644
--- a/lib/std/special/compiler_rt/divsf3.zig
+++ b/lib/compiler_rt/divsf3.zig
@@ -1,12 +1,33 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divsf3.c
+//! Ported from:
+//!
+//! https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divsf3.c
const std = @import("std");
const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+
+const common = @import("common.zig");
+const normalize = common.normalize;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = common.linkage });
+ } else {
+ @export(__divsf3, .{ .name = "__divsf3", .linkage = common.linkage });
+ }
+}
pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
- @setRuntimeSafety(builtin.is_test);
+ return div(a, b);
+}
+
+fn __aeabi_fdiv(a: f32, b: f32) callconv(.AAPCS) f32 {
+ return div(a, b);
+}
+
+inline fn div(a: f32, b: f32) f32 {
const Z = std.meta.Int(.unsigned, 32);
const significandBits = std.math.floatMantissaBits(f32);
@@ -184,22 +205,6 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
}
}
-fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
- @setRuntimeSafety(builtin.is_test);
- const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
- const significandBits = std.math.floatMantissaBits(T);
- const implicitBit = @as(Z, 1) << significandBits;
-
- const shift = @clz(Z, significand.*) - @clz(Z, implicitBit);
- significand.* <<= @intCast(std.math.Log2Int(Z), shift);
- return 1 - shift;
-}
-
-pub fn __aeabi_fdiv(a: f32, b: f32) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __divsf3, .{ a, b });
-}
-
test {
_ = @import("divsf3_test.zig");
}
diff --git a/lib/std/special/compiler_rt/divsf3_test.zig b/lib/compiler_rt/divsf3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/divsf3_test.zig
rename to lib/compiler_rt/divsf3_test.zig
diff --git a/lib/std/special/compiler_rt/divtf3.zig b/lib/compiler_rt/divtf3.zig
similarity index 92%
rename from lib/std/special/compiler_rt/divtf3.zig
rename to lib/compiler_rt/divtf3.zig
index fc26c60266..b6cabeab91 100644
--- a/lib/std/special/compiler_rt/divtf3.zig
+++ b/lib/compiler_rt/divtf3.zig
@@ -1,11 +1,35 @@
const std = @import("std");
const builtin = @import("builtin");
-const normalize = @import("divdf3.zig").normalize;
-const wideMultiply = @import("divdf3.zig").wideMultiply;
+const common = @import("common.zig");
+const normalize = common.normalize;
+const wideMultiply = common.wideMultiply;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__divkf3, .{ .name = "__divkf3", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_div, .{ .name = "_Qp_div", .linkage = common.linkage });
+ } else {
+ @export(__divtf3, .{ .name = "__divtf3", .linkage = common.linkage });
+ }
+}
pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
- @setRuntimeSafety(builtin.is_test);
+ return div(a, b);
+}
+
+fn __divkf3(a: f128, b: f128) callconv(.C) f128 {
+ return div(a, b);
+}
+
+fn _Qp_div(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
+ c.* = div(a.*, b.*);
+}
+
+inline fn div(a: f128, b: f128) f128 {
const Z = std.meta.Int(.unsigned, 128);
const significandBits = std.math.floatMantissaBits(f128);
diff --git a/lib/std/special/compiler_rt/divtf3_test.zig b/lib/compiler_rt/divtf3_test.zig
similarity index 85%
rename from lib/std/special/compiler_rt/divtf3_test.zig
rename to lib/compiler_rt/divtf3_test.zig
index f426f827e8..62204057d4 100644
--- a/lib/std/special/compiler_rt/divtf3_test.zig
+++ b/lib/compiler_rt/divtf3_test.zig
@@ -34,8 +34,12 @@ test "divtf3" {
try test__divtf3(math.qnan_f128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0);
// NaN / any = NaN
try test__divtf3(math.nan_f128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0);
- // inf / any = inf
- try test__divtf3(math.inf_f128, 0x1.23456789abcdefp+5, 0x7fff000000000000, 0);
+ // inf / any(except inf and nan) = inf
+ try test__divtf3(math.inf(f128), 0x1.23456789abcdefp+5, 0x7fff000000000000, 0);
+ // inf / inf = nan
+ try test__divtf3(math.inf(f128), math.inf(f128), 0x7fff800000000000, 0);
+ // inf / nan = nan
+ try test__divtf3(math.inf(f128), math.nan(f128), 0x7fff800000000000, 0);
try test__divtf3(0x1.a23b45362464523375893ab4cdefp+5, 0x1.eedcbaba3a94546558237654321fp-1, 0x4004b0b72924d407, 0x0717e84356c6eba2);
try test__divtf3(0x1.a2b34c56d745382f9abf2c3dfeffp-50, 0x1.ed2c3ba15935332532287654321fp-9, 0x3fd5b2af3f828c9b, 0x40e51f64cde8b1f2);
diff --git a/lib/compiler_rt/divti3.zig b/lib/compiler_rt/divti3.zig
new file mode 100644
index 0000000000..b99a9081a4
--- /dev/null
+++ b/lib/compiler_rt/divti3.zig
@@ -0,0 +1,54 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const udivmod = @import("udivmod.zig").udivmod;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (builtin.os.tag == .windows) {
+ switch (arch) {
+ .i386 => {
+ @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = common.linkage });
+ },
+ else => {},
+ }
+ if (arch.isAARCH64()) {
+ @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
+ }
+ } else {
+ @export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
+ }
+}
+
+pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
+ return div(a, b);
+}
+
+const v128 = @import("std").meta.Vector(2, u64);
+
+fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
+ return @bitCast(v128, div(@bitCast(i128, a), @bitCast(i128, b)));
+}
+
+inline fn div(a: i128, b: i128) i128 {
+ const s_a = a >> (128 - 1);
+ const s_b = b >> (128 - 1);
+
+ const an = (a ^ s_a) -% s_a;
+ const bn = (b ^ s_b) -% s_b;
+
+ const r = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), null);
+ const s = s_a ^ s_b;
+ return (@bitCast(i128, r) ^ s) -% s;
+}
+
+test {
+ _ = @import("divti3_test.zig");
+}
diff --git a/lib/std/special/compiler_rt/divti3_test.zig b/lib/compiler_rt/divti3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/divti3_test.zig
rename to lib/compiler_rt/divti3_test.zig
diff --git a/lib/compiler_rt/divxf3.zig b/lib/compiler_rt/divxf3.zig
new file mode 100644
index 0000000000..b8d27a6da0
--- /dev/null
+++ b/lib/compiler_rt/divxf3.zig
@@ -0,0 +1,210 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+
+const common = @import("common.zig");
+const normalize = common.normalize;
+const wideMultiply = common.wideMultiply;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__divxf3, .{ .name = "__divxf3", .linkage = common.linkage });
+}
+
+pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
+ const T = f80;
+ const Z = std.meta.Int(.unsigned, @bitSizeOf(T));
+
+ const significandBits = std.math.floatMantissaBits(T);
+ const fractionalBits = std.math.floatFractionalBits(T);
+ const exponentBits = std.math.floatExponentBits(T);
+
+ const signBit = (@as(Z, 1) << (significandBits + exponentBits));
+ const maxExponent = ((1 << exponentBits) - 1);
+ const exponentBias = (maxExponent >> 1);
+
+ const integerBit = (@as(Z, 1) << fractionalBits);
+ const quietBit = integerBit >> 1;
+ const significandMask = (@as(Z, 1) << significandBits) - 1;
+
+ const absMask = signBit - 1;
+ const qnanRep = @bitCast(Z, std.math.nan(T)) | quietBit;
+ const infRep = @bitCast(Z, std.math.inf(T));
+
+ const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent);
+ const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent);
+ const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit;
+
+ var aSignificand: Z = @bitCast(Z, a) & significandMask;
+ var bSignificand: Z = @bitCast(Z, b) & significandMask;
+ var scale: i32 = 0;
+
+ // Detect if a or b is zero, denormal, infinity, or NaN.
+ if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
+ const aAbs: Z = @bitCast(Z, a) & absMask;
+ const bAbs: Z = @bitCast(Z, b) & absMask;
+
+ // NaN / anything = qNaN
+ if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit);
+ // anything / NaN = qNaN
+ if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit);
+
+ if (aAbs == infRep) {
+ // infinity / infinity = NaN
+ if (bAbs == infRep) {
+ return @bitCast(T, qnanRep);
+ }
+ // infinity / anything else = +/- infinity
+ else {
+ return @bitCast(T, aAbs | quotientSign);
+ }
+ }
+
+ // anything else / infinity = +/- 0
+ if (bAbs == infRep) return @bitCast(T, quotientSign);
+
+ if (aAbs == 0) {
+ // zero / zero = NaN
+ if (bAbs == 0) {
+ return @bitCast(T, qnanRep);
+ }
+ // zero / anything else = +/- zero
+ else {
+ return @bitCast(T, quotientSign);
+ }
+ }
+ // anything else / zero = +/- infinity
+ if (bAbs == 0) return @bitCast(T, infRep | quotientSign);
+
+ // one or both of a or b is denormal, the other (if applicable) is a
+ // normal number. Renormalize one or both of a and b, and set scale to
+ // include the necessary exponent adjustment.
+ if (aAbs < integerBit) scale +%= normalize(T, &aSignificand);
+ if (bAbs < integerBit) scale -%= normalize(T, &bSignificand);
+ }
+ var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale;
+
+ // Align the significand of b as a Q63 fixed-point number in the range
+ // [1, 2.0) and get a Q64 approximate reciprocal using a small minimax
+ // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
+ // is accurate to about 3.5 binary digits.
+ const q63b = @intCast(u64, bSignificand);
+ var recip64 = @as(u64, 0x7504f333F9DE6484) -% q63b;
+ // 0x7504f333F9DE6484 / 2^64 + 1 = 3/4 + 1/sqrt(2)
+
+ // Now refine the reciprocal estimate using a Newton-Raphson iteration:
+ //
+ // x1 = x0 * (2 - x0 * b)
+ //
+ // This doubles the number of correct binary digits in the approximation
+ // with each iteration.
+ var correction64: u64 = undefined;
+ correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
+ recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
+ correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
+ recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
+ correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
+ recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
+ correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
+ recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
+ correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
+ recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
+
+ // The reciprocal may have overflowed to zero if the upper half of b is
+ // exactly 1.0. This would sabotage the full-width final stage of the
+ // computation that follows, so we adjust the reciprocal down by one bit.
+ recip64 -%= 1;
+
+ // We need to perform one more iteration to get us to 112 binary digits;
+ // The last iteration needs to happen with extra precision.
+
+ // NOTE: This operation is equivalent to __multi3, which is not implemented
+ // in some architectures
+ var reciprocal: u128 = undefined;
+ var correction: u128 = undefined;
+ var dummy: u128 = undefined;
+ wideMultiply(u128, recip64, q63b, &dummy, &correction);
+
+ correction = -%correction;
+
+ const cHi = @truncate(u64, correction >> 64);
+ const cLo = @truncate(u64, correction);
+
+ var r64cH: u128 = undefined;
+ var r64cL: u128 = undefined;
+ wideMultiply(u128, recip64, cHi, &dummy, &r64cH);
+ wideMultiply(u128, recip64, cLo, &dummy, &r64cL);
+
+ reciprocal = r64cH + (r64cL >> 64);
+
+ // Adjust the final 128-bit reciprocal estimate downward to ensure that it
+ // is strictly smaller than the infinitely precise exact reciprocal. Because
+ // the computation of the Newton-Raphson step is truncating at every step,
+ // this adjustment is small; most of the work is already done.
+ reciprocal -%= 2;
+
+ // The numerical reciprocal is accurate to within 2^-112, lies in the
+ // interval [0.5, 1.0), and is strictly smaller than the true reciprocal
+ // of b. Multiplying a by this reciprocal thus gives a numerical q = a/b
+ // in Q127 with the following properties:
+ //
+ // 1. q < a/b
+ // 2. q is in the interval [0.5, 2.0)
+ // 3. The error in q is bounded away from 2^-63 (actually, we have
+ // many bits to spare, but this is all we need).
+
+ // We need a 128 x 128 multiply high to compute q.
+ var quotient128: u128 = undefined;
+ var quotientLo: u128 = undefined;
+ wideMultiply(u128, aSignificand << 2, reciprocal, "ient128, "ientLo);
+
+ // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
+ // Right shift the quotient if it falls in the [1,2) range and adjust the
+ // exponent accordingly.
+ var quotient: u64 = if (quotient128 < (integerBit << 1)) b: {
+ quotientExponent -= 1;
+ break :b @intCast(u64, quotient128);
+ } else @intCast(u64, quotient128 >> 1);
+
+ // We are going to compute a residual of the form
+ //
+ // r = a - q*b
+ //
+ // We know from the construction of q that r satisfies:
+ //
+ // 0 <= r < ulp(q)*b
+ //
+ // If r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
+ // already have the correct result. The exact halfway case cannot occur.
+ var residual: u64 = -%(quotient *% q63b);
+
+ const writtenExponent = quotientExponent + exponentBias;
+ if (writtenExponent >= maxExponent) {
+ // If we have overflowed the exponent, return infinity.
+ return @bitCast(T, infRep | quotientSign);
+ } else if (writtenExponent < 1) {
+ if (writtenExponent == 0) {
+ // Check whether the rounded result is normal.
+ if (residual > (bSignificand >> 1)) { // round
+ if (quotient == (integerBit - 1)) // If the rounded result is normal, return it
+ return @bitCast(T, @bitCast(Z, std.math.floatMin(T)) | quotientSign);
+ }
+ }
+ // Flush denormals to zero. In the future, it would be nice to add
+ // code to round them correctly.
+ return @bitCast(T, quotientSign);
+ } else {
+ const round = @boolToInt(residual > (bSignificand >> 1));
+ // Insert the exponent
+ var absResult = quotient | (@intCast(Z, writtenExponent) << significandBits);
+ // Round
+ absResult +%= round;
+ // Insert the sign and return
+ return @bitCast(T, absResult | quotientSign | integerBit);
+ }
+}
+
+test {
+ _ = @import("divxf3_test.zig");
+}
diff --git a/lib/compiler_rt/divxf3_test.zig b/lib/compiler_rt/divxf3_test.zig
new file mode 100644
index 0000000000..0ed2b74217
--- /dev/null
+++ b/lib/compiler_rt/divxf3_test.zig
@@ -0,0 +1,65 @@
+const std = @import("std");
+const math = std.math;
+const testing = std.testing;
+
+const __divxf3 = @import("divxf3.zig").__divxf3;
+
+fn compareResult(result: f80, expected: u80) bool {
+ const rep = @bitCast(u80, result);
+
+ if (rep == expected) return true;
+ // test other possible NaN representations (signal NaN)
+ if (math.isNan(result) and math.isNan(@bitCast(f80, expected))) return true;
+
+ return false;
+}
+
+fn expect__divxf3_result(a: f80, b: f80, expected: u80) !void {
+ const x = __divxf3(a, b);
+ const ret = compareResult(x, expected);
+ try testing.expect(ret == true);
+}
+
+fn test__divxf3(a: f80, b: f80) !void {
+ const integerBit = 1 << math.floatFractionalBits(f80);
+ const x = __divxf3(a, b);
+
+ // Next float (assuming normal, non-zero result)
+ const x_plus_eps = @bitCast(f80, (@bitCast(u80, x) + 1) | integerBit);
+ // Prev float (assuming normal, non-zero result)
+ const x_minus_eps = @bitCast(f80, (@bitCast(u80, x) - 1) | integerBit);
+
+ // Make sure result is more accurate than the adjacent floats
+ const err_x = @fabs(@mulAdd(f80, x, b, -a));
+ const err_x_plus_eps = @fabs(@mulAdd(f80, x_plus_eps, b, -a));
+ const err_x_minus_eps = @fabs(@mulAdd(f80, x_minus_eps, b, -a));
+
+ try testing.expect(err_x_minus_eps > err_x);
+ try testing.expect(err_x_plus_eps > err_x);
+}
+
+test "divxf3" {
+ // qNaN / any = qNaN
+ try expect__divxf3_result(math.qnan_f80, 0x1.23456789abcdefp+5, 0x7fffC000000000000000);
+ // NaN / any = NaN
+ try expect__divxf3_result(math.nan_f80, 0x1.23456789abcdefp+5, 0x7fffC000000000000000);
+ // inf / any(except inf and nan) = inf
+ try expect__divxf3_result(math.inf(f80), 0x1.23456789abcdefp+5, 0x7fff8000000000000000);
+ // inf / inf = nan
+ try expect__divxf3_result(math.inf(f80), math.inf(f80), 0x7fffC000000000000000);
+ // inf / nan = nan
+ try expect__divxf3_result(math.inf(f80), math.nan(f80), 0x7fffC000000000000000);
+
+ try test__divxf3(0x1.a23b45362464523375893ab4cdefp+5, 0x1.eedcbaba3a94546558237654321fp-1);
+ try test__divxf3(0x1.a2b34c56d745382f9abf2c3dfeffp-50, 0x1.ed2c3ba15935332532287654321fp-9);
+ try test__divxf3(0x1.2345f6aaaa786555f42432abcdefp+456, 0x1.edacbba9874f765463544dd3621fp+6400);
+ try test__divxf3(0x1.2d3456f789ba6322bc665544edefp-234, 0x1.eddcdba39f3c8b7a36564354321fp-4455);
+ try test__divxf3(0x1.2345f6b77b7a8953365433abcdefp+234, 0x1.edcba987d6bb3aa467754354321fp-4055);
+ try test__divxf3(0x1.a23b45362464523375893ab4cdefp+5, 0x1.a2b34c56d745382f9abf2c3dfeffp-50);
+ try test__divxf3(0x1.a23b45362464523375893ab4cdefp+5, 0x1.1234567890abcdef987654321123p0);
+ try test__divxf3(0x1.a23b45362464523375893ab4cdefp+5, 0x1.12394205810257120adae8929f23p+16);
+ try test__divxf3(0x1.a23b45362464523375893ab4cdefp+5, 0x1.febdcefa1231245f9abf2c3dfeffp-50);
+
+ // Result rounds down to zero
+ try expect__divxf3_result(6.72420628622418701252535563464350521E-4932, 2.0, 0x0);
+}
diff --git a/lib/std/special/compiler_rt/emutls.zig b/lib/compiler_rt/emutls.zig
similarity index 93%
rename from lib/std/special/compiler_rt/emutls.zig
rename to lib/compiler_rt/emutls.zig
index e6aa8930e9..723eac4af2 100644
--- a/lib/std/special/compiler_rt/emutls.zig
+++ b/lib/compiler_rt/emutls.zig
@@ -1,22 +1,26 @@
-// __emutls_get_address specific builtin
-//
-// derived work from LLVM Compiler Infrastructure - release 8.0 (MIT)
-// https://github.com/llvm-mirror/compiler-rt/blob/release_80/lib/builtins/emutls.c
-//
+//! __emutls_get_address specific builtin
+//!
+//! derived work from LLVM Compiler Infrastructure - release 8.0 (MIT)
+//! https://github.com/llvm-mirror/compiler-rt/blob/release_80/lib/builtins/emutls.c
const std = @import("std");
const builtin = @import("builtin");
+const common = @import("common.zig");
const abort = std.os.abort;
const assert = std.debug.assert;
const expect = std.testing.expect;
-// defined in C as:
-// typedef unsigned int gcc_word __attribute__((mode(word)));
+/// defined in C as:
+/// typedef unsigned int gcc_word __attribute__((mode(word)));
const gcc_word = usize;
+pub const panic = common.panic;
+
comptime {
- assert(builtin.link_libc);
+ if (builtin.link_libc and builtin.os.tag == .openbsd) {
+ @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = common.linkage });
+ }
}
/// public entrypoint for generated code using EmulatedTLS
@@ -319,6 +323,8 @@ const emutls_control = extern struct {
};
test "simple_allocator" {
+ if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
+
var data1: *[64]u8 = simple_allocator.alloc([64]u8);
defer simple_allocator.free(data1);
for (data1) |*c| {
@@ -333,6 +339,8 @@ test "simple_allocator" {
}
test "__emutls_get_address zeroed" {
+ if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
+
var ctl = emutls_control.init(usize, null);
try expect(ctl.object.index == 0);
@@ -352,6 +360,8 @@ test "__emutls_get_address zeroed" {
}
test "__emutls_get_address with default_value" {
+ if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
+
var value: usize = 5678; // default value
var ctl = emutls_control.init(usize, &value);
try expect(ctl.object.index == 0);
@@ -370,6 +380,8 @@ test "__emutls_get_address with default_value" {
}
test "test default_value with differents sizes" {
+ if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
+
const testType = struct {
fn _testType(comptime T: type, value: T) !void {
var def: T = value;
diff --git a/lib/std/math/exp.zig b/lib/compiler_rt/exp.zig
similarity index 64%
rename from lib/std/math/exp.zig
rename to lib/compiler_rt/exp.zig
index 71a492c7ad..f34f226be4 100644
--- a/lib/std/math/exp.zig
+++ b/lib/compiler_rt/exp.zig
@@ -4,25 +4,31 @@
// https://git.musl-libc.org/cgit/musl/tree/src/math/expf.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/exp.c
-const std = @import("../std.zig");
+const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
+const common = @import("common.zig");
-/// Returns e raised to the power of x (e^x).
-///
-/// Special Cases:
-/// - exp(+inf) = +inf
-/// - exp(nan) = nan
-pub fn exp(x: anytype) @TypeOf(x) {
- const T = @TypeOf(x);
- return switch (T) {
- f32 => exp32(x),
- f64 => exp64(x),
- else => @compileError("exp not implemented for " ++ @typeName(T)),
- };
+pub const panic = common.panic;
+
+comptime {
+ @export(__exph, .{ .name = "__exph", .linkage = common.linkage });
+ @export(expf, .{ .name = "expf", .linkage = common.linkage });
+ @export(exp, .{ .name = "exp", .linkage = common.linkage });
+ @export(__expx, .{ .name = "__expx", .linkage = common.linkage });
+ const expq_sym_name = if (common.want_ppc_abi) "expf128" else "expq";
+ @export(expq, .{ .name = expq_sym_name, .linkage = common.linkage });
+ @export(expl, .{ .name = "expl", .linkage = common.linkage });
}
-fn exp32(x_: f32) f32 {
+pub fn __exph(a: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, expf(a));
+}
+
+pub fn expf(x_: f32) callconv(.C) f32 {
const half = [_]f32{ 0.5, -0.5 };
const ln2hi = 6.9314575195e-1;
const ln2lo = 1.4286067653e-6;
@@ -97,7 +103,7 @@ fn exp32(x_: f32) f32 {
}
}
-fn exp64(x_: f64) f64 {
+pub fn exp(x_: f64) callconv(.C) f64 {
const half = [_]f64{ 0.5, -0.5 };
const ln2hi: f64 = 6.93147180369123816490e-01;
const ln2lo: f64 = 1.90821492927058770002e-10;
@@ -181,37 +187,53 @@ fn exp64(x_: f64) f64 {
}
}
-test "math.exp" {
- try expect(exp(@as(f32, 0.0)) == exp32(0.0));
- try expect(exp(@as(f64, 0.0)) == exp64(0.0));
+pub fn __expx(a: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, expq(a));
}
-test "math.exp32" {
+pub fn expq(a: f128) callconv(.C) f128 {
+ // TODO: more correct implementation
+ return exp(@floatCast(f64, a));
+}
+
+pub fn expl(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __exph(x),
+ 32 => return expf(x),
+ 64 => return exp(x),
+ 80 => return __expx(x),
+ 128 => return expq(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+test "exp32" {
const epsilon = 0.000001;
- try expect(exp32(0.0) == 1.0);
- try expect(math.approxEqAbs(f32, exp32(0.0), 1.0, epsilon));
- try expect(math.approxEqAbs(f32, exp32(0.2), 1.221403, epsilon));
- try expect(math.approxEqAbs(f32, exp32(0.8923), 2.440737, epsilon));
- try expect(math.approxEqAbs(f32, exp32(1.5), 4.481689, epsilon));
+ try expect(expf(0.0) == 1.0);
+ try expect(math.approxEqAbs(f32, expf(0.0), 1.0, epsilon));
+ try expect(math.approxEqAbs(f32, expf(0.2), 1.221403, epsilon));
+ try expect(math.approxEqAbs(f32, expf(0.8923), 2.440737, epsilon));
+ try expect(math.approxEqAbs(f32, expf(1.5), 4.481689, epsilon));
}
-test "math.exp64" {
+test "exp64" {
const epsilon = 0.000001;
- try expect(exp64(0.0) == 1.0);
- try expect(math.approxEqAbs(f64, exp64(0.0), 1.0, epsilon));
- try expect(math.approxEqAbs(f64, exp64(0.2), 1.221403, epsilon));
- try expect(math.approxEqAbs(f64, exp64(0.8923), 2.440737, epsilon));
- try expect(math.approxEqAbs(f64, exp64(1.5), 4.481689, epsilon));
+ try expect(exp(0.0) == 1.0);
+ try expect(math.approxEqAbs(f64, exp(0.0), 1.0, epsilon));
+ try expect(math.approxEqAbs(f64, exp(0.2), 1.221403, epsilon));
+ try expect(math.approxEqAbs(f64, exp(0.8923), 2.440737, epsilon));
+ try expect(math.approxEqAbs(f64, exp(1.5), 4.481689, epsilon));
}
-test "math.exp32.special" {
- try expect(math.isPositiveInf(exp32(math.inf(f32))));
- try expect(math.isNan(exp32(math.nan(f32))));
+test "exp32.special" {
+ try expect(math.isPositiveInf(expf(math.inf(f32))));
+ try expect(math.isNan(expf(math.nan(f32))));
}
-test "math.exp64.special" {
- try expect(math.isPositiveInf(exp64(math.inf(f64))));
- try expect(math.isNan(exp64(math.nan(f64))));
+test "exp64.special" {
+ try expect(math.isPositiveInf(exp(math.inf(f64))));
+ try expect(math.isNan(exp(math.nan(f64))));
}
diff --git a/lib/std/math/exp2.zig b/lib/compiler_rt/exp2.zig
similarity index 85%
rename from lib/std/math/exp2.zig
rename to lib/compiler_rt/exp2.zig
index 76530ec61f..e89a918501 100644
--- a/lib/std/math/exp2.zig
+++ b/lib/compiler_rt/exp2.zig
@@ -4,44 +4,31 @@
// https://git.musl-libc.org/cgit/musl/tree/src/math/exp2f.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/exp2.c
-const std = @import("../std.zig");
+const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
+const common = @import("common.zig");
-/// Returns 2 raised to the power of x (2^x).
-///
-/// Special Cases:
-/// - exp2(+inf) = +inf
-/// - exp2(nan) = nan
-pub fn exp2(x: anytype) @TypeOf(x) {
- const T = @TypeOf(x);
- return switch (T) {
- f32 => exp2_32(x),
- f64 => exp2_64(x),
- else => @compileError("exp2 not implemented for " ++ @typeName(T)),
- };
+pub const panic = common.panic;
+
+comptime {
+ @export(__exp2h, .{ .name = "__exp2h", .linkage = common.linkage });
+ @export(exp2f, .{ .name = "exp2f", .linkage = common.linkage });
+ @export(exp2, .{ .name = "exp2", .linkage = common.linkage });
+ @export(__exp2x, .{ .name = "__exp2x", .linkage = common.linkage });
+ const exp2q_sym_name = if (common.want_ppc_abi) "exp2f128" else "exp2q";
+ @export(exp2q, .{ .name = exp2q_sym_name, .linkage = common.linkage });
+ @export(exp2l, .{ .name = "exp2l", .linkage = common.linkage });
}
-const exp2ft = [_]f64{
- 0x1.6a09e667f3bcdp-1,
- 0x1.7a11473eb0187p-1,
- 0x1.8ace5422aa0dbp-1,
- 0x1.9c49182a3f090p-1,
- 0x1.ae89f995ad3adp-1,
- 0x1.c199bdd85529cp-1,
- 0x1.d5818dcfba487p-1,
- 0x1.ea4afa2a490dap-1,
- 0x1.0000000000000p+0,
- 0x1.0b5586cf9890fp+0,
- 0x1.172b83c7d517bp+0,
- 0x1.2387a6e756238p+0,
- 0x1.306fe0a31b715p+0,
- 0x1.3dea64c123422p+0,
- 0x1.4bfdad5362a27p+0,
- 0x1.5ab07dd485429p+0,
-};
+pub fn __exp2h(x: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, exp2f(x));
+}
-fn exp2_32(x: f32) f32 {
+pub fn exp2f(x: f32) callconv(.C) f32 {
const tblsiz = @intCast(u32, exp2ft.len);
const redux: f32 = 0x1.8p23 / @intToFloat(f32, tblsiz);
const P1: f32 = 0x1.62e430p-1;
@@ -98,6 +85,115 @@ fn exp2_32(x: f32) f32 {
return @floatCast(f32, r * uk);
}
+pub fn exp2(x: f64) callconv(.C) f64 {
+ const tblsiz: u32 = @intCast(u32, exp2dt.len / 2);
+ const redux: f64 = 0x1.8p52 / @intToFloat(f64, tblsiz);
+ const P1: f64 = 0x1.62e42fefa39efp-1;
+ const P2: f64 = 0x1.ebfbdff82c575p-3;
+ const P3: f64 = 0x1.c6b08d704a0a6p-5;
+ const P4: f64 = 0x1.3b2ab88f70400p-7;
+ const P5: f64 = 0x1.5d88003875c74p-10;
+
+ const ux = @bitCast(u64, x);
+ const ix = @intCast(u32, ux >> 32) & 0x7FFFFFFF;
+
+ // TODO: This should be handled beneath.
+ if (math.isNan(x)) {
+ return math.nan(f64);
+ }
+
+ // |x| >= 1022 or nan
+ if (ix >= 0x408FF000) {
+ // x >= 1024 or nan
+ if (ix >= 0x40900000 and ux >> 63 == 0) {
+ math.raiseOverflow();
+ return math.inf(f64);
+ }
+ // -inf or -nan
+ if (ix >= 0x7FF00000) {
+ return -1 / x;
+ }
+ // x <= -1022
+ if (ux >> 63 != 0) {
+ // underflow
+ if (x <= -1075 or x - 0x1.0p52 + 0x1.0p52 != x) {
+ math.doNotOptimizeAway(@floatCast(f32, -0x1.0p-149 / x));
+ }
+ if (x <= -1075) {
+ return 0;
+ }
+ }
+ }
+ // |x| < 0x1p-54
+ else if (ix < 0x3C900000) {
+ return 1.0 + x;
+ }
+
+ // NOTE: musl relies on unsafe behaviours which are replicated below
+ // (addition overflow, division truncation, casting). This appears to
+ // produce the intended result, but we should confirm how GCC/Clang
+ // handle this to be sure.
+
+ // reduce x
+ var uf: f64 = x + redux;
+ // NOTE: musl performs an implicit 64-bit to 32-bit u32 truncation here
+ var i_0: u32 = @truncate(u32, @bitCast(u64, uf));
+ i_0 +%= tblsiz / 2;
+
+ const k: u32 = i_0 / tblsiz * tblsiz;
+ const ik: i32 = @divTrunc(@bitCast(i32, k), tblsiz);
+ i_0 %= tblsiz;
+ uf -= redux;
+
+ // r = exp2(y) = exp2t[i_0] * p(z - eps[i])
+ var z: f64 = x - uf;
+ const t: f64 = exp2dt[@intCast(usize, 2 * i_0)];
+ z -= exp2dt[@intCast(usize, 2 * i_0 + 1)];
+ const r: f64 = t + t * z * (P1 + z * (P2 + z * (P3 + z * (P4 + z * P5))));
+
+ return math.scalbn(r, ik);
+}
+
+pub fn __exp2x(x: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, exp2q(x));
+}
+
+pub fn exp2q(x: f128) callconv(.C) f128 {
+ // TODO: more correct implementation
+ return exp2(@floatCast(f64, x));
+}
+
+pub fn exp2l(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __exp2h(x),
+ 32 => return exp2f(x),
+ 64 => return exp2(x),
+ 80 => return __exp2x(x),
+ 128 => return exp2q(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+const exp2ft = [_]f64{
+ 0x1.6a09e667f3bcdp-1,
+ 0x1.7a11473eb0187p-1,
+ 0x1.8ace5422aa0dbp-1,
+ 0x1.9c49182a3f090p-1,
+ 0x1.ae89f995ad3adp-1,
+ 0x1.c199bdd85529cp-1,
+ 0x1.d5818dcfba487p-1,
+ 0x1.ea4afa2a490dap-1,
+ 0x1.0000000000000p+0,
+ 0x1.0b5586cf9890fp+0,
+ 0x1.172b83c7d517bp+0,
+ 0x1.2387a6e756238p+0,
+ 0x1.306fe0a31b715p+0,
+ 0x1.3dea64c123422p+0,
+ 0x1.4bfdad5362a27p+0,
+ 0x1.5ab07dd485429p+0,
+};
+
const exp2dt = [_]f64{
// exp2(z + eps) eps
0x1.6a09e667f3d5dp-1, 0x1.9880p-44,
@@ -358,108 +454,34 @@ const exp2dt = [_]f64{
0x1.690f4b19e9471p+0, -0x1.9780p-45,
};
-fn exp2_64(x: f64) f64 {
- const tblsiz: u32 = @intCast(u32, exp2dt.len / 2);
- const redux: f64 = 0x1.8p52 / @intToFloat(f64, tblsiz);
- const P1: f64 = 0x1.62e42fefa39efp-1;
- const P2: f64 = 0x1.ebfbdff82c575p-3;
- const P3: f64 = 0x1.c6b08d704a0a6p-5;
- const P4: f64 = 0x1.3b2ab88f70400p-7;
- const P5: f64 = 0x1.5d88003875c74p-10;
-
- const ux = @bitCast(u64, x);
- const ix = @intCast(u32, ux >> 32) & 0x7FFFFFFF;
-
- // TODO: This should be handled beneath.
- if (math.isNan(x)) {
- return math.nan(f64);
- }
-
- // |x| >= 1022 or nan
- if (ix >= 0x408FF000) {
- // x >= 1024 or nan
- if (ix >= 0x40900000 and ux >> 63 == 0) {
- math.raiseOverflow();
- return math.inf(f64);
- }
- // -inf or -nan
- if (ix >= 0x7FF00000) {
- return -1 / x;
- }
- // x <= -1022
- if (ux >> 63 != 0) {
- // underflow
- if (x <= -1075 or x - 0x1.0p52 + 0x1.0p52 != x) {
- math.doNotOptimizeAway(@floatCast(f32, -0x1.0p-149 / x));
- }
- if (x <= -1075) {
- return 0;
- }
- }
- }
- // |x| < 0x1p-54
- else if (ix < 0x3C900000) {
- return 1.0 + x;
- }
-
- // NOTE: musl relies on unsafe behaviours which are replicated below
- // (addition overflow, division truncation, casting). Appears that this
- // produces the intended result but should confirm how GCC/Clang handle this
- // to ensure.
-
- // reduce x
- var uf: f64 = x + redux;
- // NOTE: musl performs an implicit 64-bit to 32-bit u32 truncation here
- var i_0: u32 = @truncate(u32, @bitCast(u64, uf));
- i_0 +%= tblsiz / 2;
-
- const k: u32 = i_0 / tblsiz * tblsiz;
- const ik: i32 = @divTrunc(@bitCast(i32, k), tblsiz);
- i_0 %= tblsiz;
- uf -= redux;
-
- // r = exp2(y) = exp2t[i_0] * p(z - eps[i])
- var z: f64 = x - uf;
- const t: f64 = exp2dt[@intCast(usize, 2 * i_0)];
- z -= exp2dt[@intCast(usize, 2 * i_0 + 1)];
- const r: f64 = t + t * z * (P1 + z * (P2 + z * (P3 + z * (P4 + z * P5))));
-
- return math.scalbn(r, ik);
-}
-
-test "math.exp2" {
- try expect(exp2(@as(f32, 0.8923)) == exp2_32(0.8923));
- try expect(exp2(@as(f64, 0.8923)) == exp2_64(0.8923));
-}
-
-test "math.exp2_32" {
+test "exp2_32" {
const epsilon = 0.000001;
- try expect(exp2_32(0.0) == 1.0);
- try expect(math.approxEqAbs(f32, exp2_32(0.2), 1.148698, epsilon));
- try expect(math.approxEqAbs(f32, exp2_32(0.8923), 1.856133, epsilon));
- try expect(math.approxEqAbs(f32, exp2_32(1.5), 2.828427, epsilon));
- try expect(math.approxEqAbs(f32, exp2_32(37.45), 187747237888, epsilon));
- try expect(math.approxEqAbs(f32, exp2_32(-1), 0.5, epsilon));
+ try expect(exp2f(0.0) == 1.0);
+ try expect(math.approxEqAbs(f32, exp2f(0.2), 1.148698, epsilon));
+ try expect(math.approxEqAbs(f32, exp2f(0.8923), 1.856133, epsilon));
+ try expect(math.approxEqAbs(f32, exp2f(1.5), 2.828427, epsilon));
+ try expect(math.approxEqAbs(f32, exp2f(37.45), 187747237888, epsilon));
+ try expect(math.approxEqAbs(f32, exp2f(-1), 0.5, epsilon));
}
-test "math.exp2_64" {
+test "exp2_64" {
const epsilon = 0.000001;
- try expect(exp2_64(0.0) == 1.0);
- try expect(math.approxEqAbs(f64, exp2_64(0.2), 1.148698, epsilon));
- try expect(math.approxEqAbs(f64, exp2_64(0.8923), 1.856133, epsilon));
- try expect(math.approxEqAbs(f64, exp2_64(1.5), 2.828427, epsilon));
- try expect(math.approxEqAbs(f64, exp2_64(-1), 0.5, epsilon));
- try expect(math.approxEqAbs(f64, exp2_64(-0x1.a05cc754481d1p-2), 0x1.824056efc687cp-1, epsilon));
+ try expect(exp2(0.0) == 1.0);
+ try expect(math.approxEqAbs(f64, exp2(0.2), 1.148698, epsilon));
+ try expect(math.approxEqAbs(f64, exp2(0.8923), 1.856133, epsilon));
+ try expect(math.approxEqAbs(f64, exp2(1.5), 2.828427, epsilon));
+ try expect(math.approxEqAbs(f64, exp2(-1), 0.5, epsilon));
+ try expect(math.approxEqAbs(f64, exp2(-0x1.a05cc754481d1p-2), 0x1.824056efc687cp-1, epsilon));
}
-test "math.exp2_32.special" {
- try expect(math.isPositiveInf(exp2_32(math.inf(f32))));
- try expect(math.isNan(exp2_32(math.nan(f32))));
+test "exp2_32.special" {
+ try expect(math.isPositiveInf(exp2f(math.inf(f32))));
+ try expect(math.isNan(exp2f(math.nan(f32))));
}
-test "math.exp2_64.special" {
- try expect(math.isPositiveInf(exp2_64(math.inf(f64))));
- try expect(math.isNan(exp2_64(math.nan(f64))));
+test "exp2_64.special" {
+ try expect(math.isPositiveInf(exp2(math.inf(f64))));
+ try expect(math.isNan(exp2(math.nan(f64))));
}
diff --git a/lib/compiler_rt/extenddftf2.zig b/lib/compiler_rt/extenddftf2.zig
new file mode 100644
index 0000000000..21e497b3a4
--- /dev/null
+++ b/lib/compiler_rt/extenddftf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__extenddfkf2, .{ .name = "__extenddfkf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = common.linkage });
+ } else {
+ @export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __extenddftf2(a: f64) callconv(.C) f128 {
+ return extendf(f128, f64, @bitCast(u64, a));
+}
+
+fn __extenddfkf2(a: f64) callconv(.C) f128 {
+ return extendf(f128, f64, @bitCast(u64, a));
+}
+
+fn _Qp_dtoq(c: *f128, a: f64) callconv(.C) void {
+ c.* = extendf(f128, f64, @bitCast(u64, a));
+}
diff --git a/lib/compiler_rt/extenddfxf2.zig b/lib/compiler_rt/extenddfxf2.zig
new file mode 100644
index 0000000000..e76b2fc038
--- /dev/null
+++ b/lib/compiler_rt/extenddfxf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extend_f80 = @import("./extendf.zig").extend_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = common.linkage });
+}
+
+fn __extenddfxf2(a: f64) callconv(.C) f80 {
+ return extend_f80(f64, @bitCast(u64, a));
+}
diff --git a/lib/std/special/compiler_rt/extendXfYf2.zig b/lib/compiler_rt/extendf.zig
similarity index 50%
rename from lib/std/special/compiler_rt/extendXfYf2.zig
rename to lib/compiler_rt/extendf.zig
index 2c3f0c88fc..8eb23c1d82 100644
--- a/lib/std/special/compiler_rt/extendXfYf2.zig
+++ b/lib/compiler_rt/extendf.zig
@@ -1,50 +1,10 @@
const std = @import("std");
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
-const native_arch = builtin.cpu.arch;
-
-pub fn __extendsfdf2(a: f32) callconv(.C) f64 {
- return extendXfYf2(f64, f32, @bitCast(u32, a));
-}
-
-pub fn __extenddftf2(a: f64) callconv(.C) f128 {
- return extendXfYf2(f128, f64, @bitCast(u64, a));
-}
-
-pub fn __extendsftf2(a: f32) callconv(.C) f128 {
- return extendXfYf2(f128, f32, @bitCast(u32, a));
-}
-
-// AArch64 is the only ABI (at the moment) to support f16 arguments without the
-// need for extending them to wider fp types.
-pub const F16T = if (native_arch.isAARCH64()) f16 else u16;
-
-pub fn __extendhfsf2(a: F16T) callconv(.C) f32 {
- return extendXfYf2(f32, f16, @bitCast(u16, a));
-}
-
-pub fn __extendhftf2(a: F16T) callconv(.C) f128 {
- return extendXfYf2(f128, f16, @bitCast(u16, a));
-}
-
-pub fn __extendxftf2(a: c_longdouble) callconv(.C) f128 {
- _ = a;
- @panic("TODO implement");
-}
-
-pub fn __aeabi_h2f(arg: u16) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, arg });
-}
-
-pub fn __aeabi_f2d(arg: f32) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, arg) });
-}
-
-inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) dst_t {
- @setRuntimeSafety(builtin.is_test);
+pub inline fn extendf(
+ comptime dst_t: type,
+ comptime src_t: type,
+ a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits),
+) dst_t {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
@@ -112,6 +72,71 @@ inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.In
return @bitCast(dst_t, result);
}
-test {
- _ = @import("extendXfYf2_test.zig");
+pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 {
+ const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
+ const src_sig_bits = std.math.floatMantissaBits(src_t);
+ const dst_int_bit = 0x8000000000000000;
+ const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+
+ const dst_exp_bias = 16383;
+
+ const src_bits = @bitSizeOf(src_t);
+ const src_exp_bits = src_bits - src_sig_bits - 1;
+ const src_inf_exp = (1 << src_exp_bits) - 1;
+ const src_exp_bias = src_inf_exp >> 1;
+
+ const src_min_normal = 1 << src_sig_bits;
+ const src_inf = src_inf_exp << src_sig_bits;
+ const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
+ const src_abs_mask = src_sign_mask - 1;
+ const src_qnan = 1 << (src_sig_bits - 1);
+ const src_nan_code = src_qnan - 1;
+
+ var dst: std.math.F80 = undefined;
+
+ // Break a into a sign and representation of the absolute value
+ const a_abs = a & src_abs_mask;
+ const sign: u16 = if (a & src_sign_mask != 0) 0x8000 else 0;
+
+ if (a_abs -% src_min_normal < src_inf - src_min_normal) {
+ // a is a normal number.
+ // Extend to the destination type by shifting the significand and
+ // exponent into the proper position and rebiasing the exponent.
+ dst.exp = @intCast(u16, a_abs >> src_sig_bits);
+ dst.exp += dst_exp_bias - src_exp_bias;
+ dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits);
+ dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
+ } else if (a_abs >= src_inf) {
+ // a is NaN or infinity.
+ // Conjure the result by beginning with infinity, then setting the qNaN
+ // bit (if needed) and right-aligning the rest of the trailing NaN
+ // payload field.
+ dst.exp = 0x7fff;
+ dst.fraction = dst_int_bit;
+ dst.fraction |= @as(u64, a_abs & src_qnan) << (dst_sig_bits - src_sig_bits);
+ dst.fraction |= @as(u64, a_abs & src_nan_code) << (dst_sig_bits - src_sig_bits);
+ } else if (a_abs != 0) {
+ // a is denormal.
+ // renormalize the significand and clear the leading bit, then insert
+ // the correct adjusted exponent in the destination type.
+ const scale: u16 = @clz(src_rep_t, a_abs) -
+ @clz(src_rep_t, @as(src_rep_t, src_min_normal));
+
+ dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale);
+ dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
+ dst.exp = @truncate(u16, a_abs >> @intCast(u4, src_sig_bits - scale));
+ dst.exp ^= 1;
+ dst.exp |= dst_exp_bias - src_exp_bias - scale + 1;
+ } else {
+ // a is zero.
+ dst.exp = 0;
+ dst.fraction = 0;
+ }
+
+ dst.exp |= sign;
+ return std.math.make_f80(dst);
+}
+
+test {
+ _ = @import("extendf_test.zig");
}
diff --git a/lib/std/special/compiler_rt/extendXfYf2_test.zig b/lib/compiler_rt/extendf_test.zig
similarity index 87%
rename from lib/std/special/compiler_rt/extendXfYf2_test.zig
rename to lib/compiler_rt/extendf_test.zig
index d0c4f82e97..1102092a04 100644
--- a/lib/std/special/compiler_rt/extendXfYf2_test.zig
+++ b/lib/compiler_rt/extendf_test.zig
@@ -1,22 +1,22 @@
const builtin = @import("builtin");
-const __extendhfsf2 = @import("extendXfYf2.zig").__extendhfsf2;
-const __extendhftf2 = @import("extendXfYf2.zig").__extendhftf2;
-const __extendsftf2 = @import("extendXfYf2.zig").__extendsftf2;
-const __extenddftf2 = @import("extendXfYf2.zig").__extenddftf2;
-const F16T = @import("extendXfYf2.zig").F16T;
+const __extendhfsf2 = @import("extendhfsf2.zig").__extendhfsf2;
+const __extendhftf2 = @import("extendhftf2.zig").__extendhftf2;
+const __extendsftf2 = @import("extendsftf2.zig").__extendsftf2;
+const __extenddftf2 = @import("extenddftf2.zig").__extenddftf2;
+const F16T = @import("./common.zig").F16T;
-fn test__extenddftf2(a: f64, expectedHi: u64, expectedLo: u64) !void {
+fn test__extenddftf2(a: f64, expected_hi: u64, expected_lo: u64) !void {
const x = __extenddftf2(a);
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
- if (hi == expectedHi and lo == expectedLo)
+ if (hi == expected_hi and lo == expected_lo)
return;
// test other possible NaN representation(signal NaN)
- if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{
@@ -43,18 +43,18 @@ fn test__extendhfsf2(a: u16, expected: u32) !void {
return error.TestFailure;
}
-fn test__extendsftf2(a: f32, expectedHi: u64, expectedLo: u64) !void {
+fn test__extendsftf2(a: f32, expected_hi: u64, expected_lo: u64) !void {
const x = __extendsftf2(a);
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
- if (hi == expectedHi and lo == expectedLo)
+ if (hi == expected_hi and lo == expected_lo)
return;
// test other possible NaN representation(signal NaN)
- if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{
@@ -159,18 +159,18 @@ fn makeInf32() f32 {
return @bitCast(f32, @as(u32, 0x7f800000));
}
-fn test__extendhftf2(a: u16, expectedHi: u64, expectedLo: u64) !void {
+fn test__extendhftf2(a: u16, expected_hi: u64, expected_lo: u64) !void {
const x = __extendhftf2(@bitCast(F16T, a));
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
- if (hi == expectedHi and lo == expectedLo)
+ if (hi == expected_hi and lo == expected_lo)
return;
// test other possible NaN representation(signal NaN)
- if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{
diff --git a/lib/compiler_rt/extendhfsf2.zig b/lib/compiler_rt/extendhfsf2.zig
new file mode 100644
index 0000000000..a6bf5f5be5
--- /dev/null
+++ b/lib/compiler_rt/extendhfsf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.gnu_f16_abi) {
+ @export(__gnu_h2f_ieee, .{ .name = "__gnu_h2f_ieee", .linkage = common.linkage });
+ } else if (common.want_aeabi) {
+ @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = common.linkage });
+ } else {
+ @export(__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __extendhfsf2(a: common.F16T) callconv(.C) f32 {
+ return extendf(f32, f16, @bitCast(u16, a));
+}
+
+fn __gnu_h2f_ieee(a: common.F16T) callconv(.C) f32 {
+ return extendf(f32, f16, @bitCast(u16, a));
+}
+
+fn __aeabi_h2f(a: u16) callconv(.AAPCS) f32 {
+ return extendf(f32, f16, @bitCast(u16, a));
+}
diff --git a/lib/compiler_rt/extendhftf2.zig b/lib/compiler_rt/extendhftf2.zig
new file mode 100644
index 0000000000..5d339fabce
--- /dev/null
+++ b/lib/compiler_rt/extendhftf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = common.linkage });
+}
+
+pub fn __extendhftf2(a: common.F16T) callconv(.C) f128 {
+ return extendf(f128, f16, @bitCast(u16, a));
+}
diff --git a/lib/compiler_rt/extendhfxf2.zig b/lib/compiler_rt/extendhfxf2.zig
new file mode 100644
index 0000000000..e509f96575
--- /dev/null
+++ b/lib/compiler_rt/extendhfxf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extend_f80 = @import("./extendf.zig").extend_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = common.linkage });
+}
+
+fn __extendhfxf2(a: common.F16T) callconv(.C) f80 {
+ return extend_f80(f16, @bitCast(u16, a));
+}
diff --git a/lib/compiler_rt/extendsfdf2.zig b/lib/compiler_rt/extendsfdf2.zig
new file mode 100644
index 0000000000..7fd69f6c22
--- /dev/null
+++ b/lib/compiler_rt/extendsfdf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = common.linkage });
+ } else {
+ @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = common.linkage });
+ }
+}
+
+fn __extendsfdf2(a: f32) callconv(.C) f64 {
+ return extendf(f64, f32, @bitCast(u32, a));
+}
+
+fn __aeabi_f2d(a: f32) callconv(.AAPCS) f64 {
+ return extendf(f64, f32, @bitCast(u32, a));
+}
diff --git a/lib/compiler_rt/extendsftf2.zig b/lib/compiler_rt/extendsftf2.zig
new file mode 100644
index 0000000000..acdc0d586d
--- /dev/null
+++ b/lib/compiler_rt/extendsftf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__extendsfkf2, .{ .name = "__extendsfkf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = common.linkage });
+ } else {
+ @export(__extendsftf2, .{ .name = "__extendsftf2", .linkage = common.linkage });
+ }
+}
+
+pub fn __extendsftf2(a: f32) callconv(.C) f128 {
+ return extendf(f128, f32, @bitCast(u32, a));
+}
+
+fn __extendsfkf2(a: f32) callconv(.C) f128 {
+ return extendf(f128, f32, @bitCast(u32, a));
+}
+
+fn _Qp_stoq(c: *f128, a: f32) callconv(.C) void {
+ c.* = extendf(f128, f32, @bitCast(u32, a));
+}
diff --git a/lib/compiler_rt/extendsfxf2.zig b/lib/compiler_rt/extendsfxf2.zig
new file mode 100644
index 0000000000..41bb5ace85
--- /dev/null
+++ b/lib/compiler_rt/extendsfxf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extend_f80 = @import("./extendf.zig").extend_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendsfxf2, .{ .name = "__extendsfxf2", .linkage = common.linkage });
+}
+
+fn __extendsfxf2(a: f32) callconv(.C) f80 {
+ return extend_f80(f32, @bitCast(u32, a));
+}
diff --git a/lib/compiler_rt/extendxftf2.zig b/lib/compiler_rt/extendxftf2.zig
new file mode 100644
index 0000000000..bb5d6a377b
--- /dev/null
+++ b/lib/compiler_rt/extendxftf2.zig
@@ -0,0 +1,50 @@
+const std = @import("std");
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = common.linkage });
+}
+
+fn __extendxftf2(a: f80) callconv(.C) f128 {
+ const src_int_bit: u64 = 0x8000000000000000;
+ const src_sig_mask = ~src_int_bit;
+ const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+ const dst_sig_bits = std.math.floatMantissaBits(f128);
+
+ const dst_bits = @bitSizeOf(f128);
+
+ const dst_min_normal = @as(u128, 1) << dst_sig_bits;
+
+ // Break a into a sign and representation of the absolute value
+ var a_rep = std.math.break_f80(a);
+ const sign = a_rep.exp & 0x8000;
+ a_rep.exp &= 0x7FFF;
+ var abs_result: u128 = undefined;
+
+ if (a_rep.exp == 0 and a_rep.fraction == 0) {
+ // zero
+ abs_result = 0;
+ } else if (a_rep.exp == 0x7FFF) {
+ // a is nan or infinite
+ abs_result = @as(u128, a_rep.fraction) << (dst_sig_bits - src_sig_bits);
+ abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
+ } else if (a_rep.fraction & src_int_bit != 0) {
+ // a is a normal value
+ abs_result = @as(u128, a_rep.fraction & src_sig_mask) << (dst_sig_bits - src_sig_bits);
+ abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
+ } else {
+ // a is denormal
+ // renormalize the significand and clear the leading bit and integer part,
+ // then insert the correct adjusted exponent in the destination type.
+ const scale: u32 = @clz(u64, a_rep.fraction);
+ abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1);
+ abs_result ^= dst_min_normal;
+ abs_result |= @as(u128, scale + 1) << dst_sig_bits;
+ }
+
+ // Apply the signbit to (dst_t)abs(a).
+ const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16);
+ return @bitCast(f128, result);
+}
diff --git a/lib/compiler_rt/fabs.zig b/lib/compiler_rt/fabs.zig
new file mode 100644
index 0000000000..fd3a58a9b7
--- /dev/null
+++ b/lib/compiler_rt/fabs.zig
@@ -0,0 +1,55 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fabsh, .{ .name = "__fabsh", .linkage = common.linkage });
+ @export(fabsf, .{ .name = "fabsf", .linkage = common.linkage });
+ @export(fabs, .{ .name = "fabs", .linkage = common.linkage });
+ @export(__fabsx, .{ .name = "__fabsx", .linkage = common.linkage });
+ const fabsq_sym_name = if (common.want_ppc_abi) "fabsf128" else "fabsq";
+ @export(fabsq, .{ .name = fabsq_sym_name, .linkage = common.linkage });
+ @export(fabsl, .{ .name = "fabsl", .linkage = common.linkage });
+}
+
+pub fn __fabsh(a: f16) callconv(.C) f16 {
+ return generic_fabs(a);
+}
+
+pub fn fabsf(a: f32) callconv(.C) f32 {
+ return generic_fabs(a);
+}
+
+pub fn fabs(a: f64) callconv(.C) f64 {
+ return generic_fabs(a);
+}
+
+pub fn __fabsx(a: f80) callconv(.C) f80 {
+ return generic_fabs(a);
+}
+
+pub fn fabsq(a: f128) callconv(.C) f128 {
+ return generic_fabs(a);
+}
+
+pub fn fabsl(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __fabsh(x),
+ 32 => return fabsf(x),
+ 64 => return fabs(x),
+ 80 => return __fabsx(x),
+ 128 => return fabsq(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+inline fn generic_fabs(x: anytype) @TypeOf(x) {
+ const T = @TypeOf(x);
+ const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
+ const float_bits = @bitCast(TBits, x);
+ const remove_sign = ~@as(TBits, 0) >> 1;
+ return @bitCast(T, float_bits & remove_sign);
+}
diff --git a/lib/std/special/compiler_rt/ffsdi2_test.zig b/lib/compiler_rt/ffsdi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/ffsdi2_test.zig
rename to lib/compiler_rt/ffsdi2_test.zig
diff --git a/lib/std/special/compiler_rt/ffssi2_test.zig b/lib/compiler_rt/ffssi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/ffssi2_test.zig
rename to lib/compiler_rt/ffssi2_test.zig
diff --git a/lib/std/special/compiler_rt/ffsti2_test.zig b/lib/compiler_rt/ffsti2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/ffsti2_test.zig
rename to lib/compiler_rt/ffsti2_test.zig
diff --git a/lib/compiler_rt/fixdfdi.zig b/lib/compiler_rt/fixdfdi.zig
new file mode 100644
index 0000000000..5935f23524
--- /dev/null
+++ b/lib/compiler_rt/fixdfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = common.linkage });
+ } else {
+ @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixdfdi(a: f64) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn __aeabi_d2lz(a: f64) callconv(.AAPCS) i64 {
+ return floatToInt(i64, a);
+}
diff --git a/lib/compiler_rt/fixdfsi.zig b/lib/compiler_rt/fixdfsi.zig
new file mode 100644
index 0000000000..983c84ccb1
--- /dev/null
+++ b/lib/compiler_rt/fixdfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = common.linkage });
+ } else {
+ @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixdfsi(a: f64) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn __aeabi_d2iz(a: f64) callconv(.AAPCS) i32 {
+ return floatToInt(i32, a);
+}
diff --git a/lib/compiler_rt/fixdfti.zig b/lib/compiler_rt/fixdfti.zig
new file mode 100644
index 0000000000..b2476ce2f3
--- /dev/null
+++ b/lib/compiler_rt/fixdfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixdfti, .{ .name = "__fixdfti", .linkage = common.linkage });
+}
+
+pub fn __fixdfti(a: f64) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
diff --git a/lib/compiler_rt/fixhfdi.zig b/lib/compiler_rt/fixhfdi.zig
new file mode 100644
index 0000000000..28e871f495
--- /dev/null
+++ b/lib/compiler_rt/fixhfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixhfdi, .{ .name = "__fixhfdi", .linkage = common.linkage });
+}
+
+fn __fixhfdi(a: f16) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
diff --git a/lib/compiler_rt/fixhfsi.zig b/lib/compiler_rt/fixhfsi.zig
new file mode 100644
index 0000000000..23440eea22
--- /dev/null
+++ b/lib/compiler_rt/fixhfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixhfsi, .{ .name = "__fixhfsi", .linkage = common.linkage });
+}
+
+fn __fixhfsi(a: f16) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
diff --git a/lib/compiler_rt/fixhfti.zig b/lib/compiler_rt/fixhfti.zig
new file mode 100644
index 0000000000..36fc1bf607
--- /dev/null
+++ b/lib/compiler_rt/fixhfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixhfti, .{ .name = "__fixhfti", .linkage = common.linkage });
+}
+
+fn __fixhfti(a: f16) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
diff --git a/lib/std/special/compiler_rt/fixint_test.zig b/lib/compiler_rt/fixint_test.zig
similarity index 67%
rename from lib/std/special/compiler_rt/fixint_test.zig
rename to lib/compiler_rt/fixint_test.zig
index 9c31444ac5..57b4093809 100644
--- a/lib/std/special/compiler_rt/fixint_test.zig
+++ b/lib/compiler_rt/fixint_test.zig
@@ -11,49 +11,49 @@ fn test__fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t, expected:
}
test "fixint.i1" {
- try test__fixint(f32, i1, -math.inf_f32, -1);
- try test__fixint(f32, i1, -math.f32_max, -1);
+ try test__fixint(f32, i1, -math.inf(f32), -1);
+ try test__fixint(f32, i1, -math.floatMax(f32), -1);
try test__fixint(f32, i1, -2.0, -1);
try test__fixint(f32, i1, -1.1, -1);
try test__fixint(f32, i1, -1.0, -1);
try test__fixint(f32, i1, -0.9, 0);
try test__fixint(f32, i1, -0.1, 0);
- try test__fixint(f32, i1, -math.f32_min, 0);
+ try test__fixint(f32, i1, -math.floatMin(f32), 0);
try test__fixint(f32, i1, -0.0, 0);
try test__fixint(f32, i1, 0.0, 0);
- try test__fixint(f32, i1, math.f32_min, 0);
+ try test__fixint(f32, i1, math.floatMin(f32), 0);
try test__fixint(f32, i1, 0.1, 0);
try test__fixint(f32, i1, 0.9, 0);
try test__fixint(f32, i1, 1.0, 0);
try test__fixint(f32, i1, 2.0, 0);
- try test__fixint(f32, i1, math.f32_max, 0);
- try test__fixint(f32, i1, math.inf_f32, 0);
+ try test__fixint(f32, i1, math.floatMax(f32), 0);
+ try test__fixint(f32, i1, math.inf(f32), 0);
}
test "fixint.i2" {
- try test__fixint(f32, i2, -math.inf_f32, -2);
- try test__fixint(f32, i2, -math.f32_max, -2);
+ try test__fixint(f32, i2, -math.inf(f32), -2);
+ try test__fixint(f32, i2, -math.floatMax(f32), -2);
try test__fixint(f32, i2, -2.0, -2);
try test__fixint(f32, i2, -1.9, -1);
try test__fixint(f32, i2, -1.1, -1);
try test__fixint(f32, i2, -1.0, -1);
try test__fixint(f32, i2, -0.9, 0);
try test__fixint(f32, i2, -0.1, 0);
- try test__fixint(f32, i2, -math.f32_min, 0);
+ try test__fixint(f32, i2, -math.floatMin(f32), 0);
try test__fixint(f32, i2, -0.0, 0);
try test__fixint(f32, i2, 0.0, 0);
- try test__fixint(f32, i2, math.f32_min, 0);
+ try test__fixint(f32, i2, math.floatMin(f32), 0);
try test__fixint(f32, i2, 0.1, 0);
try test__fixint(f32, i2, 0.9, 0);
try test__fixint(f32, i2, 1.0, 1);
try test__fixint(f32, i2, 2.0, 1);
- try test__fixint(f32, i2, math.f32_max, 1);
- try test__fixint(f32, i2, math.inf_f32, 1);
+ try test__fixint(f32, i2, math.floatMax(f32), 1);
+ try test__fixint(f32, i2, math.inf(f32), 1);
}
test "fixint.i3" {
- try test__fixint(f32, i3, -math.inf_f32, -4);
- try test__fixint(f32, i3, -math.f32_max, -4);
+ try test__fixint(f32, i3, -math.inf(f32), -4);
+ try test__fixint(f32, i3, -math.floatMax(f32), -4);
try test__fixint(f32, i3, -4.0, -4);
try test__fixint(f32, i3, -3.0, -3);
try test__fixint(f32, i3, -2.0, -2);
@@ -62,23 +62,23 @@ test "fixint.i3" {
try test__fixint(f32, i3, -1.0, -1);
try test__fixint(f32, i3, -0.9, 0);
try test__fixint(f32, i3, -0.1, 0);
- try test__fixint(f32, i3, -math.f32_min, 0);
+ try test__fixint(f32, i3, -math.floatMin(f32), 0);
try test__fixint(f32, i3, -0.0, 0);
try test__fixint(f32, i3, 0.0, 0);
- try test__fixint(f32, i3, math.f32_min, 0);
+ try test__fixint(f32, i3, math.floatMin(f32), 0);
try test__fixint(f32, i3, 0.1, 0);
try test__fixint(f32, i3, 0.9, 0);
try test__fixint(f32, i3, 1.0, 1);
try test__fixint(f32, i3, 2.0, 2);
try test__fixint(f32, i3, 3.0, 3);
try test__fixint(f32, i3, 4.0, 3);
- try test__fixint(f32, i3, math.f32_max, 3);
- try test__fixint(f32, i3, math.inf_f32, 3);
+ try test__fixint(f32, i3, math.floatMax(f32), 3);
+ try test__fixint(f32, i3, math.inf(f32), 3);
}
test "fixint.i32" {
- try test__fixint(f64, i32, -math.inf_f64, math.minInt(i32));
- try test__fixint(f64, i32, -math.f64_max, math.minInt(i32));
+ try test__fixint(f64, i32, -math.inf(f64), math.minInt(i32));
+ try test__fixint(f64, i32, -math.floatMax(f64), math.minInt(i32));
try test__fixint(f64, i32, @as(f64, math.minInt(i32)), math.minInt(i32));
try test__fixint(f64, i32, @as(f64, math.minInt(i32)) + 1, math.minInt(i32) + 1);
try test__fixint(f64, i32, -2.0, -2);
@@ -87,22 +87,22 @@ test "fixint.i32" {
try test__fixint(f64, i32, -1.0, -1);
try test__fixint(f64, i32, -0.9, 0);
try test__fixint(f64, i32, -0.1, 0);
- try test__fixint(f64, i32, -math.f32_min, 0);
+ try test__fixint(f64, i32, -@as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i32, -0.0, 0);
try test__fixint(f64, i32, 0.0, 0);
- try test__fixint(f64, i32, math.f32_min, 0);
+ try test__fixint(f64, i32, @as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i32, 0.1, 0);
try test__fixint(f64, i32, 0.9, 0);
try test__fixint(f64, i32, 1.0, 1);
try test__fixint(f64, i32, @as(f64, math.maxInt(i32)) - 1, math.maxInt(i32) - 1);
try test__fixint(f64, i32, @as(f64, math.maxInt(i32)), math.maxInt(i32));
- try test__fixint(f64, i32, math.f64_max, math.maxInt(i32));
- try test__fixint(f64, i32, math.inf_f64, math.maxInt(i32));
+ try test__fixint(f64, i32, math.floatMax(f64), math.maxInt(i32));
+ try test__fixint(f64, i32, math.inf(f64), math.maxInt(i32));
}
test "fixint.i64" {
- try test__fixint(f64, i64, -math.inf_f64, math.minInt(i64));
- try test__fixint(f64, i64, -math.f64_max, math.minInt(i64));
+ try test__fixint(f64, i64, -math.inf(f64), math.minInt(i64));
+ try test__fixint(f64, i64, -math.floatMax(f64), math.minInt(i64));
try test__fixint(f64, i64, @as(f64, math.minInt(i64)), math.minInt(i64));
try test__fixint(f64, i64, @as(f64, math.minInt(i64)) + 1, math.minInt(i64));
try test__fixint(f64, i64, @as(f64, math.minInt(i64) / 2), math.minInt(i64) / 2);
@@ -112,22 +112,22 @@ test "fixint.i64" {
try test__fixint(f64, i64, -1.0, -1);
try test__fixint(f64, i64, -0.9, 0);
try test__fixint(f64, i64, -0.1, 0);
- try test__fixint(f64, i64, -math.f32_min, 0);
+ try test__fixint(f64, i64, -@as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i64, -0.0, 0);
try test__fixint(f64, i64, 0.0, 0);
- try test__fixint(f64, i64, math.f32_min, 0);
+ try test__fixint(f64, i64, @as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i64, 0.1, 0);
try test__fixint(f64, i64, 0.9, 0);
try test__fixint(f64, i64, 1.0, 1);
try test__fixint(f64, i64, @as(f64, math.maxInt(i64)) - 1, math.maxInt(i64));
try test__fixint(f64, i64, @as(f64, math.maxInt(i64)), math.maxInt(i64));
- try test__fixint(f64, i64, math.f64_max, math.maxInt(i64));
- try test__fixint(f64, i64, math.inf_f64, math.maxInt(i64));
+ try test__fixint(f64, i64, math.floatMax(f64), math.maxInt(i64));
+ try test__fixint(f64, i64, math.inf(f64), math.maxInt(i64));
}
test "fixint.i128" {
- try test__fixint(f64, i128, -math.inf_f64, math.minInt(i128));
- try test__fixint(f64, i128, -math.f64_max, math.minInt(i128));
+ try test__fixint(f64, i128, -math.inf(f64), math.minInt(i128));
+ try test__fixint(f64, i128, -math.floatMax(f64), math.minInt(i128));
try test__fixint(f64, i128, @as(f64, math.minInt(i128)), math.minInt(i128));
try test__fixint(f64, i128, @as(f64, math.minInt(i128)) + 1, math.minInt(i128));
try test__fixint(f64, i128, -2.0, -2);
@@ -136,15 +136,15 @@ test "fixint.i128" {
try test__fixint(f64, i128, -1.0, -1);
try test__fixint(f64, i128, -0.9, 0);
try test__fixint(f64, i128, -0.1, 0);
- try test__fixint(f64, i128, -math.f32_min, 0);
+ try test__fixint(f64, i128, -@as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i128, -0.0, 0);
try test__fixint(f64, i128, 0.0, 0);
- try test__fixint(f64, i128, math.f32_min, 0);
+ try test__fixint(f64, i128, @as(f64, math.floatMin(f32)), 0);
try test__fixint(f64, i128, 0.1, 0);
try test__fixint(f64, i128, 0.9, 0);
try test__fixint(f64, i128, 1.0, 1);
try test__fixint(f64, i128, @as(f64, math.maxInt(i128)) - 1, math.maxInt(i128));
try test__fixint(f64, i128, @as(f64, math.maxInt(i128)), math.maxInt(i128));
- try test__fixint(f64, i128, math.f64_max, math.maxInt(i128));
- try test__fixint(f64, i128, math.inf_f64, math.maxInt(i128));
+ try test__fixint(f64, i128, math.floatMax(f64), math.maxInt(i128));
+ try test__fixint(f64, i128, math.inf(f64), math.maxInt(i128));
}
diff --git a/lib/compiler_rt/fixsfdi.zig b/lib/compiler_rt/fixsfdi.zig
new file mode 100644
index 0000000000..0c4fb7f3f6
--- /dev/null
+++ b/lib/compiler_rt/fixsfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = common.linkage });
+ } else {
+ @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixsfdi(a: f32) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn __aeabi_f2lz(a: f32) callconv(.AAPCS) i64 {
+ return floatToInt(i64, a);
+}
diff --git a/lib/compiler_rt/fixsfsi.zig b/lib/compiler_rt/fixsfsi.zig
new file mode 100644
index 0000000000..f48e354cd2
--- /dev/null
+++ b/lib/compiler_rt/fixsfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = common.linkage });
+ } else {
+ @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixsfsi(a: f32) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn __aeabi_f2iz(a: f32) callconv(.AAPCS) i32 {
+ return floatToInt(i32, a);
+}
diff --git a/lib/compiler_rt/fixsfti.zig b/lib/compiler_rt/fixsfti.zig
new file mode 100644
index 0000000000..4bf68ec8b0
--- /dev/null
+++ b/lib/compiler_rt/fixsfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixsfti, .{ .name = "__fixsfti", .linkage = common.linkage });
+}
+
+pub fn __fixsfti(a: f32) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
diff --git a/lib/compiler_rt/fixtfdi.zig b/lib/compiler_rt/fixtfdi.zig
new file mode 100644
index 0000000000..9cc9835352
--- /dev/null
+++ b/lib/compiler_rt/fixtfdi.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixkfdi, .{ .name = "__fixkfdi", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = common.linkage });
+ } else {
+ @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixtfdi(a: f128) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn __fixkfdi(a: f128) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn _Qp_qtox(a: *const f128) callconv(.C) i64 {
+ return floatToInt(i64, a.*);
+}
diff --git a/lib/compiler_rt/fixtfsi.zig b/lib/compiler_rt/fixtfsi.zig
new file mode 100644
index 0000000000..f46208f02b
--- /dev/null
+++ b/lib/compiler_rt/fixtfsi.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixkfsi, .{ .name = "__fixkfsi", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = common.linkage });
+ } else {
+ @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixtfsi(a: f128) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn __fixkfsi(a: f128) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn _Qp_qtoi(a: *const f128) callconv(.C) i32 {
+ return floatToInt(i32, a.*);
+}
diff --git a/lib/compiler_rt/fixtfti.zig b/lib/compiler_rt/fixtfti.zig
new file mode 100644
index 0000000000..9ba761729e
--- /dev/null
+++ b/lib/compiler_rt/fixtfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixtfti, .{ .name = "__fixtfti", .linkage = common.linkage });
+}
+
+pub fn __fixtfti(a: f128) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
diff --git a/lib/compiler_rt/fixunsdfdi.zig b/lib/compiler_rt/fixunsdfdi.zig
new file mode 100644
index 0000000000..edc0806405
--- /dev/null
+++ b/lib/compiler_rt/fixunsdfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = common.linkage });
+ } else {
+ @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunsdfdi(a: f64) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn __aeabi_d2ulz(a: f64) callconv(.AAPCS) u64 {
+ return floatToInt(u64, a);
+}
diff --git a/lib/compiler_rt/fixunsdfsi.zig b/lib/compiler_rt/fixunsdfsi.zig
new file mode 100644
index 0000000000..cc413f3983
--- /dev/null
+++ b/lib/compiler_rt/fixunsdfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = common.linkage });
+ } else {
+ @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunsdfsi(a: f64) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn __aeabi_d2uiz(a: f64) callconv(.AAPCS) u32 {
+ return floatToInt(u32, a);
+}
diff --git a/lib/compiler_rt/fixunsdfti.zig b/lib/compiler_rt/fixunsdfti.zig
new file mode 100644
index 0000000000..ce3c4aabdd
--- /dev/null
+++ b/lib/compiler_rt/fixunsdfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = common.linkage });
+}
+
+pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
diff --git a/lib/compiler_rt/fixunshfdi.zig b/lib/compiler_rt/fixunshfdi.zig
new file mode 100644
index 0000000000..5058bc5e68
--- /dev/null
+++ b/lib/compiler_rt/fixunshfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunshfdi, .{ .name = "__fixunshfdi", .linkage = common.linkage });
+}
+
+fn __fixunshfdi(a: f16) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
diff --git a/lib/compiler_rt/fixunshfsi.zig b/lib/compiler_rt/fixunshfsi.zig
new file mode 100644
index 0000000000..5755048814
--- /dev/null
+++ b/lib/compiler_rt/fixunshfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunshfsi, .{ .name = "__fixunshfsi", .linkage = common.linkage });
+}
+
+fn __fixunshfsi(a: f16) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
diff --git a/lib/compiler_rt/fixunshfti.zig b/lib/compiler_rt/fixunshfti.zig
new file mode 100644
index 0000000000..b804c52f96
--- /dev/null
+++ b/lib/compiler_rt/fixunshfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunshfti, .{ .name = "__fixunshfti", .linkage = common.linkage });
+}
+
+pub fn __fixunshfti(a: f16) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
diff --git a/lib/compiler_rt/fixunssfdi.zig b/lib/compiler_rt/fixunssfdi.zig
new file mode 100644
index 0000000000..544dfcd97e
--- /dev/null
+++ b/lib/compiler_rt/fixunssfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = common.linkage });
+ } else {
+ @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunssfdi(a: f32) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn __aeabi_f2ulz(a: f32) callconv(.AAPCS) u64 {
+ return floatToInt(u64, a);
+}
diff --git a/lib/compiler_rt/fixunssfsi.zig b/lib/compiler_rt/fixunssfsi.zig
new file mode 100644
index 0000000000..24b1e86694
--- /dev/null
+++ b/lib/compiler_rt/fixunssfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = common.linkage });
+ } else {
+ @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunssfsi(a: f32) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn __aeabi_f2uiz(a: f32) callconv(.AAPCS) u32 {
+ return floatToInt(u32, a);
+}
diff --git a/lib/compiler_rt/fixunssfti.zig b/lib/compiler_rt/fixunssfti.zig
new file mode 100644
index 0000000000..7b1965b5ab
--- /dev/null
+++ b/lib/compiler_rt/fixunssfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = common.linkage });
+}
+
+pub fn __fixunssfti(a: f32) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
diff --git a/lib/compiler_rt/fixunstfdi.zig b/lib/compiler_rt/fixunstfdi.zig
new file mode 100644
index 0000000000..0657bf20c1
--- /dev/null
+++ b/lib/compiler_rt/fixunstfdi.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixunskfdi, .{ .name = "__fixunskfdi", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = common.linkage });
+ } else {
+ @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunstfdi(a: f128) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn __fixunskfdi(a: f128) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn _Qp_qtoux(a: *const f128) callconv(.C) u64 {
+ return floatToInt(u64, a.*);
+}
diff --git a/lib/compiler_rt/fixunstfsi.zig b/lib/compiler_rt/fixunstfsi.zig
new file mode 100644
index 0000000000..70725ddf38
--- /dev/null
+++ b/lib/compiler_rt/fixunstfsi.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixunskfsi, .{ .name = "__fixunskfsi", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = common.linkage });
+ } else {
+ @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = common.linkage });
+ }
+}
+
+pub fn __fixunstfsi(a: f128) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn __fixunskfsi(a: f128) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn _Qp_qtoui(a: *const f128) callconv(.C) u32 {
+ return floatToInt(u32, a.*);
+}
diff --git a/lib/compiler_rt/fixunstfti.zig b/lib/compiler_rt/fixunstfti.zig
new file mode 100644
index 0000000000..5e39db1065
--- /dev/null
+++ b/lib/compiler_rt/fixunstfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = common.linkage });
+}
+
+pub fn __fixunstfti(a: f128) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
diff --git a/lib/compiler_rt/fixunsxfdi.zig b/lib/compiler_rt/fixunsxfdi.zig
new file mode 100644
index 0000000000..cb2760af4e
--- /dev/null
+++ b/lib/compiler_rt/fixunsxfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsxfdi, .{ .name = "__fixunsxfdi", .linkage = common.linkage });
+}
+
+fn __fixunsxfdi(a: f80) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
diff --git a/lib/compiler_rt/fixunsxfsi.zig b/lib/compiler_rt/fixunsxfsi.zig
new file mode 100644
index 0000000000..bec36abbf4
--- /dev/null
+++ b/lib/compiler_rt/fixunsxfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsxfsi, .{ .name = "__fixunsxfsi", .linkage = common.linkage });
+}
+
+fn __fixunsxfsi(a: f80) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
diff --git a/lib/compiler_rt/fixunsxfti.zig b/lib/compiler_rt/fixunsxfti.zig
new file mode 100644
index 0000000000..acd41469be
--- /dev/null
+++ b/lib/compiler_rt/fixunsxfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = common.linkage });
+}
+
+pub fn __fixunsxfti(a: f80) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
diff --git a/lib/compiler_rt/fixxfdi.zig b/lib/compiler_rt/fixxfdi.zig
new file mode 100644
index 0000000000..0f249e0a92
--- /dev/null
+++ b/lib/compiler_rt/fixxfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixxfdi, .{ .name = "__fixxfdi", .linkage = common.linkage });
+}
+
+fn __fixxfdi(a: f80) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
diff --git a/lib/compiler_rt/fixxfsi.zig b/lib/compiler_rt/fixxfsi.zig
new file mode 100644
index 0000000000..ac2158b7b8
--- /dev/null
+++ b/lib/compiler_rt/fixxfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixxfsi, .{ .name = "__fixxfsi", .linkage = common.linkage });
+}
+
+fn __fixxfsi(a: f80) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
diff --git a/lib/compiler_rt/fixxfti.zig b/lib/compiler_rt/fixxfti.zig
new file mode 100644
index 0000000000..fb547f4115
--- /dev/null
+++ b/lib/compiler_rt/fixxfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixxfti, .{ .name = "__fixxfti", .linkage = common.linkage });
+}
+
+fn __fixxfti(a: f80) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
diff --git a/lib/compiler_rt/float_to_int.zig b/lib/compiler_rt/float_to_int.zig
new file mode 100644
index 0000000000..49d41be442
--- /dev/null
+++ b/lib/compiler_rt/float_to_int.zig
@@ -0,0 +1,55 @@
+const Int = @import("std").meta.Int;
+const math = @import("std").math;
+const Log2Int = math.Log2Int;
+
+pub inline fn floatToInt(comptime I: type, a: anytype) I {
+ const F = @TypeOf(a);
+ const float_bits = @typeInfo(F).Float.bits;
+ const int_bits = @typeInfo(I).Int.bits;
+ const rep_t = Int(.unsigned, float_bits);
+ const sig_bits = math.floatMantissaBits(F);
+ const exp_bits = math.floatExponentBits(F);
+ const fractional_bits = math.floatFractionalBits(F);
+
+ const implicit_bit = if (F != f80) (@as(rep_t, 1) << sig_bits) else 0;
+ const max_exp = (1 << (exp_bits - 1));
+ const exp_bias = max_exp - 1;
+ const sig_mask = (@as(rep_t, 1) << sig_bits) - 1;
+
+ // Break a into sign, exponent, significand
+ const a_rep: rep_t = @bitCast(rep_t, a);
+ const negative = (a_rep >> (float_bits - 1)) != 0;
+ const exponent = @intCast(i32, (a_rep << 1) >> (sig_bits + 1)) - exp_bias;
+ const significand: rep_t = (a_rep & sig_mask) | implicit_bit;
+
+ // If the exponent is negative, the result rounds to zero.
+ if (exponent < 0) return 0;
+
+ // If the value is too large for the integer type, saturate.
+ switch (@typeInfo(I).Int.signedness) {
+ .unsigned => {
+ if (negative) return 0;
+ if (@intCast(c_uint, exponent) >= @minimum(int_bits, max_exp)) return math.maxInt(I);
+ },
+ .signed => if (@intCast(c_uint, exponent) >= @minimum(int_bits - 1, max_exp)) {
+ return if (negative) math.minInt(I) else math.maxInt(I);
+ },
+ }
+
+ // If 0 <= exponent < fractional_bits, right shift the significand to
+ // drop the fractional part. Otherwise, shift left.
+ var result: I = undefined;
+ if (exponent < fractional_bits) {
+ result = @intCast(I, significand >> @intCast(Log2Int(rep_t), fractional_bits - exponent));
+ } else {
+ result = @intCast(I, significand) << @intCast(Log2Int(I), exponent - fractional_bits);
+ }
+
+ if ((@typeInfo(I).Int.signedness == .signed) and negative)
+ return ~result +% 1;
+ return result;
+}
+
+test {
+ _ = @import("float_to_int_test.zig");
+}
diff --git a/lib/compiler_rt/float_to_int_test.zig b/lib/compiler_rt/float_to_int_test.zig
new file mode 100644
index 0000000000..676c12e914
--- /dev/null
+++ b/lib/compiler_rt/float_to_int_test.zig
@@ -0,0 +1,950 @@
+const std = @import("std");
+const testing = std.testing;
+const math = std.math;
+
+const __fixunshfti = @import("fixunshfti.zig").__fixunshfti;
+const __fixunsxfti = @import("fixunsxfti.zig").__fixunsxfti;
+
+// Conversion from f32
+const __fixsfsi = @import("fixsfsi.zig").__fixsfsi;
+const __fixunssfsi = @import("fixunssfsi.zig").__fixunssfsi;
+const __fixsfdi = @import("fixsfdi.zig").__fixsfdi;
+const __fixunssfdi = @import("fixunssfdi.zig").__fixunssfdi;
+const __fixsfti = @import("fixsfti.zig").__fixsfti;
+const __fixunssfti = @import("fixunssfti.zig").__fixunssfti;
+
+// Conversion from f64
+const __fixdfsi = @import("fixdfsi.zig").__fixdfsi;
+const __fixunsdfsi = @import("fixunsdfsi.zig").__fixunsdfsi;
+const __fixdfdi = @import("fixdfdi.zig").__fixdfdi;
+const __fixunsdfdi = @import("fixunsdfdi.zig").__fixunsdfdi;
+const __fixdfti = @import("fixdfti.zig").__fixdfti;
+const __fixunsdfti = @import("fixunsdfti.zig").__fixunsdfti;
+
+// Conversion from f128
+const __fixtfsi = @import("fixtfsi.zig").__fixtfsi;
+const __fixunstfsi = @import("fixunstfsi.zig").__fixunstfsi;
+const __fixtfdi = @import("fixtfdi.zig").__fixtfdi;
+const __fixunstfdi = @import("fixunstfdi.zig").__fixunstfdi;
+const __fixtfti = @import("fixtfti.zig").__fixtfti;
+const __fixunstfti = @import("fixunstfti.zig").__fixunstfti;
+
+fn test__fixsfsi(a: f32, expected: i32) !void { // assert f32 -> i32 conversion yields exactly `expected`
+ const x = __fixsfsi(a);
+ try testing.expect(x == expected);
+}
+
+fn test__fixunssfsi(a: f32, expected: u32) !void { // assert f32 -> u32 conversion yields exactly `expected`
+ const x = __fixunssfsi(a);
+ try testing.expect(x == expected);
+}
+
+test "fixsfsi" {
+ try test__fixsfsi(-math.floatMax(f32), math.minInt(i32));
+
+ try test__fixsfsi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i32));
+ try test__fixsfsi(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000);
+
+ try test__fixsfsi(-0x1.0000000000000p+127, -0x80000000);
+ try test__fixsfsi(-0x1.FFFFFFFFFFFFFp+126, -0x80000000);
+ try test__fixsfsi(-0x1.FFFFFFFFFFFFEp+126, -0x80000000);
+
+ try test__fixsfsi(-0x1.0000000000001p+63, -0x80000000);
+ try test__fixsfsi(-0x1.0000000000000p+63, -0x80000000);
+ try test__fixsfsi(-0x1.FFFFFFFFFFFFFp+62, -0x80000000);
+ try test__fixsfsi(-0x1.FFFFFFFFFFFFEp+62, -0x80000000);
+
+ try test__fixsfsi(-0x1.FFFFFEp+62, -0x80000000);
+ try test__fixsfsi(-0x1.FFFFFCp+62, -0x80000000);
+
+ try test__fixsfsi(-0x1.000000p+31, -0x80000000);
+ try test__fixsfsi(-0x1.FFFFFFp+30, -0x80000000);
+ try test__fixsfsi(-0x1.FFFFFEp+30, -0x7FFFFF80);
+ try test__fixsfsi(-0x1.FFFFFCp+30, -0x7FFFFF00);
+
+ try test__fixsfsi(-2.01, -2);
+ try test__fixsfsi(-2.0, -2);
+ try test__fixsfsi(-1.99, -1);
+ try test__fixsfsi(-1.0, -1);
+ try test__fixsfsi(-0.99, 0);
+ try test__fixsfsi(-0.5, 0);
+ try test__fixsfsi(-math.floatMin(f32), 0);
+ try test__fixsfsi(0.0, 0);
+ try test__fixsfsi(math.floatMin(f32), 0);
+ try test__fixsfsi(0.5, 0);
+ try test__fixsfsi(0.99, 0);
+ try test__fixsfsi(1.0, 1);
+ try test__fixsfsi(1.5, 1);
+ try test__fixsfsi(1.99, 1);
+ try test__fixsfsi(2.0, 2);
+ try test__fixsfsi(2.01, 2);
+
+ try test__fixsfsi(0x1.FFFFFCp+30, 0x7FFFFF00);
+ try test__fixsfsi(0x1.FFFFFEp+30, 0x7FFFFF80);
+ try test__fixsfsi(0x1.FFFFFFp+30, 0x7FFFFFFF);
+ try test__fixsfsi(0x1.000000p+31, 0x7FFFFFFF);
+
+ try test__fixsfsi(0x1.FFFFFCp+62, 0x7FFFFFFF);
+ try test__fixsfsi(0x1.FFFFFEp+62, 0x7FFFFFFF);
+
+ try test__fixsfsi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFF);
+ try test__fixsfsi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFF);
+ try test__fixsfsi(0x1.0000000000000p+63, 0x7FFFFFFF);
+ try test__fixsfsi(0x1.0000000000001p+63, 0x7FFFFFFF);
+
+ try test__fixsfsi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFF);
+ try test__fixsfsi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFF);
+ try test__fixsfsi(0x1.0000000000000p+127, 0x7FFFFFFF);
+
+ try test__fixsfsi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFF);
+ try test__fixsfsi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i32));
+
+ try test__fixsfsi(math.floatMax(f32), math.maxInt(i32));
+}
+
+test "fixunssfsi" {
+ try test__fixunssfsi(0.0, 0);
+
+ try test__fixunssfsi(0.5, 0);
+ try test__fixunssfsi(0.99, 0);
+ try test__fixunssfsi(1.0, 1);
+ try test__fixunssfsi(1.5, 1);
+ try test__fixunssfsi(1.99, 1);
+ try test__fixunssfsi(2.0, 2);
+ try test__fixunssfsi(2.01, 2);
+ try test__fixunssfsi(-0.5, 0);
+ try test__fixunssfsi(-0.99, 0);
+
+ try test__fixunssfsi(-1.0, 0);
+ try test__fixunssfsi(-1.5, 0);
+ try test__fixunssfsi(-1.99, 0);
+ try test__fixunssfsi(-2.0, 0);
+ try test__fixunssfsi(-2.01, 0);
+
+ try test__fixunssfsi(0x1.000000p+31, 0x80000000);
+ try test__fixunssfsi(0x1.000000p+32, 0xFFFFFFFF);
+ try test__fixunssfsi(0x1.FFFFFEp+31, 0xFFFFFF00);
+ try test__fixunssfsi(0x1.FFFFFEp+30, 0x7FFFFF80);
+ try test__fixunssfsi(0x1.FFFFFCp+30, 0x7FFFFF00);
+
+ try test__fixunssfsi(-0x1.FFFFFEp+30, 0);
+ try test__fixunssfsi(-0x1.FFFFFCp+30, 0);
+}
+
+fn test__fixsfdi(a: f32, expected: i64) !void { // assert f32 -> i64 conversion yields exactly `expected`
+ const x = __fixsfdi(a);
+ try testing.expect(x == expected);
+}
+
+fn test__fixunssfdi(a: f32, expected: u64) !void { // assert f32 -> u64 conversion yields exactly `expected`
+ const x = __fixunssfdi(a);
+ try testing.expect(x == expected);
+}
+
+test "fixsfdi" {
+ try test__fixsfdi(-math.floatMax(f32), math.minInt(i64));
+
+ try test__fixsfdi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i64));
+ try test__fixsfdi(-0x1.FFFFFFFFFFFFFp+1023, -0x8000000000000000);
+
+ try test__fixsfdi(-0x1.0000000000000p+127, -0x8000000000000000);
+ try test__fixsfdi(-0x1.FFFFFFFFFFFFFp+126, -0x8000000000000000);
+ try test__fixsfdi(-0x1.FFFFFFFFFFFFEp+126, -0x8000000000000000);
+
+ try test__fixsfdi(-0x1.0000000000001p+63, -0x8000000000000000);
+ try test__fixsfdi(-0x1.0000000000000p+63, -0x8000000000000000);
+ try test__fixsfdi(-0x1.FFFFFFFFFFFFFp+62, -0x8000000000000000);
+ try test__fixsfdi(-0x1.FFFFFFFFFFFFEp+62, -0x8000000000000000);
+
+ try test__fixsfdi(-0x1.FFFFFFp+62, -0x8000000000000000);
+ try test__fixsfdi(-0x1.FFFFFEp+62, -0x7fffff8000000000);
+ try test__fixsfdi(-0x1.FFFFFCp+62, -0x7fffff0000000000);
+
+ try test__fixsfdi(-2.01, -2);
+ try test__fixsfdi(-2.0, -2);
+ try test__fixsfdi(-1.99, -1);
+ try test__fixsfdi(-1.0, -1);
+ try test__fixsfdi(-0.99, 0);
+ try test__fixsfdi(-0.5, 0);
+ try test__fixsfdi(-math.floatMin(f32), 0);
+ try test__fixsfdi(0.0, 0);
+ try test__fixsfdi(math.floatMin(f32), 0);
+ try test__fixsfdi(0.5, 0);
+ try test__fixsfdi(0.99, 0);
+ try test__fixsfdi(1.0, 1);
+ try test__fixsfdi(1.5, 1);
+ try test__fixsfdi(1.99, 1);
+ try test__fixsfdi(2.0, 2);
+ try test__fixsfdi(2.01, 2);
+
+ try test__fixsfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+ try test__fixsfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+ try test__fixsfdi(0x1.FFFFFFp+62, 0x7FFFFFFFFFFFFFFF);
+
+ try test__fixsfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFFFFF);
+ try test__fixsfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFFFF);
+ try test__fixsfdi(0x1.0000000000000p+63, 0x7FFFFFFFFFFFFFFF);
+ try test__fixsfdi(0x1.0000000000001p+63, 0x7FFFFFFFFFFFFFFF);
+
+ try test__fixsfdi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFFFFF);
+ try test__fixsfdi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFFFF);
+ try test__fixsfdi(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFF);
+
+ try test__fixsfdi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFF);
+ try test__fixsfdi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i64));
+
+ try test__fixsfdi(math.floatMax(f32), math.maxInt(i64));
+}
+
+test "fixunssfdi" {
+ try test__fixunssfdi(0.0, 0);
+
+ try test__fixunssfdi(0.5, 0);
+ try test__fixunssfdi(0.99, 0);
+ try test__fixunssfdi(1.0, 1);
+ try test__fixunssfdi(1.5, 1);
+ try test__fixunssfdi(1.99, 1);
+ try test__fixunssfdi(2.0, 2);
+ try test__fixunssfdi(2.01, 2);
+ try test__fixunssfdi(-0.5, 0);
+ try test__fixunssfdi(-0.99, 0);
+
+ try test__fixunssfdi(-1.0, 0);
+ try test__fixunssfdi(-1.5, 0);
+ try test__fixunssfdi(-1.99, 0);
+ try test__fixunssfdi(-2.0, 0);
+ try test__fixunssfdi(-2.01, 0);
+
+ try test__fixunssfdi(0x1.FFFFFEp+63, 0xFFFFFF0000000000);
+ try test__fixunssfdi(0x1.000000p+63, 0x8000000000000000);
+ try test__fixunssfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+ try test__fixunssfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+
+ try test__fixunssfdi(-0x1.FFFFFEp+62, 0x0000000000000000);
+ try test__fixunssfdi(-0x1.FFFFFCp+62, 0x0000000000000000);
+}
+
+fn test__fixsfti(a: f32, expected: i128) !void { // assert f32 -> i128 conversion yields exactly `expected`
+ const x = __fixsfti(a);
+ try testing.expect(x == expected);
+}
+
+fn test__fixunssfti(a: f32, expected: u128) !void { // assert f32 -> u128 conversion yields exactly `expected`
+ const x = __fixunssfti(a);
+ try testing.expect(x == expected);
+}
+
+test "fixsfti" {
+ try test__fixsfti(-math.floatMax(f32), math.minInt(i128));
+
+ try test__fixsfti(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i128));
+ try test__fixsfti(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000000000000000000000000000);
+
+ try test__fixsfti(-0x1.0000000000000p+127, -0x80000000000000000000000000000000);
+ try test__fixsfti(-0x1.FFFFFFFFFFFFFp+126, -0x80000000000000000000000000000000);
+ try test__fixsfti(-0x1.FFFFFFFFFFFFEp+126, -0x80000000000000000000000000000000);
+ try test__fixsfti(-0x1.FFFFFF0000000p+126, -0x80000000000000000000000000000000);
+ try test__fixsfti(-0x1.FFFFFE0000000p+126, -0x7FFFFF80000000000000000000000000);
+ try test__fixsfti(-0x1.FFFFFC0000000p+126, -0x7FFFFF00000000000000000000000000);
+
+ try test__fixsfti(-0x1.0000000000001p+63, -0x8000000000000000);
+ try test__fixsfti(-0x1.0000000000000p+63, -0x8000000000000000);
+ try test__fixsfti(-0x1.FFFFFFFFFFFFFp+62, -0x8000000000000000);
+ try test__fixsfti(-0x1.FFFFFFFFFFFFEp+62, -0x8000000000000000);
+
+ try test__fixsfti(-0x1.FFFFFFp+62, -0x8000000000000000);
+ try test__fixsfti(-0x1.FFFFFEp+62, -0x7fffff8000000000);
+ try test__fixsfti(-0x1.FFFFFCp+62, -0x7fffff0000000000);
+
+ try test__fixsfti(-0x1.000000p+31, -0x80000000);
+ try test__fixsfti(-0x1.FFFFFFp+30, -0x80000000);
+ try test__fixsfti(-0x1.FFFFFEp+30, -0x7FFFFF80);
+ try test__fixsfti(-0x1.FFFFFCp+30, -0x7FFFFF00);
+
+ try test__fixsfti(-2.01, -2);
+ try test__fixsfti(-2.0, -2);
+ try test__fixsfti(-1.99, -1);
+ try test__fixsfti(-1.0, -1);
+ try test__fixsfti(-0.99, 0);
+ try test__fixsfti(-0.5, 0);
+ try test__fixsfti(-math.floatMin(f32), 0);
+ try test__fixsfti(0.0, 0);
+ try test__fixsfti(math.floatMin(f32), 0);
+ try test__fixsfti(0.5, 0);
+ try test__fixsfti(0.99, 0);
+ try test__fixsfti(1.0, 1);
+ try test__fixsfti(1.5, 1);
+ try test__fixsfti(1.99, 1);
+ try test__fixsfti(2.0, 2);
+ try test__fixsfti(2.01, 2);
+
+ try test__fixsfti(0x1.FFFFFCp+30, 0x7FFFFF00);
+ try test__fixsfti(0x1.FFFFFEp+30, 0x7FFFFF80);
+ try test__fixsfti(0x1.FFFFFFp+30, 0x80000000);
+ try test__fixsfti(0x1.000000p+31, 0x80000000);
+
+ try test__fixsfti(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+ try test__fixsfti(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+ try test__fixsfti(0x1.FFFFFFp+62, 0x8000000000000000);
+
+ try test__fixsfti(0x1.FFFFFFFFFFFFEp+62, 0x8000000000000000);
+ try test__fixsfti(0x1.FFFFFFFFFFFFFp+62, 0x8000000000000000);
+ try test__fixsfti(0x1.0000000000000p+63, 0x8000000000000000);
+ try test__fixsfti(0x1.0000000000001p+63, 0x8000000000000000);
+
+ try test__fixsfti(0x1.FFFFFC0000000p+126, 0x7FFFFF00000000000000000000000000);
+ try test__fixsfti(0x1.FFFFFE0000000p+126, 0x7FFFFF80000000000000000000000000);
+ try test__fixsfti(0x1.FFFFFF0000000p+126, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+ try test__fixsfti(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+ try test__fixsfti(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+ try test__fixsfti(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+
+ try test__fixsfti(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+ try test__fixsfti(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i128));
+
+ try test__fixsfti(math.floatMax(f32), math.maxInt(i128));
+}
+
+test "fixunssfti" {
+ try test__fixunssfti(0.0, 0);
+
+ try test__fixunssfti(0.5, 0);
+ try test__fixunssfti(0.99, 0);
+ try test__fixunssfti(1.0, 1);
+ try test__fixunssfti(1.5, 1);
+ try test__fixunssfti(1.99, 1);
+ try test__fixunssfti(2.0, 2);
+ try test__fixunssfti(2.01, 2);
+ try test__fixunssfti(-0.5, 0);
+ try test__fixunssfti(-0.99, 0);
+
+ try test__fixunssfti(-1.0, 0);
+ try test__fixunssfti(-1.5, 0);
+ try test__fixunssfti(-1.99, 0);
+ try test__fixunssfti(-2.0, 0);
+ try test__fixunssfti(-2.01, 0);
+
+ try test__fixunssfti(0x1.FFFFFEp+63, 0xFFFFFF0000000000);
+ try test__fixunssfti(0x1.000000p+63, 0x8000000000000000);
+ try test__fixunssfti(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+ try test__fixunssfti(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+ try test__fixunssfti(0x1.FFFFFEp+127, 0xFFFFFF00000000000000000000000000);
+ try test__fixunssfti(0x1.000000p+127, 0x80000000000000000000000000000000);
+ try test__fixunssfti(0x1.FFFFFEp+126, 0x7FFFFF80000000000000000000000000);
+ try test__fixunssfti(0x1.FFFFFCp+126, 0x7FFFFF00000000000000000000000000);
+
+ try test__fixunssfti(-0x1.FFFFFEp+62, 0x0000000000000000);
+ try test__fixunssfti(-0x1.FFFFFCp+62, 0x0000000000000000);
+ try test__fixunssfti(-0x1.FFFFFEp+126, 0x0000000000000000);
+ try test__fixunssfti(-0x1.FFFFFCp+126, 0x0000000000000000);
+ try test__fixunssfti(math.floatMax(f32), 0xffffff00000000000000000000000000);
+ try test__fixunssfti(math.inf(f32), math.maxInt(u128));
+}
+
+fn test__fixdfsi(a: f64, expected: i32) !void { // assert f64 -> i32 conversion yields exactly `expected`
+ const x = __fixdfsi(a);
+ try testing.expect(x == expected);
+}
+
+fn test__fixunsdfsi(a: f64, expected: u32) !void { // assert f64 -> u32 conversion yields exactly `expected`
+ const x = __fixunsdfsi(a);
+ try testing.expect(x == expected);
+}
+
+test "fixdfsi" {
+ try test__fixdfsi(-math.floatMax(f64), math.minInt(i32));
+
+ try test__fixdfsi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i32));
+ try test__fixdfsi(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000);
+
+ try test__fixdfsi(-0x1.0000000000000p+127, -0x80000000);
+ try test__fixdfsi(-0x1.FFFFFFFFFFFFFp+126, -0x80000000);
+ try test__fixdfsi(-0x1.FFFFFFFFFFFFEp+126, -0x80000000);
+
+ try test__fixdfsi(-0x1.0000000000001p+63, -0x80000000);
+ try test__fixdfsi(-0x1.0000000000000p+63, -0x80000000);
+ try test__fixdfsi(-0x1.FFFFFFFFFFFFFp+62, -0x80000000);
+ try test__fixdfsi(-0x1.FFFFFFFFFFFFEp+62, -0x80000000);
+
+ try test__fixdfsi(-0x1.FFFFFEp+62, -0x80000000);
+ try test__fixdfsi(-0x1.FFFFFCp+62, -0x80000000);
+
+ try test__fixdfsi(-0x1.000000p+31, -0x80000000);
+ try test__fixdfsi(-0x1.FFFFFFp+30, -0x7FFFFFC0);
+ try test__fixdfsi(-0x1.FFFFFEp+30, -0x7FFFFF80);
+
+ try test__fixdfsi(-2.01, -2);
+ try test__fixdfsi(-2.0, -2);
+ try test__fixdfsi(-1.99, -1);
+ try test__fixdfsi(-1.0, -1);
+ try test__fixdfsi(-0.99, 0);
+ try test__fixdfsi(-0.5, 0);
+ try test__fixdfsi(-math.floatMin(f64), 0);
+ try test__fixdfsi(0.0, 0);
+ try test__fixdfsi(math.floatMin(f64), 0);
+ try test__fixdfsi(0.5, 0);
+ try test__fixdfsi(0.99, 0);
+ try test__fixdfsi(1.0, 1);
+ try test__fixdfsi(1.5, 1);
+ try test__fixdfsi(1.99, 1);
+ try test__fixdfsi(2.0, 2);
+ try test__fixdfsi(2.01, 2);
+
+ try test__fixdfsi(0x1.FFFFFEp+30, 0x7FFFFF80);
+ try test__fixdfsi(0x1.FFFFFFp+30, 0x7FFFFFC0);
+ try test__fixdfsi(0x1.000000p+31, 0x7FFFFFFF);
+
+ try test__fixdfsi(0x1.FFFFFCp+62, 0x7FFFFFFF);
+ try test__fixdfsi(0x1.FFFFFEp+62, 0x7FFFFFFF);
+
+ try test__fixdfsi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFF);
+ try test__fixdfsi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFF);
+ try test__fixdfsi(0x1.0000000000000p+63, 0x7FFFFFFF);
+ try test__fixdfsi(0x1.0000000000001p+63, 0x7FFFFFFF);
+
+ try test__fixdfsi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFF);
+ try test__fixdfsi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFF);
+ try test__fixdfsi(0x1.0000000000000p+127, 0x7FFFFFFF);
+
+ try test__fixdfsi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFF);
+ try test__fixdfsi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i32));
+
+ try test__fixdfsi(math.floatMax(f64), math.maxInt(i32));
+}
+
+test "fixunsdfsi" {
+ try test__fixunsdfsi(0.0, 0);
+
+ try test__fixunsdfsi(0.5, 0);
+ try test__fixunsdfsi(0.99, 0);
+ try test__fixunsdfsi(1.0, 1);
+ try test__fixunsdfsi(1.5, 1);
+ try test__fixunsdfsi(1.99, 1);
+ try test__fixunsdfsi(2.0, 2);
+ try test__fixunsdfsi(2.01, 2);
+ try test__fixunsdfsi(-0.5, 0);
+ try test__fixunsdfsi(-0.99, 0);
+ try test__fixunsdfsi(-1.0, 0);
+ try test__fixunsdfsi(-1.5, 0);
+ try test__fixunsdfsi(-1.99, 0);
+ try test__fixunsdfsi(-2.0, 0);
+ try test__fixunsdfsi(-2.01, 0);
+
+ try test__fixunsdfsi(0x1.000000p+31, 0x80000000);
+ try test__fixunsdfsi(0x1.000000p+32, 0xFFFFFFFF);
+ try test__fixunsdfsi(0x1.FFFFFEp+31, 0xFFFFFF00);
+ try test__fixunsdfsi(0x1.FFFFFEp+30, 0x7FFFFF80);
+ try test__fixunsdfsi(0x1.FFFFFCp+30, 0x7FFFFF00);
+
+ try test__fixunsdfsi(-0x1.FFFFFEp+30, 0);
+ try test__fixunsdfsi(-0x1.FFFFFCp+30, 0);
+
+ try test__fixunsdfsi(0x1.FFFFFFFEp+31, 0xFFFFFFFF);
+ try test__fixunsdfsi(0x1.FFFFFFFC00000p+30, 0x7FFFFFFF);
+ try test__fixunsdfsi(0x1.FFFFFFF800000p+30, 0x7FFFFFFE);
+}
+
+fn test__fixdfdi(a: f64, expected: i64) !void { // assert f64 -> i64 conversion yields exactly `expected`
+ const x = __fixdfdi(a);
+ try testing.expect(x == expected);
+}
+
+fn test__fixunsdfdi(a: f64, expected: u64) !void { // assert f64 -> u64 conversion yields exactly `expected`
+ const x = __fixunsdfdi(a);
+ try testing.expect(x == expected);
+}
+
+test "fixdfdi" {
+ try test__fixdfdi(-math.floatMax(f64), math.minInt(i64));
+
+ try test__fixdfdi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i64));
+ try test__fixdfdi(-0x1.FFFFFFFFFFFFFp+1023, -0x8000000000000000);
+
+ try test__fixdfdi(-0x1.0000000000000p+127, -0x8000000000000000);
+ try test__fixdfdi(-0x1.FFFFFFFFFFFFFp+126, -0x8000000000000000);
+ try test__fixdfdi(-0x1.FFFFFFFFFFFFEp+126, -0x8000000000000000);
+
+ try test__fixdfdi(-0x1.0000000000001p+63, -0x8000000000000000);
+ try test__fixdfdi(-0x1.0000000000000p+63, -0x8000000000000000);
+ try test__fixdfdi(-0x1.FFFFFFFFFFFFFp+62, -0x7FFFFFFFFFFFFC00);
+ try test__fixdfdi(-0x1.FFFFFFFFFFFFEp+62, -0x7FFFFFFFFFFFF800);
+
+ try test__fixdfdi(-0x1.FFFFFEp+62, -0x7fffff8000000000);
+ try test__fixdfdi(-0x1.FFFFFCp+62, -0x7fffff0000000000);
+
+ try test__fixdfdi(-2.01, -2);
+ try test__fixdfdi(-2.0, -2);
+ try test__fixdfdi(-1.99, -1);
+ try test__fixdfdi(-1.0, -1);
+ try test__fixdfdi(-0.99, 0);
+ try test__fixdfdi(-0.5, 0);
+ try test__fixdfdi(-math.floatMin(f64), 0);
+ try test__fixdfdi(0.0, 0);
+ try test__fixdfdi(math.floatMin(f64), 0);
+ try test__fixdfdi(0.5, 0);
+ try test__fixdfdi(0.99, 0);
+ try test__fixdfdi(1.0, 1);
+ try test__fixdfdi(1.5, 1);
+ try test__fixdfdi(1.99, 1);
+ try test__fixdfdi(2.0, 2);
+ try test__fixdfdi(2.01, 2);
+
+ try test__fixdfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+ try test__fixdfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+
+ try test__fixdfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
+ try test__fixdfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
+ try test__fixdfdi(0x1.0000000000000p+63, 0x7FFFFFFFFFFFFFFF);
+ try test__fixdfdi(0x1.0000000000001p+63, 0x7FFFFFFFFFFFFFFF);
+
+ try test__fixdfdi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFFFFF);
+ try test__fixdfdi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFFFF);
+ try test__fixdfdi(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFF);
+
+ try test__fixdfdi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFF);
+ try test__fixdfdi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i64));
+
+ try test__fixdfdi(math.floatMax(f64), math.maxInt(i64));
+}
+
+test "fixunsdfdi" {
+ try test__fixunsdfdi(0.0, 0);
+ try test__fixunsdfdi(0.5, 0);
+ try test__fixunsdfdi(0.99, 0);
+ try test__fixunsdfdi(1.0, 1);
+ try test__fixunsdfdi(1.5, 1);
+ try test__fixunsdfdi(1.99, 1);
+ try test__fixunsdfdi(2.0, 2);
+ try test__fixunsdfdi(2.01, 2);
+ try test__fixunsdfdi(-0.5, 0);
+ try test__fixunsdfdi(-0.99, 0);
+ try test__fixunsdfdi(-1.0, 0);
+ try test__fixunsdfdi(-1.5, 0);
+ try test__fixunsdfdi(-1.99, 0);
+ try test__fixunsdfdi(-2.0, 0);
+ try test__fixunsdfdi(-2.01, 0);
+
+ try test__fixunsdfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+ try test__fixunsdfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+
+ try test__fixunsdfdi(-0x1.FFFFFEp+62, 0);
+ try test__fixunsdfdi(-0x1.FFFFFCp+62, 0);
+
+ try test__fixunsdfdi(0x1.FFFFFFFFFFFFFp+63, 0xFFFFFFFFFFFFF800);
+ try test__fixunsdfdi(0x1.0000000000000p+63, 0x8000000000000000);
+ try test__fixunsdfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
+ try test__fixunsdfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
+
+ try test__fixunsdfdi(-0x1.FFFFFFFFFFFFFp+62, 0);
+ try test__fixunsdfdi(-0x1.FFFFFFFFFFFFEp+62, 0);
+}
+
+fn test__fixdfti(a: f64, expected: i128) !void { // assert f64 -> i128 conversion yields exactly `expected`
+ const x = __fixdfti(a);
+ try testing.expect(x == expected);
+}
+
+fn test__fixunsdfti(a: f64, expected: u128) !void { // assert f64 -> u128 conversion yields exactly `expected`
+ const x = __fixunsdfti(a);
+ try testing.expect(x == expected);
+}
+
+test "fixdfti" {
+ try test__fixdfti(-math.floatMax(f64), math.minInt(i128));
+
+ try test__fixdfti(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i128));
+ try test__fixdfti(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000000000000000000000000000);
+
+ try test__fixdfti(-0x1.0000000000000p+127, -0x80000000000000000000000000000000);
+ try test__fixdfti(-0x1.FFFFFFFFFFFFFp+126, -0x7FFFFFFFFFFFFC000000000000000000);
+ try test__fixdfti(-0x1.FFFFFFFFFFFFEp+126, -0x7FFFFFFFFFFFF8000000000000000000);
+
+ try test__fixdfti(-0x1.0000000000001p+63, -0x8000000000000800);
+ try test__fixdfti(-0x1.0000000000000p+63, -0x8000000000000000);
+ try test__fixdfti(-0x1.FFFFFFFFFFFFFp+62, -0x7FFFFFFFFFFFFC00);
+ try test__fixdfti(-0x1.FFFFFFFFFFFFEp+62, -0x7FFFFFFFFFFFF800);
+
+ try test__fixdfti(-0x1.FFFFFEp+62, -0x7fffff8000000000);
+ try test__fixdfti(-0x1.FFFFFCp+62, -0x7fffff0000000000);
+
+ try test__fixdfti(-2.01, -2);
+ try test__fixdfti(-2.0, -2);
+ try test__fixdfti(-1.99, -1);
+ try test__fixdfti(-1.0, -1);
+ try test__fixdfti(-0.99, 0);
+ try test__fixdfti(-0.5, 0);
+ try test__fixdfti(-math.floatMin(f64), 0);
+ try test__fixdfti(0.0, 0);
+ try test__fixdfti(math.floatMin(f64), 0);
+ try test__fixdfti(0.5, 0);
+ try test__fixdfti(0.99, 0);
+ try test__fixdfti(1.0, 1);
+ try test__fixdfti(1.5, 1);
+ try test__fixdfti(1.99, 1);
+ try test__fixdfti(2.0, 2);
+ try test__fixdfti(2.01, 2);
+
+ try test__fixdfti(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+ try test__fixdfti(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+
+ try test__fixdfti(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
+ try test__fixdfti(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
+ try test__fixdfti(0x1.0000000000000p+63, 0x8000000000000000);
+ try test__fixdfti(0x1.0000000000001p+63, 0x8000000000000800);
+
+ try test__fixdfti(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFF8000000000000000000);
+ try test__fixdfti(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFC000000000000000000);
+ try test__fixdfti(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+
+ try test__fixdfti(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+ try test__fixdfti(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i128));
+
+ try test__fixdfti(math.floatMax(f64), math.maxInt(i128));
+}
+
+test "fixunsdfti" {
+ try test__fixunsdfti(0.0, 0);
+
+ try test__fixunsdfti(0.5, 0);
+ try test__fixunsdfti(0.99, 0);
+ try test__fixunsdfti(1.0, 1);
+ try test__fixunsdfti(1.5, 1);
+ try test__fixunsdfti(1.99, 1);
+ try test__fixunsdfti(2.0, 2);
+ try test__fixunsdfti(2.01, 2);
+ try test__fixunsdfti(-0.5, 0);
+ try test__fixunsdfti(-0.99, 0);
+ try test__fixunsdfti(-1.0, 0);
+ try test__fixunsdfti(-1.5, 0);
+ try test__fixunsdfti(-1.99, 0);
+ try test__fixunsdfti(-2.0, 0);
+ try test__fixunsdfti(-2.01, 0);
+
+ try test__fixunsdfti(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+ try test__fixunsdfti(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+
+ try test__fixunsdfti(-0x1.FFFFFEp+62, 0);
+ try test__fixunsdfti(-0x1.FFFFFCp+62, 0);
+
+ try test__fixunsdfti(0x1.FFFFFFFFFFFFFp+63, 0xFFFFFFFFFFFFF800);
+ try test__fixunsdfti(0x1.0000000000000p+63, 0x8000000000000000);
+ try test__fixunsdfti(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
+ try test__fixunsdfti(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
+
+ try test__fixunsdfti(0x1.FFFFFFFFFFFFFp+127, 0xFFFFFFFFFFFFF8000000000000000000);
+ try test__fixunsdfti(0x1.0000000000000p+127, 0x80000000000000000000000000000000);
+ try test__fixunsdfti(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFC000000000000000000);
+ try test__fixunsdfti(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFF8000000000000000000);
+ try test__fixunsdfti(0x1.0000000000000p+128, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+
+ try test__fixunsdfti(-0x1.FFFFFFFFFFFFFp+62, 0);
+ try test__fixunsdfti(-0x1.FFFFFFFFFFFFEp+62, 0);
+}
+
+fn test__fixtfsi(a: f128, expected: i32) !void { // assert f128 -> i32 conversion yields exactly `expected`
+ const x = __fixtfsi(a);
+ try testing.expect(x == expected);
+}
+
+fn test__fixunstfsi(a: f128, expected: u32) !void { // assert f128 -> u32 conversion yields exactly `expected`
+ const x = __fixunstfsi(a);
+ try testing.expect(x == expected);
+}
+
+test "fixtfsi" {
+ try test__fixtfsi(-math.floatMax(f128), math.minInt(i32));
+
+ try test__fixtfsi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i32));
+ try test__fixtfsi(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000);
+
+ try test__fixtfsi(-0x1.0000000000000p+127, -0x80000000);
+ try test__fixtfsi(-0x1.FFFFFFFFFFFFFp+126, -0x80000000);
+ try test__fixtfsi(-0x1.FFFFFFFFFFFFEp+126, -0x80000000);
+
+ try test__fixtfsi(-0x1.0000000000001p+63, -0x80000000);
+ try test__fixtfsi(-0x1.0000000000000p+63, -0x80000000);
+ try test__fixtfsi(-0x1.FFFFFFFFFFFFFp+62, -0x80000000);
+ try test__fixtfsi(-0x1.FFFFFFFFFFFFEp+62, -0x80000000);
+
+ try test__fixtfsi(-0x1.FFFFFEp+62, -0x80000000);
+ try test__fixtfsi(-0x1.FFFFFCp+62, -0x80000000);
+
+ try test__fixtfsi(-0x1.000000p+31, -0x80000000);
+ try test__fixtfsi(-0x1.FFFFFFp+30, -0x7FFFFFC0);
+ try test__fixtfsi(-0x1.FFFFFEp+30, -0x7FFFFF80);
+ try test__fixtfsi(-0x1.FFFFFCp+30, -0x7FFFFF00);
+
+ try test__fixtfsi(-2.01, -2);
+ try test__fixtfsi(-2.0, -2);
+ try test__fixtfsi(-1.99, -1);
+ try test__fixtfsi(-1.0, -1);
+ try test__fixtfsi(-0.99, 0);
+ try test__fixtfsi(-0.5, 0);
+ try test__fixtfsi(-math.floatMin(f32), 0);
+ try test__fixtfsi(0.0, 0);
+ try test__fixtfsi(math.floatMin(f32), 0);
+ try test__fixtfsi(0.5, 0);
+ try test__fixtfsi(0.99, 0);
+ try test__fixtfsi(1.0, 1);
+ try test__fixtfsi(1.5, 1);
+ try test__fixtfsi(1.99, 1);
+ try test__fixtfsi(2.0, 2);
+ try test__fixtfsi(2.01, 2);
+
+ try test__fixtfsi(0x1.FFFFFCp+30, 0x7FFFFF00);
+ try test__fixtfsi(0x1.FFFFFEp+30, 0x7FFFFF80);
+ try test__fixtfsi(0x1.FFFFFFp+30, 0x7FFFFFC0);
+ try test__fixtfsi(0x1.000000p+31, 0x7FFFFFFF);
+
+ try test__fixtfsi(0x1.FFFFFCp+62, 0x7FFFFFFF);
+ try test__fixtfsi(0x1.FFFFFEp+62, 0x7FFFFFFF);
+
+ try test__fixtfsi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFF);
+ try test__fixtfsi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFF);
+ try test__fixtfsi(0x1.0000000000000p+63, 0x7FFFFFFF);
+ try test__fixtfsi(0x1.0000000000001p+63, 0x7FFFFFFF);
+
+ try test__fixtfsi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFF);
+ try test__fixtfsi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFF);
+ try test__fixtfsi(0x1.0000000000000p+127, 0x7FFFFFFF);
+
+ try test__fixtfsi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFF);
+ try test__fixtfsi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i32));
+
+ try test__fixtfsi(math.floatMax(f128), math.maxInt(i32));
+}
+
+test "fixunstfsi" {
+ try test__fixunstfsi(math.inf(f128), 0xffffffff); // +inf saturates to maxInt(u32)
+ try test__fixunstfsi(0, 0x0);
+ try test__fixunstfsi(0x1.23456789abcdefp+5, 0x24); // fractional part truncated toward zero
+ try test__fixunstfsi(0x1.23456789abcdefp-3, 0x0); // magnitude < 1 truncates to 0
+ try test__fixunstfsi(0x1.23456789abcdefp+20, 0x123456);
+ try test__fixunstfsi(0x1.23456789abcdefp+40, 0xffffffff); // out of u32 range: saturate
+ try test__fixunstfsi(0x1.23456789abcdefp+256, 0xffffffff);
+ try test__fixunstfsi(-0x1.23456789abcdefp+3, 0x0); // negative input clamps to 0 for unsigned target
+
+ try test__fixunstfsi(0x1p+32, 0xFFFFFFFF); // exactly 2^32 is one past maxInt(u32): saturate
+}
+
+fn test__fixtfdi(a: f128, expected: i64) !void { // assert f128 -> i64 conversion yields exactly `expected`
+ const x = __fixtfdi(a);
+ try testing.expect(x == expected);
+}
+
+fn test__fixunstfdi(a: f128, expected: u64) !void { // assert f128 -> u64 conversion yields exactly `expected`
+ const x = __fixunstfdi(a);
+ try testing.expect(x == expected);
+}
+
+test "fixtfdi" {
+ try test__fixtfdi(-math.floatMax(f128), math.minInt(i64));
+
+ try test__fixtfdi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i64));
+ try test__fixtfdi(-0x1.FFFFFFFFFFFFFp+1023, -0x8000000000000000);
+
+ try test__fixtfdi(-0x1.0000000000000p+127, -0x8000000000000000);
+ try test__fixtfdi(-0x1.FFFFFFFFFFFFFp+126, -0x8000000000000000);
+ try test__fixtfdi(-0x1.FFFFFFFFFFFFEp+126, -0x8000000000000000);
+
+ try test__fixtfdi(-0x1.0000000000001p+63, -0x8000000000000000);
+ try test__fixtfdi(-0x1.0000000000000p+63, -0x8000000000000000);
+ try test__fixtfdi(-0x1.FFFFFFFFFFFFFp+62, -0x7FFFFFFFFFFFFC00);
+ try test__fixtfdi(-0x1.FFFFFFFFFFFFEp+62, -0x7FFFFFFFFFFFF800);
+
+ try test__fixtfdi(-0x1.FFFFFEp+62, -0x7FFFFF8000000000);
+ try test__fixtfdi(-0x1.FFFFFCp+62, -0x7FFFFF0000000000);
+
+ try test__fixtfdi(-0x1.000000p+31, -0x80000000);
+ try test__fixtfdi(-0x1.FFFFFFp+30, -0x7FFFFFC0);
+ try test__fixtfdi(-0x1.FFFFFEp+30, -0x7FFFFF80);
+ try test__fixtfdi(-0x1.FFFFFCp+30, -0x7FFFFF00);
+
+ try test__fixtfdi(-2.01, -2);
+ try test__fixtfdi(-2.0, -2);
+ try test__fixtfdi(-1.99, -1);
+ try test__fixtfdi(-1.0, -1);
+ try test__fixtfdi(-0.99, 0);
+ try test__fixtfdi(-0.5, 0);
+ try test__fixtfdi(-math.floatMin(f64), 0);
+ try test__fixtfdi(0.0, 0);
+ try test__fixtfdi(math.floatMin(f64), 0);
+ try test__fixtfdi(0.5, 0);
+ try test__fixtfdi(0.99, 0);
+ try test__fixtfdi(1.0, 1);
+ try test__fixtfdi(1.5, 1);
+ try test__fixtfdi(1.99, 1);
+ try test__fixtfdi(2.0, 2);
+ try test__fixtfdi(2.01, 2);
+
+ try test__fixtfdi(0x1.FFFFFCp+30, 0x7FFFFF00);
+ try test__fixtfdi(0x1.FFFFFEp+30, 0x7FFFFF80);
+ try test__fixtfdi(0x1.FFFFFFp+30, 0x7FFFFFC0);
+ try test__fixtfdi(0x1.000000p+31, 0x80000000);
+
+ try test__fixtfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+ try test__fixtfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+
+ try test__fixtfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
+ try test__fixtfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
+ try test__fixtfdi(0x1.0000000000000p+63, 0x7FFFFFFFFFFFFFFF);
+ try test__fixtfdi(0x1.0000000000001p+63, 0x7FFFFFFFFFFFFFFF);
+
+ try test__fixtfdi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFFFFF);
+ try test__fixtfdi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFFFF);
+ try test__fixtfdi(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFF);
+
+ try test__fixtfdi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFF);
+ try test__fixtfdi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i64));
+
+ try test__fixtfdi(math.floatMax(f128), math.maxInt(i64));
+}
+
+test "fixunstfdi" {
+ try test__fixunstfdi(0.0, 0);
+
+ try test__fixunstfdi(0.5, 0);
+ try test__fixunstfdi(0.99, 0);
+ try test__fixunstfdi(1.0, 1);
+ try test__fixunstfdi(1.5, 1);
+ try test__fixunstfdi(1.99, 1);
+ try test__fixunstfdi(2.0, 2);
+ try test__fixunstfdi(2.01, 2);
+ try test__fixunstfdi(-0.5, 0);
+ try test__fixunstfdi(-0.99, 0);
+ try test__fixunstfdi(-1.0, 0);
+ try test__fixunstfdi(-1.5, 0);
+ try test__fixunstfdi(-1.99, 0);
+ try test__fixunstfdi(-2.0, 0);
+ try test__fixunstfdi(-2.01, 0);
+
+ try test__fixunstfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+ try test__fixunstfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+
+ try test__fixunstfdi(-0x1.FFFFFEp+62, 0);
+ try test__fixunstfdi(-0x1.FFFFFCp+62, 0);
+
+ try test__fixunstfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
+ try test__fixunstfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
+
+ try test__fixunstfdi(-0x1.FFFFFFFFFFFFFp+62, 0);
+ try test__fixunstfdi(-0x1.FFFFFFFFFFFFEp+62, 0);
+
+ try test__fixunstfdi(0x1.FFFFFFFFFFFFFFFEp+63, 0xFFFFFFFFFFFFFFFF);
+ try test__fixunstfdi(0x1.0000000000000002p+63, 0x8000000000000001);
+ try test__fixunstfdi(0x1.0000000000000000p+63, 0x8000000000000000);
+ try test__fixunstfdi(0x1.FFFFFFFFFFFFFFFCp+62, 0x7FFFFFFFFFFFFFFF);
+ try test__fixunstfdi(0x1.FFFFFFFFFFFFFFF8p+62, 0x7FFFFFFFFFFFFFFE);
+ try test__fixunstfdi(0x1p+64, 0xFFFFFFFFFFFFFFFF);
+
+ try test__fixunstfdi(-0x1.0000000000000000p+63, 0);
+ try test__fixunstfdi(-0x1.FFFFFFFFFFFFFFFCp+62, 0);
+ try test__fixunstfdi(-0x1.FFFFFFFFFFFFFFF8p+62, 0);
+}
+
+fn test__fixtfti(a: f128, expected: i128) !void { // assert f128 -> i128 conversion yields exactly `expected`
+ const x = __fixtfti(a);
+ try testing.expect(x == expected);
+}
+
+fn test__fixunstfti(a: f128, expected: u128) !void { // assert f128 -> u128 conversion yields exactly `expected`
+ const x = __fixunstfti(a);
+ try testing.expect(x == expected);
+}
+
+test "fixtfti" {
+ try test__fixtfti(-math.floatMax(f128), math.minInt(i128));
+
+ try test__fixtfti(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i128));
+ try test__fixtfti(-0x1.FFFFFFFFFFFFFp+1023, -0x80000000000000000000000000000000);
+
+ try test__fixtfti(-0x1.0000000000000p+127, -0x80000000000000000000000000000000);
+ try test__fixtfti(-0x1.FFFFFFFFFFFFFp+126, -0x7FFFFFFFFFFFFC000000000000000000);
+ try test__fixtfti(-0x1.FFFFFFFFFFFFEp+126, -0x7FFFFFFFFFFFF8000000000000000000);
+
+ try test__fixtfti(-0x1.0000000000001p+63, -0x8000000000000800);
+ try test__fixtfti(-0x1.0000000000000p+63, -0x8000000000000000);
+ try test__fixtfti(-0x1.FFFFFFFFFFFFFp+62, -0x7FFFFFFFFFFFFC00);
+ try test__fixtfti(-0x1.FFFFFFFFFFFFEp+62, -0x7FFFFFFFFFFFF800);
+
+ try test__fixtfti(-0x1.FFFFFEp+62, -0x7fffff8000000000);
+ try test__fixtfti(-0x1.FFFFFCp+62, -0x7fffff0000000000);
+
+ try test__fixtfti(-2.01, -2);
+ try test__fixtfti(-2.0, -2);
+ try test__fixtfti(-1.99, -1);
+ try test__fixtfti(-1.0, -1);
+ try test__fixtfti(-0.99, 0);
+ try test__fixtfti(-0.5, 0);
+ try test__fixtfti(-math.floatMin(f128), 0);
+ try test__fixtfti(0.0, 0);
+ try test__fixtfti(math.floatMin(f128), 0);
+ try test__fixtfti(0.5, 0);
+ try test__fixtfti(0.99, 0);
+ try test__fixtfti(1.0, 1);
+ try test__fixtfti(1.5, 1);
+ try test__fixtfti(1.99, 1);
+ try test__fixtfti(2.0, 2);
+ try test__fixtfti(2.01, 2);
+
+ try test__fixtfti(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
+ try test__fixtfti(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
+
+ try test__fixtfti(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800);
+ try test__fixtfti(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00);
+ try test__fixtfti(0x1.0000000000000p+63, 0x8000000000000000);
+ try test__fixtfti(0x1.0000000000001p+63, 0x8000000000000800);
+
+ try test__fixtfti(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFF8000000000000000000);
+ try test__fixtfti(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFC000000000000000000);
+ try test__fixtfti(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+
+ try test__fixtfti(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+ try test__fixtfti(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i128));
+
+ try test__fixtfti(math.floatMax(f128), math.maxInt(i128));
+}
+
+test "fixunstfti" {
+ try test__fixunstfti(math.inf(f128), 0xffffffffffffffffffffffffffffffff);
+
+ try test__fixunstfti(0.0, 0);
+
+ try test__fixunstfti(0.5, 0);
+ try test__fixunstfti(0.99, 0);
+ try test__fixunstfti(1.0, 1);
+ try test__fixunstfti(1.5, 1);
+ try test__fixunstfti(1.99, 1);
+ try test__fixunstfti(2.0, 2);
+ try test__fixunstfti(2.01, 2);
+ try test__fixunstfti(-0.01, 0);
+ try test__fixunstfti(-0.99, 0);
+
+ try test__fixunstfti(0x1p+128, 0xffffffffffffffffffffffffffffffff);
+
+ try test__fixunstfti(0x1.FFFFFEp+126, 0x7fffff80000000000000000000000000);
+ try test__fixunstfti(0x1.FFFFFEp+127, 0xffffff00000000000000000000000000);
+ try test__fixunstfti(0x1.FFFFFEp+128, 0xffffffffffffffffffffffffffffffff);
+ try test__fixunstfti(0x1.FFFFFEp+129, 0xffffffffffffffffffffffffffffffff);
+}
+
+fn test__fixunshfti(a: f16, expected: u128) !void { // assert f16 -> u128 conversion yields exactly `expected`
+ const x = __fixunshfti(a);
+ try testing.expect(x == expected);
+}
+
+test "fixunshfti for f16" {
+ try test__fixunshfti(math.inf(f16), math.maxInt(u128)); // infinity saturates to the integer maximum
+ try test__fixunshfti(math.floatMax(f16), 65504); // f16's largest finite value is an exact integer
+}
+
+fn test__fixunsxfti(a: f80, expected: u128) !void { // assert f80 -> u128 conversion yields exactly `expected`
+ const x = __fixunsxfti(a);
+ try testing.expect(x == expected);
+}
+
+test "fixunsxfti for f80" {
+ try test__fixunsxfti(math.inf(f80), math.maxInt(u128)); // infinity saturates to the integer maximum
+ try test__fixunsxfti(math.floatMax(f80), math.maxInt(u128)); // floatMax(f80) exceeds maxInt(u128): saturate
+ try test__fixunsxfti(math.maxInt(u64), math.maxInt(u64)); // f80's 64-bit significand represents maxInt(u64) exactly
+}
diff --git a/lib/compiler_rt/floatdidf.zig b/lib/compiler_rt/floatdidf.zig
new file mode 100644
index 0000000000..9117e2189d
--- /dev/null
+++ b/lib/compiler_rt/floatdidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = common.linkage });
+ } else {
+ @export(__floatdidf, .{ .name = "__floatdidf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatdidf(a: i64) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_l2d(a: i64) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floatdihf.zig b/lib/compiler_rt/floatdihf.zig
new file mode 100644
index 0000000000..f2f7236d6f
--- /dev/null
+++ b/lib/compiler_rt/floatdihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatdihf, .{ .name = "__floatdihf", .linkage = common.linkage });
+}
+
+fn __floatdihf(a: i64) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floatdisf.zig b/lib/compiler_rt/floatdisf.zig
new file mode 100644
index 0000000000..3de94c5103
--- /dev/null
+++ b/lib/compiler_rt/floatdisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = common.linkage });
+ } else {
+ @export(__floatdisf, .{ .name = "__floatdisf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatdisf(a: i64) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_l2f(a: i64) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floatditf.zig b/lib/compiler_rt/floatditf.zig
new file mode 100644
index 0000000000..731c6d8d86
--- /dev/null
+++ b/lib/compiler_rt/floatditf.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatdikf, .{ .name = "__floatdikf", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = common.linkage });
+ } else {
+ @export(__floatditf, .{ .name = "__floatditf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatditf(a: i64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatdikf(a: i64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn _Qp_xtoq(c: *f128, a: i64) callconv(.C) void {
+ c.* = intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floatdixf.zig b/lib/compiler_rt/floatdixf.zig
new file mode 100644
index 0000000000..7d80fdbeb8
--- /dev/null
+++ b/lib/compiler_rt/floatdixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatdixf, .{ .name = "__floatdixf", .linkage = common.linkage });
+}
+
+fn __floatdixf(a: i64) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floatsidf.zig b/lib/compiler_rt/floatsidf.zig
new file mode 100644
index 0000000000..e31c2616fd
--- /dev/null
+++ b/lib/compiler_rt/floatsidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = common.linkage });
+ } else {
+ @export(__floatsidf, .{ .name = "__floatsidf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatsidf(a: i32) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_i2d(a: i32) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floatsihf.zig b/lib/compiler_rt/floatsihf.zig
new file mode 100644
index 0000000000..84b54298b5
--- /dev/null
+++ b/lib/compiler_rt/floatsihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatsihf, .{ .name = "__floatsihf", .linkage = common.linkage });
+}
+
+fn __floatsihf(a: i32) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floatsisf.zig b/lib/compiler_rt/floatsisf.zig
new file mode 100644
index 0000000000..87f83315c1
--- /dev/null
+++ b/lib/compiler_rt/floatsisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = common.linkage });
+ } else {
+ @export(__floatsisf, .{ .name = "__floatsisf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatsisf(a: i32) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_i2f(a: i32) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floatsitf.zig b/lib/compiler_rt/floatsitf.zig
new file mode 100644
index 0000000000..0954199170
--- /dev/null
+++ b/lib/compiler_rt/floatsitf.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatsikf, .{ .name = "__floatsikf", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = common.linkage });
+ } else {
+ @export(__floatsitf, .{ .name = "__floatsitf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatsitf(a: i32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatsikf(a: i32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn _Qp_itoq(c: *f128, a: i32) callconv(.C) void {
+ c.* = intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floatsixf.zig b/lib/compiler_rt/floatsixf.zig
new file mode 100644
index 0000000000..76d266e17a
--- /dev/null
+++ b/lib/compiler_rt/floatsixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatsixf, .{ .name = "__floatsixf", .linkage = common.linkage });
+}
+
+fn __floatsixf(a: i32) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floattidf.zig b/lib/compiler_rt/floattidf.zig
new file mode 100644
index 0000000000..1f1ac2f2ef
--- /dev/null
+++ b/lib/compiler_rt/floattidf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattidf, .{ .name = "__floattidf", .linkage = common.linkage });
+}
+
+pub fn __floattidf(a: i128) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floattihf.zig b/lib/compiler_rt/floattihf.zig
new file mode 100644
index 0000000000..c7e45c7d53
--- /dev/null
+++ b/lib/compiler_rt/floattihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattihf, .{ .name = "__floattihf", .linkage = common.linkage });
+}
+
+fn __floattihf(a: i128) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floattisf.zig b/lib/compiler_rt/floattisf.zig
new file mode 100644
index 0000000000..5eb493d09b
--- /dev/null
+++ b/lib/compiler_rt/floattisf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattisf, .{ .name = "__floattisf", .linkage = common.linkage });
+}
+
+pub fn __floattisf(a: i128) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floattitf.zig b/lib/compiler_rt/floattitf.zig
new file mode 100644
index 0000000000..0764c2d2c2
--- /dev/null
+++ b/lib/compiler_rt/floattitf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattitf, .{ .name = "__floattitf", .linkage = common.linkage });
+}
+
+pub fn __floattitf(a: i128) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floattixf.zig b/lib/compiler_rt/floattixf.zig
new file mode 100644
index 0000000000..def9bef4d5
--- /dev/null
+++ b/lib/compiler_rt/floattixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattixf, .{ .name = "__floattixf", .linkage = common.linkage });
+}
+
+fn __floattixf(a: i128) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floatundidf.zig b/lib/compiler_rt/floatundidf.zig
new file mode 100644
index 0000000000..d49575639e
--- /dev/null
+++ b/lib/compiler_rt/floatundidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = common.linkage });
+ } else {
+ @export(__floatundidf, .{ .name = "__floatundidf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatundidf(a: u64) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_ul2d(a: u64) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floatundihf.zig b/lib/compiler_rt/floatundihf.zig
new file mode 100644
index 0000000000..6eff8aaec3
--- /dev/null
+++ b/lib/compiler_rt/floatundihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatundihf, .{ .name = "__floatundihf", .linkage = common.linkage });
+}
+
+fn __floatundihf(a: u64) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floatundisf.zig b/lib/compiler_rt/floatundisf.zig
new file mode 100644
index 0000000000..963670d85b
--- /dev/null
+++ b/lib/compiler_rt/floatundisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = common.linkage });
+ } else {
+ @export(__floatundisf, .{ .name = "__floatundisf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatundisf(a: u64) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_ul2f(a: u64) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floatunditf.zig b/lib/compiler_rt/floatunditf.zig
new file mode 100644
index 0000000000..1eda21891d
--- /dev/null
+++ b/lib/compiler_rt/floatunditf.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatundikf, .{ .name = "__floatundikf", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = common.linkage });
+ } else {
+ @export(__floatunditf, .{ .name = "__floatunditf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatunditf(a: u64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatundikf(a: u64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn _Qp_uxtoq(c: *f128, a: u64) callconv(.C) void {
+ c.* = intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floatundixf.zig b/lib/compiler_rt/floatundixf.zig
new file mode 100644
index 0000000000..331b74df4f
--- /dev/null
+++ b/lib/compiler_rt/floatundixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatundixf, .{ .name = "__floatundixf", .linkage = common.linkage });
+}
+
+fn __floatundixf(a: u64) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floatunsidf.zig b/lib/compiler_rt/floatunsidf.zig
new file mode 100644
index 0000000000..1f5a47287a
--- /dev/null
+++ b/lib/compiler_rt/floatunsidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = common.linkage });
+ } else {
+ @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatunsidf(a: u32) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_ui2d(a: u32) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floatunsihf.zig b/lib/compiler_rt/floatunsihf.zig
new file mode 100644
index 0000000000..b2f679c18c
--- /dev/null
+++ b/lib/compiler_rt/floatunsihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatunsihf, .{ .name = "__floatunsihf", .linkage = common.linkage });
+}
+
+pub fn __floatunsihf(a: u32) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floatunsisf.zig b/lib/compiler_rt/floatunsisf.zig
new file mode 100644
index 0000000000..46f336a4d8
--- /dev/null
+++ b/lib/compiler_rt/floatunsisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = common.linkage });
+ } else {
+ @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatunsisf(a: u32) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_ui2f(a: u32) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floatunsitf.zig b/lib/compiler_rt/floatunsitf.zig
new file mode 100644
index 0000000000..bee656c801
--- /dev/null
+++ b/lib/compiler_rt/floatunsitf.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatunsikf, .{ .name = "__floatunsikf", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = common.linkage });
+ } else {
+ @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatunsitf(a: u32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatunsikf(a: u32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn _Qp_uitoq(c: *f128, a: u32) callconv(.C) void {
+ c.* = intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floatunsixf.zig b/lib/compiler_rt/floatunsixf.zig
new file mode 100644
index 0000000000..40492564fc
--- /dev/null
+++ b/lib/compiler_rt/floatunsixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatunsixf, .{ .name = "__floatunsixf", .linkage = common.linkage });
+}
+
+fn __floatunsixf(a: u32) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floatuntidf.zig b/lib/compiler_rt/floatuntidf.zig
new file mode 100644
index 0000000000..a77a952fe9
--- /dev/null
+++ b/lib/compiler_rt/floatuntidf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = common.linkage });
+}
+
+pub fn __floatuntidf(a: u128) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
diff --git a/lib/compiler_rt/floatuntihf.zig b/lib/compiler_rt/floatuntihf.zig
new file mode 100644
index 0000000000..0263b1da98
--- /dev/null
+++ b/lib/compiler_rt/floatuntihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntihf, .{ .name = "__floatuntihf", .linkage = common.linkage });
+}
+
+fn __floatuntihf(a: u128) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
diff --git a/lib/compiler_rt/floatuntisf.zig b/lib/compiler_rt/floatuntisf.zig
new file mode 100644
index 0000000000..3edf636987
--- /dev/null
+++ b/lib/compiler_rt/floatuntisf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = common.linkage });
+}
+
+pub fn __floatuntisf(a: u128) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
diff --git a/lib/compiler_rt/floatuntitf.zig b/lib/compiler_rt/floatuntitf.zig
new file mode 100644
index 0000000000..1a755cccdb
--- /dev/null
+++ b/lib/compiler_rt/floatuntitf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatuntikf, .{ .name = "__floatuntikf", .linkage = common.linkage });
+ } else {
+ @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = common.linkage });
+ }
+}
+
+pub fn __floatuntitf(a: u128) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatuntikf(a: u128) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
diff --git a/lib/compiler_rt/floatuntixf.zig b/lib/compiler_rt/floatuntixf.zig
new file mode 100644
index 0000000000..07017d1f57
--- /dev/null
+++ b/lib/compiler_rt/floatuntixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = common.linkage });
+}
+
+pub fn __floatuntixf(a: u128) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
diff --git a/lib/compiler_rt/floor.zig b/lib/compiler_rt/floor.zig
new file mode 100644
index 0000000000..ef02786eb4
--- /dev/null
+++ b/lib/compiler_rt/floor.zig
@@ -0,0 +1,224 @@
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/floorf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/floor.c
+
+const std = @import("std");
+const builtin = @import("builtin");
+const math = std.math;
+const expect = std.testing.expect;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floorh, .{ .name = "__floorh", .linkage = common.linkage });
+ @export(floorf, .{ .name = "floorf", .linkage = common.linkage });
+ @export(floor, .{ .name = "floor", .linkage = common.linkage });
+ @export(__floorx, .{ .name = "__floorx", .linkage = common.linkage });
+ const floorq_sym_name = if (common.want_ppc_abi) "floorf128" else "floorq";
+ @export(floorq, .{ .name = floorq_sym_name, .linkage = common.linkage });
+ @export(floorl, .{ .name = "floorl", .linkage = common.linkage });
+}
+
+pub fn __floorh(x: f16) callconv(.C) f16 {
+ var u = @bitCast(u16, x);
+ const e = @intCast(i16, (u >> 10) & 31) - 15;
+ var m: u16 = undefined;
+
+ // TODO: Shouldn't need this explicit check.
+ if (x == 0.0) {
+ return x;
+ }
+
+ if (e >= 10) {
+ return x;
+ }
+
+ if (e >= 0) {
+ m = @as(u16, 1023) >> @intCast(u4, e);
+ if (u & m == 0) {
+ return x;
+ }
+ math.doNotOptimizeAway(x + 0x1.0p120);
+ if (u >> 15 != 0) {
+ u += m;
+ }
+ return @bitCast(f16, u & ~m);
+ } else {
+ math.doNotOptimizeAway(x + 0x1.0p120);
+ if (u >> 15 == 0) {
+ return 0.0;
+ } else {
+ return -1.0;
+ }
+ }
+}
+
+pub fn floorf(x: f32) callconv(.C) f32 {
+ var u = @bitCast(u32, x);
+ const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
+ var m: u32 = undefined;
+
+ // TODO: Shouldn't need this explicit check.
+ if (x == 0.0) {
+ return x;
+ }
+
+ if (e >= 23) {
+ return x;
+ }
+
+ if (e >= 0) {
+ m = @as(u32, 0x007FFFFF) >> @intCast(u5, e);
+ if (u & m == 0) {
+ return x;
+ }
+ math.doNotOptimizeAway(x + 0x1.0p120);
+ if (u >> 31 != 0) {
+ u += m;
+ }
+ return @bitCast(f32, u & ~m);
+ } else {
+ math.doNotOptimizeAway(x + 0x1.0p120);
+ if (u >> 31 == 0) {
+ return 0.0;
+ } else {
+ return -1.0;
+ }
+ }
+}
+
+pub fn floor(x: f64) callconv(.C) f64 {
+ const f64_toint = 1.0 / math.floatEps(f64);
+
+ const u = @bitCast(u64, x);
+ const e = (u >> 52) & 0x7FF;
+ var y: f64 = undefined;
+
+ if (e >= 0x3FF + 52 or x == 0) {
+ return x;
+ }
+
+ if (u >> 63 != 0) {
+ y = x - f64_toint + f64_toint - x;
+ } else {
+ y = x + f64_toint - f64_toint - x;
+ }
+
+ if (e <= 0x3FF - 1) {
+ math.doNotOptimizeAway(y);
+ if (u >> 63 != 0) {
+ return -1.0;
+ } else {
+ return 0.0;
+ }
+ } else if (y > 0) {
+ return x + y - 1;
+ } else {
+ return x + y;
+ }
+}
+
+pub fn __floorx(x: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, floorq(x));
+}
+
+pub fn floorq(x: f128) callconv(.C) f128 {
+ const f128_toint = 1.0 / math.floatEps(f128);
+
+ const u = @bitCast(u128, x);
+ const e = (u >> 112) & 0x7FFF;
+ var y: f128 = undefined;
+
+ if (e >= 0x3FFF + 112 or x == 0) return x;
+
+ if (u >> 127 != 0) {
+ y = x - f128_toint + f128_toint - x;
+ } else {
+ y = x + f128_toint - f128_toint - x;
+ }
+
+ if (e <= 0x3FFF - 1) {
+ math.doNotOptimizeAway(y);
+ if (u >> 127 != 0) {
+ return -1.0;
+ } else {
+ return 0.0;
+ }
+ } else if (y > 0) {
+ return x + y - 1;
+ } else {
+ return x + y;
+ }
+}
+
+pub fn floorl(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __floorh(x),
+ 32 => return floorf(x),
+ 64 => return floor(x),
+ 80 => return __floorx(x),
+ 128 => return floorq(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+test "floor16" {
+ try expect(__floorh(1.3) == 1.0);
+ try expect(__floorh(-1.3) == -2.0);
+ try expect(__floorh(0.2) == 0.0);
+}
+
+test "floor32" {
+ try expect(floorf(1.3) == 1.0);
+ try expect(floorf(-1.3) == -2.0);
+ try expect(floorf(0.2) == 0.0);
+}
+
+test "floor64" {
+ try expect(floor(1.3) == 1.0);
+ try expect(floor(-1.3) == -2.0);
+ try expect(floor(0.2) == 0.0);
+}
+
+test "floor128" {
+ try expect(floorq(1.3) == 1.0);
+ try expect(floorq(-1.3) == -2.0);
+ try expect(floorq(0.2) == 0.0);
+}
+
+test "floor16.special" {
+ try expect(__floorh(0.0) == 0.0);
+ try expect(__floorh(-0.0) == -0.0);
+ try expect(math.isPositiveInf(__floorh(math.inf(f16))));
+ try expect(math.isNegativeInf(__floorh(-math.inf(f16))));
+ try expect(math.isNan(__floorh(math.nan(f16))));
+}
+
+test "floor32.special" {
+ try expect(floorf(0.0) == 0.0);
+ try expect(floorf(-0.0) == -0.0);
+ try expect(math.isPositiveInf(floorf(math.inf(f32))));
+ try expect(math.isNegativeInf(floorf(-math.inf(f32))));
+ try expect(math.isNan(floorf(math.nan(f32))));
+}
+
+test "floor64.special" {
+ try expect(floor(0.0) == 0.0);
+ try expect(floor(-0.0) == -0.0);
+ try expect(math.isPositiveInf(floor(math.inf(f64))));
+ try expect(math.isNegativeInf(floor(-math.inf(f64))));
+ try expect(math.isNan(floor(math.nan(f64))));
+}
+
+test "floor128.special" {
+ try expect(floorq(0.0) == 0.0);
+ try expect(floorq(-0.0) == -0.0);
+ try expect(math.isPositiveInf(floorq(math.inf(f128))));
+ try expect(math.isNegativeInf(floorq(-math.inf(f128))));
+ try expect(math.isNan(floorq(math.nan(f128))));
+}
diff --git a/lib/std/math/fma.zig b/lib/compiler_rt/fma.zig
similarity index 68%
rename from lib/std/math/fma.zig
rename to lib/compiler_rt/fma.zig
index dd76481e10..aa37276ac3 100644
--- a/lib/std/math/fma.zig
+++ b/lib/compiler_rt/fma.zig
@@ -1,29 +1,35 @@
-// Ported from musl, which is MIT licensed:
-// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
-//
-// https://git.musl-libc.org/cgit/musl/tree/src/math/fmal.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/fmaf.c
-// https://git.musl-libc.org/cgit/musl/tree/src/math/fma.c
+//! Ported from musl, which is MIT licensed:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/fmal.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/fmaf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/fma.c
-const std = @import("../std.zig");
+const std = @import("std");
+const builtin = @import("builtin");
const math = std.math;
const expect = std.testing.expect;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
-/// Returns x * y + z with a single rounding error.
-pub fn fma(comptime T: type, x: T, y: T, z: T) T {
- return switch (T) {
- f32 => fma32(x, y, z),
- f64 => fma64(x, y, z),
- f128 => fma128(x, y, z),
+pub const panic = common.panic;
- // TODO this is not correct for some targets
- c_longdouble => @floatCast(c_longdouble, fma128(x, y, z)),
-
- else => @compileError("fma not implemented for " ++ @typeName(T)),
- };
+comptime {
+ @export(__fmah, .{ .name = "__fmah", .linkage = common.linkage });
+ @export(fmaf, .{ .name = "fmaf", .linkage = common.linkage });
+ @export(fma, .{ .name = "fma", .linkage = common.linkage });
+ @export(__fmax, .{ .name = "__fmax", .linkage = common.linkage });
+ const fmaq_sym_name = if (common.want_ppc_abi) "fmaf128" else "fmaq";
+ @export(fmaq, .{ .name = fmaq_sym_name, .linkage = common.linkage });
+ @export(fmal, .{ .name = "fmal", .linkage = common.linkage });
}
-fn fma32(x: f32, y: f32, z: f32) f32 {
+pub fn __fmah(x: f16, y: f16, z: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, fmaf(x, y, z));
+}
+
+pub fn fmaf(x: f32, y: f32, z: f32) callconv(.C) f32 {
const xy = @as(f64, x) * y;
const xy_z = xy + z;
const u = @bitCast(u64, xy_z);
@@ -37,8 +43,8 @@ fn fma32(x: f32, y: f32, z: f32) f32 {
}
}
-// NOTE: Upstream fma.c has been rewritten completely to raise fp exceptions more accurately.
-fn fma64(x: f64, y: f64, z: f64) f64 {
+/// NOTE: Upstream fma.c has been rewritten completely to raise fp exceptions more accurately.
+pub fn fma(x: f64, y: f64, z: f64) callconv(.C) f64 {
if (!math.isFinite(x) or !math.isFinite(y)) {
return x * y + z;
}
@@ -66,7 +72,7 @@ fn fma64(x: f64, y: f64, z: f64) f64 {
if (spread <= 53 * 2) {
zs = math.scalbn(zs, -spread);
} else {
- zs = math.copysign(f64, math.f64_min, zs);
+ zs = math.copysign(math.floatMin(f64), zs);
}
const xy = dd_mul(xs, ys);
@@ -85,6 +91,76 @@ fn fma64(x: f64, y: f64, z: f64) f64 {
}
}
+pub fn __fmax(a: f80, b: f80, c: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, fmaq(a, b, c));
+}
+
+/// Fused multiply-add: Compute x * y + z with a single rounding error.
+///
+/// We use scaling to avoid overflow/underflow, along with the
+/// canonical precision-doubling technique adapted from:
+///
+/// Dekker, T. A Floating-Point Technique for Extending the
+/// Available Precision. Numer. Math. 18, 224-242 (1971).
+pub fn fmaq(x: f128, y: f128, z: f128) callconv(.C) f128 {
+ if (!math.isFinite(x) or !math.isFinite(y)) {
+ return x * y + z;
+ }
+ if (!math.isFinite(z)) {
+ return z;
+ }
+ if (x == 0.0 or y == 0.0) {
+ return x * y + z;
+ }
+ if (z == 0.0) {
+ return x * y;
+ }
+
+ const x1 = math.frexp(x);
+ var ex = x1.exponent;
+ var xs = x1.significand;
+ const x2 = math.frexp(y);
+ var ey = x2.exponent;
+ var ys = x2.significand;
+ const x3 = math.frexp(z);
+ var ez = x3.exponent;
+ var zs = x3.significand;
+
+ var spread = ex + ey - ez;
+ if (spread <= 113 * 2) {
+ zs = math.scalbn(zs, -spread);
+ } else {
+ zs = math.copysign(math.floatMin(f128), zs);
+ }
+
+ const xy = dd_mul128(xs, ys);
+ const r = dd_add128(xy.hi, zs);
+ spread = ex + ey;
+
+ if (r.hi == 0.0) {
+ return xy.hi + zs + math.scalbn(xy.lo, spread);
+ }
+
+ const adj = add_adjusted128(r.lo, xy.lo);
+ if (spread + math.ilogb(r.hi) > -16383) {
+ return math.scalbn(r.hi + adj, spread);
+ } else {
+ return add_and_denorm128(r.hi, adj, spread);
+ }
+}
+
+pub fn fmal(x: c_longdouble, y: c_longdouble, z: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __fmah(x, y, z),
+ 32 => return fmaf(x, y, z),
+ 64 => return fma(x, y, z),
+ 80 => return __fmax(x, y, z),
+ 128 => return fmaq(x, y, z),
+ else => @compileError("unreachable"),
+ }
+}
+
const dd = struct {
hi: f64,
lo: f64,
@@ -240,98 +316,38 @@ fn dd_mul128(a: f128, b: f128) dd128 {
return ret;
}
-/// Fused multiply-add: Compute x * y + z with a single rounding error.
-///
-/// We use scaling to avoid overflow/underflow, along with the
-/// canonical precision-doubling technique adapted from:
-///
-/// Dekker, T. A Floating-Point Technique for Extending the
-/// Available Precision. Numer. Math. 18, 224-242 (1971).
-fn fma128(x: f128, y: f128, z: f128) f128 {
- if (!math.isFinite(x) or !math.isFinite(y)) {
- return x * y + z;
- }
- if (!math.isFinite(z)) {
- return z;
- }
- if (x == 0.0 or y == 0.0) {
- return x * y + z;
- }
- if (z == 0.0) {
- return x * y;
- }
-
- const x1 = math.frexp(x);
- var ex = x1.exponent;
- var xs = x1.significand;
- const x2 = math.frexp(y);
- var ey = x2.exponent;
- var ys = x2.significand;
- const x3 = math.frexp(z);
- var ez = x3.exponent;
- var zs = x3.significand;
-
- var spread = ex + ey - ez;
- if (spread <= 113 * 2) {
- zs = math.scalbn(zs, -spread);
- } else {
- zs = math.copysign(f128, math.f128_min, zs);
- }
-
- const xy = dd_mul128(xs, ys);
- const r = dd_add128(xy.hi, zs);
- spread = ex + ey;
-
- if (r.hi == 0.0) {
- return xy.hi + zs + math.scalbn(xy.lo, spread);
- }
-
- const adj = add_adjusted128(r.lo, xy.lo);
- if (spread + math.ilogb(r.hi) > -16383) {
- return math.scalbn(r.hi + adj, spread);
- } else {
- return add_and_denorm128(r.hi, adj, spread);
- }
-}
-
-test "type dispatch" {
- try expect(fma(f32, 0.0, 1.0, 1.0) == fma32(0.0, 1.0, 1.0));
- try expect(fma(f64, 0.0, 1.0, 1.0) == fma64(0.0, 1.0, 1.0));
- try expect(fma(f128, 0.0, 1.0, 1.0) == fma128(0.0, 1.0, 1.0));
-}
-
test "32" {
const epsilon = 0.000001;
- try expect(math.approxEqAbs(f32, fma32(0.0, 5.0, 9.124), 9.124, epsilon));
- try expect(math.approxEqAbs(f32, fma32(0.2, 5.0, 9.124), 10.124, epsilon));
- try expect(math.approxEqAbs(f32, fma32(0.8923, 5.0, 9.124), 13.5855, epsilon));
- try expect(math.approxEqAbs(f32, fma32(1.5, 5.0, 9.124), 16.624, epsilon));
- try expect(math.approxEqAbs(f32, fma32(37.45, 5.0, 9.124), 196.374004, epsilon));
- try expect(math.approxEqAbs(f32, fma32(89.123, 5.0, 9.124), 454.739005, epsilon));
- try expect(math.approxEqAbs(f32, fma32(123123.234375, 5.0, 9.124), 615625.295875, epsilon));
+ try expect(math.approxEqAbs(f32, fmaf(0.0, 5.0, 9.124), 9.124, epsilon));
+ try expect(math.approxEqAbs(f32, fmaf(0.2, 5.0, 9.124), 10.124, epsilon));
+ try expect(math.approxEqAbs(f32, fmaf(0.8923, 5.0, 9.124), 13.5855, epsilon));
+ try expect(math.approxEqAbs(f32, fmaf(1.5, 5.0, 9.124), 16.624, epsilon));
+ try expect(math.approxEqAbs(f32, fmaf(37.45, 5.0, 9.124), 196.374004, epsilon));
+ try expect(math.approxEqAbs(f32, fmaf(89.123, 5.0, 9.124), 454.739005, epsilon));
+ try expect(math.approxEqAbs(f32, fmaf(123123.234375, 5.0, 9.124), 615625.295875, epsilon));
}
test "64" {
const epsilon = 0.000001;
- try expect(math.approxEqAbs(f64, fma64(0.0, 5.0, 9.124), 9.124, epsilon));
- try expect(math.approxEqAbs(f64, fma64(0.2, 5.0, 9.124), 10.124, epsilon));
- try expect(math.approxEqAbs(f64, fma64(0.8923, 5.0, 9.124), 13.5855, epsilon));
- try expect(math.approxEqAbs(f64, fma64(1.5, 5.0, 9.124), 16.624, epsilon));
- try expect(math.approxEqAbs(f64, fma64(37.45, 5.0, 9.124), 196.374, epsilon));
- try expect(math.approxEqAbs(f64, fma64(89.123, 5.0, 9.124), 454.739, epsilon));
- try expect(math.approxEqAbs(f64, fma64(123123.234375, 5.0, 9.124), 615625.295875, epsilon));
+ try expect(math.approxEqAbs(f64, fma(0.0, 5.0, 9.124), 9.124, epsilon));
+ try expect(math.approxEqAbs(f64, fma(0.2, 5.0, 9.124), 10.124, epsilon));
+ try expect(math.approxEqAbs(f64, fma(0.8923, 5.0, 9.124), 13.5855, epsilon));
+ try expect(math.approxEqAbs(f64, fma(1.5, 5.0, 9.124), 16.624, epsilon));
+ try expect(math.approxEqAbs(f64, fma(37.45, 5.0, 9.124), 196.374, epsilon));
+ try expect(math.approxEqAbs(f64, fma(89.123, 5.0, 9.124), 454.739, epsilon));
+ try expect(math.approxEqAbs(f64, fma(123123.234375, 5.0, 9.124), 615625.295875, epsilon));
}
test "128" {
const epsilon = 0.000001;
- try expect(math.approxEqAbs(f128, fma128(0.0, 5.0, 9.124), 9.124, epsilon));
- try expect(math.approxEqAbs(f128, fma128(0.2, 5.0, 9.124), 10.124, epsilon));
- try expect(math.approxEqAbs(f128, fma128(0.8923, 5.0, 9.124), 13.5855, epsilon));
- try expect(math.approxEqAbs(f128, fma128(1.5, 5.0, 9.124), 16.624, epsilon));
- try expect(math.approxEqAbs(f128, fma128(37.45, 5.0, 9.124), 196.374, epsilon));
- try expect(math.approxEqAbs(f128, fma128(89.123, 5.0, 9.124), 454.739, epsilon));
- try expect(math.approxEqAbs(f128, fma128(123123.234375, 5.0, 9.124), 615625.295875, epsilon));
+ try expect(math.approxEqAbs(f128, fmaq(0.0, 5.0, 9.124), 9.124, epsilon));
+ try expect(math.approxEqAbs(f128, fmaq(0.2, 5.0, 9.124), 10.124, epsilon));
+ try expect(math.approxEqAbs(f128, fmaq(0.8923, 5.0, 9.124), 13.5855, epsilon));
+ try expect(math.approxEqAbs(f128, fmaq(1.5, 5.0, 9.124), 16.624, epsilon));
+ try expect(math.approxEqAbs(f128, fmaq(37.45, 5.0, 9.124), 196.374, epsilon));
+ try expect(math.approxEqAbs(f128, fmaq(89.123, 5.0, 9.124), 454.739, epsilon));
+ try expect(math.approxEqAbs(f128, fmaq(123123.234375, 5.0, 9.124), 615625.295875, epsilon));
}
diff --git a/lib/compiler_rt/fmax.zig b/lib/compiler_rt/fmax.zig
new file mode 100644
index 0000000000..5fb87e0183
--- /dev/null
+++ b/lib/compiler_rt/fmax.zig
@@ -0,0 +1,69 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const math = std.math;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fmaxh, .{ .name = "__fmaxh", .linkage = common.linkage });
+ @export(fmaxf, .{ .name = "fmaxf", .linkage = common.linkage });
+ @export(fmax, .{ .name = "fmax", .linkage = common.linkage });
+ @export(__fmaxx, .{ .name = "__fmaxx", .linkage = common.linkage });
+ const fmaxq_sym_name = if (common.want_ppc_abi) "fmaxf128" else "fmaxq";
+ @export(fmaxq, .{ .name = fmaxq_sym_name, .linkage = common.linkage });
+ @export(fmaxl, .{ .name = "fmaxl", .linkage = common.linkage });
+}
+
+pub fn __fmaxh(x: f16, y: f16) callconv(.C) f16 {
+ return generic_fmax(f16, x, y);
+}
+
+pub fn fmaxf(x: f32, y: f32) callconv(.C) f32 {
+ return generic_fmax(f32, x, y);
+}
+
+pub fn fmax(x: f64, y: f64) callconv(.C) f64 {
+ return generic_fmax(f64, x, y);
+}
+
+pub fn __fmaxx(x: f80, y: f80) callconv(.C) f80 {
+ return generic_fmax(f80, x, y);
+}
+
+pub fn fmaxq(x: f128, y: f128) callconv(.C) f128 {
+ return generic_fmax(f128, x, y);
+}
+
+pub fn fmaxl(x: c_longdouble, y: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __fmaxh(x, y),
+ 32 => return fmaxf(x, y),
+ 64 => return fmax(x, y),
+ 80 => return __fmaxx(x, y),
+ 128 => return fmaxq(x, y),
+ else => @compileError("unreachable"),
+ }
+}
+
+inline fn generic_fmax(comptime T: type, x: T, y: T) T {
+ if (math.isNan(x))
+ return y;
+ if (math.isNan(y))
+ return x;
+ return if (x < y) y else x;
+}
+
+test "generic_fmax" {
+ inline for ([_]type{ f32, f64, c_longdouble, f80, f128 }) |T| {
+ const nan_val = math.nan(T);
+
+ try std.testing.expect(math.isNan(generic_fmax(T, nan_val, nan_val)));
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, nan_val, 1.0));
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, 1.0, nan_val));
+
+ try std.testing.expectEqual(@as(T, 10.0), generic_fmax(T, 1.0, 10.0));
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmax(T, 1.0, -1.0));
+ }
+}
diff --git a/lib/compiler_rt/fmin.zig b/lib/compiler_rt/fmin.zig
new file mode 100644
index 0000000000..cc2fd7b3ac
--- /dev/null
+++ b/lib/compiler_rt/fmin.zig
@@ -0,0 +1,69 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const math = std.math;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fminh, .{ .name = "__fminh", .linkage = common.linkage });
+ @export(fminf, .{ .name = "fminf", .linkage = common.linkage });
+ @export(fmin, .{ .name = "fmin", .linkage = common.linkage });
+ @export(__fminx, .{ .name = "__fminx", .linkage = common.linkage });
+ const fminq_sym_name = if (common.want_ppc_abi) "fminf128" else "fminq";
+ @export(fminq, .{ .name = fminq_sym_name, .linkage = common.linkage });
+ @export(fminl, .{ .name = "fminl", .linkage = common.linkage });
+}
+
+pub fn __fminh(x: f16, y: f16) callconv(.C) f16 {
+ return generic_fmin(f16, x, y);
+}
+
+pub fn fminf(x: f32, y: f32) callconv(.C) f32 {
+ return generic_fmin(f32, x, y);
+}
+
+pub fn fmin(x: f64, y: f64) callconv(.C) f64 {
+ return generic_fmin(f64, x, y);
+}
+
+pub fn __fminx(x: f80, y: f80) callconv(.C) f80 {
+ return generic_fmin(f80, x, y);
+}
+
+pub fn fminq(x: f128, y: f128) callconv(.C) f128 {
+ return generic_fmin(f128, x, y);
+}
+
+pub fn fminl(x: c_longdouble, y: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __fminh(x, y),
+ 32 => return fminf(x, y),
+ 64 => return fmin(x, y),
+ 80 => return __fminx(x, y),
+ 128 => return fminq(x, y),
+ else => @compileError("unreachable"),
+ }
+}
+
+inline fn generic_fmin(comptime T: type, x: T, y: T) T {
+ if (math.isNan(x))
+ return y;
+ if (math.isNan(y))
+ return x;
+ return if (x < y) x else y;
+}
+
+test "generic_fmin" {
+ inline for ([_]type{ f32, f64, c_longdouble, f80, f128 }) |T| {
+ const nan_val = math.nan(T);
+
+ try std.testing.expect(math.isNan(generic_fmin(T, nan_val, nan_val)));
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, nan_val, 1.0));
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, 1.0, nan_val));
+
+ try std.testing.expectEqual(@as(T, 1.0), generic_fmin(T, 1.0, 10.0));
+ try std.testing.expectEqual(@as(T, -1.0), generic_fmin(T, 1.0, -1.0));
+ }
+}
diff --git a/lib/compiler_rt/fmod.zig b/lib/compiler_rt/fmod.zig
new file mode 100644
index 0000000000..22b20438cc
--- /dev/null
+++ b/lib/compiler_rt/fmod.zig
@@ -0,0 +1,388 @@
+const builtin = @import("builtin");
+const std = @import("std");
+const math = std.math;
+const assert = std.debug.assert;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+const normalize = common.normalize;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fmodh, .{ .name = "__fmodh", .linkage = common.linkage });
+ @export(fmodf, .{ .name = "fmodf", .linkage = common.linkage });
+ @export(fmod, .{ .name = "fmod", .linkage = common.linkage });
+ @export(__fmodx, .{ .name = "__fmodx", .linkage = common.linkage });
+ const fmodq_sym_name = if (common.want_ppc_abi) "fmodf128" else "fmodq";
+ @export(fmodq, .{ .name = fmodq_sym_name, .linkage = common.linkage });
+ @export(fmodl, .{ .name = "fmodl", .linkage = common.linkage });
+}
+
+pub fn __fmodh(x: f16, y: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, fmodf(x, y));
+}
+
+pub fn fmodf(x: f32, y: f32) callconv(.C) f32 {
+ return generic_fmod(f32, x, y);
+}
+
+pub fn fmod(x: f64, y: f64) callconv(.C) f64 {
+ return generic_fmod(f64, x, y);
+}
+
+/// fmodx - floating modulo large, returns the remainder of division for f80 types
+/// Logic and flow heavily inspired by MUSL fmodl for 113 mantissa digits
+pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
+ const T = f80;
+ const Z = std.meta.Int(.unsigned, @bitSizeOf(T));
+
+ const significandBits = math.floatMantissaBits(T);
+ const fractionalBits = math.floatFractionalBits(T);
+ const exponentBits = math.floatExponentBits(T);
+
+ const signBit = (@as(Z, 1) << (significandBits + exponentBits));
+ const maxExponent = ((1 << exponentBits) - 1);
+
+ var aRep = @bitCast(Z, a);
+ var bRep = @bitCast(Z, b);
+
+ const signA = aRep & signBit;
+ var expA = @intCast(i32, (@bitCast(Z, a) >> significandBits) & maxExponent);
+ var expB = @intCast(i32, (@bitCast(Z, b) >> significandBits) & maxExponent);
+
+ // There are 3 cases where the answer is undefined, check for:
+ // - fmodx(val, 0)
+ // - fmodx(val, NaN)
+ // - fmodx(inf, val)
+ // The sign on checked values does not matter.
+ // Doing (a * b) / (a * b) produces undefined results
+ // because the three cases always produce undefined calculations:
+ // - 0 / 0
+ // - val * NaN
+ // - inf / inf
+ if (b == 0 or math.isNan(b) or expA == maxExponent) {
+ return (a * b) / (a * b);
+ }
+
+ // Remove the sign from both
+ aRep &= ~signBit;
+ bRep &= ~signBit;
+ if (aRep <= bRep) {
+ if (aRep == bRep) {
+ return 0 * a;
+ }
+ return a;
+ }
+
+ if (expA == 0) expA = normalize(f80, &aRep);
+ if (expB == 0) expB = normalize(f80, &bRep);
+
+ var highA: u64 = 0;
+ var highB: u64 = 0;
+ var lowA: u64 = @truncate(u64, aRep);
+ var lowB: u64 = @truncate(u64, bRep);
+
+ while (expA > expB) : (expA -= 1) {
+ var high = highA -% highB;
+ var low = lowA -% lowB;
+ if (lowA < lowB) {
+ high -%= 1;
+ }
+ if (high >> 63 == 0) {
+ if ((high | low) == 0) {
+ return 0 * a;
+ }
+ highA = 2 *% high + (low >> 63);
+ lowA = 2 *% low;
+ } else {
+ highA = 2 *% highA + (lowA >> 63);
+ lowA = 2 *% lowA;
+ }
+ }
+
+ var high = highA -% highB;
+ var low = lowA -% lowB;
+ if (lowA < lowB) {
+ high -%= 1;
+ }
+ if (high >> 63 == 0) {
+ if ((high | low) == 0) {
+ return 0 * a;
+ }
+ highA = high;
+ lowA = low;
+ }
+
+ while ((lowA >> fractionalBits) == 0) {
+ lowA = 2 *% lowA;
+ expA = expA - 1;
+ }
+
+ // Combine the exponent with the sign and significand, normalize if happened to be denormalized
+ if (expA < -fractionalBits) {
+ return @bitCast(T, signA);
+ } else if (expA <= 0) {
+ return @bitCast(T, (lowA >> @intCast(math.Log2Int(u64), 1 - expA)) | signA);
+ } else {
+ return @bitCast(T, lowA | (@as(Z, @intCast(u16, expA)) << significandBits) | signA);
+ }
+}
+
+/// fmodq - floating modulo large, returns the remainder of division for f128 types
+/// Logic and flow heavily inspired by MUSL fmodl for 113 mantissa digits
+pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
+ var amod = a;
+ var bmod = b;
+ const aPtr_u64 = @ptrCast([*]u64, &amod);
+ const bPtr_u64 = @ptrCast([*]u64, &bmod);
+ const aPtr_u16 = @ptrCast([*]u16, &amod);
+ const bPtr_u16 = @ptrCast([*]u16, &bmod);
+
+ const exp_and_sign_index = comptime switch (builtin.target.cpu.arch.endian()) {
+ .Little => 7,
+ .Big => 0,
+ };
+ const low_index = comptime switch (builtin.target.cpu.arch.endian()) {
+ .Little => 0,
+ .Big => 1,
+ };
+ const high_index = comptime switch (builtin.target.cpu.arch.endian()) {
+ .Little => 1,
+ .Big => 0,
+ };
+
+ const signA = aPtr_u16[exp_and_sign_index] & 0x8000;
+ var expA = @intCast(i32, (aPtr_u16[exp_and_sign_index] & 0x7fff));
+ var expB = @intCast(i32, (bPtr_u16[exp_and_sign_index] & 0x7fff));
+
+ // There are 3 cases where the answer is undefined, check for:
+ // - fmodq(val, 0)
+ // - fmodq(val, NaN)
+ // - fmodq(inf, val)
+ // The sign on checked values does not matter.
+ // Doing (a * b) / (a * b) produces undefined results
+ // because the three cases always produce undefined calculations:
+ // - 0 / 0
+ // - val * NaN
+ // - inf / inf
+ if (b == 0 or std.math.isNan(b) or expA == 0x7fff) {
+ return (a * b) / (a * b);
+ }
+
+ // Remove the sign from both
+ aPtr_u16[exp_and_sign_index] = @bitCast(u16, @intCast(i16, expA));
+ bPtr_u16[exp_and_sign_index] = @bitCast(u16, @intCast(i16, expB));
+ if (amod <= bmod) {
+ if (amod == bmod) {
+ return 0 * a;
+ }
+ return a;
+ }
+
+ if (expA == 0) {
+ amod *= 0x1p120;
+ expA = @as(i32, aPtr_u16[exp_and_sign_index]) - 120;
+ }
+
+ if (expB == 0) {
+ bmod *= 0x1p120;
+ expB = @as(i32, bPtr_u16[exp_and_sign_index]) - 120;
+ }
+
+ // OR in extra non-stored mantissa digit
+ var highA: u64 = (aPtr_u64[high_index] & (std.math.maxInt(u64) >> 16)) | 1 << 48;
+ var highB: u64 = (bPtr_u64[high_index] & (std.math.maxInt(u64) >> 16)) | 1 << 48;
+ var lowA: u64 = aPtr_u64[low_index];
+ var lowB: u64 = bPtr_u64[low_index];
+
+ while (expA > expB) : (expA -= 1) {
+ var high = highA -% highB;
+ var low = lowA -% lowB;
+ if (lowA < lowB) {
+ high -%= 1;
+ }
+ if (high >> 63 == 0) {
+ if ((high | low) == 0) {
+ return 0 * a;
+ }
+ highA = 2 *% high + (low >> 63);
+ lowA = 2 *% low;
+ } else {
+ highA = 2 *% highA + (lowA >> 63);
+ lowA = 2 *% lowA;
+ }
+ }
+
+ var high = highA -% highB;
+ var low = lowA -% lowB;
+ if (lowA < lowB) {
+ high -= 1;
+ }
+ if (high >> 63 == 0) {
+ if ((high | low) == 0) {
+ return 0 * a;
+ }
+ highA = high;
+ lowA = low;
+ }
+
+ while (highA >> 48 == 0) {
+ highA = 2 *% highA + (lowA >> 63);
+ lowA = 2 *% lowA;
+ expA = expA - 1;
+ }
+
+ // Overwrite the current amod with the values in highA and lowA
+ aPtr_u64[high_index] = highA;
+ aPtr_u64[low_index] = lowA;
+
+ // Combine the exponent with the sign, normalize if happened to be denormalized
+ if (expA <= 0) {
+ aPtr_u16[exp_and_sign_index] = @truncate(u16, @bitCast(u32, (expA +% 120))) | signA;
+ amod *= 0x1p-120;
+ } else {
+ aPtr_u16[exp_and_sign_index] = @truncate(u16, @bitCast(u32, expA)) | signA;
+ }
+
+ return amod;
+}
+
+pub fn fmodl(a: c_longdouble, b: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __fmodh(a, b),
+ 32 => return fmodf(a, b),
+ 64 => return fmod(a, b),
+ 80 => return __fmodx(a, b),
+ 128 => return fmodq(a, b),
+ else => @compileError("unreachable"),
+ }
+}
+
+inline fn generic_fmod(comptime T: type, x: T, y: T) T {
+ const bits = @typeInfo(T).Float.bits;
+ const uint = std.meta.Int(.unsigned, bits);
+ const log2uint = math.Log2Int(uint);
+ comptime assert(T == f32 or T == f64);
+ const digits = if (T == f32) 23 else 52;
+ const exp_bits = if (T == f32) 9 else 12;
+ const bits_minus_1 = bits - 1;
+ const mask = if (T == f32) 0xff else 0x7ff;
+ var ux = @bitCast(uint, x);
+ var uy = @bitCast(uint, y);
+ var ex = @intCast(i32, (ux >> digits) & mask);
+ var ey = @intCast(i32, (uy >> digits) & mask);
+ const sx = if (T == f32) @intCast(u32, ux & 0x80000000) else @intCast(i32, ux >> bits_minus_1);
+ var i: uint = undefined;
+
+ if (uy << 1 == 0 or math.isNan(@bitCast(T, uy)) or ex == mask)
+ return (x * y) / (x * y);
+
+ if (ux << 1 <= uy << 1) {
+ if (ux << 1 == uy << 1)
+ return 0 * x;
+ return x;
+ }
+
+ // normalize x and y
+ if (ex == 0) {
+ i = ux << exp_bits;
+ while (i >> bits_minus_1 == 0) : ({
+ ex -= 1;
+ i <<= 1;
+ }) {}
+ ux <<= @intCast(log2uint, @bitCast(u32, -ex + 1));
+ } else {
+ ux &= math.maxInt(uint) >> exp_bits;
+ ux |= 1 << digits;
+ }
+ if (ey == 0) {
+ i = uy << exp_bits;
+ while (i >> bits_minus_1 == 0) : ({
+ ey -= 1;
+ i <<= 1;
+ }) {}
+ uy <<= @intCast(log2uint, @bitCast(u32, -ey + 1));
+ } else {
+ uy &= math.maxInt(uint) >> exp_bits;
+ uy |= 1 << digits;
+ }
+
+ // x mod y
+ while (ex > ey) : (ex -= 1) {
+ i = ux -% uy;
+ if (i >> bits_minus_1 == 0) {
+ if (i == 0)
+ return 0 * x;
+ ux = i;
+ }
+ ux <<= 1;
+ }
+ i = ux -% uy;
+ if (i >> bits_minus_1 == 0) {
+ if (i == 0)
+ return 0 * x;
+ ux = i;
+ }
+ while (ux >> digits == 0) : ({
+ ux <<= 1;
+ ex -= 1;
+ }) {}
+
+ // scale result up
+ if (ex > 0) {
+ ux -%= 1 << digits;
+ ux |= @as(uint, @bitCast(u32, ex)) << digits;
+ } else {
+ ux >>= @intCast(log2uint, @bitCast(u32, -ex + 1));
+ }
+ if (T == f32) {
+ ux |= sx;
+ } else {
+ ux |= @intCast(uint, sx) << bits_minus_1;
+ }
+ return @bitCast(T, ux);
+}
+
+test "fmodf" {
+ const nan_val = math.nan(f32);
+ const inf_val = math.inf(f32);
+
+ try std.testing.expect(math.isNan(fmodf(nan_val, 1.0)));
+ try std.testing.expect(math.isNan(fmodf(1.0, nan_val)));
+ try std.testing.expect(math.isNan(fmodf(inf_val, 1.0)));
+ try std.testing.expect(math.isNan(fmodf(0.0, 0.0)));
+ try std.testing.expect(math.isNan(fmodf(1.0, 0.0)));
+
+ try std.testing.expectEqual(@as(f32, 0.0), fmodf(0.0, 2.0));
+ try std.testing.expectEqual(@as(f32, -0.0), fmodf(-0.0, 2.0));
+
+ try std.testing.expectEqual(@as(f32, -2.0), fmodf(-32.0, 10.0));
+ try std.testing.expectEqual(@as(f32, -2.0), fmodf(-32.0, -10.0));
+ try std.testing.expectEqual(@as(f32, 2.0), fmodf(32.0, 10.0));
+ try std.testing.expectEqual(@as(f32, 2.0), fmodf(32.0, -10.0));
+}
+
+test "fmod" {
+ const nan_val = math.nan(f64);
+ const inf_val = math.inf(f64);
+
+ try std.testing.expect(math.isNan(fmod(nan_val, 1.0)));
+ try std.testing.expect(math.isNan(fmod(1.0, nan_val)));
+ try std.testing.expect(math.isNan(fmod(inf_val, 1.0)));
+ try std.testing.expect(math.isNan(fmod(0.0, 0.0)));
+ try std.testing.expect(math.isNan(fmod(1.0, 0.0)));
+
+ try std.testing.expectEqual(@as(f64, 0.0), fmod(0.0, 2.0));
+ try std.testing.expectEqual(@as(f64, -0.0), fmod(-0.0, 2.0));
+
+ try std.testing.expectEqual(@as(f64, -2.0), fmod(-32.0, 10.0));
+ try std.testing.expectEqual(@as(f64, -2.0), fmod(-32.0, -10.0));
+ try std.testing.expectEqual(@as(f64, 2.0), fmod(32.0, 10.0));
+ try std.testing.expectEqual(@as(f64, 2.0), fmod(32.0, -10.0));
+}
+
+test {
+ _ = @import("fmodq_test.zig");
+ _ = @import("fmodx_test.zig");
+}
diff --git a/lib/compiler_rt/fmodq_test.zig b/lib/compiler_rt/fmodq_test.zig
new file mode 100644
index 0000000000..07ddb8d182
--- /dev/null
+++ b/lib/compiler_rt/fmodq_test.zig
@@ -0,0 +1,52 @@
+const std = @import("std");
+const fmod = @import("fmod.zig");
+const testing = std.testing;
+
+fn test_fmodq(a: f128, b: f128, exp: f128) !void {
+ const res = fmod.fmodq(a, b);
+ try testing.expect(exp == res);
+}
+
+fn test_fmodq_nans() !void {
+ try testing.expect(std.math.isNan(fmod.fmodq(1.0, std.math.nan(f128))));
+ try testing.expect(std.math.isNan(fmod.fmodq(1.0, -std.math.nan(f128))));
+ try testing.expect(std.math.isNan(fmod.fmodq(std.math.nan(f128), 1.0)));
+ try testing.expect(std.math.isNan(fmod.fmodq(-std.math.nan(f128), 1.0)));
+}
+
+fn test_fmodq_infs() !void {
+ try testing.expect(fmod.fmodq(1.0, std.math.inf(f128)) == 1.0);
+ try testing.expect(fmod.fmodq(1.0, -std.math.inf(f128)) == 1.0);
+ try testing.expect(std.math.isNan(fmod.fmodq(std.math.inf(f128), 1.0)));
+ try testing.expect(std.math.isNan(fmod.fmodq(-std.math.inf(f128), 1.0)));
+}
+
+test "fmodq" {
+ try test_fmodq(6.8, 4.0, 2.8);
+ try test_fmodq(6.8, -4.0, 2.8);
+ try test_fmodq(-6.8, 4.0, -2.8);
+ try test_fmodq(-6.8, -4.0, -2.8);
+ try test_fmodq(3.0, 2.0, 1.0);
+ try test_fmodq(-5.0, 3.0, -2.0);
+ try test_fmodq(3.0, 2.0, 1.0);
+ try test_fmodq(1.0, 2.0, 1.0);
+ try test_fmodq(0.0, 1.0, 0.0);
+ try test_fmodq(-0.0, 1.0, -0.0);
+ try test_fmodq(7046119.0, 5558362.0, 1487757.0);
+ try test_fmodq(9010357.0, 1957236.0, 1181413.0);
+ try test_fmodq(5192296858534827628530496329220095, 10.0, 5.0);
+ try test_fmodq(5192296858534827628530496329220095, 922337203681230954775807, 220474884073715748246157);
+
+ // Denormals
+ const a1: f128 = 0xedcb34a235253948765432134674p-16494;
+ const b1: f128 = 0x5d2e38791cfbc0737402da5a9518p-16494;
+ const exp1: f128 = 0x336ec3affb2db8618e4e7d5e1c44p-16494;
+ try test_fmodq(a1, b1, exp1);
+ const a2: f128 = 0x0.7654_3210_fdec_ba98_7654_3210_fdecp-16382;
+ const b2: f128 = 0x0.0012_fdac_bdef_1234_fdec_3222_1111p-16382;
+ const exp2: f128 = 0x0.0001_aecd_9d66_4a6e_67b7_d7d0_a901p-16382;
+ try test_fmodq(a2, b2, exp2);
+
+ try test_fmodq_nans();
+ try test_fmodq_infs();
+}
diff --git a/lib/compiler_rt/fmodx_test.zig b/lib/compiler_rt/fmodx_test.zig
new file mode 100644
index 0000000000..4bb1b5654a
--- /dev/null
+++ b/lib/compiler_rt/fmodx_test.zig
@@ -0,0 +1,51 @@
+const std = @import("std");
+const fmod = @import("fmod.zig");
+const testing = std.testing;
+
+fn test_fmodx(a: f80, b: f80, exp: f80) !void {
+ const res = fmod.__fmodx(a, b);
+ try testing.expect(exp == res);
+}
+
+fn test_fmodx_nans() !void {
+ try testing.expect(std.math.isNan(fmod.__fmodx(1.0, std.math.nan(f80))));
+ try testing.expect(std.math.isNan(fmod.__fmodx(1.0, -std.math.nan(f80))));
+ try testing.expect(std.math.isNan(fmod.__fmodx(std.math.nan(f80), 1.0)));
+ try testing.expect(std.math.isNan(fmod.__fmodx(-std.math.nan(f80), 1.0)));
+}
+
+fn test_fmodx_infs() !void {
+ try testing.expect(fmod.__fmodx(1.0, std.math.inf(f80)) == 1.0);
+ try testing.expect(fmod.__fmodx(1.0, -std.math.inf(f80)) == 1.0);
+ try testing.expect(std.math.isNan(fmod.__fmodx(std.math.inf(f80), 1.0)));
+ try testing.expect(std.math.isNan(fmod.__fmodx(-std.math.inf(f80), 1.0)));
+}
+
+test "fmodx" {
+ try test_fmodx(6.4, 4.0, 2.4);
+ try test_fmodx(6.4, -4.0, 2.4);
+ try test_fmodx(-6.4, 4.0, -2.4);
+ try test_fmodx(-6.4, -4.0, -2.4);
+ try test_fmodx(3.0, 2.0, 1.0);
+ try test_fmodx(-5.0, 3.0, -2.0);
+ try test_fmodx(3.0, 2.0, 1.0);
+ try test_fmodx(1.0, 2.0, 1.0);
+ try test_fmodx(0.0, 1.0, 0.0);
+ try test_fmodx(-0.0, 1.0, -0.0);
+ try test_fmodx(7046119.0, 5558362.0, 1487757.0);
+ try test_fmodx(9010357.0, 1957236.0, 1181413.0);
+ try test_fmodx(9223372036854775807, 10.0, 7.0);
+
+ // Denormals
+ const a1: f80 = 0x0.76e5_9a51_1a92_9ca4p-16381;
+ const b1: f80 = 0x0.2e97_1c3c_8e7d_e03ap-16381;
+ const exp1: f80 = 0x0.19b7_61d7_fd96_dc30p-16381;
+ try test_fmodx(a1, b1, exp1);
+ const a2: f80 = 0x0.76e5_9a51_1a92_9ca4p-16381;
+ const b2: f80 = 0x0.0e97_1c3c_8e7d_e03ap-16381;
+ const exp2: f80 = 0x0.022c_b86c_a6a3_9ad4p-16381;
+ try test_fmodx(a2, b2, exp2);
+
+ try test_fmodx_nans();
+ try test_fmodx_infs();
+}
diff --git a/lib/compiler_rt/gedf2.zig b/lib/compiler_rt/gedf2.zig
new file mode 100644
index 0000000000..684ba665b5
--- /dev/null
+++ b/lib/compiler_rt/gedf2.zig
@@ -0,0 +1,36 @@
+///! The quoted behavior definitions are from
+///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = common.linkage });
+ @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = common.linkage });
+ } else {
+ @export(__gedf2, .{ .name = "__gedf2", .linkage = common.linkage });
+ @export(__gtdf2, .{ .name = "__gtdf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions return a value greater than or equal to zero if neither
+/// argument is NaN, and a is greater than or equal to b."
+pub fn __gedf2(a: f64, b: f64) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f64, comparef.GE, a, b));
+}
+
+/// "These functions return a value greater than zero if neither argument is NaN,
+/// and a is strictly greater than b."
+pub fn __gtdf2(a: f64, b: f64) callconv(.C) i32 {
+ return __gedf2(a, b);
+}
+
+fn __aeabi_dcmpge(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.GE, a, b) != .Less);
+}
+
+fn __aeabi_dcmpgt(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.GE, a, b) == .Greater);
+}
diff --git a/lib/compiler_rt/gesf2.zig b/lib/compiler_rt/gesf2.zig
new file mode 100644
index 0000000000..3d455e52bf
--- /dev/null
+++ b/lib/compiler_rt/gesf2.zig
@@ -0,0 +1,36 @@
+///! The quoted behavior definitions are from
+///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = common.linkage });
+ @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = common.linkage });
+ } else {
+ @export(__gesf2, .{ .name = "__gesf2", .linkage = common.linkage });
+ @export(__gtsf2, .{ .name = "__gtsf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions return a value greater than or equal to zero if neither
+/// argument is NaN, and a is greater than or equal to b."
+pub fn __gesf2(a: f32, b: f32) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f32, comparef.GE, a, b));
+}
+
+/// "These functions return a value greater than zero if neither argument is NaN,
+/// and a is strictly greater than b."
+pub fn __gtsf2(a: f32, b: f32) callconv(.C) i32 {
+ return __gesf2(a, b);
+}
+
+fn __aeabi_fcmpge(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.GE, a, b) != .Less);
+}
+
+fn __aeabi_fcmpgt(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Greater);
+}
diff --git a/lib/compiler_rt/getf2.zig b/lib/compiler_rt/getf2.zig
new file mode 100644
index 0000000000..8d9d39c1f9
--- /dev/null
+++ b/lib/compiler_rt/getf2.zig
@@ -0,0 +1,39 @@
+///! The quoted behavior definitions are from
+///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__gekf2, .{ .name = "__gekf2", .linkage = common.linkage });
+ @export(__gtkf2, .{ .name = "__gtkf2", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ // These exports are handled in cmptf2.zig because gt and ge on sparc
+ // are based on calling _Qp_cmp.
+ } else {
+ @export(__getf2, .{ .name = "__getf2", .linkage = common.linkage });
+ @export(__gttf2, .{ .name = "__gttf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions return a value greater than or equal to zero if neither
+/// argument is NaN, and a is greater than or equal to b."
+fn __getf2(a: f128, b: f128) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f128, comparef.GE, a, b));
+}
+
+/// "These functions return a value greater than zero if neither argument is NaN,
+/// and a is strictly greater than b."
+fn __gttf2(a: f128, b: f128) callconv(.C) i32 {
+ return __getf2(a, b);
+}
+
+fn __gekf2(a: f128, b: f128) callconv(.C) i32 {
+ return __getf2(a, b);
+}
+
+fn __gtkf2(a: f128, b: f128) callconv(.C) i32 {
+ return __getf2(a, b);
+}
diff --git a/lib/compiler_rt/gexf2.zig b/lib/compiler_rt/gexf2.zig
new file mode 100644
index 0000000000..6bb88fbb8f
--- /dev/null
+++ b/lib/compiler_rt/gexf2.zig
@@ -0,0 +1,17 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__gexf2, .{ .name = "__gexf2", .linkage = common.linkage });
+ @export(__gtxf2, .{ .name = "__gtxf2", .linkage = common.linkage });
+}
+
+fn __gexf2(a: f80, b: f80) callconv(.C) i32 {
+ return @enumToInt(comparef.cmp_f80(comparef.GE, a, b));
+}
+
+fn __gtxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __gexf2(a, b);
+}
diff --git a/lib/std/special/compiler_rt/int.zig b/lib/compiler_rt/int.zig
similarity index 93%
rename from lib/std/special/compiler_rt/int.zig
rename to lib/compiler_rt/int.zig
index 0f3400d37e..53205e2ed9 100644
--- a/lib/std/special/compiler_rt/int.zig
+++ b/lib/compiler_rt/int.zig
@@ -1,22 +1,45 @@
-// Builtin functions that operate on integer types
+//! Builtin functions that operate on integer types
+
const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
-
+const arch = builtin.cpu.arch;
+const is_test = builtin.is_test;
+const common = @import("common.zig");
const udivmod = @import("udivmod.zig").udivmod;
-pub fn __divmoddi4(a: i64, b: i64, rem: *i64) callconv(.C) i64 {
- @setRuntimeSafety(builtin.is_test);
+pub const panic = common.panic;
+comptime {
+ @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = common.linkage });
+ @export(__mulsi3, .{ .name = "__mulsi3", .linkage = common.linkage });
+ @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = common.linkage });
+ if (common.want_aeabi) {
+ @export(__aeabi_idiv, .{ .name = "__aeabi_idiv", .linkage = common.linkage });
+ @export(__aeabi_uidiv, .{ .name = "__aeabi_uidiv", .linkage = common.linkage });
+ } else {
+ @export(__divsi3, .{ .name = "__divsi3", .linkage = common.linkage });
+ @export(__udivsi3, .{ .name = "__udivsi3", .linkage = common.linkage });
+ }
+ @export(__divdi3, .{ .name = "__divdi3", .linkage = common.linkage });
+ @export(__udivdi3, .{ .name = "__udivdi3", .linkage = common.linkage });
+ @export(__modsi3, .{ .name = "__modsi3", .linkage = common.linkage });
+ @export(__moddi3, .{ .name = "__moddi3", .linkage = common.linkage });
+ @export(__umodsi3, .{ .name = "__umodsi3", .linkage = common.linkage });
+ @export(__umoddi3, .{ .name = "__umoddi3", .linkage = common.linkage });
+ @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = common.linkage });
+ @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = common.linkage });
+}
+
+pub fn __divmoddi4(a: i64, b: i64, rem: *i64) callconv(.C) i64 {
const d = __divdi3(a, b);
rem.* = a -% (d *% b);
return d;
}
pub fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?*u64) callconv(.C) u64 {
- @setRuntimeSafety(builtin.is_test);
return udivmod(u64, a, b, maybe_rem);
}
@@ -25,8 +48,6 @@ test "test_udivmoddi4" {
}
pub fn __divdi3(a: i64, b: i64) callconv(.C) i64 {
- @setRuntimeSafety(builtin.is_test);
-
// Set aside the sign of the quotient.
const sign = @bitCast(u64, (a ^ b) >> 63);
// Take absolute value of a and b via abs(x) = (x^(x >> 63)) - (x >> 63).
@@ -64,8 +85,6 @@ fn test_one_divdi3(a: i64, b: i64, expected_q: i64) !void {
}
pub fn __moddi3(a: i64, b: i64) callconv(.C) i64 {
- @setRuntimeSafety(builtin.is_test);
-
// Take absolute value of a and b via abs(x) = (x^(x >> 63)) - (x >> 63).
const abs_a = (a ^ (a >> 63)) -% (a >> 63);
const abs_b = (b ^ (b >> 63)) -% (b >> 63);
@@ -104,13 +123,10 @@ fn test_one_moddi3(a: i64, b: i64, expected_r: i64) !void {
}
pub fn __udivdi3(a: u64, b: u64) callconv(.C) u64 {
- @setRuntimeSafety(builtin.is_test);
return __udivmoddi4(a, b, null);
}
pub fn __umoddi3(a: u64, b: u64) callconv(.C) u64 {
- @setRuntimeSafety(builtin.is_test);
-
var r: u64 = undefined;
_ = __udivmoddi4(a, b, &r);
return r;
@@ -130,8 +146,6 @@ fn test_one_umoddi3(a: u64, b: u64, expected_r: u64) !void {
}
pub fn __divmodsi4(a: i32, b: i32, rem: *i32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
-
const d = __divsi3(a, b);
rem.* = a -% (d * b);
return d;
@@ -166,16 +180,20 @@ fn test_one_divmodsi4(a: i32, b: i32, expected_q: i32, expected_r: i32) !void {
}
pub fn __udivmodsi4(a: u32, b: u32, rem: *u32) callconv(.C) u32 {
- @setRuntimeSafety(builtin.is_test);
-
const d = __udivsi3(a, b);
rem.* = @bitCast(u32, @bitCast(i32, a) -% (@bitCast(i32, d) * @bitCast(i32, b)));
return d;
}
pub fn __divsi3(n: i32, d: i32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
+ return div_i32(n, d);
+}
+fn __aeabi_idiv(n: i32, d: i32) callconv(.AAPCS) i32 {
+ return div_i32(n, d);
+}
+
+inline fn div_i32(n: i32, d: i32) i32 {
// Set aside the sign of the quotient.
const sign = @bitCast(u32, (n ^ d) >> 31);
// Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
@@ -213,8 +231,14 @@ fn test_one_divsi3(a: i32, b: i32, expected_q: i32) !void {
}
pub fn __udivsi3(n: u32, d: u32) callconv(.C) u32 {
- @setRuntimeSafety(builtin.is_test);
+ return div_u32(n, d);
+}
+fn __aeabi_uidiv(n: u32, d: u32) callconv(.AAPCS) u32 {
+ return div_u32(n, d);
+}
+
+inline fn div_u32(n: u32, d: u32) u32 {
const n_uword_bits: c_uint = 32;
// special cases
if (d == 0) return 0; // ?!
@@ -400,8 +424,6 @@ fn test_one_udivsi3(a: u32, b: u32, expected_q: u32) !void {
}
pub fn __modsi3(n: i32, d: i32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
-
return n -% __divsi3(n, d) *% d;
}
@@ -431,8 +453,6 @@ fn test_one_modsi3(a: i32, b: i32, expected_r: i32) !void {
}
pub fn __umodsi3(n: u32, d: u32) callconv(.C) u32 {
- @setRuntimeSafety(builtin.is_test);
-
return n -% __udivsi3(n, d) *% d;
}
@@ -583,8 +603,6 @@ fn test_one_umodsi3(a: u32, b: u32, expected_r: u32) !void {
}
pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
-
var ua = @bitCast(u32, a);
var ub = @bitCast(u32, b);
var r: u32 = 0;
diff --git a/lib/compiler_rt/int_to_float.zig b/lib/compiler_rt/int_to_float.zig
new file mode 100644
index 0000000000..233dfec815
--- /dev/null
+++ b/lib/compiler_rt/int_to_float.zig
@@ -0,0 +1,58 @@
+const Int = @import("std").meta.Int;
+const math = @import("std").math;
+
+pub fn intToFloat(comptime T: type, x: anytype) T {
+ if (x == 0) return 0;
+
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const Z = Int(.unsigned, @bitSizeOf(@TypeOf(x)));
+ const uT = Int(.unsigned, @bitSizeOf(T));
+ const inf = math.inf(T);
+ const float_bits = @bitSizeOf(T);
+ const int_bits = @bitSizeOf(@TypeOf(x));
+ const exp_bits = math.floatExponentBits(T);
+ const fractional_bits = math.floatFractionalBits(T);
+ const exp_bias = math.maxInt(Int(.unsigned, exp_bits - 1));
+ const implicit_bit = if (T != f80) @as(uT, 1) << fractional_bits else 0;
+ const max_exp = exp_bias;
+
+ // Sign
+ var abs_val = math.absCast(x);
+ const sign_bit = if (x < 0) @as(uT, 1) << (float_bits - 1) else 0;
+ var result: uT = sign_bit;
+
+ // Compute significand
+ var exp = int_bits - @clz(Z, abs_val) - 1;
+ if (int_bits <= fractional_bits or exp <= fractional_bits) {
+ const shift_amt = fractional_bits - @intCast(math.Log2Int(uT), exp);
+
+ // Shift up result to line up with the significand - no rounding required
+ result = (@intCast(uT, abs_val) << shift_amt);
+ result ^= implicit_bit; // Remove implicit integer bit
+ } else {
+ var shift_amt = @intCast(math.Log2Int(Z), exp - fractional_bits);
+ const exact_tie: bool = @ctz(Z, abs_val) == shift_amt - 1;
+
+ // Shift down result and remove implicit integer bit
+ result = @intCast(uT, (abs_val >> (shift_amt - 1))) ^ (implicit_bit << 1);
+
+ // Round result, including round-to-even for exact ties
+ result = ((result + 1) >> 1) & ~@as(uT, @boolToInt(exact_tie));
+ }
+
+ // Compute exponent
+ if ((int_bits > max_exp) and (exp > max_exp)) // If exponent too large, overflow to infinity
+ return @bitCast(T, sign_bit | @bitCast(uT, inf));
+
+ result += (@as(uT, exp) + exp_bias) << math.floatMantissaBits(T);
+
+ // If the result included a carry, we need to restore the explicit integer bit
+ if (T == f80) result |= 1 << fractional_bits;
+
+ return @bitCast(T, sign_bit | result);
+}
+
+test {
+ _ = @import("int_to_float_test.zig");
+}
diff --git a/lib/compiler_rt/int_to_float_test.zig b/lib/compiler_rt/int_to_float_test.zig
new file mode 100644
index 0000000000..f6eabbf4ba
--- /dev/null
+++ b/lib/compiler_rt/int_to_float_test.zig
@@ -0,0 +1,838 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const testing = std.testing;
+const math = std.math;
+
+const __floatunsihf = @import("floatunsihf.zig").__floatunsihf;
+
+// Conversion to f32
+const __floatsisf = @import("floatsisf.zig").__floatsisf;
+const __floatunsisf = @import("floatunsisf.zig").__floatunsisf;
+const __floatdisf = @import("floatdisf.zig").__floatdisf;
+const __floatundisf = @import("floatundisf.zig").__floatundisf;
+const __floattisf = @import("floattisf.zig").__floattisf;
+const __floatuntisf = @import("floatuntisf.zig").__floatuntisf;
+
+// Conversion to f64
+const __floatsidf = @import("floatsidf.zig").__floatsidf;
+const __floatunsidf = @import("floatunsidf.zig").__floatunsidf;
+const __floatdidf = @import("floatdidf.zig").__floatdidf;
+const __floatundidf = @import("floatundidf.zig").__floatundidf;
+const __floattidf = @import("floattidf.zig").__floattidf;
+const __floatuntidf = @import("floatuntidf.zig").__floatuntidf;
+
+// Conversion to f128
+const __floatsitf = @import("floatsitf.zig").__floatsitf;
+const __floatunsitf = @import("floatunsitf.zig").__floatunsitf;
+const __floatditf = @import("floatditf.zig").__floatditf;
+const __floatunditf = @import("floatunditf.zig").__floatunditf;
+const __floattitf = @import("floattitf.zig").__floattitf;
+const __floatuntitf = @import("floatuntitf.zig").__floatuntitf;
+
+fn test__floatsisf(a: i32, expected: u32) !void {
+ const r = __floatsisf(a);
+ try std.testing.expect(@bitCast(u32, r) == expected);
+}
+
+fn test_one_floatunsisf(a: u32, expected: u32) !void {
+ const r = __floatunsisf(a);
+ try std.testing.expect(@bitCast(u32, r) == expected);
+}
+
+test "floatsisf" {
+ try test__floatsisf(0, 0x00000000);
+ try test__floatsisf(1, 0x3f800000);
+ try test__floatsisf(-1, 0xbf800000);
+ try test__floatsisf(0x7FFFFFFF, 0x4f000000);
+ try test__floatsisf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xcf000000);
+}
+
+test "floatunsisf" {
+ // Test the produced bit pattern
+ try test_one_floatunsisf(0, 0);
+ try test_one_floatunsisf(1, 0x3f800000);
+ try test_one_floatunsisf(0x7FFFFFFF, 0x4f000000);
+ try test_one_floatunsisf(0x80000000, 0x4f000000);
+ try test_one_floatunsisf(0xFFFFFFFF, 0x4f800000);
+}
+
+fn test__floatdisf(a: i64, expected: f32) !void {
+ const x = __floatdisf(a);
+ try testing.expect(x == expected);
+}
+
+fn test__floatundisf(a: u64, expected: f32) !void {
+ try std.testing.expectEqual(expected, __floatundisf(a));
+}
+
+test "floatdisf" {
+ try test__floatdisf(0, 0.0);
+ try test__floatdisf(1, 1.0);
+ try test__floatdisf(2, 2.0);
+ try test__floatdisf(-1, -1.0);
+ try test__floatdisf(-2, -2.0);
+ try test__floatdisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ try test__floatdisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ try test__floatdisf(@bitCast(i64, @as(u64, 0x8000008000000000)), -0x1.FFFFFEp+62);
+ try test__floatdisf(@bitCast(i64, @as(u64, 0x8000010000000000)), -0x1.FFFFFCp+62);
+ try test__floatdisf(@bitCast(i64, @as(u64, 0x8000000000000000)), -0x1.000000p+63);
+ try test__floatdisf(@bitCast(i64, @as(u64, 0x8000000000000001)), -0x1.000000p+63);
+ try test__floatdisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+ try test__floatdisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
+ try test__floatdisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
+ try test__floatdisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50);
+ try test__floatdisf(0x0007FB72EC000000, 0x1.FEDCBCp+50);
+ try test__floatdisf(0x0007FB72E8000001, 0x1.FEDCBAp+50);
+ try test__floatdisf(0x0007FB72E6000000, 0x1.FEDCBAp+50);
+ try test__floatdisf(0x0007FB72E7000000, 0x1.FEDCBAp+50);
+ try test__floatdisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50);
+ try test__floatdisf(0x0007FB72E4000001, 0x1.FEDCBAp+50);
+ try test__floatdisf(0x0007FB72E4000000, 0x1.FEDCB8p+50);
+}
+
+test "floatundisf" {
+ try test__floatundisf(0, 0.0);
+ try test__floatundisf(1, 1.0);
+ try test__floatundisf(2, 2.0);
+ try test__floatundisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ try test__floatundisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ try test__floatundisf(0x8000008000000000, 0x1p+63);
+ try test__floatundisf(0x8000010000000000, 0x1.000002p+63);
+ try test__floatundisf(0x8000000000000000, 0x1p+63);
+ try test__floatundisf(0x8000000000000001, 0x1p+63);
+ try test__floatundisf(0xFFFFFFFFFFFFFFFE, 0x1p+64);
+ try test__floatundisf(0xFFFFFFFFFFFFFFFF, 0x1p+64);
+ try test__floatundisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+ try test__floatundisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
+ try test__floatundisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
+ try test__floatundisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50);
+ try test__floatundisf(0x0007FB72EC000000, 0x1.FEDCBCp+50);
+ try test__floatundisf(0x0007FB72E8000001, 0x1.FEDCBAp+50);
+ try test__floatundisf(0x0007FB72E6000000, 0x1.FEDCBAp+50);
+ try test__floatundisf(0x0007FB72E7000000, 0x1.FEDCBAp+50);
+ try test__floatundisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50);
+ try test__floatundisf(0x0007FB72E4000001, 0x1.FEDCBAp+50);
+ try test__floatundisf(0x0007FB72E4000000, 0x1.FEDCB8p+50);
+}
+
+fn test__floattisf(a: i128, expected: f32) !void {
+ const x = __floattisf(a);
+ try testing.expect(x == expected);
+}
+
+fn test__floatuntisf(a: u128, expected: f32) !void {
+ const x = __floatuntisf(a);
+ try testing.expect(x == expected);
+}
+
+test "floattisf" {
+ try test__floattisf(0, 0.0);
+
+ try test__floattisf(1, 1.0);
+ try test__floattisf(2, 2.0);
+ try test__floattisf(-1, -1.0);
+ try test__floattisf(-2, -2.0);
+
+ try test__floattisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ try test__floattisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+
+ try test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000008000000000), -0x1.FFFFFEp+62);
+ try test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000010000000000), -0x1.FFFFFCp+62);
+
+ try test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000000000000000), -0x1.000000p+63);
+ try test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000000000000001), -0x1.000000p+63);
+
+ try test__floattisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ try test__floattisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
+ try test__floattisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
+ try test__floattisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50);
+ try test__floattisf(0x0007FB72EC000000, 0x1.FEDCBCp+50);
+ try test__floattisf(0x0007FB72E8000001, 0x1.FEDCBAp+50);
+
+ try test__floattisf(0x0007FB72E6000000, 0x1.FEDCBAp+50);
+ try test__floattisf(0x0007FB72E7000000, 0x1.FEDCBAp+50);
+ try test__floattisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50);
+ try test__floattisf(0x0007FB72E4000001, 0x1.FEDCBAp+50);
+ try test__floattisf(0x0007FB72E4000000, 0x1.FEDCB8p+50);
+
+ try test__floattisf(make_ti(0x0007FB72E8000000, 0), 0x1.FEDCBAp+114);
+
+ try test__floattisf(make_ti(0x0007FB72EA000000, 0), 0x1.FEDCBAp+114);
+ try test__floattisf(make_ti(0x0007FB72EB000000, 0), 0x1.FEDCBAp+114);
+ try test__floattisf(make_ti(0x0007FB72EBFFFFFF, 0), 0x1.FEDCBAp+114);
+ try test__floattisf(make_ti(0x0007FB72EC000000, 0), 0x1.FEDCBCp+114);
+ try test__floattisf(make_ti(0x0007FB72E8000001, 0), 0x1.FEDCBAp+114);
+
+ try test__floattisf(make_ti(0x0007FB72E6000000, 0), 0x1.FEDCBAp+114);
+ try test__floattisf(make_ti(0x0007FB72E7000000, 0), 0x1.FEDCBAp+114);
+ try test__floattisf(make_ti(0x0007FB72E7FFFFFF, 0), 0x1.FEDCBAp+114);
+ try test__floattisf(make_ti(0x0007FB72E4000001, 0), 0x1.FEDCBAp+114);
+ try test__floattisf(make_ti(0x0007FB72E4000000, 0), 0x1.FEDCB8p+114);
+}
+
+test "floatuntisf" {
+ try test__floatuntisf(0, 0.0);
+
+ try test__floatuntisf(1, 1.0);
+ try test__floatuntisf(2, 2.0);
+ try test__floatuntisf(20, 20.0);
+
+ try test__floatuntisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ try test__floatuntisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+
+ try test__floatuntisf(make_uti(0x8000008000000000, 0), 0x1.000001p+127);
+ try test__floatuntisf(make_uti(0x8000000000000800, 0), 0x1.0p+127);
+ try test__floatuntisf(make_uti(0x8000010000000000, 0), 0x1.000002p+127);
+
+ try test__floatuntisf(make_uti(0x8000000000000000, 0), 0x1.000000p+127);
+
+ try test__floatuntisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ try test__floatuntisf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ try test__floatuntisf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+
+ try test__floatuntisf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+
+ try test__floatuntisf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ try test__floatuntisf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ try test__floatuntisf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ try test__floatuntisf(0xFFFFFFFFFFFFFFFE, 0x1p+64);
+ try test__floatuntisf(0xFFFFFFFFFFFFFFFF, 0x1p+64);
+
+ try test__floatuntisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ try test__floatuntisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
+ try test__floatuntisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
+ try test__floatuntisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50);
+ try test__floatuntisf(0x0007FB72EC000000, 0x1.FEDCBCp+50);
+ try test__floatuntisf(0x0007FB72E8000001, 0x1.FEDCBAp+50);
+
+ try test__floatuntisf(0x0007FB72E6000000, 0x1.FEDCBAp+50);
+ try test__floatuntisf(0x0007FB72E7000000, 0x1.FEDCBAp+50);
+ try test__floatuntisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50);
+ try test__floatuntisf(0x0007FB72E4000001, 0x1.FEDCBAp+50);
+ try test__floatuntisf(0x0007FB72E4000000, 0x1.FEDCB8p+50);
+
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCB90000000000001), 0x1.FEDCBAp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBA0000000000000), 0x1.FEDCBAp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBAFFFFFFFFFFFFF), 0x1.FEDCBAp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBB0000000000000), 0x1.FEDCBCp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBB0000000000001), 0x1.FEDCBCp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBBFFFFFFFFFFFFF), 0x1.FEDCBCp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBC0000000000000), 0x1.FEDCBCp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBC0000000000001), 0x1.FEDCBCp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBD0000000000000), 0x1.FEDCBCp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBD0000000000001), 0x1.FEDCBEp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBDFFFFFFFFFFFFF), 0x1.FEDCBEp+76);
+ try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBE0000000000000), 0x1.FEDCBEp+76);
+
+ // Test overflow to infinity
+ try test__floatuntisf(@as(u128, math.maxInt(u128)), @bitCast(f32, math.inf(f32)));
+}
+
+fn test_one_floatsidf(a: i32, expected: u64) !void {
+ const r = __floatsidf(a);
+ try std.testing.expect(@bitCast(u64, r) == expected);
+}
+
+fn test_one_floatunsidf(a: u32, expected: u64) !void {
+ const r = __floatunsidf(a);
+ try std.testing.expect(@bitCast(u64, r) == expected);
+}
+
+test "floatsidf" {
+ try test_one_floatsidf(0, 0x0000000000000000);
+ try test_one_floatsidf(1, 0x3ff0000000000000);
+ try test_one_floatsidf(-1, 0xbff0000000000000);
+ try test_one_floatsidf(0x7FFFFFFF, 0x41dfffffffc00000);
+ try test_one_floatsidf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xc1e0000000000000);
+}
+
+test "floatunsidf" {
+ try test_one_floatunsidf(0, 0x0000000000000000);
+ try test_one_floatunsidf(1, 0x3ff0000000000000);
+ try test_one_floatunsidf(0x7FFFFFFF, 0x41dfffffffc00000);
+ try test_one_floatunsidf(@intCast(u32, 0x80000000), 0x41e0000000000000);
+ try test_one_floatunsidf(@intCast(u32, 0xFFFFFFFF), 0x41efffffffe00000);
+}
+
+fn test__floatdidf(a: i64, expected: f64) !void {
+ const r = __floatdidf(a);
+ try testing.expect(r == expected);
+}
+
+fn test__floatundidf(a: u64, expected: f64) !void {
+ const r = __floatundidf(a);
+ try testing.expect(r == expected);
+}
+
+test "floatdidf" {
+ try test__floatdidf(0, 0.0);
+ try test__floatdidf(1, 1.0);
+ try test__floatdidf(2, 2.0);
+ try test__floatdidf(20, 20.0);
+ try test__floatdidf(-1, -1.0);
+ try test__floatdidf(-2, -2.0);
+ try test__floatdidf(-20, -20.0);
+ try test__floatdidf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ try test__floatdidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ try test__floatdidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ try test__floatdidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+ try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000008000000000)), -0x1.FFFFFEp+62);
+ try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000800)), -0x1.FFFFFFFFFFFFEp+62);
+ try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000010000000000)), -0x1.FFFFFCp+62);
+ try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000001000)), -0x1.FFFFFFFFFFFFCp+62);
+ try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000000)), -0x1.000000p+63);
+ try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000001)), -0x1.000000p+63); // 0x8000000000000001
+ try test__floatdidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+ try test__floatdidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ try test__floatdidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ try test__floatdidf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ try test__floatdidf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ try test__floatdidf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+ try test__floatdidf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ try test__floatdidf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ try test__floatdidf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ try test__floatdidf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ try test__floatdidf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+ try test__floatdidf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ try test__floatdidf(0x023479FD0E092DA1, 0x1.1A3CFE870496Dp+57);
+ try test__floatdidf(0x023479FD0E092DB0, 0x1.1A3CFE870496Ep+57);
+ try test__floatdidf(0x023479FD0E092DB8, 0x1.1A3CFE870496Ep+57);
+ try test__floatdidf(0x023479FD0E092DB6, 0x1.1A3CFE870496Ep+57);
+ try test__floatdidf(0x023479FD0E092DBF, 0x1.1A3CFE870496Ep+57);
+ try test__floatdidf(0x023479FD0E092DC1, 0x1.1A3CFE870496Ep+57);
+ try test__floatdidf(0x023479FD0E092DC7, 0x1.1A3CFE870496Ep+57);
+ try test__floatdidf(0x023479FD0E092DC8, 0x1.1A3CFE870496Ep+57);
+ try test__floatdidf(0x023479FD0E092DCF, 0x1.1A3CFE870496Ep+57);
+ try test__floatdidf(0x023479FD0E092DD0, 0x1.1A3CFE870496Ep+57);
+ try test__floatdidf(0x023479FD0E092DD1, 0x1.1A3CFE870496Fp+57);
+ try test__floatdidf(0x023479FD0E092DD8, 0x1.1A3CFE870496Fp+57);
+ try test__floatdidf(0x023479FD0E092DDF, 0x1.1A3CFE870496Fp+57);
+ try test__floatdidf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+}
+
+test "floatundidf" {
+ try test__floatundidf(0, 0.0);
+ try test__floatundidf(1, 1.0);
+ try test__floatundidf(2, 2.0);
+ try test__floatundidf(20, 20.0);
+ try test__floatundidf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ try test__floatundidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ try test__floatundidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ try test__floatundidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+ try test__floatundidf(0x8000008000000000, 0x1.000001p+63);
+ try test__floatundidf(0x8000000000000800, 0x1.0000000000001p+63);
+ try test__floatundidf(0x8000010000000000, 0x1.000002p+63);
+ try test__floatundidf(0x8000000000001000, 0x1.0000000000002p+63);
+ try test__floatundidf(0x8000000000000000, 0x1p+63);
+ try test__floatundidf(0x8000000000000001, 0x1p+63);
+ try test__floatundidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+ try test__floatundidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ try test__floatundidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ try test__floatundidf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ try test__floatundidf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ try test__floatundidf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+ try test__floatundidf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ try test__floatundidf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ try test__floatundidf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ try test__floatundidf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ try test__floatundidf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+ try test__floatundidf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ try test__floatundidf(0x023479FD0E092DA1, 0x1.1A3CFE870496Dp+57);
+ try test__floatundidf(0x023479FD0E092DB0, 0x1.1A3CFE870496Ep+57);
+ try test__floatundidf(0x023479FD0E092DB8, 0x1.1A3CFE870496Ep+57);
+ try test__floatundidf(0x023479FD0E092DB6, 0x1.1A3CFE870496Ep+57);
+ try test__floatundidf(0x023479FD0E092DBF, 0x1.1A3CFE870496Ep+57);
+ try test__floatundidf(0x023479FD0E092DC1, 0x1.1A3CFE870496Ep+57);
+ try test__floatundidf(0x023479FD0E092DC7, 0x1.1A3CFE870496Ep+57);
+ try test__floatundidf(0x023479FD0E092DC8, 0x1.1A3CFE870496Ep+57);
+ try test__floatundidf(0x023479FD0E092DCF, 0x1.1A3CFE870496Ep+57);
+ try test__floatundidf(0x023479FD0E092DD0, 0x1.1A3CFE870496Ep+57);
+ try test__floatundidf(0x023479FD0E092DD1, 0x1.1A3CFE870496Fp+57);
+ try test__floatundidf(0x023479FD0E092DD8, 0x1.1A3CFE870496Fp+57);
+ try test__floatundidf(0x023479FD0E092DDF, 0x1.1A3CFE870496Fp+57);
+ try test__floatundidf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+}
+
+fn test__floattidf(a: i128, expected: f64) !void {
+ const x = __floattidf(a);
+ try testing.expect(x == expected);
+}
+
+fn test__floatuntidf(a: u128, expected: f64) !void {
+ const x = __floatuntidf(a);
+ try testing.expect(x == expected);
+}
+
+test "floattidf" {
+ try test__floattidf(0, 0.0);
+
+ try test__floattidf(1, 1.0);
+ try test__floattidf(2, 2.0);
+ try test__floattidf(20, 20.0);
+ try test__floattidf(-1, -1.0);
+ try test__floattidf(-2, -2.0);
+ try test__floattidf(-20, -20.0);
+
+ try test__floattidf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ try test__floattidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ try test__floattidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ try test__floattidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+
+ try test__floattidf(make_ti(0x8000008000000000, 0), -0x1.FFFFFEp+126);
+ try test__floattidf(make_ti(0x8000000000000800, 0), -0x1.FFFFFFFFFFFFEp+126);
+ try test__floattidf(make_ti(0x8000010000000000, 0), -0x1.FFFFFCp+126);
+ try test__floattidf(make_ti(0x8000000000001000, 0), -0x1.FFFFFFFFFFFFCp+126);
+
+ try test__floattidf(make_ti(0x8000000000000000, 0), -0x1.000000p+127);
+ try test__floattidf(make_ti(0x8000000000000001, 0), -0x1.000000p+127);
+
+ try test__floattidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ try test__floattidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ try test__floattidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ try test__floattidf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ try test__floattidf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ try test__floattidf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ try test__floattidf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ try test__floattidf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ try test__floattidf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ try test__floattidf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ try test__floattidf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ try test__floattidf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ try test__floattidf(0x023479FD0E092DA1, 0x1.1A3CFE870496Dp+57);
+ try test__floattidf(0x023479FD0E092DB0, 0x1.1A3CFE870496Ep+57);
+ try test__floattidf(0x023479FD0E092DB8, 0x1.1A3CFE870496Ep+57);
+ try test__floattidf(0x023479FD0E092DB6, 0x1.1A3CFE870496Ep+57);
+ try test__floattidf(0x023479FD0E092DBF, 0x1.1A3CFE870496Ep+57);
+ try test__floattidf(0x023479FD0E092DC1, 0x1.1A3CFE870496Ep+57);
+ try test__floattidf(0x023479FD0E092DC7, 0x1.1A3CFE870496Ep+57);
+ try test__floattidf(0x023479FD0E092DC8, 0x1.1A3CFE870496Ep+57);
+ try test__floattidf(0x023479FD0E092DCF, 0x1.1A3CFE870496Ep+57);
+ try test__floattidf(0x023479FD0E092DD0, 0x1.1A3CFE870496Ep+57);
+ try test__floattidf(0x023479FD0E092DD1, 0x1.1A3CFE870496Fp+57);
+ try test__floattidf(0x023479FD0E092DD8, 0x1.1A3CFE870496Fp+57);
+ try test__floattidf(0x023479FD0E092DDF, 0x1.1A3CFE870496Fp+57);
+ try test__floattidf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ try test__floattidf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ try test__floattidf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496Dp+121);
+ try test__floattidf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496Ep+121);
+ try test__floattidf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496Ep+121);
+ try test__floattidf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496Ep+121);
+ try test__floattidf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496Ep+121);
+ try test__floattidf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496Ep+121);
+ try test__floattidf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496Ep+121);
+ try test__floattidf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496Ep+121);
+ try test__floattidf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496Ep+121);
+ try test__floattidf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496Ep+121);
+ try test__floattidf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496Fp+121);
+ try test__floattidf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496Fp+121);
+ try test__floattidf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496Fp+121);
+ try test__floattidf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+}
+
+test "floatuntidf" {
+ try test__floatuntidf(0, 0.0);
+
+ try test__floatuntidf(1, 1.0);
+ try test__floatuntidf(2, 2.0);
+ try test__floatuntidf(20, 20.0);
+
+ try test__floatuntidf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ try test__floatuntidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ try test__floatuntidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ try test__floatuntidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+
+ try test__floatuntidf(make_uti(0x8000008000000000, 0), 0x1.000001p+127);
+ try test__floatuntidf(make_uti(0x8000000000000800, 0), 0x1.0000000000001p+127);
+ try test__floatuntidf(make_uti(0x8000010000000000, 0), 0x1.000002p+127);
+ try test__floatuntidf(make_uti(0x8000000000001000, 0), 0x1.0000000000002p+127);
+
+ try test__floatuntidf(make_uti(0x8000000000000000, 0), 0x1.000000p+127);
+ try test__floatuntidf(make_uti(0x8000000000000001, 0), 0x1.0000000000000002p+127);
+
+ try test__floatuntidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ try test__floatuntidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ try test__floatuntidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ try test__floatuntidf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ try test__floatuntidf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ try test__floatuntidf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ try test__floatuntidf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ try test__floatuntidf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ try test__floatuntidf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ try test__floatuntidf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ try test__floatuntidf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ try test__floatuntidf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntidf(0x023479FD0E092DA1, 0x1.1A3CFE870496Dp+57);
+ try test__floatuntidf(0x023479FD0E092DB0, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntidf(0x023479FD0E092DB8, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntidf(0x023479FD0E092DB6, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntidf(0x023479FD0E092DBF, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntidf(0x023479FD0E092DC1, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntidf(0x023479FD0E092DC7, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntidf(0x023479FD0E092DC8, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntidf(0x023479FD0E092DCF, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntidf(0x023479FD0E092DD0, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntidf(0x023479FD0E092DD1, 0x1.1A3CFE870496Fp+57);
+ try test__floatuntidf(0x023479FD0E092DD8, 0x1.1A3CFE870496Fp+57);
+ try test__floatuntidf(0x023479FD0E092DDF, 0x1.1A3CFE870496Fp+57);
+ try test__floatuntidf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ try test__floatuntidf(make_uti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496Dp+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496Fp+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496Fp+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496Fp+121);
+ try test__floatuntidf(make_uti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+}
+
+fn test__floatsitf(a: i32, expected: u128) !void {
+ const r = __floatsitf(a);
+ try std.testing.expect(@bitCast(u128, r) == expected);
+}
+
+test "floatsitf" {
+ try test__floatsitf(0, 0);
+ try test__floatsitf(0x7FFFFFFF, 0x401dfffffffc00000000000000000000);
+ try test__floatsitf(0x12345678, 0x401b2345678000000000000000000000);
+ try test__floatsitf(-0x12345678, 0xc01b2345678000000000000000000000);
+ try test__floatsitf(@bitCast(i32, @intCast(u32, 0xffffffff)), 0xbfff0000000000000000000000000000);
+ try test__floatsitf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xc01e0000000000000000000000000000);
+}
+
+fn test__floatunsitf(a: u32, expected_hi: u64, expected_lo: u64) !void {
+ const x = __floatunsitf(a);
+
+ const x_repr = @bitCast(u128, x);
+ const x_hi = @intCast(u64, x_repr >> 64);
+ const x_lo = @truncate(u64, x_repr);
+
+ if (x_hi == expected_hi and x_lo == expected_lo) {
+ return;
+ }
+ // nan repr
+ else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
+ if ((x_hi & 0x7fff000000000000) == 0x7fff000000000000 and ((x_hi & 0xffffffffffff) > 0 or x_lo > 0)) {
+ return;
+ }
+ }
+
+ @panic("__floatunsitf test failure");
+}
+
+test "floatunsitf" {
+ try test__floatunsitf(0x7fffffff, 0x401dfffffffc0000, 0x0);
+ try test__floatunsitf(0, 0x0, 0x0);
+ try test__floatunsitf(0xffffffff, 0x401efffffffe0000, 0x0);
+ try test__floatunsitf(0x12345678, 0x401b234567800000, 0x0);
+}
+
+fn test__floatditf(a: i64, expected: f128) !void {
+ const x = __floatditf(a);
+ try testing.expect(x == expected);
+}
+
+fn test__floatunditf(a: u64, expected_hi: u64, expected_lo: u64) !void {
+ const x = __floatunditf(a);
+
+ const x_repr = @bitCast(u128, x);
+ const x_hi = @intCast(u64, x_repr >> 64);
+ const x_lo = @truncate(u64, x_repr);
+
+ if (x_hi == expected_hi and x_lo == expected_lo) {
+ return;
+ }
+ // nan repr
+ else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
+ if ((x_hi & 0x7fff000000000000) == 0x7fff000000000000 and ((x_hi & 0xffffffffffff) > 0 or x_lo > 0)) {
+ return;
+ }
+ }
+
+ @panic("__floatunditf test failure");
+}
+
+test "floatditf" {
+ try test__floatditf(0x7fffffffffffffff, make_tf(0x403dffffffffffff, 0xfffc000000000000));
+ try test__floatditf(0x123456789abcdef1, make_tf(0x403b23456789abcd, 0xef10000000000000));
+ try test__floatditf(0x2, make_tf(0x4000000000000000, 0x0));
+ try test__floatditf(0x1, make_tf(0x3fff000000000000, 0x0));
+ try test__floatditf(0x0, make_tf(0x0, 0x0));
+ try test__floatditf(@bitCast(i64, @as(u64, 0xffffffffffffffff)), make_tf(0xbfff000000000000, 0x0));
+ try test__floatditf(@bitCast(i64, @as(u64, 0xfffffffffffffffe)), make_tf(0xc000000000000000, 0x0));
+ try test__floatditf(-0x123456789abcdef1, make_tf(0xc03b23456789abcd, 0xef10000000000000));
+ try test__floatditf(@bitCast(i64, @as(u64, 0x8000000000000000)), make_tf(0xc03e000000000000, 0x0));
+}
+
+test "floatunditf" {
+ try test__floatunditf(0xffffffffffffffff, 0x403effffffffffff, 0xfffe000000000000);
+ try test__floatunditf(0xfffffffffffffffe, 0x403effffffffffff, 0xfffc000000000000);
+ try test__floatunditf(0x8000000000000000, 0x403e000000000000, 0x0);
+ try test__floatunditf(0x7fffffffffffffff, 0x403dffffffffffff, 0xfffc000000000000);
+ try test__floatunditf(0x123456789abcdef1, 0x403b23456789abcd, 0xef10000000000000);
+ try test__floatunditf(0x2, 0x4000000000000000, 0x0);
+ try test__floatunditf(0x1, 0x3fff000000000000, 0x0);
+ try test__floatunditf(0x0, 0x0, 0x0);
+}
+
+fn test__floattitf(a: i128, expected: f128) !void {
+ const x = __floattitf(a);
+ try testing.expect(x == expected);
+}
+
+fn test__floatuntitf(a: u128, expected: f128) !void {
+ const x = __floatuntitf(a);
+ try testing.expect(x == expected);
+}
+
+test "floattitf" {
+ try test__floattitf(0, 0.0);
+
+ try test__floattitf(1, 1.0);
+ try test__floattitf(2, 2.0);
+ try test__floattitf(20, 20.0);
+ try test__floattitf(-1, -1.0);
+ try test__floattitf(-2, -2.0);
+ try test__floattitf(-20, -20.0);
+
+ try test__floattitf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ try test__floattitf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ try test__floattitf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ try test__floattitf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+
+ try test__floattitf(make_ti(0x8000008000000000, 0), -0x1.FFFFFEp+126);
+ try test__floattitf(make_ti(0x8000000000000800, 0), -0x1.FFFFFFFFFFFFEp+126);
+ try test__floattitf(make_ti(0x8000010000000000, 0), -0x1.FFFFFCp+126);
+ try test__floattitf(make_ti(0x8000000000001000, 0), -0x1.FFFFFFFFFFFFCp+126);
+
+ try test__floattitf(make_ti(0x8000000000000000, 0), -0x1.000000p+127);
+ try test__floattitf(make_ti(0x8000000000000001, 0), -0x1.FFFFFFFFFFFFFFFCp+126);
+
+ try test__floattitf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ try test__floattitf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ try test__floattitf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ try test__floattitf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ try test__floattitf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ try test__floattitf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ try test__floattitf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ try test__floattitf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ try test__floattitf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ try test__floattitf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ try test__floattitf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ try test__floattitf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ try test__floattitf(0x023479FD0E092DA1, 0x1.1A3CFE870496D08p+57);
+ try test__floattitf(0x023479FD0E092DB0, 0x1.1A3CFE870496D8p+57);
+ try test__floattitf(0x023479FD0E092DB8, 0x1.1A3CFE870496DCp+57);
+ try test__floattitf(0x023479FD0E092DB6, 0x1.1A3CFE870496DBp+57);
+ try test__floattitf(0x023479FD0E092DBF, 0x1.1A3CFE870496DF8p+57);
+ try test__floattitf(0x023479FD0E092DC1, 0x1.1A3CFE870496E08p+57);
+ try test__floattitf(0x023479FD0E092DC7, 0x1.1A3CFE870496E38p+57);
+ try test__floattitf(0x023479FD0E092DC8, 0x1.1A3CFE870496E4p+57);
+ try test__floattitf(0x023479FD0E092DCF, 0x1.1A3CFE870496E78p+57);
+ try test__floattitf(0x023479FD0E092DD0, 0x1.1A3CFE870496E8p+57);
+ try test__floattitf(0x023479FD0E092DD1, 0x1.1A3CFE870496E88p+57);
+ try test__floattitf(0x023479FD0E092DD8, 0x1.1A3CFE870496ECp+57);
+ try test__floattitf(0x023479FD0E092DDF, 0x1.1A3CFE870496EF8p+57);
+ try test__floattitf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ try test__floattitf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ try test__floattitf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496D08p+121);
+ try test__floattitf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496D8p+121);
+ try test__floattitf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496DCp+121);
+ try test__floattitf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496DBp+121);
+ try test__floattitf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496DF8p+121);
+ try test__floattitf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496E08p+121);
+ try test__floattitf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496E38p+121);
+ try test__floattitf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496E4p+121);
+ try test__floattitf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496E78p+121);
+ try test__floattitf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496E8p+121);
+ try test__floattitf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496E88p+121);
+ try test__floattitf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496ECp+121);
+ try test__floattitf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496EF8p+121);
+ try test__floattitf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+
+ try test__floattitf(make_ti(0, 0xFFFFFFFFFFFFFFFF), 0x1.FFFFFFFFFFFFFFFEp+63);
+
+ try test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC2801), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ try test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3000), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ try test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC37FF), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ try test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ try test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4000), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ try test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC47FF), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ try test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ try test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4801), 0x1.23456789ABCDEF0123456789ABC5p+124);
+ try test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC57FF), 0x1.23456789ABCDEF0123456789ABC5p+124);
+}
+
+test "floatuntitf" {
+ try test__floatuntitf(0, 0.0);
+
+ try test__floatuntitf(1, 1.0);
+ try test__floatuntitf(2, 2.0);
+ try test__floatuntitf(20, 20.0);
+
+ try test__floatuntitf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ try test__floatuntitf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ try test__floatuntitf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ try test__floatuntitf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+ try test__floatuntitf(0x7FFFFFFFFFFFFFFF, 0xF.FFFFFFFFFFFFFFEp+59);
+ try test__floatuntitf(0xFFFFFFFFFFFFFFFE, 0xF.FFFFFFFFFFFFFFEp+60);
+ try test__floatuntitf(0xFFFFFFFFFFFFFFFF, 0xF.FFFFFFFFFFFFFFFp+60);
+
+ try test__floatuntitf(0x8000008000000000, 0x8.000008p+60);
+ try test__floatuntitf(0x8000000000000800, 0x8.0000000000008p+60);
+ try test__floatuntitf(0x8000010000000000, 0x8.00001p+60);
+ try test__floatuntitf(0x8000000000001000, 0x8.000000000001p+60);
+
+ try test__floatuntitf(0x8000000000000000, 0x8p+60);
+ try test__floatuntitf(0x8000000000000001, 0x8.000000000000001p+60);
+
+ try test__floatuntitf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ try test__floatuntitf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ try test__floatuntitf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ try test__floatuntitf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ try test__floatuntitf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ try test__floatuntitf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ try test__floatuntitf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ try test__floatuntitf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ try test__floatuntitf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ try test__floatuntitf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ try test__floatuntitf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ try test__floatuntitf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ try test__floatuntitf(0x023479FD0E092DA1, 0x1.1A3CFE870496D08p+57);
+ try test__floatuntitf(0x023479FD0E092DB0, 0x1.1A3CFE870496D8p+57);
+ try test__floatuntitf(0x023479FD0E092DB8, 0x1.1A3CFE870496DCp+57);
+ try test__floatuntitf(0x023479FD0E092DB6, 0x1.1A3CFE870496DBp+57);
+ try test__floatuntitf(0x023479FD0E092DBF, 0x1.1A3CFE870496DF8p+57);
+ try test__floatuntitf(0x023479FD0E092DC1, 0x1.1A3CFE870496E08p+57);
+ try test__floatuntitf(0x023479FD0E092DC7, 0x1.1A3CFE870496E38p+57);
+ try test__floatuntitf(0x023479FD0E092DC8, 0x1.1A3CFE870496E4p+57);
+ try test__floatuntitf(0x023479FD0E092DCF, 0x1.1A3CFE870496E78p+57);
+ try test__floatuntitf(0x023479FD0E092DD0, 0x1.1A3CFE870496E8p+57);
+ try test__floatuntitf(0x023479FD0E092DD1, 0x1.1A3CFE870496E88p+57);
+ try test__floatuntitf(0x023479FD0E092DD8, 0x1.1A3CFE870496ECp+57);
+ try test__floatuntitf(0x023479FD0E092DDF, 0x1.1A3CFE870496EF8p+57);
+ try test__floatuntitf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ try test__floatuntitf(make_uti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496D08p+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496D8p+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496DCp+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496DBp+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496DF8p+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496E08p+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496E38p+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496E4p+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496E78p+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496E8p+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496E88p+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496ECp+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496EF8p+121);
+ try test__floatuntitf(make_uti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+
+ try test__floatuntitf(make_uti(0, 0xFFFFFFFFFFFFFFFF), 0x1.FFFFFFFFFFFFFFFEp+63);
+
+ try test__floatuntitf(make_uti(0xFFFFFFFFFFFFFFFF, 0x0000000000000000), 0x1.FFFFFFFFFFFFFFFEp+127);
+ try test__floatuntitf(make_uti(0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF), 0x1.0000000000000000p+128);
+
+ try test__floatuntitf(make_uti(0x123456789ABCDEF0, 0x123456789ABC2801), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ try test__floatuntitf(make_uti(0x123456789ABCDEF0, 0x123456789ABC3000), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ try test__floatuntitf(make_uti(0x123456789ABCDEF0, 0x123456789ABC37FF), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ try test__floatuntitf(make_uti(0x123456789ABCDEF0, 0x123456789ABC3800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ try test__floatuntitf(make_uti(0x123456789ABCDEF0, 0x123456789ABC4000), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ try test__floatuntitf(make_uti(0x123456789ABCDEF0, 0x123456789ABC47FF), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ try test__floatuntitf(make_uti(0x123456789ABCDEF0, 0x123456789ABC4800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ try test__floatuntitf(make_uti(0x123456789ABCDEF0, 0x123456789ABC4801), 0x1.23456789ABCDEF0123456789ABC5p+124);
+ try test__floatuntitf(make_uti(0x123456789ABCDEF0, 0x123456789ABC57FF), 0x1.23456789ABCDEF0123456789ABC5p+124);
+}
+
+fn make_ti(high: u64, low: u64) i128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return @bitCast(i128, result);
+}
+
+fn make_uti(high: u64, low: u64) u128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return result;
+}
+
+fn make_tf(high: u64, low: u64) f128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return @bitCast(f128, result);
+}
+
+test "conversion to f16" {
+ try testing.expect(__floatunsihf(@as(u32, 0)) == 0.0);
+ try testing.expect(__floatunsihf(@as(u32, 1)) == 1.0);
+ try testing.expect(__floatunsihf(@as(u32, 65504)) == 65504);
+ try testing.expect(__floatunsihf(@as(u32, 65504 + (1 << 4))) == math.inf(f16));
+}
+
+test "conversion to f32" {
+ try testing.expect(__floatunsisf(@as(u32, 0)) == 0.0);
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u32))) != 1.0);
+ try testing.expect(__floatsisf(@as(i32, math.minInt(i32))) != 1.0);
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24))) == math.maxInt(u24));
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24)) + 1) == math.maxInt(u24) + 1); // 0x100_0000 - Exact
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24)) + 2) == math.maxInt(u24) + 1); // 0x100_0001 - Tie: Rounds down to even
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24)) + 3) == math.maxInt(u24) + 3); // 0x100_0002 - Exact
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24)) + 4) == math.maxInt(u24) + 5); // 0x100_0003 - Tie: Rounds up to even
+ try testing.expect(__floatunsisf(@as(u32, math.maxInt(u24)) + 5) == math.maxInt(u24) + 5); // 0x100_0004 - Exact
+}
+
+test "conversion to f80" {
+ if (builtin.zig_backend == .stage1 and builtin.cpu.arch != .x86_64)
+ return error.SkipZigTest; // https://github.com/ziglang/zig/issues/11408
+
+ const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+ try testing.expect(intToFloat(f80, @as(i80, -12)) == -12);
+ try testing.expect(@floatToInt(u80, intToFloat(f80, @as(u64, math.maxInt(u64)) + 0)) == math.maxInt(u64) + 0);
+ try testing.expect(@floatToInt(u80, intToFloat(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1);
+
+ try testing.expect(intToFloat(f80, @as(u32, 0)) == 0.0);
+ try testing.expect(intToFloat(f80, @as(u32, 1)) == 1.0);
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u32, math.maxInt(u24)) + 0)) == math.maxInt(u24));
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u64)) + 0)) == math.maxInt(u64));
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1); // Exact
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u64)) + 2)) == math.maxInt(u64) + 1); // Rounds down
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u64)) + 3)) == math.maxInt(u64) + 3); // Tie - Exact
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u64)) + 4)) == math.maxInt(u64) + 5); // Rounds up
+
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 0)) == math.maxInt(u65) + 1); // Rounds up
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 1)) == math.maxInt(u65) + 1); // Exact
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 2)) == math.maxInt(u65) + 1); // Rounds down
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 3)) == math.maxInt(u65) + 1); // Tie - Rounds down
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 4)) == math.maxInt(u65) + 5); // Rounds up
+ try testing.expect(@floatToInt(u128, intToFloat(f80, @as(u80, math.maxInt(u65)) + 5)) == math.maxInt(u65) + 5); // Exact
+}
diff --git a/lib/compiler_rt/log.zig b/lib/compiler_rt/log.zig
new file mode 100644
index 0000000000..90a38ba381
--- /dev/null
+++ b/lib/compiler_rt/log.zig
@@ -0,0 +1,194 @@
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/lnf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/ln.c
+
+const std = @import("std");
+const builtin = @import("builtin");
+const math = std.math;
+const testing = std.testing;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__logh, .{ .name = "__logh", .linkage = common.linkage });
+ @export(logf, .{ .name = "logf", .linkage = common.linkage });
+ @export(log, .{ .name = "log", .linkage = common.linkage });
+ @export(__logx, .{ .name = "__logx", .linkage = common.linkage });
+ const logq_sym_name = if (common.want_ppc_abi) "logf128" else "logq";
+ @export(logq, .{ .name = logq_sym_name, .linkage = common.linkage });
+ @export(logl, .{ .name = "logl", .linkage = common.linkage });
+}
+
+pub fn __logh(a: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, logf(a));
+}
+
+pub fn logf(x_: f32) callconv(.C) f32 {
+ const ln2_hi: f32 = 6.9313812256e-01;
+ const ln2_lo: f32 = 9.0580006145e-06;
+ const Lg1: f32 = 0xaaaaaa.0p-24;
+ const Lg2: f32 = 0xccce13.0p-25;
+ const Lg3: f32 = 0x91e9ee.0p-25;
+ const Lg4: f32 = 0xf89e26.0p-26;
+
+ var x = x_;
+ var ix = @bitCast(u32, x);
+ var k: i32 = 0;
+
+ // x < 2^(-126)
+ if (ix < 0x00800000 or ix >> 31 != 0) {
+ // log(+-0) = -inf
+ if (ix << 1 == 0) {
+ return -math.inf(f32);
+ }
+ // log(-#) = nan
+ if (ix >> 31 != 0) {
+ return math.nan(f32);
+ }
+
+ // subnormal, scale x
+ k -= 25;
+ x *= 0x1.0p25;
+ ix = @bitCast(u32, x);
+ } else if (ix >= 0x7F800000) {
+ return x;
+ } else if (ix == 0x3F800000) {
+ return 0;
+ }
+
+ // x into [sqrt(2) / 2, sqrt(2)]
+ ix += 0x3F800000 - 0x3F3504F3;
+ k += @intCast(i32, ix >> 23) - 0x7F;
+ ix = (ix & 0x007FFFFF) + 0x3F3504F3;
+ x = @bitCast(f32, ix);
+
+ const f = x - 1.0;
+ const s = f / (2.0 + f);
+ const z = s * s;
+ const w = z * z;
+ const t1 = w * (Lg2 + w * Lg4);
+ const t2 = z * (Lg1 + w * Lg3);
+ const R = t2 + t1;
+ const hfsq = 0.5 * f * f;
+ const dk = @intToFloat(f32, k);
+
+ return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi;
+}
+
+pub fn log(x_: f64) callconv(.C) f64 {
+ const ln2_hi: f64 = 6.93147180369123816490e-01;
+ const ln2_lo: f64 = 1.90821492927058770002e-10;
+ const Lg1: f64 = 6.666666666666735130e-01;
+ const Lg2: f64 = 3.999999999940941908e-01;
+ const Lg3: f64 = 2.857142874366239149e-01;
+ const Lg4: f64 = 2.222219843214978396e-01;
+ const Lg5: f64 = 1.818357216161805012e-01;
+ const Lg6: f64 = 1.531383769920937332e-01;
+ const Lg7: f64 = 1.479819860511658591e-01;
+
+ var x = x_;
+ var ix = @bitCast(u64, x);
+ var hx = @intCast(u32, ix >> 32);
+ var k: i32 = 0;
+
+ if (hx < 0x00100000 or hx >> 31 != 0) {
+ // log(+-0) = -inf
+ if (ix << 1 == 0) {
+ return -math.inf(f64);
+ }
+ // log(-#) = nan
+ if (hx >> 31 != 0) {
+ return math.nan(f64);
+ }
+
+ // subnormal, scale x; re-read the high bits from the rescaled x
+ k -= 54;
+ x *= 0x1.0p54;
+ hx = @intCast(u32, @bitCast(u64, x) >> 32);
+ } else if (hx >= 0x7FF00000) {
+ return x;
+ } else if (hx == 0x3FF00000 and ix << 32 == 0) {
+ return 0;
+ }
+
+ // x into [sqrt(2) / 2, sqrt(2)]
+ hx += 0x3FF00000 - 0x3FE6A09E;
+ k += @intCast(i32, hx >> 20) - 0x3FF;
+ hx = (hx & 0x000FFFFF) + 0x3FE6A09E;
+ ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF);
+ x = @bitCast(f64, ix);
+
+ const f = x - 1.0;
+ const hfsq = 0.5 * f * f;
+ const s = f / (2.0 + f);
+ const z = s * s;
+ const w = z * z;
+ const t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
+ const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
+ const R = t2 + t1;
+ const dk = @intToFloat(f64, k);
+
+ return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi;
+}
+
+pub fn __logx(a: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, logq(a));
+}
+
+pub fn logq(a: f128) callconv(.C) f128 {
+ // TODO: more correct implementation
+ return log(@floatCast(f64, a));
+}
+
+pub fn logl(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __logh(x),
+ 32 => return logf(x),
+ 64 => return log(x),
+ 80 => return __logx(x),
+ 128 => return logq(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+test "ln32" {
+ const epsilon = 0.000001;
+
+ try testing.expect(math.approxEqAbs(f32, logf(0.2), -1.609438, epsilon));
+ try testing.expect(math.approxEqAbs(f32, logf(0.8923), -0.113953, epsilon));
+ try testing.expect(math.approxEqAbs(f32, logf(1.5), 0.405465, epsilon));
+ try testing.expect(math.approxEqAbs(f32, logf(37.45), 3.623007, epsilon));
+ try testing.expect(math.approxEqAbs(f32, logf(89.123), 4.490017, epsilon));
+ try testing.expect(math.approxEqAbs(f32, logf(123123.234375), 11.720941, epsilon));
+}
+
+test "ln64" {
+ const epsilon = 0.000001;
+
+ try testing.expect(math.approxEqAbs(f64, log(0.2), -1.609438, epsilon));
+ try testing.expect(math.approxEqAbs(f64, log(0.8923), -0.113953, epsilon));
+ try testing.expect(math.approxEqAbs(f64, log(1.5), 0.405465, epsilon));
+ try testing.expect(math.approxEqAbs(f64, log(37.45), 3.623007, epsilon));
+ try testing.expect(math.approxEqAbs(f64, log(89.123), 4.490017, epsilon));
+ try testing.expect(math.approxEqAbs(f64, log(123123.234375), 11.720941, epsilon));
+}
+
+test "ln32.special" {
+ try testing.expect(math.isPositiveInf(logf(math.inf(f32))));
+ try testing.expect(math.isNegativeInf(logf(0.0)));
+ try testing.expect(math.isNan(logf(-1.0)));
+ try testing.expect(math.isNan(logf(math.nan(f32))));
+}
+
+test "ln64.special" {
+ try testing.expect(math.isPositiveInf(log(math.inf(f64))));
+ try testing.expect(math.isNegativeInf(log(0.0)));
+ try testing.expect(math.isNan(log(-1.0)));
+ try testing.expect(math.isNan(log(math.nan(f64))));
+}
diff --git a/lib/compiler_rt/log10.zig b/lib/compiler_rt/log10.zig
new file mode 100644
index 0000000000..406eb8d0c1
--- /dev/null
+++ b/lib/compiler_rt/log10.zig
@@ -0,0 +1,222 @@
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/log10f.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/log10.c
+
+const std = @import("std");
+const builtin = @import("builtin");
+const math = std.math;
+const testing = std.testing;
+const maxInt = std.math.maxInt;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__log10h, .{ .name = "__log10h", .linkage = common.linkage });
+ @export(log10f, .{ .name = "log10f", .linkage = common.linkage });
+ @export(log10, .{ .name = "log10", .linkage = common.linkage });
+ @export(__log10x, .{ .name = "__log10x", .linkage = common.linkage });
+ const log10q_sym_name = if (common.want_ppc_abi) "log10f128" else "log10q";
+ @export(log10q, .{ .name = log10q_sym_name, .linkage = common.linkage });
+ @export(log10l, .{ .name = "log10l", .linkage = common.linkage });
+}
+
+pub fn __log10h(a: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, log10f(a));
+}
+
+pub fn log10f(x_: f32) callconv(.C) f32 {
+ const ivln10hi: f32 = 4.3432617188e-01;
+ const ivln10lo: f32 = -3.1689971365e-05;
+ const log10_2hi: f32 = 3.0102920532e-01;
+ const log10_2lo: f32 = 7.9034151668e-07;
+ const Lg1: f32 = 0xaaaaaa.0p-24;
+ const Lg2: f32 = 0xccce13.0p-25;
+ const Lg3: f32 = 0x91e9ee.0p-25;
+ const Lg4: f32 = 0xf89e26.0p-26;
+
+ var x = x_;
+ var u = @bitCast(u32, x);
+ var ix = u;
+ var k: i32 = 0;
+
+ // x < 2^(-126)
+ if (ix < 0x00800000 or ix >> 31 != 0) {
+ // log(+-0) = -inf
+ if (ix << 1 == 0) {
+ return -math.inf(f32);
+ }
+ // log(-#) = nan
+ if (ix >> 31 != 0) {
+ return math.nan(f32);
+ }
+
+ k -= 25;
+ x *= 0x1.0p25;
+ ix = @bitCast(u32, x);
+ } else if (ix >= 0x7F800000) {
+ return x;
+ } else if (ix == 0x3F800000) {
+ return 0;
+ }
+
+ // x into [sqrt(2) / 2, sqrt(2)]
+ ix += 0x3F800000 - 0x3F3504F3;
+ k += @intCast(i32, ix >> 23) - 0x7F;
+ ix = (ix & 0x007FFFFF) + 0x3F3504F3;
+ x = @bitCast(f32, ix);
+
+ const f = x - 1.0;
+ const s = f / (2.0 + f);
+ const z = s * s;
+ const w = z * z;
+ const t1 = w * (Lg2 + w * Lg4);
+ const t2 = z * (Lg1 + w * Lg3);
+ const R = t2 + t1;
+ const hfsq = 0.5 * f * f;
+
+ var hi = f - hfsq;
+ u = @bitCast(u32, hi);
+ u &= 0xFFFFF000;
+ hi = @bitCast(f32, u);
+ const lo = f - hi - hfsq + s * (hfsq + R);
+ const dk = @intToFloat(f32, k);
+
+ return dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi + hi * ivln10hi + dk * log10_2hi;
+}
+
+pub fn log10(x_: f64) callconv(.C) f64 {
+ const ivln10hi: f64 = 4.34294481878168880939e-01;
+ const ivln10lo: f64 = 2.50829467116452752298e-11;
+ const log10_2hi: f64 = 3.01029995663611771306e-01;
+ const log10_2lo: f64 = 3.69423907715893078616e-13;
+ const Lg1: f64 = 6.666666666666735130e-01;
+ const Lg2: f64 = 3.999999999940941908e-01;
+ const Lg3: f64 = 2.857142874366239149e-01;
+ const Lg4: f64 = 2.222219843214978396e-01;
+ const Lg5: f64 = 1.818357216161805012e-01;
+ const Lg6: f64 = 1.531383769920937332e-01;
+ const Lg7: f64 = 1.479819860511658591e-01;
+
+ var x = x_;
+ var ix = @bitCast(u64, x);
+ var hx = @intCast(u32, ix >> 32);
+ var k: i32 = 0;
+
+ if (hx < 0x00100000 or hx >> 31 != 0) {
+ // log(+-0) = -inf
+ if (ix << 1 == 0) {
+ return -math.inf(f64);
+ }
+ // log(-#) = nan
+ if (hx >> 31 != 0) {
+ return math.nan(f64);
+ }
+
+ // subnormal, scale x
+ k -= 54;
+ x *= 0x1.0p54;
+ hx = @intCast(u32, @bitCast(u64, x) >> 32);
+ } else if (hx >= 0x7FF00000) {
+ return x;
+ } else if (hx == 0x3FF00000 and ix << 32 == 0) {
+ return 0;
+ }
+
+ // x into [sqrt(2) / 2, sqrt(2)]
+ hx += 0x3FF00000 - 0x3FE6A09E;
+ k += @intCast(i32, hx >> 20) - 0x3FF;
+ hx = (hx & 0x000FFFFF) + 0x3FE6A09E;
+ ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF);
+ x = @bitCast(f64, ix);
+
+ const f = x - 1.0;
+ const hfsq = 0.5 * f * f;
+ const s = f / (2.0 + f);
+ const z = s * s;
+ const w = z * z;
+ const t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
+ const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
+ const R = t2 + t1;
+
+ // hi + lo = f - hfsq + s * (hfsq + R) ~ log(1 + f)
+ var hi = f - hfsq;
+ var hii = @bitCast(u64, hi);
+ hii &= @as(u64, maxInt(u64)) << 32;
+ hi = @bitCast(f64, hii);
+ const lo = f - hi - hfsq + s * (hfsq + R);
+
+ // val_hi + val_lo ~ log10(1 + f) + k * log10(2)
+ var val_hi = hi * ivln10hi;
+ const dk = @intToFloat(f64, k);
+ const y = dk * log10_2hi;
+ var val_lo = dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi;
+
+ // Extra precision multiplication
+ const ww = y + val_hi;
+ val_lo += (y - ww) + val_hi;
+ val_hi = ww;
+
+ return val_lo + val_hi;
+}
+
+pub fn __log10x(a: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, log10q(a));
+}
+
+pub fn log10q(a: f128) callconv(.C) f128 {
+ // TODO: more correct implementation
+ return log10(@floatCast(f64, a));
+}
+
+pub fn log10l(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __log10h(x),
+ 32 => return log10f(x),
+ 64 => return log10(x),
+ 80 => return __log10x(x),
+ 128 => return log10q(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+test "log10_32" {
+ const epsilon = 0.000001;
+
+ try testing.expect(math.approxEqAbs(f32, log10f(0.2), -0.698970, epsilon));
+ try testing.expect(math.approxEqAbs(f32, log10f(0.8923), -0.049489, epsilon));
+ try testing.expect(math.approxEqAbs(f32, log10f(1.5), 0.176091, epsilon));
+ try testing.expect(math.approxEqAbs(f32, log10f(37.45), 1.573452, epsilon));
+ try testing.expect(math.approxEqAbs(f32, log10f(89.123), 1.94999, epsilon));
+ try testing.expect(math.approxEqAbs(f32, log10f(123123.234375), 5.09034, epsilon));
+}
+
+test "log10_64" {
+ const epsilon = 0.000001;
+
+ try testing.expect(math.approxEqAbs(f64, log10(0.2), -0.698970, epsilon));
+ try testing.expect(math.approxEqAbs(f64, log10(0.8923), -0.049489, epsilon));
+ try testing.expect(math.approxEqAbs(f64, log10(1.5), 0.176091, epsilon));
+ try testing.expect(math.approxEqAbs(f64, log10(37.45), 1.573452, epsilon));
+ try testing.expect(math.approxEqAbs(f64, log10(89.123), 1.94999, epsilon));
+ try testing.expect(math.approxEqAbs(f64, log10(123123.234375), 5.09034, epsilon));
+}
+
+test "log10_32.special" {
+ try testing.expect(math.isPositiveInf(log10f(math.inf(f32))));
+ try testing.expect(math.isNegativeInf(log10f(0.0)));
+ try testing.expect(math.isNan(log10f(-1.0)));
+ try testing.expect(math.isNan(log10f(math.nan(f32))));
+}
+
+test "log10_64.special" {
+ try testing.expect(math.isPositiveInf(log10(math.inf(f64))));
+ try testing.expect(math.isNegativeInf(log10(0.0)));
+ try testing.expect(math.isNan(log10(-1.0)));
+ try testing.expect(math.isNan(log10(math.nan(f64))));
+}
diff --git a/lib/compiler_rt/log2.zig b/lib/compiler_rt/log2.zig
new file mode 100644
index 0000000000..6f6c07212a
--- /dev/null
+++ b/lib/compiler_rt/log2.zig
@@ -0,0 +1,212 @@
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/log2f.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/log2.c
+
+const std = @import("std");
+const builtin = @import("builtin");
+const math = std.math;
+const expect = std.testing.expect;
+const maxInt = std.math.maxInt;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__log2h, .{ .name = "__log2h", .linkage = common.linkage });
+ @export(log2f, .{ .name = "log2f", .linkage = common.linkage });
+ @export(log2, .{ .name = "log2", .linkage = common.linkage });
+ @export(__log2x, .{ .name = "__log2x", .linkage = common.linkage });
+ const log2q_sym_name = if (common.want_ppc_abi) "log2f128" else "log2q";
+ @export(log2q, .{ .name = log2q_sym_name, .linkage = common.linkage });
+ @export(log2l, .{ .name = "log2l", .linkage = common.linkage });
+}
+
+pub fn __log2h(a: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, log2f(a));
+}
+
+pub fn log2f(x_: f32) callconv(.C) f32 {
+ const ivln2hi: f32 = 1.4428710938e+00;
+ const ivln2lo: f32 = -1.7605285393e-04;
+ const Lg1: f32 = 0xaaaaaa.0p-24;
+ const Lg2: f32 = 0xccce13.0p-25;
+ const Lg3: f32 = 0x91e9ee.0p-25;
+ const Lg4: f32 = 0xf89e26.0p-26;
+
+ var x = x_;
+ var u = @bitCast(u32, x);
+ var ix = u;
+ var k: i32 = 0;
+
+ // x < 2^(-126)
+ if (ix < 0x00800000 or ix >> 31 != 0) {
+ // log(+-0) = -inf
+ if (ix << 1 == 0) {
+ return -math.inf(f32);
+ }
+ // log(-#) = nan
+ if (ix >> 31 != 0) {
+ return math.nan(f32);
+ }
+
+ k -= 25;
+ x *= 0x1.0p25;
+ ix = @bitCast(u32, x);
+ } else if (ix >= 0x7F800000) {
+ return x;
+ } else if (ix == 0x3F800000) {
+ return 0;
+ }
+
+ // x into [sqrt(2) / 2, sqrt(2)]
+ ix += 0x3F800000 - 0x3F3504F3;
+ k += @intCast(i32, ix >> 23) - 0x7F;
+ ix = (ix & 0x007FFFFF) + 0x3F3504F3;
+ x = @bitCast(f32, ix);
+
+ const f = x - 1.0;
+ const s = f / (2.0 + f);
+ const z = s * s;
+ const w = z * z;
+ const t1 = w * (Lg2 + w * Lg4);
+ const t2 = z * (Lg1 + w * Lg3);
+ const R = t2 + t1;
+ const hfsq = 0.5 * f * f;
+
+ var hi = f - hfsq;
+ u = @bitCast(u32, hi);
+ u &= 0xFFFFF000;
+ hi = @bitCast(f32, u);
+ const lo = f - hi - hfsq + s * (hfsq + R);
+ return (lo + hi) * ivln2lo + lo * ivln2hi + hi * ivln2hi + @intToFloat(f32, k);
+}
+
+pub fn log2(x_: f64) callconv(.C) f64 {
+ const ivln2hi: f64 = 1.44269504072144627571e+00;
+ const ivln2lo: f64 = 1.67517131648865118353e-10;
+ const Lg1: f64 = 6.666666666666735130e-01;
+ const Lg2: f64 = 3.999999999940941908e-01;
+ const Lg3: f64 = 2.857142874366239149e-01;
+ const Lg4: f64 = 2.222219843214978396e-01;
+ const Lg5: f64 = 1.818357216161805012e-01;
+ const Lg6: f64 = 1.531383769920937332e-01;
+ const Lg7: f64 = 1.479819860511658591e-01;
+
+ var x = x_;
+ var ix = @bitCast(u64, x);
+ var hx = @intCast(u32, ix >> 32);
+ var k: i32 = 0;
+
+ if (hx < 0x00100000 or hx >> 31 != 0) {
+ // log(+-0) = -inf
+ if (ix << 1 == 0) {
+ return -math.inf(f64);
+ }
+ // log(-#) = nan
+ if (hx >> 31 != 0) {
+ return math.nan(f64);
+ }
+
+ // subnormal, scale x
+ k -= 54;
+ x *= 0x1.0p54;
+ hx = @intCast(u32, @bitCast(u64, x) >> 32);
+ } else if (hx >= 0x7FF00000) {
+ return x;
+ } else if (hx == 0x3FF00000 and ix << 32 == 0) {
+ return 0;
+ }
+
+ // x into [sqrt(2) / 2, sqrt(2)]
+ hx += 0x3FF00000 - 0x3FE6A09E;
+ k += @intCast(i32, hx >> 20) - 0x3FF;
+ hx = (hx & 0x000FFFFF) + 0x3FE6A09E;
+ ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF);
+ x = @bitCast(f64, ix);
+
+ const f = x - 1.0;
+ const hfsq = 0.5 * f * f;
+ const s = f / (2.0 + f);
+ const z = s * s;
+ const w = z * z;
+ const t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
+ const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
+ const R = t2 + t1;
+
+ // hi + lo = f - hfsq + s * (hfsq + R) ~ log(1 + f)
+ var hi = f - hfsq;
+ var hii = @bitCast(u64, hi);
+ hii &= @as(u64, maxInt(u64)) << 32;
+ hi = @bitCast(f64, hii);
+ const lo = f - hi - hfsq + s * (hfsq + R);
+
+ var val_hi = hi * ivln2hi;
+ var val_lo = (lo + hi) * ivln2lo + lo * ivln2hi;
+
+ // spadd(val_hi, val_lo, y)
+ const y = @intToFloat(f64, k);
+ const ww = y + val_hi;
+ val_lo += (y - ww) + val_hi;
+ val_hi = ww;
+
+ return val_lo + val_hi;
+}
+
+pub fn __log2x(a: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, log2q(a));
+}
+
+pub fn log2q(a: f128) callconv(.C) f128 {
+ // TODO: more correct implementation
+ return log2(@floatCast(f64, a));
+}
+
+pub fn log2l(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __log2h(x),
+ 32 => return log2f(x),
+ 64 => return log2(x),
+ 80 => return __log2x(x),
+ 128 => return log2q(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+test "log2_32" {
+ const epsilon = 0.000001;
+
+ try expect(math.approxEqAbs(f32, log2f(0.2), -2.321928, epsilon));
+ try expect(math.approxEqAbs(f32, log2f(0.8923), -0.164399, epsilon));
+ try expect(math.approxEqAbs(f32, log2f(1.5), 0.584962, epsilon));
+ try expect(math.approxEqAbs(f32, log2f(37.45), 5.226894, epsilon));
+ try expect(math.approxEqAbs(f32, log2f(123123.234375), 16.909744, epsilon));
+}
+
+test "log2_64" {
+ const epsilon = 0.000001;
+
+ try expect(math.approxEqAbs(f64, log2(0.2), -2.321928, epsilon));
+ try expect(math.approxEqAbs(f64, log2(0.8923), -0.164399, epsilon));
+ try expect(math.approxEqAbs(f64, log2(1.5), 0.584962, epsilon));
+ try expect(math.approxEqAbs(f64, log2(37.45), 5.226894, epsilon));
+ try expect(math.approxEqAbs(f64, log2(123123.234375), 16.909744, epsilon));
+}
+
+test "log2_32.special" {
+ try expect(math.isPositiveInf(log2f(math.inf(f32))));
+ try expect(math.isNegativeInf(log2f(0.0)));
+ try expect(math.isNan(log2f(-1.0)));
+ try expect(math.isNan(log2f(math.nan(f32))));
+}
+
+test "log2_64.special" {
+ try expect(math.isPositiveInf(log2(math.inf(f64))));
+ try expect(math.isNegativeInf(log2(0.0)));
+ try expect(math.isNan(log2(-1.0)));
+ try expect(math.isNan(log2(math.nan(f64))));
+}
diff --git a/lib/std/special/compiler_rt/lshrdi3_test.zig b/lib/compiler_rt/lshrdi3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/lshrdi3_test.zig
rename to lib/compiler_rt/lshrdi3_test.zig
diff --git a/lib/std/special/compiler_rt/lshrti3_test.zig b/lib/compiler_rt/lshrti3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/lshrti3_test.zig
rename to lib/compiler_rt/lshrti3_test.zig
diff --git a/lib/compiler_rt/modti3.zig b/lib/compiler_rt/modti3.zig
new file mode 100644
index 0000000000..5fa34938ff
--- /dev/null
+++ b/lib/compiler_rt/modti3.zig
@@ -0,0 +1,58 @@
+//! Ported from:
+//!
+//! https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/modti3.c
+
+const std = @import("std");
+const builtin = @import("builtin");
+const udivmod = @import("udivmod.zig").udivmod;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (builtin.os.tag == .windows) {
+ switch (arch) {
+ .i386 => {
+ @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = common.linkage });
+ },
+ else => {},
+ }
+ if (arch.isAARCH64()) {
+ @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage });
+ }
+ } else {
+ @export(__modti3, .{ .name = "__modti3", .linkage = common.linkage });
+ }
+}
+
+pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
+ return mod(a, b);
+}
+
+const v128 = @import("std").meta.Vector(2, u64);
+
+fn __modti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
+ return @bitCast(v128, mod(@bitCast(i128, a), @bitCast(i128, b)));
+}
+
+inline fn mod(a: i128, b: i128) i128 {
+ const s_a = a >> (128 - 1); // s = a < 0 ? -1 : 0
+ const s_b = b >> (128 - 1); // s = b < 0 ? -1 : 0
+
+ const an = (a ^ s_a) -% s_a; // negate if s == -1
+ const bn = (b ^ s_b) -% s_b; // negate if s == -1
+
+ var r: u128 = undefined;
+ _ = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), &r);
+ return (@bitCast(i128, r) ^ s_a) -% s_a; // negate if s == -1
+}
+
+test {
+ _ = @import("modti3_test.zig");
+}
diff --git a/lib/std/special/compiler_rt/modti3_test.zig b/lib/compiler_rt/modti3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/modti3_test.zig
rename to lib/compiler_rt/modti3_test.zig
diff --git a/lib/compiler_rt/muldf3.zig b/lib/compiler_rt/muldf3.zig
new file mode 100644
index 0000000000..ef7ab9fbf7
--- /dev/null
+++ b/lib/compiler_rt/muldf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = common.linkage });
+ } else {
+ @export(__muldf3, .{ .name = "__muldf3", .linkage = common.linkage });
+ }
+}
+
+pub fn __muldf3(a: f64, b: f64) callconv(.C) f64 {
+ return mulf3(f64, a, b);
+}
+
+fn __aeabi_dmul(a: f64, b: f64) callconv(.AAPCS) f64 {
+ return mulf3(f64, a, b);
+}
diff --git a/lib/std/special/compiler_rt/muldi3.zig b/lib/compiler_rt/muldi3.zig
similarity index 67%
rename from lib/std/special/compiler_rt/muldi3.zig
rename to lib/compiler_rt/muldi3.zig
index f0d857e1e9..a51c6c7b76 100644
--- a/lib/std/special/compiler_rt/muldi3.zig
+++ b/lib/compiler_rt/muldi3.zig
@@ -1,10 +1,36 @@
+//! Ported from
+//! https://github.com/llvm/llvm-project/blob/llvmorg-9.0.0/compiler-rt/lib/builtins/muldi3.c
+
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const native_endian = builtin.cpu.arch.endian();
+const common = @import("common.zig");
-// Ported from
-// https://github.com/llvm/llvm-project/blob/llvmorg-9.0.0/compiler-rt/lib/builtins/muldi3.c
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_lmul, .{ .name = "__aeabi_lmul", .linkage = common.linkage });
+ } else {
+ @export(__muldi3, .{ .name = "__muldi3", .linkage = common.linkage });
+ }
+}
+
+pub fn __muldi3(a: i64, b: i64) callconv(.C) i64 {
+ return mul(a, b);
+}
+
+fn __aeabi_lmul(a: i64, b: i64) callconv(.AAPCS) i64 {
+ return mul(a, b);
+}
+
+inline fn mul(a: i64, b: i64) i64 {
+ const x = dwords{ .all = a };
+ const y = dwords{ .all = b };
+ var r = dwords{ .all = muldsi3(x.s.low, y.s.low) };
+ r.s.high +%= x.s.high *% y.s.low +% x.s.low *% y.s.high;
+ return r.all;
+}
const dwords = extern union {
all: i64,
@@ -20,9 +46,7 @@ const dwords = extern union {
},
};
-fn __muldsi3(a: u32, b: u32) i64 {
- @setRuntimeSafety(is_test);
-
+fn muldsi3(a: u32, b: u32) i64 {
const bits_in_word_2 = @sizeOf(i32) * 8 / 2;
const lower_mask = (~@as(u32, 0)) >> bits_in_word_2;
@@ -42,16 +66,6 @@ fn __muldsi3(a: u32, b: u32) i64 {
return r.all;
}
-pub fn __muldi3(a: i64, b: i64) callconv(.C) i64 {
- @setRuntimeSafety(is_test);
-
- const x = dwords{ .all = a };
- const y = dwords{ .all = b };
- var r = dwords{ .all = __muldsi3(x.s.low, y.s.low) };
- r.s.high +%= x.s.high *% y.s.low +% x.s.low *% y.s.high;
- return r.all;
-}
-
test {
_ = @import("muldi3_test.zig");
}
diff --git a/lib/std/special/compiler_rt/muldi3_test.zig b/lib/compiler_rt/muldi3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/muldi3_test.zig
rename to lib/compiler_rt/muldi3_test.zig
diff --git a/lib/compiler_rt/mulf3.zig b/lib/compiler_rt/mulf3.zig
new file mode 100644
index 0000000000..f6949ee3ce
--- /dev/null
+++ b/lib/compiler_rt/mulf3.zig
@@ -0,0 +1,203 @@
+const std = @import("std");
+const math = std.math;
+const builtin = @import("builtin");
+const common = @import("./common.zig");
+
+/// Ported from:
+/// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
+pub inline fn mulf3(comptime T: type, a: T, b: T) T {
+ @setRuntimeSafety(builtin.is_test);
+ const typeWidth = @typeInfo(T).Float.bits;
+ const significandBits = math.floatMantissaBits(T);
+ const fractionalBits = math.floatFractionalBits(T);
+ const exponentBits = math.floatExponentBits(T);
+
+ const Z = std.meta.Int(.unsigned, typeWidth);
+
+ // ZSignificand is large enough to contain the significand, including an explicit integer bit
+ const ZSignificand = PowerOfTwoSignificandZ(T);
+ const ZSignificandBits = @typeInfo(ZSignificand).Int.bits;
+
+ const roundBit = (1 << (ZSignificandBits - 1));
+ const signBit = (@as(Z, 1) << (significandBits + exponentBits));
+ const maxExponent = ((1 << exponentBits) - 1);
+ const exponentBias = (maxExponent >> 1);
+
+ const integerBit = (@as(ZSignificand, 1) << fractionalBits);
+ const quietBit = integerBit >> 1;
+ const significandMask = (@as(Z, 1) << significandBits) - 1;
+
+ const absMask = signBit - 1;
+ const qnanRep = @bitCast(Z, math.nan(T)) | quietBit;
+ const infRep = @bitCast(Z, math.inf(T));
+ const minNormalRep = @bitCast(Z, math.floatMin(T));
+
+ const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent);
+ const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent);
+ const productSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit;
+
+ var aSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, a) & significandMask);
+ var bSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, b) & significandMask);
+ var scale: i32 = 0;
+
+ // Detect if a or b is zero, denormal, infinity, or NaN.
+ if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
+ const aAbs: Z = @bitCast(Z, a) & absMask;
+ const bAbs: Z = @bitCast(Z, b) & absMask;
+
+ // NaN * anything = qNaN
+ if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit);
+ // anything * NaN = qNaN
+ if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit);
+
+ if (aAbs == infRep) {
+ // infinity * non-zero = +/- infinity
+ if (bAbs != 0) {
+ return @bitCast(T, aAbs | productSign);
+ } else {
+ // infinity * zero = NaN
+ return @bitCast(T, qnanRep);
+ }
+ }
+
+ if (bAbs == infRep) {
+ // non-zero * infinity = +/- infinity
+ if (aAbs != 0) {
+ return @bitCast(T, bAbs | productSign);
+ } else {
+ // zero * infinity = NaN
+ return @bitCast(T, qnanRep);
+ }
+ }
+
+ // zero * anything = +/- zero
+ if (aAbs == 0) return @bitCast(T, productSign);
+ // anything * zero = +/- zero
+ if (bAbs == 0) return @bitCast(T, productSign);
+
+ // one or both of a or b is denormal, the other (if applicable) is a
+ // normal number. Renormalize one or both of a and b, and set scale to
+ // include the necessary exponent adjustment.
+ if (aAbs < minNormalRep) scale += normalize(T, &aSignificand);
+ if (bAbs < minNormalRep) scale += normalize(T, &bSignificand);
+ }
+
+ // Or in the implicit significand bit. (If we fell through from the
+ // denormal path it was already set by normalize( ), but setting it twice
+ // won't hurt anything.)
+ aSignificand |= integerBit;
+ bSignificand |= integerBit;
+
+ // Get the significand of a*b. Before multiplying the significands, shift
+ // one of them left to left-align it in the field. Thus, the product will
+ // have (exponentBits + 2) integral digits, all but two of which must be
+ // zero. Normalizing this result is just a conditional left-shift by one
+ // and bumping the exponent accordingly.
+ var productHi: ZSignificand = undefined;
+ var productLo: ZSignificand = undefined;
+ const left_align_shift = ZSignificandBits - fractionalBits - 1;
+ common.wideMultiply(ZSignificand, aSignificand, bSignificand << left_align_shift, &productHi, &productLo);
+
+ var productExponent: i32 = @intCast(i32, aExponent + bExponent) - exponentBias + scale;
+
+ // Normalize the significand, adjust exponent if needed.
+ if ((productHi & integerBit) != 0) {
+ productExponent +%= 1;
+ } else {
+ productHi = (productHi << 1) | (productLo >> (ZSignificandBits - 1));
+ productLo = productLo << 1;
+ }
+
+ // If we have overflowed the type, return +/- infinity.
+ if (productExponent >= maxExponent) return @bitCast(T, infRep | productSign);
+
+ var result: Z = undefined;
+ if (productExponent <= 0) {
+ // Result is denormal before rounding
+ //
+ // If the result is so small that it just underflows to zero, return
+ // a zero of the appropriate sign. Mathematically there is no need to
+ // handle this case separately, but we make it a special case to
+ // simplify the shift logic.
+ const shift: u32 = @truncate(u32, @as(Z, 1) -% @bitCast(u32, productExponent));
+ if (shift >= ZSignificandBits) return @bitCast(T, productSign);
+
+ // Otherwise, shift the significand of the result so that the round
+ // bit is the high bit of productLo.
+ const sticky = wideShrWithTruncation(ZSignificand, &productHi, &productLo, shift);
+ productLo |= @boolToInt(sticky);
+ result = productHi;
+
+ // We include the integer bit so that rounding will carry to the exponent,
+ // but it will be removed later if the result is still denormal
+ if (significandBits != fractionalBits) result |= integerBit;
+ } else {
+ // Result is normal before rounding; insert the exponent.
+ result = productHi & significandMask;
+ result |= @intCast(Z, productExponent) << significandBits;
+ }
+
+ // Final rounding. The final result may overflow to infinity, or underflow
+ // to zero, but those are the correct results in those cases. We use the
+ // default IEEE-754 round-to-nearest, ties-to-even rounding mode.
+ if (productLo > roundBit) result +%= 1;
+ if (productLo == roundBit) result +%= result & 1;
+
+ // Restore any explicit integer bit, if it was rounded off
+ if (significandBits != fractionalBits) {
+ if ((result >> significandBits) != 0) {
+ result |= integerBit;
+ } else {
+ result &= ~integerBit;
+ }
+ }
+
+ // Insert the sign of the result:
+ result |= productSign;
+
+ return @bitCast(T, result);
+}
+
+/// Returns `true` if the right shift is inexact (i.e. any bit shifted out is non-zero)
+///
+/// This is analogous to a shr version of `@shlWithOverflow`
+fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
+ @setRuntimeSafety(builtin.is_test);
+ const typeWidth = @typeInfo(Z).Int.bits;
+ const S = math.Log2Int(Z);
+ var inexact = false;
+ if (count < typeWidth) {
+ inexact = (lo.* << @intCast(S, typeWidth -% count)) != 0;
+ lo.* = (hi.* << @intCast(S, typeWidth -% count)) | (lo.* >> @intCast(S, count));
+ hi.* = hi.* >> @intCast(S, count);
+ } else if (count < 2 * typeWidth) {
+ inexact = (hi.* << @intCast(S, 2 * typeWidth -% count) | lo.*) != 0;
+ lo.* = hi.* >> @intCast(S, count -% typeWidth);
+ hi.* = 0;
+ } else {
+ inexact = (hi.* | lo.*) != 0;
+ lo.* = 0;
+ hi.* = 0;
+ }
+ return inexact;
+}
+
+fn normalize(comptime T: type, significand: *PowerOfTwoSignificandZ(T)) i32 {
+ const Z = PowerOfTwoSignificandZ(T);
+ const integerBit = @as(Z, 1) << math.floatFractionalBits(T);
+
+ const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
+ significand.* <<= @intCast(math.Log2Int(Z), shift);
+ return @as(i32, 1) - shift;
+}
+
+/// Returns a power-of-two integer type that is large enough to contain
+/// the significand of T, including an explicit integer bit
+fn PowerOfTwoSignificandZ(comptime T: type) type {
+ const bits = math.ceilPowerOfTwoAssert(u16, math.floatFractionalBits(T) + 1);
+ return std.meta.Int(.unsigned, bits);
+}
+
+test {
+ _ = @import("mulf3_test.zig");
+}
diff --git a/lib/compiler_rt/mulf3_test.zig b/lib/compiler_rt/mulf3_test.zig
new file mode 100644
index 0000000000..203745e632
--- /dev/null
+++ b/lib/compiler_rt/mulf3_test.zig
@@ -0,0 +1,171 @@
+// Ported from:
+//
+// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/test/builtins/Unit/multf3_test.c
+
+const std = @import("std");
+const math = std.math;
+const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
+const inf128 = @bitCast(f128, @as(u128, 0x7fff000000000000) << 64);
+
+const __multf3 = @import("multf3.zig").__multf3;
+const __mulxf3 = @import("mulxf3.zig").__mulxf3;
+const __muldf3 = @import("muldf3.zig").__muldf3;
+const __mulsf3 = @import("mulsf3.zig").__mulsf3;
+
+// return true if equal
+// use two 64-bit integers instead of one 128-bit integer
+// because 128-bit integer constant can't be assigned directly
+fn compareResultLD(result: f128, expectedHi: u64, expectedLo: u64) bool {
+ const rep = @bitCast(u128, result);
+ const hi = @intCast(u64, rep >> 64);
+ const lo = @truncate(u64, rep);
+
+ if (hi == expectedHi and lo == expectedLo) {
+ return true;
+ }
+ // test other possible NaN representation(signal NaN)
+ if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
+ ((hi & 0xffffffffffff) > 0 or lo > 0))
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+fn test__multf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
+ const x = __multf3(a, b);
+
+ if (compareResultLD(x, expected_hi, expected_lo))
+ return;
+
+ @panic("__multf3 test failure");
+}
+
+fn makeNaN128(rand: u64) f128 {
+ const int_result = @as(u128, 0x7fff000000000000 | (rand & 0xffffffffffff)) << 64;
+ const float_result = @bitCast(f128, int_result);
+ return float_result;
+}
+test "multf3" {
+ // qNaN * any = qNaN
+ try test__multf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
+
+ // NaN * any = NaN
+ const a = makeNaN128(0x800030000000);
+ try test__multf3(a, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
+ // inf * any = inf
+ try test__multf3(inf128, 0x1.23456789abcdefp+5, 0x7fff000000000000, 0x0);
+
+ // any * any
+ try test__multf3(
+ @bitCast(f128, @as(u128, 0x40042eab345678439abcdefea5678234)),
+ @bitCast(f128, @as(u128, 0x3ffeedcb34a235253948765432134675)),
+ 0x400423e7f9e3c9fc,
+ 0xd906c2c2a85777c4,
+ );
+
+ try test__multf3(
+ @bitCast(f128, @as(u128, 0x3fcd353e45674d89abacc3a2ebf3ff50)),
+ @bitCast(f128, @as(u128, 0x3ff6ed8764648369535adf4be3214568)),
+ 0x3fc52a163c6223fc,
+ 0xc94c4bf0430768b4,
+ );
+
+ try test__multf3(
+ 0x1.234425696abcad34a35eeffefdcbap+456,
+ 0x451.ed98d76e5d46e5f24323dff21ffp+600,
+ 0x44293a91de5e0e94,
+ 0xe8ed17cc2cdf64ac,
+ );
+
+ try test__multf3(
+ @bitCast(f128, @as(u128, 0x3f154356473c82a9fabf2d22ace345df)),
+ @bitCast(f128, @as(u128, 0x3e38eda98765476743ab21da23d45679)),
+ 0x3d4f37c1a3137cae,
+ 0xfc6807048bc2836a,
+ );
+
+ try test__multf3(0x1.23456734245345p-10000, 0x1.edcba524498724p-6497, 0x0, 0x0);
+
+ // Denormal operands.
+ try test__multf3(
+ 0x0.0000000000000000000000000001p-16382,
+ 0x1p16383,
+ 0x3f90000000000000,
+ 0x0,
+ );
+ try test__multf3(
+ 0x1p16383,
+ 0x0.0000000000000000000000000001p-16382,
+ 0x3f90000000000000,
+ 0x0,
+ );
+
+ try test__multf3(0x1.0000_0000_0000_0000_0000_0000_0001p+0, 0x1.8p+5, 0x4004_8000_0000_0000, 0x0000_0000_0000_0002);
+ try test__multf3(0x1.0000_0000_0000_0000_0000_0000_0002p+0, 0x1.8p+5, 0x4004_8000_0000_0000, 0x0000_0000_0000_0003);
+ try test__multf3(2.0, math.floatTrueMin(f128), 0x0000_0000_0000_0000, 0x0000_0000_0000_0002);
+}
+
+const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1)));
+
+fn test__mulxf3(a: f80, b: f80, expected: u80) !void {
+ const x = __mulxf3(a, b);
+ const rep = @bitCast(u80, x);
+
+ if (rep == expected)
+ return;
+
+ if (math.isNan(@bitCast(f80, expected)) and math.isNan(x))
+ return; // We don't currently test NaN payload propagation
+
+ return error.TestFailed;
+}
+
+test "mulxf3" {
+ // NaN * any = NaN
+ try test__mulxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
+ try test__mulxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
+
+ // any * NaN = NaN
+ try test__mulxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80));
+ try test__mulxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80));
+
+ // NaN * inf = NaN
+ try test__mulxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80));
+
+ // inf * NaN = NaN
+ try test__mulxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80));
+
+ // inf * inf = inf
+ try test__mulxf3(math.inf(f80), math.inf(f80), @bitCast(u80, math.inf(f80)));
+
+ // inf * -inf = -inf
+ try test__mulxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, -math.inf(f80)));
+
+ // -inf * inf = -inf
+ try test__mulxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, -math.inf(f80)));
+
+ // inf * any = inf
+ try test__mulxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80)));
+
+ // any * inf = inf
+ try test__mulxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @bitCast(u80, math.inf(f80)));
+
+ // any * any
+ try test__mulxf3(0x1.0p+0, 0x1.dcba987654321p+5, 0x4004_ee5d_4c3b_2a19_0800);
+ try test__mulxf3(0x1.0000_0000_0000_0004p+0, 0x1.8p+5, 0x4004_C000_0000_0000_0003); // exact
+
+ try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.0p+5, 0x4004_8000_0000_0000_0001); // exact
+ try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.7ffep+5, 0x4004_BFFF_0000_0000_0001); // round down
+ try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.8p+5, 0x4004_C000_0000_0000_0002); // round up to even
+ try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.8002p+5, 0x4004_C001_0000_0000_0002); // round up
+ try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.0p+6, 0x4005_8000_0000_0000_0001); // exact
+
+ try test__mulxf3(0x1.0000_0001p+0, 0x1.0000_0001p+0, 0x3FFF_8000_0001_0000_0000); // round down to even
+ try test__mulxf3(0x1.0000_0001p+0, 0x1.0000_0001_0002p+0, 0x3FFF_8000_0001_0001_0001); // round up
+ try test__mulxf3(0x0.8000_0000_0000_0000p-16382, 2.0, 0x0001_8000_0000_0000_0000); // denormal -> normal
+ try test__mulxf3(0x0.7fff_ffff_ffff_fffep-16382, 0x2.0000_0000_0000_0008p0, 0x0001_8000_0000_0000_0000); // denormal -> normal
+ try test__mulxf3(0x0.7fff_ffff_ffff_fffep-16382, 0x1.0000_0000_0000_0000p0, 0x0000_3FFF_FFFF_FFFF_FFFF); // denormal -> denormal
+}
diff --git a/lib/compiler_rt/mulo.zig b/lib/compiler_rt/mulo.zig
new file mode 100644
index 0000000000..cd2d127c34
--- /dev/null
+++ b/lib/compiler_rt/mulo.zig
@@ -0,0 +1,79 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const math = std.math;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__mulosi4, .{ .name = "__mulosi4", .linkage = common.linkage });
+ @export(__mulodi4, .{ .name = "__mulodi4", .linkage = common.linkage });
+ @export(__muloti4, .{ .name = "__muloti4", .linkage = common.linkage });
+}
+
+// mulo - multiplication overflow
+// * return a*%b.
+// * return if a*b overflows => 1 else => 0
+// - muloXi4_genericSmall as default
+// - muloXi4_genericFast for 2*bitsize <= usize
+
+inline fn muloXi4_genericSmall(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
+ overflow.* = 0;
+ const min = math.minInt(ST);
+ var res: ST = a *% b;
+ // Hacker's Delight section Overflow subsection Multiplication
+ // case a=-2^{31}, b=-1 problem, because
+ // on some machines a*b = -2^{31} with overflow
+ // Then -2^{31}/-1 overflows and any result is possible.
+ // => check with a<0 and b=-2^{31}
+ if ((a < 0 and b == min) or (a != 0 and @divTrunc(res, a) != b))
+ overflow.* = 1;
+ return res;
+}
+
+inline fn muloXi4_genericFast(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
+ overflow.* = 0;
+ const EST = switch (ST) {
+ i32 => i64,
+ i64 => i128,
+ i128 => i256,
+ else => unreachable,
+ };
+ const min = math.minInt(ST);
+ const max = math.maxInt(ST);
+ var res: EST = @as(EST, a) * @as(EST, b);
+ //invariant: -2^{bitwidth(EST)} < res < 2^{bitwidth(EST)-1}
+ if (res < min or max < res)
+ overflow.* = 1;
+ return @truncate(ST, res);
+}
+
+pub fn __mulosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
+ if (2 * @bitSizeOf(i32) <= @bitSizeOf(usize)) {
+ return muloXi4_genericFast(i32, a, b, overflow);
+ } else {
+ return muloXi4_genericSmall(i32, a, b, overflow);
+ }
+}
+
+pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
+ if (2 * @bitSizeOf(i64) <= @bitSizeOf(usize)) {
+ return muloXi4_genericFast(i64, a, b, overflow);
+ } else {
+ return muloXi4_genericSmall(i64, a, b, overflow);
+ }
+}
+
+pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
+ if (2 * @bitSizeOf(i128) <= @bitSizeOf(usize)) {
+ return muloXi4_genericFast(i128, a, b, overflow);
+ } else {
+ return muloXi4_genericSmall(i128, a, b, overflow);
+ }
+}
+
+test {
+ _ = @import("mulosi4_test.zig");
+ _ = @import("mulodi4_test.zig");
+ _ = @import("muloti4_test.zig");
+}
diff --git a/lib/std/special/compiler_rt/mulodi4_test.zig b/lib/compiler_rt/mulodi4_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/mulodi4_test.zig
rename to lib/compiler_rt/mulodi4_test.zig
diff --git a/lib/std/special/compiler_rt/mulosi4_test.zig b/lib/compiler_rt/mulosi4_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/mulosi4_test.zig
rename to lib/compiler_rt/mulosi4_test.zig
diff --git a/lib/std/special/compiler_rt/muloti4_test.zig b/lib/compiler_rt/muloti4_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/muloti4_test.zig
rename to lib/compiler_rt/muloti4_test.zig
diff --git a/lib/compiler_rt/mulsf3.zig b/lib/compiler_rt/mulsf3.zig
new file mode 100644
index 0000000000..3294f5b1c7
--- /dev/null
+++ b/lib/compiler_rt/mulsf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = common.linkage });
+ } else {
+ @export(__mulsf3, .{ .name = "__mulsf3", .linkage = common.linkage });
+ }
+}
+
+pub fn __mulsf3(a: f32, b: f32) callconv(.C) f32 {
+ return mulf3(f32, a, b);
+}
+
+fn __aeabi_fmul(a: f32, b: f32) callconv(.AAPCS) f32 {
+ return mulf3(f32, a, b);
+}
diff --git a/lib/compiler_rt/multf3.zig b/lib/compiler_rt/multf3.zig
new file mode 100644
index 0000000000..d4449ab72e
--- /dev/null
+++ b/lib/compiler_rt/multf3.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__mulkf3, .{ .name = "__mulkf3", .linkage = common.linkage });
+ } else if (common.want_sparc_abi) {
+ @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = common.linkage });
+ } else {
+ @export(__multf3, .{ .name = "__multf3", .linkage = common.linkage });
+ }
+}
+
+pub fn __multf3(a: f128, b: f128) callconv(.C) f128 {
+ return mulf3(f128, a, b);
+}
+
+fn __mulkf3(a: f128, b: f128) callconv(.C) f128 {
+ return mulf3(f128, a, b);
+}
+
+fn _Qp_mul(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
+ c.* = mulf3(f128, a.*, b.*);
+}
diff --git a/lib/std/special/compiler_rt/multi3.zig b/lib/compiler_rt/multi3.zig
similarity index 52%
rename from lib/std/special/compiler_rt/multi3.zig
rename to lib/compiler_rt/multi3.zig
index 4e5c49730a..ba41cb7917 100644
--- a/lib/std/special/compiler_rt/multi3.zig
+++ b/lib/compiler_rt/multi3.zig
@@ -1,31 +1,52 @@
-const compiler_rt = @import("../compiler_rt.zig");
+//! Ported from git@github.com:llvm-project/llvm-project-20170507.git
+//! ae684fad6d34858c014c94da69c15e7774a633c3
+//! 2018-08-13
+
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
+const arch = builtin.cpu.arch;
const native_endian = builtin.cpu.arch.endian();
+const common = @import("common.zig");
-// Ported from git@github.com:llvm-project/llvm-project-20170507.git
-// ae684fad6d34858c014c94da69c15e7774a633c3
-// 2018-08-13
+pub const panic = common.panic;
+
+comptime {
+ if (builtin.os.tag == .windows) {
+ switch (arch) {
+ .i386 => {
+ @export(__multi3, .{ .name = "__multi3", .linkage = common.linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = common.linkage });
+ },
+ else => {},
+ }
+ } else {
+ @export(__multi3, .{ .name = "__multi3", .linkage = common.linkage });
+ }
+}
pub fn __multi3(a: i128, b: i128) callconv(.C) i128 {
- @setRuntimeSafety(is_test);
+ return mul(a, b);
+}
+
+const v128 = @Vector(2, u64);
+
+fn __multi3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
+ return @bitCast(v128, mul(@bitCast(i128, a), @bitCast(i128, b)));
+}
+
+inline fn mul(a: i128, b: i128) i128 {
const x = twords{ .all = a };
const y = twords{ .all = b };
- var r = twords{ .all = __mulddi3(x.s.low, y.s.low) };
+ var r = twords{ .all = mulddi3(x.s.low, y.s.low) };
r.s.high +%= x.s.high *% y.s.low +% x.s.low *% y.s.high;
return r.all;
}
-const v128 = std.meta.Vector(2, u64);
-pub fn __multi3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
- return @bitCast(v128, @call(.{ .modifier = .always_inline }, __multi3, .{
- @bitCast(i128, a),
- @bitCast(i128, b),
- }));
-}
-
-fn __mulddi3(a: u64, b: u64) i128 {
+fn mulddi3(a: u64, b: u64) i128 {
const bits_in_dword_2 = (@sizeOf(i64) * 8) / 2;
const lower_mask = ~@as(u64, 0) >> bits_in_dword_2;
var r: twords = undefined;
diff --git a/lib/std/special/compiler_rt/multi3_test.zig b/lib/compiler_rt/multi3_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/multi3_test.zig
rename to lib/compiler_rt/multi3_test.zig
diff --git a/lib/compiler_rt/mulxf3.zig b/lib/compiler_rt/mulxf3.zig
new file mode 100644
index 0000000000..353d27c290
--- /dev/null
+++ b/lib/compiler_rt/mulxf3.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__mulxf3, .{ .name = "__mulxf3", .linkage = common.linkage });
+}
+
+pub fn __mulxf3(a: f80, b: f80) callconv(.C) f80 {
+ return mulf3(f80, a, b);
+}
diff --git a/lib/compiler_rt/negXi2.zig b/lib/compiler_rt/negXi2.zig
new file mode 100644
index 0000000000..086f80c6b3
--- /dev/null
+++ b/lib/compiler_rt/negXi2.zig
@@ -0,0 +1,41 @@
+//! neg - negate (the number)
+//! - negXi2 for unoptimized little and big endian
+//! sfffffff = 2^31-1
+//! two's complement inverting bits and add 1 would result in -INT_MIN == 0
+//! => -INT_MIN = -2^31 forbidden
+//! * size optimized builds
+//! * machines that dont support carry operations
+
+const std = @import("std");
+const builtin = @import("builtin");
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__negsi2, .{ .name = "__negsi2", .linkage = common.linkage });
+ @export(__negdi2, .{ .name = "__negdi2", .linkage = common.linkage });
+ @export(__negti2, .{ .name = "__negti2", .linkage = common.linkage });
+}
+
+pub fn __negsi2(a: i32) callconv(.C) i32 {
+ return negXi2(i32, a);
+}
+
+pub fn __negdi2(a: i64) callconv(.C) i64 {
+ return negXi2(i64, a);
+}
+
+pub fn __negti2(a: i128) callconv(.C) i128 {
+ return negXi2(i128, a);
+}
+
+inline fn negXi2(comptime T: type, a: T) T {
+ return -a;
+}
+
+test {
+ _ = @import("negsi2_test.zig");
+ _ = @import("negdi2_test.zig");
+ _ = @import("negti2_test.zig");
+}
diff --git a/lib/compiler_rt/negdf2.zig b/lib/compiler_rt/negdf2.zig
new file mode 100644
index 0000000000..c730ada7e0
--- /dev/null
+++ b/lib/compiler_rt/negdf2.zig
@@ -0,0 +1,19 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = common.linkage });
+ } else {
+ @export(__negdf2, .{ .name = "__negdf2", .linkage = common.linkage });
+ }
+}
+
+fn __negdf2(a: f64) callconv(.C) f64 {
+ return common.fneg(a);
+}
+
+fn __aeabi_dneg(a: f64) callconv(.AAPCS) f64 {
+ return common.fneg(a);
+}
diff --git a/lib/std/special/compiler_rt/negdi2_test.zig b/lib/compiler_rt/negdi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/negdi2_test.zig
rename to lib/compiler_rt/negdi2_test.zig
diff --git a/lib/compiler_rt/negsf2.zig b/lib/compiler_rt/negsf2.zig
new file mode 100644
index 0000000000..4cb32097ba
--- /dev/null
+++ b/lib/compiler_rt/negsf2.zig
@@ -0,0 +1,19 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = common.linkage });
+ } else {
+ @export(__negsf2, .{ .name = "__negsf2", .linkage = common.linkage });
+ }
+}
+
+fn __negsf2(a: f32) callconv(.C) f32 {
+ return common.fneg(a);
+}
+
+fn __aeabi_fneg(a: f32) callconv(.AAPCS) f32 {
+ return common.fneg(a);
+}
diff --git a/lib/std/special/compiler_rt/negsi2_test.zig b/lib/compiler_rt/negsi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/negsi2_test.zig
rename to lib/compiler_rt/negsi2_test.zig
diff --git a/lib/compiler_rt/negtf2.zig b/lib/compiler_rt/negtf2.zig
new file mode 100644
index 0000000000..c1c1e97802
--- /dev/null
+++ b/lib/compiler_rt/negtf2.zig
@@ -0,0 +1,11 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__negtf2, .{ .name = "__negtf2", .linkage = common.linkage });
+}
+
+fn __negtf2(a: f128) callconv(.C) f128 {
+ return common.fneg(a);
+}
diff --git a/lib/std/special/compiler_rt/negti2_test.zig b/lib/compiler_rt/negti2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/negti2_test.zig
rename to lib/compiler_rt/negti2_test.zig
diff --git a/lib/std/special/compiler_rt/negv.zig b/lib/compiler_rt/negv.zig
similarity index 58%
rename from lib/std/special/compiler_rt/negv.zig
rename to lib/compiler_rt/negv.zig
index 09abb040d5..361cd80ee7 100644
--- a/lib/std/special/compiler_rt/negv.zig
+++ b/lib/compiler_rt/negv.zig
@@ -1,20 +1,16 @@
-// negv - negate oVerflow
-// * @panic, if result can not be represented
-// - negvXi4_generic for unoptimized version
+//! negv - negate oVerflow
+//! * @panic, if result cannot be represented
+//! - negvXi4_generic for unoptimized version
+const std = @import("std");
+const builtin = @import("builtin");
+const common = @import("common.zig");
-// assume -0 == 0 is gracefully handled by the hardware
-inline fn negvXi(comptime ST: type, a: ST) ST {
- const UT = switch (ST) {
- i32 => u32,
- i64 => u64,
- i128 => u128,
- else => unreachable,
- };
- const N: UT = @bitSizeOf(ST);
- const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1)));
- if (a == min)
- @panic("compiler_rt negv: overflow");
- return -a;
+pub const panic = common.panic;
+
+comptime {
+ @export(__negvsi2, .{ .name = "__negvsi2", .linkage = common.linkage });
+ @export(__negvdi2, .{ .name = "__negvdi2", .linkage = common.linkage });
+ @export(__negvti2, .{ .name = "__negvti2", .linkage = common.linkage });
}
pub fn __negvsi2(a: i32) callconv(.C) i32 {
@@ -29,6 +25,20 @@ pub fn __negvti2(a: i128) callconv(.C) i128 {
return negvXi(i128, a);
}
+inline fn negvXi(comptime ST: type, a: ST) ST {
+ const UT = switch (ST) {
+ i32 => u32,
+ i64 => u64,
+ i128 => u128,
+ else => unreachable,
+ };
+ const N: UT = @bitSizeOf(ST);
+ const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1)));
+ if (a == min)
+ @panic("compiler_rt negv: overflow");
+ return -a;
+}
+
test {
_ = @import("negvsi2_test.zig");
_ = @import("negvdi2_test.zig");
diff --git a/lib/std/special/compiler_rt/negvdi2_test.zig b/lib/compiler_rt/negvdi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/negvdi2_test.zig
rename to lib/compiler_rt/negvdi2_test.zig
diff --git a/lib/std/special/compiler_rt/negvsi2_test.zig b/lib/compiler_rt/negvsi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/negvsi2_test.zig
rename to lib/compiler_rt/negvsi2_test.zig
diff --git a/lib/std/special/compiler_rt/negvti2_test.zig b/lib/compiler_rt/negvti2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/negvti2_test.zig
rename to lib/compiler_rt/negvti2_test.zig
diff --git a/lib/compiler_rt/negxf2.zig b/lib/compiler_rt/negxf2.zig
new file mode 100644
index 0000000000..4e8258453b
--- /dev/null
+++ b/lib/compiler_rt/negxf2.zig
@@ -0,0 +1,11 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__negxf2, .{ .name = "__negxf2", .linkage = common.linkage });
+}
+
+fn __negxf2(a: f80) callconv(.C) f80 {
+ return common.fneg(a);
+}
diff --git a/lib/std/special/compiler_rt/os_version_check.zig b/lib/compiler_rt/os_version_check.zig
similarity index 50%
rename from lib/std/special/compiler_rt/os_version_check.zig
rename to lib/compiler_rt/os_version_check.zig
index 55617dec75..2c6cdb54dc 100644
--- a/lib/std/special/compiler_rt/os_version_check.zig
+++ b/lib/compiler_rt/os_version_check.zig
@@ -1,5 +1,14 @@
-const testing = @import("std").testing;
+const std = @import("std");
+const testing = std.testing;
const builtin = @import("builtin");
+const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+pub const panic = @import("common.zig").panic;
+
+comptime {
+ if (builtin.os.tag.isDarwin()) {
+ @export(__isPlatformVersionAtLeast, .{ .name = "__isPlatformVersionAtLeast", .linkage = linkage });
+ }
+}
// Ported from llvm-project 13.0.0 d7b669b3a30345cfcdb2fde2af6f48aa4b94845d
//
@@ -16,30 +25,32 @@ const builtin = @import("builtin");
// the newer codepath, which merely calls out to the Darwin _availability_version_check API which is
// available on macOS 10.15+, iOS 13+, tvOS 13+ and watchOS 6+.
-inline fn constructVersion(major: u32, minor: u32, subminor: u32) u32 {
- return ((major & 0xffff) << 16) | ((minor & 0xff) << 8) | (subminor & 0xff);
-}
+const __isPlatformVersionAtLeast = if (builtin.os.tag.isDarwin()) struct {
+ inline fn constructVersion(major: u32, minor: u32, subminor: u32) u32 {
+ return ((major & 0xffff) << 16) | ((minor & 0xff) << 8) | (subminor & 0xff);
+ }
-// Darwin-only
-pub fn __isPlatformVersionAtLeast(platform: u32, major: u32, minor: u32, subminor: u32) callconv(.C) i32 {
- const build_version = dyld_build_version_t{
- .platform = platform,
- .version = constructVersion(major, minor, subminor),
+ // Darwin-only
+ fn __isPlatformVersionAtLeast(platform: u32, major: u32, minor: u32, subminor: u32) callconv(.C) i32 {
+ const build_version = dyld_build_version_t{
+ .platform = platform,
+ .version = constructVersion(major, minor, subminor),
+ };
+ return @boolToInt(_availability_version_check(1, &[_]dyld_build_version_t{build_version}));
+ }
+
+ // _availability_version_check darwin API support.
+ const dyld_platform_t = u32;
+ const dyld_build_version_t = extern struct {
+ platform: dyld_platform_t,
+ version: u32,
};
- return @boolToInt(_availability_version_check(1, &[_]dyld_build_version_t{build_version}));
-}
-
-// _availability_version_check darwin API support.
-const dyld_platform_t = u32;
-const dyld_build_version_t = extern struct {
- platform: dyld_platform_t,
- version: u32,
-};
-// Darwin-only
-extern "c" fn _availability_version_check(count: u32, versions: [*c]const dyld_build_version_t) bool;
+ // Darwin-only
+ extern "c" fn _availability_version_check(count: u32, versions: [*c]const dyld_build_version_t) bool;
+}.__isPlatformVersionAtLeast else struct {};
test "isPlatformVersionAtLeast" {
- if (!builtin.os.tag.isDarwin()) return error.SkipZigTest;
+ if (!comptime builtin.os.tag.isDarwin()) return error.SkipZigTest;
// Note: this test depends on the actual host OS version since it is merely calling into the
// native Darwin API.
diff --git a/lib/std/special/compiler_rt/parity.zig b/lib/compiler_rt/parity.zig
similarity index 68%
rename from lib/std/special/compiler_rt/parity.zig
rename to lib/compiler_rt/parity.zig
index ae634b0790..2f48a38bff 100644
--- a/lib/std/special/compiler_rt/parity.zig
+++ b/lib/compiler_rt/parity.zig
@@ -1,12 +1,31 @@
+//! parity - if number of bits set is even => 0, else => 1
+//! - pariytXi2_generic for big and little endian
+
const std = @import("std");
const builtin = @import("builtin");
+const common = @import("common.zig");
-// parity - if number of bits set is even => 0, else => 1
-// - pariytXi2_generic for big and little endian
+pub const panic = common.panic;
+
+comptime {
+ @export(__paritysi2, .{ .name = "__paritysi2", .linkage = common.linkage });
+ @export(__paritydi2, .{ .name = "__paritydi2", .linkage = common.linkage });
+ @export(__parityti2, .{ .name = "__parityti2", .linkage = common.linkage });
+}
+
+pub fn __paritysi2(a: i32) callconv(.C) i32 {
+ return parityXi2(i32, a);
+}
+
+pub fn __paritydi2(a: i64) callconv(.C) i32 {
+ return parityXi2(i64, a);
+}
+
+pub fn __parityti2(a: i128) callconv(.C) i32 {
+ return parityXi2(i128, a);
+}
inline fn parityXi2(comptime T: type, a: T) i32 {
- @setRuntimeSafety(builtin.is_test);
-
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
@@ -23,18 +42,6 @@ inline fn parityXi2(comptime T: type, a: T) i32 {
return (@intCast(u16, 0x6996) >> @intCast(u4, x)) & 1; // optimization for >>2 and >>1
}
-pub fn __paritysi2(a: i32) callconv(.C) i32 {
- return parityXi2(i32, a);
-}
-
-pub fn __paritydi2(a: i64) callconv(.C) i32 {
- return parityXi2(i64, a);
-}
-
-pub fn __parityti2(a: i128) callconv(.C) i32 {
- return parityXi2(i128, a);
-}
-
test {
_ = @import("paritysi2_test.zig");
_ = @import("paritydi2_test.zig");
diff --git a/lib/std/special/compiler_rt/paritydi2_test.zig b/lib/compiler_rt/paritydi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/paritydi2_test.zig
rename to lib/compiler_rt/paritydi2_test.zig
diff --git a/lib/std/special/compiler_rt/paritysi2_test.zig b/lib/compiler_rt/paritysi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/paritysi2_test.zig
rename to lib/compiler_rt/paritysi2_test.zig
diff --git a/lib/std/special/compiler_rt/parityti2_test.zig b/lib/compiler_rt/parityti2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/parityti2_test.zig
rename to lib/compiler_rt/parityti2_test.zig
diff --git a/lib/std/special/compiler_rt/popcount.zig b/lib/compiler_rt/popcount.zig
similarity index 58%
rename from lib/std/special/compiler_rt/popcount.zig
rename to lib/compiler_rt/popcount.zig
index 362b232fb8..803e93f35a 100644
--- a/lib/std/special/compiler_rt/popcount.zig
+++ b/lib/compiler_rt/popcount.zig
@@ -1,17 +1,36 @@
+//! popcount - population count
+//! counts the number of 1 bits
+//! SWAR-Popcount: count bits of duos, aggregate to nibbles, and bytes inside
+//! x-bit register in parallel to sum up all bytes
+//! SWAR-Masks and factors can be defined as 2-adic fractions
+//! TAOCP: Combinational Algorithms, Bitwise Tricks And Techniques,
+//! subsubsection "Working with the rightmost bits" and "Sideways addition".
+
const builtin = @import("builtin");
const std = @import("std");
+const common = @import("common.zig");
-// popcount - population count
-// counts the number of 1 bits
+pub const panic = common.panic;
-// SWAR-Popcount: count bits of duos, aggregate to nibbles, and bytes inside
-// x-bit register in parallel to sum up all bytes
-// SWAR-Masks and factors can be defined as 2-adic fractions
-// TAOCP: Combinational Algorithms, Bitwise Tricks And Techniques,
-// subsubsection "Working with the rightmost bits" and "Sideways addition".
+comptime {
+ @export(__popcountsi2, .{ .name = "__popcountsi2", .linkage = common.linkage });
+ @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = common.linkage });
+ @export(__popcountti2, .{ .name = "__popcountti2", .linkage = common.linkage });
+}
+
+pub fn __popcountsi2(a: i32) callconv(.C) i32 {
+ return popcountXi2(i32, a);
+}
+
+pub fn __popcountdi2(a: i64) callconv(.C) i32 {
+ return popcountXi2(i64, a);
+}
+
+pub fn __popcountti2(a: i128) callconv(.C) i32 {
+ return popcountXi2(i128, a);
+}
inline fn popcountXi2(comptime ST: type, a: ST) i32 {
- @setRuntimeSafety(builtin.is_test);
const UT = switch (ST) {
i32 => u32,
i64 => u64,
@@ -30,18 +49,6 @@ inline fn popcountXi2(comptime ST: type, a: ST) i32 {
return @intCast(i32, x);
}
-pub fn __popcountsi2(a: i32) callconv(.C) i32 {
- return popcountXi2(i32, a);
-}
-
-pub fn __popcountdi2(a: i64) callconv(.C) i32 {
- return popcountXi2(i64, a);
-}
-
-pub fn __popcountti2(a: i128) callconv(.C) i32 {
- return popcountXi2(i128, a);
-}
-
test {
_ = @import("popcountsi2_test.zig");
_ = @import("popcountdi2_test.zig");
diff --git a/lib/std/special/compiler_rt/popcountdi2_test.zig b/lib/compiler_rt/popcountdi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/popcountdi2_test.zig
rename to lib/compiler_rt/popcountdi2_test.zig
diff --git a/lib/std/special/compiler_rt/popcountsi2_test.zig b/lib/compiler_rt/popcountsi2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/popcountsi2_test.zig
rename to lib/compiler_rt/popcountsi2_test.zig
diff --git a/lib/std/special/compiler_rt/popcountti2_test.zig b/lib/compiler_rt/popcountti2_test.zig
similarity index 100%
rename from lib/std/special/compiler_rt/popcountti2_test.zig
rename to lib/compiler_rt/popcountti2_test.zig
diff --git a/lib/std/math/__rem_pio2.zig b/lib/compiler_rt/rem_pio2.zig
similarity index 94%
rename from lib/std/math/__rem_pio2.zig
rename to lib/compiler_rt/rem_pio2.zig
index c8cb8fb644..73d477ee12 100644
--- a/lib/std/math/__rem_pio2.zig
+++ b/lib/compiler_rt/rem_pio2.zig
@@ -3,11 +3,11 @@
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/__rem_pio2.c
-const std = @import("../std.zig");
-const __rem_pio2_large = @import("__rem_pio2_large.zig").__rem_pio2_large;
+const std = @import("std");
+const rem_pio2_large = @import("rem_pio2_large.zig").rem_pio2_large;
const math = std.math;
-const toint = 1.5 / math.epsilon(f64);
+const toint = 1.5 / math.floatEps(f64);
// pi/4
const pio4 = 0x1.921fb54442d18p-1;
// invpio2: 53 bits of 2/pi
@@ -82,10 +82,10 @@ fn medium(ix: u32, x: f64, y: *[2]f64) i32 {
// Returns the remainder of x rem pi/2 in y[0]+y[1]
//
-// use __rem_pio2_large() for large x
+// use rem_pio2_large() for large x
//
// caller must handle the case when reduction is not needed: |x| ~<= pi/4 */
-pub fn __rem_pio2(x: f64, y: *[2]f64) i32 {
+pub fn rem_pio2(x: f64, y: *[2]f64) i32 {
var z: f64 = undefined;
var tx: [3]f64 = undefined;
var ty: [2]f64 = undefined;
@@ -186,7 +186,7 @@ pub fn __rem_pio2(x: f64, y: *[2]f64) i32 {
while (tx[U(i)] == 0.0) {
i -= 1;
}
- n = __rem_pio2_large(tx[0..], ty[0..], @intCast(i32, (ix >> 20)) - (0x3ff + 23), i + 1, 1);
+ n = rem_pio2_large(tx[0..], ty[0..], @intCast(i32, (ix >> 20)) - (0x3ff + 23), i + 1, 1);
if (sign) {
y[0] = -ty[0];
y[1] = -ty[1];
diff --git a/lib/std/math/__rem_pio2_large.zig b/lib/compiler_rt/rem_pio2_large.zig
similarity index 75%
rename from lib/std/math/__rem_pio2_large.zig
rename to lib/compiler_rt/rem_pio2_large.zig
index 140e85f7f6..c8a53b741c 100644
--- a/lib/std/math/__rem_pio2_large.zig
+++ b/lib/compiler_rt/rem_pio2_large.zig
@@ -3,23 +3,22 @@
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/__rem_pio2_large.c
-const std = @import("../std.zig");
+const std = @import("std");
const math = std.math;
const init_jk = [_]i32{ 3, 4, 4, 6 }; // initial value for jk
-//
-// Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
-//
-// integer array, contains the (24*i)-th to (24*i+23)-th
-// bit of 2/pi after binary point. The corresponding
-// floating value is
-//
-// ipio2[i] * 2^(-24(i+1)).
-//
-// NB: This table must have at least (e0-3)/24 + jk terms.
-// For quad precision (e0 <= 16360, jk = 6), this is 686.
///
+/// Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
+///
+/// integer array, contains the (24*i)-th to (24*i+23)-th
+/// bit of 2/pi after binary point. The corresponding
+/// floating value is
+///
+/// ipio2[i] * 2^(-24(i+1)).
+///
+/// NB: This table must have at least (e0-3)/24 + jk terms.
+/// For quad precision (e0 <= 16360, jk = 6), this is 686.
const ipio2 = [_]i32{
0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62,
0x95993C, 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A,
@@ -33,7 +32,6 @@ const ipio2 = [_]i32{
0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880,
0x4D7327, 0x310606, 0x1556CA, 0x73A8C9, 0x60E27B, 0xC08C6B,
- //#if LDBL_MAX_EXP > 1024
0x47C419, 0xC367CD, 0xDCE809, 0x2A8359, 0xC4768B, 0x961CA6,
0xDDAF44, 0xD15719, 0x053EA5, 0xFF0705, 0x3F7E33, 0xE832C2,
0xDE4F98, 0x327DBB, 0xC33D26, 0xEF6B1E, 0x5EF89F, 0x3A1F35,
@@ -137,9 +135,7 @@ const ipio2 = [_]i32{
0x237C7E, 0x32B90F, 0x8EF5A7, 0xE75614, 0x08F121, 0x2A9DB5,
0x4D7E6F, 0x5119A5, 0xABF9B5, 0xD6DF82, 0x61DD96, 0x023616,
0x9F3AC4, 0xA1A283, 0x6DED72, 0x7A8D39, 0xA9B882, 0x5C326B,
- 0x5B2746, 0xED3400, 0x7700D2, 0x55F4FC, 0x4D5901,
- 0x8071E0,
- //#endif
+ 0x5B2746, 0xED3400, 0x7700D2, 0x55F4FC, 0x4D5901, 0x8071E0,
};
const PIo2 = [_]f64{
@@ -157,109 +153,109 @@ fn U(x: anytype) usize {
return @intCast(usize, x);
}
-// Returns the last three digits of N with y = x - N*pi/2 so that |y| < pi/2.
-//
-// The method is to compute the integer (mod 8) and fraction parts of
-// (2/pi)*x without doing the full multiplication. In general we
-// skip the part of the product that are known to be a huge integer (
-// more accurately, = 0 mod 8 ). Thus the number of operations are
-// independent of the exponent of the input.
-//
-// (2/pi) is represented by an array of 24-bit integers in ipio2[].
-//
-// Input parameters:
-// x[] The input value (must be positive) is broken into nx
-// pieces of 24-bit integers in double precision format.
-// x[i] will be the i-th 24 bit of x. The scaled exponent
-// of x[0] is given in input parameter e0 (i.e., x[0]*2^e0
-// match x's up to 24 bits.
-//
-// Example of breaking a double positive z into x[0]+x[1]+x[2]:
-// e0 = ilogb(z)-23
-// z = scalbn(z,-e0)
-// for i = 0,1,2
-// x[i] = floor(z)
-// z = (z-x[i])*2**24
-//
-//
-// y[] ouput result in an array of double precision numbers.
-// The dimension of y[] is:
-// 24-bit precision 1
-// 53-bit precision 2
-// 64-bit precision 2
-// 113-bit precision 3
-// The actual value is the sum of them. Thus for 113-bit
-// precison, one may have to do something like:
-//
-// long double t,w,r_head, r_tail;
-// t = (long double)y[2] + (long double)y[1];
-// w = (long double)y[0];
-// r_head = t+w;
-// r_tail = w - (r_head - t);
-//
-// e0 The exponent of x[0]. Must be <= 16360 or you need to
-// expand the ipio2 table.
-//
-// nx dimension of x[]
-//
-// prec an integer indicating the precision:
-// 0 24 bits (single)
-// 1 53 bits (double)
-// 2 64 bits (extended)
-// 3 113 bits (quad)
-//
-// Here is the description of some local variables:
-//
-// jk jk+1 is the initial number of terms of ipio2[] needed
-// in the computation. The minimum and recommended value
-// for jk is 3,4,4,6 for single, double, extended, and quad.
-// jk+1 must be 2 larger than you might expect so that our
-// recomputation test works. (Up to 24 bits in the integer
-// part (the 24 bits of it that we compute) and 23 bits in
-// the fraction part may be lost to cancelation before we
-// recompute.)
-//
-// jz local integer variable indicating the number of
-// terms of ipio2[] used.
-//
-// jx nx - 1
-//
-// jv index for pointing to the suitable ipio2[] for the
-// computation. In general, we want
-// ( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
-// is an integer. Thus
-// e0-3-24*jv >= 0 or (e0-3)/24 >= jv
-// Hence jv = max(0,(e0-3)/24).
-//
-// jp jp+1 is the number of terms in PIo2[] needed, jp = jk.
-//
-// q[] double array with integral value, representing the
-// 24-bits chunk of the product of x and 2/pi.
-//
-// q0 the corresponding exponent of q[0]. Note that the
-// exponent for q[i] would be q0-24*i.
-//
-// PIo2[] double precision array, obtained by cutting pi/2
-// into 24 bits chunks.
-//
-// f[] ipio2[] in floating point
-//
-// iq[] integer array by breaking up q[] in 24-bits chunk.
-//
-// fq[] final product of x*(2/pi) in fq[0],..,fq[jk]
-//
-// ih integer. If >0 it indicates q[] is >= 0.5, hence
-// it also indicates the *sign* of the result.
-//
+/// Returns the last three digits of N with y = x - N*pi/2 so that |y| < pi/2.
///
-//
-// Constants:
-// The hexadecimal values are the intended ones for the following
-// constants. The decimal values may be used, provided that the
-// compiler will convert from decimal to binary accurately enough
-// to produce the hexadecimal values shown.
+/// The method is to compute the integer (mod 8) and fraction parts of
+/// (2/pi)*x without doing the full multiplication. In general we
+/// skip the part of the product that are known to be a huge integer (
+/// more accurately, = 0 mod 8 ). Thus the number of operations are
+/// independent of the exponent of the input.
///
-pub fn __rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
+/// (2/pi) is represented by an array of 24-bit integers in ipio2[].
+///
+/// Input parameters:
+/// x[] The input value (must be positive) is broken into nx
+/// pieces of 24-bit integers in double precision format.
+/// x[i] will be the i-th 24 bit of x. The scaled exponent
+/// of x[0] is given in input parameter e0 (i.e., x[0]*2^e0
+/// match x's up to 24 bits.
+///
+/// Example of breaking a double positive z into x[0]+x[1]+x[2]:
+/// e0 = ilogb(z)-23
+/// z = scalbn(z,-e0)
+/// for i = 0,1,2
+/// x[i] = floor(z)
+/// z = (z-x[i])*2**24
+///
+///
+/// y[] ouput result in an array of double precision numbers.
+/// The dimension of y[] is:
+/// 24-bit precision 1
+/// 53-bit precision 2
+/// 64-bit precision 2
+/// 113-bit precision 3
+/// The actual value is the sum of them. Thus for 113-bit
+/// precison, one may have to do something like:
+///
+/// long double t,w,r_head, r_tail;
+/// t = (long double)y[2] + (long double)y[1];
+/// w = (long double)y[0];
+/// r_head = t+w;
+/// r_tail = w - (r_head - t);
+///
+/// e0 The exponent of x[0]. Must be <= 16360 or you need to
+/// expand the ipio2 table.
+///
+/// nx dimension of x[]
+///
+/// prec an integer indicating the precision:
+/// 0 24 bits (single)
+/// 1 53 bits (double)
+/// 2 64 bits (extended)
+/// 3 113 bits (quad)
+///
+/// Here is the description of some local variables:
+///
+/// jk jk+1 is the initial number of terms of ipio2[] needed
+/// in the computation. The minimum and recommended value
+/// for jk is 3,4,4,6 for single, double, extended, and quad.
+/// jk+1 must be 2 larger than you might expect so that our
+/// recomputation test works. (Up to 24 bits in the integer
+/// part (the 24 bits of it that we compute) and 23 bits in
+/// the fraction part may be lost to cancelation before we
+/// recompute.)
+///
+/// jz local integer variable indicating the number of
+/// terms of ipio2[] used.
+///
+/// jx nx - 1
+///
+/// jv index for pointing to the suitable ipio2[] for the
+/// computation. In general, we want
+/// ( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
+/// is an integer. Thus
+/// e0-3-24*jv >= 0 or (e0-3)/24 >= jv
+/// Hence jv = max(0,(e0-3)/24).
+///
+/// jp jp+1 is the number of terms in PIo2[] needed, jp = jk.
+///
+/// q[] double array with integral value, representing the
+/// 24-bits chunk of the product of x and 2/pi.
+///
+/// q0 the corresponding exponent of q[0]. Note that the
+/// exponent for q[i] would be q0-24*i.
+///
+/// PIo2[] double precision array, obtained by cutting pi/2
+/// into 24 bits chunks.
+///
+/// f[] ipio2[] in floating point
+///
+/// iq[] integer array by breaking up q[] in 24-bits chunk.
+///
+/// fq[] final product of x*(2/pi) in fq[0],..,fq[jk]
+///
+/// ih integer. If >0 it indicates q[] is >= 0.5, hence
+/// it also indicates the *sign* of the result.
+///
+///
+///
+/// Constants:
+/// The hexadecimal values are the intended ones for the following
+/// constants. The decimal values may be used, provided that the
+/// compiler will convert from decimal to binary accurately enough
+/// to produce the hexadecimal values shown.
+///
+pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
var jz: i32 = undefined;
var jx: i32 = undefined;
var jv: i32 = undefined;
@@ -333,7 +329,7 @@ pub fn __rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
// compute n
z = math.scalbn(z, q0); // actual value of z
- z -= 8.0 * math.floor(z * 0.125); // trim off integer >= 8
+ z -= 8.0 * @floor(z * 0.125); // trim off integer >= 8
n = @floatToInt(i32, z);
z -= @intToFloat(f64, n);
ih = 0;
diff --git a/lib/std/math/__rem_pio2f.zig b/lib/compiler_rt/rem_pio2f.zig
similarity index 87%
rename from lib/std/math/__rem_pio2f.zig
rename to lib/compiler_rt/rem_pio2f.zig
index 9f78e18d36..34397dd734 100644
--- a/lib/std/math/__rem_pio2f.zig
+++ b/lib/compiler_rt/rem_pio2f.zig
@@ -3,11 +3,11 @@
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/__rem_pio2f.c
-const std = @import("../std.zig");
-const __rem_pio2_large = @import("__rem_pio2_large.zig").__rem_pio2_large;
+const std = @import("std");
+const rem_pio2_large = @import("rem_pio2_large.zig").rem_pio2_large;
const math = std.math;
-const toint = 1.5 / math.epsilon(f64);
+const toint = 1.5 / math.floatEps(f64);
// pi/4
const pio4 = 0x1.921fb6p-1;
// invpio2: 53 bits of 2/pi
@@ -19,8 +19,8 @@ const pio2_1t = 1.58932547735281966916e-08; // 0x3E5110b4, 0x611A6263
// Returns the remainder of x rem pi/2 in *y
// use double precision for everything except passing x
-// use __rem_pio2_large() for large x
-pub fn __rem_pio2f(x: f32, y: *f64) i32 {
+// use rem_pio2_large() for large x
+pub fn rem_pio2f(x: f32, y: *f64) i32 {
var tx: [1]f64 = undefined;
var ty: [1]f64 = undefined;
var @"fn": f64 = undefined;
@@ -60,7 +60,7 @@ pub fn __rem_pio2f(x: f32, y: *f64) i32 {
e0 = (ix >> 23) - (0x7f + 23); // e0 = ilogb(|x|)-23, positive
ui = ix - (e0 << 23);
tx[0] = @bitCast(f32, ui);
- n = __rem_pio2_large(&tx, &ty, @intCast(i32, e0), 1, 0);
+ n = rem_pio2_large(&tx, &ty, @intCast(i32, e0), 1, 0);
if (sign) {
y.* = -ty[0];
return -n;
diff --git a/lib/compiler_rt/round.zig b/lib/compiler_rt/round.zig
new file mode 100644
index 0000000000..acd26d8823
--- /dev/null
+++ b/lib/compiler_rt/round.zig
@@ -0,0 +1,195 @@
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/roundf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/round.c
+
+const std = @import("std");
+const builtin = @import("builtin");
+const math = std.math;
+const expect = std.testing.expect;
+const arch = builtin.cpu.arch;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__roundh, .{ .name = "__roundh", .linkage = common.linkage });
+ @export(roundf, .{ .name = "roundf", .linkage = common.linkage });
+ @export(round, .{ .name = "round", .linkage = common.linkage });
+ @export(__roundx, .{ .name = "__roundx", .linkage = common.linkage });
+ const roundq_sym_name = if (common.want_ppc_abi) "roundf128" else "roundq";
+ @export(roundq, .{ .name = roundq_sym_name, .linkage = common.linkage });
+ @export(roundl, .{ .name = "roundl", .linkage = common.linkage });
+}
+
+pub fn __roundh(x: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, roundf(x));
+}
+
+pub fn roundf(x_: f32) callconv(.C) f32 {
+ const f32_toint = 1.0 / math.floatEps(f32);
+
+ var x = x_;
+ const u = @bitCast(u32, x);
+ const e = (u >> 23) & 0xFF;
+ var y: f32 = undefined;
+
+ if (e >= 0x7F + 23) {
+ return x;
+ }
+ if (u >> 31 != 0) {
+ x = -x;
+ }
+ if (e < 0x7F - 1) {
+ math.doNotOptimizeAway(x + f32_toint);
+ return 0 * @bitCast(f32, u);
+ }
+
+ y = x + f32_toint - f32_toint - x;
+ if (y > 0.5) {
+ y = y + x - 1;
+ } else if (y <= -0.5) {
+ y = y + x + 1;
+ } else {
+ y = y + x;
+ }
+
+ if (u >> 31 != 0) {
+ return -y;
+ } else {
+ return y;
+ }
+}
+
+pub fn round(x_: f64) callconv(.C) f64 {
+ const f64_toint = 1.0 / math.floatEps(f64);
+
+ var x = x_;
+ const u = @bitCast(u64, x);
+ const e = (u >> 52) & 0x7FF;
+ var y: f64 = undefined;
+
+ if (e >= 0x3FF + 52) {
+ return x;
+ }
+ if (u >> 63 != 0) {
+ x = -x;
+ }
+ if (e < 0x3ff - 1) {
+ math.doNotOptimizeAway(x + f64_toint);
+ return 0 * @bitCast(f64, u);
+ }
+
+ y = x + f64_toint - f64_toint - x;
+ if (y > 0.5) {
+ y = y + x - 1;
+ } else if (y <= -0.5) {
+ y = y + x + 1;
+ } else {
+ y = y + x;
+ }
+
+ if (u >> 63 != 0) {
+ return -y;
+ } else {
+ return y;
+ }
+}
+
+pub fn __roundx(x: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, roundq(x));
+}
+
+pub fn roundq(x_: f128) callconv(.C) f128 {
+ const f128_toint = 1.0 / math.floatEps(f128);
+
+ var x = x_;
+ const u = @bitCast(u128, x);
+ const e = (u >> 112) & 0x7FFF;
+ var y: f128 = undefined;
+
+ if (e >= 0x3FFF + 112) {
+ return x;
+ }
+ if (u >> 127 != 0) {
+ x = -x;
+ }
+ if (e < 0x3FFF - 1) {
+ math.doNotOptimizeAway(x + f128_toint);
+ return 0 * @bitCast(f128, u);
+ }
+
+ y = x + f128_toint - f128_toint - x;
+ if (y > 0.5) {
+ y = y + x - 1;
+ } else if (y <= -0.5) {
+ y = y + x + 1;
+ } else {
+ y = y + x;
+ }
+
+ if (u >> 127 != 0) {
+ return -y;
+ } else {
+ return y;
+ }
+}
+
+pub fn roundl(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __roundh(x),
+ 32 => return roundf(x),
+ 64 => return round(x),
+ 80 => return __roundx(x),
+ 128 => return roundq(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+test "round32" {
+ try expect(roundf(1.3) == 1.0);
+ try expect(roundf(-1.3) == -1.0);
+ try expect(roundf(0.2) == 0.0);
+ try expect(roundf(1.8) == 2.0);
+}
+
+test "round64" {
+ try expect(round(1.3) == 1.0);
+ try expect(round(-1.3) == -1.0);
+ try expect(round(0.2) == 0.0);
+ try expect(round(1.8) == 2.0);
+}
+
+test "round128" {
+ try expect(roundq(1.3) == 1.0);
+ try expect(roundq(-1.3) == -1.0);
+ try expect(roundq(0.2) == 0.0);
+ try expect(roundq(1.8) == 2.0);
+}
+
+test "round32.special" {
+ try expect(roundf(0.0) == 0.0);
+ try expect(roundf(-0.0) == -0.0);
+ try expect(math.isPositiveInf(roundf(math.inf(f32))));
+ try expect(math.isNegativeInf(roundf(-math.inf(f32))));
+ try expect(math.isNan(roundf(math.nan(f32))));
+}
+
+test "round64.special" {
+ try expect(round(0.0) == 0.0);
+ try expect(round(-0.0) == -0.0);
+ try expect(math.isPositiveInf(round(math.inf(f64))));
+ try expect(math.isNegativeInf(round(-math.inf(f64))));
+ try expect(math.isNan(round(math.nan(f64))));
+}
+
+test "round128.special" {
+ try expect(roundq(0.0) == 0.0);
+ try expect(roundq(-0.0) == -0.0);
+ try expect(math.isPositiveInf(roundq(math.inf(f128))));
+ try expect(math.isNegativeInf(roundq(-math.inf(f128))));
+ try expect(math.isNan(roundq(math.nan(f128))));
+}
diff --git a/lib/std/special/compiler_rt/shift.zig b/lib/compiler_rt/shift.zig
similarity index 67%
rename from lib/std/special/compiler_rt/shift.zig
rename to lib/compiler_rt/shift.zig
index edcf246daf..ee8b634fbb 100644
--- a/lib/std/special/compiler_rt/shift.zig
+++ b/lib/compiler_rt/shift.zig
@@ -1,13 +1,33 @@
const std = @import("std");
+const builtin = @import("builtin");
const Log2Int = std.math.Log2Int;
-const native_endian = @import("builtin").cpu.arch.endian();
+const native_endian = builtin.cpu.arch.endian();
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__ashlti3, .{ .name = "__ashlti3", .linkage = common.linkage });
+ @export(__ashrti3, .{ .name = "__ashrti3", .linkage = common.linkage });
+ @export(__lshrti3, .{ .name = "__lshrti3", .linkage = common.linkage });
+
+ if (common.want_aeabi) {
+ @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = common.linkage });
+ @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = common.linkage });
+ @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = common.linkage });
+ } else {
+ @export(__ashldi3, .{ .name = "__ashldi3", .linkage = common.linkage });
+ @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = common.linkage });
+ @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = common.linkage });
+ }
+}
fn Dwords(comptime T: type, comptime signed_half: bool) type {
return extern union {
- pub const bits = @divExact(@typeInfo(T).Int.bits, 2);
- pub const HalfTU = std.meta.Int(.unsigned, bits);
- pub const HalfTS = std.meta.Int(.signed, bits);
- pub const HalfT = if (signed_half) HalfTS else HalfTU;
+ const bits = @divExact(@typeInfo(T).Int.bits, 2);
+ const HalfTU = std.meta.Int(.unsigned, bits);
+ const HalfTS = std.meta.Int(.signed, bits);
+ const HalfT = if (signed_half) HalfTS else HalfTU;
all: T,
s: if (native_endian == .Little)
@@ -19,7 +39,7 @@ fn Dwords(comptime T: type, comptime signed_half: bool) type {
// Arithmetic shift left
// Precondition: 0 <= b < bits_in_dword
-pub inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
+inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, false);
const S = Log2Int(dwords.HalfT);
@@ -42,7 +62,7 @@ pub inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
// Arithmetic shift right
// Precondition: 0 <= b < T.bit_count
-pub inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
+inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, true);
const S = Log2Int(dwords.HalfT);
@@ -69,7 +89,7 @@ pub inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
// Logical shift right
// Precondition: 0 <= b < T.bit_count
-pub inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
+inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, false);
const S = Log2Int(dwords.HalfT);
@@ -93,30 +113,34 @@ pub inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
pub fn __ashldi3(a: i64, b: i32) callconv(.C) i64 {
return ashlXi3(i64, a, b);
}
+fn __aeabi_llsl(a: i64, b: i32) callconv(.AAPCS) i64 {
+ return ashlXi3(i64, a, b);
+}
+
pub fn __ashlti3(a: i128, b: i32) callconv(.C) i128 {
return ashlXi3(i128, a, b);
}
+
pub fn __ashrdi3(a: i64, b: i32) callconv(.C) i64 {
return ashrXi3(i64, a, b);
}
+fn __aeabi_lasr(a: i64, b: i32) callconv(.AAPCS) i64 {
+ return ashrXi3(i64, a, b);
+}
+
pub fn __ashrti3(a: i128, b: i32) callconv(.C) i128 {
return ashrXi3(i128, a, b);
}
+
pub fn __lshrdi3(a: i64, b: i32) callconv(.C) i64 {
return lshrXi3(i64, a, b);
}
-pub fn __lshrti3(a: i128, b: i32) callconv(.C) i128 {
- return lshrXi3(i128, a, b);
+fn __aeabi_llsr(a: i64, b: i32) callconv(.AAPCS) i64 {
+ return lshrXi3(i64, a, b);
}
-pub fn __aeabi_llsl(a: i64, b: i32) callconv(.AAPCS) i64 {
- return ashlXi3(i64, a, b);
-}
-pub fn __aeabi_lasr(a: i64, b: i32) callconv(.AAPCS) i64 {
- return ashrXi3(i64, a, b);
-}
-pub fn __aeabi_llsr(a: i64, b: i32) callconv(.AAPCS) i64 {
- return lshrXi3(i64, a, b);
+pub fn __lshrti3(a: i128, b: i32) callconv(.C) i128 {
+ return lshrXi3(i128, a, b);
}
test {
diff --git a/lib/compiler_rt/sin.zig b/lib/compiler_rt/sin.zig
new file mode 100644
index 0000000000..1b93aab948
--- /dev/null
+++ b/lib/compiler_rt/sin.zig
@@ -0,0 +1,188 @@
+//! Ported from musl, which is licensed under the MIT license:
+//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+//!
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/sinf.c
+//! https://git.musl-libc.org/cgit/musl/tree/src/math/sin.c
+
+const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const math = std.math;
+const expect = std.testing.expect;
+const common = @import("common.zig");
+
+const trig = @import("trig.zig");
+const rem_pio2 = @import("rem_pio2.zig").rem_pio2;
+const rem_pio2f = @import("rem_pio2f.zig").rem_pio2f;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__sinh, .{ .name = "__sinh", .linkage = common.linkage });
+ @export(sinf, .{ .name = "sinf", .linkage = common.linkage });
+ @export(sin, .{ .name = "sin", .linkage = common.linkage });
+ @export(__sinx, .{ .name = "__sinx", .linkage = common.linkage });
+ const sinq_sym_name = if (common.want_ppc_abi) "sinf128" else "sinq";
+ @export(sinq, .{ .name = sinq_sym_name, .linkage = common.linkage });
+ @export(sinl, .{ .name = "sinl", .linkage = common.linkage });
+}
+
+pub fn __sinh(x: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, sinf(x));
+}
+
+pub fn sinf(x: f32) callconv(.C) f32 {
+ // Small multiples of pi/2 rounded to double precision.
+ const s1pio2: f64 = 1.0 * math.pi / 2.0; // 0x3FF921FB, 0x54442D18
+ const s2pio2: f64 = 2.0 * math.pi / 2.0; // 0x400921FB, 0x54442D18
+ const s3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2
+ const s4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18
+
+ var ix = @bitCast(u32, x);
+ const sign = ix >> 31 != 0;
+ ix &= 0x7fffffff;
+
+ if (ix <= 0x3f490fda) { // |x| ~<= pi/4
+ if (ix < 0x39800000) { // |x| < 2**-12
+ // raise inexact if x!=0 and underflow if subnormal
+ math.doNotOptimizeAway(if (ix < 0x00800000) x / 0x1p120 else x + 0x1p120);
+ return x;
+ }
+ return trig.__sindf(x);
+ }
+ if (ix <= 0x407b53d1) { // |x| ~<= 5*pi/4
+ if (ix <= 0x4016cbe3) { // |x| ~<= 3pi/4
+ if (sign) {
+ return -trig.__cosdf(x + s1pio2);
+ } else {
+ return trig.__cosdf(x - s1pio2);
+ }
+ }
+ return trig.__sindf(if (sign) -(x + s2pio2) else -(x - s2pio2));
+ }
+ if (ix <= 0x40e231d5) { // |x| ~<= 9*pi/4
+ if (ix <= 0x40afeddf) { // |x| ~<= 7*pi/4
+ if (sign) {
+ return trig.__cosdf(x + s3pio2);
+ } else {
+ return -trig.__cosdf(x - s3pio2);
+ }
+ }
+ return trig.__sindf(if (sign) x + s4pio2 else x - s4pio2);
+ }
+
+ // sin(Inf or NaN) is NaN
+ if (ix >= 0x7f800000) {
+ return x - x;
+ }
+
+ var y: f64 = undefined;
+ const n = rem_pio2f(x, &y);
+ return switch (n & 3) {
+ 0 => trig.__sindf(y),
+ 1 => trig.__cosdf(y),
+ 2 => trig.__sindf(-y),
+ else => -trig.__cosdf(y),
+ };
+}
+
+pub fn sin(x: f64) callconv(.C) f64 {
+ var ix = @bitCast(u64, x) >> 32;
+ ix &= 0x7fffffff;
+
+ // |x| ~< pi/4
+ if (ix <= 0x3fe921fb) {
+ if (ix < 0x3e500000) { // |x| < 2**-26
+ // raise inexact if x != 0 and underflow if subnormal
+ math.doNotOptimizeAway(if (ix < 0x00100000) x / 0x1p120 else x + 0x1p120);
+ return x;
+ }
+ return trig.__sin(x, 0.0, 0);
+ }
+
+ // sin(Inf or NaN) is NaN
+ if (ix >= 0x7ff00000) {
+ return x - x;
+ }
+
+ var y: [2]f64 = undefined;
+ const n = rem_pio2(x, &y);
+ return switch (n & 3) {
+ 0 => trig.__sin(y[0], y[1], 1),
+ 1 => trig.__cos(y[0], y[1]),
+ 2 => -trig.__sin(y[0], y[1], 1),
+ else => -trig.__cos(y[0], y[1]),
+ };
+}
+
+pub fn __sinx(x: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, sinq(x));
+}
+
+pub fn sinq(x: f128) callconv(.C) f128 {
+ // TODO: more correct implementation
+ return sin(@floatCast(f64, x));
+}
+
+pub fn sinl(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __sinh(x),
+ 32 => return sinf(x),
+ 64 => return sin(x),
+ 80 => return __sinx(x),
+ 128 => return sinq(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+test "sin32" {
+ const epsilon = 0.00001;
+
+ try expect(math.approxEqAbs(f32, sinf(0.0), 0.0, epsilon));
+ try expect(math.approxEqAbs(f32, sinf(0.2), 0.198669, epsilon));
+ try expect(math.approxEqAbs(f32, sinf(0.8923), 0.778517, epsilon));
+ try expect(math.approxEqAbs(f32, sinf(1.5), 0.997495, epsilon));
+ try expect(math.approxEqAbs(f32, sinf(-1.5), -0.997495, epsilon));
+ try expect(math.approxEqAbs(f32, sinf(37.45), -0.246544, epsilon));
+ try expect(math.approxEqAbs(f32, sinf(89.123), 0.916166, epsilon));
+}
+
+test "sin64" {
+ const epsilon = 0.000001;
+
+ try expect(math.approxEqAbs(f64, sin(0.0), 0.0, epsilon));
+ try expect(math.approxEqAbs(f64, sin(0.2), 0.198669, epsilon));
+ try expect(math.approxEqAbs(f64, sin(0.8923), 0.778517, epsilon));
+ try expect(math.approxEqAbs(f64, sin(1.5), 0.997495, epsilon));
+ try expect(math.approxEqAbs(f64, sin(-1.5), -0.997495, epsilon));
+ try expect(math.approxEqAbs(f64, sin(37.45), -0.246543, epsilon));
+ try expect(math.approxEqAbs(f64, sin(89.123), 0.916166, epsilon));
+}
+
+test "sin32.special" {
+ try expect(sinf(0.0) == 0.0);
+ try expect(sinf(-0.0) == -0.0);
+ try expect(math.isNan(sinf(math.inf(f32))));
+ try expect(math.isNan(sinf(-math.inf(f32))));
+ try expect(math.isNan(sinf(math.nan(f32))));
+}
+
+test "sin64.special" {
+ try expect(sin(0.0) == 0.0);
+ try expect(sin(-0.0) == -0.0);
+ try expect(math.isNan(sin(math.inf(f64))));
+ try expect(math.isNan(sin(-math.inf(f64))));
+ try expect(math.isNan(sin(math.nan(f64))));
+}
+
+test "sin32 #9901" {
+ const float = @bitCast(f32, @as(u32, 0b11100011111111110000000000000000));
+ _ = sinf(float);
+}
+
+test "sin64 #9901" {
+ const float = @bitCast(f64, @as(u64, 0b1111111101000001000000001111110111111111100000000000000000000001));
+ _ = sin(float);
+}
diff --git a/lib/compiler_rt/sincos.zig b/lib/compiler_rt/sincos.zig
new file mode 100644
index 0000000000..c839356a36
--- /dev/null
+++ b/lib/compiler_rt/sincos.zig
@@ -0,0 +1,266 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const math = std.math;
+const trig = @import("trig.zig");
+const rem_pio2 = @import("rem_pio2.zig").rem_pio2;
+const rem_pio2f = @import("rem_pio2f.zig").rem_pio2f;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__sincosh, .{ .name = "__sincosh", .linkage = common.linkage });
+ @export(sincosf, .{ .name = "sincosf", .linkage = common.linkage });
+ @export(sincos, .{ .name = "sincos", .linkage = common.linkage });
+ @export(__sincosx, .{ .name = "__sincosx", .linkage = common.linkage });
+ const sincosq_sym_name = if (common.want_ppc_abi) "sincosf128" else "sincosq";
+ @export(sincosq, .{ .name = sincosq_sym_name, .linkage = common.linkage });
+ @export(sincosl, .{ .name = "sincosl", .linkage = common.linkage });
+}
+
+pub fn __sincosh(x: f16, r_sin: *f16, r_cos: *f16) callconv(.C) void {
+ // TODO: more efficient implementation
+ var big_sin: f32 = undefined;
+ var big_cos: f32 = undefined;
+ sincosf(x, &big_sin, &big_cos);
+ r_sin.* = @floatCast(f16, big_sin);
+ r_cos.* = @floatCast(f16, big_cos);
+}
+
+pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void {
+ const sc1pio2: f64 = 1.0 * math.pi / 2.0; // 0x3FF921FB, 0x54442D18
+ const sc2pio2: f64 = 2.0 * math.pi / 2.0; // 0x400921FB, 0x54442D18
+ const sc3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2
+ const sc4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18
+
+ const pre_ix = @bitCast(u32, x);
+ const sign = pre_ix >> 31 != 0;
+ const ix = pre_ix & 0x7fffffff;
+
+ // |x| ~<= pi/4
+ if (ix <= 0x3f490fda) {
+ // |x| < 2**-12
+ if (ix < 0x39800000) {
+ // raise inexact if x!=0 and underflow if subnormal
+ math.doNotOptimizeAway(if (ix < 0x00100000) x / 0x1p120 else x + 0x1p120);
+ r_sin.* = x;
+ r_cos.* = 1.0;
+ return;
+ }
+ r_sin.* = trig.__sindf(x);
+ r_cos.* = trig.__cosdf(x);
+ return;
+ }
+
+ // |x| ~<= 5*pi/4
+ if (ix <= 0x407b53d1) {
+ // |x| ~<= 3pi/4
+ if (ix <= 0x4016cbe3) {
+ if (sign) {
+ r_sin.* = -trig.__cosdf(x + sc1pio2);
+ r_cos.* = trig.__sindf(x + sc1pio2);
+ } else {
+ r_sin.* = trig.__cosdf(sc1pio2 - x);
+ r_cos.* = trig.__sindf(sc1pio2 - x);
+ }
+ return;
+ }
+ // -sin(x+c) is not correct if x+c could be 0: -0 vs +0
+ r_sin.* = -trig.__sindf(if (sign) x + sc2pio2 else x - sc2pio2);
+ r_cos.* = -trig.__cosdf(if (sign) x + sc2pio2 else x - sc2pio2);
+ return;
+ }
+
+ // |x| ~<= 9*pi/4
+ if (ix <= 0x40e231d5) {
+ // |x| ~<= 7*pi/4
+ if (ix <= 0x40afeddf) {
+ if (sign) {
+ r_sin.* = trig.__cosdf(x + sc3pio2);
+ r_cos.* = -trig.__sindf(x + sc3pio2);
+ } else {
+ r_sin.* = -trig.__cosdf(x - sc3pio2);
+ r_cos.* = trig.__sindf(x - sc3pio2);
+ }
+ return;
+ }
+ r_sin.* = trig.__sindf(if (sign) x + sc4pio2 else x - sc4pio2);
+ r_cos.* = trig.__cosdf(if (sign) x + sc4pio2 else x - sc4pio2);
+ return;
+ }
+
+ // sin(Inf or NaN) is NaN
+ if (ix >= 0x7f800000) {
+ const result = x - x;
+ r_sin.* = result;
+ r_cos.* = result;
+ return;
+ }
+
+ // general argument reduction needed
+ var y: f64 = undefined;
+ const n = rem_pio2f(x, &y);
+ const s = trig.__sindf(y);
+ const c = trig.__cosdf(y);
+ switch (n & 3) {
+ 0 => {
+ r_sin.* = s;
+ r_cos.* = c;
+ },
+ 1 => {
+ r_sin.* = c;
+ r_cos.* = -s;
+ },
+ 2 => {
+ r_sin.* = -s;
+ r_cos.* = -c;
+ },
+ else => {
+ r_sin.* = -c;
+ r_cos.* = s;
+ },
+ }
+}
+
+pub fn sincos(x: f64, r_sin: *f64, r_cos: *f64) callconv(.C) void {
+ const ix = @truncate(u32, @bitCast(u64, x) >> 32) & 0x7fffffff;
+
+ // |x| ~< pi/4
+ if (ix <= 0x3fe921fb) {
+ // if |x| < 2**-27 * sqrt(2)
+ if (ix < 0x3e46a09e) {
+ // raise inexact if x != 0 and underflow if subnormal
+ math.doNotOptimizeAway(if (ix < 0x00100000) x / 0x1p120 else x + 0x1p120);
+ r_sin.* = x;
+ r_cos.* = 1.0;
+ return;
+ }
+ r_sin.* = trig.__sin(x, 0.0, 0);
+ r_cos.* = trig.__cos(x, 0.0);
+ return;
+ }
+
+ // sincos(Inf or NaN) is NaN
+ if (ix >= 0x7ff00000) {
+ const result = x - x;
+ r_sin.* = result;
+ r_cos.* = result;
+ return;
+ }
+
+ // argument reduction needed
+ var y: [2]f64 = undefined;
+ const n = rem_pio2(x, &y);
+ const s = trig.__sin(y[0], y[1], 1);
+ const c = trig.__cos(y[0], y[1]);
+ switch (n & 3) {
+ 0 => {
+ r_sin.* = s;
+ r_cos.* = c;
+ },
+ 1 => {
+ r_sin.* = c;
+ r_cos.* = -s;
+ },
+ 2 => {
+ r_sin.* = -s;
+ r_cos.* = -c;
+ },
+ else => {
+ r_sin.* = -c;
+ r_cos.* = s;
+ },
+ }
+}
+
+pub fn __sincosx(x: f80, r_sin: *f80, r_cos: *f80) callconv(.C) void {
+ // TODO: more efficient implementation
+ //return sincos_generic(f80, x, r_sin, r_cos);
+ var big_sin: f128 = undefined;
+ var big_cos: f128 = undefined;
+ sincosq(x, &big_sin, &big_cos);
+ r_sin.* = @floatCast(f80, big_sin);
+ r_cos.* = @floatCast(f80, big_cos);
+}
+
+pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.C) void {
+ // TODO: more correct implementation
+ //return sincos_generic(f128, x, r_sin, r_cos);
+ var small_sin: f64 = undefined;
+ var small_cos: f64 = undefined;
+ sincos(@floatCast(f64, x), &small_sin, &small_cos);
+ r_sin.* = small_sin;
+ r_cos.* = small_cos;
+}
+
+pub fn sincosl(x: c_longdouble, r_sin: *c_longdouble, r_cos: *c_longdouble) callconv(.C) void {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __sincosh(x, r_sin, r_cos),
+ 32 => return sincosf(x, r_sin, r_cos),
+ 64 => return sincos(x, r_sin, r_cos),
+ 80 => return __sincosx(x, r_sin, r_cos),
+ 128 => return sincosq(x, r_sin, r_cos),
+ else => @compileError("unreachable"),
+ }
+}
+
+pub const rem_pio2_generic = @compileError("TODO");
+
+/// Ported from musl sincosl.c. Needs the following dependencies to be complete:
+/// * rem_pio2_generic ported from __rem_pio2l.c
+/// * trig.sin_generic ported from __sinl.c
+/// * trig.cos_generic ported from __cosl.c
+inline fn sincos_generic(comptime F: type, x: F, r_sin: *F, r_cos: *F) void {
+ const sc1pio4: F = 1.0 * math.pi / 4.0;
+ const bits = @typeInfo(F).Float.bits;
+ const I = std.meta.Int(.unsigned, bits);
+ const ix = @bitCast(I, x) & (math.maxInt(I) >> 1);
+ const se = @truncate(u16, ix >> (bits - 16));
+
+ if (se == 0x7fff) {
+ const result = x - x;
+ r_sin.* = result;
+ r_cos.* = result;
+ return;
+ }
+
+ if (@bitCast(F, ix) < sc1pio4) {
+ if (se < 0x3fff - math.floatFractionalBits(F) - 1) {
+ // raise underflow if subnormal
+ if (se == 0) {
+ math.doNotOptimizeAway(x * 0x1p-120);
+ }
+ r_sin.* = x;
+ // raise inexact if x!=0
+ r_cos.* = 1.0 + x;
+ return;
+ }
+ r_sin.* = trig.sin_generic(F, x, 0, 0);
+ r_cos.* = trig.cos_generic(F, x, 0);
+ return;
+ }
+
+ var y: [2]F = undefined;
+ const n = rem_pio2_generic(F, x, &y);
+ const s = trig.sin_generic(F, y[0], y[1], 1);
+ const c = trig.cos_generic(F, y[0], y[1]);
+ switch (n & 3) {
+ 0 => {
+ r_sin.* = s;
+ r_cos.* = c;
+ },
+ 1 => {
+ r_sin.* = c;
+ r_cos.* = -s;
+ },
+ 2 => {
+ r_sin.* = -s;
+ r_cos.* = -c;
+ },
+ else => {
+ r_sin.* = -c;
+ r_cos.* = s;
+ },
+ }
+}
diff --git a/lib/compiler_rt/sqrt.zig b/lib/compiler_rt/sqrt.zig
new file mode 100644
index 0000000000..01b09213fe
--- /dev/null
+++ b/lib/compiler_rt/sqrt.zig
@@ -0,0 +1,310 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const arch = builtin.cpu.arch;
+const math = std.math;
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__sqrth, .{ .name = "__sqrth", .linkage = common.linkage });
+ @export(sqrtf, .{ .name = "sqrtf", .linkage = common.linkage });
+ @export(sqrt, .{ .name = "sqrt", .linkage = common.linkage });
+ @export(__sqrtx, .{ .name = "__sqrtx", .linkage = common.linkage });
+ const sqrtq_sym_name = if (common.want_ppc_abi) "sqrtf128" else "sqrtq";
+ @export(sqrtq, .{ .name = sqrtq_sym_name, .linkage = common.linkage });
+ @export(sqrtl, .{ .name = "sqrtl", .linkage = common.linkage });
+}
+
+pub fn __sqrth(x: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, sqrtf(x));
+}
+
+pub fn sqrtf(x: f32) callconv(.C) f32 {
+ const tiny: f32 = 1.0e-30;
+ const sign: i32 = @bitCast(i32, @as(u32, 0x80000000));
+ var ix: i32 = @bitCast(i32, x);
+
+ if ((ix & 0x7F800000) == 0x7F800000) {
+ return x * x + x; // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = snan
+ }
+
+ // zero
+ if (ix <= 0) {
+ if (ix & ~sign == 0) {
+ return x; // sqrt (+-0) = +-0
+ }
+ if (ix < 0) {
+ return math.snan(f32);
+ }
+ }
+
+ // normalize
+ var m = ix >> 23;
+ if (m == 0) {
+ // subnormal
+ var i: i32 = 0;
+ while (ix & 0x00800000 == 0) : (i += 1) {
+ ix <<= 1;
+ }
+ m -= i - 1;
+ }
+
+ m -= 127; // unbias exponent
+ ix = (ix & 0x007FFFFF) | 0x00800000;
+
+ if (m & 1 != 0) { // odd m, double x to even
+ ix += ix;
+ }
+
+ m >>= 1; // m = [m / 2]
+
+ // sqrt(x) bit by bit
+ ix += ix;
+ var q: i32 = 0; // q = sqrt(x)
+ var s: i32 = 0;
+ var r: i32 = 0x01000000; // r = moving bit right -> left
+
+ while (r != 0) {
+ const t = s + r;
+ if (t <= ix) {
+ s = t + r;
+ ix -= t;
+ q += r;
+ }
+ ix += ix;
+ r >>= 1;
+ }
+
+ // floating add to find rounding direction
+ if (ix != 0) {
+ var z = 1.0 - tiny; // inexact
+ if (z >= 1.0) {
+ z = 1.0 + tiny;
+ if (z > 1.0) {
+ q += 2;
+ } else {
+ if (q & 1 != 0) {
+ q += 1;
+ }
+ }
+ }
+ }
+
+ ix = (q >> 1) + 0x3f000000;
+ ix += m << 23;
+ return @bitCast(f32, ix);
+}
+
+/// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound
+/// behaviour. Most intermediate i32 values are changed to u32 where appropriate but there are
+/// potentially some edge cases remaining that are not handled in the same way.
+pub fn sqrt(x: f64) callconv(.C) f64 {
+ const tiny: f64 = 1.0e-300;
+ const sign: u32 = 0x80000000;
+ const u = @bitCast(u64, x);
+
+ var ix0 = @intCast(u32, u >> 32);
+ var ix1 = @intCast(u32, u & 0xFFFFFFFF);
+
+ // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = nan
+ if (ix0 & 0x7FF00000 == 0x7FF00000) {
+ return x * x + x;
+ }
+
+ // sqrt(+-0) = +-0
+ if (x == 0.0) {
+ return x;
+ }
+ // sqrt(-ve) = snan
+ if (ix0 & sign != 0) {
+ return math.snan(f64);
+ }
+
+ // normalize x
+ var m = @intCast(i32, ix0 >> 20);
+ if (m == 0) {
+ // subnormal
+ while (ix0 == 0) {
+ m -= 21;
+ ix0 |= ix1 >> 11;
+ ix1 <<= 21;
+ }
+
+ // subnormal
+ var i: u32 = 0;
+ while (ix0 & 0x00100000 == 0) : (i += 1) {
+ ix0 <<= 1;
+ }
+ m -= @intCast(i32, i) - 1;
+ ix0 |= ix1 >> @intCast(u5, 32 - i);
+ ix1 <<= @intCast(u5, i);
+ }
+
+ // unbias exponent
+ m -= 1023;
+ ix0 = (ix0 & 0x000FFFFF) | 0x00100000;
+ if (m & 1 != 0) {
+ ix0 += ix0 + (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+ }
+ m >>= 1;
+
+ // sqrt(x) bit by bit
+ ix0 += ix0 + (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+
+ var q: u32 = 0;
+ var q1: u32 = 0;
+ var s0: u32 = 0;
+ var s1: u32 = 0;
+ var r: u32 = 0x00200000;
+ var t: u32 = undefined;
+ var t1: u32 = undefined;
+
+ while (r != 0) {
+ t = s0 +% r;
+ if (t <= ix0) {
+ s0 = t + r;
+ ix0 -= t;
+ q += r;
+ }
+ ix0 = ix0 +% ix0 +% (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+ r >>= 1;
+ }
+
+ r = sign;
+ while (r != 0) {
+ t1 = s1 +% r;
+ t = s0;
+ if (t < ix0 or (t == ix0 and t1 <= ix1)) {
+ s1 = t1 +% r;
+ if (t1 & sign == sign and s1 & sign == 0) {
+ s0 += 1;
+ }
+ ix0 -= t;
+ if (ix1 < t1) {
+ ix0 -= 1;
+ }
+ ix1 = ix1 -% t1;
+ q1 += r;
+ }
+ ix0 = ix0 +% ix0 +% (ix1 >> 31);
+ ix1 = ix1 +% ix1;
+ r >>= 1;
+ }
+
+ // rounding direction
+ if (ix0 | ix1 != 0) {
+ var z = 1.0 - tiny; // raise inexact
+ if (z >= 1.0) {
+ z = 1.0 + tiny;
+ if (q1 == 0xFFFFFFFF) {
+ q1 = 0;
+ q += 1;
+ } else if (z > 1.0) {
+ if (q1 == 0xFFFFFFFE) {
+ q += 1;
+ }
+ q1 += 2;
+ } else {
+ q1 += q1 & 1;
+ }
+ }
+ }
+
+ ix0 = (q >> 1) + 0x3FE00000;
+ ix1 = q1 >> 1;
+ if (q & 1 != 0) {
+ ix1 |= 0x80000000;
+ }
+
+ // NOTE: musl here appears to rely on signed twos-complement wraparound. +% has the same
+ // behaviour at least.
+ var iix0 = @intCast(i32, ix0);
+ iix0 = iix0 +% (m << 20);
+
+ const uz = (@intCast(u64, iix0) << 32) | ix1;
+ return @bitCast(f64, uz);
+}
+
+pub fn __sqrtx(x: f80) callconv(.C) f80 {
+ // TODO: more efficient implementation
+ return @floatCast(f80, sqrtq(x));
+}
+
+pub fn sqrtq(x: f128) callconv(.C) f128 {
+ // TODO: more correct implementation
+ return sqrt(@floatCast(f64, x));
+}
+
+pub fn sqrtl(x: c_longdouble) callconv(.C) c_longdouble {
+ switch (@typeInfo(c_longdouble).Float.bits) {
+ 16 => return __sqrth(x),
+ 32 => return sqrtf(x),
+ 64 => return sqrt(x),
+ 80 => return __sqrtx(x),
+ 128 => return sqrtq(x),
+ else => @compileError("unreachable"),
+ }
+}
+
+test "sqrtf" {
+ const V = [_]f32{
+ 0.0,
+ 4.089288054930154,
+ 7.538757127071935,
+ 8.97780793672623,
+ 5.304443821913729,
+ 5.682408965311888,
+ 0.5846878579110049,
+ 3.650338664297043,
+ 0.3178091951800732,
+ 7.1505232436382835,
+ 3.6589165881946464,
+ };
+
+ // Note that @sqrt will either generate the sqrt opcode (if supported by the
+ // target ISA) or a call to `sqrtf` otherwise.
+ for (V) |val|
+ try std.testing.expectEqual(@sqrt(val), sqrtf(val));
+}
+
+test "sqrtf special" {
+ try std.testing.expect(math.isPositiveInf(sqrtf(math.inf(f32))));
+ try std.testing.expect(sqrtf(0.0) == 0.0);
+ try std.testing.expect(sqrtf(-0.0) == -0.0);
+ try std.testing.expect(math.isNan(sqrtf(-1.0)));
+ try std.testing.expect(math.isNan(sqrtf(math.nan(f32))));
+}
+
+test "sqrt" {
+ const V = [_]f64{
+ 0.0,
+ 4.089288054930154,
+ 7.538757127071935,
+ 8.97780793672623,
+ 5.304443821913729,
+ 5.682408965311888,
+ 0.5846878579110049,
+ 3.650338664297043,
+ 0.3178091951800732,
+ 7.1505232436382835,
+ 3.6589165881946464,
+ };
+
+ // Note that @sqrt will either generate the sqrt opcode (if supported by the
+ // target ISA) or a call to `sqrtf` otherwise.
+ for (V) |val|
+ try std.testing.expectEqual(@sqrt(val), sqrt(val));
+}
+
+test "sqrt special" {
+ try std.testing.expect(math.isPositiveInf(sqrt(math.inf(f64))));
+ try std.testing.expect(sqrt(0.0) == 0.0);
+ try std.testing.expect(sqrt(-0.0) == -0.0);
+ try std.testing.expect(math.isNan(sqrt(-1.0)));
+ try std.testing.expect(math.isNan(sqrt(math.nan(f64))));
+}
diff --git a/lib/std/special/compiler_rt/stack_probe.zig b/lib/compiler_rt/stack_probe.zig
similarity index 82%
rename from lib/std/special/compiler_rt/stack_probe.zig
rename to lib/compiler_rt/stack_probe.zig
index 90919dcbb8..5ebb851825 100644
--- a/lib/std/special/compiler_rt/stack_probe.zig
+++ b/lib/compiler_rt/stack_probe.zig
@@ -1,4 +1,43 @@
-const native_arch = @import("builtin").cpu.arch;
+const std = @import("std");
+const builtin = @import("builtin");
+const os_tag = builtin.os.tag;
+const arch = builtin.cpu.arch;
+const abi = builtin.abi;
+const is_test = builtin.is_test;
+
+const is_gnu = abi.isGnu();
+const is_mingw = os_tag == .windows and is_gnu;
+
+const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+const strong_linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Strong;
+pub const panic = @import("common.zig").panic;
+
+comptime {
+ if (builtin.os.tag == .windows) {
+ // Default stack-probe functions emitted by LLVM
+ if (is_mingw) {
+ @export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage });
+ @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage });
+ } else if (!builtin.link_libc) {
+ // This symbols are otherwise exported by MSVCRT.lib
+ @export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage });
+ @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
+ }
+
+ if (arch.isAARCH64()) {
+ @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
+ }
+ }
+
+ switch (arch) {
+ .i386,
+ .x86_64,
+ => {
+ @export(zig_probe_stack, .{ .name = "__zig_probe_stack", .linkage = linkage });
+ },
+ else => {},
+ }
+}
// Zig's own stack-probe routine (available only on x86 and x86_64)
pub fn zig_probe_stack() callconv(.Naked) void {
@@ -8,7 +47,7 @@ pub fn zig_probe_stack() callconv(.Naked) void {
// invalid so let's update it on the go, otherwise we'll get a segfault
// instead of triggering the stack growth.
- switch (native_arch) {
+ switch (arch) {
.x86_64 => {
// %rax = probe length, %rsp = stack pointer
asm volatile (
@@ -60,7 +99,7 @@ pub fn zig_probe_stack() callconv(.Naked) void {
fn win_probe_stack_only() void {
@setRuntimeSafety(false);
- switch (native_arch) {
+ switch (arch) {
.x86_64 => {
asm volatile (
\\ push %%rcx
@@ -105,7 +144,7 @@ fn win_probe_stack_only() void {
},
else => {},
}
- if (comptime native_arch.isAARCH64()) {
+ if (comptime arch.isAARCH64()) {
// NOTE: page size hardcoded to 4096 for now
asm volatile (
\\ lsl x16, x15, #4
@@ -127,7 +166,7 @@ fn win_probe_stack_only() void {
fn win_probe_stack_adjust_sp() void {
@setRuntimeSafety(false);
- switch (native_arch) {
+ switch (arch) {
.x86_64 => {
asm volatile (
\\ push %%rcx
@@ -201,9 +240,9 @@ pub fn _chkstk() callconv(.Naked) void {
}
pub fn __chkstk() callconv(.Naked) void {
@setRuntimeSafety(false);
- if (comptime native_arch.isAARCH64()) {
+ if (comptime arch.isAARCH64()) {
@call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
- } else switch (native_arch) {
+ } else switch (arch) {
.i386 => @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{}),
.x86_64 => @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{}),
else => unreachable,
diff --git a/lib/compiler_rt/subdf3.zig b/lib/compiler_rt/subdf3.zig
new file mode 100644
index 0000000000..9d62ffe480
--- /dev/null
+++ b/lib/compiler_rt/subdf3.zig
@@ -0,0 +1,21 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = common.linkage });
+ } else {
+ @export(__subdf3, .{ .name = "__subdf3", .linkage = common.linkage });
+ }
+}
+
+fn __subdf3(a: f64, b: f64) callconv(.C) f64 {
+ const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
+ return a + neg_b;
+}
+
+fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
+ const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
+ return a + neg_b;
+}
diff --git a/lib/compiler_rt/subo.zig b/lib/compiler_rt/subo.zig
new file mode 100644
index 0000000000..a7dcf258aa
--- /dev/null
+++ b/lib/compiler_rt/subo.zig
@@ -0,0 +1,47 @@
+//! subo - subtract overflow
+//! * return a-%b.
+//! * return if a-b overflows => 1 else => 0
+//! - suboXi4_generic as default
+
+const std = @import("std");
+const builtin = @import("builtin");
+const common = @import("common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__subosi4, .{ .name = "__subosi4", .linkage = common.linkage });
+ @export(__subodi4, .{ .name = "__subodi4", .linkage = common.linkage });
+ @export(__suboti4, .{ .name = "__suboti4", .linkage = common.linkage });
+}
+
+pub fn __subosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
+ return suboXi4_generic(i32, a, b, overflow);
+}
+pub fn __subodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
+ return suboXi4_generic(i64, a, b, overflow);
+}
+pub fn __suboti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
+ return suboXi4_generic(i128, a, b, overflow);
+}
+
+inline fn suboXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
+ overflow.* = 0;
+ var sum: ST = a -% b;
+ // Hackers Delight: section Overflow Detection, subsection Signed Add/Subtract
+ // Let sum = a -% b == a - b - carry == wraparound subtraction.
+ // Overflow in a-b-carry occurs, iff a and b have opposite signs
+ // and the sign of a-b-carry is opposite of a (or equivalently same as b).
+ // Faster routine: res = (a ^ b) & (sum ^ a)
+ // Slower routine: res = (sum^a) & ~(sum^b)
+ // Overflow occured, iff (res < 0)
+ if (((a ^ b) & (sum ^ a)) < 0)
+ overflow.* = 1;
+ return sum;
+}
+
+test {
+ _ = @import("subosi4_test.zig");
+ _ = @import("subodi4_test.zig");
+ _ = @import("suboti4_test.zig");
+}
diff --git a/lib/compiler_rt/subodi4_test.zig b/lib/compiler_rt/subodi4_test.zig
new file mode 100644
index 0000000000..687e97c71c
--- /dev/null
+++ b/lib/compiler_rt/subodi4_test.zig
@@ -0,0 +1,81 @@
+const subo = @import("subo.zig");
+const std = @import("std");
+const testing = std.testing;
+const math = std.math;
+
+fn test__subodi4(a: i64, b: i64) !void {
+ var result_ov: c_int = undefined;
+ var expected_ov: c_int = undefined;
+ var result = subo.__subodi4(a, b, &result_ov);
+ var expected: i64 = simple_subodi4(a, b, &expected_ov);
+ try testing.expectEqual(expected, result);
+ try testing.expectEqual(expected_ov, result_ov);
+}
+
+// 2 cases on evaluating `a-b`:
+// 1. `a-b` may underflow, iff b>0 && a<0 and a-b < min <=> a