From 8824491fc7f28116838b670f9ab46d6ba0d28276 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 4 Aug 2020 18:38:42 -0700
Subject: [PATCH 001/153] stage2: add missing newlines to cli error messages
---
src-self-hosted/main.zig | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 8b545ab546..04ebee457c 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -336,39 +336,39 @@ fn buildOutputType(
} else if (mem.startsWith(u8, arg, "-l")) {
try system_libs.append(arg[2..]);
} else {
- std.debug.print("unrecognized parameter: '{}'", .{arg});
+ std.debug.print("unrecognized parameter: '{}'\n", .{arg});
process.exit(1);
}
} else if (mem.endsWith(u8, arg, ".s") or mem.endsWith(u8, arg, ".S")) {
- std.debug.print("assembly files not supported yet", .{});
+ std.debug.print("assembly files not supported yet\n", .{});
process.exit(1);
} else if (mem.endsWith(u8, arg, ".o") or
mem.endsWith(u8, arg, ".obj") or
mem.endsWith(u8, arg, ".a") or
mem.endsWith(u8, arg, ".lib"))
{
- std.debug.print("object files and static libraries not supported yet", .{});
+ std.debug.print("object files and static libraries not supported yet\n", .{});
process.exit(1);
} else if (mem.endsWith(u8, arg, ".c") or
mem.endsWith(u8, arg, ".cpp"))
{
- std.debug.print("compilation of C and C++ source code requires LLVM extensions which are not implemented yet", .{});
+ std.debug.print("compilation of C and C++ source code requires LLVM extensions which are not implemented yet\n", .{});
process.exit(1);
} else if (mem.endsWith(u8, arg, ".so") or
mem.endsWith(u8, arg, ".dylib") or
mem.endsWith(u8, arg, ".dll"))
{
- std.debug.print("linking against dynamic libraries not yet supported", .{});
+ std.debug.print("linking against dynamic libraries not yet supported\n", .{});
process.exit(1);
} else if (mem.endsWith(u8, arg, ".zig") or mem.endsWith(u8, arg, ".zir")) {
if (root_src_file) |other| {
- std.debug.print("found another zig file '{}' after root source file '{}'", .{ arg, other });
+ std.debug.print("found another zig file '{}' after root source file '{}'\n", .{ arg, other });
process.exit(1);
} else {
root_src_file = arg;
}
} else {
- std.debug.print("unrecognized file extension of parameter '{}'", .{arg});
+ std.debug.print("unrecognized file extension of parameter '{}'\n", .{arg});
}
}
}
@@ -385,7 +385,7 @@ fn buildOutputType(
};
if (system_libs.items.len != 0) {
- std.debug.print("linking against system libraries not yet supported", .{});
+ std.debug.print("linking against system libraries not yet supported\n", .{});
process.exit(1);
}
From f23fb3087befc041161498a086bac2a92acea6cb Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 4 Aug 2020 22:54:59 -0700
Subject: [PATCH 002/153] zig build: fix addBuildOption for `[]const u8` and
`?[]const u8`
---
build.zig | 1 -
lib/std/build.zig | 37 ++++++++++++++++++++++++++++---------
2 files changed, 28 insertions(+), 10 deletions(-)
diff --git a/build.zig b/build.zig
index 7e9920bda2..9fd86f93ab 100644
--- a/build.zig
+++ b/build.zig
@@ -107,7 +107,6 @@ pub fn build(b: *Builder) !void {
const is_wasmtime_enabled = b.option(bool, "enable-wasmtime", "Use Wasmtime to enable and run WASI libstd tests") orelse false;
const glibc_multi_dir = b.option([]const u8, "enable-foreign-glibc", "Provide directory with glibc installations to run cross compiled tests that link glibc");
-
test_stage2.addBuildOption(bool, "enable_qemu", is_qemu_enabled);
test_stage2.addBuildOption(bool, "enable_wine", is_wine_enabled);
test_stage2.addBuildOption(bool, "enable_wasmtime", is_wasmtime_enabled);
diff --git a/lib/std/build.zig b/lib/std/build.zig
index 8bfd230263..817b103b58 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -1708,15 +1708,34 @@ pub const LibExeObjStep = struct {
pub fn addBuildOption(self: *LibExeObjStep, comptime T: type, name: []const u8, value: T) void {
const out = self.build_options_contents.outStream();
- if (T == []const []const u8) {
- out.print("pub const {}: []const []const u8 = &[_][]const u8{{\n", .{name}) catch unreachable;
- for (value) |slice| {
- out.writeAll(" ") catch unreachable;
- std.zig.renderStringLiteral(slice, out) catch unreachable;
- out.writeAll(",\n") catch unreachable;
- }
- out.writeAll("};\n") catch unreachable;
- return;
+ switch (T) {
+ []const []const u8 => {
+ out.print("pub const {}: []const []const u8 = &[_][]const u8{{\n", .{name}) catch unreachable;
+ for (value) |slice| {
+ out.writeAll(" ") catch unreachable;
+ std.zig.renderStringLiteral(slice, out) catch unreachable;
+ out.writeAll(",\n") catch unreachable;
+ }
+ out.writeAll("};\n") catch unreachable;
+ return;
+ },
+ []const u8 => {
+ out.print("pub const {}: []const u8 = ", .{name}) catch unreachable;
+ std.zig.renderStringLiteral(value, out) catch unreachable;
+ out.writeAll(";\n") catch unreachable;
+ return;
+ },
+ ?[]const u8 => {
+ out.print("pub const {}: ?[]const u8 = ", .{name}) catch unreachable;
+ if (value) |payload| {
+ std.zig.renderStringLiteral(payload, out) catch unreachable;
+ out.writeAll(";\n") catch unreachable;
+ } else {
+ out.writeAll("null;\n") catch unreachable;
+ }
+ return;
+ },
+ else => {},
}
switch (@typeInfo(T)) {
.Enum => |enum_info| {
From d159ba9295b331663a62438673f3543c35fca136 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 4 Aug 2020 23:04:20 -0700
Subject: [PATCH 003/153] zig fmt
---
lib/std/json.zig | 2 +-
lib/std/mem.zig | 2 +-
lib/std/net.zig | 19 +++++++++----------
lib/std/os/bits/windows.zig | 2 +-
lib/std/os/windows.zig | 2 +-
lib/std/special/compiler_rt/floatditf.zig | 2 +-
6 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/lib/std/json.zig b/lib/std/json.zig
index 3a94ceec37..59c765ac42 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -1742,7 +1742,7 @@ test "parse into tagged union" {
A: struct { x: u32 },
B: struct { y: u32 },
};
- testing.expectEqual(T{ .B = .{.y = 42} }, try parse(T, &TokenStream.init("{\"y\":42}"), ParseOptions{}));
+ testing.expectEqual(T{ .B = .{ .y = 42 } }, try parse(T, &TokenStream.init("{\"y\":42}"), ParseOptions{}));
}
}
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 0d6d7c118a..2fb364b340 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -2038,7 +2038,7 @@ pub fn replace(comptime T: type, input: []const T, needle: []const T, replacemen
var replacements: usize = 0;
while (slide < input.len) {
if (mem.indexOf(T, input[slide..], needle) == @as(usize, 0)) {
- mem.copy(T, output[i..i + replacement.len], replacement);
+ mem.copy(T, output[i .. i + replacement.len], replacement);
i += replacement.len;
slide += needle.len;
replacements += 1;
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 514e48e0a7..0cd34376e1 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -77,23 +77,23 @@ pub const Address = extern union {
}
pub fn parseIp6(buf: []const u8, port: u16) !Address {
- return Address{.in6 = try Ip6Address.parse(buf, port) };
+ return Address{ .in6 = try Ip6Address.parse(buf, port) };
}
pub fn resolveIp6(buf: []const u8, port: u16) !Address {
- return Address{.in6 = try Ip6Address.resolve(buf, port) };
+ return Address{ .in6 = try Ip6Address.resolve(buf, port) };
}
pub fn parseIp4(buf: []const u8, port: u16) !Address {
- return Address {.in = try Ip4Address.parse(buf, port) };
+ return Address{ .in = try Ip4Address.parse(buf, port) };
}
pub fn initIp4(addr: [4]u8, port: u16) Address {
- return Address{.in = Ip4Address.init(addr, port) };
+ return Address{ .in = Ip4Address.init(addr, port) };
}
pub fn initIp6(addr: [16]u8, port: u16, flowinfo: u32, scope_id: u32) Address {
- return Address{.in6 = Ip6Address.init(addr, port, flowinfo, scope_id) };
+ return Address{ .in6 = Ip6Address.init(addr, port, flowinfo, scope_id) };
}
pub fn initUnix(path: []const u8) !Address {
@@ -136,8 +136,8 @@ pub const Address = extern union {
/// on the address family.
pub fn initPosix(addr: *align(4) const os.sockaddr) Address {
switch (addr.family) {
- os.AF_INET => return Address{ .in = Ip4Address{ .sa = @ptrCast(*const os.sockaddr_in, addr).*} },
- os.AF_INET6 => return Address{ .in6 = Ip6Address{ .sa = @ptrCast(*const os.sockaddr_in6, addr).*} },
+ os.AF_INET => return Address{ .in = Ip4Address{ .sa = @ptrCast(*const os.sockaddr_in, addr).* } },
+ os.AF_INET6 => return Address{ .in6 = Ip6Address{ .sa = @ptrCast(*const os.sockaddr_in6, addr).* } },
else => unreachable,
}
}
@@ -193,7 +193,7 @@ pub const Ip4Address = extern struct {
.sa = .{
.port = mem.nativeToBig(u16, port),
.addr = undefined,
- }
+ },
};
const out_ptr = mem.sliceAsBytes(@as(*[1]u32, &result.sa.addr)[0..]);
@@ -240,7 +240,7 @@ pub const Ip4Address = extern struct {
}
pub fn init(addr: [4]u8, port: u16) Ip4Address {
- return Ip4Address {
+ return Ip4Address{
.sa = os.sockaddr_in{
.port = mem.nativeToBig(u16, port),
.addr = @ptrCast(*align(1) const u32, &addr).*,
@@ -598,7 +598,6 @@ pub const Ip6Address = extern struct {
}
};
-
pub fn connectUnixSocket(path: []const u8) !fs.File {
const opt_non_block = if (std.io.is_async) os.SOCK_NONBLOCK else 0;
const sockfd = try os.socket(
diff --git a/lib/std/os/bits/windows.zig b/lib/std/os/bits/windows.zig
index 53a590ff1e..5316a048cc 100644
--- a/lib/std/os/bits/windows.zig
+++ b/lib/std/os/bits/windows.zig
@@ -261,4 +261,4 @@ pub const O_LARGEFILE = 0;
pub const O_NOATIME = 0o1000000;
pub const O_PATH = 0o10000000;
pub const O_TMPFILE = 0o20200000;
-pub const O_NDELAY = O_NONBLOCK;
\ No newline at end of file
+pub const O_NDELAY = O_NONBLOCK;
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index bf41d0ae93..2b3dc29b04 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -51,7 +51,7 @@ pub const OpenFileOptions = struct {
open_dir: bool = false,
/// If false, tries to open path as a reparse point without dereferencing it.
/// Defaults to true.
- follow_symlinks: bool = true,
+ follow_symlinks: bool = true,
};
/// TODO when share_access_nonblocking is false, this implementation uses
diff --git a/lib/std/special/compiler_rt/floatditf.zig b/lib/std/special/compiler_rt/floatditf.zig
index 35264f27e9..581a0e0532 100644
--- a/lib/std/special/compiler_rt/floatditf.zig
+++ b/lib/std/special/compiler_rt/floatditf.zig
@@ -18,7 +18,7 @@ pub fn __floatditf(arg: i64) callconv(.C) f128 {
var aAbs = @bitCast(u64, arg);
if (arg < 0) {
sign = 1 << 127;
- aAbs = ~@bitCast(u64, arg)+ 1;
+ aAbs = ~@bitCast(u64, arg) + 1;
}
// Exponent of (fp_t)a is the width of abs(a).
From cf4936bcb0d48df5578efe76156c5c7ffc0d7b1e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 4 Aug 2020 23:45:53 -0700
Subject: [PATCH 004/153] std.os tests: improve robustness of "symlink with
relative paths"
---
lib/std/os/test.zig | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index b39e239ed4..b17ddfaf7e 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -112,8 +112,11 @@ test "openat smoke test" {
test "symlink with relative paths" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
+ const cwd = fs.cwd();
+ cwd.deleteFile("file.txt") catch {};
+ cwd.deleteFile("symlinked") catch {};
+
// First, try relative paths in cwd
- var cwd = fs.cwd();
try cwd.writeFile("file.txt", "nonsense");
if (builtin.os.tag == .windows) {
From a5b76d2474c5d1cbbb1c08def6b30ad1836da3bc Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Sun, 2 Aug 2020 20:46:06 -0400
Subject: [PATCH 005/153] Stage2: minor File.ELF refactor
---
src-self-hosted/codegen.zig | 25 ++--
src-self-hosted/codegen/c.zig | 8 +-
src-self-hosted/link.zig | 243 ++++++++++++++++------------------
3 files changed, 130 insertions(+), 146 deletions(-)
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 58249256a1..be544aaa29 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -134,7 +134,7 @@ pub fn generateSymbol(
}
return Result{
.fail = try ErrorMsg.create(
- bin_file.allocator,
+ bin_file.base.allocator,
src,
"TODO implement generateSymbol for more kinds of arrays",
.{},
@@ -150,21 +150,22 @@ pub fn generateSymbol(
// If the decl changes vaddr, then this symbol needs to get regenerated.
const vaddr = bin_file.local_symbols.items[decl.link.local_sym_index].st_value;
const endian = bin_file.base.options.target.cpu.arch.endian();
- switch (bin_file.ptr_width) {
- .p32 => {
+ switch (bin_file.base.options.target.cpu.arch.ptrBitWidth()) {
+ 32 => {
try code.resize(4);
mem.writeInt(u32, code.items[0..4], @intCast(u32, vaddr), endian);
},
- .p64 => {
+ 64 => {
try code.resize(8);
mem.writeInt(u64, code.items[0..8], vaddr, endian);
},
+ else => unreachable,
}
return Result{ .appended = {} };
}
return Result{
.fail = try ErrorMsg.create(
- bin_file.allocator,
+ bin_file.base.allocator,
src,
"TODO implement generateSymbol for pointer {}",
.{typed_value.val},
@@ -180,7 +181,7 @@ pub fn generateSymbol(
}
return Result{
.fail = try ErrorMsg.create(
- bin_file.allocator,
+ bin_file.base.allocator,
src,
"TODO implement generateSymbol for int type '{}'",
.{typed_value.ty},
@@ -190,7 +191,7 @@ pub fn generateSymbol(
else => |t| {
return Result{
.fail = try ErrorMsg.create(
- bin_file.allocator,
+ bin_file.base.allocator,
src,
"TODO implement generateSymbol for type '{}'",
.{@tagName(t)},
@@ -387,10 +388,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const fn_type = module_fn.owner_decl.typed_value.most_recent.typed_value.ty;
- var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
+ var branch_stack = std.ArrayList(Branch).init(bin_file.base.allocator);
defer {
assert(branch_stack.items.len == 1);
- branch_stack.items[0].deinit(bin_file.allocator);
+ branch_stack.items[0].deinit(bin_file.base.allocator);
branch_stack.deinit();
}
const branch = try branch_stack.addOne();
@@ -413,7 +414,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
var function = Self{
- .gpa = bin_file.allocator,
+ .gpa = bin_file.base.allocator,
.target = &bin_file.base.options.target,
.bin_file = bin_file,
.mod_fn = module_fn,
@@ -432,7 +433,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.rbrace_src = src_data.rbrace_src,
.source = src_data.source,
};
- defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
+ defer function.exitlude_jump_relocs.deinit(bin_file.base.allocator);
var call_info = function.resolveCallingConventionValues(src, fn_type) catch |err| switch (err) {
error.CodegenFail => return Result{ .fail = function.err_msg.? },
@@ -2054,7 +2055,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn fail(self: *Self, src: usize, comptime format: []const u8, args: anytype) error{ CodegenFail, OutOfMemory } {
@setCold(true);
assert(self.err_msg == null);
- self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src, format, args);
+ self.err_msg = try ErrorMsg.create(self.bin_file.base.allocator, src, format, args);
return error.CodegenFail;
}
diff --git a/src-self-hosted/codegen/c.zig b/src-self-hosted/codegen/c.zig
index e8fe435af1..db9d9a1030 100644
--- a/src-self-hosted/codegen/c.zig
+++ b/src-self-hosted/codegen/c.zig
@@ -44,8 +44,8 @@ fn renderType(file: *C, writer: std.ArrayList(u8).Writer, T: Type, src: usize) !
fn renderFunctionSignature(file: *C, writer: std.ArrayList(u8).Writer, decl: *Decl) !void {
const tv = decl.typed_value.most_recent.typed_value;
try renderType(file, writer, tv.ty.fnReturnType(), decl.src());
- const name = try map(file.allocator, mem.spanZ(decl.name));
- defer file.allocator.free(name);
+ const name = try map(file.base.allocator, mem.spanZ(decl.name));
+ defer file.base.allocator.free(name);
try writer.print(" {}(", .{name});
if (tv.ty.fnParamLen() == 0)
try writer.writeAll("void)")
@@ -64,8 +64,8 @@ pub fn generate(file: *C, decl: *Decl) !void {
fn genArray(file: *C, decl: *Decl) !void {
const tv = decl.typed_value.most_recent.typed_value;
// TODO: prevent inline asm constants from being emitted
- const name = try map(file.allocator, mem.span(decl.name));
- defer file.allocator.free(name);
+ const name = try map(file.base.allocator, mem.span(decl.name));
+ defer file.base.allocator.free(name);
if (tv.val.cast(Value.Payload.Bytes)) |payload|
if (tv.ty.arraySentinel()) |sentinel|
if (sentinel.toUnsignedInt() == 0)
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 7bf83d7576..30d44fcdef 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -36,9 +36,12 @@ pub const Options = struct {
program_code_size_hint: u64 = 256 * 1024,
};
+
pub const File = struct {
tag: Tag,
options: Options,
+ file: ?fs.File,
+ allocator: *Allocator,
/// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and
@@ -66,15 +69,23 @@ pub const File = struct {
pub fn makeWritable(base: *File, dir: fs.Dir, sub_path: []const u8) !void {
switch (base.tag) {
- .elf => return @fieldParentPtr(Elf, "base", base).makeWritable(dir, sub_path),
+ .elf => {
+ if (base.file != null) return;
+ base.file = try dir.createFile(sub_path, .{
+ .truncate = false,
+ .read = true,
+ .mode = determineMode(base.options),
+ });
+ },
.c => {},
}
}
pub fn makeExecutable(base: *File) !void {
- switch (base.tag) {
- .elf => return @fieldParentPtr(Elf, "base", base).makeExecutable(),
- .c => unreachable,
+ std.debug.assert(base.tag != .c);
+ if (base.file) |f| {
+ f.close();
+ base.file = null;
}
}
@@ -100,6 +111,7 @@ pub const File = struct {
}
pub fn deinit(base: *File) void {
+ if (base.file) |f| f.close();
switch (base.tag) {
.elf => @fieldParentPtr(Elf, "base", base).deinit(),
.c => @fieldParentPtr(C, "base", base).deinit(),
@@ -111,12 +123,12 @@ pub const File = struct {
.elf => {
const parent = @fieldParentPtr(Elf, "base", base);
parent.deinit();
- parent.allocator.destroy(parent);
+ base.allocator.destroy(parent);
},
.c => {
const parent = @fieldParentPtr(C, "base", base);
parent.deinit();
- parent.allocator.destroy(parent);
+ base.allocator.destroy(parent);
},
}
}
@@ -172,11 +184,10 @@ pub const File = struct {
base: File,
- allocator: *Allocator,
header: std.ArrayList(u8),
constants: std.ArrayList(u8),
main: std.ArrayList(u8),
- file: ?fs.File,
+
called: std.StringHashMap(void),
need_stddef: bool = false,
need_stdint: bool = false,
@@ -196,9 +207,9 @@ pub const File = struct {
.base = .{
.tag = .c,
.options = options,
+ .file = file,
+ .allocator = allocator,
},
- .allocator = allocator,
- .file = file,
.main = std.ArrayList(u8).init(allocator),
.header = std.ArrayList(u8).init(allocator),
.constants = std.ArrayList(u8).init(allocator),
@@ -209,7 +220,7 @@ pub const File = struct {
}
pub fn fail(self: *C, src: usize, comptime format: []const u8, args: anytype) !void {
- self.error_msg = try Module.ErrorMsg.create(self.allocator, src, format, args);
+ self.error_msg = try Module.ErrorMsg.create(self.base.allocator, src, format, args);
return error.AnalysisFail;
}
@@ -218,8 +229,6 @@ pub const File = struct {
self.header.deinit();
self.constants.deinit();
self.called.deinit();
- if (self.file) |f|
- f.close();
}
pub fn updateDecl(self: *File.C, module: *Module, decl: *Module.Decl) !void {
@@ -232,7 +241,7 @@ pub const File = struct {
}
pub fn flush(self: *File.C) !void {
- const writer = self.file.?.writer();
+ const writer = self.base.file.?.writer();
try writer.writeAll(@embedFile("cbe.h"));
var includes = false;
if (self.need_stddef) {
@@ -259,8 +268,8 @@ pub const File = struct {
}
}
try writer.writeAll(self.main.items);
- self.file.?.close();
- self.file = null;
+ self.base.file.?.close();
+ self.base.file = null;
}
};
@@ -269,9 +278,6 @@ pub const File = struct {
base: File,
- allocator: *Allocator,
- file: ?fs.File,
- owns_file_handle: bool,
ptr_width: enum { p32, p64 },
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
@@ -454,7 +460,6 @@ pub const File = struct {
else => |e| return e,
};
- elf_file.owns_file_handle = true;
return &elf_file.base;
}
@@ -467,12 +472,11 @@ pub const File = struct {
}
var self: Elf = .{
.base = .{
+ .file = file,
.tag = .elf,
.options = options,
+ .allocator = allocator,
},
- .allocator = allocator,
- .file = file,
- .owns_file_handle = false,
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
@@ -499,16 +503,15 @@ pub const File = struct {
.base = .{
.tag = .elf,
.options = options,
+ .allocator = allocator,
+ .file = file,
},
- .allocator = allocator,
- .file = file,
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedELFArchitecture,
},
.shdr_table_dirty = true,
- .owns_file_handle = false,
};
errdefer self.deinit();
@@ -542,39 +545,18 @@ pub const File = struct {
}
pub fn deinit(self: *Elf) void {
- self.sections.deinit(self.allocator);
- self.program_headers.deinit(self.allocator);
- self.shstrtab.deinit(self.allocator);
- self.debug_strtab.deinit(self.allocator);
- self.local_symbols.deinit(self.allocator);
- self.global_symbols.deinit(self.allocator);
- self.global_symbol_free_list.deinit(self.allocator);
- self.local_symbol_free_list.deinit(self.allocator);
- self.offset_table_free_list.deinit(self.allocator);
- self.text_block_free_list.deinit(self.allocator);
- self.dbg_line_fn_free_list.deinit(self.allocator);
- self.offset_table.deinit(self.allocator);
- if (self.owns_file_handle) {
- if (self.file) |f| f.close();
- }
- }
-
- pub fn makeExecutable(self: *Elf) !void {
- assert(self.owns_file_handle);
- if (self.file) |f| {
- f.close();
- self.file = null;
- }
- }
-
- pub fn makeWritable(self: *Elf, dir: fs.Dir, sub_path: []const u8) !void {
- assert(self.owns_file_handle);
- if (self.file != null) return;
- self.file = try dir.createFile(sub_path, .{
- .truncate = false,
- .read = true,
- .mode = determineMode(self.base.options),
- });
+ self.sections.deinit(self.base.allocator);
+ self.program_headers.deinit(self.base.allocator);
+ self.shstrtab.deinit(self.base.allocator);
+ self.debug_strtab.deinit(self.base.allocator);
+ self.local_symbols.deinit(self.base.allocator);
+ self.global_symbols.deinit(self.base.allocator);
+ self.global_symbol_free_list.deinit(self.base.allocator);
+ self.local_symbol_free_list.deinit(self.base.allocator);
+ self.offset_table_free_list.deinit(self.base.allocator);
+ self.text_block_free_list.deinit(self.base.allocator);
+ self.dbg_line_fn_free_list.deinit(self.base.allocator);
+ self.offset_table.deinit(self.base.allocator);
}
fn getDebugLineProgramOff(self: Elf) u32 {
@@ -662,7 +644,7 @@ pub const File = struct {
/// TODO Improve this to use a table.
fn makeString(self: *Elf, bytes: []const u8) !u32 {
- try self.shstrtab.ensureCapacity(self.allocator, self.shstrtab.items.len + bytes.len + 1);
+ try self.shstrtab.ensureCapacity(self.base.allocator, self.shstrtab.items.len + bytes.len + 1);
const result = self.shstrtab.items.len;
self.shstrtab.appendSliceAssumeCapacity(bytes);
self.shstrtab.appendAssumeCapacity(0);
@@ -671,7 +653,7 @@ pub const File = struct {
/// TODO Improve this to use a table.
fn makeDebugString(self: *Elf, bytes: []const u8) !u32 {
- try self.debug_strtab.ensureCapacity(self.allocator, self.debug_strtab.items.len + bytes.len + 1);
+ try self.debug_strtab.ensureCapacity(self.base.allocator, self.debug_strtab.items.len + bytes.len + 1);
const result = self.debug_strtab.items.len;
self.debug_strtab.appendSliceAssumeCapacity(bytes);
self.debug_strtab.appendAssumeCapacity(0);
@@ -703,7 +685,7 @@ pub const File = struct {
const p_align = 0x1000;
const off = self.findFreeSpace(file_size, p_align);
log.debug(.link, "found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
- try self.program_headers.append(self.allocator, .{
+ try self.program_headers.append(self.base.allocator, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -721,14 +703,14 @@ pub const File = struct {
const file_size = @as(u64, ptr_size) * self.base.options.symbol_count_hint;
// We really only need ptr alignment but since we are using PROGBITS, linux requires
// page align.
- const p_align = 0x1000;
+ const p_align = if (self.base.options.target.os.tag == .linux) 0x1000 else @as(u16, ptr_size);
const off = self.findFreeSpace(file_size, p_align);
log.debug(.link, "found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
// TODO instead of hard coding the vaddr, make a function to find a vaddr to put things at.
// we'll need to re-use that function anyway, in case the GOT grows and overlaps something
// else in virtual memory.
- const default_got_addr = 0x4000000;
- try self.program_headers.append(self.allocator, .{
+ const default_got_addr = if (ptr_size == 2) @as(u32, 0x8000) else 0x4000000;
+ try self.program_headers.append(self.base.allocator, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -743,10 +725,10 @@ pub const File = struct {
if (self.shstrtab_index == null) {
self.shstrtab_index = @intCast(u16, self.sections.items.len);
assert(self.shstrtab.items.len == 0);
- try self.shstrtab.append(self.allocator, 0); // need a 0 at position 0
+ try self.shstrtab.append(self.base.allocator, 0); // need a 0 at position 0
const off = self.findFreeSpace(self.shstrtab.items.len, 1);
log.debug(.link, "found shstrtab free space 0x{x} to 0x{x}\n", .{ off, off + self.shstrtab.items.len });
- try self.sections.append(self.allocator, .{
+ try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".shstrtab"),
.sh_type = elf.SHT_STRTAB,
.sh_flags = 0,
@@ -765,7 +747,7 @@ pub const File = struct {
self.text_section_index = @intCast(u16, self.sections.items.len);
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- try self.sections.append(self.allocator, .{
+ try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".text"),
.sh_type = elf.SHT_PROGBITS,
.sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
@@ -783,7 +765,7 @@ pub const File = struct {
self.got_section_index = @intCast(u16, self.sections.items.len);
const phdr = &self.program_headers.items[self.phdr_got_index.?];
- try self.sections.append(self.allocator, .{
+ try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".got"),
.sh_type = elf.SHT_PROGBITS,
.sh_flags = elf.SHF_ALLOC,
@@ -805,7 +787,7 @@ pub const File = struct {
const off = self.findFreeSpace(file_size, min_align);
log.debug(.link, "found symtab free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
- try self.sections.append(self.allocator, .{
+ try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".symtab"),
.sh_type = elf.SHT_SYMTAB,
.sh_flags = 0,
@@ -824,7 +806,7 @@ pub const File = struct {
if (self.debug_str_section_index == null) {
self.debug_str_section_index = @intCast(u16, self.sections.items.len);
assert(self.debug_strtab.items.len == 0);
- try self.sections.append(self.allocator, .{
+ try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".debug_str"),
.sh_type = elf.SHT_PROGBITS,
.sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
@@ -849,7 +831,7 @@ pub const File = struct {
off,
off + file_size_hint,
});
- try self.sections.append(self.allocator, .{
+ try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".debug_info"),
.sh_type = elf.SHT_PROGBITS,
.sh_flags = 0,
@@ -874,7 +856,7 @@ pub const File = struct {
off,
off + file_size_hint,
});
- try self.sections.append(self.allocator, .{
+ try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".debug_abbrev"),
.sh_type = elf.SHT_PROGBITS,
.sh_flags = 0,
@@ -899,7 +881,7 @@ pub const File = struct {
off,
off + file_size_hint,
});
- try self.sections.append(self.allocator, .{
+ try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".debug_aranges"),
.sh_type = elf.SHT_PROGBITS,
.sh_flags = 0,
@@ -924,7 +906,7 @@ pub const File = struct {
off,
off + file_size_hint,
});
- try self.sections.append(self.allocator, .{
+ try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".debug_line"),
.sh_type = elf.SHT_PROGBITS,
.sh_flags = 0,
@@ -1019,7 +1001,7 @@ pub const File = struct {
const abbrev_offset = 0;
self.debug_abbrev_table_offset = abbrev_offset;
- try self.file.?.pwriteAll(&abbrev_buf, debug_abbrev_sect.sh_offset + abbrev_offset);
+ try self.base.file.?.pwriteAll(&abbrev_buf, debug_abbrev_sect.sh_offset + abbrev_offset);
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.debug_abbrev_section_index.?);
@@ -1030,7 +1012,7 @@ pub const File = struct {
if (self.debug_info_section_dirty) {
const debug_info_sect = &self.sections.items[self.debug_info_section_index.?];
- var di_buf = std.ArrayList(u8).init(self.allocator);
+ var di_buf = std.ArrayList(u8).init(self.base.allocator);
defer di_buf.deinit();
// Enough for a 64-bit header and main compilation unit without resizing.
@@ -1101,7 +1083,7 @@ pub const File = struct {
debug_info_sect.sh_offset + needed_size,
});
- try self.file.?.pwriteAll(di_buf.items, debug_info_sect.sh_offset);
+ try self.base.file.?.pwriteAll(di_buf.items, debug_info_sect.sh_offset);
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.debug_info_section_index.?);
@@ -1112,7 +1094,7 @@ pub const File = struct {
if (self.debug_aranges_section_dirty) {
const debug_aranges_sect = &self.sections.items[self.debug_aranges_section_index.?];
- var di_buf = std.ArrayList(u8).init(self.allocator);
+ var di_buf = std.ArrayList(u8).init(self.base.allocator);
defer di_buf.deinit();
// Enough for all the data without resizing. When support for more compilation units
@@ -1172,7 +1154,7 @@ pub const File = struct {
debug_aranges_sect.sh_offset + needed_size,
});
- try self.file.?.pwriteAll(di_buf.items, debug_aranges_sect.sh_offset);
+ try self.base.file.?.pwriteAll(di_buf.items, debug_aranges_sect.sh_offset);
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.debug_aranges_section_index.?);
@@ -1187,7 +1169,7 @@ pub const File = struct {
const debug_line_sect = &self.sections.items[self.debug_line_section_index.?];
- var di_buf = std.ArrayList(u8).init(self.allocator);
+ var di_buf = std.ArrayList(u8).init(self.base.allocator);
defer di_buf.deinit();
// The size of this header is variable, depending on the number of directories,
@@ -1294,8 +1276,8 @@ pub const File = struct {
switch (self.ptr_width) {
.p32 => {
- const buf = try self.allocator.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
- defer self.allocator.free(buf);
+ const buf = try self.base.allocator.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
+ defer self.base.allocator.free(buf);
for (buf) |*phdr, i| {
phdr.* = progHeaderTo32(self.program_headers.items[i]);
@@ -1303,11 +1285,11 @@ pub const File = struct {
bswapAllFields(elf.Elf32_Phdr, phdr);
}
}
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
},
.p64 => {
- const buf = try self.allocator.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
- defer self.allocator.free(buf);
+ const buf = try self.base.allocator.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
+ defer self.base.allocator.free(buf);
for (buf) |*phdr, i| {
phdr.* = self.program_headers.items[i];
@@ -1315,7 +1297,7 @@ pub const File = struct {
bswapAllFields(elf.Elf64_Phdr, phdr);
}
}
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
},
}
self.phdr_table_dirty = false;
@@ -1334,7 +1316,7 @@ pub const File = struct {
shstrtab_sect.sh_size = needed_size;
log.debug(.link, "writing shstrtab start=0x{x} end=0x{x}\n", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });
- try self.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
+ try self.base.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.shstrtab_index.?);
@@ -1355,7 +1337,7 @@ pub const File = struct {
debug_strtab_sect.sh_size = needed_size;
log.debug(.link, "debug_strtab start=0x{x} end=0x{x}\n", .{ debug_strtab_sect.sh_offset, debug_strtab_sect.sh_offset + needed_size });
- try self.file.?.pwriteAll(self.debug_strtab.items, debug_strtab_sect.sh_offset);
+ try self.base.file.?.pwriteAll(self.debug_strtab.items, debug_strtab_sect.sh_offset);
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.debug_str_section_index.?);
@@ -1382,20 +1364,21 @@ pub const File = struct {
switch (self.ptr_width) {
.p32 => {
- const buf = try self.allocator.alloc(elf.Elf32_Shdr, self.sections.items.len);
- defer self.allocator.free(buf);
+ const buf = try self.base.allocator.alloc(elf.Elf32_Shdr, self.sections.items.len);
+ defer self.base.allocator.free(buf);
for (buf) |*shdr, i| {
shdr.* = sectHeaderTo32(self.sections.items[i]);
+ std.log.debug(.link, "writing section {}\n", .{shdr.*});
if (foreign_endian) {
bswapAllFields(elf.Elf32_Shdr, shdr);
}
}
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
.p64 => {
- const buf = try self.allocator.alloc(elf.Elf64_Shdr, self.sections.items.len);
- defer self.allocator.free(buf);
+ const buf = try self.base.allocator.alloc(elf.Elf64_Shdr, self.sections.items.len);
+ defer self.base.allocator.free(buf);
for (buf) |*shdr, i| {
shdr.* = self.sections.items[i];
@@ -1404,7 +1387,7 @@ pub const File = struct {
bswapAllFields(elf.Elf64_Shdr, shdr);
}
}
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
}
self.shdr_table_dirty = false;
@@ -1557,7 +1540,7 @@ pub const File = struct {
assert(index == e_ehsize);
- try self.file.?.pwriteAll(hdr_buf[0..index], 0);
+ try self.base.file.?.pwriteAll(hdr_buf[0..index], 0);
}
fn freeTextBlock(self: *Elf, text_block: *TextBlock) void {
@@ -1587,7 +1570,7 @@ pub const File = struct {
if (!already_have_free_list_node and prev.freeListEligible(self.*)) {
// The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here.
- self.text_block_free_list.append(self.allocator, prev) catch {};
+ self.text_block_free_list.append(self.base.allocator, prev) catch {};
}
} else {
text_block.prev = null;
@@ -1688,7 +1671,7 @@ pub const File = struct {
const sym = self.local_symbols.items[last.local_sym_index];
break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
} else 0;
- const amt = try self.file.?.copyRangeAll(shdr.sh_offset, self.file.?, new_offset, text_size);
+ const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, text_size);
if (amt != text_size) return error.InputOutput;
shdr.sh_offset = new_offset;
phdr.p_offset = new_offset;
@@ -1739,8 +1722,8 @@ pub const File = struct {
pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
if (decl.link.local_sym_index != 0) return;
- try self.local_symbols.ensureCapacity(self.allocator, self.local_symbols.items.len + 1);
- try self.offset_table.ensureCapacity(self.allocator, self.offset_table.items.len + 1);
+ try self.local_symbols.ensureCapacity(self.base.allocator, self.local_symbols.items.len + 1);
+ try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
if (self.local_symbol_free_list.popOrNull()) |i| {
log.debug(.link, "reusing symbol index {} for {}\n", .{ i, decl.name });
@@ -1776,8 +1759,8 @@ pub const File = struct {
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
self.freeTextBlock(&decl.link);
if (decl.link.local_sym_index != 0) {
- self.local_symbol_free_list.append(self.allocator, decl.link.local_sym_index) catch {};
- self.offset_table_free_list.append(self.allocator, decl.link.offset_table_index) catch {};
+ self.local_symbol_free_list.append(self.base.allocator, decl.link.local_sym_index) catch {};
+ self.offset_table_free_list.append(self.base.allocator, decl.link.offset_table_index) catch {};
self.local_symbols.items[decl.link.local_sym_index].st_info = 0;
@@ -1787,7 +1770,7 @@ pub const File = struct {
// is desired for both.
_ = self.dbg_line_fn_free_list.remove(&decl.fn_link);
if (decl.fn_link.prev) |prev| {
- _ = self.dbg_line_fn_free_list.put(self.allocator, prev, {}) catch {};
+ _ = self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {};
prev.next = decl.fn_link.next;
if (decl.fn_link.next) |next| {
next.prev = prev;
@@ -1810,10 +1793,10 @@ pub const File = struct {
const tracy = trace(@src());
defer tracy.end();
- var code_buffer = std.ArrayList(u8).init(self.allocator);
+ var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- var dbg_line_buffer = std.ArrayList(u8).init(self.allocator);
+ var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
defer dbg_line_buffer.deinit();
const typed_value = decl.typed_value.most_recent.typed_value;
@@ -1936,7 +1919,7 @@ pub const File = struct {
const section_offset = local_sym.st_value - self.program_headers.items[self.phdr_load_re_index.?].p_vaddr;
const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset;
- try self.file.?.pwriteAll(code, file_offset);
+ try self.base.file.?.pwriteAll(code, file_offset);
// If the Decl is a function, we need to update the .debug_line program.
if (is_fn) {
@@ -1966,7 +1949,7 @@ pub const File = struct {
if (src_fn.off + src_fn.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
if (src_fn.prev) |prev| {
- _ = self.dbg_line_fn_free_list.put(self.allocator, prev, {}) catch {};
+ _ = self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {};
prev.next = src_fn.next;
}
next.prev = src_fn.prev;
@@ -2009,7 +1992,7 @@ pub const File = struct {
debug_line_sect.sh_offset,
new_offset,
});
- const amt = try self.file.?.copyRangeAll(debug_line_sect.sh_offset, self.file.?, new_offset, existing_size);
+ const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size);
if (amt != existing_size) return error.InputOutput;
debug_line_sect.sh_offset = new_offset;
}
@@ -2041,7 +2024,7 @@ pub const File = struct {
const tracy = trace(@src());
defer tracy.end();
- try self.global_symbols.ensureCapacity(self.allocator, self.global_symbols.items.len + exports.len);
+ try self.global_symbols.ensureCapacity(self.base.allocator, self.global_symbols.items.len + exports.len);
const typed_value = decl.typed_value.most_recent.typed_value;
if (decl.link.local_sym_index == 0) return;
const decl_sym = self.local_symbols.items[decl.link.local_sym_index];
@@ -2052,7 +2035,7 @@ pub const File = struct {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
- try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
+ try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
);
continue;
}
@@ -2070,7 +2053,7 @@ pub const File = struct {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
- try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
+ try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
);
continue;
},
@@ -2125,12 +2108,12 @@ pub const File = struct {
const file_pos = shdr.sh_offset + decl.fn_link.off + self.getRelocDbgLineOff();
var data: [4]u8 = undefined;
leb128.writeUnsignedFixed(4, &data, casted_line_off);
- try self.file.?.pwriteAll(&data, file_pos);
+ try self.base.file.?.pwriteAll(&data, file_pos);
}
pub fn deleteExport(self: *Elf, exp: Export) void {
const sym_index = exp.sym_index orelse return;
- self.global_symbol_free_list.append(self.allocator, sym_index) catch {};
+ self.global_symbol_free_list.append(self.base.allocator, sym_index) catch {};
self.global_symbols.items[sym_index].st_info = 0;
}
@@ -2143,14 +2126,14 @@ pub const File = struct {
if (foreign_endian) {
bswapAllFields(elf.Elf32_Phdr, &phdr[0]);
}
- return self.file.?.pwriteAll(mem.sliceAsBytes(&phdr), offset);
+ return self.base.file.?.pwriteAll(mem.sliceAsBytes(&phdr), offset);
},
64 => {
var phdr = [1]elf.Elf64_Phdr{self.program_headers.items[index]};
if (foreign_endian) {
bswapAllFields(elf.Elf64_Phdr, &phdr[0]);
}
- return self.file.?.pwriteAll(mem.sliceAsBytes(&phdr), offset);
+ return self.base.file.?.pwriteAll(mem.sliceAsBytes(&phdr), offset);
},
else => return error.UnsupportedArchitecture,
}
@@ -2166,7 +2149,7 @@ pub const File = struct {
bswapAllFields(elf.Elf32_Shdr, &shdr[0]);
}
const offset = self.shdr_table_offset.? + index * @sizeOf(elf.Elf32_Shdr);
- return self.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
+ return self.base.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
},
64 => {
var shdr = [1]elf.Elf64_Shdr{self.sections.items[index]};
@@ -2174,7 +2157,7 @@ pub const File = struct {
bswapAllFields(elf.Elf64_Shdr, &shdr[0]);
}
const offset = self.shdr_table_offset.? + index * @sizeOf(elf.Elf64_Shdr);
- return self.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
+ return self.base.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
},
else => return error.UnsupportedArchitecture,
}
@@ -2191,7 +2174,7 @@ pub const File = struct {
if (needed_size > allocated_size) {
// Must move the entire got section.
const new_offset = self.findFreeSpace(needed_size, entry_size);
- const amt = try self.file.?.copyRangeAll(shdr.sh_offset, self.file.?, new_offset, shdr.sh_size);
+ const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, shdr.sh_size);
if (amt != shdr.sh_size) return error.InputOutput;
shdr.sh_offset = new_offset;
phdr.p_offset = new_offset;
@@ -2211,12 +2194,12 @@ pub const File = struct {
.p32 => {
var buf: [4]u8 = undefined;
mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
- try self.file.?.pwriteAll(&buf, off);
+ try self.base.file.?.pwriteAll(&buf, off);
},
.p64 => {
var buf: [8]u8 = undefined;
mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
- try self.file.?.pwriteAll(&buf, off);
+ try self.base.file.?.pwriteAll(&buf, off);
},
}
}
@@ -2239,7 +2222,7 @@ pub const File = struct {
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, sym_align);
const existing_size = @as(u64, syms_sect.sh_info) * sym_size;
- const amt = try self.file.?.copyRangeAll(syms_sect.sh_offset, self.file.?, new_offset, existing_size);
+ const amt = try self.base.file.?.copyRangeAll(syms_sect.sh_offset, self.base.file.?, new_offset, existing_size);
if (amt != existing_size) return error.InputOutput;
syms_sect.sh_offset = new_offset;
}
@@ -2264,7 +2247,7 @@ pub const File = struct {
bswapAllFields(elf.Elf32_Sym, &sym[0]);
}
const off = syms_sect.sh_offset + @sizeOf(elf.Elf32_Sym) * index;
- try self.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
},
.p64 => {
var sym = [1]elf.Elf64_Sym{self.local_symbols.items[index]};
@@ -2272,7 +2255,7 @@ pub const File = struct {
bswapAllFields(elf.Elf64_Sym, &sym[0]);
}
const off = syms_sect.sh_offset + @sizeOf(elf.Elf64_Sym) * index;
- try self.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
},
}
}
@@ -2287,8 +2270,8 @@ pub const File = struct {
const global_syms_off = syms_sect.sh_offset + self.local_symbols.items.len * sym_size;
switch (self.ptr_width) {
.p32 => {
- const buf = try self.allocator.alloc(elf.Elf32_Sym, self.global_symbols.items.len);
- defer self.allocator.free(buf);
+ const buf = try self.base.allocator.alloc(elf.Elf32_Sym, self.global_symbols.items.len);
+ defer self.base.allocator.free(buf);
for (buf) |*sym, i| {
sym.* = .{
@@ -2303,11 +2286,11 @@ pub const File = struct {
bswapAllFields(elf.Elf32_Sym, sym);
}
}
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), global_syms_off);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), global_syms_off);
},
.p64 => {
- const buf = try self.allocator.alloc(elf.Elf64_Sym, self.global_symbols.items.len);
- defer self.allocator.free(buf);
+ const buf = try self.base.allocator.alloc(elf.Elf64_Sym, self.global_symbols.items.len);
+ defer self.base.allocator.free(buf);
for (buf) |*sym, i| {
sym.* = .{
@@ -2322,7 +2305,7 @@ pub const File = struct {
bswapAllFields(elf.Elf64_Sym, sym);
}
}
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), global_syms_off);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), global_syms_off);
},
}
}
@@ -2437,7 +2420,7 @@ pub const File = struct {
vec_index += 1;
}
}
- try self.file.?.pwritevAll(vecs[0..vec_index], offset - prev_padding_size);
+ try self.base.file.?.pwritevAll(vecs[0..vec_index], offset - prev_padding_size);
}
const min_nop_size = 2;
From a85452b2c2a258c212a02b759634193db2f08ff0 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Sun, 2 Aug 2020 20:48:39 -0400
Subject: [PATCH 006/153] Codegen: 16-bit pointers
---
src-self-hosted/codegen.zig | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index be544aaa29..defeca25f9 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -151,6 +151,10 @@ pub fn generateSymbol(
const vaddr = bin_file.local_symbols.items[decl.link.local_sym_index].st_value;
const endian = bin_file.base.options.target.cpu.arch.endian();
switch (bin_file.base.options.target.cpu.arch.ptrBitWidth()) {
+ 16 => {
+ try code.resize(2);
+ mem.writeInt(u16, code.items[0..2], @intCast(u16, vaddr), endian);
+ },
32 => {
try code.resize(4);
mem.writeInt(u32, code.items[0..4], @intCast(u32, vaddr), endian);
From a2bb246db4c2bb88f402215d5db79a535dbff4b6 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 5 Aug 2020 13:30:43 -0700
Subject: [PATCH 007/153] Revert "std.fmt.format: small optimization to avoid
runtime bloat"
This reverts commit 11d38a7e520f485206b7b010f64127d864194e4c.
The benefits of this commit are not enough to justify the compromise
that it made.
closes #5977
---
lib/std/fmt.zig | 2 --
1 file changed, 2 deletions(-)
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index df222daddd..c9ba3b3470 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -88,8 +88,6 @@ pub fn format(
if (args.len > ArgSetType.bit_count) {
@compileError("32 arguments max are supported per format call");
}
- if (args.len == 0)
- return writer.writeAll(fmt);
const State = enum {
Start,
From 747d46f22c2b5bef2d111564b5e4d362228004a2 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 5 Aug 2020 17:23:35 +0200
Subject: [PATCH 008/153] Initial draft of GetFinalPathNameByHandle
This commit proposes an initial draft of the `GetFinalPathNameByHandle` function
which wraps NT syscalls and strives to emulate (currently only
partially) the `kernel32.GetFinalPathNameByHandleW` function.
---
lib/std/os.zig | 10 +---
lib/std/os/windows.zig | 88 +++++++++++++++++++++++++++++-------
lib/std/os/windows/bits.zig | 26 +++++++++++
lib/std/os/windows/ntdll.zig | 34 ++++++++++++++
4 files changed, 133 insertions(+), 25 deletions(-)
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 06b61e8c38..88ce77dc56 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -4060,7 +4060,6 @@ pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealP
}
/// Same as `realpath` except `pathname` is UTF16LE-encoded.
-/// TODO use ntdll to emulate `GetFinalPathNameByHandleW` routine
pub fn realpathW(pathname: []const u16, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
const w = windows;
@@ -4095,15 +4094,10 @@ pub fn realpathW(pathname: []const u16, out_buffer: *[MAX_PATH_BYTES]u8) RealPat
defer w.CloseHandle(h_file);
var wide_buf: [w.PATH_MAX_WIDE]u16 = undefined;
- const wide_slice = try w.GetFinalPathNameByHandleW(h_file, &wide_buf, wide_buf.len, w.VOLUME_NAME_DOS);
-
- // Windows returns \\?\ prepended to the path.
- // We strip it to make this function consistent across platforms.
- const prefix = [_]u16{ '\\', '\\', '?', '\\' };
- const start_index = if (mem.startsWith(u16, wide_slice, &prefix)) prefix.len else 0;
+ const wide_slice = try w.GetFinalPathNameByHandle(h_file, wide_buf[0..]);
// Trust that Windows gives us valid UTF-16LE.
- const end_index = std.unicode.utf16leToUtf8(out_buffer, wide_slice[start_index..]) catch unreachable;
+ const end_index = std.unicode.utf16leToUtf8(out_buffer, wide_slice) catch unreachable;
return out_buffer[0..end_index];
}
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 2b3dc29b04..d803c8ae83 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -903,24 +903,78 @@ pub const GetFinalPathNameByHandleError = error{
Unexpected,
};
-pub fn GetFinalPathNameByHandleW(
- hFile: HANDLE,
- buf_ptr: [*]u16,
- buf_len: DWORD,
- flags: DWORD,
-) GetFinalPathNameByHandleError![:0]u16 {
- const rc = kernel32.GetFinalPathNameByHandleW(hFile, buf_ptr, buf_len, flags);
- if (rc == 0) {
- switch (kernel32.GetLastError()) {
- .FILE_NOT_FOUND => return error.FileNotFound,
- .PATH_NOT_FOUND => return error.FileNotFound,
- .NOT_ENOUGH_MEMORY => return error.SystemResources,
- .FILENAME_EXCED_RANGE => return error.NameTooLong,
- .INVALID_PARAMETER => unreachable,
- else => |err| return unexpectedError(err),
- }
+/// Returns canonical (normalized) path of handle. The output path assumes
+/// Win32 namespace, however, '\\?\' prefix is *not* prepended to the result.
+/// TODO support other namespaces/volume names.
+pub fn GetFinalPathNameByHandle(hFile: HANDLE, out_buffer: []u16) GetFinalPathNameByHandleError![]u16 {
+ // The implementation is based on implementation found in Wine sources:
+ // [LINK]
+ var buffer: [@sizeOf(OBJECT_NAME_INFORMATION) + MAX_PATH * 2]u8 = undefined;
+ var dummy: ULONG = undefined;
+ var rc = ntdll.NtQueryObject(hFile, OBJECT_INFORMATION_CLASS.ObjectNameInformation, &buffer, buffer.len, &dummy);
+ switch (rc) {
+ .SUCCESS => {},
+ else => return unexpectedStatus(rc),
}
- return buf_ptr[0..rc :0];
+
+ const object_name = @ptrCast(*const OBJECT_NAME_INFORMATION, @alignCast(@alignOf(OBJECT_NAME_INFORMATION), &buffer));
+ const object_path = @as([*]const u16, object_name.Name.Buffer)[0..object_name.Name.Length / 2];
+
+ // Since `NtQueryObject` returns a fully-qualified NT path, we need to translate
+ // the result into a Win32/DOS path (e.g., \Device\HarddiskVolume4\foo would become
+ // C:\foo).
+ const dos_drive_letters = &[_]u16{ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z' };
+ var query_path = [_]u16{ '\\', 'D', 'o', 's', 'D', 'e', 'v', 'i', 'c', 'e', 's', '\\', 'C', ':' };
+ for (dos_drive_letters) |drive_letter| {
+ const drive = &[_]u16{ drive_letter, ':' };
+ std.mem.copy(u16, query_path[12..], drive[0..]);
+
+ var sym_handle: HANDLE = undefined;
+ const len_bytes = @intCast(u16, query_path.len) * 2;
+ var nt_name = UNICODE_STRING{
+ .Length = len_bytes,
+ .MaximumLength = len_bytes,
+ .Buffer = @intToPtr([*]u16, @ptrToInt(&query_path)),
+ };
+ var attr = OBJECT_ATTRIBUTES{
+ .Length = @sizeOf(OBJECT_ATTRIBUTES),
+ .RootDirectory = null,
+ .Attributes = 0,
+ .ObjectName = &nt_name,
+ .SecurityDescriptor = null,
+ .SecurityQualityOfService = null,
+ };
+ rc = ntdll.NtOpenSymbolicLinkObject(&sym_handle, SYMBOLIC_LINK_QUERY, attr);
+ switch (rc) {
+ .SUCCESS => {},
+ .OBJECT_NAME_NOT_FOUND => continue,
+ else => return unexpectedStatus(rc),
+ }
+
+ var link_buffer: [MAX_PATH]u8 = undefined;
+ var link = UNICODE_STRING{
+ .Length = 0,
+ .MaximumLength = MAX_PATH,
+ .Buffer = @intToPtr([*]u16, @ptrToInt(&link_buffer[0])),
+ };
+ rc = ntdll.NtQuerySymbolicLinkObject(sym_handle, &link, null);
+ CloseHandle(sym_handle);
+ switch (rc) {
+ .SUCCESS => {},
+ else => return unexpectedStatus(rc),
+ }
+
+ const link_path = @as([*]const u16, link.Buffer)[0..link.Length / 2];
+ const idx = std.mem.indexOf(u16, object_path, link_path) orelse continue;
+
+ std.mem.copy(u16, out_buffer[0..], drive[0..]);
+ std.mem.copy(u16, out_buffer[2..], object_path[link_path.len..]);
+
+ return out_buffer[0..object_path.len - link_path.len + 2];
+ }
+
+ // If we're here, that means there was no match so error out!
+ unreachable;
}
pub const GetFileSizeError = error{Unexpected};
diff --git a/lib/std/os/windows/bits.zig b/lib/std/os/windows/bits.zig
index 9f50570e3e..74fe7040ab 100644
--- a/lib/std/os/windows/bits.zig
+++ b/lib/std/os/windows/bits.zig
@@ -1573,3 +1573,29 @@ pub const SYMLINK_FLAG_RELATIVE: ULONG = 0x1;
pub const SYMBOLIC_LINK_FLAG_DIRECTORY: DWORD = 0x1;
pub const SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE: DWORD = 0x2;
+
+pub const OBJECT_INFORMATION_CLASS = extern enum {
+ ObjectBasicInformation,
+ ObjectNameInformation,
+ ObjectTypeInformation,
+ ObjectAllInformation,
+ ObjectDataInformation,
+};
+pub const OBJECT_NAME_INFORMATION = extern struct {
+ Name: UNICODE_STRING,
+};
+
+pub const DIRECTORY_QUERY: DWORD = 0x0001;
+pub const DIRECTORY_TRAVERSE: DWORD = 0x0002;
+pub const DIRECTORY_CREATE_OBJECT: DWORD = 0x0004;
+pub const DIRECTORY_CREATE_SUBDIRECTORY: DWORD = 0x0008;
+pub const DIRECTORY_ALL_ACCESS: DWORD = STANDARD_RIGHTS_REQUIRED | 0xF;
+
+pub const OBJDIR_INFORMATION = extern struct {
+ ObjectName: UNICODE_STRING,
+ ObjectTypeName: UNICODE_STRING,
+ Data: [1]BYTE,
+};
+
+pub const SYMBOLIC_LINK_QUERY: DWORD = 0x0001;
+pub const SYMBOLIC_LINK_ALL_ACCESS: DWORD = STANDARD_RIGHTS_REQUIRED | 0x1;
\ No newline at end of file
diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig
index 5edad85c20..c41e613abb 100644
--- a/lib/std/os/windows/ntdll.zig
+++ b/lib/std/os/windows/ntdll.zig
@@ -106,3 +106,37 @@ pub extern "NtDll" fn NtWaitForKeyedEvent(
Alertable: BOOLEAN,
Timeout: ?*LARGE_INTEGER,
) callconv(.Stdcall) NTSTATUS;
+
+pub extern "NtDll" fn NtQueryObject(
+ Handle: HANDLE,
+ ObjectInformationClass: OBJECT_INFORMATION_CLASS,
+ ObjectInformation: *c_void,
+ ObjectInformationLength: ULONG,
+ ReturnLength: *ULONG,
+) callconv(.Stdcall) NTSTATUS;
+
+pub extern "NtDll" fn NtOpenSymbolicLinkObject(
+ pHandle: *HANDLE,
+ DesiredAccess: DWORD,
+ ObjectAttributes: OBJECT_ATTRIBUTES,
+) callconv(.Stdcall) NTSTATUS;
+pub extern "NtDll" fn NtQuerySymbolicLinkObject(
+ SymbolicLinkHandle: HANDLE,
+ pLinkName: *UNICODE_STRING,
+ pDataWritten: ?*ULONG,
+) callconv(.Stdcall) NTSTATUS;
+
+pub extern "NtDll" fn NtOpenDirectoryObject(
+ DirectoryObjectHandle: *HANDLE,
+ DesiredAccess: DWORD,
+ ObjectAttributes: OBJECT_ATTRIBUTES,
+) callconv(.Stdcall) NTSTATUS;
+pub extern "NtDll" fn NtQueryDirectoryObject(
+ DirectoryHandle: HANDLE,
+ Buffer: ?*c_void,
+ Length: ULONG,
+ ReturnSingleEntry: BOOLEAN,
+ RestartScan: BOOLEAN,
+ Context: *ULONG,
+ ReturnLength: *ULONG,
+) callconv(.Stdcall) NTSTATUS;
\ No newline at end of file
From 2628a8846e429ba7f51927985733f8e33f3b6a65 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 5 Aug 2020 18:34:44 +0200
Subject: [PATCH 009/153] Use NtQueryInformationFile unless unavailable
Favour newer API which uses `NtQueryInformationFile` with class flags
`FileNormalizedNameInformation` and `FileVolumeNameInformation`
instead of lower-level `NtQueryObject`. `NtQueryObject` is still
used as a fallback in case the former are unavailable.
---
lib/std/os/windows.zig | 70 ++++++++++++++++++++++++++++++++++--------
1 file changed, 57 insertions(+), 13 deletions(-)
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index d803c8ae83..d3d3c4f210 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -907,18 +907,61 @@ pub const GetFinalPathNameByHandleError = error{
/// Win32 namespace, however, '\\?\' prefix is *not* prepended to the result.
/// TODO support other namespaces/volume names.
pub fn GetFinalPathNameByHandle(hFile: HANDLE, out_buffer: []u16) GetFinalPathNameByHandleError![]u16 {
- // The implementation is based on implementation found in Wine sources:
- // [LINK]
- var buffer: [@sizeOf(OBJECT_NAME_INFORMATION) + MAX_PATH * 2]u8 = undefined;
- var dummy: ULONG = undefined;
- var rc = ntdll.NtQueryObject(hFile, OBJECT_INFORMATION_CLASS.ObjectNameInformation, &buffer, buffer.len, &dummy);
- switch (rc) {
- .SUCCESS => {},
- else => return unexpectedStatus(rc),
- }
+ // The implementation is based on implementation found in Wine sources, however,
+ // we make the following tweaks. First of all, if `NtQueryInformationFile` supports
+ // `FILE_INFORMATION_CLASS.FileNormalizedNameInformation` and `FileVolumeNameInformation`,
+ // we use those two calls to generate a valid Win32/DOS path. Otherwise, we fallback to
+ // more widely supported `NtQueryObject` generic routine.
+ // Wine source: https://source.winehq.com/git/wine.git/blob/HEAD:/dlls/kernelbase/file.c#l1708
+ var buffer: [PATH_MAX_WIDE]u16 = undefined;
+ const object_path = blk: {
+ var path_buffer: [PATH_MAX_WIDE * 2]u8 = undefined;
+ var io: IO_STATUS_BLOCK = undefined;
+ var rc = ntdll.NtQueryInformationFile(hFile, &io, &path_buffer, path_buffer.len, FILE_INFORMATION_CLASS.FileNormalizedNameInformation);
+ switch (rc) {
+ .SUCCESS => {
+ var nt_volume_buffer: [MAX_PATH]u8 = undefined;
+ rc = ntdll.NtQueryInformationFile(hFile, &io, &nt_volume_buffer, nt_volume_buffer.len, FILE_INFORMATION_CLASS.FileVolumeNameInformation);
+ switch (rc) {
+ .SUCCESS => {
+ const file_name = @ptrCast(*const FILE_NAME_INFORMATION, @alignCast(@alignOf(FILE_NAME_INFORMATION), &path_buffer[0]));
+ const file_name_u16 = @ptrCast([*]const u16, &file_name.FileName[0])[0..file_name.FileNameLength / 2];
- const object_name = @ptrCast(*const OBJECT_NAME_INFORMATION, @alignCast(@alignOf(OBJECT_NAME_INFORMATION), &buffer));
- const object_path = @as([*]const u16, object_name.Name.Buffer)[0..object_name.Name.Length / 2];
+ if (file_name_u16.len > PATH_MAX_WIDE) return error.NameTooLong;
+
+ const volume_name = @ptrCast(*const FILE_NAME_INFORMATION, @alignCast(@alignOf(FILE_NAME_INFORMATION), &nt_volume_buffer[0]));
+ const volume_name_u16 = @ptrCast([*]const u16, &volume_name.FileName[0])[0..volume_name.FileNameLength / 2];
+
+ std.mem.copy(u16, buffer[0..], volume_name_u16);
+ std.mem.copy(u16, buffer[volume_name_u16.len..], file_name_u16);
+
+ break :blk buffer[0..volume_name_u16.len + file_name_u16.len];
+ },
+ .INVALID_PARAMETER => {}, // fall through
+ else => return unexpectedStatus(rc),
+ }
+ },
+ .INVALID_PARAMETER => {}, // fall through
+ else => return unexpectedStatus(rc),
+ }
+
+ var dummy: ULONG = undefined;
+ rc = ntdll.NtQueryObject(hFile, OBJECT_INFORMATION_CLASS.ObjectNameInformation, &path_buffer, path_buffer.len, &dummy);
+ switch (rc) {
+ .SUCCESS => {},
+ else => return unexpectedStatus(rc),
+ }
+
+ const object_name = @ptrCast(*const OBJECT_NAME_INFORMATION, @alignCast(@alignOf(OBJECT_NAME_INFORMATION), &buffer));
+
+ // Apparently, `NtQueryObject` has a bug when signalling an error condition.
+ // To check for error, i.e., FileNotFound, we check if the result Buffer is non null
+ // and Length is greater than zero.
+ // Source: https://stackoverflow.com/questions/65170/how-to-get-name-associated-with-open-handle
+ if (object_name.Name.Length == 0) return error.FileNotFound;
+
+ break :blk @as([*]const u16, object_name.Name.Buffer)[0..object_name.Name.Length / 2];
+ };
// Since `NtQueryObject` returns a fully-qualified NT path, we need to translate
// the result into a Win32/DOS path (e.g., \Device\HarddiskVolume4\foo would become
@@ -927,7 +970,7 @@ pub fn GetFinalPathNameByHandle(hFile: HANDLE, out_buffer: []u16) GetFinalPathNa
var query_path = [_]u16{ '\\', 'D', 'o', 's', 'D', 'e', 'v', 'i', 'c', 'e', 's', '\\', 'C', ':' };
for (dos_drive_letters) |drive_letter| {
const drive = &[_]u16{ drive_letter, ':' };
- std.mem.copy(u16, query_path[12..], drive[0..]);
+ std.mem.copy(u16, query_path[query_path.len - 2..], drive[0..]);
var sym_handle: HANDLE = undefined;
const len_bytes = @intCast(u16, query_path.len) * 2;
@@ -944,7 +987,7 @@ pub fn GetFinalPathNameByHandle(hFile: HANDLE, out_buffer: []u16) GetFinalPathNa
.SecurityDescriptor = null,
.SecurityQualityOfService = null,
};
- rc = ntdll.NtOpenSymbolicLinkObject(&sym_handle, SYMBOLIC_LINK_QUERY, attr);
+ var rc = ntdll.NtOpenSymbolicLinkObject(&sym_handle, SYMBOLIC_LINK_QUERY, attr);
switch (rc) {
.SUCCESS => {},
.OBJECT_NAME_NOT_FOUND => continue,
@@ -967,6 +1010,7 @@ pub fn GetFinalPathNameByHandle(hFile: HANDLE, out_buffer: []u16) GetFinalPathNa
const link_path = @as([*]const u16, link.Buffer)[0..link.Length / 2];
const idx = std.mem.indexOf(u16, object_path, link_path) orelse continue;
+ // TODO check the provided buffer is actually big enough.
std.mem.copy(u16, out_buffer[0..], drive[0..]);
std.mem.copy(u16, out_buffer[2..], object_path[link_path.len..]);
From e8abfef2aa8579121fbd13b8df84e8f68b533fa4 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 5 Aug 2020 19:10:10 +0200
Subject: [PATCH 010/153] Add docs
---
lib/std/os/windows.zig | 24 ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index d3d3c4f210..03dbcdbd02 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -898,7 +898,6 @@ pub fn SetFilePointerEx_CURRENT_get(handle: HANDLE) SetFilePointerError!u64 {
pub const GetFinalPathNameByHandleError = error{
FileNotFound,
- SystemResources,
NameTooLong,
Unexpected,
};
@@ -963,9 +962,19 @@ pub fn GetFinalPathNameByHandle(hFile: HANDLE, out_buffer: []u16) GetFinalPathNa
break :blk @as([*]const u16, object_name.Name.Buffer)[0..object_name.Name.Length / 2];
};
- // Since `NtQueryObject` returns a fully-qualified NT path, we need to translate
- // the result into a Win32/DOS path (e.g., \Device\HarddiskVolume4\foo would become
- // C:\foo).
+ // By now, we got a a fully-qualified NT path, which we need to translate
+ // into a Win32/DOS path, for instance:
+ // \Device\HarddiskVolume4\foo => C:\foo
+ //
+ // NOTE:
+ // I couldn't figure out a better way of doing this unfortunately...
+ // This snippet below is in part based around `QueryDosDeviceW` implementation
+ // found in Wine. The trick with `NtQueryDirectoryObject` for some reason
+ // only lists `\DosDevices\Global` as the only SymblinkObject available, which
+ // means it's not possible to query the kernel for all available DOS volume name
+ // symlinks.
+ // TODO investigate!
+ // Wine source: https://source.winehq.com/git/wine.git/blob/HEAD:/dlls/kernelbase/volume.c#l1009
const dos_drive_letters = &[_]u16{ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z' };
var query_path = [_]u16{ '\\', 'D', 'o', 's', 'D', 'e', 'v', 'i', 'c', 'e', 's', '\\', 'C', ':' };
for (dos_drive_letters) |drive_letter| {
@@ -1010,14 +1019,17 @@ pub fn GetFinalPathNameByHandle(hFile: HANDLE, out_buffer: []u16) GetFinalPathNa
const link_path = @as([*]const u16, link.Buffer)[0..link.Length / 2];
const idx = std.mem.indexOf(u16, object_path, link_path) orelse continue;
- // TODO check the provided buffer is actually big enough.
+ // TODO is this the most appropriate error here?
+ if (out_buffer.len < drive.len + object_path.len) return error.NameTooLong;
+
std.mem.copy(u16, out_buffer[0..], drive[0..]);
std.mem.copy(u16, out_buffer[2..], object_path[link_path.len..]);
return out_buffer[0..object_path.len - link_path.len + 2];
}
- // If we're here, that means there was no match so error out!
+ // If we're here, that means there was no match so we panic!
+ // TODO should we actually panic here or return an error instead?
unreachable;
}
From bdda8fa7a818c90803fb64f9784040bf3b9f0a5e Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Thu, 6 Aug 2020 18:51:17 +0200
Subject: [PATCH 011/153] Redo GetFinalPathNameByHandle using DeviceIoControl
This commit reimagines `std.os.windows.GetFinalPathNameByHandle`
using `DeviceIoControl` to query the OS mount manager for the DOS
(symlink) paths for the given NT volume name. In particular,
it uses `IOCTL_MOUNTMGR_QUERY_POINTS` ioctl opcode to query the
manager for the available mount points.
---
lib/std/os.zig | 2 +-
lib/std/os/windows.zig | 259 ++++++++++++++++++-----------------
lib/std/os/windows/bits.zig | 38 ++---
lib/std/os/windows/ntdll.zig | 34 -----
4 files changed, 149 insertions(+), 184 deletions(-)
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 88ce77dc56..04c2340cad 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -4094,7 +4094,7 @@ pub fn realpathW(pathname: []const u16, out_buffer: *[MAX_PATH_BYTES]u8) RealPat
defer w.CloseHandle(h_file);
var wide_buf: [w.PATH_MAX_WIDE]u16 = undefined;
- const wide_slice = try w.GetFinalPathNameByHandle(h_file, wide_buf[0..]);
+ const wide_slice = try w.GetFinalPathNameByHandle(h_file, .{}, wide_buf[0..]);
// Trust that Windows gives us valid UTF-16LE.
const end_index = std.unicode.utf16leToUtf8(out_buffer, wide_slice) catch unreachable;
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 03dbcdbd02..c45f1c1fd8 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -902,135 +902,142 @@ pub const GetFinalPathNameByHandleError = error{
Unexpected,
};
-/// Returns canonical (normalized) path of handle. The output path assumes
-/// Win32 namespace, however, '\\?\' prefix is *not* prepended to the result.
-/// TODO support other namespaces/volume names.
-pub fn GetFinalPathNameByHandle(hFile: HANDLE, out_buffer: []u16) GetFinalPathNameByHandleError![]u16 {
- // The implementation is based on implementation found in Wine sources, however,
- // we make the following tweaks. First of all, if `NtQueryInformationFile` supports
- // `FILE_INFORMATION_CLASS.FileNormalizedNameInformation` and `FileVolumeNameInformation`,
- // we use those two calls to generate a valid Win32/DOS path. Otherwise, we fallback to
- // more widely supported `NtQueryObject` generic routine.
- // Wine source: https://source.winehq.com/git/wine.git/blob/HEAD:/dlls/kernelbase/file.c#l1708
- var buffer: [PATH_MAX_WIDE]u16 = undefined;
- const object_path = blk: {
- var path_buffer: [PATH_MAX_WIDE * 2]u8 = undefined;
- var io: IO_STATUS_BLOCK = undefined;
- var rc = ntdll.NtQueryInformationFile(hFile, &io, &path_buffer, path_buffer.len, FILE_INFORMATION_CLASS.FileNormalizedNameInformation);
- switch (rc) {
- .SUCCESS => {
- var nt_volume_buffer: [MAX_PATH]u8 = undefined;
- rc = ntdll.NtQueryInformationFile(hFile, &io, &nt_volume_buffer, nt_volume_buffer.len, FILE_INFORMATION_CLASS.FileVolumeNameInformation);
- switch (rc) {
- .SUCCESS => {
- const file_name = @ptrCast(*const FILE_NAME_INFORMATION, @alignCast(@alignOf(FILE_NAME_INFORMATION), &path_buffer[0]));
- const file_name_u16 = @ptrCast([*]const u16, &file_name.FileName[0])[0..file_name.FileNameLength / 2];
+/// Specifies how to format volume path in the result of `GetFinalPathNameByHandle`.
+/// Defaults to DOS volume names.
+pub const GetFinalPathNameByHandleFormat = struct {
+ volume_name: enum {
+ /// Format as DOS volume name
+ Dos,
+ /// Format as NT volume name
+ Nt,
+ } = .Dos,
+};
- if (file_name_u16.len > PATH_MAX_WIDE) return error.NameTooLong;
-
- const volume_name = @ptrCast(*const FILE_NAME_INFORMATION, @alignCast(@alignOf(FILE_NAME_INFORMATION), &nt_volume_buffer[0]));
- const volume_name_u16 = @ptrCast([*]const u16, &volume_name.FileName[0])[0..volume_name.FileNameLength / 2];
-
- std.mem.copy(u16, buffer[0..], volume_name_u16);
- std.mem.copy(u16, buffer[volume_name_u16.len..], file_name_u16);
-
- break :blk buffer[0..volume_name_u16.len + file_name_u16.len];
- },
- .INVALID_PARAMETER => {}, // fall through
- else => return unexpectedStatus(rc),
- }
- },
- .INVALID_PARAMETER => {}, // fall through
- else => return unexpectedStatus(rc),
- }
-
- var dummy: ULONG = undefined;
- rc = ntdll.NtQueryObject(hFile, OBJECT_INFORMATION_CLASS.ObjectNameInformation, &path_buffer, path_buffer.len, &dummy);
- switch (rc) {
- .SUCCESS => {},
- else => return unexpectedStatus(rc),
- }
-
- const object_name = @ptrCast(*const OBJECT_NAME_INFORMATION, @alignCast(@alignOf(OBJECT_NAME_INFORMATION), &buffer));
-
- // Apparently, `NtQueryObject` has a bug when signalling an error condition.
- // To check for error, i.e., FileNotFound, we check if the result Buffer is non null
- // and Length is greater than zero.
- // Source: https://stackoverflow.com/questions/65170/how-to-get-name-associated-with-open-handle
- if (object_name.Name.Length == 0) return error.FileNotFound;
-
- break :blk @as([*]const u16, object_name.Name.Buffer)[0..object_name.Name.Length / 2];
- };
-
- // By now, we got a a fully-qualified NT path, which we need to translate
- // into a Win32/DOS path, for instance:
- // \Device\HarddiskVolume4\foo => C:\foo
- //
- // NOTE:
- // I couldn't figure out a better way of doing this unfortunately...
- // This snippet below is in part based around `QueryDosDeviceW` implementation
- // found in Wine. The trick with `NtQueryDirectoryObject` for some reason
- // only lists `\DosDevices\Global` as the only SymblinkObject available, which
- // means it's not possible to query the kernel for all available DOS volume name
- // symlinks.
- // TODO investigate!
- // Wine source: https://source.winehq.com/git/wine.git/blob/HEAD:/dlls/kernelbase/volume.c#l1009
- const dos_drive_letters = &[_]u16{ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z' };
- var query_path = [_]u16{ '\\', 'D', 'o', 's', 'D', 'e', 'v', 'i', 'c', 'e', 's', '\\', 'C', ':' };
- for (dos_drive_letters) |drive_letter| {
- const drive = &[_]u16{ drive_letter, ':' };
- std.mem.copy(u16, query_path[query_path.len - 2..], drive[0..]);
-
- var sym_handle: HANDLE = undefined;
- const len_bytes = @intCast(u16, query_path.len) * 2;
- var nt_name = UNICODE_STRING{
- .Length = len_bytes,
- .MaximumLength = len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(&query_path)),
- };
- var attr = OBJECT_ATTRIBUTES{
- .Length = @sizeOf(OBJECT_ATTRIBUTES),
- .RootDirectory = null,
- .Attributes = 0,
- .ObjectName = &nt_name,
- .SecurityDescriptor = null,
- .SecurityQualityOfService = null,
- };
- var rc = ntdll.NtOpenSymbolicLinkObject(&sym_handle, SYMBOLIC_LINK_QUERY, attr);
- switch (rc) {
- .SUCCESS => {},
- .OBJECT_NAME_NOT_FOUND => continue,
- else => return unexpectedStatus(rc),
- }
-
- var link_buffer: [MAX_PATH]u8 = undefined;
- var link = UNICODE_STRING{
- .Length = 0,
- .MaximumLength = MAX_PATH,
- .Buffer = @intToPtr([*]u16, @ptrToInt(&link_buffer[0])),
- };
- rc = ntdll.NtQuerySymbolicLinkObject(sym_handle, &link, null);
- CloseHandle(sym_handle);
- switch (rc) {
- .SUCCESS => {},
- else => return unexpectedStatus(rc),
- }
-
- const link_path = @as([*]const u16, link.Buffer)[0..link.Length / 2];
- const idx = std.mem.indexOf(u16, object_path, link_path) orelse continue;
-
- // TODO is this the most appropriate error here?
- if (out_buffer.len < drive.len + object_path.len) return error.NameTooLong;
-
- std.mem.copy(u16, out_buffer[0..], drive[0..]);
- std.mem.copy(u16, out_buffer[2..], object_path[link_path.len..]);
-
- return out_buffer[0..object_path.len - link_path.len + 2];
+/// Returns canonical (normalized) path of handle.
+/// Use `GetFinalPathNameByHandleFormat` to specify whether the path is meant to include
+/// NT or DOS volume name (e.g., `\Device\HarddiskVolume0\foo.txt` versus `C:\foo.txt`).
+/// If DOS volume name format is selected, note that this function does *not* prepend
+/// `\\?\` prefix to the resultant path.
+pub fn GetFinalPathNameByHandle(
+ hFile: HANDLE,
+ fmt: GetFinalPathNameByHandleFormat,
+ out_buffer: []u16,
+) GetFinalPathNameByHandleError![]u16 {
+ // Get normalized path; doesn't include volume name though.
+ var path_buffer: [@sizeOf(FILE_NAME_INFORMATION) + PATH_MAX_WIDE * 2 + 2]u8 = undefined;
+ var io: IO_STATUS_BLOCK = undefined;
+ var rc = ntdll.NtQueryInformationFile(hFile, &io, &path_buffer, path_buffer.len, FILE_INFORMATION_CLASS.FileNormalizedNameInformation);
+ switch (rc) {
+ .SUCCESS => {},
+ .INVALID_PARAMETER => unreachable,
+ else => return unexpectedStatus(rc),
}
- // If we're here, that means there was no match so we panic!
- // TODO should we actually panic here or return an error instead?
- unreachable;
+ // Get NT volume name.
+ var volume_buffer: [MAX_PATH]u8 = undefined; // MAX_PATH bytes should be enough since it's Windows-defined name
+ rc = ntdll.NtQueryInformationFile(hFile, &io, &volume_buffer, volume_buffer.len, FILE_INFORMATION_CLASS.FileVolumeNameInformation);
+ switch (rc) {
+ .SUCCESS => {},
+ .INVALID_PARAMETER => unreachable,
+ else => return unexpectedStatus(rc),
+ }
+
+ const file_name = @ptrCast(*const FILE_NAME_INFORMATION, @alignCast(@alignOf(FILE_NAME_INFORMATION), &path_buffer[0]));
+ const file_name_u16 = @ptrCast([*]const u16, &file_name.FileName[0])[0 .. file_name.FileNameLength / 2];
+
+ const volume_name = @ptrCast(*const FILE_NAME_INFORMATION, @alignCast(@alignOf(FILE_NAME_INFORMATION), &volume_buffer[0]));
+
+ switch (fmt.volume_name) {
+ .Nt => {
+ // Nothing to do, we simply copy the bytes to the user-provided buffer.
+ const volume_name_u16 = @ptrCast([*]const u16, &volume_name.FileName[0])[0 .. volume_name.FileNameLength / 2];
+
+ if (out_buffer.len < volume_name_u16.len + file_name_u16.len) return error.NameTooLong;
+
+ std.mem.copy(u16, out_buffer[0..], volume_name_u16);
+ std.mem.copy(u16, out_buffer[volume_name_u16.len..], file_name_u16);
+
+ return out_buffer[0 .. volume_name_u16.len + file_name_u16.len];
+ },
+ .Dos => {
+ // Get DOS volume name. DOS volume names are actually symbolic link objects to the
+ // actual NT volume. For example:
+ // (NT) \Device\HarddiskVolume4 => (DOS) \DosDevices\C: == (DOS) C:
+ const MIN_SIZE = @sizeOf(MOUNTMGR_MOUNT_POINT) + MAX_PATH;
+ // We initialize the input buffer to all zeros for convenience since
+ // `DeviceIoControl` with `IOCTL_MOUNTMGR_QUERY_POINTS` expects this.
+ var input_buf = [_]u8{0} ** MIN_SIZE;
+ var output_buf: [MIN_SIZE * 4]u8 = undefined;
+
+ // This surprising path is a filesystem path to the mount manager on Windows.
+ // Source: https://stackoverflow.com/questions/3012828/using-ioctl-mountmgr-query-points
+ const mgmt_path = "\\MountPointManager";
+ const mgmt_path_u16 = sliceToPrefixedFileW(mgmt_path) catch unreachable;
+ const mgmt_handle = OpenFile(mgmt_path_u16.span(), .{
+ .access_mask = SYNCHRONIZE,
+ .share_access = FILE_SHARE_READ | FILE_SHARE_WRITE,
+ .creation = FILE_OPEN,
+ .io_mode = .blocking,
+ }) catch |err| switch (err) {
+ error.IsDir => unreachable,
+ error.NotDir => unreachable,
+ error.NoDevice => unreachable,
+ error.AccessDenied => unreachable,
+ error.PipeBusy => unreachable,
+ error.PathAlreadyExists => unreachable,
+ error.WouldBlock => unreachable,
+ else => |e| return e,
+ };
+ defer CloseHandle(mgmt_handle);
+
+ var input_struct = @ptrCast(*MOUNTMGR_MOUNT_POINT, @alignCast(@alignOf(MOUNTMGR_MOUNT_POINT), &input_buf[0]));
+ input_struct.DeviceNameOffset = @sizeOf(MOUNTMGR_MOUNT_POINT);
+ input_struct.DeviceNameLength = @intCast(USHORT, volume_name.FileNameLength);
+ @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..], @ptrCast([*]const u8, &volume_name.FileName[0]), volume_name.FileNameLength);
+
+ try DeviceIoControl(
+ mgmt_handle,
+ IOCTL_MOUNTMGR_QUERY_POINTS,
+ input_buf[0 .. @sizeOf(MOUNTMGR_MOUNT_POINT) + volume_name.FileNameLength],
+ output_buf[0..],
+ );
+ const mount_points_struct = @ptrCast(*const MOUNTMGR_MOUNT_POINTS, @alignCast(@alignOf(MOUNTMGR_MOUNT_POINTS), &output_buf[0]));
+
+ const mount_points = @ptrCast(
+ [*]const MOUNTMGR_MOUNT_POINT,
+ @alignCast(@alignOf(MOUNTMGR_MOUNT_POINT), &mount_points_struct.MountPoints[0]),
+ )[0..mount_points_struct.NumberOfMountPoints];
+
+ var found: bool = false;
+ for (mount_points) |mount_point| {
+ const symlink = @ptrCast(
+ [*]const u16,
+ @alignCast(@alignOf(u16), &output_buf[mount_point.SymbolicLinkNameOffset]),
+ )[0 .. mount_point.SymbolicLinkNameLength / 2];
+
+ // Look for `\DosDevices\` prefix. We don't really care if there are more than one symlinks
+ // with traditional DOS drive letters, so pick the first one available.
+ const prefix = &[_]u16{ '\\', 'D', 'o', 's', 'D', 'e', 'v', 'i', 'c', 'e', 's', '\\' };
+
+ if (std.mem.indexOf(u16, symlink, prefix)) |idx| {
+ if (idx != 0) continue;
+
+ const drive_letter = symlink[prefix.len..];
+
+ if (out_buffer.len < drive_letter.len + file_name_u16.len) return error.NameTooLong;
+
+ std.mem.copy(u16, out_buffer[0..], drive_letter);
+ std.mem.copy(u16, out_buffer[drive_letter.len..], file_name_u16);
+
+ return out_buffer[0 .. drive_letter.len + file_name_u16.len];
+ }
+ }
+
+ // If we've ended up here, then something went wrong/is corrupted in the OS,
+ // so error out!
+ return error.FileNotFound;
+ },
+ }
}
pub const GetFileSizeError = error{Unexpected};
diff --git a/lib/std/os/windows/bits.zig b/lib/std/os/windows/bits.zig
index 74fe7040ab..516af6d4fc 100644
--- a/lib/std/os/windows/bits.zig
+++ b/lib/std/os/windows/bits.zig
@@ -1574,28 +1574,20 @@ pub const SYMLINK_FLAG_RELATIVE: ULONG = 0x1;
pub const SYMBOLIC_LINK_FLAG_DIRECTORY: DWORD = 0x1;
pub const SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE: DWORD = 0x2;
-pub const OBJECT_INFORMATION_CLASS = extern enum {
- ObjectBasicInformation,
- ObjectNameInformation,
- ObjectTypeInformation,
- ObjectAllInformation,
- ObjectDataInformation,
+pub const MOUNTMGR_MOUNT_POINT = extern struct {
+ SymbolicLinkNameOffset: ULONG,
+ SymbolicLinkNameLength: USHORT,
+ Reserved1: USHORT,
+ UniqueIdOffset: ULONG,
+ UniqueIdLength: USHORT,
+ Reserved2: USHORT,
+ DeviceNameOffset: ULONG,
+ DeviceNameLength: USHORT,
+ Reserved3: USHORT,
};
-pub const OBJECT_NAME_INFORMATION = extern struct {
- Name: UNICODE_STRING,
+pub const MOUNTMGR_MOUNT_POINTS = extern struct {
+ Size: ULONG,
+ NumberOfMountPoints: ULONG,
+ MountPoints: [1]MOUNTMGR_MOUNT_POINT,
};
-
-pub const DIRECTORY_QUERY: DWORD = 0x0001;
-pub const DIRECTORY_TRAVERSE: DWORD = 0x0002;
-pub const DIRECTORY_CREATE_OBJECT: DWORD = 0x0004;
-pub const DIRECTORY_CREATE_SUBDIRECTORY: DWORD = 0x0008;
-pub const DIRECTORY_ALL_ACCESS: DWORD = STANDARD_RIGHTS_REQUIRED | 0xF;
-
-pub const OBJDIR_INFORMATION = extern struct {
- ObjectName: UNICODE_STRING,
- ObjectTypeName: UNICODE_STRING,
- Data: [1]BYTE,
-};
-
-pub const SYMBOLIC_LINK_QUERY: DWORD = 0x0001;
-pub const SYMBOLIC_LINK_ALL_ACCESS: DWORD = STANDARD_RIGHTS_REQUIRED | 0x1;
\ No newline at end of file
+pub const IOCTL_MOUNTMGR_QUERY_POINTS: ULONG = 0x6d0008;
\ No newline at end of file
diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig
index c41e613abb..5edad85c20 100644
--- a/lib/std/os/windows/ntdll.zig
+++ b/lib/std/os/windows/ntdll.zig
@@ -106,37 +106,3 @@ pub extern "NtDll" fn NtWaitForKeyedEvent(
Alertable: BOOLEAN,
Timeout: ?*LARGE_INTEGER,
) callconv(.Stdcall) NTSTATUS;
-
-pub extern "NtDll" fn NtQueryObject(
- Handle: HANDLE,
- ObjectInformationClass: OBJECT_INFORMATION_CLASS,
- ObjectInformation: *c_void,
- ObjectInformationLength: ULONG,
- ReturnLength: *ULONG,
-) callconv(.Stdcall) NTSTATUS;
-
-pub extern "NtDll" fn NtOpenSymbolicLinkObject(
- pHandle: *HANDLE,
- DesiredAccess: DWORD,
- ObjectAttributes: OBJECT_ATTRIBUTES,
-) callconv(.Stdcall) NTSTATUS;
-pub extern "NtDll" fn NtQuerySymbolicLinkObject(
- SymbolicLinkHandle: HANDLE,
- pLinkName: *UNICODE_STRING,
- pDataWritten: ?*ULONG,
-) callconv(.Stdcall) NTSTATUS;
-
-pub extern "NtDll" fn NtOpenDirectoryObject(
- DirectoryObjectHandle: *HANDLE,
- DesiredAccess: DWORD,
- ObjectAttributes: OBJECT_ATTRIBUTES,
-) callconv(.Stdcall) NTSTATUS;
-pub extern "NtDll" fn NtQueryDirectoryObject(
- DirectoryHandle: HANDLE,
- Buffer: ?*c_void,
- Length: ULONG,
- ReturnSingleEntry: BOOLEAN,
- RestartScan: BOOLEAN,
- Context: *ULONG,
- ReturnLength: *ULONG,
-) callconv(.Stdcall) NTSTATUS;
\ No newline at end of file
From 310aa87198806d695292bace8136138c0e3875ee Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Fri, 7 Aug 2020 23:21:06 +0200
Subject: [PATCH 012/153] Fix alignment issue
---
lib/std/os/windows.zig | 25 ++++++++++---------------
1 file changed, 10 insertions(+), 15 deletions(-)
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index c45f1c1fd8..544b4d7a61 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -924,7 +924,7 @@ pub fn GetFinalPathNameByHandle(
out_buffer: []u16,
) GetFinalPathNameByHandleError![]u16 {
// Get normalized path; doesn't include volume name though.
- var path_buffer: [@sizeOf(FILE_NAME_INFORMATION) + PATH_MAX_WIDE * 2 + 2]u8 = undefined;
+ var path_buffer: [PATH_MAX_WIDE * 2]u8 align(@alignOf(FILE_NAME_INFORMATION)) = undefined;
var io: IO_STATUS_BLOCK = undefined;
var rc = ntdll.NtQueryInformationFile(hFile, &io, &path_buffer, path_buffer.len, FILE_INFORMATION_CLASS.FileNormalizedNameInformation);
switch (rc) {
@@ -934,7 +934,7 @@ pub fn GetFinalPathNameByHandle(
}
// Get NT volume name.
- var volume_buffer: [MAX_PATH]u8 = undefined; // MAX_PATH bytes should be enough since it's Windows-defined name
+ var volume_buffer: [MAX_PATH]u8 align(@alignOf(FILE_NAME_INFORMATION)) = undefined; // MAX_PATH bytes should be enough since it's Windows-defined name
rc = ntdll.NtQueryInformationFile(hFile, &io, &volume_buffer, volume_buffer.len, FILE_INFORMATION_CLASS.FileVolumeNameInformation);
switch (rc) {
.SUCCESS => {},
@@ -942,10 +942,10 @@ pub fn GetFinalPathNameByHandle(
else => return unexpectedStatus(rc),
}
- const file_name = @ptrCast(*const FILE_NAME_INFORMATION, @alignCast(@alignOf(FILE_NAME_INFORMATION), &path_buffer[0]));
+ const file_name = @ptrCast(*const FILE_NAME_INFORMATION, &path_buffer[0]);
const file_name_u16 = @ptrCast([*]const u16, &file_name.FileName[0])[0 .. file_name.FileNameLength / 2];
- const volume_name = @ptrCast(*const FILE_NAME_INFORMATION, @alignCast(@alignOf(FILE_NAME_INFORMATION), &volume_buffer[0]));
+ const volume_name = @ptrCast(*const FILE_NAME_INFORMATION, &volume_buffer[0]);
switch (fmt.volume_name) {
.Nt => {
@@ -966,8 +966,8 @@ pub fn GetFinalPathNameByHandle(
const MIN_SIZE = @sizeOf(MOUNTMGR_MOUNT_POINT) + MAX_PATH;
// We initialize the input buffer to all zeros for convenience since
// `DeviceIoControl` with `IOCTL_MOUNTMGR_QUERY_POINTS` expects this.
- var input_buf = [_]u8{0} ** MIN_SIZE;
- var output_buf: [MIN_SIZE * 4]u8 = undefined;
+ var input_buf: [MIN_SIZE]u8 align(@alignOf(MOUNTMGR_MOUNT_POINT)) = [_]u8{0} ** MIN_SIZE;
+ var output_buf: [MIN_SIZE * 4]u8 align(@alignOf(MOUNTMGR_MOUNT_POINTS)) = undefined;
// This surprising path is a filesystem path to the mount manager on Windows.
// Source: https://stackoverflow.com/questions/3012828/using-ioctl-mountmgr-query-points
@@ -990,22 +990,17 @@ pub fn GetFinalPathNameByHandle(
};
defer CloseHandle(mgmt_handle);
- var input_struct = @ptrCast(*MOUNTMGR_MOUNT_POINT, @alignCast(@alignOf(MOUNTMGR_MOUNT_POINT), &input_buf[0]));
+ var input_struct = @ptrCast(*MOUNTMGR_MOUNT_POINT, &input_buf[0]);
input_struct.DeviceNameOffset = @sizeOf(MOUNTMGR_MOUNT_POINT);
input_struct.DeviceNameLength = @intCast(USHORT, volume_name.FileNameLength);
@memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..], @ptrCast([*]const u8, &volume_name.FileName[0]), volume_name.FileNameLength);
- try DeviceIoControl(
- mgmt_handle,
- IOCTL_MOUNTMGR_QUERY_POINTS,
- input_buf[0 .. @sizeOf(MOUNTMGR_MOUNT_POINT) + volume_name.FileNameLength],
- output_buf[0..],
- );
- const mount_points_struct = @ptrCast(*const MOUNTMGR_MOUNT_POINTS, @alignCast(@alignOf(MOUNTMGR_MOUNT_POINTS), &output_buf[0]));
+ try DeviceIoControl(mgmt_handle, IOCTL_MOUNTMGR_QUERY_POINTS, input_buf[0..], output_buf[0..]);
+ const mount_points_struct = @ptrCast(*const MOUNTMGR_MOUNT_POINTS, &output_buf[0]);
const mount_points = @ptrCast(
[*]const MOUNTMGR_MOUNT_POINT,
- @alignCast(@alignOf(MOUNTMGR_MOUNT_POINT), &mount_points_struct.MountPoints[0]),
+ &mount_points_struct.MountPoints[0],
)[0..mount_points_struct.NumberOfMountPoints];
var found: bool = false;
From 2fc18b52788f789ceba7b4f60e850de3ce67495c Mon Sep 17 00:00:00 2001
From: Isaac Freund
Date: Sat, 8 Aug 2020 01:18:22 +0200
Subject: [PATCH 013/153] stage2: make link data in Decl into unions
This will allow for implementation of non-Elf backends without wasting
memory.
---
src-self-hosted/Module.zig | 24 +++++++----
src-self-hosted/codegen.zig | 10 ++---
src-self-hosted/link.zig | 84 +++++++++++++++++++++----------------
3 files changed, 69 insertions(+), 49 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index bda2b3205f..e056b02b2e 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -177,14 +177,14 @@ pub const Decl = struct {
/// Represents the position of the code in the output file.
/// This is populated regardless of semantic analysis and code generation.
- link: link.File.Elf.TextBlock = link.File.Elf.TextBlock.empty,
+ link: link.File.LinkBlock,
/// Represents the function in the linked output file, if the `Decl` is a function.
/// This is stored here and not in `Fn` because `Decl` survives across updates but
/// `Fn` does not.
/// TODO Look into making `Fn` a longer lived structure and moving this field there
/// to save on memory usage.
- fn_link: link.File.Elf.SrcFn = link.File.Elf.SrcFn.empty,
+ fn_link: link.File.LinkFn,
contents_hash: std.zig.SrcHash,
@@ -1538,10 +1538,13 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
if (!srcHashEql(decl.contents_hash, contents_hash)) {
try self.markOutdatedDecl(decl);
decl.contents_hash = contents_hash;
- } else if (decl.fn_link.len != 0) {
- // TODO Look into detecting when this would be unnecessary by storing enough state
- // in `Decl` to notice that the line number did not change.
- self.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
+ } else switch (self.bin_file.tag) {
+ .elf => if (decl.fn_link.elf.len != 0) {
+ // TODO Look into detecting when this would be unnecessary by storing enough state
+ // in `Decl` to notice that the line number did not change.
+ self.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
+ },
+ .c => {},
}
}
} else {
@@ -1745,7 +1748,14 @@ fn allocateNewDecl(
.analysis = .unreferenced,
.deletion_flag = false,
.contents_hash = contents_hash,
- .link = link.File.Elf.TextBlock.empty,
+ .link = switch (self.bin_file.tag) {
+ .elf => .{ .elf = link.File.Elf.TextBlock.empty },
+ .c => .{ .c = {} },
+ },
+ .fn_link = switch (self.bin_file.tag) {
+ .elf => .{ .elf = link.File.Elf.SrcFn.empty },
+ .c => .{ .c = {} },
+ },
.generation = 0,
};
return new_decl;
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index defeca25f9..fffee883d4 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -145,10 +145,10 @@ pub fn generateSymbol(
if (typed_value.val.cast(Value.Payload.DeclRef)) |payload| {
const decl = payload.decl;
if (decl.analysis != .complete) return error.AnalysisFail;
- assert(decl.link.local_sym_index != 0);
+ assert(decl.link.elf.local_sym_index != 0);
// TODO handle the dependency of this symbol on the decl's vaddr.
// If the decl changes vaddr, then this symbol needs to get regenerated.
- const vaddr = bin_file.local_symbols.items[decl.link.local_sym_index].st_value;
+ const vaddr = bin_file.local_symbols.items[decl.link.elf.local_sym_index].st_value;
const endian = bin_file.base.options.target.cpu.arch.endian();
switch (bin_file.base.options.target.cpu.arch.ptrBitWidth()) {
16 => {
@@ -1085,7 +1085,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got = &self.bin_file.program_headers.items[self.bin_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.offset_table_index * ptr_bytes);
+ const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
// ff 14 25 xx xx xx xx call [addr]
try self.code.ensureCapacity(self.code.items.len + 7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
@@ -1106,7 +1106,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got = &self.bin_file.program_headers.items[self.bin_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.offset_table_index * ptr_bytes);
+ const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
try self.genSetReg(inst.base.src, .ra, .{ .memory = got_addr });
const jalr = instructions.Jalr{
@@ -1934,7 +1934,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (typed_value.val.cast(Value.Payload.DeclRef)) |payload| {
const got = &self.bin_file.program_headers.items[self.bin_file.phdr_got_index.?];
const decl = payload.decl;
- const got_addr = got.p_vaddr + decl.link.offset_table_index * ptr_bytes;
+ const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
}
return self.fail(src, "TODO codegen more kinds of const pointers", .{});
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 30d44fcdef..be30c1df31 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -38,6 +38,16 @@ pub const Options = struct {
pub const File = struct {
+ pub const LinkBlock = union {
+ elf: Elf.TextBlock,
+ c: void,
+ };
+
+ pub const LinkFn = union {
+ elf: Elf.SrcFn,
+ c: void,
+ };
+
tag: Tag,
options: Options,
file: ?fs.File,
@@ -1720,31 +1730,31 @@ pub const File = struct {
}
pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
- if (decl.link.local_sym_index != 0) return;
+ if (decl.link.elf.local_sym_index != 0) return;
try self.local_symbols.ensureCapacity(self.base.allocator, self.local_symbols.items.len + 1);
try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
if (self.local_symbol_free_list.popOrNull()) |i| {
log.debug(.link, "reusing symbol index {} for {}\n", .{ i, decl.name });
- decl.link.local_sym_index = i;
+ decl.link.elf.local_sym_index = i;
} else {
log.debug(.link, "allocating symbol index {} for {}\n", .{ self.local_symbols.items.len, decl.name });
- decl.link.local_sym_index = @intCast(u32, self.local_symbols.items.len);
+ decl.link.elf.local_sym_index = @intCast(u32, self.local_symbols.items.len);
_ = self.local_symbols.addOneAssumeCapacity();
}
if (self.offset_table_free_list.popOrNull()) |i| {
- decl.link.offset_table_index = i;
+ decl.link.elf.offset_table_index = i;
} else {
- decl.link.offset_table_index = @intCast(u32, self.offset_table.items.len);
+ decl.link.elf.offset_table_index = @intCast(u32, self.offset_table.items.len);
_ = self.offset_table.addOneAssumeCapacity();
self.offset_table_count_dirty = true;
}
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- self.local_symbols.items[decl.link.local_sym_index] = .{
+ self.local_symbols.items[decl.link.elf.local_sym_index] = .{
.st_name = 0,
.st_info = 0,
.st_other = 0,
@@ -1752,39 +1762,39 @@ pub const File = struct {
.st_value = phdr.p_vaddr,
.st_size = 0,
};
- self.offset_table.items[decl.link.offset_table_index] = 0;
+ self.offset_table.items[decl.link.elf.offset_table_index] = 0;
}
pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
- self.freeTextBlock(&decl.link);
- if (decl.link.local_sym_index != 0) {
- self.local_symbol_free_list.append(self.base.allocator, decl.link.local_sym_index) catch {};
- self.offset_table_free_list.append(self.base.allocator, decl.link.offset_table_index) catch {};
+ self.freeTextBlock(&decl.link.elf);
+ if (decl.link.elf.local_sym_index != 0) {
+ self.local_symbol_free_list.append(self.base.allocator, decl.link.elf.local_sym_index) catch {};
+ self.offset_table_free_list.append(self.base.allocator, decl.link.elf.offset_table_index) catch {};
- self.local_symbols.items[decl.link.local_sym_index].st_info = 0;
+ self.local_symbols.items[decl.link.elf.local_sym_index].st_info = 0;
- decl.link.local_sym_index = 0;
+ decl.link.elf.local_sym_index = 0;
}
// TODO make this logic match freeTextBlock. Maybe abstract the logic out since the same thing
// is desired for both.
- _ = self.dbg_line_fn_free_list.remove(&decl.fn_link);
- if (decl.fn_link.prev) |prev| {
+ _ = self.dbg_line_fn_free_list.remove(&decl.fn_link.elf);
+ if (decl.fn_link.elf.prev) |prev| {
_ = self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {};
- prev.next = decl.fn_link.next;
- if (decl.fn_link.next) |next| {
+ prev.next = decl.fn_link.elf.next;
+ if (decl.fn_link.elf.next) |next| {
next.prev = prev;
} else {
self.dbg_line_fn_last = prev;
}
- } else if (decl.fn_link.next) |next| {
+ } else if (decl.fn_link.elf.next) |next| {
self.dbg_line_fn_first = next;
next.prev = null;
}
- if (self.dbg_line_fn_first == &decl.fn_link) {
+ if (self.dbg_line_fn_first == &decl.fn_link.elf) {
self.dbg_line_fn_first = null;
}
- if (self.dbg_line_fn_last == &decl.fn_link) {
+ if (self.dbg_line_fn_last == &decl.fn_link.elf) {
self.dbg_line_fn_last = null;
}
}
@@ -1870,24 +1880,24 @@ pub const File = struct {
const stt_bits: u8 = if (is_fn) elf.STT_FUNC else elf.STT_OBJECT;
- assert(decl.link.local_sym_index != 0); // Caller forgot to allocateDeclIndexes()
- const local_sym = &self.local_symbols.items[decl.link.local_sym_index];
+ assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes()
+ const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index];
if (local_sym.st_size != 0) {
- const capacity = decl.link.capacity(self.*);
+ const capacity = decl.link.elf.capacity(self.*);
const need_realloc = code.len > capacity or
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growTextBlock(&decl.link, code.len, required_alignment);
+ const vaddr = try self.growTextBlock(&decl.link.elf, code.len, required_alignment);
log.debug(.link, "growing {} from 0x{x} to 0x{x}\n", .{ decl.name, local_sym.st_value, vaddr });
if (vaddr != local_sym.st_value) {
local_sym.st_value = vaddr;
log.debug(.link, " (writing new offset table entry)\n", .{});
- self.offset_table.items[decl.link.offset_table_index] = vaddr;
- try self.writeOffsetTableEntry(decl.link.offset_table_index);
+ self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
+ try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
}
} else if (code.len < local_sym.st_size) {
- self.shrinkTextBlock(&decl.link, code.len);
+ self.shrinkTextBlock(&decl.link.elf, code.len);
}
local_sym.st_size = code.len;
local_sym.st_name = try self.updateString(local_sym.st_name, mem.spanZ(decl.name));
@@ -1895,13 +1905,13 @@ pub const File = struct {
local_sym.st_other = 0;
local_sym.st_shndx = self.text_section_index.?;
// TODO this write could be avoided if no fields of the symbol were changed.
- try self.writeSymbol(decl.link.local_sym_index);
+ try self.writeSymbol(decl.link.elf.local_sym_index);
} else {
const decl_name = mem.spanZ(decl.name);
const name_str_index = try self.makeString(decl_name);
- const vaddr = try self.allocateTextBlock(&decl.link, code.len, required_alignment);
+ const vaddr = try self.allocateTextBlock(&decl.link.elf, code.len, required_alignment);
log.debug(.link, "allocated text block for {} at 0x{x}\n", .{ decl_name, vaddr });
- errdefer self.freeTextBlock(&decl.link);
+ errdefer self.freeTextBlock(&decl.link.elf);
local_sym.* = .{
.st_name = name_str_index,
@@ -1911,10 +1921,10 @@ pub const File = struct {
.st_value = vaddr,
.st_size = code.len,
};
- self.offset_table.items[decl.link.offset_table_index] = vaddr;
+ self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
- try self.writeSymbol(decl.link.local_sym_index);
- try self.writeOffsetTableEntry(decl.link.offset_table_index);
+ try self.writeSymbol(decl.link.elf.local_sym_index);
+ try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
}
const section_offset = local_sym.st_value - self.program_headers.items[self.phdr_load_re_index.?].p_vaddr;
@@ -1941,7 +1951,7 @@ pub const File = struct {
// Now we have the full contents and may allocate a region to store it.
const debug_line_sect = &self.sections.items[self.debug_line_section_index.?];
- const src_fn = &decl.fn_link;
+ const src_fn = &decl.fn_link.elf;
src_fn.len = @intCast(u32, dbg_line_buffer.items.len);
if (self.dbg_line_fn_last) |last| {
if (src_fn.next) |next| {
@@ -2026,8 +2036,8 @@ pub const File = struct {
try self.global_symbols.ensureCapacity(self.base.allocator, self.global_symbols.items.len + exports.len);
const typed_value = decl.typed_value.most_recent.typed_value;
- if (decl.link.local_sym_index == 0) return;
- const decl_sym = self.local_symbols.items[decl.link.local_sym_index];
+ if (decl.link.elf.local_sym_index == 0) return;
+ const decl_sym = self.local_symbols.items[decl.link.elf.local_sym_index];
for (exports) |exp| {
if (exp.options.section) |section_name| {
@@ -2105,7 +2115,7 @@ pub const File = struct {
const casted_line_off = @intCast(u28, line_delta);
const shdr = &self.sections.items[self.debug_line_section_index.?];
- const file_pos = shdr.sh_offset + decl.fn_link.off + self.getRelocDbgLineOff();
+ const file_pos = shdr.sh_offset + decl.fn_link.elf.off + self.getRelocDbgLineOff();
var data: [4]u8 = undefined;
leb128.writeUnsignedFixed(4, &data, casted_line_off);
try self.base.file.?.pwriteAll(&data, file_pos);
From ab483281d31c852e77fe0afc6a623fedd1574546 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 7 Aug 2020 11:50:15 -0700
Subject: [PATCH 014/153] stage1: elide `@intToPtr` alignment safety check for
1-byte alignment
---
src/codegen.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 3f3d80d51d..023c94f245 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -3480,8 +3480,9 @@ static LLVMValueRef ir_render_widen_or_shorten(CodeGen *g, IrExecutableGen *exec
static LLVMValueRef ir_render_int_to_ptr(CodeGen *g, IrExecutableGen *executable, IrInstGenIntToPtr *instruction) {
ZigType *wanted_type = instruction->base.value->type;
LLVMValueRef target_val = ir_llvm_value(g, instruction->target);
+ const uint32_t align_bytes = get_ptr_align(g, wanted_type);
- if (ir_want_runtime_safety(g, &instruction->base)) {
+ if (ir_want_runtime_safety(g, &instruction->base) && align_bytes > 1) {
ZigType *usize = g->builtin_types.entry_usize;
LLVMValueRef zero = LLVMConstNull(usize->llvm_type);
@@ -3498,7 +3499,6 @@ static LLVMValueRef ir_render_int_to_ptr(CodeGen *g, IrExecutableGen *executable
}
{
- const uint32_t align_bytes = get_ptr_align(g, wanted_type);
LLVMValueRef alignment_minus_1 = LLVMConstInt(usize->llvm_type, align_bytes - 1, false);
LLVMValueRef anded_val = LLVMBuildAnd(g->builder, target_val, alignment_minus_1, "");
LLVMValueRef is_ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, anded_val, zero, "");
From 30bace66d4336ebc9f27d68e5807a80ab3b68d67 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 7 Aug 2020 01:15:11 -0700
Subject: [PATCH 015/153] refactor now that stage1 supports anon default struct
field inits
---
src-self-hosted/link.zig | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index be30c1df31..6fd9329188 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -325,17 +325,17 @@ pub const File = struct {
/// local symbols, they cannot be mixed. So we must buffer all the global symbols and
/// write them at the end. These are only the local symbols. The length of this array
/// is the value used for sh_info in the .symtab section.
- local_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
- global_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
+ local_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
+ global_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
- local_symbol_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
- global_symbol_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
- offset_table_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
+ local_symbol_free_list: std.ArrayListUnmanaged(u32) = .{},
+ global_symbol_free_list: std.ArrayListUnmanaged(u32) = .{},
+ offset_table_free_list: std.ArrayListUnmanaged(u32) = .{},
/// Same order as in the file. The value is the absolute vaddr value.
/// If the vaddr of the executable program header changes, the entire
/// offset table needs to be rewritten.
- offset_table: std.ArrayListUnmanaged(u64) = std.ArrayListUnmanaged(u64){},
+ offset_table: std.ArrayListUnmanaged(u64) = .{},
phdr_table_dirty: bool = false,
shdr_table_dirty: bool = false,
From cc17f84cccc540143f3fd19fe32218478d4a0c6f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 7 Aug 2020 22:35:15 -0700
Subject: [PATCH 016/153] std: introduce GeneralPurposeAllocator
`std.GeneralPurposeAllocator` is now available. It is a function that
takes a configuration struct (with default field values) and returns an
allocator. There is a detailed description of this allocator in the
doc comments at the top of the new file.
The main feature of this allocator is that it is *safe*. It
prevents double-free and use-after-free, and it detects leaks.
Some deprecation compile errors are removed.
The Allocator interface gains `old_align` as a new parameter to
`resizeFn`. This is useful to quickly look up allocations.
`std.heap.page_allocator` is improved to use mmap address hints to avoid
obtaining the same virtual address pages when unmapping and mapping
pages. The new general purpose allocator uses the page allocator as its
backing allocator by default.
`std.testing.allocator` is replaced with usage of this new allocator,
which does leak checking, and so the LeakCheckAllocator is retired.
stage1 is improved so that the `@typeInfo` of a pointer has a lazy value
for the alignment of the child type, to avoid false dependency loops
when dealing with pointers to async function frames.
The `std.mem.Allocator` interface is refactored to be in its own file.
`std.Mutex` now exposes the dummy mutex with `std.Mutex.Dummy`.
This allocator is great for debug mode, however it needs some work to
have better performance in release modes. The next step will be setting
up a series of tests in ziglang/gotta-go-fast and then making
improvements to the implementation.
---
lib/std/array_list.zig | 1 +
lib/std/debug.zig | 3 -
lib/std/heap.zig | 54 +-
lib/std/heap/arena_allocator.zig | 2 +-
lib/std/heap/general_purpose_allocator.zig | 920 +++++++++++++++++++++
lib/std/heap/logging_allocator.zig | 16 +-
lib/std/mem.zig | 396 +--------
lib/std/mem/Allocator.zig | 410 +++++++++
lib/std/mutex.zig | 94 ++-
lib/std/special/test_runner.zig | 19 +-
lib/std/testing.zig | 30 +-
lib/std/testing/failing_allocator.zig | 12 +-
lib/std/testing/leak_count_allocator.zig | 51 --
src/ir.cpp | 40 +-
14 files changed, 1497 insertions(+), 551 deletions(-)
create mode 100644 lib/std/heap/general_purpose_allocator.zig
create mode 100644 lib/std/mem/Allocator.zig
delete mode 100644 lib/std/testing/leak_count_allocator.zig
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index aecbc73bfe..e064b38566 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -263,6 +263,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
if (better_capacity >= new_capacity) break;
}
+ // TODO This can be optimized to avoid needlessly copying undefined memory.
const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 3346598ab7..982bada939 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -19,9 +19,6 @@ const windows = std.os.windows;
pub const leb = @import("debug/leb128.zig");
-pub const global_allocator = @compileError("Please switch to std.testing.allocator.");
-pub const failing_allocator = @compileError("Please switch to std.testing.failing_allocator.");
-
pub const runtime_safety = switch (builtin.mode) {
.Debug, .ReleaseSafe => true,
.ReleaseFast, .ReleaseSmall => false,
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index a8ab729413..30ffa57eed 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -12,6 +12,7 @@ const maxInt = std.math.maxInt;
pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
+pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig").GeneralPurposeAllocator;
const Allocator = mem.Allocator;
@@ -53,7 +54,7 @@ fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocato
return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
}
-fn cResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
+fn cResize(self: *Allocator, buf: []u8, old_align: u29, new_len: usize, len_align: u29) Allocator.Error!usize {
if (new_len == 0) {
c.free(buf.ptr);
return 0;
@@ -88,8 +89,6 @@ var wasm_page_allocator_state = Allocator{
.resizeFn = WasmPageAllocator.resize,
};
-pub const direct_allocator = @compileError("deprecated; use std.heap.page_allocator");
-
/// Verifies that the adjusted length will still map to the full length
pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
const aligned_len = mem.alignAllocLen(full_len, len, len_align);
@@ -97,10 +96,13 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
return aligned_len;
}
+/// TODO Utilize this on Windows.
+pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
+
const PageAllocator = struct {
fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
assert(n > 0);
- const alignedLen = mem.alignForward(n, mem.page_size);
+ const aligned_len = mem.alignForward(n, mem.page_size);
if (builtin.os.tag == .windows) {
const w = os.windows;
@@ -112,14 +114,14 @@ const PageAllocator = struct {
// see https://devblogs.microsoft.com/oldnewthing/?p=42223
const addr = w.VirtualAlloc(
null,
- alignedLen,
+ aligned_len,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch return error.OutOfMemory;
// If the allocation is sufficiently aligned, use it.
if (@ptrToInt(addr) & (alignment - 1) == 0) {
- return @ptrCast([*]u8, addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
+ return @ptrCast([*]u8, addr)[0..alignPageAllocLen(aligned_len, n, len_align)];
}
// If it wasn't, actually do an explicitely aligned allocation.
@@ -146,20 +148,24 @@ const PageAllocator = struct {
// until it succeeds.
const ptr = w.VirtualAlloc(
@intToPtr(*c_void, aligned_addr),
- alignedLen,
+ aligned_len,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch continue;
- return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(alignedLen, n, len_align)];
+ return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(aligned_len, n, len_align)];
}
}
- const maxDropLen = alignment - std.math.min(alignment, mem.page_size);
- const allocLen = if (maxDropLen <= alignedLen - n) alignedLen else mem.alignForward(alignedLen + maxDropLen, mem.page_size);
+ const max_drop_len = alignment - std.math.min(alignment, mem.page_size);
+ const alloc_len = if (max_drop_len <= aligned_len - n)
+ aligned_len
+ else
+ mem.alignForward(aligned_len + max_drop_len, mem.page_size);
+ const hint = @atomicLoad(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, .Unordered);
const slice = os.mmap(
- null,
- allocLen,
+ hint,
+ alloc_len,
os.PROT_READ | os.PROT_WRITE,
os.MAP_PRIVATE | os.MAP_ANONYMOUS,
-1,
@@ -168,25 +174,29 @@ const PageAllocator = struct {
assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size));
const aligned_addr = mem.alignForward(@ptrToInt(slice.ptr), alignment);
+ const result_ptr = @alignCast(mem.page_size, @intToPtr([*]u8, aligned_addr));
// Unmap the extra bytes that were only requested in order to guarantee
// that the range of memory we were provided had a proper alignment in
// it somewhere. The extra bytes could be at the beginning, or end, or both.
- const dropLen = aligned_addr - @ptrToInt(slice.ptr);
- if (dropLen != 0) {
- os.munmap(slice[0..dropLen]);
+ const drop_len = aligned_addr - @ptrToInt(slice.ptr);
+ if (drop_len != 0) {
+ os.munmap(slice[0..drop_len]);
}
// Unmap extra pages
- const alignedBufferLen = allocLen - dropLen;
- if (alignedBufferLen > alignedLen) {
- os.munmap(@alignCast(mem.page_size, @intToPtr([*]u8, aligned_addr))[alignedLen..alignedBufferLen]);
+ const aligned_buffer_len = alloc_len - drop_len;
+ if (aligned_buffer_len > aligned_len) {
+ os.munmap(result_ptr[aligned_len..aligned_buffer_len]);
}
- return @intToPtr([*]u8, aligned_addr)[0..alignPageAllocLen(alignedLen, n, len_align)];
+ const new_hint = @alignCast(mem.page_size, result_ptr + aligned_len);
+ _ = @cmpxchgStrong(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
+
+ return result_ptr[0..alignPageAllocLen(aligned_len, n, len_align)];
}
- fn resize(allocator: *Allocator, buf_unaligned: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
+ fn resize(allocator: *Allocator, buf_unaligned: []u8, buf_align: u29, new_size: usize, len_align: u29) Allocator.Error!usize {
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
if (builtin.os.tag == .windows) {
@@ -229,6 +239,7 @@ const PageAllocator = struct {
if (new_size_aligned < buf_aligned_len) {
const ptr = @intToPtr([*]align(mem.page_size) u8, @ptrToInt(buf_unaligned.ptr) + new_size_aligned);
+ // TODO: if the next_mmap_addr_hint is within the unmapped range, update it
os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
if (new_size_aligned == 0)
return 0;
@@ -236,6 +247,7 @@ const PageAllocator = struct {
}
// TODO: call mremap
+ // TODO: if the next_mmap_addr_hint is within the remapped range, update it
return error.OutOfMemory;
}
};
@@ -538,7 +550,7 @@ pub const FixedBufferAllocator = struct {
return result;
}
- fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) Allocator.Error!usize {
+ fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_size: usize, len_align: u29) Allocator.Error!usize {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
assert(self.ownsSlice(buf)); // sanity check
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index a5d8aaea45..4a833bcb28 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -49,7 +49,7 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
- const buf = try self.child_allocator.callAllocFn(len, @alignOf(BufNode), 1);
+ const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1);
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
buf_node.* = BufNode{
.data = buf,
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
new file mode 100644
index 0000000000..207857708e
--- /dev/null
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -0,0 +1,920 @@
+//! # General Purpose Allocator
+//!
+//! ## Design Priorities
+//!
+//! ### `OptimizationMode.debug` and `OptimizationMode.release_safe`:
+//!
+//! * Detect double free, and print stack trace of:
+//! - Where it was first allocated
+//! - Where it was freed the first time
+//! - Where it was freed the second time
+//!
+//! * Detect leaks and print stack trace of:
+//! - Where it was allocated
+//!
+//! * When a page of memory is no longer needed, give it back to resident memory
+//! as soon as possible, so that it causes page faults when used.
+//!
+//! * Do not re-use memory slots, so that memory safety is upheld. For small
+//! allocations, this is handled here; for larger ones it is handled in the
+//! backing allocator (by default `std.heap.page_allocator`).
+//!
+//! * Make pointer math errors unlikely to harm memory from
+//! unrelated allocations.
+//!
+//! * It's OK for these mechanisms to cost some extra overhead bytes.
+//!
+//! * It's OK for performance cost for these mechanisms.
+//!
+//! * Rogue memory writes should not harm the allocator's state.
+//!
+//! * Cross platform. Operates based on a backing allocator which makes it work
+//! everywhere, even freestanding.
+//!
+//! * Compile-time configuration.
+//!
+//! ### `OptimizationMode.release_fast` (note: not much work has gone into this use case yet):
+//!
+//! * Low fragmentation is primary concern
+//! * Performance of worst-case latency is secondary concern
+//! * Performance of average-case latency is next
+//! * Finally, having freed memory unmapped, and pointer math errors unlikely to
+//! harm memory from unrelated allocations are nice-to-haves.
+//!
+//! ### `OptimizationMode.release_small` (note: not much work has gone into this use case yet):
+//!
+//! * Small binary code size of the executable is the primary concern.
+//! * Next, defer to the `.release_fast` priority list.
+//!
+//! ## Basic Design:
+//!
+//! Small allocations are divided into buckets:
+//!
+//! ```
+//! index obj_size
+//! 0 1
+//! 1 2
+//! 2 4
+//! 3 8
+//! 4 16
+//! 5 32
+//! 6 64
+//! 7 128
+//! 8 256
+//! 9 512
+//! 10 1024
+//! 11 2048
+//! ```
+//!
+//! The main allocator state has an array of all the "current" buckets for each
+//! size class. Each slot in the array can be null, meaning the bucket for that
+//! size class is not allocated. When the first object is allocated for a given
+//! size class, it allocates 1 page of memory from the OS. This page is
+//! divided into "slots" - one per allocated object. Along with the page of memory
+//! for object slots, as many pages as necessary are allocated to store the
+//! BucketHeader, followed by "used bits", and two stack traces for each slot
+//! (allocation trace and free trace).
+//!
+//! The "used bits" are 1 bit per slot representing whether the slot is used.
+//! Allocations use the data to iterate to find a free slot. Frees assert that the
+//! corresponding bit is 1 and set it to 0.
+//!
+//! Buckets have prev and next pointers. When there is only one bucket for a given
+//! size class, both prev and next point to itself. When all slots of a bucket are
+//! used, a new bucket is allocated, and enters the doubly linked list. The main
+//! allocator state tracks the "current" bucket for each size class. Leak detection
+//! currently only checks the current bucket.
+//!
+//! Resizing detects if the size class is unchanged or smaller, in which case the same
+//! pointer is returned unmodified. If a larger size class is required,
+//! `error.OutOfMemory` is returned.
+//!
+//! Large objects are allocated directly using the backing allocator and their metadata is stored
+//! in a `std.HashMap` using the backing allocator.
+
+const std = @import("std");
+const math = std.math;
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+const page_size = std.mem.page_size;
+const StackTrace = std.builtin.StackTrace;
+
+/// Integer type for pointing to slots in a small allocation
+const SlotIndex = std.meta.Int(false, math.log2(page_size) + 1);
+
+pub const Config = struct {
+ /// Number of stack frames to capture.
+ stack_trace_frames: usize = if (std.debug.runtime_safety) @as(usize, 6) else @as(usize, 0),
+
+ /// If true, the allocator will have two fields:
+ /// * `total_requested_bytes` which tracks the total allocated bytes of memory requested.
+ /// * `requested_memory_limit` which causes allocations to return `error.OutOfMemory`
+ /// when the `total_requested_bytes` exceeds this limit.
+ /// If false, these fields will be `void`.
+ enable_memory_limit: bool = false,
+
+ /// Whether to enable safety checks.
+ safety: bool = std.debug.runtime_safety,
+
+ /// Whether the allocator may be used simultaneously from multiple threads.
+ thread_safe: bool = !std.builtin.single_threaded,
+};
+
+pub fn GeneralPurposeAllocator(comptime config: Config) type {
+ return struct {
+ allocator: Allocator = Allocator{
+ .allocFn = alloc,
+ .resizeFn = resize,
+ },
+ backing_allocator: *Allocator = std.heap.page_allocator,
+ buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
+ large_allocations: LargeAllocTable = .{},
+
+ total_requested_bytes: @TypeOf(total_requested_bytes_init) = total_requested_bytes_init,
+ requested_memory_limit: @TypeOf(requested_memory_limit_init) = requested_memory_limit_init,
+
+ mutex: @TypeOf(mutex_init) = mutex_init,
+
+ const Self = @This();
+
+ const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
+ const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};
+
+ const mutex_init = if (config.thread_safe) std.Mutex.init() else std.Mutex.Dummy.init();
+
+ const stack_n = config.stack_trace_frames;
+ const one_trace_size = @sizeOf(usize) * stack_n;
+ const traces_per_slot = 2;
+
+ pub const Error = std.mem.Allocator.Error;
+
+ const small_bucket_count = math.log2(page_size);
+ const largest_bucket_object_size = 1 << (small_bucket_count - 1);
+
+ const LargeAlloc = struct {
+ bytes: []u8,
+ stack_addresses: [stack_n]usize,
+
+ fn dumpStackTrace(self: *LargeAlloc) void {
+ var len: usize = 0;
+ while (len < stack_n and self.stack_addresses[len] != 0) {
+ len += 1;
+ }
+ const stack_trace = StackTrace{
+ .instruction_addresses = &self.stack_addresses,
+ .index = len,
+ };
+ std.debug.dumpStackTrace(stack_trace);
+ }
+ };
+ const LargeAllocTable = std.HashMapUnmanaged(usize, LargeAlloc, hash_addr, eql_addr, false);
+
+ // Bucket: In memory, in order:
+ // * BucketHeader
+ // * bucket_used_bits: [N]u8, // 1 bit for every slot; 1 byte for every 8 slots
+ // * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
+
+ const BucketHeader = struct {
+ prev: *BucketHeader,
+ next: *BucketHeader,
+ page: [*]align(page_size) u8,
+ alloc_cursor: SlotIndex,
+ used_count: SlotIndex,
+
+ fn usedBits(bucket: *BucketHeader, index: usize) *u8 {
+ return @intToPtr(*u8, @ptrToInt(bucket) + @sizeOf(BucketHeader) + index);
+ }
+
+ fn stackTracePtr(
+ bucket: *BucketHeader,
+ size_class: usize,
+ slot_index: SlotIndex,
+ trace_kind: TraceKind,
+ ) *[stack_n]usize {
+ const start_ptr = @ptrCast([*]u8, bucket) + bucketStackFramesStart(size_class);
+ const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
+ @enumToInt(trace_kind) * @as(usize, one_trace_size);
+ return @ptrCast(*[stack_n]usize, @alignCast(@alignOf(usize), addr));
+ }
+
+ fn captureStackTrace(
+ bucket: *BucketHeader,
+ return_address: usize,
+ size_class: usize,
+ slot_index: SlotIndex,
+ trace_kind: TraceKind,
+ ) void {
+ // Initialize them to 0. When determining the count we must look
+ // for non zero addresses.
+ const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
+ collectStackTrace(return_address, stack_addresses);
+ }
+ };
+
+ fn bucketStackTrace(
+ bucket: *BucketHeader,
+ size_class: usize,
+ slot_index: SlotIndex,
+ trace_kind: TraceKind,
+ ) StackTrace {
+ const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
+ var len: usize = 0;
+ while (len < stack_n and stack_addresses[len] != 0) {
+ len += 1;
+ }
+ return StackTrace{
+ .instruction_addresses = stack_addresses,
+ .index = len,
+ };
+ }
+
+ fn bucketStackFramesStart(size_class: usize) usize {
+ return std.mem.alignForward(
+ @sizeOf(BucketHeader) + usedBitsCount(size_class),
+ @alignOf(usize),
+ );
+ }
+
+ fn bucketSize(size_class: usize) usize {
+ const slot_count = @divExact(page_size, size_class);
+ return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
+ }
+
+ fn usedBitsCount(size_class: usize) usize {
+ const slot_count = @divExact(page_size, size_class);
+ if (slot_count < 8) return 1;
+ return @divExact(slot_count, 8);
+ }
+
+ fn detectLeaksInBucket(
+ bucket: *BucketHeader,
+ size_class: usize,
+ used_bits_count: usize,
+ ) void {
+ var used_bits_byte: usize = 0;
+ while (used_bits_byte < used_bits_count) : (used_bits_byte += 1) {
+ const used_byte = bucket.usedBits(used_bits_byte).*;
+ if (used_byte != 0) {
+ var bit_index: u3 = 0;
+ while (true) : (bit_index += 1) {
+ const is_used = @truncate(u1, used_byte >> bit_index) != 0;
+ if (is_used) {
+ std.debug.print("\nMemory leak detected:\n", .{});
+ const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index);
+ const stack_trace = bucketStackTrace(
+ bucket,
+ size_class,
+ slot_index,
+ .alloc,
+ );
+ std.debug.dumpStackTrace(stack_trace);
+ }
+ if (bit_index == math.maxInt(u3))
+ break;
+ }
+ }
+ }
+ }
+
+ pub fn deinit(self: *Self) void {
+ for (self.buckets) |optional_bucket, bucket_i| {
+ const first_bucket = optional_bucket orelse continue;
+ const size_class = @as(usize, 1) << @intCast(u6, bucket_i);
+ const used_bits_count = usedBitsCount(size_class);
+ var bucket = first_bucket;
+ while (true) {
+ detectLeaksInBucket(bucket, size_class, used_bits_count);
+ bucket = bucket.next;
+ if (bucket == first_bucket)
+ break;
+ }
+ }
+ for (self.large_allocations.items()) |*large_alloc| {
+ std.debug.print("\nMemory leak detected:\n", .{});
+ large_alloc.value.dumpStackTrace();
+ }
+ self.large_allocations.deinit(self.backing_allocator);
+ self.* = undefined;
+ }
+
+ fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
+ std.mem.set(usize, addresses, 0);
+ var stack_trace = StackTrace{
+ .instruction_addresses = addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(first_trace_addr, &stack_trace);
+ }
+
+ fn allocSlot(self: *Self, size_class: usize, trace_addr: usize) Error![*]u8 {
+ const bucket_index = math.log2(size_class);
+ const first_bucket = self.buckets[bucket_index] orelse try self.createBucket(
+ size_class,
+ bucket_index,
+ );
+ var bucket = first_bucket;
+ const slot_count = @divExact(page_size, size_class);
+ while (bucket.alloc_cursor == slot_count) {
+ const prev_bucket = bucket;
+ bucket = prev_bucket.next;
+ if (bucket == first_bucket) {
+ // make a new one
+ bucket = try self.createBucket(size_class, bucket_index);
+ bucket.prev = prev_bucket;
+ bucket.next = prev_bucket.next;
+ prev_bucket.next = bucket;
+ bucket.next.prev = bucket;
+ }
+ }
+ // change the allocator's current bucket to be this one
+ self.buckets[bucket_index] = bucket;
+
+ const slot_index = bucket.alloc_cursor;
+ bucket.alloc_cursor += 1;
+
+ var used_bits_byte = bucket.usedBits(slot_index / 8);
+ const used_bit_index: u3 = @intCast(u3, slot_index % 8); // TODO cast should be unnecessary
+ used_bits_byte.* |= (@as(u8, 1) << used_bit_index);
+ bucket.used_count += 1;
+ bucket.captureStackTrace(trace_addr, size_class, slot_index, .alloc);
+ return bucket.page + slot_index * size_class;
+ }
+
+ fn searchBucket(
+ self: *Self,
+ bucket_index: usize,
+ addr: usize,
+ ) ?*BucketHeader {
+ const first_bucket = self.buckets[bucket_index] orelse return null;
+ var bucket = first_bucket;
+ while (true) {
+ const in_bucket_range = (addr >= @ptrToInt(bucket.page) and
+ addr < @ptrToInt(bucket.page) + page_size);
+ if (in_bucket_range) return bucket;
+ bucket = bucket.prev;
+ if (bucket == first_bucket) {
+ return null;
+ }
+ self.buckets[bucket_index] = bucket;
+ }
+ }
+
+ fn freeSlot(
+ self: *Self,
+ bucket: *BucketHeader,
+ bucket_index: usize,
+ size_class: usize,
+ slot_index: SlotIndex,
+ used_byte: *u8,
+ used_bit_index: u3,
+ trace_addr: usize,
+ ) void {
+ // Capture stack trace to be the "first free", in case a double free happens.
+ bucket.captureStackTrace(@returnAddress(), size_class, slot_index, .free);
+
+ used_byte.* &= ~(@as(u8, 1) << used_bit_index);
+ bucket.used_count -= 1;
+ if (bucket.used_count == 0) {
+ if (bucket.next == bucket) {
+ // it's the only bucket and therefore the current one
+ self.buckets[bucket_index] = null;
+ } else {
+ bucket.next.prev = bucket.prev;
+ bucket.prev.next = bucket.next;
+ self.buckets[bucket_index] = bucket.prev;
+ }
+ self.backing_allocator.free(bucket.page[0..page_size]);
+ const bucket_size = bucketSize(size_class);
+ const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size];
+ self.backing_allocator.free(bucket_slice);
+ } else {
+ // TODO Set the slot data to undefined.
+ // Related: https://github.com/ziglang/zig/issues/4298
+ }
+ }
+
+ /// This function assumes the object is in the large object storage regardless
+ /// of the parameters.
+ fn resizeLarge(
+ self: *Self,
+ old_mem: []u8,
+ old_align: u29,
+ new_size: usize,
+ len_align: u29,
+ return_addr: usize,
+ ) Error!usize {
+ const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
+ if (config.safety) {
+ @panic("Invalid free");
+ } else {
+ unreachable;
+ }
+ };
+
+ if (config.safety and old_mem.len != entry.value.bytes.len) {
+ std.debug.print("\nAllocation size {} bytes does not match free size {}. Allocated here:\n", .{
+ entry.value.bytes.len,
+ old_mem.len,
+ });
+ entry.value.dumpStackTrace();
+
+ @panic("\nFree here:");
+ }
+
+ const result_len = try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align);
+
+ if (result_len == 0) {
+ self.large_allocations.removeAssertDiscard(@ptrToInt(old_mem.ptr));
+ return 0;
+ }
+
+ entry.value.bytes = old_mem.ptr[0..result_len];
+ collectStackTrace(return_addr, &entry.value.stack_addresses);
+ return result_len;
+ }
+
+ pub fn setRequestedMemoryLimit(self: *Self, limit: usize) void {
+ self.requested_memory_limit = limit;
+ }
+
+ fn resize(
+ allocator: *Allocator,
+ old_mem: []u8,
+ old_align: u29,
+ new_size: usize,
+ len_align: u29,
+ ) Error!usize {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+
+ const held = self.mutex.acquire();
+ defer held.release();
+
+ const prev_req_bytes = self.total_requested_bytes;
+ if (config.enable_memory_limit) {
+ const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
+ if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
+ return error.OutOfMemory;
+ }
+ self.total_requested_bytes = new_req_bytes;
+ }
+ errdefer if (config.enable_memory_limit) {
+ self.total_requested_bytes = prev_req_bytes;
+ };
+
+ assert(old_mem.len != 0);
+
+ const aligned_size = math.max(old_mem.len, old_align);
+ if (aligned_size > largest_bucket_object_size) {
+ return self.resizeLarge(old_mem, old_align, new_size, len_align, @returnAddress());
+ }
+ const size_class_hint = up_to_nearest_power_of_2(usize, aligned_size);
+
+ var bucket_index = math.log2(size_class_hint);
+ var size_class: usize = size_class_hint;
+ const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
+ if (self.searchBucket(bucket_index, @ptrToInt(old_mem.ptr))) |bucket| {
+ break bucket;
+ }
+ size_class *= 2;
+ } else {
+ return self.resizeLarge(old_mem, old_align, new_size, len_align, @returnAddress());
+ };
+ const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page);
+ const slot_index = @intCast(SlotIndex, byte_offset / size_class);
+ const used_byte_index = slot_index / 8;
+ const used_bit_index = @intCast(u3, slot_index % 8);
+ const used_byte = bucket.usedBits(used_byte_index);
+ const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
+ if (!is_used) {
+ if (config.safety) {
+ // print allocation stack trace
+ std.debug.print("\nDouble free detected, allocated here:\n", .{});
+ const alloc_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
+ std.debug.dumpStackTrace(alloc_stack_trace);
+ std.debug.print("\nFirst free here:\n", .{});
+ const free_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .free);
+ std.debug.dumpStackTrace(free_stack_trace);
+ @panic("\nSecond free here:");
+ } else {
+ unreachable;
+ }
+ }
+ if (new_size == 0) {
+ self.freeSlot(bucket, bucket_index, size_class, slot_index, used_byte, used_bit_index, @returnAddress());
+ return @as(usize, 0);
+ }
+ const new_aligned_size = math.max(new_size, old_align);
+ const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
+ if (new_size_class <= size_class) {
+ return new_size;
+ }
+ return error.OutOfMemory;
+ }
+
+ fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8 {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+
+ const held = self.mutex.acquire();
+ defer held.release();
+
+ const prev_req_bytes = self.total_requested_bytes;
+ if (config.enable_memory_limit) {
+ const new_req_bytes = prev_req_bytes + len;
+ if (new_req_bytes > self.requested_memory_limit) {
+ return error.OutOfMemory;
+ }
+ self.total_requested_bytes = new_req_bytes;
+ }
+ errdefer if (config.enable_memory_limit) {
+ self.total_requested_bytes = prev_req_bytes;
+ };
+
+ const new_aligned_size = math.max(len, ptr_align);
+ if (new_aligned_size > largest_bucket_object_size) {
+ try self.large_allocations.ensureCapacity(
+ self.backing_allocator,
+ self.large_allocations.entries.items.len + 1,
+ );
+
+ const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align);
+
+ const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
+ assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
+ gop.entry.value.bytes = slice;
+ collectStackTrace(@returnAddress(), &gop.entry.value.stack_addresses);
+
+ return slice;
+ } else {
+ const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
+ const ptr = try self.allocSlot(new_size_class, @returnAddress());
+ return ptr[0..len];
+ }
+ }
+
+ fn createBucket(self: *Self, size_class: usize, bucket_index: usize) Error!*BucketHeader {
+ const page = try self.backing_allocator.allocAdvanced(u8, page_size, page_size, .exact);
+ errdefer self.backing_allocator.free(page);
+
+ const bucket_size = bucketSize(size_class);
+ const bucket_bytes = try self.backing_allocator.allocAdvanced(u8, @alignOf(BucketHeader), bucket_size, .exact);
+ const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr);
+ ptr.* = BucketHeader{
+ .prev = ptr,
+ .next = ptr,
+ .page = page.ptr,
+ .alloc_cursor = 0,
+ .used_count = 0,
+ };
+ self.buckets[bucket_index] = ptr;
+ // Set the used bits to all zeroes
+ @memset(@as(*[1]u8, ptr.usedBits(0)), 0, usedBitsCount(size_class));
+ return ptr;
+ }
+ };
+}
+
+const TraceKind = enum {
+ alloc,
+ free,
+};
+
+fn up_to_nearest_power_of_2(comptime T: type, n: T) T {
+ var power: T = 1;
+ while (power < n)
+ power *= 2;
+ return power;
+}
+
+fn hash_addr(addr: usize) u32 {
+ if (@sizeOf(usize) == @sizeOf(u32))
+ return addr;
+ comptime assert(@sizeOf(usize) == 8);
+ return @intCast(u32, addr >> 32) ^ @truncate(u32, addr);
+}
+
+fn eql_addr(a: usize, b: usize) bool {
+ return a == b;
+}
+
+const test_config = Config{};
+
+test "small allocations - free in same order" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var list = std.ArrayList(*u64).init(std.testing.allocator);
+ defer list.deinit();
+
+ var i: usize = 0;
+ while (i < 513) : (i += 1) {
+ const ptr = try allocator.create(u64);
+ try list.append(ptr);
+ }
+
+ for (list.items) |ptr| {
+ allocator.destroy(ptr);
+ }
+}
+
+test "small allocations - free in reverse order" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var list = std.ArrayList(*u64).init(std.testing.allocator);
+ defer list.deinit();
+
+ var i: usize = 0;
+ while (i < 513) : (i += 1) {
+ const ptr = try allocator.create(u64);
+ try list.append(ptr);
+ }
+
+ while (list.popOrNull()) |ptr| {
+ allocator.destroy(ptr);
+ }
+}
+
+test "large allocations" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ const ptr1 = try allocator.alloc(u64, 42768);
+ const ptr2 = try allocator.alloc(u64, 52768);
+ allocator.free(ptr1);
+ const ptr3 = try allocator.alloc(u64, 62768);
+ allocator.free(ptr3);
+ allocator.free(ptr2);
+}
+
+test "realloc" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
+ defer allocator.free(slice);
+ slice[0] = 0x12;
+
+ // This reallocation should keep its pointer address.
+ const old_slice = slice;
+ slice = try allocator.realloc(slice, 2);
+ assert(old_slice.ptr == slice.ptr);
+ assert(slice[0] == 0x12);
+ slice[1] = 0x34;
+
+ // This requires upgrading to a larger size class
+ slice = try allocator.realloc(slice, 17);
+ assert(slice[0] == 0x12);
+ assert(slice[1] == 0x34);
+}
+
+test "shrink" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var slice = try allocator.alloc(u8, 20);
+ defer allocator.free(slice);
+
+ std.mem.set(u8, slice, 0x11);
+
+ slice = allocator.shrink(slice, 17);
+
+ for (slice) |b| {
+ assert(b == 0x11);
+ }
+
+ slice = allocator.shrink(slice, 16);
+
+ for (slice) |b| {
+ assert(b == 0x11);
+ }
+}
+
+test "large object - grow" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
+ defer allocator.free(slice1);
+
+ var old = slice1;
+ slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
+ assert(slice1.ptr == old.ptr);
+
+ slice1 = try allocator.realloc(slice1, page_size * 2);
+ assert(slice1.ptr == old.ptr);
+
+ slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
+}
+
+test "realloc small object to large object" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var slice = try allocator.alloc(u8, 70);
+ defer allocator.free(slice);
+ slice[0] = 0x12;
+ slice[60] = 0x34;
+
+ // This requires upgrading to a large object
+ const large_object_size = page_size * 2 + 50;
+ slice = try allocator.realloc(slice, large_object_size);
+ assert(slice[0] == 0x12);
+ assert(slice[60] == 0x34);
+}
+
+test "shrink large object to large object" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var slice = try allocator.alloc(u8, page_size * 2 + 50);
+ defer allocator.free(slice);
+ slice[0] = 0x12;
+ slice[60] = 0x34;
+
+ slice = try allocator.resize(slice, page_size * 2 + 1);
+ assert(slice[0] == 0x12);
+ assert(slice[60] == 0x34);
+
+ slice = allocator.shrink(slice, page_size * 2 + 1);
+ assert(slice[0] == 0x12);
+ assert(slice[60] == 0x34);
+
+ slice = try allocator.realloc(slice, page_size * 2);
+ assert(slice[0] == 0x12);
+ assert(slice[60] == 0x34);
+}
+
+test "shrink large object to large object with larger alignment" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var debug_buffer: [1000]u8 = undefined;
+ const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
+
+ const alloc_size = page_size * 2 + 50;
+ var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+ defer allocator.free(slice);
+
+ var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
+ while (isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
+ try stuff_to_free.append(slice);
+ slice = try allocator.alignedAlloc(u8, 16, alloc_size);
+ }
+ while (stuff_to_free.popOrNull()) |item| {
+ allocator.free(item);
+ }
+ slice[0] = 0x12;
+ slice[60] = 0x34;
+
+ slice = try allocator.alignedRealloc(slice, page_size * 2, alloc_size / 2);
+ assert(slice[0] == 0x12);
+ assert(slice[60] == 0x34);
+}
+
+test "realloc large object to small object" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var slice = try allocator.alloc(u8, page_size * 2 + 50);
+ defer allocator.free(slice);
+ slice[0] = 0x12;
+ slice[16] = 0x34;
+
+ slice = try allocator.realloc(slice, 19);
+ assert(slice[0] == 0x12);
+ assert(slice[16] == 0x34);
+}
+
+test "non-page-allocator backing allocator" {
+ var gpda = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ const ptr = try allocator.create(i32);
+ defer allocator.destroy(ptr);
+}
+
+test "realloc large object to larger alignment" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var debug_buffer: [1000]u8 = undefined;
+ const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
+
+ var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
+ defer allocator.free(slice);
+
+ var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
+ while (isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
+ try stuff_to_free.append(slice);
+ slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
+ }
+ while (stuff_to_free.popOrNull()) |item| {
+ allocator.free(item);
+ }
+ slice[0] = 0x12;
+ slice[16] = 0x34;
+
+ slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 100);
+ assert(slice[0] == 0x12);
+ assert(slice[16] == 0x34);
+
+ slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 25);
+ assert(slice[0] == 0x12);
+ assert(slice[16] == 0x34);
+
+ slice = try allocator.alignedRealloc(slice, page_size * 2, page_size * 2 + 100);
+ assert(slice[0] == 0x12);
+ assert(slice[16] == 0x34);
+}
+
+fn isAligned(addr: usize, alignment: usize) bool {
+ // 000010000 // example addr
+ // 000001111 // subtract 1
+ // 111110000 // binary not
+ const aligned_addr = (addr & ~(alignment - 1));
+ return aligned_addr == addr;
+}
+
+test "isAligned works" {
+ assert(isAligned(0, 4));
+ assert(isAligned(1, 1));
+ assert(isAligned(2, 1));
+ assert(isAligned(2, 2));
+ assert(!isAligned(2, 4));
+ assert(isAligned(3, 1));
+ assert(!isAligned(3, 2));
+ assert(!isAligned(3, 4));
+ assert(isAligned(4, 4));
+ assert(isAligned(4, 2));
+ assert(isAligned(4, 1));
+ assert(!isAligned(4, 8));
+ assert(!isAligned(4, 16));
+}
+
+test "large object shrinks to small but allocation fails during shrink" {
+ var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
+ var gpda = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ var slice = try allocator.alloc(u8, page_size * 2 + 50);
+ defer allocator.free(slice);
+ slice[0] = 0x12;
+ slice[3] = 0x34;
+
+ // Next allocation will fail in the backing allocator of the GeneralPurposeAllocator
+
+ slice = allocator.shrink(slice, 4);
+ assert(slice[0] == 0x12);
+ assert(slice[3] == 0x34);
+}
+
+test "objects of size 1024 and 2048" {
+ var gpda = GeneralPurposeAllocator(test_config){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ const slice = try allocator.alloc(u8, 1025);
+ const slice2 = try allocator.alloc(u8, 3000);
+
+ allocator.free(slice);
+ allocator.free(slice2);
+}
+
+test "setting a memory cap" {
+ var gpda = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
+ defer gpda.deinit();
+ const allocator = &gpda.allocator;
+
+ gpda.setRequestedMemoryLimit(1010);
+
+ const small = try allocator.create(i32);
+ assert(gpda.total_requested_bytes == 4);
+
+ const big = try allocator.alloc(u8, 1000);
+ assert(gpda.total_requested_bytes == 1004);
+
+ std.testing.expectError(error.OutOfMemory, allocator.create(u64));
+
+ allocator.destroy(small);
+ assert(gpda.total_requested_bytes == 1000);
+
+ allocator.free(big);
+ assert(gpda.total_requested_bytes == 0);
+
+ const exact = try allocator.alloc(u8, 1010);
+ assert(gpda.total_requested_bytes == 1010);
+ allocator.free(exact);
+}
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index d3055c75ee..5f91efa10a 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -26,7 +26,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
self.out_stream.print("alloc : {}", .{len}) catch {};
- const result = self.parent_allocator.callAllocFn(len, ptr_align, len_align);
+ const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align);
if (result) |buff| {
self.out_stream.print(" success!\n", .{}) catch {};
} else |err| {
@@ -35,7 +35,13 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
return result;
}
- fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
+ fn resize(
+ allocator: *Allocator,
+ buf: []u8,
+ buf_align: u29,
+ new_len: usize,
+ len_align: u29,
+ ) error{OutOfMemory}!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);
if (new_len == 0) {
self.out_stream.print("free : {}\n", .{buf.len}) catch {};
@@ -44,7 +50,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
} else {
self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
}
- if (self.parent_allocator.callResizeFn(buf, new_len, len_align)) |resized_len| {
+ if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align)) |resized_len| {
if (new_len > buf.len) {
self.out_stream.print(" success!\n", .{}) catch {};
}
@@ -74,9 +80,9 @@ test "LoggingAllocator" {
const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator;
var a = try allocator.alloc(u8, 10);
- a.len = allocator.shrinkBytes(a, 5, 0);
+ a.len = allocator.shrinkBytes(a, 1, 5, 0);
std.debug.assert(a.len == 5);
- std.testing.expectError(error.OutOfMemory, allocator.callResizeFn(a, 20, 0));
+ std.testing.expectError(error.OutOfMemory, allocator.resizeFn(allocator, a, 1, 20, 0));
allocator.free(a);
std.testing.expectEqualSlices(u8,
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 2fb364b340..a8ca09fb74 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -8,391 +8,13 @@ const meta = std.meta;
const trait = meta.trait;
const testing = std.testing;
-// https://github.com/ziglang/zig/issues/2564
+/// https://github.com/ziglang/zig/issues/2564
pub const page_size = switch (builtin.arch) {
.wasm32, .wasm64 => 64 * 1024,
else => 4 * 1024,
};
-pub const Allocator = struct {
- pub const Error = error{OutOfMemory};
-
- /// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
- ///
- /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
- /// otherwise, the length must be aligned to `len_align`.
- ///
- /// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
- allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,
-
- /// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
- /// length returned by `allocFn` or `resizeFn`.
- ///
- /// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
- /// longer be passed to `resizeFn`.
- ///
- /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
- /// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be
- /// unmodified and error.OutOfMemory MUST be returned.
- ///
- /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
- /// otherwise, the length must be aligned to `len_align`.
- ///
- /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
- resizeFn: fn (self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize,
-
- pub fn callAllocFn(self: *Allocator, new_len: usize, alignment: u29, len_align: u29) Error![]u8 {
- return self.allocFn(self, new_len, alignment, len_align);
- }
-
- pub fn callResizeFn(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
- return self.resizeFn(self, buf, new_len, len_align);
- }
-
- /// Set to resizeFn if in-place resize is not supported.
- pub fn noResize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) Error!usize {
- if (new_len > buf.len)
- return error.OutOfMemory;
- return new_len;
- }
-
- /// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
- /// error.OutOfMemory should be impossible.
- pub fn shrinkBytes(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) usize {
- assert(new_len <= buf.len);
- return self.callResizeFn(buf, new_len, len_align) catch unreachable;
- }
-
- /// Realloc is used to modify the size or alignment of an existing allocation,
- /// as well as to provide the allocator with an opportunity to move an allocation
- /// to a better location.
- /// When the size/alignment is greater than the previous allocation, this function
- /// returns `error.OutOfMemory` when the requested new allocation could not be granted.
- /// When the size/alignment is less than or equal to the previous allocation,
- /// this function returns `error.OutOfMemory` when the allocator decides the client
- /// would be better off keeping the extra alignment/size. Clients will call
- /// `callResizeFn` when they require the allocator to track a new alignment/size,
- /// and so this function should only return success when the allocator considers
- /// the reallocation desirable from the allocator's perspective.
- /// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
- /// reallocation failure, even when `new_n` <= `old_mem.len`. A `FixedBufferAllocator`
- /// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment
- /// is less than or equal to the old allocation, because it cannot reclaim the memory,
- /// and thus the `std.ArrayList` would be better off retaining its capacity.
- /// When `reallocFn` returns,
- /// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
- /// as `old_mem` was when `reallocFn` is called. The bytes of
- /// `return_value[old_mem.len..]` have undefined values.
- /// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
- fn reallocBytes(
- self: *Allocator,
- /// Guaranteed to be the same as what was returned from most recent call to
- /// `allocFn` or `resizeFn`.
- /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
- /// is guaranteed to be >= 1.
- old_mem: []u8,
- /// If `old_mem.len == 0` then this is `undefined`, otherwise:
- /// Guaranteed to be the same as what was passed to `allocFn`.
- /// Guaranteed to be >= 1.
- /// Guaranteed to be a power of 2.
- old_alignment: u29,
- /// If `new_byte_count` is 0 then this is a free and it is guaranteed that
- /// `old_mem.len != 0`.
- new_byte_count: usize,
- /// Guaranteed to be >= 1.
- /// Guaranteed to be a power of 2.
- /// Returned slice's pointer must have this alignment.
- new_alignment: u29,
- /// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly
- /// non-zero means the length of the returned slice must be aligned by `len_align`
- /// `new_len` must be aligned by `len_align`
- len_align: u29,
- ) Error![]u8 {
- if (old_mem.len == 0) {
- const new_mem = try self.callAllocFn(new_byte_count, new_alignment, len_align);
- @memset(new_mem.ptr, undefined, new_byte_count);
- return new_mem;
- }
-
- if (isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
- if (new_byte_count <= old_mem.len) {
- const shrunk_len = self.shrinkBytes(old_mem, new_byte_count, len_align);
- return old_mem.ptr[0..shrunk_len];
- }
- if (self.callResizeFn(old_mem, new_byte_count, len_align)) |resized_len| {
- assert(resized_len >= new_byte_count);
- @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
- return old_mem.ptr[0..resized_len];
- } else |_| {}
- }
- if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
- return error.OutOfMemory;
- }
- return self.moveBytes(old_mem, new_byte_count, new_alignment, len_align);
- }
-
- /// Move the given memory to a new location in the given allocator to accomodate a new
- /// size and alignment.
- fn moveBytes(self: *Allocator, old_mem: []u8, new_len: usize, new_alignment: u29, len_align: u29) Error![]u8 {
- assert(old_mem.len > 0);
- assert(new_len > 0);
- const new_mem = try self.callAllocFn(new_len, new_alignment, len_align);
- @memcpy(new_mem.ptr, old_mem.ptr, std.math.min(new_len, old_mem.len));
- // DISABLED TO AVOID BUGS IN TRANSLATE C
- // use './zig build test-translate-c' to reproduce, some of the symbols in the
- // generated C code will be a sequence of 0xaa (the undefined value), meaning
- // it is printing data that has been freed
- //@memset(old_mem.ptr, undefined, old_mem.len);
- _ = self.shrinkBytes(old_mem, 0, 0);
- return new_mem;
- }
-
- /// Returns a pointer to undefined memory.
- /// Call `destroy` with the result to free the memory.
- pub fn create(self: *Allocator, comptime T: type) Error!*T {
- if (@sizeOf(T) == 0) return &(T{});
- const slice = try self.alloc(T, 1);
- return &slice[0];
- }
-
- /// `ptr` should be the return value of `create`, or otherwise
- /// have the same address and alignment property.
- pub fn destroy(self: *Allocator, ptr: anytype) void {
- const T = @TypeOf(ptr).Child;
- if (@sizeOf(T) == 0) return;
- const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
- _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], 0, 0);
- }
-
- /// Allocates an array of `n` items of type `T` and sets all the
- /// items to `undefined`. Depending on the Allocator
- /// implementation, it may be required to call `free` once the
- /// memory is no longer needed, to avoid a resource leak. If the
- /// `Allocator` implementation is unknown, then correct code will
- /// call `free` when done.
- ///
- /// For allocating a single item, see `create`.
- pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
- return self.alignedAlloc(T, null, n);
- }
-
- pub fn allocWithOptions(
- self: *Allocator,
- comptime Elem: type,
- n: usize,
- /// null means naturally aligned
- comptime optional_alignment: ?u29,
- comptime optional_sentinel: ?Elem,
- ) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
- if (optional_sentinel) |sentinel| {
- const ptr = try self.alignedAlloc(Elem, optional_alignment, n + 1);
- ptr[n] = sentinel;
- return ptr[0..n :sentinel];
- } else {
- return self.alignedAlloc(Elem, optional_alignment, n);
- }
- }
-
- fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, comptime sentinel: ?Elem) type {
- if (sentinel) |s| {
- return [:s]align(alignment orelse @alignOf(Elem)) Elem;
- } else {
- return []align(alignment orelse @alignOf(Elem)) Elem;
- }
- }
-
- /// Allocates an array of `n + 1` items of type `T` and sets the first `n`
- /// items to `undefined` and the last item to `sentinel`. Depending on the
- /// Allocator implementation, it may be required to call `free` once the
- /// memory is no longer needed, to avoid a resource leak. If the
- /// `Allocator` implementation is unknown, then correct code will
- /// call `free` when done.
- ///
- /// For allocating a single item, see `create`.
- ///
- /// Deprecated; use `allocWithOptions`.
- pub fn allocSentinel(self: *Allocator, comptime Elem: type, n: usize, comptime sentinel: Elem) Error![:sentinel]Elem {
- return self.allocWithOptions(Elem, n, null, sentinel);
- }
-
- /// Deprecated: use `allocAdvanced`
- pub fn alignedAlloc(
- self: *Allocator,
- comptime T: type,
- /// null means naturally aligned
- comptime alignment: ?u29,
- n: usize,
- ) Error![]align(alignment orelse @alignOf(T)) T {
- return self.allocAdvanced(T, alignment, n, .exact);
- }
-
- const Exact = enum { exact, at_least };
- pub fn allocAdvanced(
- self: *Allocator,
- comptime T: type,
- /// null means naturally aligned
- comptime alignment: ?u29,
- n: usize,
- exact: Exact,
- ) Error![]align(alignment orelse @alignOf(T)) T {
- const a = if (alignment) |a| blk: {
- if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact);
- break :blk a;
- } else @alignOf(T);
-
- if (n == 0) {
- return @as([*]align(a) T, undefined)[0..0];
- }
-
- const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
- // TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
- // access certain type information about T without creating a circular dependency in async
- // functions that heap-allocate their own frame with @Frame(func).
- const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
- const byte_slice = try self.callAllocFn(byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
- switch (exact) {
- .exact => assert(byte_slice.len == byte_count),
- .at_least => assert(byte_slice.len >= byte_count),
- }
- @memset(byte_slice.ptr, undefined, byte_slice.len);
- if (alignment == null) {
- // This if block is a workaround (see comment above)
- return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))];
- } else {
- return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
- }
- }
-
- /// This function requests a new byte size for an existing allocation,
- /// which can be larger, smaller, or the same size as the old memory
- /// allocation.
- /// This function is preferred over `shrink`, because it can fail, even
- /// when shrinking. This gives the allocator a chance to perform a
- /// cheap shrink operation if possible, or otherwise return OutOfMemory,
- /// indicating that the caller should keep their capacity, for example
- /// in `std.ArrayList.shrink`.
- /// If you need guaranteed success, call `shrink`.
- /// If `new_n` is 0, this is the same as `free` and it always succeeds.
- pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
- const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
- break :t Error![]align(Slice.alignment) Slice.child;
- } {
- const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
- return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact);
- }
-
- pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
- const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
- break :t Error![]align(Slice.alignment) Slice.child;
- } {
- const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
- return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least);
- }
-
- // Deprecated: use `reallocAdvanced`
- pub fn alignedRealloc(
- self: *Allocator,
- old_mem: anytype,
- comptime new_alignment: u29,
- new_n: usize,
- ) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
- return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact);
- }
-
- /// This is the same as `realloc`, except caller may additionally request
- /// a new alignment, which can be larger, smaller, or the same as the old
- /// allocation.
- pub fn reallocAdvanced(
- self: *Allocator,
- old_mem: anytype,
- comptime new_alignment: u29,
- new_n: usize,
- exact: Exact,
- ) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
- const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
- const T = Slice.child;
- if (old_mem.len == 0) {
- return self.allocAdvanced(T, new_alignment, new_n, exact);
- }
- if (new_n == 0) {
- self.free(old_mem);
- return @as([*]align(new_alignment) T, undefined)[0..0];
- }
-
- const old_byte_slice = mem.sliceAsBytes(old_mem);
- const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
- // Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
- const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, if (exact == .exact) @as(u29, 0) else @sizeOf(T));
- return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
- }
-
- /// Prefer calling realloc to shrink if you can tolerate failure, such as
- /// in an ArrayList data structure with a storage capacity.
- /// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
- /// Returned slice has same alignment as old_mem.
- /// Shrinking to 0 is the same as calling `free`.
- pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
- const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
- break :t []align(Slice.alignment) Slice.child;
- } {
- const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
- return self.alignedShrink(old_mem, old_alignment, new_n);
- }
-
- /// This is the same as `shrink`, except caller may additionally request
- /// a new alignment, which must be smaller or the same as the old
- /// allocation.
- pub fn alignedShrink(
- self: *Allocator,
- old_mem: anytype,
- comptime new_alignment: u29,
- new_n: usize,
- ) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
- const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
- const T = Slice.child;
-
- if (new_n == old_mem.len)
- return old_mem;
- assert(new_n < old_mem.len);
- assert(new_alignment <= Slice.alignment);
-
- // Here we skip the overflow checking on the multiplication because
- // new_n <= old_mem.len and the multiplication didn't overflow for that operation.
- const byte_count = @sizeOf(T) * new_n;
-
- const old_byte_slice = mem.sliceAsBytes(old_mem);
- @memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
- _ = self.shrinkBytes(old_byte_slice, byte_count, 0);
- return old_mem[0..new_n];
- }
-
- /// Free an array allocated with `alloc`. To free a single item,
- /// see `destroy`.
- pub fn free(self: *Allocator, memory: anytype) void {
- const Slice = @typeInfo(@TypeOf(memory)).Pointer;
- const bytes = mem.sliceAsBytes(memory);
- const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
- if (bytes_len == 0) return;
- const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
- @memset(non_const_ptr, undefined, bytes_len);
- _ = self.shrinkBytes(non_const_ptr[0..bytes_len], 0, 0);
- }
-
- /// Copies `m` to newly allocated memory. Caller owns the memory.
- pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
- const new_buf = try allocator.alloc(T, m.len);
- copy(T, new_buf, m);
- return new_buf;
- }
-
- /// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory.
- pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
- const new_buf = try allocator.alloc(T, m.len + 1);
- copy(T, new_buf, m);
- new_buf[m.len] = 0;
- return new_buf[0..m.len :0];
- }
-};
+pub const Allocator = @import("mem/Allocator.zig");
/// Detects and asserts if the std.mem.Allocator interface is violated by the caller
/// or the allocator.
@@ -424,7 +46,8 @@ pub fn ValidationAllocator(comptime T: type) type {
}
const self = @fieldParentPtr(@This(), "allocator", allocator);
- const result = try self.getUnderlyingAllocatorPtr().callAllocFn(n, ptr_align, len_align);
+ const underlying = self.getUnderlyingAllocatorPtr();
+ const result = try underlying.allocFn(underlying, n, ptr_align, len_align);
assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
if (len_align == 0) {
assert(result.len == n);
@@ -434,14 +57,21 @@ pub fn ValidationAllocator(comptime T: type) type {
}
return result;
}
- pub fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) Allocator.Error!usize {
+ pub fn resize(
+ allocator: *Allocator,
+ buf: []u8,
+ buf_align: u29,
+ new_len: usize,
+ len_align: u29,
+ ) Allocator.Error!usize {
assert(buf.len > 0);
if (len_align != 0) {
assert(mem.isAlignedAnyAlign(new_len, len_align));
assert(new_len >= len_align);
}
const self = @fieldParentPtr(@This(), "allocator", allocator);
- const result = try self.getUnderlyingAllocatorPtr().callResizeFn(buf, new_len, len_align);
+ const underlying = self.getUnderlyingAllocatorPtr();
+ const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align);
if (len_align == 0) {
assert(result == new_len);
} else {
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
new file mode 100644
index 0000000000..f7a036d5ee
--- /dev/null
+++ b/lib/std/mem/Allocator.zig
@@ -0,0 +1,410 @@
+//! The standard memory allocation interface.
+
+const std = @import("../std.zig");
+const assert = std.debug.assert;
+const math = std.math;
+const mem = std.mem;
+const Allocator = @This();
+
+pub const Error = error{OutOfMemory};
+
+/// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
+///
+/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
+/// otherwise, the length must be aligned to `len_align`.
+///
+/// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
+allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,
+
+/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
+/// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value
+/// that was passed as the `ptr_align` parameter to the original `allocFn` call.
+///
+/// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no
+/// longer be passed to `resizeFn`.
+///
+/// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`.
+/// If `buf` cannot be expanded to accommodate `new_len`, then the allocation MUST be
+/// unmodified and error.OutOfMemory MUST be returned.
+///
+/// If `len_align` is `0`, then the length returned MUST be exactly `new_len` bytes,
+/// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not*
+/// provide a way to modify the alignment of a pointer. Rather it provides an API for
+/// accepting more bytes of memory from the allocator than requested.
+///
+/// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
+resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) Error!usize,
+
+/// Set to resizeFn if in-place resize is not supported.
+pub fn noResize(self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) Error!usize {
+ if (new_len > buf.len)
+ return error.OutOfMemory;
+ return new_len;
+}
+
+/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
+/// error.OutOfMemory should be impossible.
+pub fn shrinkBytes(self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) usize {
+ assert(new_len <= buf.len);
+ return self.resizeFn(self, buf, buf_align, new_len, len_align) catch unreachable;
+}
+
+/// Realloc is used to modify the size or alignment of an existing allocation,
+/// as well as to provide the allocator with an opportunity to move an allocation
+/// to a better location.
+/// When the size/alignment is greater than the previous allocation, this function
+/// returns `error.OutOfMemory` when the requested new allocation could not be granted.
+/// When the size/alignment is less than or equal to the previous allocation,
+/// this function returns `error.OutOfMemory` when the allocator decides the client
+/// would be better off keeping the extra alignment/size. Clients will call
+/// `resizeFn` when they require the allocator to track a new alignment/size,
+/// and so this function should only return success when the allocator considers
+/// the reallocation desirable from the allocator's perspective.
+/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
+/// reallocation failure, even when `new_n` <= `old_mem.len`. A `FixedBufferAllocator`
+/// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment
+/// is less than or equal to the old allocation, because it cannot reclaim the memory,
+/// and thus the `std.ArrayList` would be better off retaining its capacity.
+/// When `reallocFn` returns,
+/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
+/// as `old_mem` was when `reallocFn` is called. The bytes of
+/// `return_value[old_mem.len..]` have undefined values.
+/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
+fn reallocBytes(
+ self: *Allocator,
+ /// Guaranteed to be the same as what was returned from most recent call to
+ /// `allocFn` or `resizeFn`.
+ /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
+ /// is guaranteed to be >= 1.
+ old_mem: []u8,
+ /// If `old_mem.len == 0` then this is `undefined`, otherwise:
+ /// Guaranteed to be the same as what was passed to `allocFn`.
+ /// Guaranteed to be >= 1.
+ /// Guaranteed to be a power of 2.
+ old_alignment: u29,
+ /// If `new_byte_count` is 0 then this is a free and it is guaranteed that
+ /// `old_mem.len != 0`.
+ new_byte_count: usize,
+ /// Guaranteed to be >= 1.
+ /// Guaranteed to be a power of 2.
+ /// Returned slice's pointer must have this alignment.
+ new_alignment: u29,
+ /// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly
+ /// non-zero means the length of the returned slice must be aligned by `len_align`
+ /// `new_len` must be aligned by `len_align`
+ len_align: u29,
+) Error![]u8 {
+ if (old_mem.len == 0) {
+ const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align);
+ // TODO: https://github.com/ziglang/zig/issues/4298
+ @memset(new_mem.ptr, undefined, new_byte_count);
+ return new_mem;
+ }
+
+ if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
+ if (new_byte_count <= old_mem.len) {
+ const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align);
+ return old_mem.ptr[0..shrunk_len];
+ }
+ if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align)) |resized_len| {
+ assert(resized_len >= new_byte_count);
+ // TODO: https://github.com/ziglang/zig/issues/4298
+ @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
+ return old_mem.ptr[0..resized_len];
+ } else |_| {}
+ }
+ if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
+ return error.OutOfMemory;
+ }
+ return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align);
+}
+
+/// Move the given memory to a new location in the given allocator to accommodate a new
+/// size and alignment.
+fn moveBytes(
+ self: *Allocator,
+ old_mem: []u8,
+ old_align: u29,
+ new_len: usize,
+ new_alignment: u29,
+ len_align: u29,
+) Error![]u8 {
+ assert(old_mem.len > 0);
+ assert(new_len > 0);
+ const new_mem = try self.allocFn(self, new_len, new_alignment, len_align);
+ @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
+ // TODO DISABLED TO AVOID BUGS IN TRANSLATE C
+ // TODO see also https://github.com/ziglang/zig/issues/4298
+ // use './zig build test-translate-c' to reproduce, some of the symbols in the
+ // generated C code will be a sequence of 0xaa (the undefined value), meaning
+ // it is printing data that has been freed
+ //@memset(old_mem.ptr, undefined, old_mem.len);
+ _ = self.shrinkBytes(old_mem, old_align, 0, 0);
+ return new_mem;
+}
+
+/// Returns a pointer to undefined memory.
+/// Call `destroy` with the result to free the memory.
+pub fn create(self: *Allocator, comptime T: type) Error!*T {
+ if (@sizeOf(T) == 0) return &(T{});
+ const slice = try self.alloc(T, 1);
+ return &slice[0];
+}
+
+/// `ptr` should be the return value of `create`, or otherwise
+/// have the same address and alignment property.
+pub fn destroy(self: *Allocator, ptr: anytype) void {
+ const T = @TypeOf(ptr).Child;
+ if (@sizeOf(T) == 0) return;
+ const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
+ const ptr_align = @typeInfo(@TypeOf(ptr)).Pointer.alignment;
+ _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0);
+}
+
+/// Allocates an array of `n` items of type `T` and sets all the
+/// items to `undefined`. Depending on the Allocator
+/// implementation, it may be required to call `free` once the
+/// memory is no longer needed, to avoid a resource leak. If the
+/// `Allocator` implementation is unknown, then correct code will
+/// call `free` when done.
+///
+/// For allocating a single item, see `create`.
+pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
+ return self.alignedAlloc(T, null, n);
+}
+
+pub fn allocWithOptions(
+ self: *Allocator,
+ comptime Elem: type,
+ n: usize,
+ /// null means naturally aligned
+ comptime optional_alignment: ?u29,
+ comptime optional_sentinel: ?Elem,
+) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
+ if (optional_sentinel) |sentinel| {
+ const ptr = try self.alignedAlloc(Elem, optional_alignment, n + 1);
+ ptr[n] = sentinel;
+ return ptr[0..n :sentinel];
+ } else {
+ return self.alignedAlloc(Elem, optional_alignment, n);
+ }
+}
+
+fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, comptime sentinel: ?Elem) type {
+ if (sentinel) |s| {
+ return [:s]align(alignment orelse @alignOf(Elem)) Elem;
+ } else {
+ return []align(alignment orelse @alignOf(Elem)) Elem;
+ }
+}
+
+/// Allocates an array of `n + 1` items of type `T` and sets the first `n`
+/// items to `undefined` and the last item to `sentinel`. Depending on the
+/// Allocator implementation, it may be required to call `free` once the
+/// memory is no longer needed, to avoid a resource leak. If the
+/// `Allocator` implementation is unknown, then correct code will
+/// call `free` when done.
+///
+/// For allocating a single item, see `create`.
+///
+/// Deprecated; use `allocWithOptions`.
+pub fn allocSentinel(self: *Allocator, comptime Elem: type, n: usize, comptime sentinel: Elem) Error![:sentinel]Elem {
+ return self.allocWithOptions(Elem, n, null, sentinel);
+}
+
+/// Deprecated: use `allocAdvanced`
+pub fn alignedAlloc(
+ self: *Allocator,
+ comptime T: type,
+ /// null means naturally aligned
+ comptime alignment: ?u29,
+ n: usize,
+) Error![]align(alignment orelse @alignOf(T)) T {
+ return self.allocAdvanced(T, alignment, n, .exact);
+}
+
+const Exact = enum { exact, at_least };
+pub fn allocAdvanced(
+ self: *Allocator,
+ comptime T: type,
+ /// null means naturally aligned
+ comptime alignment: ?u29,
+ n: usize,
+ exact: Exact,
+) Error![]align(alignment orelse @alignOf(T)) T {
+ const a = if (alignment) |a| blk: {
+ if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact);
+ break :blk a;
+ } else @alignOf(T);
+
+ if (n == 0) {
+ return @as([*]align(a) T, undefined)[0..0];
+ }
+
+ const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
+ // TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
+ // access certain type information about T without creating a circular dependency in async
+ // functions that heap-allocate their own frame with @Frame(func).
+ const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
+ const byte_slice = try self.allocFn(self, byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
+ switch (exact) {
+ .exact => assert(byte_slice.len == byte_count),
+ .at_least => assert(byte_slice.len >= byte_count),
+ }
+ // TODO: https://github.com/ziglang/zig/issues/4298
+ @memset(byte_slice.ptr, undefined, byte_slice.len);
+ if (alignment == null) {
+ // This if block is a workaround (see comment above)
+ return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))];
+ } else {
+ return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
+ }
+}
+
+/// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer.
+pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
+ const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
+ const T = Slice.child;
+ if (new_n == 0) {
+ self.free(old_mem);
+ return &[0]T{};
+ }
+ const old_byte_slice = mem.sliceAsBytes(old_mem);
+ const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
+ const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0);
+ assert(rc == new_byte_count);
+ const new_byte_slice = old_mem.ptr[0..new_byte_count];
+ return mem.bytesAsSlice(T, new_byte_slice);
+}
+
+/// This function requests a new byte size for an existing allocation,
+/// which can be larger, smaller, or the same size as the old memory
+/// allocation.
+/// This function is preferred over `shrink`, because it can fail, even
+/// when shrinking. This gives the allocator a chance to perform a
+/// cheap shrink operation if possible, or otherwise return OutOfMemory,
+/// indicating that the caller should keep their capacity, for example
+/// in `std.ArrayList.shrink`.
+/// If you need guaranteed success, call `shrink`.
+/// If `new_n` is 0, this is the same as `free` and it always succeeds.
+pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+ const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
+ break :t Error![]align(Slice.alignment) Slice.child;
+} {
+ const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
+ return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact);
+}
+
+pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+ const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
+ break :t Error![]align(Slice.alignment) Slice.child;
+} {
+ const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
+ return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least);
+}
+
+// Deprecated: use `reallocAdvanced`
+pub fn alignedRealloc(
+ self: *Allocator,
+ old_mem: anytype,
+ comptime new_alignment: u29,
+ new_n: usize,
+) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
+ return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact);
+}
+
+/// This is the same as `realloc`, except caller may additionally request
+/// a new alignment, which can be larger, smaller, or the same as the old
+/// allocation.
+pub fn reallocAdvanced(
+ self: *Allocator,
+ old_mem: anytype,
+ comptime new_alignment: u29,
+ new_n: usize,
+ exact: Exact,
+) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
+ const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
+ const T = Slice.child;
+ if (old_mem.len == 0) {
+ return self.allocAdvanced(T, new_alignment, new_n, exact);
+ }
+ if (new_n == 0) {
+ self.free(old_mem);
+ return @as([*]align(new_alignment) T, undefined)[0..0];
+ }
+
+ const old_byte_slice = mem.sliceAsBytes(old_mem);
+ const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
+ // Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
+ const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, if (exact == .exact) @as(u29, 0) else @sizeOf(T));
+ return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
+}
+
+/// Prefer calling realloc to shrink if you can tolerate failure, such as
+/// in an ArrayList data structure with a storage capacity.
+/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
+/// Returned slice has same alignment as old_mem.
+/// Shrinking to 0 is the same as calling `free`.
+pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+ const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
+ break :t []align(Slice.alignment) Slice.child;
+} {
+ const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
+ return self.alignedShrink(old_mem, old_alignment, new_n);
+}
+
+/// This is the same as `shrink`, except caller may additionally request
+/// a new alignment, which must be smaller or the same as the old
+/// allocation.
+pub fn alignedShrink(
+ self: *Allocator,
+ old_mem: anytype,
+ comptime new_alignment: u29,
+ new_n: usize,
+) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
+ const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
+ const T = Slice.child;
+
+ if (new_n == old_mem.len)
+ return old_mem;
+ assert(new_n < old_mem.len);
+ assert(new_alignment <= Slice.alignment);
+
+ // Here we skip the overflow checking on the multiplication because
+ // new_n <= old_mem.len and the multiplication didn't overflow for that operation.
+ const byte_count = @sizeOf(T) * new_n;
+
+ const old_byte_slice = mem.sliceAsBytes(old_mem);
+ // TODO: https://github.com/ziglang/zig/issues/4298
+ @memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
+ _ = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, 0);
+ return old_mem[0..new_n];
+}
+
+/// Free an array allocated with `alloc`. To free a single item,
+/// see `destroy`.
+pub fn free(self: *Allocator, memory: anytype) void {
+ const Slice = @typeInfo(@TypeOf(memory)).Pointer;
+ const bytes = mem.sliceAsBytes(memory);
+ const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
+ if (bytes_len == 0) return;
+ const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
+ // TODO: https://github.com/ziglang/zig/issues/4298
+ @memset(non_const_ptr, undefined, bytes_len);
+ _ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0);
+}
+
+/// Copies `m` to newly allocated memory. Caller owns the memory.
+pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
+ const new_buf = try allocator.alloc(T, m.len);
+ mem.copy(T, new_buf, m);
+ return new_buf;
+}
+
+/// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory.
+pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
+ const new_buf = try allocator.alloc(T, m.len + 1);
+ mem.copy(T, new_buf, m);
+ new_buf[m.len] = 0;
+ return new_buf[0..m.len :0];
+}
diff --git a/lib/std/mutex.zig b/lib/std/mutex.zig
index a57519cd14..71fac4758c 100644
--- a/lib/std/mutex.zig
+++ b/lib/std/mutex.zig
@@ -30,49 +30,7 @@ const ResetEvent = std.ResetEvent;
/// // ... lock not acquired
/// }
pub const Mutex = if (builtin.single_threaded)
- struct {
- lock: @TypeOf(lock_init),
-
- const lock_init = if (std.debug.runtime_safety) false else {};
-
- pub const Held = struct {
- mutex: *Mutex,
-
- pub fn release(self: Held) void {
- if (std.debug.runtime_safety) {
- self.mutex.lock = false;
- }
- }
- };
-
- /// Create a new mutex in unlocked state.
- pub fn init() Mutex {
- return Mutex{ .lock = lock_init };
- }
-
- /// Free a mutex created with init. Calling this while the
- /// mutex is held is illegal behavior.
- pub fn deinit(self: *Mutex) void {
- self.* = undefined;
- }
-
- /// Try to acquire the mutex without blocking. Returns null if
- /// the mutex is unavailable. Otherwise returns Held. Call
- /// release on Held.
- pub fn tryAcquire(self: *Mutex) ?Held {
- if (std.debug.runtime_safety) {
- if (self.lock) return null;
- self.lock = true;
- }
- return Held{ .mutex = self };
- }
-
- /// Acquire the mutex. Will deadlock if the mutex is already
- /// held by the calling thread.
- pub fn acquire(self: *Mutex) Held {
- return self.tryAcquire() orelse @panic("deadlock detected");
- }
- }
+ Dummy
else if (builtin.os.tag == .windows)
// https://locklessinc.com/articles/keyed_events/
extern union {
@@ -82,6 +40,8 @@ else if (builtin.os.tag == .windows)
const WAKE = 1 << 8;
const WAIT = 1 << 9;
+ pub const Dummy = Dummy;
+
pub fn init() Mutex {
return Mutex{ .waiters = 0 };
}
@@ -166,6 +126,8 @@ else if (builtin.link_libc or builtin.os.tag == .linux)
struct {
state: usize,
+ pub const Dummy = Dummy;
+
/// number of times to spin trying to acquire the lock.
/// https://webkit.org/blog/6161/locking-in-webkit/
const SPIN_COUNT = 40;
@@ -298,6 +260,52 @@ else if (builtin.link_libc or builtin.os.tag == .linux)
else
SpinLock;
+/// This has the same semantics as `Mutex`, however it does not actually do any
+/// synchronization. Operations are safety-checked no-ops.
+pub const Dummy = struct {
+ lock: @TypeOf(lock_init),
+
+ const lock_init = if (std.debug.runtime_safety) false else {};
+
+ pub const Held = struct {
+ mutex: *Dummy,
+
+ pub fn release(self: Held) void {
+ if (std.debug.runtime_safety) {
+ self.mutex.lock = false;
+ }
+ }
+ };
+
+ /// Create a new mutex in unlocked state.
+ pub fn init() Dummy {
+ return Dummy{ .lock = lock_init };
+ }
+
+ /// Free a mutex created with init. Calling this while the
+ /// mutex is held is illegal behavior.
+ pub fn deinit(self: *Dummy) void {
+ self.* = undefined;
+ }
+
+ /// Try to acquire the mutex without blocking. Returns null if
+ /// the mutex is unavailable. Otherwise returns Held. Call
+ /// release on Held.
+ pub fn tryAcquire(self: *Dummy) ?Held {
+ if (std.debug.runtime_safety) {
+ if (self.lock) return null;
+ self.lock = true;
+ }
+ return Held{ .mutex = self };
+ }
+
+ /// Acquire the mutex. Will deadlock if the mutex is already
+ /// held by the calling thread.
+ pub fn acquire(self: *Dummy) Held {
+ return self.tryAcquire() orelse @panic("deadlock detected");
+ }
+};
+
const TestContext = struct {
mutex: *Mutex,
data: i128,
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index 301457dde0..49f174333a 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -20,14 +20,15 @@ pub fn main() anyerror!void {
async_frame_buffer = &[_]u8{};
for (test_fn_list) |test_fn, i| {
- std.testing.base_allocator_instance.reset();
+ std.testing.allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
+ defer std.testing.allocator_instance.deinit();
std.testing.log_level = .warn;
var test_node = root_node.start(test_fn.name, null);
test_node.activate();
progress.refresh();
if (progress.terminal == null) {
- std.debug.warn("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
+ std.debug.print("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
}
const result = if (test_fn.async_frame_size) |size| switch (io_mode) {
.evented => blk: {
@@ -42,24 +43,20 @@ pub fn main() anyerror!void {
skip_count += 1;
test_node.end();
progress.log("{}...SKIP (async test)\n", .{test_fn.name});
- if (progress.terminal == null) std.debug.warn("SKIP (async test)\n", .{});
+ if (progress.terminal == null) std.debug.print("SKIP (async test)\n", .{});
continue;
},
} else test_fn.func();
if (result) |_| {
ok_count += 1;
test_node.end();
- std.testing.allocator_instance.validate() catch |err| switch (err) {
- error.Leak => std.debug.panic("", .{}),
- else => std.debug.panic("error.{}", .{@errorName(err)}),
- };
- if (progress.terminal == null) std.debug.warn("OK\n", .{});
+ if (progress.terminal == null) std.debug.print("OK\n", .{});
} else |err| switch (err) {
error.SkipZigTest => {
skip_count += 1;
test_node.end();
progress.log("{}...SKIP\n", .{test_fn.name});
- if (progress.terminal == null) std.debug.warn("SKIP\n", .{});
+ if (progress.terminal == null) std.debug.print("SKIP\n", .{});
},
else => {
progress.log("", .{});
@@ -69,9 +66,9 @@ pub fn main() anyerror!void {
}
root_node.end();
if (ok_count == test_fn_list.len) {
- std.debug.warn("All {} tests passed.\n", .{ok_count});
+ std.debug.print("All {} tests passed.\n", .{ok_count});
} else {
- std.debug.warn("{} passed; {} skipped.\n", .{ ok_count, skip_count });
+ std.debug.print("{} passed; {} skipped.\n", .{ ok_count, skip_count });
}
}
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 8f16b50cd2..e5584e42cc 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -1,18 +1,16 @@
const std = @import("std.zig");
-const warn = std.debug.warn;
+const print = std.debug.print;
-pub const LeakCountAllocator = @import("testing/leak_count_allocator.zig").LeakCountAllocator;
pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;
/// This should only be used in temporary test programs.
pub const allocator = &allocator_instance.allocator;
-pub var allocator_instance = LeakCountAllocator.init(&base_allocator_instance.allocator);
+pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{}) = undefined;
pub const failing_allocator = &failing_allocator_instance.allocator;
pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);
-pub var base_allocator_instance = std.mem.validationWrap(std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]));
-var allocator_mem: [2 * 1024 * 1024]u8 = undefined;
+pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");
/// TODO https://github.com/ziglang/zig/issues/5738
pub var log_level = std.log.Level.warn;
@@ -326,22 +324,22 @@ test "expectEqual vector" {
pub fn expectEqualStrings(expected: []const u8, actual: []const u8) void {
if (std.mem.indexOfDiff(u8, actual, expected)) |diff_index| {
- warn("\n====== expected this output: =========\n", .{});
+ print("\n====== expected this output: =========\n", .{});
printWithVisibleNewlines(expected);
- warn("\n======== instead found this: =========\n", .{});
+ print("\n======== instead found this: =========\n", .{});
printWithVisibleNewlines(actual);
- warn("\n======================================\n", .{});
+ print("\n======================================\n", .{});
var diff_line_number: usize = 1;
for (expected[0..diff_index]) |value| {
if (value == '\n') diff_line_number += 1;
}
- warn("First difference occurs on line {}:\n", .{diff_line_number});
+ print("First difference occurs on line {}:\n", .{diff_line_number});
- warn("expected:\n", .{});
+ print("expected:\n", .{});
printIndicatorLine(expected, diff_index);
- warn("found:\n", .{});
+ print("found:\n", .{});
printIndicatorLine(actual, diff_index);
@panic("test failure");
@@ -362,9 +360,9 @@ fn printIndicatorLine(source: []const u8, indicator_index: usize) void {
{
var i: usize = line_begin_index;
while (i < indicator_index) : (i += 1)
- warn(" ", .{});
+ print(" ", .{});
}
- warn("^\n", .{});
+ print("^\n", .{});
}
fn printWithVisibleNewlines(source: []const u8) void {
@@ -372,15 +370,15 @@ fn printWithVisibleNewlines(source: []const u8) void {
while (std.mem.indexOf(u8, source[i..], "\n")) |nl| : (i += nl + 1) {
printLine(source[i .. i + nl]);
}
- warn("{}␃\n", .{source[i..]}); // End of Text symbol (ETX)
+ print("{}␃\n", .{source[i..]}); // End of Text symbol (ETX)
}
fn printLine(line: []const u8) void {
if (line.len != 0) switch (line[line.len - 1]) {
- ' ', '\t' => warn("{}⏎\n", .{line}), // Carriage return symbol,
+ ' ', '\t' => print("{}⏎\n", .{line}), // Carriage return symbol,
else => {},
};
- warn("{}\n", .{line});
+ print("{}\n", .{line});
}
test "" {
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index ade3e9d85a..7febaaac64 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -50,16 +50,22 @@ pub const FailingAllocator = struct {
if (self.index == self.fail_index) {
return error.OutOfMemory;
}
- const result = try self.internal_allocator.callAllocFn(len, ptr_align, len_align);
+ const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align);
self.allocated_bytes += result.len;
self.allocations += 1;
self.index += 1;
return result;
}
- fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
+ fn resize(
+ allocator: *std.mem.Allocator,
+ old_mem: []u8,
+ old_align: u29,
+ new_len: usize,
+ len_align: u29,
+ ) error{OutOfMemory}!usize {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
- const r = self.internal_allocator.callResizeFn(old_mem, new_len, len_align) catch |e| {
+ const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align) catch |e| {
std.debug.assert(new_len > old_mem.len);
return e;
};
diff --git a/lib/std/testing/leak_count_allocator.zig b/lib/std/testing/leak_count_allocator.zig
deleted file mode 100644
index 87564aeea7..0000000000
--- a/lib/std/testing/leak_count_allocator.zig
+++ /dev/null
@@ -1,51 +0,0 @@
-const std = @import("../std.zig");
-
-/// This allocator is used in front of another allocator and counts the numbers of allocs and frees.
-/// The test runner asserts every alloc has a corresponding free at the end of each test.
-///
-/// The detection algorithm is incredibly primitive and only accounts for number of calls.
-/// This should be replaced by the general purpose debug allocator.
-pub const LeakCountAllocator = struct {
- count: usize,
- allocator: std.mem.Allocator,
- internal_allocator: *std.mem.Allocator,
-
- pub fn init(allocator: *std.mem.Allocator) LeakCountAllocator {
- return .{
- .count = 0,
- .allocator = .{
- .allocFn = alloc,
- .resizeFn = resize,
- },
- .internal_allocator = allocator,
- };
- }
-
- fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
- const ptr = try self.internal_allocator.callAllocFn(len, ptr_align, len_align);
- self.count += 1;
- return ptr;
- }
-
- fn resize(allocator: *std.mem.Allocator, old_mem: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
- const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
- if (new_size == 0) {
- if (self.count == 0) {
- std.debug.panic("error - too many calls to free, most likely double free", .{});
- }
- self.count -= 1;
- }
- return self.internal_allocator.callResizeFn(old_mem, new_size, len_align) catch |e| {
- std.debug.assert(new_size > old_mem.len);
- return e;
- };
- }
-
- pub fn validate(self: LeakCountAllocator) !void {
- if (self.count > 0) {
- std.debug.warn("error - detected leaked allocations without matching free: {}\n", .{self.count});
- return error.Leak;
- }
- }
-};
diff --git a/src/ir.cpp b/src/ir.cpp
index 3aadc2557e..8934a20545 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -25067,12 +25067,12 @@ static PtrLen size_enum_index_to_ptr_len(BuiltinPtrSize size_enum_index) {
zig_unreachable();
}
-static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_entry) {
- Error err;
+static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, IrInst *source_instr, ZigType *ptr_type_entry) {
ZigType *attrs_type;
BuiltinPtrSize size_enum_index;
if (is_slice(ptr_type_entry)) {
- attrs_type = ptr_type_entry->data.structure.fields[slice_ptr_index]->type_entry;
+ TypeStructField *ptr_field = ptr_type_entry->data.structure.fields[slice_ptr_index];
+ attrs_type = resolve_struct_field_type(ira->codegen, ptr_field);
size_enum_index = BuiltinPtrSizeSlice;
} else if (ptr_type_entry->id == ZigTypeIdPointer) {
attrs_type = ptr_type_entry;
@@ -25081,9 +25081,6 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_ent
zig_unreachable();
}
- if ((err = type_resolve(ira->codegen, attrs_type->data.pointer.child_type, ResolveStatusSizeKnown)))
- return nullptr;
-
ZigType *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer", nullptr);
assertNoError(type_resolve(ira->codegen, type_info_pointer_type, ResolveStatusSizeKnown));
@@ -25114,9 +25111,18 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_ent
fields[2]->data.x_bool = attrs_type->data.pointer.is_volatile;
// alignment: u32
ensure_field_index(result->type, "alignment", 3);
- fields[3]->special = ConstValSpecialStatic;
fields[3]->type = ira->codegen->builtin_types.entry_num_lit_int;
- bigint_init_unsigned(&fields[3]->data.x_bigint, get_ptr_align(ira->codegen, attrs_type));
+ if (attrs_type->data.pointer.explicit_alignment != 0) {
+ fields[3]->special = ConstValSpecialStatic;
+ bigint_init_unsigned(&fields[3]->data.x_bigint, attrs_type->data.pointer.explicit_alignment);
+ } else {
+ LazyValueAlignOf *lazy_align_of = heap::c_allocator.create<LazyValueAlignOf>();
+ lazy_align_of->ira = ira; ira_ref(ira);
+ fields[3]->special = ConstValSpecialLazy;
+ fields[3]->data.x_lazy = &lazy_align_of->base;
+ lazy_align_of->base.id = LazyValueIdAlignOf;
+ lazy_align_of->target_type = ir_const_type(ira, source_instr, attrs_type->data.pointer.child_type);
+ }
// child: type
ensure_field_index(result->type, "child", 4);
fields[4]->special = ConstValSpecialStatic;
@@ -25130,7 +25136,7 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, ZigType *ptr_type_ent
// sentinel: anytype
ensure_field_index(result->type, "sentinel", 6);
fields[6]->special = ConstValSpecialStatic;
- if (attrs_type->data.pointer.child_type->id != ZigTypeIdOpaque) {
+ if (attrs_type->data.pointer.sentinel != nullptr) {
fields[6]->type = get_optional_type(ira->codegen, attrs_type->data.pointer.child_type);
set_optional_payload(fields[6], attrs_type->data.pointer.sentinel);
} else {
@@ -25165,9 +25171,6 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
assert(type_entry != nullptr);
assert(!type_is_invalid(type_entry));
- if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
- return err;
-
auto entry = ira->codegen->type_info_cache.maybe_get(type_entry);
if (entry != nullptr) {
*out = entry->value;
@@ -25231,7 +25234,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
}
case ZigTypeIdPointer:
{
- result = create_ptr_like_type_info(ira, type_entry);
+ result = create_ptr_like_type_info(ira, source_instr, type_entry);
if (result == nullptr)
return ErrorSemanticAnalyzeFail;
break;
@@ -25317,6 +25320,9 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
}
case ZigTypeIdEnum:
{
+ if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
+ return err;
+
result = ira->codegen->pass1_arena->create<ZigValue>();
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Enum", nullptr);
@@ -25455,6 +25461,9 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
}
case ZigTypeIdUnion:
{
+ if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
+ return err;
+
result = ira->codegen->pass1_arena->create<ZigValue>();
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Union", nullptr);
@@ -25545,12 +25554,15 @@ static Error ir_make_type_info_value(IrAnalyze *ira, IrInst* source_instr, ZigTy
case ZigTypeIdStruct:
{
if (type_entry->data.structure.special == StructSpecialSlice) {
- result = create_ptr_like_type_info(ira, type_entry);
+ result = create_ptr_like_type_info(ira, source_instr, type_entry);
if (result == nullptr)
return ErrorSemanticAnalyzeFail;
break;
}
+ if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
+ return err;
+
result = ira->codegen->pass1_arena->create<ZigValue>();
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Struct", nullptr);
From 0347df82e8c821906ef0d07ec65fe4b3884c0212 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 7 Aug 2020 23:26:58 -0700
Subject: [PATCH 017/153] improvements & fixes for general purpose allocator
integration
* std.Mutex API is improved to not have init() deinit(). This API is
designed to support static initialization and does not require any
resource cleanup. This also happens to work around some kind of
stage1 behavior that wasn't letting the new allocator mutex code
get compiled.
* the general purpose allocator now returns a bool from deinit()
which tells if there were any leaks. This value is used by the test
runner to fail the tests if there are any.
* self-hosted compiler is updated to use the general purpose allocator
when not linking against libc.
---
lib/std/atomic/queue.zig | 2 +-
lib/std/debug.zig | 4 +-
lib/std/heap.zig | 8 +-
lib/std/heap/general_purpose_allocator.zig | 45 +++--
lib/std/mutex.zig | 204 +++++++++------------
lib/std/once.zig | 2 +-
lib/std/special/test_runner.zig | 11 +-
lib/std/std.zig | 3 +-
src-self-hosted/main.zig | 5 +-
src-self-hosted/test.zig | 3 -
10 files changed, 142 insertions(+), 145 deletions(-)
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index 880af37ef4..4df7522f29 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -22,7 +22,7 @@ pub fn Queue(comptime T: type) type {
return Self{
.head = null,
.tail = null,
- .mutex = std.Mutex.init(),
+ .mutex = std.Mutex{},
};
}
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 982bada939..fb1c3a3a88 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -47,7 +47,7 @@ pub const LineInfo = struct {
}
};
-var stderr_mutex = std.Mutex.init();
+var stderr_mutex = std.Mutex{};
/// Deprecated. Use `std.log` functions for logging or `std.debug.print` for
/// "printf debugging".
@@ -232,7 +232,7 @@ pub fn panic(comptime format: []const u8, args: anytype) noreturn {
var panicking: u8 = 0;
// Locked to avoid interleaving panic messages from multiple threads.
-var panic_mutex = std.Mutex.init();
+var panic_mutex = std.Mutex{};
/// Counts how many times the panic handler is invoked by this thread.
/// This is used to catch and handle panics triggered by the panic handler.
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 30ffa57eed..2e881dff94 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -464,7 +464,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
return buf;
}
- fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
+ fn resize(
+ allocator: *Allocator,
+ buf: []u8,
+ buf_align: u29,
+ new_size: usize,
+ len_align: u29,
+ ) error{OutOfMemory}!usize {
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
if (new_size == 0) {
os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 207857708e..6ed9980a4f 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -140,7 +140,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};
- const mutex_init = if (config.thread_safe) std.Mutex.init() else std.Mutex.Dummy.init();
+ const mutex_init = if (config.thread_safe) std.Mutex{} else std.mutex.Dummy{};
const stack_n = config.stack_trace_frames;
const one_trace_size = @sizeOf(usize) * stack_n;
@@ -250,7 +250,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
bucket: *BucketHeader,
size_class: usize,
used_bits_count: usize,
- ) void {
+ ) bool {
+ var leaks = false;
var used_bits_byte: usize = 0;
while (used_bits_byte < used_bits_count) : (used_bits_byte += 1) {
const used_byte = bucket.usedBits(used_bits_byte).*;
@@ -268,22 +269,26 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
.alloc,
);
std.debug.dumpStackTrace(stack_trace);
+ leaks = true;
}
if (bit_index == math.maxInt(u3))
break;
}
}
}
+ return leaks;
}
- pub fn deinit(self: *Self) void {
+ /// Returns whether there were leaks.
+ pub fn deinit(self: *Self) bool {
+ var leaks = false;
for (self.buckets) |optional_bucket, bucket_i| {
const first_bucket = optional_bucket orelse continue;
const size_class = @as(usize, 1) << @intCast(u6, bucket_i);
const used_bits_count = usedBitsCount(size_class);
var bucket = first_bucket;
while (true) {
- detectLeaksInBucket(bucket, size_class, used_bits_count);
+ leaks = detectLeaksInBucket(bucket, size_class, used_bits_count) or leaks;
bucket = bucket.next;
if (bucket == first_bucket)
break;
@@ -292,9 +297,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
for (self.large_allocations.items()) |*large_alloc| {
std.debug.print("\nMemory leak detected:\n", .{});
large_alloc.value.dumpStackTrace();
+ leaks = true;
}
self.large_allocations.deinit(self.backing_allocator);
self.* = undefined;
+ return leaks;
}
fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
@@ -600,7 +607,7 @@ const test_config = Config{};
test "small allocations - free in same order" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var list = std.ArrayList(*u64).init(std.testing.allocator);
@@ -619,7 +626,7 @@ test "small allocations - free in same order" {
test "small allocations - free in reverse order" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var list = std.ArrayList(*u64).init(std.testing.allocator);
@@ -638,7 +645,7 @@ test "small allocations - free in reverse order" {
test "large allocations" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
const ptr1 = try allocator.alloc(u64, 42768);
@@ -651,7 +658,7 @@ test "large allocations" {
test "realloc" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
@@ -673,7 +680,7 @@ test "realloc" {
test "shrink" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, 20);
@@ -696,7 +703,7 @@ test "shrink" {
test "large object - grow" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
@@ -714,7 +721,7 @@ test "large object - grow" {
test "realloc small object to large object" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, 70);
@@ -731,7 +738,7 @@ test "realloc small object to large object" {
test "shrink large object to large object" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
@@ -754,7 +761,7 @@ test "shrink large object to large object" {
test "shrink large object to large object with larger alignment" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var debug_buffer: [1000]u8 = undefined;
@@ -782,7 +789,7 @@ test "shrink large object to large object with larger alignment" {
test "realloc large object to small object" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
@@ -797,7 +804,7 @@ test "realloc large object to small object" {
test "non-page-allocator backing allocator" {
var gpda = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
const ptr = try allocator.create(i32);
@@ -806,7 +813,7 @@ test "non-page-allocator backing allocator" {
test "realloc large object to larger alignment" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var debug_buffer: [1000]u8 = undefined;
@@ -866,7 +873,7 @@ test "isAligned works" {
test "large object shrinks to small but allocation fails during shrink" {
var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
var gpda = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
@@ -883,7 +890,7 @@ test "large object shrinks to small but allocation fails during shrink" {
test "objects of size 1024 and 2048" {
var gpda = GeneralPurposeAllocator(test_config){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
const slice = try allocator.alloc(u8, 1025);
@@ -895,7 +902,7 @@ test "objects of size 1024 and 2048" {
test "setting a memory cap" {
var gpda = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
- defer gpda.deinit();
+ defer std.testing.expect(!gpda.deinit());
const allocator = &gpda.allocator;
gpda.setRequestedMemoryLimit(1010);
diff --git a/lib/std/mutex.zig b/lib/std/mutex.zig
index 71fac4758c..3361a42e21 100644
--- a/lib/std/mutex.zig
+++ b/lib/std/mutex.zig
@@ -15,8 +15,7 @@ const ResetEvent = std.ResetEvent;
/// deadlock detection.
///
/// Example usage:
-/// var m = Mutex.init();
-/// defer m.deinit();
+/// var m = Mutex{};
///
/// const lock = m.acquire();
/// defer lock.release();
@@ -32,101 +31,11 @@ const ResetEvent = std.ResetEvent;
pub const Mutex = if (builtin.single_threaded)
Dummy
else if (builtin.os.tag == .windows)
-// https://locklessinc.com/articles/keyed_events/
- extern union {
- locked: u8,
- waiters: u32,
-
- const WAKE = 1 << 8;
- const WAIT = 1 << 9;
-
- pub const Dummy = Dummy;
-
- pub fn init() Mutex {
- return Mutex{ .waiters = 0 };
- }
-
- pub fn deinit(self: *Mutex) void {
- self.* = undefined;
- }
-
- pub fn tryAcquire(self: *Mutex) ?Held {
- if (@atomicRmw(u8, &self.locked, .Xchg, 1, .Acquire) != 0)
- return null;
- return Held{ .mutex = self };
- }
-
- pub fn acquire(self: *Mutex) Held {
- return self.tryAcquire() orelse self.acquireSlow();
- }
-
- fn acquireSpinning(self: *Mutex) Held {
- @setCold(true);
- while (true) : (SpinLock.yield()) {
- return self.tryAcquire() orelse continue;
- }
- }
-
- fn acquireSlow(self: *Mutex) Held {
- // try to use NT keyed events for blocking, falling back to spinlock if unavailable
- @setCold(true);
- const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return self.acquireSpinning();
- const key = @ptrCast(*const c_void, &self.waiters);
-
- while (true) : (SpinLock.loopHint(1)) {
- const waiters = @atomicLoad(u32, &self.waiters, .Monotonic);
-
- // try and take lock if unlocked
- if ((waiters & 1) == 0) {
- if (@atomicRmw(u8, &self.locked, .Xchg, 1, .Acquire) == 0) {
- return Held{ .mutex = self };
- }
-
- // otherwise, try and update the waiting count.
- // then unset the WAKE bit so that another unlocker can wake up a thread.
- } else if (@cmpxchgWeak(u32, &self.waiters, waiters, (waiters + WAIT) | 1, .Monotonic, .Monotonic) == null) {
- const rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, null);
- assert(rc == .SUCCESS);
- _ = @atomicRmw(u32, &self.waiters, .Sub, WAKE, .Monotonic);
- }
- }
- }
-
- pub const Held = struct {
- mutex: *Mutex,
-
- pub fn release(self: Held) void {
- // unlock without a rmw/cmpxchg instruction
- @atomicStore(u8, @ptrCast(*u8, &self.mutex.locked), 0, .Release);
- const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return;
- const key = @ptrCast(*const c_void, &self.mutex.waiters);
-
- while (true) : (SpinLock.loopHint(1)) {
- const waiters = @atomicLoad(u32, &self.mutex.waiters, .Monotonic);
-
- // no one is waiting
- if (waiters < WAIT) return;
- // someone grabbed the lock and will do the wake instead
- if (waiters & 1 != 0) return;
- // someone else is currently waking up
- if (waiters & WAKE != 0) return;
-
- // try to decrease the waiter count & set the WAKE bit meaning a thread is waking up
- if (@cmpxchgWeak(u32, &self.mutex.waiters, waiters, waiters - WAIT + WAKE, .Release, .Monotonic) == null) {
- const rc = windows.ntdll.NtReleaseKeyedEvent(handle, key, windows.FALSE, null);
- assert(rc == .SUCCESS);
- return;
- }
- }
- }
- };
- }
+ WindowsMutex
else if (builtin.link_libc or builtin.os.tag == .linux)
// stack-based version of https://github.com/Amanieu/parking_lot/blob/master/core/src/word_lock.rs
struct {
- state: usize,
-
- pub const Dummy = Dummy;
+ state: usize = 0,
/// number of times to spin trying to acquire the lock.
/// https://webkit.org/blog/6161/locking-in-webkit/
@@ -141,14 +50,6 @@ else if (builtin.link_libc or builtin.os.tag == .linux)
event: ResetEvent,
};
- pub fn init() Mutex {
- return Mutex{ .state = 0 };
- }
-
- pub fn deinit(self: *Mutex) void {
- self.* = undefined;
- }
-
pub fn tryAcquire(self: *Mutex) ?Held {
if (@cmpxchgWeak(usize, &self.state, 0, MUTEX_LOCK, .Acquire, .Monotonic) != null)
return null;
@@ -263,7 +164,7 @@ else
/// This has the sematics as `Mutex`, however it does not actually do any
/// synchronization. Operations are safety-checked no-ops.
pub const Dummy = struct {
- lock: @TypeOf(lock_init),
+ lock: @TypeOf(lock_init) = lock_init,
const lock_init = if (std.debug.runtime_safety) false else {};
@@ -278,15 +179,7 @@ pub const Dummy = struct {
};
/// Create a new mutex in unlocked state.
- pub fn init() Dummy {
- return Dummy{ .lock = lock_init };
- }
-
- /// Free a mutex created with init. Calling this while the
- /// mutex is held is illegal behavior.
- pub fn deinit(self: *Dummy) void {
- self.* = undefined;
- }
+ pub const init = Dummy{};
/// Try to acquire the mutex without blocking. Returns null if
/// the mutex is unavailable. Otherwise returns Held. Call
@@ -306,6 +199,90 @@ pub const Dummy = struct {
}
};
+// https://locklessinc.com/articles/keyed_events/
+const WindowsMutex = struct {
+ state: State = State{ .waiters = 0 },
+
+ const State = extern union {
+ locked: u8,
+ waiters: u32,
+ };
+
+ const WAKE = 1 << 8;
+ const WAIT = 1 << 9;
+
+ pub fn tryAcquire(self: *WindowsMutex) ?Held {
+ if (@atomicRmw(u8, &self.state.locked, .Xchg, 1, .Acquire) != 0)
+ return null;
+ return Held{ .mutex = self };
+ }
+
+ pub fn acquire(self: *WindowsMutex) Held {
+ return self.tryAcquire() orelse self.acquireSlow();
+ }
+
+ fn acquireSpinning(self: *WindowsMutex) Held {
+ @setCold(true);
+ while (true) : (SpinLock.yield()) {
+ return self.tryAcquire() orelse continue;
+ }
+ }
+
+ fn acquireSlow(self: *WindowsMutex) Held {
+ // try to use NT keyed events for blocking, falling back to spinlock if unavailable
+ @setCold(true);
+ const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return self.acquireSpinning();
+ const key = @ptrCast(*const c_void, &self.state.waiters);
+
+ while (true) : (SpinLock.loopHint(1)) {
+ const waiters = @atomicLoad(u32, &self.state.waiters, .Monotonic);
+
+ // try and take lock if unlocked
+ if ((waiters & 1) == 0) {
+ if (@atomicRmw(u8, &self.state.locked, .Xchg, 1, .Acquire) == 0) {
+ return Held{ .mutex = self };
+ }
+
+ // otherwise, try and update the waiting count.
+ // then unset the WAKE bit so that another unlocker can wake up a thread.
+ } else if (@cmpxchgWeak(u32, &self.state.waiters, waiters, (waiters + WAIT) | 1, .Monotonic, .Monotonic) == null) {
+ const rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, null);
+ assert(rc == .SUCCESS);
+ _ = @atomicRmw(u32, &self.state.waiters, .Sub, WAKE, .Monotonic);
+ }
+ }
+ }
+
+ pub const Held = struct {
+ mutex: *WindowsMutex,
+
+ pub fn release(self: Held) void {
+ // unlock without a rmw/cmpxchg instruction
+ @atomicStore(u8, @ptrCast(*u8, &self.mutex.state.locked), 0, .Release);
+ const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return;
+ const key = @ptrCast(*const c_void, &self.mutex.state.waiters);
+
+ while (true) : (SpinLock.loopHint(1)) {
+ const waiters = @atomicLoad(u32, &self.mutex.state.waiters, .Monotonic);
+
+ // no one is waiting
+ if (waiters < WAIT) return;
+ // someone grabbed the lock and will do the wake instead
+ if (waiters & 1 != 0) return;
+ // someone else is currently waking up
+ if (waiters & WAKE != 0) return;
+
+ // try to decrease the waiter count & set the WAKE bit meaning a thread is waking up
+ if (@cmpxchgWeak(u32, &self.mutex.state.waiters, waiters, waiters - WAIT + WAKE, .Release, .Monotonic) == null) {
+ const rc = windows.ntdll.NtReleaseKeyedEvent(handle, key, windows.FALSE, null);
+ assert(rc == .SUCCESS);
+ return;
+ }
+ }
+ }
+ };
+};
+
const TestContext = struct {
mutex: *Mutex,
data: i128,
@@ -314,8 +291,7 @@ const TestContext = struct {
};
test "std.Mutex" {
- var mutex = Mutex.init();
- defer mutex.deinit();
+ var mutex = Mutex{};
var context = TestContext{
.mutex = &mutex,
diff --git a/lib/std/once.zig b/lib/std/once.zig
index 2a1a02f17d..cf42d021a2 100644
--- a/lib/std/once.zig
+++ b/lib/std/once.zig
@@ -10,7 +10,7 @@ pub fn once(comptime f: fn () void) Once(f) {
pub fn Once(comptime f: fn () void) type {
return struct {
done: bool = false,
- mutex: std.Mutex = std.Mutex.init(),
+ mutex: std.Mutex = std.Mutex{},
/// Call the function `f`.
/// If `call` is invoked multiple times `f` will be executed only the
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index 49f174333a..384cd7f572 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -19,9 +19,14 @@ pub fn main() anyerror!void {
// ignores the alignment of the slice.
async_frame_buffer = &[_]u8{};
+ var leaks: usize = 0;
for (test_fn_list) |test_fn, i| {
std.testing.allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
- defer std.testing.allocator_instance.deinit();
+ defer {
+ if (std.testing.allocator_instance.deinit()) {
+ leaks += 1;
+ }
+ }
std.testing.log_level = .warn;
var test_node = root_node.start(test_fn.name, null);
@@ -70,6 +75,10 @@ pub fn main() anyerror!void {
} else {
std.debug.print("{} passed; {} skipped.\n", .{ ok_count, skip_count });
}
+ if (leaks != 0) {
+ std.debug.print("{} tests leaked memory\n", .{leaks});
+ std.process.exit(1);
+ }
}
pub fn log(
diff --git a/lib/std/std.zig b/lib/std/std.zig
index 50aaef6f11..940fc498d6 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -13,7 +13,8 @@ pub const ComptimeStringMap = @import("comptime_string_map.zig").ComptimeStringM
pub const DynLib = @import("dynamic_library.zig").DynLib;
pub const HashMap = hash_map.HashMap;
pub const HashMapUnmanaged = hash_map.HashMapUnmanaged;
-pub const Mutex = @import("mutex.zig").Mutex;
+pub const mutex = @import("mutex.zig");
+pub const Mutex = mutex.Mutex;
pub const PackedIntArray = @import("packed_int_array.zig").PackedIntArray;
pub const PackedIntArrayEndian = @import("packed_int_array.zig").PackedIntArrayEndian;
pub const PackedIntSlice = @import("packed_int_array.zig").PackedIntSlice;
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 04ebee457c..5e2120f41a 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -60,9 +60,10 @@ pub fn log(
std.debug.print(prefix ++ format, args);
}
+var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
+
pub fn main() !void {
- // TODO general purpose allocator in the zig std lib
- const gpa = if (std.builtin.link_libc) std.heap.c_allocator else std.heap.page_allocator;
+ const gpa = if (std.builtin.link_libc) std.heap.c_allocator else &general_purpose_allocator.allocator;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
const arena = &arena_instance.allocator;
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
index 6f21b2f49c..7d4cc7d563 100644
--- a/src-self-hosted/test.zig
+++ b/src-self-hosted/test.zig
@@ -407,8 +407,6 @@ pub const TestContext = struct {
defer root_node.end();
for (self.cases.items) |case| {
- std.testing.base_allocator_instance.reset();
-
var prg_node = root_node.start(case.name, case.updates.items.len);
prg_node.activate();
defer prg_node.end();
@@ -419,7 +417,6 @@ pub const TestContext = struct {
progress.refresh_rate_ns = 0;
try self.runOneCase(std.testing.allocator, &prg_node, case);
- try std.testing.allocator_instance.validate();
}
}
From cd6cdd0a752ba3e134ba371246cc02b1d7faaa88 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 00:34:13 -0700
Subject: [PATCH 018/153] std.mem.Allocator: add return_address to the
interface
The high level Allocator interface API functions will now do a
`@returnAddress()` so that stack traces captured by allocator
implementations have a return address that does not include the
Allocator overhead functions. This makes `4` a more reasonable default
for how many stack frames to capture.
---
lib/std/heap.zig | 67 +++++++--
lib/std/heap/arena_allocator.zig | 4 +-
lib/std/heap/general_purpose_allocator.zig | 39 ++---
lib/std/heap/logging_allocator.zig | 17 ++-
lib/std/mem.zig | 15 +-
lib/std/mem/Allocator.zig | 166 +++++++++++++++------
lib/std/testing/failing_allocator.zig | 13 +-
7 files changed, 230 insertions(+), 91 deletions(-)
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 2e881dff94..88f27fad49 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -100,7 +100,7 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
const PageAllocator = struct {
- fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
assert(n > 0);
const aligned_len = mem.alignForward(n, mem.page_size);
@@ -196,7 +196,14 @@ const PageAllocator = struct {
return result_ptr[0..alignPageAllocLen(aligned_len, n, len_align)];
}
- fn resize(allocator: *Allocator, buf_unaligned: []u8, buf_align: u29, new_size: usize, len_align: u29) Allocator.Error!usize {
+ fn resize(
+ allocator: *Allocator,
+ buf_unaligned: []u8,
+ buf_align: u29,
+ new_size: usize,
+ len_align: u29,
+ return_address: usize,
+ ) Allocator.Error!usize {
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
if (builtin.os.tag == .windows) {
@@ -344,7 +351,7 @@ const WasmPageAllocator = struct {
return mem.alignForward(memsize, mem.page_size) / mem.page_size;
}
- fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29) error{OutOfMemory}![]u8 {
+ fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
const page_count = nPages(len);
const page_idx = try allocPages(page_count, alignment);
return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
@@ -397,7 +404,14 @@ const WasmPageAllocator = struct {
}
}
- fn resize(allocator: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!usize {
+ fn resize(
+ allocator: *Allocator,
+ buf: []u8,
+ buf_align: u29,
+ new_len: usize,
+ len_align: u29,
+ return_address: usize,
+ ) error{OutOfMemory}!usize {
const aligned_len = mem.alignForward(buf.len, mem.page_size);
if (new_len > aligned_len) return error.OutOfMemory;
const current_n = nPages(aligned_len);
@@ -437,7 +451,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
return @intToPtr(*align(1) usize, @ptrToInt(buf.ptr) + buf.len);
}
- fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
+ fn alloc(
+ allocator: *Allocator,
+ n: usize,
+ ptr_align: u29,
+ len_align: u29,
+ return_address: usize,
+ ) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
const amt = n + ptr_align - 1 + @sizeOf(usize);
@@ -470,6 +490,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
buf_align: u29,
new_size: usize,
len_align: u29,
+ return_address: usize,
) error{OutOfMemory}!usize {
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
if (new_size == 0) {
@@ -542,7 +563,7 @@ pub const FixedBufferAllocator = struct {
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
}
- fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
const aligned_addr = mem.alignForward(@ptrToInt(self.buffer.ptr) + self.end_index, ptr_align);
const adjusted_index = aligned_addr - @ptrToInt(self.buffer.ptr);
@@ -556,7 +577,14 @@ pub const FixedBufferAllocator = struct {
return result;
}
- fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_size: usize, len_align: u29) Allocator.Error!usize {
+ fn resize(
+ allocator: *Allocator,
+ buf: []u8,
+ buf_align: u29,
+ new_size: usize,
+ len_align: u29,
+ return_address: usize,
+ ) Allocator.Error!usize {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
assert(self.ownsSlice(buf)); // sanity check
@@ -606,7 +634,7 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
};
}
- fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
while (true) {
@@ -654,18 +682,31 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
return &self.allocator;
}
- fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![*]u8 {
+ fn alloc(
+ allocator: *Allocator,
+ len: usize,
+ ptr_align: u29,
+ len_align: u29,
+ return_address: usize,
+ ) error{OutOfMemory}![*]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align) catch
return fallback_allocator.alloc(len, ptr_align);
}
- fn resize(self: *Allocator, buf: []u8, new_len: usize, len_align: u29) error{OutOfMemory}!void {
+ fn resize(
+ self: *Allocator,
+ buf: []u8,
+ buf_align: u29,
+ new_len: usize,
+ len_align: u29,
+ return_address: usize,
+ ) error{OutOfMemory}!void {
const self = @fieldParentPtr(Self, "allocator", allocator);
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
- try self.fixed_buffer_allocator.callResizeFn(buf, new_len);
+ try self.fixed_buffer_allocator.resize(buf, new_len);
} else {
- try self.fallback_allocator.callResizeFn(buf, new_len);
+ try self.fallback_allocator.resize(buf, new_len);
}
}
};
@@ -950,7 +991,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) mem.Allocator.
slice[60] = 0x34;
// realloc to a smaller size but with a larger alignment
- slice = try allocator.alignedRealloc(slice, mem.page_size * 32, alloc_size / 2);
+ slice = try allocator.reallocAdvanced(slice, mem.page_size * 32, alloc_size / 2, .exact);
testing.expect(slice[0] == 0x12);
testing.expect(slice[60] == 0x34);
}
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index 4a833bcb28..191f0c19e9 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -49,7 +49,7 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
- const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1);
+ const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1, @returnAddress());
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
buf_node.* = BufNode{
.data = buf,
@@ -60,7 +60,7 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 6ed9980a4f..9abf9cf253 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -104,7 +104,7 @@ const SlotIndex = std.meta.Int(false, math.log2(page_size) + 1);
pub const Config = struct {
/// Number of stack frames to capture.
- stack_trace_frames: usize = if (std.debug.runtime_safety) @as(usize, 6) else @as(usize, 0),
+ stack_trace_frames: usize = if (std.debug.runtime_safety) @as(usize, 4) else @as(usize, 0),
/// If true, the allocator will have two fields:
/// * `total_requested_bytes` which tracks the total allocated bytes of memory requested.
@@ -199,7 +199,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
fn captureStackTrace(
bucket: *BucketHeader,
- return_address: usize,
+ ret_addr: usize,
size_class: usize,
slot_index: SlotIndex,
trace_kind: TraceKind,
@@ -207,7 +207,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
// Initialize them to 0. When determining the count we must look
// for non zero addresses.
const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
- collectStackTrace(return_address, stack_addresses);
+ collectStackTrace(ret_addr, stack_addresses);
}
};
@@ -284,7 +284,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var leaks = false;
for (self.buckets) |optional_bucket, bucket_i| {
const first_bucket = optional_bucket orelse continue;
- const size_class = @as(usize, 1) << @intCast(u6, bucket_i);
+ const size_class = @as(usize, 1) << @intCast(math.Log2Int(usize), bucket_i);
const used_bits_count = usedBitsCount(size_class);
var bucket = first_bucket;
while (true) {
@@ -377,7 +377,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
trace_addr: usize,
) void {
// Capture stack trace to be the "first free", in case a double free happens.
- bucket.captureStackTrace(@returnAddress(), size_class, slot_index, .free);
+ bucket.captureStackTrace(trace_addr, size_class, slot_index, .free);
used_byte.* &= ~(@as(u8, 1) << used_bit_index);
bucket.used_count -= 1;
@@ -408,7 +408,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
old_align: u29,
new_size: usize,
len_align: u29,
- return_addr: usize,
+ ret_addr: usize,
) Error!usize {
const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
if (config.safety) {
@@ -428,7 +428,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
@panic("\nFree here:");
}
- const result_len = try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align);
+ const result_len = try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
if (result_len == 0) {
self.large_allocations.removeAssertDiscard(@ptrToInt(old_mem.ptr));
@@ -436,7 +436,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
entry.value.bytes = old_mem.ptr[0..result_len];
- collectStackTrace(return_addr, &entry.value.stack_addresses);
+ collectStackTrace(ret_addr, &entry.value.stack_addresses);
return result_len;
}
@@ -450,6 +450,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
old_align: u29,
new_size: usize,
len_align: u29,
+ ret_addr: usize,
) Error!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);
@@ -472,7 +473,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const aligned_size = math.max(old_mem.len, old_align);
if (aligned_size > largest_bucket_object_size) {
- return self.resizeLarge(old_mem, old_align, new_size, len_align, @returnAddress());
+ return self.resizeLarge(old_mem, old_align, new_size, len_align, ret_addr);
}
const size_class_hint = up_to_nearest_power_of_2(usize, aligned_size);
@@ -484,7 +485,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
size_class *= 2;
} else {
- return self.resizeLarge(old_mem, old_align, new_size, len_align, @returnAddress());
+ return self.resizeLarge(old_mem, old_align, new_size, len_align, ret_addr);
};
const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page);
const slot_index = @intCast(SlotIndex, byte_offset / size_class);
@@ -507,7 +508,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
}
if (new_size == 0) {
- self.freeSlot(bucket, bucket_index, size_class, slot_index, used_byte, used_bit_index, @returnAddress());
+ self.freeSlot(bucket, bucket_index, size_class, slot_index, used_byte, used_bit_index, ret_addr);
return @as(usize, 0);
}
const new_aligned_size = math.max(new_size, old_align);
@@ -518,7 +519,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return error.OutOfMemory;
}
- fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8 {
+ fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
const held = self.mutex.acquire();
@@ -543,17 +544,17 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
self.large_allocations.entries.items.len + 1,
);
- const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align);
+ const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
gop.entry.value.bytes = slice;
- collectStackTrace(@returnAddress(), &gop.entry.value.stack_addresses);
+ collectStackTrace(ret_addr, &gop.entry.value.stack_addresses);
return slice;
} else {
const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
- const ptr = try self.allocSlot(new_size_class, @returnAddress());
+ const ptr = try self.allocSlot(new_size_class, ret_addr);
return ptr[0..len];
}
}
@@ -782,7 +783,7 @@ test "shrink large object to large object with larger alignment" {
slice[0] = 0x12;
slice[60] = 0x34;
- slice = try allocator.alignedRealloc(slice, page_size * 2, alloc_size / 2);
+ slice = try allocator.reallocAdvanced(slice, page_size * 2, alloc_size / 2, .exact);
assert(slice[0] == 0x12);
assert(slice[60] == 0x34);
}
@@ -833,15 +834,15 @@ test "realloc large object to larger alignment" {
slice[0] = 0x12;
slice[16] = 0x34;
- slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 100);
+ slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100, .exact);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
- slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 25);
+ slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25, .exact);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
- slice = try allocator.alignedRealloc(slice, page_size * 2, page_size * 2 + 100);
+ slice = try allocator.reallocAdvanced(slice, page_size * 2, page_size * 2 + 100, .exact);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
}
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 5f91efa10a..eff20a5c46 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -23,10 +23,16 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
};
}
- fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
+ fn alloc(
+ allocator: *Allocator,
+ len: usize,
+ ptr_align: u29,
+ len_align: u29,
+ ra: usize,
+ ) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
self.out_stream.print("alloc : {}", .{len}) catch {};
- const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align);
+ const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
if (result) |buff| {
self.out_stream.print(" success!\n", .{}) catch {};
} else |err| {
@@ -41,6 +47,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
buf_align: u29,
new_len: usize,
len_align: u29,
+ ra: usize,
) error{OutOfMemory}!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);
if (new_len == 0) {
@@ -50,7 +57,7 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
} else {
self.out_stream.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
}
- if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align)) |resized_len| {
+ if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (new_len > buf.len) {
self.out_stream.print(" success!\n", .{}) catch {};
}
@@ -80,9 +87,9 @@ test "LoggingAllocator" {
const allocator = &loggingAllocator(&fixedBufferAllocator.allocator, fbs.outStream()).allocator;
var a = try allocator.alloc(u8, 10);
- a.len = allocator.shrinkBytes(a, 1, 5, 0);
+ a = allocator.shrink(a, 5);
std.debug.assert(a.len == 5);
- std.testing.expectError(error.OutOfMemory, allocator.resizeFn(allocator, a, 1, 20, 0));
+ std.testing.expectError(error.OutOfMemory, allocator.resize(a, 20));
allocator.free(a);
std.testing.expectEqualSlices(u8,
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index a8ca09fb74..5a0927ea6e 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -37,7 +37,13 @@ pub fn ValidationAllocator(comptime T: type) type {
if (*T == *Allocator) return &self.underlying_allocator;
return &self.underlying_allocator.allocator;
}
- pub fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
+ pub fn alloc(
+ allocator: *Allocator,
+ n: usize,
+ ptr_align: u29,
+ len_align: u29,
+ ret_addr: usize,
+ ) Allocator.Error![]u8 {
assert(n > 0);
assert(mem.isValidAlign(ptr_align));
if (len_align != 0) {
@@ -47,7 +53,7 @@ pub fn ValidationAllocator(comptime T: type) type {
const self = @fieldParentPtr(@This(), "allocator", allocator);
const underlying = self.getUnderlyingAllocatorPtr();
- const result = try underlying.allocFn(underlying, n, ptr_align, len_align);
+ const result = try underlying.allocFn(underlying, n, ptr_align, len_align, ret_addr);
assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
if (len_align == 0) {
assert(result.len == n);
@@ -63,6 +69,7 @@ pub fn ValidationAllocator(comptime T: type) type {
buf_align: u29,
new_len: usize,
len_align: u29,
+ ret_addr: usize,
) Allocator.Error!usize {
assert(buf.len > 0);
if (len_align != 0) {
@@ -71,7 +78,7 @@ pub fn ValidationAllocator(comptime T: type) type {
}
const self = @fieldParentPtr(@This(), "allocator", allocator);
const underlying = self.getUnderlyingAllocatorPtr();
- const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align);
+ const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align, ret_addr);
if (len_align == 0) {
assert(result == new_len);
} else {
@@ -111,7 +118,7 @@ var failAllocator = Allocator{
.allocFn = failAllocatorAlloc,
.resizeFn = Allocator.noResize,
};
-fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29) Allocator.Error![]u8 {
+fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
return error.OutOfMemory;
}
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index f7a036d5ee..8bdab81bc6 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -14,7 +14,10 @@ pub const Error = error{OutOfMemory};
/// otherwise, the length must be aligned to `len_align`.
///
/// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
-allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error![]u8,
+///
+/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
+/// If the value is `0` it means no return address has been provided.
+allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
/// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value
@@ -33,22 +36,25 @@ allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Error
/// accepting more bytes of memory from the allocator than requested.
///
/// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
-resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) Error!usize,
+///
+/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
+/// If the value is `0` it means no return address has been provided.
+resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
/// Set to resizeFn if in-place resize is not supported.
-pub fn noResize(self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) Error!usize {
+pub fn noResize(
+ self: *Allocator,
+ buf: []u8,
+ buf_align: u29,
+ new_len: usize,
+ len_align: u29,
+ ret_addr: usize,
+) Error!usize {
if (new_len > buf.len)
return error.OutOfMemory;
return new_len;
}
-/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
-/// error.OutOfMemory should be impossible.
-pub fn shrinkBytes(self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29) usize {
- assert(new_len <= buf.len);
- return self.resizeFn(self, buf, buf_align, new_len, len_align) catch unreachable;
-}
-
/// Realloc is used to modify the size or alignment of an existing allocation,
/// as well as to provide the allocator with an opportunity to move an allocation
/// to a better location.
@@ -93,9 +99,10 @@ fn reallocBytes(
/// non-zero means the length of the returned slice must be aligned by `len_align`
/// `new_len` must be aligned by `len_align`
len_align: u29,
+ return_address: usize,
) Error![]u8 {
if (old_mem.len == 0) {
- const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align);
+ const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align, return_address);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(new_mem.ptr, undefined, new_byte_count);
return new_mem;
@@ -103,10 +110,10 @@ fn reallocBytes(
if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
if (new_byte_count <= old_mem.len) {
- const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align);
+ const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
return old_mem.ptr[0..shrunk_len];
}
- if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align)) |resized_len| {
+ if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
assert(resized_len >= new_byte_count);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
@@ -116,7 +123,7 @@ fn reallocBytes(
if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
return error.OutOfMemory;
}
- return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align);
+ return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align, return_address);
}
/// Move the given memory to a new location in the given allocator to accomodate a new
@@ -128,10 +135,11 @@ fn moveBytes(
new_len: usize,
new_alignment: u29,
len_align: u29,
+ return_address: usize,
) Error![]u8 {
assert(old_mem.len > 0);
assert(new_len > 0);
- const new_mem = try self.allocFn(self, new_len, new_alignment, len_align);
+ const new_mem = try self.allocFn(self, new_len, new_alignment, len_align, return_address);
@memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
// TODO DISABLED TO AVOID BUGS IN TRANSLATE C
// TODO see also https://github.com/ziglang/zig/issues/4298
@@ -139,7 +147,7 @@ fn moveBytes(
// generated C code will be a sequence of 0xaa (the undefined value), meaning
// it is printing data that has been freed
//@memset(old_mem.ptr, undefined, old_mem.len);
- _ = self.shrinkBytes(old_mem, old_align, 0, 0);
+ _ = self.shrinkBytes(old_mem, old_align, 0, 0, return_address);
return new_mem;
}
@@ -147,7 +155,7 @@ fn moveBytes(
/// Call `destroy` with the result to free the memory.
pub fn create(self: *Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return &(T{});
- const slice = try self.alloc(T, 1);
+ const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
return &slice[0];
}
@@ -158,7 +166,7 @@ pub fn destroy(self: *Allocator, ptr: anytype) void {
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
const ptr_align = @typeInfo(@TypeOf(ptr)).Pointer.alignment;
- _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0);
+ _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0, @returnAddress());
}
/// Allocates an array of `n` items of type `T` and sets all the
@@ -170,7 +178,7 @@ pub fn destroy(self: *Allocator, ptr: anytype) void {
///
/// For allocating a single item, see `create`.
pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
- return self.alignedAlloc(T, null, n);
+ return self.allocAdvancedWithRetAddr(T, null, n, .exact, @returnAddress());
}
pub fn allocWithOptions(
@@ -180,13 +188,25 @@ pub fn allocWithOptions(
/// null means naturally aligned
comptime optional_alignment: ?u29,
comptime optional_sentinel: ?Elem,
+) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
+ return self.allocWithOptionsRetAddr(Elem, n, optional_alignment, optional_sentinel, @returnAddress());
+}
+
+pub fn allocWithOptionsRetAddr(
+ self: *Allocator,
+ comptime Elem: type,
+ n: usize,
+ /// null means naturally aligned
+ comptime optional_alignment: ?u29,
+ comptime optional_sentinel: ?Elem,
+ return_address: usize,
) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
if (optional_sentinel) |sentinel| {
- const ptr = try self.alignedAlloc(Elem, optional_alignment, n + 1);
+ const ptr = try self.allocAdvancedWithRetAddr(Elem, optional_alignment, n + 1, .exact, return_address);
ptr[n] = sentinel;
return ptr[0..n :sentinel];
} else {
- return self.alignedAlloc(Elem, optional_alignment, n);
+ return self.allocAdvancedWithRetAddr(Elem, optional_alignment, n, .exact, return_address);
}
}
@@ -208,8 +228,13 @@ fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, compti
/// For allocating a single item, see `create`.
///
/// Deprecated; use `allocWithOptions`.
-pub fn allocSentinel(self: *Allocator, comptime Elem: type, n: usize, comptime sentinel: Elem) Error![:sentinel]Elem {
- return self.allocWithOptions(Elem, n, null, sentinel);
+pub fn allocSentinel(
+ self: *Allocator,
+ comptime Elem: type,
+ n: usize,
+ comptime sentinel: Elem,
+) Error![:sentinel]Elem {
+ return self.allocWithOptionsRetAddr(Elem, n, null, sentinel, @returnAddress());
}
/// Deprecated: use `allocAdvanced`
@@ -220,10 +245,9 @@ pub fn alignedAlloc(
comptime alignment: ?u29,
n: usize,
) Error![]align(alignment orelse @alignOf(T)) T {
- return self.allocAdvanced(T, alignment, n, .exact);
+ return self.allocAdvancedWithRetAddr(T, alignment, n, .exact, @returnAddress());
}
-const Exact = enum { exact, at_least };
pub fn allocAdvanced(
self: *Allocator,
comptime T: type,
@@ -231,9 +255,23 @@ pub fn allocAdvanced(
comptime alignment: ?u29,
n: usize,
exact: Exact,
+) Error![]align(alignment orelse @alignOf(T)) T {
+ return self.allocAdvancedWithRetAddr(T, alignment, n, exact, @returnAddress());
+}
+
+pub const Exact = enum { exact, at_least };
+
+pub fn allocAdvancedWithRetAddr(
+ self: *Allocator,
+ comptime T: type,
+ /// null means naturally aligned
+ comptime alignment: ?u29,
+ n: usize,
+ exact: Exact,
+ return_address: usize,
) Error![]align(alignment orelse @alignOf(T)) T {
const a = if (alignment) |a| blk: {
- if (a == @alignOf(T)) return allocAdvanced(self, T, null, n, exact);
+ if (a == @alignOf(T)) return allocAdvancedWithRetAddr(self, T, null, n, exact, return_address);
break :blk a;
} else @alignOf(T);
@@ -245,8 +283,12 @@ pub fn allocAdvanced(
// TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
// access certain type information about T without creating a circular dependency in async
// functions that heap-allocate their own frame with @Frame(func).
- const sizeOfT = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
- const byte_slice = try self.allocFn(self, byte_count, a, if (exact == .exact) @as(u29, 0) else sizeOfT);
+ const size_of_T = if (alignment == null) @intCast(u29, @divExact(byte_count, n)) else @sizeOf(T);
+ const len_align: u29 = switch (exact) {
+ .exact => 0,
+ .at_least => size_of_T,
+ };
+ const byte_slice = try self.allocFn(self, byte_count, a, len_align, return_address);
switch (exact) {
.exact => assert(byte_slice.len == byte_count),
.at_least => assert(byte_slice.len >= byte_count),
@@ -271,7 +313,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
- const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0);
+ const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
assert(rc == new_byte_count);
const new_byte_slice = old_mem.ptr[0..new_byte_count];
return mem.bytesAsSlice(T, new_byte_slice);
@@ -292,7 +334,7 @@ pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
- return self.reallocAdvanced(old_mem, old_alignment, new_n, .exact);
+ return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .exact, @returnAddress());
}
pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
@@ -300,17 +342,7 @@ pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
- return self.reallocAdvanced(old_mem, old_alignment, new_n, .at_least);
-}
-
-// Deprecated: use `reallocAdvanced`
-pub fn alignedRealloc(
- self: *Allocator,
- old_mem: anytype,
- comptime new_alignment: u29,
- new_n: usize,
-) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
- return self.reallocAdvanced(old_mem, new_alignment, new_n, .exact);
+ return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .at_least, @returnAddress());
}
/// This is the same as `realloc`, except caller may additionally request
@@ -322,6 +354,17 @@ pub fn reallocAdvanced(
comptime new_alignment: u29,
new_n: usize,
exact: Exact,
+) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
+ return self.reallocAdvancedWithRetAddr(old_mem, new_alignment, new_n, exact, @returnAddress());
+}
+
+pub fn reallocAdvancedWithRetAddr(
+ self: *Allocator,
+ old_mem: anytype,
+ comptime new_alignment: u29,
+ new_n: usize,
+ exact: Exact,
+ return_address: usize,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
@@ -336,7 +379,11 @@ pub fn reallocAdvanced(
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
- const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, if (exact == .exact) @as(u29, 0) else @sizeOf(T));
+ const len_align: u29 = switch (exact) {
+ .exact => 0,
+ .at_least => @sizeOf(T),
+ };
+ const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, len_align, return_address);
return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice));
}
@@ -350,7 +397,7 @@ pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
break :t []align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
- return self.alignedShrink(old_mem, old_alignment, new_n);
+ return self.alignedShrinkWithRetAddr(old_mem, old_alignment, new_n, @returnAddress());
}
/// This is the same as `shrink`, except caller may additionally request
@@ -361,6 +408,19 @@ pub fn alignedShrink(
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
+) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
+ return self.alignedShrinkWithRetAddr(old_mem, new_alignment, new_n, @returnAddress());
+}
+
+/// This is the same as `alignedShrink`, except caller may additionally pass
+/// the return address of the first stack frame, which may be relevant for
+/// allocators which collect stack traces.
+pub fn alignedShrinkWithRetAddr(
+ self: *Allocator,
+ old_mem: anytype,
+ comptime new_alignment: u29,
+ new_n: usize,
+ return_address: usize,
) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
@@ -377,7 +437,7 @@ pub fn alignedShrink(
const old_byte_slice = mem.sliceAsBytes(old_mem);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
- _ = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, 0);
+ _ = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, 0, return_address);
return old_mem[0..new_n];
}
@@ -391,7 +451,7 @@ pub fn free(self: *Allocator, memory: anytype) void {
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(non_const_ptr, undefined, bytes_len);
- _ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0);
+ _ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0, @returnAddress());
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
@@ -408,3 +468,19 @@ pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
new_buf[m.len] = 0;
return new_buf[0..m.len :0];
}
+
+/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
+/// error.OutOfMemory should be impossible.
+/// This function allows a runtime `buf_align` value. Callers should generally prefer
+/// to call `shrink` directly.
+pub fn shrinkBytes(
+ self: *Allocator,
+ buf: []u8,
+ buf_align: u29,
+ new_len: usize,
+ len_align: u29,
+ return_address: usize,
+) usize {
+ assert(new_len <= buf.len);
+ return self.resizeFn(self, buf, buf_align, new_len, len_align, return_address) catch unreachable;
+}
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index 7febaaac64..d8b243d0fa 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -45,12 +45,18 @@ pub const FailingAllocator = struct {
};
}
- fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29) error{OutOfMemory}![]u8 {
+ fn alloc(
+ allocator: *std.mem.Allocator,
+ len: usize,
+ ptr_align: u29,
+ len_align: u29,
+ return_address: usize,
+ ) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (self.index == self.fail_index) {
return error.OutOfMemory;
}
- const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align);
+ const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align, return_address);
self.allocated_bytes += result.len;
self.allocations += 1;
self.index += 1;
@@ -63,9 +69,10 @@ pub const FailingAllocator = struct {
old_align: u29,
new_len: usize,
len_align: u29,
+ ra: usize,
) error{OutOfMemory}!usize {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
- const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align) catch |e| {
+ const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align, ra) catch |e| {
std.debug.assert(new_len > old_mem.len);
return e;
};
From 9f5a7d5922f48170551e7f6938b6de9eadeba0ff Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 00:37:22 -0700
Subject: [PATCH 019/153] utilize math.ceilPowerOfTwo
---
lib/std/heap/general_purpose_allocator.zig | 13 +++----------
lib/std/math.zig | 4 ++++
2 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 9abf9cf253..98f029579c 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -475,7 +475,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (aligned_size > largest_bucket_object_size) {
return self.resizeLarge(old_mem, old_align, new_size, len_align, ret_addr);
}
- const size_class_hint = up_to_nearest_power_of_2(usize, aligned_size);
+ const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
var bucket_index = math.log2(size_class_hint);
var size_class: usize = size_class_hint;
@@ -512,7 +512,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return @as(usize, 0);
}
const new_aligned_size = math.max(new_size, old_align);
- const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
+ const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
if (new_size_class <= size_class) {
return new_size;
}
@@ -553,7 +553,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return slice;
} else {
- const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
+ const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
const ptr = try self.allocSlot(new_size_class, ret_addr);
return ptr[0..len];
}
@@ -586,13 +586,6 @@ const TraceKind = enum {
free,
};
-fn up_to_nearest_power_of_2(comptime T: type, n: T) T {
- var power: T = 1;
- while (power < n)
- power *= 2;
- return power;
-}
-
fn hash_addr(addr: usize) u32 {
if (@sizeOf(usize) == @sizeOf(u32))
return addr;
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 111a618cef..17237ea9f0 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -837,6 +837,10 @@ pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
return @intCast(T, x);
}
+pub fn ceilPowerOfTwoAssert(comptime T: type, value: T) T {
+ return ceilPowerOfTwo(T, value) catch unreachable;
+}
+
test "math.ceilPowerOfTwoPromote" {
testCeilPowerOfTwoPromote();
comptime testCeilPowerOfTwoPromote();
From 1b1921f0e26b9cb8ac78003c9061acac2da5e7ab Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 01:00:29 -0700
Subject: [PATCH 020/153] stage1: deal with WebAssembly not supporting
@returnAddress()
This makes `@returnAddress()` return 0 for WebAssembly (when not using
the Emscripten OS) and avoids trying to capture stack traces for the
general purpose allocator on that target.
---
lib/std/heap.zig | 11 +++++++++--
lib/std/heap/general_purpose_allocator.zig | 5 ++++-
src/codegen.cpp | 6 ++++++
3 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 88f27fad49..0c9dbca369 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -37,7 +37,7 @@ var c_allocator_state = Allocator{
.resizeFn = cResize,
};
-fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocator.Error![]u8 {
+fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
assert(ptr_align <= @alignOf(c_longdouble));
const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
if (len_align == 0) {
@@ -54,7 +54,14 @@ fn cAlloc(self: *Allocator, len: usize, ptr_align: u29, len_align: u29) Allocato
return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
}
-fn cResize(self: *Allocator, buf: []u8, old_align: u29, new_len: usize, len_align: u29) Allocator.Error!usize {
+fn cResize(
+ self: *Allocator,
+ buf: []u8,
+ old_align: u29,
+ new_len: usize,
+ len_align: u29,
+ ret_addr: usize,
+) Allocator.Error!usize {
if (new_len == 0) {
c.free(buf.ptr);
return 0;
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 98f029579c..5369560356 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -102,9 +102,12 @@ const StackTrace = std.builtin.StackTrace;
/// Integer type for pointing to slots in a small allocation
const SlotIndex = std.meta.Int(false, math.log2(page_size) + 1);
+// WebAssembly doesn't support stack tracing yet.
+const default_stack_trace_frames: usize = if (std.Target.current.cpu.arch.isWasm()) 0 else 4;
+
pub const Config = struct {
/// Number of stack frames to capture.
- stack_trace_frames: usize = if (std.debug.runtime_safety) @as(usize, 4) else @as(usize, 0),
+ stack_trace_frames: usize = if (std.debug.runtime_safety) default_stack_trace_frames else @as(usize, 0),
/// If true, the allocator will have two fields:
/// * `total_requested_bytes` which tracks the total allocated bytes of memory requested.
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 023c94f245..6941eae466 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5886,6 +5886,12 @@ static LLVMValueRef ir_render_breakpoint(CodeGen *g, IrExecutableGen *executable
static LLVMValueRef ir_render_return_address(CodeGen *g, IrExecutableGen *executable,
IrInstGenReturnAddress *instruction)
{
+ if (target_is_wasm(g->zig_target) && g->zig_target->os != OsEmscripten) {
+ // I got this error from LLVM 10:
+ // "Non-Emscripten WebAssembly hasn't implemented __builtin_return_address"
+ return LLVMConstNull(get_llvm_type(g, instruction->base.value->type));
+ }
+
LLVMValueRef zero = LLVMConstNull(g->builtin_types.entry_i32->llvm_type);
LLVMValueRef ptr_val = LLVMBuildCall(g->builder, get_return_address_fn_val(g), &zero, 1, "");
return LLVMBuildPtrToInt(g->builder, ptr_val, g->builtin_types.entry_usize->llvm_type, "");
From 88ac0c128755b076cf357a9289e9ee847bf2794a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 01:21:08 -0700
Subject: [PATCH 021/153] restore previous behavior of allowing
std.testing.allocator
even outside of unit tests
---
lib/std/testing.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index e5584e42cc..27026149bc 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -5,7 +5,7 @@ pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAll
/// This should only be used in temporary test programs.
pub const allocator = &allocator_instance.allocator;
-pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{}) = undefined;
+pub var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
pub const failing_allocator = &failing_allocator_instance.allocator;
pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);
From 72b5ceed66005f03933a4e5bbd0eda08378b0fd0 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 02:15:34 -0700
Subject: [PATCH 022/153] update langref in light of new general purpose
allocator
---
doc/langref.html.in | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 941c2cc1a6..dbdeb0fb42 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -9357,9 +9357,17 @@ pub fn main() !void {
is handled correctly? In this case, use {#syntax#}std.testing.FailingAllocator{#endsyntax#}.
- Finally, if none of the above apply, you need a general purpose allocator. Zig does not
- yet have a general purpose allocator in the standard library,
- but one is being actively developed.
+ Are you writing a test? In this case, use {#syntax#}std.testing.allocator{#endsyntax#}.
+
+
+ Finally, if none of the above apply, you need a general purpose allocator.
+ Zig's general purpose allocator is available as a function that takes a {#link|comptime#}
+ {#link|struct#} of configuration options and returns a type.
+ Generally, you will set up one {#syntax#}std.heap.GeneralPurposeAllocator#{endsyntax#} in
+ your main function, and then pass it or sub-allocators around to various parts of your
+ application.
+
+
You can also consider {#link|Implementing an Allocator#}.
From 051aadd7810de9b68f415ec00a3867f6f783b961 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 02:15:46 -0700
Subject: [PATCH 023/153] std lib general purpose allocator: disable stack
tracing on mips
Sadly, trying to collect stack frames goes into an infinite loop on
mips. This sets the default number of stack frames to collect to 0 on
mips.
---
doc/langref.html.in | 2 +-
lib/std/heap/general_purpose_allocator.zig | 18 ++++++++++++++++--
2 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index dbdeb0fb42..edafc82ab8 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -9363,7 +9363,7 @@ pub fn main() !void {
Finally, if none of the above apply, you need a general purpose allocator.
Zig's general purpose allocator is available as a function that takes a {#link|comptime#}
{#link|struct#} of configuration options and returns a type.
- Generally, you will set up one {#syntax#}std.heap.GeneralPurposeAllocator#{endsyntax#} in
+ Generally, you will set up one {#syntax#}std.heap.GeneralPurposeAllocator{#endsyntax#} in
your main function, and then pass it or sub-allocators around to various parts of your
application.
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 5369560356..27f2d10586 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -102,8 +102,22 @@ const StackTrace = std.builtin.StackTrace;
/// Integer type for pointing to slots in a small allocation
const SlotIndex = std.meta.Int(false, math.log2(page_size) + 1);
-// WebAssembly doesn't support stack tracing yet.
-const default_stack_trace_frames: usize = if (std.Target.current.cpu.arch.isWasm()) 0 else 4;
+const sys_can_stack_trace = switch (std.Target.current.cpu.arch) {
+ // Observed to go into an infinite loop.
+ // TODO: Make this work.
+ .mips,
+ .mipsel,
+ => false,
+
+ // `@returnAddress()` in LLVM 10 gives
+ // "Non-Emscripten WebAssembly hasn't implemented __builtin_return_address".
+ .wasm32,
+ .wasm64,
+ => std.Target.current.os.tag == .emscripten,
+
+ else => true,
+};
+const default_stack_trace_frames: usize = if (sys_can_stack_trace) 4 else 0;
pub const Config = struct {
/// Number of stack frames to capture.
From cb9405cdbdd7c9dc30c84ea9a8a17ced2cae66af Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 12:04:19 -0700
Subject: [PATCH 024/153] don't collect stack trace frames in release safe mode
by default
We don't pass no-omit-frame-pointer in release safe by default, so it
also makes sense to not try to collect stack trace frames by default in
release safe mode.
---
lib/std/heap/general_purpose_allocator.zig | 112 +++++++++------------
1 file changed, 47 insertions(+), 65 deletions(-)
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 27f2d10586..2e2fc6d010 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -95,6 +95,7 @@
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
+const mem = std.mem;
const Allocator = std.mem.Allocator;
const page_size = std.mem.page_size;
const StackTrace = std.builtin.StackTrace;
@@ -117,11 +118,15 @@ const sys_can_stack_trace = switch (std.Target.current.cpu.arch) {
else => true,
};
-const default_stack_trace_frames: usize = if (sys_can_stack_trace) 4 else 0;
+const default_sys_stack_trace_frames: usize = if (sys_can_stack_trace) 4 else 0;
+const default_stack_trace_frames: usize = switch (std.builtin.mode) {
+ .Debug => default_sys_stack_trace_frames,
+ else => 0,
+};
pub const Config = struct {
/// Number of stack frames to capture.
- stack_trace_frames: usize = if (std.debug.runtime_safety) default_stack_trace_frames else @as(usize, 0),
+ stack_trace_frames: usize = default_stack_trace_frames,
/// If true, the allocator will have two fields:
/// * `total_requested_bytes` which tracks the total allocated bytes of memory requested.
@@ -163,7 +168,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const one_trace_size = @sizeOf(usize) * stack_n;
const traces_per_slot = 2;
- pub const Error = std.mem.Allocator.Error;
+ pub const Error = mem.Allocator.Error;
const small_bucket_count = math.log2(page_size);
const largest_bucket_object_size = 1 << (small_bucket_count - 1);
@@ -246,7 +251,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
fn bucketStackFramesStart(size_class: usize) usize {
- return std.mem.alignForward(
+ return mem.alignForward(
@sizeOf(BucketHeader) + usedBitsCount(size_class),
@alignOf(usize),
);
@@ -322,7 +327,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
- std.mem.set(usize, addresses, 0);
+ if (stack_n == 0) return;
+ mem.set(usize, addresses, 0);
var stack_trace = StackTrace{
.instruction_addresses = addresses,
.index = 0,
@@ -679,14 +685,14 @@ test "realloc" {
// This reallocation should keep its pointer address.
const old_slice = slice;
slice = try allocator.realloc(slice, 2);
- assert(old_slice.ptr == slice.ptr);
- assert(slice[0] == 0x12);
+ std.testing.expect(old_slice.ptr == slice.ptr);
+ std.testing.expect(slice[0] == 0x12);
slice[1] = 0x34;
// This requires upgrading to a larger size class
slice = try allocator.realloc(slice, 17);
- assert(slice[0] == 0x12);
- assert(slice[1] == 0x34);
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[1] == 0x34);
}
test "shrink" {
@@ -697,18 +703,18 @@ test "shrink" {
var slice = try allocator.alloc(u8, 20);
defer allocator.free(slice);
- std.mem.set(u8, slice, 0x11);
+ mem.set(u8, slice, 0x11);
slice = allocator.shrink(slice, 17);
for (slice) |b| {
- assert(b == 0x11);
+ std.testing.expect(b == 0x11);
}
slice = allocator.shrink(slice, 16);
for (slice) |b| {
- assert(b == 0x11);
+ std.testing.expect(b == 0x11);
}
}
@@ -722,10 +728,10 @@ test "large object - grow" {
var old = slice1;
slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
- assert(slice1.ptr == old.ptr);
+ std.testing.expect(slice1.ptr == old.ptr);
slice1 = try allocator.realloc(slice1, page_size * 2);
- assert(slice1.ptr == old.ptr);
+ std.testing.expect(slice1.ptr == old.ptr);
slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
}
@@ -743,8 +749,8 @@ test "realloc small object to large object" {
// This requires upgrading to a large object
const large_object_size = page_size * 2 + 50;
slice = try allocator.realloc(slice, large_object_size);
- assert(slice[0] == 0x12);
- assert(slice[60] == 0x34);
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[60] == 0x34);
}
test "shrink large object to large object" {
@@ -758,16 +764,16 @@ test "shrink large object to large object" {
slice[60] = 0x34;
slice = try allocator.resize(slice, page_size * 2 + 1);
- assert(slice[0] == 0x12);
- assert(slice[60] == 0x34);
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[60] == 0x34);
slice = allocator.shrink(slice, page_size * 2 + 1);
- assert(slice[0] == 0x12);
- assert(slice[60] == 0x34);
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[60] == 0x34);
slice = try allocator.realloc(slice, page_size * 2);
- assert(slice[0] == 0x12);
- assert(slice[60] == 0x34);
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[60] == 0x34);
}
test "shrink large object to large object with larger alignment" {
@@ -783,7 +789,7 @@ test "shrink large object to large object with larger alignment" {
defer allocator.free(slice);
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
- while (isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
+ while (mem.isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, alloc_size);
}
@@ -794,8 +800,8 @@ test "shrink large object to large object with larger alignment" {
slice[60] = 0x34;
slice = try allocator.reallocAdvanced(slice, page_size * 2, alloc_size / 2, .exact);
- assert(slice[0] == 0x12);
- assert(slice[60] == 0x34);
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[60] == 0x34);
}
test "realloc large object to small object" {
@@ -809,8 +815,8 @@ test "realloc large object to small object" {
slice[16] = 0x34;
slice = try allocator.realloc(slice, 19);
- assert(slice[0] == 0x12);
- assert(slice[16] == 0x34);
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[16] == 0x34);
}
test "non-page-allocator backing allocator" {
@@ -834,7 +840,7 @@ test "realloc large object to larger alignment" {
defer allocator.free(slice);
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
- while (isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
+ while (mem.isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
}
@@ -845,40 +851,16 @@ test "realloc large object to larger alignment" {
slice[16] = 0x34;
slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100, .exact);
- assert(slice[0] == 0x12);
- assert(slice[16] == 0x34);
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[16] == 0x34);
slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25, .exact);
- assert(slice[0] == 0x12);
- assert(slice[16] == 0x34);
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[16] == 0x34);
slice = try allocator.reallocAdvanced(slice, page_size * 2, page_size * 2 + 100, .exact);
- assert(slice[0] == 0x12);
- assert(slice[16] == 0x34);
-}
-
-fn isAligned(addr: usize, alignment: usize) bool {
- // 000010000 // example addr
- // 000001111 // subtract 1
- // 111110000 // binary not
- const aligned_addr = (addr & ~(alignment - 1));
- return aligned_addr == addr;
-}
-
-test "isAligned works" {
- assert(isAligned(0, 4));
- assert(isAligned(1, 1));
- assert(isAligned(2, 1));
- assert(isAligned(2, 2));
- assert(!isAligned(2, 4));
- assert(isAligned(3, 1));
- assert(!isAligned(3, 2));
- assert(!isAligned(3, 4));
- assert(isAligned(4, 4));
- assert(isAligned(4, 2));
- assert(isAligned(4, 1));
- assert(!isAligned(4, 8));
- assert(!isAligned(4, 16));
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[16] == 0x34);
}
test "large object shrinks to small but allocation fails during shrink" {
@@ -895,8 +877,8 @@ test "large object shrinks to small but allocation fails during shrink" {
// Next allocation will fail in the backing allocator of the GeneralPurposeAllocator
slice = allocator.shrink(slice, 4);
- assert(slice[0] == 0x12);
- assert(slice[3] == 0x34);
+ std.testing.expect(slice[0] == 0x12);
+ std.testing.expect(slice[3] == 0x34);
}
test "objects of size 1024 and 2048" {
@@ -919,20 +901,20 @@ test "setting a memory cap" {
gpda.setRequestedMemoryLimit(1010);
const small = try allocator.create(i32);
- assert(gpda.total_requested_bytes == 4);
+ std.testing.expect(gpda.total_requested_bytes == 4);
const big = try allocator.alloc(u8, 1000);
- assert(gpda.total_requested_bytes == 1004);
+ std.testing.expect(gpda.total_requested_bytes == 1004);
std.testing.expectError(error.OutOfMemory, allocator.create(u64));
allocator.destroy(small);
- assert(gpda.total_requested_bytes == 1000);
+ std.testing.expect(gpda.total_requested_bytes == 1000);
allocator.free(big);
- assert(gpda.total_requested_bytes == 0);
+ std.testing.expect(gpda.total_requested_bytes == 0);
const exact = try allocator.alloc(u8, 1010);
- assert(gpda.total_requested_bytes == 1010);
+ std.testing.expect(gpda.total_requested_bytes == 1010);
allocator.free(exact);
}
From a2f8c23a96d6124f1f263aa68087f18321e298c7 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 13:04:30 -0700
Subject: [PATCH 025/153] std.heap.page_allocator: Windows support for growing
without remapping
---
lib/std/heap.zig | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 0c9dbca369..4a2837d408 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -225,7 +225,7 @@ const PageAllocator = struct {
w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE);
return 0;
}
- if (new_size < buf_unaligned.len) {
+ if (new_size <= buf_unaligned.len) {
const base_addr = @ptrToInt(buf_unaligned.ptr);
const old_addr_end = base_addr + buf_unaligned.len;
const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size);
@@ -240,10 +240,10 @@ const PageAllocator = struct {
}
return alignPageAllocLen(new_size_aligned, new_size, len_align);
}
- if (new_size == buf_unaligned.len) {
+ const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size);
+ if (new_size_aligned <= old_size_aligned) {
return alignPageAllocLen(new_size_aligned, new_size, len_align);
}
- // new_size > buf_unaligned.len not implemented
return error.OutOfMemory;
}
From 4d0f83e23e736ca41ad71040fb119e44bf956819 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 13:05:04 -0700
Subject: [PATCH 026/153] GeneralPurposeAllocator: naming convention refactor
---
lib/std/heap/general_purpose_allocator.zig | 104 ++++++++++-----------
1 file changed, 52 insertions(+), 52 deletions(-)
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 2e2fc6d010..04cce1dffd 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -623,9 +623,9 @@ fn eql_addr(a: usize, b: usize) bool {
const test_config = Config{};
test "small allocations - free in same order" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
@@ -642,9 +642,9 @@ test "small allocations - free in same order" {
}
test "small allocations - free in reverse order" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
@@ -661,9 +661,9 @@ test "small allocations - free in reverse order" {
}
test "large allocations" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
const ptr1 = try allocator.alloc(u64, 42768);
const ptr2 = try allocator.alloc(u64, 52768);
@@ -674,9 +674,9 @@ test "large allocations" {
}
test "realloc" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
defer allocator.free(slice);
@@ -696,9 +696,9 @@ test "realloc" {
}
test "shrink" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var slice = try allocator.alloc(u8, 20);
defer allocator.free(slice);
@@ -719,14 +719,14 @@ test "shrink" {
}
test "large object - grow" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
defer allocator.free(slice1);
- var old = slice1;
+ const old = slice1;
slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
std.testing.expect(slice1.ptr == old.ptr);
@@ -737,9 +737,9 @@ test "large object - grow" {
}
test "realloc small object to large object" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var slice = try allocator.alloc(u8, 70);
defer allocator.free(slice);
@@ -754,9 +754,9 @@ test "realloc small object to large object" {
}
test "shrink large object to large object" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -777,9 +777,9 @@ test "shrink large object to large object" {
}
test "shrink large object to large object with larger alignment" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
@@ -805,9 +805,9 @@ test "shrink large object to large object with larger alignment" {
}
test "realloc large object to small object" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -820,18 +820,18 @@ test "realloc large object to small object" {
}
test "non-page-allocator backing allocator" {
- var gpda = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
}
test "realloc large object to larger alignment" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
@@ -865,9 +865,9 @@ test "realloc large object to larger alignment" {
test "large object shrinks to small but allocation fails during shrink" {
var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
- var gpda = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -882,9 +882,9 @@ test "large object shrinks to small but allocation fails during shrink" {
}
test "objects of size 1024 and 2048" {
- var gpda = GeneralPurposeAllocator(test_config){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(test_config){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
const slice = try allocator.alloc(u8, 1025);
const slice2 = try allocator.alloc(u8, 3000);
@@ -894,27 +894,27 @@ test "objects of size 1024 and 2048" {
}
test "setting a memory cap" {
- var gpda = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
- defer std.testing.expect(!gpda.deinit());
- const allocator = &gpda.allocator;
+ var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
+ defer std.testing.expect(!gpa.deinit());
+ const allocator = &gpa.allocator;
- gpda.setRequestedMemoryLimit(1010);
+ gpa.setRequestedMemoryLimit(1010);
const small = try allocator.create(i32);
- std.testing.expect(gpda.total_requested_bytes == 4);
+ std.testing.expect(gpa.total_requested_bytes == 4);
const big = try allocator.alloc(u8, 1000);
- std.testing.expect(gpda.total_requested_bytes == 1004);
+ std.testing.expect(gpa.total_requested_bytes == 1004);
std.testing.expectError(error.OutOfMemory, allocator.create(u64));
allocator.destroy(small);
- std.testing.expect(gpda.total_requested_bytes == 1000);
+ std.testing.expect(gpa.total_requested_bytes == 1000);
allocator.free(big);
- std.testing.expect(gpda.total_requested_bytes == 0);
+ std.testing.expect(gpa.total_requested_bytes == 0);
const exact = try allocator.alloc(u8, 1010);
- std.testing.expect(gpda.total_requested_bytes == 1010);
+ std.testing.expect(gpa.total_requested_bytes == 1010);
allocator.free(exact);
}
From 5b57e35ce0c83dc48587c19cf28db509bba2226b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 13:46:18 -0700
Subject: [PATCH 027/153] fix general purpose allocator test cases on Windows
The tests are cleverly testing some alignment stuff, but were getting
thwarted by Windows choosing to allocate 64K aligned pages.
---
lib/std/heap/general_purpose_allocator.zig | 22 +++++++++++++++++-----
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 04cce1dffd..dbab929c5c 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -317,7 +317,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
}
for (self.large_allocations.items()) |*large_alloc| {
- std.debug.print("\nMemory leak detected:\n", .{});
+ std.debug.print("\nMemory leak detected (0x{x}):\n", .{@ptrToInt(large_alloc.value.bytes.ptr)});
large_alloc.value.dumpStackTrace();
leaks = true;
}
@@ -788,8 +788,15 @@ test "shrink large object to large object with larger alignment" {
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
defer allocator.free(slice);
+ const big_alignment: usize = switch (std.Target.current.os.tag) {
+ .windows => page_size * 32, // Windows aligns to 64K.
+ else => page_size * 2,
+ };
+ // This loop allocates until we find a page that is not aligned to the big
+ // alignment. Then we shrink the allocation after the loop, but increase the
+ // alignment to the higher one, that we know will force it to realloc.
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
- while (mem.isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
+ while (mem.isAligned(@ptrToInt(slice.ptr), big_alignment)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, alloc_size);
}
@@ -799,7 +806,7 @@ test "shrink large object to large object with larger alignment" {
slice[0] = 0x12;
slice[60] = 0x34;
- slice = try allocator.reallocAdvanced(slice, page_size * 2, alloc_size / 2, .exact);
+ slice = try allocator.reallocAdvanced(slice, big_alignment, alloc_size / 2, .exact);
std.testing.expect(slice[0] == 0x12);
std.testing.expect(slice[60] == 0x34);
}
@@ -839,8 +846,13 @@ test "realloc large object to larger alignment" {
var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
defer allocator.free(slice);
+ const big_alignment: usize = switch (std.Target.current.os.tag) {
+ .windows => page_size * 32, // Windows aligns to 64K.
+ else => page_size * 2,
+ };
+ // This loop allocates until we find a page that is not aligned to the big alignment.
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
- while (mem.isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
+ while (mem.isAligned(@ptrToInt(slice.ptr), big_alignment)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
}
@@ -858,7 +870,7 @@ test "realloc large object to larger alignment" {
std.testing.expect(slice[0] == 0x12);
std.testing.expect(slice[16] == 0x34);
- slice = try allocator.reallocAdvanced(slice, page_size * 2, page_size * 2 + 100, .exact);
+ slice = try allocator.reallocAdvanced(slice, big_alignment, page_size * 2 + 100, .exact);
std.testing.expect(slice[0] == 0x12);
std.testing.expect(slice[16] == 0x34);
}
From f98cffc615bb7ea73eee3bd24ae8a957fe8ebb82 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 15:59:03 -0700
Subject: [PATCH 028/153] std: general purpose allocator: use AutoHashMap
As pointed out by Sahnvour, AutoHashMap is both more convenient and will
have better performance in this case.
---
lib/std/heap/general_purpose_allocator.zig | 13 +------------
1 file changed, 1 insertion(+), 12 deletions(-)
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index dbab929c5c..cb7f36fcc3 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -189,7 +189,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
std.debug.dumpStackTrace(stack_trace);
}
};
- const LargeAllocTable = std.HashMapUnmanaged(usize, LargeAlloc, hash_addr, eql_addr, false);
+ const LargeAllocTable = std.AutoHashMapUnmanaged(usize, LargeAlloc);
// Bucket: In memory, in order:
// * BucketHeader
@@ -609,17 +609,6 @@ const TraceKind = enum {
free,
};
-fn hash_addr(addr: usize) u32 {
- if (@sizeOf(usize) == @sizeOf(u32))
- return addr;
- comptime assert(@sizeOf(usize) == 8);
- return @intCast(u32, addr >> 32) ^ @truncate(u32, addr);
-}
-
-fn eql_addr(a: usize, b: usize) bool {
- return a == b;
-}
-
const test_config = Config{};
test "small allocations - free in same order" {
From fd47839064775c0cc956f12a012f0893e1f3a440 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 8 Aug 2020 18:19:48 -0700
Subject: [PATCH 029/153] stage2: fix crash on empty source file
---
src-self-hosted/Module.zig | 8 +++-----
src-self-hosted/link.zig | 14 ++++++++++----
src-self-hosted/main.zig | 3 +++
test/stage2/compare_output.zig | 5 ++++-
4 files changed, 20 insertions(+), 10 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index e056b02b2e..e3233c6403 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -949,10 +949,8 @@ pub fn update(self: *Module) !void {
try self.deleteDecl(decl);
}
- if (self.totalErrorCount() == 0) {
- // This is needed before reading the error flags.
- try self.bin_file.flush();
- }
+ // This is needed before reading the error flags.
+ try self.bin_file.flush();
self.link_error_flags = self.bin_file.errorFlags();
@@ -2537,7 +2535,7 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
}
}
- return self.fail(scope, inst.src, "TODO implement type coercion from {} to {}", .{ inst.ty, dest_type });
+ return self.fail(scope, inst.src, "expected {}, found {}", .{ dest_type, inst.ty });
}
pub fn storePtr(self: *Module, scope: *Scope, src: usize, ptr: *Inst, uncasted_value: *Inst) !*Inst {
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 6fd9329188..8bfd3bdb48 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -1172,7 +1172,10 @@ pub const File = struct {
self.debug_aranges_section_dirty = false;
}
- if (self.debug_line_header_dirty) {
+ if (self.debug_line_header_dirty) debug_line: {
+ if (self.dbg_line_fn_first == null) {
+ break :debug_line; // Error in module; leave debug_line_header_dirty=true.
+ }
const dbg_line_prg_off = self.getDebugLineProgramOff();
const dbg_line_prg_end = self.getDebugLineProgramEnd();
assert(dbg_line_prg_end != 0);
@@ -1403,18 +1406,21 @@ pub const File = struct {
self.shdr_table_dirty = false;
}
if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
- log.debug(.link, "no_entry_point_found = true\n", .{});
+ log.debug(.link, "flushing. no_entry_point_found = true\n", .{});
self.error_flags.no_entry_point_found = true;
} else {
+ log.debug(.link, "flushing. no_entry_point_found = false\n", .{});
self.error_flags.no_entry_point_found = false;
try self.writeElfHeader();
}
- // The point of flush() is to commit changes, so nothing should be dirty after this.
+ // The point of flush() is to commit changes, so in theory, nothing should
+ // be dirty after this. However, it is possible for some things to remain
+ // dirty because they fail to be written in the event of compile errors,
+ // such as debug_line_header_dirty.
assert(!self.debug_info_section_dirty);
assert(!self.debug_abbrev_section_dirty);
assert(!self.debug_aranges_section_dirty);
- assert(!self.debug_line_header_dirty);
assert(!self.phdr_table_dirty);
assert(!self.shdr_table_dirty);
assert(!self.shstrtab_dirty);
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 5e2120f41a..2795da0017 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -64,6 +64,9 @@ var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
pub fn main() !void {
const gpa = if (std.builtin.link_libc) std.heap.c_allocator else &general_purpose_allocator.allocator;
+ defer if (!std.builtin.link_libc) {
+ _ = general_purpose_allocator.deinit();
+ };
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
const arena = &arena_instance.allocator;
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
index 1e4db06572..bb3e542f13 100644
--- a/test/stage2/compare_output.zig
+++ b/test/stage2/compare_output.zig
@@ -23,6 +23,9 @@ pub fn addCases(ctx: *TestContext) !void {
{
var case = ctx.exe("hello world with updates", linux_x64);
+
+ case.addError("", &[_][]const u8{":1:1: error: no entry point found"});
+
// Regular old hello world
case.addCompareOutput(
\\export fn _start() noreturn {
@@ -123,7 +126,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\
);
}
-
+
{
var case = ctx.exe("hello world", linux_riscv64);
// Regular old hello world
From cd8e6b66d046f73e4a9a4211557f52bdc1424de7 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sun, 9 Aug 2020 10:47:52 +0200
Subject: [PATCH 030/153] Address some review comments
---
lib/std/os/windows.zig | 42 +++++++++++++++++++++----------------
lib/std/os/windows/bits.zig | 2 +-
2 files changed, 25 insertions(+), 19 deletions(-)
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 544b4d7a61..6451c64e06 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -924,23 +924,12 @@ pub fn GetFinalPathNameByHandle(
out_buffer: []u16,
) GetFinalPathNameByHandleError![]u16 {
// Get normalized path; doesn't include volume name though.
- var path_buffer: [PATH_MAX_WIDE * 2]u8 align(@alignOf(FILE_NAME_INFORMATION)) = undefined;
- var io: IO_STATUS_BLOCK = undefined;
- var rc = ntdll.NtQueryInformationFile(hFile, &io, &path_buffer, path_buffer.len, FILE_INFORMATION_CLASS.FileNormalizedNameInformation);
- switch (rc) {
- .SUCCESS => {},
- .INVALID_PARAMETER => unreachable,
- else => return unexpectedStatus(rc),
- }
+ var path_buffer: [@sizeOf(FILE_NAME_INFORMATION) + PATH_MAX_WIDE * 2]u8 align(@alignOf(FILE_NAME_INFORMATION)) = undefined;
+ try QueryInformationFile(hFile, FILE_INFORMATION_CLASS.FileNormalizedNameInformation, path_buffer[0..]);
// Get NT volume name.
- var volume_buffer: [MAX_PATH]u8 align(@alignOf(FILE_NAME_INFORMATION)) = undefined; // MAX_PATH bytes should be enough since it's Windows-defined name
- rc = ntdll.NtQueryInformationFile(hFile, &io, &volume_buffer, volume_buffer.len, FILE_INFORMATION_CLASS.FileVolumeNameInformation);
- switch (rc) {
- .SUCCESS => {},
- .INVALID_PARAMETER => unreachable,
- else => return unexpectedStatus(rc),
- }
+ var volume_buffer: [@sizeOf(FILE_NAME_INFORMATION) + MAX_PATH]u8 align(@alignOf(FILE_NAME_INFORMATION)) = undefined; // MAX_PATH bytes should be enough since it's Windows-defined name
+ try QueryInformationFile(hFile, FILE_INFORMATION_CLASS.FileVolumeNameInformation, volume_buffer[0..]);
const file_name = @ptrCast(*const FILE_NAME_INFORMATION, &path_buffer[0]);
const file_name_u16 = @ptrCast([*]const u16, &file_name.FileName[0])[0 .. file_name.FileNameLength / 2];
@@ -1014,9 +1003,7 @@ pub fn GetFinalPathNameByHandle(
// with traditional DOS drive letters, so pick the first one available.
const prefix = &[_]u16{ '\\', 'D', 'o', 's', 'D', 'e', 'v', 'i', 'c', 'e', 's', '\\' };
- if (std.mem.indexOf(u16, symlink, prefix)) |idx| {
- if (idx != 0) continue;
-
+ if (std.mem.startsWith(u16, symlink, prefix)) {
const drive_letter = symlink[prefix.len..];
if (out_buffer.len < drive_letter.len + file_name_u16.len) return error.NameTooLong;
@@ -1035,6 +1022,25 @@ pub fn GetFinalPathNameByHandle(
}
}
+pub const QueryInformationFileError = error{Unexpected};
+
+pub fn QueryInformationFile(
+ handle: HANDLE,
+ info_class: FILE_INFORMATION_CLASS,
+ out_buffer: []u8,
+) QueryInformationFileError!void {
+ var io: IO_STATUS_BLOCK = undefined;
+ const len_bytes = std.math.cast(u32, out_buffer.len) catch |err| switch (err) {
+ error.Overflow => std.math.maxInt(u32), // If the provided buffer is larger than what we can handle, set size to max what we can handle
+ };
+ const rc = ntdll.NtQueryInformationFile(handle, &io, out_buffer.ptr, len_bytes, info_class);
+ switch (rc) {
+ .SUCCESS => {},
+ .INVALID_PARAMETER => unreachable,
+ else => return unexpectedStatus(rc),
+ }
+}
+
pub const GetFileSizeError = error{Unexpected};
pub fn GetFileSizeEx(hFile: HANDLE) GetFileSizeError!u64 {
diff --git a/lib/std/os/windows/bits.zig b/lib/std/os/windows/bits.zig
index 516af6d4fc..44bc1ba437 100644
--- a/lib/std/os/windows/bits.zig
+++ b/lib/std/os/windows/bits.zig
@@ -1590,4 +1590,4 @@ pub const MOUNTMGR_MOUNT_POINTS = extern struct {
NumberOfMountPoints: ULONG,
MountPoints: [1]MOUNTMGR_MOUNT_POINT,
};
-pub const IOCTL_MOUNTMGR_QUERY_POINTS: ULONG = 0x6d0008;
\ No newline at end of file
+pub const IOCTL_MOUNTMGR_QUERY_POINTS: ULONG = 0x6d0008;
From 06a1184c92dd51630c542df6f34b09ec4dad341b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felix=20=28xq=29=20Quei=C3=9Fner?=
Date: Sun, 9 Aug 2020 12:48:26 +0200
Subject: [PATCH 031/153] Fixes double alignment
---
lib/std/fmt.zig | 24 ++++++++++++++++++++++--
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index c9ba3b3470..4e35e23d4b 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -560,13 +560,19 @@ fn formatFloatValue(
options: FormatOptions,
writer: anytype,
) !void {
+ // this buffer should be enough to display all decimal places of a decimal f64 number.
+ var buf: [512]u8 = undefined;
+ var buf_stream = std.io.fixedBufferStream(&buf);
+
if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "e")) {
- return formatFloatScientific(value, options, writer);
+ try formatFloatScientific(value, options, buf_stream.writer());
} else if (comptime std.mem.eql(u8, fmt, "d")) {
- return formatFloatDecimal(value, options, writer);
+ try formatFloatDecimal(value, options, buf_stream.writer());
} else {
@compileError("Unknown format string: '" ++ fmt ++ "'");
}
+
+ return formatBuf(buf[0..buf_stream.pos], options, writer);
}
pub fn formatText(
@@ -1791,3 +1797,17 @@ test "padding" {
try testFmt("==================Filled", "{:=>24}", .{"Filled"});
try testFmt(" Centered ", "{:^24}", .{"Centered"});
}
+
+test "decimal float padding" {
+ var number: f32 = 3.1415;
+ try testFmt("left-pad: **3.141\n", "left-pad: {d:*>7.3}\n", .{number});
+ try testFmt("center-pad: *3.141*\n", "center-pad: {d:*^7.3}\n", .{number});
+ try testFmt("right-pad: 3.141**\n", "right-pad: {d:*<7.3}\n", .{number});
+}
+
+test "sci float padding" {
+ var number: f32 = 3.1415;
+ try testFmt("left-pad: **3.141e+00\n", "left-pad: {e:*>11.3}\n", .{number});
+ try testFmt("center-pad: *3.141e+00*\n", "center-pad: {e:*^11.3}\n", .{number});
+ try testFmt("right-pad: 3.141e+00**\n", "right-pad: {e:*<11.3}\n", .{number});
+}
From ada06e2996c70f5d25c60f06d2d171e105a020ad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felix=20=28xq=29=20Quei=C3=9Fner?=
Date: Sun, 9 Aug 2020 14:09:02 +0200
Subject: [PATCH 032/153] Makes formatFloatValue not return error.NoSpaceLeft
anymore.
---
lib/std/fmt.zig | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 4e35e23d4b..6141b18b5b 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -565,9 +565,15 @@ fn formatFloatValue(
var buf_stream = std.io.fixedBufferStream(&buf);
if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "e")) {
- try formatFloatScientific(value, options, buf_stream.writer());
+ formatFloatScientific(value, options, buf_stream.writer()) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ else => |e| return e,
+ };
} else if (comptime std.mem.eql(u8, fmt, "d")) {
- try formatFloatDecimal(value, options, buf_stream.writer());
+ formatFloatDecimal(value, options, buf_stream.writer()) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ else => |e| return e,
+ };
} else {
@compileError("Unknown format string: '" ++ fmt ++ "'");
}
From 6701046cdd85d7a702be38a593e8385a5dfb4562 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felix=20Quei=C3=9Fner?=
Date: Sun, 9 Aug 2020 17:40:58 +0200
Subject: [PATCH 033/153] Uses getWritten instead of .pos + slicing
Co-authored-by: Joachim Schmidt
---
lib/std/fmt.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 6141b18b5b..6dbef5db67 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -578,7 +578,7 @@ fn formatFloatValue(
@compileError("Unknown format string: '" ++ fmt ++ "'");
}
- return formatBuf(buf[0..buf_stream.pos], options, writer);
+ return formatBuf(buf_stream.getWritten(), options, writer);
}
pub fn formatText(
From 73b9f657460b040eaed4145a88f76db997d73987 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Mon, 10 Aug 2020 12:30:24 +0200
Subject: [PATCH 034/153] Validate DOS path before returning
And some other minor refactors which address more review comments.
---
lib/std/os/windows.zig | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 6451c64e06..cf42266d26 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -897,6 +897,7 @@ pub fn SetFilePointerEx_CURRENT_get(handle: HANDLE) SetFilePointerError!u64 {
}
pub const GetFinalPathNameByHandleError = error{
+ BadPathName,
FileNotFound,
NameTooLong,
Unexpected,
@@ -925,11 +926,11 @@ pub fn GetFinalPathNameByHandle(
) GetFinalPathNameByHandleError![]u16 {
// Get normalized path; doesn't include volume name though.
var path_buffer: [@sizeOf(FILE_NAME_INFORMATION) + PATH_MAX_WIDE * 2]u8 align(@alignOf(FILE_NAME_INFORMATION)) = undefined;
- try QueryInformationFile(hFile, FILE_INFORMATION_CLASS.FileNormalizedNameInformation, path_buffer[0..]);
+ try QueryInformationFile(hFile, .FileNormalizedNameInformation, path_buffer[0..]);
// Get NT volume name.
var volume_buffer: [@sizeOf(FILE_NAME_INFORMATION) + MAX_PATH]u8 align(@alignOf(FILE_NAME_INFORMATION)) = undefined; // MAX_PATH bytes should be enough since it's Windows-defined name
- try QueryInformationFile(hFile, FILE_INFORMATION_CLASS.FileVolumeNameInformation, volume_buffer[0..]);
+ try QueryInformationFile(hFile, .FileVolumeNameInformation, volume_buffer[0..]);
const file_name = @ptrCast(*const FILE_NAME_INFORMATION, &path_buffer[0]);
const file_name_u16 = @ptrCast([*]const u16, &file_name.FileName[0])[0 .. file_name.FileNameLength / 2];
@@ -1010,8 +1011,14 @@ pub fn GetFinalPathNameByHandle(
std.mem.copy(u16, out_buffer[0..], drive_letter);
std.mem.copy(u16, out_buffer[drive_letter.len..], file_name_u16);
+ const total_len = drive_letter.len + file_name_u16.len;
- return out_buffer[0 .. drive_letter.len + file_name_u16.len];
+ // Validate that DOS does not contain any spurious nul bytes.
+ if (std.mem.indexOfScalar(u16, out_buffer[0..total_len], 0)) |_| {
+ return error.BadPathName;
+ }
+
+ return out_buffer[0..total_len];
}
}
@@ -1030,9 +1037,7 @@ pub fn QueryInformationFile(
out_buffer: []u8,
) QueryInformationFileError!void {
var io: IO_STATUS_BLOCK = undefined;
- const len_bytes = std.math.cast(u32, out_buffer.len) catch |err| switch (err) {
- error.Overflow => std.math.maxInt(u32), // If the provided buffer is larger than what we can handle, set size to max what we can handle
- };
+ const len_bytes = std.math.cast(u32, out_buffer.len) catch unreachable;
const rc = ntdll.NtQueryInformationFile(handle, &io, out_buffer.ptr, len_bytes, info_class);
switch (rc) {
.SUCCESS => {},
From 901bf0a2e2821caa1087b03ed308700ec9135c87 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Mon, 10 Aug 2020 20:44:22 +0200
Subject: [PATCH 035/153] Convert prefix from u8 to u16 programmatically
---
lib/std/os/windows.zig | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index cf42266d26..c93feb20d7 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -1002,7 +1002,10 @@ pub fn GetFinalPathNameByHandle(
// Look for `\DosDevices\` prefix. We don't really care if there are more than one symlinks
// with traditional DOS drive letters, so pick the first one available.
- const prefix = &[_]u16{ '\\', 'D', 'o', 's', 'D', 'e', 'v', 'i', 'c', 'e', 's', '\\' };
+ const prefix_u8 = "\\DosDevices\\";
+ var prefix_buf_u16: [prefix_u8.len]u16 = undefined;
+ const prefix_len_u16 = std.unicode.utf8ToUtf16Le(prefix_buf_u16[0..], prefix_u8[0..]) catch unreachable;
+ const prefix = prefix_buf_u16[0..prefix_len_u16];
if (std.mem.startsWith(u16, symlink, prefix)) {
const drive_letter = symlink[prefix.len..];
From 56e1080ba3535939565f5291526c184c34f6f63a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 10 Aug 2020 17:05:39 -0700
Subject: [PATCH 036/153] std: dwarf_bits correction
---
lib/std/dwarf_bits.zig | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/lib/std/dwarf_bits.zig b/lib/std/dwarf_bits.zig
index 1d018eed66..a40fa8a277 100644
--- a/lib/std/dwarf_bits.zig
+++ b/lib/std/dwarf_bits.zig
@@ -69,7 +69,7 @@ pub const TAG_lo_user = 0x4080;
pub const TAG_hi_user = 0xffff;
// SGI/MIPS Extensions.
-pub const DW_TAG_MIPS_loop = 0x4081;
+pub const TAG_MIPS_loop = 0x4081;
// HP extensions. See: ftp://ftp.hp.com/pub/lang/tools/WDB/wdb-4.0.tar.gz .
pub const TAG_HP_array_descriptor = 0x4090;
@@ -263,9 +263,9 @@ pub const AT_MIPS_has_inlines = 0x200b;
// HP extensions.
pub const AT_HP_block_index = 0x2000;
-pub const AT_HP_unmodifiable = 0x2001; // Same as DW_AT_MIPS_fde.
-pub const AT_HP_prologue = 0x2005; // Same as DW_AT_MIPS_loop_unroll.
-pub const AT_HP_epilogue = 0x2008; // Same as DW_AT_MIPS_stride.
+pub const AT_HP_unmodifiable = 0x2001; // Same as AT_MIPS_fde.
+pub const AT_HP_prologue = 0x2005; // Same as AT_MIPS_loop_unroll.
+pub const AT_HP_epilogue = 0x2008; // Same as AT_MIPS_stride.
pub const AT_HP_actuals_stmt_list = 0x2010;
pub const AT_HP_proc_per_section = 0x2011;
pub const AT_HP_raw_data_ptr = 0x2012;
From 900a897e90eca08f78ef632fd11e01ec9bcb3674 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Sun, 9 Aug 2020 14:57:49 +0200
Subject: [PATCH 037/153] Update tools/process_headers.zig to latest zig
---
tools/process_headers.zig | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/tools/process_headers.zig b/tools/process_headers.zig
index 639cc797be..d17069401e 100644
--- a/tools/process_headers.zig
+++ b/tools/process_headers.zig
@@ -248,7 +248,7 @@ const Contents = struct {
};
const HashToContents = std.StringHashMap(Contents);
-const TargetToHash = std.HashMap(DestTarget, []const u8, DestTarget.hash, DestTarget.eql);
+const TargetToHash = std.HashMap(DestTarget, []const u8, DestTarget.hash, DestTarget.eql, true);
const PathTable = std.StringHashMap(*TargetToHash);
const LibCVendor = enum {
@@ -339,7 +339,7 @@ pub fn main() !void {
try dir_stack.append(target_include_dir);
while (dir_stack.popOrNull()) |full_dir_name| {
- var dir = std.fs.cwd().openDirList(full_dir_name) catch |err| switch (err) {
+ var dir = std.fs.cwd().openDir(full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
error.FileNotFound => continue :search,
error.AccessDenied => continue :search,
else => return err,
@@ -354,7 +354,8 @@ pub fn main() !void {
.Directory => try dir_stack.append(full_path),
.File => {
const rel_path = try std.fs.path.relative(allocator, target_include_dir, full_path);
- const raw_bytes = try std.io.readFileAlloc(allocator, full_path);
+ const max_size = 2 * 1024 * 1024 * 1024;
+ const raw_bytes = try std.fs.cwd().readFileAlloc(allocator, full_path, max_size);
const trimmed = std.mem.trim(u8, raw_bytes, " \r\n\t");
total_bytes += raw_bytes.len;
const hash = try allocator.alloc(u8, 32);
@@ -365,14 +366,14 @@ pub fn main() !void {
const gop = try hash_to_contents.getOrPut(hash);
if (gop.found_existing) {
max_bytes_saved += raw_bytes.len;
- gop.kv.value.hit_count += 1;
+ gop.entry.value.hit_count += 1;
std.debug.warn("duplicate: {} {} ({Bi:2})\n", .{
libc_target.name,
rel_path,
raw_bytes.len,
});
} else {
- gop.kv.value = Contents{
+ gop.entry.value = Contents{
.bytes = trimmed,
.hit_count = 1,
.hash = hash,
@@ -380,13 +381,13 @@ pub fn main() !void {
};
}
const path_gop = try path_table.getOrPut(rel_path);
- const target_to_hash = if (path_gop.found_existing) path_gop.kv.value else blk: {
+ const target_to_hash = if (path_gop.found_existing) path_gop.entry.value else blk: {
const ptr = try allocator.create(TargetToHash);
ptr.* = TargetToHash.init(allocator);
- path_gop.kv.value = ptr;
+ path_gop.entry.value = ptr;
break :blk ptr;
};
- assert((try target_to_hash.put(dest_target, hash)) == null);
+ try target_to_hash.putNoClobber(dest_target, hash);
},
else => std.debug.warn("warning: weird file: {}\n", .{full_path}),
}
@@ -410,7 +411,7 @@ pub fn main() !void {
{
var hash_it = path_kv.value.iterator();
while (hash_it.next()) |hash_kv| {
- const contents = &hash_to_contents.get(hash_kv.value).?.value;
+ const contents = &hash_to_contents.get(hash_kv.value).?;
try contents_list.append(contents);
}
}
@@ -432,7 +433,7 @@ pub fn main() !void {
}
var hash_it = path_kv.value.iterator();
while (hash_it.next()) |hash_kv| {
- const contents = &hash_to_contents.get(hash_kv.value).?.value;
+ const contents = &hash_to_contents.get(hash_kv.value).?;
if (contents.is_generic) continue;
const dest_target = hash_kv.key;
From 20510d209be44590f390c370f9e477d84ab46454 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 10 Aug 2020 19:34:27 -0700
Subject: [PATCH 038/153] GeneralPurposeAllocator: use std.log instead of
std.debug.print
`std.builtin.StackTrace` gains a `format` function.
GeneralPurposeAllocator uses `std.log.err` instead of directly printing
to stderr. Some errors are recoverable.
The test runner is modified to fail the test run if any log messages of
"err" or worse severity are encountered.
self-hosted is modified to always print log messages of "err" severity
or worse even if they have not been explicitly enabled.
This makes GeneralPurposeAllocator available on the freestanding target.
---
lib/std/builtin.zig | 19 ++++++
lib/std/heap/general_purpose_allocator.zig | 68 +++++++++++++---------
lib/std/special/test_runner.zig | 12 +++-
src-self-hosted/main.zig | 22 +++----
4 files changed, 84 insertions(+), 37 deletions(-)
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 499011eab9..e9c53d7ee5 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -52,6 +52,25 @@ pub const subsystem: ?SubSystem = blk: {
pub const StackTrace = struct {
index: usize,
instruction_addresses: []usize,
+
+ pub fn format(
+ self: StackTrace,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+ defer arena.deinit();
+ const debug_info = std.debug.getSelfDebugInfo() catch |err| {
+ return writer.print("\nUnable to print stack trace: Unable to open debug info: {}\n", .{@errorName(err)});
+ };
+ const tty_config = std.debug.detectTTYConfig();
+ try writer.writeAll("\n");
+ std.debug.writeStackTrace(self, writer, &arena.allocator, debug_info, tty_config) catch |err| {
+ try writer.print("Unable to print stack trace: {}\n", .{@errorName(err)});
+ };
+ try writer.writeAll("\n");
+ }
};
/// This data structure is used by the Zig language code generation and
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index cb7f36fcc3..957c38939c 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -4,12 +4,12 @@
//!
//! ### `OptimizationMode.debug` and `OptimizationMode.release_safe`:
//!
-//! * Detect double free, and print stack trace of:
+//! * Detect double free, and emit stack trace of:
//! - Where it was first allocated
//! - Where it was freed the first time
//! - Where it was freed the second time
//!
-//! * Detect leaks and print stack trace of:
+//! * Detect leaks and emit stack trace of:
//! - Where it was allocated
//!
//! * When a page of memory is no longer needed, give it back to resident memory
@@ -178,15 +178,18 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
stack_addresses: [stack_n]usize,
fn dumpStackTrace(self: *LargeAlloc) void {
+ std.debug.dumpStackTrace(self.getStackTrace());
+ }
+
+ fn getStackTrace(self: *LargeAlloc) std.builtin.StackTrace {
var len: usize = 0;
while (len < stack_n and self.stack_addresses[len] != 0) {
len += 1;
}
- const stack_trace = StackTrace{
+ return .{
.instruction_addresses = &self.stack_addresses,
.index = len,
};
- std.debug.dumpStackTrace(stack_trace);
}
};
const LargeAllocTable = std.AutoHashMapUnmanaged(usize, LargeAlloc);
@@ -282,15 +285,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
while (true) : (bit_index += 1) {
const is_used = @truncate(u1, used_byte >> bit_index) != 0;
if (is_used) {
- std.debug.print("\nMemory leak detected:\n", .{});
const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index);
- const stack_trace = bucketStackTrace(
- bucket,
- size_class,
- slot_index,
- .alloc,
- );
- std.debug.dumpStackTrace(stack_trace);
+ const stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
+ std.log.err(.std, "Memory leak detected: {}", .{stack_trace});
leaks = true;
}
if (bit_index == math.maxInt(u3))
@@ -301,8 +298,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return leaks;
}
- /// Returns whether there were leaks.
- pub fn deinit(self: *Self) bool {
+ /// Emits log messages for leaks and then returns whether there were any leaks.
+ pub fn detectLeaks(self: *Self) bool {
var leaks = false;
for (self.buckets) |optional_bucket, bucket_i| {
const first_bucket = optional_bucket orelse continue;
@@ -317,10 +314,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
}
for (self.large_allocations.items()) |*large_alloc| {
- std.debug.print("\nMemory leak detected (0x{x}):\n", .{@ptrToInt(large_alloc.value.bytes.ptr)});
- large_alloc.value.dumpStackTrace();
+ std.log.err(.std, "Memory leak detected: {}", .{large_alloc.value.getStackTrace()});
leaks = true;
}
+ return leaks;
+ }
+
+ pub fn deinit(self: *Self) bool {
+ const leaks = if (config.safety) self.detectLeaks() else false;
self.large_allocations.deinit(self.backing_allocator);
self.* = undefined;
return leaks;
@@ -442,13 +443,18 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
};
if (config.safety and old_mem.len != entry.value.bytes.len) {
- std.debug.print("\nAllocation size {} bytes does not match free size {}. Allocated here:\n", .{
+ var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
+ var free_stack_trace = StackTrace{
+ .instruction_addresses = &addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(ret_addr, &free_stack_trace);
+ std.log.err(.std, "Allocation size {} bytes does not match free size {}. Allocation: {} Free: {}", .{
entry.value.bytes.len,
old_mem.len,
+ entry.value.getStackTrace(),
+ free_stack_trace,
});
- entry.value.dumpStackTrace();
-
- @panic("\nFree here:");
}
const result_len = try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
@@ -518,14 +524,24 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
if (!is_used) {
if (config.safety) {
- // print allocation stack trace
- std.debug.print("\nDouble free detected, allocated here:\n", .{});
const alloc_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
- std.debug.dumpStackTrace(alloc_stack_trace);
- std.debug.print("\nFirst free here:\n", .{});
const free_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .free);
- std.debug.dumpStackTrace(free_stack_trace);
- @panic("\nSecond free here:");
+ var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
+ var second_free_stack_trace = StackTrace{
+ .instruction_addresses = &addresses,
+ .index = 0,
+ };
+ std.debug.captureStackTrace(ret_addr, &second_free_stack_trace);
+ std.log.err(.std, "Double free detected. Allocation: {} First free: {} Second free: {}", .{
+ alloc_stack_trace,
+ free_stack_trace,
+ second_free_stack_trace,
+ });
+ if (new_size == 0) {
+ // Recoverable.
+ return @as(usize, 0);
+ }
+ @panic("Unrecoverable double free");
} else {
unreachable;
}
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index 384cd7f572..f4c8c6de9d 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -4,6 +4,8 @@ const builtin = @import("builtin");
pub const io_mode: io.Mode = builtin.test_io_mode;
+var log_err_count: usize = 0;
+
pub fn main() anyerror!void {
const test_fn_list = builtin.test_functions;
var ok_count: usize = 0;
@@ -75,8 +77,13 @@ pub fn main() anyerror!void {
} else {
std.debug.print("{} passed; {} skipped.\n", .{ ok_count, skip_count });
}
+ if (log_err_count != 0) {
+ std.debug.print("{} errors were logged.\n", .{log_err_count});
+ }
if (leaks != 0) {
- std.debug.print("{} tests leaked memory\n", .{ok_count});
+ std.debug.print("{} tests leaked memory.\n", .{ok_count});
+ }
+ if (leaks != 0 or log_err_count != 0) {
std.process.exit(1);
}
}
@@ -87,6 +94,9 @@ pub fn log(
comptime format: []const u8,
args: anytype,
) void {
+ if (@enumToInt(message_level) <= @enumToInt(std.log.Level.err)) {
+ log_err_count += 1;
+ }
if (@enumToInt(message_level) <= @enumToInt(std.testing.log_level)) {
std.debug.print("[{}] ({}): " ++ format, .{ @tagName(scope), @tagName(message_level) } ++ args);
}
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 2795da0017..2f6610da28 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -42,17 +42,19 @@ pub fn log(
comptime format: []const u8,
args: anytype,
) void {
- if (@enumToInt(level) > @enumToInt(std.log.level))
- return;
+ // Hide anything more verbose than warn unless it was added with `-Dlog=foo`.
+ if (@enumToInt(level) > @enumToInt(std.log.level) or
+ @enumToInt(level) > @enumToInt(std.log.Level.warn))
+ {
+ const scope_name = @tagName(scope);
+ const ok = comptime for (build_options.log_scopes) |log_scope| {
+ if (mem.eql(u8, log_scope, scope_name))
+ break true;
+ } else false;
- const scope_name = @tagName(scope);
- const ok = comptime for (build_options.log_scopes) |log_scope| {
- if (mem.eql(u8, log_scope, scope_name))
- break true;
- } else false;
-
- if (!ok)
- return;
+ if (!ok)
+ return;
+ }
const prefix = "[" ++ @tagName(level) ++ "] " ++ "(" ++ @tagName(scope) ++ "): ";
From dfcac3cd76c899c412fa3b78946ebce4f42f2ae8 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Sun, 9 Aug 2020 16:26:18 +0300
Subject: [PATCH 039/153] translate-c: always add extern token for functions
without body
---
src-self-hosted/translate_c.zig | 2 +-
test/translate_c.zig | 14 +++++++-------
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig
index 2bd38d9f1c..9b5ad2644b 100644
--- a/src-self-hosted/translate_c.zig
+++ b/src-self-hosted/translate_c.zig
@@ -4907,7 +4907,7 @@ fn finishTransFnProto(
const pub_tok = if (is_pub) try appendToken(rp.c, .Keyword_pub, "pub") else null;
const extern_export_inline_tok = if (is_export)
try appendToken(rp.c, .Keyword_export, "export")
- else if (cc == .C and is_extern)
+ else if (is_extern)
try appendToken(rp.c, .Keyword_extern, "extern")
else
null;
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 07719466d6..7e94798efb 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -1260,11 +1260,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\void __attribute__((cdecl)) foo4(float *a);
\\void __attribute__((thiscall)) foo5(float *a);
, &[_][]const u8{
- \\pub fn foo1(a: [*c]f32) callconv(.Fastcall) void;
- \\pub fn foo2(a: [*c]f32) callconv(.Stdcall) void;
- \\pub fn foo3(a: [*c]f32) callconv(.Vectorcall) void;
+ \\pub extern fn foo1(a: [*c]f32) callconv(.Fastcall) void;
+ \\pub extern fn foo2(a: [*c]f32) callconv(.Stdcall) void;
+ \\pub extern fn foo3(a: [*c]f32) callconv(.Vectorcall) void;
\\pub extern fn foo4(a: [*c]f32) void;
- \\pub fn foo5(a: [*c]f32) callconv(.Thiscall) void;
+ \\pub extern fn foo5(a: [*c]f32) callconv(.Thiscall) void;
});
cases.addWithTarget("Calling convention", CrossTarget.parse(.{
@@ -1274,8 +1274,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\void __attribute__((pcs("aapcs"))) foo1(float *a);
\\void __attribute__((pcs("aapcs-vfp"))) foo2(float *a);
, &[_][]const u8{
- \\pub fn foo1(a: [*c]f32) callconv(.AAPCS) void;
- \\pub fn foo2(a: [*c]f32) callconv(.AAPCSVFP) void;
+ \\pub extern fn foo1(a: [*c]f32) callconv(.AAPCS) void;
+ \\pub extern fn foo2(a: [*c]f32) callconv(.AAPCSVFP) void;
});
cases.addWithTarget("Calling convention", CrossTarget.parse(.{
@@ -1284,7 +1284,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
}) catch unreachable,
\\void __attribute__((aarch64_vector_pcs)) foo1(float *a);
, &[_][]const u8{
- \\pub fn foo1(a: [*c]f32) callconv(.Vectorcall) void;
+ \\pub extern fn foo1(a: [*c]f32) callconv(.Vectorcall) void;
});
cases.add("Parameterless function prototypes",
From cf5932b236424d8e1b52500bc80ffb6d79e20134 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Sun, 9 Aug 2020 19:24:30 +0300
Subject: [PATCH 040/153] translate-c: convert int to bool if bool is expected
---
src-self-hosted/clang.zig | 1 +
src-self-hosted/translate_c.zig | 8 ++++++++
src/zig_clang.cpp | 13 +++++++++++++
src/zig_clang.h | 1 +
test/translate_c.zig | 24 ++++++++++++------------
5 files changed, 35 insertions(+), 12 deletions(-)
diff --git a/src-self-hosted/clang.zig b/src-self-hosted/clang.zig
index 6222f3d8e4..2551829084 100644
--- a/src-self-hosted/clang.zig
+++ b/src-self-hosted/clang.zig
@@ -1086,6 +1086,7 @@ pub extern fn ZigClangVarDecl_getTypeSourceInfo_getType(self: *const struct_ZigC
pub extern fn ZigClangIntegerLiteral_EvaluateAsInt(*const ZigClangIntegerLiteral, *ZigClangExprEvalResult, *const ZigClangASTContext) bool;
pub extern fn ZigClangIntegerLiteral_getBeginLoc(*const ZigClangIntegerLiteral) ZigClangSourceLocation;
+pub extern fn ZigClangIntegerLiteral_isZero(*const ZigClangIntegerLiteral, *bool, *const ZigClangASTContext) bool;
pub extern fn ZigClangReturnStmt_getRetValue(*const ZigClangReturnStmt) ?*const ZigClangExpr;
diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig
index 9b5ad2644b..a6b5862fae 100644
--- a/src-self-hosted/translate_c.zig
+++ b/src-self-hosted/translate_c.zig
@@ -1684,6 +1684,14 @@ fn transBoolExpr(
lrvalue: LRValue,
grouped: bool,
) TransError!*ast.Node {
+ if (ZigClangStmt_getStmtClass(@ptrCast(*const ZigClangStmt, expr)) == .IntegerLiteralClass) {
+ var is_zero: bool = undefined;
+ if (!ZigClangIntegerLiteral_isZero(@ptrCast(*const ZigClangIntegerLiteral, expr), &is_zero, rp.c.clang_context)) {
+ return revertAndWarn(rp, error.UnsupportedTranslation, ZigClangExpr_getBeginLoc(expr), "invalid integer literal", .{});
+ }
+ return try transCreateNodeBoolLiteral(rp.c, !is_zero);
+ }
+
const lparen = if (grouped)
try appendToken(rp.c, .LParen, "(")
else
diff --git a/src/zig_clang.cpp b/src/zig_clang.cpp
index f333a6b7a0..21d0c5c0ca 100644
--- a/src/zig_clang.cpp
+++ b/src/zig_clang.cpp
@@ -2510,6 +2510,19 @@ struct ZigClangSourceLocation ZigClangIntegerLiteral_getBeginLoc(const struct Zi
return bitcast(casted->getBeginLoc());
}
+bool ZigClangIntegerLiteral_isZero(const struct ZigClangIntegerLiteral *self, bool *result, const struct ZigClangASTContext *ctx) {
+ auto casted_self = reinterpret_cast<const clang::IntegerLiteral *>(self);
+ auto casted_ctx = reinterpret_cast<const clang::ASTContext *>(ctx);
+ clang::Expr::EvalResult eval_result;
+ if (!casted_self->EvaluateAsInt(eval_result, *casted_ctx)) {
+ return false;
+ }
+ const llvm::APSInt result_int = eval_result.Val.getInt();
+ const llvm::APSInt zero(result_int.getBitWidth(), result_int.isUnsigned());
+ *result = zero == result_int;
+ return true;
+}
+
const struct ZigClangExpr *ZigClangReturnStmt_getRetValue(const struct ZigClangReturnStmt *self) {
auto casted = reinterpret_cast<const clang::ReturnStmt *>(self);
return reinterpret_cast<const ZigClangExpr *>(casted->getRetValue());
diff --git a/src/zig_clang.h b/src/zig_clang.h
index 94a37d98c7..2c358d880e 100644
--- a/src/zig_clang.h
+++ b/src/zig_clang.h
@@ -1086,6 +1086,7 @@ ZIG_EXTERN_C struct ZigClangQualType ZigClangCStyleCastExpr_getType(const struct
ZIG_EXTERN_C bool ZigClangIntegerLiteral_EvaluateAsInt(const struct ZigClangIntegerLiteral *, struct ZigClangExprEvalResult *, const struct ZigClangASTContext *);
ZIG_EXTERN_C struct ZigClangSourceLocation ZigClangIntegerLiteral_getBeginLoc(const struct ZigClangIntegerLiteral *);
+ZIG_EXTERN_C bool ZigClangIntegerLiteral_isZero(const struct ZigClangIntegerLiteral *, bool *, const struct ZigClangASTContext *);
ZIG_EXTERN_C const struct ZigClangExpr *ZigClangReturnStmt_getRetValue(const struct ZigClangReturnStmt *);
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 7e94798efb..ac549695aa 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -99,10 +99,10 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
, &[_][]const u8{
\\pub export fn foo() void {
- \\ while (@as(c_int, 0) != 0) while (@as(c_int, 0) != 0) {};
- \\ while (true) while (@as(c_int, 0) != 0) {};
+ \\ while (false) while (false) {};
+ \\ while (true) while (false) {};
\\ while (true) while (true) {
- \\ if (!(@as(c_int, 0) != 0)) break;
+ \\ if (!false) break;
\\ };
\\}
});
@@ -1634,8 +1634,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
, &[_][]const u8{
\\pub export fn foo() c_int {
\\ var a: c_int = 5;
- \\ while (@as(c_int, 2) != 0) a = 2;
- \\ while (@as(c_int, 4) != 0) {
+ \\ while (true) a = 2;
+ \\ while (true) {
\\ var a_1: c_int = 4;
\\ a_1 = 9;
\\ _ = @as(c_int, 6);
@@ -1644,11 +1644,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ while (true) {
\\ var a_1: c_int = 2;
\\ a_1 = 12;
- \\ if (!(@as(c_int, 4) != 0)) break;
+ \\ if (!true) break;
\\ }
\\ while (true) {
\\ a = 7;
- \\ if (!(@as(c_int, 4) != 0)) break;
+ \\ if (!true) break;
\\ }
\\}
});
@@ -1702,8 +1702,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
, &[_][]const u8{
\\pub export fn bar() c_int {
- \\ if ((if (@as(c_int, 2) != 0) @as(c_int, 5) else (if (@as(c_int, 5) != 0) @as(c_int, 4) else @as(c_int, 6))) != 0) _ = @as(c_int, 2);
- \\ return if (@as(c_int, 2) != 0) @as(c_int, 5) else if (@as(c_int, 5) != 0) @as(c_int, 4) else @as(c_int, 6);
+ \\ if ((if (true) @as(c_int, 5) else (if (true) @as(c_int, 4) else @as(c_int, 6))) != 0) _ = @as(c_int, 2);
+ \\ return if (true) @as(c_int, 5) else if (true) @as(c_int, 4) else @as(c_int, 6);
\\}
});
@@ -2214,7 +2214,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
, &[_][]const u8{
\\pub export fn foo() c_int {
- \\ if (@as(c_int, 2) != 0) {
+ \\ if (true) {
\\ var a: c_int = 2;
\\ }
\\ if ((blk: {
@@ -2748,8 +2748,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
, &[_][]const u8{
\\pub fn foo() callconv(.C) void {
- \\ if (@as(c_int, 1) != 0) while (true) {
- \\ if (!(@as(c_int, 0) != 0)) break;
+ \\ if (true) while (true) {
+ \\ if (!false) break;
\\ };
\\}
});
From 2b28cebf644b29543fcb52504b11931a7c797ffb Mon Sep 17 00:00:00 2001
From: Vexu
Date: Tue, 11 Aug 2020 12:20:58 +0300
Subject: [PATCH 041/153] translate-c: use mangled name when macro translation
fails
Closes #6009
---
src-self-hosted/translate_c.zig | 321 ++++++++++++--------------------
test/translate_c.zig | 4 +
2 files changed, 127 insertions(+), 198 deletions(-)
diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig
index a6b5862fae..5af9bbc3c4 100644
--- a/src-self-hosted/translate_c.zig
+++ b/src-self-hosted/translate_c.zig
@@ -3089,7 +3089,7 @@ fn transUnaryExprOrTypeTraitExpr(
.AlignOf => "@alignOf",
.PreferredAlignOf,
.VecStep,
- .OpenMPRequiredSimdAlign,
+ .OpenMPRequiredSimdAlign,
=> return revertAndWarn(
rp,
error.UnsupportedTranslation,
@@ -5220,26 +5220,32 @@ pub fn freeErrors(errors: []ClangErrMsg) void {
ZigClangErrorMsg_delete(errors.ptr, errors.len);
}
-const CTokIterator = struct {
+const MacroCtx = struct {
source: []const u8,
list: []const CToken,
i: usize = 0,
+ loc: ZigClangSourceLocation,
+ name: []const u8,
- fn peek(self: *CTokIterator) ?CToken.Id {
+ fn peek(self: *MacroCtx) ?CToken.Id {
if (self.i >= self.list.len) return null;
return self.list[self.i + 1].id;
}
- fn next(self: *CTokIterator) ?CToken.Id {
+ fn next(self: *MacroCtx) ?CToken.Id {
if (self.i >= self.list.len) return null;
self.i += 1;
return self.list[self.i].id;
}
- fn slice(self: *CTokIterator, index: usize) []const u8 {
- const tok = self.list[index];
+ fn slice(self: *MacroCtx) []const u8 {
+ const tok = self.list[self.i];
return self.source[tok.start..tok.end];
}
+
+ fn fail(self: *MacroCtx, c: *Context, comptime fmt: []const u8, args: anytype) !void {
+ return failDecl(c, self.loc, self.name, fmt, args);
+ }
};
fn transPreprocessorEntities(c: *Context, unit: *ZigClangASTUnit) Error!void {
@@ -5286,18 +5292,21 @@ fn transPreprocessorEntities(c: *Context, unit: *ZigClangASTUnit) Error!void {
try tok_list.append(tok);
}
- var tok_it = CTokIterator{
+ var macro_ctx = MacroCtx{
.source = slice,
.list = tok_list.items,
+ .name = mangled_name,
+ .loc = begin_loc,
};
- assert(mem.eql(u8, tok_it.slice(0), name));
+ assert(mem.eql(u8, macro_ctx.slice(), name));
var macro_fn = false;
- switch (tok_it.peek().?) {
+ switch (macro_ctx.peek().?) {
.Identifier => {
// if it equals itself, ignore. for example, from stdio.h:
// #define stdin stdin
- if (mem.eql(u8, name, tok_it.slice(1))) {
+ const tok = macro_ctx.list[1];
+ if (mem.eql(u8, name, slice[tok.start..tok.end])) {
continue;
}
},
@@ -5308,15 +5317,15 @@ fn transPreprocessorEntities(c: *Context, unit: *ZigClangASTUnit) Error!void {
},
.LParen => {
// if the name is immediately followed by a '(' then it is a function
- macro_fn = tok_it.list[0].end == tok_it.list[1].start;
+ macro_fn = macro_ctx.list[0].end == macro_ctx.list[1].start;
},
else => {},
}
(if (macro_fn)
- transMacroFnDefine(c, &tok_it, mangled_name, begin_loc)
+ transMacroFnDefine(c, &macro_ctx)
else
- transMacroDefine(c, &tok_it, mangled_name, begin_loc)) catch |err| switch (err) {
+ transMacroDefine(c, &macro_ctx)) catch |err| switch (err) {
error.ParseError => continue,
error.OutOfMemory => |e| return e,
};
@@ -5326,24 +5335,18 @@ fn transPreprocessorEntities(c: *Context, unit: *ZigClangASTUnit) Error!void {
}
}
-fn transMacroDefine(c: *Context, it: *CTokIterator, name: []const u8, source_loc: ZigClangSourceLocation) ParseError!void {
+fn transMacroDefine(c: *Context, m: *MacroCtx) ParseError!void {
const scope = &c.global_scope.base;
const visib_tok = try appendToken(c, .Keyword_pub, "pub");
const mut_tok = try appendToken(c, .Keyword_const, "const");
- const name_tok = try appendIdentifier(c, name);
+ const name_tok = try appendIdentifier(c, m.name);
const eq_token = try appendToken(c, .Equal, "=");
- const init_node = try parseCExpr(c, it, source_loc, scope);
- const last = it.next().?;
+ const init_node = try parseCExpr(c, m, scope);
+ const last = m.next().?;
if (last != .Eof and last != .Nl)
- return failDecl(
- c,
- source_loc,
- name,
- "unable to translate C expr: unexpected token .{}",
- .{@tagName(last)},
- );
+ return m.fail(c, "unable to translate C expr: unexpected token .{}", .{@tagName(last)});
const semicolon_token = try appendToken(c, .Semicolon, ";");
const node = try ast.Node.VarDecl.create(c.arena, .{
@@ -5355,10 +5358,10 @@ fn transMacroDefine(c: *Context, it: *CTokIterator, name: []const u8, source_loc
.eq_token = eq_token,
.init_node = init_node,
});
- _ = try c.global_scope.macro_table.put(name, &node.base);
+ _ = try c.global_scope.macro_table.put(m.name, &node.base);
}
-fn transMacroFnDefine(c: *Context, it: *CTokIterator, name: []const u8, source_loc: ZigClangSourceLocation) ParseError!void {
+fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
var block_scope = try Scope.Block.init(c, &c.global_scope.base, null);
defer block_scope.deinit();
const scope = &block_scope.base;
@@ -5366,34 +5369,22 @@ fn transMacroFnDefine(c: *Context, it: *CTokIterator, name: []const u8, source_l
const pub_tok = try appendToken(c, .Keyword_pub, "pub");
const inline_tok = try appendToken(c, .Keyword_inline, "inline");
const fn_tok = try appendToken(c, .Keyword_fn, "fn");
- const name_tok = try appendIdentifier(c, name);
+ const name_tok = try appendIdentifier(c, m.name);
_ = try appendToken(c, .LParen, "(");
- if (it.next().? != .LParen) {
- return failDecl(
- c,
- source_loc,
- name,
- "unable to translate C expr: expected '('",
- .{},
- );
+ if (m.next().? != .LParen) {
+ return m.fail(c, "unable to translate C expr: expected '('", .{});
}
var fn_params = std.ArrayList(ast.Node.FnProto.ParamDecl).init(c.gpa);
defer fn_params.deinit();
while (true) {
- if (it.next().? != .Identifier) {
- return failDecl(
- c,
- source_loc,
- name,
- "unable to translate C expr: expected identifier",
- .{},
- );
+ if (m.next().? != .Identifier) {
+ return m.fail(c, "unable to translate C expr: expected identifier", .{});
}
- const mangled_name = try block_scope.makeMangledName(c, it.slice(it.i));
+ const mangled_name = try block_scope.makeMangledName(c, m.slice());
const param_name_tok = try appendIdentifier(c, mangled_name);
_ = try appendToken(c, .Colon, ":");
@@ -5411,20 +5402,14 @@ fn transMacroFnDefine(c: *Context, it: *CTokIterator, name: []const u8, source_l
.param_type = .{ .any_type = &any_type.base },
};
- if (it.peek().? != .Comma)
+ if (m.peek().? != .Comma)
break;
- _ = it.next();
+ _ = m.next();
_ = try appendToken(c, .Comma, ",");
}
- if (it.next().? != .RParen) {
- return failDecl(
- c,
- source_loc,
- name,
- "unable to translate C expr: expected ')'",
- .{},
- );
+ if (m.next().? != .RParen) {
+ return m.fail(c, "unable to translate C expr: expected ')'", .{});
}
_ = try appendToken(c, .RParen, ")");
@@ -5432,16 +5417,10 @@ fn transMacroFnDefine(c: *Context, it: *CTokIterator, name: []const u8, source_l
const type_of = try c.createBuiltinCall("@TypeOf", 1);
const return_kw = try appendToken(c, .Keyword_return, "return");
- const expr = try parseCExpr(c, it, source_loc, scope);
- const last = it.next().?;
+ const expr = try parseCExpr(c, m, scope);
+ const last = m.next().?;
if (last != .Eof and last != .Nl)
- return failDecl(
- c,
- source_loc,
- name,
- "unable to translate C expr: unexpected token .{}",
- .{@tagName(last)},
- );
+ return m.fail(c, "unable to translate C expr: unexpected token .{}", .{@tagName(last)});
_ = try appendToken(c, .Semicolon, ";");
const type_of_arg = if (expr.tag != .Block) expr else blk: {
const blk = @fieldParentPtr(ast.Node.Block, "base", expr);
@@ -5472,32 +5451,26 @@ fn transMacroFnDefine(c: *Context, it: *CTokIterator, name: []const u8, source_l
});
mem.copy(ast.Node.FnProto.ParamDecl, fn_proto.params(), fn_params.items);
- _ = try c.global_scope.macro_table.put(name, &fn_proto.base);
+ _ = try c.global_scope.macro_table.put(m.name, &fn_proto.base);
}
const ParseError = Error || error{ParseError};
-fn parseCExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceLocation, scope: *Scope) ParseError!*ast.Node {
- const node = try parseCPrefixOpExpr(c, it, source_loc, scope);
- switch (it.next().?) {
+fn parseCExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!*ast.Node {
+ const node = try parseCPrefixOpExpr(c, m, scope);
+ switch (m.next().?) {
.QuestionMark => {
// must come immediately after expr
_ = try appendToken(c, .RParen, ")");
const if_node = try transCreateNodeIf(c);
if_node.condition = node;
- if_node.body = try parseCPrimaryExpr(c, it, source_loc, scope);
- if (it.next().? != .Colon) {
- try failDecl(
- c,
- source_loc,
- it.slice(0),
- "unable to translate C expr: expected ':'",
- .{},
- );
+ if_node.body = try parseCPrimaryExpr(c, m, scope);
+ if (m.next().? != .Colon) {
+ try m.fail(c, "unable to translate C expr: expected ':'", .{});
return error.ParseError;
}
if_node.@"else" = try transCreateNodeElse(c);
- if_node.@"else".?.body = try parseCPrimaryExpr(c, it, source_loc, scope);
+ if_node.@"else".?.body = try parseCPrimaryExpr(c, m, scope);
return &if_node.base;
},
.Comma => {
@@ -5520,10 +5493,10 @@ fn parseCExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceLocation
};
try block_scope.statements.append(&op_node.base);
- last = try parseCPrefixOpExpr(c, it, source_loc, scope);
+ last = try parseCPrefixOpExpr(c, m, scope);
_ = try appendToken(c, .Semicolon, ";");
- if (it.next().? != .Comma) {
- it.i -= 1;
+ if (m.next().? != .Comma) {
+ m.i -= 1;
break;
}
}
@@ -5534,16 +5507,16 @@ fn parseCExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceLocation
return &block_node.base;
},
else => {
- it.i -= 1;
+ m.i -= 1;
return node;
},
}
}
-fn parseCNumLit(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceLocation) ParseError!*ast.Node {
- var lit_bytes = it.slice(it.i);
+fn parseCNumLit(c: *Context, m: *MacroCtx) ParseError!*ast.Node {
+ var lit_bytes = m.slice();
- switch (it.list[it.i].id) {
+ switch (m.list[m.i].id) {
.IntegerLiteral => |suffix| {
if (lit_bytes.len > 2 and lit_bytes[0] == '0') {
switch (lit_bytes[1]) {
@@ -5604,8 +5577,8 @@ fn parseCNumLit(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceLocati
}
}
-fn zigifyEscapeSequences(ctx: *Context, source_bytes: []const u8, name: []const u8, source_loc: ZigClangSourceLocation) ![]const u8 {
- var source = source_bytes;
+fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
+ var source = m.slice();
for (source) |c, i| {
if (c == '\"' or c == '\'') {
source = source[i..];
@@ -5677,11 +5650,11 @@ fn zigifyEscapeSequences(ctx: *Context, source_bytes: []const u8, name: []const
bytes[i] = '?';
},
'u', 'U' => {
- try failDecl(ctx, source_loc, name, "macro tokenizing failed: TODO unicode escape sequences", .{});
+ try m.fail(ctx, "macro tokenizing failed: TODO unicode escape sequences", .{});
return error.ParseError;
},
else => {
- try failDecl(ctx, source_loc, name, "macro tokenizing failed: unknown escape sequence", .{});
+ try m.fail(ctx, "macro tokenizing failed: unknown escape sequence", .{});
return error.ParseError;
},
}
@@ -5700,21 +5673,21 @@ fn zigifyEscapeSequences(ctx: *Context, source_bytes: []const u8, name: []const
switch (c) {
'0'...'9' => {
num = std.math.mul(u8, num, 16) catch {
- try failDecl(ctx, source_loc, name, "macro tokenizing failed: hex literal overflowed", .{});
+ try m.fail(ctx, "macro tokenizing failed: hex literal overflowed", .{});
return error.ParseError;
};
num += c - '0';
},
'a'...'f' => {
num = std.math.mul(u8, num, 16) catch {
- try failDecl(ctx, source_loc, name, "macro tokenizing failed: hex literal overflowed", .{});
+ try m.fail(ctx, "macro tokenizing failed: hex literal overflowed", .{});
return error.ParseError;
};
num += c - 'a' + 10;
},
'A'...'F' => {
num = std.math.mul(u8, num, 16) catch {
- try failDecl(ctx, source_loc, name, "macro tokenizing failed: hex literal overflowed", .{});
+ try m.fail(ctx, "macro tokenizing failed: hex literal overflowed", .{});
return error.ParseError;
};
num += c - 'A' + 10;
@@ -5741,7 +5714,7 @@ fn zigifyEscapeSequences(ctx: *Context, source_bytes: []const u8, name: []const
if (accept_digit) {
count += 1;
num = std.math.mul(u8, num, 8) catch {
- try failDecl(ctx, source_loc, name, "macro tokenizing failed: octal literal overflowed", .{});
+ try m.fail(ctx, "macro tokenizing failed: octal literal overflowed", .{});
return error.ParseError;
};
num += c - '0';
@@ -5764,13 +5737,13 @@ fn zigifyEscapeSequences(ctx: *Context, source_bytes: []const u8, name: []const
return bytes[0..i];
}
-fn parseCPrimaryExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceLocation, scope: *Scope) ParseError!*ast.Node {
- const tok = it.next().?;
- const slice = it.slice(it.i);
+fn parseCPrimaryExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!*ast.Node {
+ const tok = m.next().?;
+ const slice = m.slice();
switch (tok) {
.CharLiteral => {
if (slice[0] != '\'' or slice[1] == '\\' or slice.len == 3) {
- const token = try appendToken(c, .CharLiteral, try zigifyEscapeSequences(c, slice, it.slice(0), source_loc));
+ const token = try appendToken(c, .CharLiteral, try zigifyEscapeSequences(c, m));
const node = try c.arena.create(ast.Node.OneToken);
node.* = .{
.base = .{ .tag = .CharLiteral },
@@ -5788,7 +5761,7 @@ fn parseCPrimaryExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceL
}
},
.StringLiteral => {
- const token = try appendToken(c, .StringLiteral, try zigifyEscapeSequences(c, slice, it.slice(0), source_loc));
+ const token = try appendToken(c, .StringLiteral, try zigifyEscapeSequences(c, m));
const node = try c.arena.create(ast.Node.OneToken);
node.* = .{
.base = .{ .tag = .StringLiteral },
@@ -5797,7 +5770,7 @@ fn parseCPrimaryExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceL
return &node.base;
},
.IntegerLiteral, .FloatLiteral => {
- return parseCNumLit(c, it, source_loc);
+ return parseCNumLit(c, m);
},
// eventually this will be replaced by std.c.parse which will handle these correctly
.Keyword_void => return transCreateNodeIdentifierUnchecked(c, "c_void"),
@@ -5808,61 +5781,55 @@ fn parseCPrimaryExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceL
.Keyword_float => return transCreateNodeIdentifierUnchecked(c, "f32"),
.Keyword_short => return transCreateNodeIdentifierUnchecked(c, "c_short"),
.Keyword_char => return transCreateNodeIdentifierUnchecked(c, "u8"),
- .Keyword_unsigned => if (it.next()) |t| switch (t) {
+ .Keyword_unsigned => if (m.next()) |t| switch (t) {
.Keyword_char => return transCreateNodeIdentifierUnchecked(c, "u8"),
.Keyword_short => return transCreateNodeIdentifierUnchecked(c, "c_ushort"),
.Keyword_int => return transCreateNodeIdentifierUnchecked(c, "c_uint"),
- .Keyword_long => if (it.peek() != null and it.peek().? == .Keyword_long) {
- _ = it.next();
+ .Keyword_long => if (m.peek() != null and m.peek().? == .Keyword_long) {
+ _ = m.next();
return transCreateNodeIdentifierUnchecked(c, "c_ulonglong");
} else return transCreateNodeIdentifierUnchecked(c, "c_ulong"),
else => {
- it.i -= 1;
+ m.i -= 1;
return transCreateNodeIdentifierUnchecked(c, "c_uint");
},
} else {
return transCreateNodeIdentifierUnchecked(c, "c_uint");
},
- .Keyword_signed => if (it.next()) |t| switch (t) {
+ .Keyword_signed => if (m.next()) |t| switch (t) {
.Keyword_char => return transCreateNodeIdentifierUnchecked(c, "i8"),
.Keyword_short => return transCreateNodeIdentifierUnchecked(c, "c_short"),
.Keyword_int => return transCreateNodeIdentifierUnchecked(c, "c_int"),
- .Keyword_long => if (it.peek() != null and it.peek().? == .Keyword_long) {
- _ = it.next();
+ .Keyword_long => if (m.peek() != null and m.peek().? == .Keyword_long) {
+ _ = m.next();
return transCreateNodeIdentifierUnchecked(c, "c_longlong");
} else return transCreateNodeIdentifierUnchecked(c, "c_long"),
else => {
- it.i -= 1;
+ m.i -= 1;
return transCreateNodeIdentifierUnchecked(c, "c_int");
},
} else {
return transCreateNodeIdentifierUnchecked(c, "c_int");
},
.Identifier => {
- const mangled_name = scope.getAlias(it.slice(it.i));
+ const mangled_name = scope.getAlias(slice);
return transCreateNodeIdentifier(c, mangled_name);
},
.LParen => {
- const inner_node = try parseCExpr(c, it, source_loc, scope);
+ const inner_node = try parseCExpr(c, m, scope);
- const next_id = it.next().?;
+ const next_id = m.next().?;
if (next_id != .RParen) {
- try failDecl(
- c,
- source_loc,
- it.slice(0),
- "unable to translate C expr: expected ')'' instead got: {}",
- .{@tagName(next_id)},
- );
+ try m.fail(c, "unable to translate C expr: expected ')'' instead got: {}", .{@tagName(next_id)});
return error.ParseError;
}
var saw_l_paren = false;
var saw_integer_literal = false;
- switch (it.peek().?) {
+ switch (m.peek().?) {
// (type)(to_cast)
.LParen => {
saw_l_paren = true;
- _ = it.next();
+ _ = m.next();
},
// (type)identifier
.Identifier => {},
@@ -5876,16 +5843,10 @@ fn parseCPrimaryExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceL
// hack to get zig fmt to render a comma in builtin calls
_ = try appendToken(c, .Comma, ",");
- const node_to_cast = try parseCExpr(c, it, source_loc, scope);
+ const node_to_cast = try parseCExpr(c, m, scope);
- if (saw_l_paren and it.next().? != .RParen) {
- try failDecl(
- c,
- source_loc,
- it.slice(0),
- "unable to translate C expr: expected ')''",
- .{},
- );
+ if (saw_l_paren and m.next().? != .RParen) {
+ try m.fail(c, "unable to translate C expr: expected ')''", .{});
return error.ParseError;
}
@@ -5913,13 +5874,7 @@ fn parseCPrimaryExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceL
return &group_node.base;
},
else => {
- try failDecl(
- c,
- source_loc,
- it.slice(0),
- "unable to translate C expr: unexpected token .{}",
- .{@tagName(tok)},
- );
+ try m.fail(c, "unable to translate C expr: unexpected token .{}", .{@tagName(tok)});
return error.ParseError;
},
}
@@ -6026,52 +5981,40 @@ fn macroIntToBool(c: *Context, node: *ast.Node) !*ast.Node {
return &group_node.base;
}
-fn parseCSuffixOpExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceLocation, scope: *Scope) ParseError!*ast.Node {
- var node = try parseCPrimaryExpr(c, it, source_loc, scope);
+fn parseCSuffixOpExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!*ast.Node {
+ var node = try parseCPrimaryExpr(c, m, scope);
while (true) {
var op_token: ast.TokenIndex = undefined;
var op_id: ast.Node.Tag = undefined;
var bool_op = false;
- switch (it.next().?) {
+ switch (m.next().?) {
.Period => {
- if (it.next().? != .Identifier) {
- try failDecl(
- c,
- source_loc,
- it.slice(0),
- "unable to translate C expr: expected identifier",
- .{},
- );
+ if (m.next().? != .Identifier) {
+ try m.fail(c, "unable to translate C expr: expected identifier", .{});
return error.ParseError;
}
- node = try transCreateNodeFieldAccess(c, node, it.slice(it.i));
+ node = try transCreateNodeFieldAccess(c, node, m.slice());
continue;
},
.Arrow => {
- if (it.next().? != .Identifier) {
- try failDecl(
- c,
- source_loc,
- it.slice(0),
- "unable to translate C expr: expected identifier",
- .{},
- );
+ if (m.next().? != .Identifier) {
+ try m.fail(c, "unable to translate C expr: expected identifier", .{});
return error.ParseError;
}
const deref = try transCreateNodePtrDeref(c, node);
- node = try transCreateNodeFieldAccess(c, deref, it.slice(it.i));
+ node = try transCreateNodeFieldAccess(c, deref, m.slice());
continue;
},
.Asterisk => {
- if (it.peek().? == .RParen) {
+ if (m.peek().? == .RParen) {
// type *)
// hack to get zig fmt to render a comma in builtin calls
_ = try appendToken(c, .Comma, ",");
// last token of `node`
- const prev_id = it.list[it.i - 1].id;
+ const prev_id = m.list[m.i - 1].id;
if (prev_id == .Keyword_void) {
const ptr = try transCreateNodePtrType(c, false, false, .Asterisk);
@@ -6142,17 +6085,11 @@ fn parseCSuffixOpExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSource
},
.LBracket => {
const arr_node = try transCreateNodeArrayAccess(c, node);
- arr_node.index_expr = try parseCPrefixOpExpr(c, it, source_loc, scope);
+ arr_node.index_expr = try parseCPrefixOpExpr(c, m, scope);
arr_node.rtoken = try appendToken(c, .RBracket, "]");
node = &arr_node.base;
- if (it.next().? != .RBracket) {
- try failDecl(
- c,
- source_loc,
- it.slice(0),
- "unable to translate C expr: expected ']'",
- .{},
- );
+ if (m.next().? != .RBracket) {
+ try m.fail(c, "unable to translate C expr: expected ']'", .{});
return error.ParseError;
}
continue;
@@ -6162,19 +6099,13 @@ fn parseCSuffixOpExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSource
var call_params = std.ArrayList(*ast.Node).init(c.gpa);
defer call_params.deinit();
while (true) {
- const arg = try parseCPrefixOpExpr(c, it, source_loc, scope);
+ const arg = try parseCPrefixOpExpr(c, m, scope);
try call_params.append(arg);
- switch (it.next().?) {
+ switch (m.next().?) {
.Comma => _ = try appendToken(c, .Comma, ","),
.RParen => break,
else => {
- try failDecl(
- c,
- source_loc,
- it.slice(0),
- "unable to translate C expr: expected ',' or ')'",
- .{},
- );
+ try m.fail(c, "unable to translate C expr: expected ',' or ')'", .{});
return error.ParseError;
},
}
@@ -6201,19 +6132,13 @@ fn parseCSuffixOpExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSource
defer init_vals.deinit();
while (true) {
- const val = try parseCPrefixOpExpr(c, it, source_loc, scope);
+ const val = try parseCPrefixOpExpr(c, m, scope);
try init_vals.append(val);
- switch (it.next().?) {
+ switch (m.next().?) {
.Comma => _ = try appendToken(c, .Comma, ","),
.RBrace => break,
else => {
- try failDecl(
- c,
- source_loc,
- it.slice(0),
- "unable to translate C expr: expected ',' or '}}'",
- .{},
- );
+ try m.fail(c, "unable to translate C expr: expected ',' or '}}'", .{});
return error.ParseError;
},
}
@@ -6262,22 +6187,22 @@ fn parseCSuffixOpExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSource
op_id = .ArrayCat;
op_token = try appendToken(c, .PlusPlus, "++");
- it.i -= 1;
+ m.i -= 1;
},
.Identifier => {
op_id = .ArrayCat;
op_token = try appendToken(c, .PlusPlus, "++");
- it.i -= 1;
+ m.i -= 1;
},
else => {
- it.i -= 1;
+ m.i -= 1;
return node;
},
}
const cast_fn = if (bool_op) macroIntToBool else macroBoolToInt;
const lhs_node = try cast_fn(c, node);
- const rhs_node = try parseCPrefixOpExpr(c, it, source_loc, scope);
+ const rhs_node = try parseCPrefixOpExpr(c, m, scope);
const op_node = try c.arena.create(ast.Node.SimpleInfixOp);
op_node.* = .{
.base = .{ .tag = op_id },
@@ -6289,36 +6214,36 @@ fn parseCSuffixOpExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSource
}
}
-fn parseCPrefixOpExpr(c: *Context, it: *CTokIterator, source_loc: ZigClangSourceLocation, scope: *Scope) ParseError!*ast.Node {
- switch (it.next().?) {
+fn parseCPrefixOpExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!*ast.Node {
+ switch (m.next().?) {
.Bang => {
const node = try transCreateNodeSimplePrefixOp(c, .BoolNot, .Bang, "!");
- node.rhs = try parseCPrefixOpExpr(c, it, source_loc, scope);
+ node.rhs = try parseCPrefixOpExpr(c, m, scope);
return &node.base;
},
.Minus => {
const node = try transCreateNodeSimplePrefixOp(c, .Negation, .Minus, "-");
- node.rhs = try parseCPrefixOpExpr(c, it, source_loc, scope);
+ node.rhs = try parseCPrefixOpExpr(c, m, scope);
return &node.base;
},
- .Plus => return try parseCPrefixOpExpr(c, it, source_loc, scope),
+ .Plus => return try parseCPrefixOpExpr(c, m, scope),
.Tilde => {
const node = try transCreateNodeSimplePrefixOp(c, .BitNot, .Tilde, "~");
- node.rhs = try parseCPrefixOpExpr(c, it, source_loc, scope);
+ node.rhs = try parseCPrefixOpExpr(c, m, scope);
return &node.base;
},
.Asterisk => {
- const node = try parseCPrefixOpExpr(c, it, source_loc, scope);
+ const node = try parseCPrefixOpExpr(c, m, scope);
return try transCreateNodePtrDeref(c, node);
},
.Ampersand => {
const node = try transCreateNodeSimplePrefixOp(c, .AddressOf, .Ampersand, "&");
- node.rhs = try parseCPrefixOpExpr(c, it, source_loc, scope);
+ node.rhs = try parseCPrefixOpExpr(c, m, scope);
return &node.base;
},
else => {
- it.i -= 1;
- return try parseCSuffixOpExpr(c, it, source_loc, scope);
+ m.i -= 1;
+ return try parseCSuffixOpExpr(c, m, scope);
},
}
}
diff --git a/test/translate_c.zig b/test/translate_c.zig
index ac549695aa..38faffe747 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -1679,8 +1679,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("shadowing primitive types",
\\unsigned anyerror = 2;
+ \\#define noreturn _Noreturn
, &[_][]const u8{
\\pub export var anyerror_1: c_uint = @bitCast(c_uint, @as(c_int, 2));
+ ,
+
+ \\pub const noreturn_2 = @compileError("unable to translate C expr: unexpected token .Keyword_noreturn");
});
cases.add("floats",
From 6febe7e977072fea1bf7b6003be2d6c1f3654905 Mon Sep 17 00:00:00 2001
From: Maciej Walczak <14938807+xackus@users.noreply.github.com>
Date: Tue, 11 Aug 2020 21:49:43 +0200
Subject: [PATCH 042/153] copy_file_range linux syscall (#6010)
---
lib/std/builtin.zig | 8 +++++
lib/std/c/linux.zig | 2 ++
lib/std/fs/file.zig | 9 ++---
lib/std/fs/test.zig | 26 +++++++++++++++
lib/std/os.zig | 79 ++++++++++++++++++++++++++++++++++++++++++++
lib/std/os/linux.zig | 12 +++++++
lib/std/target.zig | 28 ++++++++++++++++
7 files changed, 157 insertions(+), 7 deletions(-)
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index e9c53d7ee5..49edab1fd7 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -447,6 +447,14 @@ pub const Version = struct {
if (self.max.order(ver) == .lt) return false;
return true;
}
+
+ /// Checks if system is guaranteed to be at least `version` or older than `version`.
+ /// Returns `null` if a runtime check is required.
+ pub fn isAtLeast(self: Range, ver: Version) ?bool {
+ if (self.min.order(ver) != .lt) return true;
+ if (self.max.order(ver) == .lt) return false;
+ return null;
+ }
};
pub fn order(lhs: Version, rhs: Version) std.math.Order {
diff --git a/lib/std/c/linux.zig b/lib/std/c/linux.zig
index 4ceeb5a773..c00fc68bca 100644
--- a/lib/std/c/linux.zig
+++ b/lib/std/c/linux.zig
@@ -91,6 +91,8 @@ pub extern "c" fn sendfile(
count: usize,
) isize;
+pub extern "c" fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: c_uint) isize;
+
pub const pthread_attr_t = extern struct {
__size: [56]u8,
__align: c_long,
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index 3b79e4e01b..ce71571c9f 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -607,15 +607,10 @@ pub const File = struct {
}
}
- pub const CopyRangeError = PWriteError || PReadError;
+ pub const CopyRangeError = os.CopyFileRangeError;
pub fn copyRange(in: File, in_offset: u64, out: File, out_offset: u64, len: usize) CopyRangeError!usize {
- // TODO take advantage of copy_file_range OS APIs
- var buf: [8 * 4096]u8 = undefined;
- const adjusted_count = math.min(buf.len, len);
- const amt_read = try in.pread(buf[0..adjusted_count], in_offset);
- if (amt_read == 0) return @as(usize, 0);
- return out.pwrite(buf[0..amt_read], out_offset);
+ return os.copy_file_range(in.handle, in_offset, out.handle, out_offset, len, 0);
}
/// Returns the number of bytes copied. If the number read is smaller than `buffer.len`, it
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index c9f1711967..26d8632c37 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -328,6 +328,32 @@ test "sendfile" {
testing.expect(mem.eql(u8, written_buf[0..amt], "header1\nsecond header\nine1\nsecontrailer1\nsecond trailer\n"));
}
+test "copyRangeAll" {
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ try tmp.dir.makePath("os_test_tmp");
+ defer tmp.dir.deleteTree("os_test_tmp") catch {};
+
+ var dir = try tmp.dir.openDir("os_test_tmp", .{});
+ defer dir.close();
+
+ var src_file = try dir.createFile("file1.txt", .{ .read = true });
+ defer src_file.close();
+
+ const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP";
+ try src_file.writeAll(data);
+
+ var dest_file = try dir.createFile("file2.txt", .{ .read = true });
+ defer dest_file.close();
+
+ var written_buf: [100]u8 = undefined;
+ _ = try src_file.copyRangeAll(0, dest_file, 0, data.len);
+
+ const amt = try dest_file.preadAll(&written_buf, 0);
+ testing.expect(mem.eql(u8, written_buf[0..amt], data));
+}
+
test "fs.copyFile" {
const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP";
const src_file = "tmp_test_copy_file.txt";
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 04c2340cad..ae2b232ef7 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -4926,6 +4926,85 @@ pub fn sendfile(
return total_written;
}
+pub const CopyFileRangeError = error{
+ FileTooBig,
+ InputOutput,
+ IsDir,
+ OutOfMemory,
+ NoSpaceLeft,
+ Unseekable,
+ PermissionDenied,
+ FileBusy,
+} || PReadError || PWriteError || UnexpectedError;
+
+/// Transfer data between file descriptors at specified offsets.
+/// Returns the number of bytes written, which can less than requested.
+///
+/// The `copy_file_range` call copies `len` bytes from one file descriptor to another. When possible,
+/// this is done within the operating system kernel, which can provide better performance
+/// characteristics than transferring data from kernel to user space and back, such as with
+/// `pread` and `pwrite` calls.
+///
+/// `fd_in` must be a file descriptor opened for reading, and `fd_out` must be a file descriptor
+/// opened for writing. They may be any kind of file descriptor; however, if `fd_in` is not a regular
+/// file system file, it may cause this function to fall back to calling `pread` and `pwrite`, in which case
+/// atomicity guarantees no longer apply.
+///
+/// If `fd_in` and `fd_out` are the same, source and target ranges must not overlap.
+/// The file descriptor seek positions are ignored and not updated.
+/// When `off_in` is past the end of the input file, it successfully reads 0 bytes.
+///
+/// `flags` has different meanings per operating system; refer to the respective man pages.
+///
+/// These systems support in-kernel data copying:
+/// * Linux 4.5 (cross-filesystem 5.3)
+///
+/// Other systems fall back to calling `pread` / `pwrite`.
+///
+/// Maximum offsets on Linux are `math.maxInt(i64)`.
+pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len: usize, flags: u32) CopyFileRangeError!usize {
+ const use_c = std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok;
+
+ // TODO support for other systems than linux
+ const try_syscall = comptime std.Target.current.os.isAtLeast(.linux, .{ .major = 4, .minor = 5 }) != false;
+
+ if (use_c or try_syscall) {
+ const sys = if (use_c) std.c else linux;
+
+ var off_in_copy = @bitCast(i64, off_in);
+ var off_out_copy = @bitCast(i64, off_out);
+
+ const rc = sys.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags);
+
+ // TODO avoid wasting a syscall every time if kernel is too old and returns ENOSYS https://github.com/ziglang/zig/issues/1018
+
+ switch (sys.getErrno(rc)) {
+ 0 => return @intCast(usize, rc),
+ EBADF => unreachable,
+ EFBIG => return error.FileTooBig,
+ EIO => return error.InputOutput,
+ EISDIR => return error.IsDir,
+ ENOMEM => return error.OutOfMemory,
+ ENOSPC => return error.NoSpaceLeft,
+ EOVERFLOW => return error.Unseekable,
+ EPERM => return error.PermissionDenied,
+ ETXTBSY => return error.FileBusy,
+ EINVAL => {}, // these may not be regular files, try fallback
+ EXDEV => {}, // support for cross-filesystem copy added in Linux 5.3, use fallback
+ ENOSYS => {}, // syscall added in Linux 4.5, use fallback
+ else => |err| return unexpectedErrno(err),
+ }
+ }
+
+ var buf: [8 * 4096]u8 = undefined;
+ const adjusted_count = math.min(buf.len, len);
+ const amt_read = try pread(fd_in, buf[0..adjusted_count], off_in);
+ // TODO without @as the line below fails to compile for wasm32-wasi:
+ // error: integer value 0 cannot be coerced to type 'os.PWriteError!usize'
+ if (amt_read == 0) return @as(usize, 0);
+ return pwrite(fd_out, buf[0..amt_read], off_out);
+}
+
pub const PollError = error{
/// The kernel had no space to allocate file descriptor tables.
SystemResources,
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 15d6e53c1e..d9f7dda03e 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -1210,6 +1210,18 @@ pub fn signalfd4(fd: fd_t, mask: *const sigset_t, flags: i32) usize {
);
}
+pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) usize {
+ return syscall6(
+ .copy_file_range,
+ @bitCast(usize, @as(isize, fd_in)),
+ @ptrToInt(off_in),
+ @bitCast(usize, @as(isize, fd_out)),
+ @ptrToInt(off_out),
+ len,
+ flags,
+ );
+}
+
test "" {
if (builtin.os.tag == .linux) {
_ = @import("linux/test.zig");
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 04a65b45ad..14cea9dd57 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -100,6 +100,14 @@ pub const Target = struct {
pub fn includesVersion(self: Range, ver: WindowsVersion) bool {
return @enumToInt(ver) >= @enumToInt(self.min) and @enumToInt(ver) <= @enumToInt(self.max);
}
+
+ /// Checks if system is guaranteed to be at least `version` or older than `version`.
+ /// Returns `null` if a runtime check is required.
+ pub fn isAtLeast(self: Range, ver: WindowsVersion) ?bool {
+ if (@enumToInt(self.min) >= @enumToInt(ver)) return true;
+ if (@enumToInt(self.max) < @enumToInt(ver)) return false;
+ return null;
+ }
};
/// This function is defined to serialize a Zig source code representation of this
@@ -135,6 +143,12 @@ pub const Target = struct {
pub fn includesVersion(self: LinuxVersionRange, ver: Version) bool {
return self.range.includesVersion(ver);
}
+
+ /// Checks if system is guaranteed to be at least `version` or older than `version`.
+ /// Returns `null` if a runtime check is required.
+ pub fn isAtLeast(self: LinuxVersionRange, ver: Version) ?bool {
+ return self.range.isAtLeast(ver);
+ }
};
/// The version ranges here represent the minimum OS version to be supported
@@ -158,6 +172,8 @@ pub const Target = struct {
///
/// Binaries built with a given maximum version will continue to function on newer operating system
/// versions. However, such a binary may not take full advantage of the newer operating system APIs.
+ ///
+ /// See `Os.isAtLeast`.
pub const VersionRange = union {
none: void,
semver: Version.Range,
@@ -273,6 +289,18 @@ pub const Target = struct {
};
}
+ /// Checks if system is guaranteed to be at least `version` or older than `version`.
+ /// Returns `null` if a runtime check is required.
+ pub fn isAtLeast(self: Os, comptime tag: Tag, version: anytype) ?bool {
+ if (self.tag != tag) return false;
+
+ return switch (tag) {
+ .linux => self.version_range.linux.isAtLeast(version),
+ .windows => self.version_range.windows.isAtLeast(version),
+ else => self.version_range.semver.isAtLeast(version),
+ };
+ }
+
pub fn requiresLibC(os: Os) bool {
return switch (os.tag) {
.freebsd,
From 3ccfd58bb6765bcb6c586e91fd638a8117b4634f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 11 Aug 2020 14:04:08 -0700
Subject: [PATCH 043/153] std.mem.Allocator: fix not passing return_address
This makes collected stack traces omit less useful frames. For user
applications which only store a fixed number of stack frames this can
make a big difference.
---
lib/std/mem/Allocator.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index 8bdab81bc6..e73050165e 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -369,7 +369,7 @@ pub fn reallocAdvancedWithRetAddr(
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (old_mem.len == 0) {
- return self.allocAdvanced(T, new_alignment, new_n, exact);
+ return self.allocAdvancedWithRetAddr(T, new_alignment, new_n, exact, return_address);
}
if (new_n == 0) {
self.free(old_mem);
From 60ce5edaf9f9bd9c500eb74d6e775baa7867ed84 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 11 Aug 2020 14:05:28 -0700
Subject: [PATCH 044/153] GeneralPurposeAllocator: default to store more stack
frames in test mode
Now tests will by default store 8 stack frames per allocation rather
than the normal default of 4.
---
lib/std/heap/general_purpose_allocator.zig | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 957c38939c..7b885bca36 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -118,7 +118,8 @@ const sys_can_stack_trace = switch (std.Target.current.cpu.arch) {
else => true,
};
-const default_sys_stack_trace_frames: usize = if (sys_can_stack_trace) 4 else 0;
+const default_test_stack_trace_frames: usize = if (std.builtin.is_test) 8 else 4;
+const default_sys_stack_trace_frames: usize = if (sys_can_stack_trace) default_test_stack_trace_frames else 0;
const default_stack_trace_frames: usize = switch (std.builtin.mode) {
.Debug => default_sys_stack_trace_frames,
else => 0,
From 7612931c8020592a8c78b15339c696590579e03c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 11 Aug 2020 14:07:06 -0700
Subject: [PATCH 045/153] stage2: set up per-Decl .debug_info
---
src-self-hosted/codegen.zig | 113 ++++++++-------
src-self-hosted/link.zig | 273 +++++++++++++++++++++++++++++-------
2 files changed, 284 insertions(+), 102 deletions(-)
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index fffee883d4..db99b88d0e 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -50,6 +50,7 @@ pub fn generateSymbol(
typed_value: TypedValue,
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
+ dbg_info: *std.ArrayList(u8),
) GenerateSymbolError!Result {
const tracy = trace(@src());
defer tracy.end();
@@ -57,61 +58,62 @@ pub fn generateSymbol(
switch (typed_value.ty.zigTypeTag()) {
.Fn => {
switch (bin_file.base.options.target.cpu.arch) {
- //.arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.wasm32 => return Function(.wasm32).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.wasm64 => return Function(.wasm64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, dbg_line),
- //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, dbg_line),
+ //.arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.wasm32 => return Function(.wasm32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.wasm64 => return Function(.wasm64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."),
}
},
.Array => {
+ // TODO populate .debug_info for the array
if (typed_value.val.cast(Value.Payload.Bytes)) |payload| {
if (typed_value.ty.arraySentinel()) |sentinel| {
try code.ensureCapacity(code.items.len + payload.data.len + 1);
@@ -120,7 +122,7 @@ pub fn generateSymbol(
switch (try generateSymbol(bin_file, src, .{
.ty = typed_value.ty.elemType(),
.val = sentinel,
- }, code, dbg_line)) {
+ }, code, dbg_line, dbg_info)) {
.appended => return Result{ .appended = {} },
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
@@ -142,6 +144,8 @@ pub fn generateSymbol(
};
},
.Pointer => {
+ // TODO populate .debug_info for the pointer
+
if (typed_value.val.cast(Value.Payload.DeclRef)) |payload| {
const decl = payload.decl;
if (decl.analysis != .complete) return error.AnalysisFail;
@@ -177,6 +181,8 @@ pub fn generateSymbol(
};
},
.Int => {
+ // TODO populate .debug_info for the integer
+
const info = typed_value.ty.intInfo(bin_file.base.options.target);
if (info.bits == 8 and !info.signed) {
const x = typed_value.val.toUnsignedInt();
@@ -218,6 +224,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
mod_fn: *const Module.Fn,
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
+ dbg_info: *std.ArrayList(u8),
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
@@ -387,6 +394,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
typed_value: TypedValue,
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
+ dbg_info: *std.ArrayList(u8),
) GenerateSymbolError!Result {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
@@ -424,6 +432,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.mod_fn = module_fn,
.code = code,
.dbg_line = dbg_line,
+ .dbg_info = dbg_info,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 8bfd3bdb48..529175dc17 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -342,9 +342,10 @@ pub const File = struct {
shstrtab_dirty: bool = false,
debug_strtab_dirty: bool = false,
offset_table_count_dirty: bool = false,
- debug_info_section_dirty: bool = false,
debug_abbrev_section_dirty: bool = false,
debug_aranges_section_dirty: bool = false,
+
+ debug_info_header_dirty: bool = false,
debug_line_header_dirty: bool = false,
error_flags: ErrorFlags = ErrorFlags{},
@@ -364,7 +365,7 @@ pub const File = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh text block, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = std.ArrayListUnmanaged(*TextBlock){},
+ text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{},
last_text_block: ?*TextBlock = null,
/// A list of `SrcFn` whose Line Number Programs have surplus capacity.
@@ -373,6 +374,12 @@ pub const File = struct {
dbg_line_fn_first: ?*SrcFn = null,
dbg_line_fn_last: ?*SrcFn = null,
+ /// A list of `TextBlock` whose corresponding .debug_info tags have surplus capacity.
+ /// This is the same concept as `text_block_free_list`; see those doc comments.
+ dbg_info_decl_free_list: std.AutoHashMapUnmanaged(*TextBlock, void) = .{},
+ dbg_info_decl_first: ?*TextBlock = null,
+ dbg_info_decl_last: ?*TextBlock = null,
+
/// `alloc_num / alloc_den` is the factor of padding when allocating.
const alloc_num = 4;
const alloc_den = 3;
@@ -398,11 +405,24 @@ pub const File = struct {
prev: ?*TextBlock,
next: ?*TextBlock,
+ /// Previous/next linked list pointers. This value is `next ^ prev`.
+ /// This is the linked list node for this Decl's corresponding .debug_info tag.
+ dbg_info_prev: ?*TextBlock,
+ dbg_info_next: ?*TextBlock,
+ /// Offset into .debug_info pointing to the tag for this Decl.
+ dbg_info_off: u32,
+ /// Size of the .debug_info tag for this Decl, not including padding.
+ dbg_info_len: u32,
+
pub const empty = TextBlock{
.local_sym_index = 0,
.offset_table_index = undefined,
.prev = null,
.next = null,
+ .dbg_info_prev = null,
+ .dbg_info_next = null,
+ .dbg_info_off = undefined,
+ .dbg_info_len = undefined,
};
/// Returns how much room there is to grow in virtual address space.
@@ -566,6 +586,7 @@ pub const File = struct {
self.offset_table_free_list.deinit(self.base.allocator);
self.text_block_free_list.deinit(self.base.allocator);
self.dbg_line_fn_free_list.deinit(self.base.allocator);
+ self.dbg_info_decl_free_list.deinit(self.base.allocator);
self.offset_table.deinit(self.base.allocator);
}
@@ -854,7 +875,7 @@ pub const File = struct {
.sh_entsize = 0,
});
self.shdr_table_dirty = true;
- self.debug_info_section_dirty = true;
+ self.debug_info_header_dirty = true;
}
if (self.debug_abbrev_section_index == null) {
self.debug_abbrev_section_index = @intCast(u16, self.sections.items.len);
@@ -1019,21 +1040,36 @@ pub const File = struct {
self.debug_abbrev_section_dirty = false;
}
- if (self.debug_info_section_dirty) {
+
+ if (self.debug_info_header_dirty) debug_info: {
+ // If this value is null it means there is an error in the module;
+ // leave debug_info_header_dirty=true.
+ const first_dbg_info_decl = self.dbg_info_decl_first orelse break :debug_info;
+ const last_dbg_info_decl = self.dbg_info_decl_last.?;
const debug_info_sect = &self.sections.items[self.debug_info_section_index.?];
var di_buf = std.ArrayList(u8).init(self.base.allocator);
defer di_buf.deinit();
- // Enough for a 64-bit header and main compilation unit without resizing.
- try di_buf.ensureCapacity(100);
+ // We have a function to compute the upper bound size, because it's needed
+ // for determining where to put the offset of the first `LinkBlock`.
+ try di_buf.ensureCapacity(self.dbgInfoNeededHeaderBytes());
// initial length - length of the .debug_info contribution for this compilation unit,
// not including the initial length itself.
// We have to come back and write it later after we know the size.
- const init_len_index = di_buf.items.len;
- di_buf.items.len += init_len_size;
- const after_init_len = di_buf.items.len;
+ const after_init_len = di_buf.items.len + init_len_size;
+ const dbg_info_end = last_dbg_info_decl.dbg_info_off + last_dbg_info_decl.dbg_info_len;
+ const init_len = dbg_info_end - after_init_len;
+ switch (self.ptr_width) {
+ .p32 => {
+ mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len), target_endian);
+ },
+ .p64 => {
+ di_buf.appendNTimesAssumeCapacity(0xff, 4);
+ mem.writeInt(u64, di_buf.addManyAsArrayAssumeCapacity(8), init_len, target_endian);
+ },
+ }
mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), 4, target_endian); // DWARF version
const abbrev_offset = self.debug_abbrev_table_offset.?;
switch (self.ptr_width) {
@@ -1068,39 +1104,15 @@ pub const File = struct {
// Until then we say it is C99.
mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), DW.LANG_C99, target_endian);
- const init_len = di_buf.items.len - after_init_len;
- switch (self.ptr_width) {
- .p32 => {
- mem.writeInt(u32, di_buf.items[init_len_index..][0..4], @intCast(u32, init_len), target_endian);
- },
- .p64 => {
- // initial length - length of the .debug_info contribution for this compilation unit,
- // not including the initial length itself.
- di_buf.items[init_len_index..][0..4].* = [_]u8{ 0xff, 0xff, 0xff, 0xff };
- mem.writeInt(u64, di_buf.items[init_len_index + 4..][0..8], init_len, target_endian);
- },
+ if (di_buf.items.len > first_dbg_info_decl.dbg_info_off) {
+ // Move the first N decls to the end to make more padding for the header.
+ @panic("TODO: handle .debug_info header exceeding its padding");
}
-
- const needed_size = di_buf.items.len;
- const allocated_size = self.allocatedSize(debug_info_sect.sh_offset);
- if (needed_size > allocated_size) {
- debug_info_sect.sh_size = 0; // free the space
- debug_info_sect.sh_offset = self.findFreeSpace(needed_size, 1);
- }
- debug_info_sect.sh_size = needed_size;
- log.debug(.link, ".debug_info start=0x{x} end=0x{x}\n", .{
- debug_info_sect.sh_offset,
- debug_info_sect.sh_offset + needed_size,
- });
-
- try self.base.file.?.pwriteAll(di_buf.items, debug_info_sect.sh_offset);
- if (!self.shdr_table_dirty) {
- // Then it won't get written with the others and we need to do it.
- try self.writeSectHeader(self.debug_info_section_index.?);
- }
-
- self.debug_info_section_dirty = false;
+ const jmp_amt = first_dbg_info_decl.dbg_info_off - di_buf.items.len;
+ try self.pwriteDbgInfoNops(0, di_buf.items, jmp_amt, debug_info_sect.sh_offset);
+ self.debug_info_header_dirty = false;
}
+
if (self.debug_aranges_section_dirty) {
const debug_aranges_sect = &self.sections.items[self.debug_aranges_section_index.?];
@@ -1266,7 +1278,7 @@ pub const File = struct {
@panic("TODO: handle .debug_line header exceeding its padding");
}
const jmp_amt = dbg_line_prg_off - di_buf.items.len;
- try self.pwriteWithNops(0, di_buf.items, jmp_amt, debug_line_sect.sh_offset);
+ try self.pwriteDbgLineNops(0, di_buf.items, jmp_amt, debug_line_sect.sh_offset);
self.debug_line_header_dirty = false;
}
@@ -1417,8 +1429,7 @@ pub const File = struct {
// The point of flush() is to commit changes, so in theory, nothing should
// be dirty after this. However, it is possible for some things to remain
// dirty because they fail to be written in the event of compile errors,
- // such as debug_line_header_dirty.
- assert(!self.debug_info_section_dirty);
+ // such as debug_line_header_dirty and debug_info_header_dirty.
assert(!self.debug_abbrev_section_dirty);
assert(!self.debug_aranges_section_dirty);
assert(!self.phdr_table_dirty);
@@ -1700,8 +1711,8 @@ pub const File = struct {
// The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
// range of the compilation unit. When we expand the text section, this range changes,
- // so the .debug_info section becomes dirty.
- self.debug_info_section_dirty = true;
+ // so the DW_TAG_compile_unit tag of the .debug_info section becomes dirty.
+ self.debug_info_header_dirty = true;
// This becomes dirty for the same reason. We could potentially make this more
// fine-grained with the addition of support for more compilation units. It is planned to
// model each package as a different compilation unit.
@@ -1815,6 +1826,9 @@ pub const File = struct {
var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
defer dbg_line_buffer.deinit();
+ var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
+ defer dbg_info_buffer.deinit();
+
const typed_value = decl.typed_value.most_recent.typed_value;
const is_fn: bool = switch (typed_value.ty.zigTypeTag()) {
.Fn => true,
@@ -1871,7 +1885,7 @@ pub const File = struct {
// do the work of setting prologue_end=true and epilogue_begin=true.
dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy);
}
- const res = try codegen.generateSymbol(self, decl.src(), typed_value, &code_buffer, &dbg_line_buffer);
+ const res = try codegen.generateSymbol(self, decl.src(), typed_value, &code_buffer, &dbg_line_buffer, &dbg_info_buffer);
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
@@ -1937,6 +1951,8 @@ pub const File = struct {
const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
+ try self.updateDeclDebugInfo(module, decl, dbg_info_buffer.items);
+
// If the Decl is a function, we need to update the .debug_line program.
if (is_fn) {
// Perform the relocation based on vaddr.
@@ -1956,6 +1972,10 @@ pub const File = struct {
// Now we have the full contents and may allocate a region to store it.
+ // This logic is nearly identical to the logic below in `updateDeclDebugInfo` for
+ // `TextBlock` and the .debug_info. If you are editing this logic, you
+ // probably need to edit that logic too.
+
const debug_line_sect = &self.sections.items[self.debug_line_section_index.?];
const src_fn = &decl.fn_link.elf;
src_fn.len = @intCast(u32, dbg_line_buffer.items.len);
@@ -1972,7 +1992,7 @@ pub const File = struct {
src_fn.next = null;
// Populate where it used to be with NOPs.
const file_pos = debug_line_sect.sh_offset + src_fn.off;
- try self.pwriteWithNops(0, &[0]u8{}, src_fn.len, file_pos);
+ try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos);
// TODO Look at the free list before appending at the end.
src_fn.prev = last;
last.next = src_fn;
@@ -2022,7 +2042,7 @@ pub const File = struct {
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_line section.
const file_pos = debug_line_sect.sh_offset + src_fn.off;
- try self.pwriteWithNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos);
+ try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos);
}
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
@@ -2030,6 +2050,88 @@ pub const File = struct {
return self.updateDeclExports(module, decl, decl_exports);
}
+ pub fn updateDeclDebugInfo(self: *Elf, module: *Module, decl: *Module.Decl, dbg_info_buf: []const u8) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ // This logic is nearly identical to the logic above in `updateDecl` for
+ // `SrcFn` and the line number programs. If you are editing this logic, you
+ // probably need to edit that logic too.
+
+ const debug_info_sect = &self.sections.items[self.debug_info_section_index.?];
+ const text_block = &decl.link.elf;
+ if (self.dbg_info_decl_last) |last| {
+ if (text_block.dbg_info_next) |next| {
+ // Update existing Decl - non-last item.
+ if (text_block.dbg_info_off + text_block.dbg_info_len + min_nop_size > next.dbg_info_off) {
+ // It grew too big, so we move it to a new location.
+ if (text_block.dbg_info_prev) |prev| {
+ _ = self.dbg_info_decl_free_list.put(self.base.allocator, prev, {}) catch {};
+ prev.dbg_info_next = text_block.dbg_info_next;
+ }
+ next.dbg_info_prev = text_block.dbg_info_prev;
+ text_block.dbg_info_next = null;
+ // Populate where it used to be with NOPs.
+ const file_pos = debug_info_sect.sh_offset + text_block.dbg_info_off;
+ try self.pwriteDbgInfoNops(0, &[0]u8{}, text_block.dbg_info_len, file_pos);
+ // TODO Look at the free list before appending at the end.
+ text_block.dbg_info_prev = last;
+ last.dbg_info_next = text_block;
+ self.dbg_info_decl_last = text_block;
+
+ text_block.dbg_info_off = last.dbg_info_off + (last.dbg_info_len * alloc_num / alloc_den);
+ }
+ } else if (text_block.dbg_info_prev == null) {
+ // Append new Decl.
+ // TODO Look at the free list before appending at the end.
+ text_block.dbg_info_prev = last;
+ last.dbg_info_next = text_block;
+ self.dbg_info_decl_last = text_block;
+
+ text_block.dbg_info_off = last.dbg_info_off + (last.dbg_info_len * alloc_num / alloc_den);
+ }
+ } else {
+ // This is the first Decl of the .debug_info
+ self.dbg_info_decl_first = text_block;
+ self.dbg_info_decl_last = text_block;
+
+ text_block.dbg_info_off = self.dbgInfoNeededHeaderBytes() * alloc_num / alloc_den;
+ }
+
+ const last_decl = self.dbg_info_decl_last.?;
+ const needed_size = last_decl.dbg_info_off + last_decl.dbg_info_len;
+ if (needed_size != debug_info_sect.sh_size) {
+ if (needed_size > self.allocatedSize(debug_info_sect.sh_offset)) {
+ const new_offset = self.findFreeSpace(needed_size, 1);
+ const existing_size = last_decl.dbg_info_off;
+ log.debug(.link, "moving .debug_info section: {} bytes from 0x{x} to 0x{x}\n", .{
+ existing_size,
+ debug_info_sect.sh_offset,
+ new_offset,
+ });
+ const amt = try self.base.file.?.copyRangeAll(debug_info_sect.sh_offset, self.base.file.?, new_offset, existing_size);
+ if (amt != existing_size) return error.InputOutput;
+ debug_info_sect.sh_offset = new_offset;
+ }
+ debug_info_sect.sh_size = needed_size;
+ self.shdr_table_dirty = true; // TODO look into making only the one section dirty
+ self.debug_info_header_dirty = true;
+ }
+ const prev_padding_size: u32 = if (text_block.dbg_info_prev) |prev|
+ text_block.dbg_info_off - (prev.dbg_info_off + prev.dbg_info_len)
+ else
+ 0;
+ const next_padding_size: u32 = if (text_block.dbg_info_next) |next|
+ next.dbg_info_off - (text_block.dbg_info_off + text_block.dbg_info_len)
+ else
+ 0;
+
+ // We only have support for one compilation unit so far, so the offsets are directly
+ // from the .debug_info section.
+ const file_pos = debug_info_sect.sh_offset + text_block.dbg_info_off;
+ try self.pwriteDbgInfoNops(prev_padding_size, dbg_info_buf, next_padding_size, file_pos);
+ }
+
/// Must be called only after a successful call to `updateDecl`.
pub fn updateDeclExports(
self: *Elf,
@@ -2221,6 +2323,9 @@ pub const File = struct {
}
fn writeSymbol(self: *Elf, index: usize) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
const syms_sect = &self.sections.items[self.symtab_section_index.?];
// Make sure we are not pointlessly writing symbol data that will have to get relocated
// due to running out of space.
@@ -2361,18 +2466,27 @@ pub const File = struct {
}
+ fn dbgInfoNeededHeaderBytes(self: Elf) u32 {
+ return 120;
+ }
+
+ const min_nop_size = 2;
+
/// Writes to the file a buffer, prefixed and suffixed by the specified number of
/// bytes of NOPs. Asserts each padding size is at least `min_nop_size` and total padding bytes
/// are less than 126,976 bytes (if this limit is ever reached, this function can be
/// improved to make more than one pwritev call, or the limit can be raised by a fixed
/// amount by increasing the length of `vecs`).
- fn pwriteWithNops(
+ fn pwriteDbgLineNops(
self: *Elf,
prev_padding_size: usize,
buf: []const u8,
next_padding_size: usize,
offset: usize,
) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
const page_of_nops = [1]u8{DW.LNS_negate_stmt} ** 4096;
const three_byte_nop = [3]u8{DW.LNS_advance_pc, 0b1000_0000, 0};
var vecs: [32]std.os.iovec_const = undefined;
@@ -2439,7 +2553,66 @@ pub const File = struct {
try self.base.file.?.pwritevAll(vecs[0..vec_index], offset - prev_padding_size);
}
- const min_nop_size = 2;
+ /// Writes to the file a buffer, prefixed and suffixed by the specified number of
+ /// bytes of padding.
+ fn pwriteDbgInfoNops(
+ self: *Elf,
+ prev_padding_size: usize,
+ buf: []const u8,
+ next_padding_size: usize,
+ offset: usize,
+ ) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const page_of_nops = [1]u8{0} ** 4096;
+ var vecs: [32]std.os.iovec_const = undefined;
+ var vec_index: usize = 0;
+ {
+ var padding_left = prev_padding_size;
+ while (padding_left > page_of_nops.len) {
+ vecs[vec_index] = .{
+ .iov_base = &page_of_nops,
+ .iov_len = page_of_nops.len,
+ };
+ vec_index += 1;
+ padding_left -= page_of_nops.len;
+ }
+ if (padding_left > 0) {
+ vecs[vec_index] = .{
+ .iov_base = &page_of_nops,
+ .iov_len = padding_left,
+ };
+ vec_index += 1;
+ }
+ }
+
+ vecs[vec_index] = .{
+ .iov_base = buf.ptr,
+ .iov_len = buf.len,
+ };
+ vec_index += 1;
+
+ {
+ var padding_left = next_padding_size;
+ while (padding_left > page_of_nops.len) {
+ vecs[vec_index] = .{
+ .iov_base = &page_of_nops,
+ .iov_len = page_of_nops.len,
+ };
+ vec_index += 1;
+ padding_left -= page_of_nops.len;
+ }
+ if (padding_left > 0) {
+ vecs[vec_index] = .{
+ .iov_base = &page_of_nops,
+ .iov_len = padding_left,
+ };
+ vec_index += 1;
+ }
+ }
+ try self.base.file.?.pwritevAll(vecs[0..vec_index], offset - prev_padding_size);
+ }
};
};
From a2a5cea286e87356b32a304853233e47a9fe70a7 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 11 Aug 2020 19:02:21 -0700
Subject: [PATCH 046/153] stage2: emit DW_TAG_subprogram for function Decls
these have the virtual address range, return type, and name.
---
src-self-hosted/link.zig | 174 ++++++++++++++++++++++++++++++++++++---
src-self-hosted/main.zig | 2 +-
2 files changed, 163 insertions(+), 13 deletions(-)
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 529175dc17..77534fca4a 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -14,6 +14,7 @@ const trace = @import("tracy.zig").trace;
const leb128 = std.debug.leb;
const Package = @import("Package.zig");
const Value = @import("value.zig").Value;
+const Type = @import("type.zig").Type;
// TODO Turn back on zig fmt when https://github.com/ziglang/zig/issues/5948 is implemented.
// zig fmt: off
@@ -985,6 +986,12 @@ pub const File = struct {
}
}
+ pub const abbrev_compile_unit = 1;
+ pub const abbrev_subprogram = 2;
+ pub const abbrev_subprogram_retvoid = 3;
+ pub const abbrev_base_type = 4;
+ pub const abbrev_pad1 = 5;
+
/// Commit pending changes and write headers.
pub fn flush(self: *Elf) !void {
const target_endian = self.base.options.target.cpu.arch.endian();
@@ -1005,7 +1012,7 @@ pub const File = struct {
// These are LEB encoded but since the values are all less than 127
// we can simply append these bytes.
const abbrev_buf = [_]u8{
- 1, DW.TAG_compile_unit, DW.CHILDREN_no, // header
+ abbrev_compile_unit, DW.TAG_compile_unit, DW.CHILDREN_yes, // header
DW.AT_stmt_list, DW.FORM_sec_offset,
DW.AT_low_pc , DW.FORM_addr,
DW.AT_high_pc , DW.FORM_addr,
@@ -1015,6 +1022,28 @@ pub const File = struct {
DW.AT_language , DW.FORM_data2,
0, 0, // table sentinel
+ abbrev_subprogram, DW.TAG_subprogram, DW.CHILDREN_yes, // header
+ DW.AT_low_pc , DW.FORM_addr,
+ DW.AT_high_pc , DW.FORM_data4,
+ DW.AT_type , DW.FORM_ref4,
+ DW.AT_name , DW.FORM_string,
+ 0, 0, // table sentinel
+
+ abbrev_subprogram_retvoid, DW.TAG_subprogram, DW.CHILDREN_yes, // header
+ DW.AT_low_pc , DW.FORM_addr,
+ DW.AT_high_pc , DW.FORM_data4,
+ DW.AT_name , DW.FORM_string,
+ 0, 0, // table sentinel
+
+ abbrev_base_type, DW.TAG_base_type, DW.CHILDREN_no, // header
+ DW.AT_encoding , DW.FORM_data1,
+ DW.AT_byte_size, DW.FORM_data1,
+ DW.AT_name , DW.FORM_string,
+ 0, 0, // table sentinel
+
+ abbrev_pad1, DW.TAG_unspecified_type, DW.CHILDREN_no, // header
+ 0, 0, // table sentinel
+
0, 0, 0, // section sentinel
};
@@ -1092,7 +1121,7 @@ pub const File = struct {
const low_pc = text_phdr.p_vaddr;
const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
- di_buf.appendAssumeCapacity(1); // abbrev tag, matching the value from the abbrev table header
+ di_buf.appendAssumeCapacity(abbrev_compile_unit);
self.writeDwarfAddrAssumeCapacity(&di_buf, 0); // DW.AT_stmt_list, DW.FORM_sec_offset
self.writeDwarfAddrAssumeCapacity(&di_buf, low_pc);
self.writeDwarfAddrAssumeCapacity(&di_buf, high_pc);
@@ -1834,6 +1863,7 @@ pub const File = struct {
.Fn => true,
else => false,
};
+ var fn_ret_has_bits: bool = undefined;
if (is_fn) {
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureCapacity(26);
@@ -1884,6 +1914,31 @@ pub const File = struct {
// Emit a line for the begin curly with prologue_end=false. The codegen will
// do the work of setting prologue_end=true and epilogue_begin=true.
dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy);
+
+ // .debug_info subprogram
+ const decl_name_with_null = decl.name[0..mem.lenZ(decl.name) + 1];
+ try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len);
+
+ fn_ret_has_bits = typed_value.ty.fnReturnType().hasCodeGenBits();
+ if (fn_ret_has_bits) {
+ dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram);
+ } else {
+ dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid);
+ }
+ // These get overwritten after generating the machine code. These values are
+ // "relocations" and have to be in this fixed place so that functions can be
+ // moved in virtual address space.
+ assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len);
+ dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr
+ assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len);
+ dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4
+ if (fn_ret_has_bits) {
+ assert(self.getRelocDbgInfoSubprogramRetType() == dbg_info_buffer.items.len);
+ dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4
+ }
+ dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string
+ } else {
+ // TODO implement .debug_info for global variables
}
const res = try codegen.generateSymbol(self, decl.src(), typed_value, &code_buffer, &dbg_line_buffer, &dbg_info_buffer);
const code = switch (res) {
@@ -1951,22 +2006,40 @@ pub const File = struct {
const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
- try self.updateDeclDebugInfo(module, decl, dbg_info_buffer.items);
+ const target_endian = self.base.options.target.cpu.arch.endian();
+
+ const text_block = &decl.link.elf;
// If the Decl is a function, we need to update the .debug_line program.
+ var fn_ret_type_index: usize = undefined;
if (is_fn) {
- // Perform the relocation based on vaddr.
- const target_endian = self.base.options.target.cpu.arch.endian();
+ // Perform the relocations based on vaddr.
switch (self.ptr_width) {
.p32 => {
- const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4];
- mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian);
+ {
+ const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4];
+ mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian);
+ }
+ {
+ const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4];
+ mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian);
+ }
},
.p64 => {
- const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8];
- mem.writeInt(u64, ptr, local_sym.st_value, target_endian);
+ {
+ const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8];
+ mem.writeInt(u64, ptr, local_sym.st_value, target_endian);
+ }
+ {
+ const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8];
+ mem.writeInt(u64, ptr, local_sym.st_value, target_endian);
+ }
},
}
+ {
+ const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4];
+ mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian);
+ }
try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence });
@@ -2043,14 +2116,69 @@ pub const File = struct {
// from the .debug_line section.
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos);
+
+ // .debug_info
+ try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 2);
+ // End the TAG_subprogram children.
+ dbg_info_buffer.appendAssumeCapacity(0);
+ if (fn_ret_has_bits) {
+ // Now we do the return type of the function. The relocation must be performed
+ // later after the offset for this subprogram is computed.
+ fn_ret_type_index = dbg_info_buffer.items.len;
+ try self.addDbgInfoType(typed_value.ty.fnReturnType(), &dbg_info_buffer);
+ }
}
+ try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len));
+
+ if (is_fn and fn_ret_has_bits) {
+ // Perform function return type relocation.
+ mem.writeInt(
+ u32,
+ dbg_info_buffer.items[self.getRelocDbgInfoSubprogramRetType()..][0..4],
+ text_block.dbg_info_off + @intCast(u32, fn_ret_type_index),
+ target_endian,
+ );
+ }
+
+ try self.writeDeclDebugInfo(text_block, dbg_info_buffer.items);
+
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl, decl_exports);
}
- pub fn updateDeclDebugInfo(self: *Elf, module: *Module, decl: *Module.Decl, dbg_info_buf: []const u8) !void {
+ /// Asserts the type has codegen bits.
+ fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !void {
+ switch (ty.zigTypeTag()) {
+ .Void, .NoReturn => unreachable,
+ .Bool => {
+ try dbg_info_buffer.appendSlice(&[_]u8{
+ abbrev_base_type,
+ DW.ATE_boolean, // DW.AT_encoding , DW.FORM_data1
+ 1, // DW.AT_byte_size, DW.FORM_data1
+ 'b', 'o', 'o', 'l', 0, // DW.AT_name, DW.FORM_string
+ });
+ },
+ .Int => {
+ const info = ty.intInfo(self.base.options.target);
+ try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12);
+ dbg_info_buffer.appendAssumeCapacity(abbrev_base_type);
+ // DW.AT_encoding, DW.FORM_data1
+ dbg_info_buffer.appendAssumeCapacity(if (info.signed) DW.ATE_signed else DW.ATE_unsigned);
+ // DW.AT_byte_size, DW.FORM_data1
+ dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(self.base.options.target)));
+ // DW.AT_name, DW.FORM_string
+ try dbg_info_buffer.writer().print("{}\x00", .{ty});
+ },
+ else => {
+ log.err(.compiler, "TODO implement .debug_info for type '{}'", .{ty});
+ try dbg_info_buffer.append(abbrev_pad1);
+ },
+ }
+ }
+
+ fn updateDeclDebugInfoAllocation(self: *Elf, text_block: *TextBlock, len: u32) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -2059,7 +2187,7 @@ pub const File = struct {
// probably need to edit that logic too.
const debug_info_sect = &self.sections.items[self.debug_info_section_index.?];
- const text_block = &decl.link.elf;
+ text_block.dbg_info_len = len;
if (self.dbg_info_decl_last) |last| {
if (text_block.dbg_info_next) |next| {
// Update existing Decl - non-last item.
@@ -2097,6 +2225,17 @@ pub const File = struct {
text_block.dbg_info_off = self.dbgInfoNeededHeaderBytes() * alloc_num / alloc_den;
}
+ }
+
+ fn writeDeclDebugInfo(self: *Elf, text_block: *TextBlock, dbg_info_buf: []const u8) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ // This logic is nearly identical to the logic above in `updateDecl` for
+ // `SrcFn` and the line number programs. If you are editing this logic, you
+ // probably need to edit that logic too.
+
+ const debug_info_sect = &self.sections.items[self.debug_info_section_index.?];
const last_decl = self.dbg_info_decl_last.?;
const needed_size = last_decl.dbg_info_off + last_decl.dbg_info_len;
@@ -2441,6 +2580,9 @@ pub const File = struct {
/// The reloc offset for the virtual address of a function in its Line Number Program.
/// Size is a virtual address integer.
const dbg_line_vaddr_reloc_index = 3;
+ /// The reloc offset for the virtual address of a function in its .debug_info TAG_subprogram.
+ /// Size is a virtual address integer.
+ const dbg_info_low_pc_reloc_index = 1;
/// The reloc offset for the line offset of a function from the previous function's line.
/// It's a fixed-size 4-byte ULEB128.
@@ -2452,6 +2594,14 @@ pub const File = struct {
return self.getRelocDbgLineOff() + 5;
}
+ fn getRelocDbgInfoSubprogramHighPC(self: Elf) u32 {
+ return dbg_info_low_pc_reloc_index + self.ptrWidthBytes();
+ }
+
+ fn getRelocDbgInfoSubprogramRetType(self: Elf) u32 {
+ return self.getRelocDbgInfoSubprogramHighPC() + 4;
+ }
+
fn dbgLineNeededHeaderBytes(self: Elf) u32 {
const directory_entry_format_count = 1;
const file_name_entry_format_count = 1;
@@ -2565,7 +2715,7 @@ pub const File = struct {
const tracy = trace(@src());
defer tracy.end();
- const page_of_nops = [1]u8{0} ** 4096;
+ const page_of_nops = [1]u8{abbrev_pad1} ** 4096;
var vecs: [32]std.os.iovec_const = undefined;
var vec_index: usize = 0;
{
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 2f6610da28..76d8651646 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -59,7 +59,7 @@ pub fn log(
const prefix = "[" ++ @tagName(level) ++ "] " ++ "(" ++ @tagName(scope) ++ "): ";
// Print the message to stderr, silently ignoring any errors
- std.debug.print(prefix ++ format, args);
+ std.debug.print(prefix ++ format ++ "\n", args);
}
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
From 8282f4271cf3a3e0d2159c698142b3cafe6b1603 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 11 Aug 2020 22:23:32 -0700
Subject: [PATCH 047/153] stage2: basic support for parameters .debug_info
see #6014
---
src-self-hosted/Module.zig | 24 ++++-
src-self-hosted/codegen.zig | 137 +++++++++++++++++-----------
src-self-hosted/codegen/riscv64.zig | 9 ++
src-self-hosted/codegen/x86.zig | 79 ++++++++++++++++
src-self-hosted/codegen/x86_64.zig | 109 ++++++++++++++++++++++
src-self-hosted/ir.zig | 16 +++-
src-self-hosted/link.zig | 113 ++++++++++++++++-------
src-self-hosted/type.zig | 66 +++++++++++++-
src-self-hosted/zir.zig | 29 +++++-
src-self-hosted/zir_sema.zig | 5 +-
10 files changed, 491 insertions(+), 96 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index e3233c6403..6abd4f51e1 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -1301,14 +1301,16 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
for (fn_proto.params()) |param, i| {
const name_token = param.name_token.?;
const src = tree.token_locs[name_token].start;
- const param_name = tree.tokenSlice(name_token);
- const arg = try gen_scope_arena.allocator.create(zir.Inst.NoOp);
+ const param_name = tree.tokenSlice(name_token); // TODO: call identifierTokenString
+ const arg = try gen_scope_arena.allocator.create(zir.Inst.Arg);
arg.* = .{
.base = .{
.tag = .arg,
.src = src,
},
- .positionals = .{},
+ .positionals = .{
+ .name = param_name,
+ },
.kw_args = .{},
};
gen_scope.instructions.items[i] = &arg.base;
@@ -1934,6 +1936,20 @@ pub fn addBinOp(
return &inst.base;
}
+pub fn addArg(self: *Module, block: *Scope.Block, src: usize, ty: Type, name: [*:0]const u8) !*Inst {
+ const inst = try block.arena.create(Inst.Arg);
+ inst.* = .{
+ .base = .{
+ .tag = .arg,
+ .ty = ty,
+ .src = src,
+ },
+ .name = name,
+ };
+ try block.instructions.append(self.gpa, &inst.base);
+ return &inst.base;
+}
+
pub fn addBr(
self: *Module,
scope_block: *Scope.Block,
@@ -2535,7 +2551,7 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
}
}
- return self.fail(scope, inst.src, "expected {}, found {}", .{ dest_type, inst.ty });
+ return self.fail(scope, inst.src, "expected {}, found {}", .{ dest_type, inst.ty });
}
pub fn storePtr(self: *Module, scope: *Scope, src: usize, ptr: *Inst, uncasted_value: *Inst) !*Inst {
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index db99b88d0e..a3c95349e1 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -51,6 +51,7 @@ pub fn generateSymbol(
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
+ dbg_info_type_relocs: *link.File.Elf.DbgInfoTypeRelocsTable,
) GenerateSymbolError!Result {
const tracy = trace(@src());
defer tracy.end();
@@ -58,57 +59,57 @@ pub fn generateSymbol(
switch (typed_value.ty.zigTypeTag()) {
.Fn => {
switch (bin_file.base.options.target.cpu.arch) {
- //.arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.wasm32 => return Function(.wasm32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.wasm64 => return Function(.wasm64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
- //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info),
+ //.arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.wasm32 => return Function(.wasm32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.wasm64 => return Function(.wasm64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
+ //.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."),
}
},
@@ -122,7 +123,7 @@ pub fn generateSymbol(
switch (try generateSymbol(bin_file, src, .{
.ty = typed_value.ty.elemType(),
.val = sentinel,
- }, code, dbg_line, dbg_info)) {
+ }, code, dbg_line, dbg_info, dbg_info_type_relocs)) {
.appended => return Result{ .appended = {} },
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
@@ -225,6 +226,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
+ dbg_info_type_relocs: *link.File.Elf.DbgInfoTypeRelocsTable,
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
@@ -395,6 +397,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
+ dbg_info_type_relocs: *link.File.Elf.DbgInfoTypeRelocsTable,
) GenerateSymbolError!Result {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
@@ -433,6 +436,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.code = code,
.dbg_line = dbg_line,
.dbg_info = dbg_info,
+ .dbg_info_type_relocs = dbg_info_type_relocs,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
@@ -610,6 +614,23 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
+ /// Adds a Type to the .debug_info at the current position. The bytes will be populated later,
+ /// after codegen for this symbol is done.
+ fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
+ assert(ty.hasCodeGenBits());
+ const index = self.dbg_info.items.len;
+ try self.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4
+
+ const gop = try self.dbg_info_type_relocs.getOrPut(self.gpa, ty);
+ if (!gop.found_existing) {
+ gop.entry.value = .{
+ .off = undefined,
+ .relocs = .{},
+ };
+ }
+ try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index));
+ }
+
fn genFuncInst(self: *Self, inst: *ir.Inst) !MCValue {
switch (inst.tag) {
.add => return self.genAdd(inst.castTag(.add).?),
@@ -1009,7 +1030,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
- fn genArg(self: *Self, inst: *ir.Inst.NoOp) !MCValue {
+ fn genArg(self: *Self, inst: *ir.Inst.Arg) !MCValue {
if (FreeRegInt == u0) {
return self.fail(inst.base.src, "TODO implement Register enum for {}", .{self.target.cpu.arch});
}
@@ -1022,10 +1043,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const result = self.args[self.arg_index];
self.arg_index += 1;
+ const name_with_null = inst.name[0..mem.lenZ(inst.name) + 1];
switch (result) {
.register => |reg| {
branch.registers.putAssumeCapacityNoClobber(reg, .{ .inst = &inst.base });
branch.markRegUsed(reg);
+
+ try self.dbg_info.ensureCapacity(self.dbg_info.items.len + 8 + name_with_null.len);
+ self.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
+ self.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
+ 1, // ULEB128 dwarf expression length
+ reg.dwarfLocOp(),
+ });
+ try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4
+ self.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
},
else => {},
}
diff --git a/src-self-hosted/codegen/riscv64.zig b/src-self-hosted/codegen/riscv64.zig
index c5c762709a..793731c83c 100644
--- a/src-self-hosted/codegen/riscv64.zig
+++ b/src-self-hosted/codegen/riscv64.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const DW = std.dwarf;
pub const instructions = struct {
pub const CallBreak = packed struct {
@@ -48,6 +49,10 @@ pub const RawRegister = enum(u8) {
x8, x9, x10, x11, x12, x13, x14, x15,
x16, x17, x18, x19, x20, x21, x22, x23,
x24, x25, x26, x27, x28, x29, x30, x31,
+
+ pub fn dwarfLocOp(reg: RawRegister) u8 {
+ return @enumToInt(reg) + DW.OP_reg0;
+ }
};
pub const Register = enum(u8) {
@@ -83,6 +88,10 @@ pub const Register = enum(u8) {
}
return null;
}
+
+ pub fn dwarfLocOp(reg: Register) u8 {
+ return @enumToInt(reg) + DW.OP_reg0;
+ }
};
// zig fmt: on
diff --git a/src-self-hosted/codegen/x86.zig b/src-self-hosted/codegen/x86.zig
index e0d0848bf5..fdad4e56db 100644
--- a/src-self-hosted/codegen/x86.zig
+++ b/src-self-hosted/codegen/x86.zig
@@ -1,3 +1,6 @@
+const std = @import("std");
+const DW = std.dwarf;
+
// zig fmt: off
pub const Register = enum(u8) {
// 0 through 7, 32-bit registers. id is int value
@@ -37,8 +40,84 @@ pub const Register = enum(u8) {
else => null,
};
}
+
+ /// Convert from any register to its 32 bit alias.
+ pub fn to32(self: Register) Register {
+ return @intToEnum(Register, @as(u8, self.id()));
+ }
+
+ /// Convert from any register to its 16 bit alias.
+ pub fn to16(self: Register) Register {
+ return @intToEnum(Register, @as(u8, self.id()) + 8);
+ }
+
+ /// Convert from any register to its 8 bit alias.
+ pub fn to8(self: Register) Register {
+ return @intToEnum(Register, @as(u8, self.id()) + 16);
+ }
+
+
+ pub fn dwarfLocOp(reg: Register) u8 {
+ return switch (reg.to32()) {
+ .eax => DW.OP_reg0,
+ .ecx => DW.OP_reg1,
+ .edx => DW.OP_reg2,
+ .ebx => DW.OP_reg3,
+ .esp => DW.OP_reg4,
+ .ebp => DW.OP_reg5,
+ .esi => DW.OP_reg6,
+ .edi => DW.OP_reg7,
+ else => unreachable,
+ };
+ }
};
// zig fmt: on
pub const callee_preserved_regs = [_]Register{ .eax, .ecx, .edx, .esi, .edi };
+
+// TODO add these to Register enum and corresponding dwarfLocOp
+// // Return Address register. This is stored in `0(%esp, "")` and is not a physical register.
+// RA = (8, "RA"),
+//
+// ST0 = (11, "st0"),
+// ST1 = (12, "st1"),
+// ST2 = (13, "st2"),
+// ST3 = (14, "st3"),
+// ST4 = (15, "st4"),
+// ST5 = (16, "st5"),
+// ST6 = (17, "st6"),
+// ST7 = (18, "st7"),
+//
+// XMM0 = (21, "xmm0"),
+// XMM1 = (22, "xmm1"),
+// XMM2 = (23, "xmm2"),
+// XMM3 = (24, "xmm3"),
+// XMM4 = (25, "xmm4"),
+// XMM5 = (26, "xmm5"),
+// XMM6 = (27, "xmm6"),
+// XMM7 = (28, "xmm7"),
+//
+// MM0 = (29, "mm0"),
+// MM1 = (30, "mm1"),
+// MM2 = (31, "mm2"),
+// MM3 = (32, "mm3"),
+// MM4 = (33, "mm4"),
+// MM5 = (34, "mm5"),
+// MM6 = (35, "mm6"),
+// MM7 = (36, "mm7"),
+//
+// MXCSR = (39, "mxcsr"),
+//
+// ES = (40, "es"),
+// CS = (41, "cs"),
+// SS = (42, "ss"),
+// DS = (43, "ds"),
+// FS = (44, "fs"),
+// GS = (45, "gs"),
+//
+// TR = (48, "tr"),
+// LDTR = (49, "ldtr"),
+//
+// FS_BASE = (93, "fs.base"),
+// GS_BASE = (94, "gs.base"),
diff --git a/src-self-hosted/codegen/x86_64.zig b/src-self-hosted/codegen/x86_64.zig
index c149613ae9..dea39f82cd 100644
--- a/src-self-hosted/codegen/x86_64.zig
+++ b/src-self-hosted/codegen/x86_64.zig
@@ -1,4 +1,6 @@
+const std = @import("std");
const Type = @import("../Type.zig");
+const DW = std.dwarf;
// zig fmt: off
@@ -101,6 +103,30 @@ pub const Register = enum(u8) {
pub fn to8(self: Register) Register {
return @intToEnum(Register, @as(u8, self.id()) + 48);
}
+
+ pub fn dwarfLocOp(self: Register) u8 {
+ return switch (self.to64()) {
+ .rax => DW.OP_reg0,
+ .rdx => DW.OP_reg1,
+ .rcx => DW.OP_reg2,
+ .rbx => DW.OP_reg3,
+ .rsi => DW.OP_reg4,
+ .rdi => DW.OP_reg5,
+ .rbp => DW.OP_reg6,
+ .rsp => DW.OP_reg7,
+
+ .r8 => DW.OP_reg8,
+ .r9 => DW.OP_reg9,
+ .r10 => DW.OP_reg10,
+ .r11 => DW.OP_reg11,
+ .r12 => DW.OP_reg12,
+ .r13 => DW.OP_reg13,
+ .r14 => DW.OP_reg14,
+ .r15 => DW.OP_reg15,
+
+ else => unreachable,
+ };
+ }
};
// zig fmt: on
@@ -109,3 +135,86 @@ pub const Register = enum(u8) {
pub const callee_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11 };
pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx };
+
+// TODO add these registers to the enum and populate dwarfLocOp
+// // Return Address register. This is stored in `0(%rsp, "")` and is not a physical register.
+// RA = (16, "RA"),
+//
+// XMM0 = (17, "xmm0"),
+// XMM1 = (18, "xmm1"),
+// XMM2 = (19, "xmm2"),
+// XMM3 = (20, "xmm3"),
+// XMM4 = (21, "xmm4"),
+// XMM5 = (22, "xmm5"),
+// XMM6 = (23, "xmm6"),
+// XMM7 = (24, "xmm7"),
+//
+// XMM8 = (25, "xmm8"),
+// XMM9 = (26, "xmm9"),
+// XMM10 = (27, "xmm10"),
+// XMM11 = (28, "xmm11"),
+// XMM12 = (29, "xmm12"),
+// XMM13 = (30, "xmm13"),
+// XMM14 = (31, "xmm14"),
+// XMM15 = (32, "xmm15"),
+//
+// ST0 = (33, "st0"),
+// ST1 = (34, "st1"),
+// ST2 = (35, "st2"),
+// ST3 = (36, "st3"),
+// ST4 = (37, "st4"),
+// ST5 = (38, "st5"),
+// ST6 = (39, "st6"),
+// ST7 = (40, "st7"),
+//
+// MM0 = (41, "mm0"),
+// MM1 = (42, "mm1"),
+// MM2 = (43, "mm2"),
+// MM3 = (44, "mm3"),
+// MM4 = (45, "mm4"),
+// MM5 = (46, "mm5"),
+// MM6 = (47, "mm6"),
+// MM7 = (48, "mm7"),
+//
+// RFLAGS = (49, "rFLAGS"),
+// ES = (50, "es"),
+// CS = (51, "cs"),
+// SS = (52, "ss"),
+// DS = (53, "ds"),
+// FS = (54, "fs"),
+// GS = (55, "gs"),
+//
+// FS_BASE = (58, "fs.base"),
+// GS_BASE = (59, "gs.base"),
+//
+// TR = (62, "tr"),
+// LDTR = (63, "ldtr"),
+// MXCSR = (64, "mxcsr"),
+// FCW = (65, "fcw"),
+// FSW = (66, "fsw"),
+//
+// XMM16 = (67, "xmm16"),
+// XMM17 = (68, "xmm17"),
+// XMM18 = (69, "xmm18"),
+// XMM19 = (70, "xmm19"),
+// XMM20 = (71, "xmm20"),
+// XMM21 = (72, "xmm21"),
+// XMM22 = (73, "xmm22"),
+// XMM23 = (74, "xmm23"),
+// XMM24 = (75, "xmm24"),
+// XMM25 = (76, "xmm25"),
+// XMM26 = (77, "xmm26"),
+// XMM27 = (78, "xmm27"),
+// XMM28 = (79, "xmm28"),
+// XMM29 = (80, "xmm29"),
+// XMM30 = (81, "xmm30"),
+// XMM31 = (82, "xmm31"),
+//
+// K0 = (118, "k0"),
+// K1 = (119, "k1"),
+// K2 = (120, "k2"),
+// K3 = (121, "k3"),
+// K4 = (122, "k4"),
+// K5 = (123, "k5"),
+// K6 = (124, "k6"),
+// K7 = (125, "k7"),
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 1188230a54..176fd7e303 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -87,7 +87,6 @@ pub const Inst = struct {
.alloc,
.retvoid,
.unreach,
- .arg,
.breakpoint,
.dbg_stmt,
=> NoOp,
@@ -115,6 +114,7 @@ pub const Inst = struct {
.store,
=> BinOp,
+ .arg => Arg,
.assembly => Assembly,
.block => Block,
.br => Br,
@@ -253,6 +253,20 @@ pub const Inst = struct {
}
};
+ pub const Arg = struct {
+ pub const base_tag = Tag.arg;
+
+ base: Inst,
+ name: [*:0]const u8,
+
+ pub fn operandCount(self: *const Arg) usize {
+ return 0;
+ }
+ pub fn getOperand(self: *const Arg, index: usize) ?*Inst {
+ return null;
+ }
+ };
+
pub const Assembly = struct {
pub const base_tag = Tag.assembly;
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 77534fca4a..d05c676a38 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -391,6 +391,17 @@ pub const File = struct {
const minimum_text_block_size = 64;
const min_text_capacity = minimum_text_block_size * alloc_num / alloc_den;
+ pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, true);
+
+ const DbgInfoTypeReloc = struct {
+ /// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
+ /// This is where the .debug_info tag for the type is.
+ off: u32,
+ /// Offset from `TextBlock.dbg_info_off` (the buffer that is local to a Decl).
+ /// List of DW.AT_type / DW.FORM_ref4 that points to the type.
+ relocs: std.ArrayListUnmanaged(u32),
+ };
+
pub const TextBlock = struct {
/// Each decl always gets a local symbol with the fully qualified name.
/// The vaddr and size are found here directly.
@@ -991,6 +1002,7 @@ pub const File = struct {
pub const abbrev_subprogram_retvoid = 3;
pub const abbrev_base_type = 4;
pub const abbrev_pad1 = 5;
+ pub const abbrev_parameter = 6;
/// Commit pending changes and write headers.
pub fn flush(self: *Elf) !void {
@@ -1044,6 +1056,12 @@ pub const File = struct {
abbrev_pad1, DW.TAG_unspecified_type, DW.CHILDREN_no, // header
0, 0, // table sentinel
+ abbrev_parameter, DW.TAG_formal_parameter, DW.CHILDREN_no, // header
+ DW.AT_location , DW.FORM_exprloc,
+ DW.AT_type , DW.FORM_ref4,
+ DW.AT_name , DW.FORM_string,
+ 0, 0, // table sentinel
+
0, 0, 0, // section sentinel
};
@@ -1088,7 +1106,8 @@ pub const File = struct {
// not including the initial length itself.
// We have to come back and write it later after we know the size.
const after_init_len = di_buf.items.len + init_len_size;
- const dbg_info_end = last_dbg_info_decl.dbg_info_off + last_dbg_info_decl.dbg_info_len;
+ // +1 for the final 0 that ends the compilation unit children.
+ const dbg_info_end = last_dbg_info_decl.dbg_info_off + last_dbg_info_decl.dbg_info_len + 1;
const init_len = dbg_info_end - after_init_len;
switch (self.ptr_width) {
.p32 => {
@@ -1138,7 +1157,7 @@ pub const File = struct {
@panic("TODO: handle .debug_info header exceeding its padding");
}
const jmp_amt = first_dbg_info_decl.dbg_info_off - di_buf.items.len;
- try self.pwriteDbgInfoNops(0, di_buf.items, jmp_amt, debug_info_sect.sh_offset);
+ try self.pwriteDbgInfoNops(0, di_buf.items, jmp_amt, false, debug_info_sect.sh_offset);
self.debug_info_header_dirty = false;
}
@@ -1858,12 +1877,19 @@ pub const File = struct {
var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
defer dbg_info_buffer.deinit();
+ var dbg_info_type_relocs: DbgInfoTypeRelocsTable = .{};
+ defer {
+ for (dbg_info_type_relocs.items()) |*entry| {
+ entry.value.relocs.deinit(self.base.allocator);
+ }
+ dbg_info_type_relocs.deinit(self.base.allocator);
+ }
+
const typed_value = decl.typed_value.most_recent.typed_value;
const is_fn: bool = switch (typed_value.ty.zigTypeTag()) {
.Fn => true,
else => false,
};
- var fn_ret_has_bits: bool = undefined;
if (is_fn) {
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureCapacity(26);
@@ -1919,7 +1945,8 @@ pub const File = struct {
const decl_name_with_null = decl.name[0..mem.lenZ(decl.name) + 1];
try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len);
- fn_ret_has_bits = typed_value.ty.fnReturnType().hasCodeGenBits();
+ const fn_ret_type = typed_value.ty.fnReturnType();
+ const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
if (fn_ret_has_bits) {
dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram);
} else {
@@ -1933,14 +1960,21 @@ pub const File = struct {
assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len);
dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4
if (fn_ret_has_bits) {
- assert(self.getRelocDbgInfoSubprogramRetType() == dbg_info_buffer.items.len);
+ const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type);
+ if (!gop.found_existing) {
+ gop.entry.value = .{
+ .off = undefined,
+ .relocs = .{},
+ };
+ }
+ try gop.entry.value.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len));
dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4
}
dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string
} else {
// TODO implement .debug_info for global variables
}
- const res = try codegen.generateSymbol(self, decl.src(), typed_value, &code_buffer, &dbg_line_buffer, &dbg_info_buffer);
+ const res = try codegen.generateSymbol(self, decl.src(), typed_value, &code_buffer, &dbg_line_buffer, &dbg_info_buffer, &dbg_info_type_relocs);
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
@@ -2011,7 +2045,6 @@ pub const File = struct {
const text_block = &decl.link.elf;
// If the Decl is a function, we need to update the .debug_line program.
- var fn_ret_type_index: usize = undefined;
if (is_fn) {
// Perform the relocations based on vaddr.
switch (self.ptr_width) {
@@ -2117,28 +2150,30 @@ pub const File = struct {
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos);
- // .debug_info
- try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 2);
- // End the TAG_subprogram children.
- dbg_info_buffer.appendAssumeCapacity(0);
- if (fn_ret_has_bits) {
- // Now we do the return type of the function. The relocation must be performed
- // later after the offset for this subprogram is computed.
- fn_ret_type_index = dbg_info_buffer.items.len;
- try self.addDbgInfoType(typed_value.ty.fnReturnType(), &dbg_info_buffer);
- }
+ // .debug_info - End the TAG_subprogram children.
+ try dbg_info_buffer.append(0);
+ }
+
+ // Now we emit the .debug_info types of the Decl. These will count towards the size of
+ // the buffer, so we have to do it before computing the offset, and we can't perform the actual
+ // relocations yet.
+ for (dbg_info_type_relocs.items()) |*entry| {
+ entry.value.off = @intCast(u32, dbg_info_buffer.items.len);
+ try self.addDbgInfoType(entry.key, &dbg_info_buffer);
}
try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len));
- if (is_fn and fn_ret_has_bits) {
- // Perform function return type relocation.
- mem.writeInt(
- u32,
- dbg_info_buffer.items[self.getRelocDbgInfoSubprogramRetType()..][0..4],
- text_block.dbg_info_off + @intCast(u32, fn_ret_type_index),
- target_endian,
- );
+ // Now that we have the offset assigned we can finally perform type relocations.
+ for (dbg_info_type_relocs.items()) |entry| {
+ for (entry.value.relocs.items) |off| {
+ mem.writeInt(
+ u32,
+ dbg_info_buffer.items[off..][0..4],
+ text_block.dbg_info_off + entry.value.off,
+ target_endian,
+ );
+ }
}
try self.writeDeclDebugInfo(text_block, dbg_info_buffer.items);
@@ -2151,7 +2186,8 @@ pub const File = struct {
/// Asserts the type has codegen bits.
fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !void {
switch (ty.zigTypeTag()) {
- .Void, .NoReturn => unreachable,
+ .Void => unreachable,
+ .NoReturn => unreachable,
.Bool => {
try dbg_info_buffer.appendSlice(&[_]u8{
abbrev_base_type,
@@ -2201,7 +2237,7 @@ pub const File = struct {
text_block.dbg_info_next = null;
// Populate where it used to be with NOPs.
const file_pos = debug_info_sect.sh_offset + text_block.dbg_info_off;
- try self.pwriteDbgInfoNops(0, &[0]u8{}, text_block.dbg_info_len, file_pos);
+ try self.pwriteDbgInfoNops(0, &[0]u8{}, text_block.dbg_info_len, false, file_pos);
// TODO Look at the free list before appending at the end.
text_block.dbg_info_prev = last;
last.dbg_info_next = text_block;
@@ -2238,7 +2274,8 @@ pub const File = struct {
const debug_info_sect = &self.sections.items[self.debug_info_section_index.?];
const last_decl = self.dbg_info_decl_last.?;
- const needed_size = last_decl.dbg_info_off + last_decl.dbg_info_len;
+ // +1 for a trailing zero to end the children of the decl tag.
+ const needed_size = last_decl.dbg_info_off + last_decl.dbg_info_len + 1;
if (needed_size != debug_info_sect.sh_size) {
if (needed_size > self.allocatedSize(debug_info_sect.sh_offset)) {
const new_offset = self.findFreeSpace(needed_size, 1);
@@ -2265,10 +2302,13 @@ pub const File = struct {
else
0;
+ // To end the children of the decl tag.
+ const trailing_zero = text_block.dbg_info_next == null;
+
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_info section.
const file_pos = debug_info_sect.sh_offset + text_block.dbg_info_off;
- try self.pwriteDbgInfoNops(prev_padding_size, dbg_info_buf, next_padding_size, file_pos);
+ try self.pwriteDbgInfoNops(prev_padding_size, dbg_info_buf, next_padding_size, trailing_zero, file_pos);
}
/// Must be called only after a successful call to `updateDecl`.
@@ -2598,10 +2638,6 @@ pub const File = struct {
return dbg_info_low_pc_reloc_index + self.ptrWidthBytes();
}
- fn getRelocDbgInfoSubprogramRetType(self: Elf) u32 {
- return self.getRelocDbgInfoSubprogramHighPC() + 4;
- }
-
fn dbgLineNeededHeaderBytes(self: Elf) u32 {
const directory_entry_format_count = 1;
const file_name_entry_format_count = 1;
@@ -2710,6 +2746,7 @@ pub const File = struct {
prev_padding_size: usize,
buf: []const u8,
next_padding_size: usize,
+ trailing_zero: bool,
offset: usize,
) !void {
const tracy = trace(@src());
@@ -2761,6 +2798,16 @@ pub const File = struct {
vec_index += 1;
}
}
+
+ if (trailing_zero) {
+ var zbuf = [1]u8{0};
+ vecs[vec_index] = .{
+ .iov_base = &zbuf,
+ .iov_len = zbuf.len,
+ };
+ vec_index += 1;
+ }
+
try self.base.file.?.pwritevAll(vecs[0..vec_index], offset - prev_padding_size);
}
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index ca46caeaa8..8d643c2005 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -103,7 +103,6 @@ pub const Type = extern union {
}
pub fn eql(a: Type, b: Type) bool {
- //std.debug.warn("test {} == {}\n", .{ a, b });
// As a shortcut, if the small tags / addresses match, we're done.
if (a.tag_if_small_enough == b.tag_if_small_enough)
return true;
@@ -197,6 +196,71 @@ pub const Type = extern union {
}
}
+ pub fn hash(self: Type) u32 {
+ var hasher = std.hash.Wyhash.init(0);
+ const zig_type_tag = self.zigTypeTag();
+ std.hash.autoHash(&hasher, zig_type_tag);
+ switch (zig_type_tag) {
+ .Type,
+ .Void,
+ .Bool,
+ .NoReturn,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ => {}, // The zig type tag is all that is needed to distinguish.
+
+ .Pointer => {
+ // TODO implement more pointer type hashing
+ },
+ .Int => {
+ // Detect that e.g. u64 != usize, even if the bits match on a particular target.
+ if (self.isNamedInt()) {
+ std.hash.autoHash(&hasher, self.tag());
+ } else {
+ // Remaining cases are arbitrary sized integers.
+ // The target will not be branched upon, because we handled target-dependent cases above.
+ const info = self.intInfo(@as(Target, undefined));
+ std.hash.autoHash(&hasher, info.signed);
+ std.hash.autoHash(&hasher, info.bits);
+ }
+ },
+ .Array => {
+ std.hash.autoHash(&hasher, self.arrayLen());
+ std.hash.autoHash(&hasher, self.elemType().hash());
+ // TODO hash array sentinel
+ },
+ .Fn => {
+ std.hash.autoHash(&hasher, self.fnReturnType().hash());
+ std.hash.autoHash(&hasher, self.fnCallingConvention());
+ const params_len = self.fnParamLen();
+ std.hash.autoHash(&hasher, params_len);
+ var i: usize = 0;
+ while (i < params_len) : (i += 1) {
+ std.hash.autoHash(&hasher, self.fnParamType(i).hash());
+ }
+ },
+ .Float,
+ .Struct,
+ .Optional,
+ .ErrorUnion,
+ .ErrorSet,
+ .Enum,
+ .Union,
+ .BoundFn,
+ .Opaque,
+ .Frame,
+ .AnyFrame,
+ .Vector,
+ .EnumLiteral,
+ => {
+ // TODO implement more type hashing
+ },
+ }
+ return @truncate(u32, hasher.final());
+ }
+
pub fn copy(self: Type, allocator: *Allocator) error{OutOfMemory}!Type {
if (self.tag_if_small_enough < Tag.no_payload_count) {
return Type{ .tag_if_small_enough = self.tag_if_small_enough };
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 5fb81df051..29043af840 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -211,7 +211,6 @@ pub const Inst = struct {
pub fn Type(tag: Tag) type {
return switch (tag) {
- .arg,
.breakpoint,
.dbg_stmt,
.returnvoid,
@@ -268,6 +267,7 @@ pub const Inst = struct {
.xor,
=> BinOp,
+ .arg => Arg,
.block => Block,
.@"break" => Break,
.breakvoid => BreakVoid,
@@ -431,6 +431,16 @@ pub const Inst = struct {
kw_args: struct {},
};
+ pub const Arg = struct {
+ pub const base_tag = Tag.arg;
+ base: Inst,
+
+ positionals: struct {
+ name: []const u8,
+ },
+ kw_args: struct {},
+ };
+
pub const Block = struct {
pub const base_tag = Tag.block;
base: Inst,
@@ -1843,7 +1853,6 @@ const EmitZIR = struct {
const new_inst = switch (inst.tag) {
.constant => unreachable, // excluded from function bodies
- .arg => try self.emitNoOp(inst.src, .arg),
.breakpoint => try self.emitNoOp(inst.src, .breakpoint),
.unreach => try self.emitNoOp(inst.src, .@"unreachable"),
.retvoid => try self.emitNoOp(inst.src, .returnvoid),
@@ -1886,6 +1895,22 @@ const EmitZIR = struct {
break :blk &new_inst.base;
},
+ .arg => blk: {
+ const old_inst = inst.castTag(.arg).?;
+ const new_inst = try self.arena.allocator.create(Inst.Arg);
+ new_inst.* = .{
+ .base = .{
+ .src = inst.src,
+ .tag = .arg,
+ },
+ .positionals = .{
+ .name = try self.arena.allocator.dupe(u8, mem.spanZ(old_inst.name)),
+ },
+ .kw_args = .{},
+ };
+ break :blk &new_inst.base;
+ },
+
.block => blk: {
const old_inst = inst.castTag(.block).?;
const new_inst = try self.arena.allocator.create(Inst.Block);
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index f2ed8abeac..2b2739a308 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -408,7 +408,7 @@ fn analyzeInstCompileError(mod: *Module, scope: *Scope, inst: *zir.Inst.CompileE
return mod.fail(scope, inst.base.src, "{}", .{inst.positionals.msg});
}
-fn analyzeInstArg(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst {
+fn analyzeInstArg(mod: *Module, scope: *Scope, inst: *zir.Inst.Arg) InnerError!*Inst {
const b = try mod.requireRuntimeBlock(scope, inst.base.src);
const fn_ty = b.func.?.owner_decl.typed_value.most_recent.typed_value.ty;
const param_index = b.instructions.items.len;
@@ -420,7 +420,8 @@ fn analyzeInstArg(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!
});
}
const param_type = fn_ty.fnParamType(param_index);
- return mod.addNoOp(b, inst.base.src, param_type, .arg);
+ const name = try scope.arena().dupeZ(u8, inst.positionals.name);
+ return mod.addArg(b, inst.base.src, param_type, name);
}
fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerError!*Inst {
From 67d684d89aeb0bb3cfa86c57e8d77359d45743fa Mon Sep 17 00:00:00 2001
From: Josias
Date: Wed, 12 Aug 2020 08:56:34 +0000
Subject: [PATCH 048/153] docs: Fix reference to General Purpose Allocator
---
doc/langref.html.in | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index edafc82ab8..cfde67d622 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -9299,10 +9299,8 @@ fn concat(allocator: *Allocator, a: []const u8, b: []const u8) ![]u8 {
                      which will also perform basic leak detection.
- Currently Zig has no general purpose allocator, but there is
- one under active development.
- Once it is merged into the Zig standard library it will become available to import
- with {#syntax#}std.heap.default_allocator{#endsyntax#}. However, it will still be recommended to
+ Zig has a general purpose allocator available to be imported
+ with {#syntax#}std.heap.GeneralPurposeAllocator{#endsyntax#}. However, it is still recommended to
follow the {#link|Choosing an Allocator#} guide.
From 7db2c11537552250462a5f4ab162e5ef4183489c Mon Sep 17 00:00:00 2001
From: heidezomp
Date: Wed, 12 Aug 2020 14:03:02 +0200
Subject: [PATCH 049/153] std.log: add scoped logging struct
* Add a std.log.scoped function that returns a scoped logging struct
* Add a std.log.default struct that logs using the .default scope
Implementation of daurnimator's proposal:
https://github.com/ziglang/zig/issues/5943#issuecomment-669043489
Note that I named the function "scoped" instead of "scope" so as not to
clash with the scope parameter that is used everywhere; this seemed a
better solution to me than renaming the scope parameter to "s" or
"log_scope" or the like.
---
lib/std/log.zig | 84 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 84 insertions(+)
diff --git a/lib/std/log.zig b/lib/std/log.zig
index d8bcba38cc..d511773654 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -200,3 +200,87 @@ pub fn debug(
) void {
log(.debug, scope, format, args);
}
+
+pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
+ return struct {
+ /// Log an emergency message to stderr. This log level is intended to be used
+ /// for conditions that cannot be handled and is usually followed by a panic.
+ pub fn emerg(
+ comptime format: []const u8,
+ args: anytype,
+ ) void {
+ @setCold(true);
+ log(.emerg, scope, format, args);
+ }
+
+ /// Log an alert message to stderr. This log level is intended to be used for
+ /// conditions that should be corrected immediately (e.g. database corruption).
+ pub fn alert(
+ comptime format: []const u8,
+ args: anytype,
+ ) void {
+ @setCold(true);
+ log(.alert, scope, format, args);
+ }
+
+ /// Log a critical message to stderr. This log level is intended to be used
+ /// when a bug has been detected or something has gone wrong and it will have
+ /// an effect on the operation of the program.
+ pub fn crit(
+ comptime format: []const u8,
+ args: anytype,
+ ) void {
+ @setCold(true);
+ log(.crit, scope, format, args);
+ }
+
+ /// Log an error message to stderr. This log level is intended to be used when
+ /// a bug has been detected or something has gone wrong but it is recoverable.
+ pub fn err(
+ comptime format: []const u8,
+ args: anytype,
+ ) void {
+ @setCold(true);
+ log(.err, scope, format, args);
+ }
+
+ /// Log a warning message to stderr. This log level is intended to be used if
+ /// it is uncertain whether something has gone wrong or not, but the
+ /// circumstances would be worth investigating.
+ pub fn warn(
+ comptime format: []const u8,
+ args: anytype,
+ ) void {
+ log(.warn, scope, format, args);
+ }
+
+ /// Log a notice message to stderr. This log level is intended to be used for
+ /// non-error but significant conditions.
+ pub fn notice(
+ comptime format: []const u8,
+ args: anytype,
+ ) void {
+ log(.notice, scope, format, args);
+ }
+
+ /// Log an info message to stderr. This log level is intended to be used for
+ /// general messages about the state of the program.
+ pub fn info(
+ comptime format: []const u8,
+ args: anytype,
+ ) void {
+ log(.info, scope, format, args);
+ }
+
+ /// Log a debug message to stderr. This log level is intended to be used for
+ /// messages which are only useful for debugging.
+ pub fn debug(
+ comptime format: []const u8,
+ args: anytype,
+ ) void {
+ log(.debug, scope, format, args);
+ }
+ };
+}
+
+pub const default = scoped(.default);
From 25607079f0f405d09202378d7435a59a7a96d649 Mon Sep 17 00:00:00 2001
From: heidezomp
Date: Wed, 12 Aug 2020 15:37:56 +0200
Subject: [PATCH 050/153] std.log: add documentation for scoped logging
* Add short documentation to std.log.scoped and std.log.default
* Update the module documentation and example to explain the difference
between using explicit scopes, using a scoped logging namespace, and
using the default namespace
---
lib/std/log.zig | 19 ++++++++++++++++++-
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/lib/std/log.zig b/lib/std/log.zig
index d511773654..8411293867 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -2,12 +2,16 @@ const std = @import("std.zig");
const builtin = std.builtin;
const root = @import("root");
-//! std.log is standardized interface for logging which allows for the logging
+//! std.log is a standardized interface for logging which allows for the logging
//! of programs and libraries using this interface to be formatted and filtered
//! by the implementer of the root.log function.
//!
//! The scope parameter should be used to give context to the logging. For
//! example, a library called 'libfoo' might use .libfoo as its scope.
+//! This parameter can either be passed explicitly to the logging functions
+//! provided here, or a scoped logging namespace can be created
+//! using the `log.scoped` function. If logging scopes are not relevant for
+//! your use case, the `log.default` scope namespace can be used.
//!
//! An example root.log might look something like this:
//!
@@ -44,16 +48,26 @@ const root = @import("root");
//! }
//!
//! pub fn main() void {
+//! // Using explicit scopes:
//! // Won't be printed as log_level is .warn
//! std.log.info(.my_project, "Starting up.", .{});
//! std.log.err(.nice_library, "Something went very wrong, sorry.", .{});
//! // Won't be printed as it gets filtered out by our log function
//! std.log.err(.lib_that_logs_too_much, "Added 1 + 1", .{});
+//!
+//! // Using a scoped logging namespace:
+//! const scoped_log = std.log.scoped(.my_project);
+//! scoped_log.alert("The scope for this message is implicitly .my_project", .{});
+//!
+//! // Using the default namespace:
+//! // Won't be printed as log_level is .warn
+//! std.log.default.info("I don't care about my namespace", .{});
//! }
//! ```
//! Which produces the following output:
//! ```
//! [err] (nice_library): Something went very wrong, sorry.
+//! [alert] (my_project): The scope for this message is implicitly .my_project
//! ```
pub const Level = enum {
@@ -201,6 +215,8 @@ pub fn debug(
log(.debug, scope, format, args);
}
+/// Returns a scoped logging namespace that logs all messages using the scope
+/// provided here.
pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
return struct {
/// Log an emergency message to stderr. This log level is intended to be used
@@ -283,4 +299,5 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
};
}
+/// The default scoped logging namespace.
pub const default = scoped(.default);
From bf2ed0f571736241151a358b2533d45cb769db68 Mon Sep 17 00:00:00 2001
From: heidezomp
Date: Wed, 12 Aug 2020 15:54:21 +0200
Subject: [PATCH 051/153] std.log: don't state in docs that messages are logged
to stderr
Since the logger implementation can be overridden, the messages might
not be logged to stderr at all.
---
lib/std/log.zig | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/lib/std/log.zig b/lib/std/log.zig
index 8411293867..9a0dcecc05 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -129,7 +129,7 @@ fn log(
}
}
-/// Log an emergency message to stderr. This log level is intended to be used
+/// Log an emergency message. This log level is intended to be used
/// for conditions that cannot be handled and is usually followed by a panic.
pub fn emerg(
comptime scope: @Type(.EnumLiteral),
@@ -140,7 +140,7 @@ pub fn emerg(
log(.emerg, scope, format, args);
}
-/// Log an alert message to stderr. This log level is intended to be used for
+/// Log an alert message. This log level is intended to be used for
/// conditions that should be corrected immediately (e.g. database corruption).
pub fn alert(
comptime scope: @Type(.EnumLiteral),
@@ -151,7 +151,7 @@ pub fn alert(
log(.alert, scope, format, args);
}
-/// Log a critical message to stderr. This log level is intended to be used
+/// Log a critical message. This log level is intended to be used
/// when a bug has been detected or something has gone wrong and it will have
/// an effect on the operation of the program.
pub fn crit(
@@ -163,7 +163,7 @@ pub fn crit(
log(.crit, scope, format, args);
}
-/// Log an error message to stderr. This log level is intended to be used when
+/// Log an error message. This log level is intended to be used when
/// a bug has been detected or something has gone wrong but it is recoverable.
pub fn err(
comptime scope: @Type(.EnumLiteral),
@@ -174,7 +174,7 @@ pub fn err(
log(.err, scope, format, args);
}
-/// Log a warning message to stderr. This log level is intended to be used if
+/// Log a warning message. This log level is intended to be used if
/// it is uncertain whether something has gone wrong or not, but the
/// circumstances would be worth investigating.
pub fn warn(
@@ -185,7 +185,7 @@ pub fn warn(
log(.warn, scope, format, args);
}
-/// Log a notice message to stderr. This log level is intended to be used for
+/// Log a notice message. This log level is intended to be used for
/// non-error but significant conditions.
pub fn notice(
comptime scope: @Type(.EnumLiteral),
@@ -195,7 +195,7 @@ pub fn notice(
log(.notice, scope, format, args);
}
-/// Log an info message to stderr. This log level is intended to be used for
+/// Log an info message. This log level is intended to be used for
/// general messages about the state of the program.
pub fn info(
comptime scope: @Type(.EnumLiteral),
@@ -205,7 +205,7 @@ pub fn info(
log(.info, scope, format, args);
}
-/// Log a debug message to stderr. This log level is intended to be used for
+/// Log a debug message. This log level is intended to be used for
/// messages which are only useful for debugging.
pub fn debug(
comptime scope: @Type(.EnumLiteral),
@@ -219,7 +219,7 @@ pub fn debug(
/// provided here.
pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
return struct {
- /// Log an emergency message to stderr. This log level is intended to be used
+ /// Log an emergency message. This log level is intended to be used
/// for conditions that cannot be handled and is usually followed by a panic.
pub fn emerg(
comptime format: []const u8,
@@ -229,7 +229,7 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
log(.emerg, scope, format, args);
}
- /// Log an alert message to stderr. This log level is intended to be used for
+ /// Log an alert message. This log level is intended to be used for
/// conditions that should be corrected immediately (e.g. database corruption).
pub fn alert(
comptime format: []const u8,
@@ -239,7 +239,7 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
log(.alert, scope, format, args);
}
- /// Log a critical message to stderr. This log level is intended to be used
+ /// Log a critical message. This log level is intended to be used
/// when a bug has been detected or something has gone wrong and it will have
/// an effect on the operation of the program.
pub fn crit(
@@ -250,7 +250,7 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
log(.crit, scope, format, args);
}
- /// Log an error message to stderr. This log level is intended to be used when
+ /// Log an error message. This log level is intended to be used when
/// a bug has been detected or something has gone wrong but it is recoverable.
pub fn err(
comptime format: []const u8,
@@ -260,7 +260,7 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
log(.err, scope, format, args);
}
- /// Log a warning message to stderr. This log level is intended to be used if
+ /// Log a warning message. This log level is intended to be used if
/// it is uncertain whether something has gone wrong or not, but the
/// circumstances would be worth investigating.
pub fn warn(
@@ -270,7 +270,7 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
log(.warn, scope, format, args);
}
- /// Log a notice message to stderr. This log level is intended to be used for
+ /// Log a notice message. This log level is intended to be used for
/// non-error but significant conditions.
pub fn notice(
comptime format: []const u8,
@@ -279,7 +279,7 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
log(.notice, scope, format, args);
}
- /// Log an info message to stderr. This log level is intended to be used for
+ /// Log an info message. This log level is intended to be used for
/// general messages about the state of the program.
pub fn info(
comptime format: []const u8,
@@ -288,7 +288,7 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
log(.info, scope, format, args);
}
- /// Log a debug message to stderr. This log level is intended to be used for
+ /// Log a debug message. This log level is intended to be used for
/// messages which are only useful for debugging.
pub fn debug(
comptime format: []const u8,
From 373488157751dbaa22415ce842c4f5616fc845ff Mon Sep 17 00:00:00 2001
From: Vexu
Date: Wed, 12 Aug 2020 23:35:11 +0300
Subject: [PATCH 052/153] add error for unused/duplicate block labels
---
lib/std/dwarf.zig | 2 +-
lib/std/special/c.zig | 6 ++--
lib/std/zig/render.zig | 2 +-
src-self-hosted/translate_c.zig | 12 +++----
src/all_types.hpp | 3 ++
src/ir.cpp | 60 +++++++++++++++++++++++++++++++++
test/compile_errors.zig | 23 +++++++++++++
7 files changed, 97 insertions(+), 11 deletions(-)
diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig
index c5802240d4..085e394a8f 100644
--- a/lib/std/dwarf.zig
+++ b/lib/std/dwarf.zig
@@ -322,7 +322,7 @@ fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, e
FORM_block1 => parseFormValueBlock(allocator, in_stream, endian, 1),
FORM_block2 => parseFormValueBlock(allocator, in_stream, endian, 2),
FORM_block4 => parseFormValueBlock(allocator, in_stream, endian, 4),
- FORM_block => x: {
+ FORM_block => {
const block_len = try nosuspend leb.readULEB128(usize, in_stream);
return parseFormValueBlockLen(allocator, in_stream, block_len);
},
diff --git a/lib/std/special/c.zig b/lib/std/special/c.zig
index c769bc358b..170bb98620 100644
--- a/lib/std/special/c.zig
+++ b/lib/std/special/c.zig
@@ -536,7 +536,7 @@ fn generic_fmod(comptime T: type, x: T, y: T) T {
// normalize x and y
if (ex == 0) {
i = ux << exp_bits;
- while (i >> bits_minus_1 == 0) : (b: {
+ while (i >> bits_minus_1 == 0) : ({
ex -= 1;
i <<= 1;
}) {}
@@ -547,7 +547,7 @@ fn generic_fmod(comptime T: type, x: T, y: T) T {
}
if (ey == 0) {
i = uy << exp_bits;
- while (i >> bits_minus_1 == 0) : (b: {
+ while (i >> bits_minus_1 == 0) : ({
ey -= 1;
i <<= 1;
}) {}
@@ -573,7 +573,7 @@ fn generic_fmod(comptime T: type, x: T, y: T) T {
return 0 * x;
ux = i;
}
- while (ux >> digits == 0) : (b: {
+ while (ux >> digits == 0) : ({
ux <<= 1;
ex -= 1;
}) {}
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 454ddde160..c516250a17 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -2385,7 +2385,7 @@ fn renderTokenOffset(
}
}
- if (next_token_id != .LineComment) blk: {
+ if (next_token_id != .LineComment) {
switch (space) {
Space.None, Space.NoNewline => return,
Space.Newline => {
diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig
index 5af9bbc3c4..8b1c6537f7 100644
--- a/src-self-hosted/translate_c.zig
+++ b/src-self-hosted/translate_c.zig
@@ -931,7 +931,7 @@ fn transRecordDecl(c: *Context, record_decl: *const ZigClangRecordDecl) Error!?*
else => |e| return e,
};
- const align_expr = blk: {
+ const align_expr = blk_2: {
const alignment = ZigClangFieldDecl_getAlignedAttribute(field_decl, rp.c.clang_context);
if (alignment != 0) {
_ = try appendToken(rp.c, .Keyword_align, "align");
@@ -940,9 +940,9 @@ fn transRecordDecl(c: *Context, record_decl: *const ZigClangRecordDecl) Error!?*
const expr = try transCreateNodeInt(rp.c, alignment / 8);
_ = try appendToken(rp.c, .RParen, ")");
- break :blk expr;
+ break :blk_2 expr;
}
- break :blk null;
+ break :blk_2 null;
};
const field_node = try c.arena.create(ast.Node.ContainerField);
@@ -1073,9 +1073,9 @@ fn transEnumDecl(c: *Context, enum_decl: *const ZigClangEnumDecl) Error!?*ast.No
const field_name_tok = try appendIdentifier(c, field_name);
- const int_node = if (!pure_enum) blk: {
+ const int_node = if (!pure_enum) blk_2: {
_ = try appendToken(c, .Colon, "=");
- break :blk try transCreateNodeAPInt(c, ZigClangEnumConstantDecl_getInitVal(enum_const));
+ break :blk_2 try transCreateNodeAPInt(c, ZigClangEnumConstantDecl_getInitVal(enum_const));
} else
null;
@@ -2388,7 +2388,7 @@ fn transZeroInitExpr(
ty: *const ZigClangType,
) TransError!*ast.Node {
switch (ZigClangType_getTypeClass(ty)) {
- .Builtin => blk: {
+ .Builtin => {
const builtin_ty = @ptrCast(*const ZigClangBuiltinType, ty);
switch (ZigClangBuiltinType_getKind(builtin_ty)) {
.Bool => return try transCreateNodeBoolLiteral(rp.c, false),
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 3939a8ac25..5f142e3dcb 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -2438,6 +2438,7 @@ struct ScopeBlock {
LVal lval;
bool safety_off;
bool fast_math_on;
+ bool name_used;
};
// This scope is created from every defer expression.
@@ -2488,6 +2489,8 @@ struct ScopeLoop {
ZigList *incoming_blocks;
ResultLocPeerParent *peer_parent;
ScopeExpr *spill_scope;
+
+ bool name_used;
};
// This scope blocks certain things from working such as comptime continue
diff --git a/src/ir.cpp b/src/ir.cpp
index 8934a20545..40be4e147b 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -5476,6 +5476,25 @@ static ResultLocPeer *create_peer_result(ResultLocPeerParent *peer_parent) {
return result;
}
+static bool is_duplicate_label(CodeGen *g, Scope *scope, AstNode *node, Buf *name) {
+ if (name == nullptr) return false;
+
+ for (;;) {
+ if (scope == nullptr || scope->id == ScopeIdFnDef) {
+ break;
+ } else if (scope->id == ScopeIdBlock || scope->id == ScopeIdLoop) {
+ Buf *this_block_name = scope->id == ScopeIdBlock ? ((ScopeBlock *)scope)->name : ((ScopeLoop *)scope)->name;
+ if (this_block_name != nullptr && buf_eql_buf(name, this_block_name)) {
+ ErrorMsg *msg = add_node_error(g, node, buf_sprintf("redeclaration of label '%s'", buf_ptr(name)));
+ add_error_note(g, msg, scope->source_node, buf_sprintf("previous declaration is here"));
+ return true;
+ }
+ }
+ scope = scope->parent;
+ }
+ return false;
+}
+
static IrInstSrc *ir_gen_block(IrBuilderSrc *irb, Scope *parent_scope, AstNode *block_node, LVal lval,
ResultLoc *result_loc)
{
@@ -5484,6 +5503,9 @@ static IrInstSrc *ir_gen_block(IrBuilderSrc *irb, Scope *parent_scope, AstNode *
ZigList incoming_values = {0};
ZigList incoming_blocks = {0};
+ if (is_duplicate_label(irb->codegen, parent_scope, block_node, block_node->data.block.name))
+ return irb->codegen->invalid_inst_src;
+
ScopeBlock *scope_block = create_block_scope(irb->codegen, block_node, parent_scope);
Scope *outer_block_scope = &scope_block->base;
@@ -5495,6 +5517,9 @@ static IrInstSrc *ir_gen_block(IrBuilderSrc *irb, Scope *parent_scope, AstNode *
}
if (block_node->data.block.statements.length == 0) {
+ if (scope_block->name != nullptr) {
+ add_node_error(irb->codegen, block_node, buf_sprintf("unused block label"));
+ }
// {}
return ir_lval_wrap(irb, parent_scope, ir_build_const_void(irb, child_scope, block_node), lval, result_loc);
}
@@ -5552,6 +5577,10 @@ static IrInstSrc *ir_gen_block(IrBuilderSrc *irb, Scope *parent_scope, AstNode *
}
}
+ if (scope_block->name != nullptr && scope_block->name_used == false) {
+ add_node_error(irb->codegen, block_node, buf_sprintf("unused block label"));
+ }
+
if (found_invalid_inst)
return irb->codegen->invalid_inst_src;
@@ -8152,6 +8181,9 @@ static IrInstSrc *ir_gen_while_expr(IrBuilderSrc *irb, Scope *scope, AstNode *no
ZigList incoming_values = {0};
ZigList incoming_blocks = {0};
+ if (is_duplicate_label(irb->codegen, payload_scope, node, node->data.while_expr.name))
+ return irb->codegen->invalid_inst_src;
+
ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, payload_scope);
loop_scope->break_block = end_block;
loop_scope->continue_block = continue_block;
@@ -8169,6 +8201,10 @@ static IrInstSrc *ir_gen_while_expr(IrBuilderSrc *irb, Scope *scope, AstNode *no
if (body_result == irb->codegen->invalid_inst_src)
return body_result;
+ if (loop_scope->name != nullptr && loop_scope->name_used == false) {
+ add_node_error(irb->codegen, node, buf_sprintf("unused while label"));
+ }
+
if (!instr_is_unreachable(body_result)) {
ir_mark_gen(ir_build_check_statement_is_void(irb, payload_scope, node->data.while_expr.body, body_result));
ir_mark_gen(ir_build_br(irb, payload_scope, node, continue_block, is_comptime));
@@ -8263,6 +8299,9 @@ static IrInstSrc *ir_gen_while_expr(IrBuilderSrc *irb, Scope *scope, AstNode *no
ZigList incoming_values = {0};
ZigList incoming_blocks = {0};
+ if (is_duplicate_label(irb->codegen, child_scope, node, node->data.while_expr.name))
+ return irb->codegen->invalid_inst_src;
+
ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, child_scope);
loop_scope->break_block = end_block;
loop_scope->continue_block = continue_block;
@@ -8280,6 +8319,10 @@ static IrInstSrc *ir_gen_while_expr(IrBuilderSrc *irb, Scope *scope, AstNode *no
if (body_result == irb->codegen->invalid_inst_src)
return body_result;
+ if (loop_scope->name != nullptr && loop_scope->name_used == false) {
+ add_node_error(irb->codegen, node, buf_sprintf("unused while label"));
+ }
+
if (!instr_is_unreachable(body_result)) {
ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.while_expr.body, body_result));
ir_mark_gen(ir_build_br(irb, child_scope, node, continue_block, is_comptime));
@@ -8353,6 +8396,9 @@ static IrInstSrc *ir_gen_while_expr(IrBuilderSrc *irb, Scope *scope, AstNode *no
Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
+ if (is_duplicate_label(irb->codegen, subexpr_scope, node, node->data.while_expr.name))
+ return irb->codegen->invalid_inst_src;
+
ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, subexpr_scope);
loop_scope->break_block = end_block;
loop_scope->continue_block = continue_block;
@@ -8369,6 +8415,10 @@ static IrInstSrc *ir_gen_while_expr(IrBuilderSrc *irb, Scope *scope, AstNode *no
if (body_result == irb->codegen->invalid_inst_src)
return body_result;
+ if (loop_scope->name != nullptr && loop_scope->name_used == false) {
+ add_node_error(irb->codegen, node, buf_sprintf("unused while label"));
+ }
+
if (!instr_is_unreachable(body_result)) {
ir_mark_gen(ir_build_check_statement_is_void(irb, scope, node->data.while_expr.body, body_result));
ir_mark_gen(ir_build_br(irb, scope, node, continue_block, is_comptime));
@@ -8501,6 +8551,9 @@ static IrInstSrc *ir_gen_for_expr(IrBuilderSrc *irb, Scope *parent_scope, AstNod
elem_ptr : ir_build_load_ptr(irb, &spill_scope->base, elem_node, elem_ptr);
build_decl_var_and_init(irb, parent_scope, elem_node, elem_var, elem_value, buf_ptr(elem_var_name), is_comptime);
+ if (is_duplicate_label(irb->codegen, child_scope, node, node->data.for_expr.name))
+ return irb->codegen->invalid_inst_src;
+
ZigList incoming_values = {0};
ZigList incoming_blocks = {0};
ScopeLoop *loop_scope = create_loop_scope(irb->codegen, node, child_scope);
@@ -8520,6 +8573,10 @@ static IrInstSrc *ir_gen_for_expr(IrBuilderSrc *irb, Scope *parent_scope, AstNod
if (body_result == irb->codegen->invalid_inst_src)
return irb->codegen->invalid_inst_src;
+ if (loop_scope->name != nullptr && loop_scope->name_used == false) {
+ add_node_error(irb->codegen, node, buf_sprintf("unused for label"));
+ }
+
if (!instr_is_unreachable(body_result)) {
ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.for_expr.body, body_result));
ir_mark_gen(ir_build_br(irb, child_scope, node, continue_block, is_comptime));
@@ -9464,6 +9521,7 @@ static IrInstSrc *ir_gen_break(IrBuilderSrc *irb, Scope *break_scope, AstNode *n
if (node->data.break_expr.name == nullptr ||
(this_loop_scope->name != nullptr && buf_eql_buf(node->data.break_expr.name, this_loop_scope->name)))
{
+ this_loop_scope->name_used = true;
loop_scope = this_loop_scope;
break;
}
@@ -9473,6 +9531,7 @@ static IrInstSrc *ir_gen_break(IrBuilderSrc *irb, Scope *break_scope, AstNode *n
(this_block_scope->name != nullptr && buf_eql_buf(node->data.break_expr.name, this_block_scope->name)))
{
assert(this_block_scope->end_block != nullptr);
+ this_block_scope->name_used = true;
return ir_gen_return_from_block(irb, break_scope, node, this_block_scope);
}
} else if (search_scope->id == ScopeIdSuspend) {
@@ -9540,6 +9599,7 @@ static IrInstSrc *ir_gen_continue(IrBuilderSrc *irb, Scope *continue_scope, AstN
if (node->data.continue_expr.name == nullptr ||
(this_loop_scope->name != nullptr && buf_eql_buf(node->data.continue_expr.name, this_loop_scope->name)))
{
+ this_loop_scope->name_used = true;
loop_scope = this_loop_scope;
break;
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 6b231a323d..4adc538602 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2,6 +2,29 @@ const tests = @import("tests.zig");
const std = @import("std");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.addTest("duplicate/unused labels",
+ \\comptime {
+ \\ blk: { blk: while (false) {} }
+ \\ blk: while (false) { blk: for (@as([0]void, undefined)) |_| {} }
+ \\ blk: for (@as([0]void, undefined)) |_| { blk: {} }
+ \\}
+ \\comptime {
+ \\ blk: {}
+ \\ blk: while(false) {}
+ \\ blk: for(@as([0]void, undefined)) |_| {}
+ \\}
+ , &[_][]const u8{
+ "tmp.zig:2:17: error: redeclaration of label 'blk'",
+ "tmp.zig:2:10: note: previous declaration is here",
+ "tmp.zig:3:31: error: redeclaration of label 'blk'",
+ "tmp.zig:3:10: note: previous declaration is here",
+ "tmp.zig:4:51: error: redeclaration of label 'blk'",
+ "tmp.zig:4:10: note: previous declaration is here",
+ "tmp.zig:7:10: error: unused block label",
+ "tmp.zig:8:10: error: unused while label",
+ "tmp.zig:9:10: error: unused for label",
+ });
+
cases.addTest("@alignCast of zero sized types",
\\export fn foo() void {
\\ const a: *void = undefined;
From de4f3f11f735708cf9ffe4bbdbbfa693b6b07916 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 12 Aug 2020 21:13:07 -0700
Subject: [PATCH 053/153] stage2: astgen for while loops
See #6021
---
src-self-hosted/astgen.zig | 176 +++++++++++++++++++++++++++++++----
src-self-hosted/codegen.zig | 7 ++
src-self-hosted/ir.zig | 19 ++++
src-self-hosted/link.zig | 3 -
src-self-hosted/zir.zig | 75 +++++++++++++++
src-self-hosted/zir_sema.zig | 10 ++
6 files changed, 269 insertions(+), 21 deletions(-)
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 029e7d4b8c..1827d53043 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -48,20 +48,21 @@ pub fn typeExpr(mod: *Module, scope: *Scope, type_node: *ast.Node) InnerError!*z
pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerError!*zir.Inst {
switch (node.tag) {
.VarDecl => unreachable, // Handled in `blockExpr`.
- .Assign => unreachable, // Handled in `blockExpr`.
- .AssignBitAnd => unreachable, // Handled in `blockExpr`.
- .AssignBitOr => unreachable, // Handled in `blockExpr`.
- .AssignBitShiftLeft => unreachable, // Handled in `blockExpr`.
- .AssignBitShiftRight => unreachable, // Handled in `blockExpr`.
- .AssignBitXor => unreachable, // Handled in `blockExpr`.
- .AssignDiv => unreachable, // Handled in `blockExpr`.
- .AssignSub => unreachable, // Handled in `blockExpr`.
- .AssignSubWrap => unreachable, // Handled in `blockExpr`.
- .AssignMod => unreachable, // Handled in `blockExpr`.
- .AssignAdd => unreachable, // Handled in `blockExpr`.
- .AssignAddWrap => unreachable, // Handled in `blockExpr`.
- .AssignMul => unreachable, // Handled in `blockExpr`.
- .AssignMulWrap => unreachable, // Handled in `blockExpr`.
+
+ .Assign => return rlWrapVoid(mod, scope, rl, node, try assign(mod, scope, node.castTag(.Assign).?)),
+ .AssignBitAnd => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitAnd).?, .bitand)),
+ .AssignBitOr => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitOr).?, .bitor)),
+ .AssignBitShiftLeft => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitShiftLeft).?, .shl)),
+ .AssignBitShiftRight => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitShiftRight).?, .shr)),
+ .AssignBitXor => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitXor).?, .xor)),
+ .AssignDiv => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignDiv).?, .div)),
+ .AssignSub => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignSub).?, .sub)),
+ .AssignSubWrap => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignSubWrap).?, .subwrap)),
+ .AssignMod => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMod).?, .mod_rem)),
+ .AssignAdd => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignAdd).?, .add)),
+ .AssignAddWrap => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignAddWrap).?, .addwrap)),
+ .AssignMul => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMul).?, .mul)),
+ .AssignMulWrap => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignMulWrap).?, .mulwrap)),
.Add => return simpleBinOp(mod, scope, rl, node.castTag(.Add).?, .add),
.AddWrap => return simpleBinOp(mod, scope, rl, node.castTag(.AddWrap).?, .addwrap),
@@ -96,6 +97,7 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.Unreachable => return unreach(mod, scope, node.castTag(.Unreachable).?),
.Return => return ret(mod, scope, node.castTag(.Return).?),
.If => return ifExpr(mod, scope, rl, node.castTag(.If).?),
+ .While => return whileExpr(mod, scope, rl, node.castTag(.While).?),
.Period => return rlWrap(mod, scope, rl, try field(mod, scope, node.castTag(.Period).?)),
.Deref => return rlWrap(mod, scope, rl, try deref(mod, scope, node.castTag(.Deref).?)),
.BoolNot => return rlWrap(mod, scope, rl, try boolNot(mod, scope, node.castTag(.BoolNot).?)),
@@ -127,10 +129,7 @@ pub fn blockExpr(mod: *Module, parent_scope: *Scope, block_node: *ast.Node.Block
const var_decl_node = statement.castTag(.VarDecl).?;
scope = try varDecl(mod, scope, var_decl_node, &block_arena.allocator);
},
- .Assign => {
- const ass = statement.castTag(.Assign).?;
- try assign(mod, scope, ass);
- },
+ .Assign => try assign(mod, scope, statement.castTag(.Assign).?),
.AssignBitAnd => try assignOp(mod, scope, statement.castTag(.AssignBitAnd).?, .bitand),
.AssignBitOr => try assignOp(mod, scope, statement.castTag(.AssignBitOr).?, .bitor),
.AssignBitShiftLeft => try assignOp(mod, scope, statement.castTag(.AssignBitShiftLeft).?, .shl),
@@ -454,6 +453,132 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
return &block.base;
}
+fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.While) InnerError!*zir.Inst {
+ if (while_node.payload) |payload| {
+ return mod.failNode(scope, payload, "TODO implement astgen.whileExpr for optionals", .{});
+ }
+ if (while_node.@"else") |else_node| {
+ if (else_node.payload) |payload| {
+ return mod.failNode(scope, payload, "TODO implement astgen.whileExpr for error unions", .{});
+ }
+ }
+
+ var expr_scope: Scope.GenZIR = .{
+ .parent = scope,
+ .decl = scope.decl().?,
+ .arena = scope.arena(),
+ .instructions = .{},
+ };
+ defer expr_scope.instructions.deinit(mod.gpa);
+
+ var loop_scope: Scope.GenZIR = .{
+ .parent = &expr_scope.base,
+ .decl = expr_scope.decl,
+ .arena = expr_scope.arena,
+ .instructions = .{},
+ };
+ defer loop_scope.instructions.deinit(mod.gpa);
+
+ var continue_scope: Scope.GenZIR = .{
+ .parent = &loop_scope.base,
+ .decl = loop_scope.decl,
+ .arena = loop_scope.arena,
+ .instructions = .{},
+ };
+ defer continue_scope.instructions.deinit(mod.gpa);
+
+ const tree = scope.tree();
+ const while_src = tree.token_locs[while_node.while_token].start;
+ const bool_type = try addZIRInstConst(mod, scope, while_src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.bool_type),
+ });
+ const void_type = try addZIRInstConst(mod, scope, while_src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.void_type),
+ });
+ const cond = try expr(mod, &continue_scope.base, .{ .ty = bool_type }, while_node.condition);
+
+ const condbr = try addZIRInstSpecial(mod, &continue_scope.base, while_src, zir.Inst.CondBr, .{
+ .condition = cond,
+ .then_body = undefined, // populated below
+ .else_body = undefined, // populated below
+ }, .{});
+ const cond_block = try addZIRInstBlock(mod, &loop_scope.base, while_src, .{
+ .instructions = try loop_scope.arena.dupe(*zir.Inst, continue_scope.instructions.items),
+ });
+ if (while_node.continue_expr) |cont_expr| {
+ const cont_expr_result = try expr(mod, &loop_scope.base, .{ .ty = void_type }, cont_expr);
+ if (!cont_expr_result.tag.isNoReturn()) {
+ _ = try addZIRNoOp(mod, &loop_scope.base, while_src, .repeat);
+ }
+ } else {
+ _ = try addZIRNoOp(mod, &loop_scope.base, while_src, .repeat);
+ }
+ const loop = try addZIRInstLoop(mod, &expr_scope.base, while_src, .{
+ .instructions = try expr_scope.arena.dupe(*zir.Inst, loop_scope.instructions.items),
+ });
+ const while_block = try addZIRInstBlock(mod, scope, while_src, .{
+ .instructions = try expr_scope.arena.dupe(*zir.Inst, expr_scope.instructions.items),
+ });
+ var then_scope: Scope.GenZIR = .{
+ .parent = &continue_scope.base,
+ .decl = continue_scope.decl,
+ .arena = continue_scope.arena,
+ .instructions = .{},
+ };
+ defer then_scope.instructions.deinit(mod.gpa);
+
+ // Most result location types can be forwarded directly; however
+ // if we need to write to a pointer which has an inferred type,
+ // proper type inference requires peer type resolution on the while's
+ // branches.
+ const branch_rl: ResultLoc = switch (rl) {
+ .discard, .none, .ty, .ptr, .lvalue => rl,
+ .inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = while_block },
+ };
+
+ const then_result = try expr(mod, &then_scope.base, branch_rl, while_node.body);
+ if (!then_result.tag.isNoReturn()) {
+ const then_src = tree.token_locs[while_node.body.lastToken()].start;
+ _ = try addZIRInst(mod, &then_scope.base, then_src, zir.Inst.Break, .{
+ .block = cond_block,
+ .operand = then_result,
+ }, .{});
+ }
+ condbr.positionals.then_body = .{
+ .instructions = try then_scope.arena.dupe(*zir.Inst, then_scope.instructions.items),
+ };
+
+ var else_scope: Scope.GenZIR = .{
+ .parent = &continue_scope.base,
+ .decl = continue_scope.decl,
+ .arena = continue_scope.arena,
+ .instructions = .{},
+ };
+ defer else_scope.instructions.deinit(mod.gpa);
+
+ if (while_node.@"else") |else_node| {
+ const else_result = try expr(mod, &else_scope.base, branch_rl, else_node.body);
+ if (!else_result.tag.isNoReturn()) {
+ const else_src = tree.token_locs[else_node.body.lastToken()].start;
+ _ = try addZIRInst(mod, &else_scope.base, else_src, zir.Inst.Break, .{
+ .block = while_block,
+ .operand = else_result,
+ }, .{});
+ }
+ } else {
+ const else_src = tree.token_locs[while_node.lastToken()].start;
+ _ = try addZIRInst(mod, &else_scope.base, else_src, zir.Inst.BreakVoid, .{
+ .block = while_block,
+ }, .{});
+ }
+ condbr.positionals.else_body = .{
+ .instructions = try else_scope.arena.dupe(*zir.Inst, else_scope.instructions.items),
+ };
+ return &while_block.base;
+}
+
fn ret(mod: *Module, scope: *Scope, cfe: *ast.Node.ControlFlowExpression) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[cfe.ltoken].start;
@@ -1094,6 +1219,15 @@ fn rlWrap(mod: *Module, scope: *Scope, rl: ResultLoc, result: *zir.Inst) InnerEr
}
}
+fn rlWrapVoid(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node, result: void) InnerError!*zir.Inst {
+ const src = scope.tree().token_locs[node.firstToken()].start;
+ const void_inst = try addZIRInstConst(mod, scope, src, .{
+ .ty = Type.initTag(.void),
+ .val = Value.initTag(.void_value),
+ });
+ return rlWrap(mod, scope, rl, void_inst);
+}
+
pub fn addZIRInstSpecial(
mod: *Module,
scope: *Scope,
@@ -1211,3 +1345,9 @@ pub fn addZIRInstBlock(mod: *Module, scope: *Scope, src: usize, body: zir.Module
const P = std.meta.fieldInfo(zir.Inst.Block, "positionals").field_type;
return addZIRInstSpecial(mod, scope, src, zir.Inst.Block, P{ .body = body }, .{});
}
+
+/// TODO The existence of this function is a workaround for a bug in stage1.
+pub fn addZIRInstLoop(mod: *Module, scope: *Scope, src: usize, body: zir.Module.Body) !*zir.Inst.Loop {
+ const P = std.meta.fieldInfo(zir.Inst.Loop, "positionals").field_type;
+ return addZIRInstSpecial(mod, scope, src, zir.Inst.Loop, P{ .body = body }, .{});
+}
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index a3c95349e1..6e8ab34478 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -23,6 +23,8 @@ pub const BlockData = struct {
relocs: std.ArrayListUnmanaged(Reloc) = .{},
};
+pub const LoopData = struct { };
+
pub const Reloc = union(enum) {
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
@@ -657,6 +659,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.isnonnull => return self.genIsNonNull(inst.castTag(.isnonnull).?),
.isnull => return self.genIsNull(inst.castTag(.isnull).?),
.load => return self.genLoad(inst.castTag(.load).?),
+ .loop => return self.genLoop(inst.castTag(.loop).?),
.not => return self.genNot(inst.castTag(.not).?),
.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?),
.ref => return self.genRef(inst.castTag(.ref).?),
@@ -1346,6 +1349,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
+ fn genLoop(self: *Self, inst: *ir.Inst.Loop) !MCValue {
+ return self.fail(inst.base.src, "TODO codegen loop", .{});
+ }
+
fn genBlock(self: *Self, inst: *ir.Inst.Block) !MCValue {
if (inst.base.ty.hasCodeGenBits()) {
return self.fail(inst.base.src, "TODO codegen Block with non-void type", .{});
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 176fd7e303..f4262592de 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -70,6 +70,7 @@ pub const Inst = struct {
isnull,
/// Read a value from a pointer.
load,
+ loop,
ptrtoint,
ref,
ret,
@@ -122,6 +123,7 @@ pub const Inst = struct {
.call => Call,
.condbr => CondBr,
.constant => Constant,
+ .loop => Loop,
};
}
@@ -401,6 +403,23 @@ pub const Inst = struct {
return null;
}
};
+
+ pub const Loop = struct {
+ pub const base_tag = Tag.loop;
+
+ base: Inst,
+ body: Body,
+ /// This memory is reserved for codegen code to do whatever it needs to here.
+ codegen: codegen.LoopData = .{},
+
+ pub fn operandCount(self: *const Loop) usize {
+ return 0;
+ }
+ pub fn getOperand(self: *const Loop, index: usize) ?*Inst {
+ return null;
+ }
+ };
+
};
pub const Body = struct {
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index d05c676a38..431d510b01 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -1484,9 +1484,6 @@ pub const File = struct {
assert(!self.shdr_table_dirty);
assert(!self.shstrtab_dirty);
assert(!self.debug_strtab_dirty);
- assert(!self.offset_table_count_dirty);
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
- assert(syms_sect.sh_info == self.local_symbols.items.len);
}
fn writeDwarfAddrAssumeCapacity(self: *Elf, buf: *std.ArrayList(u8), addr: u64) void {
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 29043af840..ba0c05d587 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -151,6 +151,8 @@ pub const Inst = struct {
isnonnull,
/// Return a boolean true if an optional is null. `x == null`
isnull,
+ /// A labeled block of code that loops forever.
+ loop,
/// Ambiguously remainder division or modulus. If the computation would possibly have
/// a different value depending on whether the operation is remainder division or modulus,
/// a compile error is emitted. Otherwise the computation is performed.
@@ -173,6 +175,8 @@ pub const Inst = struct {
/// the memory location is in the stack frame, local to the scope containing the
/// instruction.
ref,
+ /// Sends control flow back to the loop block operand.
+ repeat,
/// Obtains a pointer to the return value.
ret_ptr,
/// Obtains the return type of the in-scope function.
@@ -279,7 +283,9 @@ pub const Inst = struct {
.declval_in_module => DeclValInModule,
.coerce_result_block_ptr => CoerceResultBlockPtr,
.compileerror => CompileError,
+ .loop => Loop,
.@"const" => Const,
+ .repeat => Repeat,
.str => Str,
.int => Int,
.inttype => IntType,
@@ -372,10 +378,12 @@ pub const Inst = struct {
.breakvoid,
.condbr,
.compileerror,
+ .repeat,
.@"return",
.returnvoid,
.unreach_nocheck,
.@"unreachable",
+ .loop,
=> true,
};
}
@@ -567,6 +575,16 @@ pub const Inst = struct {
kw_args: struct {},
};
+ pub const Repeat = struct {
+ pub const base_tag = Tag.repeat;
+ base: Inst,
+
+ positionals: struct {
+ loop: *Loop,
+ },
+ kw_args: struct {},
+ };
+
pub const Str = struct {
pub const base_tag = Tag.str;
base: Inst,
@@ -587,6 +605,16 @@ pub const Inst = struct {
kw_args: struct {},
};
+ pub const Loop = struct {
+ pub const base_tag = Tag.loop;
+ base: Inst,
+
+ positionals: struct {
+ body: Module.Body,
+ },
+ kw_args: struct {},
+ };
+
pub const FieldPtr = struct {
pub const base_tag = Tag.fieldptr;
base: Inst,
@@ -848,12 +876,14 @@ pub const Module = struct {
.module = &self,
.inst_table = InstPtrTable.init(allocator),
.block_table = std.AutoHashMap(*Inst.Block, []const u8).init(allocator),
+ .loop_table = std.AutoHashMap(*Inst.Loop, []const u8).init(allocator),
.arena = std.heap.ArenaAllocator.init(allocator),
.indent = 2,
};
defer write.arena.deinit();
defer write.inst_table.deinit();
defer write.block_table.deinit();
+ defer write.loop_table.deinit();
// First, build a map of *Inst to @ or % indexes
try write.inst_table.ensureCapacity(self.decls.len);
@@ -882,6 +912,7 @@ const Writer = struct {
module: *const Module,
inst_table: InstPtrTable,
block_table: std.AutoHashMap(*Inst.Block, []const u8),
+ loop_table: std.AutoHashMap(*Inst.Loop, []const u8),
arena: std.heap.ArenaAllocator,
indent: usize,
@@ -962,6 +993,9 @@ const Writer = struct {
if (inst.cast(Inst.Block)) |block| {
const name = try std.fmt.allocPrint(&self.arena.allocator, "label_{}", .{i});
try self.block_table.put(block, name);
+ } else if (inst.cast(Inst.Loop)) |loop| {
+ const name = try std.fmt.allocPrint(&self.arena.allocator, "loop_{}", .{i});
+ try self.loop_table.put(loop, name);
}
self.indent += 2;
try self.writeInstToStream(stream, inst);
@@ -980,6 +1014,10 @@ const Writer = struct {
const name = self.block_table.get(param).?;
return std.zig.renderStringLiteral(name, stream);
},
+ *Inst.Loop => {
+ const name = self.loop_table.get(param).?;
+ return std.zig.renderStringLiteral(name, stream);
+ },
else => |T| @compileError("unimplemented: rendering parameter of type " ++ @typeName(T)),
}
}
@@ -1016,8 +1054,10 @@ pub fn parse(allocator: *Allocator, source: [:0]const u8) Allocator.Error!Module
.decls = .{},
.unnamed_index = 0,
.block_table = std.StringHashMap(*Inst.Block).init(allocator),
+ .loop_table = std.StringHashMap(*Inst.Loop).init(allocator),
};
defer parser.block_table.deinit();
+ defer parser.loop_table.deinit();
errdefer parser.arena.deinit();
parser.parseRoot() catch |err| switch (err) {
@@ -1044,6 +1084,7 @@ const Parser = struct {
error_msg: ?ErrorMsg = null,
unnamed_index: usize,
block_table: std.StringHashMap(*Inst.Block),
+ loop_table: std.StringHashMap(*Inst.Loop),
const Body = struct {
instructions: std.ArrayList(*Inst),
@@ -1255,6 +1296,8 @@ const Parser = struct {
if (InstType == Inst.Block) {
try self.block_table.put(inst_name, inst_specific);
+ } else if (InstType == Inst.Loop) {
+ try self.loop_table.put(inst_name, inst_specific);
}
if (@hasField(InstType, "ty")) {
@@ -1366,6 +1409,10 @@ const Parser = struct {
const name = try self.parseStringLiteral();
return self.block_table.get(name).?;
},
+ *Inst.Loop => {
+ const name = try self.parseStringLiteral();
+ return self.loop_table.get(name).?;
+ },
else => @compileError("Unimplemented: ir parseParameterGeneric for type " ++ @typeName(T)),
}
return self.fail("TODO parse parameter {}", .{@typeName(T)});
@@ -1431,8 +1478,10 @@ pub fn emit(allocator: *Allocator, old_module: IrModule) !Module {
.primitive_table = std.AutoHashMap(Inst.Primitive.Builtin, *Decl).init(allocator),
.indent = 0,
.block_table = std.AutoHashMap(*ir.Inst.Block, *Inst.Block).init(allocator),
+ .loop_table = std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop).init(allocator),
};
defer ctx.block_table.deinit();
+ defer ctx.loop_table.deinit();
defer ctx.decls.deinit(allocator);
defer ctx.names.deinit();
defer ctx.primitive_table.deinit();
@@ -1456,6 +1505,7 @@ const EmitZIR = struct {
primitive_table: std.AutoHashMap(Inst.Primitive.Builtin, *Decl),
indent: usize,
block_table: std.AutoHashMap(*ir.Inst.Block, *Inst.Block),
+ loop_table: std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop),
fn emit(self: *EmitZIR) !void {
// Put all the Decls in a list and sort them by name to avoid nondeterminism introduced
@@ -1936,6 +1986,31 @@ const EmitZIR = struct {
break :blk &new_inst.base;
},
+ .loop => blk: {
+ const old_inst = inst.castTag(.loop).?;
+ const new_inst = try self.arena.allocator.create(Inst.Loop);
+
+ try self.loop_table.put(old_inst, new_inst);
+
+ var loop_body = std.ArrayList(*Inst).init(self.allocator);
+ defer loop_body.deinit();
+
+ try self.emitBody(old_inst.body, inst_table, &loop_body);
+
+ new_inst.* = .{
+ .base = .{
+ .src = inst.src,
+ .tag = Inst.Loop.base_tag,
+ },
+ .positionals = .{
+ .body = .{ .instructions = loop_body.toOwnedSlice() },
+ },
+ .kw_args = .{},
+ };
+
+ break :blk &new_inst.base;
+ },
+
.brvoid => blk: {
const old_inst = inst.cast(ir.Inst.BrVoid).?;
const new_block = self.block_table.get(old_inst.block).?;
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 2b2739a308..e36487e8d5 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -60,6 +60,8 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
return mod.constIntBig(scope, old_inst.src, Type.initTag(.comptime_int), big_int);
},
.inttype => return analyzeInstIntType(mod, scope, old_inst.castTag(.inttype).?),
+ .loop => return analyzeInstLoop(mod, scope, old_inst.castTag(.loop).?),
+ .repeat => return analyzeInstRepeat(mod, scope, old_inst.castTag(.repeat).?),
.param_type => return analyzeInstParamType(mod, scope, old_inst.castTag(.param_type).?),
.ptrtoint => return analyzeInstPtrToInt(mod, scope, old_inst.castTag(.ptrtoint).?),
.fieldptr => return analyzeInstFieldPtr(mod, scope, old_inst.castTag(.fieldptr).?),
@@ -424,6 +426,14 @@ fn analyzeInstArg(mod: *Module, scope: *Scope, inst: *zir.Inst.Arg) InnerError!*
return mod.addArg(b, inst.base.src, param_type, name);
}
+fn analyzeInstRepeat(mod: *Module, scope: *Scope, inst: *zir.Inst.Repeat) InnerError!*Inst {
+ return mod.fail(scope, inst.base.src, "TODO analyze .repeat ZIR", .{});
+}
+
+fn analyzeInstLoop(mod: *Module, scope: *Scope, inst: *zir.Inst.Loop) InnerError!*Inst {
+ return mod.fail(scope, inst.base.src, "TODO analyze .loop ZIR", .{});
+}
+
fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerError!*Inst {
const parent_block = scope.cast(Scope.Block).?;
From b59e2c1e009e8e2df2de02d61d8f4ef463a652be Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Sun, 9 Aug 2020 15:41:43 -0400
Subject: [PATCH 054/153] CBE: improve noreturn definition, support noreturn on
MSVC
---
src-self-hosted/cbe.h | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src-self-hosted/cbe.h b/src-self-hosted/cbe.h
index 66e7b8bd3e..7d0e48eaae 100644
--- a/src-self-hosted/cbe.h
+++ b/src-self-hosted/cbe.h
@@ -1,7 +1,9 @@
#if __STDC_VERSION__ >= 201112L
#define noreturn _Noreturn
-#elif __GNUC__ && !__STRICT_ANSI__
+#elif __GNUC__
#define noreturn __attribute__ ((noreturn))
+#elif _MSC_VER
+#define noreturn __declspec(noreturn)
#else
#define noreturn
#endif
From dd1f1487e436650f93e05648b5ba879d6832c2a7 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Sun, 9 Aug 2020 17:21:40 -0400
Subject: [PATCH 055/153] CBE: Use zig_noreturn instead of noreturn to avoid
namespace conflict
---
src-self-hosted/cbe.h | 8 ++++----
src-self-hosted/codegen/c.zig | 3 +--
src-self-hosted/link.zig | 1 -
test/stage2/cbe.zig | 17 ++++++++---------
4 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/src-self-hosted/cbe.h b/src-self-hosted/cbe.h
index 7d0e48eaae..691aa45725 100644
--- a/src-self-hosted/cbe.h
+++ b/src-self-hosted/cbe.h
@@ -1,10 +1,10 @@
#if __STDC_VERSION__ >= 201112L
-#define noreturn _Noreturn
+#define zig_noreturn _Noreturn
#elif __GNUC__
-#define noreturn __attribute__ ((noreturn))
+#define zig_noreturn __attribute__ ((noreturn))
#elif _MSC_VER
-#define noreturn __declspec(noreturn)
+#define zig_noreturn __declspec(noreturn)
#else
-#define noreturn
+#define zig_noreturn
#endif
diff --git a/src-self-hosted/codegen/c.zig b/src-self-hosted/codegen/c.zig
index db9d9a1030..39110d03d7 100644
--- a/src-self-hosted/codegen/c.zig
+++ b/src-self-hosted/codegen/c.zig
@@ -24,8 +24,7 @@ fn renderType(file: *C, writer: std.ArrayList(u8).Writer, T: Type, src: usize) !
} else {
switch (T.zigTypeTag()) {
.NoReturn => {
- file.need_noreturn = true;
- try writer.writeAll("noreturn void");
+ try writer.writeAll("zig_noreturn void");
},
.Void => try writer.writeAll("void"),
.Int => {
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 431d510b01..28340a4024 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -202,7 +202,6 @@ pub const File = struct {
called: std.StringHashMap(void),
need_stddef: bool = false,
need_stdint: bool = false,
- need_noreturn: bool = false,
error_msg: *Module.ErrorMsg = undefined,
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File {
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig
index 2c3f0631c8..0529c07e2a 100644
--- a/test/stage2/cbe.zig
+++ b/test/stage2/cbe.zig
@@ -12,7 +12,7 @@ pub fn addCases(ctx: *TestContext) !void {
ctx.c("empty start function", linux_x64,
\\export fn _start() noreturn {}
,
- \\noreturn void _start(void) {}
+ \\zig_noreturn void _start(void) {}
\\
);
ctx.c("less empty start function", linux_x64,
@@ -22,19 +22,19 @@ pub fn addCases(ctx: *TestContext) !void {
\\ main();
\\}
,
- \\noreturn void main(void);
+ \\zig_noreturn void main(void);
\\
- \\noreturn void _start(void) {
+ \\zig_noreturn void _start(void) {
\\ main();
\\}
\\
- \\noreturn void main(void) {}
+ \\zig_noreturn void main(void) {}
\\
);
// TODO: implement return values
// TODO: figure out a way to prevent asm constants from being generated
ctx.c("inline asm", linux_x64,
- \\fn exitGood() void {
+ \\fn exitGood() noreturn {
\\ asm volatile ("syscall"
\\ :
\\ : [number] "{rax}" (231),
@@ -48,21 +48,20 @@ pub fn addCases(ctx: *TestContext) !void {
,
\\#include
\\
- \\void exitGood(void);
+ \\zig_noreturn void exitGood(void);
\\
\\const char *const exitGood__anon_0 = "{rax}";
\\const char *const exitGood__anon_1 = "{rdi}";
\\const char *const exitGood__anon_2 = "syscall";
\\
- \\noreturn void _start(void) {
+ \\zig_noreturn void _start(void) {
\\ exitGood();
\\}
\\
- \\void exitGood(void) {
+ \\zig_noreturn void exitGood(void) {
\\ register size_t rax_constant __asm__("rax") = 231;
\\ register size_t rdi_constant __asm__("rdi") = 0;
\\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
- \\ return;
\\}
\\
);
From 4d778e630a1902e59e2c0f92674d7f9bda026cf6 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Sun, 9 Aug 2020 17:53:56 -0400
Subject: [PATCH 056/153] Tests: print generated C on test failure
---
src-self-hosted/test.zig | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
index 7d4cc7d563..265c26b2dc 100644
--- a/src-self-hosted/test.zig
+++ b/src-self-hosted/test.zig
@@ -478,6 +478,10 @@ pub const TestContext = struct {
for (all_errors.list) |err| {
std.debug.warn(":{}:{}: error: {}\n================\n", .{ err.line + 1, err.column + 1, err.msg });
}
+ if (case.cbe) {
+ const C = module.bin_file.cast(link.File.C).?;
+ std.debug.warn("Generated C: \n===============\n{}\n\n===========\n\n", .{C.main.items});
+ }
std.debug.warn("Test failed.\n", .{});
std.process.exit(1);
}
From d3eec7d46b1dea6c99dd4beea370a3629843c111 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Sun, 9 Aug 2020 18:45:34 -0400
Subject: [PATCH 057/153] CBE: working parameters
---
src-self-hosted/codegen/c.zig | 82 ++++++++++++++++++++++++++---------
test/stage2/cbe.zig | 34 +++++++++++++++
2 files changed, 96 insertions(+), 20 deletions(-)
diff --git a/src-self-hosted/codegen/c.zig b/src-self-hosted/codegen/c.zig
index 39110d03d7..ba68a3a5a4 100644
--- a/src-self-hosted/codegen/c.zig
+++ b/src-self-hosted/codegen/c.zig
@@ -40,16 +40,33 @@ fn renderType(file: *C, writer: std.ArrayList(u8).Writer, T: Type, src: usize) !
}
}
+fn renderValue(file: *C, writer: std.ArrayList(u8).Writer, val: Value, src: usize) !void {
+ switch (val.tag()) {
+ .int_u64 => return writer.print("{}", .{val.toUnsignedInt()}),
+ else => |e| return file.fail(src, "TODO implement value {}", .{e}),
+ }
+}
+
fn renderFunctionSignature(file: *C, writer: std.ArrayList(u8).Writer, decl: *Decl) !void {
const tv = decl.typed_value.most_recent.typed_value;
try renderType(file, writer, tv.ty.fnReturnType(), decl.src());
const name = try map(file.base.allocator, mem.spanZ(decl.name));
defer file.base.allocator.free(name);
try writer.print(" {}(", .{name});
- if (tv.ty.fnParamLen() == 0)
- try writer.writeAll("void)")
- else
- return file.fail(decl.src(), "TODO implement parameters", .{});
+ var param_len = tv.ty.fnParamLen();
+ if (param_len == 0)
+ try writer.writeAll("void")
+ else {
+ var index: usize = 0;
+ while (index < param_len) : (index += 1) {
+ if (index > 0) {
+ try writer.writeAll(", ");
+ }
+ try renderType(file, writer, tv.ty.fnParamType(index), decl.src());
+ try writer.print(" arg{}", .{index});
+ }
+ }
+ try writer.writeByte(')');
}
pub fn generate(file: *C, decl: *Decl) !void {
@@ -87,15 +104,19 @@ fn genFn(file: *C, decl: *Decl) !void {
const func: *Module.Fn = tv.val.cast(Value.Payload.Function).?.func;
const instructions = func.analysis.success.instructions;
+ var argdex: usize = 0;
if (instructions.len > 0) {
try writer.writeAll("\n");
for (instructions) |inst| {
switch (inst.tag) {
- .assembly => try genAsm(file, inst.castTag(.assembly).?, decl),
+ .assembly => try genAsm(file, inst.castTag(.assembly).?, decl, &argdex),
.call => try genCall(file, inst.castTag(.call).?, decl),
.ret => try genRet(file, inst.castTag(.ret).?, decl, tv.ty.fnReturnType()),
.retvoid => try file.main.writer().print(" return;\n", .{}),
+ .arg => {},
.dbg_stmt => try genDbgStmt(file, inst.castTag(.dbg_stmt).?, decl),
+ .breakpoint => try genBreak(file, inst.castTag(.breakpoint).?, decl),
+ .unreach => try genUnreach(file, inst.castTag(.unreach).?, decl),
else => |e| return file.fail(decl.src(), "TODO implement C codegen for {}", .{e}),
}
}
@@ -126,13 +147,23 @@ fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
try renderFunctionSignature(file, header, target);
try header.writeAll(";\n");
}
- try writer.print("{}();\n", .{tname});
+ try writer.print("{}(", .{tname});
+ if (inst.args.len != 0) {
+ for (inst.args) |arg, i| {
+ if (i > 0) {
+ try writer.writeAll(", ");
+ }
+ if (arg.cast(Inst.Constant)) |con| {
+ try renderValue(file, writer, con.val, decl.src());
+ } else {
+ return file.fail(decl.src(), "TODO call pass arg {}", .{arg});
+ }
+ }
+ }
+ try writer.writeAll(");\n");
} else {
return file.fail(decl.src(), "TODO non-function call target?", .{});
}
- if (inst.args.len != 0) {
- return file.fail(decl.src(), "TODO function arguments", .{});
- }
} else {
return file.fail(decl.src(), "TODO non-constant call inst?", .{});
}
@@ -142,24 +173,33 @@ fn genDbgStmt(file: *C, inst: *Inst.NoOp, decl: *Decl) !void {
// TODO emit #line directive here with line number and filename
}
-fn genAsm(file: *C, as: *Inst.Assembly, decl: *Decl) !void {
+fn genBreak(file: *C, inst: *Inst.NoOp, decl: *Decl) !void {
+ // TODO ??
+}
+
+fn genUnreach(file: *C, inst: *Inst.NoOp, decl: *Decl) !void {
+ // TODO ??
+}
+
+fn genAsm(file: *C, as: *Inst.Assembly, decl: *Decl, argdex: *usize) !void {
const writer = file.main.writer();
try writer.writeAll(" ");
for (as.inputs) |i, index| {
if (i[0] == '{' and i[i.len - 1] == '}') {
const reg = i[1 .. i.len - 1];
const arg = as.args[index];
+ try writer.writeAll("register ");
+ try renderType(file, writer, arg.ty, decl.src());
+ try writer.print(" {}_constant __asm__(\"{}\") = ", .{ reg, reg });
if (arg.castTag(.constant)) |c| {
- if (c.val.tag() == .int_u64) {
- try writer.writeAll("register ");
- try renderType(file, writer, arg.ty, decl.src());
- try writer.print(" {}_constant __asm__(\"{}\") = {};\n ", .{ reg, reg, c.val.toUnsignedInt() });
- } else {
- return file.fail(decl.src(), "TODO inline asm {} args", .{c.val.tag()});
- }
+ try renderValue(file, writer, c.val, decl.src());
+ } else if (arg.castTag(.arg)) |inst| {
+ try writer.print("arg{}", .{argdex.*});
+ argdex.* += 1;
} else {
- return file.fail(decl.src(), "TODO non-constant inline asm args", .{});
+ return file.fail(decl.src(), "TODO non-constant inline asm args ({})", .{arg.tag});
}
+ try writer.writeAll(";\n ");
} else {
return file.fail(decl.src(), "TODO non-explicit inline asm regs", .{});
}
@@ -180,12 +220,14 @@ fn genAsm(file: *C, as: *Inst.Assembly, decl: *Decl) !void {
if (index > 0) {
try writer.writeAll(", ");
}
- if (arg.castTag(.constant)) |c| {
- try writer.print("\"\"({}_constant)", .{reg});
+ try writer.writeAll("\"\"(");
+ if (arg.tag == .constant or arg.tag == .arg) {
+ try writer.print("{}_constant", .{reg});
} else {
// This is blocked by the earlier test
unreachable;
}
+ try writer.writeByte(')');
} else {
// This is blocked by the earlier test
unreachable;
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig
index 0529c07e2a..36c4514d49 100644
--- a/test/stage2/cbe.zig
+++ b/test/stage2/cbe.zig
@@ -65,4 +65,38 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
\\
);
+ ctx.c("exit", linux_x64,
+ \\export fn _start() noreturn {
+ \\ exit(0);
+ \\}
+ \\
+ \\fn exit(code: usize) noreturn {
+ \\ asm volatile ("syscall"
+ \\ :
+ \\ : [number] "{rax}" (231),
+ \\ [arg1] "{rdi}" (code)
+ \\ );
+ \\ unreachable;
+ \\}
+ \\
+ ,
+ \\#include
+ \\
+ \\zig_noreturn void exit(size_t arg0);
+ \\
+ \\const char *const exit__anon_0 = "{rax}";
+ \\const char *const exit__anon_1 = "{rdi}";
+ \\const char *const exit__anon_2 = "syscall";
+ \\
+ \\zig_noreturn void _start(void) {
+ \\ exit(0);
+ \\}
+ \\
+ \\zig_noreturn void exit(size_t arg0) {
+ \\ register size_t rax_constant __asm__("rax") = 231;
+ \\ register size_t rdi_constant __asm__("rdi") = arg0;
+ \\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
+ \\}
+ \\
+ );
}
From 78fe86dcd23821b6a40bc5a0067513c452e50265 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Sun, 9 Aug 2020 18:52:32 -0400
Subject: [PATCH 058/153] CBE: support unreachable on GCC
---
src-self-hosted/cbe.h | 5 +++++
src-self-hosted/codegen/c.zig | 2 +-
test/stage2/cbe.zig | 3 ++-
3 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/src-self-hosted/cbe.h b/src-self-hosted/cbe.h
index 691aa45725..854032227d 100644
--- a/src-self-hosted/cbe.h
+++ b/src-self-hosted/cbe.h
@@ -8,3 +8,8 @@
#define zig_noreturn
#endif
+#if __GNUC__
+#define zig_unreachable() __builtin_unreachable()
+#else
+#define zig_unreachable()
+#endif
diff --git a/src-self-hosted/codegen/c.zig b/src-self-hosted/codegen/c.zig
index ba68a3a5a4..591e47967e 100644
--- a/src-self-hosted/codegen/c.zig
+++ b/src-self-hosted/codegen/c.zig
@@ -178,7 +178,7 @@ fn genBreak(file: *C, inst: *Inst.NoOp, decl: *Decl) !void {
}
fn genUnreach(file: *C, inst: *Inst.NoOp, decl: *Decl) !void {
- // TODO ??
+ try file.main.writer().writeAll(" zig_unreachable();\n");
}
fn genAsm(file: *C, as: *Inst.Assembly, decl: *Decl, argdex: *usize) !void {
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig
index 36c4514d49..59bc062b74 100644
--- a/test/stage2/cbe.zig
+++ b/test/stage2/cbe.zig
@@ -65,7 +65,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
\\
);
- ctx.c("exit", linux_x64,
+ ctx.c("exit with parameter", linux_x64,
\\export fn _start() noreturn {
\\ exit(0);
\\}
@@ -96,6 +96,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ register size_t rax_constant __asm__("rax") = 231;
\\ register size_t rdi_constant __asm__("rdi") = arg0;
\\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
+ \\ zig_unreachable();
\\}
\\
);
From dbd1e42ef227bf55e9356b2d1a1cbde1fbec0e82 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Sun, 9 Aug 2020 19:44:16 -0400
Subject: [PATCH 059/153] CBE: Sorta working intcasts?
---
src-self-hosted/codegen/c.zig | 27 ++++++++++++++++++--------
test/stage2/cbe.zig | 36 +++++++++++++++++++++++++++++++++++
2 files changed, 55 insertions(+), 8 deletions(-)
diff --git a/src-self-hosted/codegen/c.zig b/src-self-hosted/codegen/c.zig
index 591e47967e..907bbccda1 100644
--- a/src-self-hosted/codegen/c.zig
+++ b/src-self-hosted/codegen/c.zig
@@ -117,6 +117,8 @@ fn genFn(file: *C, decl: *Decl) !void {
.dbg_stmt => try genDbgStmt(file, inst.castTag(.dbg_stmt).?, decl),
.breakpoint => try genBreak(file, inst.castTag(.breakpoint).?, decl),
.unreach => try genUnreach(file, inst.castTag(.unreach).?, decl),
+ // This will be handled correctly later?
+ .intcast => {},
else => |e| return file.fail(decl.src(), "TODO implement C codegen for {}", .{e}),
}
}
@@ -129,6 +131,20 @@ fn genRet(file: *C, inst: *Inst.UnOp, decl: *Decl, expected_return_type: Type) !
return file.fail(decl.src(), "TODO return {}", .{expected_return_type});
}
+fn genIntCast(file: *C, inst: *Inst.UnOp, decl: *Decl, argdex: *usize) !void {
+ const op = inst.operand;
+ const writer = file.main.writer();
+ try writer.writeByte('(');
+ try renderType(file, writer, inst.base.ty, decl.src());
+ try writer.writeByte(')');
+ if (op.castTag(.arg)) |_| {
+ try writer.print("arg{}", .{argdex.*});
+ argdex.* += 1;
+ } else {
+ return file.fail(decl.src(), "TODO intcast {} to {}", .{ op, inst.base.ty });
+ }
+}
+
fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
const writer = file.main.writer();
const header = file.header.writer();
@@ -196,6 +212,8 @@ fn genAsm(file: *C, as: *Inst.Assembly, decl: *Decl, argdex: *usize) !void {
} else if (arg.castTag(.arg)) |inst| {
try writer.print("arg{}", .{argdex.*});
argdex.* += 1;
+ } else if (arg.castTag(.intcast)) |inst| {
+ try genIntCast(file, inst, decl, argdex);
} else {
return file.fail(decl.src(), "TODO non-constant inline asm args ({})", .{arg.tag});
}
@@ -220,14 +238,7 @@ fn genAsm(file: *C, as: *Inst.Assembly, decl: *Decl, argdex: *usize) !void {
if (index > 0) {
try writer.writeAll(", ");
}
- try writer.writeAll("\"\"(");
- if (arg.tag == .constant or arg.tag == .arg) {
- try writer.print("{}_constant", .{reg});
- } else {
- // This is blocked by the earlier test
- unreachable;
- }
- try writer.writeByte(')');
+ try writer.print("\"\"({}_constant)", .{reg});
} else {
// This is blocked by the earlier test
unreachable;
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig
index 59bc062b74..4b35286f94 100644
--- a/test/stage2/cbe.zig
+++ b/test/stage2/cbe.zig
@@ -100,4 +100,40 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
\\
);
+ ctx.c("exit with u8 parameter", linux_x64,
+ \\export fn _start() noreturn {
+ \\ exit(0);
+ \\}
+ \\
+ \\fn exit(code: u8) noreturn {
+ \\ asm volatile ("syscall"
+ \\ :
+ \\ : [number] "{rax}" (231),
+ \\ [arg1] "{rdi}" (code)
+ \\ );
+ \\ unreachable;
+ \\}
+ \\
+ ,
+ \\#include
+ \\#include
+ \\
+ \\zig_noreturn void exit(uint8_t arg0);
+ \\
+ \\const char *const exit__anon_0 = "{rax}";
+ \\const char *const exit__anon_1 = "{rdi}";
+ \\const char *const exit__anon_2 = "syscall";
+ \\
+ \\zig_noreturn void _start(void) {
+ \\ exit(0);
+ \\}
+ \\
+ \\zig_noreturn void exit(uint8_t arg0) {
+ \\ register size_t rax_constant __asm__("rax") = 231;
+ \\ register size_t rdi_constant __asm__("rdi") = (size_t)arg0;
+ \\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
+ \\ zig_unreachable();
+ \\}
+ \\
+ );
}
From 5a166cead80d4d77bffba0b116fdeffb5820f5a4 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Tue, 11 Aug 2020 10:24:32 -0400
Subject: [PATCH 060/153] CBE: fix handling of IR dependencies
---
src-self-hosted/codegen/c.zig | 151 ++++++++++++++++++++++------------
src-self-hosted/link.zig | 2 +-
test/stage2/cbe.zig | 3 +-
3 files changed, 102 insertions(+), 54 deletions(-)
diff --git a/src-self-hosted/codegen/c.zig b/src-self-hosted/codegen/c.zig
index 907bbccda1..76585a6dfb 100644
--- a/src-self-hosted/codegen/c.zig
+++ b/src-self-hosted/codegen/c.zig
@@ -94,6 +94,28 @@ fn genArray(file: *C, decl: *Decl) !void {
return file.fail(decl.src(), "TODO non-byte arrays", .{});
}
+const Context = struct {
+ file: *C,
+ decl: *Decl,
+ inst_map: std.AutoHashMap(*Inst, []u8),
+ argdex: usize = 0,
+ unnamed_index: usize = 0,
+
+ fn name(self: *Context) ![]u8 {
+ const val = try std.fmt.allocPrint(self.file.base.allocator, "__temp_{}", .{self.unnamed_index});
+ self.unnamed_index += 1;
+ return val;
+ }
+
+ fn deinit(self: *Context) void {
+ for (self.inst_map.items()) |kv| {
+ self.file.base.allocator.free(kv.value);
+ }
+ self.inst_map.deinit();
+ self.* = undefined;
+ }
+};
+
fn genFn(file: *C, decl: *Decl) !void {
const writer = file.main.writer();
const tv = decl.typed_value.most_recent.typed_value;
@@ -102,24 +124,31 @@ fn genFn(file: *C, decl: *Decl) !void {
try writer.writeAll(" {");
+ var ctx = Context{
+ .file = file,
+ .decl = decl,
+ .inst_map = std.AutoHashMap(*Inst, []u8).init(file.base.allocator),
+ };
+ defer ctx.deinit();
+
const func: *Module.Fn = tv.val.cast(Value.Payload.Function).?.func;
const instructions = func.analysis.success.instructions;
- var argdex: usize = 0;
if (instructions.len > 0) {
try writer.writeAll("\n");
for (instructions) |inst| {
- switch (inst.tag) {
- .assembly => try genAsm(file, inst.castTag(.assembly).?, decl, &argdex),
- .call => try genCall(file, inst.castTag(.call).?, decl),
- .ret => try genRet(file, inst.castTag(.ret).?, decl, tv.ty.fnReturnType()),
- .retvoid => try file.main.writer().print(" return;\n", .{}),
- .arg => {},
- .dbg_stmt => try genDbgStmt(file, inst.castTag(.dbg_stmt).?, decl),
- .breakpoint => try genBreak(file, inst.castTag(.breakpoint).?, decl),
- .unreach => try genUnreach(file, inst.castTag(.unreach).?, decl),
- // This will be handled correctly later?
- .intcast => {},
+ if (switch (inst.tag) {
+ .assembly => try genAsm(&ctx, inst.castTag(.assembly).?),
+ .call => try genCall(&ctx, inst.castTag(.call).?),
+ .ret => try genRet(&ctx, inst.castTag(.ret).?),
+ .retvoid => try genRetVoid(&ctx),
+ .arg => try genArg(&ctx),
+ .dbg_stmt => try genDbgStmt(&ctx, inst.castTag(.dbg_stmt).?),
+ .breakpoint => try genBreak(&ctx, inst.castTag(.breakpoint).?),
+ .unreach => try genUnreach(&ctx, inst.castTag(.unreach).?),
+ .intcast => try genIntCast(&ctx, inst.castTag(.intcast).?),
else => |e| return file.fail(decl.src(), "TODO implement C codegen for {}", .{e}),
+ }) |name| {
+ try ctx.inst_map.putNoClobber(inst, name);
}
}
}
@@ -127,27 +156,40 @@ fn genFn(file: *C, decl: *Decl) !void {
try writer.writeAll("}\n\n");
}
-fn genRet(file: *C, inst: *Inst.UnOp, decl: *Decl, expected_return_type: Type) !void {
- return file.fail(decl.src(), "TODO return {}", .{expected_return_type});
+fn genArg(ctx: *Context) !?[]u8 {
+ const name = try std.fmt.allocPrint(ctx.file.base.allocator, "arg{}", .{ctx.argdex});
+ ctx.argdex += 1;
+ return name;
}
-fn genIntCast(file: *C, inst: *Inst.UnOp, decl: *Decl, argdex: *usize) !void {
+fn genRetVoid(ctx: *Context) !?[]u8 {
+ try ctx.file.main.writer().print(" return;\n", .{});
+ return null;
+}
+
+fn genRet(ctx: *Context, inst: *Inst.UnOp) !?[]u8 {
+ return ctx.file.fail(ctx.decl.src(), "TODO return", .{});
+}
+
+fn genIntCast(ctx: *Context, inst: *Inst.UnOp) !?[]u8 {
+ if (inst.base.isUnused())
+ return null;
const op = inst.operand;
- const writer = file.main.writer();
- try writer.writeByte('(');
- try renderType(file, writer, inst.base.ty, decl.src());
- try writer.writeByte(')');
- if (op.castTag(.arg)) |_| {
- try writer.print("arg{}", .{argdex.*});
- argdex.* += 1;
- } else {
- return file.fail(decl.src(), "TODO intcast {} to {}", .{ op, inst.base.ty });
- }
+ const writer = ctx.file.main.writer();
+ const name = try ctx.name();
+ const from = ctx.inst_map.get(op) orelse
+ return ctx.file.fail(ctx.decl.src(), "Internal error in C backend: intCast argument not found in inst_map", .{});
+ try writer.writeAll(" const ");
+ try renderType(ctx.file, writer, inst.base.ty, ctx.decl.src());
+ try writer.print(" {} = (", .{name});
+ try renderType(ctx.file, writer, inst.base.ty, ctx.decl.src());
+ try writer.print("){};\n", .{from});
+ return name;
}
-fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
- const writer = file.main.writer();
- const header = file.header.writer();
+fn genCall(ctx: *Context, inst: *Inst.Call) !?[]u8 {
+ const writer = ctx.file.main.writer();
+ const header = ctx.file.header.writer();
try writer.writeAll(" ");
if (inst.func.castTag(.constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
@@ -158,9 +200,9 @@ fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
try writer.print("(void)", .{});
}
const tname = mem.spanZ(target.name);
- if (file.called.get(tname) == null) {
- try file.called.put(tname, void{});
- try renderFunctionSignature(file, header, target);
+ if (ctx.file.called.get(tname) == null) {
+ try ctx.file.called.put(tname, void{});
+ try renderFunctionSignature(ctx.file, header, target);
try header.writeAll(";\n");
}
try writer.print("{}(", .{tname});
@@ -170,61 +212,65 @@ fn genCall(file: *C, inst: *Inst.Call, decl: *Decl) !void {
try writer.writeAll(", ");
}
if (arg.cast(Inst.Constant)) |con| {
- try renderValue(file, writer, con.val, decl.src());
+ try renderValue(ctx.file, writer, con.val, ctx.decl.src());
} else {
- return file.fail(decl.src(), "TODO call pass arg {}", .{arg});
+ return ctx.file.fail(ctx.decl.src(), "TODO call pass arg {}", .{arg});
}
}
}
try writer.writeAll(");\n");
} else {
- return file.fail(decl.src(), "TODO non-function call target?", .{});
+ return ctx.file.fail(ctx.decl.src(), "TODO non-function call target?", .{});
}
} else {
- return file.fail(decl.src(), "TODO non-constant call inst?", .{});
+ return ctx.file.fail(ctx.decl.src(), "TODO non-constant call inst?", .{});
}
+ return null;
}
-fn genDbgStmt(file: *C, inst: *Inst.NoOp, decl: *Decl) !void {
+fn genDbgStmt(ctx: *Context, inst: *Inst.NoOp) !?[]u8 {
// TODO emit #line directive here with line number and filename
+ return null;
}
-fn genBreak(file: *C, inst: *Inst.NoOp, decl: *Decl) !void {
+fn genBreak(ctx: *Context, inst: *Inst.NoOp) !?[]u8 {
// TODO ??
+ return null;
}
-fn genUnreach(file: *C, inst: *Inst.NoOp, decl: *Decl) !void {
- try file.main.writer().writeAll(" zig_unreachable();\n");
+fn genUnreach(ctx: *Context, inst: *Inst.NoOp) !?[]u8 {
+ try ctx.file.main.writer().writeAll(" zig_unreachable();\n");
+ return null;
}
-fn genAsm(file: *C, as: *Inst.Assembly, decl: *Decl, argdex: *usize) !void {
- const writer = file.main.writer();
+fn genAsm(ctx: *Context, as: *Inst.Assembly) !?[]u8 {
+ const writer = ctx.file.main.writer();
try writer.writeAll(" ");
for (as.inputs) |i, index| {
if (i[0] == '{' and i[i.len - 1] == '}') {
const reg = i[1 .. i.len - 1];
const arg = as.args[index];
try writer.writeAll("register ");
- try renderType(file, writer, arg.ty, decl.src());
+ try renderType(ctx.file, writer, arg.ty, ctx.decl.src());
try writer.print(" {}_constant __asm__(\"{}\") = ", .{ reg, reg });
+ // TODO merge constant handling into inst_map as well
if (arg.castTag(.constant)) |c| {
- try renderValue(file, writer, c.val, decl.src());
- } else if (arg.castTag(.arg)) |inst| {
- try writer.print("arg{}", .{argdex.*});
- argdex.* += 1;
- } else if (arg.castTag(.intcast)) |inst| {
- try genIntCast(file, inst, decl, argdex);
+ try renderValue(ctx.file, writer, c.val, ctx.decl.src());
+ try writer.writeAll(";\n ");
} else {
- return file.fail(decl.src(), "TODO non-constant inline asm args ({})", .{arg.tag});
+ const gop = try ctx.inst_map.getOrPut(arg);
+ if (!gop.found_existing) {
+ return ctx.file.fail(ctx.decl.src(), "Internal error in C backend: asm argument not found in inst_map", .{});
+ }
+ try writer.print("{};\n ", .{gop.entry.value});
}
- try writer.writeAll(";\n ");
} else {
- return file.fail(decl.src(), "TODO non-explicit inline asm regs", .{});
+ return ctx.file.fail(ctx.decl.src(), "TODO non-explicit inline asm regs", .{});
}
}
try writer.print("__asm {} (\"{}\"", .{ if (as.is_volatile) @as([]const u8, "volatile") else "", as.asm_source });
if (as.output) |o| {
- return file.fail(decl.src(), "TODO inline asm output", .{});
+ return ctx.file.fail(ctx.decl.src(), "TODO inline asm output", .{});
}
if (as.inputs.len > 0) {
if (as.output == null) {
@@ -246,4 +292,5 @@ fn genAsm(file: *C, as: *Inst.Assembly, decl: *Decl, argdex: *usize) !void {
}
}
try writer.writeAll(");\n");
+ return null;
}
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 28340a4024..06917cc490 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -229,7 +229,7 @@ pub const File = struct {
return &c_file.base;
}
- pub fn fail(self: *C, src: usize, comptime format: []const u8, args: anytype) !void {
+ pub fn fail(self: *C, src: usize, comptime format: []const u8, args: anytype) anyerror {
self.error_msg = try Module.ErrorMsg.create(self.base.allocator, src, format, args);
return error.AnalysisFail;
}
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig
index 4b35286f94..5e2d56b5ed 100644
--- a/test/stage2/cbe.zig
+++ b/test/stage2/cbe.zig
@@ -129,8 +129,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\}
\\
\\zig_noreturn void exit(uint8_t arg0) {
+ \\ const size_t __temp_0 = (size_t)arg0;
\\ register size_t rax_constant __asm__("rax") = 231;
- \\ register size_t rdi_constant __asm__("rdi") = (size_t)arg0;
+ \\ register size_t rdi_constant __asm__("rdi") = __temp_0;
\\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
\\ zig_unreachable();
\\}
From 1eb5aaa4b5bee902bed354ef6ebfbb643a624440 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Wed, 12 Aug 2020 22:15:09 -0400
Subject: [PATCH 061/153] CBE: renderValue pays attention to Type, not Tag
---
src-self-hosted/codegen/c.zig | 80 ++++++++++++++++++-----------------
src-self-hosted/value.zig | 75 ++++++++++++++++++++++++++++++++
2 files changed, 116 insertions(+), 39 deletions(-)
diff --git a/src-self-hosted/codegen/c.zig b/src-self-hosted/codegen/c.zig
index 76585a6dfb..f0d3d8367a 100644
--- a/src-self-hosted/codegen/c.zig
+++ b/src-self-hosted/codegen/c.zig
@@ -17,41 +17,43 @@ fn map(allocator: *std.mem.Allocator, name: []const u8) ![]const u8 {
return allocator.dupe(u8, name);
}
-fn renderType(file: *C, writer: std.ArrayList(u8).Writer, T: Type, src: usize) !void {
- if (T.tag() == .usize) {
- file.need_stddef = true;
- try writer.writeAll("size_t");
- } else {
- switch (T.zigTypeTag()) {
- .NoReturn => {
- try writer.writeAll("zig_noreturn void");
- },
- .Void => try writer.writeAll("void"),
- .Int => {
- if (T.tag() == .u8) {
- file.need_stdint = true;
- try writer.writeAll("uint8_t");
- } else {
- return file.fail(src, "TODO implement int types", .{});
- }
- },
- else => |e| return file.fail(src, "TODO implement type {}", .{e}),
- }
+fn renderType(ctx: *Context, writer: std.ArrayList(u8).Writer, T: Type) !void {
+ switch (T.zigTypeTag()) {
+ .NoReturn => {
+ try writer.writeAll("zig_noreturn void");
+ },
+ .Void => try writer.writeAll("void"),
+ .Int => {
+ if (T.tag() == .u8) {
+ ctx.file.need_stdint = true;
+ try writer.writeAll("uint8_t");
+ } else if (T.tag() == .usize) {
+ ctx.file.need_stddef = true;
+ try writer.writeAll("size_t");
+ } else {
+ return ctx.file.fail(ctx.decl.src(), "TODO implement int types", .{});
+ }
+ },
+ else => |e| return ctx.file.fail(ctx.decl.src(), "TODO implement type {}", .{e}),
}
}
-fn renderValue(file: *C, writer: std.ArrayList(u8).Writer, val: Value, src: usize) !void {
- switch (val.tag()) {
- .int_u64 => return writer.print("{}", .{val.toUnsignedInt()}),
- else => |e| return file.fail(src, "TODO implement value {}", .{e}),
+fn renderValue(ctx: *Context, writer: std.ArrayList(u8).Writer, T: Type, val: Value) !void {
+ switch (T.zigTypeTag()) {
+ .Int => {
+ if (T.isSignedInt())
+ return writer.print("{}", .{val.toSignedInt()});
+ return writer.print("{}", .{val.toUnsignedInt()});
+ },
+ else => |e| return ctx.file.fail(ctx.decl.src(), "TODO implement value {}", .{e}),
}
}
-fn renderFunctionSignature(file: *C, writer: std.ArrayList(u8).Writer, decl: *Decl) !void {
+fn renderFunctionSignature(ctx: *Context, writer: std.ArrayList(u8).Writer, decl: *Decl) !void {
const tv = decl.typed_value.most_recent.typed_value;
- try renderType(file, writer, tv.ty.fnReturnType(), decl.src());
- const name = try map(file.base.allocator, mem.spanZ(decl.name));
- defer file.base.allocator.free(name);
+ try renderType(ctx, writer, tv.ty.fnReturnType());
+ const name = try map(ctx.file.base.allocator, mem.spanZ(decl.name));
+ defer ctx.file.base.allocator.free(name);
try writer.print(" {}(", .{name});
var param_len = tv.ty.fnParamLen();
if (param_len == 0)
@@ -62,7 +64,7 @@ fn renderFunctionSignature(file: *C, writer: std.ArrayList(u8).Writer, decl: *De
if (index > 0) {
try writer.writeAll(", ");
}
- try renderType(file, writer, tv.ty.fnParamType(index), decl.src());
+ try renderType(ctx, writer, tv.ty.fnParamType(index));
try writer.print(" arg{}", .{index});
}
}
@@ -120,10 +122,6 @@ fn genFn(file: *C, decl: *Decl) !void {
const writer = file.main.writer();
const tv = decl.typed_value.most_recent.typed_value;
- try renderFunctionSignature(file, writer, decl);
-
- try writer.writeAll(" {");
-
var ctx = Context{
.file = file,
.decl = decl,
@@ -131,6 +129,10 @@ fn genFn(file: *C, decl: *Decl) !void {
};
defer ctx.deinit();
+ try renderFunctionSignature(&ctx, writer, decl);
+
+ try writer.writeAll(" {");
+
const func: *Module.Fn = tv.val.cast(Value.Payload.Function).?.func;
const instructions = func.analysis.success.instructions;
if (instructions.len > 0) {
@@ -180,9 +182,9 @@ fn genIntCast(ctx: *Context, inst: *Inst.UnOp) !?[]u8 {
const from = ctx.inst_map.get(op) orelse
return ctx.file.fail(ctx.decl.src(), "Internal error in C backend: intCast argument not found in inst_map", .{});
try writer.writeAll(" const ");
- try renderType(ctx.file, writer, inst.base.ty, ctx.decl.src());
+ try renderType(ctx, writer, inst.base.ty);
try writer.print(" {} = (", .{name});
- try renderType(ctx.file, writer, inst.base.ty, ctx.decl.src());
+ try renderType(ctx, writer, inst.base.ty);
try writer.print("){};\n", .{from});
return name;
}
@@ -202,7 +204,7 @@ fn genCall(ctx: *Context, inst: *Inst.Call) !?[]u8 {
const tname = mem.spanZ(target.name);
if (ctx.file.called.get(tname) == null) {
try ctx.file.called.put(tname, void{});
- try renderFunctionSignature(ctx.file, header, target);
+ try renderFunctionSignature(ctx, header, target);
try header.writeAll(";\n");
}
try writer.print("{}(", .{tname});
@@ -212,7 +214,7 @@ fn genCall(ctx: *Context, inst: *Inst.Call) !?[]u8 {
try writer.writeAll(", ");
}
if (arg.cast(Inst.Constant)) |con| {
- try renderValue(ctx.file, writer, con.val, ctx.decl.src());
+ try renderValue(ctx, writer, arg.ty, con.val);
} else {
return ctx.file.fail(ctx.decl.src(), "TODO call pass arg {}", .{arg});
}
@@ -251,11 +253,11 @@ fn genAsm(ctx: *Context, as: *Inst.Assembly) !?[]u8 {
const reg = i[1 .. i.len - 1];
const arg = as.args[index];
try writer.writeAll("register ");
- try renderType(ctx.file, writer, arg.ty, ctx.decl.src());
+ try renderType(ctx, writer, arg.ty);
try writer.print(" {}_constant __asm__(\"{}\") = ", .{ reg, reg });
// TODO merge constant handling into inst_map as well
if (arg.castTag(.constant)) |c| {
- try renderValue(ctx.file, writer, c.val, ctx.decl.src());
+ try renderValue(ctx, writer, arg.ty, c.val);
try writer.writeAll(";\n ");
} else {
const gop = try ctx.inst_map.getOrPut(arg);
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
index e71805dc5b..b2fad9207d 100644
--- a/src-self-hosted/value.zig
+++ b/src-self-hosted/value.zig
@@ -568,6 +568,81 @@ pub const Value = extern union {
}
}
+ /// Asserts the value is an integer and it fits in a i64
+ pub fn toSignedInt(self: Value) i64 {
+ switch (self.tag()) {
+ .ty,
+ .int_type,
+ .u8_type,
+ .i8_type,
+ .u16_type,
+ .i16_type,
+ .u32_type,
+ .i32_type,
+ .u64_type,
+ .i64_type,
+ .usize_type,
+ .isize_type,
+ .c_short_type,
+ .c_ushort_type,
+ .c_int_type,
+ .c_uint_type,
+ .c_long_type,
+ .c_ulong_type,
+ .c_longlong_type,
+ .c_ulonglong_type,
+ .c_longdouble_type,
+ .f16_type,
+ .f32_type,
+ .f64_type,
+ .f128_type,
+ .c_void_type,
+ .bool_type,
+ .void_type,
+ .type_type,
+ .anyerror_type,
+ .comptime_int_type,
+ .comptime_float_type,
+ .noreturn_type,
+ .null_type,
+ .undefined_type,
+ .fn_noreturn_no_args_type,
+ .fn_void_no_args_type,
+ .fn_naked_noreturn_no_args_type,
+ .fn_ccc_void_no_args_type,
+ .single_const_pointer_to_comptime_int_type,
+ .const_slice_u8_type,
+ .null_value,
+ .function,
+ .ref_val,
+ .decl_ref,
+ .elem_ptr,
+ .bytes,
+ .repeated,
+ .float_16,
+ .float_32,
+ .float_64,
+ .float_128,
+ .void_value,
+ .unreachable_value,
+ .empty_array,
+ => unreachable,
+
+ .undef => unreachable,
+
+ .zero,
+ .bool_false,
+ => return 0,
+
+ .bool_true => return 1,
+
+ .int_u64 => return @intCast(i64, self.cast(Payload.Int_i64).?.int),
+ .int_i64 => return self.cast(Payload.Int_i64).?.int,
+ .int_big_positive => return self.cast(Payload.IntBigPositive).?.asBigInt().to(i64) catch unreachable,
+ .int_big_negative => return self.cast(Payload.IntBigNegative).?.asBigInt().to(i64) catch unreachable,
+ }
+ }
+
pub fn toBool(self: Value) bool {
return switch (self.tag()) {
.bool_true => true,
From eec53d67abb3a3d894de945f549994a16cb92474 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 12 Aug 2020 21:59:40 -0700
Subject: [PATCH 062/153] stage2: anyerror -> explicit error set
---
src-self-hosted/link.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 06917cc490..7c5e645fb5 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -229,7 +229,7 @@ pub const File = struct {
return &c_file.base;
}
- pub fn fail(self: *C, src: usize, comptime format: []const u8, args: anytype) anyerror {
+ pub fn fail(self: *C, src: usize, comptime format: []const u8, args: anytype) error{AnalysisFail, OutOfMemory} {
self.error_msg = try Module.ErrorMsg.create(self.base.allocator, src, format, args);
return error.AnalysisFail;
}
From 3e2e6baee568b0fb9d019fc53974aad30386b824 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 12 Aug 2020 23:50:00 +0200
Subject: [PATCH 063/153] Add std.os.getFdPath and std.fs.Dir.realpath
`std.os.getFdPath` is very platform-specific and can be used to query
the OS for a canonical path to a file handle. Currently supported hosts
are Linux, macOS and Windows.
`std.fs.Dir.realpath` (and null-terminated, plus WTF16 versions) are
similar to `std.os.realpath`, however, they resolve a path wrt to this
`Dir` instance.
If the input pathname argument turns out to be an absolute path, this
function reverts to calling `realpath` on that pathname completely
ignoring this `Dir`.
---
lib/std/fs.zig | 119 +++++++++++++++++++++++++++++++++++++++++++-
lib/std/fs/test.zig | 58 +++++++++++++++------
lib/std/os.zig | 65 +++++++++++++++++-------
3 files changed, 210 insertions(+), 32 deletions(-)
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 57c1534e97..a492a43499 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -926,6 +926,123 @@ pub const Dir = struct {
return self.openDir(sub_path, open_dir_options);
}
+ /// This function returns the canonicalized absolute pathname of
+ /// `pathname` relative to this `Dir`. If `pathname` is absolute, ignores this
+ /// `Dir` handle and returns the canonicalized absolute pathname of `pathname`
+ /// argument.
+ /// This function is not universally supported by all platforms.
+ /// Currently supported hosts are: Linux, macOS, and Windows.
+ /// See also `Dir.realpathZ`, `Dir.realpathW`, and `Dir.realpathAlloc`.
+ pub fn realpath(self: Dir, pathname: []const u8, out_buffer: []u8) ![]u8 {
+ if (builtin.os.tag == .wasi) {
+ @compileError("realpath is unsupported in WASI");
+ }
+ if (builtin.os.tag == .windows) {
+ const pathname_w = try os.windows.sliceToPrefixedFileW(pathname);
+ return self.realpathW(pathname_w.span(), out_buffer);
+ }
+ const pathname_c = try os.toPosixPath(pathname);
+ return self.realpathZ(&pathname_c, out_buffer);
+ }
+
+ /// Same as `Dir.realpath` except `pathname` is null-terminated.
+ /// See also `Dir.realpath`, `realpathZ`.
+ pub fn realpathZ(self: Dir, pathname: [*:0]const u8, out_buffer: []u8) ![]u8 {
+ if (builtin.os.tag == .windows) {
+ const pathname_w = try os.windows.cStrToPrefixedFileW(pathname);
+ return self.realpathW(pathname_w.span(), out_buffer);
+ }
+
+ const flags = if (builtin.os.tag == .linux) os.O_PATH | os.O_NONBLOCK | os.O_CLOEXEC else os.O_NONBLOCK | os.O_CLOEXEC;
+ const fd = os.openatZ(self.fd, pathname, flags, 0) catch |err| switch (err) {
+ error.FileLocksNotSupported => unreachable,
+ else => |e| return e,
+ };
+ defer os.close(fd);
+
+ // Use of MAX_PATH_BYTES here is valid as the realpath function does not
+ // have a variant that takes an arbitrary-size buffer.
+ // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
+ // NULL out parameter (GNU's canonicalize_file_name) to handle overelong
+ // paths. musl supports passing NULL but restricts the output to PATH_MAX
+ // anyway.
+ var buffer: [MAX_PATH_BYTES]u8 = undefined;
+ const out_path = try os.getFdPath(fd, &buffer);
+
+ if (out_path.len > out_buffer.len) {
+ return error.NameTooLong;
+ }
+
+ mem.copy(u8, out_buffer, out_path);
+
+ return out_buffer[0..out_path.len];
+ }
+
+ /// Windows-only. Same as `Dir.realpath` except `pathname` is WTF16 encoded.
+ /// See also `Dir.realpath`, `realpathW`.
+ pub fn realpathW(self: Dir, pathname: []const u16, out_buffer: []u8) ![]u8 {
+ const w = os.windows;
+
+ const access_mask = w.GENERIC_READ | w.SYNCHRONIZE;
+ const share_access = w.FILE_SHARE_READ;
+ const creation = w.FILE_OPEN;
+ const h_file = blk: {
+ const res = w.OpenFile(pathname, .{
+ .dir = self.fd,
+ .access_mask = access_mask,
+ .share_access = share_access,
+ .creation = creation,
+ .io_mode = .blocking,
+ }) catch |err| switch (err) {
+ error.IsDir => break :blk w.OpenFile(pathname, .{
+ .dir = self.fd,
+ .access_mask = access_mask,
+ .share_access = share_access,
+ .creation = creation,
+ .io_mode = .blocking,
+ .open_dir = true,
+ }) catch |er| switch (er) {
+ error.WouldBlock => unreachable,
+ else => |e2| return e2,
+ },
+ error.WouldBlock => unreachable,
+ else => |e| return e,
+ };
+ break :blk res;
+ };
+ defer w.CloseHandle(h_file);
+
+ // Use of MAX_PATH_BYTES here is valid as the realpath function does not
+ // have a variant that takes an arbitrary-size buffer.
+ // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
+ // NULL out parameter (GNU's canonicalize_file_name) to handle overelong
+ // paths. musl supports passing NULL but restricts the output to PATH_MAX
+ // anyway.
+ var buffer: [MAX_PATH_BYTES]u8 = undefined;
+ const out_path = try os.getFdPath(h_file, &buffer);
+
+ if (out_path.len > out_buffer.len) {
+ return error.NameTooLong;
+ }
+
+ mem.copy(u8, out_buffer, out_path);
+
+ return out_buffer[0..out_path.len];
+ }
+
+ /// Same as `Dir.realpath` except caller must free the returned memory.
+ /// See also `Dir.realpath`.
+ pub fn realpathAlloc(self: Dir, allocator: *Allocator, pathname: []const u8) ![]u8 {
+ // Use of MAX_PATH_BYTES here is valid as the realpath function does not
+ // have a variant that takes an arbitrary-size buffer.
+ // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
+ // NULL out parameter (GNU's canonicalize_file_name) to handle overelong
+ // paths. musl supports passing NULL but restricts the output to PATH_MAX
+ // anyway.
+ var buf: [MAX_PATH_BYTES]u8 = undefined;
+ return allocator.dupe(u8, try self.realpath(pathname, buf[0..]));
+ }
+
/// Changes the current working directory to the open directory handle.
/// This modifies global state and can have surprising effects in multi-
/// threaded applications. Most applications and especially libraries should
@@ -2060,7 +2177,7 @@ pub fn selfExeDirPath(out_buffer: []u8) SelfExePathError![]const u8 {
}
/// `realpath`, except caller must free the returned memory.
-/// TODO integrate with `Dir`
+/// See also `Dir.realpath`.
pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
// Use of MAX_PATH_BYTES here is valid as the realpath function does not
// have a variant that takes an arbitrary-size buffer.
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 26d8632c37..9e6f4bb3ac 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -109,17 +109,57 @@ test "Dir.Iterator" {
testing.expect(contains(&entries, Dir.Entry{ .name = "some_dir", .kind = Dir.Entry.Kind.Directory }));
}
-fn entry_eql(lhs: Dir.Entry, rhs: Dir.Entry) bool {
+fn entryEql(lhs: Dir.Entry, rhs: Dir.Entry) bool {
return mem.eql(u8, lhs.name, rhs.name) and lhs.kind == rhs.kind;
}
fn contains(entries: *const std.ArrayList(Dir.Entry), el: Dir.Entry) bool {
for (entries.items) |entry| {
- if (entry_eql(entry, el)) return true;
+ if (entryEql(entry, el)) return true;
}
return false;
}
+test "Dir.realpath smoke test" {
+ switch (builtin.os.tag) {
+ .linux, .windows, .macosx, .ios, .watchos, .tvos => {},
+ else => return error.SkipZigTest,
+ }
+
+ var tmp_dir = tmpDir(.{});
+ defer tmp_dir.cleanup();
+
+ var file = try tmp_dir.dir.createFile("test_file", .{ .lock = File.Lock.Shared });
+ // We need to close the file immediately as otherwise on Windows we'll end up
+ // with a sharing violation.
+ file.close();
+
+ var arena = ArenaAllocator.init(testing.allocator);
+ defer arena.deinit();
+
+ const base_path = blk: {
+ const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
+ break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ };
+
+ // First, test non-alloc version
+ {
+ var buf1: [fs.MAX_PATH_BYTES]u8 = undefined;
+ const file_path = try tmp_dir.dir.realpath("test_file", buf1[0..]);
+ const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" });
+
+ testing.expect(mem.eql(u8, file_path, expected_path));
+ }
+
+ // Next, test alloc version
+ {
+ const file_path = try tmp_dir.dir.realpathAlloc(&arena.allocator, "test_file");
+ const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" });
+
+ testing.expect(mem.eql(u8, file_path, expected_path));
+ }
+}
+
test "readAllAlloc" {
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
@@ -167,12 +207,7 @@ test "directory operations on files" {
testing.expectError(error.NotDir, tmp_dir.dir.deleteDir(test_file_name));
if (builtin.os.tag != .wasi) {
- // TODO: use Dir's realpath function once that exists
- const absolute_path = blk: {
- const relative_path = try fs.path.join(testing.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..], test_file_name });
- defer testing.allocator.free(relative_path);
- break :blk try fs.realpathAlloc(testing.allocator, relative_path);
- };
+ const absolute_path = try tmp_dir.dir.realpathAlloc(testing.allocator, test_file_name);
defer testing.allocator.free(absolute_path);
testing.expectError(error.PathAlreadyExists, fs.makeDirAbsolute(absolute_path));
@@ -206,12 +241,7 @@ test "file operations on directories" {
testing.expectError(error.IsDir, tmp_dir.dir.openFile(test_dir_name, .{ .write = true }));
if (builtin.os.tag != .wasi) {
- // TODO: use Dir's realpath function once that exists
- const absolute_path = blk: {
- const relative_path = try fs.path.join(testing.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..], test_dir_name });
- defer testing.allocator.free(relative_path);
- break :blk try fs.realpathAlloc(testing.allocator, relative_path);
- };
+ const absolute_path = try tmp_dir.dir.realpathAlloc(testing.allocator, test_dir_name);
defer testing.allocator.free(absolute_path);
testing.expectError(error.IsDir, fs.createFileAbsolute(absolute_path, .{}));
diff --git a/lib/std/os.zig b/lib/std/os.zig
index ae2b232ef7..123dfc9747 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -4025,23 +4025,15 @@ pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealP
const pathname_w = try windows.cStrToPrefixedFileW(pathname);
return realpathW(pathname_w.span(), out_buffer);
}
- if (builtin.os.tag == .linux and !builtin.link_libc) {
- const fd = openZ(pathname, linux.O_PATH | linux.O_NONBLOCK | linux.O_CLOEXEC, 0) catch |err| switch (err) {
+ if (!builtin.link_libc) {
+ const flags = if (builtin.os.tag == .linux) O_PATH | O_NONBLOCK | O_CLOEXEC else O_NONBLOCK | O_CLOEXEC;
+ const fd = openZ(pathname, flags, 0) catch |err| switch (err) {
error.FileLocksNotSupported => unreachable,
else => |e| return e,
};
defer close(fd);
- var procfs_buf: ["/proc/self/fd/-2147483648".len:0]u8 = undefined;
- const proc_path = std.fmt.bufPrint(procfs_buf[0..], "/proc/self/fd/{}\x00", .{fd}) catch unreachable;
-
- const target = readlinkZ(@ptrCast([*:0]const u8, proc_path.ptr), out_buffer) catch |err| {
- switch (err) {
- error.UnsupportedReparsePointType => unreachable, // Windows only,
- else => |e| return e,
- }
- };
- return target;
+ return getFdPath(fd, out_buffer);
}
const result_path = std.c.realpath(pathname, out_buffer) orelse switch (std.c._errno().*) {
EINVAL => unreachable,
@@ -4093,12 +4085,51 @@ pub fn realpathW(pathname: []const u16, out_buffer: *[MAX_PATH_BYTES]u8) RealPat
};
defer w.CloseHandle(h_file);
- var wide_buf: [w.PATH_MAX_WIDE]u16 = undefined;
- const wide_slice = try w.GetFinalPathNameByHandle(h_file, .{}, wide_buf[0..]);
+ return getFdPath(h_file, out_buffer);
+}
- // Trust that Windows gives us valid UTF-16LE.
- const end_index = std.unicode.utf16leToUtf8(out_buffer, wide_slice) catch unreachable;
- return out_buffer[0..end_index];
+/// Return canonical path of handle `fd`.
+/// This function is very host-specific and is not universally supported by all hosts.
+/// For example, while it generally works on Linux, macOS or Windows, it is unsupported
+/// on FreeBSD, or WASI.
+pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
+ switch (builtin.os.tag) {
+ .windows => {
+ var wide_buf: [windows.PATH_MAX_WIDE]u16 = undefined;
+ const wide_slice = try windows.GetFinalPathNameByHandle(fd, .{}, wide_buf[0..]);
+
+ // Trust that Windows gives us valid UTF-16LE.
+ const end_index = std.unicode.utf16leToUtf8(out_buffer, wide_slice) catch unreachable;
+ return out_buffer[0..end_index];
+ },
+ .macosx, .ios, .watchos, .tvos => {
+ // On macOS, we can use F_GETPATH fcntl command to query the OS for
+ // the path to the file descriptor.
+ @memset(out_buffer, 0, MAX_PATH_BYTES);
+ switch (errno(system.fcntl(fd, F_GETPATH, out_buffer))) {
+ 0 => {},
+ EBADF => return error.FileNotFound,
+ // TODO man pages for fcntl on macOS don't really tell you what
+ // errno values to expect when command is F_GETPATH...
+ else => |err| return unexpectedErrno(err),
+ }
+ const len = mem.indexOfScalar(u8, out_buffer[0..], @as(u8, 0)) orelse MAX_PATH_BYTES;
+ return out_buffer[0..len];
+ },
+ .linux => {
+ var procfs_buf: ["/proc/self/fd/-2147483648".len:0]u8 = undefined;
+ const proc_path = std.fmt.bufPrint(procfs_buf[0..], "/proc/self/fd/{}\x00", .{fd}) catch unreachable;
+
+ const target = readlinkZ(@ptrCast([*:0]const u8, proc_path.ptr), out_buffer) catch |err| {
+ switch (err) {
+ error.UnsupportedReparsePointType => unreachable, // Windows only,
+ else => |e| return e,
+ }
+ };
+ return target;
+ },
+ else => @compileError("querying for canonical path of a handle is unsupported on this host"),
+ }
}
/// Spurious wakeups are possible and no precision of timing is guaranteed.
From cb06d62603c764a91aeb9bcedc0b9472d746f9e5 Mon Sep 17 00:00:00 2001
From: Koakuma
Date: Thu, 13 Aug 2020 18:30:38 +0700
Subject: [PATCH 064/153] Add "ppc" prefix to number-modeled CPUs (#6006)
This is to prevent "expected token 'Symbol', found 'IntLiteral'" errors
when building zig files.
---
lib/std/target/powerpc.zig | 56 +++++++++++++++++++-------------------
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/lib/std/target/powerpc.zig b/lib/std/target/powerpc.zig
index ffea7344fc..19b050c5bd 100644
--- a/lib/std/target/powerpc.zig
+++ b/lib/std/target/powerpc.zig
@@ -384,8 +384,8 @@ pub const all_features = blk: {
};
pub const cpu = struct {
- pub const @"440" = CpuModel{
- .name = "440",
+ pub const @"ppc440" = CpuModel{
+ .name = "ppc440",
.llvm_name = "440",
.features = featureSet(&[_]Feature{
.booke,
@@ -396,8 +396,8 @@ pub const cpu = struct {
.msync,
}),
};
- pub const @"450" = CpuModel{
- .name = "450",
+ pub const @"ppc450" = CpuModel{
+ .name = "ppc450",
.llvm_name = "450",
.features = featureSet(&[_]Feature{
.booke,
@@ -408,70 +408,70 @@ pub const cpu = struct {
.msync,
}),
};
- pub const @"601" = CpuModel{
- .name = "601",
+ pub const @"ppc601" = CpuModel{
+ .name = "ppc601",
.llvm_name = "601",
.features = featureSet(&[_]Feature{
.fpu,
}),
};
- pub const @"602" = CpuModel{
- .name = "602",
+ pub const @"ppc602" = CpuModel{
+ .name = "ppc602",
.llvm_name = "602",
.features = featureSet(&[_]Feature{
.fpu,
}),
};
- pub const @"603" = CpuModel{
- .name = "603",
+ pub const @"ppc603" = CpuModel{
+ .name = "ppc603",
.llvm_name = "603",
.features = featureSet(&[_]Feature{
.fres,
.frsqrte,
}),
};
- pub const @"603e" = CpuModel{
- .name = "603e",
+ pub const @"ppc603e" = CpuModel{
+ .name = "ppc603e",
.llvm_name = "603e",
.features = featureSet(&[_]Feature{
.fres,
.frsqrte,
}),
};
- pub const @"603ev" = CpuModel{
- .name = "603ev",
+ pub const @"ppc603ev" = CpuModel{
+ .name = "ppc603ev",
.llvm_name = "603ev",
.features = featureSet(&[_]Feature{
.fres,
.frsqrte,
}),
};
- pub const @"604" = CpuModel{
- .name = "604",
+ pub const @"ppc604" = CpuModel{
+ .name = "ppc604",
.llvm_name = "604",
.features = featureSet(&[_]Feature{
.fres,
.frsqrte,
}),
};
- pub const @"604e" = CpuModel{
- .name = "604e",
+ pub const @"ppc604e" = CpuModel{
+ .name = "ppc604e",
.llvm_name = "604e",
.features = featureSet(&[_]Feature{
.fres,
.frsqrte,
}),
};
- pub const @"620" = CpuModel{
- .name = "620",
+ pub const @"ppc620" = CpuModel{
+ .name = "ppc620",
.llvm_name = "620",
.features = featureSet(&[_]Feature{
.fres,
.frsqrte,
}),
};
- pub const @"7400" = CpuModel{
- .name = "7400",
+ pub const @"ppc7400" = CpuModel{
+ .name = "ppc7400",
.llvm_name = "7400",
.features = featureSet(&[_]Feature{
.altivec,
@@ -479,8 +479,8 @@ pub const cpu = struct {
.frsqrte,
}),
};
- pub const @"7450" = CpuModel{
- .name = "7450",
+ pub const @"ppc7450" = CpuModel{
+ .name = "ppc7450",
.llvm_name = "7450",
.features = featureSet(&[_]Feature{
.altivec,
@@ -488,16 +488,16 @@ pub const cpu = struct {
.frsqrte,
}),
};
- pub const @"750" = CpuModel{
- .name = "750",
+ pub const @"ppc750" = CpuModel{
+ .name = "ppc750",
.llvm_name = "750",
.features = featureSet(&[_]Feature{
.fres,
.frsqrte,
}),
};
- pub const @"970" = CpuModel{
- .name = "970",
+ pub const @"ppc970" = CpuModel{
+ .name = "ppc970",
.llvm_name = "970",
.features = featureSet(&[_]Feature{
.@"64bit",
From c5368ba20c79b62d658c71033df9aeb7aa6e4581 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Thu, 13 Aug 2020 15:27:29 +0300
Subject: [PATCH 065/153] translate-c: ensure generated labels are unique
---
src-self-hosted/translate_c.zig | 124 +++++++++++++++++---------------
test/translate_c.zig | 26 +++----
2 files changed, 81 insertions(+), 69 deletions(-)
diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig
index 8b1c6537f7..de8f633076 100644
--- a/src-self-hosted/translate_c.zig
+++ b/src-self-hosted/translate_c.zig
@@ -61,7 +61,8 @@ const Scope = struct {
pending_block: Block,
cases: []*ast.Node,
case_index: usize,
- has_default: bool = false,
+ switch_label: ?[]const u8,
+ default_label: ?[]const u8,
};
/// Used for the scope of condition expressions, for example `if (cond)`.
@@ -73,7 +74,7 @@ const Scope = struct {
fn getBlockScope(self: *Condition, c: *Context) !*Block {
if (self.block) |*b| return b;
- self.block = try Block.init(c, &self.base, "blk");
+ self.block = try Block.init(c, &self.base, true);
return &self.block.?;
}
@@ -93,21 +94,22 @@ const Scope = struct {
mangle_count: u32 = 0,
lbrace: ast.TokenIndex,
- fn init(c: *Context, parent: *Scope, label: ?[]const u8) !Block {
- return Block{
+ fn init(c: *Context, parent: *Scope, labeled: bool) !Block {
+ var blk = Block{
.base = .{
.id = .Block,
.parent = parent,
},
.statements = std.ArrayList(*ast.Node).init(c.gpa),
.variables = AliasList.init(c.gpa),
- .label = if (label) |l| blk: {
- const ll = try appendIdentifier(c, l);
- _ = try appendToken(c, .Colon, ":");
- break :blk ll;
- } else null,
+ .label = null,
.lbrace = try appendToken(c, .LBrace, "{"),
};
+ if (labeled) {
+ blk.label = try appendIdentifier(c, try blk.makeMangledName(c, "blk"));
+ _ = try appendToken(c, .Colon, ":");
+ }
+ return blk;
}
fn deinit(self: *Block) void {
@@ -577,7 +579,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const ZigClangFunctionDecl) Error!void {
// actual function definition with body
const body_stmt = ZigClangFunctionDecl_getBody(fn_decl);
- var block_scope = try Scope.Block.init(rp.c, &c.global_scope.base, null);
+ var block_scope = try Scope.Block.init(rp.c, &c.global_scope.base, false);
defer block_scope.deinit();
var scope = &block_scope.base;
@@ -1307,7 +1309,7 @@ fn transBinaryOperator(
const rhs = try transExpr(rp, &block_scope.base, ZigClangBinaryOperator_getRHS(stmt), .used, .r_value);
if (expr) {
_ = try appendToken(rp.c, .Semicolon, ";");
- const break_node = try transCreateNodeBreakToken(rp.c, block_scope.label, rhs);
+ const break_node = try transCreateNodeBreak(rp.c, block_scope.label, rhs);
try block_scope.statements.append(&break_node.base);
const block_node = try block_scope.complete(rp.c);
const rparen = try appendToken(rp.c, .RParen, ")");
@@ -1476,7 +1478,7 @@ fn transCompoundStmtInline(
}
fn transCompoundStmt(rp: RestorePoint, scope: *Scope, stmt: *const ZigClangCompoundStmt) TransError!*ast.Node {
- var block_scope = try Scope.Block.init(rp.c, scope, null);
+ var block_scope = try Scope.Block.init(rp.c, scope, false);
defer block_scope.deinit();
try transCompoundStmtInline(rp, &block_scope.base, stmt, &block_scope);
const node = try block_scope.complete(rp.c);
@@ -2587,7 +2589,7 @@ fn transForLoop(
defer if (block_scope) |*bs| bs.deinit();
if (ZigClangForStmt_getInit(stmt)) |init| {
- block_scope = try Scope.Block.init(rp.c, scope, null);
+ block_scope = try Scope.Block.init(rp.c, scope, false);
loop_scope.parent = &block_scope.?.base;
const init_node = try transStmt(rp, &block_scope.?.base, init, .unused, .r_value);
try block_scope.?.statements.append(init_node);
@@ -2673,17 +2675,19 @@ fn transSwitch(
.cases = switch_node.cases(),
.case_index = 0,
.pending_block = undefined,
+ .default_label = null,
+ .switch_label = null,
};
// tmp block that all statements will go before being picked up by a case or default
- var block_scope = try Scope.Block.init(rp.c, &switch_scope.base, null);
+ var block_scope = try Scope.Block.init(rp.c, &switch_scope.base, false);
defer block_scope.deinit();
// Note that we do not defer a deinit here; the switch_scope.pending_block field
// has its own memory management. This resource is freed inside `transCase` and
// then the final pending_block is freed at the bottom of this function with
// pending_block.deinit().
- switch_scope.pending_block = try Scope.Block.init(rp.c, scope, null);
+ switch_scope.pending_block = try Scope.Block.init(rp.c, scope, false);
try switch_scope.pending_block.statements.append(&switch_node.base);
const last = try transStmt(rp, &block_scope.base, ZigClangSwitchStmt_getBody(stmt), .unused, .r_value);
@@ -2698,11 +2702,19 @@ fn transSwitch(
switch_scope.pending_block.statements.appendAssumeCapacity(n);
}
- switch_scope.pending_block.label = try appendIdentifier(rp.c, "__switch");
- _ = try appendToken(rp.c, .Colon, ":");
- if (!switch_scope.has_default) {
+ if (switch_scope.default_label == null) {
+ switch_scope.switch_label = try block_scope.makeMangledName(rp.c, "switch");
+ }
+ if (switch_scope.switch_label) |l| {
+ switch_scope.pending_block.label = try appendIdentifier(rp.c, l);
+ _ = try appendToken(rp.c, .Colon, ":");
+ }
+ if (switch_scope.default_label == null) {
const else_prong = try transCreateNodeSwitchCase(rp.c, try transCreateNodeSwitchElse(rp.c));
- else_prong.expr = &(try transCreateNodeBreak(rp.c, "__switch", null)).base;
+ else_prong.expr = blk: {
+ var br = try CtrlFlow.init(rp.c, .Break, switch_scope.switch_label.?);
+ break :blk &(try br.finish(null)).base;
+ };
_ = try appendToken(rp.c, .Comma, ",");
if (switch_scope.case_index >= switch_scope.cases.len)
@@ -2726,7 +2738,7 @@ fn transCase(
) TransError!*ast.Node {
const block_scope = scope.findBlockScope(rp.c) catch unreachable;
const switch_scope = scope.getSwitch();
- const label = try std.fmt.allocPrint(rp.c.arena, "__case_{}", .{switch_scope.case_index - @boolToInt(switch_scope.has_default)});
+ const label = try block_scope.makeMangledName(rp.c, "case");
_ = try appendToken(rp.c, .Semicolon, ";");
const expr = if (ZigClangCaseStmt_getRHS(stmt)) |rhs| blk: {
@@ -2746,7 +2758,10 @@ fn transCase(
try transExpr(rp, scope, ZigClangCaseStmt_getLHS(stmt), .used, .r_value);
const switch_prong = try transCreateNodeSwitchCase(rp.c, expr);
- switch_prong.expr = &(try transCreateNodeBreak(rp.c, label, null)).base;
+ switch_prong.expr = blk: {
+ var br = try CtrlFlow.init(rp.c, .Break, label);
+ break :blk &(try br.finish(null)).base;
+ };
_ = try appendToken(rp.c, .Comma, ",");
if (switch_scope.case_index >= switch_scope.cases.len)
@@ -2763,7 +2778,7 @@ fn transCase(
const pending_node = try switch_scope.pending_block.complete(rp.c);
switch_scope.pending_block.deinit();
- switch_scope.pending_block = try Scope.Block.init(rp.c, scope, null);
+ switch_scope.pending_block = try Scope.Block.init(rp.c, scope, false);
try switch_scope.pending_block.statements.append(&pending_node.base);
@@ -2777,12 +2792,14 @@ fn transDefault(
) TransError!*ast.Node {
const block_scope = scope.findBlockScope(rp.c) catch unreachable;
const switch_scope = scope.getSwitch();
- const label = "__default";
- switch_scope.has_default = true;
+ switch_scope.default_label = try block_scope.makeMangledName(rp.c, "default");
_ = try appendToken(rp.c, .Semicolon, ";");
const else_prong = try transCreateNodeSwitchCase(rp.c, try transCreateNodeSwitchElse(rp.c));
- else_prong.expr = &(try transCreateNodeBreak(rp.c, label, null)).base;
+ else_prong.expr = blk: {
+ var br = try CtrlFlow.init(rp.c, .Break, switch_scope.default_label.?);
+ break :blk &(try br.finish(null)).base;
+ };
_ = try appendToken(rp.c, .Comma, ",");
if (switch_scope.case_index >= switch_scope.cases.len)
@@ -2790,7 +2807,7 @@ fn transDefault(
switch_scope.cases[switch_scope.case_index] = &else_prong.base;
switch_scope.case_index += 1;
- switch_scope.pending_block.label = try appendIdentifier(rp.c, label);
+ switch_scope.pending_block.label = try appendIdentifier(rp.c, switch_scope.default_label.?);
_ = try appendToken(rp.c, .Colon, ":");
// take all pending statements
@@ -2799,7 +2816,7 @@ fn transDefault(
const pending_node = try switch_scope.pending_block.complete(rp.c);
switch_scope.pending_block.deinit();
- switch_scope.pending_block = try Scope.Block.init(rp.c, scope, null);
+ switch_scope.pending_block = try Scope.Block.init(rp.c, scope, false);
try switch_scope.pending_block.statements.append(&pending_node.base);
return transStmt(rp, scope, ZigClangDefaultStmt_getSubStmt(stmt), .unused, .r_value);
@@ -2894,7 +2911,7 @@ fn transStmtExpr(rp: RestorePoint, scope: *Scope, stmt: *const ZigClangStmtExpr,
return transCompoundStmt(rp, scope, comp);
}
const lparen = try appendToken(rp.c, .LParen, "(");
- var block_scope = try Scope.Block.init(rp.c, scope, "blk");
+ var block_scope = try Scope.Block.init(rp.c, scope, true);
defer block_scope.deinit();
var it = ZigClangCompoundStmt_body_begin(comp);
@@ -3209,7 +3226,7 @@ fn transCreatePreCrement(
// zig: _ref.* += 1;
// zig: break :blk _ref.*
// zig: })
- var block_scope = try Scope.Block.init(rp.c, scope, "blk");
+ var block_scope = try Scope.Block.init(rp.c, scope, true);
defer block_scope.deinit();
const ref = try block_scope.makeMangledName(rp.c, "ref");
@@ -3239,7 +3256,7 @@ fn transCreatePreCrement(
const assign = try transCreateNodeInfixOp(rp, scope, ref_node, op, token, one, .used, false);
try block_scope.statements.append(assign);
- const break_node = try transCreateNodeBreakToken(rp.c, block_scope.label, ref_node);
+ const break_node = try transCreateNodeBreak(rp.c, block_scope.label, ref_node);
try block_scope.statements.append(&break_node.base);
const block_node = try block_scope.complete(rp.c);
// semicolon must immediately follow rbrace because it is the last token in a block
@@ -3283,7 +3300,7 @@ fn transCreatePostCrement(
// zig: _ref.* += 1;
// zig: break :blk _tmp
// zig: })
- var block_scope = try Scope.Block.init(rp.c, scope, "blk");
+ var block_scope = try Scope.Block.init(rp.c, scope, true);
defer block_scope.deinit();
const ref = try block_scope.makeMangledName(rp.c, "ref");
@@ -3458,7 +3475,7 @@ fn transCreateCompoundAssign(
// zig: _ref.* = _ref.* + rhs;
// zig: break :blk _ref.*
// zig: })
- var block_scope = try Scope.Block.init(rp.c, scope, "blk");
+ var block_scope = try Scope.Block.init(rp.c, scope, true);
defer block_scope.deinit();
const ref = try block_scope.makeMangledName(rp.c, "ref");
@@ -3526,7 +3543,7 @@ fn transCreateCompoundAssign(
try block_scope.statements.append(assign);
}
- const break_node = try transCreateNodeBreakToken(rp.c, block_scope.label, ref_node);
+ const break_node = try transCreateNodeBreak(rp.c, block_scope.label, ref_node);
try block_scope.statements.append(&break_node.base);
const block_node = try block_scope.complete(rp.c);
const grouped_expr = try rp.c.arena.create(ast.Node.GroupedExpression);
@@ -3602,8 +3619,16 @@ fn transCPtrCast(
fn transBreak(rp: RestorePoint, scope: *Scope) TransError!*ast.Node {
const break_scope = scope.getBreakableScope();
- const label_text: ?[]const u8 = if (break_scope.id == .Switch) "__switch" else null;
- const br = try transCreateNodeBreak(rp.c, label_text, null);
+ const label_text: ?[]const u8 = if (break_scope.id == .Switch) blk: {
+ const swtch = @fieldParentPtr(Scope.Switch, "base", break_scope);
+ const block_scope = try scope.findBlockScope(rp.c);
+ swtch.switch_label = try block_scope.makeMangledName(rp.c, "switch");
+ break :blk swtch.switch_label;
+ } else
+ null;
+
+ var cf = try CtrlFlow.init(rp.c, .Break, label_text);
+ const br = try cf.finish(null);
_ = try appendToken(rp.c, .Semicolon, ";");
return &br.base;
}
@@ -3634,7 +3659,7 @@ fn transBinaryConditionalOperator(rp: RestorePoint, scope: *Scope, stmt: *const
// })
const lparen = try appendToken(rp.c, .LParen, "(");
- var block_scope = try Scope.Block.init(rp.c, scope, "blk");
+ var block_scope = try Scope.Block.init(rp.c, scope, true);
defer block_scope.deinit();
const mangled_name = try block_scope.makeMangledName(rp.c, "cond_temp");
@@ -4082,8 +4107,7 @@ fn transCreateNodeAssign(
// zig: lhs = _tmp;
// zig: break :blk _tmp
// zig: })
- const label_name = "blk";
- var block_scope = try Scope.Block.init(rp.c, scope, label_name);
+ var block_scope = try Scope.Block.init(rp.c, scope, true);
defer block_scope.deinit();
const tmp = try block_scope.makeMangledName(rp.c, "tmp");
@@ -4118,7 +4142,7 @@ fn transCreateNodeAssign(
try block_scope.statements.append(assign);
const break_node = blk: {
- var tmp_ctrl_flow = try CtrlFlow.init(rp.c, .Break, label_name);
+ var tmp_ctrl_flow = try CtrlFlow.init(rp.c, .Break, tokenSlice(rp.c, block_scope.label.?));
const rhs_expr = try transCreateNodeIdentifier(rp.c, tmp);
break :blk try tmp_ctrl_flow.finish(rhs_expr);
};
@@ -4495,23 +4519,12 @@ fn transCreateNodeElse(c: *Context) !*ast.Node.Else {
return node;
}
-fn transCreateNodeBreakToken(
+fn transCreateNodeBreak(
c: *Context,
label: ?ast.TokenIndex,
rhs: ?*ast.Node,
) !*ast.Node.ControlFlowExpression {
- const other_token = label orelse return transCreateNodeBreak(c, null, rhs);
- const loc = c.token_locs.items[other_token];
- const label_name = c.source_buffer.items[loc.start..loc.end];
- return transCreateNodeBreak(c, label_name, rhs);
-}
-
-fn transCreateNodeBreak(
- c: *Context,
- label: ?[]const u8,
- rhs: ?*ast.Node,
-) !*ast.Node.ControlFlowExpression {
- var ctrl_flow = try CtrlFlow.init(c, .Break, label);
+ var ctrl_flow = try CtrlFlow.init(c, .Break, if (label) |l| tokenSlice(c, l) else null);
return ctrl_flow.finish(rhs);
}
@@ -5362,7 +5375,7 @@ fn transMacroDefine(c: *Context, m: *MacroCtx) ParseError!void {
}
fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
- var block_scope = try Scope.Block.init(c, &c.global_scope.base, null);
+ var block_scope = try Scope.Block.init(c, &c.global_scope.base, false);
defer block_scope.deinit();
const scope = &block_scope.base;
@@ -5475,8 +5488,7 @@ fn parseCExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!*ast.Node {
},
.Comma => {
_ = try appendToken(c, .Semicolon, ";");
- const label_name = "blk";
- var block_scope = try Scope.Block.init(c, scope, label_name);
+ var block_scope = try Scope.Block.init(c, scope, true);
defer block_scope.deinit();
var last = node;
@@ -5501,7 +5513,7 @@ fn parseCExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!*ast.Node {
}
}
- const break_node = try transCreateNodeBreak(c, label_name, last);
+ const break_node = try transCreateNodeBreak(c, block_scope.label, last);
try block_scope.statements.append(&break_node.base);
const block_node = try block_scope.complete(c);
return &block_node.base;
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 38faffe747..c632700bc5 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -1730,16 +1730,16 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub export fn switch_fn(arg_i: c_int) c_int {
\\ var i = arg_i;
\\ var res: c_int = 0;
- \\ __switch: {
- \\ __case_2: {
- \\ __default: {
- \\ __case_1: {
- \\ __case_0: {
+ \\ @"switch": {
+ \\ case_2: {
+ \\ default: {
+ \\ case_1: {
+ \\ case: {
\\ switch (i) {
- \\ @as(c_int, 0) => break :__case_0,
- \\ @as(c_int, 1)...@as(c_int, 3) => break :__case_1,
- \\ else => break :__default,
- \\ @as(c_int, 4) => break :__case_2,
+ \\ @as(c_int, 0) => break :case,
+ \\ @as(c_int, 1)...@as(c_int, 3) => break :case_1,
+ \\ else => break :default,
+ \\ @as(c_int, 4) => break :case_2,
\\ }
\\ }
\\ res = 1;
@@ -1747,7 +1747,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ res = 2;
\\ }
\\ res = (@as(c_int, 3) * i);
- \\ break :__switch;
+ \\ break :@"switch";
\\ }
\\ res = 5;
\\ }
@@ -2782,11 +2782,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ var x = arg_x;
\\ return blk: {
\\ const tmp = x;
- \\ (blk: {
+ \\ (blk_1: {
\\ const ref = &p;
- \\ const tmp_1 = ref.*;
+ \\ const tmp_2 = ref.*;
\\ ref.* += 1;
- \\ break :blk tmp_1;
+ \\ break :blk_1 tmp_2;
\\ }).?.* = tmp;
\\ break :blk tmp;
\\ };
From a8e0f667c64bc6413a2c10ac76cd4e3000ceaf1b Mon Sep 17 00:00:00 2001
From: heidezomp
Date: Thu, 13 Aug 2020 16:50:38 +0200
Subject: [PATCH 066/153] std.log: (breaking) remove scope parameter from
logging functions
The logging functions in std.log don't take a scope parameter anymore,
but use the .default scope. To provide your own scope, use the logging
functions in std.log.scoped(.some_other_scope).
As per nmichaels' suggestion: https://github.com/ziglang/zig/pull/6039#issuecomment-673148971
---
lib/std/log.zig | 123 +++++++++++++++---------------------------------
1 file changed, 37 insertions(+), 86 deletions(-)
diff --git a/lib/std/log.zig b/lib/std/log.zig
index 9a0dcecc05..706851d0be 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -129,92 +129,6 @@ fn log(
}
}
-/// Log an emergency message. This log level is intended to be used
-/// for conditions that cannot be handled and is usually followed by a panic.
-pub fn emerg(
- comptime scope: @Type(.EnumLiteral),
- comptime format: []const u8,
- args: anytype,
-) void {
- @setCold(true);
- log(.emerg, scope, format, args);
-}
-
-/// Log an alert message. This log level is intended to be used for
-/// conditions that should be corrected immediately (e.g. database corruption).
-pub fn alert(
- comptime scope: @Type(.EnumLiteral),
- comptime format: []const u8,
- args: anytype,
-) void {
- @setCold(true);
- log(.alert, scope, format, args);
-}
-
-/// Log a critical message. This log level is intended to be used
-/// when a bug has been detected or something has gone wrong and it will have
-/// an effect on the operation of the program.
-pub fn crit(
- comptime scope: @Type(.EnumLiteral),
- comptime format: []const u8,
- args: anytype,
-) void {
- @setCold(true);
- log(.crit, scope, format, args);
-}
-
-/// Log an error message. This log level is intended to be used when
-/// a bug has been detected or something has gone wrong but it is recoverable.
-pub fn err(
- comptime scope: @Type(.EnumLiteral),
- comptime format: []const u8,
- args: anytype,
-) void {
- @setCold(true);
- log(.err, scope, format, args);
-}
-
-/// Log a warning message. This log level is intended to be used if
-/// it is uncertain whether something has gone wrong or not, but the
-/// circumstances would be worth investigating.
-pub fn warn(
- comptime scope: @Type(.EnumLiteral),
- comptime format: []const u8,
- args: anytype,
-) void {
- log(.warn, scope, format, args);
-}
-
-/// Log a notice message. This log level is intended to be used for
-/// non-error but significant conditions.
-pub fn notice(
- comptime scope: @Type(.EnumLiteral),
- comptime format: []const u8,
- args: anytype,
-) void {
- log(.notice, scope, format, args);
-}
-
-/// Log an info message. This log level is intended to be used for
-/// general messages about the state of the program.
-pub fn info(
- comptime scope: @Type(.EnumLiteral),
- comptime format: []const u8,
- args: anytype,
-) void {
- log(.info, scope, format, args);
-}
-
-/// Log a debug message. This log level is intended to be used for
-/// messages which are only useful for debugging.
-pub fn debug(
- comptime scope: @Type(.EnumLiteral),
- comptime format: []const u8,
- args: anytype,
-) void {
- log(.debug, scope, format, args);
-}
-
/// Returns a scoped logging namespace that logs all messages using the scope
/// provided here.
pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
@@ -301,3 +215,40 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
/// The default scoped logging namespace.
pub const default = scoped(.default);
+
+/// Log an emergency message using the default scope. This log level is
+/// intended to be used for conditions that cannot be handled and is usually
+/// followed by a panic.
+pub const emerg = default.emerg;
+
+/// Log an alert message using the default scope. This log level is intended to
+/// be used for conditions that should be corrected immediately (e.g. database
+/// corruption).
+pub const alert = default.alert;
+
+/// Log a critical message using the default scope. This log level is intended
+/// to be used when a bug has been detected or something has gone wrong and it
+/// will have an effect on the operation of the program.
+pub const crit = default.crit;
+
+/// Log an error message using the default scope. This log level is intended to
+/// be used when a bug has been detected or something has gone wrong but it is
+/// recoverable.
+pub const err = default.err;
+
+/// Log a warning message using the default scope. This log level is intended
+/// to be used if it is uncertain whether something has gone wrong or not, but
+/// the circumstances would be worth investigating.
+pub const warn = default.warn;
+
+/// Log a notice message using the default scope. This log level is intended to
+/// be used for non-error but significant conditions.
+pub const notice = default.notice;
+
+/// Log an info message using the default scope. This log level is intended to
+/// be used for general messages about the state of the program.
+pub const info = default.info;
+
+/// Log a debug message using the default scope. This log level is intended to
+/// be used for messages which are only useful for debugging.
+pub const debug = default.debug;
From 2439f67061b007197bdd1b0037ffe4ba7b520024 Mon Sep 17 00:00:00 2001
From: heidezomp
Date: Thu, 13 Aug 2020 17:12:16 +0200
Subject: [PATCH 067/153] std.log: update documentation and example for scoped
logging
---
doc/langref.html.in | 2 +-
lib/std/log.zig | 46 +++++++++++++++++++++++----------------------
2 files changed, 25 insertions(+), 23 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index cfde67d622..c9694ceb52 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -325,7 +325,7 @@ pub fn main() !void {
represents writing data to a file. When the disk is full, a write to the file will fail.
However, we typically do not expect writing text to the standard output to fail. To avoid having
to handle the failure case of printing to standard output, you can use alternate functions: the
- std.log function for proper logging or the std.debug.print function.
+ functions in std.log for proper logging or the std.debug.print function.
This documentation will use the latter option to print to standard error (stderr) and silently return
on failure. The next code sample, hello_again.zig demonstrates the use of
std.debug.print.
diff --git a/lib/std/log.zig b/lib/std/log.zig
index 706851d0be..8d5554aa46 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -6,12 +6,16 @@ const root = @import("root");
//! of programs and libraries using this interface to be formatted and filtered
//! by the implementer of the root.log function.
//!
-//! The scope parameter should be used to give context to the logging. For
-//! example, a library called 'libfoo' might use .libfoo as its scope.
-//! This parameter can either be passed explicitly to the logging functions
-//! provided here, or a scoped logging namespace can be created
-//! using the `log.scoped` function. If logging scopes are not relevant for
-//! your use case, the `log.default` scope namespace can be used.
+//! Each log message has an associated scope enum, which can be used to give
+//! context to the logging. The logging functions in std.log implicitly use a
+//! scope of .default.
+//!
+//! A logging namespace using a custom scope can be created using the
+//! std.log.scoped function, passing the scope as an argument; the logging
+//! functions in the resulting struct use the provided scope parameter.
+//! For example, a library called 'libfoo' might use
+//! `const log = std.log.scoped(.libfoo);` to use .libfoo as the scope of its
+//! log messages.
//!
//! An example root.log might look something like this:
//!
@@ -29,9 +33,9 @@ const root = @import("root");
//! args: anytype,
//! ) void {
//! // Ignore all non-critical logging from sources other than
-//! // .my_project and .nice_library
+//! // .my_project, .nice_library and .default
//! const scope_prefix = "(" ++ switch (scope) {
-//! .my_project, .nice_library => @tagName(scope),
+//! .my_project, .nice_library, .default => @tagName(scope),
//! else => if (@enumToInt(level) <= @enumToInt(std.log.Level.crit))
//! @tagName(scope)
//! else
@@ -48,26 +52,24 @@ const root = @import("root");
//! }
//!
//! pub fn main() void {
-//! // Using explicit scopes:
-//! // Won't be printed as log_level is .warn
-//! std.log.info(.my_project, "Starting up.", .{});
-//! std.log.err(.nice_library, "Something went very wrong, sorry.", .{});
-//! // Won't be printed as it gets filtered out by our log function
-//! std.log.err(.lib_that_logs_too_much, "Added 1 + 1", .{});
+//! // Using the default scope:
+//! std.log.info("Just a simple informational log message", .{}); // Won't be printed as log_level is .warn
+//! std.log.warn("Flux capacitor is starting to overheat", .{});
//!
-//! // Using a scoped logging namespace:
-//! const scoped_log = std.log.scoped(.my_project);
-//! scoped_log.alert("The scope for this message is implicitly .my_project", .{});
+//! // Using scoped logging:
+//! const my_project_log = std.log.scoped(.my_project);
+//! const nice_library_log = std.log.scoped(.nice_library);
+//! const verbose_lib_log = std.log.scoped(.verbose_lib);
//!
-//! // Using the default namespace:
-//! // Won't be printed as log_level is .warn
-//! std.log.default.info("I don't care about my namespace", .{});
+//! my_project_log.info("Starting up", .{}); // Won't be printed as log_level is .warn
+//! nice_library_log.err("Something went very wrong, sorry", .{});
+//! verbose_lib_log.err("Added 1 + 1: {}", .{1 + 1}); // Won't be printed as it gets filtered out by our log function
//! }
//! ```
//! Which produces the following output:
//! ```
-//! [err] (nice_library): Something went very wrong, sorry.
-//! [alert] (my_project): The scope for this message is implicitly .my_project
+//! [warn] (default): Flux capacitor is starting to overheat
+//! [err] (nice_library): Something went very wrong, sorry
//! ```
pub const Level = enum {
From 75eaf15740bc42167592f3bdd7205236828191c7 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Wed, 12 Aug 2020 16:00:29 +0300
Subject: [PATCH 068/153] stage2: add optional types
---
src-self-hosted/type.zig | 181 +++++++++++++++++++++++++++++++++------
1 file changed, 155 insertions(+), 26 deletions(-)
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index 8d643c2005..fbc34c27bc 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -70,6 +70,11 @@ pub const Type = extern union {
.single_mut_pointer => return .Pointer,
.single_const_pointer_to_comptime_int => return .Pointer,
.const_slice_u8 => return .Pointer,
+
+ .optional,
+ .optional_single_const_pointer,
+ .optional_single_mut_pointer,
+ => return .Optional,
}
}
@@ -179,9 +184,13 @@ pub const Type = extern union {
}
return true;
},
+ .Optional => {
+ if (a.tag() != b.tag())
+ return false;
+ return a.elemType().eql(b.elemType());
+ },
.Float,
.Struct,
- .Optional,
.ErrorUnion,
.ErrorSet,
.Enum,
@@ -241,9 +250,11 @@ pub const Type = extern union {
std.hash.autoHash(&hasher, self.fnParamType(i).hash());
}
},
+ .Optional => {
+ std.hash.autoHash(&hasher, self.elemType().hash());
+ },
.Float,
.Struct,
- .Optional,
.ErrorUnion,
.ErrorSet,
.Enum,
@@ -317,24 +328,8 @@ pub const Type = extern union {
};
return Type{ .ptr_otherwise = &new_payload.base };
},
- .single_const_pointer => {
- const payload = @fieldParentPtr(Payload.SingleConstPointer, "base", self.ptr_otherwise);
- const new_payload = try allocator.create(Payload.SingleConstPointer);
- new_payload.* = .{
- .base = payload.base,
- .pointee_type = try payload.pointee_type.copy(allocator),
- };
- return Type{ .ptr_otherwise = &new_payload.base };
- },
- .single_mut_pointer => {
- const payload = @fieldParentPtr(Payload.SingleMutPointer, "base", self.ptr_otherwise);
- const new_payload = try allocator.create(Payload.SingleMutPointer);
- new_payload.* = .{
- .base = payload.base,
- .pointee_type = try payload.pointee_type.copy(allocator),
- };
- return Type{ .ptr_otherwise = &new_payload.base };
- },
+ .single_const_pointer => return self.copyPayloadSingleField(allocator, Payload.SingleConstPointer, "pointee_type"),
+ .single_mut_pointer => return self.copyPayloadSingleField(allocator, Payload.SingleMutPointer, "pointee_type"),
.int_signed => return self.copyPayloadShallow(allocator, Payload.IntSigned),
.int_unsigned => return self.copyPayloadShallow(allocator, Payload.IntUnsigned),
.function => {
@@ -352,6 +347,9 @@ pub const Type = extern union {
};
return Type{ .ptr_otherwise = &new_payload.base };
},
+ .optional => return self.copyPayloadSingleField(allocator, Payload.Optional, "child_type"),
+ .optional_single_mut_pointer => return self.copyPayloadSingleField(allocator, Payload.OptionalSingleMutPointer, "pointee_type"),
+ .optional_single_const_pointer => return self.copyPayloadSingleField(allocator, Payload.OptionalSingleConstPointer, "pointee_type"),
}
}
@@ -362,6 +360,14 @@ pub const Type = extern union {
return Type{ .ptr_otherwise = &new_payload.base };
}
+ fn copyPayloadSingleField(self: Type, allocator: *Allocator, comptime T: type, comptime field_name: []const u8) error{OutOfMemory}!Type {
+ const payload = @fieldParentPtr(T, "base", self.ptr_otherwise);
+ const new_payload = try allocator.create(T);
+ new_payload.base = payload.base;
+ @field(new_payload, field_name) = try @field(payload, field_name).copy(allocator);
+ return Type{ .ptr_otherwise = &new_payload.base };
+ }
+
pub fn format(
self: Type,
comptime fmt: []const u8,
@@ -456,6 +462,24 @@ pub const Type = extern union {
const payload = @fieldParentPtr(Payload.IntUnsigned, "base", ty.ptr_otherwise);
return out_stream.print("u{}", .{payload.bits});
},
+ .optional => {
+ const payload = @fieldParentPtr(Payload.Optional, "base", ty.ptr_otherwise);
+ try out_stream.writeByte('?');
+ ty = payload.child_type;
+ continue;
+ },
+ .optional_single_const_pointer => {
+ const payload = @fieldParentPtr(Payload.OptionalSingleConstPointer, "base", ty.ptr_otherwise);
+ try out_stream.writeAll("?*const ");
+ ty = payload.pointee_type;
+ continue;
+ },
+ .optional_single_mut_pointer => {
+ const payload = @fieldParentPtr(Payload.OptionalSingleMutPointer, "base", ty.ptr_otherwise);
+ try out_stream.writeAll("?*");
+ ty = payload.pointee_type;
+ continue;
+ },
}
unreachable;
}
@@ -545,11 +569,15 @@ pub const Type = extern union {
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.array_u8_sentinel_0,
- .array, // TODO check for zero bits
- .single_const_pointer,
- .single_mut_pointer,
- .int_signed, // TODO check for zero bits
- .int_unsigned, // TODO check for zero bits
+ // TODO lazy types
+ .array => self.elemType().hasCodeGenBits() and self.arrayLen() != 0,
+ .single_const_pointer => self.elemType().hasCodeGenBits(),
+ .single_mut_pointer => self.elemType().hasCodeGenBits(),
+ .int_signed => self.cast(Payload.IntSigned).?.bits == 0,
+ .int_unsigned => self.cast(Payload.IntUnsigned).?.bits == 0,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> true,
.c_void,
@@ -597,6 +625,8 @@ pub const Type = extern union {
.const_slice_u8,
.single_const_pointer,
.single_mut_pointer,
+ .optional_single_const_pointer,
+ .optional_single_mut_pointer,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8),
.c_short => return @divExact(CType.short.sizeInBits(target), 8),
@@ -629,6 +659,12 @@ pub const Type = extern union {
return std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8);
},
+ .optional => {
+ const child_type = self.cast(Payload.Optional).?.child_type;
+ if (!child_type.hasCodeGenBits()) return 1;
+ return child_type.abiAlignment(target);
+ },
+
.c_void,
.void,
.type,
@@ -679,6 +715,8 @@ pub const Type = extern union {
.const_slice_u8,
.single_const_pointer,
.single_mut_pointer,
+ .optional_single_const_pointer,
+ .optional_single_mut_pointer,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8),
.c_short => return @divExact(CType.short.sizeInBits(target), 8),
@@ -708,6 +746,16 @@ pub const Type = extern union {
return std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8);
},
+
+ .optional => {
+ const child_type = self.cast(Payload.Optional).?.child_type;
+ if (!child_type.hasCodeGenBits()) return 1;
+ // Optional types are represented as a struct with the child type as the first
+ // field and a boolean as the second. Since the child type's abi alignment is
+ // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
+ // to the child type's ABI alignment.
+ return child_type.abiAlignment(target) + child_type.abiSize(target);
+ },
};
}
@@ -756,6 +804,9 @@ pub const Type = extern union {
.function,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> false,
.single_const_pointer,
@@ -812,6 +863,9 @@ pub const Type = extern union {
.function,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> false,
.const_slice_u8 => true,
@@ -863,6 +917,9 @@ pub const Type = extern union {
.int_unsigned,
.int_signed,
.single_mut_pointer,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> false,
.single_const_pointer,
@@ -920,11 +977,14 @@ pub const Type = extern union {
.single_const_pointer,
.single_const_pointer_to_comptime_int,
.const_slice_u8,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> false,
};
}
- /// Asserts the type is a pointer or array type.
+ /// Asserts the type is a pointer, optional or array type.
pub fn elemType(self: Type) Type {
return switch (self.tag()) {
.u8,
@@ -974,6 +1034,9 @@ pub const Type = extern union {
.single_mut_pointer => self.cast(Payload.SingleMutPointer).?.pointee_type,
.array_u8_sentinel_0, .const_slice_u8 => Type.initTag(.u8),
.single_const_pointer_to_comptime_int => Type.initTag(.comptime_int),
+ .optional => self.cast(Payload.Optional).?.child_type,
+ .optional_single_mut_pointer => self.cast(Payload.OptionalSingleMutPointer).?.pointee_type,
+ .optional_single_const_pointer => self.cast(Payload.OptionalSingleConstPointer).?.pointee_type,
};
}
@@ -1024,6 +1087,9 @@ pub const Type = extern union {
.const_slice_u8,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> unreachable,
.array => self.cast(Payload.Array).?.len,
@@ -1078,6 +1144,9 @@ pub const Type = extern union {
.const_slice_u8,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> unreachable,
.array => return null,
@@ -1129,6 +1198,9 @@ pub const Type = extern union {
.u16,
.u32,
.u64,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> false,
.int_signed,
@@ -1184,6 +1256,9 @@ pub const Type = extern union {
.i16,
.i32,
.i64,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> false,
.int_unsigned,
@@ -1229,6 +1304,9 @@ pub const Type = extern union {
.single_const_pointer_to_comptime_int,
.array_u8_sentinel_0,
.const_slice_u8,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> unreachable,
.int_unsigned => .{ .signed = false, .bits = self.cast(Payload.IntUnsigned).?.bits },
@@ -1292,6 +1370,9 @@ pub const Type = extern union {
.i32,
.u64,
.i64,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> false,
.usize,
@@ -1384,6 +1465,9 @@ pub const Type = extern union {
.c_ulonglong,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> unreachable,
};
}
@@ -1442,6 +1526,9 @@ pub const Type = extern union {
.c_ulonglong,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> unreachable,
}
}
@@ -1499,6 +1586,9 @@ pub const Type = extern union {
.c_ulonglong,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> unreachable,
}
}
@@ -1556,6 +1646,9 @@ pub const Type = extern union {
.c_ulonglong,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> unreachable,
};
}
@@ -1610,6 +1703,9 @@ pub const Type = extern union {
.c_ulonglong,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> unreachable,
};
}
@@ -1664,6 +1760,9 @@ pub const Type = extern union {
.c_ulonglong,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> unreachable,
};
}
@@ -1718,6 +1817,9 @@ pub const Type = extern union {
.single_const_pointer_to_comptime_int,
.array_u8_sentinel_0,
.const_slice_u8,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> false,
};
}
@@ -1762,6 +1864,9 @@ pub const Type = extern union {
.array_u8_sentinel_0,
.const_slice_u8,
.c_void,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> return null,
.void => return Value.initTag(.void_value),
@@ -1851,6 +1956,9 @@ pub const Type = extern union {
.array,
.single_const_pointer,
.single_mut_pointer,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
=> return false,
};
}
@@ -1911,6 +2019,9 @@ pub const Type = extern union {
int_signed,
int_unsigned,
function,
+ optional,
+ optional_single_mut_pointer,
+ optional_single_const_pointer,
pub const last_no_payload_tag = Tag.const_slice_u8;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
@@ -1963,6 +2074,24 @@ pub const Type = extern union {
return_type: Type,
cc: std.builtin.CallingConvention,
};
+
+ pub const Optional = struct {
+ base: Payload = Payload{ .tag = .optional },
+
+ child_type: Type,
+ };
+
+ pub const OptionalSingleConstPointer = struct {
+ base: Payload = Payload{ .tag = .optional_single_const_pointer },
+
+ pointee_type: Type,
+ };
+
+ pub const OptionalSingleMutPointer = struct {
+ base: Payload = Payload{ .tag = .optional_single_mut_pointer },
+
+ pointee_type: Type,
+ };
};
};
From 5c1fe5861389462f309e3f0b69096a85f330dc20 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Wed, 12 Aug 2020 21:06:29 +0300
Subject: [PATCH 069/153] stage2: gen optional types
---
src-self-hosted/Module.zig | 18 ++++++++++++++++++
src-self-hosted/astgen.zig | 12 ++++++++++++
src-self-hosted/type.zig | 8 ++++----
src-self-hosted/zir.zig | 18 ++++++++++++++++++
src-self-hosted/zir_sema.zig | 29 +++++++++++++++++++++++++++++
5 files changed, 81 insertions(+), 4 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 6abd4f51e1..110fe05b8e 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -2476,6 +2476,24 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
}
assert(inst.ty.zigTypeTag() != .Undefined);
+ // null to ?T
+ if (dest_type.zigTypeTag() == .Optional and inst.ty.zigTypeTag() == .Null) {
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = inst.ty.onePossibleValue().? });
+ }
+
+ // T to ?T
+ if (dest_type.zigTypeTag() == .Optional) {
+ const child_type = dest_type.elemType();
+ if (inst.value()) |val| {
+ if (child_type.eql(inst.ty)) {
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
+ }
+ return self.fail(scope, inst.src, "TODO optional wrap {} to {}", .{ val, inst.ty });
+ } else if (child_type.eql(inst.ty)) {
+ return self.fail(scope, inst.src, "TODO optional wrap {}", .{inst.ty});
+ }
+ }
+
// *[N]T to []T
if (inst.ty.isSinglePointer() and dest_type.isSlice() and
(!inst.ty.isConstPtr() or dest_type.isConstPtr()))
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 1827d53043..57f399d696 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -105,6 +105,7 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.UndefinedLiteral => return rlWrap(mod, scope, rl, try undefLiteral(mod, scope, node.castTag(.UndefinedLiteral).?)),
.BoolLiteral => return rlWrap(mod, scope, rl, try boolLiteral(mod, scope, node.castTag(.BoolLiteral).?)),
.NullLiteral => return rlWrap(mod, scope, rl, try nullLiteral(mod, scope, node.castTag(.NullLiteral).?)),
+ .OptionalType => return rlWrap(mod, scope, rl, try optionalType(mod, scope, node.castTag(.OptionalType).?)),
else => return mod.failNode(scope, node, "TODO implement astgen.Expr for {}", .{@tagName(node.tag)}),
}
}
@@ -293,6 +294,17 @@ fn boolNot(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) InnerErr
return addZIRUnOp(mod, scope, src, .boolnot, operand);
}
+fn optionalType(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) InnerError!*zir.Inst {
+ const tree = scope.tree();
+ const src = tree.token_locs[node.op_token].start;
+ const meta_type = try addZIRInstConst(mod, scope, src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.type_type),
+ });
+ const operand = try expr(mod, scope, .{ .ty = meta_type }, node.rhs);
+ return addZIRUnOp(mod, scope, src, .optional_type, operand);
+}
+
/// Identifier token -> String (allocated in scope.arena())
pub fn identifierTokenString(mod: *Module, scope: *Scope, token: ast.TokenIndex) InnerError![]const u8 {
const tree = scope.tree();
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index fbc34c27bc..eadb91ffea 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -569,16 +569,16 @@ pub const Type = extern union {
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.array_u8_sentinel_0,
+ .optional,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
+ => true,
// TODO lazy types
.array => self.elemType().hasCodeGenBits() and self.arrayLen() != 0,
.single_const_pointer => self.elemType().hasCodeGenBits(),
.single_mut_pointer => self.elemType().hasCodeGenBits(),
.int_signed => self.cast(Payload.IntSigned).?.bits == 0,
.int_unsigned => self.cast(Payload.IntUnsigned).?.bits == 0,
- .optional,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
- => true,
.c_void,
.void,
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index ba0c05d587..c98fcf4a74 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -212,6 +212,8 @@ pub const Inst = struct {
@"unreachable",
/// Bitwise XOR. `^`
xor,
+ /// Create an optional type '?T'
+ optional_type,
pub fn Type(tag: Tag) type {
return switch (tag) {
@@ -240,6 +242,7 @@ pub const Inst = struct {
.typeof,
.single_const_ptr_type,
.single_mut_ptr_type,
+ .optional_type,
=> UnOp,
.add,
@@ -372,6 +375,7 @@ pub const Inst = struct {
.subwrap,
.typeof,
.xor,
+ .optional_type,
=> false,
.@"break",
@@ -2242,6 +2246,20 @@ const EmitZIR = struct {
std.debug.panic("TODO implement emitType for {}", .{ty});
}
},
+ .Optional => {
+ const inst = try self.arena.allocator.create(Inst.UnOp);
+ inst.* = .{
+ .base = .{
+ .src = src,
+ .tag = .optional_type,
+ },
+ .positionals = .{
+ .operand = (try self.emitType(src, ty.elemType())).inst,
+ },
+ .kw_args = .{},
+ };
+ return self.emitUnnamedDecl(&inst.base);
+ },
else => std.debug.panic("TODO implement emitType for {}", .{ty}),
},
}
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index e36487e8d5..e241caefc9 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -106,6 +106,7 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.isnonnull => return analyzeInstIsNonNull(mod, scope, old_inst.castTag(.isnonnull).?, false),
.boolnot => return analyzeInstBoolNot(mod, scope, old_inst.castTag(.boolnot).?),
.typeof => return analyzeInstTypeOf(mod, scope, old_inst.castTag(.typeof).?),
+ .optional_type => return analyzeInstOptionalType(mod, scope, old_inst.castTag(.optional_type).?),
}
}
@@ -620,6 +621,34 @@ fn analyzeInstIntType(mod: *Module, scope: *Scope, inttype: *zir.Inst.IntType) I
return mod.fail(scope, inttype.base.src, "TODO implement inttype", .{});
}
+fn analyzeInstOptionalType(mod: *Module, scope: *Scope, optional: *zir.Inst.UnOp) InnerError!*Inst {
+ const child_type = try resolveType(mod, scope, optional.positionals.operand);
+
+ return mod.constType(scope, optional.base.src, Type.initPayload(switch (child_type.tag()) {
+ .single_const_pointer => blk: {
+ const payload = try scope.arena().create(Type.Payload.OptionalSingleConstPointer);
+ payload.* = .{
+ .pointee_type = child_type.elemType(),
+ };
+ break :blk &payload.base;
+ },
+ .single_mut_pointer => blk: {
+ const payload = try scope.arena().create(Type.Payload.OptionalSingleMutPointer);
+ payload.* = .{
+ .pointee_type = child_type.elemType(),
+ };
+ break :blk &payload.base;
+ },
+ else => blk: {
+ const payload = try scope.arena().create(Type.Payload.Optional);
+ payload.* = .{
+ .child_type = child_type,
+ };
+ break :blk &payload.base;
+ },
+ }));
+}
+
fn analyzeInstFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*Inst {
const return_type = try resolveType(mod, scope, fntype.positionals.return_type);
From 4a40282391f0b92a83a6a8c269c27a32be92884a Mon Sep 17 00:00:00 2001
From: Vexu
Date: Wed, 12 Aug 2020 22:30:14 +0300
Subject: [PATCH 070/153] stage2: implement unwrap optional
---
src-self-hosted/Module.zig | 26 +++++++++++++++++++--
src-self-hosted/astgen.zig | 12 ++++++++++
src-self-hosted/codegen.zig | 10 ++++++++
src-self-hosted/ir.zig | 22 ++++++++++++++++++
src-self-hosted/zir.zig | 35 ++++++++++++++++++++++++++++
src-self-hosted/zir_sema.zig | 42 +++++++++++++++++++++++++++++++++-
test/stage2/compare_output.zig | 5 ++++
7 files changed, 149 insertions(+), 3 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 110fe05b8e..bbbcde9a71 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -2016,6 +2016,28 @@ pub fn addCall(
return &inst.base;
}
+pub fn addUnwrapOptional(
+ self: *Module,
+ block: *Scope.Block,
+ src: usize,
+ ty: Type,
+ operand: *Inst,
+ safety_check: bool,
+) !*Inst {
+ const inst = try block.arena.create(Inst.UnwrapOptional);
+ inst.* = .{
+ .base = .{
+ .tag = .unwrap_optional,
+ .ty = ty,
+ .src = src,
+ },
+ .operand = operand,
+ .safety_check = safety_check,
+ };
+ try block.instructions.append(self.gpa, &inst.base);
+ return &inst.base;
+}
+
pub fn constInst(self: *Module, scope: *Scope, src: usize, typed_value: TypedValue) !*Inst {
const const_inst = try scope.arena().create(Inst.Constant);
const_inst.* = .{
@@ -2488,9 +2510,9 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
if (child_type.eql(inst.ty)) {
return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
}
- return self.fail(scope, inst.src, "TODO optional wrap {} to {}", .{ val, inst.ty });
+ return self.fail(scope, inst.src, "TODO optional wrap {} to {}", .{ val, dest_type });
} else if (child_type.eql(inst.ty)) {
- return self.fail(scope, inst.src, "TODO optional wrap {}", .{inst.ty});
+ return self.fail(scope, inst.src, "TODO optional wrap {}", .{dest_type});
}
}
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 57f399d696..ed3f8ab1b6 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -106,6 +106,7 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.BoolLiteral => return rlWrap(mod, scope, rl, try boolLiteral(mod, scope, node.castTag(.BoolLiteral).?)),
.NullLiteral => return rlWrap(mod, scope, rl, try nullLiteral(mod, scope, node.castTag(.NullLiteral).?)),
.OptionalType => return rlWrap(mod, scope, rl, try optionalType(mod, scope, node.castTag(.OptionalType).?)),
+ .UnwrapOptional => return unwrapOptional(mod, scope, rl, node.castTag(.UnwrapOptional).?),
else => return mod.failNode(scope, node, "TODO implement astgen.Expr for {}", .{@tagName(node.tag)}),
}
}
@@ -305,6 +306,17 @@ fn optionalType(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) Inn
return addZIRUnOp(mod, scope, src, .optional_type, operand);
}
+fn unwrapOptional(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.SimpleSuffixOp) InnerError!*zir.Inst {
+ const tree = scope.tree();
+ const src = tree.token_locs[node.rtoken].start;
+
+ const operand = try expr(mod, scope, .lvalue, node.lhs);
+ const unwrapped_ptr = try addZIRInst(mod, scope, src, zir.Inst.UnwrapOptional, .{ .operand = operand }, .{});
+ if (rl == .lvalue) return unwrapped_ptr;
+
+ return rlWrap(mod, scope, rl, try addZIRUnOp(mod, scope, src, .deref, unwrapped_ptr));
+}
+
/// Identifier token -> String (allocated in scope.arena())
pub fn identifierTokenString(mod: *Module, scope: *Scope, token: ast.TokenIndex) InnerError![]const u8 {
const tree = scope.tree();
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 6e8ab34478..887126ba2b 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -668,6 +668,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.store => return self.genStore(inst.castTag(.store).?),
.sub => return self.genSub(inst.castTag(.sub).?),
.unreach => return MCValue{ .unreach = {} },
+ .unwrap_optional => return self.genUnwrapOptional(inst.castTag(.unwrap_optional).?),
}
}
@@ -817,6 +818,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
+ fn genUnwrapOptional(self: *Self, inst: *ir.Inst.UnwrapOptional) !MCValue {
+ // No side effects, so if it's unreferenced, do nothing.
+ if (inst.base.isUnused())
+ return MCValue.dead;
+ switch (arch) {
+ else => return self.fail(inst.base.src, "TODO implement unwrap optional for {}", .{self.target.cpu.arch}),
+ }
+ }
+
fn genLoad(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
const elem_ty = inst.base.ty;
if (!elem_ty.hasCodeGenBits())
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index f4262592de..93952a4214 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -82,6 +82,7 @@ pub const Inst = struct {
not,
floatcast,
intcast,
+ unwrap_optional,
pub fn Type(tag: Tag) type {
return switch (tag) {
@@ -124,6 +125,7 @@ pub const Inst = struct {
.condbr => CondBr,
.constant => Constant,
.loop => Loop,
+ .unwrap_optional => UnwrapOptional,
};
}
@@ -420,6 +422,26 @@ pub const Inst = struct {
}
};
+ pub const UnwrapOptional = struct {
+ pub const base_tag = Tag.unwrap_optional;
+ base: Inst,
+
+ operand: *Inst,
+ safety_check: bool,
+
+ pub fn operandCount(self: *const UnwrapOptional) usize {
+ return 1;
+ }
+ pub fn getOperand(self: *const UnwrapOptional, index: usize) ?*Inst {
+ var i = index;
+
+ if (i < 1)
+ return self.operand;
+ i -= 1;
+
+ return null;
+ }
+ };
};
pub const Body = struct {
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index c98fcf4a74..84e9731126 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -214,6 +214,8 @@ pub const Inst = struct {
xor,
/// Create an optional type '?T'
optional_type,
+ /// Unwraps an optional value 'lhs.?'
+ unwrap_optional,
pub fn Type(tag: Tag) type {
return switch (tag) {
@@ -301,6 +303,7 @@ pub const Inst = struct {
.fntype => FnType,
.elemptr => ElemPtr,
.condbr => CondBr,
+ .unwrap_optional => UnwrapOptional,
};
}
@@ -376,6 +379,7 @@ pub const Inst = struct {
.typeof,
.xor,
.optional_type,
+ .unwrap_optional,
=> false,
.@"break",
@@ -816,6 +820,18 @@ pub const Inst = struct {
},
kw_args: struct {},
};
+
+ pub const UnwrapOptional = struct {
+ pub const base_tag = Tag.unwrap_optional;
+ base: Inst,
+
+ positionals: struct {
+ operand: *Inst,
+ },
+ kw_args: struct {
+ safety_check: bool = true,
+ },
+ };
};
pub const ErrorMsg = struct {
@@ -2141,6 +2157,25 @@ const EmitZIR = struct {
};
break :blk &new_inst.base;
},
+
+ .unwrap_optional => blk: {
+ const old_inst = inst.castTag(.unwrap_optional).?;
+
+ const new_inst = try self.arena.allocator.create(Inst.UnwrapOptional);
+ new_inst.* = .{
+ .base = .{
+ .src = inst.src,
+ .tag = Inst.UnwrapOptional.base_tag,
+ },
+ .positionals = .{
+ .operand = try self.resolveInst(new_body, old_inst.operand),
+ },
+ .kw_args = .{
+ .safety_check = old_inst.safety_check,
+ },
+ };
+ break :blk &new_inst.base;
+ },
};
try instructions.append(new_inst);
try inst_table.put(inst, new_inst);
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index e241caefc9..39fbf9221a 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -107,6 +107,7 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.boolnot => return analyzeInstBoolNot(mod, scope, old_inst.castTag(.boolnot).?),
.typeof => return analyzeInstTypeOf(mod, scope, old_inst.castTag(.typeof).?),
.optional_type => return analyzeInstOptionalType(mod, scope, old_inst.castTag(.optional_type).?),
+ .unwrap_optional => return analyzeInstUnwrapOptional(mod, scope, old_inst.castTag(.unwrap_optional).?),
}
}
@@ -306,8 +307,19 @@ fn analyzeInstRetPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerErr
fn analyzeInstRef(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst {
const operand = try resolveInst(mod, scope, inst.positionals.operand);
- const b = try mod.requireRuntimeBlock(scope, inst.base.src);
const ptr_type = try mod.singleConstPtrType(scope, inst.base.src, operand.ty);
+
+ if (operand.value()) |val| {
+ const ref_payload = try scope.arena().create(Value.Payload.RefVal);
+ ref_payload.* = .{ .val = val };
+
+ return mod.constInst(scope, inst.base.src, .{
+ .ty = ptr_type,
+ .val = Value.initPayload(&ref_payload.base),
+ });
+ }
+
+ const b = try mod.requireRuntimeBlock(scope, inst.base.src);
return mod.addUnOp(b, inst.base.src, ptr_type, .ref, operand);
}
@@ -649,6 +661,34 @@ fn analyzeInstOptionalType(mod: *Module, scope: *Scope, optional: *zir.Inst.UnOp
}));
}
+fn analyzeInstUnwrapOptional(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnwrapOptional) InnerError!*Inst {
+ const operand = try resolveInst(mod, scope, unwrap.positionals.operand);
+ assert(operand.ty.zigTypeTag() == .Pointer);
+
+ if (operand.ty.elemType().zigTypeTag() != .Optional) {
+ return mod.fail(scope, unwrap.base.src, "expected optional type, found {}", .{operand.ty.elemType()});
+ }
+
+ const child_type = operand.ty.elemType().elemType();
+ const child_pointer = if (operand.ty.isConstPtr())
+ try mod.singleConstPtrType(scope, unwrap.base.src, child_type)
+ else
+ try mod.singleMutPtrType(scope, unwrap.base.src, child_type);
+
+ if (operand.value()) |val| {
+ if (val.tag() == .null_value) {
+ return mod.fail(scope, unwrap.base.src, "unable to unwrap null", .{});
+ }
+ return mod.constInst(scope, unwrap.base.src, .{
+ .ty = child_pointer,
+ .val = val,
+ });
+ }
+
+ const b = try mod.requireRuntimeBlock(scope, unwrap.base.src);
+ return mod.addUnwrapOptional(b, unwrap.base.src, child_pointer, operand, unwrap.kw_args.safety_check);
+}
+
fn analyzeInstFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*Inst {
const return_type = try resolveType(mod, scope, fntype.positionals.return_type);
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
index bb3e542f13..477b5d92b1 100644
--- a/test/stage2/compare_output.zig
+++ b/test/stage2/compare_output.zig
@@ -31,6 +31,11 @@ pub fn addCases(ctx: *TestContext) !void {
\\export fn _start() noreturn {
\\ print();
\\
+ \\ const a: u32 = 2;
+ \\ const b: ?u32 = a;
+ \\ const c = b.?;
+ \\ if (c != 2) unreachable;
+ \\
\\ exit();
\\}
\\
From 6b2ce9d1e99902d429d9946bcc57e8cb02f4d224 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Thu, 13 Aug 2020 14:18:44 +0300
Subject: [PATCH 071/153] stage2: split unwrap_optional to safe and unsafe
 versions
---
src-self-hosted/Module.zig | 24 +------------------
src-self-hosted/astgen.zig | 2 +-
src-self-hosted/codegen.zig | 5 ++--
src-self-hosted/ir.zig | 27 ++++-----------------
src-self-hosted/type.zig | 10 ++++++--
src-self-hosted/zir.zig | 43 +++++++---------------------------
src-self-hosted/zir_sema.zig | 10 +++++---
test/stage2/compare_output.zig | 29 +++++++++++++++++++----
8 files changed, 57 insertions(+), 93 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index bbbcde9a71..66da42cadf 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -2016,28 +2016,6 @@ pub fn addCall(
return &inst.base;
}
-pub fn addUnwrapOptional(
- self: *Module,
- block: *Scope.Block,
- src: usize,
- ty: Type,
- operand: *Inst,
- safety_check: bool,
-) !*Inst {
- const inst = try block.arena.create(Inst.UnwrapOptional);
- inst.* = .{
- .base = .{
- .tag = .unwrap_optional,
- .ty = ty,
- .src = src,
- },
- .operand = operand,
- .safety_check = safety_check,
- };
- try block.instructions.append(self.gpa, &inst.base);
- return &inst.base;
-}
-
pub fn constInst(self: *Module, scope: *Scope, src: usize, typed_value: TypedValue) !*Inst {
const const_inst = try scope.arena().create(Inst.Constant);
const_inst.* = .{
@@ -2500,7 +2478,7 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
// null to ?T
if (dest_type.zigTypeTag() == .Optional and inst.ty.zigTypeTag() == .Null) {
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = inst.ty.onePossibleValue().? });
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = Value.initTag(.null_value) });
}
// T to ?T
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index ed3f8ab1b6..95c4e63c92 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -311,7 +311,7 @@ fn unwrapOptional(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Si
const src = tree.token_locs[node.rtoken].start;
const operand = try expr(mod, scope, .lvalue, node.lhs);
- const unwrapped_ptr = try addZIRInst(mod, scope, src, zir.Inst.UnwrapOptional, .{ .operand = operand }, .{});
+ const unwrapped_ptr = try addZIRUnOp(mod, scope, src, .unwrap_optional_safe, operand);
if (rl == .lvalue) return unwrapped_ptr;
return rlWrap(mod, scope, rl, try addZIRUnOp(mod, scope, src, .deref, unwrapped_ptr));
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 887126ba2b..37e775b4b3 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -668,7 +668,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.store => return self.genStore(inst.castTag(.store).?),
.sub => return self.genSub(inst.castTag(.sub).?),
.unreach => return MCValue{ .unreach = {} },
- .unwrap_optional => return self.genUnwrapOptional(inst.castTag(.unwrap_optional).?),
+ .unwrap_optional_safe => return self.genUnwrapOptional(inst.castTag(.unwrap_optional_safe).?, true),
+ .unwrap_optional_unsafe => return self.genUnwrapOptional(inst.castTag(.unwrap_optional_unsafe).?, false),
}
}
@@ -818,7 +819,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
- fn genUnwrapOptional(self: *Self, inst: *ir.Inst.UnwrapOptional) !MCValue {
+ fn genUnwrapOptional(self: *Self, inst: *ir.Inst.UnOp, safety_check: bool) !MCValue {
// No side effects, so if it's unreferenced, do nothing.
if (inst.base.isUnused())
return MCValue.dead;
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 93952a4214..307dcfe62e 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -82,7 +82,8 @@ pub const Inst = struct {
not,
floatcast,
intcast,
- unwrap_optional,
+ unwrap_optional_safe,
+ unwrap_optional_unsafe,
pub fn Type(tag: Tag) type {
return switch (tag) {
@@ -103,6 +104,8 @@ pub const Inst = struct {
.floatcast,
.intcast,
.load,
+ .unwrap_optional_safe,
+ .unwrap_optional_unsafe,
=> UnOp,
.add,
@@ -125,7 +128,6 @@ pub const Inst = struct {
.condbr => CondBr,
.constant => Constant,
.loop => Loop,
- .unwrap_optional => UnwrapOptional,
};
}
@@ -421,27 +423,6 @@ pub const Inst = struct {
return null;
}
};
-
- pub const UnwrapOptional = struct {
- pub const base_tag = Tag.unwrap_optional;
- base: Inst,
-
- operand: *Inst,
- safety_check: bool,
-
- pub fn operandCount(self: *const UnwrapOptional) usize {
- return 1;
- }
- pub fn getOperand(self: *const UnwrapOptional, index: usize) ?*Inst {
- var i = index;
-
- if (i < 1)
- return self.operand;
- i -= 1;
-
- return null;
- }
- };
};
pub const Body = struct {
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index eadb91ffea..ef78b046e6 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -185,8 +185,6 @@ pub const Type = extern union {
return true;
},
.Optional => {
- if (a.tag() != b.tag())
- return false;
return a.elemType().eql(b.elemType());
},
.Float,
@@ -662,6 +660,10 @@ pub const Type = extern union {
.optional => {
const child_type = self.cast(Payload.Optional).?.child_type;
if (!child_type.hasCodeGenBits()) return 1;
+
+ if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr())
+ return @divExact(target.cpu.arch.ptrBitWidth(), 8);
+
return child_type.abiAlignment(target);
},
@@ -750,6 +752,10 @@ pub const Type = extern union {
.optional => {
const child_type = self.cast(Payload.Optional).?.child_type;
if (!child_type.hasCodeGenBits()) return 1;
+
+ if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr())
+ return @divExact(target.cpu.arch.ptrBitWidth(), 8);
+
// Optional types are represented as a struct with the child type as the first
// field and a boolean as the second. Since the child type's abi alignment is
// guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 84e9731126..10fb419440 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -215,7 +215,9 @@ pub const Inst = struct {
/// Create an optional type '?T'
optional_type,
/// Unwraps an optional value 'lhs.?'
- unwrap_optional,
+ unwrap_optional_safe,
+ /// Same as previous, but without safety checks. Used for orelse, if and while
+ unwrap_optional_unsafe,
pub fn Type(tag: Tag) type {
return switch (tag) {
@@ -245,6 +247,8 @@ pub const Inst = struct {
.single_const_ptr_type,
.single_mut_ptr_type,
.optional_type,
+ .unwrap_optional_safe,
+ .unwrap_optional_unsafe,
=> UnOp,
.add,
@@ -303,7 +307,6 @@ pub const Inst = struct {
.fntype => FnType,
.elemptr => ElemPtr,
.condbr => CondBr,
- .unwrap_optional => UnwrapOptional,
};
}
@@ -379,7 +382,8 @@ pub const Inst = struct {
.typeof,
.xor,
.optional_type,
- .unwrap_optional,
+ .unwrap_optional_safe,
+ .unwrap_optional_unsafe,
=> false,
.@"break",
@@ -820,18 +824,6 @@ pub const Inst = struct {
},
kw_args: struct {},
};
-
- pub const UnwrapOptional = struct {
- pub const base_tag = Tag.unwrap_optional;
- base: Inst,
-
- positionals: struct {
- operand: *Inst,
- },
- kw_args: struct {
- safety_check: bool = true,
- },
- };
};
pub const ErrorMsg = struct {
@@ -1935,6 +1927,8 @@ const EmitZIR = struct {
.isnonnull => try self.emitUnOp(inst.src, new_body, inst.castTag(.isnonnull).?, .isnonnull),
.load => try self.emitUnOp(inst.src, new_body, inst.castTag(.load).?, .deref),
.ref => try self.emitUnOp(inst.src, new_body, inst.castTag(.ref).?, .ref),
+ .unwrap_optional_safe => try self.emitUnOp(inst.src, new_body, inst.castTag(.unwrap_optional_safe).?, .unwrap_optional_safe),
+ .unwrap_optional_unsafe => try self.emitUnOp(inst.src, new_body, inst.castTag(.unwrap_optional_unsafe).?, .unwrap_optional_unsafe),
.add => try self.emitBinOp(inst.src, new_body, inst.castTag(.add).?, .add),
.sub => try self.emitBinOp(inst.src, new_body, inst.castTag(.sub).?, .sub),
@@ -2157,25 +2151,6 @@ const EmitZIR = struct {
};
break :blk &new_inst.base;
},
-
- .unwrap_optional => blk: {
- const old_inst = inst.castTag(.unwrap_optional).?;
-
- const new_inst = try self.arena.allocator.create(Inst.UnwrapOptional);
- new_inst.* = .{
- .base = .{
- .src = inst.src,
- .tag = Inst.UnwrapOptional.base_tag,
- },
- .positionals = .{
- .operand = try self.resolveInst(new_body, old_inst.operand),
- },
- .kw_args = .{
- .safety_check = old_inst.safety_check,
- },
- };
- break :blk &new_inst.base;
- },
};
try instructions.append(new_inst);
try inst_table.put(inst, new_inst);
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 39fbf9221a..b217752494 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -107,7 +107,8 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.boolnot => return analyzeInstBoolNot(mod, scope, old_inst.castTag(.boolnot).?),
.typeof => return analyzeInstTypeOf(mod, scope, old_inst.castTag(.typeof).?),
.optional_type => return analyzeInstOptionalType(mod, scope, old_inst.castTag(.optional_type).?),
- .unwrap_optional => return analyzeInstUnwrapOptional(mod, scope, old_inst.castTag(.unwrap_optional).?),
+ .unwrap_optional_safe => return analyzeInstUnwrapOptional(mod, scope, old_inst.castTag(.unwrap_optional_safe).?, true),
+ .unwrap_optional_unsafe => return analyzeInstUnwrapOptional(mod, scope, old_inst.castTag(.unwrap_optional_unsafe).?, false),
}
}
@@ -661,7 +662,7 @@ fn analyzeInstOptionalType(mod: *Module, scope: *Scope, optional: *zir.Inst.UnOp
}));
}
-fn analyzeInstUnwrapOptional(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnwrapOptional) InnerError!*Inst {
+fn analyzeInstUnwrapOptional(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst {
const operand = try resolveInst(mod, scope, unwrap.positionals.operand);
assert(operand.ty.zigTypeTag() == .Pointer);
@@ -686,7 +687,10 @@ fn analyzeInstUnwrapOptional(mod: *Module, scope: *Scope, unwrap: *zir.Inst.Unwr
}
const b = try mod.requireRuntimeBlock(scope, unwrap.base.src);
- return mod.addUnwrapOptional(b, unwrap.base.src, child_pointer, operand, unwrap.kw_args.safety_check);
+ return if (safety_check)
+ mod.addUnOp(b, unwrap.base.src, child_pointer, .unwrap_optional_safe, operand)
+ else
+ mod.addUnOp(b, unwrap.base.src, child_pointer, .unwrap_optional_unsafe, operand);
}
fn analyzeInstFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*Inst {
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
index 477b5d92b1..29312a8f53 100644
--- a/test/stage2/compare_output.zig
+++ b/test/stage2/compare_output.zig
@@ -31,11 +31,6 @@ pub fn addCases(ctx: *TestContext) !void {
\\export fn _start() noreturn {
\\ print();
\\
- \\ const a: u32 = 2;
- \\ const b: ?u32 = a;
- \\ const c = b.?;
- \\ if (c != 2) unreachable;
- \\
\\ exit();
\\}
\\
@@ -446,5 +441,29 @@ pub fn addCases(ctx: *TestContext) !void {
,
"",
);
+
+ // Optionals
+ case.addCompareOutput(
+ \\export fn _start() noreturn {
+ \\ const a: u32 = 2;
+ \\ const b: ?u32 = a;
+ \\ const c = b.?;
+ \\ if (c != 2) unreachable;
+ \\
+ \\ exit();
+ \\}
+ \\
+ \\fn exit() noreturn {
+ \\ asm volatile ("syscall"
+ \\ :
+ \\ : [number] "{rax}" (231),
+ \\ [arg1] "{rdi}" (0)
+ \\ : "rcx", "r11", "memory"
+ \\ );
+ \\ unreachable;
+ \\}
+ ,
+ "",
+ );
}
}
From 16d118a8d9ce48143ef406ff7b3a1f9d62021d98 Mon Sep 17 00:00:00 2001
From: heidezomp
Date: Thu, 13 Aug 2020 17:14:15 +0200
Subject: [PATCH 072/153] update std and src-self-hosted for std.log breaking
change
---
lib/std/heap/general_purpose_allocator.zig | 9 ++--
src-self-hosted/Module.zig | 18 ++++----
src-self-hosted/link.zig | 50 +++++++++++-----------
src-self-hosted/liveness.zig | 2 +-
src-self-hosted/main.zig | 2 +-
5 files changed, 41 insertions(+), 40 deletions(-)
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 7b885bca36..91a01bb837 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -93,6 +93,7 @@
//! in a `std.HashMap` using the backing allocator.
const std = @import("std");
+const log = std.log.scoped(.std);
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
@@ -288,7 +289,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (is_used) {
const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index);
const stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
- std.log.err(.std, "Memory leak detected: {}", .{stack_trace});
+ log.err("Memory leak detected: {}", .{stack_trace});
leaks = true;
}
if (bit_index == math.maxInt(u3))
@@ -315,7 +316,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
}
for (self.large_allocations.items()) |*large_alloc| {
- std.log.err(.std, "Memory leak detected: {}", .{large_alloc.value.getStackTrace()});
+ log.err("Memory leak detected: {}", .{large_alloc.value.getStackTrace()});
leaks = true;
}
return leaks;
@@ -450,7 +451,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
.index = 0,
};
std.debug.captureStackTrace(ret_addr, &free_stack_trace);
- std.log.err(.std, "Allocation size {} bytes does not match free size {}. Allocation: {} Free: {}", .{
+ log.err("Allocation size {} bytes does not match free size {}. Allocation: {} Free: {}", .{
entry.value.bytes.len,
old_mem.len,
entry.value.getStackTrace(),
@@ -533,7 +534,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
.index = 0,
};
std.debug.captureStackTrace(ret_addr, &second_free_stack_trace);
- std.log.err(.std, "Double free detected. Allocation: {} First free: {} Second free: {}", .{
+ log.err("Double free detected. Allocation: {} First free: {} Second free: {}", .{
alloc_stack_trace,
free_stack_trace,
second_free_stack_trace,
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 6abd4f51e1..972b7960c8 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -6,7 +6,7 @@ const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const assert = std.debug.assert;
-const log = std.log;
+const log = std.log.scoped(.module);
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Target = std.Target;
@@ -1055,7 +1055,7 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
// lifetime annotations in the ZIR.
var decl_arena = decl.typed_value.most_recent.arena.?.promote(self.gpa);
defer decl.typed_value.most_recent.arena.?.* = decl_arena.state;
- std.log.debug(.module, "analyze liveness of {}\n", .{decl.name});
+ log.debug("analyze liveness of {}\n", .{decl.name});
try liveness.analyze(self.gpa, &decl_arena.allocator, payload.func.analysis.success);
}
@@ -1117,7 +1117,7 @@ pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
.complete => return,
.outdated => blk: {
- log.debug(.module, "re-analyzing {}\n", .{decl.name});
+ log.debug("re-analyzing {}\n", .{decl.name});
// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
@@ -1563,7 +1563,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
// Handle explicitly deleted decls from the source code. Not to be confused
// with when we delete decls because they are no longer referenced.
for (deleted_decls.items()) |entry| {
- log.debug(.module, "noticed '{}' deleted from source\n", .{entry.key.name});
+ log.debug("noticed '{}' deleted from source\n", .{entry.key.name});
try self.deleteDecl(entry.key);
}
}
@@ -1616,7 +1616,7 @@ fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
// Handle explicitly deleted decls from the source code. Not to be confused
// with when we delete decls because they are no longer referenced.
for (deleted_decls.items()) |entry| {
- log.debug(.module, "noticed '{}' deleted from source\n", .{entry.key.name});
+ log.debug("noticed '{}' deleted from source\n", .{entry.key.name});
try self.deleteDecl(entry.key);
}
}
@@ -1628,7 +1628,7 @@ fn deleteDecl(self: *Module, decl: *Decl) !void {
// not be present in the set, and this does nothing.
decl.scope.removeDecl(decl);
- log.debug(.module, "deleting decl '{}'\n", .{decl.name});
+ log.debug("deleting decl '{}'\n", .{decl.name});
const name_hash = decl.fullyQualifiedNameHash();
self.decl_table.removeAssertDiscard(name_hash);
// Remove itself from its dependencies, because we are about to destroy the decl pointer.
@@ -1715,17 +1715,17 @@ fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
const fn_zir = func.analysis.queued;
defer fn_zir.arena.promote(self.gpa).deinit();
func.analysis = .{ .in_progress = {} };
- log.debug(.module, "set {} to in_progress\n", .{decl.name});
+ log.debug("set {} to in_progress\n", .{decl.name});
try zir_sema.analyzeBody(self, &inner_block.base, fn_zir.body);
const instructions = try arena.allocator.dupe(*Inst, inner_block.instructions.items);
func.analysis = .{ .success = .{ .instructions = instructions } };
- log.debug(.module, "set {} to success\n", .{decl.name});
+ log.debug("set {} to success\n", .{decl.name});
}
fn markOutdatedDecl(self: *Module, decl: *Decl) !void {
- log.debug(.module, "mark {} outdated\n", .{decl.name});
+ log.debug("mark {} outdated\n", .{decl.name});
try self.work_queue.writeItem(.{ .analyze_decl = decl });
if (self.failed_decls.remove(decl)) |entry| {
entry.value.destroy(self.gpa);
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 7c5e645fb5..007cc62726 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -8,7 +8,7 @@ const fs = std.fs;
const elf = std.elf;
const codegen = @import("codegen.zig");
const c_codegen = @import("codegen/c.zig");
-const log = std.log;
+const log = std.log.scoped(.link);
const DW = std.dwarf;
const trace = @import("tracy.zig").trace;
const leb128 = std.debug.leb;
@@ -726,7 +726,7 @@ pub const File = struct {
const file_size = self.base.options.program_code_size_hint;
const p_align = 0x1000;
const off = self.findFreeSpace(file_size, p_align);
- log.debug(.link, "found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ log.debug("found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
try self.program_headers.append(self.base.allocator, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
@@ -747,7 +747,7 @@ pub const File = struct {
// page align.
const p_align = if (self.base.options.target.os.tag == .linux) 0x1000 else @as(u16, ptr_size);
const off = self.findFreeSpace(file_size, p_align);
- log.debug(.link, "found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ log.debug("found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
// TODO instead of hard coding the vaddr, make a function to find a vaddr to put things at.
// we'll need to re-use that function anyway, in case the GOT grows and overlaps something
// else in virtual memory.
@@ -769,7 +769,7 @@ pub const File = struct {
assert(self.shstrtab.items.len == 0);
try self.shstrtab.append(self.base.allocator, 0); // need a 0 at position 0
const off = self.findFreeSpace(self.shstrtab.items.len, 1);
- log.debug(.link, "found shstrtab free space 0x{x} to 0x{x}\n", .{ off, off + self.shstrtab.items.len });
+ log.debug("found shstrtab free space 0x{x} to 0x{x}\n", .{ off, off + self.shstrtab.items.len });
try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".shstrtab"),
.sh_type = elf.SHT_STRTAB,
@@ -827,7 +827,7 @@ pub const File = struct {
const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
const file_size = self.base.options.symbol_count_hint * each_size;
const off = self.findFreeSpace(file_size, min_align);
- log.debug(.link, "found symtab free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ log.debug("found symtab free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
try self.sections.append(self.base.allocator, .{
.sh_name = try self.makeString(".symtab"),
@@ -869,7 +869,7 @@ pub const File = struct {
const file_size_hint = 200;
const p_align = 1;
const off = self.findFreeSpace(file_size_hint, p_align);
- log.debug(.link, "found .debug_info free space 0x{x} to 0x{x}\n", .{
+ log.debug("found .debug_info free space 0x{x} to 0x{x}\n", .{
off,
off + file_size_hint,
});
@@ -894,7 +894,7 @@ pub const File = struct {
const file_size_hint = 128;
const p_align = 1;
const off = self.findFreeSpace(file_size_hint, p_align);
- log.debug(.link, "found .debug_abbrev free space 0x{x} to 0x{x}\n", .{
+ log.debug("found .debug_abbrev free space 0x{x} to 0x{x}\n", .{
off,
off + file_size_hint,
});
@@ -919,7 +919,7 @@ pub const File = struct {
const file_size_hint = 160;
const p_align = 16;
const off = self.findFreeSpace(file_size_hint, p_align);
- log.debug(.link, "found .debug_aranges free space 0x{x} to 0x{x}\n", .{
+ log.debug("found .debug_aranges free space 0x{x} to 0x{x}\n", .{
off,
off + file_size_hint,
});
@@ -944,7 +944,7 @@ pub const File = struct {
const file_size_hint = 250;
const p_align = 1;
const off = self.findFreeSpace(file_size_hint, p_align);
- log.debug(.link, "found .debug_line free space 0x{x} to 0x{x}\n", .{
+ log.debug("found .debug_line free space 0x{x} to 0x{x}\n", .{
off,
off + file_size_hint,
});
@@ -1071,7 +1071,7 @@ pub const File = struct {
debug_abbrev_sect.sh_offset = self.findFreeSpace(needed_size, 1);
}
debug_abbrev_sect.sh_size = needed_size;
- log.debug(.link, ".debug_abbrev start=0x{x} end=0x{x}\n", .{
+ log.debug(".debug_abbrev start=0x{x} end=0x{x}\n", .{
debug_abbrev_sect.sh_offset,
debug_abbrev_sect.sh_offset + needed_size,
});
@@ -1218,7 +1218,7 @@ pub const File = struct {
debug_aranges_sect.sh_offset = self.findFreeSpace(needed_size, 16);
}
debug_aranges_sect.sh_size = needed_size;
- log.debug(.link, ".debug_aranges start=0x{x} end=0x{x}\n", .{
+ log.debug(".debug_aranges start=0x{x} end=0x{x}\n", .{
debug_aranges_sect.sh_offset,
debug_aranges_sect.sh_offset + needed_size,
});
@@ -1386,7 +1386,7 @@ pub const File = struct {
shstrtab_sect.sh_offset = self.findFreeSpace(needed_size, 1);
}
shstrtab_sect.sh_size = needed_size;
- log.debug(.link, "writing shstrtab start=0x{x} end=0x{x}\n", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });
+ log.debug("writing shstrtab start=0x{x} end=0x{x}\n", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });
try self.base.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
if (!self.shdr_table_dirty) {
@@ -1407,7 +1407,7 @@ pub const File = struct {
debug_strtab_sect.sh_offset = self.findFreeSpace(needed_size, 1);
}
debug_strtab_sect.sh_size = needed_size;
- log.debug(.link, "debug_strtab start=0x{x} end=0x{x}\n", .{ debug_strtab_sect.sh_offset, debug_strtab_sect.sh_offset + needed_size });
+ log.debug("debug_strtab start=0x{x} end=0x{x}\n", .{ debug_strtab_sect.sh_offset, debug_strtab_sect.sh_offset + needed_size });
try self.base.file.?.pwriteAll(self.debug_strtab.items, debug_strtab_sect.sh_offset);
if (!self.shdr_table_dirty) {
@@ -1441,7 +1441,7 @@ pub const File = struct {
for (buf) |*shdr, i| {
shdr.* = sectHeaderTo32(self.sections.items[i]);
- std.log.debug(.link, "writing section {}\n", .{shdr.*});
+ log.debug("writing section {}\n", .{shdr.*});
if (foreign_endian) {
bswapAllFields(elf.Elf32_Shdr, shdr);
}
@@ -1454,7 +1454,7 @@ pub const File = struct {
for (buf) |*shdr, i| {
shdr.* = self.sections.items[i];
- log.debug(.link, "writing section {}\n", .{shdr.*});
+ log.debug("writing section {}\n", .{shdr.*});
if (foreign_endian) {
bswapAllFields(elf.Elf64_Shdr, shdr);
}
@@ -1465,10 +1465,10 @@ pub const File = struct {
self.shdr_table_dirty = false;
}
if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
- log.debug(.link, "flushing. no_entry_point_found = true\n", .{});
+ log.debug("flushing. no_entry_point_found = true\n", .{});
self.error_flags.no_entry_point_found = true;
} else {
- log.debug(.link, "flushing. no_entry_point_found = false\n", .{});
+ log.debug("flushing. no_entry_point_found = false\n", .{});
self.error_flags.no_entry_point_found = false;
try self.writeElfHeader();
}
@@ -1797,10 +1797,10 @@ pub const File = struct {
try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
if (self.local_symbol_free_list.popOrNull()) |i| {
- log.debug(.link, "reusing symbol index {} for {}\n", .{ i, decl.name });
+ log.debug("reusing symbol index {} for {}\n", .{ i, decl.name });
decl.link.elf.local_sym_index = i;
} else {
- log.debug(.link, "allocating symbol index {} for {}\n", .{ self.local_symbols.items.len, decl.name });
+ log.debug("allocating symbol index {} for {}\n", .{ self.local_symbols.items.len, decl.name });
decl.link.elf.local_sym_index = @intCast(u32, self.local_symbols.items.len);
_ = self.local_symbols.addOneAssumeCapacity();
}
@@ -1993,11 +1993,11 @@ pub const File = struct {
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
if (need_realloc) {
const vaddr = try self.growTextBlock(&decl.link.elf, code.len, required_alignment);
- log.debug(.link, "growing {} from 0x{x} to 0x{x}\n", .{ decl.name, local_sym.st_value, vaddr });
+ log.debug("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, local_sym.st_value, vaddr });
if (vaddr != local_sym.st_value) {
local_sym.st_value = vaddr;
- log.debug(.link, " (writing new offset table entry)\n", .{});
+ log.debug(" (writing new offset table entry)\n", .{});
self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
}
@@ -2015,7 +2015,7 @@ pub const File = struct {
const decl_name = mem.spanZ(decl.name);
const name_str_index = try self.makeString(decl_name);
const vaddr = try self.allocateTextBlock(&decl.link.elf, code.len, required_alignment);
- log.debug(.link, "allocated text block for {} at 0x{x}\n", .{ decl_name, vaddr });
+ log.debug("allocated text block for {} at 0x{x}\n", .{ decl_name, vaddr });
errdefer self.freeTextBlock(&decl.link.elf);
local_sym.* = .{
@@ -2125,7 +2125,7 @@ pub const File = struct {
if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) {
const new_offset = self.findFreeSpace(needed_size, 1);
const existing_size = last_src_fn.off;
- log.debug(.link, "moving .debug_line section: {} bytes from 0x{x} to 0x{x}\n", .{
+ log.debug("moving .debug_line section: {} bytes from 0x{x} to 0x{x}\n", .{
existing_size,
debug_line_sect.sh_offset,
new_offset,
@@ -2204,7 +2204,7 @@ pub const File = struct {
try dbg_info_buffer.writer().print("{}\x00", .{ty});
},
else => {
- log.err(.compiler, "TODO implement .debug_info for type '{}'", .{ty});
+ std.log.scoped(.compiler).err("TODO implement .debug_info for type '{}'", .{ty});
try dbg_info_buffer.append(abbrev_pad1);
},
}
@@ -2276,7 +2276,7 @@ pub const File = struct {
if (needed_size > self.allocatedSize(debug_info_sect.sh_offset)) {
const new_offset = self.findFreeSpace(needed_size, 1);
const existing_size = last_decl.dbg_info_off;
- log.debug(.link, "moving .debug_info section: {} bytes from 0x{x} to 0x{x}\n", .{
+ log.debug("moving .debug_info section: {} bytes from 0x{x} to 0x{x}\n", .{
existing_size,
debug_info_sect.sh_offset,
new_offset,
diff --git a/src-self-hosted/liveness.zig b/src-self-hosted/liveness.zig
index e8f80f30d5..6d2af35f32 100644
--- a/src-self-hosted/liveness.zig
+++ b/src-self-hosted/liveness.zig
@@ -114,5 +114,5 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void
@panic("Handle liveness analysis for instructions with many parameters");
}
- std.log.debug(.liveness, "analyze {}: 0b{b}\n", .{ base.tag, base.deaths });
+ std.log.scoped(.liveness).debug("analyze {}: 0b{b}\n", .{ base.tag, base.deaths });
}
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 76d8651646..61b32e0ea5 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -560,7 +560,7 @@ fn updateModule(gpa: *Allocator, module: *Module, zir_out_path: ?[]const u8) !vo
});
}
} else {
- std.log.info(.compiler, "Update completed in {} ms\n", .{update_nanos / std.time.ns_per_ms});
+ std.log.scoped(.compiler).info("Update completed in {} ms\n", .{update_nanos / std.time.ns_per_ms});
}
if (zir_out_path) |zop| {
From 13e472aa2a8113df6417c09727297a6106127f8e Mon Sep 17 00:00:00 2001
From: Vexu
Date: Thu, 13 Aug 2020 16:06:42 +0300
Subject: [PATCH 073/153] translate-c: add return if one is needed
---
lib/std/hash/auto_hash.zig | 2 +-
src-self-hosted/Module.zig | 4 +--
src-self-hosted/translate_c.zig | 40 +++++++++++++++++++++
test/run_translated_c.zig | 1 -
test/translate_c.zig | 61 +++++++++++++++++++++++----------
5 files changed, 86 insertions(+), 22 deletions(-)
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index 85f8e4b0d2..996d6ede38 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -129,7 +129,7 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
}
},
- .Union => |info| blk: {
+ .Union => |info| {
if (info.tag_type) |tag_type| {
const tag = meta.activeTag(key);
const s = hash(hasher, tag, strat);
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 6abd4f51e1..88e59b5a2f 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -2798,7 +2798,7 @@ pub fn floatAdd(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs:
val_payload.* = .{ .val = lhs_val + rhs_val };
break :blk &val_payload.base;
},
- 128 => blk: {
+ 128 => {
return self.fail(scope, src, "TODO Implement addition for big floats", .{});
},
else => unreachable,
@@ -2832,7 +2832,7 @@ pub fn floatSub(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs:
val_payload.* = .{ .val = lhs_val - rhs_val };
break :blk &val_payload.base;
},
- 128 => blk: {
+ 128 => {
return self.fail(scope, src, "TODO Implement substraction for big floats", .{});
},
else => unreachable,
diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig
index de8f633076..2382375fc5 100644
--- a/src-self-hosted/translate_c.zig
+++ b/src-self-hosted/translate_c.zig
@@ -628,6 +628,46 @@ fn visitFnDecl(c: *Context, fn_decl: *const ZigClangFunctionDecl) Error!void {
error.UnsupportedType,
=> return failDecl(c, fn_decl_loc, fn_name, "unable to translate function", .{}),
};
+ // add return statement if the function didn't have one
+ blk: {
+ const fn_ty = @ptrCast(*const ZigClangFunctionType, fn_type);
+
+ if (ZigClangFunctionType_getNoReturnAttr(fn_ty)) break :blk;
+ const return_qt = ZigClangFunctionType_getReturnType(fn_ty);
+ if (isCVoid(return_qt)) break :blk;
+
+ if (block_scope.statements.items.len > 0) {
+ var last = block_scope.statements.items[block_scope.statements.items.len - 1];
+ while (true) {
+ switch (last.tag) {
+ .Block => {
+ const stmts = last.castTag(.Block).?.statements();
+ if (stmts.len == 0) break;
+
+ last = stmts[stmts.len - 1];
+ },
+ // no extra return needed
+ .Return => break :blk,
+ else => break,
+ }
+ }
+ }
+
+ const return_expr = try ast.Node.ControlFlowExpression.create(rp.c.arena, .{
+ .ltoken = try appendToken(rp.c, .Keyword_return, "return"),
+ .tag = .Return,
+ }, .{
+ .rhs = transZeroInitExpr(rp, scope, fn_decl_loc, ZigClangQualType_getTypePtr(return_qt)) catch |err| switch (err) {
+ error.OutOfMemory => |e| return e,
+ error.UnsupportedTranslation,
+ error.UnsupportedType,
+ => return failDecl(c, fn_decl_loc, fn_name, "unable to create a return value for function", .{}),
+ },
+ });
+ _ = try appendToken(rp.c, .Semicolon, ";");
+ try block_scope.statements.append(&return_expr.base);
+ }
+
const body_node = try block_scope.complete(rp.c);
proto_node.setTrailer("body_node", &body_node.base);
return addTopLevelDecl(c, fn_name, &proto_node.base);
diff --git a/test/run_translated_c.zig b/test/run_translated_c.zig
index efdc9702a4..3fa183ce3b 100644
--- a/test/run_translated_c.zig
+++ b/test/run_translated_c.zig
@@ -15,7 +15,6 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
\\ }
\\ if (s0 != 1) abort();
\\ if (s1 != 10) abort();
- \\ return 0;
\\}
, "");
diff --git a/test/translate_c.zig b/test/translate_c.zig
index c632700bc5..f7e983276e 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -3,12 +3,33 @@ const std = @import("std");
const CrossTarget = std.zig.CrossTarget;
pub fn addCases(cases: *tests.TranslateCContext) void {
+ cases.add("missing return stmt",
+ \\int foo() {}
+ \\int bar() {
+ \\ int a = 2;
+ \\}
+ \\int baz() {
+ \\ return 0;
+ \\}
+ , &[_][]const u8{
+ \\pub export fn foo() c_int {
+ \\ return 0;
+ \\}
+ \\pub export fn bar() c_int {
+ \\ var a: c_int = 2;
+ \\ return 0;
+ \\}
+ \\pub export fn baz() c_int {
+ \\ return 0;
+ \\}
+ });
+
cases.add("alignof",
- \\int main() {
+ \\void main() {
\\ int a = _Alignof(int);
\\}
, &[_][]const u8{
- \\pub export fn main() c_int {
+ \\pub export fn main() void {
\\ var a: c_int = @bitCast(c_int, @truncate(c_uint, @alignOf(c_int)));
\\}
});
@@ -539,6 +560,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ c = (a * b);
\\ c = @divTrunc(a, b);
\\ c = @rem(a, b);
+ \\ return 0;
\\}
\\pub export fn u() c_uint {
\\ var a: c_uint = undefined;
@@ -549,6 +571,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ c = (a *% b);
\\ c = (a / b);
\\ c = (a % b);
+ \\ return 0;
\\}
});
@@ -1596,13 +1619,13 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
});
cases.add("worst-case assign",
- \\int foo() {
+ \\void foo() {
\\ int a;
\\ int b;
\\ a = b = 2;
\\}
, &[_][]const u8{
- \\pub export fn foo() c_int {
+ \\pub export fn foo() void {
\\ var a: c_int = undefined;
\\ var b: c_int = undefined;
\\ a = blk: {
@@ -1650,11 +1673,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ a = 7;
\\ if (!true) break;
\\ }
+ \\ return 0;
\\}
});
cases.add("for loops",
- \\int foo() {
+ \\void foo() {
\\ for (int i = 2, b = 4; i + 2; i = 2) {
\\ int a = 2;
\\ a = 6, 5, 7;
@@ -1662,7 +1686,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ char i = 2;
\\}
, &[_][]const u8{
- \\pub export fn foo() c_int {
+ \\pub export fn foo() void {
\\ {
\\ var i: c_int = 2;
\\ var b: c_int = 4;
@@ -1712,7 +1736,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
});
cases.add("switch on int",
- \\int switch_fn(int i) {
+ \\void switch_fn(int i) {
\\ int res = 0;
\\ switch (i) {
\\ case 0:
@@ -1727,7 +1751,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ }
\\}
, &[_][]const u8{
- \\pub export fn switch_fn(arg_i: c_int) c_int {
+ \\pub export fn switch_fn(arg_i: c_int) void {
\\ var i = arg_i;
\\ var res: c_int = 0;
\\ @"switch": {
@@ -1787,13 +1811,13 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
});
cases.add("assign",
- \\int max(int a) {
+ \\void max(int a) {
\\ int tmp;
\\ tmp = a;
\\ a = tmp;
\\}
, &[_][]const u8{
- \\pub export fn max(arg_a: c_int) c_int {
+ \\pub export fn max(arg_a: c_int) void {
\\ var a = arg_a;
\\ var tmp: c_int = undefined;
\\ tmp = a;
@@ -2082,7 +2106,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ int b;
\\}a;
\\float b = 2.0f;
- \\int foo(void) {
+ \\void foo(void) {
\\ struct Foo *c;
\\ a.b;
\\ c->b;
@@ -2093,7 +2117,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\};
\\pub extern var a: struct_Foo;
\\pub export var b: f32 = 2;
- \\pub export fn foo() c_int {
+ \\pub export fn foo() void {
\\ var c: [*c]struct_Foo = undefined;
\\ _ = a.b;
\\ _ = c.*.b;
@@ -2204,11 +2228,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ if (a < b) return b;
\\ if (a < b) return b else return a;
\\ if (a < b) {} else {}
+ \\ return 0;
\\}
});
cases.add("if statements",
- \\int foo() {
+ \\void foo() {
\\ if (2) {
\\ int a = 2;
\\ }
@@ -2217,7 +2242,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ }
\\}
, &[_][]const u8{
- \\pub export fn foo() c_int {
+ \\pub export fn foo() void {
\\ if (true) {
\\ var a: c_int = 2;
\\ }
@@ -2811,12 +2836,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
});
cases.add("arg name aliasing decl which comes after",
- \\int foo(int bar) {
+ \\void foo(int bar) {
\\ bar = 2;
\\}
\\int bar = 4;
, &[_][]const u8{
- \\pub export fn foo(arg_bar_1: c_int) c_int {
+ \\pub export fn foo(arg_bar_1: c_int) void {
\\ var bar_1 = arg_bar_1;
\\ bar_1 = 2;
\\}
@@ -2824,12 +2849,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
});
cases.add("arg name aliasing macro which comes after",
- \\int foo(int bar) {
+ \\void foo(int bar) {
\\ bar = 2;
\\}
\\#define bar 4
, &[_][]const u8{
- \\pub export fn foo(arg_bar_1: c_int) c_int {
+ \\pub export fn foo(arg_bar_1: c_int) void {
\\ var bar_1 = arg_bar_1;
\\ bar_1 = 2;
\\}
From ec4953504a07f3025d5f32344180dd9b6a4de8ae Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 13 Aug 2020 10:04:46 -0700
Subject: [PATCH 074/153] stage2: implement safety checks at the zir_sema level
---
src-self-hosted/Module.zig | 72 +++++++++++++++++++++++++++++++++---
src-self-hosted/codegen.zig | 5 +--
src-self-hosted/ir.zig | 6 +--
src-self-hosted/zir.zig | 3 +-
src-self-hosted/zir_sema.zig | 36 +++++++++---------
5 files changed, 91 insertions(+), 31 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 66da42cadf..2e847519f3 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -2219,11 +2219,6 @@ pub fn wantSafety(self: *Module, scope: *Scope) bool {
};
}
-pub fn analyzeUnreach(self: *Module, scope: *Scope, src: usize) InnerError!*Inst {
- const b = try self.requireRuntimeBlock(scope, src);
- return self.addNoOp(b, src, Type.initTag(.noreturn), .unreach);
-}
-
pub fn analyzeIsNull(
self: *Module,
scope: *Scope,
@@ -2902,3 +2897,70 @@ pub fn dumpInst(self: *Module, scope: *Scope, inst: *Inst) void {
});
}
}
+
+pub const PanicId = enum {
+ unreach,
+ unwrap_null,
+};
+
+pub fn addSafetyCheck(mod: *Module, parent_block: *Scope.Block, ok: *Inst, panic_id: PanicId) !void {
+ const block_inst = try parent_block.arena.create(Inst.Block);
+ block_inst.* = .{
+ .base = .{
+ .tag = Inst.Block.base_tag,
+ .ty = Type.initTag(.void),
+ .src = ok.src,
+ },
+ .body = .{
+ .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the condbr.
+ },
+ };
+
+ const ok_body: ir.Body = .{
+ .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the brvoid.
+ };
+ const brvoid = try parent_block.arena.create(Inst.BrVoid);
+ brvoid.* = .{
+ .base = .{
+ .tag = .brvoid,
+ .ty = Type.initTag(.noreturn),
+ .src = ok.src,
+ },
+ .block = block_inst,
+ };
+ ok_body.instructions[0] = &brvoid.base;
+
+ var fail_block: Scope.Block = .{
+ .parent = parent_block,
+ .func = parent_block.func,
+ .decl = parent_block.decl,
+ .instructions = .{},
+ .arena = parent_block.arena,
+ };
+ defer fail_block.instructions.deinit(mod.gpa);
+
+ _ = try mod.safetyPanic(&fail_block, ok.src, panic_id);
+
+ const fail_body: ir.Body = .{ .instructions = try parent_block.arena.dupe(*Inst, fail_block.instructions.items) };
+
+ const condbr = try parent_block.arena.create(Inst.CondBr);
+ condbr.* = .{
+ .base = .{
+ .tag = .condbr,
+ .ty = Type.initTag(.noreturn),
+ .src = ok.src,
+ },
+ .condition = ok,
+ .then_body = ok_body,
+ .else_body = fail_body,
+ };
+ block_inst.body.instructions[0] = &condbr.base;
+
+ try parent_block.instructions.append(mod.gpa, &block_inst.base);
+}
+
+pub fn safetyPanic(mod: *Module, block: *Scope.Block, src: usize, panic_id: PanicId) !*Inst {
+ // TODO Once we have a panic function to call, call it here instead of breakpoint.
+ _ = try mod.addNoOp(block, src, Type.initTag(.void), .breakpoint);
+ return mod.addNoOp(block, src, Type.initTag(.noreturn), .unreach);
+}
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 37e775b4b3..d25fa65f47 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -668,8 +668,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.store => return self.genStore(inst.castTag(.store).?),
.sub => return self.genSub(inst.castTag(.sub).?),
.unreach => return MCValue{ .unreach = {} },
- .unwrap_optional_safe => return self.genUnwrapOptional(inst.castTag(.unwrap_optional_safe).?, true),
- .unwrap_optional_unsafe => return self.genUnwrapOptional(inst.castTag(.unwrap_optional_unsafe).?, false),
+ .unwrap_optional => return self.genUnwrapOptional(inst.castTag(.unwrap_optional).?),
}
}
@@ -819,7 +818,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
- fn genUnwrapOptional(self: *Self, inst: *ir.Inst.UnOp, safety_check: bool) !MCValue {
+ fn genUnwrapOptional(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
// No side effects, so if it's unreferenced, do nothing.
if (inst.base.isUnused())
return MCValue.dead;
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 307dcfe62e..81f99f53ba 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -82,8 +82,7 @@ pub const Inst = struct {
not,
floatcast,
intcast,
- unwrap_optional_safe,
- unwrap_optional_unsafe,
+ unwrap_optional,
pub fn Type(tag: Tag) type {
return switch (tag) {
@@ -104,8 +103,7 @@ pub const Inst = struct {
.floatcast,
.intcast,
.load,
- .unwrap_optional_safe,
- .unwrap_optional_unsafe,
+ .unwrap_optional,
=> UnOp,
.add,
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 10fb419440..2e638b0755 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -1927,8 +1927,7 @@ const EmitZIR = struct {
.isnonnull => try self.emitUnOp(inst.src, new_body, inst.castTag(.isnonnull).?, .isnonnull),
.load => try self.emitUnOp(inst.src, new_body, inst.castTag(.load).?, .deref),
.ref => try self.emitUnOp(inst.src, new_body, inst.castTag(.ref).?, .ref),
- .unwrap_optional_safe => try self.emitUnOp(inst.src, new_body, inst.castTag(.unwrap_optional_safe).?, .unwrap_optional_safe),
- .unwrap_optional_unsafe => try self.emitUnOp(inst.src, new_body, inst.castTag(.unwrap_optional_unsafe).?, .unwrap_optional_unsafe),
+ .unwrap_optional => try self.emitUnOp(inst.src, new_body, inst.castTag(.unwrap_optional).?, .unwrap_optional_unsafe),
.add => try self.emitBinOp(inst.src, new_body, inst.castTag(.add).?, .add),
.sub => try self.emitBinOp(inst.src, new_body, inst.castTag(.sub).?, .sub),
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index b217752494..74e57b7735 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -68,8 +68,8 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.deref => return analyzeInstDeref(mod, scope, old_inst.castTag(.deref).?),
.as => return analyzeInstAs(mod, scope, old_inst.castTag(.as).?),
.@"asm" => return analyzeInstAsm(mod, scope, old_inst.castTag(.@"asm").?),
- .@"unreachable" => return analyzeInstUnreachable(mod, scope, old_inst.castTag(.@"unreachable").?),
- .unreach_nocheck => return analyzeInstUnreachNoChk(mod, scope, old_inst.castTag(.unreach_nocheck).?),
+ .@"unreachable" => return analyzeInstUnreachable(mod, scope, old_inst.castTag(.@"unreachable").?, true),
+ .unreach_nocheck => return analyzeInstUnreachable(mod, scope, old_inst.castTag(.unreach_nocheck).?, false),
.@"return" => return analyzeInstRet(mod, scope, old_inst.castTag(.@"return").?),
.returnvoid => return analyzeInstRetVoid(mod, scope, old_inst.castTag(.returnvoid).?),
.@"fn" => return analyzeInstFn(mod, scope, old_inst.castTag(.@"fn").?),
@@ -313,7 +313,7 @@ fn analyzeInstRef(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!
if (operand.value()) |val| {
const ref_payload = try scope.arena().create(Value.Payload.RefVal);
ref_payload.* = .{ .val = val };
-
+
return mod.constInst(scope, inst.base.src, .{
.ty = ptr_type,
.val = Value.initPayload(&ref_payload.base),
@@ -677,7 +677,7 @@ fn analyzeInstUnwrapOptional(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp
try mod.singleMutPtrType(scope, unwrap.base.src, child_type);
if (operand.value()) |val| {
- if (val.tag() == .null_value) {
+ if (val.isNull()) {
return mod.fail(scope, unwrap.base.src, "unable to unwrap null", .{});
}
return mod.constInst(scope, unwrap.base.src, .{
@@ -687,10 +687,11 @@ fn analyzeInstUnwrapOptional(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp
}
const b = try mod.requireRuntimeBlock(scope, unwrap.base.src);
- return if (safety_check)
- mod.addUnOp(b, unwrap.base.src, child_pointer, .unwrap_optional_safe, operand)
- else
- mod.addUnOp(b, unwrap.base.src, child_pointer, .unwrap_optional_unsafe, operand);
+ if (safety_check and mod.wantSafety(scope)) {
+ const is_non_null = try mod.addUnOp(b, unwrap.base.src, Type.initTag(.bool), .isnonnull, operand);
+ try mod.addSafetyCheck(b, is_non_null, .unwrap_null);
+ }
+ return mod.addUnOp(b, unwrap.base.src, child_pointer, .unwrap_optional, operand);
}
fn analyzeInstFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*Inst {
@@ -1167,18 +1168,19 @@ fn analyzeInstCondBr(mod: *Module, scope: *Scope, inst: *zir.Inst.CondBr) InnerE
return mod.addCondBr(parent_block, inst.base.src, cond, then_body, else_body);
}
-fn analyzeInstUnreachNoChk(mod: *Module, scope: *Scope, unreach: *zir.Inst.NoOp) InnerError!*Inst {
- return mod.analyzeUnreach(scope, unreach.base.src);
-}
-
-fn analyzeInstUnreachable(mod: *Module, scope: *Scope, unreach: *zir.Inst.NoOp) InnerError!*Inst {
+fn analyzeInstUnreachable(
+ mod: *Module,
+ scope: *Scope,
+ unreach: *zir.Inst.NoOp,
+ safety_check: bool,
+) InnerError!*Inst {
const b = try mod.requireRuntimeBlock(scope, unreach.base.src);
// TODO Add compile error for @optimizeFor occurring too late in a scope.
- if (mod.wantSafety(scope)) {
- // TODO Once we have a panic function to call, call it here instead of this.
- _ = try mod.addNoOp(b, unreach.base.src, Type.initTag(.void), .breakpoint);
+ if (safety_check and mod.wantSafety(scope)) {
+ return mod.safetyPanic(b, unreach.base.src, .unreach);
+ } else {
+ return mod.addNoOp(b, unreach.base.src, Type.initTag(.noreturn), .unreach);
}
- return mod.analyzeUnreach(scope, unreach.base.src);
}
fn analyzeInstRet(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst {
From 50139aa23245c7b6b6a5faff95f1a2c854051a38 Mon Sep 17 00:00:00 2001
From: Maks S <38307091+MadMax129@users.noreply.github.com>
Date: Sun, 28 Jun 2020 17:42:37 -0700
Subject: [PATCH 075/153] langref: explain why comptime_float cannot be divided
by comptime_int
Co-authored-by: Andrew Kelley
Co-authored-by: Veikka Tuominen
---
doc/langref.html.in | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index cfde67d622..4c6284ac43 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -248,7 +248,7 @@ pub fn main() !void {
Following the hello.zig Zig code sample, the {#link|Zig Build System#} is used
- to build an executable program from the hello.zig source code. Then, the
+ to build an executable program from the hello.zig source code. Then, the
hello program is executed showing its output Hello, world!. The
lines beginning with $ represent command line prompts and a command.
Everything else is program output.
@@ -293,7 +293,7 @@ pub fn main() !void {
In Zig, a function's block of statements and expressions are surrounded by { and
} curly-braces. Inside of the main function are expressions that perform
- the task of outputting Hello, world! to standard output.
+ the task of outputting Hello, world! to standard output.
First, a constant identifier, stdout, is initialized to represent standard output's
@@ -5135,6 +5135,22 @@ test "float widening" {
var c: f64 = b;
var d: f128 = c;
assert(d == a);
+}
+ {#code_end#}
+ {#header_close#}
+ {#header_open|Type Coercion: Coercion Float to Int#}
+
+ A compiler error is appropriate because this ambiguous expression leaves the compiler
+ two choices about the coercion.
+
+ - Cast {#syntax#}54.0{#endsyntax#} to {#syntax#}comptime_int{#endsyntax#} resulting in {#syntax#}@as(comptime_int, 10){#endsyntax#}, which is casted to {#syntax#}@as(f32, 10){#endsyntax#}
+ - Cast {#syntax#}5{#endsyntax#} to {#syntax#}comptime_float{#endsyntax#} resulting in {#syntax#}@as(comptime_float, 10.8){#endsyntax#}, which is casted to {#syntax#}@as(f32, 10.8){#endsyntax#}
+
+
+ {#code_begin|test_err#}
+// Compile time coercion of float to int
+test "implicit cast to comptime_int" {
+ var f: f32 = 54.0 / 5;
}
{#code_end#}
{#header_close#}
@@ -8179,7 +8195,7 @@ const expect = std.testing.expect;
test "@src" {
doTheTest();
}
-
+
fn doTheTest() void {
const src = @src();
From a9590f3bf863493ed9b15ad8ab0d4fa7991805a5 Mon Sep 17 00:00:00 2001
From: Dmitry Atamanov
Date: Fri, 14 Aug 2020 03:14:32 +0500
Subject: [PATCH 076/153] Support tuples in mem.len and trait.isIndexable
(#5897)
---
lib/std/mem.zig | 10 +++++++++-
lib/std/meta/trait.zig | 4 +++-
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 5a0927ea6e..1ba64f47fa 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -614,7 +614,7 @@ test "spanZ" {
}
/// Takes a pointer to an array, an array, a vector, a sentinel-terminated pointer,
-/// or a slice, and returns the length.
+/// a slice or a tuple, and returns the length.
/// In the case of a sentinel-terminated array, it uses the array length.
/// For C pointers it assumes it is a pointer-to-many with a 0 sentinel.
pub fn len(value: anytype) usize {
@@ -633,6 +633,9 @@ pub fn len(value: anytype) usize {
.C => indexOfSentinel(info.child, 0, value),
.Slice => value.len,
},
+ .Struct => |info| if (info.is_tuple) {
+ return info.fields.len;
+ } else @compileError("invalid type given to std.mem.len"),
else => @compileError("invalid type given to std.mem.len"),
};
}
@@ -658,6 +661,11 @@ test "len" {
const vector: meta.Vector(2, u32) = [2]u32{ 1, 2 };
testing.expect(len(vector) == 2);
}
+ {
+ const tuple = .{ 1, 2 };
+ testing.expect(len(tuple) == 2);
+ testing.expect(tuple[0] == 1);
+ }
}
/// Takes a pointer to an array, an array, a sentinel-terminated pointer,
diff --git a/lib/std/meta/trait.zig b/lib/std/meta/trait.zig
index 1aa5455bb5..f04d6353c6 100644
--- a/lib/std/meta/trait.zig
+++ b/lib/std/meta/trait.zig
@@ -269,19 +269,21 @@ pub fn isIndexable(comptime T: type) bool {
}
return true;
}
- return comptime is(.Array)(T) or is(.Vector)(T);
+ return comptime is(.Array)(T) or is(.Vector)(T) or isTuple(T);
}
test "std.meta.trait.isIndexable" {
const array = [_]u8{0} ** 10;
const slice = @as([]const u8, &array);
const vector: meta.Vector(2, u32) = [_]u32{0} ** 2;
+ const tuple = .{ 1, 2, 3 };
testing.expect(isIndexable(@TypeOf(array)));
testing.expect(isIndexable(@TypeOf(&array)));
testing.expect(isIndexable(@TypeOf(slice)));
testing.expect(!isIndexable(meta.Child(@TypeOf(slice))));
testing.expect(isIndexable(@TypeOf(vector)));
+ testing.expect(isIndexable(@TypeOf(tuple)));
}
pub fn isNumber(comptime T: type) bool {
From fc402bdbbbb5be2a538f0469f3f213264e7fcc59 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 13 Aug 2020 11:03:13 -0700
Subject: [PATCH 077/153] stage2: zir_sema for loops
Also remove the "repeat" instruction; repeating is now implied at the
end of a Loop body.
---
src-self-hosted/astgen.zig | 11 +++++------
src-self-hosted/zir.zig | 17 ++---------------
src-self-hosted/zir_sema.zig | 37 ++++++++++++++++++++++++++++++------
3 files changed, 38 insertions(+), 27 deletions(-)
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 95c4e63c92..d34c2dc458 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -531,13 +531,12 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
const cond_block = try addZIRInstBlock(mod, &loop_scope.base, while_src, .{
.instructions = try loop_scope.arena.dupe(*zir.Inst, continue_scope.instructions.items),
});
+ // TODO avoid emitting the continue expr when there
+ // are no jumps to it. This happens when the last statement of a while body is noreturn
+ // and there are no `continue` statements.
+ // The "repeat" at the end of a loop body is implied.
if (while_node.continue_expr) |cont_expr| {
- const cont_expr_result = try expr(mod, &loop_scope.base, .{ .ty = void_type }, cont_expr);
- if (!cont_expr_result.tag.isNoReturn()) {
- _ = try addZIRNoOp(mod, &loop_scope.base, while_src, .repeat);
- }
- } else {
- _ = try addZIRNoOp(mod, &loop_scope.base, while_src, .repeat);
+ _ = try expr(mod, &loop_scope.base, .{ .ty = void_type }, cont_expr);
}
const loop = try addZIRInstLoop(mod, &expr_scope.base, while_src, .{
.instructions = try expr_scope.arena.dupe(*zir.Inst, loop_scope.instructions.items),
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 2e638b0755..974e6f9ef6 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -151,7 +151,8 @@ pub const Inst = struct {
isnonnull,
/// Return a boolean true if an optional is null. `x == null`
isnull,
- /// A labeled block of code that loops forever.
+ /// A labeled block of code that loops forever. At the end of the body it is implied
+ /// to repeat; no explicit "repeat" instruction terminates loop bodies.
loop,
/// Ambiguously remainder division or modulus. If the computation would possibly have
/// a different value depending on whether the operation is remainder division or modulus,
@@ -175,8 +176,6 @@ pub const Inst = struct {
/// the memory location is in the stack frame, local to the scope containing the
/// instruction.
ref,
- /// Sends control flow back to the loop block operand.
- repeat,
/// Obtains a pointer to the return value.
ret_ptr,
/// Obtains the return type of the in-scope function.
@@ -294,7 +293,6 @@ pub const Inst = struct {
.compileerror => CompileError,
.loop => Loop,
.@"const" => Const,
- .repeat => Repeat,
.str => Str,
.int => Int,
.inttype => IntType,
@@ -390,7 +388,6 @@ pub const Inst = struct {
.breakvoid,
.condbr,
.compileerror,
- .repeat,
.@"return",
.returnvoid,
.unreach_nocheck,
@@ -587,16 +584,6 @@ pub const Inst = struct {
kw_args: struct {},
};
- pub const Repeat = struct {
- pub const base_tag = Tag.repeat;
- base: Inst,
-
- positionals: struct {
- loop: *Loop,
- },
- kw_args: struct {},
- };
-
pub const Str = struct {
pub const base_tag = Tag.str;
base: Inst,
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 74e57b7735..97c47db9b6 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -61,7 +61,6 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
},
.inttype => return analyzeInstIntType(mod, scope, old_inst.castTag(.inttype).?),
.loop => return analyzeInstLoop(mod, scope, old_inst.castTag(.loop).?),
- .repeat => return analyzeInstRepeat(mod, scope, old_inst.castTag(.repeat).?),
.param_type => return analyzeInstParamType(mod, scope, old_inst.castTag(.param_type).?),
.ptrtoint => return analyzeInstPtrToInt(mod, scope, old_inst.castTag(.ptrtoint).?),
.fieldptr => return analyzeInstFieldPtr(mod, scope, old_inst.castTag(.fieldptr).?),
@@ -440,12 +439,38 @@ fn analyzeInstArg(mod: *Module, scope: *Scope, inst: *zir.Inst.Arg) InnerError!*
return mod.addArg(b, inst.base.src, param_type, name);
}
-fn analyzeInstRepeat(mod: *Module, scope: *Scope, inst: *zir.Inst.Repeat) InnerError!*Inst {
- return mod.fail(scope, inst.base.src, "TODO analyze .repeat ZIR", .{});
-}
-
fn analyzeInstLoop(mod: *Module, scope: *Scope, inst: *zir.Inst.Loop) InnerError!*Inst {
- return mod.fail(scope, inst.base.src, "TODO analyze .loop ZIR", .{});
+ const parent_block = scope.cast(Scope.Block).?;
+
+ // Reserve space for a Loop instruction so that generated Break instructions can
+ // point to it, even if it doesn't end up getting used because the code ends up being
+ // comptime evaluated.
+ const loop_inst = try parent_block.arena.create(Inst.Loop);
+ loop_inst.* = .{
+ .base = .{
+ .tag = Inst.Loop.base_tag,
+ .ty = Type.initTag(.noreturn),
+ .src = inst.base.src,
+ },
+ .body = undefined,
+ };
+
+ var child_block: Scope.Block = .{
+ .parent = parent_block,
+ .func = parent_block.func,
+ .decl = parent_block.decl,
+ .instructions = .{},
+ .arena = parent_block.arena,
+ };
+ defer child_block.instructions.deinit(mod.gpa);
+
+ try analyzeBody(mod, &child_block.base, inst.positionals.body);
+
+ // Loop repetition is implied so the last instruction may or may not be a noreturn instruction.
+
+ try parent_block.instructions.append(mod.gpa, &loop_inst.base);
+ loop_inst.body = .{ .instructions = try parent_block.arena.dupe(*Inst, child_block.instructions.items) };
+ return &loop_inst.base;
}
fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerError!*Inst {
From 576581bd7b78825ce27d6a73fc42dd90eab8fbd1 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 13 Aug 2020 15:54:46 -0700
Subject: [PATCH 078/153] stage1: fix enums having wrong debug info
The debug info was not incorrect, but debuggers such as GDB could not
handle non-power-of-two enum tag sizes. Now the debug info always reports
enum tag sizes rounded up to the type's power-of-two ABI size.
closes #4526
closes #5432
---
src/analyze.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index e3e57ee34d..6a4a2ec052 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -8623,7 +8623,7 @@ static void resolve_llvm_types_enum(CodeGen *g, ZigType *enum_type, ResolveStatu
enum_type->llvm_type = get_llvm_type(g, tag_int_type);
// create debug type for tag
- uint64_t tag_debug_size_in_bits = tag_int_type->size_in_bits;
+ uint64_t tag_debug_size_in_bits = 8*tag_int_type->abi_size;
uint64_t tag_debug_align_in_bits = 8*tag_int_type->abi_align;
ZigLLVMDIType *tag_di_type = ZigLLVMCreateDebugEnumerationType(g->dbuilder,
ZigLLVMFileToScope(import->data.structure.root_struct->di_file), buf_ptr(&enum_type->name),
From 28a9da8bfc1a791e0eaf8c643827da88ea70f7d1 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 13 Aug 2020 20:27:25 -0700
Subject: [PATCH 079/153] stage2: implement while loops (bool condition)
* introduce a dump() function on Module.Fn which helpfully prints to
stderr the ZIR representation of a function (can be called before
attempting to codegen it). This is a debugging tool.
* implement x86 codegen for loops
* liveness: fix analysis of conditional branches. The logic was buggy
in a couple ways:
- it never actually saved the results into the IR instruction (fixed now)
- it incorrectly labeled operands as dying when their true death was
after the conditional branch ended (fixed now)
* zir rendering is enhanced to show liveness analysis results. this
helps when debugging liveness analysis.
* fix bug in zir rendering not numbering instructions correctly
closes #6021
---
lib/std/math.zig | 1 +
src-self-hosted/Module.zig | 17 +++
src-self-hosted/codegen.zig | 33 ++++-
src-self-hosted/ir.zig | 16 +-
src-self-hosted/link.zig | 2 +
src-self-hosted/liveness.zig | 125 ++++++++++------
src-self-hosted/zir.zig | 259 ++++++++++++++++++++++-----------
test/stage2/compare_output.zig | 39 +++++
8 files changed, 355 insertions(+), 137 deletions(-)
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 17237ea9f0..2c9065a89e 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -747,6 +747,7 @@ test "math.negateCast" {
/// Cast an integer to a different integer type. If the value doesn't fit,
/// return an error.
+/// TODO make this an optional not an error.
pub fn cast(comptime T: type, x: anytype) (error{Overflow}!T) {
comptime assert(@typeInfo(T) == .Int); // must pass an integer
comptime assert(@typeInfo(@TypeOf(x)) == .Int); // must pass an integer
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 8084c8ff42..03cb66d544 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -301,6 +301,23 @@ pub const Fn = struct {
body: zir.Module.Body,
arena: std.heap.ArenaAllocator.State,
};
+
+ /// For debugging purposes.
+ pub fn dump(self: *Fn, mod: Module) void {
+ std.debug.print("Module.Function(name={}) ", .{self.owner_decl.name});
+ switch (self.analysis) {
+ .queued => {
+ std.debug.print("queued\n", .{});
+ },
+ .in_progress => {
+ std.debug.print("in_progress\n", .{});
+ },
+ else => {
+ std.debug.print("\n", .{});
+ zir.dumpFn(mod, self);
+ },
+ }
+ }
};
pub const Scope = struct {
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index d25fa65f47..29bcf1bd01 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -23,8 +23,6 @@ pub const BlockData = struct {
relocs: std.ArrayListUnmanaged(Reloc) = .{},
};
-pub const LoopData = struct { };
-
pub const Reloc = union(enum) {
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
@@ -556,7 +554,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genBody(self: *Self, body: ir.Body) InnerError!void {
- const inst_table = &self.branch_stack.items[0].inst_table;
+ const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+ const inst_table = &branch.inst_table;
for (body.instructions) |inst| {
const new_inst = try self.genFuncInst(inst);
try inst_table.putNoClobber(self.gpa, inst, new_inst);
@@ -1284,6 +1283,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genCondBr(self: *Self, inst: *ir.Inst.CondBr) !MCValue {
+ // TODO Rework this so that the arch-independent logic isn't buried and duplicated.
switch (arch) {
.x86_64 => {
try self.code.ensureCapacity(self.code.items.len + 6);
@@ -1336,6 +1336,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genX86CondBr(self: *Self, inst: *ir.Inst.CondBr, opcode: u8) !MCValue {
+ // TODO deal with liveness / deaths condbr's then_entry_deaths and else_entry_deaths
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode });
const reloc = Reloc{ .rel32 = self.code.items.len };
self.code.items.len += 4;
@@ -1360,14 +1361,36 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genLoop(self: *Self, inst: *ir.Inst.Loop) !MCValue {
- return self.fail(inst.base.src, "TODO codegen loop", .{});
+ // A loop is a setup to be able to jump back to the beginning.
+ const start_index = self.code.items.len;
+ try self.genBody(inst.body);
+ try self.jump(inst.base.src, start_index);
+ return MCValue.unreach;
+ }
+
+ /// Send control flow to the `index` of `self.code`.
+ fn jump(self: *Self, src: usize, index: usize) !void {
+ switch (arch) {
+ .i386, .x86_64 => {
+ try self.code.ensureCapacity(self.code.items.len + 5);
+ if (math.cast(i8, @intCast(i32, index) - (@intCast(i32, self.code.items.len + 2)))) |delta| {
+ self.code.appendAssumeCapacity(0xeb); // jmp rel8
+ self.code.appendAssumeCapacity(@bitCast(u8, delta));
+ } else |_| {
+ const delta = @intCast(i32, index) - (@intCast(i32, self.code.items.len + 5));
+ self.code.appendAssumeCapacity(0xe9); // jmp rel32
+ mem.writeIntLittle(i32, self.code.addManyAsArrayAssumeCapacity(4), delta);
+ }
+ },
+ else => return self.fail(src, "TODO implement jump for {}", .{self.target.cpu.arch}),
+ }
}
fn genBlock(self: *Self, inst: *ir.Inst.Block) !MCValue {
if (inst.base.ty.hasCodeGenBits()) {
return self.fail(inst.base.src, "TODO codegen Block with non-void type", .{});
}
- // A block is nothing but a setup to be able to jump to the end.
+ // A block is a setup to be able to jump to the end.
defer inst.codegen.relocs.deinit(self.gpa);
try self.genBody(inst.body);
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 81f99f53ba..9ae9518efe 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -372,11 +372,11 @@ pub const Inst = struct {
then_body: Body,
else_body: Body,
/// Set of instructions whose lifetimes end at the start of one of the branches.
- /// The `true` branch is first: `deaths[0..true_death_count]`.
- /// The `false` branch is next: `(deaths + true_death_count)[..false_death_count]`.
+ /// The `then` branch is first: `deaths[0..then_death_count]`.
+ /// The `else` branch is next: `(deaths + then_death_count)[0..else_death_count]`.
deaths: [*]*Inst = undefined,
- true_death_count: u32 = 0,
- false_death_count: u32 = 0,
+ then_death_count: u32 = 0,
+ else_death_count: u32 = 0,
pub fn operandCount(self: *const CondBr) usize {
return 1;
@@ -390,6 +390,12 @@ pub const Inst = struct {
return null;
}
+ pub fn thenDeaths(self: *const CondBr) []*Inst {
+ return self.deaths[0..self.then_death_count];
+ }
+ pub fn elseDeaths(self: *const CondBr) []*Inst {
+ return (self.deaths + self.then_death_count)[0..self.else_death_count];
+ }
};
pub const Constant = struct {
@@ -411,8 +417,6 @@ pub const Inst = struct {
base: Inst,
body: Body,
- /// This memory is reserved for codegen code to do whatever it needs to here.
- codegen: codegen.LoopData = .{},
pub fn operandCount(self: *const Loop) usize {
return 0;
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 7c5e645fb5..a84edb65cf 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -1887,6 +1887,8 @@ pub const File = struct {
else => false,
};
if (is_fn) {
+ //typed_value.val.cast(Value.Payload.Function).?.func.dump(module.*);
+
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureCapacity(26);
diff --git a/src-self-hosted/liveness.zig b/src-self-hosted/liveness.zig
index e8f80f30d5..886d480e19 100644
--- a/src-self-hosted/liveness.zig
+++ b/src-self-hosted/liveness.zig
@@ -16,20 +16,42 @@ pub fn analyze(
var table = std.AutoHashMap(*ir.Inst, void).init(gpa);
defer table.deinit();
try table.ensureCapacity(body.instructions.len);
- try analyzeWithTable(arena, &table, body);
+ try analyzeWithTable(arena, &table, null, body);
}
-fn analyzeWithTable(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), body: ir.Body) error{OutOfMemory}!void {
+fn analyzeWithTable(
+ arena: *std.mem.Allocator,
+ table: *std.AutoHashMap(*ir.Inst, void),
+ new_set: ?*std.AutoHashMap(*ir.Inst, void),
+ body: ir.Body,
+) error{OutOfMemory}!void {
var i: usize = body.instructions.len;
- while (i != 0) {
- i -= 1;
- const base = body.instructions[i];
- try analyzeInst(arena, table, base);
+ if (new_set) |ns| {
+ // We are only interested in doing this for instructions which are born
+ // before a conditional branch, so after obtaining the new set for
+ // each branch we prune the instructions which were born within.
+ while (i != 0) {
+ i -= 1;
+ const base = body.instructions[i];
+ _ = ns.remove(base);
+ try analyzeInst(arena, table, new_set, base);
+ }
+ } else {
+ while (i != 0) {
+ i -= 1;
+ const base = body.instructions[i];
+ try analyzeInst(arena, table, new_set, base);
+ }
}
}
-fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), base: *ir.Inst) error{OutOfMemory}!void {
+fn analyzeInst(
+ arena: *std.mem.Allocator,
+ table: *std.AutoHashMap(*ir.Inst, void),
+ new_set: ?*std.AutoHashMap(*ir.Inst, void),
+ base: *ir.Inst,
+) error{OutOfMemory}!void {
if (table.contains(base)) {
base.deaths = 0;
} else {
@@ -42,56 +64,70 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void
.constant => return,
.block => {
const inst = base.castTag(.block).?;
- try analyzeWithTable(arena, table, inst.body);
+ try analyzeWithTable(arena, table, new_set, inst.body);
// We let this continue so that it can possibly mark the block as
// unreferenced below.
},
+ .loop => {
+ const inst = base.castTag(.loop).?;
+ try analyzeWithTable(arena, table, new_set, inst.body);
+ return; // Loop has no operands and it is always unreferenced.
+ },
.condbr => {
const inst = base.castTag(.condbr).?;
- var true_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
- defer true_table.deinit();
- try true_table.ensureCapacity(inst.then_body.instructions.len);
- try analyzeWithTable(arena, &true_table, inst.then_body);
-
- var false_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
- defer false_table.deinit();
- try false_table.ensureCapacity(inst.else_body.instructions.len);
- try analyzeWithTable(arena, &false_table, inst.else_body);
// Each death that occurs inside one branch, but not the other, needs
// to be added as a death immediately upon entering the other branch.
- // During the iteration of the table, we additionally propagate the
- // deaths to the parent table.
- var true_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
- defer true_entry_deaths.deinit();
- var false_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
- defer false_entry_deaths.deinit();
- {
- var it = false_table.iterator();
- while (it.next()) |entry| {
- const false_death = entry.key;
- if (!true_table.contains(false_death)) {
- try true_entry_deaths.append(false_death);
- // Here we are only adding to the parent table if the following iteration
- // would miss it.
- try table.putNoClobber(false_death, {});
- }
+
+ var then_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
+ defer then_table.deinit();
+ try analyzeWithTable(arena, table, &then_table, inst.then_body);
+
+ // Reset the table back to its state from before the branch.
+ for (then_table.items()) |entry| {
+ table.removeAssertDiscard(entry.key);
+ }
+
+ var else_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
+ defer else_table.deinit();
+ try analyzeWithTable(arena, table, &else_table, inst.else_body);
+
+ var then_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
+ defer then_entry_deaths.deinit();
+ var else_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
+ defer else_entry_deaths.deinit();
+
+ for (else_table.items()) |entry| {
+ const else_death = entry.key;
+ if (!then_table.contains(else_death)) {
+ try then_entry_deaths.append(else_death);
}
}
- {
- var it = true_table.iterator();
- while (it.next()) |entry| {
- const true_death = entry.key;
- try table.putNoClobber(true_death, {});
- if (!false_table.contains(true_death)) {
- try false_entry_deaths.append(true_death);
- }
+ // This loop is the same, except it's for the then branch, and it additionally
+ // has to put its items back into the table to undo the reset.
+ for (then_table.items()) |entry| {
+ const then_death = entry.key;
+ if (!else_table.contains(then_death)) {
+ try else_entry_deaths.append(then_death);
+ }
+ _ = try table.put(then_death, {});
+ }
+ // Now we have to correctly populate new_set.
+ if (new_set) |ns| {
+ try ns.ensureCapacity(ns.items().len + then_table.items().len + else_table.items().len);
+ for (then_table.items()) |entry| {
+ _ = ns.putAssumeCapacity(entry.key, {});
+ }
+ for (else_table.items()) |entry| {
+ _ = ns.putAssumeCapacity(entry.key, {});
}
}
- inst.true_death_count = std.math.cast(@TypeOf(inst.true_death_count), true_entry_deaths.items.len) catch return error.OutOfMemory;
- inst.false_death_count = std.math.cast(@TypeOf(inst.false_death_count), false_entry_deaths.items.len) catch return error.OutOfMemory;
- const allocated_slice = try arena.alloc(*ir.Inst, true_entry_deaths.items.len + false_entry_deaths.items.len);
+ inst.then_death_count = std.math.cast(@TypeOf(inst.then_death_count), then_entry_deaths.items.len) catch return error.OutOfMemory;
+ inst.else_death_count = std.math.cast(@TypeOf(inst.else_death_count), else_entry_deaths.items.len) catch return error.OutOfMemory;
+ const allocated_slice = try arena.alloc(*ir.Inst, then_entry_deaths.items.len + else_entry_deaths.items.len);
inst.deaths = allocated_slice.ptr;
+ std.mem.copy(*ir.Inst, inst.thenDeaths(), then_entry_deaths.items);
+ std.mem.copy(*ir.Inst, inst.elseDeaths(), else_entry_deaths.items);
// Continue on with the instruction analysis. The following code will find the condition
// instruction, and the deaths flag for the CondBr instruction will indicate whether the
@@ -108,6 +144,7 @@ fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void
if (prev == null) {
// Death.
base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i;
+ if (new_set) |ns| try ns.putNoClobber(operand, {});
}
}
} else {
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 974e6f9ef6..ef9351047b 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -822,6 +822,16 @@ pub const Module = struct {
decls: []*Decl,
arena: std.heap.ArenaAllocator,
error_msg: ?ErrorMsg = null,
+ metadata: std.AutoHashMap(*Inst, MetaData),
+ body_metadata: std.AutoHashMap(*Body, BodyMetaData),
+
+ pub const MetaData = struct {
+ deaths: ir.Inst.DeathsInt,
+ };
+
+ pub const BodyMetaData = struct {
+ deaths: []*Inst,
+ };
pub const Body = struct {
instructions: []*Inst,
@@ -878,6 +888,7 @@ pub const Module = struct {
.loop_table = std.AutoHashMap(*Inst.Loop, []const u8).init(allocator),
.arena = std.heap.ArenaAllocator.init(allocator),
.indent = 2,
+ .next_instr_index = undefined,
};
defer write.arena.deinit();
defer write.inst_table.deinit();
@@ -889,15 +900,10 @@ pub const Module = struct {
for (self.decls) |decl, decl_i| {
try write.inst_table.putNoClobber(decl.inst, .{ .inst = decl.inst, .index = null, .name = decl.name });
-
- if (decl.inst.cast(Inst.Fn)) |fn_inst| {
- for (fn_inst.positionals.body.instructions) |inst, inst_i| {
- try write.inst_table.putNoClobber(inst, .{ .inst = inst, .index = inst_i, .name = undefined });
- }
- }
}
for (self.decls) |decl, i| {
+ write.next_instr_index = 0;
try stream.print("@{} ", .{decl.name});
try write.writeInstToStream(stream, decl.inst);
try stream.writeByte('\n');
@@ -914,6 +920,7 @@ const Writer = struct {
loop_table: std.AutoHashMap(*Inst.Loop, []const u8),
arena: std.heap.ArenaAllocator,
indent: usize,
+ next_instr_index: usize,
fn writeInstToStream(
self: *Writer,
@@ -944,7 +951,7 @@ const Writer = struct {
if (i != 0) {
try stream.writeAll(", ");
}
- try self.writeParamToStream(stream, @field(inst.positionals, arg_field.name));
+ try self.writeParamToStream(stream, &@field(inst.positionals, arg_field.name));
}
comptime var need_comma = pos_fields.len != 0;
@@ -954,13 +961,13 @@ const Writer = struct {
if (@field(inst.kw_args, arg_field.name)) |non_optional| {
if (need_comma) try stream.writeAll(", ");
try stream.print("{}=", .{arg_field.name});
- try self.writeParamToStream(stream, non_optional);
+ try self.writeParamToStream(stream, &non_optional);
need_comma = true;
}
} else {
if (need_comma) try stream.writeAll(", ");
try stream.print("{}=", .{arg_field.name});
- try self.writeParamToStream(stream, @field(inst.kw_args, arg_field.name));
+ try self.writeParamToStream(stream, &@field(inst.kw_args, arg_field.name));
need_comma = true;
}
}
@@ -968,7 +975,8 @@ const Writer = struct {
try stream.writeByte(')');
}
- fn writeParamToStream(self: *Writer, stream: anytype, param: anytype) !void {
+ fn writeParamToStream(self: *Writer, stream: anytype, param_ptr: anytype) !void {
+ const param = param_ptr.*;
if (@typeInfo(@TypeOf(param)) == .Enum) {
return stream.writeAll(@tagName(param));
}
@@ -986,18 +994,36 @@ const Writer = struct {
},
Module.Body => {
try stream.writeAll("{\n");
- for (param.instructions) |inst, i| {
+ if (self.module.body_metadata.get(param_ptr)) |metadata| {
+ if (metadata.deaths.len > 0) {
+ try stream.writeByteNTimes(' ', self.indent);
+ try stream.writeAll("; deaths={");
+ for (metadata.deaths) |death, i| {
+ if (i != 0) try stream.writeAll(", ");
+ try self.writeInstParamToStream(stream, death);
+ }
+ try stream.writeAll("}\n");
+ }
+ }
+
+ for (param.instructions) |inst| {
+ const my_i = self.next_instr_index;
+ self.next_instr_index += 1;
+ try self.inst_table.putNoClobber(inst, .{ .inst = inst, .index = my_i, .name = undefined });
try stream.writeByteNTimes(' ', self.indent);
- try stream.print("%{} ", .{i});
+ try stream.print("%{} ", .{my_i});
if (inst.cast(Inst.Block)) |block| {
- const name = try std.fmt.allocPrint(&self.arena.allocator, "label_{}", .{i});
+ const name = try std.fmt.allocPrint(&self.arena.allocator, "label_{}", .{my_i});
try self.block_table.put(block, name);
} else if (inst.cast(Inst.Loop)) |loop| {
- const name = try std.fmt.allocPrint(&self.arena.allocator, "loop_{}", .{i});
+ const name = try std.fmt.allocPrint(&self.arena.allocator, "loop_{}", .{my_i});
try self.loop_table.put(loop, name);
}
self.indent += 2;
try self.writeInstToStream(stream, inst);
+ if (self.module.metadata.get(inst)) |metadata| {
+ try stream.print(" ; deaths=0b{b}", .{metadata.deaths});
+ }
self.indent -= 2;
try stream.writeByte('\n');
}
@@ -1070,6 +1096,8 @@ pub fn parse(allocator: *Allocator, source: [:0]const u8) Allocator.Error!Module
.decls = parser.decls.toOwnedSlice(allocator),
.arena = parser.arena,
.error_msg = parser.error_msg,
+ .metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator),
+ .body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator),
};
}
@@ -1478,7 +1506,11 @@ pub fn emit(allocator: *Allocator, old_module: IrModule) !Module {
.indent = 0,
.block_table = std.AutoHashMap(*ir.Inst.Block, *Inst.Block).init(allocator),
.loop_table = std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop).init(allocator),
+ .metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator),
+ .body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator),
};
+ defer ctx.metadata.deinit();
+ defer ctx.body_metadata.deinit();
defer ctx.block_table.deinit();
defer ctx.loop_table.deinit();
defer ctx.decls.deinit(allocator);
@@ -1491,9 +1523,52 @@ pub fn emit(allocator: *Allocator, old_module: IrModule) !Module {
return Module{
.decls = ctx.decls.toOwnedSlice(allocator),
.arena = ctx.arena,
+ .metadata = ctx.metadata,
+ .body_metadata = ctx.body_metadata,
};
}
+/// For debugging purposes, prints a function representation to stderr.
+pub fn dumpFn(old_module: IrModule, module_fn: *IrModule.Fn) void {
+ const allocator = old_module.gpa;
+ var ctx: EmitZIR = .{
+ .allocator = allocator,
+ .decls = .{},
+ .arena = std.heap.ArenaAllocator.init(allocator),
+ .old_module = &old_module,
+ .next_auto_name = 0,
+ .names = std.StringHashMap(void).init(allocator),
+ .primitive_table = std.AutoHashMap(Inst.Primitive.Builtin, *Decl).init(allocator),
+ .indent = 0,
+ .block_table = std.AutoHashMap(*ir.Inst.Block, *Inst.Block).init(allocator),
+ .loop_table = std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop).init(allocator),
+ .metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator),
+ .body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator),
+ };
+ defer ctx.metadata.deinit();
+ defer ctx.body_metadata.deinit();
+ defer ctx.block_table.deinit();
+ defer ctx.loop_table.deinit();
+ defer ctx.decls.deinit(allocator);
+ defer ctx.names.deinit();
+ defer ctx.primitive_table.deinit();
+ defer ctx.arena.deinit();
+
+ const fn_ty = module_fn.owner_decl.typed_value.most_recent.typed_value.ty;
+ _ = ctx.emitFn(module_fn, 0, fn_ty) catch |err| {
+ std.debug.print("unable to dump function: {}\n", .{err});
+ return;
+ };
+ var module = Module{
+ .decls = ctx.decls.items,
+ .arena = ctx.arena,
+ .metadata = ctx.metadata,
+ .body_metadata = ctx.body_metadata,
+ };
+
+ module.dump();
+}
+
const EmitZIR = struct {
allocator: *Allocator,
arena: std.heap.ArenaAllocator,
@@ -1505,6 +1580,8 @@ const EmitZIR = struct {
indent: usize,
block_table: std.AutoHashMap(*ir.Inst.Block, *Inst.Block),
loop_table: std.AutoHashMap(*ir.Inst.Loop, *Inst.Loop),
+ metadata: std.AutoHashMap(*Inst, Module.MetaData),
+ body_metadata: std.AutoHashMap(*Module.Body, Module.BodyMetaData),
fn emit(self: *EmitZIR) !void {
// Put all the Decls in a list and sort them by name to avoid nondeterminism introduced
@@ -1604,7 +1681,7 @@ const EmitZIR = struct {
} else blk: {
break :blk (try self.emitTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val })).inst;
};
- try new_body.inst_table.putNoClobber(inst, new_inst);
+ _ = try new_body.inst_table.put(inst, new_inst);
return new_inst;
} else {
return new_body.inst_table.get(inst).?;
@@ -1655,6 +1732,70 @@ const EmitZIR = struct {
return &declref_inst.base;
}
+ fn emitFn(self: *EmitZIR, module_fn: *IrModule.Fn, src: usize, ty: Type) Allocator.Error!*Decl {
+ var inst_table = std.AutoHashMap(*ir.Inst, *Inst).init(self.allocator);
+ defer inst_table.deinit();
+
+ var instructions = std.ArrayList(*Inst).init(self.allocator);
+ defer instructions.deinit();
+
+ switch (module_fn.analysis) {
+ .queued => unreachable,
+ .in_progress => unreachable,
+ .success => |body| {
+ try self.emitBody(body, &inst_table, &instructions);
+ },
+ .sema_failure => {
+ const err_msg = self.old_module.failed_decls.get(module_fn.owner_decl).?;
+ const fail_inst = try self.arena.allocator.create(Inst.CompileError);
+ fail_inst.* = .{
+ .base = .{
+ .src = src,
+ .tag = Inst.CompileError.base_tag,
+ },
+ .positionals = .{
+ .msg = try self.arena.allocator.dupe(u8, err_msg.msg),
+ },
+ .kw_args = .{},
+ };
+ try instructions.append(&fail_inst.base);
+ },
+ .dependency_failure => {
+ const fail_inst = try self.arena.allocator.create(Inst.CompileError);
+ fail_inst.* = .{
+ .base = .{
+ .src = src,
+ .tag = Inst.CompileError.base_tag,
+ },
+ .positionals = .{
+ .msg = try self.arena.allocator.dupe(u8, "depends on another failed Decl"),
+ },
+ .kw_args = .{},
+ };
+ try instructions.append(&fail_inst.base);
+ },
+ }
+
+ const fn_type = try self.emitType(src, ty);
+
+ const arena_instrs = try self.arena.allocator.alloc(*Inst, instructions.items.len);
+ mem.copy(*Inst, arena_instrs, instructions.items);
+
+ const fn_inst = try self.arena.allocator.create(Inst.Fn);
+ fn_inst.* = .{
+ .base = .{
+ .src = src,
+ .tag = Inst.Fn.base_tag,
+ },
+ .positionals = .{
+ .fn_type = fn_type.inst,
+ .body = .{ .instructions = arena_instrs },
+ },
+ .kw_args = .{},
+ };
+ return self.emitUnnamedDecl(&fn_inst.base);
+ }
+
fn emitTypedValue(self: *EmitZIR, src: usize, typed_value: TypedValue) Allocator.Error!*Decl {
const allocator = &self.arena.allocator;
if (typed_value.val.cast(Value.Payload.DeclRef)) |decl_ref| {
@@ -1718,68 +1859,7 @@ const EmitZIR = struct {
},
.Fn => {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
-
- var inst_table = std.AutoHashMap(*ir.Inst, *Inst).init(self.allocator);
- defer inst_table.deinit();
-
- var instructions = std.ArrayList(*Inst).init(self.allocator);
- defer instructions.deinit();
-
- switch (module_fn.analysis) {
- .queued => unreachable,
- .in_progress => unreachable,
- .success => |body| {
- try self.emitBody(body, &inst_table, &instructions);
- },
- .sema_failure => {
- const err_msg = self.old_module.failed_decls.get(module_fn.owner_decl).?;
- const fail_inst = try self.arena.allocator.create(Inst.CompileError);
- fail_inst.* = .{
- .base = .{
- .src = src,
- .tag = Inst.CompileError.base_tag,
- },
- .positionals = .{
- .msg = try self.arena.allocator.dupe(u8, err_msg.msg),
- },
- .kw_args = .{},
- };
- try instructions.append(&fail_inst.base);
- },
- .dependency_failure => {
- const fail_inst = try self.arena.allocator.create(Inst.CompileError);
- fail_inst.* = .{
- .base = .{
- .src = src,
- .tag = Inst.CompileError.base_tag,
- },
- .positionals = .{
- .msg = try self.arena.allocator.dupe(u8, "depends on another failed Decl"),
- },
- .kw_args = .{},
- };
- try instructions.append(&fail_inst.base);
- },
- }
-
- const fn_type = try self.emitType(src, typed_value.ty);
-
- const arena_instrs = try self.arena.allocator.alloc(*Inst, instructions.items.len);
- mem.copy(*Inst, arena_instrs, instructions.items);
-
- const fn_inst = try self.arena.allocator.create(Inst.Fn);
- fn_inst.* = .{
- .base = .{
- .src = src,
- .tag = Inst.Fn.base_tag,
- },
- .positionals = .{
- .fn_type = fn_type.inst,
- .body = .{ .instructions = arena_instrs },
- },
- .kw_args = .{},
- };
- return self.emitUnnamedDecl(&fn_inst.base);
+ return self.emitFn(module_fn, src, typed_value.ty);
},
.Array => {
// TODO more checks to make sure this can be emitted as a string literal
@@ -1810,7 +1890,7 @@ const EmitZIR = struct {
}
}
- fn emitNoOp(self: *EmitZIR, src: usize, tag: Inst.Tag) Allocator.Error!*Inst {
+ fn emitNoOp(self: *EmitZIR, src: usize, old_inst: *ir.Inst.NoOp, tag: Inst.Tag) Allocator.Error!*Inst {
const new_inst = try self.arena.allocator.create(Inst.NoOp);
new_inst.* = .{
.base = .{
@@ -1902,10 +1982,10 @@ const EmitZIR = struct {
const new_inst = switch (inst.tag) {
.constant => unreachable, // excluded from function bodies
- .breakpoint => try self.emitNoOp(inst.src, .breakpoint),
- .unreach => try self.emitNoOp(inst.src, .@"unreachable"),
- .retvoid => try self.emitNoOp(inst.src, .returnvoid),
- .dbg_stmt => try self.emitNoOp(inst.src, .dbg_stmt),
+ .breakpoint => try self.emitNoOp(inst.src, inst.castTag(.breakpoint).?, .breakpoint),
+ .unreach => try self.emitNoOp(inst.src, inst.castTag(.unreach).?, .unreach_nocheck),
+ .retvoid => try self.emitNoOp(inst.src, inst.castTag(.retvoid).?, .returnvoid),
+ .dbg_stmt => try self.emitNoOp(inst.src, inst.castTag(.dbg_stmt).?, .dbg_stmt),
.not => try self.emitUnOp(inst.src, new_body, inst.castTag(.not).?, .boolnot),
.ret => try self.emitUnOp(inst.src, new_body, inst.castTag(.ret).?, .@"return"),
@@ -2119,10 +2199,24 @@ const EmitZIR = struct {
defer then_body.deinit();
defer else_body.deinit();
+ const then_deaths = try self.arena.allocator.alloc(*Inst, old_inst.thenDeaths().len);
+ const else_deaths = try self.arena.allocator.alloc(*Inst, old_inst.elseDeaths().len);
+
+ for (old_inst.thenDeaths()) |death, i| {
+ then_deaths[i] = try self.resolveInst(new_body, death);
+ }
+ for (old_inst.elseDeaths()) |death, i| {
+ else_deaths[i] = try self.resolveInst(new_body, death);
+ }
+
try self.emitBody(old_inst.then_body, inst_table, &then_body);
try self.emitBody(old_inst.else_body, inst_table, &else_body);
const new_inst = try self.arena.allocator.create(Inst.CondBr);
+
+ try self.body_metadata.put(&new_inst.positionals.then_body, .{ .deaths = then_deaths });
+ try self.body_metadata.put(&new_inst.positionals.else_body, .{ .deaths = else_deaths });
+
new_inst.* = .{
.base = .{
.src = inst.src,
@@ -2138,6 +2232,7 @@ const EmitZIR = struct {
break :blk &new_inst.base;
},
};
+ try self.metadata.put(new_inst, .{ .deaths = inst.deaths });
try instructions.append(new_inst);
try inst_table.put(inst, new_inst);
}
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
index 29312a8f53..f151880225 100644
--- a/test/stage2/compare_output.zig
+++ b/test/stage2/compare_output.zig
@@ -465,5 +465,44 @@ pub fn addCases(ctx: *TestContext) !void {
,
"",
);
+
+ // While loops
+ case.addCompareOutput(
+ \\export fn _start() noreturn {
+ \\ var i: u32 = 0;
+ \\ while (i < 4) : (i += 1) print();
+ \\ assert(i == 4);
+ \\
+ \\ exit();
+ \\}
+ \\
+ \\fn print() void {
+ \\ asm volatile ("syscall"
+ \\ :
+ \\ : [number] "{rax}" (1),
+ \\ [arg1] "{rdi}" (1),
+ \\ [arg2] "{rsi}" (@ptrToInt("hello\n")),
+ \\ [arg3] "{rdx}" (6)
+ \\ : "rcx", "r11", "memory"
+ \\ );
+ \\ return;
+ \\}
+ \\
+ \\pub fn assert(ok: bool) void {
+ \\ if (!ok) unreachable; // assertion failure
+ \\}
+ \\
+ \\fn exit() noreturn {
+ \\ asm volatile ("syscall"
+ \\ :
+ \\ : [number] "{rax}" (231),
+ \\ [arg1] "{rdi}" (0)
+ \\ : "rcx", "r11", "memory"
+ \\ );
+ \\ unreachable;
+ \\}
+ ,
+ "hello\nhello\nhello\nhello\n",
+ );
}
}
From 4adc052f0b8fbb5c3f5ac06cc92d2fc9bd7e409e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 14 Aug 2020 09:33:45 -0700
Subject: [PATCH 080/153] langref: fix html error
thanks tidy
---
doc/langref.html.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 4c6284ac43..7bb45c01ce 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -5142,11 +5142,11 @@ test "float widening" {
A compiler error is appropriate because this ambiguous expression leaves the compiler
two choices about the coercion.
+
- Cast {#syntax#}54.0{#endsyntax#} to {#syntax#}comptime_int{#endsyntax#} resulting in {#syntax#}@as(comptime_int, 10){#endsyntax#}, which is casted to {#syntax#}@as(f32, 10){#endsyntax#}
- Cast {#syntax#}5{#endsyntax#} to {#syntax#}comptime_float{#endsyntax#} resulting in {#syntax#}@as(comptime_float, 10.8){#endsyntax#}, which is casted to {#syntax#}@as(f32, 10.8){#endsyntax#}
-
{#code_begin|test_err#}
// Compile time coercion of float to int
test "implicit cast to comptime_int" {
From 5f7c7191ab16c4c9320c28652a0d4c4e53af0024 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 14 Aug 2020 11:28:40 -0700
Subject: [PATCH 081/153] stage2: astgen for non-labeled blocks
---
src-self-hosted/Module.zig | 2 +-
src-self-hosted/astgen.zig | 18 ++++++++++++++++--
src-self-hosted/link.zig | 4 +++-
3 files changed, 20 insertions(+), 4 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 03cb66d544..2d17765ccb 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -1343,7 +1343,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
const body_block = body_node.cast(ast.Node.Block).?;
- try astgen.blockExpr(self, params_scope, body_block);
+ _ = try astgen.blockExpr(self, params_scope, .none, body_block);
if (!fn_type.fnReturnType().isNoReturn() and (gen_scope.instructions.items.len == 0 or
!gen_scope.instructions.items[gen_scope.instructions.items.len - 1].tag.isNoReturn()))
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index d34c2dc458..3c149cd3dd 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -107,11 +107,17 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.NullLiteral => return rlWrap(mod, scope, rl, try nullLiteral(mod, scope, node.castTag(.NullLiteral).?)),
.OptionalType => return rlWrap(mod, scope, rl, try optionalType(mod, scope, node.castTag(.OptionalType).?)),
.UnwrapOptional => return unwrapOptional(mod, scope, rl, node.castTag(.UnwrapOptional).?),
+ .Block => return blockExpr(mod, scope, rl, node.castTag(.Block).?),
else => return mod.failNode(scope, node, "TODO implement astgen.Expr for {}", .{@tagName(node.tag)}),
}
}
-pub fn blockExpr(mod: *Module, parent_scope: *Scope, block_node: *ast.Node.Block) !void {
+pub fn blockExpr(
+ mod: *Module,
+ parent_scope: *Scope,
+ rl: ResultLoc,
+ block_node: *ast.Node.Block,
+) InnerError!*zir.Inst {
const tracy = trace(@src());
defer tracy.end();
@@ -122,9 +128,11 @@ pub fn blockExpr(mod: *Module, parent_scope: *Scope, block_node: *ast.Node.Block
var block_arena = std.heap.ArenaAllocator.init(mod.gpa);
defer block_arena.deinit();
+ const tree = parent_scope.tree();
+
var scope = parent_scope;
for (block_node.statements()) |statement| {
- const src = scope.tree().token_locs[statement.firstToken()].start;
+ const src = tree.token_locs[statement.firstToken()].start;
_ = try addZIRNoOp(mod, scope, src, .dbg_stmt);
switch (statement.tag) {
.VarDecl => {
@@ -154,6 +162,12 @@ pub fn blockExpr(mod: *Module, parent_scope: *Scope, block_node: *ast.Node.Block
},
}
}
+
+ const src = tree.token_locs[block_node.firstToken()].start;
+ return addZIRInstConst(mod, parent_scope, src, .{
+ .ty = Type.initTag(.void),
+ .val = Value.initTag(.void_value),
+ });
}
fn varDecl(
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index a84edb65cf..1faec19b60 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -1887,7 +1887,9 @@ pub const File = struct {
else => false,
};
if (is_fn) {
- //typed_value.val.cast(Value.Payload.Function).?.func.dump(module.*);
+ //if (mem.eql(u8, mem.spanZ(decl.name), "add")) {
+ // typed_value.val.cast(Value.Payload.Function).?.func.dump(module.*);
+ //}
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureCapacity(26);
From 05f80311bc08fc1024d2dee628f871dad0944a92 Mon Sep 17 00:00:00 2001
From: Michael Dusan
Date: Fri, 14 Aug 2020 11:21:40 -0400
Subject: [PATCH 082/153] ci linux: bump qemu-5.1.0
---
ci/azure/linux_script | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ci/azure/linux_script b/ci/azure/linux_script
index a55de24496..fb4caf18c0 100755
--- a/ci/azure/linux_script
+++ b/ci/azure/linux_script
@@ -14,7 +14,7 @@ sudo apt-get remove -y llvm-*
sudo rm -rf /usr/local/*
sudo apt-get install -y libxml2-dev libclang-10-dev llvm-10 llvm-10-dev liblld-10-dev cmake s3cmd gcc-7 g++-7 ninja-build tidy
-QEMUBASE="qemu-linux-x86_64-5.0.0-49ee115552"
+QEMUBASE="qemu-linux-x86_64-5.1.0"
wget https://ziglang.org/deps/$QEMUBASE.tar.xz
tar xf $QEMUBASE.tar.xz
PATH=$PWD/$QEMUBASE/bin:$PATH
From 7a39a038dbe99b5189591378eef89ae5c023806d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 14 Aug 2020 13:08:41 -0700
Subject: [PATCH 083/153] stage2: proper semantic analysis of improper
returning of implicit void
---
src-self-hosted/Module.zig | 4 ++--
src-self-hosted/zir_sema.zig | 22 ++++++++++++++++++++--
test/stage2/cbe.zig | 18 ++++++++++++++----
test/stage2/compare_output.zig | 5 +++++
4 files changed, 41 insertions(+), 8 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 2d17765ccb..a172a4b679 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -1345,8 +1345,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
_ = try astgen.blockExpr(self, params_scope, .none, body_block);
- if (!fn_type.fnReturnType().isNoReturn() and (gen_scope.instructions.items.len == 0 or
- !gen_scope.instructions.items[gen_scope.instructions.items.len - 1].tag.isNoReturn()))
+ if (gen_scope.instructions.items.len == 0 or
+ !gen_scope.instructions.items[gen_scope.instructions.items.len - 1].tag.isNoReturn())
{
const src = tree.token_locs[body_block.rbrace].start;
_ = try astgen.addZIRNoOp(self, &gen_scope.base, src, .returnvoid);
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 97c47db9b6..862df4ec9a 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -112,8 +112,17 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
}
pub fn analyzeBody(mod: *Module, scope: *Scope, body: zir.Module.Body) !void {
- for (body.instructions) |src_inst| {
- src_inst.analyzed_inst = try analyzeInst(mod, scope, src_inst);
+ for (body.instructions) |src_inst, i| {
+ const analyzed_inst = try analyzeInst(mod, scope, src_inst);
+ src_inst.analyzed_inst = analyzed_inst;
+ if (analyzed_inst.ty.zigTypeTag() == .NoReturn) {
+ for (body.instructions[i..]) |unreachable_inst| {
+ if (unreachable_inst.castTag(.dbg_stmt)) |dbg_stmt| {
+ return mod.fail(scope, dbg_stmt.base.src, "unreachable code", .{});
+ }
+ }
+ break;
+ }
}
}
@@ -1216,6 +1225,15 @@ fn analyzeInstRet(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!
fn analyzeInstRetVoid(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerError!*Inst {
const b = try mod.requireRuntimeBlock(scope, inst.base.src);
+ if (b.func) |func| {
+ // Need to emit a compile error if returning void is not allowed.
+ const void_inst = try mod.constVoid(scope, inst.base.src);
+ const fn_ty = func.owner_decl.typed_value.most_recent.typed_value.ty;
+ const casted_void = try mod.coerce(scope, fn_ty.fnReturnType(), void_inst);
+ if (casted_void.ty.zigTypeTag() != .Void) {
+ return mod.addUnOp(b, inst.base.src, Type.initTag(.noreturn), .ret, casted_void);
+ }
+ }
return mod.addNoOp(b, inst.base.src, Type.initTag(.noreturn), .retvoid);
}
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig
index 5e2d56b5ed..0608221866 100644
--- a/test/stage2/cbe.zig
+++ b/test/stage2/cbe.zig
@@ -10,13 +10,19 @@ const linux_x64 = std.zig.CrossTarget{
pub fn addCases(ctx: *TestContext) !void {
ctx.c("empty start function", linux_x64,
- \\export fn _start() noreturn {}
+ \\export fn _start() noreturn {
+ \\ unreachable;
+ \\}
,
- \\zig_noreturn void _start(void) {}
+ \\zig_noreturn void _start(void) {
+ \\ zig_unreachable();
+ \\}
\\
);
ctx.c("less empty start function", linux_x64,
- \\fn main() noreturn {}
+ \\fn main() noreturn {
+ \\ unreachable;
+ \\}
\\
\\export fn _start() noreturn {
\\ main();
@@ -28,7 +34,9 @@ pub fn addCases(ctx: *TestContext) !void {
\\ main();
\\}
\\
- \\zig_noreturn void main(void) {}
+ \\zig_noreturn void main(void) {
+ \\ zig_unreachable();
+ \\}
\\
);
// TODO: implement return values
@@ -40,6 +48,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ : [number] "{rax}" (231),
\\ [arg1] "{rdi}" (0)
\\ );
+ \\ unreachable;
\\}
\\
\\export fn _start() noreturn {
@@ -62,6 +71,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ register size_t rax_constant __asm__("rax") = 231;
\\ register size_t rdi_constant __asm__("rdi") = 0;
\\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
+ \\ zig_unreachable();
\\}
\\
);
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
index f151880225..4664d001fd 100644
--- a/test/stage2/compare_output.zig
+++ b/test/stage2/compare_output.zig
@@ -26,6 +26,11 @@ pub fn addCases(ctx: *TestContext) !void {
case.addError("", &[_][]const u8{":1:1: error: no entry point found"});
+ case.addError(
+ \\export fn _start() noreturn {
+ \\}
+ , &[_][]const u8{":2:1: error: expected noreturn, found void"});
+
// Regular old hello world
case.addCompareOutput(
\\export fn _start() noreturn {
From 9a5a1013a833229e1d12588615c1a05644f76cc5 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 14 Aug 2020 15:27:48 -0700
Subject: [PATCH 084/153] std.zig.ast: extract out Node.LabeledBlock from
Node.Block
This is part of an ongoing effort to reduce the size of the in-memory AST. This
enum flattening pattern is widespread throughout the self-hosted
compiler.
This is an API-breaking change for consumers of the self-hosted parser.
---
lib/std/zig/ast.zig | 92 ++++++++++++++++++++++++++++-----
lib/std/zig/parse.zig | 66 ++++++++++++-----------
lib/std/zig/render.zig | 39 +++++++++++---
src-self-hosted/Module.zig | 2 +-
src-self-hosted/astgen.zig | 40 ++++++++------
src-self-hosted/translate_c.zig | 90 ++++++++++++++++----------------
6 files changed, 220 insertions(+), 109 deletions(-)
diff --git a/lib/std/zig/ast.zig b/lib/std/zig/ast.zig
index 6b2b8b4cf2..9258fc58d0 100644
--- a/lib/std/zig/ast.zig
+++ b/lib/std/zig/ast.zig
@@ -526,6 +526,7 @@ pub const Node = struct {
Comptime,
Nosuspend,
Block,
+ LabeledBlock,
// Misc
DocComment,
@@ -654,6 +655,7 @@ pub const Node = struct {
.Comptime => Comptime,
.Nosuspend => Nosuspend,
.Block => Block,
+ .LabeledBlock => LabeledBlock,
.DocComment => DocComment,
.SwitchCase => SwitchCase,
.SwitchElse => SwitchElse,
@@ -666,6 +668,13 @@ pub const Node = struct {
.FieldInitializer => FieldInitializer,
};
}
+
+ pub fn isBlock(tag: Tag) bool {
+ return switch (tag) {
+ .Block, .LabeledBlock => true,
+ else => false,
+ };
+ }
};
/// Prefer `castTag` to this.
@@ -729,6 +738,7 @@ pub const Node = struct {
.Root,
.ContainerField,
.Block,
+ .LabeledBlock,
.Payload,
.PointerPayload,
.PointerIndexPayload,
@@ -739,6 +749,7 @@ pub const Node = struct {
.DocComment,
.TestDecl,
=> return false,
+
.While => {
const while_node = @fieldParentPtr(While, "base", n);
if (while_node.@"else") |@"else"| {
@@ -746,7 +757,7 @@ pub const Node = struct {
continue;
}
- return while_node.body.tag != .Block;
+ return !while_node.body.tag.isBlock();
},
.For => {
const for_node = @fieldParentPtr(For, "base", n);
@@ -755,7 +766,7 @@ pub const Node = struct {
continue;
}
- return for_node.body.tag != .Block;
+ return !for_node.body.tag.isBlock();
},
.If => {
const if_node = @fieldParentPtr(If, "base", n);
@@ -764,7 +775,7 @@ pub const Node = struct {
continue;
}
- return if_node.body.tag != .Block;
+ return !if_node.body.tag.isBlock();
},
.Else => {
const else_node = @fieldParentPtr(Else, "base", n);
@@ -773,29 +784,40 @@ pub const Node = struct {
},
.Defer => {
const defer_node = @fieldParentPtr(Defer, "base", n);
- return defer_node.expr.tag != .Block;
+ return !defer_node.expr.tag.isBlock();
},
.Comptime => {
const comptime_node = @fieldParentPtr(Comptime, "base", n);
- return comptime_node.expr.tag != .Block;
+ return !comptime_node.expr.tag.isBlock();
},
.Suspend => {
const suspend_node = @fieldParentPtr(Suspend, "base", n);
if (suspend_node.body) |body| {
- return body.tag != .Block;
+ return !body.tag.isBlock();
}
return true;
},
.Nosuspend => {
const nosuspend_node = @fieldParentPtr(Nosuspend, "base", n);
- return nosuspend_node.expr.tag != .Block;
+ return !nosuspend_node.expr.tag.isBlock();
},
else => return true,
}
}
}
+ /// Asserts the node is a Block or LabeledBlock and returns the statements slice.
+ pub fn blockStatements(base: *Node) []*Node {
+ if (base.castTag(.Block)) |block| {
+ return block.statements();
+ } else if (base.castTag(.LabeledBlock)) |labeled_block| {
+ return labeled_block.statements();
+ } else {
+ unreachable;
+ }
+ }
+
pub fn dump(self: *Node, indent: usize) void {
{
var i: usize = 0;
@@ -1460,7 +1482,6 @@ pub const Node = struct {
statements_len: NodeIndex,
lbrace: TokenIndex,
rbrace: TokenIndex,
- label: ?TokenIndex,
/// After this the caller must initialize the statements list.
pub fn alloc(allocator: *mem.Allocator, statements_len: NodeIndex) !*Block {
@@ -1483,10 +1504,6 @@ pub const Node = struct {
}
pub fn firstToken(self: *const Block) TokenIndex {
- if (self.label) |label| {
- return label;
- }
-
return self.lbrace;
}
@@ -1509,6 +1526,57 @@ pub const Node = struct {
}
};
+ /// The statements of the block follow LabeledBlock directly in memory.
+ pub const LabeledBlock = struct {
+ base: Node = Node{ .tag = .LabeledBlock },
+ statements_len: NodeIndex,
+ lbrace: TokenIndex,
+ rbrace: TokenIndex,
+ label: TokenIndex,
+
+ /// After this the caller must initialize the statements list.
+ pub fn alloc(allocator: *mem.Allocator, statements_len: NodeIndex) !*LabeledBlock {
+ const bytes = try allocator.alignedAlloc(u8, @alignOf(LabeledBlock), sizeInBytes(statements_len));
+ return @ptrCast(*LabeledBlock, bytes.ptr);
+ }
+
+ pub fn free(self: *LabeledBlock, allocator: *mem.Allocator) void {
+ const bytes = @ptrCast([*]u8, self)[0..sizeInBytes(self.statements_len)];
+ allocator.free(bytes);
+ }
+
+ pub fn iterate(self: *const LabeledBlock, index: usize) ?*Node {
+ var i = index;
+
+ if (i < self.statements_len) return self.statementsConst()[i];
+ i -= self.statements_len;
+
+ return null;
+ }
+
+ pub fn firstToken(self: *const LabeledBlock) TokenIndex {
+ return self.label;
+ }
+
+ pub fn lastToken(self: *const LabeledBlock) TokenIndex {
+ return self.rbrace;
+ }
+
+ pub fn statements(self: *LabeledBlock) []*Node {
+ const decls_start = @ptrCast([*]u8, self) + @sizeOf(LabeledBlock);
+ return @ptrCast([*]*Node, decls_start)[0..self.statements_len];
+ }
+
+ pub fn statementsConst(self: *const LabeledBlock) []const *Node {
+ const decls_start = @ptrCast([*]const u8, self) + @sizeOf(LabeledBlock);
+ return @ptrCast([*]const *Node, decls_start)[0..self.statements_len];
+ }
+
+ fn sizeInBytes(statements_len: NodeIndex) usize {
+ return @sizeOf(LabeledBlock) + @sizeOf(*Node) * @as(usize, statements_len);
+ }
+ };
+
pub const Defer = struct {
base: Node = Node{ .tag = .Defer },
defer_token: TokenIndex,
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 70a305007b..e0eb2dd895 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -364,9 +364,10 @@ const Parser = struct {
const name_node = try p.expectNode(parseStringLiteralSingle, .{
.ExpectedStringLiteral = .{ .token = p.tok_i },
});
- const block_node = try p.expectNode(parseBlock, .{
- .ExpectedLBrace = .{ .token = p.tok_i },
- });
+ const block_node = (try p.parseBlock(null)) orelse {
+ try p.errors.append(p.gpa, .{ .ExpectedLBrace = .{ .token = p.tok_i } });
+ return error.ParseError;
+ };
const test_node = try p.arena.allocator.create(Node.TestDecl);
test_node.* = .{
@@ -540,12 +541,14 @@ const Parser = struct {
if (p.eatToken(.Semicolon)) |_| {
break :blk null;
}
- break :blk try p.expectNodeRecoverable(parseBlock, .{
+ const body_block = (try p.parseBlock(null)) orelse {
// Since parseBlock only return error.ParseError on
// a missing '}' we can assume this function was
// supposed to end here.
- .ExpectedSemiOrLBrace = .{ .token = p.tok_i },
- });
+ try p.errors.append(p.gpa, .{ .ExpectedSemiOrLBrace = .{ .token = p.tok_i } });
+ break :blk null;
+ };
+ break :blk body_block;
},
.as_type => null,
};
@@ -823,10 +826,7 @@ const Parser = struct {
var colon: TokenIndex = undefined;
const label_token = p.parseBlockLabel(&colon);
- if (try p.parseBlock()) |node| {
- node.cast(Node.Block).?.label = label_token;
- return node;
- }
+ if (try p.parseBlock(label_token)) |node| return node;
if (try p.parseLoopStatement()) |node| {
if (node.cast(Node.For)) |for_node| {
@@ -1003,14 +1003,13 @@ const Parser = struct {
fn parseBlockExpr(p: *Parser) Error!?*Node {
var colon: TokenIndex = undefined;
const label_token = p.parseBlockLabel(&colon);
- const block_node = (try p.parseBlock()) orelse {
+ const block_node = (try p.parseBlock(label_token)) orelse {
if (label_token) |label| {
p.putBackToken(label + 1); // ":"
p.putBackToken(label); // IDENTIFIER
}
return null;
};
- block_node.cast(Node.Block).?.label = label_token;
return block_node;
}
@@ -1177,7 +1176,7 @@ const Parser = struct {
p.putBackToken(token); // IDENTIFIER
}
- if (try p.parseBlock()) |node| return node;
+ if (try p.parseBlock(null)) |node| return node;
if (try p.parseCurlySuffixExpr()) |node| return node;
return null;
@@ -1189,7 +1188,7 @@ const Parser = struct {
}
/// Block <- LBRACE Statement* RBRACE
- fn parseBlock(p: *Parser) !?*Node {
+ fn parseBlock(p: *Parser, label_token: ?TokenIndex) !?*Node {
const lbrace = p.eatToken(.LBrace) orelse return null;
var statements = std.ArrayList(*Node).init(p.gpa);
@@ -1211,16 +1210,26 @@ const Parser = struct {
const statements_len = @intCast(NodeIndex, statements.items.len);
- const block_node = try Node.Block.alloc(&p.arena.allocator, statements_len);
- block_node.* = .{
- .label = null,
- .lbrace = lbrace,
- .statements_len = statements_len,
- .rbrace = rbrace,
- };
- std.mem.copy(*Node, block_node.statements(), statements.items);
-
- return &block_node.base;
+ if (label_token) |label| {
+ const block_node = try Node.LabeledBlock.alloc(&p.arena.allocator, statements_len);
+ block_node.* = .{
+ .label = label,
+ .lbrace = lbrace,
+ .statements_len = statements_len,
+ .rbrace = rbrace,
+ };
+ std.mem.copy(*Node, block_node.statements(), statements.items);
+ return &block_node.base;
+ } else {
+ const block_node = try Node.Block.alloc(&p.arena.allocator, statements_len);
+ block_node.* = .{
+ .lbrace = lbrace,
+ .statements_len = statements_len,
+ .rbrace = rbrace,
+ };
+ std.mem.copy(*Node, block_node.statements(), statements.items);
+ return &block_node.base;
+ }
}
/// LoopExpr <- KEYWORD_inline? (ForExpr / WhileExpr)
@@ -1658,11 +1667,8 @@ const Parser = struct {
var colon: TokenIndex = undefined;
const label = p.parseBlockLabel(&colon);
- if (label) |token| {
- if (try p.parseBlock()) |node| {
- node.cast(Node.Block).?.label = token;
- return node;
- }
+ if (label) |label_token| {
+ if (try p.parseBlock(label_token)) |node| return node;
}
if (try p.parseLoopTypeExpr()) |node| {
@@ -3440,6 +3446,7 @@ const Parser = struct {
}
}
+ /// TODO Delete this function. I don't like the inversion of control.
fn expectNode(
p: *Parser,
parseFn: NodeParseFn,
@@ -3449,6 +3456,7 @@ const Parser = struct {
return (try p.expectNodeRecoverable(parseFn, err)) orelse return error.ParseError;
}
+ /// TODO Delete this function. I don't like the inversion of control.
fn expectNodeRecoverable(
p: *Parser,
parseFn: NodeParseFn,
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index c516250a17..7b9036f259 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -392,28 +392,50 @@ fn renderExpression(
return renderToken(tree, stream, any_type.token, indent, start_col, space);
},
- .Block => {
- const block = @fieldParentPtr(ast.Node.Block, "base", base);
+ .Block, .LabeledBlock => {
+ const block: struct {
+ label: ?ast.TokenIndex,
+ statements: []*ast.Node,
+ lbrace: ast.TokenIndex,
+ rbrace: ast.TokenIndex,
+ } = b: {
+ if (base.castTag(.Block)) |block| {
+ break :b .{
+ .label = null,
+ .statements = block.statements(),
+ .lbrace = block.lbrace,
+ .rbrace = block.rbrace,
+ };
+ } else if (base.castTag(.LabeledBlock)) |block| {
+ break :b .{
+ .label = block.label,
+ .statements = block.statements(),
+ .lbrace = block.lbrace,
+ .rbrace = block.rbrace,
+ };
+ } else {
+ unreachable;
+ }
+ };
if (block.label) |label| {
try renderToken(tree, stream, label, indent, start_col, Space.None);
try renderToken(tree, stream, tree.nextToken(label), indent, start_col, Space.Space);
}
- if (block.statements_len == 0) {
+ if (block.statements.len == 0) {
try renderToken(tree, stream, block.lbrace, indent + indent_delta, start_col, Space.None);
return renderToken(tree, stream, block.rbrace, indent, start_col, space);
} else {
const block_indent = indent + indent_delta;
try renderToken(tree, stream, block.lbrace, block_indent, start_col, Space.Newline);
- const block_statements = block.statements();
- for (block_statements) |statement, i| {
+ for (block.statements) |statement, i| {
try stream.writeByteNTimes(' ', block_indent);
try renderStatement(allocator, stream, tree, block_indent, start_col, statement);
- if (i + 1 < block_statements.len) {
- try renderExtraNewline(tree, stream, start_col, block_statements[i + 1]);
+ if (i + 1 < block.statements.len) {
+ try renderExtraNewline(tree, stream, start_col, block.statements[i + 1]);
}
}
@@ -1841,7 +1863,7 @@ fn renderExpression(
const rparen = tree.nextToken(for_node.array_expr.lastToken());
- const body_is_block = for_node.body.tag == .Block;
+ const body_is_block = for_node.body.tag.isBlock();
const src_one_line_to_body = !body_is_block and tree.tokensOnSameLine(rparen, for_node.body.firstToken());
const body_on_same_line = body_is_block or src_one_line_to_body;
@@ -2578,6 +2600,7 @@ fn renderDocCommentsToken(
fn nodeIsBlock(base: *const ast.Node) bool {
return switch (base.tag) {
.Block,
+ .LabeledBlock,
.If,
.For,
.While,
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index a172a4b679..6c0e942cc6 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -1343,7 +1343,7 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
const body_block = body_node.cast(ast.Node.Block).?;
- _ = try astgen.blockExpr(self, params_scope, .none, body_block);
+ try astgen.blockExpr(self, params_scope, body_block);
if (gen_scope.instructions.items.len == 0 or
!gen_scope.instructions.items[gen_scope.instructions.items.len - 1].tag.isNoReturn())
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 3c149cd3dd..41d48c46fc 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -107,31 +107,46 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.NullLiteral => return rlWrap(mod, scope, rl, try nullLiteral(mod, scope, node.castTag(.NullLiteral).?)),
.OptionalType => return rlWrap(mod, scope, rl, try optionalType(mod, scope, node.castTag(.OptionalType).?)),
.UnwrapOptional => return unwrapOptional(mod, scope, rl, node.castTag(.UnwrapOptional).?),
- .Block => return blockExpr(mod, scope, rl, node.castTag(.Block).?),
+ .Block => return rlWrapVoid(mod, scope, rl, node, try blockExpr(mod, scope, node.castTag(.Block).?)),
+ .LabeledBlock => return labeledBlockExpr(mod, scope, rl, node.castTag(.LabeledBlock).?),
else => return mod.failNode(scope, node, "TODO implement astgen.Expr for {}", .{@tagName(node.tag)}),
}
}
-pub fn blockExpr(
+pub fn blockExpr(mod: *Module, parent_scope: *Scope, block_node: *ast.Node.Block) InnerError!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ try blockExprStmts(mod, parent_scope, &block_node.base, block_node.statements());
+}
+
+fn labeledBlockExpr(
mod: *Module,
parent_scope: *Scope,
rl: ResultLoc,
- block_node: *ast.Node.Block,
+ block_node: *ast.Node.LabeledBlock,
) InnerError!*zir.Inst {
const tracy = trace(@src());
defer tracy.end();
- if (block_node.label) |label| {
- return mod.failTok(parent_scope, label, "TODO implement labeled blocks", .{});
+ const statements = block_node.statements();
+
+ if (statements.len == 0) {
+ // Hot path for `{}`.
+ return rlWrapVoid(mod, parent_scope, rl, &block_node.base, {});
}
+ return mod.failNode(parent_scope, &block_node.base, "TODO implement labeled blocks", .{});
+}
+
+fn blockExprStmts(mod: *Module, parent_scope: *Scope, node: *ast.Node, statements: []*ast.Node) !void {
+ const tree = parent_scope.tree();
+
var block_arena = std.heap.ArenaAllocator.init(mod.gpa);
defer block_arena.deinit();
- const tree = parent_scope.tree();
-
var scope = parent_scope;
- for (block_node.statements()) |statement| {
+ for (statements) |statement| {
const src = tree.token_locs[statement.firstToken()].start;
_ = try addZIRNoOp(mod, scope, src, .dbg_stmt);
switch (statement.tag) {
@@ -162,12 +177,6 @@ pub fn blockExpr(
},
}
}
-
- const src = tree.token_locs[block_node.firstToken()].start;
- return addZIRInstConst(mod, parent_scope, src, .{
- .ty = Type.initTag(.void),
- .val = Value.initTag(.void_value),
- });
}
fn varDecl(
@@ -1184,6 +1193,7 @@ fn nodeMayNeedMemoryLocation(start_node: *ast.Node) bool {
.Slice,
.Deref,
.ArrayAccess,
+ .Block,
=> return false,
// Forward the question to a sub-expression.
@@ -1210,11 +1220,11 @@ fn nodeMayNeedMemoryLocation(start_node: *ast.Node) bool {
.Switch,
.Call,
.BuiltinCall, // TODO some of these can return false
+ .LabeledBlock,
=> return true,
// Depending on AST properties, they may need memory locations.
.If => return node.castTag(.If).?.@"else" != null,
- .Block => return node.castTag(.Block).?.label != null,
}
}
}
diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig
index 2382375fc5..98cb68f059 100644
--- a/src-self-hosted/translate_c.zig
+++ b/src-self-hosted/translate_c.zig
@@ -118,19 +118,31 @@ const Scope = struct {
self.* = undefined;
}
- fn complete(self: *Block, c: *Context) !*ast.Node.Block {
+ fn complete(self: *Block, c: *Context) !*ast.Node {
// We reserve 1 extra statement if the parent is a Loop. This is in case of
// do while, we want to put `if (cond) break;` at the end.
const alloc_len = self.statements.items.len + @boolToInt(self.base.parent.?.id == .Loop);
- const node = try ast.Node.Block.alloc(c.arena, alloc_len);
- node.* = .{
- .statements_len = self.statements.items.len,
- .lbrace = self.lbrace,
- .rbrace = try appendToken(c, .RBrace, "}"),
- .label = self.label,
- };
- mem.copy(*ast.Node, node.statements(), self.statements.items);
- return node;
+ const rbrace = try appendToken(c, .RBrace, "}");
+ if (self.label) |label| {
+ const node = try ast.Node.LabeledBlock.alloc(c.arena, alloc_len);
+ node.* = .{
+ .statements_len = self.statements.items.len,
+ .lbrace = self.lbrace,
+ .rbrace = rbrace,
+ .label = label,
+ };
+ mem.copy(*ast.Node, node.statements(), self.statements.items);
+ return &node.base;
+ } else {
+ const node = try ast.Node.Block.alloc(c.arena, alloc_len);
+ node.* = .{
+ .statements_len = self.statements.items.len,
+ .lbrace = self.lbrace,
+ .rbrace = rbrace,
+ };
+ mem.copy(*ast.Node, node.statements(), self.statements.items);
+ return &node.base;
+ }
}
/// Given the desired name, return a name that does not shadow anything from outer scopes.
@@ -320,15 +332,9 @@ pub const Context = struct {
return node;
}
- fn createBlock(c: *Context, label: ?[]const u8, statements_len: ast.NodeIndex) !*ast.Node.Block {
- const label_node = if (label) |l| blk: {
- const ll = try appendIdentifier(c, l);
- _ = try appendToken(c, .Colon, ":");
- break :blk ll;
- } else null;
+ fn createBlock(c: *Context, statements_len: ast.NodeIndex) !*ast.Node.Block {
const block_node = try ast.Node.Block.alloc(c.arena, statements_len);
block_node.* = .{
- .label = label_node,
.lbrace = try appendToken(c, .LBrace, "{"),
.statements_len = statements_len,
.rbrace = undefined,
@@ -640,8 +646,8 @@ fn visitFnDecl(c: *Context, fn_decl: *const ZigClangFunctionDecl) Error!void {
var last = block_scope.statements.items[block_scope.statements.items.len - 1];
while (true) {
switch (last.tag) {
- .Block => {
- const stmts = last.castTag(.Block).?.statements();
+ .Block, .LabeledBlock => {
+ const stmts = last.blockStatements();
if (stmts.len == 0) break;
last = stmts[stmts.len - 1];
@@ -669,7 +675,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const ZigClangFunctionDecl) Error!void {
}
const body_node = try block_scope.complete(rp.c);
- proto_node.setTrailer("body_node", &body_node.base);
+ proto_node.setTrailer("body_node", body_node);
return addTopLevelDecl(c, fn_name, &proto_node.base);
}
@@ -1275,7 +1281,7 @@ fn transStmt(
.WhileStmtClass => return transWhileLoop(rp, scope, @ptrCast(*const ZigClangWhileStmt, stmt)),
.DoStmtClass => return transDoWhileLoop(rp, scope, @ptrCast(*const ZigClangDoStmt, stmt)),
.NullStmtClass => {
- const block = try rp.c.createBlock(null, 0);
+ const block = try rp.c.createBlock(0);
block.rbrace = try appendToken(rp.c, .RBrace, "}");
return &block.base;
},
@@ -1356,7 +1362,7 @@ fn transBinaryOperator(
const grouped_expr = try rp.c.arena.create(ast.Node.GroupedExpression);
grouped_expr.* = .{
.lparen = lparen,
- .expr = &block_node.base,
+ .expr = block_node,
.rparen = rparen,
};
return maybeSuppressResult(rp, scope, result_used, &grouped_expr.base);
@@ -1521,8 +1527,7 @@ fn transCompoundStmt(rp: RestorePoint, scope: *Scope, stmt: *const ZigClangCompo
var block_scope = try Scope.Block.init(rp.c, scope, false);
defer block_scope.deinit();
try transCompoundStmtInline(rp, &block_scope.base, stmt, &block_scope);
- const node = try block_scope.complete(rp.c);
- return &node.base;
+ return try block_scope.complete(rp.c);
}
fn transCStyleCastExprClass(
@@ -2589,7 +2594,7 @@ fn transDoWhileLoop(
// zig: if (!cond) break;
// zig: }
const node = try transStmt(rp, &loop_scope, ZigClangDoStmt_getBody(stmt), .unused, .r_value);
- break :blk node.cast(ast.Node.Block).?;
+ break :blk node.castTag(.Block).?;
} else blk: {
// the C statement is without a block, so we need to create a block to contain it.
// c: do
@@ -2600,7 +2605,7 @@ fn transDoWhileLoop(
// zig: if (!cond) break;
// zig: }
new = true;
- const block = try rp.c.createBlock(null, 2);
+ const block = try rp.c.createBlock(2);
block.statements_len = 1; // over-allocated so we can add another below
block.statements()[0] = try transStmt(rp, &loop_scope, ZigClangDoStmt_getBody(stmt), .unused, .r_value);
break :blk block;
@@ -2659,8 +2664,7 @@ fn transForLoop(
while_node.body = try transStmt(rp, &loop_scope, ZigClangForStmt_getBody(stmt), .unused, .r_value);
if (block_scope) |*bs| {
try bs.statements.append(&while_node.base);
- const node = try bs.complete(rp.c);
- return &node.base;
+ return try bs.complete(rp.c);
} else {
_ = try appendToken(rp.c, .Semicolon, ";");
return &while_node.base;
@@ -2768,7 +2772,7 @@ fn transSwitch(
const result_node = try switch_scope.pending_block.complete(rp.c);
switch_scope.pending_block.deinit();
- return &result_node.base;
+ return result_node;
}
fn transCase(
@@ -2820,7 +2824,7 @@ fn transCase(
switch_scope.pending_block.deinit();
switch_scope.pending_block = try Scope.Block.init(rp.c, scope, false);
- try switch_scope.pending_block.statements.append(&pending_node.base);
+ try switch_scope.pending_block.statements.append(pending_node);
return transStmt(rp, scope, ZigClangCaseStmt_getSubStmt(stmt), .unused, .r_value);
}
@@ -2857,7 +2861,7 @@ fn transDefault(
const pending_node = try switch_scope.pending_block.complete(rp.c);
switch_scope.pending_block.deinit();
switch_scope.pending_block = try Scope.Block.init(rp.c, scope, false);
- try switch_scope.pending_block.statements.append(&pending_node.base);
+ try switch_scope.pending_block.statements.append(pending_node);
return transStmt(rp, scope, ZigClangDefaultStmt_getSubStmt(stmt), .unused, .r_value);
}
@@ -2972,7 +2976,7 @@ fn transStmtExpr(rp: RestorePoint, scope: *Scope, stmt: *const ZigClangStmtExpr,
const grouped_expr = try rp.c.arena.create(ast.Node.GroupedExpression);
grouped_expr.* = .{
.lparen = lparen,
- .expr = &block_node.base,
+ .expr = block_node,
.rparen = rparen,
};
return maybeSuppressResult(rp, scope, used, &grouped_expr.base);
@@ -3304,7 +3308,7 @@ fn transCreatePreCrement(
const grouped_expr = try rp.c.arena.create(ast.Node.GroupedExpression);
grouped_expr.* = .{
.lparen = try appendToken(rp.c, .LParen, "("),
- .expr = &block_node.base,
+ .expr = block_node,
.rparen = try appendToken(rp.c, .RParen, ")"),
};
return &grouped_expr.base;
@@ -3398,7 +3402,7 @@ fn transCreatePostCrement(
const grouped_expr = try rp.c.arena.create(ast.Node.GroupedExpression);
grouped_expr.* = .{
.lparen = try appendToken(rp.c, .LParen, "("),
- .expr = &block_node.base,
+ .expr = block_node,
.rparen = try appendToken(rp.c, .RParen, ")"),
};
return &grouped_expr.base;
@@ -3589,7 +3593,7 @@ fn transCreateCompoundAssign(
const grouped_expr = try rp.c.arena.create(ast.Node.GroupedExpression);
grouped_expr.* = .{
.lparen = try appendToken(rp.c, .LParen, "("),
- .expr = &block_node.base,
+ .expr = block_node,
.rparen = try appendToken(rp.c, .RParen, ")"),
};
return &grouped_expr.base;
@@ -3748,7 +3752,7 @@ fn transBinaryConditionalOperator(rp: RestorePoint, scope: *Scope, stmt: *const
const grouped_expr = try rp.c.arena.create(ast.Node.GroupedExpression);
grouped_expr.* = .{
.lparen = lparen,
- .expr = &block_node.base,
+ .expr = block_node,
.rparen = try appendToken(rp.c, .RParen, ")"),
};
return maybeSuppressResult(rp, scope, used, &grouped_expr.base);
@@ -4191,7 +4195,7 @@ fn transCreateNodeAssign(
const block_node = try block_scope.complete(rp.c);
// semicolon must immediately follow rbrace because it is the last token in a block
_ = try appendToken(rp.c, .Semicolon, ";");
- return &block_node.base;
+ return block_node;
}
fn transCreateNodeFieldAccess(c: *Context, container: *ast.Node, field_name: []const u8) !*ast.Node {
@@ -4484,7 +4488,6 @@ fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: *ast.Node, proto_a
const block = try ast.Node.Block.alloc(c.arena, 1);
block.* = .{
- .label = null,
.lbrace = block_lbrace,
.statements_len = 1,
.rbrace = try appendToken(c, .RBrace, "}"),
@@ -5475,9 +5478,9 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
if (last != .Eof and last != .Nl)
return m.fail(c, "unable to translate C expr: unexpected token .{}", .{@tagName(last)});
_ = try appendToken(c, .Semicolon, ";");
- const type_of_arg = if (expr.tag != .Block) expr else blk: {
- const blk = @fieldParentPtr(ast.Node.Block, "base", expr);
- const blk_last = blk.statements()[blk.statements_len - 1];
+ const type_of_arg = if (!expr.tag.isBlock()) expr else blk: {
+ const stmts = expr.blockStatements();
+ const blk_last = stmts[stmts.len - 1];
const br = blk_last.cast(ast.Node.ControlFlowExpression).?;
break :blk br.getRHS().?;
};
@@ -5500,7 +5503,7 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
.visib_token = pub_tok,
.extern_export_inline_token = inline_tok,
.name_token = name_tok,
- .body_node = &block_node.base,
+ .body_node = block_node,
});
mem.copy(ast.Node.FnProto.ParamDecl, fn_proto.params(), fn_params.items);
@@ -5555,8 +5558,7 @@ fn parseCExpr(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!*ast.Node {
const break_node = try transCreateNodeBreak(c, block_scope.label, last);
try block_scope.statements.append(&break_node.base);
- const block_node = try block_scope.complete(c);
- return &block_node.base;
+ return try block_scope.complete(c);
},
else => {
m.i -= 1;
From b49d3672f3fc722925a6dad4070f7faa92dd1878 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 14 Aug 2020 16:42:36 -0700
Subject: [PATCH 085/153] stage2 astgen for LabeledBlock
---
src-self-hosted/Module.zig | 1 +
src-self-hosted/astgen.zig | 22 ++++++++++++++++------
2 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 6c0e942cc6..9ae477312a 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -737,6 +737,7 @@ pub const Scope = struct {
arena: *Allocator,
/// The first N instructions in a function body ZIR are arg instructions.
instructions: std.ArrayListUnmanaged(*zir.Inst) = .{},
+ label: ?ast.TokenIndex = null,
};
/// This is always a `const` local and importantly the `inst` is a value type, not a pointer.
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 41d48c46fc..6cf2ff184d 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -129,14 +129,24 @@ fn labeledBlockExpr(
const tracy = trace(@src());
defer tracy.end();
- const statements = block_node.statements();
+ var block_scope: Scope.GenZIR = .{
+ .parent = parent_scope,
+ .decl = parent_scope.decl().?,
+ .arena = parent_scope.arena(),
+ .instructions = .{},
+ .label = block_node.label,
+ };
+ defer block_scope.instructions.deinit(mod.gpa);
- if (statements.len == 0) {
- // Hot path for `{}`.
- return rlWrapVoid(mod, parent_scope, rl, &block_node.base, {});
- }
+ try blockExprStmts(mod, &block_scope.base, &block_node.base, block_node.statements());
- return mod.failNode(parent_scope, &block_node.base, "TODO implement labeled blocks", .{});
+ const tree = parent_scope.tree();
+ const src = tree.token_locs[block_node.lbrace].start;
+ const block = try addZIRInstBlock(mod, parent_scope, src, .{
+ .instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items),
+ });
+
+ return &block.base;
}
fn blockExprStmts(mod: *Module, parent_scope: *Scope, node: *ast.Node, statements: []*ast.Node) !void {
From f356cba704e8e45832ac363b5bf205a63f39257a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 14 Aug 2020 19:49:10 -0700
Subject: [PATCH 086/153] stage2: populate some of the astgen switch
possibilities
Idea here is simply to entice people to contribute astgen code :)
---
lib/std/zig/ast.zig | 16 +++++------
src-self-hosted/astgen.zig | 56 +++++++++++++++++++++++++++++++++++++-
2 files changed, 63 insertions(+), 9 deletions(-)
diff --git a/lib/std/zig/ast.zig b/lib/std/zig/ast.zig
index 9258fc58d0..f5149e3a82 100644
--- a/lib/std/zig/ast.zig
+++ b/lib/std/zig/ast.zig
@@ -530,15 +530,15 @@ pub const Node = struct {
// Misc
DocComment,
- SwitchCase,
- SwitchElse,
- Else,
- Payload,
- PointerPayload,
- PointerIndexPayload,
+ SwitchCase, // TODO make this not a child of AST Node
+ SwitchElse, // TODO make this not a child of AST Node
+ Else, // TODO make this not a child of AST Node
+ Payload, // TODO make this not a child of AST Node
+ PointerPayload, // TODO make this not a child of AST Node
+ PointerIndexPayload, // TODO make this not a child of AST Node
ContainerField,
- ErrorTag,
- FieldInitializer,
+ ErrorTag, // TODO make this not a child of AST Node
+ FieldInitializer, // TODO make this not a child of AST Node
pub fn Type(tag: Tag) type {
return switch (tag) {
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 6cf2ff184d..4dec87f364 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -47,7 +47,19 @@ pub fn typeExpr(mod: *Module, scope: *Scope, type_node: *ast.Node) InnerError!*z
/// Turn Zig AST into untyped ZIR istructions.
pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerError!*zir.Inst {
switch (node.tag) {
+ .Root => unreachable, // Top-level declaration.
+ .Use => unreachable, // Top-level declaration.
+ .TestDecl => unreachable, // Top-level declaration.
+ .DocComment => unreachable, // Top-level declaration.
.VarDecl => unreachable, // Handled in `blockExpr`.
+ .SwitchCase => unreachable, // Handled in `switchExpr`.
+ .SwitchElse => unreachable, // Handled in `switchExpr`.
+ .Else => unreachable, // Handled explicitly in the control flow expression functions.
+ .Payload => unreachable, // Handled explicitly.
+ .PointerPayload => unreachable, // Handled explicitly.
+ .PointerIndexPayload => unreachable, // Handled explicitly.
+ .ErrorTag => unreachable, // Handled explicitly.
+ .FieldInitializer => unreachable, // Handled explicitly.
.Assign => return rlWrapVoid(mod, scope, rl, node, try assign(mod, scope, node.castTag(.Assign).?)),
.AssignBitAnd => return rlWrapVoid(mod, scope, rl, node, try assignOp(mod, scope, node.castTag(.AssignBitAnd).?, .bitand)),
@@ -109,7 +121,49 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.UnwrapOptional => return unwrapOptional(mod, scope, rl, node.castTag(.UnwrapOptional).?),
.Block => return rlWrapVoid(mod, scope, rl, node, try blockExpr(mod, scope, node.castTag(.Block).?)),
.LabeledBlock => return labeledBlockExpr(mod, scope, rl, node.castTag(.LabeledBlock).?),
- else => return mod.failNode(scope, node, "TODO implement astgen.Expr for {}", .{@tagName(node.tag)}),
+ .Defer => return mod.failNode(scope, node, "TODO implement astgen.expr for .Defer", .{}),
+ .Catch => return mod.failNode(scope, node, "TODO implement astgen.expr for .Catch", .{}),
+ .BoolAnd => return mod.failNode(scope, node, "TODO implement astgen.expr for .BoolAnd", .{}),
+ .BoolOr => return mod.failNode(scope, node, "TODO implement astgen.expr for .BoolOr", .{}),
+ .ErrorUnion => return mod.failNode(scope, node, "TODO implement astgen.expr for .ErrorUnion", .{}),
+ .MergeErrorSets => return mod.failNode(scope, node, "TODO implement astgen.expr for .MergeErrorSets", .{}),
+ .Range => return mod.failNode(scope, node, "TODO implement astgen.expr for .Range", .{}),
+ .OrElse => return mod.failNode(scope, node, "TODO implement astgen.expr for .OrElse", .{}),
+ .AddressOf => return mod.failNode(scope, node, "TODO implement astgen.expr for .AddressOf", .{}),
+ .Await => return mod.failNode(scope, node, "TODO implement astgen.expr for .Await", .{}),
+ .BitNot => return mod.failNode(scope, node, "TODO implement astgen.expr for .BitNot", .{}),
+ .Negation => return mod.failNode(scope, node, "TODO implement astgen.expr for .Negation", .{}),
+ .NegationWrap => return mod.failNode(scope, node, "TODO implement astgen.expr for .NegationWrap", .{}),
+ .Resume => return mod.failNode(scope, node, "TODO implement astgen.expr for .Resume", .{}),
+ .Try => return mod.failNode(scope, node, "TODO implement astgen.expr for .Try", .{}),
+ .ArrayType => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayType", .{}),
+ .ArrayTypeSentinel => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayTypeSentinel", .{}),
+ .PtrType => return mod.failNode(scope, node, "TODO implement astgen.expr for .PtrType", .{}),
+ .SliceType => return mod.failNode(scope, node, "TODO implement astgen.expr for .SliceType", .{}),
+ .Slice => return mod.failNode(scope, node, "TODO implement astgen.expr for .Slice", .{}),
+ .ArrayAccess => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayAccess", .{}),
+ .ArrayInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializer", .{}),
+ .ArrayInitializerDot => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializerDot", .{}),
+ .StructInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .StructInitializer", .{}),
+ .StructInitializerDot => return mod.failNode(scope, node, "TODO implement astgen.expr for .StructInitializerDot", .{}),
+ .Switch => return mod.failNode(scope, node, "TODO implement astgen.expr for .Switch", .{}),
+ .For => return mod.failNode(scope, node, "TODO implement astgen.expr for .For", .{}),
+ .Suspend => return mod.failNode(scope, node, "TODO implement astgen.expr for .Suspend", .{}),
+ .Continue => return mod.failNode(scope, node, "TODO implement astgen.expr for .Continue", .{}),
+ .Break => return mod.failNode(scope, node, "TODO implement astgen.expr for .Break", .{}),
+ .AnyType => return mod.failNode(scope, node, "TODO implement astgen.expr for .AnyType", .{}),
+ .ErrorType => return mod.failNode(scope, node, "TODO implement astgen.expr for .ErrorType", .{}),
+ .FnProto => return mod.failNode(scope, node, "TODO implement astgen.expr for .FnProto", .{}),
+ .AnyFrameType => return mod.failNode(scope, node, "TODO implement astgen.expr for .AnyFrameType", .{}),
+ .EnumLiteral => return mod.failNode(scope, node, "TODO implement astgen.expr for .EnumLiteral", .{}),
+ .MultilineStringLiteral => return mod.failNode(scope, node, "TODO implement astgen.expr for .MultilineStringLiteral", .{}),
+ .CharLiteral => return mod.failNode(scope, node, "TODO implement astgen.expr for .CharLiteral", .{}),
+ .GroupedExpression => return mod.failNode(scope, node, "TODO implement astgen.expr for .GroupedExpression", .{}),
+ .ErrorSetDecl => return mod.failNode(scope, node, "TODO implement astgen.expr for .ErrorSetDecl", .{}),
+ .ContainerDecl => return mod.failNode(scope, node, "TODO implement astgen.expr for .ContainerDecl", .{}),
+ .Comptime => return mod.failNode(scope, node, "TODO implement astgen.expr for .Comptime", .{}),
+ .Nosuspend => return mod.failNode(scope, node, "TODO implement astgen.expr for .Nosuspend", .{}),
+ .ContainerField => return mod.failNode(scope, node, "TODO implement astgen.expr for .ContainerField", .{}),
}
}
From 0f3f96c85095876e7e6f3f00e60915ec41f63700 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 15 Aug 2020 00:52:25 -0700
Subject: [PATCH 087/153] stage2: astgen for labeled blocks and labeled breaks
---
src-self-hosted/Module.zig | 8 ++-
src-self-hosted/astgen.zig | 102 ++++++++++++++++++++++++++++++-----
src-self-hosted/zir_sema.zig | 2 +-
3 files changed, 98 insertions(+), 14 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 9ae477312a..2fad7e7a00 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -737,7 +737,13 @@ pub const Scope = struct {
arena: *Allocator,
/// The first N instructions in a function body ZIR are arg instructions.
instructions: std.ArrayListUnmanaged(*zir.Inst) = .{},
- label: ?ast.TokenIndex = null,
+ label: ?Label = null,
+
+ pub const Label = struct {
+ token: ast.TokenIndex,
+ block_inst: *zir.Inst.Block,
+ result_loc: astgen.ResultLoc,
+ };
};
/// This is always a `const` local and importantly the `inst` is a value type, not a pointer.
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 4dec87f364..1747d77df3 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -121,6 +121,8 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.UnwrapOptional => return unwrapOptional(mod, scope, rl, node.castTag(.UnwrapOptional).?),
.Block => return rlWrapVoid(mod, scope, rl, node, try blockExpr(mod, scope, node.castTag(.Block).?)),
.LabeledBlock => return labeledBlockExpr(mod, scope, rl, node.castTag(.LabeledBlock).?),
+ .Break => return rlWrap(mod, scope, rl, try breakExpr(mod, scope, node.castTag(.Break).?)),
+
.Defer => return mod.failNode(scope, node, "TODO implement astgen.expr for .Defer", .{}),
.Catch => return mod.failNode(scope, node, "TODO implement astgen.expr for .Catch", .{}),
.BoolAnd => return mod.failNode(scope, node, "TODO implement astgen.expr for .BoolAnd", .{}),
@@ -150,7 +152,6 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.For => return mod.failNode(scope, node, "TODO implement astgen.expr for .For", .{}),
.Suspend => return mod.failNode(scope, node, "TODO implement astgen.expr for .Suspend", .{}),
.Continue => return mod.failNode(scope, node, "TODO implement astgen.expr for .Continue", .{}),
- .Break => return mod.failNode(scope, node, "TODO implement astgen.expr for .Break", .{}),
.AnyType => return mod.failNode(scope, node, "TODO implement astgen.expr for .AnyType", .{}),
.ErrorType => return mod.failNode(scope, node, "TODO implement astgen.expr for .ErrorType", .{}),
.FnProto => return mod.failNode(scope, node, "TODO implement astgen.expr for .FnProto", .{}),
@@ -167,6 +168,55 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
}
}
+fn breakExpr(mod: *Module, parent_scope: *Scope, node: *ast.Node.ControlFlowExpression) InnerError!*zir.Inst {
+ const tree = parent_scope.tree();
+ const src = tree.token_locs[node.ltoken].start;
+
+ if (node.getLabel()) |break_label| {
+ // Look for the label in the scope.
+ var scope = parent_scope;
+ while (true) {
+ switch (scope.tag) {
+ .gen_zir => {
+ const gen_zir = scope.cast(Scope.GenZIR).?;
+ if (gen_zir.label) |label| {
+ if (try tokenIdentEql(mod, parent_scope, label.token, break_label)) {
+ if (node.getRHS()) |rhs| {
+ // Most result location types can be forwarded directly; however
+ // if we need to write to a pointer which has an inferred type,
+ // proper type inference requires peer type resolution on the block's
+ // break operand expressions.
+ const branch_rl: ResultLoc = switch (label.result_loc) {
+ .discard, .none, .ty, .ptr, .lvalue => label.result_loc,
+ .inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = label.block_inst },
+ };
+ const operand = try expr(mod, parent_scope, branch_rl, rhs);
+ return try addZIRInst(mod, scope, src, zir.Inst.Break, .{
+ .block = label.block_inst,
+ .operand = operand,
+ }, .{});
+ } else {
+ return try addZIRInst(mod, scope, src, zir.Inst.BreakVoid, .{
+ .block = label.block_inst,
+ }, .{});
+ }
+ }
+ }
+ scope = gen_zir.parent;
+ },
+ .local_val => scope = scope.cast(Scope.LocalVal).?.parent,
+ .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
+ else => {
+ const label_name = try identifierTokenString(mod, parent_scope, break_label);
+ return mod.failTok(parent_scope, break_label, "label not found: '{}'", .{label_name});
+ },
+ }
+ }
+ } else {
+ return mod.failNode(parent_scope, &node.base, "TODO implement break from loop", .{});
+ }
+}
+
pub fn blockExpr(mod: *Module, parent_scope: *Scope, block_node: *ast.Node.Block) InnerError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -183,24 +233,44 @@ fn labeledBlockExpr(
const tracy = trace(@src());
defer tracy.end();
+ const tree = parent_scope.tree();
+ const src = tree.token_locs[block_node.lbrace].start;
+
+ // Create the Block ZIR instruction so that we can put it into the GenZIR struct
+ // so that break statements can reference it.
+ const gen_zir = parent_scope.getGenZIR();
+ const block_inst = try gen_zir.arena.create(zir.Inst.Block);
+ block_inst.* = .{
+ .base = .{
+ .tag = .block,
+ .src = src,
+ },
+ .positionals = .{
+ .body = .{ .instructions = undefined },
+ },
+ .kw_args = .{},
+ };
+
var block_scope: Scope.GenZIR = .{
.parent = parent_scope,
.decl = parent_scope.decl().?,
- .arena = parent_scope.arena(),
+ .arena = gen_zir.arena,
.instructions = .{},
- .label = block_node.label,
+ // TODO @as here is working around a stage1 miscompilation bug :(
+ .label = @as(?Scope.GenZIR.Label, Scope.GenZIR.Label{
+ .token = block_node.label,
+ .block_inst = block_inst,
+ .result_loc = rl,
+ }),
};
defer block_scope.instructions.deinit(mod.gpa);
try blockExprStmts(mod, &block_scope.base, &block_node.base, block_node.statements());
- const tree = parent_scope.tree();
- const src = tree.token_locs[block_node.lbrace].start;
- const block = try addZIRInstBlock(mod, parent_scope, src, .{
- .instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items),
- });
+ block_inst.positionals.body.instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items);
+ try gen_zir.instructions.append(mod.gpa, &block_inst.base);
- return &block.base;
+ return &block_inst.base;
}
fn blockExprStmts(mod: *Module, parent_scope: *Scope, node: *ast.Node, statements: []*ast.Node) !void {
@@ -344,7 +414,7 @@ fn assign(mod: *Module, scope: *Scope, infix_node: *ast.Node.SimpleInfixOp) Inne
if (infix_node.lhs.castTag(.Identifier)) |ident| {
// This intentionally does not support @"_" syntax.
const ident_name = scope.tree().tokenSlice(ident.token);
- if (std.mem.eql(u8, ident_name, "_")) {
+ if (mem.eql(u8, ident_name, "_")) {
_ = try expr(mod, scope, .discard, infix_node.rhs);
return;
}
@@ -404,12 +474,20 @@ fn unwrapOptional(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Si
return rlWrap(mod, scope, rl, try addZIRUnOp(mod, scope, src, .deref, unwrapped_ptr));
}
+/// Return whether the identifier names of two tokens are equal. Resolves @"" tokens.
+/// In theory this could be done without allocating, but this implementation allocates when the @"" form is used.
+fn tokenIdentEql(mod: *Module, scope: *Scope, token1: ast.TokenIndex, token2: ast.TokenIndex) !bool {
+ const ident_name_1 = try identifierTokenString(mod, scope, token1);
+ const ident_name_2 = try identifierTokenString(mod, scope, token2);
+ return mem.eql(u8, ident_name_1, ident_name_2);
+}
+
/// Identifier token -> String (allocated in scope.arena())
-pub fn identifierTokenString(mod: *Module, scope: *Scope, token: ast.TokenIndex) InnerError![]const u8 {
+fn identifierTokenString(mod: *Module, scope: *Scope, token: ast.TokenIndex) InnerError![]const u8 {
const tree = scope.tree();
const ident_name = tree.tokenSlice(token);
- if (std.mem.startsWith(u8, ident_name, "@")) {
+ if (mem.startsWith(u8, ident_name, "@")) {
const raw_string = ident_name[1..];
var bad_index: usize = undefined;
return std.zig.parseStringLiteral(scope.arena(), raw_string, &bad_index) catch |err| switch (err) {
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 862df4ec9a..2fe9e5cfba 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -504,7 +504,7 @@ fn analyzeInstBlock(mod: *Module, scope: *Scope, inst: *zir.Inst.Block) InnerErr
.decl = parent_block.decl,
.instructions = .{},
.arena = parent_block.arena,
- // TODO @as here is working around a miscompilation compiler bug :(
+ // TODO @as here is working around a stage1 miscompilation bug :(
.label = @as(?Scope.Block.Label, Scope.Block.Label{
.zir_block = inst,
.results = .{},
From 2cd19c05d0f615071102a5f18bb4cc800e86c07f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 15 Aug 2020 09:53:39 -0700
Subject: [PATCH 088/153] stage1: remove buggy "unable to inline function"
compile error
We still want this compile error but I'm giving up on implementing it
correctly in stage1. It's been buggy and has false positives sometimes.
I left the test cases there, but commented out, so that when we go
through the stage1 compile error cases and get coverage for them in
stage2 we can reactivate the test cases.
closes #2154
---
src/codegen.cpp | 13 -----------
test/compile_errors.zig | 51 +++++++++++++++++++++--------------------
2 files changed, 26 insertions(+), 38 deletions(-)
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 6941eae466..1bd8d5b7bc 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -7871,17 +7871,6 @@ static void gen_global_var(CodeGen *g, ZigVar *var, LLVMValueRef init_val,
// TODO ^^ make an actual global variable
}
-static void validate_inline_fns(CodeGen *g) {
- for (size_t i = 0; i < g->inline_fns.length; i += 1) {
- ZigFn *fn_entry = g->inline_fns.at(i);
- LLVMValueRef fn_val = LLVMGetNamedFunction(g->module, fn_entry->llvm_name);
- if (fn_val != nullptr) {
- add_node_error(g, fn_entry->proto_node, buf_sprintf("unable to inline function"));
- }
- }
- report_errors_and_maybe_exit(g);
-}
-
static void set_global_tls(CodeGen *g, ZigVar *var, LLVMValueRef global_value) {
bool is_extern = var->decl_node->data.variable_declaration.is_extern;
bool is_export = var->decl_node->data.variable_declaration.is_export;
@@ -8359,8 +8348,6 @@ static void zig_llvm_emit_output(CodeGen *g) {
exit(1);
}
- validate_inline_fns(g);
-
if (g->emit_bin) {
g->link_objects.append(&g->o_file_output_path);
if (g->bundle_compiler_rt && (g->out_type == OutTypeObj || (g->out_type == OutTypeLib && !g->is_dynamic))) {
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 4adc538602..70a9c47998 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -6151,32 +6151,33 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:2:15: error: expected error union type, found '?i32'",
});
- cases.add("inline fn calls itself indirectly",
- \\export fn foo() void {
- \\ bar();
- \\}
- \\inline fn bar() void {
- \\ baz();
- \\ quux();
- \\}
- \\inline fn baz() void {
- \\ bar();
- \\ quux();
- \\}
- \\extern fn quux() void;
- , &[_][]const u8{
- "tmp.zig:4:1: error: unable to inline function",
- });
+ // TODO test this in stage2, but we won't even try in stage1
+ //cases.add("inline fn calls itself indirectly",
+ // \\export fn foo() void {
+ // \\ bar();
+ // \\}
+ // \\inline fn bar() void {
+ // \\ baz();
+ // \\ quux();
+ // \\}
+ // \\inline fn baz() void {
+ // \\ bar();
+ // \\ quux();
+ // \\}
+ // \\extern fn quux() void;
+ //, &[_][]const u8{
+ // "tmp.zig:4:1: error: unable to inline function",
+ //});
- cases.add("save reference to inline function",
- \\export fn foo() void {
- \\ quux(@ptrToInt(bar));
- \\}
- \\inline fn bar() void { }
- \\extern fn quux(usize) void;
- , &[_][]const u8{
- "tmp.zig:4:1: error: unable to inline function",
- });
+ //cases.add("save reference to inline function",
+ // \\export fn foo() void {
+ // \\ quux(@ptrToInt(bar));
+ // \\}
+ // \\inline fn bar() void { }
+ // \\extern fn quux(usize) void;
+ //, &[_][]const u8{
+ // "tmp.zig:4:1: error: unable to inline function",
+ //});
cases.add("signed integer division",
\\export fn foo(a: i32, b: i32) i32 {
From 66d76cc4f938209470fedd8e52f71267f0e0d0e4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 15 Aug 2020 17:03:05 -0700
Subject: [PATCH 089/153] stage2: codegen for labeled blocks
---
src-self-hosted/astgen.zig | 3 +++
src-self-hosted/codegen.zig | 48 +++++++++++++++++++++++++---------
test/stage2/compare_output.zig | 37 ++++++++++++++++++++++++++
3 files changed, 76 insertions(+), 12 deletions(-)
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 1747d77df3..b5f439ec27 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -1165,6 +1165,9 @@ fn builtinCall(mod: *Module, scope: *Scope, rl: ResultLoc, call: *ast.Node.Built
return simpleCast(mod, scope, rl, call, .intcast);
} else if (mem.eql(u8, builtin_name, "@bitCast")) {
return bitCast(mod, scope, rl, call);
+ } else if (mem.eql(u8, builtin_name, "@breakpoint")) {
+ const src = tree.token_locs[call.builtin_token].start;
+ return rlWrap(mod, scope, rl, try addZIRNoOp(mod, scope, src, .breakpoint));
} else {
return mod.failTok(scope, call.builtin_token, "invalid builtin function: '{}'", .{builtin_name});
}
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 29bcf1bd01..12d3884308 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -20,7 +20,21 @@ const leb128 = std.debug.leb;
/// The codegen-related data that is stored in `ir.Inst.Block` instructions.
pub const BlockData = struct {
- relocs: std.ArrayListUnmanaged(Reloc) = .{},
+ relocs: std.ArrayListUnmanaged(Reloc) = undefined,
+ /// The first break instruction encounters `null` here and chooses a
+ /// machine code value for the block result, populating this field.
+ /// Following break instructions encounter that value and use it for
+ /// the location to store their block results.
+ mcv: AnyMCValue = undefined,
+};
+
+/// Architecture-independent MCValue. Here, we have a type that is the same size as
+/// the architecture-specific MCValue. Next to the declaration of MCValue is a
+/// comptime assert that makes sure we guessed correctly about the size. This only
+/// exists so that we can bitcast an arch-independent field to and from the real MCValue.
+pub const AnyMCValue = extern struct {
+ a: u64,
+ b: u64,
};
pub const Reloc = union(enum) {
@@ -1387,16 +1401,23 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genBlock(self: *Self, inst: *ir.Inst.Block) !MCValue {
- if (inst.base.ty.hasCodeGenBits()) {
- return self.fail(inst.base.src, "TODO codegen Block with non-void type", .{});
- }
- // A block is a setup to be able to jump to the end.
+ inst.codegen = .{
+ // A block is a setup to be able to jump to the end.
+ .relocs = .{},
+ // It also acts as a receptacle for break operands.
+ // Here we use `MCValue.none` to represent a null value so that the first
+ // break instruction will choose a MCValue for the block result and overwrite
+ // this field. Following break instructions will use that MCValue to put their
+ // block results.
+ .mcv = @bitCast(AnyMCValue, MCValue { .none = {} }),
+ };
defer inst.codegen.relocs.deinit(self.gpa);
+
try self.genBody(inst.body);
for (inst.codegen.relocs.items) |reloc| try self.performReloc(inst.base.src, reloc);
- return MCValue.none;
+ return @bitCast(MCValue, inst.codegen.mcv);
}
fn performReloc(self: *Self, src: usize, reloc: Reloc) !void {
@@ -1416,13 +1437,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn genBr(self: *Self, inst: *ir.Inst.Br) !MCValue {
- if (!inst.operand.ty.hasCodeGenBits())
- return self.brVoid(inst.base.src, inst.block);
-
- const operand = try self.resolveInst(inst.operand);
- switch (arch) {
- else => return self.fail(inst.base.src, "TODO implement br for {}", .{self.target.cpu.arch}),
+ if (inst.operand.ty.hasCodeGenBits()) {
+ const operand = try self.resolveInst(inst.operand);
+ const block_mcv = @bitCast(MCValue, inst.block.codegen.mcv);
+ if (block_mcv == .none) {
+ inst.block.codegen.mcv = @bitCast(AnyMCValue, operand);
+ } else {
+ try self.setRegOrMem(inst.base.src, inst.block.base.ty, block_mcv, operand);
+ }
}
+ return self.brVoid(inst.base.src, inst.block);
}
fn genBrVoid(self: *Self, inst: *ir.Inst.BrVoid) !MCValue {
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
index 4664d001fd..4979f1b2bc 100644
--- a/test/stage2/compare_output.zig
+++ b/test/stage2/compare_output.zig
@@ -509,5 +509,42 @@ pub fn addCases(ctx: *TestContext) !void {
,
"hello\nhello\nhello\nhello\n",
);
+
+ // Labeled blocks (no conditional branch)
+ case.addCompareOutput(
+ \\export fn _start() noreturn {
+ \\ assert(add(3, 4) == 20);
+ \\
+ \\ exit();
+ \\}
+ \\
+ \\fn add(a: u32, b: u32) u32 {
+ \\ const x: u32 = blk: {
+ \\ const c = a + b; // 7
+ \\ const d = a + c; // 10
+ \\ const e = d + b; // 14
+ \\ break :blk e;
+ \\ };
+ \\ const y = x + a; // 17
+ \\ const z = y + a; // 20
+ \\ return z;
+ \\}
+ \\
+ \\pub fn assert(ok: bool) void {
+ \\ if (!ok) unreachable; // assertion failure
+ \\}
+ \\
+ \\fn exit() noreturn {
+ \\ asm volatile ("syscall"
+ \\ :
+ \\ : [number] "{rax}" (231),
+ \\ [arg1] "{rdi}" (0)
+ \\ : "rcx", "r11", "memory"
+ \\ );
+ \\ unreachable;
+ \\}
+ ,
+ "",
+ );
}
}
From 8d8d568854d33be7bcc2bc9874029d1082914af7 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 15 Aug 2020 14:17:40 -0700
Subject: [PATCH 090/153] stage2: implement zig version
---
build.zig | 20 ++++++++++++++++++++
src-self-hosted/link.zig | 5 ++++-
src-self-hosted/main.zig | 7 ++-----
3 files changed, 26 insertions(+), 6 deletions(-)
diff --git a/build.zig b/build.zig
index 9fd86f93ab..95761a2fd3 100644
--- a/build.zig
+++ b/build.zig
@@ -10,6 +10,8 @@ const io = std.io;
const fs = std.fs;
const InstallDirectoryOptions = std.build.InstallDirectoryOptions;
+const zig_version = std.builtin.Version{ .major = 0, .minor = 6, .patch = 0 };
+
pub fn build(b: *Builder) !void {
b.setPreferredReleaseMode(.ReleaseFast);
const mode = b.standardReleaseOptions();
@@ -79,6 +81,24 @@ pub fn build(b: *Builder) !void {
const log_scopes = b.option([]const []const u8, "log", "Which log scopes to enable") orelse &[0][]const u8{};
+ const opt_version_string = b.option([]const u8, "version-string", "Override Zig version string. Default is to find out with git.");
+ const version = if (opt_version_string) |version| version else v: {
+ var code: u8 = undefined;
+ const version_untrimmed = b.execAllowFail(&[_][]const u8{
+ "git", "-C", b.build_root, "name-rev", "HEAD",
+ "--tags", "--name-only", "--no-undefined", "--always",
+ }, &code, .Ignore) catch |err| {
+ std.debug.print(
+ \\Unable to determine zig version string: {}
+ \\Provide the zig version string explicitly using the `version-string` build option.
+ , .{err});
+ std.process.exit(1);
+ };
+ const trimmed = mem.trim(u8, version_untrimmed, " \n\r");
+ break :v b.fmt("{}.{}.{}+{}", .{ zig_version.major, zig_version.minor, zig_version.patch, trimmed });
+ };
+ exe.addBuildOption([]const u8, "version", version);
+
exe.addBuildOption([]const []const u8, "log_scopes", log_scopes);
exe.addBuildOption(bool, "enable_tracy", tracy != null);
if (tracy) |tracy_path| {
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 1faec19b60..76d889c581 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -15,6 +15,9 @@ const leb128 = std.debug.leb;
const Package = @import("Package.zig");
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
+const build_options = @import("build_options");
+
+const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version;
// TODO Turn back on zig fmt when https://github.com/ziglang/zig/issues/5948 is implemented.
// zig fmt: off
@@ -1132,7 +1135,7 @@ pub const File = struct {
// Write the form for the compile unit, which must match the abbrev table above.
const name_strp = try self.makeDebugString(self.base.options.root_pkg.root_src_path);
const comp_dir_strp = try self.makeDebugString(self.base.options.root_pkg.root_src_dir_path);
- const producer_strp = try self.makeDebugString("zig (TODO version here)");
+ const producer_strp = try self.makeDebugString(producer_string);
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 76d8651646..b9b4872d6b 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -95,11 +95,8 @@ pub fn main() !void {
const stdout = io.getStdOut().outStream();
return @import("print_targets.zig").cmdTargets(arena, cmd_args, stdout, info.target);
} else if (mem.eql(u8, cmd, "version")) {
- // Need to set up the build script to give the version as a comptime value.
- // TODO when you solve this, also take a look at link.zig, there is a placeholder
- // that says "TODO version here".
- std.debug.print("TODO version command not implemented yet\n", .{});
- return error.Unimplemented;
+ std.io.getStdOut().writeAll(build_options.version ++ "\n") catch process.exit(1);
+ return;
} else if (mem.eql(u8, cmd, "zen")) {
try io.getStdOut().writeAll(info_zen);
} else if (mem.eql(u8, cmd, "help")) {
From 692f38c2509da047e8be15fd7a932aa204f33fd3 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Thu, 13 Aug 2020 12:23:37 -0400
Subject: [PATCH 091/153] astgen: minor cleanup
---
src-self-hosted/astgen.zig | 96 ++++++++++++++------------------------
1 file changed, 36 insertions(+), 60 deletions(-)
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index b5f439ec27..850cc5719b 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -335,76 +335,52 @@ fn varDecl(
// Depending on the type of AST the initialization expression is, we may need an lvalue
// or an rvalue as a result location. If it is an rvalue, we can use the instruction as
// the variable, no memory location needed.
- if (nodeMayNeedMemoryLocation(init_node)) {
+ const result_loc = if (nodeMayNeedMemoryLocation(init_node)) r: {
if (node.getTrailer("type_node")) |type_node| {
const type_inst = try typeExpr(mod, scope, type_node);
const alloc = try addZIRUnOp(mod, scope, name_src, .alloc, type_inst);
- const result_loc: ResultLoc = .{ .ptr = alloc };
- const init_inst = try expr(mod, scope, result_loc, init_node);
- const sub_scope = try block_arena.create(Scope.LocalVal);
- sub_scope.* = .{
- .parent = scope,
- .gen_zir = scope.getGenZIR(),
- .name = ident_name,
- .inst = init_inst,
- };
- return &sub_scope.base;
+ break :r ResultLoc{ .ptr = alloc };
} else {
const alloc = try addZIRNoOpT(mod, scope, name_src, .alloc_inferred);
- const result_loc: ResultLoc = .{ .inferred_ptr = alloc };
- const init_inst = try expr(mod, scope, result_loc, init_node);
- const sub_scope = try block_arena.create(Scope.LocalVal);
- sub_scope.* = .{
- .parent = scope,
- .gen_zir = scope.getGenZIR(),
- .name = ident_name,
- .inst = init_inst,
- };
- return &sub_scope.base;
+ break :r ResultLoc{ .inferred_ptr = alloc };
}
- } else {
- const result_loc: ResultLoc = if (node.getTrailer("type_node")) |type_node|
- .{ .ty = try typeExpr(mod, scope, type_node) }
+ } else r: {
+ if (node.getTrailer("type_node")) |type_node|
+ break :r ResultLoc{ .ty = try typeExpr(mod, scope, type_node) }
else
- .none;
- const init_inst = try expr(mod, scope, result_loc, init_node);
- const sub_scope = try block_arena.create(Scope.LocalVal);
- sub_scope.* = .{
- .parent = scope,
- .gen_zir = scope.getGenZIR(),
- .name = ident_name,
- .inst = init_inst,
- };
- return &sub_scope.base;
- }
+ break :r .none;
+ };
+ const init_inst = try expr(mod, scope, result_loc, init_node);
+ const sub_scope = try block_arena.create(Scope.LocalVal);
+ sub_scope.* = .{
+ .parent = scope,
+ .gen_zir = scope.getGenZIR(),
+ .name = ident_name,
+ .inst = init_inst,
+ };
+ return &sub_scope.base;
},
.Keyword_var => {
- if (node.getTrailer("type_node")) |type_node| {
+ const alloc = if (node.getTrailer("type_node")) |type_node| a: {
const type_inst = try typeExpr(mod, scope, type_node);
- const alloc = try addZIRUnOp(mod, scope, name_src, .alloc, type_inst);
- const result_loc: ResultLoc = .{ .ptr = alloc };
- const init_inst = try expr(mod, scope, result_loc, init_node);
- const sub_scope = try block_arena.create(Scope.LocalPtr);
- sub_scope.* = .{
- .parent = scope,
- .gen_zir = scope.getGenZIR(),
- .name = ident_name,
- .ptr = alloc,
- };
- return &sub_scope.base;
- } else {
- const alloc = try addZIRNoOp(mod, scope, name_src, .alloc_inferred);
- const result_loc = .{ .inferred_ptr = alloc.castTag(.alloc_inferred).? };
- const init_inst = try expr(mod, scope, result_loc, init_node);
- const sub_scope = try block_arena.create(Scope.LocalPtr);
- sub_scope.* = .{
- .parent = scope,
- .gen_zir = scope.getGenZIR(),
- .name = ident_name,
- .ptr = alloc,
- };
- return &sub_scope.base;
- }
+ break :a try addZIRUnOp(mod, scope, name_src, .alloc, type_inst);
+ } else try addZIRNoOp(mod, scope, name_src, .alloc_inferred);
+ const result_loc = r: {
+ if (node.getTrailer("type_node")) |type_node| {
+ break :r ResultLoc{ .ptr = alloc };
+ } else {
+ break :r ResultLoc{ .inferred_ptr = alloc.castTag(.alloc_inferred).? };
+ }
+ };
+ const init_inst = try expr(mod, scope, result_loc, init_node);
+ const sub_scope = try block_arena.create(Scope.LocalPtr);
+ sub_scope.* = .{
+ .parent = scope,
+ .gen_zir = scope.getGenZIR(),
+ .name = ident_name,
+ .ptr = alloc,
+ };
+ return &sub_scope.base;
},
else => unreachable,
}
From 34923e071e6f9f46ab8abb645929b975ef9ba7ff Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Thu, 13 Aug 2020 12:25:42 -0400
Subject: [PATCH 092/153] CBE: minor doc change
---
src-self-hosted/codegen/c.zig | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src-self-hosted/codegen/c.zig b/src-self-hosted/codegen/c.zig
index f0d3d8367a..9a7a4888be 100644
--- a/src-self-hosted/codegen/c.zig
+++ b/src-self-hosted/codegen/c.zig
@@ -11,8 +11,8 @@ const C = link.File.C;
const Decl = Module.Decl;
const mem = std.mem;
-/// Maps a name from Zig source to C. This will always give the same output for
-/// any given input.
+/// Maps a name from Zig source to C. Currently, this will always give the same
+/// output for any given input, sometimes resulting in broken identifiers.
fn map(allocator: *std.mem.Allocator, name: []const u8) ![]const u8 {
return allocator.dupe(u8, name);
}
From 93619a5e4e9ad0e29e521c2c30fc1e39f47a6ba8 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Thu, 13 Aug 2020 12:35:17 -0400
Subject: [PATCH 093/153] Module: panic when encountering unimplemented node
---
src-self-hosted/Module.zig | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 2fad7e7a00..6272ef6d98 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -1580,6 +1580,8 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
}
}
}
+ } else {
+ std.debug.panic("TODO: analyzeRootSrcFile {}", .{src_decl.tag});
}
// TODO also look for global variable declarations
// TODO also look for comptime blocks and exported globals
From 3ca8c42e7ab04886f536a7bab34c9ea563c62711 Mon Sep 17 00:00:00 2001
From: Noam Preil
Date: Sun, 16 Aug 2020 20:36:33 -0400
Subject: [PATCH 094/153] Astgen: further cleanup
---
src-self-hosted/astgen.zig | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 850cc5719b..a88a178bb3 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -361,24 +361,21 @@ fn varDecl(
return &sub_scope.base;
},
.Keyword_var => {
- const alloc = if (node.getTrailer("type_node")) |type_node| a: {
+ const var_data: struct { result_loc: ResultLoc, alloc: *zir.Inst } = if (node.getTrailer("type_node")) |type_node| a: {
const type_inst = try typeExpr(mod, scope, type_node);
- break :a try addZIRUnOp(mod, scope, name_src, .alloc, type_inst);
- } else try addZIRNoOp(mod, scope, name_src, .alloc_inferred);
- const result_loc = r: {
- if (node.getTrailer("type_node")) |type_node| {
- break :r ResultLoc{ .ptr = alloc };
- } else {
- break :r ResultLoc{ .inferred_ptr = alloc.castTag(.alloc_inferred).? };
- }
+ const alloc = try addZIRUnOp(mod, scope, name_src, .alloc, type_inst);
+ break :a .{ .alloc = try addZIRUnOp(mod, scope, name_src, .alloc, type_inst), .result_loc = .{ .ptr = alloc } };
+ } else a: {
+ const alloc = try addZIRNoOp(mod, scope, name_src, .alloc_inferred);
+ break :a .{ .alloc = alloc, .result_loc = .{ .inferred_ptr = alloc.castTag(.alloc_inferred).? } };
};
- const init_inst = try expr(mod, scope, result_loc, init_node);
+ const init_inst = try expr(mod, scope, var_data.result_loc, init_node);
const sub_scope = try block_arena.create(Scope.LocalPtr);
sub_scope.* = .{
.parent = scope,
.gen_zir = scope.getGenZIR(),
.name = ident_name,
- .ptr = alloc,
+ .ptr = var_data.alloc,
};
return &sub_scope.base;
},
From f46e375bbe0ac0893717bd477eab78f51863e277 Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Mon, 17 Aug 2020 00:10:49 +0200
Subject: [PATCH 095/153] std/crypto: gimli.Aead.decrypt()'s ad should be const
---
lib/std/crypto/gimli.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/crypto/gimli.zig b/lib/std/crypto/gimli.zig
index 2bfbfe32f0..6861c72a63 100644
--- a/lib/std/crypto/gimli.zig
+++ b/lib/std/crypto/gimli.zig
@@ -269,7 +269,7 @@ pub const Aead = struct {
/// npub: public nonce
/// k: private key
/// NOTE: the check of the authentication tag is currently not done in constant time
- pub fn decrypt(m: []u8, c: []const u8, at: [State.RATE]u8, ad: []u8, npub: [16]u8, k: [32]u8) !void {
+ pub fn decrypt(m: []u8, c: []const u8, at: [State.RATE]u8, ad: []const u8, npub: [16]u8, k: [32]u8) !void {
assert(c.len == m.len);
var state = Aead.init(ad, npub, k);
From 3f0d80f25eccd12759bad21fb8429e646eff070b Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Fri, 14 Aug 2020 14:06:18 +0200
Subject: [PATCH 096/153] Improve curve25519-based crypto
This is a rewrite of the x25519 code, that generalizes support for
common primitives based on the same finite field.
- Low-level operations can now be performed over the curve25519 and
edwards25519 curves, as well as the ristretto255 group.
- Ed25519 signatures have been implemented.
- X25519 is now about twice as fast.
- mem.timingSafeEqual() has been added for constant-time comparison.
Domains have been clearly separated, making it easier to later add
platform-specific implementations.
---
lib/std/crypto.zig | 15 +-
lib/std/crypto/25519/curve25519.zig | 155 ++++++
lib/std/crypto/25519/ed25519.zig | 129 +++++
lib/std/crypto/25519/edwards25519.zig | 224 +++++++++
lib/std/crypto/25519/field25519.zig | 327 +++++++++++++
lib/std/crypto/25519/ristretto255.zig | 153 ++++++
lib/std/crypto/25519/scalar25519.zig | 185 +++++++
lib/std/crypto/25519/x25519.zig | 146 ++++++
lib/std/crypto/x25519.zig | 675 --------------------------
lib/std/mem.zig | 25 +
10 files changed, 1357 insertions(+), 677 deletions(-)
create mode 100644 lib/std/crypto/25519/curve25519.zig
create mode 100644 lib/std/crypto/25519/ed25519.zig
create mode 100644 lib/std/crypto/25519/edwards25519.zig
create mode 100644 lib/std/crypto/25519/field25519.zig
create mode 100644 lib/std/crypto/25519/ristretto255.zig
create mode 100644 lib/std/crypto/25519/scalar25519.zig
create mode 100644 lib/std/crypto/25519/x25519.zig
delete mode 100644 lib/std/crypto/x25519.zig
diff --git a/lib/std/crypto.zig b/lib/std/crypto.zig
index d0ec3277e8..d15ec2cff2 100644
--- a/lib/std/crypto.zig
+++ b/lib/std/crypto.zig
@@ -34,12 +34,17 @@ pub const chaCha20IETF = import_chaCha20.chaCha20IETF;
pub const chaCha20With64BitNonce = import_chaCha20.chaCha20With64BitNonce;
pub const Poly1305 = @import("crypto/poly1305.zig").Poly1305;
-pub const X25519 = @import("crypto/x25519.zig").X25519;
const import_aes = @import("crypto/aes.zig");
pub const AES128 = import_aes.AES128;
pub const AES256 = import_aes.AES256;
+pub const Curve25519 = @import("crypto/25519/curve25519.zig").Curve25519;
+pub const Ed25519 = @import("crypto/25519/ed25519.zig").Ed25519;
+pub const Edwards25519 = @import("crypto/25519/edwards25519.zig").Edwards25519;
+pub const X25519 = @import("crypto/25519/x25519.zig").X25519;
+pub const Ristretto255 = @import("crypto/25519/ristretto255.zig").Ristretto255;
+
const std = @import("std.zig");
pub const randomBytes = std.os.getrandom;
@@ -55,7 +60,13 @@ test "crypto" {
_ = @import("crypto/sha1.zig");
_ = @import("crypto/sha2.zig");
_ = @import("crypto/sha3.zig");
- _ = @import("crypto/x25519.zig");
+ _ = @import("crypto/25519/curve25519.zig");
+ _ = @import("crypto/25519/ed25519.zig");
+ _ = @import("crypto/25519/edwards25519.zig");
+ _ = @import("crypto/25519/field25519.zig");
+ _ = @import("crypto/25519/scalar25519.zig");
+ _ = @import("crypto/25519/x25519.zig");
+ _ = @import("crypto/25519/ristretto255.zig");
}
test "issue #4532: no index out of bounds" {
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
new file mode 100644
index 0000000000..a17d9baa7b
--- /dev/null
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -0,0 +1,155 @@
+const std = @import("std");
+
+/// Group operations over Curve25519.
+pub const Curve25519 = struct {
+ /// The underlying prime field.
+ pub const Fe = @import("field25519.zig").Fe;
+ /// Field arithmetic mod the order of the main subgroup.
+ pub const scalar = @import("scalar25519.zig");
+
+ x: Fe,
+
+ /// Decode a Curve25519 point from its compressed (X) coordinates.
+ pub inline fn fromBytes(s: [32]u8) Curve25519 {
+ return .{ .x = Fe.fromBytes(s) };
+ }
+
+ /// Encode a Curve25519 point.
+ pub inline fn toBytes(p: Curve25519) [32]u8 {
+ return p.x.toBytes();
+ }
+
+ /// Return the Curve25519 base point.
+ pub inline fn basePoint() Curve25519 {
+ return .{ .x = Fe.curve25519BasePoint() };
+ }
+
+ /// Check that the encoding of a Curve25519 point is canonical.
+ pub fn rejectNonCanonical(s: [32]u8) !void {
+ return Fe.rejectNonCanonical(s, false);
+ }
+
+ /// Reject the neutral element.
+ pub fn rejectIdentity(p: Curve25519) !void {
+ if (p.x.isZero()) {
+ return error.IdentityElement;
+ }
+ }
+
+ fn ladder(p: Curve25519, s: [32]u8, comptime bits: usize) !Curve25519 {
+ var x1 = p.x;
+ var x2 = Fe.one();
+ var z2 = Fe.zero();
+ var x3 = x1;
+ var z3 = Fe.one();
+ var swap: u8 = 0;
+ var pos: usize = bits - 1;
+ while (true) {
+ const b = (s[pos / 8] >> @intCast(u3, pos & 7)) & 1;
+ swap ^= b;
+ Fe.cSwap2(&x2, &x3, &z2, &z3, swap);
+ swap = b;
+ var tmp0 = x3.sub(z3);
+ var tmp1 = x2.sub(z2);
+ x2 = x2.add(z2);
+ z2 = x3.add(z3);
+ z3 = tmp0.mul(x2);
+ z2 = z2.mul(tmp1);
+ tmp0 = tmp1.sq();
+ tmp1 = x2.sq();
+ x3 = z3.add(z2);
+ z2 = z3.sub(z2);
+ x2 = tmp1.mul(tmp0);
+ tmp1 = tmp1.sub(tmp0);
+ z2 = z2.sq();
+ z3 = tmp1.mul32(121666);
+ x3 = x3.sq();
+ tmp0 = tmp0.add(z3);
+ z3 = x1.mul(z2);
+ z2 = tmp1.mul(tmp0);
+ if (pos == 0) break;
+ pos -= 1;
+ }
+ Fe.cSwap2(&x2, &x3, &z2, &z3, swap);
+ z2 = z2.invert();
+ x2 = x2.mul(z2);
+ if (x2.isZero()) {
+ return error.IdentityElement;
+ }
+ return @as(Curve25519, .{ .x = x2 });
+ }
+
+ /// Multiply a Curve25519 point by a scalar after "clamping" it.
+ /// Clamping forces the scalar to be a multiple of the cofactor in
+ /// order to prevent small subgroup attacks. This is the standard
+ /// way to use Curve25519 for a DH operation.
+ /// Return error.IdentityElement if the resulting point is
+ /// the identity element.
+ pub fn clampedMul(p: Curve25519, s: [32]u8) !Curve25519 {
+ var t: [32]u8 = s;
+ scalar.clamp(&t);
+ return ladder(p, t, 255);
+ }
+
+ /// Multiply a Curve25519 point by a scalar without clamping it.
+ /// Return error.IdentityElement if the resulting point is
+ /// the identity element or error.WeakPublicKey if the public
+ /// key is a low-order point.
+ pub fn mul(p: Curve25519, s: [32]u8) !Curve25519 {
+ const cofactor = [_]u8{8} ++ [_]u8{0} ** 31;
+ _ = ladder(p, cofactor, 4) catch |_| return error.WeakPublicKey;
+ return ladder(p, s, 256);
+ }
+};
+
+test "curve25519" {
+ var s = [32]u8{ 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8 };
+ const p = try Curve25519.basePoint().clampedMul(s);
+ try p.rejectIdentity();
+ var buf: [128]u8 = undefined;
+ const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{p.toBytes()}), "E6F2A4D1C28EE5C7AD0329268255A468AD407D2672824C0C0EB30EA6EF450145");
+ const q = try p.clampedMul(s);
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{q.toBytes()}), "3614E119FFE55EC55B87D6B19971A9F4CBC78EFE80BEC55B96392BABCC712537");
+
+ try Curve25519.rejectNonCanonical(s);
+ s[31] |= 0x80;
+ std.testing.expectError(error.NonCanonical, Curve25519.rejectNonCanonical(s));
+}
+
+test "curve25519 small order check" {
+ var s: [32]u8 = [_]u8{1} ++ [_]u8{0} ** 31;
+ const small_order_ss: [7][32]u8 = .{
+ .{
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0 (order 4)
+ },
+ .{
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 1 (order 1)
+ },
+ .{
+ 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00, // 325606250916557431795983626356110631294008115727848805560023387167927233504 (order 8)
+ },
+ .{
+ 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57, // 39382357235489614581723060781553021112529911719440698176882885853963445705823 (order 8)
+ },
+ .{
+ 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, // p-1 (order 2)
+ },
+ .{
+ 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, // p (=0, order 4)
+ },
+ .{
+ 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, // p+1 (=1, order 1)
+ },
+ };
+ for (small_order_ss) |small_order_s| {
+ std.testing.expectError(error.WeakPublicKey, Curve25519.fromBytes(small_order_s).mul(s));
+ var extra = small_order_s;
+ extra[31] ^= 0x80;
+ std.testing.expectError(error.WeakPublicKey, Curve25519.fromBytes(extra).mul(s));
+ var valid = small_order_s;
+ valid[31] = 0x40;
+ s[0] = 0;
+ std.testing.expectError(error.IdentityElement, Curve25519.fromBytes(valid).mul(s));
+ }
+}
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
new file mode 100644
index 0000000000..c5baf37683
--- /dev/null
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -0,0 +1,129 @@
+const std = @import("std");
+const fmt = std.fmt;
+const mem = std.mem;
+const Sha512 = std.crypto.Sha512;
+
+/// Ed25519 (EdDSA) signatures.
+pub const Ed25519 = struct {
+ /// The underlying elliptic curve.
+ pub const Curve = @import("edwards25519.zig").Edwards25519;
+ /// Length (in bytes) of a seed required to create a key pair.
+ pub const seed_length = 32;
+ /// Length (in bytes) of a compressed key pair.
+ pub const keypair_length = 64;
+ /// Length (in bytes) of a compressed public key.
+ pub const public_length = 32;
+ /// Length (in bytes) of a signature.
+ pub const signature_length = 64;
+ /// Length (in bytes) of optional random bytes, for non-deterministic signatures.
+ pub const noise_length = 32;
+
+ /// Derive a key pair from a secret seed.
+ pub fn createKeyPair(seed: [seed_length]u8) ![keypair_length]u8 {
+ var az: [Sha512.digest_length]u8 = undefined;
+ var h = Sha512.init();
+ h.update(&seed);
+ h.final(&az);
+ const p = try Curve.basePoint().clampedMul(az[0..32].*);
+ var keypair: [keypair_length]u8 = undefined;
+ mem.copy(u8, &keypair, &seed);
+ mem.copy(u8, keypair[seed_length..], &p.toBytes());
+ return keypair;
+ }
+
+ /// Return the public key for a given key pair.
+ pub fn publicKey(key_pair: [keypair_length]u8) [public_length]u8 {
+ var public_key: [public_length]u8 = undefined;
+ mem.copy(u8, public_key[0..], key_pair[seed_length..]);
+ return public_key;
+ }
+
+ /// Sign a message using a key pair, and optional random noise.
+ /// Having noise creates non-standard, non-deterministic signatures,
+ /// but has been proven to increase resilience against fault attacks.
+ pub fn sign(msg: []const u8, key_pair: [keypair_length]u8, noise: ?[noise_length]u8) ![signature_length]u8 {
+ const public_key = key_pair[32..];
+ var az: [Sha512.digest_length]u8 = undefined;
+ var h = Sha512.init();
+ h.update(key_pair[0..seed_length]);
+ h.final(&az);
+
+ h = Sha512.init();
+ if (noise) |*z| {
+ h.update(z);
+ }
+ h.update(az[32..]);
+ h.update(msg);
+ var nonce64: [64]u8 = undefined;
+ h.final(&nonce64);
+ const nonce = Curve.scalar.reduce64(nonce64);
+ const r = try Curve.basePoint().mul(nonce);
+
+ var sig: [signature_length]u8 = undefined;
+ mem.copy(u8, sig[0..32], &r.toBytes());
+ mem.copy(u8, sig[32..], public_key);
+ h = Sha512.init();
+ h.update(&sig);
+ h.update(msg);
+ var hram64: [Sha512.digest_length]u8 = undefined;
+ h.final(&hram64);
+ const hram = Curve.scalar.reduce64(hram64);
+
+ var x = az[0..32];
+ Curve.scalar.clamp(x);
+ const s = Curve.scalar.mulAdd(hram, x.*, nonce);
+ mem.copy(u8, sig[32..], s[0..]);
+ return sig;
+ }
+
+ /// Verify an Ed25519 signature given a message and a public key.
+ /// Returns error.InvalidSignature if the signature verification failed.
+ pub fn verify(sig: [signature_length]u8, msg: []const u8, public_key: [public_length]u8) !void {
+ const r = sig[0..32];
+ const s = sig[32..64];
+ try Curve.scalar.rejectNonCanonical(s.*);
+ try Curve.rejectNonCanonical(public_key);
+ const a = try Curve.fromBytes(public_key);
+ try a.rejectIdentity();
+
+ var h = Sha512.init();
+ h.update(r);
+ h.update(&public_key);
+ h.update(msg);
+ var hram64: [Sha512.digest_length]u8 = undefined;
+ h.final(&hram64);
+ const hram = Curve.scalar.reduce64(hram64);
+
+ const p = try a.neg().mul(hram);
+ const check = (try Curve.basePoint().mul(s.*)).add(p).toBytes();
+ if (mem.timingSafeEqual(u8, &check, r) == false) {
+ return error.InvalidSignature;
+ }
+ }
+};
+
+test "ed25519 key pair creation" {
+ var seed: [32]u8 = undefined;
+ try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
+ const key_pair = try Ed25519.createKeyPair(seed);
+ var buf: [256]u8 = undefined;
+ const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{key_pair}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+
+ const public_key = Ed25519.publicKey(key_pair);
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{public_key}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+}
+
+test "ed25519 signature" {
+ var seed: [32]u8 = undefined;
+ try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
+ const key_pair = try Ed25519.createKeyPair(seed);
+
+ const sig = try Ed25519.sign("test", key_pair, null);
+ var buf: [128]u8 = undefined;
+ const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{sig}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
+ const public_key = Ed25519.publicKey(key_pair);
+ try Ed25519.verify(sig, "test", public_key);
+ std.testing.expectError(error.InvalidSignature, Ed25519.verify(sig, "TEST", public_key));
+}
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
new file mode 100644
index 0000000000..7e748609de
--- /dev/null
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -0,0 +1,224 @@
+const std = @import("std");
+const fmt = std.fmt;
+
+/// Group operations over Edwards25519.
+pub const Edwards25519 = struct {
+ /// The underlying prime field.
+ pub const Fe = @import("field25519.zig").Fe;
+ /// Field arithmetic mod the order of the main subgroup.
+ pub const scalar = @import("scalar25519.zig");
+
+ x: Fe,
+ y: Fe,
+ z: Fe,
+ t: Fe,
+
+ is_base: bool = false,
+
+ /// Decode an Edwards25519 point from its compressed (Y+sign) coordinates.
+ pub fn fromBytes(s: [32]u8) !Edwards25519 {
+ const z = Fe.one();
+ const y = Fe.fromBytes(s);
+ var u = y.sq();
+ var v = u.mul(Fe.edwards25519d());
+ u = u.sub(z);
+ v = v.add(z);
+ const v3 = v.sq().mul(v);
+ var x = v3.sq().mul(v).mul(u).pow2523().mul(v3).mul(u);
+ const vxx = x.sq().mul(v);
+ const has_m_root = vxx.sub(u).isZero();
+ const has_p_root = vxx.add(u).isZero();
+ if ((@boolToInt(has_m_root) | @boolToInt(has_p_root)) == 0) {
+ return error.InvalidEncoding;
+ }
+ x.cMov(x.mul(Fe.sqrtm1()), 1 - @boolToInt(has_m_root));
+ x.cMov(x.neg(), @boolToInt(x.isNegative()) ^ (s[31] >> 7));
+ const t = x.mul(y);
+ return @as(Edwards25519, .{ .x = x, .y = y, .z = z, .t = t });
+ }
+
+ /// Encode an Edwards25519 point.
+ pub fn toBytes(p: Edwards25519) [32]u8 {
+ const zi = p.z.invert();
+ var s = p.y.mul(zi).toBytes();
+ s[31] ^= @as(u8, @boolToInt(p.x.mul(zi).isNegative())) << 7;
+ return s;
+ }
+
+ /// Check that the encoding of a point is canonical.
+ pub fn rejectNonCanonical(s: [32]u8) !void {
+ return Fe.rejectNonCanonical(s, true);
+ }
+
+ /// Return the Edwards25519 base point.
+ pub inline fn basePoint() Edwards25519 {
+ return .{
+ .x = Fe{ .limbs = .{ 3990542415680775, 3398198340507945, 4322667446711068, 2814063955482877, 2839572215813860 } },
+ .y = Fe{ .limbs = .{ 1801439850948184, 1351079888211148, 450359962737049, 900719925474099, 1801439850948198 } },
+ .z = Fe.one(),
+ .t = Fe{ .limbs = .{ 1841354044333475, 16398895984059, 755974180946558, 900171276175154, 1821297809914039 } },
+ .is_base = true,
+ };
+ }
+
+ inline fn identityElement() Edwards25519 {
+ return .{ .x = Fe.zero(), .y = Fe.one(), .z = Fe.one(), .t = Fe.zero() };
+ }
+
+ /// Reject the neutral element.
+ pub fn rejectIdentity(p: Edwards25519) !void {
+ if (p.x.isZero()) {
+ return error.IdentityElement;
+ }
+ }
+
+ /// Flip the sign of the X coordinate.
+ pub inline fn neg(p: Edwards25519) Edwards25519 {
+ return .{ .x = p.x.neg(), .y = p.y, .z = p.z, .t = p.t.neg() };
+ }
+
+ /// Double an Edwards25519 point.
+ pub inline fn dbl(p: Edwards25519) Edwards25519 {
+ const t0 = p.x.add(p.y).sq();
+ var x = p.x.sq();
+ var z = p.y.sq();
+ const y = z.add(x);
+ z = z.sub(x);
+ x = t0.sub(y);
+ const t = p.z.sq2().sub(z);
+ return .{
+ .x = x.mul(t),
+ .y = y.mul(z),
+ .z = z.mul(t),
+ .t = x.mul(y),
+ };
+ }
+
+ /// Add two Edwards25519 points.
+ pub inline fn add(p: Edwards25519, q: Edwards25519) Edwards25519 {
+ const a = p.y.sub(p.x).mul(q.y.sub(q.x));
+ const b = p.x.add(p.y).mul(q.x.add(q.y));
+ const c = p.t.mul(q.t).mul(Fe.edwards25519d2());
+ var d = p.z.mul(q.z);
+ d = d.add(d);
+ const x = b.sub(a);
+ const y = b.add(a);
+ const z = d.add(c);
+ const t = d.sub(c);
+ return .{
+ .x = x.mul(t),
+ .y = y.mul(z),
+ .z = z.mul(t),
+ .t = x.mul(y),
+ };
+ }
+
+ inline fn cMov(p: *Edwards25519, a: Edwards25519, c: u64) void {
+ p.x.cMov(a.x, c);
+ p.y.cMov(a.y, c);
+ p.z.cMov(a.z, c);
+ p.t.cMov(a.t, c);
+ }
+
+ inline fn pcSelect(pc: [16]Edwards25519, b: u8) Edwards25519 {
+ var t = Edwards25519.identityElement();
+ comptime var i: u8 = 0;
+ inline while (i < 16) : (i += 1) {
+ t.cMov(pc[i], ((@intCast(usize, (b ^ i)) -% 1) >> 8) & 1);
+ }
+ return t;
+ }
+
+ fn pcMul(pc: [16]Edwards25519, s: [32]u8) !Edwards25519 {
+ var q = Edwards25519.identityElement();
+ var pos: usize = 252;
+ while (true) {
+ q = q.dbl().dbl().dbl().dbl();
+ const b = (s[pos / 8] >> @intCast(u3, pos & 7)) & 0xf;
+ q = q.add(pcSelect(pc, b));
+ if (pos == 0) break;
+ pos -= 4;
+ }
+ try q.rejectIdentity();
+ return q;
+ }
+
+ fn precompute(p: Edwards25519) [16]Edwards25519 {
+ var pc: [16]Edwards25519 = undefined;
+ pc[0] = Edwards25519.identityElement();
+ pc[1] = p;
+ var i: usize = 2;
+ while (i < 16) : (i += 1) {
+ pc[i] = pc[i - 1].add(p);
+ }
+ return pc;
+ }
+
+ fn _mul(p: Edwards25519, s: [32]u8) !Edwards25519 {
+ var pc: [16]Edwards25519 = undefined;
+ if (p.is_base) {
+ @setEvalBranchQuota(10000);
+ pc = comptime precompute(Edwards25519.basePoint());
+ } else {
+ pc = precompute(p);
+ pc[4].rejectIdentity() catch |_| return error.WeakPublicKey;
+ }
+ return pcMul(pc, s);
+ }
+
+ /// Multiply an Edwards25519 point by a scalar after "clamping" it.
+ /// Clamping forces the scalar to be a multiple of the cofactor in
+ /// order to prevent small subgroup attacks.
+ /// This is strongly recommended for DH operations.
+ /// Return error.WeakPublicKey if the resulting point is
+ /// the identity element.
+ pub fn clampedMul(p: Edwards25519, s: [32]u8) !Edwards25519 {
+ var t: [32]u8 = s;
+ scalar.clamp(&t);
+ return _mul(p, t);
+ }
+
+ /// Multiply an Edwards25519 point by a scalar without clamping it.
+ /// Return error.WeakPublicKey if the resulting point is
+ /// the identity element.
+ pub fn mul(p: Edwards25519, s: [32]u8) !Edwards25519 {
+ return _mul(p, s);
+ }
+};
+
+test "edwards25519 packing/unpacking" {
+ const s = [_]u8{170} ++ [_]u8{0} ** 31;
+ var b = Edwards25519.basePoint();
+ const pk = try b.mul(s);
+ var buf: [128]u8 = undefined;
+ const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{pk.toBytes()}), "074BC7E0FCBD587FDBC0969444245FADC562809C8F6E97E949AF62484B5B81A6");
+
+ const small_order_ss: [7][32]u8 = .{
+ .{
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0 (order 4)
+ },
+ .{
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 1 (order 1)
+ },
+ .{
+ 0x26, 0xe8, 0x95, 0x8f, 0xc2, 0xb2, 0x27, 0xb0, 0x45, 0xc3, 0xf4, 0x89, 0xf2, 0xef, 0x98, 0xf0, 0xd5, 0xdf, 0xac, 0x05, 0xd3, 0xc6, 0x33, 0x39, 0xb1, 0x38, 0x02, 0x88, 0x6d, 0x53, 0xfc, 0x05, // 270738550114484064931822528722565878893680426757531351946374360975030340202 (order 8)
+ },
+ .{
+ 0xc7, 0x17, 0x6a, 0x70, 0x3d, 0x4d, 0xd8, 0x4f, 0xba, 0x3c, 0x0b, 0x76, 0x0d, 0x10, 0x67, 0x0f, 0x2a, 0x20, 0x53, 0xfa, 0x2c, 0x39, 0xcc, 0xc6, 0x4e, 0xc7, 0xfd, 0x77, 0x92, 0xac, 0x03, 0x7a, // 55188659117513257062467267217118295137698188065244968500265048394206261417927 (order 8)
+ },
+ .{
+ 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, // p-1 (order 2)
+ },
+ .{
+ 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, // p (=0, order 4)
+ },
+ .{
+ 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, // p+1 (=1, order 1)
+ },
+ };
+ for (small_order_ss) |small_order_s| {
+ const small_p = try Edwards25519.fromBytes(small_order_s);
+ std.testing.expectError(error.WeakPublicKey, small_p.mul(s));
+ }
+}
diff --git a/lib/std/crypto/25519/field25519.zig b/lib/std/crypto/25519/field25519.zig
new file mode 100644
index 0000000000..9061cbf011
--- /dev/null
+++ b/lib/std/crypto/25519/field25519.zig
@@ -0,0 +1,327 @@
+const std = @import("std");
+const readIntLittle = std.mem.readIntLittle;
+const writeIntLittle = std.mem.writeIntLittle;
+
+pub const Fe = struct {
+ limbs: [5]u64 = undefined,
+
+ const MASK51: u64 = 0x7ffffffffffff;
+
+ pub inline fn zero() Fe {
+ return .{ .limbs = .{ 0, 0, 0, 0, 0 } };
+ }
+
+ pub inline fn one() Fe {
+ return .{ .limbs = .{ 1, 0, 0, 0, 0 } };
+ }
+
+ pub inline fn sqrtm1() Fe {
+ return .{ .limbs = .{ 1718705420411056, 234908883556509, 2233514472574048, 2117202627021982, 765476049583133 } }; // sqrt(-1)
+ }
+
+ pub inline fn curve25519BasePoint() Fe {
+ return .{ .limbs = .{ 9, 0, 0, 0, 0 } };
+ }
+
+ pub inline fn edwards25519d() Fe {
+ return .{ .limbs = .{ 929955233495203, 466365720129213, 1662059464998953, 2033849074728123, 1442794654840575 } }; // 37095705934669439343138083508754565189542113879843219016388785533085940283555
+ }
+
+ pub inline fn edwards25519d2() Fe {
+ return .{ .limbs = .{ 1859910466990425, 932731440258426, 1072319116312658, 1815898335770999, 633789495995903 } }; // 2d
+ }
+
+ // 1/sqrt(a-d)
+ pub inline fn edwards25519sqrtamd() Fe {
+ return .{ .limbs = .{ 278908739862762, 821645201101625, 8113234426968, 1777959178193151, 2118520810568447 } };
+ }
+
+ pub inline fn isZero(fe: Fe) bool {
+ var reduced = fe;
+ reduced.reduce();
+ const limbs = reduced.limbs;
+ return (limbs[0] | limbs[1] | limbs[2] | limbs[3] | limbs[4]) == 0;
+ }
+
+ pub inline fn equivalent(a: Fe, b: Fe) bool {
+ return a.sub(b).isZero();
+ }
+
+ pub fn fromBytes(s: [32]u8) Fe {
+ var fe: Fe = undefined;
+ fe.limbs[0] = readIntLittle(u64, s[0..8]) & MASK51;
+ fe.limbs[1] = (readIntLittle(u64, s[6..14]) >> 3) & MASK51;
+ fe.limbs[2] = (readIntLittle(u64, s[12..20]) >> 6) & MASK51;
+ fe.limbs[3] = (readIntLittle(u64, s[19..27]) >> 1) & MASK51;
+ fe.limbs[4] = (readIntLittle(u64, s[24..32]) >> 12) & MASK51;
+
+ return fe;
+ }
+
+ pub fn toBytes(fe: Fe) [32]u8 {
+ var reduced = fe;
+ reduced.reduce();
+ var s: [32]u8 = undefined;
+ writeIntLittle(u64, s[0..8], reduced.limbs[0] | (reduced.limbs[1] << 51));
+ writeIntLittle(u64, s[8..16], (reduced.limbs[1] >> 13) | (reduced.limbs[2] << 38));
+ writeIntLittle(u64, s[16..24], (reduced.limbs[2] >> 26) | (reduced.limbs[3] << 25));
+ writeIntLittle(u64, s[24..32], (reduced.limbs[3] >> 39) | (reduced.limbs[4] << 12));
+
+ return s;
+ }
+
+ pub fn rejectNonCanonical(s: [32]u8, comptime ignore_extra_bit: bool) !void {
+ var c: u16 = (s[31] & 0x7f) ^ 0x7f;
+ comptime var i = 30;
+ inline while (i > 0) : (i -= 1) {
+ c |= s[i] ^ 0xff;
+ }
+ c = (c -% 1) >> 8;
+ const d = (@intCast(u16, 0xed - 1) -% @intCast(u16, s[0])) >> 8;
+ const x = if (ignore_extra_bit) 0 else s[31] >> 7;
+ if ((((c & d) | x) & 1) != 0) {
+ return error.NonCanonical;
+ }
+ }
+
+ fn reduce(fe: *Fe) void {
+ comptime var i = 0;
+ comptime var j = 0;
+ const limbs = &fe.limbs;
+ inline while (j < 2) : (j += 1) {
+ i = 0;
+ inline while (i < 4) : (i += 1) {
+ limbs[i + 1] += limbs[i] >> 51;
+ limbs[i] &= MASK51;
+ }
+ limbs[0] += 19 * (limbs[4] >> 51);
+ limbs[4] &= MASK51;
+ }
+ limbs[0] += 19;
+ i = 0;
+ inline while (i < 4) : (i += 1) {
+ limbs[i + 1] += limbs[i] >> 51;
+ limbs[i] &= MASK51;
+ }
+ limbs[0] += 19 * (limbs[4] >> 51);
+ limbs[4] &= MASK51;
+
+ limbs[0] += 0x8000000000000 - 19;
+ limbs[1] += 0x8000000000000 - 1;
+ limbs[2] += 0x8000000000000 - 1;
+ limbs[3] += 0x8000000000000 - 1;
+ limbs[4] += 0x8000000000000 - 1;
+
+ i = 0;
+ inline while (i < 4) : (i += 1) {
+ limbs[i + 1] += limbs[i] >> 51;
+ limbs[i] &= MASK51;
+ }
+ limbs[4] &= MASK51;
+ }
+
+ pub inline fn add(a: Fe, b: Fe) Fe {
+ var fe: Fe = undefined;
+ comptime var i = 0;
+ inline while (i < 5) : (i += 1) {
+ fe.limbs[i] = a.limbs[i] + b.limbs[i];
+ }
+ return fe;
+ }
+
+ pub fn sub(a: Fe, b: Fe) Fe {
+ var fe = b;
+ comptime var i = 0;
+ inline while (i < 4) : (i += 1) {
+ fe.limbs[i + 1] += fe.limbs[i] >> 51;
+ fe.limbs[i] &= MASK51;
+ }
+ fe.limbs[0] += 19 * (fe.limbs[4] >> 51);
+ fe.limbs[4] &= MASK51;
+ fe.limbs[0] = (a.limbs[0] + 0xfffffffffffda) - fe.limbs[0];
+ fe.limbs[1] = (a.limbs[1] + 0xffffffffffffe) - fe.limbs[1];
+ fe.limbs[2] = (a.limbs[2] + 0xffffffffffffe) - fe.limbs[2];
+ fe.limbs[3] = (a.limbs[3] + 0xffffffffffffe) - fe.limbs[3];
+ fe.limbs[4] = (a.limbs[4] + 0xffffffffffffe) - fe.limbs[4];
+
+ return fe;
+ }
+
+ pub inline fn neg(a: Fe) Fe {
+ return zero().sub(a);
+ }
+
+ pub inline fn isNegative(a: Fe) bool {
+ return (a.toBytes()[0] & 1) != 0;
+ }
+
+ pub inline fn cMov(fe: *Fe, a: Fe, c: u64) void {
+ const mask: u64 = 0 -% c;
+ var x = fe.*;
+ comptime var i = 0;
+ inline while (i < 5) : (i += 1) {
+ x.limbs[i] ^= a.limbs[i];
+ }
+ i = 0;
+ inline while (i < 5) : (i += 1) {
+ x.limbs[i] &= mask;
+ }
+ i = 0;
+ inline while (i < 5) : (i += 1) {
+ fe.limbs[i] ^= x.limbs[i];
+ }
+ }
+
+ pub fn cSwap2(a0: *Fe, b0: *Fe, a1: *Fe, b1: *Fe, c: u64) void {
+ const mask: u64 = 0 -% c;
+ var x0 = a0.*;
+ var x1 = a1.*;
+ comptime var i = 0;
+ inline while (i < 5) : (i += 1) {
+ x0.limbs[i] ^= b0.limbs[i];
+ x1.limbs[i] ^= b1.limbs[i];
+ }
+ i = 0;
+ inline while (i < 5) : (i += 1) {
+ x0.limbs[i] &= mask;
+ x1.limbs[i] &= mask;
+ }
+ i = 0;
+ inline while (i < 5) : (i += 1) {
+ a0.limbs[i] ^= x0.limbs[i];
+ b0.limbs[i] ^= x0.limbs[i];
+ a1.limbs[i] ^= x1.limbs[i];
+ b1.limbs[i] ^= x1.limbs[i];
+ }
+ }
+
+ inline fn _carry128(r: *[5]u128) Fe {
+ var rs: [5]u64 = undefined;
+ comptime var i = 0;
+ inline while (i < 4) : (i += 1) {
+ rs[i] = @truncate(u64, r[i]) & MASK51;
+ r[i + 1] += @intCast(u64, r[i] >> 51);
+ }
+ rs[4] = @truncate(u64, r[4]) & MASK51;
+ var carry = @intCast(u64, r[4] >> 51);
+ rs[0] += 19 * carry;
+ carry = rs[0] >> 51;
+ rs[0] &= MASK51;
+ rs[1] += carry;
+ carry = rs[1] >> 51;
+ rs[1] &= MASK51;
+ rs[2] += carry;
+
+ return .{ .limbs = rs };
+ }
+
+ pub fn mul(a: Fe, b: Fe) Fe {
+ var ax: [5]u128 = undefined;
+ var bx: [5]u128 = undefined;
+ var a19: [5]u128 = undefined;
+ var r: [5]u128 = undefined;
+ comptime var i = 0;
+ inline while (i < 5) : (i += 1) {
+ ax[i] = @intCast(u128, a.limbs[i]);
+ bx[i] = @intCast(u128, b.limbs[i]);
+ }
+ i = 1;
+ inline while (i < 5) : (i += 1) {
+ a19[i] = 19 * ax[i];
+ }
+ r[0] = ax[0] * bx[0] + a19[1] * bx[4] + a19[2] * bx[3] + a19[3] * bx[2] + a19[4] * bx[1];
+ r[1] = ax[0] * bx[1] + ax[1] * bx[0] + a19[2] * bx[4] + a19[3] * bx[3] + a19[4] * bx[2];
+ r[2] = ax[0] * bx[2] + ax[1] * bx[1] + ax[2] * bx[0] + a19[3] * bx[4] + a19[4] * bx[3];
+ r[3] = ax[0] * bx[3] + ax[1] * bx[2] + ax[2] * bx[1] + ax[3] * bx[0] + a19[4] * bx[4];
+ r[4] = ax[0] * bx[4] + ax[1] * bx[3] + ax[2] * bx[2] + ax[3] * bx[1] + ax[4] * bx[0];
+
+ return _carry128(&r);
+ }
+
+ fn _sq(a: Fe, double: comptime bool) Fe {
+ var ax: [5]u128 = undefined;
+ var r: [5]u128 = undefined;
+ comptime var i = 0;
+ inline while (i < 5) : (i += 1) {
+ ax[i] = @intCast(u128, a.limbs[i]);
+ }
+ const a0_2 = 2 * ax[0];
+ const a1_2 = 2 * ax[1];
+ const a1_38 = 38 * ax[1];
+ const a2_38 = 38 * ax[2];
+ const a3_38 = 38 * ax[3];
+ const a3_19 = 19 * ax[3];
+ const a4_19 = 19 * ax[4];
+ r[0] = ax[0] * ax[0] + a1_38 * ax[4] + a2_38 * ax[3];
+ r[1] = a0_2 * ax[1] + a2_38 * ax[4] + a3_19 * ax[3];
+ r[2] = a0_2 * ax[2] + ax[1] * ax[1] + a3_38 * ax[4];
+ r[3] = a0_2 * ax[3] + a1_2 * ax[2] + a4_19 * ax[4];
+ r[4] = a0_2 * ax[4] + a1_2 * ax[3] + ax[2] * ax[2];
+ if (double) {
+ i = 0;
+ inline while (i < 5) : (i += 1) {
+ r[i] *= 2;
+ }
+ }
+ return _carry128(&r);
+ }
+
+ pub inline fn sq(a: Fe) Fe {
+ return _sq(a, false);
+ }
+
+ pub inline fn sq2(a: Fe) Fe {
+ return _sq(a, true);
+ }
+
+ pub inline fn mul32(a: Fe, comptime n: u32) Fe {
+ const sn = @intCast(u128, n);
+ var fe: Fe = undefined;
+ var x: u128 = 0;
+ comptime var i = 0;
+ inline while (i < 5) : (i += 1) {
+ x = a.limbs[i] * sn + (x >> 51);
+ fe.limbs[i] = @truncate(u64, x) & MASK51;
+ }
+ fe.limbs[0] += @intCast(u64, x >> 51) * 19;
+
+ return fe;
+ }
+
+ inline fn sqn(a: Fe, comptime n: comptime_int) Fe {
+ var i: usize = 0;
+ var fe = a;
+ while (i < n) : (i += 1) {
+ fe = fe.sq();
+ }
+ return fe;
+ }
+
+ pub fn invert(a: Fe) Fe {
+ var t0 = a.sq();
+ var t1 = t0.sqn(2).mul(a);
+ t0 = t0.mul(t1);
+ t1 = t1.mul(t0.sq());
+ t1 = t1.mul(t1.sqn(5));
+ var t2 = t1.sqn(10).mul(t1);
+ t2 = t2.mul(t2.sqn(20)).sqn(10);
+ t1 = t1.mul(t2);
+ t2 = t1.sqn(50).mul(t1);
+ return t1.mul(t2.mul(t2.sqn(100)).sqn(50)).sqn(5).mul(t0);
+ }
+
+ pub fn pow2523(a: Fe) Fe {
+ var c = a;
+ var i: usize = 0;
+ while (i < 249) : (i += 1) {
+ c = c.sq().mul(a);
+ }
+ return c.sq().sq().mul(a);
+ }
+
+ pub fn abs(a: Fe) Fe {
+ var r = a;
+ r.cMov(a.neg(), @boolToInt(a.isNegative()));
+ return r;
+ }
+};
diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig
new file mode 100644
index 0000000000..71fa876c4f
--- /dev/null
+++ b/lib/std/crypto/25519/ristretto255.zig
@@ -0,0 +1,153 @@
+const std = @import("std");
+const fmt = std.fmt;
+
+/// Group operations over Edwards25519.
+pub const Ristretto255 = struct {
+ /// The underlying elliptic curve.
+ pub const Curve = @import("edwards25519.zig").Edwards25519;
+ /// The underlying prime field.
+ pub const Fe = Curve.Fe;
+ /// Field arithmetic mod the order of the main subgroup.
+ pub const scalar = Curve.scalar;
+
+ p: Curve = undefined,
+
+ fn sqrtRatioM1(u: Fe, v: Fe) !Fe {
+ const v3 = v.sq().mul(v); // v3 = v^3
+ var x = v3.sq().mul(u).mul(v). // x = uv^7
+ pow2523().mul(v3).mul(u); // x = uv^3(uv^7)^((q-5)/8)
+ const vxx = x.sq().mul(v); // vx^2
+ const m_root_check = vxx.sub(u); // vx^2-u
+ const p_root_check = vxx.add(u); // vx^2+u
+ const f_root_check = u.mul(Fe.sqrtm1()).add(vxx); // vx^2+u*sqrt(-1)
+ const has_m_root = m_root_check.isZero();
+ const has_p_root = p_root_check.isZero();
+ const has_f_root = f_root_check.isZero();
+ const x_sqrtm1 = x.mul(Fe.sqrtm1()); // x*sqrt(-1)
+ x.cMov(x_sqrtm1, @boolToInt(has_p_root) | @boolToInt(has_f_root));
+ x = x.abs();
+ if ((@boolToInt(has_m_root) | @boolToInt(has_p_root)) == 0) {
+ return error.NoRoot;
+ }
+ return x;
+ }
+
+ fn rejectNonCanonical(s: [32]u8) !void {
+ if ((s[0] & 1) != 0) {
+ return error.NonCanonical;
+ }
+ try Fe.rejectNonCanonical(s, false);
+ }
+
+ /// Reject the neutral element.
+ pub inline fn rejectIdentity(p: Ristretto255) !void {
+ return p.p.rejectIdentity();
+ }
+
+    /// Return the base point (Ristretto is a curve in disguise).
+ pub inline fn basePoint() Ristretto255 {
+ return .{ .p = Curve.basePoint() };
+ }
+
+ /// Decode a Ristretto255 representative.
+ pub fn fromBytes(s: [32]u8) !Ristretto255 {
+ try rejectNonCanonical(s);
+ const s_ = Fe.fromBytes(s);
+ const ss = s_.sq(); // s^2
+ const u1_ = Fe.one().sub(ss); // (1-s^2)
+ const u1u1 = u1_.sq(); // (1-s^2)^2
+ const u2_ = Fe.one().add(ss); // (1+s^2)
+ const u2u2 = u2_.sq(); // (1+s^2)^2
+ const v = Fe.edwards25519d().mul(u1u1).neg().sub(u2u2); // -(d*u1^2)-u2^2
+ const v_u2u2 = v.mul(u2u2); // v*u2^2
+ const inv_sqrt = sqrtRatioM1(Fe.one(), v_u2u2) catch |e| {
+ return error.InvalidEncoding;
+ };
+ var x = inv_sqrt.mul(u2_);
+ const y = inv_sqrt.mul(x).mul(v).mul(u1_);
+ x = x.mul(s_);
+ x = x.add(x).abs();
+ const t = x.mul(y);
+ if ((@boolToInt(t.isNegative()) | @boolToInt(y.isZero())) != 0) {
+ return error.InvalidEncoding;
+ }
+ const p: Curve = .{
+ .x = x,
+ .y = y,
+ .z = Fe.one(),
+ .t = t,
+ };
+ return @as(Ristretto255, .{ .p = p });
+ }
+
+ /// Encode to a Ristretto255 representative.
+ pub fn toBytes(e: Ristretto255) [32]u8 {
+ const p = &e.p;
+ var u1_ = p.z.add(p.y); // Z+Y
+ const zmy = p.z.sub(p.y); // Z-Y
+ u1_ = u1_.mul(zmy); // (Z+Y)*(Z-Y)
+ const u2_ = p.x.mul(p.y); // X*Y
+
+ const u1_u2u2 = u2_.sq().mul(u1_); // u1*u2^2
+
+ const inv_sqrt = sqrtRatioM1(Fe.one(), u1_u2u2) catch unreachable;
+ const den1 = inv_sqrt.mul(u1_);
+ const den2 = inv_sqrt.mul(u2_);
+ const z_inv = den1.mul(den2).mul(p.t); // den1*den2*T
+
+ const ix = p.x.mul(Fe.sqrtm1()); // X*sqrt(-1)
+ const iy = p.y.mul(Fe.sqrtm1()); // Y*sqrt(-1)
+ const eden = den1.mul(Fe.edwards25519sqrtamd()); // den1/sqrt(a-d)
+
+ const t_z_inv = p.t.mul(z_inv); // T*z_inv
+ const rotate = @boolToInt(t_z_inv.isNegative());
+
+ var x = p.x;
+ var y = p.y;
+ var den_inv = den2;
+
+ x.cMov(iy, rotate);
+ y.cMov(ix, rotate);
+ den_inv.cMov(eden, rotate);
+
+ const x_z_inv = x.mul(z_inv);
+ const yneg = y.neg();
+ y.cMov(yneg, @boolToInt(x_z_inv.isNegative()));
+
+ return p.z.sub(y).mul(den_inv).abs().toBytes();
+ }
+
+ /// Double a Ristretto255 element.
+ pub inline fn dbl(p: Ristretto255) Ristretto255 {
+ return .{ .p = p.p.dbl() };
+ }
+
+ /// Add two Ristretto255 elements.
+ pub inline fn add(p: Ristretto255, q: Ristretto255) Ristretto255 {
+ return .{ .p = p.p.add(q.p) };
+ }
+
+ /// Multiply a Ristretto255 element with a scalar.
+ /// Return error.WeakPublicKey if the resulting element is
+ /// the identity element.
+ pub inline fn mul(p: Ristretto255, s: [32]u8) !Ristretto255 {
+ return @as(Ristretto255, .{ .p = try p.p.mul(s) });
+ }
+};
+
+test "ristretto255" {
+ const p = Ristretto255.basePoint();
+ var buf: [256]u8 = undefined;
+ const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{p.toBytes()}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
+
+ var r: [32]u8 = undefined;
+ try fmt.hexToBytes(r[0..], "6a493210f7499cd17fecb510ae0cea23a110e8d5b901f8acadd3095c73a3b919");
+ var q = try Ristretto255.fromBytes(r);
+ q = q.dbl().add(p);
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{q.toBytes()}), "E882B131016B52C1D3337080187CF768423EFCCBB517BB495AB812C4160FF44E");
+
+ const s = [_]u8{15} ++ [_]u8{0} ** 31;
+ const w = try p.mul(s);
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{w.toBytes()}), "E0C418F7C8D9C4CDD7395B93EA124F3AD99021BB681DFC3302A9D99A2E53E64E");
+}
diff --git a/lib/std/crypto/25519/scalar25519.zig b/lib/std/crypto/25519/scalar25519.zig
new file mode 100644
index 0000000000..a94e6531dc
--- /dev/null
+++ b/lib/std/crypto/25519/scalar25519.zig
@@ -0,0 +1,185 @@
+const std = @import("std");
+const mem = std.mem;
+
+inline fn fieldSize() [32]u8 {
+ return .{
+ 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, // 2^252+27742317777372353535851937790883648493
+ };
+}
+
+const ScalarExpanded = struct {
+ const L = fieldSize();
+ limbs: [64]i64 = [_]i64{0} ** 64,
+
+ fn fromBytes(s: [32]u8) ScalarExpanded {
+ var limbs: [64]i64 = undefined;
+ for (s) |x, idx| {
+ limbs[idx] = @intCast(i64, x);
+ }
+ mem.set(i64, limbs[32..], 0);
+ return .{ .limbs = limbs };
+ }
+
+ fn fromBytes64(s: [64]u8) ScalarExpanded {
+ var limbs: [64]i64 = undefined;
+ for (s) |x, idx| {
+ limbs[idx] = @intCast(i64, x);
+ }
+ return .{ .limbs = limbs };
+ }
+
+ fn reduce(e: *ScalarExpanded) void {
+ const limbs = &e.limbs;
+ var carry: i64 = undefined;
+ var i: usize = 63;
+ while (i >= 32) : (i -= 1) {
+ carry = 0;
+ const k = i - 12;
+ const xi = limbs[i];
+ var j = i - 32;
+ while (j < k) : (j += 1) {
+ const xj = limbs[j] + carry - 16 * xi * @intCast(i64, L[j - (i - 32)]);
+ carry = (xj + 128) >> 8;
+ limbs[j] = xj - carry * 256;
+ }
+ limbs[k] += carry;
+ limbs[i] = 0;
+ }
+ carry = 0;
+ comptime var j: usize = 0;
+ inline while (j < 32) : (j += 1) {
+ const xi = limbs[j] + carry - (limbs[31] >> 4) * @intCast(i64, L[j]);
+ carry = xi >> 8;
+ limbs[j] = xi & 255;
+ }
+ j = 0;
+ inline while (j < 32) : (j += 1) {
+ limbs[j] -= carry * @intCast(i64, L[j]);
+ }
+ j = 0;
+ inline while (j < 32) : (j += 1) {
+ limbs[j + 1] += limbs[j] >> 8;
+ }
+ }
+
+ fn toBytes(e: *ScalarExpanded) [32]u8 {
+ e.reduce();
+ var r: [32]u8 = undefined;
+ var i: usize = 0;
+ while (i < 32) : (i += 1) {
+ r[i] = @intCast(u8, e.limbs[i]);
+ }
+ return r;
+ }
+
+ fn add(a: ScalarExpanded, b: ScalarExpanded) ScalarExpanded {
+ var r = ScalarExpanded{};
+ comptime var i = 0;
+ inline while (i < 64) : (i += 1) {
+ r.limbs[i] = a.limbs[i] + b.limbs[i];
+ }
+ return r;
+ }
+
+ fn mul(a: ScalarExpanded, b: ScalarExpanded) ScalarExpanded {
+ var r = ScalarExpanded{};
+ var i: usize = 0;
+ while (i < 32) : (i += 1) {
+ const ai = a.limbs[i];
+ comptime var j = 0;
+ inline while (j < 32) : (j += 1) {
+ r.limbs[i + j] += ai * b.limbs[j];
+ }
+ }
+ r.reduce();
+ return r;
+ }
+
+ fn sq(a: ScalarExpanded) ScalarExpanded {
+ return a.mul(a);
+ }
+
+ fn mulAdd(a: ScalarExpanded, b: ScalarExpanded, c: ScalarExpanded) ScalarExpanded {
+ var r: ScalarExpanded = .{ .limbs = c.limbs };
+ var i: usize = 0;
+ while (i < 32) : (i += 1) {
+ const ai = a.limbs[i];
+ comptime var j = 0;
+ inline while (j < 32) : (j += 1) {
+ r.limbs[i + j] += ai * b.limbs[j];
+ }
+ }
+ r.reduce();
+ return r;
+ }
+};
+
+/// Reject a scalar whose encoding is not canonical.
+pub fn rejectNonCanonical(s: [32]u8) !void {
+ const L = fieldSize();
+ var c: u8 = 0;
+ var n: u8 = 1;
+ var i: usize = 31;
+ while (true) {
+ const xs = @intCast(u16, s[i]);
+ const xL = @intCast(u16, L[i]);
+ c |= @intCast(u8, ((xs -% xL) >> 8) & n);
+ n &= @intCast(u8, ((xs ^ xL) -% 1) >> 8);
+ if (i == 0) break;
+ i -= 1;
+ }
+ if (c == 0) {
+ return error.NonCanonical;
+ }
+}
+
+/// Reduce a scalar to the field size.
+pub fn reduce(s: [32]u8) [32]u8 {
+ return ScalarExpanded.fromBytes(s).toBytes();
+}
+
+/// Reduce a 64-bytes scalar to the field size.
+pub fn reduce64(s: [64]u8) [32]u8 {
+ return ScalarExpanded.fromBytes64(s).toBytes();
+}
+
+/// Perform the X25519 "clamping" operation.
+/// The scalar is then guaranteed to be a multiple of the cofactor.
+pub inline fn clamp(s: *[32]u8) void {
+ s[0] &= 248;
+ s[31] = (s[31] & 127) | 64;
+}
+
+/// Return a*b+c (mod L)
+pub fn mulAdd(a: [32]u8, b: [32]u8, c: [32]u8) [32]u8 {
+ return ScalarExpanded.fromBytes(a).mulAdd(ScalarExpanded.fromBytes(b), ScalarExpanded.fromBytes(c)).toBytes();
+}
+
+test "scalar25519" {
+ const bytes: [32]u8 = .{ 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 255 };
+ var x = ScalarExpanded.fromBytes(bytes);
+ var y = x.toBytes();
+ try rejectNonCanonical(y);
+ var buf: [128]u8 = undefined;
+ const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{y}), "1E979B917937F3DE71D18077F961F6CEFF01030405060708010203040506070F");
+
+ const field_size = fieldSize();
+ const reduced = reduce(field_size);
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{reduced}), "0000000000000000000000000000000000000000000000000000000000000000");
+}
+
+test "non-canonical scalar25519" {
+ const too_targe: [32]u8 = .{ 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10 };
+ std.testing.expectError(error.NonCanonical, rejectNonCanonical(too_targe));
+}
+
+test "mulAdd overflow check" {
+ const a: [32]u8 = [_]u8{0xff} ** 32;
+ const b: [32]u8 = [_]u8{0xff} ** 32;
+ const c: [32]u8 = [_]u8{0xff} ** 32;
+ const x = mulAdd(a, b, c);
+ var buf: [128]u8 = undefined;
+ const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
+ std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{x}), "D14DF91389432C25AD60FF9791B9FD1D67BEF517D273ECCE3D9A307C1B419903");
+}
diff --git a/lib/std/crypto/25519/x25519.zig b/lib/std/crypto/25519/x25519.zig
new file mode 100644
index 0000000000..55c860d13e
--- /dev/null
+++ b/lib/std/crypto/25519/x25519.zig
@@ -0,0 +1,146 @@
+const std = @import("std");
+const mem = std.mem;
+const fmt = std.fmt;
+
+/// X25519 DH function.
+pub const X25519 = struct {
+ /// The underlying elliptic curve.
+ pub const Curve = @import("curve25519.zig").Curve25519;
+ /// Length (in bytes) of a secret key.
+ pub const secret_length = 32;
+ /// Length (in bytes) of the output of the DH function.
+ pub const minimum_key_length = 32;
+
+ /// Compute the public key for a given private key.
+ pub fn createPublicKey(public_key: []u8, private_key: []const u8) bool {
+ std.debug.assert(private_key.len >= minimum_key_length);
+ std.debug.assert(public_key.len >= minimum_key_length);
+ var s: [32]u8 = undefined;
+ mem.copy(u8, &s, private_key[0..32]);
+ if (Curve.basePoint().clampedMul(s)) |q| {
+ mem.copy(u8, public_key, q.toBytes()[0..]);
+ return true;
+ } else |_| {
+ return false;
+ }
+ }
+
+ /// Compute the scalar product of a public key and a secret scalar.
+ /// Note that the output should not be used as a shared secret without
+ /// hashing it first.
+ pub fn create(out: []u8, private_key: []const u8, public_key: []const u8) bool {
+ std.debug.assert(out.len >= secret_length);
+ std.debug.assert(private_key.len >= minimum_key_length);
+ std.debug.assert(public_key.len >= minimum_key_length);
+ var s: [32]u8 = undefined;
+ var b: [32]u8 = undefined;
+ mem.copy(u8, &s, private_key[0..32]);
+ mem.copy(u8, &b, public_key[0..32]);
+ if (Curve.fromBytes(b).clampedMul(s)) |q| {
+ mem.copy(u8, out, q.toBytes()[0..]);
+ return true;
+ } else |_| {
+ return false;
+ }
+ }
+};
+
+test "x25519 public key calculation from secret key" {
+ var sk: [32]u8 = undefined;
+ var pk_expected: [32]u8 = undefined;
+ var pk_calculated: [32]u8 = undefined;
+ try fmt.hexToBytes(sk[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
+ try fmt.hexToBytes(pk_expected[0..], "f1814f0e8ff1043d8a44d25babff3cedcae6c22c3edaa48f857ae70de2baae50");
+ std.testing.expect(X25519.createPublicKey(pk_calculated[0..], &sk));
+ std.testing.expect(std.mem.eql(u8, &pk_calculated, &pk_expected));
+}
+
+test "x25519 rfc7748 vector1" {
+ const secret_key = "\xa5\x46\xe3\x6b\xf0\x52\x7c\x9d\x3b\x16\x15\x4b\x82\x46\x5e\xdd\x62\x14\x4c\x0a\xc1\xfc\x5a\x18\x50\x6a\x22\x44\xba\x44\x9a\xc4";
+ const public_key = "\xe6\xdb\x68\x67\x58\x30\x30\xdb\x35\x94\xc1\xa4\x24\xb1\x5f\x7c\x72\x66\x24\xec\x26\xb3\x35\x3b\x10\xa9\x03\xa6\xd0\xab\x1c\x4c";
+
+ const expected_output = "\xc3\xda\x55\x37\x9d\xe9\xc6\x90\x8e\x94\xea\x4d\xf2\x8d\x08\x4f\x32\xec\xcf\x03\x49\x1c\x71\xf7\x54\xb4\x07\x55\x77\xa2\x85\x52";
+
+ var output: [32]u8 = undefined;
+
+ std.testing.expect(X25519.create(output[0..], secret_key, public_key));
+ std.testing.expect(std.mem.eql(u8, &output, expected_output));
+}
+
+test "x25519 rfc7748 vector2" {
+ const secret_key = "\x4b\x66\xe9\xd4\xd1\xb4\x67\x3c\x5a\xd2\x26\x91\x95\x7d\x6a\xf5\xc1\x1b\x64\x21\xe0\xea\x01\xd4\x2c\xa4\x16\x9e\x79\x18\xba\x0d";
+ const public_key = "\xe5\x21\x0f\x12\x78\x68\x11\xd3\xf4\xb7\x95\x9d\x05\x38\xae\x2c\x31\xdb\xe7\x10\x6f\xc0\x3c\x3e\xfc\x4c\xd5\x49\xc7\x15\xa4\x93";
+
+ const expected_output = "\x95\xcb\xde\x94\x76\xe8\x90\x7d\x7a\xad\xe4\x5c\xb4\xb8\x73\xf8\x8b\x59\x5a\x68\x79\x9f\xa1\x52\xe6\xf8\xf7\x64\x7a\xac\x79\x57";
+
+ var output: [32]u8 = undefined;
+
+ std.testing.expect(X25519.create(output[0..], secret_key, public_key));
+ std.testing.expect(std.mem.eql(u8, &output, expected_output));
+}
+
+test "x25519 rfc7748 one iteration" {
+ const initial_value = "\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00".*;
+ const expected_output = "\x42\x2c\x8e\x7a\x62\x27\xd7\xbc\xa1\x35\x0b\x3e\x2b\xb7\x27\x9f\x78\x97\xb8\x7b\xb6\x85\x4b\x78\x3c\x60\xe8\x03\x11\xae\x30\x79";
+
+ var k: [32]u8 = initial_value;
+ var u: [32]u8 = initial_value;
+
+ var i: usize = 0;
+ while (i < 1) : (i += 1) {
+ var output: [32]u8 = undefined;
+ std.testing.expect(X25519.create(output[0..], &k, &u));
+
+ std.mem.copy(u8, u[0..], k[0..]);
+ std.mem.copy(u8, k[0..], output[0..]);
+ }
+
+ std.testing.expect(std.mem.eql(u8, k[0..], expected_output));
+}
+
+test "x25519 rfc7748 1,000 iterations" {
+ // These iteration tests are slow so we always skip them. Results have been verified.
+ if (true) {
+ return error.SkipZigTest;
+ }
+
+ const initial_value = "\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
+ const expected_output = "\x68\x4c\xf5\x9b\xa8\x33\x09\x55\x28\x00\xef\x56\x6f\x2f\x4d\x3c\x1c\x38\x87\xc4\x93\x60\xe3\x87\x5f\x2e\xb9\x4d\x99\x53\x2c\x51";
+
+ var k: [32]u8 = initial_value.*;
+ var u: [32]u8 = initial_value.*;
+
+ var i: usize = 0;
+ while (i < 1000) : (i += 1) {
+ var output: [32]u8 = undefined;
+ std.testing.expect(X25519.create(output[0..], &k, &u));
+
+ std.mem.copy(u8, u[0..], k[0..]);
+ std.mem.copy(u8, k[0..], output[0..]);
+ }
+
+ std.testing.expect(std.mem.eql(u8, k[0..], expected_output));
+}
+
+test "x25519 rfc7748 1,000,000 iterations" {
+ if (true) {
+ return error.SkipZigTest;
+ }
+
+ const initial_value = "\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
+ const expected_output = "\x7c\x39\x11\xe0\xab\x25\x86\xfd\x86\x44\x97\x29\x7e\x57\x5e\x6f\x3b\xc6\x01\xc0\x88\x3c\x30\xdf\x5f\x4d\xd2\xd2\x4f\x66\x54\x24";
+
+ var k: [32]u8 = initial_value.*;
+ var u: [32]u8 = initial_value.*;
+
+ var i: usize = 0;
+ while (i < 1000000) : (i += 1) {
+ var output: [32]u8 = undefined;
+ std.testing.expect(X25519.create(output[0..], &k, &u));
+
+ std.mem.copy(u8, u[0..], k[0..]);
+ std.mem.copy(u8, k[0..], output[0..]);
+ }
+
+ std.testing.expect(std.mem.eql(u8, k[0..], expected_output));
+}
diff --git a/lib/std/crypto/x25519.zig b/lib/std/crypto/x25519.zig
deleted file mode 100644
index e2e2bf90e5..0000000000
--- a/lib/std/crypto/x25519.zig
+++ /dev/null
@@ -1,675 +0,0 @@
-// Translated from monocypher which is licensed under CC-0/BSD-3.
-//
-// https://monocypher.org/
-
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const fmt = std.fmt;
-
-const Endian = builtin.Endian;
-const readIntLittle = std.mem.readIntLittle;
-const writeIntLittle = std.mem.writeIntLittle;
-
-// Based on Supercop's ref10 implementation.
-pub const X25519 = struct {
- pub const secret_length = 32;
- pub const minimum_key_length = 32;
-
- fn trimScalar(s: []u8) void {
- s[0] &= 248;
- s[31] &= 127;
- s[31] |= 64;
- }
-
- fn scalarBit(s: []const u8, i: usize) i32 {
- return (s[i >> 3] >> @intCast(u3, i & 7)) & 1;
- }
-
- pub fn create(out: []u8, private_key: []const u8, public_key: []const u8) bool {
- std.debug.assert(out.len >= secret_length);
- std.debug.assert(private_key.len >= minimum_key_length);
- std.debug.assert(public_key.len >= minimum_key_length);
-
- var storage: [7]Fe = undefined;
- var x1 = &storage[0];
- var x2 = &storage[1];
- var z2 = &storage[2];
- var x3 = &storage[3];
- var z3 = &storage[4];
- var t0 = &storage[5];
- var t1 = &storage[6];
-
- // computes the scalar product
- Fe.fromBytes(x1, public_key);
-
- // restrict the possible scalar values
- var e: [32]u8 = undefined;
- for (e[0..]) |_, i| {
- e[i] = private_key[i];
- }
- trimScalar(e[0..]);
-
- // computes the actual scalar product (the result is in x2 and z2)
-
- // Montgomery ladder
- // In projective coordinates, to avoid divisions: x = X / Z
- // We don't care about the y coordinate, it's only 1 bit of information
- Fe.init1(x2);
- Fe.init0(z2); // "zero" point
- Fe.copy(x3, x1);
- Fe.init1(z3);
-
- var swap: i32 = 0;
- var pos: isize = 254;
- while (pos >= 0) : (pos -= 1) {
- // constant time conditional swap before ladder step
- const b = scalarBit(&e, @intCast(usize, pos));
- swap ^= b; // xor trick avoids swapping at the end of the loop
- Fe.cswap(x2, x3, swap);
- Fe.cswap(z2, z3, swap);
- swap = b; // anticipates one last swap after the loop
-
- // Montgomery ladder step: replaces (P2, P3) by (P2*2, P2+P3)
- // with differential addition
- Fe.sub(t0, x3, z3);
- Fe.sub(t1, x2, z2);
- Fe.add(x2, x2, z2);
- Fe.add(z2, x3, z3);
- Fe.mul(z3, t0, x2);
- Fe.mul(z2, z2, t1);
- Fe.sq(t0, t1);
- Fe.sq(t1, x2);
- Fe.add(x3, z3, z2);
- Fe.sub(z2, z3, z2);
- Fe.mul(x2, t1, t0);
- Fe.sub(t1, t1, t0);
- Fe.sq(z2, z2);
- Fe.mulSmall(z3, t1, 121666);
- Fe.sq(x3, x3);
- Fe.add(t0, t0, z3);
- Fe.mul(z3, x1, z2);
- Fe.mul(z2, t1, t0);
- }
-
- // last swap is necessary to compensate for the xor trick
- // Note: after this swap, P3 == P2 + P1.
- Fe.cswap(x2, x3, swap);
- Fe.cswap(z2, z3, swap);
-
- // normalises the coordinates: x == X / Z
- Fe.invert(z2, z2);
- Fe.mul(x2, x2, z2);
- Fe.toBytes(out, x2);
-
- x1.secureZero();
- x2.secureZero();
- x3.secureZero();
- t0.secureZero();
- t1.secureZero();
- z2.secureZero();
- z3.secureZero();
- std.mem.secureZero(u8, e[0..]);
-
- // Returns false if the output is all zero
- // (happens with some malicious public keys)
- return !zerocmp(u8, out);
- }
-
- pub fn createPublicKey(public_key: []u8, private_key: []const u8) bool {
- var base_point = [_]u8{9} ++ [_]u8{0} ** 31;
- return create(public_key, private_key, &base_point);
- }
-};
-
-// Constant time compare to zero.
-fn zerocmp(comptime T: type, a: []const T) bool {
- var s: T = 0;
- for (a) |b| {
- s |= b;
- }
- return s == 0;
-}
-
-////////////////////////////////////
-/// Arithmetic modulo 2^255 - 19 ///
-////////////////////////////////////
-// Taken from Supercop's ref10 implementation.
-// A bit bigger than TweetNaCl, over 4 times faster.
-
-// field element
-const Fe = struct {
- b: [10]i32,
-
- fn secureZero(self: *Fe) void {
- std.mem.secureZero(u8, @ptrCast([*]u8, self)[0..@sizeOf(Fe)]);
- }
-
- fn init0(h: *Fe) void {
- for (h.b) |*e| {
- e.* = 0;
- }
- }
-
- fn init1(h: *Fe) void {
- for (h.b[1..]) |*e| {
- e.* = 0;
- }
- h.b[0] = 1;
- }
-
- fn copy(h: *Fe, f: *const Fe) void {
- for (h.b) |_, i| {
- h.b[i] = f.b[i];
- }
- }
-
- fn neg(h: *Fe, f: *const Fe) void {
- for (h.b) |_, i| {
- h.b[i] = -f.b[i];
- }
- }
-
- fn add(h: *Fe, f: *const Fe, g: *const Fe) void {
- for (h.b) |_, i| {
- h.b[i] = f.b[i] + g.b[i];
- }
- }
-
- fn sub(h: *Fe, f: *const Fe, g: *const Fe) void {
- for (h.b) |_, i| {
- h.b[i] = f.b[i] - g.b[i];
- }
- }
-
- fn cswap(f: *Fe, g: *Fe, b: i32) void {
- for (f.b) |_, i| {
- const x = (f.b[i] ^ g.b[i]) & -b;
- f.b[i] ^= x;
- g.b[i] ^= x;
- }
- }
-
- fn ccopy(f: *Fe, g: *const Fe, b: i32) void {
- for (f.b) |_, i| {
- const x = (f.b[i] ^ g.b[i]) & -b;
- f.b[i] ^= x;
- }
- }
-
- inline fn carryRound(c: []i64, t: []i64, comptime i: comptime_int, comptime shift: comptime_int, comptime mult: comptime_int) void {
- const j = (i + 1) % 10;
-
- c[i] = (t[i] + (@as(i64, 1) << shift)) >> (shift + 1);
- t[j] += c[i] * mult;
- t[i] -= c[i] * (@as(i64, 1) << (shift + 1));
- }
-
- fn carry1(h: *Fe, t: []i64) void {
- var c: [10]i64 = undefined;
-
- var sc = c[0..];
- var st = t[0..];
-
- carryRound(sc, st, 9, 24, 19);
- carryRound(sc, st, 1, 24, 1);
- carryRound(sc, st, 3, 24, 1);
- carryRound(sc, st, 5, 24, 1);
- carryRound(sc, st, 7, 24, 1);
- carryRound(sc, st, 0, 25, 1);
- carryRound(sc, st, 2, 25, 1);
- carryRound(sc, st, 4, 25, 1);
- carryRound(sc, st, 6, 25, 1);
- carryRound(sc, st, 8, 25, 1);
-
- for (h.b) |_, i| {
- h.b[i] = @intCast(i32, t[i]);
- }
- }
-
- fn carry2(h: *Fe, t: []i64) void {
- var c: [10]i64 = undefined;
-
- var sc = c[0..];
- var st = t[0..];
-
- carryRound(sc, st, 0, 25, 1);
- carryRound(sc, st, 4, 25, 1);
- carryRound(sc, st, 1, 24, 1);
- carryRound(sc, st, 5, 24, 1);
- carryRound(sc, st, 2, 25, 1);
- carryRound(sc, st, 6, 25, 1);
- carryRound(sc, st, 3, 24, 1);
- carryRound(sc, st, 7, 24, 1);
- carryRound(sc, st, 4, 25, 1);
- carryRound(sc, st, 8, 25, 1);
- carryRound(sc, st, 9, 24, 19);
- carryRound(sc, st, 0, 25, 1);
-
- for (h.b) |_, i| {
- h.b[i] = @intCast(i32, t[i]);
- }
- }
-
- fn fromBytes(h: *Fe, s: []const u8) void {
- std.debug.assert(s.len >= 32);
-
- var t: [10]i64 = undefined;
-
- t[0] = readIntLittle(u32, s[0..4]);
- t[1] = @as(u32, readIntLittle(u24, s[4..7])) << 6;
- t[2] = @as(u32, readIntLittle(u24, s[7..10])) << 5;
- t[3] = @as(u32, readIntLittle(u24, s[10..13])) << 3;
- t[4] = @as(u32, readIntLittle(u24, s[13..16])) << 2;
- t[5] = readIntLittle(u32, s[16..20]);
- t[6] = @as(u32, readIntLittle(u24, s[20..23])) << 7;
- t[7] = @as(u32, readIntLittle(u24, s[23..26])) << 5;
- t[8] = @as(u32, readIntLittle(u24, s[26..29])) << 4;
- t[9] = (@as(u32, readIntLittle(u24, s[29..32])) & 0x7fffff) << 2;
-
- carry1(h, t[0..]);
- }
-
- fn mulSmall(h: *Fe, f: *const Fe, comptime g: comptime_int) void {
- var t: [10]i64 = undefined;
-
- for (t[0..]) |_, i| {
- t[i] = @as(i64, f.b[i]) * g;
- }
-
- carry1(h, t[0..]);
- }
-
- fn mul(h: *Fe, f1: *const Fe, g1: *const Fe) void {
- const f = f1.b;
- const g = g1.b;
-
- var F: [10]i32 = undefined;
- var G: [10]i32 = undefined;
-
- F[1] = f[1] * 2;
- F[3] = f[3] * 2;
- F[5] = f[5] * 2;
- F[7] = f[7] * 2;
- F[9] = f[9] * 2;
-
- G[1] = g[1] * 19;
- G[2] = g[2] * 19;
- G[3] = g[3] * 19;
- G[4] = g[4] * 19;
- G[5] = g[5] * 19;
- G[6] = g[6] * 19;
- G[7] = g[7] * 19;
- G[8] = g[8] * 19;
- G[9] = g[9] * 19;
-
- // t's become h
- var t: [10]i64 = undefined;
-
- t[0] = f[0] * @as(i64, g[0]) + F[1] * @as(i64, G[9]) + f[2] * @as(i64, G[8]) + F[3] * @as(i64, G[7]) + f[4] * @as(i64, G[6]) + F[5] * @as(i64, G[5]) + f[6] * @as(i64, G[4]) + F[7] * @as(i64, G[3]) + f[8] * @as(i64, G[2]) + F[9] * @as(i64, G[1]);
- t[1] = f[0] * @as(i64, g[1]) + f[1] * @as(i64, g[0]) + f[2] * @as(i64, G[9]) + f[3] * @as(i64, G[8]) + f[4] * @as(i64, G[7]) + f[5] * @as(i64, G[6]) + f[6] * @as(i64, G[5]) + f[7] * @as(i64, G[4]) + f[8] * @as(i64, G[3]) + f[9] * @as(i64, G[2]);
- t[2] = f[0] * @as(i64, g[2]) + F[1] * @as(i64, g[1]) + f[2] * @as(i64, g[0]) + F[3] * @as(i64, G[9]) + f[4] * @as(i64, G[8]) + F[5] * @as(i64, G[7]) + f[6] * @as(i64, G[6]) + F[7] * @as(i64, G[5]) + f[8] * @as(i64, G[4]) + F[9] * @as(i64, G[3]);
- t[3] = f[0] * @as(i64, g[3]) + f[1] * @as(i64, g[2]) + f[2] * @as(i64, g[1]) + f[3] * @as(i64, g[0]) + f[4] * @as(i64, G[9]) + f[5] * @as(i64, G[8]) + f[6] * @as(i64, G[7]) + f[7] * @as(i64, G[6]) + f[8] * @as(i64, G[5]) + f[9] * @as(i64, G[4]);
- t[4] = f[0] * @as(i64, g[4]) + F[1] * @as(i64, g[3]) + f[2] * @as(i64, g[2]) + F[3] * @as(i64, g[1]) + f[4] * @as(i64, g[0]) + F[5] * @as(i64, G[9]) + f[6] * @as(i64, G[8]) + F[7] * @as(i64, G[7]) + f[8] * @as(i64, G[6]) + F[9] * @as(i64, G[5]);
- t[5] = f[0] * @as(i64, g[5]) + f[1] * @as(i64, g[4]) + f[2] * @as(i64, g[3]) + f[3] * @as(i64, g[2]) + f[4] * @as(i64, g[1]) + f[5] * @as(i64, g[0]) + f[6] * @as(i64, G[9]) + f[7] * @as(i64, G[8]) + f[8] * @as(i64, G[7]) + f[9] * @as(i64, G[6]);
- t[6] = f[0] * @as(i64, g[6]) + F[1] * @as(i64, g[5]) + f[2] * @as(i64, g[4]) + F[3] * @as(i64, g[3]) + f[4] * @as(i64, g[2]) + F[5] * @as(i64, g[1]) + f[6] * @as(i64, g[0]) + F[7] * @as(i64, G[9]) + f[8] * @as(i64, G[8]) + F[9] * @as(i64, G[7]);
- t[7] = f[0] * @as(i64, g[7]) + f[1] * @as(i64, g[6]) + f[2] * @as(i64, g[5]) + f[3] * @as(i64, g[4]) + f[4] * @as(i64, g[3]) + f[5] * @as(i64, g[2]) + f[6] * @as(i64, g[1]) + f[7] * @as(i64, g[0]) + f[8] * @as(i64, G[9]) + f[9] * @as(i64, G[8]);
- t[8] = f[0] * @as(i64, g[8]) + F[1] * @as(i64, g[7]) + f[2] * @as(i64, g[6]) + F[3] * @as(i64, g[5]) + f[4] * @as(i64, g[4]) + F[5] * @as(i64, g[3]) + f[6] * @as(i64, g[2]) + F[7] * @as(i64, g[1]) + f[8] * @as(i64, g[0]) + F[9] * @as(i64, G[9]);
- t[9] = f[0] * @as(i64, g[9]) + f[1] * @as(i64, g[8]) + f[2] * @as(i64, g[7]) + f[3] * @as(i64, g[6]) + f[4] * @as(i64, g[5]) + f[5] * @as(i64, g[4]) + f[6] * @as(i64, g[3]) + f[7] * @as(i64, g[2]) + f[8] * @as(i64, g[1]) + f[9] * @as(i64, g[0]);
-
- carry2(h, t[0..]);
- }
-
- // we could use Fe.mul() for this, but this is significantly faster
- fn sq(h: *Fe, fz: *const Fe) void {
- const f0 = fz.b[0];
- const f1 = fz.b[1];
- const f2 = fz.b[2];
- const f3 = fz.b[3];
- const f4 = fz.b[4];
- const f5 = fz.b[5];
- const f6 = fz.b[6];
- const f7 = fz.b[7];
- const f8 = fz.b[8];
- const f9 = fz.b[9];
-
- const f0_2 = f0 * 2;
- const f1_2 = f1 * 2;
- const f2_2 = f2 * 2;
- const f3_2 = f3 * 2;
- const f4_2 = f4 * 2;
- const f5_2 = f5 * 2;
- const f6_2 = f6 * 2;
- const f7_2 = f7 * 2;
- const f5_38 = f5 * 38;
- const f6_19 = f6 * 19;
- const f7_38 = f7 * 38;
- const f8_19 = f8 * 19;
- const f9_38 = f9 * 38;
-
- var t: [10]i64 = undefined;
-
- t[0] = f0 * @as(i64, f0) + f1_2 * @as(i64, f9_38) + f2_2 * @as(i64, f8_19) + f3_2 * @as(i64, f7_38) + f4_2 * @as(i64, f6_19) + f5 * @as(i64, f5_38);
- t[1] = f0_2 * @as(i64, f1) + f2 * @as(i64, f9_38) + f3_2 * @as(i64, f8_19) + f4 * @as(i64, f7_38) + f5_2 * @as(i64, f6_19);
- t[2] = f0_2 * @as(i64, f2) + f1_2 * @as(i64, f1) + f3_2 * @as(i64, f9_38) + f4_2 * @as(i64, f8_19) + f5_2 * @as(i64, f7_38) + f6 * @as(i64, f6_19);
- t[3] = f0_2 * @as(i64, f3) + f1_2 * @as(i64, f2) + f4 * @as(i64, f9_38) + f5_2 * @as(i64, f8_19) + f6 * @as(i64, f7_38);
- t[4] = f0_2 * @as(i64, f4) + f1_2 * @as(i64, f3_2) + f2 * @as(i64, f2) + f5_2 * @as(i64, f9_38) + f6_2 * @as(i64, f8_19) + f7 * @as(i64, f7_38);
- t[5] = f0_2 * @as(i64, f5) + f1_2 * @as(i64, f4) + f2_2 * @as(i64, f3) + f6 * @as(i64, f9_38) + f7_2 * @as(i64, f8_19);
- t[6] = f0_2 * @as(i64, f6) + f1_2 * @as(i64, f5_2) + f2_2 * @as(i64, f4) + f3_2 * @as(i64, f3) + f7_2 * @as(i64, f9_38) + f8 * @as(i64, f8_19);
- t[7] = f0_2 * @as(i64, f7) + f1_2 * @as(i64, f6) + f2_2 * @as(i64, f5) + f3_2 * @as(i64, f4) + f8 * @as(i64, f9_38);
- t[8] = f0_2 * @as(i64, f8) + f1_2 * @as(i64, f7_2) + f2_2 * @as(i64, f6) + f3_2 * @as(i64, f5_2) + f4 * @as(i64, f4) + f9 * @as(i64, f9_38);
- t[9] = f0_2 * @as(i64, f9) + f1_2 * @as(i64, f8) + f2_2 * @as(i64, f7) + f3_2 * @as(i64, f6) + f4 * @as(i64, f5_2);
-
- carry2(h, t[0..]);
- }
-
- fn sq2(h: *Fe, f: *const Fe) void {
- Fe.sq(h, f);
- Fe.mul_small(h, h, 2);
- }
-
- // This could be simplified, but it would be slower
- fn invert(out: *Fe, z: *const Fe) void {
- var i: usize = undefined;
-
- var t: [4]Fe = undefined;
- var t0 = &t[0];
- var t1 = &t[1];
- var t2 = &t[2];
- var t3 = &t[3];
-
- Fe.sq(t0, z);
- Fe.sq(t1, t0);
- Fe.sq(t1, t1);
- Fe.mul(t1, z, t1);
- Fe.mul(t0, t0, t1);
-
- Fe.sq(t2, t0);
- Fe.mul(t1, t1, t2);
-
- Fe.sq(t2, t1);
- i = 1;
- while (i < 5) : (i += 1) Fe.sq(t2, t2);
- Fe.mul(t1, t2, t1);
-
- Fe.sq(t2, t1);
- i = 1;
- while (i < 10) : (i += 1) Fe.sq(t2, t2);
- Fe.mul(t2, t2, t1);
-
- Fe.sq(t3, t2);
- i = 1;
- while (i < 20) : (i += 1) Fe.sq(t3, t3);
- Fe.mul(t2, t3, t2);
-
- Fe.sq(t2, t2);
- i = 1;
- while (i < 10) : (i += 1) Fe.sq(t2, t2);
- Fe.mul(t1, t2, t1);
-
- Fe.sq(t2, t1);
- i = 1;
- while (i < 50) : (i += 1) Fe.sq(t2, t2);
- Fe.mul(t2, t2, t1);
-
- Fe.sq(t3, t2);
- i = 1;
- while (i < 100) : (i += 1) Fe.sq(t3, t3);
- Fe.mul(t2, t3, t2);
-
- Fe.sq(t2, t2);
- i = 1;
- while (i < 50) : (i += 1) Fe.sq(t2, t2);
- Fe.mul(t1, t2, t1);
-
- Fe.sq(t1, t1);
- i = 1;
- while (i < 5) : (i += 1) Fe.sq(t1, t1);
- Fe.mul(out, t1, t0);
-
- t0.secureZero();
- t1.secureZero();
- t2.secureZero();
- t3.secureZero();
- }
-
- // This could be simplified, but it would be slower
- fn pow22523(out: *Fe, z: *const Fe) void {
- var i: usize = undefined;
-
- var t: [3]Fe = undefined;
- var t0 = &t[0];
- var t1 = &t[1];
- var t2 = &t[2];
-
- Fe.sq(t0, z);
- Fe.sq(t1, t0);
- Fe.sq(t1, t1);
- Fe.mul(t1, z, t1);
- Fe.mul(t0, t0, t1);
-
- Fe.sq(t0, t0);
- Fe.mul(t0, t1, t0);
-
- Fe.sq(t1, t0);
- i = 1;
- while (i < 5) : (i += 1) Fe.sq(t1, t1);
- Fe.mul(t0, t1, t0);
-
- Fe.sq(t1, t0);
- i = 1;
- while (i < 10) : (i += 1) Fe.sq(t1, t1);
- Fe.mul(t1, t1, t0);
-
- Fe.sq(t2, t1);
- i = 1;
- while (i < 20) : (i += 1) Fe.sq(t2, t2);
- Fe.mul(t1, t2, t1);
-
- Fe.sq(t1, t1);
- i = 1;
- while (i < 10) : (i += 1) Fe.sq(t1, t1);
- Fe.mul(t0, t1, t0);
-
- Fe.sq(t1, t0);
- i = 1;
- while (i < 50) : (i += 1) Fe.sq(t1, t1);
- Fe.mul(t1, t1, t0);
-
- Fe.sq(t2, t1);
- i = 1;
- while (i < 100) : (i += 1) Fe.sq(t2, t2);
- Fe.mul(t1, t2, t1);
-
- Fe.sq(t1, t1);
- i = 1;
- while (i < 50) : (i += 1) Fe.sq(t1, t1);
- Fe.mul(t0, t1, t0);
-
- Fe.sq(t0, t0);
- i = 1;
- while (i < 2) : (i += 1) Fe.sq(t0, t0);
- Fe.mul(out, t0, z);
-
- t0.secureZero();
- t1.secureZero();
- t2.secureZero();
- }
-
- inline fn toBytesRound(c: []i64, t: []i64, comptime i: comptime_int, comptime shift: comptime_int) void {
- c[i] = t[i] >> shift;
- if (i + 1 < 10) {
- t[i + 1] += c[i];
- }
- t[i] -= c[i] * (@as(i32, 1) << shift);
- }
-
- fn toBytes(s: []u8, h: *const Fe) void {
- std.debug.assert(s.len >= 32);
-
- var t: [10]i64 = undefined;
- for (h.b[0..]) |_, i| {
- t[i] = h.b[i];
- }
-
- var q = (19 * t[9] + ((@as(i32, 1) << 24))) >> 25;
- {
- var i: usize = 0;
- while (i < 5) : (i += 1) {
- q += t[2 * i];
- q >>= 26;
- q += t[2 * i + 1];
- q >>= 25;
- }
- }
- t[0] += 19 * q;
-
- var c: [10]i64 = undefined;
-
- var st = t[0..];
- var sc = c[0..];
-
- toBytesRound(sc, st, 0, 26);
- toBytesRound(sc, st, 1, 25);
- toBytesRound(sc, st, 2, 26);
- toBytesRound(sc, st, 3, 25);
- toBytesRound(sc, st, 4, 26);
- toBytesRound(sc, st, 5, 25);
- toBytesRound(sc, st, 6, 26);
- toBytesRound(sc, st, 7, 25);
- toBytesRound(sc, st, 8, 26);
- toBytesRound(sc, st, 9, 25);
-
- var ut: [10]u32 = undefined;
- for (ut[0..]) |_, i| {
- ut[i] = @bitCast(u32, @intCast(i32, t[i]));
- }
-
- writeIntLittle(u32, s[0..4], (ut[0] >> 0) | (ut[1] << 26));
- writeIntLittle(u32, s[4..8], (ut[1] >> 6) | (ut[2] << 19));
- writeIntLittle(u32, s[8..12], (ut[2] >> 13) | (ut[3] << 13));
- writeIntLittle(u32, s[12..16], (ut[3] >> 19) | (ut[4] << 6));
- writeIntLittle(u32, s[16..20], (ut[5] >> 0) | (ut[6] << 25));
- writeIntLittle(u32, s[20..24], (ut[6] >> 7) | (ut[7] << 19));
- writeIntLittle(u32, s[24..28], (ut[7] >> 13) | (ut[8] << 12));
- writeIntLittle(u32, s[28..32], (ut[8] >> 20) | (ut[9] << 6));
-
- std.mem.secureZero(i64, t[0..]);
- }
-
- // Parity check. Returns 0 if even, 1 if odd
- fn isNegative(f: *const Fe) bool {
- var s: [32]u8 = undefined;
- Fe.toBytes(s[0..], f);
- const isneg = s[0] & 1;
- s.secureZero();
- return isneg;
- }
-
- fn isNonZero(f: *const Fe) bool {
- var s: [32]u8 = undefined;
- Fe.toBytes(s[0..], f);
- const isnonzero = zerocmp(u8, s[0..]);
- s.secureZero();
- return isneg;
- }
-};
-
-test "x25519 public key calculation from secret key" {
- var sk: [32]u8 = undefined;
- var pk_expected: [32]u8 = undefined;
- var pk_calculated: [32]u8 = undefined;
- try fmt.hexToBytes(sk[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
- try fmt.hexToBytes(pk_expected[0..], "f1814f0e8ff1043d8a44d25babff3cedcae6c22c3edaa48f857ae70de2baae50");
- std.testing.expect(X25519.createPublicKey(pk_calculated[0..], &sk));
- std.testing.expect(std.mem.eql(u8, &pk_calculated, &pk_expected));
-}
-
-test "x25519 rfc7748 vector1" {
- const secret_key = "\xa5\x46\xe3\x6b\xf0\x52\x7c\x9d\x3b\x16\x15\x4b\x82\x46\x5e\xdd\x62\x14\x4c\x0a\xc1\xfc\x5a\x18\x50\x6a\x22\x44\xba\x44\x9a\xc4";
- const public_key = "\xe6\xdb\x68\x67\x58\x30\x30\xdb\x35\x94\xc1\xa4\x24\xb1\x5f\x7c\x72\x66\x24\xec\x26\xb3\x35\x3b\x10\xa9\x03\xa6\xd0\xab\x1c\x4c";
-
- const expected_output = "\xc3\xda\x55\x37\x9d\xe9\xc6\x90\x8e\x94\xea\x4d\xf2\x8d\x08\x4f\x32\xec\xcf\x03\x49\x1c\x71\xf7\x54\xb4\x07\x55\x77\xa2\x85\x52";
-
- var output: [32]u8 = undefined;
-
- std.testing.expect(X25519.create(output[0..], secret_key, public_key));
- std.testing.expect(std.mem.eql(u8, &output, expected_output));
-}
-
-test "x25519 rfc7748 vector2" {
- const secret_key = "\x4b\x66\xe9\xd4\xd1\xb4\x67\x3c\x5a\xd2\x26\x91\x95\x7d\x6a\xf5\xc1\x1b\x64\x21\xe0\xea\x01\xd4\x2c\xa4\x16\x9e\x79\x18\xba\x0d";
- const public_key = "\xe5\x21\x0f\x12\x78\x68\x11\xd3\xf4\xb7\x95\x9d\x05\x38\xae\x2c\x31\xdb\xe7\x10\x6f\xc0\x3c\x3e\xfc\x4c\xd5\x49\xc7\x15\xa4\x93";
-
- const expected_output = "\x95\xcb\xde\x94\x76\xe8\x90\x7d\x7a\xad\xe4\x5c\xb4\xb8\x73\xf8\x8b\x59\x5a\x68\x79\x9f\xa1\x52\xe6\xf8\xf7\x64\x7a\xac\x79\x57";
-
- var output: [32]u8 = undefined;
-
- std.testing.expect(X25519.create(output[0..], secret_key, public_key));
- std.testing.expect(std.mem.eql(u8, &output, expected_output));
-}
-
-test "x25519 rfc7748 one iteration" {
- const initial_value = "\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00".*;
- const expected_output = "\x42\x2c\x8e\x7a\x62\x27\xd7\xbc\xa1\x35\x0b\x3e\x2b\xb7\x27\x9f\x78\x97\xb8\x7b\xb6\x85\x4b\x78\x3c\x60\xe8\x03\x11\xae\x30\x79";
-
- var k: [32]u8 = initial_value;
- var u: [32]u8 = initial_value;
-
- var i: usize = 0;
- while (i < 1) : (i += 1) {
- var output: [32]u8 = undefined;
- std.testing.expect(X25519.create(output[0..], &k, &u));
-
- std.mem.copy(u8, u[0..], k[0..]);
- std.mem.copy(u8, k[0..], output[0..]);
- }
-
- std.testing.expect(std.mem.eql(u8, k[0..], expected_output));
-}
-
-test "x25519 rfc7748 1,000 iterations" {
- // These iteration tests are slow so we always skip them. Results have been verified.
- if (true) {
- return error.SkipZigTest;
- }
-
- const initial_value = "\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
- const expected_output = "\x68\x4c\xf5\x9b\xa8\x33\x09\x55\x28\x00\xef\x56\x6f\x2f\x4d\x3c\x1c\x38\x87\xc4\x93\x60\xe3\x87\x5f\x2e\xb9\x4d\x99\x53\x2c\x51";
-
- var k: [32]u8 = initial_value.*;
- var u: [32]u8 = initial_value.*;
-
- var i: usize = 0;
- while (i < 1000) : (i += 1) {
- var output: [32]u8 = undefined;
- std.testing.expect(X25519.create(output[0..], &k, &u));
-
- std.mem.copy(u8, u[0..], k[0..]);
- std.mem.copy(u8, k[0..], output[0..]);
- }
-
- std.testing.expect(std.mem.eql(u8, k[0..], expected_output));
-}
-
-test "x25519 rfc7748 1,000,000 iterations" {
- if (true) {
- return error.SkipZigTest;
- }
-
- const initial_value = "\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
- const expected_output = "\x7c\x39\x11\xe0\xab\x25\x86\xfd\x86\x44\x97\x29\x7e\x57\x5e\x6f\x3b\xc6\x01\xc0\x88\x3c\x30\xdf\x5f\x4d\xd2\xd2\x4f\x66\x54\x24";
-
- var k: [32]u8 = initial_value.*;
- var u: [32]u8 = initial_value.*;
-
- var i: usize = 0;
- while (i < 1000000) : (i += 1) {
- var output: [32]u8 = undefined;
- std.testing.expect(X25519.create(output[0..], &k, &u));
-
- std.mem.copy(u8, u[0..], k[0..]);
- std.mem.copy(u8, k[0..], output[0..]);
- }
-
- std.testing.expect(std.mem.eql(u8, k[0..], expected_output));
-}
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 1ba64f47fa..dc26ed3d33 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -334,6 +334,31 @@ test "mem.secureZero" {
testing.expectEqualSlices(u8, a[0..], b[0..]);
}
+/// Constant-time (for a given length) comparison.
+pub fn timingSafeEqual(comptime T: type, a: []const T, b: []const T) bool {
+ const length = a.len;
+ if (length != b.len) {
+ return false;
+ }
+ const ap = @ptrCast([*]const volatile T, a.ptr);
+ const bp = @ptrCast([*]const volatile T, b.ptr);
+ var c: u8 = 0;
+ var i: usize = 0;
+ while (i < length) : (i += 1) {
+ c |= a[i] ^ b[i];
+ }
+ return c == 0;
+}
+
+test "mem.timingSafeEqual" {
+ var a = [_]u8{0xfe} ** 8;
+ var b = [_]u8{0xfe} ** 8;
+
+ testing.expect(timingSafeEqual(u8, &a, &b));
+ a[0] += 1;
+ testing.expect(!timingSafeEqual(u8, &a, &b));
+}
+
/// Initializes all fields of the struct with their default value, or zero values if no default value is present.
/// If the field is present in the provided initial values, it will have that value instead.
/// Structs are initialized recursively.
From 5f9953f41ff7761cdf86c211c91de7470425771c Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Fri, 14 Aug 2020 16:08:26 +0200
Subject: [PATCH 097/153] Remove mem.timingSafeEqual() for now
This requires assembly implementations, and is not needed for
signature verification.
Thanks @daurnimator
---
lib/std/crypto/25519/ed25519.zig | 2 +-
lib/std/mem.zig | 25 -------------------------
2 files changed, 1 insertion(+), 26 deletions(-)
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
index c5baf37683..f174fd8581 100644
--- a/lib/std/crypto/25519/ed25519.zig
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -96,7 +96,7 @@ pub const Ed25519 = struct {
const p = try a.neg().mul(hram);
const check = (try Curve.basePoint().mul(s.*)).add(p).toBytes();
- if (mem.timingSafeEqual(u8, &check, r) == false) {
+ if (mem.eql(u8, &check, r) == false) {
return error.InvalidSignature;
}
}
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index dc26ed3d33..1ba64f47fa 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -334,31 +334,6 @@ test "mem.secureZero" {
testing.expectEqualSlices(u8, a[0..], b[0..]);
}
-/// Constant-time (for a given length) comparison.
-pub fn timingSafeEqual(comptime T: type, a: []const T, b: []const T) bool {
- const length = a.len;
- if (length != b.len) {
- return false;
- }
- const ap = @ptrCast([*]const volatile T, a.ptr);
- const bp = @ptrCast([*]const volatile T, b.ptr);
- var c: u8 = 0;
- var i: usize = 0;
- while (i < length) : (i += 1) {
- c |= a[i] ^ b[i];
- }
- return c == 0;
-}
-
-test "mem.timingSafeEqual" {
- var a = [_]u8{0xfe} ** 8;
- var b = [_]u8{0xfe} ** 8;
-
- testing.expect(timingSafeEqual(u8, &a, &b));
- a[0] += 1;
- testing.expect(!timingSafeEqual(u8, &a, &b));
-}
-
/// Initializes all fields of the struct with their default value, or zero values if no default value is present.
/// If the field is present in the provided initial values, it will have that value instead.
/// Structs are initialized recursively.
From 6af9bc8c686c5eeaff274fa7ebb96b1ead3212ce Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Fri, 14 Aug 2020 16:33:37 +0200
Subject: [PATCH 098/153] Initialize structures directly
Suggested by @kubkon, thanks!
---
lib/std/crypto/25519/curve25519.zig | 2 +-
lib/std/crypto/25519/edwards25519.zig | 2 +-
lib/std/crypto/25519/ristretto255.zig | 16 +++++-----------
3 files changed, 7 insertions(+), 13 deletions(-)
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index a17d9baa7b..011c926f64 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -76,7 +76,7 @@ pub const Curve25519 = struct {
if (x2.isZero()) {
return error.IdentityElement;
}
- return @as(Curve25519, .{ .x = x2 });
+ return Curve25519 { .x = x2 };
}
/// Multiply a Curve25519 point by a scalar after "clamping" it.
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index 7e748609de..c28ed6865d 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -34,7 +34,7 @@ pub const Edwards25519 = struct {
x.cMov(x.mul(Fe.sqrtm1()), 1 - @boolToInt(has_m_root));
x.cMov(x.neg(), @boolToInt(x.isNegative()) ^ (s[31] >> 7));
const t = x.mul(y);
- return @as(Edwards25519, .{ .x = x, .y = y, .z = z, .t = t });
+ return Edwards25519 { .x = x, .y = y, .z = z, .t = t };
}
/// Encode an Edwards25519 point.
diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig
index 71fa876c4f..f573145385 100644
--- a/lib/std/crypto/25519/ristretto255.zig
+++ b/lib/std/crypto/25519/ristretto255.zig
@@ -13,9 +13,8 @@ pub const Ristretto255 = struct {
p: Curve = undefined,
fn sqrtRatioM1(u: Fe, v: Fe) !Fe {
- const v3 = v.sq().mul(v); // v3 = v^3
- var x = v3.sq().mul(u).mul(v). // x = uv^7
- pow2523().mul(v3).mul(u); // x = uv^3(uv^7)^((q-5)/8)
+ const v3 = v.sq().mul(v); // v^3
+ var x = v3.sq().mul(u).mul(v).pow2523().mul(v3).mul(u); // uv^3(uv^7)^((q-5)/8)
const vxx = x.sq().mul(v); // vx^2
const m_root_check = vxx.sub(u); // vx^2-u
const p_root_check = vxx.add(u); // vx^2+u
@@ -77,7 +76,7 @@ pub const Ristretto255 = struct {
.z = Fe.one(),
.t = t,
};
- return @as(Ristretto255, .{ .p = p });
+ return Ristretto255 { .p = p };
}
/// Encode to a Ristretto255 representative.
@@ -87,25 +86,20 @@ pub const Ristretto255 = struct {
const zmy = p.z.sub(p.y); // Z-Y
u1_ = u1_.mul(zmy); // (Z+Y)*(Z-Y)
const u2_ = p.x.mul(p.y); // X*Y
-
const u1_u2u2 = u2_.sq().mul(u1_); // u1*u2^2
-
const inv_sqrt = sqrtRatioM1(Fe.one(), u1_u2u2) catch unreachable;
const den1 = inv_sqrt.mul(u1_);
const den2 = inv_sqrt.mul(u2_);
const z_inv = den1.mul(den2).mul(p.t); // den1*den2*T
-
const ix = p.x.mul(Fe.sqrtm1()); // X*sqrt(-1)
const iy = p.y.mul(Fe.sqrtm1()); // Y*sqrt(-1)
const eden = den1.mul(Fe.edwards25519sqrtamd()); // den1/sqrt(a-d)
-
const t_z_inv = p.t.mul(z_inv); // T*z_inv
- const rotate = @boolToInt(t_z_inv.isNegative());
+ const rotate = @boolToInt(t_z_inv.isNegative());
var x = p.x;
var y = p.y;
var den_inv = den2;
-
x.cMov(iy, rotate);
y.cMov(ix, rotate);
den_inv.cMov(eden, rotate);
@@ -131,7 +125,7 @@ pub const Ristretto255 = struct {
/// Return error.WeakPublicKey if the resulting element is
/// the identity element.
pub inline fn mul(p: Ristretto255, s: [32]u8) !Ristretto255 {
- return @as(Ristretto255, .{ .p = try p.p.mul(s) });
+ return Ristretto255 { .p = try p.p.mul(s) };
}
};
From 739b68938cb233d88e47ab73047787aba0ccb918 Mon Sep 17 00:00:00 2001
From: Frank Denis <124872+jedisct1@users.noreply.github.com>
Date: Fri, 14 Aug 2020 16:23:55 +0200
Subject: [PATCH 099/153] Update lib/std/crypto/25519/field25519.zig
Co-authored-by: Jakub Konka
---
lib/std/crypto/25519/field25519.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/crypto/25519/field25519.zig b/lib/std/crypto/25519/field25519.zig
index 9061cbf011..fe5c28056a 100644
--- a/lib/std/crypto/25519/field25519.zig
+++ b/lib/std/crypto/25519/field25519.zig
@@ -3,7 +3,7 @@ const readIntLittle = std.mem.readIntLittle;
const writeIntLittle = std.mem.writeIntLittle;
pub const Fe = struct {
- limbs: [5]u64 = undefined,
+ limbs: [5]u64,
const MASK51: u64 = 0x7ffffffffffff;
From c483bf4f97504f3c9174ec9fd516d8995971023c Mon Sep 17 00:00:00 2001
From: Frank Denis <124872+jedisct1@users.noreply.github.com>
Date: Fri, 14 Aug 2020 16:24:18 +0200
Subject: [PATCH 100/153] Update lib/std/crypto/25519/ristretto255.zig
Co-authored-by: Jakub Konka
---
lib/std/crypto/25519/ristretto255.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig
index f573145385..997b3085c9 100644
--- a/lib/std/crypto/25519/ristretto255.zig
+++ b/lib/std/crypto/25519/ristretto255.zig
@@ -10,7 +10,7 @@ pub const Ristretto255 = struct {
/// Field arithmetic mod the order of the main subgroup.
pub const scalar = Curve.scalar;
- p: Curve = undefined,
+ p: Curve,
fn sqrtRatioM1(u: Fe, v: Fe) !Fe {
const v3 = v.sq().mul(v); // v^3
From dd8f7b396c24d78168d9582364ab5b3b4606ccc5 Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Fri, 14 Aug 2020 16:45:03 +0200
Subject: [PATCH 101/153] Rename the field and scalar modules
Suggested by @kubkon
---
lib/std/crypto.zig | 4 ++--
lib/std/crypto/25519/curve25519.zig | 4 ++--
lib/std/crypto/25519/edwards25519.zig | 4 ++--
lib/std/crypto/25519/{field25519.zig => field.zig} | 0
lib/std/crypto/25519/{scalar25519.zig => scalar.zig} | 2 +-
5 files changed, 7 insertions(+), 7 deletions(-)
rename lib/std/crypto/25519/{field25519.zig => field.zig} (100%)
rename lib/std/crypto/25519/{scalar25519.zig => scalar.zig} (99%)
diff --git a/lib/std/crypto.zig b/lib/std/crypto.zig
index d15ec2cff2..9fbe70f815 100644
--- a/lib/std/crypto.zig
+++ b/lib/std/crypto.zig
@@ -63,8 +63,8 @@ test "crypto" {
_ = @import("crypto/25519/curve25519.zig");
_ = @import("crypto/25519/ed25519.zig");
_ = @import("crypto/25519/edwards25519.zig");
- _ = @import("crypto/25519/field25519.zig");
- _ = @import("crypto/25519/scalar25519.zig");
+ _ = @import("crypto/25519/field.zig");
+ _ = @import("crypto/25519/scalar.zig");
_ = @import("crypto/25519/x25519.zig");
_ = @import("crypto/25519/ristretto255.zig");
}
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index 011c926f64..4fb569ccfb 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -3,9 +3,9 @@ const std = @import("std");
/// Group operations over Curve25519.
pub const Curve25519 = struct {
/// The underlying prime field.
- pub const Fe = @import("field25519.zig").Fe;
+ pub const Fe = @import("field.zig").Fe;
/// Field arithmetic mod the order of the main subgroup.
- pub const scalar = @import("scalar25519.zig");
+ pub const scalar = @import("scalar.zig");
x: Fe,
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index c28ed6865d..3d21af40e5 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -4,9 +4,9 @@ const fmt = std.fmt;
/// Group operations over Edwards25519.
pub const Edwards25519 = struct {
/// The underlying prime field.
- pub const Fe = @import("field25519.zig").Fe;
+ pub const Fe = @import("field.zig").Fe;
/// Field arithmetic mod the order of the main subgroup.
- pub const scalar = @import("scalar25519.zig");
+ pub const scalar = @import("scalar.zig");
x: Fe,
y: Fe,
diff --git a/lib/std/crypto/25519/field25519.zig b/lib/std/crypto/25519/field.zig
similarity index 100%
rename from lib/std/crypto/25519/field25519.zig
rename to lib/std/crypto/25519/field.zig
diff --git a/lib/std/crypto/25519/scalar25519.zig b/lib/std/crypto/25519/scalar.zig
similarity index 99%
rename from lib/std/crypto/25519/scalar25519.zig
rename to lib/std/crypto/25519/scalar.zig
index a94e6531dc..6971b43489 100644
--- a/lib/std/crypto/25519/scalar25519.zig
+++ b/lib/std/crypto/25519/scalar.zig
@@ -174,7 +174,7 @@ test "non-canonical scalar25519" {
std.testing.expectError(error.NonCanonical, rejectNonCanonical(too_targe));
}
-test "mulAdd overflow check" {
+test "scalar25519 mulAdd overflow check" {
const a: [32]u8 = [_]u8{0xff} ** 32;
const b: [32]u8 = [_]u8{0xff} ** 32;
const c: [32]u8 = [_]u8{0xff} ** 32;
From ed558bfbaa737b187d894eddb8573cde15a3fb33 Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sat, 15 Aug 2020 08:38:44 +0200
Subject: [PATCH 102/153] Address @daurnimator feedback
---
lib/std/crypto/25519/curve25519.zig | 19 +++++++------
lib/std/crypto/25519/ed25519.zig | 8 +++---
lib/std/crypto/25519/edwards25519.zig | 19 +++++++------
lib/std/crypto/25519/field.zig | 33 +++++++----------------
lib/std/crypto/25519/ristretto255.zig | 33 +++++++++++------------
lib/std/crypto/25519/scalar.zig | 39 +++++++++++----------------
lib/std/crypto/25519/x25519.zig | 34 +++++++++++------------
7 files changed, 79 insertions(+), 106 deletions(-)
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index 4fb569ccfb..8b8f8a5586 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -21,7 +21,7 @@ pub const Curve25519 = struct {
/// Return the Curve25519 base point.
pub inline fn basePoint() Curve25519 {
- return .{ .x = Fe.curve25519BasePoint() };
+ return .{ .x = Fe.curve25519BasePoint };
}
/// Check that the encoding of a Curve25519 point is canonical.
@@ -38,10 +38,10 @@ pub const Curve25519 = struct {
fn ladder(p: Curve25519, s: [32]u8, comptime bits: usize) !Curve25519 {
var x1 = p.x;
- var x2 = Fe.one();
- var z2 = Fe.zero();
+ var x2 = Fe.one;
+ var z2 = Fe.zero;
var x3 = x1;
- var z3 = Fe.one();
+ var z3 = Fe.one;
var swap: u8 = 0;
var pos: usize = bits - 1;
while (true) {
@@ -76,7 +76,7 @@ pub const Curve25519 = struct {
if (x2.isZero()) {
return error.IdentityElement;
}
- return Curve25519 { .x = x2 };
+ return Curve25519{ .x = x2 };
}
/// Multiply a Curve25519 point by a scalar after "clamping" it.
@@ -88,7 +88,7 @@ pub const Curve25519 = struct {
pub fn clampedMul(p: Curve25519, s: [32]u8) !Curve25519 {
var t: [32]u8 = s;
scalar.clamp(&t);
- return ladder(p, t, 255);
+ return try ladder(p, t, 255);
}
/// Multiply a Curve25519 point by a scalar without clamping it.
@@ -98,7 +98,7 @@ pub const Curve25519 = struct {
pub fn mul(p: Curve25519, s: [32]u8) !Curve25519 {
const cofactor = [_]u8{8} ++ [_]u8{0} ** 31;
_ = ladder(p, cofactor, 4) catch |_| return error.WeakPublicKey;
- return ladder(p, s, 256);
+ return try ladder(p, s, 256);
}
};
@@ -107,10 +107,9 @@ test "curve25519" {
const p = try Curve25519.basePoint().clampedMul(s);
try p.rejectIdentity();
var buf: [128]u8 = undefined;
- const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{p.toBytes()}), "E6F2A4D1C28EE5C7AD0329268255A468AD407D2672824C0C0EB30EA6EF450145");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{p.toBytes()}), "E6F2A4D1C28EE5C7AD0329268255A468AD407D2672824C0C0EB30EA6EF450145");
const q = try p.clampedMul(s);
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{q.toBytes()}), "3614E119FFE55EC55B87D6B19971A9F4CBC78EFE80BEC55B96392BABCC712537");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{q.toBytes()}), "3614E119FFE55EC55B87D6B19971A9F4CBC78EFE80BEC55B96392BABCC712537");
try Curve25519.rejectNonCanonical(s);
s[31] |= 0x80;
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
index f174fd8581..669bb94480 100644
--- a/lib/std/crypto/25519/ed25519.zig
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -107,11 +107,10 @@ test "ed25519 key pair creation" {
try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
const key_pair = try Ed25519.createKeyPair(seed);
var buf: [256]u8 = undefined;
- const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{key_pair}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{key_pair}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
const public_key = Ed25519.publicKey(key_pair);
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{public_key}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{public_key}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
}
test "ed25519 signature" {
@@ -121,8 +120,7 @@ test "ed25519 signature" {
const sig = try Ed25519.sign("test", key_pair, null);
var buf: [128]u8 = undefined;
- const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{sig}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{sig}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
const public_key = Ed25519.publicKey(key_pair);
try Ed25519.verify(sig, "test", public_key);
std.testing.expectError(error.InvalidSignature, Ed25519.verify(sig, "TEST", public_key));
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index 3d21af40e5..3f2ede511a 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -17,10 +17,10 @@ pub const Edwards25519 = struct {
/// Decode an Edwards25519 point from its compressed (Y+sign) coordinates.
pub fn fromBytes(s: [32]u8) !Edwards25519 {
- const z = Fe.one();
+ const z = Fe.one;
const y = Fe.fromBytes(s);
var u = y.sq();
- var v = u.mul(Fe.edwards25519d());
+ var v = u.mul(Fe.edwards25519d);
u = u.sub(z);
v = v.add(z);
const v3 = v.sq().mul(v);
@@ -31,10 +31,10 @@ pub const Edwards25519 = struct {
if ((@boolToInt(has_m_root) | @boolToInt(has_p_root)) == 0) {
return error.InvalidEncoding;
}
- x.cMov(x.mul(Fe.sqrtm1()), 1 - @boolToInt(has_m_root));
+ x.cMov(x.mul(Fe.sqrtm1), 1 - @boolToInt(has_m_root));
x.cMov(x.neg(), @boolToInt(x.isNegative()) ^ (s[31] >> 7));
const t = x.mul(y);
- return Edwards25519 { .x = x, .y = y, .z = z, .t = t };
+ return Edwards25519{ .x = x, .y = y, .z = z, .t = t };
}
/// Encode an Edwards25519 point.
@@ -55,14 +55,14 @@ pub const Edwards25519 = struct {
return .{
.x = Fe{ .limbs = .{ 3990542415680775, 3398198340507945, 4322667446711068, 2814063955482877, 2839572215813860 } },
.y = Fe{ .limbs = .{ 1801439850948184, 1351079888211148, 450359962737049, 900719925474099, 1801439850948198 } },
- .z = Fe.one(),
+ .z = Fe.one,
.t = Fe{ .limbs = .{ 1841354044333475, 16398895984059, 755974180946558, 900171276175154, 1821297809914039 } },
.is_base = true,
};
}
inline fn identityElement() Edwards25519 {
- return .{ .x = Fe.zero(), .y = Fe.one(), .z = Fe.one(), .t = Fe.zero() };
+ return .{ .x = Fe.zero, .y = Fe.one, .z = Fe.one, .t = Fe.zero };
}
/// Reject the neutral element.
@@ -98,7 +98,7 @@ pub const Edwards25519 = struct {
pub inline fn add(p: Edwards25519, q: Edwards25519) Edwards25519 {
const a = p.y.sub(p.x).mul(q.y.sub(q.x));
const b = p.x.add(p.y).mul(q.x.add(q.y));
- const c = p.t.mul(q.t).mul(Fe.edwards25519d2());
+ const c = p.t.mul(q.t).mul(Fe.edwards25519d2);
var d = p.z.mul(q.z);
d = d.add(d);
const x = b.sub(a);
@@ -124,7 +124,7 @@ pub const Edwards25519 = struct {
var t = Edwards25519.identityElement();
comptime var i: u8 = 0;
inline while (i < 16) : (i += 1) {
- t.cMov(pc[i], ((@intCast(usize, (b ^ i)) -% 1) >> 8) & 1);
+ t.cMov(pc[i], ((@as(usize, (b ^ i)) -% 1) >> 8) & 1);
}
return t;
}
@@ -191,8 +191,7 @@ test "edwards25519 packing/unpacking" {
var b = Edwards25519.basePoint();
const pk = try b.mul(s);
var buf: [128]u8 = undefined;
- const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{pk.toBytes()}), "074BC7E0FCBD587FDBC0969444245FADC562809C8F6E97E949AF62484B5B81A6");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{pk.toBytes()}), "074BC7E0FCBD587FDBC0969444245FADC562809C8F6E97E949AF62484B5B81A6");
const small_order_ss: [7][32]u8 = .{
.{
diff --git a/lib/std/crypto/25519/field.zig b/lib/std/crypto/25519/field.zig
index fe5c28056a..59aa1e3ba9 100644
--- a/lib/std/crypto/25519/field.zig
+++ b/lib/std/crypto/25519/field.zig
@@ -7,34 +7,19 @@ pub const Fe = struct {
const MASK51: u64 = 0x7ffffffffffff;
- pub inline fn zero() Fe {
- return .{ .limbs = .{ 0, 0, 0, 0, 0 } };
- }
+ pub const zero = Fe{ .limbs = .{ 0, 0, 0, 0, 0 } };
- pub inline fn one() Fe {
- return .{ .limbs = .{ 1, 0, 0, 0, 0 } };
- }
+ pub const one = Fe{ .limbs = .{ 1, 0, 0, 0, 0 } };
- pub inline fn sqrtm1() Fe {
- return .{ .limbs = .{ 1718705420411056, 234908883556509, 2233514472574048, 2117202627021982, 765476049583133 } }; // sqrt(-1)
- }
+ pub const sqrtm1 = Fe{ .limbs = .{ 1718705420411056, 234908883556509, 2233514472574048, 2117202627021982, 765476049583133 } }; // sqrt(-1)
- pub inline fn curve25519BasePoint() Fe {
- return .{ .limbs = .{ 9, 0, 0, 0, 0 } };
- }
+ pub const curve25519BasePoint = Fe{ .limbs = .{ 9, 0, 0, 0, 0 } };
- pub inline fn edwards25519d() Fe {
- return .{ .limbs = .{ 929955233495203, 466365720129213, 1662059464998953, 2033849074728123, 1442794654840575 } }; // 37095705934669439343138083508754565189542113879843219016388785533085940283555
- }
+ pub const edwards25519d = Fe{ .limbs = .{ 929955233495203, 466365720129213, 1662059464998953, 2033849074728123, 1442794654840575 } }; // 37095705934669439343138083508754565189542113879843219016388785533085940283555
- pub inline fn edwards25519d2() Fe {
- return .{ .limbs = .{ 1859910466990425, 932731440258426, 1072319116312658, 1815898335770999, 633789495995903 } }; // 2d
- }
+ pub const edwards25519d2 = Fe{ .limbs = .{ 1859910466990425, 932731440258426, 1072319116312658, 1815898335770999, 633789495995903 } }; // 2d
- // 1/sqrt(a-d)
- pub inline fn edwards25519sqrtamd() Fe {
- return .{ .limbs = .{ 278908739862762, 821645201101625, 8113234426968, 1777959178193151, 2118520810568447 } };
- }
+ pub const edwards25519sqrtamd = Fe{ .limbs = .{ 278908739862762, 821645201101625, 8113234426968, 1777959178193151, 2118520810568447 } }; // 1/sqrt(a-d)
pub inline fn isZero(fe: Fe) bool {
var reduced = fe;
@@ -77,7 +62,7 @@ pub const Fe = struct {
c |= s[i] ^ 0xff;
}
c = (c -% 1) >> 8;
- const d = (@intCast(u16, 0xed - 1) -% @intCast(u16, s[0])) >> 8;
+ const d = (@as(u16, 0xed - 1) -% @as(u16, s[0])) >> 8;
const x = if (ignore_extra_bit) 0 else s[31] >> 7;
if ((((c & d) | x) & 1) != 0) {
return error.NonCanonical;
@@ -148,7 +133,7 @@ pub const Fe = struct {
}
pub inline fn neg(a: Fe) Fe {
- return zero().sub(a);
+ return zero.sub(a);
}
pub inline fn isNegative(a: Fe) bool {
diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig
index 997b3085c9..0bb8e1c92a 100644
--- a/lib/std/crypto/25519/ristretto255.zig
+++ b/lib/std/crypto/25519/ristretto255.zig
@@ -18,11 +18,11 @@ pub const Ristretto255 = struct {
const vxx = x.sq().mul(v); // vx^2
const m_root_check = vxx.sub(u); // vx^2-u
const p_root_check = vxx.add(u); // vx^2+u
- const f_root_check = u.mul(Fe.sqrtm1()).add(vxx); // vx^2+u*sqrt(-1)
+ const f_root_check = u.mul(Fe.sqrtm1).add(vxx); // vx^2+u*sqrt(-1)
const has_m_root = m_root_check.isZero();
const has_p_root = p_root_check.isZero();
const has_f_root = f_root_check.isZero();
- const x_sqrtm1 = x.mul(Fe.sqrtm1()); // x*sqrt(-1)
+ const x_sqrtm1 = x.mul(Fe.sqrtm1); // x*sqrt(-1)
x.cMov(x_sqrtm1, @boolToInt(has_p_root) | @boolToInt(has_f_root));
x = x.abs();
if ((@boolToInt(has_m_root) | @boolToInt(has_p_root)) == 0) {
@@ -53,13 +53,13 @@ pub const Ristretto255 = struct {
try rejectNonCanonical(s);
const s_ = Fe.fromBytes(s);
const ss = s_.sq(); // s^2
- const u1_ = Fe.one().sub(ss); // (1-s^2)
+ const u1_ = Fe.one.sub(ss); // (1-s^2)
const u1u1 = u1_.sq(); // (1-s^2)^2
- const u2_ = Fe.one().add(ss); // (1+s^2)
+ const u2_ = Fe.one.add(ss); // (1+s^2)
const u2u2 = u2_.sq(); // (1+s^2)^2
- const v = Fe.edwards25519d().mul(u1u1).neg().sub(u2u2); // -(d*u1^2)-u2^2
+ const v = Fe.edwards25519d.mul(u1u1).neg().sub(u2u2); // -(d*u1^2)-u2^2
const v_u2u2 = v.mul(u2u2); // v*u2^2
- const inv_sqrt = sqrtRatioM1(Fe.one(), v_u2u2) catch |e| {
+ const inv_sqrt = sqrtRatioM1(Fe.one, v_u2u2) catch |e| {
return error.InvalidEncoding;
};
var x = inv_sqrt.mul(u2_);
@@ -73,10 +73,10 @@ pub const Ristretto255 = struct {
const p: Curve = .{
.x = x,
.y = y,
- .z = Fe.one(),
+ .z = Fe.one,
.t = t,
};
- return Ristretto255 { .p = p };
+ return Ristretto255{ .p = p };
}
/// Encode to a Ristretto255 representative.
@@ -87,13 +87,13 @@ pub const Ristretto255 = struct {
u1_ = u1_.mul(zmy); // (Z+Y)*(Z-Y)
const u2_ = p.x.mul(p.y); // X*Y
const u1_u2u2 = u2_.sq().mul(u1_); // u1*u2^2
- const inv_sqrt = sqrtRatioM1(Fe.one(), u1_u2u2) catch unreachable;
+ const inv_sqrt = sqrtRatioM1(Fe.one, u1_u2u2) catch unreachable;
const den1 = inv_sqrt.mul(u1_);
const den2 = inv_sqrt.mul(u2_);
const z_inv = den1.mul(den2).mul(p.t); // den1*den2*T
- const ix = p.x.mul(Fe.sqrtm1()); // X*sqrt(-1)
- const iy = p.y.mul(Fe.sqrtm1()); // Y*sqrt(-1)
- const eden = den1.mul(Fe.edwards25519sqrtamd()); // den1/sqrt(a-d)
+ const ix = p.x.mul(Fe.sqrtm1); // X*sqrt(-1)
+ const iy = p.y.mul(Fe.sqrtm1); // Y*sqrt(-1)
+ const eden = den1.mul(Fe.edwards25519sqrtamd); // den1/sqrt(a-d)
const t_z_inv = p.t.mul(z_inv); // T*z_inv
const rotate = @boolToInt(t_z_inv.isNegative());
@@ -125,23 +125,22 @@ pub const Ristretto255 = struct {
/// Return error.WeakPublicKey if the resulting element is
/// the identity element.
pub inline fn mul(p: Ristretto255, s: [32]u8) !Ristretto255 {
- return Ristretto255 { .p = try p.p.mul(s) };
+ return Ristretto255{ .p = try p.p.mul(s) };
}
};
test "ristretto255" {
const p = Ristretto255.basePoint();
var buf: [256]u8 = undefined;
- const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{p.toBytes()}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{p.toBytes()}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
var r: [32]u8 = undefined;
try fmt.hexToBytes(r[0..], "6a493210f7499cd17fecb510ae0cea23a110e8d5b901f8acadd3095c73a3b919");
var q = try Ristretto255.fromBytes(r);
q = q.dbl().add(p);
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{q.toBytes()}), "E882B131016B52C1D3337080187CF768423EFCCBB517BB495AB812C4160FF44E");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{q.toBytes()}), "E882B131016B52C1D3337080187CF768423EFCCBB517BB495AB812C4160FF44E");
const s = [_]u8{15} ++ [_]u8{0} ** 31;
const w = try p.mul(s);
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{w.toBytes()}), "E0C418F7C8D9C4CDD7395B93EA124F3AD99021BB681DFC3302A9D99A2E53E64E");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{w.toBytes()}), "E0C418F7C8D9C4CDD7395B93EA124F3AD99021BB681DFC3302A9D99A2E53E64E");
}
diff --git a/lib/std/crypto/25519/scalar.zig b/lib/std/crypto/25519/scalar.zig
index 6971b43489..c3340ab61e 100644
--- a/lib/std/crypto/25519/scalar.zig
+++ b/lib/std/crypto/25519/scalar.zig
@@ -1,20 +1,17 @@
const std = @import("std");
const mem = std.mem;
-inline fn fieldSize() [32]u8 {
- return .{
- 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, // 2^252+27742317777372353535851937790883648493
- };
-}
+const field_size = [32]u8{
+ 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, // 2^252+27742317777372353535851937790883648493
+};
const ScalarExpanded = struct {
- const L = fieldSize();
limbs: [64]i64 = [_]i64{0} ** 64,
fn fromBytes(s: [32]u8) ScalarExpanded {
var limbs: [64]i64 = undefined;
for (s) |x, idx| {
- limbs[idx] = @intCast(i64, x);
+ limbs[idx] = @as(i64, x);
}
mem.set(i64, limbs[32..], 0);
return .{ .limbs = limbs };
@@ -23,7 +20,7 @@ const ScalarExpanded = struct {
fn fromBytes64(s: [64]u8) ScalarExpanded {
var limbs: [64]i64 = undefined;
for (s) |x, idx| {
- limbs[idx] = @intCast(i64, x);
+ limbs[idx] = @as(i64, x);
}
return .{ .limbs = limbs };
}
@@ -38,7 +35,7 @@ const ScalarExpanded = struct {
const xi = limbs[i];
var j = i - 32;
while (j < k) : (j += 1) {
- const xj = limbs[j] + carry - 16 * xi * @intCast(i64, L[j - (i - 32)]);
+ const xj = limbs[j] + carry - 16 * xi * @as(i64, field_size[j - (i - 32)]);
carry = (xj + 128) >> 8;
limbs[j] = xj - carry * 256;
}
@@ -48,13 +45,13 @@ const ScalarExpanded = struct {
carry = 0;
comptime var j: usize = 0;
inline while (j < 32) : (j += 1) {
- const xi = limbs[j] + carry - (limbs[31] >> 4) * @intCast(i64, L[j]);
+ const xi = limbs[j] + carry - (limbs[31] >> 4) * @as(i64, field_size[j]);
carry = xi >> 8;
limbs[j] = xi & 255;
}
j = 0;
inline while (j < 32) : (j += 1) {
- limbs[j] -= carry * @intCast(i64, L[j]);
+ limbs[j] -= carry * @as(i64, field_size[j]);
}
j = 0;
inline while (j < 32) : (j += 1) {
@@ -116,15 +113,14 @@ const ScalarExpanded = struct {
/// Reject a scalar whose encoding is not canonical.
pub fn rejectNonCanonical(s: [32]u8) !void {
- const L = fieldSize();
var c: u8 = 0;
var n: u8 = 1;
var i: usize = 31;
while (true) {
- const xs = @intCast(u16, s[i]);
- const xL = @intCast(u16, L[i]);
- c |= @intCast(u8, ((xs -% xL) >> 8) & n);
- n &= @intCast(u8, ((xs ^ xL) -% 1) >> 8);
+ const xs = @as(u16, s[i]);
+ const xfield_size = @as(u16, field_size[i]);
+ c |= @intCast(u8, ((xs -% xfield_size) >> 8) & n);
+ n &= @intCast(u8, ((xs ^ xfield_size) -% 1) >> 8);
if (i == 0) break;
i -= 1;
}
@@ -161,12 +157,10 @@ test "scalar25519" {
var y = x.toBytes();
try rejectNonCanonical(y);
var buf: [128]u8 = undefined;
- const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{y}), "1E979B917937F3DE71D18077F961F6CEFF01030405060708010203040506070F");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{y}), "1E979B917937F3DE71D18077F961F6CEFF01030405060708010203040506070F");
- const field_size = fieldSize();
const reduced = reduce(field_size);
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{reduced}), "0000000000000000000000000000000000000000000000000000000000000000");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{reduced}), "0000000000000000000000000000000000000000000000000000000000000000");
}
test "non-canonical scalar25519" {
@@ -174,12 +168,11 @@ test "non-canonical scalar25519" {
std.testing.expectError(error.NonCanonical, rejectNonCanonical(too_targe));
}
-test "scalar25519 mulAdd overflow check" {
+test "mulAdd overflow check" {
const a: [32]u8 = [_]u8{0xff} ** 32;
const b: [32]u8 = [_]u8{0xff} ** 32;
const c: [32]u8 = [_]u8{0xff} ** 32;
const x = mulAdd(a, b, c);
var buf: [128]u8 = undefined;
- const alloc = &std.heap.FixedBufferAllocator.init(&buf).allocator;
- std.testing.expectEqualStrings(try std.fmt.allocPrint(alloc, "{X}", .{x}), "D14DF91389432C25AD60FF9791B9FD1D67BEF517D273ECCE3D9A307C1B419903");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{x}), "D14DF91389432C25AD60FF9791B9FD1D67BEF517D273ECCE3D9A307C1B419903");
}
diff --git a/lib/std/crypto/25519/x25519.zig b/lib/std/crypto/25519/x25519.zig
index 55c860d13e..ac88d40952 100644
--- a/lib/std/crypto/25519/x25519.zig
+++ b/lib/std/crypto/25519/x25519.zig
@@ -56,32 +56,32 @@ test "x25519 public key calculation from secret key" {
}
test "x25519 rfc7748 vector1" {
- const secret_key = "\xa5\x46\xe3\x6b\xf0\x52\x7c\x9d\x3b\x16\x15\x4b\x82\x46\x5e\xdd\x62\x14\x4c\x0a\xc1\xfc\x5a\x18\x50\x6a\x22\x44\xba\x44\x9a\xc4";
- const public_key = "\xe6\xdb\x68\x67\x58\x30\x30\xdb\x35\x94\xc1\xa4\x24\xb1\x5f\x7c\x72\x66\x24\xec\x26\xb3\x35\x3b\x10\xa9\x03\xa6\xd0\xab\x1c\x4c";
+ const secret_key = [32]u8{ 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 };
+ const public_key = [32]u8{ 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c };
- const expected_output = "\xc3\xda\x55\x37\x9d\xe9\xc6\x90\x8e\x94\xea\x4d\xf2\x8d\x08\x4f\x32\xec\xcf\x03\x49\x1c\x71\xf7\x54\xb4\x07\x55\x77\xa2\x85\x52";
+ const expected_output = [32]u8{ 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 };
var output: [32]u8 = undefined;
- std.testing.expect(X25519.create(output[0..], secret_key, public_key));
- std.testing.expect(std.mem.eql(u8, &output, expected_output));
+ std.testing.expect(X25519.create(output[0..], secret_key[0..], public_key[0..]));
+ std.testing.expect(std.mem.eql(u8, &output, expected_output[0..]));
}
test "x25519 rfc7748 vector2" {
- const secret_key = "\x4b\x66\xe9\xd4\xd1\xb4\x67\x3c\x5a\xd2\x26\x91\x95\x7d\x6a\xf5\xc1\x1b\x64\x21\xe0\xea\x01\xd4\x2c\xa4\x16\x9e\x79\x18\xba\x0d";
- const public_key = "\xe5\x21\x0f\x12\x78\x68\x11\xd3\xf4\xb7\x95\x9d\x05\x38\xae\x2c\x31\xdb\xe7\x10\x6f\xc0\x3c\x3e\xfc\x4c\xd5\x49\xc7\x15\xa4\x93";
+ const secret_key = [32]u8{ 0x4b, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c, 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5, 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4, 0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x0d };
+ const public_key = [32]u8{ 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3, 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c, 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e, 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x93 };
- const expected_output = "\x95\xcb\xde\x94\x76\xe8\x90\x7d\x7a\xad\xe4\x5c\xb4\xb8\x73\xf8\x8b\x59\x5a\x68\x79\x9f\xa1\x52\xe6\xf8\xf7\x64\x7a\xac\x79\x57";
+ const expected_output = [32]u8{ 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d, 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8, 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52, 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 };
var output: [32]u8 = undefined;
- std.testing.expect(X25519.create(output[0..], secret_key, public_key));
- std.testing.expect(std.mem.eql(u8, &output, expected_output));
+ std.testing.expect(X25519.create(output[0..], secret_key[0..], public_key[0..]));
+ std.testing.expect(std.mem.eql(u8, &output, expected_output[0..]));
}
test "x25519 rfc7748 one iteration" {
- const initial_value = "\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00".*;
- const expected_output = "\x42\x2c\x8e\x7a\x62\x27\xd7\xbc\xa1\x35\x0b\x3e\x2b\xb7\x27\x9f\x78\x97\xb8\x7b\xb6\x85\x4b\x78\x3c\x60\xe8\x03\x11\xae\x30\x79";
+ const initial_value = [32]u8{ 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ const expected_output = [32]u8{ 0x42, 0x2c, 0x8e, 0x7a, 0x62, 0x27, 0xd7, 0xbc, 0xa1, 0x35, 0x0b, 0x3e, 0x2b, 0xb7, 0x27, 0x9f, 0x78, 0x97, 0xb8, 0x7b, 0xb6, 0x85, 0x4b, 0x78, 0x3c, 0x60, 0xe8, 0x03, 0x11, 0xae, 0x30, 0x79 };
var k: [32]u8 = initial_value;
var u: [32]u8 = initial_value;
@@ -95,7 +95,7 @@ test "x25519 rfc7748 one iteration" {
std.mem.copy(u8, k[0..], output[0..]);
}
- std.testing.expect(std.mem.eql(u8, k[0..], expected_output));
+ std.testing.expect(std.mem.eql(u8, k[0..], expected_output[0..]));
}
test "x25519 rfc7748 1,000 iterations" {
@@ -104,8 +104,8 @@ test "x25519 rfc7748 1,000 iterations" {
return error.SkipZigTest;
}
- const initial_value = "\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
- const expected_output = "\x68\x4c\xf5\x9b\xa8\x33\x09\x55\x28\x00\xef\x56\x6f\x2f\x4d\x3c\x1c\x38\x87\xc4\x93\x60\xe3\x87\x5f\x2e\xb9\x4d\x99\x53\x2c\x51";
+ const initial_value = [32]u8{ 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ const expected_output = [32]u8{ 0x68, 0x4c, 0xf5, 0x9b, 0xa8, 0x33, 0x09, 0x55, 0x28, 0x00, 0xef, 0x56, 0x6f, 0x2f, 0x4d, 0x3c, 0x1c, 0x38, 0x87, 0xc4, 0x93, 0x60, 0xe3, 0x87, 0x5f, 0x2e, 0xb9, 0x4d, 0x99, 0x53, 0x2c, 0x51 };
var k: [32]u8 = initial_value.*;
var u: [32]u8 = initial_value.*;
@@ -127,8 +127,8 @@ test "x25519 rfc7748 1,000,000 iterations" {
return error.SkipZigTest;
}
- const initial_value = "\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
- const expected_output = "\x7c\x39\x11\xe0\xab\x25\x86\xfd\x86\x44\x97\x29\x7e\x57\x5e\x6f\x3b\xc6\x01\xc0\x88\x3c\x30\xdf\x5f\x4d\xd2\xd2\x4f\x66\x54\x24";
+ const initial_value = [32]u8{ 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ const expected_output = [32]u8{ 0x7c, 0x39, 0x11, 0xe0, 0xab, 0x25, 0x86, 0xfd, 0x86, 0x44, 0x97, 0x29, 0x7e, 0x57, 0x5e, 0x6f, 0x3b, 0xc6, 0x01, 0xc0, 0x88, 0x3c, 0x30, 0xdf, 0x5f, 0x4d, 0xd2, 0xd2, 0x4f, 0x66, 0x54, 0x24 };
var k: [32]u8 = initial_value.*;
var u: [32]u8 = initial_value.*;
From 263c44473896597346bc244d82a2b436d7d2da02 Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sat, 15 Aug 2020 08:55:48 +0200
Subject: [PATCH 103/153] Move loop decrements into continuations
Suggested by @daurnimator
---
lib/std/crypto/25519/curve25519.zig | 3 +--
lib/std/crypto/25519/edwards25519.zig | 3 +--
lib/std/crypto/25519/scalar.zig | 3 +--
3 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index 8b8f8a5586..b3e014b6d1 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -44,7 +44,7 @@ pub const Curve25519 = struct {
var z3 = Fe.one;
var swap: u8 = 0;
var pos: usize = bits - 1;
- while (true) {
+ while (true) : (pos -= 1) {
const b = (s[pos / 8] >> @intCast(u3, pos & 7)) & 1;
swap ^= b;
Fe.cSwap2(&x2, &x3, &z2, &z3, swap);
@@ -68,7 +68,6 @@ pub const Curve25519 = struct {
z3 = x1.mul(z2);
z2 = tmp1.mul(tmp0);
if (pos == 0) break;
- pos -= 1;
}
Fe.cSwap2(&x2, &x3, &z2, &z3, swap);
z2 = z2.invert();
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index 3f2ede511a..a7044794b2 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -132,12 +132,11 @@ pub const Edwards25519 = struct {
fn pcMul(pc: [16]Edwards25519, s: [32]u8) !Edwards25519 {
var q = Edwards25519.identityElement();
var pos: usize = 252;
- while (true) {
+ while (true) : (pos -= 4) {
q = q.dbl().dbl().dbl().dbl();
const b = (s[pos / 8] >> @intCast(u3, pos & 7)) & 0xf;
q = q.add(pcSelect(pc, b));
if (pos == 0) break;
- pos -= 4;
}
try q.rejectIdentity();
return q;
diff --git a/lib/std/crypto/25519/scalar.zig b/lib/std/crypto/25519/scalar.zig
index c3340ab61e..3a3a29d4bc 100644
--- a/lib/std/crypto/25519/scalar.zig
+++ b/lib/std/crypto/25519/scalar.zig
@@ -116,13 +116,12 @@ pub fn rejectNonCanonical(s: [32]u8) !void {
var c: u8 = 0;
var n: u8 = 1;
var i: usize = 31;
- while (true) {
+ while (true) : (i -= 1) {
const xs = @as(u16, s[i]);
const xfield_size = @as(u16, field_size[i]);
c |= @intCast(u8, ((xs -% xfield_size) >> 8) & n);
n &= @intCast(u8, ((xs ^ xfield_size) -% 1) >> 8);
if (i == 0) break;
- i -= 1;
}
if (c == 0) {
return error.NonCanonical;
From bcef123d902b9d1d8a27b0414932b1b92f6f1a7e Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sat, 15 Aug 2020 10:15:42 +0200
Subject: [PATCH 104/153] Address more review issues
---
lib/std/crypto/25519/curve25519.zig | 8 ++---
lib/std/crypto/25519/ed25519.zig | 13 ++++++--
lib/std/crypto/25519/edwards25519.zig | 48 +++++++++++----------------
lib/std/crypto/25519/ristretto255.zig | 8 ++---
lib/std/crypto/25519/x25519.zig | 26 +++++++--------
5 files changed, 49 insertions(+), 54 deletions(-)
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index b3e014b6d1..9980c152eb 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -19,10 +19,8 @@ pub const Curve25519 = struct {
return p.x.toBytes();
}
- /// Return the Curve25519 base point.
- pub inline fn basePoint() Curve25519 {
- return .{ .x = Fe.curve25519BasePoint };
- }
+ /// The Curve25519 base point.
+ pub const basePoint = Curve25519{ .x = Fe.curve25519BasePoint };
/// Check that the encoding of a Curve25519 point is canonical.
pub fn rejectNonCanonical(s: [32]u8) !void {
@@ -103,7 +101,7 @@ pub const Curve25519 = struct {
test "curve25519" {
var s = [32]u8{ 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8 };
- const p = try Curve25519.basePoint().clampedMul(s);
+ const p = try Curve25519.basePoint.clampedMul(s);
try p.rejectIdentity();
var buf: [128]u8 = undefined;
std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{p.toBytes()}), "E6F2A4D1C28EE5C7AD0329268255A468AD407D2672824C0C0EB30EA6EF450145");
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
index 669bb94480..eb004d2607 100644
--- a/lib/std/crypto/25519/ed25519.zig
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -19,12 +19,19 @@ pub const Ed25519 = struct {
pub const noise_length = 32;
/// Derive a key pair from a secret seed.
+ ///
+ /// As in RFC 8032, an Ed25519 public key is generated by hashing
+ /// the secret key using the SHA-512 function, and interpreting the
+ /// bit-swapped, clamped lower-half of the output as the secret scalar.
+ ///
+ /// For this reason, an EdDSA secret key is commonly called a seed,
+ /// from which the actual secret is derived.
pub fn createKeyPair(seed: [seed_length]u8) ![keypair_length]u8 {
var az: [Sha512.digest_length]u8 = undefined;
var h = Sha512.init();
h.update(&seed);
h.final(&az);
- const p = try Curve.basePoint().clampedMul(az[0..32].*);
+ const p = try Curve.basePoint.clampedMul(az[0..32].*);
var keypair: [keypair_length]u8 = undefined;
mem.copy(u8, &keypair, &seed);
mem.copy(u8, keypair[seed_length..], &p.toBytes());
@@ -57,7 +64,7 @@ pub const Ed25519 = struct {
var nonce64: [64]u8 = undefined;
h.final(&nonce64);
const nonce = Curve.scalar.reduce64(nonce64);
- const r = try Curve.basePoint().mul(nonce);
+ const r = try Curve.basePoint.mul(nonce);
var sig: [signature_length]u8 = undefined;
mem.copy(u8, sig[0..32], &r.toBytes());
@@ -95,7 +102,7 @@ pub const Ed25519 = struct {
const hram = Curve.scalar.reduce64(hram64);
const p = try a.neg().mul(hram);
- const check = (try Curve.basePoint().mul(s.*)).add(p).toBytes();
+ const check = (try Curve.basePoint.mul(s.*)).add(p).toBytes();
if (mem.eql(u8, &check, r) == false) {
return error.InvalidSignature;
}
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index a7044794b2..a65e1dfc11 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -50,20 +50,16 @@ pub const Edwards25519 = struct {
return Fe.rejectNonCanonical(s, true);
}
- /// Return the Edwards25519 base point.
- pub inline fn basePoint() Edwards25519 {
- return .{
- .x = Fe{ .limbs = .{ 3990542415680775, 3398198340507945, 4322667446711068, 2814063955482877, 2839572215813860 } },
- .y = Fe{ .limbs = .{ 1801439850948184, 1351079888211148, 450359962737049, 900719925474099, 1801439850948198 } },
- .z = Fe.one,
- .t = Fe{ .limbs = .{ 1841354044333475, 16398895984059, 755974180946558, 900171276175154, 1821297809914039 } },
- .is_base = true,
- };
- }
+ /// The edwards25519 base point.
+ pub const basePoint = Edwards25519{
+ .x = Fe{ .limbs = .{ 3990542415680775, 3398198340507945, 4322667446711068, 2814063955482877, 2839572215813860 } },
+ .y = Fe{ .limbs = .{ 1801439850948184, 1351079888211148, 450359962737049, 900719925474099, 1801439850948198 } },
+ .z = Fe.one,
+ .t = Fe{ .limbs = .{ 1841354044333475, 16398895984059, 755974180946558, 900171276175154, 1821297809914039 } },
+ .is_base = true,
+ };
- inline fn identityElement() Edwards25519 {
- return .{ .x = Fe.zero, .y = Fe.one, .z = Fe.one, .t = Fe.zero };
- }
+ const identityElement = Edwards25519{ .x = Fe.zero, .y = Fe.one, .z = Fe.one, .t = Fe.zero };
/// Reject the neutral element.
pub fn rejectIdentity(p: Edwards25519) !void {
@@ -121,16 +117,16 @@ pub const Edwards25519 = struct {
}
inline fn pcSelect(pc: [16]Edwards25519, b: u8) Edwards25519 {
- var t = Edwards25519.identityElement();
+ var t = Edwards25519.identityElement;
comptime var i: u8 = 0;
inline while (i < 16) : (i += 1) {
- t.cMov(pc[i], ((@as(usize, (b ^ i)) -% 1) >> 8) & 1);
+ t.cMov(pc[i], ((@as(usize, b ^ i) -% 1) >> 8) & 1);
}
return t;
}
fn pcMul(pc: [16]Edwards25519, s: [32]u8) !Edwards25519 {
- var q = Edwards25519.identityElement();
+ var q = Edwards25519.identityElement;
var pos: usize = 252;
while (true) : (pos -= 4) {
q = q.dbl().dbl().dbl().dbl();
@@ -144,7 +140,7 @@ pub const Edwards25519 = struct {
fn precompute(p: Edwards25519) [16]Edwards25519 {
var pc: [16]Edwards25519 = undefined;
- pc[0] = Edwards25519.identityElement();
+ pc[0] = Edwards25519.identityElement;
pc[1] = p;
var i: usize = 2;
while (i < 16) : (i += 1) {
@@ -153,11 +149,14 @@ pub const Edwards25519 = struct {
return pc;
}
- fn _mul(p: Edwards25519, s: [32]u8) !Edwards25519 {
+ /// Multiply an Edwards25519 point by a scalar without clamping it.
+ /// Return error.WeakPublicKey if the resulting point is
+ /// the identity element.
+ pub fn mul(p: Edwards25519, s: [32]u8) !Edwards25519 {
var pc: [16]Edwards25519 = undefined;
if (p.is_base) {
@setEvalBranchQuota(10000);
- pc = comptime precompute(Edwards25519.basePoint());
+ pc = comptime precompute(Edwards25519.basePoint);
} else {
pc = precompute(p);
pc[4].rejectIdentity() catch |_| return error.WeakPublicKey;
@@ -174,20 +173,13 @@ pub const Edwards25519 = struct {
pub fn clampedMul(p: Edwards25519, s: [32]u8) !Edwards25519 {
var t: [32]u8 = s;
scalar.clamp(&t);
- return _mul(p, t);
- }
-
- /// Multiply an Edwards25519 point by a scalar without clamping it.
- /// Return error.WeakPublicKey if the resulting point is
- /// the identity element.
- pub fn mul(p: Edwards25519, s: [32]u8) !Edwards25519 {
- return _mul(p, s);
+ return mul(p, t);
}
};
test "edwards25519 packing/unpacking" {
const s = [_]u8{170} ++ [_]u8{0} ** 31;
- var b = Edwards25519.basePoint();
+ var b = Edwards25519.basePoint;
const pk = try b.mul(s);
var buf: [128]u8 = undefined;
std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{pk.toBytes()}), "074BC7E0FCBD587FDBC0969444245FADC562809C8F6E97E949AF62484B5B81A6");
diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig
index 0bb8e1c92a..21b305f89d 100644
--- a/lib/std/crypto/25519/ristretto255.zig
+++ b/lib/std/crypto/25519/ristretto255.zig
@@ -43,10 +43,8 @@ pub const Ristretto255 = struct {
return p.p.rejectIdentity();
}
- /// Return the base point (Ristretto is a curve in desguise).
- pub inline fn basePoint() Ristretto255 {
- return .{ .p = Curve.basePoint() };
- }
+ /// The base point (Ristretto is a curve in desguise).
+ pub const basePoint = Ristretto255{ .p = Curve.basePoint };
/// Decode a Ristretto255 representative.
pub fn fromBytes(s: [32]u8) !Ristretto255 {
@@ -130,7 +128,7 @@ pub const Ristretto255 = struct {
};
test "ristretto255" {
- const p = Ristretto255.basePoint();
+ const p = Ristretto255.basePoint;
var buf: [256]u8 = undefined;
std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{p.toBytes()}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
diff --git a/lib/std/crypto/25519/x25519.zig b/lib/std/crypto/25519/x25519.zig
index ac88d40952..4b5a8b8482 100644
--- a/lib/std/crypto/25519/x25519.zig
+++ b/lib/std/crypto/25519/x25519.zig
@@ -17,7 +17,7 @@ pub const X25519 = struct {
std.debug.assert(public_key.len >= minimum_key_length);
var s: [32]u8 = undefined;
mem.copy(u8, &s, private_key[0..32]);
- if (Curve.basePoint().clampedMul(s)) |q| {
+ if (Curve.basePoint.clampedMul(s)) |q| {
mem.copy(u8, public_key, q.toBytes()[0..]);
return true;
} else |_| {
@@ -52,7 +52,7 @@ test "x25519 public key calculation from secret key" {
try fmt.hexToBytes(sk[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
try fmt.hexToBytes(pk_expected[0..], "f1814f0e8ff1043d8a44d25babff3cedcae6c22c3edaa48f857ae70de2baae50");
std.testing.expect(X25519.createPublicKey(pk_calculated[0..], &sk));
- std.testing.expect(std.mem.eql(u8, &pk_calculated, &pk_expected));
+ std.testing.expectEqual(pk_calculated, pk_expected);
}
test "x25519 rfc7748 vector1" {
@@ -64,7 +64,7 @@ test "x25519 rfc7748 vector1" {
var output: [32]u8 = undefined;
std.testing.expect(X25519.create(output[0..], secret_key[0..], public_key[0..]));
- std.testing.expect(std.mem.eql(u8, &output, expected_output[0..]));
+ std.testing.expectEqual(output, expected_output);
}
test "x25519 rfc7748 vector2" {
@@ -76,7 +76,7 @@ test "x25519 rfc7748 vector2" {
var output: [32]u8 = undefined;
std.testing.expect(X25519.create(output[0..], secret_key[0..], public_key[0..]));
- std.testing.expect(std.mem.eql(u8, &output, expected_output[0..]));
+ std.testing.expectEqual(output, expected_output);
}
test "x25519 rfc7748 one iteration" {
@@ -91,11 +91,11 @@ test "x25519 rfc7748 one iteration" {
var output: [32]u8 = undefined;
std.testing.expect(X25519.create(output[0..], &k, &u));
- std.mem.copy(u8, u[0..], k[0..]);
- std.mem.copy(u8, k[0..], output[0..]);
+ mem.copy(u8, u[0..], k[0..]);
+ mem.copy(u8, k[0..], output[0..]);
}
- std.testing.expect(std.mem.eql(u8, k[0..], expected_output[0..]));
+ std.testing.expectEqual(k, expected_output);
}
test "x25519 rfc7748 1,000 iterations" {
@@ -115,11 +115,11 @@ test "x25519 rfc7748 1,000 iterations" {
var output: [32]u8 = undefined;
std.testing.expect(X25519.create(output[0..], &k, &u));
- std.mem.copy(u8, u[0..], k[0..]);
- std.mem.copy(u8, k[0..], output[0..]);
+ mem.copy(u8, u[0..], k[0..]);
+ mem.copy(u8, k[0..], output[0..]);
}
- std.testing.expect(std.mem.eql(u8, k[0..], expected_output));
+ std.testing.expectEqual(k, expected_output);
}
test "x25519 rfc7748 1,000,000 iterations" {
@@ -138,9 +138,9 @@ test "x25519 rfc7748 1,000,000 iterations" {
var output: [32]u8 = undefined;
std.testing.expect(X25519.create(output[0..], &k, &u));
- std.mem.copy(u8, u[0..], k[0..]);
- std.mem.copy(u8, k[0..], output[0..]);
+ mem.copy(u8, u[0..], k[0..]);
+ mem.copy(u8, k[0..], output[0..]);
}
- std.testing.expect(std.mem.eql(u8, k[0..], expected_output));
+ std.testing.expectEqual(k, expected_output);
}
From d86cde575239d4e38631d562fba8b4001d436ebd Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sat, 15 Aug 2020 11:11:33 +0200
Subject: [PATCH 105/153] Add comment, use @truncate
---
lib/std/crypto/25519/curve25519.zig | 2 +-
lib/std/crypto/25519/edwards25519.zig | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index 9980c152eb..3a4871a1f3 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -43,7 +43,7 @@ pub const Curve25519 = struct {
var swap: u8 = 0;
var pos: usize = bits - 1;
while (true) : (pos -= 1) {
- const b = (s[pos / 8] >> @intCast(u3, pos & 7)) & 1;
+ const b = (s[pos >> 3] >> @truncate(u3, pos)) & 1;
swap ^= b;
Fe.cSwap2(&x2, &x3, &z2, &z3, swap);
swap = b;
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index a65e1dfc11..93b1a69d17 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -28,7 +28,7 @@ pub const Edwards25519 = struct {
const vxx = x.sq().mul(v);
const has_m_root = vxx.sub(u).isZero();
const has_p_root = vxx.add(u).isZero();
- if ((@boolToInt(has_m_root) | @boolToInt(has_p_root)) == 0) {
+ if ((@boolToInt(has_m_root) | @boolToInt(has_p_root)) == 0) { // best-effort to avoid two conditional branches
return error.InvalidEncoding;
}
x.cMov(x.mul(Fe.sqrtm1), 1 - @boolToInt(has_m_root));
@@ -130,7 +130,7 @@ pub const Edwards25519 = struct {
var pos: usize = 252;
while (true) : (pos -= 4) {
q = q.dbl().dbl().dbl().dbl();
- const b = (s[pos / 8] >> @intCast(u3, pos & 7)) & 0xf;
+ const b = (s[pos >> 3] >> @truncate(u3, pos)) & 0xf;
q = q.add(pcSelect(pc, b));
if (pos == 0) break;
}
From 5ab69633b712914cccdf2f08d717387864d6c4c7 Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sat, 15 Aug 2020 11:48:34 +0200
Subject: [PATCH 106/153] Constify the ladder
---
lib/std/crypto/25519/curve25519.zig | 35 +++++++++++----------------
lib/std/crypto/25519/edwards25519.zig | 4 +--
2 files changed, 16 insertions(+), 23 deletions(-)
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index 3a4871a1f3..46d7b9a3a6 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -43,28 +43,21 @@ pub const Curve25519 = struct {
var swap: u8 = 0;
var pos: usize = bits - 1;
while (true) : (pos -= 1) {
- const b = (s[pos >> 3] >> @truncate(u3, pos)) & 1;
- swap ^= b;
+ const bit = (s[pos >> 3] >> @truncate(u3, pos)) & 1;
+ swap ^= bit;
Fe.cSwap2(&x2, &x3, &z2, &z3, swap);
- swap = b;
- var tmp0 = x3.sub(z3);
- var tmp1 = x2.sub(z2);
- x2 = x2.add(z2);
- z2 = x3.add(z3);
- z3 = tmp0.mul(x2);
- z2 = z2.mul(tmp1);
- tmp0 = tmp1.sq();
- tmp1 = x2.sq();
- x3 = z3.add(z2);
- z2 = z3.sub(z2);
- x2 = tmp1.mul(tmp0);
- tmp1 = tmp1.sub(tmp0);
- z2 = z2.sq();
- z3 = tmp1.mul32(121666);
- x3 = x3.sq();
- tmp0 = tmp0.add(z3);
- z3 = x1.mul(z2);
- z2 = tmp1.mul(tmp0);
+ swap = bit;
+ const a = x2.add(z2);
+ const b = x2.sub(z2);
+ const aa = a.sq();
+ const bb = b.sq();
+ x2 = aa.mul(bb);
+ const e = aa.sub(bb);
+ const da = x3.sub(z3).mul(a);
+ const cb = x3.add(z3).mul(b);
+ x3 = da.add(cb).sq();
+ z3 = x1.mul(da.sub(cb).sq());
+ z2 = e.mul(bb.add(e.mul32(121666)));
if (pos == 0) break;
}
Fe.cSwap2(&x2, &x3, &z2, &z3, swap);
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index 93b1a69d17..5d70122921 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -130,8 +130,8 @@ pub const Edwards25519 = struct {
var pos: usize = 252;
while (true) : (pos -= 4) {
q = q.dbl().dbl().dbl().dbl();
- const b = (s[pos >> 3] >> @truncate(u3, pos)) & 0xf;
- q = q.add(pcSelect(pc, b));
+ const bit = (s[pos >> 3] >> @truncate(u3, pos)) & 0xf;
+ q = q.add(pcSelect(pc, bit));
if (pos == 0) break;
}
try q.rejectIdentity();
From 08dfbee961b7705d6f9d03fc982ec808f370f64a Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sat, 15 Aug 2020 18:03:38 +0200
Subject: [PATCH 107/153] Benchmark signatures
---
lib/std/crypto/benchmark.zig | 32 +++++++++++++++++++++++++++++++-
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index f0f40bd231..267c2881b4 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -90,7 +90,6 @@ pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_c
var out: [DhKeyExchange.minimum_key_length]u8 = undefined;
prng.random.bytes(out[0..]);
- var offset: usize = 0;
var timer = try Timer.start();
const start = timer.lap();
{
@@ -107,6 +106,30 @@ pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_c
return throughput;
}
+const signatures = [_]Crypto{Crypto{ .ty = crypto.Ed25519, .name = "ed25519" }};
+
+pub fn benchmarkSignatures(comptime Signature: anytype, comptime signatures_count: comptime_int) !u64 {
+ var seed: [Signature.seed_length]u8 = undefined;
+ prng.random.bytes(seed[0..]);
+ const msg = [_]u8{0} ** 64;
+ const key_pair = try Signature.createKeyPair(seed);
+
+ var timer = try Timer.start();
+ const start = timer.lap();
+ {
+ var i: usize = 0;
+ while (i < signatures_count) : (i += 1) {
+ _ = try Signature.sign(&msg, key_pair, null);
+ }
+ }
+ const end = timer.read();
+
+ const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
+ const throughput = @floatToInt(u64, signatures_count / elapsed_s);
+
+ return throughput;
+}
+
fn usage() void {
std.debug.warn(
\\throughput_test [options]
@@ -183,4 +206,11 @@ pub fn main() !void {
try stdout.print("{:>11}: {:5} exchanges/s\n", .{ E.name, throughput });
}
}
+
+ inline for (signatures) |E| {
+ if (filter == null or std.mem.indexOf(u8, E.name, filter.?) != null) {
+ const throughput = try benchmarkSignatures(E.ty, mode(1000));
+ try stdout.print("{:>11}: {:5} signatures/s\n", .{ E.name, throughput });
+ }
+ }
}
From ab6ffa8a3c1e3fa802cacd970d3ed415ba25a85e Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sat, 15 Aug 2020 21:00:22 +0200
Subject: [PATCH 108/153] Work around sqrtRatioM1() issue in release-safe mode
---
lib/std/crypto/25519/ristretto255.zig | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig
index 21b305f89d..bfdeb41f9d 100644
--- a/lib/std/crypto/25519/ristretto255.zig
+++ b/lib/std/crypto/25519/ristretto255.zig
@@ -24,11 +24,11 @@ pub const Ristretto255 = struct {
const has_f_root = f_root_check.isZero();
const x_sqrtm1 = x.mul(Fe.sqrtm1); // x*sqrt(-1)
x.cMov(x_sqrtm1, @boolToInt(has_p_root) | @boolToInt(has_f_root));
- x = x.abs();
+ const xa = x.abs();
if ((@boolToInt(has_m_root) | @boolToInt(has_p_root)) == 0) {
return error.NoRoot;
}
- return x;
+ return xa;
}
fn rejectNonCanonical(s: [32]u8) !void {
From 37ae2464053628d10c7e7e7165ba8a267006b0ad Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sat, 15 Aug 2020 21:14:56 +0200
Subject: [PATCH 109/153] Inline Fe.{sub,mul,sq} for a performance boost in
release-safe mode
---
lib/std/crypto/25519/field.zig | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/std/crypto/25519/field.zig b/lib/std/crypto/25519/field.zig
index 59aa1e3ba9..121d187e17 100644
--- a/lib/std/crypto/25519/field.zig
+++ b/lib/std/crypto/25519/field.zig
@@ -114,7 +114,7 @@ pub const Fe = struct {
return fe;
}
- pub fn sub(a: Fe, b: Fe) Fe {
+ pub inline fn sub(a: Fe, b: Fe) Fe {
var fe = b;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
@@ -200,7 +200,7 @@ pub const Fe = struct {
return .{ .limbs = rs };
}
- pub fn mul(a: Fe, b: Fe) Fe {
+ pub inline fn mul(a: Fe, b: Fe) Fe {
var ax: [5]u128 = undefined;
var bx: [5]u128 = undefined;
var a19: [5]u128 = undefined;
@@ -223,7 +223,7 @@ pub const Fe = struct {
return _carry128(&r);
}
- fn _sq(a: Fe, double: comptime bool) Fe {
+ inline fn _sq(a: Fe, double: comptime bool) Fe {
var ax: [5]u128 = undefined;
var r: [5]u128 = undefined;
comptime var i = 0;
From 7f9a227abfbce0e67746cc57dd9b6a4bf0a8d94a Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sun, 16 Aug 2020 00:58:14 +0200
Subject: [PATCH 110/153] deinline edwards25519.{add,dbl}
---
lib/std/crypto/25519/edwards25519.zig | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index 5d70122921..11f5963101 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -74,7 +74,7 @@ pub const Edwards25519 = struct {
}
/// Double an Edwards25519 point.
- pub inline fn dbl(p: Edwards25519) Edwards25519 {
+ pub fn dbl(p: Edwards25519) Edwards25519 {
const t0 = p.x.add(p.y).sq();
var x = p.x.sq();
var z = p.y.sq();
@@ -91,7 +91,7 @@ pub const Edwards25519 = struct {
}
/// Add two Edwards25519 points.
- pub inline fn add(p: Edwards25519, q: Edwards25519) Edwards25519 {
+ pub fn add(p: Edwards25519, q: Edwards25519) Edwards25519 {
const a = p.y.sub(p.x).mul(q.y.sub(q.x));
const b = p.x.add(p.y).mul(q.x.add(q.y));
const c = p.t.mul(q.t).mul(Fe.edwards25519d2);
From fc850aad619cc47eb7634a326bad670b43964532 Mon Sep 17 00:00:00 2001
From: Isaac Freund
Date: Sun, 16 Aug 2020 16:24:00 +0200
Subject: [PATCH 111/153] stage2: fix signed <-> unsigned Value casts
---
src-self-hosted/value.zig | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig
index b2fad9207d..c8af4716a6 100644
--- a/src-self-hosted/value.zig
+++ b/src-self-hosted/value.zig
@@ -562,7 +562,7 @@ pub const Value = extern union {
.bool_true => return 1,
.int_u64 => return self.cast(Payload.Int_u64).?.int,
- .int_i64 => return @intCast(u64, self.cast(Payload.Int_u64).?.int),
+ .int_i64 => return @intCast(u64, self.cast(Payload.Int_i64).?.int),
.int_big_positive => return self.cast(Payload.IntBigPositive).?.asBigInt().to(u64) catch unreachable,
.int_big_negative => return self.cast(Payload.IntBigNegative).?.asBigInt().to(u64) catch unreachable,
}
@@ -636,7 +636,7 @@ pub const Value = extern union {
.bool_true => return 1,
- .int_u64 => return @intCast(i64, self.cast(Payload.Int_i64).?.int),
+ .int_u64 => return @intCast(i64, self.cast(Payload.Int_u64).?.int),
.int_i64 => return self.cast(Payload.Int_i64).?.int,
.int_big_positive => return self.cast(Payload.IntBigPositive).?.asBigInt().to(i64) catch unreachable,
.int_big_negative => return self.cast(Payload.IntBigNegative).?.asBigInt().to(i64) catch unreachable,
From c52513e25b69cdd28c5af567b8d3d6a0d9fb979e Mon Sep 17 00:00:00 2001
From: Vexu
Date: Fri, 14 Aug 2020 17:39:18 +0300
Subject: [PATCH 112/153] stage2: astgen for ptr types and address of
---
src-self-hosted/astgen.zig | 51 ++++++++++++++++++++++++++++++++++--
src-self-hosted/zir.zig | 22 ++++++++++++++++
src-self-hosted/zir_sema.zig | 5 ++++
3 files changed, 76 insertions(+), 2 deletions(-)
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index b5f439ec27..795aeda5ed 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -113,6 +113,7 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.Period => return rlWrap(mod, scope, rl, try field(mod, scope, node.castTag(.Period).?)),
.Deref => return rlWrap(mod, scope, rl, try deref(mod, scope, node.castTag(.Deref).?)),
.BoolNot => return rlWrap(mod, scope, rl, try boolNot(mod, scope, node.castTag(.BoolNot).?)),
+ .AddressOf => return rlWrap(mod, scope, rl, try addressOf(mod, scope, node.castTag(.AddressOf).?)),
.FloatLiteral => return rlWrap(mod, scope, rl, try floatLiteral(mod, scope, node.castTag(.FloatLiteral).?)),
.UndefinedLiteral => return rlWrap(mod, scope, rl, try undefLiteral(mod, scope, node.castTag(.UndefinedLiteral).?)),
.BoolLiteral => return rlWrap(mod, scope, rl, try boolLiteral(mod, scope, node.castTag(.BoolLiteral).?)),
@@ -122,6 +123,7 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.Block => return rlWrapVoid(mod, scope, rl, node, try blockExpr(mod, scope, node.castTag(.Block).?)),
.LabeledBlock => return labeledBlockExpr(mod, scope, rl, node.castTag(.LabeledBlock).?),
.Break => return rlWrap(mod, scope, rl, try breakExpr(mod, scope, node.castTag(.Break).?)),
+ .PtrType => return rlWrap(mod, scope, rl, try ptrType(mod, scope, node.castTag(.PtrType).?)),
.Defer => return mod.failNode(scope, node, "TODO implement astgen.expr for .Defer", .{}),
.Catch => return mod.failNode(scope, node, "TODO implement astgen.expr for .Catch", .{}),
@@ -131,7 +133,6 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.MergeErrorSets => return mod.failNode(scope, node, "TODO implement astgen.expr for .MergeErrorSets", .{}),
.Range => return mod.failNode(scope, node, "TODO implement astgen.expr for .Range", .{}),
.OrElse => return mod.failNode(scope, node, "TODO implement astgen.expr for .OrElse", .{}),
- .AddressOf => return mod.failNode(scope, node, "TODO implement astgen.expr for .AddressOf", .{}),
.Await => return mod.failNode(scope, node, "TODO implement astgen.expr for .Await", .{}),
.BitNot => return mod.failNode(scope, node, "TODO implement astgen.expr for .BitNot", .{}),
.Negation => return mod.failNode(scope, node, "TODO implement astgen.expr for .Negation", .{}),
@@ -140,7 +141,6 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.Try => return mod.failNode(scope, node, "TODO implement astgen.expr for .Try", .{}),
.ArrayType => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayType", .{}),
.ArrayTypeSentinel => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayTypeSentinel", .{}),
- .PtrType => return mod.failNode(scope, node, "TODO implement astgen.expr for .PtrType", .{}),
.SliceType => return mod.failNode(scope, node, "TODO implement astgen.expr for .SliceType", .{}),
.Slice => return mod.failNode(scope, node, "TODO implement astgen.expr for .Slice", .{}),
.ArrayAccess => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayAccess", .{}),
@@ -452,6 +452,12 @@ fn boolNot(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) InnerErr
return addZIRUnOp(mod, scope, src, .boolnot, operand);
}
+fn addressOf(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) InnerError!*zir.Inst {
+ const tree = scope.tree();
+ const src = tree.token_locs[node.op_token].start;
+ return expr(mod, scope, .lvalue, node.rhs);
+}
+
fn optionalType(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[node.op_token].start;
@@ -463,6 +469,47 @@ fn optionalType(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) Inn
return addZIRUnOp(mod, scope, src, .optional_type, operand);
}
+fn ptrType(mod: *Module, scope: *Scope, node: *ast.Node.PtrType) InnerError!*zir.Inst {
+ const tree = scope.tree();
+ const src = tree.token_locs[node.op_token].start;
+ const meta_type = try addZIRInstConst(mod, scope, src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.type_type),
+ });
+
+ const simple = node.ptr_info.allowzero_token == null and
+ node.ptr_info.align_info == null and
+ node.ptr_info.volatile_token == null and
+ node.ptr_info.sentinel == null;
+
+ if (simple) {
+ const child_type = try expr(mod, scope, .{ .ty = meta_type }, node.rhs);
+ return addZIRUnOp(mod, scope, src, if (node.ptr_info.const_token == null)
+ .single_mut_ptr_type
+ else
+ .single_const_ptr_type, child_type);
+ }
+
+ const child_type = try expr(mod, scope, .{ .ty = meta_type }, node.rhs);
+
+ var kw_args: std.meta.fieldInfo(zir.Inst.PtrType, "kw_args").field_type = .{};
+ kw_args.@"allowzero" = node.ptr_info.allowzero_token != null;
+ if (node.ptr_info.align_info) |some| {
+ kw_args.@"align" = try expr(mod, scope, .none, some.node);
+ if (some.bit_range) |bit_range| {
+ kw_args.align_bit_start = try expr(mod, scope, .none, bit_range.start);
+ kw_args.align_bit_end = try expr(mod, scope, .none, bit_range.end);
+ }
+ }
+ kw_args.@"const" = node.ptr_info.const_token != null;
+ kw_args.@"volatile" = node.ptr_info.volatile_token != null;
+ if (node.ptr_info.sentinel) |some| {
+ kw_args.sentinel = try expr(mod, scope, .{ .ty = child_type }, some);
+ }
+
+ return addZIRInst(mod, scope, src, zir.Inst.PtrType, .{ .child_type = child_type }, kw_args);
+}
+
fn unwrapOptional(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.SimpleSuffixOp) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[node.rtoken].start;
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index ef9351047b..25b3b171b4 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -192,6 +192,8 @@ pub const Inst = struct {
single_const_ptr_type,
/// Create a mutable pointer type based on the element type. `*T`
single_mut_ptr_type,
+ /// Create a pointer type with attributes
+ ptr_type,
/// Write a value to a pointer. For loading, see `deref`.
store,
/// String Literal. Makes an anonymous Decl and then takes a pointer to it.
@@ -305,6 +307,7 @@ pub const Inst = struct {
.fntype => FnType,
.elemptr => ElemPtr,
.condbr => CondBr,
+ .ptr_type => PtrType,
};
}
@@ -382,6 +385,7 @@ pub const Inst = struct {
.optional_type,
.unwrap_optional_safe,
.unwrap_optional_unsafe,
+ .ptr_type,
=> false,
.@"break",
@@ -811,6 +815,24 @@ pub const Inst = struct {
},
kw_args: struct {},
};
+
+ pub const PtrType = struct {
+ pub const base_tag = Tag.ptr_type;
+ base: Inst,
+
+ positionals: struct {
+ child_type: *Inst,
+ },
+ kw_args: struct {
+ @"allowzero": bool = false,
+ @"align": ?*Inst = null,
+ align_bit_start: ?*Inst = null,
+ align_bit_end: ?*Inst = null,
+ @"const": bool = true,
+ @"volatile": bool = false,
+ sentinel: ?*Inst = null,
+ },
+ };
};
pub const ErrorMsg = struct {
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 2fe9e5cfba..5047c61687 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -53,6 +53,7 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.ret_type => return analyzeInstRetType(mod, scope, old_inst.castTag(.ret_type).?),
.single_const_ptr_type => return analyzeInstSingleConstPtrType(mod, scope, old_inst.castTag(.single_const_ptr_type).?),
.single_mut_ptr_type => return analyzeInstSingleMutPtrType(mod, scope, old_inst.castTag(.single_mut_ptr_type).?),
+ .ptr_type => return analyzeInstPtrType(mod, scope, old_inst.castTag(.ptr_type).?),
.store => return analyzeInstStore(mod, scope, old_inst.castTag(.store).?),
.str => return analyzeInstStr(mod, scope, old_inst.castTag(.str).?),
.int => {
@@ -1287,3 +1288,7 @@ fn analyzeInstSingleMutPtrType(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp
const ty = try mod.singleMutPtrType(scope, inst.base.src, elem_type);
return mod.constType(scope, inst.base.src, ty);
}
+
+fn analyzeInstPtrType(mod: *Module, scope: *Scope, inst: *zir.Inst.PtrType) InnerError!*Inst {
+ return mod.fail(scope, inst.base.src, "TODO implement ptr_type", .{});
+}
From 012fac255f35b9cdbe18c14753a195de89d07d28 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Fri, 14 Aug 2020 20:25:06 +0300
Subject: [PATCH 113/153] stage2: fix optimization causing wrong optional child
types
---
src-self-hosted/Module.zig | 117 ++++++++++++++++++---------------
src-self-hosted/astgen.zig | 2 +-
src-self-hosted/codegen.zig | 36 +++++++++-
src-self-hosted/ir.zig | 2 +
src-self-hosted/type.zig | 124 ++++++++++++++++++++++-------------
src-self-hosted/zir.zig | 4 +-
src-self-hosted/zir_sema.zig | 29 ++++----
7 files changed, 199 insertions(+), 115 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 2fad7e7a00..2584066499 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -2200,8 +2200,11 @@ pub fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) Inn
};
const decl_tv = try decl.typedValue();
- const ty_payload = try scope.arena().create(Type.Payload.SingleConstPointer);
- ty_payload.* = .{ .pointee_type = decl_tv.ty };
+ const ty_payload = try scope.arena().create(Type.Payload.Pointer);
+ ty_payload.* = .{
+ .base = .{ .tag = .single_const_pointer },
+ .pointee_type = decl_tv.ty,
+ };
const val_payload = try scope.arena().create(Value.Payload.DeclRef);
val_payload.* = .{ .decl = decl };
@@ -2425,6 +2428,16 @@ pub fn cmpNumeric(
return self.addBinOp(b, src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
}
+fn wrapOptional(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
+ if (inst.value()) |val| {
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
+ }
+
+ // TODO how do we get the result location
+ const b = try self.requireRuntimeBlock(scope, inst.src);
+ return self.addUnOp(b, inst.src, dest_type, .wrap_optional, inst);
+}
+
fn makeIntType(self: *Module, scope: *Scope, signed: bool, bits: u16) !Type {
if (signed) {
const int_payload = try scope.arena().create(Type.Payload.IntSigned);
@@ -2502,14 +2515,12 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
// T to ?T
if (dest_type.zigTypeTag() == .Optional) {
- const child_type = dest_type.elemType();
- if (inst.value()) |val| {
- if (child_type.eql(inst.ty)) {
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
- }
- return self.fail(scope, inst.src, "TODO optional wrap {} to {}", .{ val, dest_type });
- } else if (child_type.eql(inst.ty)) {
- return self.fail(scope, inst.src, "TODO optional wrap {}", .{dest_type});
+ var buf: Type.Payload.Pointer = undefined;
+ const child_type = dest_type.optionalChild(&buf);
+ if (child_type.eql(inst.ty)) {
+ return self.wrapOptional(scope, dest_type, inst);
+ } else if (try self.coerceNum(scope, child_type, inst)) |some| {
+ return self.wrapOptional(scope, dest_type, some);
}
}
@@ -2527,39 +2538,8 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
}
// comptime known number to other number
- if (inst.value()) |val| {
- const src_zig_tag = inst.ty.zigTypeTag();
- const dst_zig_tag = dest_type.zigTypeTag();
-
- if (dst_zig_tag == .ComptimeInt or dst_zig_tag == .Int) {
- if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
- if (val.floatHasFraction()) {
- return self.fail(scope, inst.src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst.ty });
- }
- return self.fail(scope, inst.src, "TODO float to int", .{});
- } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
- if (!val.intFitsInType(dest_type, self.target())) {
- return self.fail(scope, inst.src, "type {} cannot represent integer value {}", .{ inst.ty, val });
- }
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
- }
- } else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) {
- if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
- const res = val.floatCast(scope.arena(), dest_type, self.target()) catch |err| switch (err) {
- error.Overflow => return self.fail(
- scope,
- inst.src,
- "cast of value {} to type '{}' loses information",
- .{ val, dest_type },
- ),
- error.OutOfMemory => return error.OutOfMemory,
- };
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = res });
- } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
- return self.fail(scope, inst.src, "TODO int to float", .{});
- }
- }
- }
+ if (try self.coerceNum(scope, dest_type, inst)) |some|
+ return some;
// integer widening
if (inst.ty.zigTypeTag() == .Int and dest_type.zigTypeTag() == .Int) {
@@ -2591,6 +2571,42 @@ pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst
return self.fail(scope, inst.src, "expected {}, found {}", .{ dest_type, inst.ty });
}
+pub fn coerceNum(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !?*Inst {
+ const val = inst.value() orelse return null;
+ const src_zig_tag = inst.ty.zigTypeTag();
+ const dst_zig_tag = dest_type.zigTypeTag();
+
+ if (dst_zig_tag == .ComptimeInt or dst_zig_tag == .Int) {
+ if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
+ if (val.floatHasFraction()) {
+ return self.fail(scope, inst.src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst.ty });
+ }
+ return self.fail(scope, inst.src, "TODO float to int", .{});
+ } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
+ if (!val.intFitsInType(dest_type, self.target())) {
+ return self.fail(scope, inst.src, "type {} cannot represent integer value {}", .{ inst.ty, val });
+ }
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
+ }
+ } else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) {
+ if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
+ const res = val.floatCast(scope.arena(), dest_type, self.target()) catch |err| switch (err) {
+ error.Overflow => return self.fail(
+ scope,
+ inst.src,
+ "cast of value {} to type '{}' loses information",
+ .{ val, dest_type },
+ ),
+ error.OutOfMemory => return error.OutOfMemory,
+ };
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = res });
+ } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
+ return self.fail(scope, inst.src, "TODO int to float", .{});
+ }
+ }
+ return null;
+}
+
pub fn storePtr(self: *Module, scope: *Scope, src: usize, ptr: *Inst, uncasted_value: *Inst) !*Inst {
if (ptr.ty.isConstPtr())
return self.fail(scope, src, "cannot assign to constant", .{});
@@ -2878,15 +2894,12 @@ pub fn floatSub(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs:
return Value.initPayload(val_payload);
}
-pub fn singleMutPtrType(self: *Module, scope: *Scope, src: usize, elem_ty: Type) error{OutOfMemory}!Type {
- const type_payload = try scope.arena().create(Type.Payload.SingleMutPointer);
- type_payload.* = .{ .pointee_type = elem_ty };
- return Type.initPayload(&type_payload.base);
-}
-
-pub fn singleConstPtrType(self: *Module, scope: *Scope, src: usize, elem_ty: Type) error{OutOfMemory}!Type {
- const type_payload = try scope.arena().create(Type.Payload.SingleConstPointer);
- type_payload.* = .{ .pointee_type = elem_ty };
+pub fn singlePtrType(self: *Module, scope: *Scope, src: usize, mutable: bool, elem_ty: Type) error{OutOfMemory}!Type {
+ const type_payload = try scope.arena().create(Type.Payload.Pointer);
+ type_payload.* = .{
+ .base = .{ .tag = if (mutable) .single_mut_pointer else .single_const_pointer },
+ .pointee_type = elem_ty,
+ };
return Type.initPayload(&type_payload.base);
}
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 795aeda5ed..fd4c5c2864 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -870,7 +870,7 @@ fn identifier(mod: *Module, scope: *Scope, rl: ResultLoc, ident: *ast.Node.OneTo
const int_type_payload = try scope.arena().create(Value.Payload.IntType);
int_type_payload.* = .{ .signed = is_signed, .bits = bit_count };
const result = try addZIRInstConst(mod, scope, src, .{
- .ty = Type.initTag(.comptime_int),
+ .ty = Type.initTag(.type),
.val = Value.initPayload(&int_type_payload.base),
});
return rlWrap(mod, scope, rl, result);
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 12d3884308..58b7c97f7b 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -682,6 +682,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.sub => return self.genSub(inst.castTag(.sub).?),
.unreach => return MCValue{ .unreach = {} },
.unwrap_optional => return self.genUnwrapOptional(inst.castTag(.unwrap_optional).?),
+ .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?),
}
}
@@ -840,6 +841,22 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
+ fn genWrapOptional(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+ const optional_ty = inst.base.ty;
+
+ // No side effects, so if it's unreferenced, do nothing.
+ if (inst.base.isUnused())
+ return MCValue.dead;
+
+ // Optional type is just a boolean true
+ if (optional_ty.abiSize(self.target.*) == 1)
+ return MCValue{ .immediate = 1 };
+
+ switch (arch) {
+ else => return self.fail(inst.base.src, "TODO implement wrap optional for {}", .{self.target.cpu.arch}),
+ }
+ }
+
fn genLoad(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
const elem_ty = inst.base.ty;
if (!elem_ty.hasCodeGenBits())
@@ -2028,9 +2045,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return mcv;
}
- fn genTypedValue(self: *Self, src: usize, typed_value: TypedValue) !MCValue {
+ fn genTypedValue(self: *Self, src: usize, typed_value: TypedValue) error{ CodegenFail, OutOfMemory }!MCValue {
if (typed_value.val.isUndef())
- return MCValue.undef;
+ return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
switch (typed_value.ty.zigTypeTag()) {
@@ -2055,6 +2072,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
},
.ComptimeInt => unreachable, // semantic analysis prevents this
.ComptimeFloat => unreachable, // semantic analysis prevents this
+ .Optional => {
+ if (typed_value.ty.isPtrLikeOptional()) {
+ if (typed_value.val.isNull())
+ return MCValue{ .immediate = 0 };
+
+ var buf: Type.Payload.Pointer = undefined;
+ return self.genTypedValue(src, .{
+ .ty = typed_value.ty.optionalChild(&buf),
+ .val = typed_value.val,
+ });
+ } else if (typed_value.ty.abiSize(self.target.*) == 1) {
+ return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
+ }
+ return self.fail(src, "TODO non pointer optionals", .{});
+ },
else => return self.fail(src, "TODO implement const of type '{}'", .{typed_value.ty}),
}
}
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 9ae9518efe..4f83fa7030 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -83,6 +83,7 @@ pub const Inst = struct {
floatcast,
intcast,
unwrap_optional,
+ wrap_optional,
pub fn Type(tag: Tag) type {
return switch (tag) {
@@ -104,6 +105,7 @@ pub const Inst = struct {
.intcast,
.load,
.unwrap_optional,
+ .wrap_optional,
=> UnOp,
.add,
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index ef78b046e6..cc3f665c4c 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -107,6 +107,17 @@ pub const Type = extern union {
return @fieldParentPtr(T, "base", self.ptr_otherwise);
}
+ pub fn castPointer(self: Type) ?*Payload.Pointer {
+ return switch (self.tag()) {
+ .single_const_pointer,
+ .single_mut_pointer,
+ .optional_single_const_pointer,
+ .optional_single_mut_pointer,
+ => @fieldParentPtr(Payload.Pointer, "base", self.ptr_otherwise),
+ else => null,
+ };
+ }
+
pub fn eql(a: Type, b: Type) bool {
// As a shortcut, if the small tags / addresses match, we're done.
if (a.tag_if_small_enough == b.tag_if_small_enough)
@@ -126,8 +137,8 @@ pub const Type = extern union {
.Null => return true,
.Pointer => {
// Hot path for common case:
- if (a.cast(Payload.SingleConstPointer)) |a_payload| {
- if (b.cast(Payload.SingleConstPointer)) |b_payload| {
+ if (a.castPointer()) |a_payload| {
+ if (b.castPointer()) |b_payload| {
return eql(a_payload.pointee_type, b_payload.pointee_type);
}
}
@@ -185,7 +196,9 @@ pub const Type = extern union {
return true;
},
.Optional => {
- return a.elemType().eql(b.elemType());
+ var buf_a: Payload.Pointer = undefined;
+ var buf_b: Payload.Pointer = undefined;
+ return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b));
},
.Float,
.Struct,
@@ -249,7 +262,8 @@ pub const Type = extern union {
}
},
.Optional => {
- std.hash.autoHash(&hasher, self.elemType().hash());
+ var buf: Payload.Pointer = undefined;
+ std.hash.autoHash(&hasher, self.optionalChild(&buf).hash());
},
.Float,
.Struct,
@@ -326,8 +340,6 @@ pub const Type = extern union {
};
return Type{ .ptr_otherwise = &new_payload.base };
},
- .single_const_pointer => return self.copyPayloadSingleField(allocator, Payload.SingleConstPointer, "pointee_type"),
- .single_mut_pointer => return self.copyPayloadSingleField(allocator, Payload.SingleMutPointer, "pointee_type"),
.int_signed => return self.copyPayloadShallow(allocator, Payload.IntSigned),
.int_unsigned => return self.copyPayloadShallow(allocator, Payload.IntUnsigned),
.function => {
@@ -346,8 +358,11 @@ pub const Type = extern union {
return Type{ .ptr_otherwise = &new_payload.base };
},
.optional => return self.copyPayloadSingleField(allocator, Payload.Optional, "child_type"),
- .optional_single_mut_pointer => return self.copyPayloadSingleField(allocator, Payload.OptionalSingleMutPointer, "pointee_type"),
- .optional_single_const_pointer => return self.copyPayloadSingleField(allocator, Payload.OptionalSingleConstPointer, "pointee_type"),
+ .single_const_pointer,
+ .single_mut_pointer,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
+ => return self.copyPayloadSingleField(allocator, Payload.Pointer, "pointee_type"),
}
}
@@ -441,13 +456,13 @@ pub const Type = extern union {
continue;
},
.single_const_pointer => {
- const payload = @fieldParentPtr(Payload.SingleConstPointer, "base", ty.ptr_otherwise);
+ const payload = @fieldParentPtr(Payload.Pointer, "base", ty.ptr_otherwise);
try out_stream.writeAll("*const ");
ty = payload.pointee_type;
continue;
},
.single_mut_pointer => {
- const payload = @fieldParentPtr(Payload.SingleMutPointer, "base", ty.ptr_otherwise);
+ const payload = @fieldParentPtr(Payload.Pointer, "base", ty.ptr_otherwise);
try out_stream.writeAll("*");
ty = payload.pointee_type;
continue;
@@ -467,13 +482,13 @@ pub const Type = extern union {
continue;
},
.optional_single_const_pointer => {
- const payload = @fieldParentPtr(Payload.OptionalSingleConstPointer, "base", ty.ptr_otherwise);
+ const payload = @fieldParentPtr(Payload.Pointer, "base", ty.ptr_otherwise);
try out_stream.writeAll("?*const ");
ty = payload.pointee_type;
continue;
},
.optional_single_mut_pointer => {
- const payload = @fieldParentPtr(Payload.OptionalSingleMutPointer, "base", ty.ptr_otherwise);
+ const payload = @fieldParentPtr(Payload.Pointer, "base", ty.ptr_otherwise);
try out_stream.writeAll("?*");
ty = payload.pointee_type;
continue;
@@ -658,7 +673,8 @@ pub const Type = extern union {
},
.optional => {
- const child_type = self.cast(Payload.Optional).?.child_type;
+ var buf: Payload.Pointer = undefined;
+ const child_type = self.optionalChild(&buf);
if (!child_type.hasCodeGenBits()) return 1;
if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr())
@@ -750,7 +766,8 @@ pub const Type = extern union {
},
.optional => {
- const child_type = self.cast(Payload.Optional).?.child_type;
+ var buf: Payload.Pointer = undefined;
+ const child_type = self.optionalChild(&buf);
if (!child_type.hasCodeGenBits()) return 1;
if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr())
@@ -990,7 +1007,23 @@ pub const Type = extern union {
};
}
- /// Asserts the type is a pointer, optional or array type.
+ /// Asserts that the type is an optional
+ pub fn isPtrLikeOptional(self: Type) bool {
+ switch (self.tag()) {
+ .optional_single_const_pointer, .optional_single_mut_pointer => return true,
+ .optional => {
+ var buf: Payload.Pointer = undefined;
+ const child_type = self.optionalChild(&buf);
+ // optionals of zero-bit payload types (no codegen bits) behave like bools
+ if (!child_type.hasCodeGenBits()) return false;
+
+ return child_type.zigTypeTag() == .Pointer and !child_type.isCPtr();
+ },
+ else => unreachable,
+ }
+ }
+
+ /// Asserts the type is a pointer or array type.
pub fn elemType(self: Type) Type {
return switch (self.tag()) {
.u8,
@@ -1033,16 +1066,38 @@ pub const Type = extern union {
.function,
.int_unsigned,
.int_signed,
+ .optional,
+ .optional_single_const_pointer,
+ .optional_single_mut_pointer,
=> unreachable,
.array => self.cast(Payload.Array).?.elem_type,
- .single_const_pointer => self.cast(Payload.SingleConstPointer).?.pointee_type,
- .single_mut_pointer => self.cast(Payload.SingleMutPointer).?.pointee_type,
+ .single_const_pointer => self.castPointer().?.pointee_type,
+ .single_mut_pointer => self.castPointer().?.pointee_type,
.array_u8_sentinel_0, .const_slice_u8 => Type.initTag(.u8),
.single_const_pointer_to_comptime_int => Type.initTag(.comptime_int),
+ };
+ }
+
+ /// Asserts that the type is an optional.
+ pub fn optionalChild(self: Type, buf: *Payload.Pointer) Type {
+ return switch (self.tag()) {
.optional => self.cast(Payload.Optional).?.child_type,
- .optional_single_mut_pointer => self.cast(Payload.OptionalSingleMutPointer).?.pointee_type,
- .optional_single_const_pointer => self.cast(Payload.OptionalSingleConstPointer).?.pointee_type,
+ .optional_single_mut_pointer => {
+ buf.* = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .pointee_type = self.castPointer().?.pointee_type
+ };
+ return Type.initPayload(&buf.base);
+ },
+ .optional_single_const_pointer => {
+ buf.* = .{
+ .base = .{ .tag = .single_const_pointer },
+ .pointee_type = self.castPointer().?.pointee_type
+ };
+ return Type.initPayload(&buf.base);
+ },
+ else => unreachable,
};
}
@@ -1901,13 +1956,8 @@ pub const Type = extern union {
ty = array.elem_type;
continue;
},
- .single_const_pointer => {
- const ptr = ty.cast(Payload.SingleConstPointer).?;
- ty = ptr.pointee_type;
- continue;
- },
- .single_mut_pointer => {
- const ptr = ty.cast(Payload.SingleMutPointer).?;
+ .single_const_pointer, .single_mut_pointer => {
+ const ptr = ty.castPointer().?;
ty = ptr.pointee_type;
continue;
},
@@ -2049,14 +2099,8 @@ pub const Type = extern union {
len: u64,
};
- pub const SingleConstPointer = struct {
- base: Payload = Payload{ .tag = .single_const_pointer },
-
- pointee_type: Type,
- };
-
- pub const SingleMutPointer = struct {
- base: Payload = Payload{ .tag = .single_mut_pointer },
+ pub const Pointer = struct {
+ base: Payload,
pointee_type: Type,
};
@@ -2086,18 +2130,6 @@ pub const Type = extern union {
child_type: Type,
};
-
- pub const OptionalSingleConstPointer = struct {
- base: Payload = Payload{ .tag = .optional_single_const_pointer },
-
- pointee_type: Type,
- };
-
- pub const OptionalSingleMutPointer = struct {
- base: Payload = Payload{ .tag = .optional_single_mut_pointer },
-
- pointee_type: Type,
- };
};
};
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 25b3b171b4..695cf0013f 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -2017,6 +2017,7 @@ const EmitZIR = struct {
.load => try self.emitUnOp(inst.src, new_body, inst.castTag(.load).?, .deref),
.ref => try self.emitUnOp(inst.src, new_body, inst.castTag(.ref).?, .ref),
.unwrap_optional => try self.emitUnOp(inst.src, new_body, inst.castTag(.unwrap_optional).?, .unwrap_optional_unsafe),
+ .wrap_optional => try self.emitCast(inst.src, new_body, inst.castTag(.wrap_optional).?, .as),
.add => try self.emitBinOp(inst.src, new_body, inst.castTag(.add).?, .add),
.sub => try self.emitBinOp(inst.src, new_body, inst.castTag(.sub).?, .sub),
@@ -2360,6 +2361,7 @@ const EmitZIR = struct {
}
},
.Optional => {
+ var buf: Type.Payload.Pointer = undefined;
const inst = try self.arena.allocator.create(Inst.UnOp);
inst.* = .{
.base = .{
@@ -2367,7 +2369,7 @@ const EmitZIR = struct {
.tag = .optional_type,
},
.positionals = .{
- .operand = (try self.emitType(src, ty.elemType())).inst,
+ .operand = (try self.emitType(src, ty.optionalChild(&buf))).inst,
},
.kw_args = .{},
};
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 5047c61687..281bc75b1d 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -317,7 +317,7 @@ fn analyzeInstRetPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.NoOp) InnerErr
fn analyzeInstRef(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst {
const operand = try resolveInst(mod, scope, inst.positionals.operand);
- const ptr_type = try mod.singleConstPtrType(scope, inst.base.src, operand.ty);
+ const ptr_type = try mod.singlePtrType(scope, inst.base.src, false, operand.ty);
if (operand.value()) |val| {
const ref_payload = try scope.arena().create(Value.Payload.RefVal);
@@ -358,7 +358,7 @@ fn analyzeInstEnsureResultNonError(mod: *Module, scope: *Scope, inst: *zir.Inst.
fn analyzeInstAlloc(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst {
const var_type = try resolveType(mod, scope, inst.positionals.operand);
- const ptr_type = try mod.singleMutPtrType(scope, inst.base.src, var_type);
+ const ptr_type = try mod.singlePtrType(scope, inst.base.src, true, var_type);
const b = try mod.requireRuntimeBlock(scope, inst.base.src);
return mod.addNoOp(b, inst.base.src, ptr_type, .alloc);
}
@@ -674,15 +674,17 @@ fn analyzeInstOptionalType(mod: *Module, scope: *Scope, optional: *zir.Inst.UnOp
return mod.constType(scope, optional.base.src, Type.initPayload(switch (child_type.tag()) {
.single_const_pointer => blk: {
- const payload = try scope.arena().create(Type.Payload.OptionalSingleConstPointer);
+ const payload = try scope.arena().create(Type.Payload.Pointer);
payload.* = .{
+ .base = .{ .tag = .optional_single_const_pointer },
.pointee_type = child_type.elemType(),
};
break :blk &payload.base;
},
.single_mut_pointer => blk: {
- const payload = try scope.arena().create(Type.Payload.OptionalSingleMutPointer);
+ const payload = try scope.arena().create(Type.Payload.Pointer);
payload.* = .{
+ .base = .{ .tag = .optional_single_mut_pointer },
.pointee_type = child_type.elemType(),
};
break :blk &payload.base;
@@ -705,11 +707,9 @@ fn analyzeInstUnwrapOptional(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp
return mod.fail(scope, unwrap.base.src, "expected optional type, found {}", .{operand.ty.elemType()});
}
- const child_type = operand.ty.elemType().elemType();
- const child_pointer = if (operand.ty.isConstPtr())
- try mod.singleConstPtrType(scope, unwrap.base.src, child_type)
- else
- try mod.singleMutPtrType(scope, unwrap.base.src, child_type);
+ var buf: Type.Payload.Pointer = undefined;
+ const child_type = try operand.ty.elemType().optionalChild(&buf).copy(scope.arena());
+ const child_pointer = try mod.singlePtrType(scope, unwrap.base.src, operand.ty.isConstPtr(), child_type);
if (operand.value()) |val| {
if (val.isNull()) {
@@ -913,8 +913,11 @@ fn analyzeInstElemPtr(mod: *Module, scope: *Scope, inst: *zir.Inst.ElemPtr) Inne
// required a larger index.
const elem_ptr = try array_ptr_val.elemPtr(scope.arena(), @intCast(usize, index_u64));
- const type_payload = try scope.arena().create(Type.Payload.SingleConstPointer);
- type_payload.* = .{ .pointee_type = array_ptr.ty.elemType().elemType() };
+ const type_payload = try scope.arena().create(Type.Payload.Pointer);
+ type_payload.* = .{
+ .base = .{ .tag = .single_const_pointer },
+ .pointee_type = array_ptr.ty.elemType().elemType(),
+ };
return mod.constInst(scope, inst.base.src, .{
.ty = Type.initPayload(&type_payload.base),
@@ -1279,13 +1282,13 @@ fn analyzeDeclVal(mod: *Module, scope: *Scope, inst: *zir.Inst.DeclVal) InnerErr
fn analyzeInstSingleConstPtrType(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst {
const elem_type = try resolveType(mod, scope, inst.positionals.operand);
- const ty = try mod.singleConstPtrType(scope, inst.base.src, elem_type);
+ const ty = try mod.singlePtrType(scope, inst.base.src, false, elem_type);
return mod.constType(scope, inst.base.src, ty);
}
fn analyzeInstSingleMutPtrType(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp) InnerError!*Inst {
const elem_type = try resolveType(mod, scope, inst.positionals.operand);
- const ty = try mod.singleMutPtrType(scope, inst.base.src, elem_type);
+ const ty = try mod.singlePtrType(scope, inst.base.src, true, elem_type);
return mod.constType(scope, inst.base.src, ty);
}
From db77b6b4e731670188e632657aa0aeadf9c87eb5 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Sat, 15 Aug 2020 19:17:50 +0300
Subject: [PATCH 114/153] stage2: astgen for if and while with optionals
---
src-self-hosted/astgen.zig | 83 +++++++++++++++++++++++++++++---------
1 file changed, 63 insertions(+), 20 deletions(-)
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index fd4c5c2864..3dad37487e 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -594,12 +594,55 @@ fn simpleBinOp(
return rlWrap(mod, scope, rl, result);
}
-fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) InnerError!*zir.Inst {
- if (if_node.payload) |payload| {
- return mod.failNode(scope, payload, "TODO implement astgen.IfExpr for optionals", .{});
+const CondKind = union(enum) {
+ bool,
+ optional: ?*zir.Inst,
+ err_union: ?*zir.Inst,
+
+ fn cond(self: *CondKind, mod: *Module, block_scope: *Scope.GenZIR, src: usize, cond_node: *ast.Node) !*zir.Inst {
+ switch (self.*) {
+ .bool => {
+ const bool_type = try addZIRInstConst(mod, &block_scope.base, src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.bool_type),
+ });
+ return try expr(mod, &block_scope.base, .{ .ty = bool_type }, cond_node);
+ },
+ .optional => {
+ const cond_ptr = try expr(mod, &block_scope.base, .lvalue, cond_node);
+ self.* = .{ .optional = cond_ptr };
+ const result = try addZIRUnOp(mod, &block_scope.base, src, .deref, cond_ptr);
+ return try addZIRUnOp(mod, &block_scope.base, src, .isnonnull, result);
+ },
+ .err_union => unreachable,
+ }
}
+
+ fn thenSubScope(self: CondKind, mod: *Module, then_scope: *Scope.GenZIR, payload_node: ?*ast.Node) !*Scope {
+ if (self == .bool) return &then_scope.base;
+
+ const payload = payload_node.?.castTag(.PointerPayload).?;
+ const is_ptr = payload.ptr_token != null;
+ const ident_node = payload.value_symbol.castTag(.Identifier).?;
+ const ident_name = try identifierTokenString(mod, &then_scope.base, ident_node.token);
+ if (mem.eql(u8, ident_name, "_")) {
+ if (is_ptr)
+ return mod.failTok(&then_scope.base, payload.ptr_token.?, "pointer modifier invalid on discard", .{});
+ return &then_scope.base;
+ }
+
+ return mod.failNode(&then_scope.base, payload.value_symbol, "TODO implement payload symbols", .{});
+ }
+};
+
+fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) InnerError!*zir.Inst {
+ var cond_kind: CondKind = .bool;
+ if (if_node.payload) |_| cond_kind = .{ .optional = null };
if (if_node.@"else") |else_node| {
if (else_node.payload) |payload| {
+ if (cond_kind != .optional) {
+ return mod.failNode(scope, payload, "else payload invalid on bool conditions", .{});
+ }
return mod.failNode(scope, payload, "TODO implement astgen.IfExpr for error unions", .{});
}
}
@@ -613,11 +656,7 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
const tree = scope.tree();
const if_src = tree.token_locs[if_node.if_token].start;
- const bool_type = try addZIRInstConst(mod, scope, if_src, .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.bool_type),
- });
- const cond = try expr(mod, &block_scope.base, .{ .ty = bool_type }, if_node.condition);
+ const cond = try cond_kind.cond(mod, &block_scope, if_src, if_node.condition);
const condbr = try addZIRInstSpecial(mod, &block_scope.base, if_src, zir.Inst.CondBr, .{
.condition = cond,
@@ -636,6 +675,9 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
};
defer then_scope.instructions.deinit(mod.gpa);
+ // declare payload to the then_scope
+ const then_sub_scope = try cond_kind.thenSubScope(mod, &then_scope, if_node.payload);
+
// Most result location types can be forwarded directly; however
// if we need to write to a pointer which has an inferred type,
// proper type inference requires peer type resolution on the if's
@@ -645,10 +687,10 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
.inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = block },
};
- const then_result = try expr(mod, &then_scope.base, branch_rl, if_node.body);
+ const then_result = try expr(mod, then_sub_scope, branch_rl, if_node.body);
if (!then_result.tag.isNoReturn()) {
const then_src = tree.token_locs[if_node.body.lastToken()].start;
- _ = try addZIRInst(mod, &then_scope.base, then_src, zir.Inst.Break, .{
+ _ = try addZIRInst(mod, then_sub_scope, then_src, zir.Inst.Break, .{
.block = block,
.operand = then_result,
}, .{});
@@ -690,11 +732,13 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
}
fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.While) InnerError!*zir.Inst {
- if (while_node.payload) |payload| {
- return mod.failNode(scope, payload, "TODO implement astgen.whileExpr for optionals", .{});
- }
+ var cond_kind: CondKind = .bool;
+ if (while_node.payload) |_| cond_kind = .{ .optional = null };
if (while_node.@"else") |else_node| {
if (else_node.payload) |payload| {
+ if (cond_kind != .optional) {
+ return mod.failNode(scope, payload, "else payload invalid on bool conditions", .{});
+ }
return mod.failNode(scope, payload, "TODO implement astgen.whileExpr for error unions", .{});
}
}
@@ -725,15 +769,11 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
const tree = scope.tree();
const while_src = tree.token_locs[while_node.while_token].start;
- const bool_type = try addZIRInstConst(mod, scope, while_src, .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.bool_type),
- });
const void_type = try addZIRInstConst(mod, scope, while_src, .{
.ty = Type.initTag(.type),
.val = Value.initTag(.void_type),
});
- const cond = try expr(mod, &continue_scope.base, .{ .ty = bool_type }, while_node.condition);
+ const cond = try cond_kind.cond(mod, &continue_scope, while_src, while_node.condition);
const condbr = try addZIRInstSpecial(mod, &continue_scope.base, while_src, zir.Inst.CondBr, .{
.condition = cond,
@@ -764,6 +804,9 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
};
defer then_scope.instructions.deinit(mod.gpa);
+ // declare payload to the then_scope
+ const then_sub_scope = try cond_kind.thenSubScope(mod, &then_scope, while_node.payload);
+
// Most result location types can be forwarded directly; however
// if we need to write to a pointer which has an inferred type,
// proper type inference requires peer type resolution on the while's
@@ -773,10 +816,10 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
.inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = while_block },
};
- const then_result = try expr(mod, &then_scope.base, branch_rl, while_node.body);
+ const then_result = try expr(mod, then_sub_scope, branch_rl, while_node.body);
if (!then_result.tag.isNoReturn()) {
const then_src = tree.token_locs[while_node.body.lastToken()].start;
- _ = try addZIRInst(mod, &then_scope.base, then_src, zir.Inst.Break, .{
+ _ = try addZIRInst(mod, then_sub_scope, then_src, zir.Inst.Break, .{
.block = cond_block,
.operand = then_result,
}, .{});
From ece4a2fc512bffba3845bf611370f5ea436c57b4 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Sat, 15 Aug 2020 19:39:11 +0300
Subject: [PATCH 115/153] stage2: astgen for if and while with error unions
---
src-self-hosted/astgen.zig | 56 +++++++++++++++++++++++++++---------
src-self-hosted/codegen.zig | 7 +++++
src-self-hosted/ir.zig | 2 ++
src-self-hosted/zir.zig | 13 +++++++++
src-self-hosted/zir_sema.zig | 11 +++++++
5 files changed, 75 insertions(+), 14 deletions(-)
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 3dad37487e..5335504ed2 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -614,11 +614,16 @@ const CondKind = union(enum) {
const result = try addZIRUnOp(mod, &block_scope.base, src, .deref, cond_ptr);
return try addZIRUnOp(mod, &block_scope.base, src, .isnonnull, result);
},
- .err_union => unreachable,
+ .err_union => {
+ const err_ptr = try expr(mod, &block_scope.base, .lvalue, cond_node);
+ self.* = .{ .err_union = err_ptr };
+ const result = try addZIRUnOp(mod, &block_scope.base, src, .deref, err_ptr);
+ return try addZIRUnOp(mod, &block_scope.base, src, .iserr, result);
+ },
}
}
- fn thenSubScope(self: CondKind, mod: *Module, then_scope: *Scope.GenZIR, payload_node: ?*ast.Node) !*Scope {
+ fn thenSubScope(self: CondKind, mod: *Module, then_scope: *Scope.GenZIR, src: usize, payload_node: ?*ast.Node) !*Scope {
if (self == .bool) return &then_scope.base;
const payload = payload_node.?.castTag(.PointerPayload).?;
@@ -633,6 +638,21 @@ const CondKind = union(enum) {
return mod.failNode(&then_scope.base, payload.value_symbol, "TODO implement payload symbols", .{});
}
+
+ fn elseSubScope(self: CondKind, mod: *Module, else_scope: *Scope.GenZIR, src: usize, payload_node: ?*ast.Node) !*Scope {
+ if (self != .err_union) return &else_scope.base;
+
+ const payload_ptr = try addZIRUnOp(mod, &else_scope.base, src, .unwrap_err_unsafe, self.err_union.?);
+
+ const payload = payload_node.?.castTag(.Payload).?;
+ const ident_node = payload.error_symbol.castTag(.Identifier).?;
+ const ident_name = try identifierTokenString(mod, &else_scope.base, ident_node.token);
+ if (mem.eql(u8, ident_name, "_")) {
+ return &else_scope.base;
+ }
+
+ return mod.failNode(&else_scope.base, payload.error_symbol, "TODO implement payload symbols", .{});
+ }
};
fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) InnerError!*zir.Inst {
@@ -643,7 +663,7 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
if (cond_kind != .optional) {
return mod.failNode(scope, payload, "else payload invalid on bool conditions", .{});
}
- return mod.failNode(scope, payload, "TODO implement astgen.IfExpr for error unions", .{});
+ cond_kind = .{ .err_union = null };
}
}
var block_scope: Scope.GenZIR = .{
@@ -667,6 +687,8 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
const block = try addZIRInstBlock(mod, scope, if_src, .{
.instructions = try block_scope.arena.dupe(*zir.Inst, block_scope.instructions.items),
});
+
+ const then_src = tree.token_locs[if_node.body.lastToken()].start;
var then_scope: Scope.GenZIR = .{
.parent = scope,
.decl = block_scope.decl,
@@ -676,7 +698,7 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
defer then_scope.instructions.deinit(mod.gpa);
// declare payload to the then_scope
- const then_sub_scope = try cond_kind.thenSubScope(mod, &then_scope, if_node.payload);
+ const then_sub_scope = try cond_kind.thenSubScope(mod, &then_scope, then_src, if_node.payload);
// Most result location types can be forwarded directly; however
// if we need to write to a pointer which has an inferred type,
@@ -689,7 +711,6 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
const then_result = try expr(mod, then_sub_scope, branch_rl, if_node.body);
if (!then_result.tag.isNoReturn()) {
- const then_src = tree.token_locs[if_node.body.lastToken()].start;
_ = try addZIRInst(mod, then_sub_scope, then_src, zir.Inst.Break, .{
.block = block,
.operand = then_result,
@@ -708,10 +729,13 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
defer else_scope.instructions.deinit(mod.gpa);
if (if_node.@"else") |else_node| {
- const else_result = try expr(mod, &else_scope.base, branch_rl, else_node.body);
+ const else_src = tree.token_locs[else_node.body.lastToken()].start;
+ // declare payload to the else_scope
+ const else_sub_scope = try cond_kind.elseSubScope(mod, &else_scope, else_src, else_node.payload);
+
+ const else_result = try expr(mod, else_sub_scope, branch_rl, else_node.body);
if (!else_result.tag.isNoReturn()) {
- const else_src = tree.token_locs[else_node.body.lastToken()].start;
- _ = try addZIRInst(mod, &else_scope.base, else_src, zir.Inst.Break, .{
+ _ = try addZIRInst(mod, else_sub_scope, else_src, zir.Inst.Break, .{
.block = block,
.operand = else_result,
}, .{});
@@ -739,7 +763,7 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
if (cond_kind != .optional) {
return mod.failNode(scope, payload, "else payload invalid on bool conditions", .{});
}
- return mod.failNode(scope, payload, "TODO implement astgen.whileExpr for error unions", .{});
+ cond_kind = .{ .err_union = null };
}
}
@@ -796,6 +820,8 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
const while_block = try addZIRInstBlock(mod, scope, while_src, .{
.instructions = try expr_scope.arena.dupe(*zir.Inst, expr_scope.instructions.items),
});
+
+ const then_src = tree.token_locs[while_node.body.lastToken()].start;
var then_scope: Scope.GenZIR = .{
.parent = &continue_scope.base,
.decl = continue_scope.decl,
@@ -805,7 +831,7 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
defer then_scope.instructions.deinit(mod.gpa);
// declare payload to the then_scope
- const then_sub_scope = try cond_kind.thenSubScope(mod, &then_scope, while_node.payload);
+ const then_sub_scope = try cond_kind.thenSubScope(mod, &then_scope, then_src, while_node.payload);
// Most result location types can be forwarded directly; however
// if we need to write to a pointer which has an inferred type,
@@ -818,7 +844,6 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
const then_result = try expr(mod, then_sub_scope, branch_rl, while_node.body);
if (!then_result.tag.isNoReturn()) {
- const then_src = tree.token_locs[while_node.body.lastToken()].start;
_ = try addZIRInst(mod, then_sub_scope, then_src, zir.Inst.Break, .{
.block = cond_block,
.operand = then_result,
@@ -837,10 +862,13 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
defer else_scope.instructions.deinit(mod.gpa);
if (while_node.@"else") |else_node| {
- const else_result = try expr(mod, &else_scope.base, branch_rl, else_node.body);
+ const else_src = tree.token_locs[else_node.body.lastToken()].start;
+ // declare payload to the else_scope
+ const else_sub_scope = try cond_kind.elseSubScope(mod, &else_scope, else_src, else_node.payload);
+
+ const else_result = try expr(mod, else_sub_scope, branch_rl, else_node.body);
if (!else_result.tag.isNoReturn()) {
- const else_src = tree.token_locs[else_node.body.lastToken()].start;
- _ = try addZIRInst(mod, &else_scope.base, else_src, zir.Inst.Break, .{
+ _ = try addZIRInst(mod, else_sub_scope, else_src, zir.Inst.Break, .{
.block = while_block,
.operand = else_result,
}, .{});
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 58b7c97f7b..d37145a275 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -671,6 +671,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.intcast => return self.genIntCast(inst.castTag(.intcast).?),
.isnonnull => return self.genIsNonNull(inst.castTag(.isnonnull).?),
.isnull => return self.genIsNull(inst.castTag(.isnull).?),
+ .iserr => return self.genIsErr(inst.castTag(.iserr).?),
.load => return self.genLoad(inst.castTag(.load).?),
.loop => return self.genLoop(inst.castTag(.loop).?),
.not => return self.genNot(inst.castTag(.not).?),
@@ -1391,6 +1392,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
}
+ fn genIsErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue {
+ switch (arch) {
+ else => return self.fail(inst.base.src, "TODO implement iserr for {}", .{self.target.cpu.arch}),
+ }
+ }
+
fn genLoop(self: *Self, inst: *ir.Inst.Loop) !MCValue {
// A loop is a setup to be able to jump back to the beginning.
const start_index = self.code.items.len;
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 4f83fa7030..91dfad45d7 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -68,6 +68,7 @@ pub const Inst = struct {
dbg_stmt,
isnonnull,
isnull,
+ iserr,
/// Read a value from a pointer.
load,
loop,
@@ -100,6 +101,7 @@ pub const Inst = struct {
.not,
.isnonnull,
.isnull,
+ .iserr,
.ptrtoint,
.floatcast,
.intcast,
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 695cf0013f..276db6d522 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -151,6 +151,8 @@ pub const Inst = struct {
isnonnull,
/// Return a boolean true if an optional is null. `x == null`
isnull,
+ /// Return a boolean true if value is an error
+ iserr,
/// A labeled block of code that loops forever. At the end of the body it is implied
/// to repeat; no explicit "repeat" instruction terminates loop bodies.
loop,
@@ -219,6 +221,10 @@ pub const Inst = struct {
unwrap_optional_safe,
/// Same as previous, but without safety checks. Used for orelse, if and while
unwrap_optional_unsafe,
+ /// Gets the payload of an error union
+ unwrap_err_safe,
+ /// Same as previous, but without safety checks. Used for orelse, if and while
+ unwrap_err_unsafe,
pub fn Type(tag: Tag) type {
return switch (tag) {
@@ -237,6 +243,7 @@ pub const Inst = struct {
.@"return",
.isnull,
.isnonnull,
+ .iserr,
.ptrtoint,
.alloc,
.ensure_result_used,
@@ -250,6 +257,8 @@ pub const Inst = struct {
.optional_type,
.unwrap_optional_safe,
.unwrap_optional_unsafe,
+ .unwrap_err_safe,
+ .unwrap_err_unsafe,
=> UnOp,
.add,
@@ -363,6 +372,7 @@ pub const Inst = struct {
.inttype,
.isnonnull,
.isnull,
+ .iserr,
.mod_rem,
.mul,
.mulwrap,
@@ -385,6 +395,8 @@ pub const Inst = struct {
.optional_type,
.unwrap_optional_safe,
.unwrap_optional_unsafe,
+ .unwrap_err_safe,
+ .unwrap_err_unsafe,
.ptr_type,
=> false,
@@ -2014,6 +2026,7 @@ const EmitZIR = struct {
.ptrtoint => try self.emitUnOp(inst.src, new_body, inst.castTag(.ptrtoint).?, .ptrtoint),
.isnull => try self.emitUnOp(inst.src, new_body, inst.castTag(.isnull).?, .isnull),
.isnonnull => try self.emitUnOp(inst.src, new_body, inst.castTag(.isnonnull).?, .isnonnull),
+ .iserr => try self.emitUnOp(inst.src, new_body, inst.castTag(.iserr).?, .iserr),
.load => try self.emitUnOp(inst.src, new_body, inst.castTag(.load).?, .deref),
.ref => try self.emitUnOp(inst.src, new_body, inst.castTag(.ref).?, .ref),
.unwrap_optional => try self.emitUnOp(inst.src, new_body, inst.castTag(.unwrap_optional).?, .unwrap_optional_unsafe),
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 281bc75b1d..362dbe7909 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -104,11 +104,14 @@ pub fn analyzeInst(mod: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!
.condbr => return analyzeInstCondBr(mod, scope, old_inst.castTag(.condbr).?),
.isnull => return analyzeInstIsNonNull(mod, scope, old_inst.castTag(.isnull).?, true),
.isnonnull => return analyzeInstIsNonNull(mod, scope, old_inst.castTag(.isnonnull).?, false),
+ .iserr => return analyzeInstIsErr(mod, scope, old_inst.castTag(.iserr).?, true),
.boolnot => return analyzeInstBoolNot(mod, scope, old_inst.castTag(.boolnot).?),
.typeof => return analyzeInstTypeOf(mod, scope, old_inst.castTag(.typeof).?),
.optional_type => return analyzeInstOptionalType(mod, scope, old_inst.castTag(.optional_type).?),
.unwrap_optional_safe => return analyzeInstUnwrapOptional(mod, scope, old_inst.castTag(.unwrap_optional_safe).?, true),
.unwrap_optional_unsafe => return analyzeInstUnwrapOptional(mod, scope, old_inst.castTag(.unwrap_optional_unsafe).?, false),
+ .unwrap_err_safe => return analyzeInstUnwrapErr(mod, scope, old_inst.castTag(.unwrap_err_safe).?, true),
+ .unwrap_err_unsafe => return analyzeInstUnwrapErr(mod, scope, old_inst.castTag(.unwrap_err_unsafe).?, false),
}
}
@@ -729,6 +732,10 @@ fn analyzeInstUnwrapOptional(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp
return mod.addUnOp(b, unwrap.base.src, child_pointer, .unwrap_optional, operand);
}
+fn analyzeInstUnwrapErr(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp, safety_check: bool) InnerError!*Inst {
+ return mod.fail(scope, unwrap.base.src, "TODO implement analyzeInstUnwrapErr", .{});
+}
+
fn analyzeInstFnType(mod: *Module, scope: *Scope, fntype: *zir.Inst.FnType) InnerError!*Inst {
const return_type = try resolveType(mod, scope, fntype.positionals.return_type);
@@ -1169,6 +1176,10 @@ fn analyzeInstIsNonNull(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, inver
return mod.analyzeIsNull(scope, inst.base.src, operand, invert_logic);
}
+fn analyzeInstIsErr(mod: *Module, scope: *Scope, inst: *zir.Inst.UnOp, invert_logic: bool) InnerError!*Inst {
+ return mod.fail(scope, inst.base.src, "TODO implement analyzeInstIsErr", .{});
+}
+
fn analyzeInstCondBr(mod: *Module, scope: *Scope, inst: *zir.Inst.CondBr) InnerError!*Inst {
const uncasted_cond = try resolveInst(mod, scope, inst.positionals.condition);
const cond = try mod.coerce(scope, Type.initTag(.bool), uncasted_cond);
From e23fc3905f65d65a743cb60783830b85199ed83b Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Sun, 16 Aug 2020 18:11:10 +0200
Subject: [PATCH 116/153] Add skeleton for MachO support in stage2
This commit adds an empty skeleton for MachO format support in stage2.
---
src-self-hosted/Module.zig | 5 ++
src-self-hosted/link.zig | 114 ++++++++++++++++++++++++++++++++++++-
2 files changed, 117 insertions(+), 2 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 6272ef6d98..b23d446925 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -1568,6 +1568,9 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
// in `Decl` to notice that the line number did not change.
self.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
},
+ .macho => {
+ // TODO Implement for MachO
+ },
.c => {},
}
}
@@ -1776,10 +1779,12 @@ fn allocateNewDecl(
.contents_hash = contents_hash,
.link = switch (self.bin_file.tag) {
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
+ .macho => .{ .macho = link.File.MachO.TextBlock.empty },
.c => .{ .c = {} },
},
.fn_link = switch (self.bin_file.tag) {
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
+ .macho => .{ .macho = link.File.MachO.SrcFn.empty },
.c => .{ .c = {} },
},
.generation = 0,
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 76d889c581..fcdf556065 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -44,11 +44,13 @@ pub const Options = struct {
pub const File = struct {
pub const LinkBlock = union {
elf: Elf.TextBlock,
+ macho: MachO.TextBlock,
c: void,
};
pub const LinkFn = union {
elf: Elf.SrcFn,
+ macho: MachO.SrcFn,
c: void,
};
@@ -66,7 +68,7 @@ pub const File = struct {
.unknown => unreachable,
.coff => return error.TODOImplementCoff,
.elf => return Elf.openPath(allocator, dir, sub_path, options),
- .macho => return error.TODOImplementMacho,
+ .macho => return MachO.openPath(allocator, dir, sub_path, options),
.wasm => return error.TODOImplementWasm,
.c => return C.openPath(allocator, dir, sub_path, options),
.hex => return error.TODOImplementHex,
@@ -83,7 +85,7 @@ pub const File = struct {
pub fn makeWritable(base: *File, dir: fs.Dir, sub_path: []const u8) !void {
switch (base.tag) {
- .elf => {
+ .elf, .macho => {
if (base.file != null) return;
base.file = try dir.createFile(sub_path, .{
.truncate = false,
@@ -106,6 +108,7 @@ pub const File = struct {
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
+ .macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
}
}
@@ -113,6 +116,7 @@ pub const File = struct {
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
+ .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c => {},
}
}
@@ -120,6 +124,7 @@ pub const File = struct {
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
switch (base.tag) {
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
+ .macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
.c => {},
}
}
@@ -128,6 +133,7 @@ pub const File = struct {
if (base.file) |f| f.close();
switch (base.tag) {
.elf => @fieldParentPtr(Elf, "base", base).deinit(),
+ .macho => @fieldParentPtr(MachO, "base", base).deinit(),
.c => @fieldParentPtr(C, "base", base).deinit(),
}
}
@@ -139,6 +145,11 @@ pub const File = struct {
parent.deinit();
base.allocator.destroy(parent);
},
+ .macho => {
+ const parent = @fieldParentPtr(MachO, "base", base);
+ parent.deinit();
+ base.allocator.destroy(parent);
+ },
.c => {
const parent = @fieldParentPtr(C, "base", base);
parent.deinit();
@@ -153,6 +164,7 @@ pub const File = struct {
try switch (base.tag) {
.elf => @fieldParentPtr(Elf, "base", base).flush(),
+ .macho => @fieldParentPtr(MachO, "base", base).flush(),
.c => @fieldParentPtr(C, "base", base).flush(),
};
}
@@ -160,6 +172,7 @@ pub const File = struct {
pub fn freeDecl(base: *File, decl: *Module.Decl) void {
switch (base.tag) {
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
+ .macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
.c => unreachable,
}
}
@@ -167,6 +180,7 @@ pub const File = struct {
pub fn errorFlags(base: *File) ErrorFlags {
return switch (base.tag) {
.elf => @fieldParentPtr(Elf, "base", base).error_flags,
+ .macho => @fieldParentPtr(MachO, "base", base).error_flags,
.c => return .{ .no_entry_point_found = false },
};
}
@@ -180,12 +194,14 @@ pub const File = struct {
) !void {
switch (base.tag) {
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
+ .macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
.c => return {},
}
}
pub const Tag = enum {
elf,
+ macho,
c,
};
@@ -2815,6 +2831,100 @@ pub const File = struct {
}
};
+
+ pub const MachO = struct {
+ pub const base_tag: Tag = .macho;
+
+ base: File,
+
+ ptr_width: enum { p32, p64 },
+
+ error_flags: ErrorFlags = ErrorFlags{},
+
+ pub const TextBlock = struct {
+ pub const empty = TextBlock{};
+ };
+
+ pub const SrcFn = struct {
+ pub const empty = SrcFn{};
+ };
+
+ pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File {
+ assert(options.object_format == .macho);
+
+ const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = determineMode(options) });
+ errdefer file.close();
+
+ var macho_file = try allocator.create(MachO);
+ errdefer allocator.destroy(macho_file);
+
+ macho_file.* = openFile(allocator, file, options) catch |err| switch (err) {
+ error.IncrFailed => try createFile(allocator, file, options),
+ else => |e| return e,
+ };
+
+ return &macho_file.base;
+ }
+
+ /// Returns error.IncrFailed if incremental update could not be performed.
+ fn openFile(allocator: *Allocator, file: fs.File, options: Options) !MachO {
+ switch (options.output_mode) {
+ .Exe => {},
+ .Obj => {},
+ .Lib => return error.IncrFailed,
+ }
+ var self: MachO = .{
+ .base = .{
+ .file = file,
+ .tag = .macho,
+ .options = options,
+ .allocator = allocator,
+ },
+ .ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
+ 32 => .p32,
+ 64 => .p64,
+ else => return error.UnsupportedELFArchitecture,
+ },
+ };
+ errdefer self.deinit();
+
+ // TODO implement reading the macho file
+ return error.IncrFailed;
+ //try self.populateMissingMetadata();
+ //return self;
+ }
+
+ /// Truncates the existing file contents and overwrites the contents.
+ /// Returns an error if `file` is not already open with +read +write +seek abilities.
+ fn createFile(allocator: *Allocator, file: fs.File, options: Options) !MachO {
+ switch (options.output_mode) {
+ .Exe => return error.TODOImplementWritingMachOExeFiles,
+ .Obj => return error.TODOImplementWritingMachOObjFiles,
+ .Lib => return error.TODOImplementWritingLibFiles,
+ }
+ }
+
+ /// Commit pending changes and write headers.
+ pub fn flush(self: *MachO) !void {}
+
+ pub fn deinit(self: *MachO) void {}
+
+ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {}
+
+ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {}
+
+ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {}
+
+ /// Must be called only after a successful call to `updateDecl`.
+ pub fn updateDeclExports(
+ self: *MachO,
+ module: *Module,
+ decl: *const Module.Decl,
+ exports: []const *Module.Export,
+ ) !void {}
+
+ pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {}
+ };
};
/// Saturating multiplication
From 5cb96681d92e7a410577215ff057e459f10304dc Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Mon, 17 Aug 2020 10:19:45 +0200
Subject: [PATCH 117/153] Move Mach-O to link/MachO.zig submodule
Remove `ptr_width` since as of Catalina, all apps are 64-bit only.
Signed-off-by: Jakub Konka
---
src-self-hosted/link.zig | 99 ++--------------------------------
src-self-hosted/link/MachO.zig | 93 ++++++++++++++++++++++++++++++++
2 files changed, 96 insertions(+), 96 deletions(-)
create mode 100644 src-self-hosted/link/MachO.zig
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index fcdf556065..17ec91fa5f 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -158,6 +158,7 @@ pub const File = struct {
}
}
+ /// Commit pending changes and write headers.
pub fn flush(base: *File) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1022,7 +1023,6 @@ pub const File = struct {
pub const abbrev_pad1 = 5;
pub const abbrev_parameter = 6;
- /// Commit pending changes and write headers.
pub fn flush(self: *Elf) !void {
const target_endian = self.base.options.target.cpu.arch.endian();
const foreign_endian = target_endian != std.Target.current.cpu.arch.endian();
@@ -2330,7 +2330,6 @@ pub const File = struct {
try self.pwriteDbgInfoNops(prev_padding_size, dbg_info_buf, next_padding_size, trailing_zero, file_pos);
}
- /// Must be called only after a successful call to `updateDecl`.
pub fn updateDeclExports(
self: *Elf,
module: *Module,
@@ -2832,99 +2831,7 @@ pub const File = struct {
};
- pub const MachO = struct {
- pub const base_tag: Tag = .macho;
-
- base: File,
-
- ptr_width: enum { p32, p64 },
-
- error_flags: ErrorFlags = ErrorFlags{},
-
- pub const TextBlock = struct {
- pub const empty = TextBlock{};
- };
-
- pub const SrcFn = struct {
- pub const empty = SrcFn{};
- };
-
- pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File {
- assert(options.object_format == .macho);
-
- const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = determineMode(options) });
- errdefer file.close();
-
- var macho_file = try allocator.create(MachO);
- errdefer allocator.destroy(macho_file);
-
- macho_file.* = openFile(allocator, file, options) catch |err| switch (err) {
- error.IncrFailed => try createFile(allocator, file, options),
- else => |e| return e,
- };
-
- return &macho_file.base;
- }
-
- /// Returns error.IncrFailed if incremental update could not be performed.
- fn openFile(allocator: *Allocator, file: fs.File, options: Options) !MachO {
- switch (options.output_mode) {
- .Exe => {},
- .Obj => {},
- .Lib => return error.IncrFailed,
- }
- var self: MachO = .{
- .base = .{
- .file = file,
- .tag = .macho,
- .options = options,
- .allocator = allocator,
- },
- .ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
- 32 => .p32,
- 64 => .p64,
- else => return error.UnsupportedELFArchitecture,
- },
- };
- errdefer self.deinit();
-
- // TODO implement reading the macho file
- return error.IncrFailed;
- //try self.populateMissingMetadata();
- //return self;
- }
-
- /// Truncates the existing file contents and overwrites the contents.
- /// Returns an error if `file` is not already open with +read +write +seek abilities.
- fn createFile(allocator: *Allocator, file: fs.File, options: Options) !MachO {
- switch (options.output_mode) {
- .Exe => return error.TODOImplementWritingMachOExeFiles,
- .Obj => return error.TODOImplementWritingMachOObjFiles,
- .Lib => return error.TODOImplementWritingLibFiles,
- }
- }
-
- /// Commit pending changes and write headers.
- pub fn flush(self: *MachO) !void {}
-
- pub fn deinit(self: *MachO) void {}
-
- pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {}
-
- pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {}
-
- pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {}
-
- /// Must be called only after a successful call to `updateDecl`.
- pub fn updateDeclExports(
- self: *MachO,
- module: *Module,
- decl: *const Module.Decl,
- exports: []const *Module.Export,
- ) !void {}
-
- pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {}
- };
+ pub const MachO = @import("link/MachO.zig");
};
/// Saturating multiplication
@@ -2965,7 +2872,7 @@ fn sectHeaderTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr {
};
}
-fn determineMode(options: Options) fs.File.Mode {
+pub fn determineMode(options: Options) fs.File.Mode {
// On common systems with a 0o022 umask, 0o777 will still result in a file created
// with 0o755 permissions, but it works appropriately if the system is configured
// more leniently. As another data point, C's fopen seems to open files with the
diff --git a/src-self-hosted/link/MachO.zig b/src-self-hosted/link/MachO.zig
new file mode 100644
index 0000000000..1b6c395ea0
--- /dev/null
+++ b/src-self-hosted/link/MachO.zig
@@ -0,0 +1,93 @@
+const MachO = @This();
+
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const fs = std.fs;
+
+const Module = @import("../Module.zig");
+const link = @import("../link.zig");
+const File = link.File;
+
+pub const base_tag: Tag = File.Tag.macho;
+
+base: File,
+
+error_flags: File.ErrorFlags = File.ErrorFlags{},
+
+pub const TextBlock = struct {
+ pub const empty = TextBlock{};
+};
+
+pub const SrcFn = struct {
+ pub const empty = SrcFn{};
+};
+
+pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*File {
+ assert(options.object_format == .macho);
+
+ const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
+ errdefer file.close();
+
+ var macho_file = try allocator.create(MachO);
+ errdefer allocator.destroy(macho_file);
+
+ macho_file.* = openFile(allocator, file, options) catch |err| switch (err) {
+ error.IncrFailed => try createFile(allocator, file, options),
+ else => |e| return e,
+ };
+
+ return &macho_file.base;
+}
+
+/// Returns error.IncrFailed if incremental update could not be performed.
+fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO {
+ switch (options.output_mode) {
+ .Exe => {},
+ .Obj => {},
+ .Lib => return error.IncrFailed,
+ }
+ var self: MachO = .{
+ .base = .{
+ .file = file,
+ .tag = .macho,
+ .options = options,
+ .allocator = allocator,
+ },
+ };
+ errdefer self.deinit();
+
+ // TODO implement reading the macho file
+ return error.IncrFailed;
+ //try self.populateMissingMetadata();
+ //return self;
+}
+
+/// Truncates the existing file contents and overwrites the contents.
+/// Returns an error if `file` is not already open with +read +write +seek abilities.
+fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !MachO {
+ switch (options.output_mode) {
+ .Exe => return error.TODOImplementWritingMachOExeFiles,
+ .Obj => return error.TODOImplementWritingMachOObjFiles,
+ .Lib => return error.TODOImplementWritingLibFiles,
+ }
+}
+
+pub fn flush(self: *MachO) !void {}
+
+pub fn deinit(self: *MachO) void {}
+
+pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {}
+
+pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {}
+
+pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {}
+
+pub fn updateDeclExports(
+ self: *MachO,
+ module: *Module,
+ decl: *const Module.Decl,
+ exports: []const *Module.Export,
+) !void {}
+
+pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {}
From 13b2f1e90ba9e373c655fc881836209c4fa381fa Mon Sep 17 00:00:00 2001
From: Vexu
Date: Sun, 16 Aug 2020 21:51:05 +0300
Subject: [PATCH 118/153] address review feedback
---
src-self-hosted/Module.zig | 1 -
src-self-hosted/astgen.zig | 25 ++++++++++++-------------
src-self-hosted/codegen.zig | 4 ++--
src-self-hosted/type.zig | 28 +++++++++++++++++++++++++---
src-self-hosted/zir_sema.zig | 3 +--
5 files changed, 40 insertions(+), 21 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 2584066499..8eabe97c31 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -2433,7 +2433,6 @@ fn wrapOptional(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*In
return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
}
- // TODO how do we get the result location
const b = try self.requireRuntimeBlock(scope, inst.src);
return self.addUnOp(b, inst.src, dest_type, .wrap_optional, inst);
}
diff --git a/src-self-hosted/astgen.zig b/src-self-hosted/astgen.zig
index 5335504ed2..2c772f3887 100644
--- a/src-self-hosted/astgen.zig
+++ b/src-self-hosted/astgen.zig
@@ -453,8 +453,6 @@ fn boolNot(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) InnerErr
}
fn addressOf(mod: *Module, scope: *Scope, node: *ast.Node.SimplePrefixOp) InnerError!*zir.Inst {
- const tree = scope.tree();
- const src = tree.token_locs[node.op_token].start;
return expr(mod, scope, .lvalue, node.rhs);
}
@@ -490,8 +488,6 @@ fn ptrType(mod: *Module, scope: *Scope, node: *ast.Node.PtrType) InnerError!*zir
.single_const_ptr_type, child_type);
}
- const child_type = try expr(mod, scope, .{ .ty = meta_type }, node.rhs);
-
var kw_args: std.meta.fieldInfo(zir.Inst.PtrType, "kw_args").field_type = .{};
kw_args.@"allowzero" = node.ptr_info.allowzero_token != null;
if (node.ptr_info.align_info) |some| {
@@ -504,7 +500,12 @@ fn ptrType(mod: *Module, scope: *Scope, node: *ast.Node.PtrType) InnerError!*zir
kw_args.@"const" = node.ptr_info.const_token != null;
kw_args.@"volatile" = node.ptr_info.volatile_token != null;
if (node.ptr_info.sentinel) |some| {
- kw_args.sentinel = try expr(mod, scope, .{ .ty = child_type }, some);
+ kw_args.sentinel = try expr(mod, scope, .none, some);
+ }
+
+ const child_type = try expr(mod, scope, .{ .ty = meta_type }, node.rhs);
+ if (kw_args.sentinel) |some| {
+ kw_args.sentinel = try addZIRBinOp(mod, scope, some.src, .as, child_type, some);
}
return addZIRInst(mod, scope, src, zir.Inst.PtrType, .{ .child_type = child_type }, kw_args);
@@ -629,7 +630,9 @@ const CondKind = union(enum) {
const payload = payload_node.?.castTag(.PointerPayload).?;
const is_ptr = payload.ptr_token != null;
const ident_node = payload.value_symbol.castTag(.Identifier).?;
- const ident_name = try identifierTokenString(mod, &then_scope.base, ident_node.token);
+
+ // This intentionally does not support @"_" syntax.
+ const ident_name = then_scope.base.tree().tokenSlice(ident_node.token);
if (mem.eql(u8, ident_name, "_")) {
if (is_ptr)
return mod.failTok(&then_scope.base, payload.ptr_token.?, "pointer modifier invalid on discard", .{});
@@ -646,7 +649,9 @@ const CondKind = union(enum) {
const payload = payload_node.?.castTag(.Payload).?;
const ident_node = payload.error_symbol.castTag(.Identifier).?;
- const ident_name = try identifierTokenString(mod, &else_scope.base, ident_node.token);
+
+ // This intentionally does not support @"_" syntax.
+ const ident_name = else_scope.base.tree().tokenSlice(ident_node.token);
if (mem.eql(u8, ident_name, "_")) {
return &else_scope.base;
}
@@ -660,9 +665,6 @@ fn ifExpr(mod: *Module, scope: *Scope, rl: ResultLoc, if_node: *ast.Node.If) Inn
if (if_node.payload) |_| cond_kind = .{ .optional = null };
if (if_node.@"else") |else_node| {
if (else_node.payload) |payload| {
- if (cond_kind != .optional) {
- return mod.failNode(scope, payload, "else payload invalid on bool conditions", .{});
- }
cond_kind = .{ .err_union = null };
}
}
@@ -760,9 +762,6 @@ fn whileExpr(mod: *Module, scope: *Scope, rl: ResultLoc, while_node: *ast.Node.W
if (while_node.payload) |_| cond_kind = .{ .optional = null };
if (while_node.@"else") |else_node| {
if (else_node.payload) |payload| {
- if (cond_kind != .optional) {
- return mod.failNode(scope, payload, "else payload invalid on bool conditions", .{});
- }
cond_kind = .{ .err_union = null };
}
}
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index d37145a275..f49d3b41fd 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -2052,7 +2052,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return mcv;
}
- fn genTypedValue(self: *Self, src: usize, typed_value: TypedValue) error{ CodegenFail, OutOfMemory }!MCValue {
+ fn genTypedValue(self: *Self, src: usize, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
@@ -2199,7 +2199,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
};
}
- fn fail(self: *Self, src: usize, comptime format: []const u8, args: anytype) error{ CodegenFail, OutOfMemory } {
+ fn fail(self: *Self, src: usize, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
assert(self.err_msg == null);
self.err_msg = try ErrorMsg.create(self.bin_file.base.allocator, src, format, args);
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index cc3f665c4c..faba784f90 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -362,7 +362,7 @@ pub const Type = extern union {
.single_mut_pointer,
.optional_single_mut_pointer,
.optional_single_const_pointer,
- => return self.copyPayloadSingleField(allocator, Payload.Pointer, "pointee_type"),
+ => return self.copyPayloadSingleField(allocator, Payload.Pointer, "pointee_type"),
}
}
@@ -1086,14 +1086,14 @@ pub const Type = extern union {
.optional_single_mut_pointer => {
buf.* = .{
.base = .{ .tag = .single_mut_pointer },
- .pointee_type = self.castPointer().?.pointee_type
+ .pointee_type = self.castPointer().?.pointee_type,
};
return Type.initPayload(&buf.base);
},
.optional_single_const_pointer => {
buf.* = .{
.base = .{ .tag = .single_const_pointer },
- .pointee_type = self.castPointer().?.pointee_type
+ .pointee_type = self.castPointer().?.pointee_type,
};
return Type.initPayload(&buf.base);
},
@@ -1101,6 +1101,28 @@ pub const Type = extern union {
};
}
+ /// Asserts that the type is an optional.
+ /// Same as `optionalChild` but allocates the buffer if needed.
+ pub fn optionalChildAlloc(self: Type, allocator: *Allocator) !Type {
+ return switch (self.tag()) {
+ .optional => self.cast(Payload.Optional).?.child_type,
+ .optional_single_mut_pointer, .optional_single_const_pointer => {
+ const payload = try allocator.create(Payload.Pointer);
+ payload.* = .{
+ .base = .{
+ .tag = if (self.tag() == .optional_single_const_pointer)
+ .single_const_pointer
+ else
+ .single_mut_pointer,
+ },
+ .pointee_type = self.castPointer().?.pointee_type,
+ };
+ return Type.initPayload(&payload.base);
+ },
+ else => unreachable,
+ };
+ }
+
/// Asserts the type is an array or vector.
pub fn arrayLen(self: Type) u64 {
return switch (self.tag()) {
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 362dbe7909..31ffb2cc0d 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -710,8 +710,7 @@ fn analyzeInstUnwrapOptional(mod: *Module, scope: *Scope, unwrap: *zir.Inst.UnOp
return mod.fail(scope, unwrap.base.src, "expected optional type, found {}", .{operand.ty.elemType()});
}
- var buf: Type.Payload.Pointer = undefined;
- const child_type = try operand.ty.elemType().optionalChild(&buf).copy(scope.arena());
+ const child_type = try operand.ty.elemType().optionalChildAlloc(scope.arena());
const child_pointer = try mod.singlePtrType(scope, unwrap.base.src, operand.ty.isConstPtr(), child_type);
if (operand.value()) |val| {
From 65185016f15ca69363113f85537542b0bdebe33f Mon Sep 17 00:00:00 2001
From: xackus <14938807+xackus@users.noreply.github.com>
Date: Sun, 31 May 2020 19:15:21 +0200
Subject: [PATCH 119/153] stage1: fix non-exhaustive enums with one field
---
src/analyze.cpp | 9 ++++++++-
src/ir.cpp | 11 +++++++----
test/stage1/behavior/enum.zig | 37 +++++++++++++++++++++++++++++++++++
3 files changed, 52 insertions(+), 5 deletions(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 6a4a2ec052..4920571438 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -7303,7 +7303,14 @@ void render_const_value(CodeGen *g, Buf *buf, ZigValue *const_val) {
case ZigTypeIdEnum:
{
TypeEnumField *field = find_enum_field_by_tag(type_entry, &const_val->data.x_enum_tag);
- buf_appendf(buf, "%s.%s", buf_ptr(&type_entry->name), buf_ptr(field->name));
+ if(field != nullptr){
+ buf_appendf(buf, "%s.%s", buf_ptr(&type_entry->name), buf_ptr(field->name));
+ } else {
+ // untagged value in a non-exhaustive enum
+ buf_appendf(buf, "%s.(", buf_ptr(&type_entry->name));
+ bigint_append_buf(buf, &const_val->data.x_enum_tag, 10);
+ buf_appendf(buf, ")");
+ }
return;
}
case ZigTypeIdErrorUnion:
diff --git a/src/ir.cpp b/src/ir.cpp
index 40be4e147b..f56a0681df 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -14096,7 +14096,8 @@ static IrInstGen *ir_analyze_enum_to_int(IrAnalyze *ira, IrInst *source_instr, I
// If there is only one possible tag, then we know at comptime what it is.
if (enum_type->data.enumeration.layout == ContainerLayoutAuto &&
- enum_type->data.enumeration.src_field_count == 1)
+ enum_type->data.enumeration.src_field_count == 1 &&
+ !enum_type->data.enumeration.non_exhaustive)
{
IrInstGen *result = ir_const(ira, source_instr, tag_type);
init_const_bigint(result->value, tag_type,
@@ -14136,7 +14137,8 @@ static IrInstGen *ir_analyze_union_to_tag(IrAnalyze *ira, IrInst* source_instr,
// If there is only 1 possible tag, then we know at comptime what it is.
if (wanted_type->data.enumeration.layout == ContainerLayoutAuto &&
- wanted_type->data.enumeration.src_field_count == 1)
+ wanted_type->data.enumeration.src_field_count == 1 &&
+ !wanted_type->data.enumeration.non_exhaustive) // TODO are non-exhaustive union tag types supposed to be allowed?
{
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
result->value->special = ConstValSpecialStatic;
@@ -23814,7 +23816,8 @@ static IrInstGen *ir_analyze_instruction_switch_target(IrAnalyze *ira,
bigint_init_bigint(&result->value->data.x_enum_tag, &pointee_val->data.x_union.tag);
return result;
}
- if (tag_type->data.enumeration.src_field_count == 1) {
+ // TODO are non-exhaustive union tag types supposed to be allowed?
+ if (tag_type->data.enumeration.src_field_count == 1 && !tag_type->data.enumeration.non_exhaustive) {
IrInstGen *result = ir_const(ira, &switch_target_instruction->base.base, tag_type);
TypeEnumField *only_field = &tag_type->data.enumeration.fields[0];
bigint_init_bigint(&result->value->data.x_enum_tag, &only_field->value);
@@ -23829,7 +23832,7 @@ static IrInstGen *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case ZigTypeIdEnum: {
if ((err = type_resolve(ira->codegen, target_type, ResolveStatusZeroBitsKnown)))
return ira->codegen->invalid_inst_gen;
- if (target_type->data.enumeration.src_field_count == 1) {
+ if (target_type->data.enumeration.src_field_count == 1 && !target_type->data.enumeration.non_exhaustive) {
TypeEnumField *only_field = &target_type->data.enumeration.fields[0];
IrInstGen *result = ir_const(ira, &switch_target_instruction->base.base, target_type);
bigint_init_bigint(&result->value->data.x_enum_tag, &only_field->value);
diff --git a/test/stage1/behavior/enum.zig b/test/stage1/behavior/enum.zig
index f569264520..1d424d6e39 100644
--- a/test/stage1/behavior/enum.zig
+++ b/test/stage1/behavior/enum.zig
@@ -85,6 +85,43 @@ test "empty non-exhaustive enum" {
comptime S.doTheTest(42);
}
+test "single field non-exhaustive enum" {
+ const S = struct {
+ const E = enum(u8) {
+ a,
+ _,
+ };
+ fn doTheTest(y: u8) void {
+ var e: E = .a;
+ expect(switch (e) {
+ .a => true,
+ _ => false,
+ });
+ e = @intToEnum(E, 12);
+ expect(switch (e) {
+ .a => false,
+ _ => true,
+ });
+
+ expect(switch (e) {
+ .a => false,
+ else => true,
+ });
+ e = .a;
+ expect(switch (e) {
+ .a => true,
+ else => false,
+ });
+
+ expect(@enumToInt(@intToEnum(E, y)) == y);
+ expect(@typeInfo(E).Enum.fields.len == 1);
+ expect(@typeInfo(E).Enum.is_exhaustive == false);
+ }
+ };
+ S.doTheTest(23);
+ comptime S.doTheTest(23);
+}
+
test "enum type" {
const foo1 = Foo{ .One = 13 };
const foo2 = Foo{
From 1e835e0fccfd82deb31922ffdee821b0b418709f Mon Sep 17 00:00:00 2001
From: Vexu
Date: Mon, 27 Jul 2020 18:04:08 +0300
Subject: [PATCH 120/153] disallow '_' prong when switching on non-exhaustive
tagged union
A tagged union cannot legally be initialized to an invalid enumeration value
---
src/ir.cpp | 16 +++++++++++-----
test/compile_errors.zig | 32 ++++++++++++++++++++++++++++++--
test/stage1/behavior/union.zig | 23 +++++++++++++++++++++++
3 files changed, 64 insertions(+), 7 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index f56a0681df..347cae7e79 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -14138,7 +14138,7 @@ static IrInstGen *ir_analyze_union_to_tag(IrAnalyze *ira, IrInst* source_instr,
// If there is only 1 possible tag, then we know at comptime what it is.
if (wanted_type->data.enumeration.layout == ContainerLayoutAuto &&
wanted_type->data.enumeration.src_field_count == 1 &&
- !wanted_type->data.enumeration.non_exhaustive) // TODO are non-exhaustive union tag types supposed to be allowed?
+ !wanted_type->data.enumeration.non_exhaustive)
{
IrInstGen *result = ir_const(ira, source_instr, wanted_type);
result->value->special = ConstValSpecialStatic;
@@ -23816,7 +23816,6 @@ static IrInstGen *ir_analyze_instruction_switch_target(IrAnalyze *ira,
bigint_init_bigint(&result->value->data.x_enum_tag, &pointee_val->data.x_union.tag);
return result;
}
- // TODO are non-exhaustive union tag types supposed to be allowed?
if (tag_type->data.enumeration.src_field_count == 1 && !tag_type->data.enumeration.non_exhaustive) {
IrInstGen *result = ir_const(ira, &switch_target_instruction->base.base, tag_type);
TypeEnumField *only_field = &tag_type->data.enumeration.fields[0];
@@ -28839,6 +28838,10 @@ static IrInstGen *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira,
if (type_is_invalid(switch_type))
return ira->codegen->invalid_inst_gen;
+ ZigValue *original_value = ((IrInstSrcSwitchTarget *)(instruction->target_value))->target_value_ptr->child->value;
+ bool target_is_originally_union = original_value->type->id == ZigTypeIdPointer &&
+ original_value->type->data.pointer.child_type->id == ZigTypeIdUnion;
+
if (switch_type->id == ZigTypeIdEnum) {
HashMap field_prev_uses = {};
field_prev_uses.init(switch_type->data.enumeration.src_field_count);
@@ -28894,9 +28897,12 @@ static IrInstGen *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira,
}
}
if (instruction->have_underscore_prong) {
- if (!switch_type->data.enumeration.non_exhaustive){
+ if (!switch_type->data.enumeration.non_exhaustive) {
ir_add_error(ira, &instruction->base.base,
- buf_sprintf("switch on non-exhaustive enum has `_` prong"));
+ buf_sprintf("switch on exhaustive enum has `_` prong"));
+ } else if (target_is_originally_union) {
+ ir_add_error(ira, &instruction->base.base,
+ buf_sprintf("`_` prong not allowed when switching on tagged union"));
}
for (uint32_t i = 0; i < switch_type->data.enumeration.src_field_count; i += 1) {
TypeEnumField *enum_field = &switch_type->data.enumeration.fields[i];
@@ -28911,7 +28917,7 @@ static IrInstGen *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira,
}
}
} else if (instruction->else_prong == nullptr) {
- if (switch_type->data.enumeration.non_exhaustive) {
+ if (switch_type->data.enumeration.non_exhaustive && !target_is_originally_union) {
ir_add_error(ira, &instruction->base.base,
buf_sprintf("switch on non-exhaustive enum must include `else` or `_` prong"));
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 70a9c47998..6c123925da 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -51,6 +51,23 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:17:23: error: cannot adjust alignment of zero sized type 'fn(u32) anytype'",
});
+ cases.addTest("switching with exhaustive enum has '_' prong ",
+ \\const E = enum{
+ \\ a,
+ \\ b,
+ \\};
+ \\pub export fn entry() void {
+ \\ var e: E = .b;
+ \\ switch (e) {
+ \\ .a => {},
+ \\ .b => {},
+ \\ _ => {},
+ \\ }
+ \\}
+ , &[_][]const u8{
+ "tmp.zig:7:5: error: switch on exhaustive enum has `_` prong",
+ });
+
cases.addTest("invalid pointer with @Type",
\\export fn entry() void {
\\ _ = @Type(.{ .Pointer = .{
@@ -564,6 +581,10 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ b,
\\ _,
\\};
+ \\const U = union(E) {
+ \\ a: i32,
+ \\ b: u32,
+ \\};
\\pub export fn entry() void {
\\ var e: E = .b;
\\ switch (e) { // error: switch not handling the tag `b`
@@ -574,10 +595,17 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ .a => {},
\\ .b => {},
\\ }
+ \\ var u = U{.a = 2};
+ \\ switch (u) { // error: `_` prong not allowed when switching on tagged union
+ \\ .a => {},
+ \\ .b => {},
+ \\ _ => {},
+ \\ }
\\}
, &[_][]const u8{
- "tmp.zig:8:5: error: enumeration value 'E.b' not handled in switch",
- "tmp.zig:12:5: error: switch on non-exhaustive enum must include `else` or `_` prong",
+ "tmp.zig:12:5: error: enumeration value 'E.b' not handled in switch",
+ "tmp.zig:16:5: error: switch on non-exhaustive enum must include `else` or `_` prong",
+ "tmp.zig:21:5: error: `_` prong not allowed when switching on tagged union",
});
cases.add("switch expression - unreachable else prong (bool)",
diff --git a/test/stage1/behavior/union.zig b/test/stage1/behavior/union.zig
index cf3412eb5b..e7c9ee406f 100644
--- a/test/stage1/behavior/union.zig
+++ b/test/stage1/behavior/union.zig
@@ -690,3 +690,26 @@ test "method call on an empty union" {
S.doTheTest();
comptime S.doTheTest();
}
+
+test "switching on non exhaustive union" {
+ const S = struct {
+ const E = enum(u8) {
+ a,
+ b,
+ _,
+ };
+ const U = union(E) {
+ a: i32,
+ b: u32,
+ };
+ fn doTheTest() void {
+ var a = U{ .a = 2 };
+ switch (a) {
+ .a => |val| expect(val == 2),
+ .b => unreachable,
+ }
+ }
+ };
+ S.doTheTest();
+ comptime S.doTheTest();
+}
From 2948f2d262926725b4b8cb5beeb4fdba00b48336 Mon Sep 17 00:00:00 2001
From: Vexu
Date: Mon, 27 Jul 2020 18:42:13 +0300
Subject: [PATCH 121/153] fix cast from invalid non-exhaustive enum to union
---
src/ir.cpp | 16 +++++++++++++++-
test/compile_errors.zig | 23 +++++++++++++++++++++++
2 files changed, 38 insertions(+), 1 deletion(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 347cae7e79..209dd6d7d0 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -14177,7 +14177,14 @@ static IrInstGen *ir_analyze_enum_to_union(IrAnalyze *ira, IrInst* source_instr,
if (!val)
return ira->codegen->invalid_inst_gen;
TypeUnionField *union_field = find_union_field_by_tag(wanted_type, &val->data.x_enum_tag);
- assert(union_field != nullptr);
+ if (union_field == nullptr) {
+ Buf *int_buf = buf_alloc();
+ bigint_append_buf(int_buf, &target->value->data.x_enum_tag, 10);
+
+ ir_add_error(ira, &target->base,
+ buf_sprintf("no tag by value %s", buf_ptr(int_buf)));
+ return ira->codegen->invalid_inst_gen;
+ }
ZigType *field_type = resolve_union_field_type(ira->codegen, union_field);
if (field_type == nullptr)
return ira->codegen->invalid_inst_gen;
@@ -14213,6 +14220,13 @@ static IrInstGen *ir_analyze_enum_to_union(IrAnalyze *ira, IrInst* source_instr,
return result;
}
+ if (target->value->type->data.enumeration.non_exhaustive) {
+ ir_add_error(ira, source_instr,
+ buf_sprintf("runtime cast to union '%s' from non-exhustive enum",
+ buf_ptr(&wanted_type->name)));
+ return ira->codegen->invalid_inst_gen;
+ }
+
// if the union has all fields 0 bits, we can do it
// and in fact it's a noop cast because the union value is just the enum value
if (wanted_type->data.unionation.gen_field_count == 0) {
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 6c123925da..0b9c754329 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -51,6 +51,29 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:17:23: error: cannot adjust alignment of zero sized type 'fn(u32) anytype'",
});
+ cases.addTest("invalid non-exhaustive enum to union",
+ \\const E = enum(u8) {
+ \\ a,
+ \\ b,
+ \\ _,
+ \\};
+ \\const U = union(E) {
+ \\ a,
+ \\ b,
+ \\};
+ \\export fn foo() void {
+ \\ var e = @intToEnum(E, 15);
+ \\ var u: U = e;
+ \\}
+ \\export fn bar() void {
+ \\ const e = @intToEnum(E, 15);
+ \\ var u: U = e;
+ \\}
+ , &[_][]const u8{
+ "tmp.zig:12:16: error: runtime cast to union 'U' from non-exhustive enum",
+ "tmp.zig:16:16: error: no tag by value 15",
+ });
+
cases.addTest("switching with exhaustive enum has '_' prong ",
\\const E = enum{
\\ a,
From 795033c35fd7bb411277079e72b148bf7810c18e Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sun, 16 Aug 2020 21:33:02 +0200
Subject: [PATCH 122/153] std/crypto: XChaCha20, detached modes and standard
AEAD API
* Factor redundant code in std/crypto/chacha20
* Add support for XChaCha20, and the XChaCha20-Poly1305 construction.
XChaCha20 is a variant of ChaCha20 with a 24-byte nonce; it is widely
implemented and is on the standards track:
https://tools.ietf.org/html/draft-irtf-cfrg-xchacha-03
* Add support for encryption/decryption with the authentication tag
detached from the ciphertext
* Add wrappers with an API similar to the Gimli AEAD type, so that
we can use and benchmark AEADs with a common API.
---
lib/std/crypto/chacha20.zig | 284 ++++++++++++++++++++++++++++--------
1 file changed, 226 insertions(+), 58 deletions(-)
diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig
index f6008745af..52c0857a67 100644
--- a/lib/std/crypto/chacha20.zig
+++ b/lib/std/crypto/chacha20.zig
@@ -25,12 +25,24 @@ fn Rp(a: usize, b: usize, c: usize, d: usize) QuarterRound {
};
}
+fn initContext(key: [8]u32, d: [4]u32) [16]u32 {
+ var ctx: [16]u32 = undefined;
+ const c = "expand 32-byte k";
+ const constant_le = comptime [_]u32{
+ mem.readIntLittle(u32, c[0..4]),
+ mem.readIntLittle(u32, c[4..8]),
+ mem.readIntLittle(u32, c[8..12]),
+ mem.readIntLittle(u32, c[12..16]),
+ };
+ mem.copy(u32, ctx[0..], constant_le[0..4]);
+ mem.copy(u32, ctx[4..12], key[0..8]);
+ mem.copy(u32, ctx[12..16], d[0..4]);
+
+ return ctx;
+}
+
// The chacha family of ciphers are based on the salsa family.
-fn salsa20_wordtobyte(out: []u8, input: [16]u32) void {
- assert(out.len >= 64);
-
- var x: [16]u32 = undefined;
-
+fn chacha20Core(x: []u32, input: [16]u32) void {
for (x) |_, i|
x[i] = input[i];
@@ -59,33 +71,27 @@ fn salsa20_wordtobyte(out: []u8, input: [16]u32) void {
x[r.b] = std.math.rotl(u32, x[r.b] ^ x[r.c], @as(u32, 7));
}
}
+}
+fn hashToBytes(out: []u8, x: [16]u32) void {
for (x) |_, i| {
- mem.writeIntLittle(u32, out[4 * i ..][0..4], x[i] +% input[i]);
+ mem.writeIntLittle(u32, out[4 * i ..][0..4], x[i]);
}
}
fn chaCha20_internal(out: []u8, in: []const u8, key: [8]u32, counter: [4]u32) void {
- var ctx: [16]u32 = undefined;
+ var ctx = initContext(key, counter);
var remaining: usize = if (in.len > out.len) in.len else out.len;
var cursor: usize = 0;
- const c = "expand 32-byte k";
- const constant_le = [_]u32{
- mem.readIntLittle(u32, c[0..4]),
- mem.readIntLittle(u32, c[4..8]),
- mem.readIntLittle(u32, c[8..12]),
- mem.readIntLittle(u32, c[12..16]),
- };
-
- mem.copy(u32, ctx[0..], constant_le[0..4]);
- mem.copy(u32, ctx[4..12], key[0..8]);
- mem.copy(u32, ctx[12..16], counter[0..4]);
-
while (true) {
+ var x: [16]u32 = undefined;
var buf: [64]u8 = undefined;
- salsa20_wordtobyte(buf[0..], ctx);
-
+ chacha20Core(x[0..], ctx);
+ for (x) |_, i| {
+ x[i] +%= ctx[i];
+ }
+ hashToBytes(buf[0..], x);
if (remaining < 64) {
var i: usize = 0;
while (i < remaining) : (i += 1)
@@ -104,6 +110,20 @@ fn chaCha20_internal(out: []u8, in: []const u8, key: [8]u32, counter: [4]u32) vo
}
}
+fn keyToWords(key: [32]u8) [8]u32 {
+ var k: [8]u32 = undefined;
+ k[0] = mem.readIntLittle(u32, key[0..4]);
+ k[1] = mem.readIntLittle(u32, key[4..8]);
+ k[2] = mem.readIntLittle(u32, key[8..12]);
+ k[3] = mem.readIntLittle(u32, key[12..16]);
+ k[4] = mem.readIntLittle(u32, key[16..20]);
+ k[5] = mem.readIntLittle(u32, key[20..24]);
+ k[6] = mem.readIntLittle(u32, key[24..28]);
+ k[7] = mem.readIntLittle(u32, key[28..32]);
+
+ return k;
+}
+
/// ChaCha20 avoids the possibility of timing attacks, as there are no branches
/// on secret key data.
///
@@ -116,23 +136,12 @@ pub fn chaCha20IETF(out: []u8, in: []const u8, counter: u32, key: [32]u8, nonce:
assert(in.len >= out.len);
assert((in.len >> 6) + counter <= maxInt(u32));
- var k: [8]u32 = undefined;
var c: [4]u32 = undefined;
-
- k[0] = mem.readIntLittle(u32, key[0..4]);
- k[1] = mem.readIntLittle(u32, key[4..8]);
- k[2] = mem.readIntLittle(u32, key[8..12]);
- k[3] = mem.readIntLittle(u32, key[12..16]);
- k[4] = mem.readIntLittle(u32, key[16..20]);
- k[5] = mem.readIntLittle(u32, key[20..24]);
- k[6] = mem.readIntLittle(u32, key[24..28]);
- k[7] = mem.readIntLittle(u32, key[28..32]);
-
c[0] = counter;
c[1] = mem.readIntLittle(u32, nonce[0..4]);
c[2] = mem.readIntLittle(u32, nonce[4..8]);
c[3] = mem.readIntLittle(u32, nonce[8..12]);
- chaCha20_internal(out, in, k, c);
+ chaCha20_internal(out, in, keyToWords(key), c);
}
/// This is the original ChaCha20 before RFC 7539, which recommends using the
@@ -143,18 +152,8 @@ pub fn chaCha20With64BitNonce(out: []u8, in: []const u8, counter: u64, key: [32]
assert(counter +% (in.len >> 6) >= counter);
var cursor: usize = 0;
- var k: [8]u32 = undefined;
+ const k = keyToWords(key);
var c: [4]u32 = undefined;
-
- k[0] = mem.readIntLittle(u32, key[0..4]);
- k[1] = mem.readIntLittle(u32, key[4..8]);
- k[2] = mem.readIntLittle(u32, key[8..12]);
- k[3] = mem.readIntLittle(u32, key[12..16]);
- k[4] = mem.readIntLittle(u32, key[16..20]);
- k[5] = mem.readIntLittle(u32, key[20..24]);
- k[6] = mem.readIntLittle(u32, key[24..28]);
- k[7] = mem.readIntLittle(u32, key[28..32]);
-
c[0] = @truncate(u32, counter);
c[1] = @truncate(u32, counter >> 32);
c[2] = mem.readIntLittle(u32, nonce[0..4]);
@@ -437,15 +436,15 @@ test "crypto.chacha20 test vector 5" {
pub const chacha20poly1305_tag_size = 16;
-pub fn chacha20poly1305Seal(dst: []u8, plaintext: []const u8, data: []const u8, key: [32]u8, nonce: [12]u8) void {
- assert(dst.len >= plaintext.len + chacha20poly1305_tag_size);
+pub fn chacha20poly1305SealDetached(ciphertext: []u8, tag: *[chacha20poly1305_tag_size]u8, plaintext: []const u8, data: []const u8, key: [32]u8, nonce: [12]u8) void {
+ assert(ciphertext.len >= plaintext.len);
// derive poly1305 key
var polyKey = [_]u8{0} ** 32;
chaCha20IETF(polyKey[0..], polyKey[0..], 0, key, nonce);
// encrypt plaintext
- chaCha20IETF(dst[0..plaintext.len], plaintext, 1, key, nonce);
+ chaCha20IETF(ciphertext[0..plaintext.len], plaintext, 1, key, nonce);
// construct mac
var mac = Poly1305.init(polyKey[0..]);
@@ -455,7 +454,7 @@ pub fn chacha20poly1305Seal(dst: []u8, plaintext: []const u8, data: []const u8,
const padding = 16 - (data.len % 16);
mac.update(zeros[0..padding]);
}
- mac.update(dst[0..plaintext.len]);
+ mac.update(ciphertext[0..plaintext.len]);
if (plaintext.len % 16 != 0) {
const zeros = [_]u8{0} ** 16;
const padding = 16 - (plaintext.len % 16);
@@ -465,19 +464,17 @@ pub fn chacha20poly1305Seal(dst: []u8, plaintext: []const u8, data: []const u8,
mem.writeIntLittle(u64, lens[0..8], data.len);
mem.writeIntLittle(u64, lens[8..16], plaintext.len);
mac.update(lens[0..]);
- mac.final(dst[plaintext.len..]);
+ mac.final(tag);
}
-/// Verifies and decrypts an authenticated message produced by chacha20poly1305Seal.
-pub fn chacha20poly1305Open(dst: []u8, msgAndTag: []const u8, data: []const u8, key: [32]u8, nonce: [12]u8) !void {
- if (msgAndTag.len < chacha20poly1305_tag_size) {
- return error.InvalidMessage;
- }
+pub fn chacha20poly1305Seal(ciphertextAndTag: []u8, plaintext: []const u8, data: []const u8, key: [32]u8, nonce: [12]u8) void {
+ return chacha20poly1305SealDetached(ciphertextAndTag[0..plaintext.len], ciphertextAndTag[plaintext.len..][0..chacha20poly1305_tag_size], plaintext, data, key, nonce);
+}
+/// Verifies and decrypts an authenticated message produced by chacha20poly1305SealDetached.
+pub fn chacha20poly1305OpenDetached(dst: []u8, ciphertext: []const u8, tag: *const [chacha20poly1305_tag_size]u8, data: []const u8, key: [32]u8, nonce: [12]u8) !void {
// split ciphertext and tag
- assert(dst.len >= msgAndTag.len - chacha20poly1305_tag_size);
- var ciphertext = msgAndTag[0 .. msgAndTag.len - chacha20poly1305_tag_size];
- var polyTag = msgAndTag[ciphertext.len..];
+ assert(dst.len >= ciphertext.len);
// derive poly1305 key
var polyKey = [_]u8{0} ** 32;
@@ -510,7 +507,7 @@ pub fn chacha20poly1305Open(dst: []u8, msgAndTag: []const u8, data: []const u8,
// See https://github.com/ziglang/zig/issues/1776
var acc: u8 = 0;
for (computedTag) |_, i| {
- acc |= (computedTag[i] ^ polyTag[i]);
+ acc |= (computedTag[i] ^ tag.*[i]);
}
if (acc != 0) {
return error.AuthenticationFailed;
@@ -520,6 +517,75 @@ pub fn chacha20poly1305Open(dst: []u8, msgAndTag: []const u8, data: []const u8,
chaCha20IETF(dst[0..ciphertext.len], ciphertext, 1, key, nonce);
}
+/// Verifies and decrypts an authenticated message produced by chacha20poly1305Seal.
+pub fn chacha20poly1305Open(dst: []u8, ciphertextAndTag: []const u8, data: []const u8, key: [32]u8, nonce: [12]u8) !void {
+ if (ciphertextAndTag.len < chacha20poly1305_tag_size) {
+ return error.InvalidMessage;
+ }
+ const ciphertextLen = ciphertextAndTag.len - chacha20poly1305_tag_size;
+ return try chacha20poly1305OpenDetached(dst, ciphertextAndTag[0..ciphertextLen], ciphertextAndTag[ciphertextLen..][0..chacha20poly1305_tag_size], data, key, nonce);
+}
+
+fn hchacha20(input: [16]u8, key: [32]u8) [32]u8 {
+ var c: [4]u32 = undefined;
+ for (c) |_, i| {
+ c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
+ }
+ const ctx = initContext(keyToWords(key), c);
+ var x: [16]u32 = undefined;
+ chacha20Core(x[0..], ctx);
+ var out: [32]u8 = undefined;
+ mem.writeIntLittle(u32, out[0..4], x[0]);
+ mem.writeIntLittle(u32, out[4..8], x[1]);
+ mem.writeIntLittle(u32, out[8..12], x[2]);
+ mem.writeIntLittle(u32, out[12..16], x[3]);
+ mem.writeIntLittle(u32, out[16..20], x[12]);
+ mem.writeIntLittle(u32, out[20..24], x[13]);
+ mem.writeIntLittle(u32, out[24..28], x[14]);
+ mem.writeIntLittle(u32, out[28..32], x[15]);
+
+ return out;
+}
+
+fn extend(key: [32]u8, nonce: [24]u8) struct { key: [32]u8, nonce: [12]u8 } {
+ var subnonce: [12]u8 = undefined;
+ mem.set(u8, subnonce[0..4], 0);
+ mem.copy(u8, subnonce[4..], nonce[16..24]);
+ return .{
+ .key = hchacha20(nonce[0..16].*, key),
+ .nonce = subnonce,
+ };
+}
+
+pub fn xChaCha20IETF(out: []u8, in: []const u8, counter: u32, key: [32]u8, nonce: [24]u8) void {
+ const extended = extend(key, nonce);
+ chaCha20IETF(out, in, counter, extended.key, extended.nonce);
+}
+
+pub const xchacha20poly1305_tag_size = 16;
+
+pub fn xchacha20poly1305SealDetached(ciphertext: []u8, tag: *[chacha20poly1305_tag_size]u8, plaintext: []const u8, data: []const u8, key: [32]u8, nonce: [24]u8) void {
+ const extended = extend(key, nonce);
+ return chacha20poly1305SealDetached(ciphertext, tag, plaintext, data, extended.key, extended.nonce);
+}
+
+pub fn xchacha20poly1305Seal(ciphertextAndTag: []u8, plaintext: []const u8, data: []const u8, key: [32]u8, nonce: [24]u8) void {
+ const extended = extend(key, nonce);
+ return chacha20poly1305Seal(ciphertextAndTag, plaintext, data, extended.key, extended.nonce);
+}
+
+/// Verifies and decrypts an authenticated message produced by xchacha20poly1305SealDetached.
+pub fn xchacha20poly1305OpenDetached(plaintext: []u8, ciphertext: []const u8, tag: *const [chacha20poly1305_tag_size]u8, data: []const u8, key: [32]u8, nonce: [24]u8) !void {
+ const extended = extend(key, nonce);
+ return try chacha20poly1305OpenDetached(plaintext, ciphertext, tag, data, extended.key, extended.nonce);
+}
+
+/// Verifies and decrypts an authenticated message produced by xchacha20poly1305Seal.
+pub fn xchacha20poly1305Open(ciphertextAndTag: []u8, msgAndTag: []const u8, data: []const u8, key: [32]u8, nonce: [24]u8) !void {
+ const extended = extend(key, nonce);
+ return try chacha20poly1305Open(ciphertextAndTag, msgAndTag, data, extended.key, extended.nonce);
+}
+
test "seal" {
{
const plaintext = "";
@@ -636,3 +702,105 @@ test "open" {
testing.expectError(error.InvalidMessage, chacha20poly1305Open(out[0..], "", data[0..], key, bad_nonce));
}
}
+
+test "crypto.xchacha20" {
+ const key = [_]u8{69} ** 32;
+ const nonce = [_]u8{42} ** 24;
+ const input = "Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it.";
+ {
+ var ciphertext: [input.len]u8 = undefined;
+ xChaCha20IETF(ciphertext[0..], input[0..], 0, key, nonce);
+ var buf: [2 * ciphertext.len]u8 = undefined;
+ testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{ciphertext}), "E0A1BCF939654AFDBDC1746EC49832647C19D891F0D1A81FC0C1703B4514BDEA584B512F6908C2C5E9DD18D5CBC1805DE5803FE3B9CA5F193FB8359E91FAB0C3BB40309A292EB1CF49685C65C4A3ADF4F11DB0CD2B6B67FBC174BC2E860E8F769FD3565BBFAD1C845E05A0FED9BE167C240D");
+ }
+ {
+ const data = "Additional data";
+ var ciphertext: [input.len + xchacha20poly1305_tag_size]u8 = undefined;
+ xchacha20poly1305Seal(ciphertext[0..], input, data, key, nonce);
+ var out: [input.len]u8 = undefined;
+ try xchacha20poly1305Open(out[0..], ciphertext[0..], data, key, nonce);
+ var buf: [2 * ciphertext.len]u8 = undefined;
+ testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{ciphertext}), "994D2DD32333F48E53650C02C7A2ABB8E018B0836D7175AEC779F52E961780768F815C58F1AA52D211498DB89B9216763F569C9433A6BBFCEFB4D4A49387A4C5207FBB3B5A92B5941294DF30588C6740D39DC16FA1F0E634F7246CF7CDCB978E44347D89381B7A74EB7084F754B90BDE9AAF5A94B8F2A85EFD0B50692AE2D425E234");
+ testing.expectEqualSlices(u8, out[0..], input);
+ ciphertext[0] += 1;
+ testing.expectError(error.AuthenticationFailed, xchacha20poly1305Open(out[0..], ciphertext[0..], data, key, nonce));
+ }
+}
+
+pub const Chacha20Poly1305 = struct {
+ pub const tag_length = 16;
+ pub const nonce_length = 12;
+ pub const key_length = 32;
+
+ /// c: ciphertext: output buffer should be of size m.len
+ /// at: authentication tag: output MAC
+ /// m: message
+ /// ad: Associated Data
+ /// npub: public nonce
+ /// k: private key
+ pub fn encrypt(c: []u8, at: *[tag_length]u8, m: []const u8, ad: []const u8, npub: [nonce_length]u8, k: [key_length]u8) void {
+ assert(c.len == m.len);
+ return chacha20poly1305SealDetached(c, at, m, ad, k, npub);
+ }
+
+ /// m: message: output buffer should be of size c.len
+ /// c: ciphertext
+ /// at: authentication tag
+ /// ad: Associated Data
+ /// npub: public nonce
+ /// k: private key
+ /// NOTE: the check of the authentication tag is currently not done in constant time
+ pub fn decrypt(m: []u8, c: []const u8, at: [tag_length]u8, ad: []const u8, npub: [nonce_length]u8, k: [key_length]u8) !void {
+ assert(c.len == m.len);
+ return try chacha20poly1305OpenDetached(m, c, at[0..], ad, k, npub);
+ }
+};
+
+pub const XChacha20Poly1305 = struct {
+ pub const tag_length = 16;
+ pub const nonce_length = 24;
+ pub const key_length = 32;
+
+ /// c: ciphertext: output buffer should be of size m.len
+ /// at: authentication tag: output MAC
+ /// m: message
+ /// ad: Associated Data
+ /// npub: public nonce
+ /// k: private key
+ pub fn encrypt(c: []u8, at: *[tag_length]u8, m: []const u8, ad: []const u8, npub: [nonce_length]u8, k: [key_length]u8) void {
+ assert(c.len == m.len);
+ return xchacha20poly1305SealDetached(c, at, m, ad, k, npub);
+ }
+
+ /// m: message: output buffer should be of size c.len
+ /// c: ciphertext
+ /// at: authentication tag
+ /// ad: Associated Data
+ /// npub: public nonce
+ /// k: private key
+ /// NOTE: the check of the authentication tag is currently not done in constant time
+ pub fn decrypt(m: []u8, c: []const u8, at: [tag_length]u8, ad: []const u8, npub: [nonce_length]u8, k: [key_length]u8) !void {
+ assert(c.len == m.len);
+ return try xchacha20poly1305OpenDetached(m, c, at[0..], ad, k, npub);
+ }
+};
+
+test "chacha20 AEAD API" {
+ const aeads = [_]type{ Chacha20Poly1305, XChacha20Poly1305 };
+ const input = "Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it.";
+ const data = "Additional data";
+
+ inline for (aeads) |aead| {
+ const key = [_]u8{69} ** aead.key_length;
+ const nonce = [_]u8{42} ** aead.nonce_length;
+ var ciphertext: [input.len]u8 = undefined;
+ var tag: [aead.tag_length]u8 = undefined;
+ var out: [input.len]u8 = undefined;
+
+ aead.encrypt(ciphertext[0..], tag[0..], input, data, nonce, key);
+ try aead.decrypt(out[0..], ciphertext[0..], tag, data[0..], nonce, key);
+ testing.expectEqualSlices(u8, out[0..], input);
+ ciphertext[0] += 1;
+ testing.expectError(error.AuthenticationFailed, aead.decrypt(out[0..], ciphertext[0..], tag, data[0..], nonce, key));
+ }
+}
From cd591a9b25619809840b32d6bb115e573f54e221 Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sun, 16 Aug 2020 23:02:36 +0200
Subject: [PATCH 123/153] No need for an explicit deref
---
lib/std/crypto/chacha20.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig
index 52c0857a67..e51123eb81 100644
--- a/lib/std/crypto/chacha20.zig
+++ b/lib/std/crypto/chacha20.zig
@@ -507,7 +507,7 @@ pub fn chacha20poly1305OpenDetached(dst: []u8, ciphertext: []const u8, tag: *con
// See https://github.com/ziglang/zig/issues/1776
var acc: u8 = 0;
for (computedTag) |_, i| {
- acc |= (computedTag[i] ^ tag.*[i]);
+ acc |= (computedTag[i] ^ tag[i]);
}
if (acc != 0) {
return error.AuthenticationFailed;
From 5fabb44aebd599dd22e43ed43c1f8932af06cd27 Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Sun, 16 Aug 2020 23:37:42 +0200
Subject: [PATCH 124/153] Export crypto.aead
---
lib/std/crypto.zig | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/lib/std/crypto.zig b/lib/std/crypto.zig
index 9fbe70f815..c1909a1823 100644
--- a/lib/std/crypto.zig
+++ b/lib/std/crypto.zig
@@ -29,9 +29,10 @@ pub const HmacSha1 = hmac.HmacSha1;
pub const HmacSha256 = hmac.HmacSha256;
pub const HmacBlake2s256 = hmac.HmacBlake2s256;
-const import_chaCha20 = @import("crypto/chacha20.zig");
-pub const chaCha20IETF = import_chaCha20.chaCha20IETF;
-pub const chaCha20With64BitNonce = import_chaCha20.chaCha20With64BitNonce;
+pub const chacha20 = @import("crypto/chacha20.zig");
+pub const chaCha20IETF = chacha20.chaCha20IETF;
+pub const chaCha20With64BitNonce = chacha20.chaCha20With64BitNonce;
+pub const xChaCha20IETF = chacha20.xChaCha20IETF;
pub const Poly1305 = @import("crypto/poly1305.zig").Poly1305;
@@ -45,6 +46,12 @@ pub const Edwards25519 = @import("crypto/25519/edwards25519.zig").Edwards25519;
pub const X25519 = @import("crypto/25519/x25519.zig").X25519;
pub const Ristretto255 = @import("crypto/25519/ristretto255.zig").Ristretto255;
+pub const aead = struct {
+ pub const Gimli = gimli.Aead;
+ pub const ChaCha20Poly1305 = chacha20.Chacha20Poly1305;
+ pub const XChaCha20Poly1305 = chacha20.XChacha20Poly1305;
+};
+
const std = @import("std.zig");
pub const randomBytes = std.os.getrandom;
From 96a27557e2ef53b8d1d3132f02c83b716c966277 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 17 Aug 2020 15:27:41 -0700
Subject: [PATCH 125/153] stage2 test harness: at least build all compare
output tests
This should have been removed with an earlier commit that improved the
test harness.
---
test/stage2/compare_output.zig | 8 --------
1 file changed, 8 deletions(-)
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
index 4979f1b2bc..83dfddb3e8 100644
--- a/test/stage2/compare_output.zig
+++ b/test/stage2/compare_output.zig
@@ -13,14 +13,6 @@ const linux_riscv64 = std.zig.CrossTarget{
};
pub fn addCases(ctx: *TestContext) !void {
- if (std.Target.current.os.tag != .linux or
- std.Target.current.cpu.arch != .x86_64)
- {
- // TODO implement self-hosted PE (.exe file) linking
- // TODO implement more ZIR so we don't depend on x86_64-linux
- return;
- }
-
{
var case = ctx.exe("hello world with updates", linux_x64);
From 3370b5f1097d0610048e29b322b4cbf81ff0a431 Mon Sep 17 00:00:00 2001
From: Isaac Freund
Date: Fri, 7 Aug 2020 00:53:55 +0200
Subject: [PATCH 126/153] stage2/wasm: implement basic container generation
Thus far, we only generate the type, function, export, and code
sections. These are sufficient to generate and export simple functions.
Codegen is currently hardcoded to `i32.const 42`; the main goal of this
commit is to create infrastructure for the container format that will
work with incremental compilation.
---
src-self-hosted/Module.zig | 4 +-
src-self-hosted/codegen/wasm.zig | 70 +++++
src-self-hosted/link.zig | 24 +-
src-self-hosted/link/Wasm.zig | 445 +++++++++++++++++++++++++++++++
src-self-hosted/main.zig | 1 +
5 files changed, 539 insertions(+), 5 deletions(-)
create mode 100644 src-self-hosted/codegen/wasm.zig
create mode 100644 src-self-hosted/link/Wasm.zig
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index df7c1986f3..5e22058acf 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -1571,7 +1571,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
.macho => {
// TODO Implement for MachO
},
- .c => {},
+ .c, .wasm => {},
}
}
} else {
@@ -1781,11 +1781,13 @@ fn allocateNewDecl(
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.c => .{ .c = {} },
+ .wasm => .{ .wasm = {} },
},
.fn_link = switch (self.bin_file.tag) {
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
.macho => .{ .macho = link.File.MachO.SrcFn.empty },
.c => .{ .c = {} },
+ .wasm => .{ .wasm = null },
},
.generation = 0,
};
diff --git a/src-self-hosted/codegen/wasm.zig b/src-self-hosted/codegen/wasm.zig
new file mode 100644
index 0000000000..78d8d22ded
--- /dev/null
+++ b/src-self-hosted/codegen/wasm.zig
@@ -0,0 +1,70 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const ArrayList = std.ArrayList;
+const assert = std.debug.assert;
+const leb = std.debug.leb;
+
+const Decl = @import("../Module.zig").Decl;
+const Type = @import("../type.zig").Type;
+
+fn genValtype(ty: Type) u8 {
+ return switch (ty.tag()) {
+ .u32, .i32 => 0x7F,
+ .u64, .i64 => 0x7E,
+ .f32 => 0x7D,
+ .f64 => 0x7C,
+ else => @panic("TODO: Implement more types for wasm."),
+ };
+}
+
+pub fn genFunctype(buf: *ArrayList(u8), decl: *Decl) !void {
+ const ty = decl.typed_value.most_recent.typed_value.ty;
+ const writer = buf.writer();
+
+ // functype magic
+ try writer.writeByte(0x60);
+
+ // param types
+ try leb.writeULEB128(writer, @intCast(u32, ty.fnParamLen()));
+ if (ty.fnParamLen() != 0) {
+ const params = try buf.allocator.alloc(Type, ty.fnParamLen());
+ defer buf.allocator.free(params);
+ ty.fnParamTypes(params);
+ for (params) |param_type| try writer.writeByte(genValtype(param_type));
+ }
+
+ // return type
+ const return_type = ty.fnReturnType();
+ switch (return_type.tag()) {
+ .void, .noreturn => try leb.writeULEB128(writer, @as(u32, 0)),
+ else => {
+ try leb.writeULEB128(writer, @as(u32, 1));
+ try writer.writeByte(genValtype(return_type));
+ },
+ }
+}
+
+pub fn genCode(buf: *ArrayList(u8), decl: *Decl) !void {
+ assert(buf.items.len == 0);
+ const writer = buf.writer();
+
+ // Reserve space to write the size after generating the code
+ try writer.writeAll(&([1]u8{undefined} ** 5));
+
+ // Write the size of the locals vec
+ // TODO: implement locals
+ try leb.writeULEB128(writer, @as(u32, 0));
+
+ // Write instructions
+
+ // TODO: actually implement codegen
+ try writer.writeByte(0x41); // i32.const
+ try leb.writeILEB128(writer, @as(i32, 42));
+
+ // Write 'end' opcode
+ try writer.writeByte(0x0B);
+
+ // Fill in the size of the generated code to the reserved space at the
+ // beginning of the buffer.
+ leb.writeUnsignedFixed(5, buf.items[0..5], @intCast(u32, buf.items.len - 5));
+}
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index a835cc6b7c..6a51138785 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -46,12 +46,14 @@ pub const File = struct {
elf: Elf.TextBlock,
macho: MachO.TextBlock,
c: void,
+ wasm: void,
};
pub const LinkFn = union {
elf: Elf.SrcFn,
macho: MachO.SrcFn,
c: void,
+ wasm: ?Wasm.FnData,
};
tag: Tag,
@@ -69,7 +71,7 @@ pub const File = struct {
.coff => return error.TODOImplementCoff,
.elf => return Elf.openPath(allocator, dir, sub_path, options),
.macho => return MachO.openPath(allocator, dir, sub_path, options),
- .wasm => return error.TODOImplementWasm,
+ .wasm => return Wasm.openPath(allocator, dir, sub_path, options),
.c => return C.openPath(allocator, dir, sub_path, options),
.hex => return error.TODOImplementHex,
.raw => return error.TODOImplementRaw,
@@ -93,7 +95,7 @@ pub const File = struct {
.mode = determineMode(base.options),
});
},
- .c => {},
+ .c, .wasm => {},
}
}
@@ -102,6 +104,7 @@ pub const File = struct {
if (base.file) |f| {
f.close();
base.file = null;
+
}
}
@@ -110,6 +113,7 @@ pub const File = struct {
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
+ .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl),
}
}
@@ -117,7 +121,7 @@ pub const File = struct {
switch (base.tag) {
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
- .c => {},
+ .c, .wasm => {},
}
}
@@ -125,7 +129,7 @@ pub const File = struct {
switch (base.tag) {
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
- .c => {},
+ .c, .wasm => {},
}
}
@@ -135,6 +139,7 @@ pub const File = struct {
.elf => @fieldParentPtr(Elf, "base", base).deinit(),
.macho => @fieldParentPtr(MachO, "base", base).deinit(),
.c => @fieldParentPtr(C, "base", base).deinit(),
+ .wasm => @fieldParentPtr(Wasm, "base", base).deinit(),
}
}
@@ -155,6 +160,11 @@ pub const File = struct {
parent.deinit();
base.allocator.destroy(parent);
},
+ .wasm => {
+ const parent = @fieldParentPtr(Wasm, "base", base);
+ parent.deinit();
+ base.allocator.destroy(parent);
+ },
}
}
@@ -167,6 +177,7 @@ pub const File = struct {
.elf => @fieldParentPtr(Elf, "base", base).flush(),
.macho => @fieldParentPtr(MachO, "base", base).flush(),
.c => @fieldParentPtr(C, "base", base).flush(),
+ .wasm => @fieldParentPtr(Wasm, "base", base).flush(),
};
}
@@ -175,6 +186,7 @@ pub const File = struct {
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
.c => unreachable,
+ .wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl),
}
}
@@ -183,6 +195,7 @@ pub const File = struct {
.elf => @fieldParentPtr(Elf, "base", base).error_flags,
.macho => @fieldParentPtr(MachO, "base", base).error_flags,
.c => return .{ .no_entry_point_found = false },
+ .wasm => return ErrorFlags{},
};
}
@@ -197,6 +210,7 @@ pub const File = struct {
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
.c => return {},
+ .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl, exports),
}
}
@@ -204,6 +218,7 @@ pub const File = struct {
elf,
macho,
c,
+ wasm,
};
pub const ErrorFlags = struct {
@@ -2832,6 +2847,7 @@ pub const File = struct {
};
pub const MachO = @import("link/MachO.zig");
+ const Wasm = @import("link/Wasm.zig");
};
/// Saturating multiplication
diff --git a/src-self-hosted/link/Wasm.zig b/src-self-hosted/link/Wasm.zig
new file mode 100644
index 0000000000..a62cff15ee
--- /dev/null
+++ b/src-self-hosted/link/Wasm.zig
@@ -0,0 +1,445 @@
+const Wasm = @This();
+
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const fs = std.fs;
+const leb = std.debug.leb;
+
+const Module = @import("../Module.zig");
+const codegen = @import("../codegen/wasm.zig");
+const link = @import("../link.zig");
+
+/// Various magic numbers defined by the wasm spec
+const spec = struct {
+ const magic = [_]u8{ 0x00, 0x61, 0x73, 0x6D }; // \0asm
+ const version = [_]u8{ 0x01, 0x00, 0x00, 0x00 }; // version 1
+
+ const custom_id = 0;
+ const types_id = 1;
+ const imports_id = 2;
+ const funcs_id = 3;
+ const tables_id = 4;
+ const memories_id = 5;
+ const globals_id = 6;
+ const exports_id = 7;
+ const start_id = 8;
+ const elements_id = 9;
+ const code_id = 10;
+ const data_id = 11;
+};
+
+pub const base_tag = link.File.Tag.wasm;
+
+pub const FnData = struct {
+ funcidx: u32,
+ typeidx: u32,
+};
+
+base: link.File,
+
+types: Types,
+funcs: Funcs,
+exports: Exports,
+
+/// Array over the section structs used in the various sections above to
+/// allow iteration when shifting sections to make space.
+/// TODO: this should eventually be size 11 when we use all the sections.
+sections: [4]*Section,
+
+pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
+ assert(options.object_format == .wasm);
+
+ // TODO: read the file and keep valid parts instead of truncating
+ const file = try dir.createFile(sub_path, .{ .truncate = true, .read = true });
+ errdefer file.close();
+
+ const wasm = try allocator.create(Wasm);
+ errdefer allocator.destroy(wasm);
+
+ try file.writeAll(&(spec.magic ++ spec.version));
+
+ wasm.base = .{
+ .tag = .wasm,
+ .options = options,
+ .file = file,
+ .allocator = allocator,
+ };
+
+ // TODO: this should vary depending on the section and be less arbitrary
+ const size = 1024;
+ const offset = @sizeOf(@TypeOf(spec.magic ++ spec.version));
+
+ wasm.types = try Types.init(file, offset, size);
+ wasm.funcs = try Funcs.init(file, offset + size, size, offset + 3 * size, size);
+ wasm.exports = try Exports.init(file, offset + 2 * size, size);
+ try file.setEndPos(offset + 4 * size);
+
+ wasm.sections = [_]*Section{
+ &wasm.types.typesec.section,
+ &wasm.funcs.funcsec,
+ &wasm.exports.exportsec,
+ &wasm.funcs.codesec.section,
+ };
+
+ return &wasm.base;
+}
+
+pub fn deinit(self: *Wasm) void {
+ if (self.base.file) |f| f.close();
+ self.types.deinit();
+ self.funcs.deinit();
+}
+
+pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
+ if (decl.typed_value.most_recent.typed_value.ty.zigTypeTag() != .Fn)
+ return error.TODOImplementNonFnDeclsForWasm;
+
+ if (decl.fn_link.wasm) |fn_data| {
+ self.types.free(fn_data.typeidx);
+ self.funcs.free(fn_data.funcidx);
+ }
+
+ var buf = std.ArrayList(u8).init(self.base.allocator);
+ defer buf.deinit();
+
+ try codegen.genFunctype(&buf, decl);
+ const typeidx = try self.types.new(buf.items);
+ buf.items.len = 0;
+
+ try codegen.genCode(&buf, decl);
+ const funcidx = try self.funcs.new(typeidx, buf.items);
+
+ decl.fn_link.wasm = .{ .typeidx = typeidx, .funcidx = funcidx };
+
+ try self.exports.writeAll(module);
+}
+
+pub fn updateDeclExports(
+ self: *Wasm,
+ module: *Module,
+ decl: *const Module.Decl,
+ exports: []const *Module.Export,
+) !void {
+ // TODO: updateDeclExports() may currently be called before updateDecl,
+ // presumably due to a bug. For now just rely on the following call
+ // being made in updateDecl().
+
+ //try self.exports.writeAll(module);
+}
+
+pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
+ // TODO: remove this assert when non-function Decls are implemented
+ assert(decl.typed_value.most_recent.typed_value.ty.zigTypeTag() == .Fn);
+ if (decl.fn_link.wasm) |fn_data| {
+ self.types.free(fn_data.typeidx);
+ self.funcs.free(fn_data.funcidx);
+ decl.fn_link.wasm = null;
+ }
+}
+
+pub fn flush(self: *Wasm) !void {}
+
+/// This struct describes the location of a named section + custom section
+/// padding in the output file. This is all the data we need to allow for
+/// shifting sections around when padding runs out.
+const Section = struct {
+ /// The size of a section header: 1 byte section id + 5 bytes
+ /// for the fixed-width ULEB128 encoded contents size.
+ const header_size = 1 + 5;
+ /// Offset of the section id byte from the start of the file.
+ offset: u64,
+ /// Size of the section, including the header and directly
+ /// following custom section used for padding if any.
+ size: u64,
+
+ /// Resize the usable part of the section, handling the following custom
+ /// section used for padding. If there is not enough padding left, shift
+ /// all following sections to make space. Takes the current and target
+ /// contents sizes of the section as arguments.
+ fn resize(self: *Section, file: fs.File, current: u32, target: u32) !void {
+ // Section header + target contents size + custom section header
+ // + custom section name + empty custom section > owned chunk of the file
+ if (header_size + target + header_size + 1 + 0 > self.size)
+ return error.TODOImplementSectionShifting;
+
+ const new_custom_start = self.offset + header_size + target;
+ const new_custom_contents_size = self.size - target - 2 * header_size;
+ assert(new_custom_contents_size >= 1);
+ // +1 for the name of the custom section, which we set to an empty string
+ var custom_header: [header_size + 1]u8 = undefined;
+ custom_header[0] = spec.custom_id;
+ leb.writeUnsignedFixed(5, custom_header[1..header_size], @intCast(u32, new_custom_contents_size));
+ custom_header[header_size] = 0;
+ try file.pwriteAll(&custom_header, new_custom_start);
+ }
+};
+
+/// This can be used to manage the contents of any section which uses a vector
+/// of contents. This interface maintains index stability while allowing for
+/// reuse of "dead" indexes.
+const VecSection = struct {
+ /// Represents a single entry in the vector (e.g. a type in the type section)
+ const Entry = struct {
+ /// Offset from the start of the section contents in bytes
+ offset: u32,
+ /// Size in bytes of the entry
+ size: u32,
+ };
+ section: Section,
+ /// Size in bytes of the contents of the section. Does not include
+ /// the "header" containing the section id and this value.
+ contents_size: u32,
+ /// List of all entries in the contents of the section.
+ entries: std.ArrayListUnmanaged(Entry) = std.ArrayListUnmanaged(Entry){},
+ /// List of indexes of unreferenced entries which may be
+ /// overwritten and reused.
+ dead_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
+
+ /// Write the headers of the section and custom padding section
+ fn init(comptime section_id: u8, file: fs.File, offset: u64, initial_size: u64) !VecSection {
+ // section id, section size, empty vector, custom section id,
+ // custom section size, empty custom section name
+ var initial_data: [1 + 5 + 5 + 1 + 5 + 1]u8 = undefined;
+
+ assert(initial_size >= initial_data.len);
+
+ comptime var i = 0;
+ initial_data[i] = section_id;
+ i += 1;
+ leb.writeUnsignedFixed(5, initial_data[i..(i + 5)], 5);
+ i += 5;
+ leb.writeUnsignedFixed(5, initial_data[i..(i + 5)], 0);
+ i += 5;
+ initial_data[i] = spec.custom_id;
+ i += 1;
+ leb.writeUnsignedFixed(5, initial_data[i..(i + 5)], @intCast(u32, initial_size - @sizeOf(@TypeOf(initial_data))));
+ i += 5;
+ initial_data[i] = 0;
+
+ try file.pwriteAll(&initial_data, offset);
+
+ return VecSection{
+ .section = .{
+ .offset = offset,
+ .size = initial_size,
+ },
+ .contents_size = 5,
+ };
+ }
+
+ fn deinit(self: *VecSection, allocator: *Allocator) void {
+ self.entries.deinit(allocator);
+ self.dead_list.deinit(allocator);
+ }
+
+ /// Write a new entry into the file, returning the index used.
+ fn addEntry(self: *VecSection, file: fs.File, allocator: *Allocator, data: []const u8) !u32 {
+ // First look for a dead entry we can reuse
+ for (self.dead_list.items) |dead_idx, i| {
+ const dead_entry = &self.entries.items[dead_idx];
+ if (dead_entry.size == data.len) {
+ // Found a dead entry of the right length, overwrite it
+ try file.pwriteAll(data, self.section.offset + Section.header_size + dead_entry.offset);
+ _ = self.dead_list.swapRemove(i);
+ return dead_idx;
+ }
+ }
+
+ // TODO: We can be more efficient if we special-case one or
+ // more consecutive dead entries at the end of the vector.
+
+ // We failed to find a dead entry to reuse, so write the new
+ // entry to the end of the section.
+ try self.section.resize(file, self.contents_size, self.contents_size + @intCast(u32, data.len));
+ try file.pwriteAll(data, self.section.offset + Section.header_size + self.contents_size);
+ try self.entries.append(allocator, .{
+ .offset = self.contents_size,
+ .size = @intCast(u32, data.len),
+ });
+ self.contents_size += @intCast(u32, data.len);
+ // Make sure the dead list always has enough space to store all free'd
+ // entries. This makes it so that delEntry() cannot fail.
+ // TODO: figure out a better way that doesn't waste as much memory
+ try self.dead_list.ensureCapacity(allocator, self.entries.items.len);
+
+ // Update the size in the section header and the item count of
+ // the contents vector.
+ var size_and_count: [10]u8 = undefined;
+ leb.writeUnsignedFixed(5, size_and_count[0..5], self.contents_size);
+ leb.writeUnsignedFixed(5, size_and_count[5..], @intCast(u32, self.entries.items.len));
+ try file.pwriteAll(&size_and_count, self.section.offset + 1);
+
+ return @intCast(u32, self.entries.items.len - 1);
+ }
+
+ /// Mark the type referenced by the given index as dead.
+ fn delEntry(self: *VecSection, index: u32) void {
+ self.dead_list.appendAssumeCapacity(index);
+ }
+};
+
+const Types = struct {
+ typesec: VecSection,
+
+ fn init(file: fs.File, offset: u64, initial_size: u64) !Types {
+ return Types{ .typesec = try VecSection.init(spec.types_id, file, offset, initial_size) };
+ }
+
+ fn deinit(self: *Types) void {
+ const wasm = @fieldParentPtr(Wasm, "types", self);
+ self.typesec.deinit(wasm.base.allocator);
+ }
+
+ fn new(self: *Types, data: []const u8) !u32 {
+ const wasm = @fieldParentPtr(Wasm, "types", self);
+ return self.typesec.addEntry(wasm.base.file.?, wasm.base.allocator, data);
+ }
+
+ fn free(self: *Types, typeidx: u32) void {
+ self.typesec.delEntry(typeidx);
+ }
+};
+
+const Funcs = struct {
+ /// This section needs special handling to keep the indexes matching with
+ /// the codesec, so we cant just use a VecSection.
+ funcsec: Section,
+ /// Number of functions listed in the funcsec. Must be kept in sync with
+ /// codesec.entries.items.len.
+ funcs_count: u32,
+ codesec: VecSection,
+
+ fn init(file: fs.File, funcs_offset: u64, funcs_size: u64, code_offset: u64, code_size: u64) !Funcs {
+ return Funcs{
+ .funcsec = (try VecSection.init(spec.funcs_id, file, funcs_offset, funcs_size)).section,
+ .funcs_count = 0,
+ .codesec = try VecSection.init(spec.code_id, file, code_offset, code_size),
+ };
+ }
+
+ fn deinit(self: *Funcs) void {
+ const wasm = @fieldParentPtr(Wasm, "funcs", self);
+ self.codesec.deinit(wasm.base.allocator);
+ }
+
+ /// Add a new function to the binary, first finding space for and writing
+ /// the code then writing the typeidx to the corresponding index in the
+ /// funcsec. Returns the function index used.
+ fn new(self: *Funcs, typeidx: u32, code: []const u8) !u32 {
+ const wasm = @fieldParentPtr(Wasm, "funcs", self);
+ const file = wasm.base.file.?;
+ const allocator = wasm.base.allocator;
+
+ assert(self.funcs_count == self.codesec.entries.items.len);
+
+ // TODO: consider nop-padding the code if there is a close but not perfect fit
+ const funcidx = try self.codesec.addEntry(file, allocator, code);
+
+ if (self.funcs_count < self.codesec.entries.items.len) {
+ // u32 vector length + funcs_count u32s in the vector
+ const current = 5 + self.funcs_count * 5;
+ try self.funcsec.resize(file, current, current + 5);
+ self.funcs_count += 1;
+
+ // Update the size in the section header and the item count of
+ // the contents vector.
+ var size_and_count: [10]u8 = undefined;
+ leb.writeUnsignedFixed(5, size_and_count[0..5], 5 + self.funcs_count * 5);
+ leb.writeUnsignedFixed(5, size_and_count[5..], self.funcs_count);
+ try file.pwriteAll(&size_and_count, self.funcsec.offset + 1);
+ }
+ assert(self.funcs_count == self.codesec.entries.items.len);
+
+ var typeidx_leb: [5]u8 = undefined;
+ leb.writeUnsignedFixed(5, &typeidx_leb, typeidx);
+ try file.pwriteAll(&typeidx_leb, self.funcsec.offset + Section.header_size + 5 + funcidx * 5);
+
+ return funcidx;
+ }
+
+ fn free(self: *Funcs, funcidx: u32) void {
+ self.codesec.delEntry(funcidx);
+ }
+};
+
+/// Exports are tricky. We can't leave dead entries in the binary as they
+/// would obviously be visible from the execution environment. The simplest
+/// way to work around this is to re-emit the export section whenever
+/// something changes. This also makes it easier to ensure exported function
+/// and global indexes are updated as they change.
+const Exports = struct {
+ exportsec: Section,
+ /// Size in bytes of the contents of the section. Does not include
+ /// the "header" containing the section id and this value.
+ contents_size: u32,
+
+ fn init(file: fs.File, offset: u64, initial_size: u64) !Exports {
+ return Exports{
+ .exportsec = (try VecSection.init(spec.exports_id, file, offset, initial_size)).section,
+ .contents_size = 5,
+ };
+ }
+
+ fn writeAll(self: *Exports, module: *Module) !void {
+ const wasm = @fieldParentPtr(Wasm, "exports", self);
+ const file = wasm.base.file.?;
+ var buf: [5]u8 = undefined;
+
+ // First ensure the section is the right size
+ var export_count: u32 = 0;
+ var new_contents_size: u32 = 5;
+ for (module.decl_exports.entries.items) |entry| {
+ for (entry.value) |e| {
+ export_count += 1;
+ new_contents_size += calcSize(e);
+ }
+ }
+ if (new_contents_size != self.contents_size) {
+ try self.exportsec.resize(file, self.contents_size, new_contents_size);
+ leb.writeUnsignedFixed(5, &buf, new_contents_size);
+ try file.pwriteAll(&buf, self.exportsec.offset + 1);
+ }
+
+ try file.seekTo(self.exportsec.offset + Section.header_size);
+ const writer = file.writer();
+
+ // Length of the exports vec
+ leb.writeUnsignedFixed(5, &buf, export_count);
+ try writer.writeAll(&buf);
+
+ for (module.decl_exports.entries.items) |entry|
+ for (entry.value) |e| try writeExport(writer, e);
+ }
+
+ /// Return the total number of bytes an export will take.
+ /// TODO: fixed-width LEB128 is currently used for simplicity, but should
+ /// be replaced with proper variable-length LEB128 as it is inefficient.
+ fn calcSize(e: *Module.Export) u32 {
+ // LEB128 name length + name bytes + export type + LEB128 index
+ return 5 + @intCast(u32, e.options.name.len) + 1 + 5;
+ }
+
+ /// Write the data for a single export to the given file at a given offset.
+ /// TODO: fixed-width LEB128 is currently used for simplicity, but should
+ /// be replaced with proper variable-length LEB128 as it is inefficient.
+ fn writeExport(writer: anytype, e: *Module.Export) !void {
+ var buf: [5]u8 = undefined;
+
+ // Export name length + name
+ leb.writeUnsignedFixed(5, &buf, @intCast(u32, e.options.name.len));
+ try writer.writeAll(&buf);
+ try writer.writeAll(e.options.name);
+
+ switch (e.exported_decl.typed_value.most_recent.typed_value.ty.zigTypeTag()) {
+ .Fn => {
+ // Type of the export
+ try writer.writeByte(0x00);
+ // Exported function index
+ leb.writeUnsignedFixed(5, &buf, e.exported_decl.fn_link.wasm.?.funcidx);
+ try writer.writeAll(&buf);
+ },
+ else => return error.TODOImplementNonFnDeclsForWasm,
+ }
+ }
+};
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index d63ea3a757..52e6a3b651 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -150,6 +150,7 @@ const usage_build_generic =
\\ -ofmt=[mode] Override target object format
\\ elf Executable and Linking Format
\\ c Compile to C source code
+ \\ wasm WebAssembly
\\ coff (planned) Common Object File Format (Windows)
\\ pe (planned) Portable Executable (Windows)
\\ macho (planned) macOS relocatables
From 97300896ed137f411c1eef04f8cde86abf4d32df Mon Sep 17 00:00:00 2001
From: Isaac Freund
Date: Sun, 16 Aug 2020 01:10:08 +0200
Subject: [PATCH 127/153] stage2/wasm: implement trivial codegen
We now generate code for returning constants of any of the basic types.
---
src-self-hosted/codegen/wasm.zig | 57 +++++++++++++++++++++++++++++---
1 file changed, 53 insertions(+), 4 deletions(-)
diff --git a/src-self-hosted/codegen/wasm.zig b/src-self-hosted/codegen/wasm.zig
index 78d8d22ded..8e794ff934 100644
--- a/src-self-hosted/codegen/wasm.zig
+++ b/src-self-hosted/codegen/wasm.zig
@@ -3,9 +3,12 @@ const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const assert = std.debug.assert;
const leb = std.debug.leb;
+const mem = std.mem;
const Decl = @import("../Module.zig").Decl;
+const Inst = @import("../ir.zig").Inst;
const Type = @import("../type.zig").Type;
+const Value = @import("../value.zig").Value;
fn genValtype(ty: Type) u8 {
return switch (ty.tag()) {
@@ -56,10 +59,10 @@ pub fn genCode(buf: *ArrayList(u8), decl: *Decl) !void {
try leb.writeULEB128(writer, @as(u32, 0));
// Write instructions
-
- // TODO: actually implement codegen
- try writer.writeByte(0x41); // i32.const
- try leb.writeILEB128(writer, @as(i32, 42));
+ // TODO: check for and handle death of instructions
+ const tv = decl.typed_value.most_recent.typed_value;
+ const mod_fn = tv.val.cast(Value.Payload.Function).?.func;
+ for (mod_fn.analysis.success.instructions) |inst| try genInst(writer, inst);
// Write 'end' opcode
try writer.writeByte(0x0B);
@@ -68,3 +71,49 @@ pub fn genCode(buf: *ArrayList(u8), decl: *Decl) !void {
// beginning of the buffer.
leb.writeUnsignedFixed(5, buf.items[0..5], @intCast(u32, buf.items.len - 5));
}
+
+fn genInst(writer: ArrayList(u8).Writer, inst: *Inst) !void {
+ return switch (inst.tag) {
+ .dbg_stmt => {},
+ .ret => genRet(writer, inst.castTag(.ret).?),
+ else => error.TODOImplementMoreWasmCodegen,
+ };
+}
+
+fn genRet(writer: ArrayList(u8).Writer, inst: *Inst.UnOp) !void {
+ switch (inst.operand.tag) {
+ .constant => {
+ const constant = inst.operand.castTag(.constant).?;
+ switch (inst.operand.ty.tag()) {
+ .u32 => {
+ try writer.writeByte(0x41); // i32.const
+ try leb.writeILEB128(writer, constant.val.toUnsignedInt());
+ },
+ .i32 => {
+ try writer.writeByte(0x41); // i32.const
+ try leb.writeILEB128(writer, constant.val.toSignedInt());
+ },
+ .u64 => {
+ try writer.writeByte(0x42); // i64.const
+ try leb.writeILEB128(writer, constant.val.toUnsignedInt());
+ },
+ .i64 => {
+ try writer.writeByte(0x42); // i64.const
+ try leb.writeILEB128(writer, constant.val.toSignedInt());
+ },
+ .f32 => {
+ try writer.writeByte(0x43); // f32.const
+ // TODO: enforce LE byte order
+ try writer.writeAll(mem.asBytes(&constant.val.toFloat(f32)));
+ },
+ .f64 => {
+ try writer.writeByte(0x44); // f64.const
+ // TODO: enforce LE byte order
+ try writer.writeAll(mem.asBytes(&constant.val.toFloat(f64)));
+ },
+ else => return error.TODOImplementMoreWasmCodegen,
+ }
+ },
+ else => return error.TODOImplementMoreWasmCodegen,
+ }
+}
From 60fb50ee5a4a06687bf2f7b8774cc46f73a5b07e Mon Sep 17 00:00:00 2001
From: Isaac Freund
Date: Sun, 16 Aug 2020 02:21:20 +0200
Subject: [PATCH 128/153] stage2/wasm: write exports on flush, cleanup
Exports now have a dirty flag and are rewritten on flush if this flag
has been set.
A couple other minor changes have been made based on Andrew's review.
---
src-self-hosted/Module.zig | 2 +-
src-self-hosted/codegen/wasm.zig | 2 +-
src-self-hosted/link.zig | 16 +++++-----
src-self-hosted/link/MachO.zig | 2 +-
src-self-hosted/link/Wasm.zig | 55 ++++++++++++++++++--------------
5 files changed, 42 insertions(+), 35 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index 5e22058acf..6e33101e76 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -974,7 +974,7 @@ pub fn update(self: *Module) !void {
}
// This is needed before reading the error flags.
- try self.bin_file.flush();
+ try self.bin_file.flush(self);
self.link_error_flags = self.bin_file.errorFlags();
diff --git a/src-self-hosted/codegen/wasm.zig b/src-self-hosted/codegen/wasm.zig
index 8e794ff934..57eb002e82 100644
--- a/src-self-hosted/codegen/wasm.zig
+++ b/src-self-hosted/codegen/wasm.zig
@@ -52,7 +52,7 @@ pub fn genCode(buf: *ArrayList(u8), decl: *Decl) !void {
const writer = buf.writer();
// Reserve space to write the size after generating the code
- try writer.writeAll(&([1]u8{undefined} ** 5));
+ try buf.resize(5);
// Write the size of the locals vec
// TODO: implement locals
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 6a51138785..6ed76ce561 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -168,16 +168,15 @@ pub const File = struct {
}
}
- /// Commit pending changes and write headers.
- pub fn flush(base: *File) !void {
+ pub fn flush(base: *File, module: *Module) !void {
const tracy = trace(@src());
defer tracy.end();
try switch (base.tag) {
- .elf => @fieldParentPtr(Elf, "base", base).flush(),
- .macho => @fieldParentPtr(MachO, "base", base).flush(),
- .c => @fieldParentPtr(C, "base", base).flush(),
- .wasm => @fieldParentPtr(Wasm, "base", base).flush(),
+ .elf => @fieldParentPtr(Elf, "base", base).flush(module),
+ .macho => @fieldParentPtr(MachO, "base", base).flush(module),
+ .c => @fieldParentPtr(C, "base", base).flush(module),
+ .wasm => @fieldParentPtr(Wasm, "base", base).flush(module),
};
}
@@ -285,7 +284,7 @@ pub const File = struct {
};
}
- pub fn flush(self: *File.C) !void {
+ pub fn flush(self: *File.C, module: *Module) !void {
const writer = self.base.file.?.writer();
try writer.writeAll(@embedFile("cbe.h"));
var includes = false;
@@ -1038,7 +1037,8 @@ pub const File = struct {
pub const abbrev_pad1 = 5;
pub const abbrev_parameter = 6;
- pub fn flush(self: *Elf) !void {
+ /// Commit pending changes and write headers.
+ pub fn flush(self: *Elf, module: *Module) !void {
const target_endian = self.base.options.target.cpu.arch.endian();
const foreign_endian = target_endian != std.Target.current.cpu.arch.endian();
const ptr_width_bytes: u8 = self.ptrWidthBytes();
diff --git a/src-self-hosted/link/MachO.zig b/src-self-hosted/link/MachO.zig
index 1b6c395ea0..49e365d203 100644
--- a/src-self-hosted/link/MachO.zig
+++ b/src-self-hosted/link/MachO.zig
@@ -73,7 +73,7 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Mach
}
}
-pub fn flush(self: *MachO) !void {}
+pub fn flush(self: *MachO, module: *Module) !void {}
pub fn deinit(self: *MachO) void {}
diff --git a/src-self-hosted/link/Wasm.zig b/src-self-hosted/link/Wasm.zig
index a62cff15ee..cf1a8c21c2 100644
--- a/src-self-hosted/link/Wasm.zig
+++ b/src-self-hosted/link/Wasm.zig
@@ -59,34 +59,37 @@ pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, option
try file.writeAll(&(spec.magic ++ spec.version));
- wasm.base = .{
- .tag = .wasm,
- .options = options,
- .file = file,
- .allocator = allocator,
- };
-
// TODO: this should vary depending on the section and be less arbitrary
const size = 1024;
const offset = @sizeOf(@TypeOf(spec.magic ++ spec.version));
- wasm.types = try Types.init(file, offset, size);
- wasm.funcs = try Funcs.init(file, offset + size, size, offset + 3 * size, size);
- wasm.exports = try Exports.init(file, offset + 2 * size, size);
- try file.setEndPos(offset + 4 * size);
+ wasm.* = .{
+ .base = .{
+ .tag = .wasm,
+ .options = options,
+ .file = file,
+ .allocator = allocator,
+ },
- wasm.sections = [_]*Section{
- &wasm.types.typesec.section,
- &wasm.funcs.funcsec,
- &wasm.exports.exportsec,
- &wasm.funcs.codesec.section,
+ .types = try Types.init(file, offset, size),
+ .funcs = try Funcs.init(file, offset + size, size, offset + 3 * size, size),
+ .exports = try Exports.init(file, offset + 2 * size, size),
+
+ // These must be ordered as they will appear in the output file
+ .sections = [_]*Section{
+ &wasm.types.typesec.section,
+ &wasm.funcs.funcsec,
+ &wasm.exports.exportsec,
+ &wasm.funcs.codesec.section,
+ },
};
+ try file.setEndPos(offset + 4 * size);
+
return &wasm.base;
}
pub fn deinit(self: *Wasm) void {
- if (self.base.file) |f| f.close();
self.types.deinit();
self.funcs.deinit();
}
@@ -112,7 +115,8 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
decl.fn_link.wasm = .{ .typeidx = typeidx, .funcidx = funcidx };
- try self.exports.writeAll(module);
+ // TODO: we should be more smart and set this only when needed
+ self.exports.dirty = true;
}
pub fn updateDeclExports(
@@ -121,11 +125,7 @@ pub fn updateDeclExports(
decl: *const Module.Decl,
exports: []const *Module.Export,
) !void {
- // TODO: updateDeclExports() may currently be called before updateDecl,
- // presumably due to a bug. For now just rely on the following call
- // being made in updateDecl().
-
- //try self.exports.writeAll(module);
+ self.exports.dirty = true;
}
pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
@@ -138,7 +138,9 @@ pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
}
}
-pub fn flush(self: *Wasm) !void {}
+pub fn flush(self: *Wasm, module: *Module) !void {
+ if (self.exports.dirty) try self.exports.writeAll(module);
+}
/// This struct describes the location of a named section + custom section
/// padding in the output file. This is all the data we need to allow for
@@ -373,11 +375,14 @@ const Exports = struct {
/// Size in bytes of the contents of the section. Does not include
/// the "header" containing the section id and this value.
contents_size: u32,
+ /// If this is true, then exports will be rewritten on flush()
+ dirty: bool,
fn init(file: fs.File, offset: u64, initial_size: u64) !Exports {
return Exports{
.exportsec = (try VecSection.init(spec.exports_id, file, offset, initial_size)).section,
.contents_size = 5,
+ .dirty = false,
};
}
@@ -410,6 +415,8 @@ const Exports = struct {
for (module.decl_exports.entries.items) |entry|
for (entry.value) |e| try writeExport(writer, e);
+
+ self.dirty = false;
}
/// Return the total number of bytes an export will take.
From f9963909a1cf73eaee47102f4e4dcafe5afa3898 Mon Sep 17 00:00:00 2001
From: Isaac Freund
Date: Mon, 17 Aug 2020 23:49:06 +0200
Subject: [PATCH 129/153] stage2/wasm: only free types after func overwrite
Functions which are free'd are not immediately removed from the binary
as this would cause a shifting of function indexes. Instead, they hang
around until they can be overwritten by a new function. This means that
the types associated with these dead functions must also remain until
the function is overwritten to avoid a type mismatch.
---
src-self-hosted/link/Wasm.zig | 31 ++++++++++++++++---------------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/src-self-hosted/link/Wasm.zig b/src-self-hosted/link/Wasm.zig
index cf1a8c21c2..a381cfad57 100644
--- a/src-self-hosted/link/Wasm.zig
+++ b/src-self-hosted/link/Wasm.zig
@@ -33,7 +33,6 @@ pub const base_tag = link.File.Tag.wasm;
pub const FnData = struct {
funcidx: u32,
- typeidx: u32,
};
base: link.File,
@@ -99,7 +98,6 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
return error.TODOImplementNonFnDeclsForWasm;
if (decl.fn_link.wasm) |fn_data| {
- self.types.free(fn_data.typeidx);
self.funcs.free(fn_data.funcidx);
}
@@ -113,7 +111,7 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
try codegen.genCode(&buf, decl);
const funcidx = try self.funcs.new(typeidx, buf.items);
- decl.fn_link.wasm = .{ .typeidx = typeidx, .funcidx = funcidx };
+ decl.fn_link.wasm = .{ .funcidx = funcidx };
// TODO: we should be more smart and set this only when needed
self.exports.dirty = true;
@@ -132,7 +130,6 @@ pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
// TODO: remove this assert when non-function Decls are implemented
assert(decl.typed_value.most_recent.typed_value.ty.zigTypeTag() == .Fn);
if (decl.fn_link.wasm) |fn_data| {
- self.types.free(fn_data.typeidx);
self.funcs.free(fn_data.funcidx);
decl.fn_link.wasm = null;
}
@@ -307,21 +304,20 @@ const Funcs = struct {
/// This section needs special handling to keep the indexes matching with
/// the codesec, so we cant just use a VecSection.
funcsec: Section,
- /// Number of functions listed in the funcsec. Must be kept in sync with
- /// codesec.entries.items.len.
- funcs_count: u32,
+ /// The typeidx stored for each function, indexed by funcidx.
+ func_types: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
codesec: VecSection,
fn init(file: fs.File, funcs_offset: u64, funcs_size: u64, code_offset: u64, code_size: u64) !Funcs {
return Funcs{
.funcsec = (try VecSection.init(spec.funcs_id, file, funcs_offset, funcs_size)).section,
- .funcs_count = 0,
.codesec = try VecSection.init(spec.code_id, file, code_offset, code_size),
};
}
fn deinit(self: *Funcs) void {
const wasm = @fieldParentPtr(Wasm, "funcs", self);
+ self.func_types.deinit(wasm.base.allocator);
self.codesec.deinit(wasm.base.allocator);
}
@@ -333,25 +329,30 @@ const Funcs = struct {
const file = wasm.base.file.?;
const allocator = wasm.base.allocator;
- assert(self.funcs_count == self.codesec.entries.items.len);
+ assert(self.func_types.items.len == self.codesec.entries.items.len);
// TODO: consider nop-padding the code if there is a close but not perfect fit
const funcidx = try self.codesec.addEntry(file, allocator, code);
- if (self.funcs_count < self.codesec.entries.items.len) {
+ if (self.func_types.items.len < self.codesec.entries.items.len) {
// u32 vector length + funcs_count u32s in the vector
- const current = 5 + self.funcs_count * 5;
+ const current = 5 + @intCast(u32, self.func_types.items.len) * 5;
try self.funcsec.resize(file, current, current + 5);
- self.funcs_count += 1;
+ try self.func_types.append(allocator, typeidx);
// Update the size in the section header and the item count of
// the contents vector.
+ const count = @intCast(u32, self.func_types.items.len);
var size_and_count: [10]u8 = undefined;
- leb.writeUnsignedFixed(5, size_and_count[0..5], 5 + self.funcs_count * 5);
- leb.writeUnsignedFixed(5, size_and_count[5..], self.funcs_count);
+ leb.writeUnsignedFixed(5, size_and_count[0..5], 5 + count * 5);
+ leb.writeUnsignedFixed(5, size_and_count[5..], count);
try file.pwriteAll(&size_and_count, self.funcsec.offset + 1);
+ } else {
+ // We are overwriting a dead function and may now free the type
+ wasm.types.free(self.func_types.items[funcidx]);
}
- assert(self.funcs_count == self.codesec.entries.items.len);
+
+ assert(self.func_types.items.len == self.codesec.entries.items.len);
var typeidx_leb: [5]u8 = undefined;
leb.writeUnsignedFixed(5, &typeidx_leb, typeidx);
From 2deb07a001d2a3f23681a368a07544baf2673ccf Mon Sep 17 00:00:00 2001
From: Luna
Date: Sun, 16 Aug 2020 14:30:09 -0300
Subject: [PATCH 130/153] rename signalfd4 to signalfd
---
lib/std/os.zig | 2 +-
lib/std/os/linux.zig | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 123dfc9747..082a09b247 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -5309,7 +5309,7 @@ pub fn ioctl_SIOCGIFINDEX(fd: fd_t, ifr: *ifreq) IoCtl_SIOCGIFINDEX_Error!void {
}
pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: i32) !fd_t {
- const rc = system.signalfd4(fd, mask, flags);
+ const rc = system.signalfd(fd, mask, flags);
switch (errno(rc)) {
0 => return @intCast(fd_t, rc),
EBADF, EINVAL => unreachable,
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index d9f7dda03e..4bb1da9587 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -1200,7 +1200,7 @@ pub fn ioctl(fd: fd_t, request: u32, arg: usize) usize {
return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), request, arg);
}
-pub fn signalfd4(fd: fd_t, mask: *const sigset_t, flags: i32) usize {
+pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: i32) usize {
return syscall4(
.signalfd4,
@bitCast(usize, @as(isize, fd)),
From 293b07df12014cadf8b8b01b279856563523e257 Mon Sep 17 00:00:00 2001
From: Luna
Date: Sun, 16 Aug 2020 14:30:17 -0300
Subject: [PATCH 131/153] add signalfd to std.c.linux
---
lib/std/c/linux.zig | 2 ++
1 file changed, 2 insertions(+)
diff --git a/lib/std/c/linux.zig b/lib/std/c/linux.zig
index c00fc68bca..f37fce7a3e 100644
--- a/lib/std/c/linux.zig
+++ b/lib/std/c/linux.zig
@@ -93,6 +93,8 @@ pub extern "c" fn sendfile(
pub extern "c" fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: c_uint) isize;
+pub extern "c" fn signalfd(fd: fd_t, mask: *const sigset_t, flags: i32) usize;
+
pub const pthread_attr_t = extern struct {
__size: [56]u8,
__align: c_long,
From ae2c88754df436d4b1d748405e6ddd4b8ee61774 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 17 Aug 2020 16:19:57 -0700
Subject: [PATCH 132/153] std: signalfd: fix the types of things; add test
---
lib/std/c/linux.zig | 2 +-
lib/std/os.zig | 2 +-
lib/std/os/linux.zig | 10 ++--------
lib/std/os/test.zig | 6 ++++++
4 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/lib/std/c/linux.zig b/lib/std/c/linux.zig
index f37fce7a3e..b9f29b211d 100644
--- a/lib/std/c/linux.zig
+++ b/lib/std/c/linux.zig
@@ -93,7 +93,7 @@ pub extern "c" fn sendfile(
pub extern "c" fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: c_uint) isize;
-pub extern "c" fn signalfd(fd: fd_t, mask: *const sigset_t, flags: i32) usize;
+pub extern "c" fn signalfd(fd: fd_t, mask: *const sigset_t, flags: c_uint) c_int;
pub const pthread_attr_t = extern struct {
__size: [56]u8,
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 082a09b247..828f056148 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -5308,7 +5308,7 @@ pub fn ioctl_SIOCGIFINDEX(fd: fd_t, ifr: *ifreq) IoCtl_SIOCGIFINDEX_Error!void {
}
}
-pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: i32) !fd_t {
+pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t {
const rc = system.signalfd(fd, mask, flags);
switch (errno(rc)) {
0 => return @intCast(fd_t, rc),
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 4bb1da9587..7fe0ba00ae 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -1200,14 +1200,8 @@ pub fn ioctl(fd: fd_t, request: u32, arg: usize) usize {
return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), request, arg);
}
-pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: i32) usize {
- return syscall4(
- .signalfd4,
- @bitCast(usize, @as(isize, fd)),
- @ptrToInt(mask),
- @bitCast(usize, @as(usize, NSIG / 8)),
- @intCast(usize, flags),
- );
+pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) usize {
+ return syscall4(.signalfd4, @bitCast(usize, @as(isize, fd)), @ptrToInt(mask), NSIG / 8, flags);
}
pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) usize {
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index b17ddfaf7e..dc8a89c688 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -522,3 +522,9 @@ test "fcntl" {
expect((flags & os.FD_CLOEXEC) != 0);
}
}
+
+test "signalfd" {
+ if (builtin.os.tag != .linux)
+ return error.SkipZigTest;
+ _ = std.os.signalfd;
+}
From 80e70735fb9a56279ec990bba17f044a770b339a Mon Sep 17 00:00:00 2001
From: Sergey Poznyak
Date: Wed, 20 May 2020 07:04:22 +0300
Subject: [PATCH 133/153] add `zig info` command
---
src-self-hosted/main.zig | 6 +-
src-self-hosted/print_info.zig | 192 +++++++++++++++++++++++++++++++++
src-self-hosted/stage2.zig | 32 +++++-
src/main.cpp | 3 +
src/stage2.cpp | 5 +
src/stage2.h | 3 +
6 files changed, 236 insertions(+), 5 deletions(-)
create mode 100644 src-self-hosted/print_info.zig
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index d63ea3a757..8e4764f824 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -30,6 +30,7 @@ const usage =
\\ build-obj [source] Create object from source or assembly
\\ fmt [source] Parse file and render in canonical zig format
\\ targets List available compilation targets
+ \\ info Print lib path, std path, compiler id and version
\\ version Print version number and exit
\\ zen Print zen of zig and exit
\\
@@ -95,8 +96,9 @@ pub fn main() !void {
const stdout = io.getStdOut().outStream();
return @import("print_targets.zig").cmdTargets(arena, cmd_args, stdout, info.target);
} else if (mem.eql(u8, cmd, "version")) {
- std.io.getStdOut().writeAll(build_options.version ++ "\n") catch process.exit(1);
- return;
+ try std.io.getStdOut().writeAll(build_options.version ++ "\n");
+ } else if (mem.eql(u8, cmd, "info")) {
+ try @import("print_info.zig").cmdInfo(arena, cmd_args, .SelfHosted, io.getStdOut().outStream());
} else if (mem.eql(u8, cmd, "zen")) {
try io.getStdOut().writeAll(info_zen);
} else if (mem.eql(u8, cmd, "help")) {
diff --git a/src-self-hosted/print_info.zig b/src-self-hosted/print_info.zig
new file mode 100644
index 0000000000..dadf7efbfe
--- /dev/null
+++ b/src-self-hosted/print_info.zig
@@ -0,0 +1,192 @@
+const builtin = @import("builtin");
+const std = @import("std");
+const process = std.process;
+const mem = std.mem;
+const unicode = std.unicode;
+const io = std.io;
+const fs = std.fs;
+const os = std.os;
+const json = std.json;
+const StringifyOptions = json.StringifyOptions;
+const Allocator = std.mem.Allocator;
+const introspect = @import("introspect.zig");
+
+const usage_info =
+ \\Usage: zig info [options]
+ \\
+ \\ Outputs path to zig lib dir, std dir and the global cache dir.
+ \\
+ \\Options:
+ \\ --help Print this help and exit
+ \\ --format [text|json] Choose output format (defaults to text)
+ \\
+;
+
+pub const CompilerInfo = struct {
+ // TODO: port compiler id hash from cpp
+ // /// Compiler id hash
+ // id: []const u8,
+
+ // /// Compiler version
+ // version: []const u8,
+ /// Path to lib/
+ lib_dir: []const u8,
+
+ /// Path to lib/zig/std
+ std_dir: []const u8,
+
+ /// Path to the global cache dir
+ global_cache_dir: []const u8,
+
+ const CompilerType = enum {
+ Stage1,
+ SelfHosted,
+ };
+
+ pub fn getVersionString() []const u8 {
+ // TODO: get this from build.zig somehow
+ return "0.6.0";
+ }
+
+ pub fn getCacheDir(allocator: *Allocator, compiler_type: CompilerType) ![]u8 {
+ const global_cache_dir = try getAppCacheDir(allocator, "zig");
+ defer allocator.free(global_cache_dir);
+
+ const postfix = switch (compiler_type) {
+ .SelfHosted => "self_hosted",
+ .Stage1 => "stage1",
+ };
+ return try fs.path.join(allocator, &[_][]const u8{ global_cache_dir, postfix }); // stage1 compiler uses $cache_dir/zig/stage1
+ }
+
+ // TODO: add CacheType argument here to make it return correct cache dir for stage1
+ pub fn init(allocator: *Allocator, compiler_type: CompilerType) !CompilerInfo {
+ const zig_lib_dir = try introspect.resolveZigLibDir(allocator);
+ errdefer allocator.free(zig_lib_dir);
+
+ const zig_std_dir = try fs.path.join(allocator, &[_][]const u8{ zig_lib_dir, "std" });
+ errdefer allocator.free(zig_std_dir);
+
+ const cache_dir = try CompilerInfo.getCacheDir(allocator, compiler_type);
+ errdefer allocator.free(cache_dir);
+
+ return CompilerInfo{
+ .lib_dir = zig_lib_dir,
+ .std_dir = zig_std_dir,
+ .global_cache_dir = cache_dir,
+ };
+ }
+
+ pub fn toString(self: *CompilerInfo, out_stream: var) !void {
+ inline for (@typeInfo(CompilerInfo).Struct.fields) |field| {
+ try std.fmt.format(out_stream, "{: <16}\t{: <}\n", .{ field.name, @field(self, field.name) });
+ }
+ }
+
+ pub fn deinit(self: *CompilerInfo, allocator: *Allocator) void {
+ allocator.free(self.lib_dir);
+ allocator.free(self.std_dir);
+ allocator.free(self.global_cache_dir);
+ }
+};
+
+pub fn cmdInfo(allocator: *Allocator, cmd_args: []const []const u8, compiler_type: CompilerInfo.CompilerType, stdout: var) !void {
+ var info = try CompilerInfo.init(allocator, compiler_type);
+ defer info.deinit(allocator);
+
+ var bos = io.bufferedOutStream(stdout);
+ const bos_stream = bos.outStream();
+
+ var json_format = false;
+
+ var i: usize = 0;
+ while (i < cmd_args.len) : (i += 1) {
+ const arg = cmd_args[i];
+ if (mem.eql(u8, arg, "--format")) {
+ if (cmd_args.len <= i + 1) {
+ std.debug.warn("expected [text|json] after --format\n", .{});
+ process.exit(1);
+ }
+ const format = cmd_args[i + 1];
+ i += 1;
+ if (mem.eql(u8, format, "text")) {
+ json_format = false;
+ } else if (mem.eql(u8, format, "json")) {
+ json_format = true;
+ } else {
+ std.debug.warn("expected [text|json] after --format, found '{}'\n", .{format});
+ process.exit(1);
+ }
+ } else if (mem.eql(u8, arg, "--help")) {
+ try stdout.writeAll(usage_info);
+ return;
+ } else {
+ std.debug.warn("unrecognized parameter: '{}'\n", .{arg});
+ process.exit(1);
+ }
+ }
+
+ if (json_format) {
+ try json.stringify(info, StringifyOptions{
+ .whitespace = StringifyOptions.Whitespace{ .indent = .{ .Space = 2 } },
+ }, bos_stream);
+ try bos_stream.writeByte('\n');
+ } else {
+ try info.toString(bos_stream);
+ }
+
+ try bos.flush();
+}
+
+pub const GetAppCacheDirError = error{
+ OutOfMemory,
+ AppCacheDirUnavailable,
+};
+
+// Copied from fs.getAppDataDir, but changed it to return .cache/ dir on linux.
+// This is the same behavior as the current zig compiler global cache resolution.
+fn getAppCacheDir(allocator: *Allocator, appname: []const u8) GetAppCacheDirError![]u8 {
+ switch (builtin.os.tag) {
+ .windows => {
+ var dir_path_ptr: [*:0]u16 = undefined;
+ switch (os.windows.shell32.SHGetKnownFolderPath(
+ &os.windows.FOLDERID_LocalAppData,
+ os.windows.KF_FLAG_CREATE,
+ null,
+ &dir_path_ptr,
+ )) {
+ os.windows.S_OK => {
+ defer os.windows.ole32.CoTaskMemFree(@ptrCast(*c_void, dir_path_ptr));
+ const global_dir = unicode.utf16leToUtf8Alloc(allocator, mem.spanZ(dir_path_ptr)) catch |err| switch (err) {
+ error.UnexpectedSecondSurrogateHalf => return error.AppCacheDirUnavailable,
+ error.ExpectedSecondSurrogateHalf => return error.AppCacheDirUnavailable,
+ error.DanglingSurrogateHalf => return error.AppCacheDirUnavailable,
+ error.OutOfMemory => return error.OutOfMemory,
+ };
+ defer allocator.free(global_dir);
+ return fs.path.join(allocator, &[_][]const u8{ global_dir, appname });
+ },
+ os.windows.E_OUTOFMEMORY => return error.OutOfMemory,
+ else => return error.AppCacheDirUnavailable,
+ }
+ },
+ .macosx => {
+ const home_dir = os.getenv("HOME") orelse {
+ // TODO look in /etc/passwd
+ return error.AppCacheDirUnavailable;
+ };
+ return fs.path.join(allocator, &[_][]const u8{ home_dir, "Library", "Application Support", appname });
+ },
+ .linux, .freebsd, .netbsd, .dragonfly => {
+ if (os.getenv("XDG_CACHE_HOME")) |cache_home| {
+ return fs.path.join(allocator, &[_][]const u8{ cache_home, appname });
+ }
+
+ const home_dir = os.getenv("HOME") orelse {
+ return error.AppCacheDirUnavailable;
+ };
+ return fs.path.join(allocator, &[_][]const u8{ home_dir, ".cache", appname });
+ },
+ else => @compileError("Unsupported OS"),
+ }
+}
diff --git a/src-self-hosted/stage2.zig b/src-self-hosted/stage2.zig
index d96f27a84b..c73fc97904 100644
--- a/src-self-hosted/stage2.zig
+++ b/src-self-hosted/stage2.zig
@@ -179,8 +179,7 @@ export fn stage2_fmt(argc: c_int, argv: [*]const [*:0]const u8) c_int {
return 0;
}
-fn fmtMain(argc: c_int, argv: [*]const [*:0]const u8) !void {
- const allocator = std.heap.c_allocator;
+fn argvToArrayList(allocator: *Allocator, argc: c_int, argv: [*]const [*:0]const u8) !ArrayList([]const u8) {
var args_list = std.ArrayList([]const u8).init(allocator);
const argc_usize = @intCast(usize, argc);
var arg_i: usize = 0;
@@ -188,8 +187,16 @@ fn fmtMain(argc: c_int, argv: [*]const [*:0]const u8) !void {
try args_list.append(mem.spanZ(argv[arg_i]));
}
- const args = args_list.span()[2..];
+ return args_list;
+}
+fn fmtMain(argc: c_int, argv: [*]const [*:0]const u8) !void {
+ const allocator = std.heap.c_allocator;
+
+ var args_list = try argvToArrayList(allocator, argc, argv);
+ defer args_list.deinit();
+
+ const args = args_list.span()[2..];
return self_hosted_main.cmdFmt(allocator, args);
}
@@ -387,6 +394,25 @@ fn detectNativeCpuWithLLVM(
return result;
}
+export fn stage2_info(argc: c_int, argv: [*]const [*:0]const u8) c_int {
+ const allocator = std.heap.c_allocator;
+
+ var args_list = argvToArrayList(allocator, argc, argv) catch |err| {
+ std.debug.warn("unable to parse arguments: {}\n", .{@errorName(err)});
+ return -1;
+ };
+ defer args_list.deinit();
+
+ const args = args_list.span()[2..];
+
+ @import("print_info.zig").cmdInfo(allocator, args, .Stage1, std.io.getStdOut().outStream()) catch |err| {
+ std.debug.warn("unable to print info: {}\n", .{@errorName(err)});
+ return -1;
+ };
+
+ return 0;
+}
+
// ABI warning
export fn stage2_cmd_targets(
zig_triple: ?[*:0]const u8,
diff --git a/src/main.cpp b/src/main.cpp
index 6d5f1f3f76..23c5bcd02b 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -47,6 +47,7 @@ static int print_full_usage(const char *arg0, FILE *file, int return_code) {
" translate-c [source] convert c code to zig code\n"
" targets list available compilation targets\n"
" test [source] create and run a test build\n"
+ " info print lib path, std path, compiler id and version\n"
" version print version number and exit\n"
" zen print zen of zig and exit\n"
"\n"
@@ -582,6 +583,8 @@ static int main0(int argc, char **argv) {
return (term.how == TerminationIdClean) ? term.code : -1;
} else if (argc >= 2 && strcmp(argv[1], "fmt") == 0) {
return stage2_fmt(argc, argv);
+ } else if (argc >= 2 && strcmp(argv[1], "info") == 0) {
+ return stage2_info(argc, argv);
} else if (argc >= 2 && (strcmp(argv[1], "cc") == 0 || strcmp(argv[1], "c++") == 0)) {
emit_h = false;
strip = true;
diff --git a/src/stage2.cpp b/src/stage2.cpp
index 887ad461bc..ef2b2348e3 100644
--- a/src/stage2.cpp
+++ b/src/stage2.cpp
@@ -27,6 +27,11 @@ void stage2_zen(const char **ptr, size_t *len) {
stage2_panic(msg, strlen(msg));
}
+int stage2_info(int argc, char** argv) {
+ const char *msg = "stage0 called stage2_info";
+ stage2_panic(msg, strlen(msg));
+}
+
void stage2_attach_segfault_handler(void) { }
void stage2_panic(const char *ptr, size_t len) {
diff --git a/src/stage2.h b/src/stage2.h
index 7875ef26c3..9ec67ea3f3 100644
--- a/src/stage2.h
+++ b/src/stage2.h
@@ -141,6 +141,9 @@ ZIG_EXTERN_C void stage2_render_ast(struct Stage2Ast *ast, FILE *output_file);
// ABI warning
ZIG_EXTERN_C void stage2_zen(const char **ptr, size_t *len);
+// ABI warning
+ZIG_EXTERN_C int stage2_info(int argc, char **argv);
+
// ABI warning
ZIG_EXTERN_C void stage2_attach_segfault_handler(void);
From 9f44284ad5ed5ae94b7a4bc1f5972c60c1739bb6 Mon Sep 17 00:00:00 2001
From: Isaac Freund
Date: Tue, 18 Aug 2020 01:46:19 +0200
Subject: [PATCH 134/153] stage2/wasm: add basic test cases
---
src-self-hosted/link.zig | 12 +++++++-----
test/stage2/compare_output.zig | 36 ++++++++++++++++++++++++++++++++++
2 files changed, 43 insertions(+), 5 deletions(-)
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 6ed76ce561..cbc376f7ab 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -100,11 +100,13 @@ pub const File = struct {
}
pub fn makeExecutable(base: *File) !void {
- std.debug.assert(base.tag != .c);
- if (base.file) |f| {
- f.close();
- base.file = null;
-
+ switch (base.tag) {
+ .c => unreachable,
+ .wasm => {},
+ else => if (base.file) |f| {
+ f.close();
+ base.file = null;
+ },
}
}
diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig
index 83dfddb3e8..4208cc3911 100644
--- a/test/stage2/compare_output.zig
+++ b/test/stage2/compare_output.zig
@@ -12,6 +12,11 @@ const linux_riscv64 = std.zig.CrossTarget{
.os_tag = .linux,
};
+const wasi = std.zig.CrossTarget{
+ .cpu_arch = .wasm32,
+ .os_tag = .wasi,
+};
+
pub fn addCases(ctx: *TestContext) !void {
{
var case = ctx.exe("hello world with updates", linux_x64);
@@ -539,4 +544,35 @@ pub fn addCases(ctx: *TestContext) !void {
"",
);
}
+
+ {
+ var case = ctx.exe("wasm returns", wasi);
+
+ case.addCompareOutput(
+ \\export fn _start() u32 {
+ \\ return 42;
+ \\}
+ ,
+ "42\n",
+ );
+
+ case.addCompareOutput(
+ \\export fn _start() i64 {
+ \\ return 42;
+ \\}
+ ,
+ "42\n",
+ );
+
+ case.addCompareOutput(
+ \\export fn _start() f32 {
+ \\ return 42.0;
+ \\}
+ ,
+ // This is what you get when you take the bits of the IEEE-754
+ // representation of 42.0 and reinterpret them as an unsigned
+ // integer. Guess that's a bug in wasmtime.
+ "1109917696\n",
+ );
+ }
}
From 502d413621a8c2de3c6987e7d5a654392e2eab17 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 17 Aug 2020 17:06:23 -0700
Subject: [PATCH 135/153] simplify `zig info` and rename it to `zig env`
also add version to it
---
CMakeLists.txt | 7 ++
src-self-hosted/main.zig | 6 +-
src-self-hosted/print_env.zig | 34 ++++++
src-self-hosted/print_info.zig | 192 ------------------------------
src-self-hosted/print_targets.zig | 2 +-
src-self-hosted/stage2.zig | 8 +-
src/config.zig.in | 3 +
src/main.cpp | 6 +-
src/stage2.cpp | 4 +-
src/stage2.h | 2 +-
10 files changed, 58 insertions(+), 206 deletions(-)
create mode 100644 src-self-hosted/print_env.zig
delete mode 100644 src-self-hosted/print_info.zig
create mode 100644 src/config.zig.in
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4679185573..ceaecf5552 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -326,10 +326,15 @@ set(LIBUNWIND_FILES_DEST "${ZIG_LIB_DIR}/libunwind")
set(LIBCXX_FILES_DEST "${ZIG_LIB_DIR}/libcxx")
set(ZIG_STD_DEST "${ZIG_LIB_DIR}/std")
set(ZIG_CONFIG_H_OUT "${CMAKE_BINARY_DIR}/config.h")
+set(ZIG_CONFIG_ZIG_OUT "${CMAKE_BINARY_DIR}/config.zig")
configure_file (
"${CMAKE_SOURCE_DIR}/src/config.h.in"
"${ZIG_CONFIG_H_OUT}"
)
+configure_file (
+ "${CMAKE_SOURCE_DIR}/src/config.zig.in"
+ "${ZIG_CONFIG_ZIG_OUT}"
+)
include_directories(
${CMAKE_SOURCE_DIR}
@@ -472,6 +477,8 @@ set(BUILD_LIBSTAGE2_ARGS "build-lib"
--bundle-compiler-rt
-fPIC
-lc
+ --pkg-begin build_options "${ZIG_CONFIG_ZIG_OUT}"
+ --pkg-end
)
if("${ZIG_TARGET_TRIPLE}" STREQUAL "native")
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 8e4764f824..d44a7f5305 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -30,7 +30,7 @@ const usage =
\\ build-obj [source] Create object from source or assembly
\\ fmt [source] Parse file and render in canonical zig format
\\ targets List available compilation targets
- \\ info Print lib path, std path, compiler id and version
+ \\ env Print lib path, std path, compiler id and version
\\ version Print version number and exit
\\ zen Print zen of zig and exit
\\
@@ -97,8 +97,8 @@ pub fn main() !void {
return @import("print_targets.zig").cmdTargets(arena, cmd_args, stdout, info.target);
} else if (mem.eql(u8, cmd, "version")) {
try std.io.getStdOut().writeAll(build_options.version ++ "\n");
- } else if (mem.eql(u8, cmd, "info")) {
- try @import("print_info.zig").cmdInfo(arena, cmd_args, .SelfHosted, io.getStdOut().outStream());
+ } else if (mem.eql(u8, cmd, "env")) {
+ try @import("print_env.zig").cmdEnv(arena, cmd_args, io.getStdOut().outStream());
} else if (mem.eql(u8, cmd, "zen")) {
try io.getStdOut().writeAll(info_zen);
} else if (mem.eql(u8, cmd, "help")) {
diff --git a/src-self-hosted/print_env.zig b/src-self-hosted/print_env.zig
new file mode 100644
index 0000000000..444462789c
--- /dev/null
+++ b/src-self-hosted/print_env.zig
@@ -0,0 +1,34 @@
+const std = @import("std");
+const build_options = @import("build_options");
+const introspect = @import("introspect.zig");
+const Allocator = std.mem.Allocator;
+
+pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: anytype) !void {
+ const zig_lib_dir = introspect.resolveZigLibDir(gpa) catch |err| {
+ std.debug.print("unable to find zig installation directory: {}\n", .{@errorName(err)});
+ std.process.exit(1);
+ };
+ defer gpa.free(zig_lib_dir);
+
+ const zig_std_dir = try std.fs.path.join(gpa, &[_][]const u8{ zig_lib_dir, "std" });
+ defer gpa.free(zig_std_dir);
+
+ var bos = std.io.bufferedOutStream(stdout);
+ const bos_stream = bos.outStream();
+
+ var jws = std.json.WriteStream(@TypeOf(bos_stream), 6).init(bos_stream);
+ try jws.beginObject();
+
+ try jws.objectField("lib_dir");
+ try jws.emitString(zig_lib_dir);
+
+ try jws.objectField("std_dir");
+ try jws.emitString(zig_std_dir);
+
+ try jws.objectField("version");
+ try jws.emitString(build_options.version);
+
+ try jws.endObject();
+ try bos_stream.writeByte('\n');
+ try bos.flush();
+}
diff --git a/src-self-hosted/print_info.zig b/src-self-hosted/print_info.zig
deleted file mode 100644
index dadf7efbfe..0000000000
--- a/src-self-hosted/print_info.zig
+++ /dev/null
@@ -1,192 +0,0 @@
-const builtin = @import("builtin");
-const std = @import("std");
-const process = std.process;
-const mem = std.mem;
-const unicode = std.unicode;
-const io = std.io;
-const fs = std.fs;
-const os = std.os;
-const json = std.json;
-const StringifyOptions = json.StringifyOptions;
-const Allocator = std.mem.Allocator;
-const introspect = @import("introspect.zig");
-
-const usage_info =
- \\Usage: zig info [options]
- \\
- \\ Outputs path to zig lib dir, std dir and the global cache dir.
- \\
- \\Options:
- \\ --help Print this help and exit
- \\ --format [text|json] Choose output format (defaults to text)
- \\
-;
-
-pub const CompilerInfo = struct {
- // TODO: port compiler id hash from cpp
- // /// Compiler id hash
- // id: []const u8,
-
- // /// Compiler version
- // version: []const u8,
- /// Path to lib/
- lib_dir: []const u8,
-
- /// Path to lib/zig/std
- std_dir: []const u8,
-
- /// Path to the global cache dir
- global_cache_dir: []const u8,
-
- const CompilerType = enum {
- Stage1,
- SelfHosted,
- };
-
- pub fn getVersionString() []const u8 {
- // TODO: get this from build.zig somehow
- return "0.6.0";
- }
-
- pub fn getCacheDir(allocator: *Allocator, compiler_type: CompilerType) ![]u8 {
- const global_cache_dir = try getAppCacheDir(allocator, "zig");
- defer allocator.free(global_cache_dir);
-
- const postfix = switch (compiler_type) {
- .SelfHosted => "self_hosted",
- .Stage1 => "stage1",
- };
- return try fs.path.join(allocator, &[_][]const u8{ global_cache_dir, postfix }); // stage1 compiler uses $cache_dir/zig/stage1
- }
-
- // TODO: add CacheType argument here to make it return correct cache dir for stage1
- pub fn init(allocator: *Allocator, compiler_type: CompilerType) !CompilerInfo {
- const zig_lib_dir = try introspect.resolveZigLibDir(allocator);
- errdefer allocator.free(zig_lib_dir);
-
- const zig_std_dir = try fs.path.join(allocator, &[_][]const u8{ zig_lib_dir, "std" });
- errdefer allocator.free(zig_std_dir);
-
- const cache_dir = try CompilerInfo.getCacheDir(allocator, compiler_type);
- errdefer allocator.free(cache_dir);
-
- return CompilerInfo{
- .lib_dir = zig_lib_dir,
- .std_dir = zig_std_dir,
- .global_cache_dir = cache_dir,
- };
- }
-
- pub fn toString(self: *CompilerInfo, out_stream: var) !void {
- inline for (@typeInfo(CompilerInfo).Struct.fields) |field| {
- try std.fmt.format(out_stream, "{: <16}\t{: <}\n", .{ field.name, @field(self, field.name) });
- }
- }
-
- pub fn deinit(self: *CompilerInfo, allocator: *Allocator) void {
- allocator.free(self.lib_dir);
- allocator.free(self.std_dir);
- allocator.free(self.global_cache_dir);
- }
-};
-
-pub fn cmdInfo(allocator: *Allocator, cmd_args: []const []const u8, compiler_type: CompilerInfo.CompilerType, stdout: var) !void {
- var info = try CompilerInfo.init(allocator, compiler_type);
- defer info.deinit(allocator);
-
- var bos = io.bufferedOutStream(stdout);
- const bos_stream = bos.outStream();
-
- var json_format = false;
-
- var i: usize = 0;
- while (i < cmd_args.len) : (i += 1) {
- const arg = cmd_args[i];
- if (mem.eql(u8, arg, "--format")) {
- if (cmd_args.len <= i + 1) {
- std.debug.warn("expected [text|json] after --format\n", .{});
- process.exit(1);
- }
- const format = cmd_args[i + 1];
- i += 1;
- if (mem.eql(u8, format, "text")) {
- json_format = false;
- } else if (mem.eql(u8, format, "json")) {
- json_format = true;
- } else {
- std.debug.warn("expected [text|json] after --format, found '{}'\n", .{format});
- process.exit(1);
- }
- } else if (mem.eql(u8, arg, "--help")) {
- try stdout.writeAll(usage_info);
- return;
- } else {
- std.debug.warn("unrecognized parameter: '{}'\n", .{arg});
- process.exit(1);
- }
- }
-
- if (json_format) {
- try json.stringify(info, StringifyOptions{
- .whitespace = StringifyOptions.Whitespace{ .indent = .{ .Space = 2 } },
- }, bos_stream);
- try bos_stream.writeByte('\n');
- } else {
- try info.toString(bos_stream);
- }
-
- try bos.flush();
-}
-
-pub const GetAppCacheDirError = error{
- OutOfMemory,
- AppCacheDirUnavailable,
-};
-
-// Copied from fs.getAppDataDir, but changed it to return .cache/ dir on linux.
-// This is the same behavior as the current zig compiler global cache resolution.
-fn getAppCacheDir(allocator: *Allocator, appname: []const u8) GetAppCacheDirError![]u8 {
- switch (builtin.os.tag) {
- .windows => {
- var dir_path_ptr: [*:0]u16 = undefined;
- switch (os.windows.shell32.SHGetKnownFolderPath(
- &os.windows.FOLDERID_LocalAppData,
- os.windows.KF_FLAG_CREATE,
- null,
- &dir_path_ptr,
- )) {
- os.windows.S_OK => {
- defer os.windows.ole32.CoTaskMemFree(@ptrCast(*c_void, dir_path_ptr));
- const global_dir = unicode.utf16leToUtf8Alloc(allocator, mem.spanZ(dir_path_ptr)) catch |err| switch (err) {
- error.UnexpectedSecondSurrogateHalf => return error.AppCacheDirUnavailable,
- error.ExpectedSecondSurrogateHalf => return error.AppCacheDirUnavailable,
- error.DanglingSurrogateHalf => return error.AppCacheDirUnavailable,
- error.OutOfMemory => return error.OutOfMemory,
- };
- defer allocator.free(global_dir);
- return fs.path.join(allocator, &[_][]const u8{ global_dir, appname });
- },
- os.windows.E_OUTOFMEMORY => return error.OutOfMemory,
- else => return error.AppCacheDirUnavailable,
- }
- },
- .macosx => {
- const home_dir = os.getenv("HOME") orelse {
- // TODO look in /etc/passwd
- return error.AppCacheDirUnavailable;
- };
- return fs.path.join(allocator, &[_][]const u8{ home_dir, "Library", "Application Support", appname });
- },
- .linux, .freebsd, .netbsd, .dragonfly => {
- if (os.getenv("XDG_CACHE_HOME")) |cache_home| {
- return fs.path.join(allocator, &[_][]const u8{ cache_home, appname });
- }
-
- const home_dir = os.getenv("HOME") orelse {
- return error.AppCacheDirUnavailable;
- };
- return fs.path.join(allocator, &[_][]const u8{ home_dir, ".cache", appname });
- },
- else => @compileError("Unsupported OS"),
- }
-}
diff --git a/src-self-hosted/print_targets.zig b/src-self-hosted/print_targets.zig
index 34eda71ccf..0fe755ffb4 100644
--- a/src-self-hosted/print_targets.zig
+++ b/src-self-hosted/print_targets.zig
@@ -67,7 +67,7 @@ pub fn cmdTargets(
) !void {
const available_glibcs = blk: {
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch |err| {
- std.debug.warn("unable to find zig installation directory: {}\n", .{@errorName(err)});
+ std.debug.print("unable to find zig installation directory: {}\n", .{@errorName(err)});
std.process.exit(1);
};
defer allocator.free(zig_lib_dir);
diff --git a/src-self-hosted/stage2.zig b/src-self-hosted/stage2.zig
index c73fc97904..3e1e80892c 100644
--- a/src-self-hosted/stage2.zig
+++ b/src-self-hosted/stage2.zig
@@ -394,19 +394,19 @@ fn detectNativeCpuWithLLVM(
return result;
}
-export fn stage2_info(argc: c_int, argv: [*]const [*:0]const u8) c_int {
+export fn stage2_env(argc: c_int, argv: [*]const [*:0]const u8) c_int {
const allocator = std.heap.c_allocator;
var args_list = argvToArrayList(allocator, argc, argv) catch |err| {
- std.debug.warn("unable to parse arguments: {}\n", .{@errorName(err)});
+ std.debug.print("unable to parse arguments: {}\n", .{@errorName(err)});
return -1;
};
defer args_list.deinit();
const args = args_list.span()[2..];
- @import("print_info.zig").cmdInfo(allocator, args, .Stage1, std.io.getStdOut().outStream()) catch |err| {
- std.debug.warn("unable to print info: {}\n", .{@errorName(err)});
+ @import("print_env.zig").cmdEnv(allocator, args, std.io.getStdOut().outStream()) catch |err| {
+ std.debug.print("unable to print info: {}\n", .{@errorName(err)});
return -1;
};
diff --git a/src/config.zig.in b/src/config.zig.in
new file mode 100644
index 0000000000..ccb618df2d
--- /dev/null
+++ b/src/config.zig.in
@@ -0,0 +1,3 @@
+pub const version: []const u8 = "@ZIG_VERSION@";
+pub const log_scopes: []const []const u8 = &[_][]const u8{};
+pub const enable_tracy = false;
diff --git a/src/main.cpp b/src/main.cpp
index 23c5bcd02b..e2f6a82a12 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -38,6 +38,7 @@ static int print_full_usage(const char *arg0, FILE *file, int return_code) {
" builtin show the source code of @import(\"builtin\")\n"
" cc use Zig as a drop-in C compiler\n"
" c++ use Zig as a drop-in C++ compiler\n"
+ " env print lib path, std path, compiler id and version\n"
" fmt parse files and render in canonical zig format\n"
" id print the base64-encoded compiler id\n"
" init-exe initialize a `zig build` application in the cwd\n"
@@ -47,7 +48,6 @@ static int print_full_usage(const char *arg0, FILE *file, int return_code) {
" translate-c [source] convert c code to zig code\n"
" targets list available compilation targets\n"
" test [source] create and run a test build\n"
- " info print lib path, std path, compiler id and version\n"
" version print version number and exit\n"
" zen print zen of zig and exit\n"
"\n"
@@ -583,8 +583,8 @@ static int main0(int argc, char **argv) {
return (term.how == TerminationIdClean) ? term.code : -1;
} else if (argc >= 2 && strcmp(argv[1], "fmt") == 0) {
return stage2_fmt(argc, argv);
- } else if (argc >= 2 && strcmp(argv[1], "info") == 0) {
- return stage2_info(argc, argv);
+ } else if (argc >= 2 && strcmp(argv[1], "env") == 0) {
+ return stage2_env(argc, argv);
} else if (argc >= 2 && (strcmp(argv[1], "cc") == 0 || strcmp(argv[1], "c++") == 0)) {
emit_h = false;
strip = true;
diff --git a/src/stage2.cpp b/src/stage2.cpp
index ef2b2348e3..6c010de84f 100644
--- a/src/stage2.cpp
+++ b/src/stage2.cpp
@@ -27,8 +27,8 @@ void stage2_zen(const char **ptr, size_t *len) {
stage2_panic(msg, strlen(msg));
}
-int stage2_info(int argc, char** argv) {
- const char *msg = "stage0 called stage2_info";
+int stage2_env(int argc, char** argv) {
+ const char *msg = "stage0 called stage2_env";
stage2_panic(msg, strlen(msg));
}
diff --git a/src/stage2.h b/src/stage2.h
index 9ec67ea3f3..38a1f77d46 100644
--- a/src/stage2.h
+++ b/src/stage2.h
@@ -142,7 +142,7 @@ ZIG_EXTERN_C void stage2_render_ast(struct Stage2Ast *ast, FILE *output_file);
ZIG_EXTERN_C void stage2_zen(const char **ptr, size_t *len);
// ABI warning
-ZIG_EXTERN_C int stage2_info(int argc, char **argv);
+ZIG_EXTERN_C int stage2_env(int argc, char **argv);
// ABI warning
ZIG_EXTERN_C void stage2_attach_segfault_handler(void);
From 18ac9987670b01cd254dc05031e1758dacbc7265 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 17 Aug 2020 17:52:13 -0700
Subject: [PATCH 136/153] zig env: add global_cache_dir field
---
src-self-hosted/introspect.zig | 16 +++++++++++++---
src-self-hosted/print_env.zig | 6 ++++++
2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index 0d4c14a9ae..46405a2b70 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -73,7 +73,17 @@ pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
};
}
-/// Caller must free result
-pub fn resolveZigCacheDir(allocator: *mem.Allocator) ![]u8 {
- return std.mem.dupe(allocator, u8, "zig-cache");
+/// Caller owns returned memory.
+pub fn resolveGlobalCacheDir(allocator: *mem.Allocator) ![]u8 {
+ const appname = "zig";
+
+ if (std.Target.current.os.tag != .windows) {
+ if (std.os.getenv("XDG_CACHE_HOME")) |cache_root| {
+ return fs.path.join(allocator, &[_][]const u8{ cache_root, appname });
+ } else if (std.os.getenv("HOME")) |home| {
+ return fs.path.join(allocator, &[_][]const u8{ home, ".cache", appname });
+ }
+ }
+
+ return fs.getAppDataDir(allocator, appname);
}
diff --git a/src-self-hosted/print_env.zig b/src-self-hosted/print_env.zig
index 444462789c..907e9e234d 100644
--- a/src-self-hosted/print_env.zig
+++ b/src-self-hosted/print_env.zig
@@ -13,6 +13,9 @@ pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: anytype) !void
const zig_std_dir = try std.fs.path.join(gpa, &[_][]const u8{ zig_lib_dir, "std" });
defer gpa.free(zig_std_dir);
+ const global_cache_dir = try introspect.resolveGlobalCacheDir(gpa);
+ defer gpa.free(global_cache_dir);
+
var bos = std.io.bufferedOutStream(stdout);
const bos_stream = bos.outStream();
@@ -25,6 +28,9 @@ pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: anytype) !void
try jws.objectField("std_dir");
try jws.emitString(zig_std_dir);
+ try jws.objectField("global_cache_dir");
+ try jws.emitString(global_cache_dir);
+
try jws.objectField("version");
try jws.emitString(build_options.version);
From a916f63940ed0cf039ab5f70fbee022f604995ea Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 17 Aug 2020 18:49:33 -0700
Subject: [PATCH 137/153] std.cache_hash: fix bug parsing inode
This resulted in false-negative cache misses.
---
lib/std/cache_hash.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/cache_hash.zig b/lib/std/cache_hash.zig
index acaa5edc8d..e89c165d23 100644
--- a/lib/std/cache_hash.zig
+++ b/lib/std/cache_hash.zig
@@ -193,7 +193,7 @@ pub const CacheHash = struct {
const digest_str = iter.next() orelse return error.InvalidFormat;
const file_path = iter.rest();
- cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, mtime_nsec_str, 10) catch return error.InvalidFormat;
+ cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
base64_decoder.decode(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
From 4462c60639807502eca8a2db2bcf6adafea496ef Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 17 Aug 2020 18:56:27 -0700
Subject: [PATCH 138/153] stage2: implement compiler id hash and add it to `zig
env`
---
src-self-hosted/introspect.zig | 52 ++++++++++++++++++++++++++++++++--
src-self-hosted/print_env.zig | 7 +++++
2 files changed, 56 insertions(+), 3 deletions(-)
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index 46405a2b70..8e99c93c60 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -3,8 +3,7 @@
const std = @import("std");
const mem = std.mem;
const fs = std.fs;
-
-const warn = std.debug.warn;
+const CacheHash = std.cache_hash.CacheHash;
/// Caller must free result
pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![]u8 {
@@ -63,7 +62,7 @@ pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
return findZigLibDir(allocator) catch |err| {
- warn(
+ std.debug.print(
\\Unable to find zig lib directory: {}.
\\Reinstall Zig or use --zig-install-prefix.
\\
@@ -87,3 +86,50 @@ pub fn resolveGlobalCacheDir(allocator: *mem.Allocator) ![]u8 {
return fs.getAppDataDir(allocator, appname);
}
+
+var compiler_id_mutex = std.Mutex{};
+var compiler_id: [16]u8 = undefined;
+var compiler_id_computed = false;
+
+pub fn resolveCompilerId(gpa: *mem.Allocator) ![16]u8 {
+ const held = compiler_id_mutex.acquire();
+ defer held.release();
+
+ if (compiler_id_computed)
+ return compiler_id;
+ compiler_id_computed = true;
+
+ const global_cache_dir = try resolveGlobalCacheDir(gpa);
+ defer gpa.free(global_cache_dir);
+
+ // TODO Introduce openGlobalCacheDir which returns a dir handle rather than a string.
+ var cache_dir = try fs.cwd().openDir(global_cache_dir, .{});
+ defer cache_dir.close();
+
+ var ch = try CacheHash.init(gpa, cache_dir, "exe");
+ defer ch.release();
+
+ const self_exe_path = try fs.selfExePathAlloc(gpa);
+ defer gpa.free(self_exe_path);
+
+ _ = try ch.addFile(self_exe_path, null);
+
+ if (try ch.hit()) |digest| {
+ compiler_id = digest[0..16].*;
+ return compiler_id;
+ }
+
+ const libs = try std.process.getSelfExeSharedLibPaths(gpa);
+ defer {
+ for (libs) |lib| gpa.free(lib);
+ gpa.free(libs);
+ }
+
+ for (libs) |lib| {
+ try ch.addFilePost(lib);
+ }
+
+ const digest = ch.final();
+ compiler_id = digest[0..16].*;
+ return compiler_id;
+}
diff --git a/src-self-hosted/print_env.zig b/src-self-hosted/print_env.zig
index 907e9e234d..9b68633d3e 100644
--- a/src-self-hosted/print_env.zig
+++ b/src-self-hosted/print_env.zig
@@ -16,6 +16,10 @@ pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: anytype) !void
const global_cache_dir = try introspect.resolveGlobalCacheDir(gpa);
defer gpa.free(global_cache_dir);
+ const compiler_id_digest = try introspect.resolveCompilerId(gpa);
+ var compiler_id_buf: [compiler_id_digest.len * 2]u8 = undefined;
+ const compiler_id = std.fmt.bufPrint(&compiler_id_buf, "{x}", .{compiler_id_digest}) catch unreachable;
+
var bos = std.io.bufferedOutStream(stdout);
const bos_stream = bos.outStream();
@@ -28,6 +32,9 @@ pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: anytype) !void
try jws.objectField("std_dir");
try jws.emitString(zig_std_dir);
+ try jws.objectField("id");
+ try jws.emitString(compiler_id);
+
try jws.objectField("global_cache_dir");
try jws.emitString(global_cache_dir);
From d605af511a9af5d987a9e2276c2ed9a1b4e951c7 Mon Sep 17 00:00:00 2001
From: Matthew Knight
Date: Mon, 17 Aug 2020 19:17:04 -0700
Subject: [PATCH 139/153] added bpf() syscall and some supporting structs
(#6061)
* added bpf syscall and some supporting structs
* moved bpf to bits and added flags
---
lib/std/os/bits/linux.zig | 1 +
lib/std/os/bits/linux/bpf.zig | 606 ++++++++++++++++++++++++++++++++++
2 files changed, 607 insertions(+)
create mode 100644 lib/std/os/bits/linux/bpf.zig
diff --git a/lib/std/os/bits/linux.zig b/lib/std/os/bits/linux.zig
index 41d681554e..4ad1fc7de4 100644
--- a/lib/std/os/bits/linux.zig
+++ b/lib/std/os/bits/linux.zig
@@ -19,6 +19,7 @@ pub usingnamespace switch (builtin.arch) {
};
pub usingnamespace @import("linux/netlink.zig");
+pub const bpf = @import("linux/bpf.zig");
const is_mips = builtin.arch.isMIPS();
diff --git a/lib/std/os/bits/linux/bpf.zig b/lib/std/os/bits/linux/bpf.zig
new file mode 100644
index 0000000000..150d3e9135
--- /dev/null
+++ b/lib/std/os/bits/linux/bpf.zig
@@ -0,0 +1,606 @@
+usingnamespace std.os;
+const std = @import("../../../std.zig");
+
+// instruction classes
+/// jmp mode in word width
+pub const JMP32 = 0x06;
+/// alu mode in double word width
+pub const ALU64 = 0x07;
+
+// ld/ldx fields
+/// double word (64-bit)
+pub const DW = 0x18;
+/// exclusive add
+pub const XADD = 0xc0;
+
+// alu/jmp fields
+/// mov reg to reg
+pub const MOV = 0xb0;
+/// sign extending arithmetic shift right
+pub const ARSH = 0xc0;
+
+// change endianness of a register
+/// flags for endianness conversion:
+pub const END = 0xd0;
+/// convert to little-endian
+pub const TO_LE = 0x00;
+/// convert to big-endian
+pub const TO_BE = 0x08;
+pub const FROM_LE = TO_LE;
+pub const FROM_BE = TO_BE;
+
+// jmp encodings
+/// jump != *
+pub const JNE = 0x50;
+/// LT is unsigned, '<'
+pub const JLT = 0xa0;
+/// LE is unsigned, '<='
+pub const JLE = 0xb0;
+/// SGT is signed '>', GT in x86
+pub const JSGT = 0x60;
+/// SGE is signed '>=', GE in x86
+pub const JSGE = 0x70;
+/// SLT is signed, '<'
+pub const JSLT = 0xc0;
+/// SLE is signed, '<='
+pub const JSLE = 0xd0;
+/// function call
+pub const CALL = 0x80;
+/// function return
+pub const EXIT = 0x90;
+
+/// Flag for prog_attach command. If a sub-cgroup installs some bpf program, the
+/// program in this cgroup yields to sub-cgroup program.
+pub const F_ALLOW_OVERRIDE = 0x1;
+/// Flag for prog_attach command. If a sub-cgroup installs some bpf program,
+/// that cgroup program gets run in addition to the program in this cgroup.
+pub const F_ALLOW_MULTI = 0x2;
+/// Flag for prog_attach command.
+pub const F_REPLACE = 0x4;
+
+/// If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the verifier
+/// will perform strict alignment checking as if the kernel has been built with
+/// CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, and NET_IP_ALIGN defined to 2.
+pub const F_STRICT_ALIGNMENT = 0x1;
+
+/// If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the verifier will
+/// allow any alignment whatsoever. On platforms with strict alignment
+/// requirements for loads ands stores (such as sparc and mips) the verifier
+/// validates that all loads and stores provably follow this requirement. This
+/// flag turns that checking and enforcement off.
+///
+/// It is mostly used for testing when we want to validate the context and
+/// memory access aspects of the verifier, but because of an unaligned access
+/// the alignment check would trigger before the one we are interested in.
+pub const F_ANY_ALIGNMENT = 0x2;
+
+/// BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose.
+/// Verifier does sub-register def/use analysis and identifies instructions
+/// whose def only matters for low 32-bit, high 32-bit is never referenced later
+/// through implicit zero extension. Therefore verifier notifies JIT back-ends
+/// that it is safe to ignore clearing high 32-bit for these instructions. This
+/// saves some back-ends a lot of code-gen. However such optimization is not
+/// necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends
+/// hence hasn't used verifier's analysis result. But, we really want to have a
+/// way to be able to verify the correctness of the described optimization on
+/// x86_64 on which testsuites are frequently exercised.
+///
+/// So, this flag is introduced. Once it is set, verifier will randomize high
+/// 32-bit for those instructions who has been identified as safe to ignore
+/// them. Then, if verifier is not doing correct analysis, such randomization
+/// will regress tests to expose bugs.
+pub const F_TEST_RND_HI32 = 0x4;
+
+/// When BPF ldimm64's insn[0].src_reg != 0 then this can have two extensions:
+/// insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE
+/// insn[0].imm: map fd map fd
+/// insn[1].imm: 0 offset into value
+/// insn[0].off: 0 0
+/// insn[1].off: 0 0
+/// ldimm64 rewrite: address of map address of map[0]+offset
+/// verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE
+pub const PSEUDO_MAP_FD = 1;
+pub const PSEUDO_MAP_VALUE = 2;
+
+/// when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+/// offset to another bpf function
+pub const PSEUDO_CALL = 1;
+
+/// flag for BPF_MAP_UPDATE_ELEM command. create new element or update existing
+pub const ANY = 0;
+/// flag for BPF_MAP_UPDATE_ELEM command. create new element if it didn't exist
+pub const NOEXIST = 1;
+/// flag for BPF_MAP_UPDATE_ELEM command. update existing element
+pub const EXIST = 2;
+/// flag for BPF_MAP_UPDATE_ELEM command. spin_lock-ed map_lookup/map_update
+pub const F_LOCK = 4;
+
+/// flag for BPF_MAP_CREATE command
+pub const BPF_F_NO_PREALLOC = 0x1;
+/// flag for BPF_MAP_CREATE command. Instead of having one common LRU list in
+/// the BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list which can
+/// scale and perform better. Note, the LRU nodes (including free nodes) cannot
+/// be moved across different LRU lists.
+pub const BPF_F_NO_COMMON_LRU = 0x2;
+/// flag for BPF_MAP_CREATE command. Specify numa node during map creation
+pub const BPF_F_NUMA_NODE = 0x4;
+/// flag for BPF_MAP_CREATE command. Flags for BPF object read access from
+/// syscall side
+pub const BPF_F_RDONLY = 0x8;
+/// flag for BPF_MAP_CREATE command. Flags for BPF object write access from
+/// syscall side
+pub const BPF_F_WRONLY = 0x10;
+/// flag for BPF_MAP_CREATE command. Flag for stack_map, store build_id+offset
+/// instead of pointer
+pub const BPF_F_STACK_BUILD_ID = 0x20;
+/// flag for BPF_MAP_CREATE command. Zero-initialize hash function seed. This
+/// should only be used for testing.
+pub const BPF_F_ZERO_SEED = 0x40;
+/// flag for BPF_MAP_CREATE command Flags for accessing BPF object from program
+/// side.
+pub const BPF_F_RDONLY_PROG = 0x80;
+/// flag for BPF_MAP_CREATE command. Flags for accessing BPF object from program
+/// side.
+pub const BPF_F_WRONLY_PROG = 0x100;
+/// flag for BPF_MAP_CREATE command. Clone map from listener for newly accepted
+/// socket
+pub const BPF_F_CLONE = 0x200;
+/// flag for BPF_MAP_CREATE command. Enable memory-mapping BPF map
+pub const BPF_F_MMAPABLE = 0x400;
+
+/// a single BPF instruction
+pub const Insn = packed struct {
+ code: u8,
+ dst: u4,
+ src: u4,
+ off: i16,
+ imm: i32,
+
+ /// r0 - r9 are general purpose 64-bit registers, r10 points to the stack
+ /// frame
+ pub const Reg = enum(u4) {
+ r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10
+ };
+
+ const alu = 0x04;
+ const jmp = 0x05;
+ const mov = 0xb0;
+ const k = 0;
+ const exit_code = 0x90;
+
+ // TODO: implement more factory functions for the other instructions
+ /// load immediate value into a register
+ pub fn load_imm(dst: Reg, imm: i32) Insn {
+ return Insn{
+ .code = alu | mov | k,
+ .dst = @enumToInt(dst),
+ .src = 0,
+ .off = 0,
+ .imm = imm,
+ };
+ }
+
+ /// exit BPF program
+ pub fn exit() Insn {
+ return Insn{
+ .code = jmp | exit_code,
+ .dst = 0,
+ .src = 0,
+ .off = 0,
+ .imm = 0,
+ };
+ }
+};
+
+pub const Cmd = extern enum(usize) {
+ map_create,
+ map_lookup_elem,
+ map_update_elem,
+ map_delete_elem,
+ map_get_next_key,
+ prog_load,
+ obj_pin,
+ obj_get,
+ prog_attach,
+ prog_detach,
+ prog_test_run,
+ prog_get_next_id,
+ map_get_next_id,
+ prog_get_fd_by_id,
+ map_get_fd_by_id,
+ obj_get_info_by_fd,
+ prog_query,
+ raw_tracepoint_open,
+ btf_load,
+ btf_get_fd_by_id,
+ task_fd_query,
+ map_lookup_and_delete_elem,
+ map_freeze,
+ btf_get_next_id,
+ map_lookup_batch,
+ map_lookup_and_delete_batch,
+ map_update_batch,
+ map_delete_batch,
+ link_create,
+ link_update,
+ link_get_fd_by_id,
+ link_get_next_id,
+ enable_stats,
+ iter_create,
+ link_detach,
+ _,
+};
+
+pub const MapType = extern enum(u32) {
+ unspec,
+ hash,
+ array,
+ prog_array,
+ perf_event_array,
+ percpu_hash,
+ percpu_array,
+ stack_trace,
+ cgroup_array,
+ lru_hash,
+ lru_percpu_hash,
+ lpm_trie,
+ array_of_maps,
+ hash_of_maps,
+ devmap,
+ sockmap,
+ cpumap,
+ xskmap,
+ sockhash,
+ cgroup_storage,
+ reuseport_sockarray,
+ percpu_cgroup_storage,
+ queue,
+ stack,
+ sk_storage,
+ devmap_hash,
+ struct_ops,
+ ringbuf,
+ _,
+};
+
+pub const ProgType = extern enum(u32) {
+ unspec,
+ socket_filter,
+ kprobe,
+ sched_cls,
+ sched_act,
+ tracepoint,
+ xdp,
+ perf_event,
+ cgroup_skb,
+ cgroup_sock,
+ lwt_in,
+ lwt_out,
+ lwt_xmit,
+ sock_ops,
+ sk_skb,
+ cgroup_device,
+ sk_msg,
+ raw_tracepoint,
+ cgroup_sock_addr,
+ lwt_seg6local,
+ lirc_mode2,
+ sk_reuseport,
+ flow_dissector,
+ cgroup_sysctl,
+ raw_tracepoint_writable,
+ cgroup_sockopt,
+ tracing,
+ struct_ops,
+ ext,
+ lsm,
+ sk_lookup,
+};
+
+pub const AttachType = extern enum(u32) {
+ cgroup_inet_ingress,
+ cgroup_inet_egress,
+ cgroup_inet_sock_create,
+ cgroup_sock_ops,
+ sk_skb_stream_parser,
+ sk_skb_stream_verdict,
+ cgroup_device,
+ sk_msg_verdict,
+ cgroup_inet4_bind,
+ cgroup_inet6_bind,
+ cgroup_inet4_connect,
+ cgroup_inet6_connect,
+ cgroup_inet4_post_bind,
+ cgroup_inet6_post_bind,
+ cgroup_udp4_sendmsg,
+ cgroup_udp6_sendmsg,
+ lirc_mode2,
+ flow_dissector,
+ cgroup_sysctl,
+ cgroup_udp4_recvmsg,
+ cgroup_udp6_recvmsg,
+ cgroup_getsockopt,
+ cgroup_setsockopt,
+ trace_raw_tp,
+ trace_fentry,
+ trace_fexit,
+ modify_return,
+ lsm_mac,
+ trace_iter,
+ cgroup_inet4_getpeername,
+ cgroup_inet6_getpeername,
+ cgroup_inet4_getsockname,
+ cgroup_inet6_getsockname,
+ xdp_devmap,
+ cgroup_inet_sock_release,
+ xdp_cpumap,
+ sk_lookup,
+ xdp,
+ _,
+};
+
+const obj_name_len = 16;
+/// struct used by Cmd.map_create command
+pub const MapCreateAttr = extern struct {
+ /// one of MapType
+ map_type: u32,
+ /// size of key in bytes
+ key_size: u32,
+ /// size of value in bytes
+ value_size: u32,
+ /// max number of entries in a map
+ max_entries: u32,
+ /// .map_create related flags
+ map_flags: u32,
+ /// fd pointing to the inner map
+ inner_map_fd: fd_t,
+ /// numa node (effective only if MapCreateFlags.numa_node is set)
+ numa_node: u32,
+ map_name: [obj_name_len]u8,
+ /// ifindex of netdev to create on
+ map_ifindex: u32,
+ /// fd pointing to a BTF type data
+ btf_fd: fd_t,
+ /// BTF type_id of the key
+ btf_key_type_id: u32,
+ /// BTF type_id of the value
+ bpf_value_type_id: u32,
+ /// BTF type_id of a kernel struct stored as the map value
+ btf_vmlinux_value_type_id: u32,
+};
+
+/// struct used by Cmd.map_*_elem commands
+pub const MapElemAttr = extern struct {
+ map_fd: fd_t,
+ key: u64,
+ result: extern union {
+ value: u64,
+ next_key: u64,
+ },
+ flags: u64,
+};
+
+/// struct used by Cmd.map_*_batch commands
+pub const MapBatchAttr = extern struct {
+ /// start batch, NULL to start from beginning
+ in_batch: u64,
+ /// output: next start batch
+ out_batch: u64,
+ keys: u64,
+ values: u64,
+ /// input/output:
+ /// input: # of key/value elements
+ /// output: # of filled elements
+ count: u32,
+ map_fd: fd_t,
+ elem_flags: u64,
+ flags: u64,
+};
+
+/// struct used by Cmd.prog_load command
+pub const ProgLoadAttr = extern struct {
+ /// one of ProgType
+ prog_type: u32,
+ insn_cnt: u32,
+ insns: u64,
+ license: u64,
+ /// verbosity level of verifier
+ log_level: u32,
+ /// size of user buffer
+ log_size: u32,
+ /// user supplied buffer
+ log_buf: u64,
+ /// not used
+ kern_version: u32,
+ prog_flags: u32,
+ prog_name: [obj_name_len]u8,
+ /// ifindex of netdev to prep for. For some prog types expected attach
+ /// type must be known at load time to verify attach type specific parts
+ /// of prog (context accesses, allowed helpers, etc).
+ prog_ifindex: u32,
+ expected_attach_type: u32,
+ /// fd pointing to BTF type data
+ prog_btf_fd: fd_t,
+ /// userspace bpf_func_info size
+ func_info_rec_size: u32,
+ func_info: u64,
+ /// number of bpf_func_info records
+ func_info_cnt: u32,
+ /// userspace bpf_line_info size
+ line_info_rec_size: u32,
+ line_info: u64,
+ /// number of bpf_line_info records
+ line_info_cnt: u32,
+ /// in-kernel BTF type id to attach to
+ attact_btf_id: u32,
+ /// 0 to attach to vmlinux
+ attach_prog_id: u32,
+};
+
+/// struct used by Cmd.obj_* commands
+pub const ObjAttr = extern struct {
+ pathname: u64,
+ bpf_fd: fd_t,
+ file_flags: u32,
+};
+
+/// struct used by Cmd.prog_attach/detach commands
+pub const ProgAttachAttr = extern struct {
+ /// container object to attach to
+ target_fd: fd_t,
+ /// eBPF program to attach
+ attach_bpf_fd: fd_t,
+ attach_type: u32,
+ attach_flags: u32,
+ // TODO: BPF_F_REPLACE flags
+ /// previously attached eBPF program to replace if .replace is used
+ replace_bpf_fd: fd_t,
+};
+
+/// struct used by Cmd.prog_test_run command
+pub const TestAttr = extern struct {
+ prog_fd: fd_t,
+ retval: u32,
+ /// input: len of data_in
+ data_size_in: u32,
+ /// input/output: len of data_out. returns ENOSPC if data_out is too small.
+ data_size_out: u32,
+ data_in: u64,
+ data_out: u64,
+ repeat: u32,
+ duration: u32,
+ /// input: len of ctx_in
+ ctx_size_in: u32,
+ /// input/output: len of ctx_out. returns ENOSPC if ctx_out is too small.
+ ctx_size_out: u32,
+ ctx_in: u64,
+ ctx_out: u64,
+};
+
+/// struct used by Cmd.*_get_*_id commands
+pub const GetIdAttr = extern struct {
+ id: extern union {
+ start_id: u32,
+ prog_id: u32,
+ map_id: u32,
+ btf_id: u32,
+ link_id: u32,
+ },
+ next_id: u32,
+ open_flags: u32,
+};
+
+/// struct used by Cmd.obj_get_info_by_fd command
+pub const InfoAttr = extern struct {
+ bpf_fd: fd_t,
+ info_len: u32,
+ info: u64,
+};
+
+/// struct used by Cmd.prog_query command
+pub const QueryAttr = extern struct {
+ /// container object to query
+ target_fd: fd_t,
+ attach_type: u32,
+ query_flags: u32,
+ attach_flags: u32,
+ prog_ids: u64,
+ prog_cnt: u32,
+};
+
+/// struct used by Cmd.raw_tracepoint_open command
+pub const RawTracepointAttr = extern struct {
+ name: u64,
+ prog_fd: fd_t,
+};
+
+/// struct used by Cmd.btf_load command
+pub const BtfLoadAttr = extern struct {
+ btf: u64,
+ btf_log_buf: u64,
+ btf_size: u32,
+ btf_log_size: u32,
+ btf_log_level: u32,
+};
+
+pub const TaskFdQueryAttr = extern struct {
+ /// input: pid
+ pid: pid_t,
+ /// input: fd
+ fd: fd_t,
+ /// input: flags
+ flags: u32,
+ /// input/output: buf len
+ buf_len: u32,
+ /// input/output:
+ /// tp_name for tracepoint
+ /// symbol for kprobe
+ /// filename for uprobe
+ buf: u64,
+ /// output: prog_id
+ prog_id: u32,
+ /// output: BPF_FD_TYPE
+ fd_type: u32,
+ /// output: probe_offset
+ probe_offset: u64,
+ /// output: probe_addr
+ probe_addr: u64,
+};
+
+/// struct used by Cmd.link_create command
+pub const LinkCreateAttr = extern struct {
+ /// eBPF program to attach
+ prog_fd: fd_t,
+ /// object to attach to
+ target_fd: fd_t,
+ attach_type: u32,
+ /// extra flags
+ flags: u32,
+};
+
+/// struct used by Cmd.link_update command
+pub const LinkUpdateAttr = extern struct {
+ link_fd: fd_t,
+ /// new program to update link with
+ new_prog_fd: fd_t,
+ /// extra flags
+ flags: u32,
+ /// expected link's program fd, it is specified only if BPF_F_REPLACE is
+ /// set in flags
+ old_prog_fd: fd_t,
+};
+
+/// struct used by Cmd.enable_stats command
+pub const EnableStatsAttr = extern struct {
+ type: u32,
+};
+
+/// struct used by Cmd.iter_create command
+pub const IterCreateAttr = extern struct {
+ link_fd: fd_t,
+ flags: u32,
+};
+
+pub const Attr = extern union {
+ map_create: MapCreateAttr,
+ map_elem: MapElemAttr,
+ map_batch: MapBatchAttr,
+ prog_load: ProgLoadAttr,
+ obj: ObjAttr,
+ prog_attach: ProgAttachAttr,
+ test_run: TestRunAttr,
+ get_id: GetIdAttr,
+ info: InfoAttr,
+ query: QueryAttr,
+ raw_tracepoint: RawTracepointAttr,
+ btf_load: BtfLoadAttr,
+ task_fd_query: TaskFdQueryAttr,
+ link_create: LinkCreateAttr,
+ link_update: LinkUpdateAttr,
+ enable_stats: EnableStatsAttr,
+ iter_create: IterCreateAttr,
+};
+
+pub fn bpf(cmd: Cmd, attr: *Attr, size: u32) usize {
+ return syscall3(.bpf, @enumToInt(cmd), @ptrToInt(attr), size);
+}
From 27cb23cbc5fe35d0eae8494006ba93111bd2bde6 Mon Sep 17 00:00:00 2001
From: Ashish Shekar
Date: Tue, 18 Aug 2020 07:48:29 +0530
Subject: [PATCH 140/153] =?UTF-8?q?Handle=20singular=20param=20count=20wor?=
=?UTF-8?q?d=C2=A0in=20error=20messages=20(#6073)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src-self-hosted/zir_sema.zig | 6 +++---
src/ir.cpp | 12 +++++++-----
test/compile_errors.zig | 8 ++++----
3 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/src-self-hosted/zir_sema.zig b/src-self-hosted/zir_sema.zig
index 31ffb2cc0d..94c3a19677 100644
--- a/src-self-hosted/zir_sema.zig
+++ b/src-self-hosted/zir_sema.zig
@@ -393,7 +393,7 @@ fn analyzeInstParamType(mod: *Module, scope: *Scope, inst: *zir.Inst.ParamType)
// TODO support C-style var args
const param_count = fn_ty.fnParamLen();
if (arg_index >= param_count) {
- return mod.fail(scope, inst.base.src, "arg index {} out of bounds; '{}' has {} arguments", .{
+ return mod.fail(scope, inst.base.src, "arg index {} out of bounds; '{}' has {} argument(s)", .{
arg_index,
fn_ty,
param_count,
@@ -600,7 +600,7 @@ fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError
return mod.fail(
scope,
inst.positionals.func.src,
- "expected at least {} arguments, found {}",
+ "expected at least {} argument(s), found {}",
.{ fn_params_len, call_params_len },
);
}
@@ -610,7 +610,7 @@ fn analyzeInstCall(mod: *Module, scope: *Scope, inst: *zir.Inst.Call) InnerError
return mod.fail(
scope,
inst.positionals.func.src,
- "expected {} arguments, found {}",
+ "expected {} argument(s), found {}",
.{ fn_params_len, call_params_len },
);
}
diff --git a/src/ir.cpp b/src/ir.cpp
index 40be4e147b..78d451a4eb 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6349,9 +6349,9 @@ static IrInstSrc *ir_gen_builtin_fn_call(IrBuilderSrc *irb, Scope *scope, AstNod
BuiltinFnEntry *builtin_fn = entry->value;
size_t actual_param_count = node->data.fn_call_expr.params.length;
- if (builtin_fn->param_count != SIZE_MAX && builtin_fn->param_count != actual_param_count) {
+ if (builtin_fn->param_count != SIZE_MAX && builtin_fn->param_count != actual_param_count) {
add_node_error(irb->codegen, node,
- buf_sprintf("expected %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize,
+ buf_sprintf("expected %" ZIG_PRI_usize " argument(s), found %" ZIG_PRI_usize,
builtin_fn->param_count, actual_param_count));
return irb->codegen->invalid_inst_src;
}
@@ -20186,7 +20186,8 @@ static IrInstGen *ir_analyze_fn_call(IrAnalyze *ira, IrInst* source_instr,
if (fn_type_id->is_var_args) {
if (call_param_count < src_param_count) {
ErrorMsg *msg = ir_add_error_node(ira, source_node,
- buf_sprintf("expected at least %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize "", src_param_count, call_param_count));
+ buf_sprintf("expected at least %" ZIG_PRI_usize " argument(s), found %" ZIG_PRI_usize "",
+ src_param_count, call_param_count));
if (fn_proto_node) {
add_error_note(ira->codegen, msg, fn_proto_node,
buf_sprintf("declared here"));
@@ -20195,7 +20196,8 @@ static IrInstGen *ir_analyze_fn_call(IrAnalyze *ira, IrInst* source_instr,
}
} else if (src_param_count != call_param_count) {
ErrorMsg *msg = ir_add_error_node(ira, source_node,
- buf_sprintf("expected %" ZIG_PRI_usize " arguments, found %" ZIG_PRI_usize "", src_param_count, call_param_count));
+ buf_sprintf("expected %" ZIG_PRI_usize " argument(s), found %" ZIG_PRI_usize "",
+ src_param_count, call_param_count));
if (fn_proto_node) {
add_error_note(ira->codegen, msg, fn_proto_node,
buf_sprintf("declared here"));
@@ -30127,7 +30129,7 @@ static IrInstGen *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstSrcArgTy
return ir_const_type(ira, &instruction->base.base, ira->codegen->builtin_types.entry_anytype);
}
ir_add_error(ira, &arg_index_inst->base,
- buf_sprintf("arg index %" ZIG_PRI_u64 " out of bounds; '%s' has %" ZIG_PRI_usize " arguments",
+ buf_sprintf("arg index %" ZIG_PRI_u64 " out of bounds; '%s' has %" ZIG_PRI_usize " argument(s)",
arg_index, buf_ptr(&fn_type->name), fn_type_id->param_count));
return ira->codegen->invalid_inst_gen;
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 70a9c47998..48198199df 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -705,7 +705,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ for (arr) |bits| _ = @popCount(bits);
\\}
, &[_][]const u8{
- "tmp.zig:3:26: error: expected 2 arguments, found 1",
+ "tmp.zig:3:26: error: expected 2 argument(s), found 1",
});
cases.addTest("@call rejects non comptime-known fn - always_inline",
@@ -4103,7 +4103,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\}
\\fn b(a: i32, b: i32, c: i32) void { }
, &[_][]const u8{
- "tmp.zig:2:6: error: expected 3 arguments, found 1",
+ "tmp.zig:2:6: error: expected 3 argument(s), found 1",
});
cases.add("invalid type",
@@ -4716,7 +4716,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\export fn entry() usize { return @sizeOf(@TypeOf(f)); }
, &[_][]const u8{
- "tmp.zig:20:34: error: expected 1 arguments, found 0",
+ "tmp.zig:20:34: error: expected 1 argument(s), found 0",
});
cases.add("missing function name",
@@ -5498,7 +5498,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\}
\\export fn entry() usize { return @sizeOf(@TypeOf(f)); }
, &[_][]const u8{
- "tmp.zig:6:15: error: expected 2 arguments, found 3",
+ "tmp.zig:6:15: error: expected 2 argument(s), found 3",
});
cases.add("assign through constant pointer",
From ce8b9c0c5cdbe4161952e6f2aa875f722949d4cb Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 17 Aug 2020 21:24:38 -0700
Subject: [PATCH 141/153] std.cache_hash: don't trust mtime granularity to be
better than 1ms
I empirically observed mtime not changing when rapidly writing the same
file name within the same millisecond of wall clock time, despite the
mtime field having nanosecond precision.
I believe this fixes the CI test failures.
---
lib/std/cache_hash.zig | 32 ++++++++++++++++++++++----------
1 file changed, 22 insertions(+), 10 deletions(-)
diff --git a/lib/std/cache_hash.zig b/lib/std/cache_hash.zig
index e89c165d23..bc684faa61 100644
--- a/lib/std/cache_hash.zig
+++ b/lib/std/cache_hash.zig
@@ -466,7 +466,16 @@ fn isProblematicTimestamp(fs_clock: i128) bool {
} else {
wall_nsec &= @as(i64, -1) << @intCast(u6, @ctz(i64, fs_nsec));
}
- return wall_nsec == fs_nsec and wall_sec == fs_sec;
+ if (wall_nsec == fs_nsec and wall_sec == fs_sec)
+ return true;
+
+ // I have also observed precision problems at a millisecond granularity.
+ const fs_msec = @intCast(i64, @divFloor(fs_clock, std.time.ns_per_ms * 2));
+ const wall_msec = @intCast(i64, @divFloor(wall_clock, std.time.ns_per_ms * 2));
+ if (fs_msec == wall_msec)
+ return true;
+
+ return false;
}
test "cache file and then recall it" {
@@ -479,9 +488,10 @@ test "cache file and then recall it" {
const temp_file = "test.txt";
const temp_manifest_dir = "temp_manifest_dir";
+ const ts = std.time.nanoTimestamp();
try cwd.writeFile(temp_file, "Hello, world!\n");
- while (isProblematicTimestamp(std.time.nanoTimestamp())) {
+ while (isProblematicTimestamp(ts)) {
std.time.sleep(1);
}
@@ -545,9 +555,13 @@ test "check that changing a file makes cache fail" {
const original_temp_file_contents = "Hello, world!\n";
const updated_temp_file_contents = "Hello, world; but updated!\n";
+ try cwd.deleteTree(temp_manifest_dir);
+ try cwd.deleteTree(temp_file);
+
+ const ts = std.time.nanoTimestamp();
try cwd.writeFile(temp_file, original_temp_file_contents);
- while (isProblematicTimestamp(std.time.nanoTimestamp())) {
+ while (isProblematicTimestamp(ts)) {
std.time.sleep(1);
}
@@ -571,10 +585,6 @@ test "check that changing a file makes cache fail" {
try cwd.writeFile(temp_file, updated_temp_file_contents);
- while (isProblematicTimestamp(std.time.nanoTimestamp())) {
- std.time.sleep(1);
- }
-
{
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
@@ -594,7 +604,7 @@ test "check that changing a file makes cache fail" {
testing.expect(!mem.eql(u8, digest1[0..], digest2[0..]));
try cwd.deleteTree(temp_manifest_dir);
- try cwd.deleteFile(temp_file);
+ try cwd.deleteTree(temp_file);
}
test "no file inputs" {
@@ -643,10 +653,11 @@ test "CacheHashes with files added after initial hash work" {
const temp_file2 = "cache_hash_post_file_test2.txt";
const temp_manifest_dir = "cache_hash_post_file_manifest_dir";
+ const ts1 = std.time.nanoTimestamp();
try cwd.writeFile(temp_file1, "Hello, world!\n");
try cwd.writeFile(temp_file2, "Hello world the second!\n");
- while (isProblematicTimestamp(std.time.nanoTimestamp())) {
+ while (isProblematicTimestamp(ts1)) {
std.time.sleep(1);
}
@@ -680,9 +691,10 @@ test "CacheHashes with files added after initial hash work" {
testing.expect(mem.eql(u8, &digest1, &digest2));
// Modify the file added after initial hash
+ const ts2 = std.time.nanoTimestamp();
try cwd.writeFile(temp_file2, "Hello world the second, updated\n");
- while (isProblematicTimestamp(std.time.nanoTimestamp())) {
+ while (isProblematicTimestamp(ts2)) {
std.time.sleep(1);
}
From fa8935426b425110a89c6a2008013c500b7a3a79 Mon Sep 17 00:00:00 2001
From: Eleanor Bartle
Date: Tue, 18 Aug 2020 14:30:00 +1000
Subject: [PATCH 142/153] Cleaned up RISC-V instruction creation, added 32-bit
immediates (#6077)
* Implemented all R-type arithmetic/logical instructions
* Implemented all I-type arithmetic/logical instructions
* Implemented all load and store instructions
* Implemented all of RV64I except FENCE
---
src-self-hosted/codegen.zig | 68 +----
src-self-hosted/codegen/riscv64.zig | 418 +++++++++++++++++++++++++---
2 files changed, 387 insertions(+), 99 deletions(-)
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index f49d3b41fd..c59b8db825 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -1113,11 +1113,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try self.code.append(0xcc); // int3
},
.riscv64 => {
- const full = @bitCast(u32, instructions.CallBreak{
- .mode = @enumToInt(instructions.CallBreak.Mode.ebreak),
- });
-
- mem.writeIntLittle(u32, try self.code.addManyAsArray(4), full);
+ mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ebreak.toU32());
},
else => return self.fail(src, "TODO implement @breakpoint() for {}", .{self.target.cpu.arch}),
}
@@ -1193,12 +1189,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
try self.genSetReg(inst.base.src, .ra, .{ .memory = got_addr });
- const jalr = instructions.Jalr{
- .rd = Register.ra.id(),
- .rs1 = Register.ra.id(),
- .offset = 0,
- };
- mem.writeIntLittle(u32, try self.code.addManyAsArray(4), @bitCast(u32, jalr));
+ mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32());
} else {
return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
}
@@ -1255,12 +1246,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try self.exitlude_jump_relocs.append(self.gpa, self.code.items.len - 4);
},
.riscv64 => {
- const jalr = instructions.Jalr{
- .rd = Register.zero.id(),
- .rs1 = Register.ra.id(),
- .offset = 0,
- };
- mem.writeIntLittle(u32, try self.code.addManyAsArray(4), @bitCast(u32, jalr));
+ mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.zero, 0, .ra).toU32());
},
else => return self.fail(src, "TODO implement return for {}", .{self.target.cpu.arch}),
}
@@ -1512,11 +1498,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
if (mem.eql(u8, inst.asm_source, "ecall")) {
- const full = @bitCast(u32, instructions.CallBreak{
- .mode = @enumToInt(instructions.CallBreak.Mode.ecall),
- });
-
- mem.writeIntLittle(u32, try self.code.addManyAsArray(4), full);
+ mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ecall.toU32());
} else {
return self.fail(inst.base.src, "TODO implement support for more riscv64 assembly instructions", .{});
}
@@ -1723,36 +1705,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.immediate => |unsigned_x| {
const x = @bitCast(i64, unsigned_x);
if (math.minInt(i12) <= x and x <= math.maxInt(i12)) {
- const instruction = @bitCast(u32, instructions.Addi{
- .mode = @enumToInt(instructions.Addi.Mode.addi),
- .imm = @truncate(i12, x),
- .rs1 = Register.zero.id(),
- .rd = reg.id(),
- });
-
- mem.writeIntLittle(u32, try self.code.addManyAsArray(4), instruction);
+ mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.addi(reg, .zero, @truncate(i12, x)).toU32());
return;
}
if (math.minInt(i32) <= x and x <= math.maxInt(i32)) {
- const split = @bitCast(packed struct {
- low12: i12,
- up20: i20,
- }, @truncate(i32, x));
- if (split.low12 < 0) return self.fail(src, "TODO support riscv64 genSetReg i32 immediates with 12th bit set to 1", .{});
+ const lo12 = @truncate(i12, x);
+ const carry: i32 = if (lo12 < 0) 1 else 0;
+ const hi20 = @truncate(i20, (x >> 12) +% carry);
- const lui = @bitCast(u32, instructions.Lui{
- .imm = split.up20,
- .rd = reg.id(),
- });
- mem.writeIntLittle(u32, try self.code.addManyAsArray(4), lui);
-
- const addi = @bitCast(u32, instructions.Addi{
- .mode = @enumToInt(instructions.Addi.Mode.addi),
- .imm = @truncate(i12, split.low12),
- .rs1 = reg.id(),
- .rd = reg.id(),
- });
- mem.writeIntLittle(u32, try self.code.addManyAsArray(4), addi);
+ // TODO: add test case for 32-bit immediate
+ mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.lui(reg, hi20).toU32());
+ mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.addi(reg, reg, lo12).toU32());
return;
}
// li rd, immediate
@@ -1764,14 +1727,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(src, reg, .{ .immediate = addr });
- const ld = @bitCast(u32, instructions.Load{
- .mode = @enumToInt(instructions.Load.Mode.ld),
- .rs1 = reg.id(),
- .rd = reg.id(),
- .offset = 0,
- });
-
- mem.writeIntLittle(u32, try self.code.addManyAsArray(4), ld);
+ mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ld(reg, 0, reg).toU32());
// LOAD imm=[i12 offset = 0], rs1 =
// return self.fail("TODO implement genSetReg memory for riscv64");
diff --git a/src-self-hosted/codegen/riscv64.zig b/src-self-hosted/codegen/riscv64.zig
index 793731c83c..96b9c58f9c 100644
--- a/src-self-hosted/codegen/riscv64.zig
+++ b/src-self-hosted/codegen/riscv64.zig
@@ -1,50 +1,387 @@
const std = @import("std");
const DW = std.dwarf;
-pub const instructions = struct {
- pub const CallBreak = packed struct {
- pub const Mode = packed enum(u12) { ecall, ebreak };
- opcode: u7 = 0b1110011,
- unused1: u5 = 0,
- unused2: u3 = 0,
- unused3: u5 = 0,
- mode: u12, //: Mode
- };
- // I-type
- pub const Addi = packed struct {
- pub const Mode = packed enum(u3) { addi = 0b000, slti = 0b010, sltiu = 0b011, xori = 0b100, ori = 0b110, andi = 0b111 };
- opcode: u7 = 0b0010011,
+// TODO: this is only tagged to facilitate the monstrosity.
+// Once packed structs work make it packed.
+pub const Instruction = union(enum) {
+ R: packed struct {
+ opcode: u7,
rd: u5,
- mode: u3, //: Mode
+ funct3: u3,
rs1: u5,
- imm: i12,
- };
- pub const Lui = packed struct {
- opcode: u7 = 0b0110111,
+ rs2: u5,
+ funct7: u7,
+ },
+ I: packed struct {
+ opcode: u7,
rd: u5,
- imm: i20,
- };
- // I_type
- pub const Load = packed struct {
- pub const Mode = packed enum(u3) { ld = 0b011, lwu = 0b110 };
- opcode: u7 = 0b0000011,
- rd: u5,
- mode: u3, //: Mode
+ funct3: u3,
rs1: u5,
- offset: i12,
- };
- // I-type
- pub const Jalr = packed struct {
- opcode: u7 = 0b1100111,
- rd: u5,
- mode: u3 = 0,
+ imm0_11: u12,
+ },
+ S: packed struct {
+ opcode: u7,
+ imm0_4: u5,
+ funct3: u3,
rs1: u5,
- offset: i12,
- };
+ rs2: u5,
+ imm5_11: u7,
+ },
+ B: packed struct {
+ opcode: u7,
+ imm11: u1,
+ imm1_4: u4,
+ funct3: u3,
+ rs1: u5,
+ rs2: u5,
+ imm5_10: u6,
+ imm12: u1,
+ },
+ U: packed struct {
+ opcode: u7,
+ rd: u5,
+ imm12_31: u20,
+ },
+ J: packed struct {
+ opcode: u7,
+ rd: u5,
+ imm12_19: u8,
+ imm11: u1,
+ imm1_10: u10,
+ imm20: u1,
+ },
+
+ // TODO: once packed structs work we can remove this monstrosity.
+ pub fn toU32(self: Instruction) u32 {
+ return switch (self) {
+ .R => |v| @bitCast(u32, v),
+ .I => |v| @bitCast(u32, v),
+ .S => |v| @bitCast(u32, v),
+ .B => |v| @intCast(u32, v.opcode) + (@intCast(u32, v.imm11) << 7) + (@intCast(u32, v.imm1_4) << 8) + (@intCast(u32, v.funct3) << 12) + (@intCast(u32, v.rs1) << 15) + (@intCast(u32, v.rs2) << 20) + (@intCast(u32, v.imm5_10) << 25) + (@intCast(u32, v.imm12) << 31),
+ .U => |v| @bitCast(u32, v),
+ .J => |v| @bitCast(u32, v),
+ };
+ }
+
+ fn rType(op: u7, fn3: u3, fn7: u7, rd: Register, r1: Register, r2: Register) Instruction {
+ return Instruction{
+ .R = .{
+ .opcode = op,
+ .funct3 = fn3,
+ .funct7 = fn7,
+ .rd = @enumToInt(rd),
+ .rs1 = @enumToInt(r1),
+ .rs2 = @enumToInt(r2),
+ },
+ };
+ }
+
+ // RISC-V is all signed all the time -- convert immediates to unsigned for processing
+ fn iType(op: u7, fn3: u3, rd: Register, r1: Register, imm: i12) Instruction {
+ const umm = @bitCast(u12, imm);
+
+ return Instruction{
+ .I = .{
+ .opcode = op,
+ .funct3 = fn3,
+ .rd = @enumToInt(rd),
+ .rs1 = @enumToInt(r1),
+ .imm0_11 = umm,
+ },
+ };
+ }
+
+ fn sType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i12) Instruction {
+ const umm = @bitCast(u12, imm);
+
+ return Instruction{
+ .S = .{
+ .opcode = op,
+ .funct3 = fn3,
+ .rs1 = @enumToInt(r1),
+ .rs2 = @enumToInt(r2),
+ .imm0_4 = @truncate(u5, umm),
+ .imm5_11 = @truncate(u7, umm >> 5),
+ },
+ };
+ }
+
+ // Use significance value rather than bit value, same for J-type
+ // -- less burden on callsite, bonus semantic checking
+ fn bType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i13) Instruction {
+ const umm = @bitCast(u13, imm);
+ if (umm % 2 != 0) @panic("Internal error: misaligned branch target");
+
+ return Instruction{
+ .B = .{
+ .opcode = op,
+ .funct3 = fn3,
+ .rs1 = @enumToInt(r1),
+ .rs2 = @enumToInt(r2),
+ .imm1_4 = @truncate(u4, umm >> 1),
+ .imm5_10 = @truncate(u6, umm >> 5),
+ .imm11 = @truncate(u1, umm >> 11),
+ .imm12 = @truncate(u1, umm >> 12),
+ },
+ };
+ }
+
+ // We have to extract the 20 bits anyway -- let's not make it more painful
+ fn uType(op: u7, rd: Register, imm: i20) Instruction {
+ const umm = @bitCast(u20, imm);
+
+ return Instruction{
+ .U = .{
+ .opcode = op,
+ .rd = @enumToInt(rd),
+ .imm12_31 = umm,
+ },
+ };
+ }
+
+ fn jType(op: u7, rd: Register, imm: i21) Instruction {
+        const umm = @bitCast(u21, imm);
+ if (umm % 2 != 0) @panic("Internal error: misaligned jump target");
+
+ return Instruction{
+ .J = .{
+ .opcode = op,
+ .rd = @enumToInt(rd),
+ .imm1_10 = @truncate(u10, umm >> 1),
+                .imm11 = @truncate(u1, umm >> 11),
+ .imm12_19 = @truncate(u8, umm >> 12),
+ .imm20 = @truncate(u1, umm >> 20),
+ },
+ };
+ }
+
+ // The meat and potatoes. Arguments are in the order in which they would appear in assembly code.
+
+ // Arithmetic/Logical, Register-Register
+
+ pub fn add(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0110011, 0b000, 0b0000000, rd, r1, r2);
+ }
+
+ pub fn sub(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0110011, 0b000, 0b0100000, rd, r1, r2);
+ }
+
+ pub fn @"and"(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0110011, 0b111, 0b0000000, rd, r1, r2);
+ }
+
+ pub fn @"or"(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0110011, 0b110, 0b0000000, rd, r1, r2);
+ }
+
+ pub fn xor(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0110011, 0b100, 0b0000000, rd, r1, r2);
+ }
+
+ pub fn sll(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0110011, 0b001, 0b0000000, rd, r1, r2);
+ }
+
+ pub fn srl(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0110011, 0b101, 0b0000000, rd, r1, r2);
+ }
+
+ pub fn sra(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0110011, 0b101, 0b0100000, rd, r1, r2);
+ }
+
+ pub fn slt(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0110011, 0b010, 0b0000000, rd, r1, r2);
+ }
+
+ pub fn sltu(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0110011, 0b011, 0b0000000, rd, r1, r2);
+ }
+
+ // Arithmetic/Logical, Register-Register (32-bit)
+
+ pub fn addw(rd: Register, r1: Register, r2: Register) Instruction {
+        return rType(0b0111011, 0b000, 0b0000000, rd, r1, r2);
+ }
+
+ pub fn subw(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0111011, 0b000, 0b0100000, rd, r1, r2);
+ }
+
+ pub fn sllw(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0111011, 0b001, 0b0000000, rd, r1, r2);
+ }
+
+ pub fn srlw(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0111011, 0b101, 0b0000000, rd, r1, r2);
+ }
+
+ pub fn sraw(rd: Register, r1: Register, r2: Register) Instruction {
+ return rType(0b0111011, 0b101, 0b0100000, rd, r1, r2);
+ }
+
+ // Arithmetic/Logical, Register-Immediate
+
+ pub fn addi(rd: Register, r1: Register, imm: i12) Instruction {
+ return iType(0b0010011, 0b000, rd, r1, imm);
+ }
+
+ pub fn andi(rd: Register, r1: Register, imm: i12) Instruction {
+ return iType(0b0010011, 0b111, rd, r1, imm);
+ }
+
+ pub fn ori(rd: Register, r1: Register, imm: i12) Instruction {
+ return iType(0b0010011, 0b110, rd, r1, imm);
+ }
+
+ pub fn xori(rd: Register, r1: Register, imm: i12) Instruction {
+ return iType(0b0010011, 0b100, rd, r1, imm);
+ }
+
+ pub fn slli(rd: Register, r1: Register, shamt: u6) Instruction {
+ return iType(0b0010011, 0b001, rd, r1, shamt);
+ }
+
+ pub fn srli(rd: Register, r1: Register, shamt: u6) Instruction {
+ return iType(0b0010011, 0b101, rd, r1, shamt);
+ }
+
+ pub fn srai(rd: Register, r1: Register, shamt: u6) Instruction {
+ return iType(0b0010011, 0b101, rd, r1, (1 << 10) + shamt);
+ }
+
+ pub fn slti(rd: Register, r1: Register, imm: i12) Instruction {
+ return iType(0b0010011, 0b010, rd, r1, imm);
+ }
+
+ pub fn sltiu(rd: Register, r1: Register, imm: u12) Instruction {
+ return iType(0b0010011, 0b011, rd, r1, @bitCast(i12, imm));
+ }
+
+ // Arithmetic/Logical, Register-Immediate (32-bit)
+
+ pub fn addiw(rd: Register, r1: Register, imm: i12) Instruction {
+ return iType(0b0011011, 0b000, rd, r1, imm);
+ }
+
+ pub fn slliw(rd: Register, r1: Register, shamt: u5) Instruction {
+ return iType(0b0011011, 0b001, rd, r1, shamt);
+ }
+
+ pub fn srliw(rd: Register, r1: Register, shamt: u5) Instruction {
+ return iType(0b0011011, 0b101, rd, r1, shamt);
+ }
+
+ pub fn sraiw(rd: Register, r1: Register, shamt: u5) Instruction {
+ return iType(0b0011011, 0b101, rd, r1, (1 << 10) + shamt);
+ }
+
+ // Upper Immediate
+
+ pub fn lui(rd: Register, imm: i20) Instruction {
+ return uType(0b0110111, rd, imm);
+ }
+
+ pub fn auipc(rd: Register, imm: i20) Instruction {
+ return uType(0b0010111, rd, imm);
+ }
+
+ // Load
+
+ pub fn ld(rd: Register, offset: i12, base: Register) Instruction {
+ return iType(0b0000011, 0b011, rd, base, offset);
+ }
+
+ pub fn lw(rd: Register, offset: i12, base: Register) Instruction {
+ return iType(0b0000011, 0b010, rd, base, offset);
+ }
+
+ pub fn lwu(rd: Register, offset: i12, base: Register) Instruction {
+ return iType(0b0000011, 0b110, rd, base, offset);
+ }
+
+ pub fn lh(rd: Register, offset: i12, base: Register) Instruction {
+ return iType(0b0000011, 0b001, rd, base, offset);
+ }
+
+ pub fn lhu(rd: Register, offset: i12, base: Register) Instruction {
+ return iType(0b0000011, 0b101, rd, base, offset);
+ }
+
+ pub fn lb(rd: Register, offset: i12, base: Register) Instruction {
+ return iType(0b0000011, 0b000, rd, base, offset);
+ }
+
+ pub fn lbu(rd: Register, offset: i12, base: Register) Instruction {
+ return iType(0b0000011, 0b100, rd, base, offset);
+ }
+
+ // Store
+
+ pub fn sd(rs: Register, offset: i12, base: Register) Instruction {
+ return sType(0b0100011, 0b011, base, rs, offset);
+ }
+
+ pub fn sw(rs: Register, offset: i12, base: Register) Instruction {
+ return sType(0b0100011, 0b010, base, rs, offset);
+ }
+
+ pub fn sh(rs: Register, offset: i12, base: Register) Instruction {
+ return sType(0b0100011, 0b001, base, rs, offset);
+ }
+
+ pub fn sb(rs: Register, offset: i12, base: Register) Instruction {
+ return sType(0b0100011, 0b000, base, rs, offset);
+ }
+
+ // Fence
+ // TODO: implement fence
+
+ // Branch
+
+    pub fn beq(r1: Register, r2: Register, offset: i13) Instruction {
+        return bType(0b1100011, 0b000, r1, r2, offset);
+    }
+
+    pub fn bne(r1: Register, r2: Register, offset: i13) Instruction {
+        return bType(0b1100011, 0b001, r1, r2, offset);
+    }
+
+    pub fn blt(r1: Register, r2: Register, offset: i13) Instruction {
+        return bType(0b1100011, 0b100, r1, r2, offset);
+    }
+
+    pub fn bge(r1: Register, r2: Register, offset: i13) Instruction {
+        return bType(0b1100011, 0b101, r1, r2, offset);
+    }
+
+    pub fn bltu(r1: Register, r2: Register, offset: i13) Instruction {
+        return bType(0b1100011, 0b110, r1, r2, offset);
+    }
+
+    pub fn bgeu(r1: Register, r2: Register, offset: i13) Instruction {
+        return bType(0b1100011, 0b111, r1, r2, offset);
+    }
+
+ // Jump
+
+ pub fn jal(link: Register, offset: i21) Instruction {
+ return jType(0b1101111, link, offset);
+ }
+
+ pub fn jalr(link: Register, offset: i12, base: Register) Instruction {
+ return iType(0b1100111, 0b000, link, base, offset);
+ }
+
+ // System
+
+ pub const ecall = iType(0b1110011, 0b000, .zero, .zero, 0x000);
+ pub const ebreak = iType(0b1110011, 0b000, .zero, .zero, 0x001);
};
// zig fmt: off
-pub const RawRegister = enum(u8) {
+pub const RawRegister = enum(u5) {
x0, x1, x2, x3, x4, x5, x6, x7,
x8, x9, x10, x11, x12, x13, x14, x15,
x16, x17, x18, x19, x20, x21, x22, x23,
@@ -55,7 +392,7 @@ pub const RawRegister = enum(u8) {
}
};
-pub const Register = enum(u8) {
+pub const Register = enum(u5) {
// 64 bit registers
zero, // zero
ra, // return address. caller saved
@@ -76,11 +413,6 @@ pub const Register = enum(u8) {
return null;
}
- /// Returns the register's id.
- pub fn id(self: @This()) u5 {
- return @truncate(u5, @enumToInt(self));
- }
-
/// Returns the index into `callee_preserved_regs`.
pub fn allocIndex(self: Register) ?u4 {
inline for(callee_preserved_regs) |cpreg, i| {
@@ -90,7 +422,7 @@ pub const Register = enum(u8) {
}
pub fn dwarfLocOp(reg: Register) u8 {
- return @enumToInt(reg) + DW.OP_reg0;
+ return @as(u8, @enumToInt(reg)) + DW.OP_reg0;
}
};
From 8d6004769731deaabbed014c79b4066bd9e380e8 Mon Sep 17 00:00:00 2001
From: Frank Denis
Date: Mon, 17 Aug 2020 15:48:41 +0200
Subject: [PATCH 143/153] ristretto255: add uniform string->element map & fast
equivalence check
---
lib/std/crypto/25519/field.zig | 6 +++
lib/std/crypto/25519/ristretto255.zig | 69 +++++++++++++++++++++------
2 files changed, 60 insertions(+), 15 deletions(-)
diff --git a/lib/std/crypto/25519/field.zig b/lib/std/crypto/25519/field.zig
index 121d187e17..85b8c68315 100644
--- a/lib/std/crypto/25519/field.zig
+++ b/lib/std/crypto/25519/field.zig
@@ -21,6 +21,12 @@ pub const Fe = struct {
pub const edwards25519sqrtamd = Fe{ .limbs = .{ 278908739862762, 821645201101625, 8113234426968, 1777959178193151, 2118520810568447 } }; // 1/sqrt(a-d)
+ pub const edwards25519eonemsqd = Fe{ .limbs = .{ 1136626929484150, 1998550399581263, 496427632559748, 118527312129759, 45110755273534 } }; // 1-d^2
+
+ pub const edwards25519sqdmone = Fe{ .limbs = .{ 1507062230895904, 1572317787530805, 683053064812840, 317374165784489, 1572899562415810 } }; // (d-1)^2
+
+ pub const edwards25519sqrtadm1 = Fe{ .limbs = .{ 2241493124984347, 425987919032274, 2207028919301688, 1220490630685848, 974799131293748 } };
+
pub inline fn isZero(fe: Fe) bool {
var reduced = fe;
reduced.reduce();
diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig
index bfdeb41f9d..a9636074a6 100644
--- a/lib/std/crypto/25519/ristretto255.zig
+++ b/lib/std/crypto/25519/ristretto255.zig
@@ -12,7 +12,7 @@ pub const Ristretto255 = struct {
p: Curve,
- fn sqrtRatioM1(u: Fe, v: Fe) !Fe {
+ fn sqrtRatioM1(u: Fe, v: Fe) struct { ratio_is_square: u32, root: Fe } {
const v3 = v.sq().mul(v); // v^3
var x = v3.sq().mul(u).mul(v).pow2523().mul(v3).mul(u); // uv^3(uv^7)^((q-5)/8)
const vxx = x.sq().mul(v); // vx^2
@@ -24,11 +24,7 @@ pub const Ristretto255 = struct {
const has_f_root = f_root_check.isZero();
const x_sqrtm1 = x.mul(Fe.sqrtm1); // x*sqrt(-1)
x.cMov(x_sqrtm1, @boolToInt(has_p_root) | @boolToInt(has_f_root));
- const xa = x.abs();
- if ((@boolToInt(has_m_root) | @boolToInt(has_p_root)) == 0) {
- return error.NoRoot;
- }
- return xa;
+ return .{ .ratio_is_square = @boolToInt(has_m_root) | @boolToInt(has_p_root), .root = x.abs() };
}
fn rejectNonCanonical(s: [32]u8) !void {
@@ -57,15 +53,14 @@ pub const Ristretto255 = struct {
const u2u2 = u2_.sq(); // (1+s^2)^2
const v = Fe.edwards25519d.mul(u1u1).neg().sub(u2u2); // -(d*u1^2)-u2^2
const v_u2u2 = v.mul(u2u2); // v*u2^2
- const inv_sqrt = sqrtRatioM1(Fe.one, v_u2u2) catch |e| {
- return error.InvalidEncoding;
- };
- var x = inv_sqrt.mul(u2_);
- const y = inv_sqrt.mul(x).mul(v).mul(u1_);
+
+ const inv_sqrt = sqrtRatioM1(Fe.one, v_u2u2);
+ var x = inv_sqrt.root.mul(u2_);
+ const y = inv_sqrt.root.mul(x).mul(v).mul(u1_);
x = x.mul(s_);
x = x.add(x).abs();
const t = x.mul(y);
- if ((@boolToInt(t.isNegative()) | @boolToInt(y.isZero())) != 0) {
+ if ((1 - inv_sqrt.ratio_is_square) | @boolToInt(t.isNegative()) | @boolToInt(y.isZero()) != 0) {
return error.InvalidEncoding;
}
const p: Curve = .{
@@ -85,9 +80,9 @@ pub const Ristretto255 = struct {
u1_ = u1_.mul(zmy); // (Z+Y)*(Z-Y)
const u2_ = p.x.mul(p.y); // X*Y
const u1_u2u2 = u2_.sq().mul(u1_); // u1*u2^2
- const inv_sqrt = sqrtRatioM1(Fe.one, u1_u2u2) catch unreachable;
- const den1 = inv_sqrt.mul(u1_);
- const den2 = inv_sqrt.mul(u2_);
+ const inv_sqrt = sqrtRatioM1(Fe.one, u1_u2u2);
+ const den1 = inv_sqrt.root.mul(u1_);
+ const den2 = inv_sqrt.root.mul(u2_);
const z_inv = den1.mul(den2).mul(p.t); // den1*den2*T
const ix = p.x.mul(Fe.sqrtm1); // X*sqrt(-1)
const iy = p.y.mul(Fe.sqrtm1); // Y*sqrt(-1)
@@ -109,6 +104,35 @@ pub const Ristretto255 = struct {
return p.z.sub(y).mul(den_inv).abs().toBytes();
}
+ fn elligator(t: Fe) Curve {
+ const r = t.sq().mul(Fe.sqrtm1); // sqrt(-1)*t^2
+ const u = r.add(Fe.one).mul(Fe.edwards25519eonemsqd); // (r+1)*(1-d^2)
+ var c = comptime Fe.one.neg(); // -1
+ const v = c.sub(r.mul(Fe.edwards25519d)).mul(r.add(Fe.edwards25519d)); // (c-r*d)*(r+d)
+ const ratio_sqrt = sqrtRatioM1(u, v);
+ const wasnt_square = 1 - ratio_sqrt.ratio_is_square;
+ var s = ratio_sqrt.root;
+ const s_prime = s.mul(t).abs().neg(); // -|s*t|
+ s.cMov(s_prime, wasnt_square);
+ c.cMov(r, wasnt_square);
+
+ const n = r.sub(Fe.one).mul(c).mul(Fe.edwards25519sqdmone).sub(v); // c*(r-1)*(d-1)^2-v
+ const w0 = s.add(s).mul(v); // 2s*v
+ const w1 = n.mul(Fe.edwards25519sqrtadm1); // n*sqrt(ad-1)
+ const ss = s.sq(); // s^2
+ const w2 = Fe.one.sub(ss); // 1-s^2
+ const w3 = Fe.one.add(ss); // 1+s^2
+
+ return .{ .x = w0.mul(w3), .y = w2.mul(w1), .z = w1.mul(w3), .t = w0.mul(w2) };
+ }
+
+ /// Map a 64-bit string into a Ristretto255 group element
+ pub fn fromUniform(h: [64]u8) Ristretto255 {
+ const p0 = elligator(Fe.fromBytes(h[0..32].*));
+ const p1 = elligator(Fe.fromBytes(h[32..64].*));
+ return Ristretto255{ .p = p0.add(p1) };
+ }
+
/// Double a Ristretto255 element.
pub inline fn dbl(p: Ristretto255) Ristretto255 {
return .{ .p = p.p.dbl() };
@@ -125,6 +149,15 @@ pub const Ristretto255 = struct {
pub inline fn mul(p: Ristretto255, s: [32]u8) !Ristretto255 {
return Ristretto255{ .p = try p.p.mul(s) };
}
+
+ /// Return true if two Ristretto255 elements are equivalent
+ pub fn equivalent(p: Ristretto255, q: Ristretto255) bool {
+ const p_ = &p.p;
+ const q_ = &q.p;
+ const a = p_.x.mul(q_.y).equivalent(p_.y.mul(q_.x));
+ const b = p_.y.mul(q_.y).equivalent(p_.x.mul(q_.x));
+ return (@boolToInt(a) | @boolToInt(b)) != 0;
+ }
};
test "ristretto255" {
@@ -141,4 +174,10 @@ test "ristretto255" {
const s = [_]u8{15} ++ [_]u8{0} ** 31;
const w = try p.mul(s);
std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{w.toBytes()}), "E0C418F7C8D9C4CDD7395B93EA124F3AD99021BB681DFC3302A9D99A2E53E64E");
+
+ std.testing.expect(p.dbl().dbl().dbl().dbl().equivalent(w.add(p)));
+
+ const h = [_]u8{69} ** 32 ++ [_]u8{42} ** 32;
+ const ph = Ristretto255.fromUniform(h);
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{ph.toBytes()}), "DCCA54E037A4311EFBEEF413ACD21D35276518970B7A61DC88F8587B493D5E19");
}
From 60ea87340e7a7157bedbb43d60804ff0fb191ef9 Mon Sep 17 00:00:00 2001
From: Soren
Date: Sun, 16 Aug 2020 12:08:34 -0700
Subject: [PATCH 144/153] Fix opaque structs and C++ mangling
---
src/codegen.cpp | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 1bd8d5b7bc..36ed520069 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -10252,9 +10252,11 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, ZigType *type_e
gen_h->types_to_declare.append(type_entry);
return;
case ZigTypeIdStruct:
- for (uint32_t i = 0; i < type_entry->data.structure.src_field_count; i += 1) {
- TypeStructField *field = type_entry->data.structure.fields[i];
- prepend_c_type_to_decl_list(g, gen_h, field->type_entry);
+ if(type_entry->data.structure.layout == ContainerLayoutExtern) {
+ for (uint32_t i = 0; i < type_entry->data.structure.src_field_count; i += 1) {
+ TypeStructField *field = type_entry->data.structure.fields[i];
+ prepend_c_type_to_decl_list(g, gen_h, field->type_entry);
+ }
}
gen_h->types_to_declare.append(type_entry);
return;
@@ -10687,21 +10689,19 @@ static void gen_h_file(CodeGen *g) {
fprintf(out_h, "\n");
}
- fprintf(out_h, "%s", buf_ptr(&types_buf));
-
fprintf(out_h, "#ifdef __cplusplus\n");
fprintf(out_h, "extern \"C\" {\n");
fprintf(out_h, "#endif\n");
fprintf(out_h, "\n");
+ fprintf(out_h, "%s", buf_ptr(&types_buf));
fprintf(out_h, "%s\n", buf_ptr(&fns_buf));
+ fprintf(out_h, "%s\n", buf_ptr(&vars_buf));
fprintf(out_h, "#ifdef __cplusplus\n");
fprintf(out_h, "} // extern \"C\"\n");
fprintf(out_h, "#endif\n\n");
- fprintf(out_h, "%s\n", buf_ptr(&vars_buf));
-
fprintf(out_h, "#endif // %s\n", buf_ptr(ifdef_dance_name));
if (fclose(out_h))
From 56c81c713f749f858089e5bcdf9e6fac588bcf86 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 18 Aug 2020 01:29:34 -0700
Subject: [PATCH 145/153] stage1: let \r\n slide
---
src/tokenizer.cpp | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 487a125d62..4415bdf431 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -17,6 +17,7 @@
#define WHITESPACE \
' ': \
+ case '\r': \
case '\n'
#define DIGIT_NON_ZERO \
From c0517bf1f6f6f7de0279e3766f414c5c8380bed7 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 18 Aug 2020 01:30:03 -0700
Subject: [PATCH 146/153] std.cache_hash: temporary workaround for mtime
precision on linux
See #6082
---
lib/std/cache_hash.zig | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/lib/std/cache_hash.zig b/lib/std/cache_hash.zig
index bc684faa61..9c53f9e4c4 100644
--- a/lib/std/cache_hash.zig
+++ b/lib/std/cache_hash.zig
@@ -451,10 +451,17 @@ fn isProblematicTimestamp(fs_clock: i128) bool {
// to detect precision of seconds, because looking at the zero bits in base
// 2 would not detect precision of the seconds value.
const fs_sec = @intCast(i64, @divFloor(fs_clock, std.time.ns_per_s));
- const fs_nsec = @intCast(i64, @mod(fs_clock, std.time.ns_per_s));
+ var fs_nsec = @intCast(i64, @mod(fs_clock, std.time.ns_per_s));
var wall_sec = @intCast(i64, @divFloor(wall_clock, std.time.ns_per_s));
var wall_nsec = @intCast(i64, @mod(wall_clock, std.time.ns_per_s));
+ if (std.Target.current.os.tag == .linux) {
+ // TODO As a temporary measure while we figure out how to solve
+ // https://github.com/ziglang/zig/issues/6082, we cut the granularity of nanoseconds
+ // by a large amount.
+ fs_nsec &= @as(i64, -1) << 23;
+ }
+
// First make all the least significant zero bits in the fs_clock, also zero bits in the wall clock.
if (fs_nsec == 0) {
wall_nsec = 0;
@@ -466,16 +473,7 @@ fn isProblematicTimestamp(fs_clock: i128) bool {
} else {
wall_nsec &= @as(i64, -1) << @intCast(u6, @ctz(i64, fs_nsec));
}
- if (wall_nsec == fs_nsec and wall_sec == fs_sec)
- return true;
-
- // I have also observed precision problems at a millisecond granularity.
- const fs_msec = @intCast(i64, @divFloor(fs_clock, std.time.ns_per_ms * 2));
- const wall_msec = @intCast(i64, @divFloor(wall_clock, std.time.ns_per_ms * 2));
- if (fs_msec == wall_msec)
- return true;
-
- return false;
+ return wall_nsec == fs_nsec and wall_sec == fs_sec;
}
test "cache file and then recall it" {
From bdb8c494188f699527e927f3a4d3b37d3823549f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 18 Aug 2020 08:44:34 -0700
Subject: [PATCH 147/153] stage1: update compile error tests
follow-up to 56c81c713f749f858089e5bcdf9e6fac588bcf86
---
test/compile_errors.zig | 6 ------
1 file changed, 6 deletions(-)
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 48198199df..fc5b16fd9d 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -6665,12 +6665,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:9:13: error: type '*MyType' does not support field access",
});
- cases.add("carriage return special case", "fn test() bool {\r\n" ++
- " true\r\n" ++
- "}\r\n", &[_][]const u8{
- "tmp.zig:1:17: error: invalid carriage return, only '\\n' line endings are supported",
- });
-
cases.add("invalid legacy unicode escape",
\\export fn entry() void {
\\ const a = '\U1234';
From e2c741f1e7dbddabdbfc18a2520f5efa376899bc Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 18 Aug 2020 12:44:00 -0700
Subject: [PATCH 148/153] std.cache_hash: additionally use file size to detect
modifications
I have observed on Linux writing and reading the same file many times
without the mtime changing, despite the file system having nanosecond
granularity (and about 1 millisecond worth of nanoseconds passing between
modifications). I am calling this a Linux Kernel Bug and adding file
size to the cache hash manifest as a mitigation. As evidence, macOS does
not exhibit this behavior.
This means it is possible, on Linux, for a file to be added to the cache
hash, and, if it is updated with the same file size, same inode, within
about 1 millisecond, the cache system will give us a false positive,
saying it is unmodified. I don't see any way to improve this situation
without fixing the bug in the Linux kernel.
closes #6082
---
lib/std/cache_hash.zig | 16 ++++++----------
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/lib/std/cache_hash.zig b/lib/std/cache_hash.zig
index 9c53f9e4c4..79597a2663 100644
--- a/lib/std/cache_hash.zig
+++ b/lib/std/cache_hash.zig
@@ -188,11 +188,13 @@ pub const CacheHash = struct {
};
var iter = mem.tokenize(line, " ");
+ const size = iter.next() orelse return error.InvalidFormat;
const inode = iter.next() orelse return error.InvalidFormat;
const mtime_nsec_str = iter.next() orelse return error.InvalidFormat;
const digest_str = iter.next() orelse return error.InvalidFormat;
const file_path = iter.rest();
+ cache_hash_file.stat.size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat;
cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
base64_decoder.decode(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
@@ -216,10 +218,11 @@ pub const CacheHash = struct {
defer this_file.close();
const actual_stat = try this_file.stat();
+ const size_match = actual_stat.size == cache_hash_file.stat.size;
const mtime_match = actual_stat.mtime == cache_hash_file.stat.mtime;
const inode_match = actual_stat.inode == cache_hash_file.stat.inode;
- if (!mtime_match or !inode_match) {
+ if (!size_match or !mtime_match or !inode_match) {
self.manifest_dirty = true;
cache_hash_file.stat = actual_stat;
@@ -392,7 +395,7 @@ pub const CacheHash = struct {
for (self.files.items) |file| {
base64_encoder.encode(encoded_digest[0..], &file.bin_digest);
- try outStream.print("{} {} {} {}\n", .{ file.stat.inode, file.stat.mtime, encoded_digest[0..], file.path });
+ try outStream.print("{} {} {} {} {}\n", .{ file.stat.size, file.stat.inode, file.stat.mtime, encoded_digest[0..], file.path });
}
try self.manifest_file.?.pwriteAll(contents.items, 0);
@@ -451,17 +454,10 @@ fn isProblematicTimestamp(fs_clock: i128) bool {
// to detect precision of seconds, because looking at the zero bits in base
// 2 would not detect precision of the seconds value.
const fs_sec = @intCast(i64, @divFloor(fs_clock, std.time.ns_per_s));
- var fs_nsec = @intCast(i64, @mod(fs_clock, std.time.ns_per_s));
+ const fs_nsec = @intCast(i64, @mod(fs_clock, std.time.ns_per_s));
var wall_sec = @intCast(i64, @divFloor(wall_clock, std.time.ns_per_s));
var wall_nsec = @intCast(i64, @mod(wall_clock, std.time.ns_per_s));
- if (std.Target.current.os.tag == .linux) {
- // TODO As a temporary measure while we figure out how to solve
- // https://github.com/ziglang/zig/issues/6082, we cut the granularity of nanoseconds
- // by a large amount.
- fs_nsec &= @as(i64, -1) << 23;
- }
-
// First make all the least significant zero bits in the fs_clock, also zero bits in the wall clock.
if (fs_nsec == 0) {
wall_nsec = 0;
From 5547abd2d1fab482ab0d26a92058c94c298ce403 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 18 Aug 2020 15:08:43 -0700
Subject: [PATCH 149/153] build: -Dforce-link-libc now also applies to
test-stage2
---
build.zig | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/build.zig b/build.zig
index 95761a2fd3..2536b24bc0 100644
--- a/build.zig
+++ b/build.zig
@@ -77,7 +77,10 @@ pub fn build(b: *Builder) !void {
}
const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source");
const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse false;
- if (link_libc) exe.linkLibC();
+ if (link_libc) {
+ exe.linkLibC();
+ test_stage2.linkLibC();
+ }
const log_scopes = b.option([]const []const u8, "log", "Which log scopes to enable") orelse &[0][]const u8{};
From 583b843803c5851c1d9796ba301f6a602a6da3d9 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 18 Aug 2020 15:09:48 -0700
Subject: [PATCH 150/153] std.heap.GeneralPurposeAllocator: add `never_unmap`
config option
This is a temporary debugging trick you can use to turn segfaults into more helpful
logged error messages with stack trace details. The downside is that every allocation
will be leaked!
---
lib/std/heap/general_purpose_allocator.zig | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 91a01bb837..cb53af113e 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -142,6 +142,11 @@ pub const Config = struct {
/// Whether the allocator may be used simultaneously from multiple threads.
thread_safe: bool = !std.builtin.single_threaded,
+
+ /// This is a temporary debugging trick you can use to turn segfaults into more helpful
+ /// logged error messages with stack trace details. The downside is that every allocation
+ /// will be leaked!
+ never_unmap: bool = false,
};
pub fn GeneralPurposeAllocator(comptime config: Config) type {
@@ -416,7 +421,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
bucket.prev.next = bucket.next;
self.buckets[bucket_index] = bucket.prev;
}
- self.backing_allocator.free(bucket.page[0..page_size]);
+ if (!config.never_unmap) {
+ self.backing_allocator.free(bucket.page[0..page_size]);
+ }
const bucket_size = bucketSize(size_class);
const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size];
self.backing_allocator.free(bucket_slice);
From f950f5452b6a8131f2bb3f665b2ac141dfba367f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 18 Aug 2020 15:10:31 -0700
Subject: [PATCH 151/153] test_runner: don't assume the GeneralPurposeAllocator
config
This allows changing the config in only 1 location (std.testing)
---
lib/std/special/test_runner.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index f4c8c6de9d..4267151638 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -23,7 +23,7 @@ pub fn main() anyerror!void {
var leaks: usize = 0;
for (test_fn_list) |test_fn, i| {
- std.testing.allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
+ std.testing.allocator_instance = .{};
defer {
if (std.testing.allocator_instance.deinit()) {
leaks += 1;
From 31b58acdaef2942a33a0609706446b534539b063 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 18 Aug 2020 15:11:24 -0700
Subject: [PATCH 152/153] stage2: minor cleanup
---
src-self-hosted/codegen.zig | 4 ++--
src-self-hosted/test.zig | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index c59b8db825..8887a1e0ca 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -73,6 +73,8 @@ pub fn generateSymbol(
switch (typed_value.ty.zigTypeTag()) {
.Fn => {
switch (bin_file.base.options.target.cpu.arch) {
+ .wasm32 => unreachable, // has its own code path
+ .wasm64 => unreachable, // has its own code path
//.arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
@@ -119,8 +121,6 @@ pub fn generateSymbol(
//.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.wasm32 => return Function(.wasm32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
- //.wasm64 => return Function(.wasm64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
index 265c26b2dc..b51cb313f8 100644
--- a/src-self-hosted/test.zig
+++ b/src-self-hosted/test.zig
@@ -421,7 +421,7 @@ pub const TestContext = struct {
}
fn runOneCase(self: *TestContext, allocator: *Allocator, root_node: *std.Progress.Node, case: Case) !void {
- const target_info = try std.zig.system.NativeTargetInfo.detect(std.testing.allocator, case.target);
+ const target_info = try std.zig.system.NativeTargetInfo.detect(allocator, case.target);
const target = target_info.target;
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
From 15bcfcd36865fca75b93dc6ce52c904292b62a81 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 18 Aug 2020 15:11:43 -0700
Subject: [PATCH 153/153] stage2: fix use-after-free when printing ZIR
---
src-self-hosted/zir.zig | 6 ++++--
test/stage2/zir.zig | 18 +++++++++---------
2 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/src-self-hosted/zir.zig b/src-self-hosted/zir.zig
index 276db6d522..793fdff031 100644
--- a/src-self-hosted/zir.zig
+++ b/src-self-hosted/zir.zig
@@ -872,6 +872,8 @@ pub const Module = struct {
};
pub fn deinit(self: *Module, allocator: *Allocator) void {
+ self.metadata.deinit();
+ self.body_metadata.deinit();
allocator.free(self.decls);
self.arena.deinit();
self.* = undefined;
@@ -1543,8 +1545,8 @@ pub fn emit(allocator: *Allocator, old_module: IrModule) !Module {
.metadata = std.AutoHashMap(*Inst, Module.MetaData).init(allocator),
.body_metadata = std.AutoHashMap(*Module.Body, Module.BodyMetaData).init(allocator),
};
- defer ctx.metadata.deinit();
- defer ctx.body_metadata.deinit();
+ errdefer ctx.metadata.deinit();
+ errdefer ctx.body_metadata.deinit();
defer ctx.block_table.deinit();
defer ctx.loop_table.deinit();
defer ctx.decls.deinit(allocator);
diff --git a/test/stage2/zir.zig b/test/stage2/zir.zig
index f77c950052..225a7f58cd 100644
--- a/test/stage2/zir.zig
+++ b/test/stage2/zir.zig
@@ -28,7 +28,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\@unnamed$5 = export(@unnamed$4, "entry")
\\@unnamed$6 = fntype([], @void, cc=C)
\\@entry = fn(@unnamed$6, {
- \\ %0 = returnvoid()
+ \\ %0 = returnvoid() ; deaths=0b1000000000000000
\\})
\\
);
@@ -75,7 +75,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\@3 = int(3)
\\@unnamed$6 = fntype([], @void, cc=C)
\\@entry = fn(@unnamed$6, {
- \\ %0 = returnvoid()
+ \\ %0 = returnvoid() ; deaths=0b1000000000000000
\\})
\\@entry__anon_1 = str("2\x08\x01\n")
\\@9 = declref("9__anon_0")
@@ -117,18 +117,18 @@ pub fn addCases(ctx: *TestContext) !void {
\\@unnamed$5 = export(@unnamed$4, "entry")
\\@unnamed$6 = fntype([], @void, cc=C)
\\@entry = fn(@unnamed$6, {
- \\ %0 = call(@a, [], modifier=auto)
- \\ %1 = returnvoid()
+ \\ %0 = call(@a, [], modifier=auto) ; deaths=0b1000000000000001
+ \\ %1 = returnvoid() ; deaths=0b1000000000000000
\\})
\\@unnamed$8 = fntype([], @void, cc=C)
\\@a = fn(@unnamed$8, {
- \\ %0 = call(@b, [], modifier=auto)
- \\ %1 = returnvoid()
+ \\ %0 = call(@b, [], modifier=auto) ; deaths=0b1000000000000001
+ \\ %1 = returnvoid() ; deaths=0b1000000000000000
\\})
\\@unnamed$10 = fntype([], @void, cc=C)
\\@b = fn(@unnamed$10, {
- \\ %0 = call(@a, [], modifier=auto)
- \\ %1 = returnvoid()
+ \\ %0 = call(@a, [], modifier=auto) ; deaths=0b1000000000000001
+ \\ %1 = returnvoid() ; deaths=0b1000000000000000
\\})
\\
);
@@ -193,7 +193,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\@unnamed$5 = export(@unnamed$4, "entry")
\\@unnamed$6 = fntype([], @void, cc=C)
\\@entry = fn(@unnamed$6, {
- \\ %0 = returnvoid()
+ \\ %0 = returnvoid() ; deaths=0b1000000000000000
\\})
\\
);