Merge remote-tracking branch 'origin/master' into llvm7

Andrew Kelley
2018-07-14 18:27:51 -04:00
52 changed files with 4063 additions and 1520 deletions


@@ -431,8 +431,8 @@ set(ZIG_CPP_SOURCES
set(ZIG_STD_FILES
"array_list.zig"
"atomic/index.zig"
"atomic/queue_mpmc.zig"
"atomic/queue_mpsc.zig"
"atomic/int.zig"
"atomic/queue.zig"
"atomic/stack.zig"
"base64.zig"
"buf_map.zig"
@@ -459,6 +459,8 @@ set(ZIG_STD_FILES
"empty.zig"
"event.zig"
"event/channel.zig"
"event/future.zig"
"event/group.zig"
"event/lock.zig"
"event/locked.zig"
"event/loop.zig"

build.zig

@@ -35,73 +35,31 @@ pub fn build(b: *Builder) !void {
"BUILD_INFO",
});
var index: usize = 0;
const cmake_binary_dir = nextValue(&index, build_info);
const cxx_compiler = nextValue(&index, build_info);
const llvm_config_exe = nextValue(&index, build_info);
const lld_include_dir = nextValue(&index, build_info);
const lld_libraries = nextValue(&index, build_info);
const std_files = nextValue(&index, build_info);
const c_header_files = nextValue(&index, build_info);
const dia_guids_lib = nextValue(&index, build_info);
var ctx = Context{
.cmake_binary_dir = nextValue(&index, build_info),
.cxx_compiler = nextValue(&index, build_info),
.llvm_config_exe = nextValue(&index, build_info),
.lld_include_dir = nextValue(&index, build_info),
.lld_libraries = nextValue(&index, build_info),
.std_files = nextValue(&index, build_info),
.c_header_files = nextValue(&index, build_info),
.dia_guids_lib = nextValue(&index, build_info),
.llvm = undefined,
};
ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
const llvm = findLLVM(b, llvm_config_exe) catch unreachable;
var test_stage2 = b.addTest("src-self-hosted/test.zig");
test_stage2.setBuildMode(builtin.Mode.Debug);
var exe = b.addExecutable("zig", "src-self-hosted/main.zig");
exe.setBuildMode(mode);
// This is for finding /lib/libz.a on alpine linux.
// TODO turn this into -Dextra-lib-path=/lib option
exe.addLibPath("/lib");
exe.addIncludeDir("src");
exe.addIncludeDir(cmake_binary_dir);
addCppLib(b, exe, cmake_binary_dir, "zig_cpp");
if (lld_include_dir.len != 0) {
exe.addIncludeDir(lld_include_dir);
var it = mem.split(lld_libraries, ";");
while (it.next()) |lib| {
exe.addObjectFile(lib);
}
} else {
addCppLib(b, exe, cmake_binary_dir, "embedded_lld_wasm");
addCppLib(b, exe, cmake_binary_dir, "embedded_lld_elf");
addCppLib(b, exe, cmake_binary_dir, "embedded_lld_coff");
addCppLib(b, exe, cmake_binary_dir, "embedded_lld_lib");
}
dependOnLib(exe, llvm);
if (exe.target.getOs() == builtin.Os.linux) {
const libstdcxx_path_padded = try b.exec([][]const u8{
cxx_compiler,
"-print-file-name=libstdc++.a",
});
const libstdcxx_path = mem.split(libstdcxx_path_padded, "\r\n").next().?;
if (mem.eql(u8, libstdcxx_path, "libstdc++.a")) {
warn(
\\Unable to determine path to libstdc++.a
\\On Fedora, install libstdc++-static and try again.
\\
);
return error.RequiredLibraryNotFound;
}
exe.addObjectFile(libstdcxx_path);
exe.linkSystemLibrary("pthread");
} else if (exe.target.isDarwin()) {
exe.linkSystemLibrary("c++");
}
if (dia_guids_lib.len != 0) {
exe.addObjectFile(dia_guids_lib);
}
if (exe.target.getOs() != builtin.Os.windows) {
exe.linkSystemLibrary("xml2");
}
exe.linkSystemLibrary("c");
try configureStage2(b, test_stage2, ctx);
try configureStage2(b, exe, ctx);
b.default_step.dependOn(&exe.step);
const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") orelse false;
if (!skip_self_hosted) {
test_step.dependOn(&exe.step);
@@ -110,30 +68,40 @@ pub fn build(b: *Builder) !void {
exe.setVerboseLink(verbose_link_exe);
b.installArtifact(exe);
installStdLib(b, std_files);
installCHeaders(b, c_header_files);
installStdLib(b, ctx.std_files);
installCHeaders(b, ctx.c_header_files);
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
const with_lldb = b.option(bool, "with-lldb", "Run tests in LLDB to get a backtrace if one fails") orelse false;
test_step.dependOn(docs_step);
const test_stage2_step = b.step("test-stage2", "Run the stage2 compiler tests");
test_stage2_step.dependOn(&test_stage2.step);
test_step.dependOn(test_stage2_step);
test_step.dependOn(tests.addPkgTests(b, test_filter, "test/behavior.zig", "behavior", "Run the behavior tests", with_lldb));
const all_modes = []builtin.Mode{
builtin.Mode.Debug,
builtin.Mode.ReleaseSafe,
builtin.Mode.ReleaseFast,
builtin.Mode.ReleaseSmall,
};
const modes = if (skip_release) []builtin.Mode{builtin.Mode.Debug} else all_modes;
test_step.dependOn(tests.addPkgTests(b, test_filter, "std/index.zig", "std", "Run the standard library tests", with_lldb));
test_step.dependOn(tests.addPkgTests(b, test_filter, "test/behavior.zig", "behavior", "Run the behavior tests", modes));
test_step.dependOn(tests.addPkgTests(b, test_filter, "std/special/compiler_rt/index.zig", "compiler-rt", "Run the compiler_rt tests", with_lldb));
test_step.dependOn(tests.addPkgTests(b, test_filter, "std/index.zig", "std", "Run the standard library tests", modes));
test_step.dependOn(tests.addCompareOutputTests(b, test_filter));
test_step.dependOn(tests.addPkgTests(b, test_filter, "std/special/compiler_rt/index.zig", "compiler-rt", "Run the compiler_rt tests", modes));
test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
test_step.dependOn(tests.addBuildExampleTests(b, test_filter));
test_step.dependOn(tests.addCompileErrorTests(b, test_filter));
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter));
test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter));
test_step.dependOn(tests.addCompileErrorTests(b, test_filter, modes));
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes));
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
test_step.dependOn(tests.addGenHTests(b, test_filter));
test_step.dependOn(docs_step);
}
fn dependOnLib(lib_exe_obj: *std.build.LibExeObjStep, dep: *const LibraryDep) void {
fn dependOnLib(lib_exe_obj: var, dep: *const LibraryDep) void {
for (dep.libdirs.toSliceConst()) |lib_dir| {
lib_exe_obj.addLibPath(lib_dir);
}
@@ -148,7 +116,7 @@ fn dependOnLib(lib_exe_obj: *std.build.LibExeObjStep, dep: *const LibraryDep) vo
}
}
fn addCppLib(b: *Builder, lib_exe_obj: *std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
fn addCppLib(b: *Builder, lib_exe_obj: var, cmake_binary_dir: []const u8, lib_name: []const u8) void {
const lib_prefix = if (lib_exe_obj.target.isWindows()) "" else "lib";
lib_exe_obj.addObjectFile(os.path.join(b.allocator, cmake_binary_dir, "zig_cpp", b.fmt("{}{}{}", lib_prefix, lib_name, lib_exe_obj.target.libFileExt())) catch unreachable);
}
@@ -254,3 +222,68 @@ fn nextValue(index: *usize, build_info: []const u8) []const u8 {
}
}
}
fn configureStage2(b: *Builder, exe: var, ctx: Context) !void {
// This is for finding /lib/libz.a on alpine linux.
// TODO turn this into -Dextra-lib-path=/lib option
exe.addLibPath("/lib");
exe.addIncludeDir("src");
exe.addIncludeDir(ctx.cmake_binary_dir);
addCppLib(b, exe, ctx.cmake_binary_dir, "zig_cpp");
if (ctx.lld_include_dir.len != 0) {
exe.addIncludeDir(ctx.lld_include_dir);
var it = mem.split(ctx.lld_libraries, ";");
while (it.next()) |lib| {
exe.addObjectFile(lib);
}
} else {
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_wasm");
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_elf");
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_coff");
addCppLib(b, exe, ctx.cmake_binary_dir, "embedded_lld_lib");
}
dependOnLib(exe, ctx.llvm);
if (exe.target.getOs() == builtin.Os.linux) {
const libstdcxx_path_padded = try b.exec([][]const u8{
ctx.cxx_compiler,
"-print-file-name=libstdc++.a",
});
const libstdcxx_path = mem.split(libstdcxx_path_padded, "\r\n").next().?;
if (mem.eql(u8, libstdcxx_path, "libstdc++.a")) {
warn(
\\Unable to determine path to libstdc++.a
\\On Fedora, install libstdc++-static and try again.
\\
);
return error.RequiredLibraryNotFound;
}
exe.addObjectFile(libstdcxx_path);
exe.linkSystemLibrary("pthread");
} else if (exe.target.isDarwin()) {
exe.linkSystemLibrary("c++");
}
if (ctx.dia_guids_lib.len != 0) {
exe.addObjectFile(ctx.dia_guids_lib);
}
if (exe.target.getOs() != builtin.Os.windows) {
exe.linkSystemLibrary("xml2");
}
exe.linkSystemLibrary("c");
}
const Context = struct {
cmake_binary_dir: []const u8,
cxx_compiler: []const u8,
llvm_config_exe: []const u8,
lld_include_dir: []const u8,
lld_libraries: []const u8,
std_files: []const u8,
c_header_files: []const u8,
dia_guids_lib: []const u8,
llvm: LibraryDep,
};
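Taken together, the hunks above replace a flat list of locals with a Context that is built once in build() and threaded through configureStage2 for both binaries. Condensed from the diff (no new names introduced):

    var ctx = Context{
        .cmake_binary_dir = nextValue(&index, build_info),
        // ... the remaining string fields are read the same way ...
        .llvm = undefined,
    };
    ctx.llvm = try findLLVM(b, ctx.llvm_config_exe);
    try configureStage2(b, test_stage2, ctx);
    try configureStage2(b, exe, ctx);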


@@ -2239,7 +2239,7 @@ test "switch inside function" {
// On an OS other than fuchsia, the block is not even analyzed,
// so this compile error is not triggered.
// On fuchsia this compile error would be triggered.
@compileError("windows not supported");
@compileError("fuchsia not supported");
},
else => {},
}
@@ -2303,13 +2303,13 @@ test "while continue" {
{#code_begin|test|while#}
const assert = @import("std").debug.assert;
test "while loop continuation expression" {
test "while loop continue expression" {
var i: usize = 0;
while (i < 10) : (i += 1) {}
assert(i == 10);
}
test "while loop continuation expression, more complicated" {
test "while loop continue expression, more complicated" {
var i1: usize = 1;
var j1: usize = 1;
while (i1 * j1 < 2000) : ({ i1 *= 2; j1 *= 3; }) {
@@ -7118,10 +7118,16 @@ Environments:
opencl</code></pre>
<p>
The Zig Standard Library (<code>@import("std")</code>) has architecture, environment, and operating system
abstractions, and thus takes additional work to support more platforms. It currently supports
Linux x86_64. Not all standard library code requires operating system abstractions, however,
abstractions, and thus takes additional work to support more platforms.
Not all standard library code requires operating system abstractions, however,
so things such as generic data structures work on all of the above platforms.
</p>
<p>The current list of targets supported by the Zig Standard Library is:</p>
<ul>
<li>Linux x86_64</li>
<li>Windows x86_64</li>
<li>MacOS x86_64</li>
</ul>
{#header_close#}
{#header_open|Style Guide#}
<p>


@@ -0,0 +1,59 @@
const std = @import("std");
const Compilation = @import("compilation.zig").Compilation;
// we go through llvm instead of c for 2 reasons:
// 1. to avoid accidentally calling the non-thread-safe functions
// 2. patch up some of the types to remove nullability
const llvm = @import("llvm.zig");
const ir = @import("ir.zig");
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const event = std.event;
pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code) !void {
fn_val.base.ref();
defer fn_val.base.deref(comp);
defer code.destroy(comp.a());
const llvm_handle = try comp.event_loop_local.getAnyLlvmContext();
defer llvm_handle.release(comp.event_loop_local);
const context = llvm_handle.node.data;
const module = llvm.ModuleCreateWithNameInContext(comp.name.ptr(), context) orelse return error.OutOfMemory;
defer llvm.DisposeModule(module);
const builder = llvm.CreateBuilderInContext(context) orelse return error.OutOfMemory;
defer llvm.DisposeBuilder(builder);
var ofile = ObjectFile{
.comp = comp,
.module = module,
.builder = builder,
.context = context,
.lock = event.Lock.init(comp.loop),
};
try renderToLlvmModule(&ofile, fn_val, code);
if (comp.verbose_llvm_ir) {
llvm.DumpModule(ofile.module);
}
}
pub const ObjectFile = struct {
comp: *Compilation,
module: llvm.ModuleRef,
builder: llvm.BuilderRef,
context: llvm.ContextRef,
lock: event.Lock,
fn a(self: *ObjectFile) *std.mem.Allocator {
return self.comp.a();
}
};
pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code) !void {
// TODO audit more of codegen.cpp:fn_llvm_value and port more logic
const llvm_fn_type = try fn_val.base.typeof.getLlvmType(ofile);
const llvm_fn = llvm.AddFunction(ofile.module, fn_val.symbol_name.ptr(), llvm_fn_type);
}


@@ -0,0 +1,747 @@
const std = @import("std");
const os = std.os;
const io = std.io;
const mem = std.mem;
const Allocator = mem.Allocator;
const Buffer = std.Buffer;
const llvm = @import("llvm.zig");
const c = @import("c.zig");
const builtin = @import("builtin");
const Target = @import("target.zig").Target;
const warn = std.debug.warn;
const Token = std.zig.Token;
const ArrayList = std.ArrayList;
const errmsg = @import("errmsg.zig");
const ast = std.zig.ast;
const event = std.event;
const assert = std.debug.assert;
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
const Scope = @import("scope.zig").Scope;
const Decl = @import("decl.zig").Decl;
const ir = @import("ir.zig");
const Visib = @import("visib.zig").Visib;
const ParsedFile = @import("parsed_file.zig").ParsedFile;
const Value = @import("value.zig").Value;
const Type = Value.Type;
const Span = errmsg.Span;
const codegen = @import("codegen.zig");
/// Data that is local to the event loop.
pub const EventLoopLocal = struct {
loop: *event.Loop,
llvm_handle_pool: std.atomic.Stack(llvm.ContextRef),
fn init(loop: *event.Loop) EventLoopLocal {
return EventLoopLocal{
.loop = loop,
.llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(),
};
}
fn deinit(self: *EventLoopLocal) void {
while (self.llvm_handle_pool.pop()) |node| {
c.LLVMContextDispose(node.data);
self.loop.allocator.destroy(node);
}
}
/// Gets an exclusive handle on any LlvmContext.
/// Caller must release the handle when done.
pub fn getAnyLlvmContext(self: *EventLoopLocal) !LlvmHandle {
if (self.llvm_handle_pool.pop()) |node| return LlvmHandle{ .node = node };
const context_ref = c.LLVMContextCreate() orelse return error.OutOfMemory;
errdefer c.LLVMContextDispose(context_ref);
const node = try self.loop.allocator.create(std.atomic.Stack(llvm.ContextRef).Node{
.next = undefined,
.data = context_ref,
});
errdefer self.loop.allocator.destroy(node);
return LlvmHandle{ .node = node };
}
};
pub const LlvmHandle = struct {
node: *std.atomic.Stack(llvm.ContextRef).Node,
pub fn release(self: LlvmHandle, event_loop_local: *EventLoopLocal) void {
event_loop_local.llvm_handle_pool.push(self.node);
}
};
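A usage sketch of the pool, mirroring renderToLlvm above (assumes a *Compilation named comp; not itself part of the diff):

    const llvm_handle = try comp.event_loop_local.getAnyLlvmContext();
    defer llvm_handle.release(comp.event_loop_local);
    const context = llvm_handle.node.data;
    // `context` is exclusively owned here until release() pushes it back on the pool.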
pub const Compilation = struct {
event_loop_local: *EventLoopLocal,
loop: *event.Loop,
name: Buffer,
root_src_path: ?[]const u8,
target: Target,
build_mode: builtin.Mode,
zig_lib_dir: []const u8,
version_major: u32,
version_minor: u32,
version_patch: u32,
linker_script: ?[]const u8,
cache_dir: []const u8,
libc_lib_dir: ?[]const u8,
libc_static_lib_dir: ?[]const u8,
libc_include_dir: ?[]const u8,
msvc_lib_dir: ?[]const u8,
kernel32_lib_dir: ?[]const u8,
dynamic_linker: ?[]const u8,
out_h_path: ?[]const u8,
is_test: bool,
each_lib_rpath: bool,
strip: bool,
is_static: bool,
linker_rdynamic: bool,
clang_argv: []const []const u8,
llvm_argv: []const []const u8,
lib_dirs: []const []const u8,
rpath_list: []const []const u8,
assembly_files: []const []const u8,
link_objects: []const []const u8,
windows_subsystem_windows: bool,
windows_subsystem_console: bool,
link_libs_list: ArrayList(*LinkLib),
libc_link_lib: ?*LinkLib,
err_color: errmsg.Color,
verbose_tokenize: bool,
verbose_ast_tree: bool,
verbose_ast_fmt: bool,
verbose_cimport: bool,
verbose_ir: bool,
verbose_llvm_ir: bool,
verbose_link: bool,
darwin_frameworks: []const []const u8,
darwin_version_min: DarwinVersionMin,
test_filters: []const []const u8,
test_name_prefix: ?[]const u8,
emit_file_type: Emit,
kind: Kind,
link_out_file: ?[]const u8,
events: *event.Channel(Event),
exported_symbol_names: event.Locked(Decl.Table),
/// Before code generation starts, must wait on this group to make sure
/// the build is complete.
build_group: event.Group(BuildError!void),
compile_errors: event.Locked(CompileErrList),
meta_type: *Type.MetaType,
void_type: *Type.Void,
bool_type: *Type.Bool,
noreturn_type: *Type.NoReturn,
void_value: *Value.Void,
true_value: *Value.Bool,
false_value: *Value.Bool,
noreturn_value: *Value.NoReturn,
const CompileErrList = std.ArrayList(*errmsg.Msg);
// TODO handle some of these earlier and report them in a way other than error codes
pub const BuildError = error{
OutOfMemory,
EndOfStream,
BadFd,
Io,
IsDir,
Unexpected,
SystemResources,
SharingViolation,
PathAlreadyExists,
FileNotFound,
AccessDenied,
PipeBusy,
FileTooBig,
SymLinkLoop,
ProcessFdQuotaExceeded,
NameTooLong,
SystemFdQuotaExceeded,
NoDevice,
PathNotFound,
NoSpaceLeft,
NotDir,
FileSystem,
OperationAborted,
IoPending,
BrokenPipe,
WouldBlock,
FileClosed,
DestinationAddressRequired,
DiskQuota,
InputOutput,
NoStdHandles,
Overflow,
NotSupported,
BufferTooSmall,
Unimplemented, // TODO remove this one
SemanticAnalysisFailed, // TODO remove this one
};
pub const Event = union(enum) {
Ok,
Error: BuildError,
Fail: []*errmsg.Msg,
};
pub const DarwinVersionMin = union(enum) {
None,
MacOS: []const u8,
Ios: []const u8,
};
pub const Kind = enum {
Exe,
Lib,
Obj,
};
pub const LinkLib = struct {
name: []const u8,
path: ?[]const u8,
/// the list of symbols we depend on from this lib
symbols: ArrayList([]u8),
provided_explicitly: bool,
};
pub const Emit = enum {
Binary,
Assembly,
LlvmIr,
};
pub fn create(
event_loop_local: *EventLoopLocal,
name: []const u8,
root_src_path: ?[]const u8,
target: *const Target,
kind: Kind,
build_mode: builtin.Mode,
zig_lib_dir: []const u8,
cache_dir: []const u8,
) !*Compilation {
const loop = event_loop_local.loop;
var name_buffer = try Buffer.init(loop.allocator, name);
errdefer name_buffer.deinit();
const events = try event.Channel(Event).create(loop, 0);
errdefer events.destroy();
const comp = try loop.allocator.create(Compilation{
.loop = loop,
.event_loop_local = event_loop_local,
.events = events,
.name = name_buffer,
.root_src_path = root_src_path,
.target = target.*,
.kind = kind,
.build_mode = build_mode,
.zig_lib_dir = zig_lib_dir,
.cache_dir = cache_dir,
.version_major = 0,
.version_minor = 0,
.version_patch = 0,
.verbose_tokenize = false,
.verbose_ast_tree = false,
.verbose_ast_fmt = false,
.verbose_cimport = false,
.verbose_ir = false,
.verbose_llvm_ir = false,
.verbose_link = false,
.linker_script = null,
.libc_lib_dir = null,
.libc_static_lib_dir = null,
.libc_include_dir = null,
.msvc_lib_dir = null,
.kernel32_lib_dir = null,
.dynamic_linker = null,
.out_h_path = null,
.is_test = false,
.each_lib_rpath = false,
.strip = false,
.is_static = false,
.linker_rdynamic = false,
.clang_argv = [][]const u8{},
.llvm_argv = [][]const u8{},
.lib_dirs = [][]const u8{},
.rpath_list = [][]const u8{},
.assembly_files = [][]const u8{},
.link_objects = [][]const u8{},
.windows_subsystem_windows = false,
.windows_subsystem_console = false,
.link_libs_list = ArrayList(*LinkLib).init(loop.allocator),
.libc_link_lib = null,
.err_color = errmsg.Color.Auto,
.darwin_frameworks = [][]const u8{},
.darwin_version_min = DarwinVersionMin.None,
.test_filters = [][]const u8{},
.test_name_prefix = null,
.emit_file_type = Emit.Binary,
.link_out_file = null,
.exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)),
.build_group = event.Group(BuildError!void).init(loop),
.compile_errors = event.Locked(CompileErrList).init(loop, CompileErrList.init(loop.allocator)),
.meta_type = undefined,
.void_type = undefined,
.void_value = undefined,
.bool_type = undefined,
.true_value = undefined,
.false_value = undefined,
.noreturn_type = undefined,
.noreturn_value = undefined,
});
try comp.initTypes();
return comp;
}
fn initTypes(comp: *Compilation) !void {
comp.meta_type = try comp.a().create(Type.MetaType{
.base = Type{
.base = Value{
.id = Value.Id.Type,
.typeof = undefined,
.ref_count = std.atomic.Int(usize).init(3), // 3: two self-references (value and typeof below) plus the one held by Compilation
},
.id = builtin.TypeId.Type,
},
.value = undefined,
});
comp.meta_type.value = &comp.meta_type.base;
comp.meta_type.base.base.typeof = &comp.meta_type.base;
errdefer comp.a().destroy(comp.meta_type);
comp.void_type = try comp.a().create(Type.Void{
.base = Type{
.base = Value{
.id = Value.Id.Type,
.typeof = &Type.MetaType.get(comp).base,
.ref_count = std.atomic.Int(usize).init(1),
},
.id = builtin.TypeId.Void,
},
});
errdefer comp.a().destroy(comp.void_type);
comp.noreturn_type = try comp.a().create(Type.NoReturn{
.base = Type{
.base = Value{
.id = Value.Id.Type,
.typeof = &Type.MetaType.get(comp).base,
.ref_count = std.atomic.Int(usize).init(1),
},
.id = builtin.TypeId.NoReturn,
},
});
errdefer comp.a().destroy(comp.noreturn_type);
comp.bool_type = try comp.a().create(Type.Bool{
.base = Type{
.base = Value{
.id = Value.Id.Type,
.typeof = &Type.MetaType.get(comp).base,
.ref_count = std.atomic.Int(usize).init(1),
},
.id = builtin.TypeId.Bool,
},
});
errdefer comp.a().destroy(comp.bool_type);
comp.void_value = try comp.a().create(Value.Void{
.base = Value{
.id = Value.Id.Void,
.typeof = &Type.Void.get(comp).base,
.ref_count = std.atomic.Int(usize).init(1),
},
});
errdefer comp.a().destroy(comp.void_value);
comp.true_value = try comp.a().create(Value.Bool{
.base = Value{
.id = Value.Id.Bool,
.typeof = &Type.Bool.get(comp).base,
.ref_count = std.atomic.Int(usize).init(1),
},
.x = true,
});
errdefer comp.a().destroy(comp.true_value);
comp.false_value = try comp.a().create(Value.Bool{
.base = Value{
.id = Value.Id.Bool,
.typeof = &Type.Bool.get(comp).base,
.ref_count = std.atomic.Int(usize).init(1),
},
.x = false,
});
errdefer comp.a().destroy(comp.false_value);
comp.noreturn_value = try comp.a().create(Value.NoReturn{
.base = Value{
.id = Value.Id.NoReturn,
.typeof = &Type.NoReturn.get(comp).base,
.ref_count = std.atomic.Int(usize).init(1),
},
});
errdefer comp.a().destroy(comp.noreturn_value);
}
pub fn destroy(self: *Compilation) void {
self.noreturn_value.base.deref(self);
self.void_value.base.deref(self);
self.false_value.base.deref(self);
self.true_value.base.deref(self);
self.noreturn_type.base.base.deref(self);
self.void_type.base.base.deref(self);
self.meta_type.base.base.deref(self);
self.events.destroy();
self.name.deinit();
self.a().destroy(self);
}
pub fn build(self: *Compilation) !void {
if (self.llvm_argv.len != 0) {
var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.a(), [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
self.llvm_argv,
});
defer c_compatible_args.deinit();
// TODO this sets global state
c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
}
_ = try async<self.a()> self.buildAsync();
}
async fn buildAsync(self: *Compilation) void {
while (true) {
// TODO directly awaiting async should guarantee memory allocation elision
// TODO also async before suspending should guarantee memory allocation elision
const build_result = await (async self.addRootSrc() catch unreachable);
// this makes a handy error return trace and stack trace in debug mode
if (std.debug.runtime_safety) {
build_result catch unreachable;
}
const compile_errors = blk: {
const held = await (async self.compile_errors.acquire() catch unreachable);
defer held.release();
break :blk held.value.toOwnedSlice();
};
if (build_result) |_| {
if (compile_errors.len == 0) {
await (async self.events.put(Event.Ok) catch unreachable);
} else {
await (async self.events.put(Event{ .Fail = compile_errors }) catch unreachable);
}
} else |err| {
// if there's an error then the compile errors have dangling references
self.a().free(compile_errors);
await (async self.events.put(Event{ .Error = err }) catch unreachable);
}
// for now we stop after 1
return;
}
}
async fn addRootSrc(self: *Compilation) !void {
const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
// TODO async/await os.path.real
const root_src_real_path = os.path.real(self.a(), root_src_path) catch |err| {
try printError("unable to get real path '{}': {}", root_src_path, err);
return err;
};
errdefer self.a().free(root_src_real_path);
// TODO async/await readFileAlloc()
const source_code = io.readFileAlloc(self.a(), root_src_real_path) catch |err| {
try printError("unable to open '{}': {}", root_src_real_path, err);
return err;
};
errdefer self.a().free(source_code);
const parsed_file = try self.a().create(ParsedFile{
.tree = undefined,
.realpath = root_src_real_path,
});
errdefer self.a().destroy(parsed_file);
parsed_file.tree = try std.zig.parse(self.a(), source_code);
errdefer parsed_file.tree.deinit();
const tree = &parsed_file.tree;
// create empty struct for it
const decls = try Scope.Decls.create(self, null);
defer decls.base.deref(self);
var decl_group = event.Group(BuildError!void).init(self.loop);
errdefer decl_group.cancelAll();
var it = tree.root_node.decls.iterator(0);
while (it.next()) |decl_ptr| {
const decl = decl_ptr.*;
switch (decl.id) {
ast.Node.Id.Comptime => @panic("TODO"),
ast.Node.Id.VarDecl => @panic("TODO"),
ast.Node.Id.FnProto => {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
try self.addCompileError(parsed_file, Span{
.first = fn_proto.fn_token,
.last = fn_proto.fn_token + 1,
}, "missing function name");
continue;
};
const fn_decl = try self.a().create(Decl.Fn{
.base = Decl{
.id = Decl.Id.Fn,
.name = name,
.visib = parseVisibToken(tree, fn_proto.visib_token),
.resolution = event.Future(BuildError!void).init(self.loop),
.resolution_in_progress = 0,
.parsed_file = parsed_file,
.parent_scope = &decls.base,
},
.value = Decl.Fn.Val{ .Unresolved = {} },
.fn_proto = fn_proto,
});
errdefer self.a().destroy(fn_decl);
try decl_group.call(addTopLevelDecl, self, &fn_decl.base);
},
ast.Node.Id.TestDecl => @panic("TODO"),
else => unreachable,
}
}
try await (async decl_group.wait() catch unreachable);
try await (async self.build_group.wait() catch unreachable);
}
async fn addTopLevelDecl(self: *Compilation, decl: *Decl) !void {
const is_export = decl.isExported(&decl.parsed_file.tree);
if (is_export) {
try self.build_group.call(verifyUniqueSymbol, self, decl);
try self.build_group.call(resolveDecl, self, decl);
}
}
fn addCompileError(self: *Compilation, parsed_file: *ParsedFile, span: Span, comptime fmt: []const u8, args: ...) !void {
const text = try std.fmt.allocPrint(self.loop.allocator, fmt, args);
errdefer self.loop.allocator.free(text);
try self.build_group.call(addCompileErrorAsync, self, parsed_file, span, text);
}
async fn addCompileErrorAsync(
self: *Compilation,
parsed_file: *ParsedFile,
span: Span,
text: []u8,
) !void {
const msg = try self.loop.allocator.create(errmsg.Msg{
.path = parsed_file.realpath,
.text = text,
.span = span,
.tree = &parsed_file.tree,
});
errdefer self.loop.allocator.destroy(msg);
const compile_errors = await (async self.compile_errors.acquire() catch unreachable);
defer compile_errors.release();
try compile_errors.value.append(msg);
}
async fn verifyUniqueSymbol(self: *Compilation, decl: *Decl) !void {
const exported_symbol_names = await (async self.exported_symbol_names.acquire() catch unreachable);
defer exported_symbol_names.release();
if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
try self.addCompileError(
decl.parsed_file,
decl.getSpan(),
"exported symbol collision: '{}'",
decl.name,
);
// TODO add error note showing location of other symbol
}
}
pub fn link(self: *Compilation, out_file: ?[]const u8) !void {
warn("TODO link");
return error.Todo;
}
pub fn addLinkLib(self: *Compilation, name: []const u8, provided_explicitly: bool) !*LinkLib {
const is_libc = mem.eql(u8, name, "c");
if (is_libc) {
if (self.libc_link_lib) |libc_link_lib| {
return libc_link_lib;
}
}
for (self.link_libs_list.toSliceConst()) |existing_lib| {
if (mem.eql(u8, name, existing_lib.name)) {
return existing_lib;
}
}
const link_lib = try self.a().create(LinkLib{
.name = name,
.path = null,
.provided_explicitly = provided_explicitly,
.symbols = ArrayList([]u8).init(self.a()),
});
try self.link_libs_list.append(link_lib);
if (is_libc) {
self.libc_link_lib = link_lib;
}
return link_lib;
}
fn a(self: Compilation) *mem.Allocator {
return self.loop.allocator;
}
};
fn printError(comptime format: []const u8, args: ...) !void {
var stderr_file = try std.io.getStdErr();
var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
const out_stream = &stderr_file_out_stream.stream;
try out_stream.print(format, args);
}
fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
if (optional_token_index) |token_index| {
const token = tree.tokens.at(token_index);
assert(token.id == Token.Id.Keyword_pub);
return Visib.Pub;
} else {
return Visib.Private;
}
}
/// This declaration has been blessed as going into the final code generation.
pub async fn resolveDecl(comp: *Compilation, decl: *Decl) !void {
if (@atomicRmw(u8, &decl.resolution_in_progress, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) == 0) {
decl.resolution.data = await (async generateDecl(comp, decl) catch unreachable);
decl.resolution.resolve();
return decl.resolution.data;
} else {
return (await (async decl.resolution.get() catch unreachable)).*;
}
}
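The @atomicRmw exchange above acts as a once-guard: exactly one caller observes the old value 0 and runs generateDecl; every other caller awaits the future instead. A minimal standalone sketch of the same guard, with hypothetical names but the same builtins as the code above:

    const builtin = @import("builtin");

    var resolution_in_progress: u8 = 0;

    fn tryClaimResolution() bool {
        // Atomically set the flag to 1; only the first caller sees 0 back.
        return @atomicRmw(u8, &resolution_in_progress, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 0;
    }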
/// The function that actually does the generation.
async fn generateDecl(comp: *Compilation, decl: *Decl) !void {
switch (decl.id) {
Decl.Id.Var => @panic("TODO"),
Decl.Id.Fn => {
const fn_decl = @fieldParentPtr(Decl.Fn, "base", decl);
return await (async generateDeclFn(comp, fn_decl) catch unreachable);
},
Decl.Id.CompTime => @panic("TODO"),
}
}
async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
const body_node = fn_decl.fn_proto.body_node orelse @panic("TODO extern fn proto decl");
const fndef_scope = try Scope.FnDef.create(comp, fn_decl.base.parent_scope);
defer fndef_scope.base.deref(comp);
// TODO actually look at the return type of the AST
const return_type = &Type.Void.get(comp).base;
defer return_type.base.deref(comp);
const is_var_args = false;
const params = ([*]Type.Fn.Param)(undefined)[0..0];
const fn_type = try Type.Fn.create(comp, return_type, params, is_var_args);
defer fn_type.base.base.deref(comp);
var symbol_name = try std.Buffer.init(comp.a(), fn_decl.base.name);
errdefer symbol_name.deinit();
const fn_val = try Value.Fn.create(comp, fn_type, fndef_scope, symbol_name);
defer fn_val.base.deref(comp);
fn_decl.value = Decl.Fn.Val{ .Ok = fn_val };
const unanalyzed_code = (await (async ir.gen(
comp,
body_node,
&fndef_scope.base,
Span.token(body_node.lastToken()),
fn_decl.base.parsed_file,
) catch unreachable)) catch |err| switch (err) {
// This poison value should not cause the errdefers to run. It simply means
// that self.compile_errors is populated.
// TODO https://github.com/ziglang/zig/issues/769
error.SemanticAnalysisFailed => return {},
else => return err,
};
defer unanalyzed_code.destroy(comp.a());
if (comp.verbose_ir) {
std.debug.warn("unanalyzed:\n");
unanalyzed_code.dump();
}
const analyzed_code = (await (async ir.analyze(
comp,
fn_decl.base.parsed_file,
unanalyzed_code,
null,
) catch unreachable)) catch |err| switch (err) {
// This poison value should not cause the errdefers to run. It simply means
// that self.compile_errors is populated.
// TODO https://github.com/ziglang/zig/issues/769
error.SemanticAnalysisFailed => return {},
else => return err,
};
errdefer analyzed_code.destroy(comp.a());
if (comp.verbose_ir) {
std.debug.warn("analyzed:\n");
analyzed_code.dump();
}
// Kick off rendering to LLVM, but it doesn't block the fn decl
// analysis from being complete.
try comp.build_group.call(codegen.renderToLlvm, comp, fn_val, analyzed_code);
}

src-self-hosted/decl.zig

@@ -0,0 +1,96 @@
const std = @import("std");
const Allocator = mem.Allocator;
const mem = std.mem;
const ast = std.zig.ast;
const Visib = @import("visib.zig").Visib;
const ParsedFile = @import("parsed_file.zig").ParsedFile;
const event = std.event;
const Value = @import("value.zig").Value;
const Token = std.zig.Token;
const errmsg = @import("errmsg.zig");
const Scope = @import("scope.zig").Scope;
const Compilation = @import("compilation.zig").Compilation;
pub const Decl = struct {
id: Id,
name: []const u8,
visib: Visib,
resolution: event.Future(Compilation.BuildError!void),
resolution_in_progress: u8,
parsed_file: *ParsedFile,
parent_scope: *Scope,
pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);
pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
switch (base.id) {
Id.Fn => {
const fn_decl = @fieldParentPtr(Fn, "base", base);
return fn_decl.isExported(tree);
},
else => return false,
}
}
pub fn getSpan(base: *const Decl) errmsg.Span {
switch (base.id) {
Id.Fn => {
const fn_decl = @fieldParentPtr(Fn, "base", base);
const fn_proto = fn_decl.fn_proto;
const start = fn_proto.fn_token;
const end = fn_proto.name_token orelse start;
return errmsg.Span{
.first = start,
.last = end + 1,
};
},
else => @panic("TODO"),
}
}
pub const Id = enum {
Var,
Fn,
CompTime,
};
pub const Var = struct {
base: Decl,
};
pub const Fn = struct {
base: Decl,
value: Val,
fn_proto: *const ast.Node.FnProto,
// TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous
pub const Val = union {
Unresolved: void,
Ok: *Value.Fn,
};
pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 {
return if (self.fn_proto.extern_export_inline_token) |tok_index| x: {
const token = tree.tokens.at(tok_index);
break :x switch (token.id) {
Token.Id.Extern => tree.tokenSlicePtr(token),
else => null,
};
} else null;
}
pub fn isExported(self: Fn, tree: *ast.Tree) bool {
if (self.fn_proto.extern_export_inline_token) |tok_index| {
const token = tree.tokens.at(tok_index);
return token.id == Token.Id.Keyword_export;
} else {
return false;
}
}
};
pub const CompTime = struct {
base: Decl,
};
};
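isExported and getSpan above use the base-struct downcast idiom: a Decl is embedded as the first field of Decl.Fn, and @fieldParentPtr recovers the outer struct from a pointer to that field. A hypothetical helper making the cast explicit:

    fn castToFn(base: *const Decl) *const Decl.Fn {
        std.debug.assert(base.id == Decl.Id.Fn); // the downcast is only valid for Fn decls
        return @fieldParentPtr(Decl.Fn, "base", base);
    }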


@@ -11,11 +11,22 @@ pub const Color = enum {
On,
};
pub const Span = struct {
first: ast.TokenIndex,
last: ast.TokenIndex,
pub fn token(i: TokenIndex) Span {
return Span {
.first = i,
.last = i,
};
}
};
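The helper keeps single-token spans terse: compilation.zig above calls Span.token(body_node.lastToken()), which is equivalent to spelling out a Span whose first and last are the same index.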
pub const Msg = struct {
path: []const u8,
text: []u8,
first_token: TokenIndex,
last_token: TokenIndex,
span: Span,
tree: *ast.Tree,
};
@@ -39,8 +50,10 @@ pub fn createFromParseError(
.tree = tree,
.path = path,
.text = text_buf.toOwnedSlice(),
.first_token = loc_token,
.last_token = loc_token,
.span = Span{
.first = loc_token,
.last = loc_token,
},
});
errdefer allocator.destroy(msg);
@@ -48,8 +61,8 @@ pub fn createFromParseError(
}
pub fn printToStream(stream: var, msg: *const Msg, color_on: bool) !void {
const first_token = msg.tree.tokens.at(msg.first_token);
const last_token = msg.tree.tokens.at(msg.last_token);
const first_token = msg.tree.tokens.at(msg.span.first);
const last_token = msg.tree.tokens.at(msg.span.last);
const start_loc = msg.tree.tokenLocationPtr(0, first_token);
const end_loc = msg.tree.tokenLocationPtr(first_token.end, last_token);
if (!color_on) {


@@ -53,3 +53,8 @@ pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
return error.ZigLibDirNotFound;
};
}
/// Caller must free result
pub fn resolveZigCacheDir(allocator: *mem.Allocator) ![]u8 {
return std.mem.dupe(allocator, u8, "zig-cache");
}
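Because the result is heap-duplicated, a hypothetical caller pairs it with a matching free (allocator name assumed):

    const cache_dir = try introspect.resolveZigCacheDir(allocator);
    defer allocator.free(cache_dir);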

File diff suppressed because it is too large


@@ -2,10 +2,27 @@ const builtin = @import("builtin");
const c = @import("c.zig");
const assert = @import("std").debug.assert;
pub const ValueRef = removeNullability(c.LLVMValueRef);
pub const ModuleRef = removeNullability(c.LLVMModuleRef);
pub const ContextRef = removeNullability(c.LLVMContextRef);
pub const BuilderRef = removeNullability(c.LLVMBuilderRef);
pub const ContextRef = removeNullability(c.LLVMContextRef);
pub const ModuleRef = removeNullability(c.LLVMModuleRef);
pub const ValueRef = removeNullability(c.LLVMValueRef);
pub const TypeRef = removeNullability(c.LLVMTypeRef);
pub const AddFunction = c.LLVMAddFunction;
pub const CreateBuilderInContext = c.LLVMCreateBuilderInContext;
pub const DisposeBuilder = c.LLVMDisposeBuilder;
pub const DisposeModule = c.LLVMDisposeModule;
pub const DumpModule = c.LLVMDumpModule;
pub const ModuleCreateWithNameInContext = c.LLVMModuleCreateWithNameInContext;
pub const VoidTypeInContext = c.LLVMVoidTypeInContext;
pub const FunctionType = LLVMFunctionType;
extern fn LLVMFunctionType(
ReturnType: TypeRef,
ParamTypes: [*]TypeRef,
ParamCount: c_uint,
IsVarArg: c_int,
) ?TypeRef;
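The declarations above show the wrapper's two forms: where the auto-translated signature in c.zig is already usable, the function is re-exported as a thin alias (e.g. AddFunction = c.LLVMAddFunction); where it is not, the extern is re-declared by hand, here so that ParamTypes is a non-null [*]TypeRef instead of the nullable pointer c.zig would produce.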
fn removeNullability(comptime T: type) type {
comptime assert(@typeId(T) == builtin.TypeId.Optional);


@@ -14,7 +14,8 @@ const c = @import("c.zig");
const introspect = @import("introspect.zig");
const Args = arg.Args;
const Flag = arg.Flag;
const Module = @import("module.zig").Module;
const EventLoopLocal = @import("compilation.zig").EventLoopLocal;
const Compilation = @import("compilation.zig").Compilation;
const Target = @import("target.zig").Target;
const errmsg = @import("errmsg.zig");
@@ -257,7 +258,7 @@ const args_build_generic = []Flag{
Flag.Arg1("--ver-patch"),
};
fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Module.Kind) !void {
fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Compilation.Kind) !void {
var flags = try Args.parse(allocator, args_build_generic, args);
defer flags.deinit();
@@ -299,14 +300,14 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const emit_type = blk: {
if (flags.single("emit")) |emit_flag| {
if (mem.eql(u8, emit_flag, "asm")) {
break :blk Module.Emit.Assembly;
break :blk Compilation.Emit.Assembly;
} else if (mem.eql(u8, emit_flag, "bin")) {
break :blk Module.Emit.Binary;
break :blk Compilation.Emit.Binary;
} else if (mem.eql(u8, emit_flag, "llvm-ir")) {
break :blk Module.Emit.LlvmIr;
break :blk Compilation.Emit.LlvmIr;
} else unreachable;
} else {
break :blk Module.Emit.Binary;
break :blk Compilation.Emit.Binary;
}
};
@@ -369,7 +370,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
os.exit(1);
}
if (out_type == Module.Kind.Obj and link_objects.len != 0) {
if (out_type == Compilation.Kind.Obj and link_objects.len != 0) {
try stderr.write("When building an object file, --object arguments are invalid\n");
os.exit(1);
}
@@ -386,9 +387,13 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
var loop: event.Loop = undefined;
try loop.initMultiThreaded(allocator);
defer loop.deinit();
var module = try Module.create(
&loop,
var event_loop_local = EventLoopLocal.init(&loop);
defer event_loop_local.deinit();
var comp = try Compilation.create(
&event_loop_local,
root_name,
root_source_file,
Target.Native,
@@ -397,16 +402,16 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
zig_lib_dir,
full_cache_dir,
);
defer module.destroy();
defer comp.destroy();
module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") orelse "0", 10);
module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") orelse "0", 10);
module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") orelse "0", 10);
comp.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") orelse "0", 10);
comp.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") orelse "0", 10);
comp.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") orelse "0", 10);
module.is_test = false;
comp.is_test = false;
module.linker_script = flags.single("linker-script");
module.each_lib_rpath = flags.present("each-lib-rpath");
comp.linker_script = flags.single("linker-script");
comp.each_lib_rpath = flags.present("each-lib-rpath");
var clang_argv_buf = ArrayList([]const u8).init(allocator);
defer clang_argv_buf.deinit();
@@ -417,51 +422,51 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
try clang_argv_buf.append(mllvm);
}
module.llvm_argv = mllvm_flags;
module.clang_argv = clang_argv_buf.toSliceConst();
comp.llvm_argv = mllvm_flags;
comp.clang_argv = clang_argv_buf.toSliceConst();
module.strip = flags.present("strip");
module.is_static = flags.present("static");
comp.strip = flags.present("strip");
comp.is_static = flags.present("static");
if (flags.single("libc-lib-dir")) |libc_lib_dir| {
module.libc_lib_dir = libc_lib_dir;
comp.libc_lib_dir = libc_lib_dir;
}
if (flags.single("libc-static-lib-dir")) |libc_static_lib_dir| {
module.libc_static_lib_dir = libc_static_lib_dir;
comp.libc_static_lib_dir = libc_static_lib_dir;
}
if (flags.single("libc-include-dir")) |libc_include_dir| {
module.libc_include_dir = libc_include_dir;
comp.libc_include_dir = libc_include_dir;
}
if (flags.single("msvc-lib-dir")) |msvc_lib_dir| {
module.msvc_lib_dir = msvc_lib_dir;
comp.msvc_lib_dir = msvc_lib_dir;
}
if (flags.single("kernel32-lib-dir")) |kernel32_lib_dir| {
module.kernel32_lib_dir = kernel32_lib_dir;
comp.kernel32_lib_dir = kernel32_lib_dir;
}
if (flags.single("dynamic-linker")) |dynamic_linker| {
module.dynamic_linker = dynamic_linker;
comp.dynamic_linker = dynamic_linker;
}
module.verbose_tokenize = flags.present("verbose-tokenize");
module.verbose_ast_tree = flags.present("verbose-ast-tree");
module.verbose_ast_fmt = flags.present("verbose-ast-fmt");
module.verbose_link = flags.present("verbose-link");
module.verbose_ir = flags.present("verbose-ir");
module.verbose_llvm_ir = flags.present("verbose-llvm-ir");
module.verbose_cimport = flags.present("verbose-cimport");
comp.verbose_tokenize = flags.present("verbose-tokenize");
comp.verbose_ast_tree = flags.present("verbose-ast-tree");
comp.verbose_ast_fmt = flags.present("verbose-ast-fmt");
comp.verbose_link = flags.present("verbose-link");
comp.verbose_ir = flags.present("verbose-ir");
comp.verbose_llvm_ir = flags.present("verbose-llvm-ir");
comp.verbose_cimport = flags.present("verbose-cimport");
module.err_color = color;
module.lib_dirs = flags.many("library-path");
module.darwin_frameworks = flags.many("framework");
module.rpath_list = flags.many("rpath");
comp.err_color = color;
comp.lib_dirs = flags.many("library-path");
comp.darwin_frameworks = flags.many("framework");
comp.rpath_list = flags.many("rpath");
if (flags.single("output-h")) |output_h| {
module.out_h_path = output_h;
comp.out_h_path = output_h;
}
module.windows_subsystem_windows = flags.present("mwindows");
module.windows_subsystem_console = flags.present("mconsole");
module.linker_rdynamic = flags.present("rdynamic");
comp.windows_subsystem_windows = flags.present("mwindows");
comp.windows_subsystem_console = flags.present("mconsole");
comp.linker_rdynamic = flags.present("rdynamic");
if (flags.single("mmacosx-version-min") != null and flags.single("mios-version-min") != null) {
try stderr.write("-mmacosx-version-min and -mios-version-min options not allowed together\n");
@@ -469,54 +474,54 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
if (flags.single("mmacosx-version-min")) |ver| {
module.darwin_version_min = Module.DarwinVersionMin{ .MacOS = ver };
comp.darwin_version_min = Compilation.DarwinVersionMin{ .MacOS = ver };
}
if (flags.single("mios-version-min")) |ver| {
module.darwin_version_min = Module.DarwinVersionMin{ .Ios = ver };
comp.darwin_version_min = Compilation.DarwinVersionMin{ .Ios = ver };
}
module.emit_file_type = emit_type;
module.link_objects = link_objects;
module.assembly_files = assembly_files;
module.link_out_file = flags.single("out-file");
comp.emit_file_type = emit_type;
comp.link_objects = link_objects;
comp.assembly_files = assembly_files;
comp.link_out_file = flags.single("out-file");
try module.build();
const process_build_events_handle = try async<loop.allocator> processBuildEvents(module, true);
try comp.build();
const process_build_events_handle = try async<loop.allocator> processBuildEvents(comp, color);
defer cancel process_build_events_handle;
loop.run();
}
async fn processBuildEvents(module: *Module, watch: bool) void {
while (watch) {
// TODO directly awaiting async should guarantee memory allocation elision
const build_event = await (async module.events.get() catch unreachable);
async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
// TODO directly awaiting async should guarantee memory allocation elision
const build_event = await (async comp.events.get() catch unreachable);
switch (build_event) {
Module.Event.Ok => {
std.debug.warn("Build succeeded\n");
return;
},
Module.Event.Error => |err| {
std.debug.warn("build failed: {}\n", @errorName(err));
@panic("TODO error return trace");
},
Module.Event.Fail => |errs| {
@panic("TODO print compile error messages");
},
}
switch (build_event) {
Compilation.Event.Ok => {
std.debug.warn("Build succeeded\n");
return;
},
Compilation.Event.Error => |err| {
std.debug.warn("build failed: {}\n", @errorName(err));
os.exit(1);
},
Compilation.Event.Fail => |msgs| {
for (msgs) |msg| {
errmsg.printToFile(&stderr_file, msg, color) catch os.exit(1);
}
},
}
}
fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
return buildOutputType(allocator, args, Module.Kind.Exe);
return buildOutputType(allocator, args, Compilation.Kind.Exe);
}
fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
return buildOutputType(allocator, args, Module.Kind.Lib);
return buildOutputType(allocator, args, Compilation.Kind.Lib);
}
fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
return buildOutputType(allocator, args, Module.Kind.Obj);
return buildOutputType(allocator, args, Compilation.Kind.Obj);
}
const usage_fmt =
@@ -527,6 +532,7 @@ const usage_fmt =
\\Options:
\\ --help Print this help and exit
\\ --color [auto|off|on] Enable or disable colored error messages
\\ --stdin Format code from stdin
\\
\\
;
@@ -538,6 +544,7 @@ const args_fmt_spec = []Flag{
"off",
"on",
}),
Flag.Bool("--stdin"),
};
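With the new flag, formatting can be piped without touching disk. A hypothetical invocation: zig fmt --stdin < main.zig prints the formatted source to stdout, and (per the handler below) exits with status 1 if the input fails to parse.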
const Fmt = struct {
@@ -579,11 +586,6 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
os.exit(0);
}
if (flags.positionals.len == 0) {
try stderr.write("expected at least one source file argument\n");
os.exit(1);
}
const color = blk: {
if (flags.single("color")) |color_flag| {
if (mem.eql(u8, color_flag, "auto")) {
@@ -598,6 +600,44 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
}
};
if (flags.present("stdin")) {
if (flags.positionals.len != 0) {
try stderr.write("cannot use --stdin with positional arguments\n");
os.exit(1);
}
var stdin_file = try io.getStdIn();
var stdin = io.FileInStream.init(&stdin_file);
const source_code = try stdin.stream.readAllAlloc(allocator, @maxValue(usize));
defer allocator.free(source_code);
var tree = std.zig.parse(allocator, source_code) catch |err| {
try stderr.print("error parsing stdin: {}\n", err);
os.exit(1);
};
defer tree.deinit();
var error_it = tree.errors.iterator(0);
while (error_it.next()) |parse_error| {
const msg = try errmsg.createFromParseError(allocator, parse_error, &tree, "<stdin>");
defer allocator.destroy(msg);
try errmsg.printToFile(&stderr_file, msg, color);
}
if (tree.errors.len != 0) {
os.exit(1);
}
_ = try std.zig.render(allocator, stdout, &tree);
return;
}
if (flags.positionals.len == 0) {
try stderr.write("expected at least one source file argument\n");
os.exit(1);
}
var fmt = Fmt{
.seen = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator),
.queue = std.LinkedList([]const u8).init(),


@@ -1,579 +0,0 @@
const std = @import("std");
const os = std.os;
const io = std.io;
const mem = std.mem;
const Allocator = mem.Allocator;
const Buffer = std.Buffer;
const llvm = @import("llvm.zig");
const c = @import("c.zig");
const builtin = @import("builtin");
const Target = @import("target.zig").Target;
const warn = std.debug.warn;
const Token = std.zig.Token;
const ArrayList = std.ArrayList;
const errmsg = @import("errmsg.zig");
const ast = std.zig.ast;
const event = std.event;
const assert = std.debug.assert;
pub const Module = struct {
loop: *event.Loop,
name: Buffer,
root_src_path: ?[]const u8,
module: llvm.ModuleRef,
context: llvm.ContextRef,
builder: llvm.BuilderRef,
target: Target,
build_mode: builtin.Mode,
zig_lib_dir: []const u8,
version_major: u32,
version_minor: u32,
version_patch: u32,
linker_script: ?[]const u8,
cache_dir: []const u8,
libc_lib_dir: ?[]const u8,
libc_static_lib_dir: ?[]const u8,
libc_include_dir: ?[]const u8,
msvc_lib_dir: ?[]const u8,
kernel32_lib_dir: ?[]const u8,
dynamic_linker: ?[]const u8,
out_h_path: ?[]const u8,
is_test: bool,
each_lib_rpath: bool,
strip: bool,
is_static: bool,
linker_rdynamic: bool,
clang_argv: []const []const u8,
llvm_argv: []const []const u8,
lib_dirs: []const []const u8,
rpath_list: []const []const u8,
assembly_files: []const []const u8,
link_objects: []const []const u8,
windows_subsystem_windows: bool,
windows_subsystem_console: bool,
link_libs_list: ArrayList(*LinkLib),
libc_link_lib: ?*LinkLib,
err_color: errmsg.Color,
verbose_tokenize: bool,
verbose_ast_tree: bool,
verbose_ast_fmt: bool,
verbose_cimport: bool,
verbose_ir: bool,
verbose_llvm_ir: bool,
verbose_link: bool,
darwin_frameworks: []const []const u8,
darwin_version_min: DarwinVersionMin,
test_filters: []const []const u8,
test_name_prefix: ?[]const u8,
emit_file_type: Emit,
kind: Kind,
link_out_file: ?[]const u8,
events: *event.Channel(Event),
exported_symbol_names: event.Locked(Decl.Table),
// TODO handle some of these earlier and report them in a way other than error codes
pub const BuildError = error{
OutOfMemory,
EndOfStream,
BadFd,
Io,
IsDir,
Unexpected,
SystemResources,
SharingViolation,
PathAlreadyExists,
FileNotFound,
AccessDenied,
PipeBusy,
FileTooBig,
SymLinkLoop,
ProcessFdQuotaExceeded,
NameTooLong,
SystemFdQuotaExceeded,
NoDevice,
PathNotFound,
NoSpaceLeft,
NotDir,
FileSystem,
OperationAborted,
IoPending,
BrokenPipe,
WouldBlock,
FileClosed,
DestinationAddressRequired,
DiskQuota,
InputOutput,
NoStdHandles,
Overflow,
NotSupported,
};
pub const Event = union(enum) {
Ok,
Fail: []errmsg.Msg,
Error: BuildError,
};
pub const DarwinVersionMin = union(enum) {
None,
MacOS: []const u8,
Ios: []const u8,
};
pub const Kind = enum {
Exe,
Lib,
Obj,
};
pub const LinkLib = struct {
name: []const u8,
path: ?[]const u8,
/// the list of symbols we depend on from this lib
symbols: ArrayList([]u8),
provided_explicitly: bool,
};
pub const Emit = enum {
Binary,
Assembly,
LlvmIr,
};
pub fn create(
loop: *event.Loop,
name: []const u8,
root_src_path: ?[]const u8,
target: *const Target,
kind: Kind,
build_mode: builtin.Mode,
zig_lib_dir: []const u8,
cache_dir: []const u8,
) !*Module {
var name_buffer = try Buffer.init(loop.allocator, name);
errdefer name_buffer.deinit();
const context = c.LLVMContextCreate() orelse return error.OutOfMemory;
errdefer c.LLVMContextDispose(context);
const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeModule(module);
const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeBuilder(builder);
const events = try event.Channel(Event).create(loop, 0);
errdefer events.destroy();
return loop.allocator.create(Module{
.loop = loop,
.events = events,
.name = name_buffer,
.root_src_path = root_src_path,
.module = module,
.context = context,
.builder = builder,
.target = target.*,
.kind = kind,
.build_mode = build_mode,
.zig_lib_dir = zig_lib_dir,
.cache_dir = cache_dir,
.version_major = 0,
.version_minor = 0,
.version_patch = 0,
.verbose_tokenize = false,
.verbose_ast_tree = false,
.verbose_ast_fmt = false,
.verbose_cimport = false,
.verbose_ir = false,
.verbose_llvm_ir = false,
.verbose_link = false,
.linker_script = null,
.libc_lib_dir = null,
.libc_static_lib_dir = null,
.libc_include_dir = null,
.msvc_lib_dir = null,
.kernel32_lib_dir = null,
.dynamic_linker = null,
.out_h_path = null,
.is_test = false,
.each_lib_rpath = false,
.strip = false,
.is_static = false,
.linker_rdynamic = false,
.clang_argv = [][]const u8{},
.llvm_argv = [][]const u8{},
.lib_dirs = [][]const u8{},
.rpath_list = [][]const u8{},
.assembly_files = [][]const u8{},
.link_objects = [][]const u8{},
.windows_subsystem_windows = false,
.windows_subsystem_console = false,
.link_libs_list = ArrayList(*LinkLib).init(loop.allocator),
.libc_link_lib = null,
.err_color = errmsg.Color.Auto,
.darwin_frameworks = [][]const u8{},
.darwin_version_min = DarwinVersionMin.None,
.test_filters = [][]const u8{},
.test_name_prefix = null,
.emit_file_type = Emit.Binary,
.link_out_file = null,
.exported_symbol_names = event.Locked(Decl.Table).init(loop, Decl.Table.init(loop.allocator)),
});
}
fn dump(self: *Module) void {
c.LLVMDumpModule(self.module);
}
pub fn destroy(self: *Module) void {
self.events.destroy();
c.LLVMDisposeBuilder(self.builder);
c.LLVMDisposeModule(self.module);
c.LLVMContextDispose(self.context);
self.name.deinit();
self.a().destroy(self);
}
pub fn build(self: *Module) !void {
if (self.llvm_argv.len != 0) {
var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.a(), [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
self.llvm_argv,
});
defer c_compatible_args.deinit();
// TODO this sets global state
c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
}
_ = try async<self.a()> self.buildAsync();
}
async fn buildAsync(self: *Module) void {
while (true) {
// TODO directly awaiting async should guarantee memory allocation elision
// TODO also async before suspending should guarantee memory allocation elision
(await (async self.addRootSrc() catch unreachable)) catch |err| {
await (async self.events.put(Event{ .Error = err }) catch unreachable);
return;
};
await (async self.events.put(Event.Ok) catch unreachable);
// for now we stop after 1
return;
}
}
async fn addRootSrc(self: *Module) !void {
const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
// TODO async/await os.path.real
const root_src_real_path = os.path.real(self.a(), root_src_path) catch |err| {
try printError("unable to get real path '{}': {}", root_src_path, err);
return err;
};
errdefer self.a().free(root_src_real_path);
// TODO async/await readFileAlloc()
const source_code = io.readFileAlloc(self.a(), root_src_real_path) catch |err| {
try printError("unable to open '{}': {}", root_src_real_path, err);
return err;
};
errdefer self.a().free(source_code);
var parsed_file = ParsedFile{
.tree = try std.zig.parse(self.a(), source_code),
.realpath = root_src_real_path,
};
errdefer parsed_file.tree.deinit();
const tree = &parsed_file.tree;
// create empty struct for it
const decls = try Scope.Decls.create(self.a(), null);
errdefer decls.destroy();
var it = tree.root_node.decls.iterator(0);
while (it.next()) |decl_ptr| {
const decl = decl_ptr.*;
switch (decl.id) {
ast.Node.Id.Comptime => @panic("TODO"),
ast.Node.Id.VarDecl => @panic("TODO"),
ast.Node.Id.FnProto => {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
@panic("TODO add compile error");
//try self.addCompileError(
// &parsed_file,
// fn_proto.fn_token,
// fn_proto.fn_token + 1,
// "missing function name",
//);
continue;
};
const fn_decl = try self.a().create(Decl.Fn{
.base = Decl{
.id = Decl.Id.Fn,
.name = name,
.visib = parseVisibToken(tree, fn_proto.visib_token),
.resolution = Decl.Resolution.Unresolved,
},
.value = Decl.Fn.Val{ .Unresolved = {} },
.fn_proto = fn_proto,
});
errdefer self.a().destroy(fn_decl);
// TODO make this parallel
try await try async self.addTopLevelDecl(tree, &fn_decl.base);
},
ast.Node.Id.TestDecl => @panic("TODO"),
else => unreachable,
}
}
}
async fn addTopLevelDecl(self: *Module, tree: *ast.Tree, decl: *Decl) !void {
const is_export = decl.isExported(tree);
{
const exported_symbol_names = await try async self.exported_symbol_names.acquire();
defer exported_symbol_names.release();
if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
@panic("TODO report compile error");
}
}
}
pub fn link(self: *Module, out_file: ?[]const u8) !void {
warn("TODO link");
return error.Todo;
}
pub fn addLinkLib(self: *Module, name: []const u8, provided_explicitly: bool) !*LinkLib {
const is_libc = mem.eql(u8, name, "c");
if (is_libc) {
if (self.libc_link_lib) |libc_link_lib| {
return libc_link_lib;
}
}
for (self.link_libs_list.toSliceConst()) |existing_lib| {
if (mem.eql(u8, name, existing_lib.name)) {
return existing_lib;
}
}
const link_lib = try self.a().create(LinkLib{
.name = name,
.path = null,
.provided_explicitly = provided_explicitly,
.symbols = ArrayList([]u8).init(self.a()),
});
try self.link_libs_list.append(link_lib);
if (is_libc) {
self.libc_link_lib = link_lib;
}
return link_lib;
}
fn a(self: Module) *mem.Allocator {
return self.loop.allocator;
}
};
fn printError(comptime format: []const u8, args: ...) !void {
var stderr_file = try std.io.getStdErr();
var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
const out_stream = &stderr_file_out_stream.stream;
try out_stream.print(format, args);
}
fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
if (optional_token_index) |token_index| {
const token = tree.tokens.at(token_index);
assert(token.id == Token.Id.Keyword_pub);
return Visib.Pub;
} else {
return Visib.Private;
}
}
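parseVisibToken only ever sees a pub token or null, because the parser records visib_token solely for pub. For example (hypothetical input):

pub fn a() void {} // visib_token set: Visib.Pub
fn b() void {} // visib_token null: Visib.Private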
pub const Scope = struct {
id: Id,
parent: ?*Scope,
pub const Id = enum {
Decls,
Block,
};
pub const Decls = struct {
base: Scope,
table: Decl.Table,
pub fn create(a: *Allocator, parent: ?*Scope) !*Decls {
const self = try a.create(Decls{
.base = Scope{
.id = Id.Decls,
.parent = parent,
},
.table = undefined,
});
errdefer a.destroy(self);
self.table = Decl.Table.init(a);
errdefer self.table.deinit();
return self;
}
pub fn destroy(self: *Decls) void {
const allocator = self.table.allocator;
self.table.deinit();
self.* = undefined;
allocator.destroy(self);
}
};
pub const Block = struct {
base: Scope,
};
};
pub const Visib = enum {
Private,
Pub,
};
pub const Decl = struct {
id: Id,
name: []const u8,
visib: Visib,
resolution: Resolution,
pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);
pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
switch (base.id) {
Id.Fn => {
const fn_decl = @fieldParentPtr(Fn, "base", base);
return fn_decl.isExported(tree);
},
else => return false,
}
}
pub const Resolution = enum {
Unresolved,
InProgress,
Invalid,
Ok,
};
pub const Id = enum {
Var,
Fn,
CompTime,
};
pub const Var = struct {
base: Decl,
};
pub const Fn = struct {
base: Decl,
value: Val,
fn_proto: *const ast.Node.FnProto,
// TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous
pub const Val = union {
Unresolved: void,
Ok: *Value.Fn,
};
pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 {
return if (self.fn_proto.extern_export_inline_token) |tok_index| x: {
const token = tree.tokens.at(tok_index);
break :x switch (token.id) {
Token.Id.Extern => tree.tokenSlicePtr(token),
else => null,
};
} else null;
}
pub fn isExported(self: Fn, tree: *ast.Tree) bool {
if (self.fn_proto.extern_export_inline_token) |tok_index| {
const token = tree.tokens.at(tok_index);
return token.id == Token.Id.Keyword_export;
} else {
return false;
}
}
};
pub const CompTime = struct {
base: Decl,
};
};
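Decl uses the same intrusive layout as Scope above and Type below: each variant embeds a base field, and @fieldParentPtr recovers the variant struct from a *Decl. A minimal sketch of the downcast (hypothetical helper, not part of the commit):

fn asFn(base: *Decl) ?*Decl.Fn {
    if (base.id != Decl.Id.Fn) return null;
    // Recover the containing Decl.Fn from the address of its embedded base field.
    return @fieldParentPtr(Decl.Fn, "base", base);
}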
pub const Value = struct {
pub const Fn = struct {};
};
pub const Type = struct {
id: Id,
pub const Id = enum {
Type,
Void,
Bool,
NoReturn,
Int,
Float,
Pointer,
Array,
Struct,
ComptimeFloat,
ComptimeInt,
Undefined,
Null,
Optional,
ErrorUnion,
ErrorSet,
Enum,
Union,
Fn,
Opaque,
Promise,
};
pub const Struct = struct {
base: Type,
decls: *Scope.Decls,
};
};
pub const ParsedFile = struct {
tree: ast.Tree,
realpath: []const u8,
};


@@ -0,0 +1,6 @@
const ast = @import("std").zig.ast;
pub const ParsedFile = struct {
tree: ast.Tree,
realpath: []const u8,
};


@@ -1,16 +1,234 @@
const std = @import("std");
const Allocator = mem.Allocator;
const Decl = @import("decl.zig").Decl;
const Compilation = @import("compilation.zig").Compilation;
const mem = std.mem;
const ast = std.zig.ast;
const Value = @import("value.zig").Value;
const ir = @import("ir.zig");
pub const Scope = struct {
id: Id,
parent: *Scope,
parent: ?*Scope,
ref_count: usize,
pub fn ref(base: *Scope) void {
base.ref_count += 1;
}
pub fn deref(base: *Scope, comp: *Compilation) void {
base.ref_count -= 1;
if (base.ref_count == 0) {
if (base.parent) |parent| parent.deref(comp);
switch (base.id) {
Id.Decls => @fieldParentPtr(Decls, "base", base).destroy(),
Id.Block => @fieldParentPtr(Block, "base", base).destroy(comp),
Id.FnDef => @fieldParentPtr(FnDef, "base", base).destroy(comp),
Id.CompTime => @fieldParentPtr(CompTime, "base", base).destroy(comp),
Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp),
Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp),
}
}
}
pub fn findFnDef(base: *Scope) ?*FnDef {
var scope = base;
while (true) {
switch (scope.id) {
Id.FnDef => return @fieldParentPtr(FnDef, "base", base),
Id.Decls => return null,
Id.Block,
Id.Defer,
Id.DeferExpr,
Id.CompTime,
=> scope = scope.parent orelse return null,
}
}
}
pub const Id = enum {
Decls,
Block,
Defer,
DeferExpr,
VarDecl,
CImport,
Loop,
FnDef,
CompTime,
Defer,
DeferExpr,
};
pub const Decls = struct {
base: Scope,
table: Decl.Table,
/// Creates a Decls scope with 1 reference
pub fn create(comp: *Compilation, parent: ?*Scope) !*Decls {
const self = try comp.a().create(Decls{
.base = Scope{
.id = Id.Decls,
.parent = parent,
.ref_count = 1,
},
.table = undefined,
});
errdefer comp.a().destroy(self);
self.table = Decl.Table.init(comp.a());
errdefer self.table.deinit();
if (parent) |p| p.ref();
return self;
}
pub fn destroy(self: *Decls) void {
self.table.deinit();
self.table.allocator.destroy(self);
}
};
pub const Block = struct {
base: Scope,
incoming_values: std.ArrayList(*ir.Instruction),
incoming_blocks: std.ArrayList(*ir.BasicBlock),
end_block: *ir.BasicBlock,
is_comptime: *ir.Instruction,
/// Creates a Block scope with 1 reference
pub fn create(comp: *Compilation, parent: ?*Scope) !*Block {
const self = try comp.a().create(Block{
.base = Scope{
.id = Id.Block,
.parent = parent,
.ref_count = 1,
},
.incoming_values = undefined,
.incoming_blocks = undefined,
.end_block = undefined,
.is_comptime = undefined,
});
errdefer comp.a().destroy(self);
if (parent) |p| p.ref();
return self;
}
pub fn destroy(self: *Block, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const FnDef = struct {
base: Scope,
/// This reference is not counted so that the scope can get destroyed with the function
fn_val: *Value.Fn,
/// Creates a FnDef scope with 1 reference
/// Must set the fn_val later
pub fn create(comp: *Compilation, parent: ?*Scope) !*FnDef {
const self = try comp.a().create(FnDef{
.base = Scope{
.id = Id.FnDef,
.parent = parent,
.ref_count = 1,
},
.fn_val = undefined,
});
if (parent) |p| p.ref();
return self;
}
pub fn destroy(self: *FnDef, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const CompTime = struct {
base: Scope,
/// Creates a CompTime scope with 1 reference
pub fn create(comp: *Compilation, parent: ?*Scope) !*CompTime {
const self = try comp.a().create(CompTime{
.base = Scope{
.id = Id.CompTime,
.parent = parent,
.ref_count = 1,
},
});
if (parent) |p| p.ref();
return self;
}
pub fn destroy(self: *CompTime, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Defer = struct {
base: Scope,
defer_expr_scope: *DeferExpr,
kind: Kind,
pub const Kind = enum {
ScopeExit,
ErrorExit,
};
/// Creates a Defer scope with 1 reference
pub fn create(
comp: *Compilation,
parent: ?*Scope,
kind: Kind,
defer_expr_scope: *DeferExpr,
) !*Defer {
const self = try comp.a().create(Defer{
.base = Scope{
.id = Id.Defer,
.parent = parent,
.ref_count = 1,
},
.defer_expr_scope = defer_expr_scope,
.kind = kind,
});
errdefer comp.a().destroy(self);
defer_expr_scope.base.ref();
if (parent) |p| p.ref();
return self;
}
pub fn destroy(self: *Defer, comp: *Compilation) void {
self.defer_expr_scope.base.deref(comp);
comp.a().destroy(self);
}
};
pub const DeferExpr = struct {
base: Scope,
expr_node: *ast.Node,
/// Creates a DeferExpr scope with 1 reference
pub fn create(comp: *Compilation, parent: ?*Scope, expr_node: *ast.Node) !*DeferExpr {
const self = try comp.a().create(DeferExpr{
.base = Scope{
.id = Id.DeferExpr,
.parent = parent,
.ref_count = 1,
},
.expr_node = expr_node,
});
errdefer comp.a().destroy(self);
if (parent) |p| p.ref();
return self;
}
pub fn destroy(self: *DeferExpr, comp: *Compilation) void {
comp.a().destroy(self);
}
};
};
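Taken together, create/ref/deref give every scope an initial count of 1 plus one reference to its parent, so dropping the last child reference unwinds the whole chain. A sketch of the intended lifecycle (hypothetical function, assuming a valid *Compilation):

fn scopeLifecycle(comp: *Compilation) !void {
    const decls = try Scope.Decls.create(comp, null); // decls ref_count = 1
    const block = try Scope.Block.create(comp, &decls.base); // refs decls: now 2
    decls.base.deref(comp); // decls stays alive; block still holds a reference
    block.base.deref(comp); // destroys block, which drops the last ref on decls
}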

168
src-self-hosted/test.zig Normal file

@@ -0,0 +1,168 @@
const std = @import("std");
const mem = std.mem;
const builtin = @import("builtin");
const Target = @import("target.zig").Target;
const Compilation = @import("compilation.zig").Compilation;
const introspect = @import("introspect.zig");
const assertOrPanic = std.debug.assertOrPanic;
const errmsg = @import("errmsg.zig");
const EventLoopLocal = @import("compilation.zig").EventLoopLocal;
test "compile errors" {
var ctx: TestContext = undefined;
try ctx.init();
defer ctx.deinit();
try @import("../test/stage2/compile_errors.zig").addCases(&ctx);
try ctx.run();
}
const file1 = "1.zig";
const allocator = std.heap.c_allocator;
pub const TestContext = struct {
loop: std.event.Loop,
event_loop_local: EventLoopLocal,
zig_lib_dir: []u8,
zig_cache_dir: []u8,
file_index: std.atomic.Int(usize),
group: std.event.Group(error!void),
any_err: error!void,
const tmp_dir_name = "stage2_test_tmp";
fn init(self: *TestContext) !void {
self.* = TestContext{
.any_err = {},
.loop = undefined,
.event_loop_local = undefined,
.zig_lib_dir = undefined,
.zig_cache_dir = undefined,
.group = undefined,
.file_index = std.atomic.Int(usize).init(0),
};
try self.loop.initMultiThreaded(allocator);
errdefer self.loop.deinit();
self.event_loop_local = EventLoopLocal.init(&self.loop);
errdefer self.event_loop_local.deinit();
self.group = std.event.Group(error!void).init(&self.loop);
errdefer self.group.cancelAll();
self.zig_lib_dir = try introspect.resolveZigLibDir(allocator);
errdefer allocator.free(self.zig_lib_dir);
self.zig_cache_dir = try introspect.resolveZigCacheDir(allocator);
errdefer allocator.free(self.zig_cache_dir);
try std.os.makePath(allocator, tmp_dir_name);
errdefer std.os.deleteTree(allocator, tmp_dir_name) catch {};
}
fn deinit(self: *TestContext) void {
std.os.deleteTree(allocator, tmp_dir_name) catch {};
allocator.free(self.zig_cache_dir);
allocator.free(self.zig_lib_dir);
self.event_loop_local.deinit();
self.loop.deinit();
}
fn run(self: *TestContext) !void {
const handle = try self.loop.call(waitForGroup, self);
defer cancel handle;
self.loop.run();
return self.any_err;
}
async fn waitForGroup(self: *TestContext) void {
self.any_err = await (async self.group.wait() catch unreachable);
}
fn testCompileError(
self: *TestContext,
source: []const u8,
path: []const u8,
line: usize,
column: usize,
msg: []const u8,
) !void {
var file_index_buf: [20]u8 = undefined;
const file_index = try std.fmt.bufPrint(file_index_buf[0..], "{}", self.file_index.incr());
const file1_path = try std.os.path.join(allocator, tmp_dir_name, file_index, file1);
if (std.os.path.dirname(file1_path)) |dirname| {
try std.os.makePath(allocator, dirname);
}
// TODO async I/O
try std.io.writeFile(allocator, file1_path, source);
var comp = try Compilation.create(
&self.event_loop_local,
"test",
file1_path,
Target.Native,
Compilation.Kind.Obj,
builtin.Mode.Debug,
self.zig_lib_dir,
self.zig_cache_dir,
);
errdefer comp.destroy();
try comp.build();
try self.group.call(getModuleEvent, comp, source, path, line, column, msg);
}
async fn getModuleEvent(
comp: *Compilation,
source: []const u8,
path: []const u8,
line: usize,
column: usize,
text: []const u8,
) !void {
defer comp.destroy();
const build_event = await (async comp.events.get() catch unreachable);
switch (build_event) {
Compilation.Event.Ok => {
@panic("build incorrectly succeeded");
},
Compilation.Event.Error => |err| {
@panic("build incorrectly failed");
},
Compilation.Event.Fail => |msgs| {
assertOrPanic(msgs.len != 0);
for (msgs) |msg| {
if (mem.endsWith(u8, msg.path, path) and mem.eql(u8, msg.text, text)) {
const first_token = msg.tree.tokens.at(msg.span.first);
const last_token = msg.tree.tokens.at(msg.span.last);
const start_loc = msg.tree.tokenLocationPtr(0, first_token);
if (start_loc.line + 1 == line and start_loc.column + 1 == column) {
return;
}
}
}
std.debug.warn(
"\n=====source:=======\n{}\n====expected:========\n{}:{}:{}: error: {}\n",
source,
path,
line,
column,
text,
);
std.debug.warn("\n====found:========\n");
var stderr = try std.io.getStdErr();
for (msgs) |msg| {
try errmsg.printToFile(&stderr, msg, errmsg.Color.Auto);
}
std.debug.warn("============\n");
return error.TestFailed;
},
}
}
};
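The addCases function imported by the test above is expected to register cases through testCompileError; a hypothetical entry in test/stage2/compile_errors.zig could look like this (the error text and location are illustrative only):

pub fn addCases(ctx: *TestContext) !void {
    try ctx.testCompileError(
        \\export fn entry() void {}
        \\export fn entry() void {}
    , "1.zig", 2, 8, "exported symbol collision: 'entry'");
}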

442
src-self-hosted/type.zig Normal file

@@ -0,0 +1,442 @@
const std = @import("std");
const builtin = @import("builtin");
const Scope = @import("scope.zig").Scope;
const Compilation = @import("compilation.zig").Compilation;
const Value = @import("value.zig").Value;
const llvm = @import("llvm.zig");
const ObjectFile = @import("codegen.zig").ObjectFile;
pub const Type = struct {
base: Value,
id: Id,
pub const Id = builtin.TypeId;
pub fn destroy(base: *Type, comp: *Compilation) void {
switch (base.id) {
Id.Struct => @fieldParentPtr(Struct, "base", base).destroy(comp),
Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(comp),
Id.Type => @fieldParentPtr(MetaType, "base", base).destroy(comp),
Id.Void => @fieldParentPtr(Void, "base", base).destroy(comp),
Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(comp),
Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(comp),
Id.Int => @fieldParentPtr(Int, "base", base).destroy(comp),
Id.Float => @fieldParentPtr(Float, "base", base).destroy(comp),
Id.Pointer => @fieldParentPtr(Pointer, "base", base).destroy(comp),
Id.Array => @fieldParentPtr(Array, "base", base).destroy(comp),
Id.ComptimeFloat => @fieldParentPtr(ComptimeFloat, "base", base).destroy(comp),
Id.ComptimeInt => @fieldParentPtr(ComptimeInt, "base", base).destroy(comp),
Id.Undefined => @fieldParentPtr(Undefined, "base", base).destroy(comp),
Id.Null => @fieldParentPtr(Null, "base", base).destroy(comp),
Id.Optional => @fieldParentPtr(Optional, "base", base).destroy(comp),
Id.ErrorUnion => @fieldParentPtr(ErrorUnion, "base", base).destroy(comp),
Id.ErrorSet => @fieldParentPtr(ErrorSet, "base", base).destroy(comp),
Id.Enum => @fieldParentPtr(Enum, "base", base).destroy(comp),
Id.Union => @fieldParentPtr(Union, "base", base).destroy(comp),
Id.Namespace => @fieldParentPtr(Namespace, "base", base).destroy(comp),
Id.Block => @fieldParentPtr(Block, "base", base).destroy(comp),
Id.BoundFn => @fieldParentPtr(BoundFn, "base", base).destroy(comp),
Id.ArgTuple => @fieldParentPtr(ArgTuple, "base", base).destroy(comp),
Id.Opaque => @fieldParentPtr(Opaque, "base", base).destroy(comp),
Id.Promise => @fieldParentPtr(Promise, "base", base).destroy(comp),
}
}
pub fn getLlvmType(base: *Type, ofile: *ObjectFile) (error{OutOfMemory}!llvm.TypeRef) {
switch (base.id) {
Id.Struct => return @fieldParentPtr(Struct, "base", base).getLlvmType(ofile),
Id.Fn => return @fieldParentPtr(Fn, "base", base).getLlvmType(ofile),
Id.Type => unreachable,
Id.Void => unreachable,
Id.Bool => return @fieldParentPtr(Bool, "base", base).getLlvmType(ofile),
Id.NoReturn => unreachable,
Id.Int => return @fieldParentPtr(Int, "base", base).getLlvmType(ofile),
Id.Float => return @fieldParentPtr(Float, "base", base).getLlvmType(ofile),
Id.Pointer => return @fieldParentPtr(Pointer, "base", base).getLlvmType(ofile),
Id.Array => return @fieldParentPtr(Array, "base", base).getLlvmType(ofile),
Id.ComptimeFloat => unreachable,
Id.ComptimeInt => unreachable,
Id.Undefined => unreachable,
Id.Null => unreachable,
Id.Optional => return @fieldParentPtr(Optional, "base", base).getLlvmType(ofile),
Id.ErrorUnion => return @fieldParentPtr(ErrorUnion, "base", base).getLlvmType(ofile),
Id.ErrorSet => return @fieldParentPtr(ErrorSet, "base", base).getLlvmType(ofile),
Id.Enum => return @fieldParentPtr(Enum, "base", base).getLlvmType(ofile),
Id.Union => return @fieldParentPtr(Union, "base", base).getLlvmType(ofile),
Id.Namespace => unreachable,
Id.Block => unreachable,
Id.BoundFn => return @fieldParentPtr(BoundFn, "base", base).getLlvmType(ofile),
Id.ArgTuple => unreachable,
Id.Opaque => return @fieldParentPtr(Opaque, "base", base).getLlvmType(ofile),
Id.Promise => return @fieldParentPtr(Promise, "base", base).getLlvmType(ofile),
}
}
pub fn dump(base: *const Type) void {
std.debug.warn("{}", @tagName(base.id));
}
pub fn getAbiAlignment(base: *Type, comp: *Compilation) u32 {
@panic("TODO getAbiAlignment");
}
pub const Struct = struct {
base: Type,
decls: *Scope.Decls,
pub fn destroy(self: *Struct, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Struct, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const Fn = struct {
base: Type,
return_type: *Type,
params: []Param,
is_var_args: bool,
pub const Param = struct {
is_noalias: bool,
typeof: *Type,
};
pub fn create(comp: *Compilation, return_type: *Type, params: []Param, is_var_args: bool) !*Fn {
const result = try comp.a().create(Fn{
.base = Type{
.base = Value{
.id = Value.Id.Type,
.typeof = &MetaType.get(comp).base,
.ref_count = std.atomic.Int(usize).init(1),
},
.id = builtin.TypeId.Fn,
},
.return_type = return_type,
.params = params,
.is_var_args = is_var_args,
});
errdefer comp.a().destroy(result);
result.return_type.base.ref();
for (result.params) |param| {
param.typeof.base.ref();
}
return result;
}
pub fn destroy(self: *Fn, comp: *Compilation) void {
self.return_type.base.deref(comp);
for (self.params) |param| {
param.typeof.base.deref(comp);
}
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Fn, ofile: *ObjectFile) !llvm.TypeRef {
const llvm_return_type = switch (self.return_type.id) {
Type.Id.Void => llvm.VoidTypeInContext(ofile.context) orelse return error.OutOfMemory,
else => try self.return_type.getLlvmType(ofile),
};
const llvm_param_types = try ofile.a().alloc(llvm.TypeRef, self.params.len);
defer ofile.a().free(llvm_param_types);
for (llvm_param_types) |*llvm_param_type, i| {
llvm_param_type.* = try self.params[i].typeof.getLlvmType(ofile);
}
return llvm.FunctionType(
llvm_return_type,
llvm_param_types.ptr,
@intCast(c_uint, llvm_param_types.len),
@boolToInt(self.is_var_args),
) orelse error.OutOfMemory;
}
};
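create takes a reference on the return type and every parameter type so the function type keeps them alive, and destroy releases them symmetrically. A sketch of building and releasing an fn() void type (hypothetical; assumes the comp.void_type singleton that Void.get below relies on):

fn fnTypeSketch(comp: *Compilation) !void {
    const void_type = Type.Void.get(comp); // +1 ref on the void singleton
    const fn_type = try Type.Fn.create(comp, &void_type.base, []Type.Fn.Param{}, false);
    void_type.base.base.deref(comp); // drop our ref; fn_type still holds its own
    fn_type.base.base.deref(comp); // destroys the fn type, releasing its ref on void
}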
pub const MetaType = struct {
base: Type,
value: *Type,
/// Adds 1 reference to the resulting type
pub fn get(comp: *Compilation) *MetaType {
comp.meta_type.base.base.ref();
return comp.meta_type;
}
pub fn destroy(self: *MetaType, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Void = struct {
base: Type,
/// Adds 1 reference to the resulting type
pub fn get(comp: *Compilation) *Void {
comp.void_type.base.base.ref();
return comp.void_type;
}
pub fn destroy(self: *Void, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Bool = struct {
base: Type,
/// Adds 1 reference to the resulting type
pub fn get(comp: *Compilation) *Bool {
comp.bool_type.base.base.ref();
return comp.bool_type;
}
pub fn destroy(self: *Bool, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Bool, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const NoReturn = struct {
base: Type,
/// Adds 1 reference to the resulting type
pub fn get(comp: *Compilation) *NoReturn {
comp.noreturn_type.base.base.ref();
return comp.noreturn_type;
}
pub fn destroy(self: *NoReturn, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Int = struct {
base: Type,
pub fn destroy(self: *Int, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Int, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const Float = struct {
base: Type,
pub fn destroy(self: *Float, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Float, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const Pointer = struct {
base: Type,
mut: Mut,
vol: Vol,
size: Size,
alignment: u32,
pub const Mut = enum {
Mut,
Const,
};
pub const Vol = enum {
Non,
Volatile,
};
pub const Size = builtin.TypeInfo.Pointer.Size;
pub fn destroy(self: *Pointer, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn get(
comp: *Compilation,
elem_type: *Type,
mut: Mut,
vol: Vol,
size: Size,
alignment: u32,
) *Pointer {
@panic("TODO get pointer");
}
pub fn getLlvmType(self: *Pointer, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const Array = struct {
base: Type,
pub fn destroy(self: *Array, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Array, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const ComptimeFloat = struct {
base: Type,
pub fn destroy(self: *ComptimeFloat, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const ComptimeInt = struct {
base: Type,
pub fn destroy(self: *ComptimeInt, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Undefined = struct {
base: Type,
pub fn destroy(self: *Undefined, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Null = struct {
base: Type,
pub fn destroy(self: *Null, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Optional = struct {
base: Type,
pub fn destroy(self: *Optional, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Optional, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const ErrorUnion = struct {
base: Type,
pub fn destroy(self: *ErrorUnion, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *ErrorUnion, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const ErrorSet = struct {
base: Type,
pub fn destroy(self: *ErrorSet, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *ErrorSet, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const Enum = struct {
base: Type,
pub fn destroy(self: *Enum, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Enum, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const Union = struct {
base: Type,
pub fn destroy(self: *Union, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Union, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const Namespace = struct {
base: Type,
pub fn destroy(self: *Namespace, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Block = struct {
base: Type,
pub fn destroy(self: *Block, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const BoundFn = struct {
base: Type,
pub fn destroy(self: *BoundFn, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *BoundFn, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const ArgTuple = struct {
base: Type,
pub fn destroy(self: *ArgTuple, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Opaque = struct {
base: Type,
pub fn destroy(self: *Opaque, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Opaque, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
pub const Promise = struct {
base: Type,
pub fn destroy(self: *Promise, comp: *Compilation) void {
comp.a().destroy(self);
}
pub fn getLlvmType(self: *Promise, ofile: *ObjectFile) llvm.TypeRef {
@panic("TODO");
}
};
};

154
src-self-hosted/value.zig Normal file

@@ -0,0 +1,154 @@
const std = @import("std");
const builtin = @import("builtin");
const Scope = @import("scope.zig").Scope;
const Compilation = @import("compilation.zig").Compilation;
/// Values are ref-counted, heap-allocated, and copy-on-write
/// If there is only 1 ref then a write need not copy
pub const Value = struct {
id: Id,
typeof: *Type,
ref_count: std.atomic.Int(usize),
/// Thread-safe
pub fn ref(base: *Value) void {
_ = base.ref_count.incr();
}
/// Thread-safe
pub fn deref(base: *Value, comp: *Compilation) void {
if (base.ref_count.decr() == 1) {
base.typeof.base.deref(comp);
switch (base.id) {
Id.Type => @fieldParentPtr(Type, "base", base).destroy(comp),
Id.Fn => @fieldParentPtr(Fn, "base", base).destroy(comp),
Id.Void => @fieldParentPtr(Void, "base", base).destroy(comp),
Id.Bool => @fieldParentPtr(Bool, "base", base).destroy(comp),
Id.NoReturn => @fieldParentPtr(NoReturn, "base", base).destroy(comp),
Id.Ptr => @fieldParentPtr(Ptr, "base", base).destroy(comp),
}
}
}
pub fn getRef(base: *Value) *Value {
base.ref();
return base;
}
pub fn dump(base: *const Value) void {
std.debug.warn("{}", @tagName(base.id));
}
pub const Id = enum {
Type,
Fn,
Void,
Bool,
NoReturn,
Ptr,
};
pub const Type = @import("type.zig").Type;
pub const Fn = struct {
base: Value,
/// The main external name that is used in the .o file.
/// TODO https://github.com/ziglang/zig/issues/265
symbol_name: std.Buffer,
/// parent should be the top level decls or container decls
fndef_scope: *Scope.FnDef,
/// parent is scope for last parameter
child_scope: *Scope,
/// parent is child_scope
block_scope: *Scope.Block,
/// Creates a Fn value with 1 ref
/// Takes ownership of symbol_name
pub fn create(comp: *Compilation, fn_type: *Type.Fn, fndef_scope: *Scope.FnDef, symbol_name: std.Buffer) !*Fn {
const self = try comp.a().create(Fn{
.base = Value{
.id = Value.Id.Fn,
.typeof = &fn_type.base,
.ref_count = std.atomic.Int(usize).init(1),
},
.fndef_scope = fndef_scope,
.child_scope = &fndef_scope.base,
.block_scope = undefined,
.symbol_name = symbol_name,
});
fn_type.base.base.ref();
fndef_scope.fn_val = self;
fndef_scope.base.ref();
return self;
}
pub fn destroy(self: *Fn, comp: *Compilation) void {
self.fndef_scope.base.deref(comp);
self.symbol_name.deinit();
comp.a().destroy(self);
}
};
pub const Void = struct {
base: Value,
pub fn get(comp: *Compilation) *Void {
comp.void_value.base.ref();
return comp.void_value;
}
pub fn destroy(self: *Void, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Bool = struct {
base: Value,
x: bool,
pub fn get(comp: *Compilation, x: bool) *Bool {
if (x) {
comp.true_value.base.ref();
return comp.true_value;
} else {
comp.false_value.base.ref();
return comp.false_value;
}
}
pub fn destroy(self: *Bool, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const NoReturn = struct {
base: Value,
pub fn get(comp: *Compilation) *NoReturn {
comp.noreturn_value.base.ref();
return comp.noreturn_value;
}
pub fn destroy(self: *NoReturn, comp: *Compilation) void {
comp.a().destroy(self);
}
};
pub const Ptr = struct {
base: Value,
pub const Mut = enum {
CompTimeConst,
CompTimeVar,
RunTime,
};
pub fn destroy(self: *Ptr, comp: *Compilation) void {
comp.a().destroy(self);
}
};
};
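The copy-on-write contract reduces to a simple rule: take a reference before storing a Value anywhere, deref when done, and only mutate in place while holding the sole reference. Sketch (hypothetical helper):

fn stashValue(comp: *Compilation, table: *std.ArrayList(*Value), val: *Value) !void {
    try table.append(val.getRef()); // the table now owns one reference
    // ... later, when the table is torn down:
    // for (table.toSlice()) |v| v.deref(comp);
}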


@@ -0,0 +1,4 @@
pub const Visib = enum {
Private,
Pub,
};


@@ -2003,12 +2003,6 @@ struct IrBasicBlock {
IrInstruction *must_be_comptime_source_instr;
};
struct LVal {
bool is_ptr;
bool is_const;
bool is_volatile;
};
enum IrInstructionId {
IrInstructionIdInvalid,
IrInstructionIdBr,
@@ -2970,6 +2964,11 @@ struct IrInstructionTypeName {
IrInstruction *type_value;
};
enum LVal {
LValNone,
LValPtr,
};
struct IrInstructionDeclRef {
IrInstruction base;


@@ -1430,10 +1430,10 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) {
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdArgTuple:
case TypeTableEntryIdPromise:
case TypeTableEntryIdVoid:
return false;
case TypeTableEntryIdOpaque:
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdVoid:
case TypeTableEntryIdBool:
return true;
case TypeTableEntryIdInt:
@@ -1460,7 +1460,10 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) {
case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
return child_type->id == TypeTableEntryIdPointer || child_type->id == TypeTableEntryIdFn;
if (child_type->id != TypeTableEntryIdPointer && child_type->id != TypeTableEntryIdFn) {
return false;
}
return type_allowed_in_extern(g, child_type);
}
case TypeTableEntryIdEnum:
return type_entry->data.enumeration.layout == ContainerLayoutExtern || type_entry->data.enumeration.layout == ContainerLayoutPacked;
@@ -1637,7 +1640,10 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
fn_type_id.return_type = specified_return_type;
}
if (!calling_convention_allows_zig_types(fn_type_id.cc) && !type_allowed_in_extern(g, fn_type_id.return_type)) {
if (!calling_convention_allows_zig_types(fn_type_id.cc) &&
fn_type_id.return_type->id != TypeTableEntryIdVoid &&
!type_allowed_in_extern(g, fn_type_id.return_type))
{
add_node_error(g, fn_proto->return_type,
buf_sprintf("return type '%s' not allowed in function with calling convention '%s'",
buf_ptr(&fn_type_id.return_type->name),
@@ -1939,6 +1945,17 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
break;
}
if (struct_type->data.structure.layout == ContainerLayoutExtern) {
if (!type_allowed_in_extern(g, field_type)) {
AstNode *field_source_node = decl_node->data.container_decl.fields.at(i);
add_node_error(g, field_source_node,
buf_sprintf("extern structs cannot contain fields of type '%s'",
buf_ptr(&field_type->name)));
struct_type->data.structure.is_invalid = true;
break;
}
}
if (!type_has_bits(field_type))
continue;
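The new check rejects extern struct fields whose types have no defined C ABI representation, recursing through optionals. A hypothetical input showing both sides of the rule:

const Bad = extern struct {
    bad_field: ?i32, // error: extern structs cannot contain fields of type '?i32'
};
const Ok = extern struct {
    ok_field: ?*i32, // accepted: an optional pointer has a defined C layout
};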


@@ -2212,10 +2212,8 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
} else if (type_entry->id == TypeTableEntryIdEnum ||
type_entry->id == TypeTableEntryIdErrorSet ||
type_entry->id == TypeTableEntryIdPointer ||
type_entry->id == TypeTableEntryIdBool ||
type_entry->id == TypeTableEntryIdPromise ||
type_entry->id == TypeTableEntryIdFn)
get_codegen_ptr_type(type_entry) != nullptr)
{
LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, false);
return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
@@ -3103,6 +3101,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
return nullptr;
} else if (first_arg_ret) {
return instruction->tmp_ptr;
} else if (handle_is_ptr(src_return_type)) {
auto store_instr = LLVMBuildStore(g->builder, result, instruction->tmp_ptr);
LLVMSetAlignment(store_instr, LLVMGetAlignment(instruction->tmp_ptr));
return instruction->tmp_ptr;
} else {
return result;
}


@@ -39,9 +39,6 @@ struct IrAnalyze {
IrBasicBlock *const_predecessor_bb;
};
static const LVal LVAL_NONE = { false, false, false };
static const LVal LVAL_PTR = { true, false, false };
enum ConstCastResultId {
ConstCastResultIdOk,
ConstCastResultIdErrSet,
@@ -249,8 +246,6 @@ static void ir_ref_bb(IrBasicBlock *bb) {
static void ir_ref_instruction(IrInstruction *instruction, IrBasicBlock *cur_bb) {
assert(instruction->id != IrInstructionIdInvalid);
instruction->ref_count += 1;
if (instruction->owner_bb != cur_bb && !instr_is_comptime(instruction))
ir_ref_bb(instruction->owner_bb);
}
static void ir_ref_var(VariableTableEntry *var) {
@@ -3164,7 +3159,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
case ReturnKindError:
{
assert(expr_node);
IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr);
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
IrInstruction *err_union_val = ir_build_load_ptr(irb, scope, node, err_union_ptr);
@@ -3192,7 +3187,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node,
ir_set_cursor_at_end_and_append_block(irb, continue_block);
IrInstruction *unwrapped_ptr = ir_build_unwrap_err_payload(irb, scope, node, err_union_ptr, false);
if (lval.is_ptr)
if (lval == LValPtr)
return unwrapped_ptr;
else
return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
@@ -3357,7 +3352,7 @@ static IrInstruction *ir_gen_bin_op_id(IrBuilder *irb, Scope *scope, AstNode *no
}
static IrInstruction *ir_gen_assign(IrBuilder *irb, Scope *scope, AstNode *node) {
IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LVAL_PTR);
IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr);
IrInstruction *rvalue = ir_gen_node(irb, node->data.bin_op_expr.op2, scope);
if (lvalue == irb->codegen->invalid_instruction || rvalue == irb->codegen->invalid_instruction)
@@ -3368,7 +3363,7 @@ static IrInstruction *ir_gen_assign(IrBuilder *irb, Scope *scope, AstNode *node)
}
static IrInstruction *ir_gen_assign_op(IrBuilder *irb, Scope *scope, AstNode *node, IrBinOp op_id) {
IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LVAL_PTR);
IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr);
if (lvalue == irb->codegen->invalid_instruction)
return lvalue;
IrInstruction *op1 = ir_build_load_ptr(irb, scope, node->data.bin_op_expr.op1, lvalue);
@@ -3470,7 +3465,7 @@ static IrInstruction *ir_gen_maybe_ok_or(IrBuilder *irb, Scope *parent_scope, As
AstNode *op1_node = node->data.bin_op_expr.op1;
AstNode *op2_node = node->data.bin_op_expr.op2;
IrInstruction *maybe_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LVAL_PTR);
IrInstruction *maybe_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr);
if (maybe_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -3657,7 +3652,7 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node,
Buf *variable_name = node->data.symbol_expr.symbol;
if (buf_eql_str(variable_name, "_") && lval.is_ptr) {
if (buf_eql_str(variable_name, "_") && lval == LValPtr) {
IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, node);
const_instruction->base.value.type = get_pointer_to_type(irb->codegen,
irb->codegen->builtin_types.entry_void, false);
@@ -3669,8 +3664,8 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node,
auto primitive_table_entry = irb->codegen->primitive_type_table.maybe_get(variable_name);
if (primitive_table_entry) {
IrInstruction *value = ir_build_const_type(irb, scope, node, primitive_table_entry->value);
if (lval.is_ptr) {
return ir_build_ref(irb, scope, node, value, lval.is_const, lval.is_volatile);
if (lval == LValPtr) {
return ir_build_ref(irb, scope, node, value, false, false);
} else {
return value;
}
@@ -3679,7 +3674,7 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node,
VariableTableEntry *var = find_variable(irb->codegen, scope, variable_name);
if (var) {
IrInstruction *var_ptr = ir_build_var_ptr(irb, scope, node, var);
if (lval.is_ptr)
if (lval == LValPtr)
return var_ptr;
else
return ir_build_load_ptr(irb, scope, node, var_ptr);
@@ -3705,7 +3700,7 @@ static IrInstruction *ir_gen_array_access(IrBuilder *irb, Scope *scope, AstNode
assert(node->type == NodeTypeArrayAccessExpr);
AstNode *array_ref_node = node->data.array_access_expr.array_ref_expr;
IrInstruction *array_ref_instruction = ir_gen_node_extra(irb, array_ref_node, scope, LVAL_PTR);
IrInstruction *array_ref_instruction = ir_gen_node_extra(irb, array_ref_node, scope, LValPtr);
if (array_ref_instruction == irb->codegen->invalid_instruction)
return array_ref_instruction;
@@ -3716,7 +3711,7 @@ static IrInstruction *ir_gen_array_access(IrBuilder *irb, Scope *scope, AstNode
IrInstruction *ptr_instruction = ir_build_elem_ptr(irb, scope, node, array_ref_instruction,
subscript_instruction, true, PtrLenSingle);
if (lval.is_ptr)
if (lval == LValPtr)
return ptr_instruction;
return ir_build_load_ptr(irb, scope, node, ptr_instruction);
@@ -3728,7 +3723,7 @@ static IrInstruction *ir_gen_field_access(IrBuilder *irb, Scope *scope, AstNode
AstNode *container_ref_node = node->data.field_access_expr.struct_expr;
Buf *field_name = node->data.field_access_expr.field_name;
IrInstruction *container_ref_instruction = ir_gen_node_extra(irb, container_ref_node, scope, LVAL_PTR);
IrInstruction *container_ref_instruction = ir_gen_node_extra(irb, container_ref_node, scope, LValPtr);
if (container_ref_instruction == irb->codegen->invalid_instruction)
return container_ref_instruction;
@@ -4386,7 +4381,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
case BuiltinFnIdField:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
IrInstruction *arg0_value = ir_gen_node_extra(irb, arg0_node, scope, LVAL_PTR);
IrInstruction *arg0_value = ir_gen_node_extra(irb, arg0_node, scope, LValPtr);
if (arg0_value == irb->codegen->invalid_instruction)
return arg0_value;
@@ -4397,7 +4392,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
IrInstruction *ptr_instruction = ir_build_field_ptr_instruction(irb, scope, node, arg0_value, arg1_value);
if (lval.is_ptr)
if (lval == LValPtr)
return ptr_instruction;
return ir_build_load_ptr(irb, scope, node, ptr_instruction);
@@ -4928,18 +4923,18 @@ static IrInstruction *ir_gen_prefix_op_id_lval(IrBuilder *irb, Scope *scope, Ast
}
static IrInstruction *ir_gen_prefix_op_id(IrBuilder *irb, Scope *scope, AstNode *node, IrUnOp op_id) {
return ir_gen_prefix_op_id_lval(irb, scope, node, op_id, LVAL_NONE);
return ir_gen_prefix_op_id_lval(irb, scope, node, op_id, LValNone);
}
static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval) {
if (!lval.is_ptr)
if (lval != LValPtr)
return value;
if (value == irb->codegen->invalid_instruction)
return value;
// We needed a pointer to a value, but we got a value. So we create
// an instruction which just makes a const pointer of it.
return ir_build_ref(irb, scope, value->source_node, value, lval.is_const, lval.is_volatile);
return ir_build_ref(irb, scope, value->source_node, value, false, false);
}
static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) {
@@ -5001,7 +4996,7 @@ static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode
static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode *source_node, AstNode *expr_node,
LVal lval)
{
IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr);
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -5009,7 +5004,7 @@ static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode
if (payload_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
if (lval.is_ptr)
if (lval == LValPtr)
return payload_ptr;
return ir_build_load_ptr(irb, scope, source_node, payload_ptr);
@@ -5046,7 +5041,7 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval);
case PrefixOpAddrOf: {
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR), lval);
return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LValPtr), lval);
}
}
zig_unreachable();
@@ -5186,7 +5181,7 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
} else {
payload_scope = scope;
}
IrInstruction *err_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LVAL_PTR);
IrInstruction *err_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LValPtr);
if (err_val_ptr == irb->codegen->invalid_instruction)
return err_val_ptr;
IrInstruction *err_val = ir_build_load_ptr(irb, scope, node->data.while_expr.condition, err_val_ptr);
@@ -5269,7 +5264,7 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n
VariableTableEntry *payload_var = ir_create_var(irb, symbol_node, scope, var_symbol,
true, false, false, is_comptime);
Scope *child_scope = payload_var->child_scope;
IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LVAL_PTR);
IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, scope, LValPtr);
if (maybe_val_ptr == irb->codegen->invalid_instruction)
return maybe_val_ptr;
IrInstruction *maybe_val = ir_build_load_ptr(irb, scope, node->data.while_expr.condition, maybe_val_ptr);
@@ -5413,7 +5408,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo
}
assert(elem_node->type == NodeTypeSymbol);
IrInstruction *array_val_ptr = ir_gen_node_extra(irb, array_node, parent_scope, LVAL_PTR);
IrInstruction *array_val_ptr = ir_gen_node_extra(irb, array_node, parent_scope, LValPtr);
if (array_val_ptr == irb->codegen->invalid_instruction)
return array_val_ptr;
@@ -5700,7 +5695,7 @@ static IrInstruction *ir_gen_test_expr(IrBuilder *irb, Scope *scope, AstNode *no
AstNode *else_node = node->data.test_expr.else_node;
bool var_is_ptr = node->data.test_expr.var_is_ptr;
IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr);
if (maybe_val_ptr == irb->codegen->invalid_instruction)
return maybe_val_ptr;
@@ -5778,7 +5773,7 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode *
Buf *var_symbol = node->data.if_err_expr.var_symbol;
Buf *err_symbol = node->data.if_err_expr.err_symbol;
IrInstruction *err_val_ptr = ir_gen_node_extra(irb, target_node, scope, LVAL_PTR);
IrInstruction *err_val_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr);
if (err_val_ptr == irb->codegen->invalid_instruction)
return err_val_ptr;
@@ -5904,7 +5899,7 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode *
assert(node->type == NodeTypeSwitchExpr);
AstNode *target_node = node->data.switch_expr.expr;
IrInstruction *target_value_ptr = ir_gen_node_extra(irb, target_node, scope, LVAL_PTR);
IrInstruction *target_value_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr);
if (target_value_ptr == irb->codegen->invalid_instruction)
return target_value_ptr;
IrInstruction *target_value = ir_build_switch_target(irb, scope, node, target_value_ptr);
@@ -6277,7 +6272,7 @@ static IrInstruction *ir_gen_slice(IrBuilder *irb, Scope *scope, AstNode *node)
AstNode *start_node = slice_expr->start;
AstNode *end_node = slice_expr->end;
IrInstruction *ptr_value = ir_gen_node_extra(irb, array_node, scope, LVAL_PTR);
IrInstruction *ptr_value = ir_gen_node_extra(irb, array_node, scope, LValPtr);
if (ptr_value == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -6311,11 +6306,11 @@ static IrInstruction *ir_gen_err_ok_or(IrBuilder *irb, Scope *parent_scope, AstN
add_node_error(irb->codegen, var_node, buf_sprintf("unused variable: '%s'", buf_ptr(var_name)));
return irb->codegen->invalid_instruction;
}
return ir_gen_err_assert_ok(irb, parent_scope, node, op1_node, LVAL_NONE);
return ir_gen_err_assert_ok(irb, parent_scope, node, op1_node, LValNone);
}
IrInstruction *err_union_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LVAL_PTR);
IrInstruction *err_union_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr);
if (err_union_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
@@ -6868,7 +6863,7 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
IrInstruction *ptr_instruction = ir_gen_field_access(irb, scope, node);
if (ptr_instruction == irb->codegen->invalid_instruction)
return ptr_instruction;
if (lval.is_ptr)
if (lval == LValPtr)
return ptr_instruction;
return ir_build_load_ptr(irb, scope, node, ptr_instruction);
@@ -6884,12 +6879,12 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
case NodeTypeUnwrapOptional: {
AstNode *expr_node = node->data.unwrap_optional.expr;
IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr);
if (maybe_ptr == irb->codegen->invalid_instruction)
return irb->codegen->invalid_instruction;
IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true);
if (lval.is_ptr)
if (lval == LValPtr)
return unwrapped_ptr;
return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
@@ -6959,7 +6954,7 @@ static IrInstruction *ir_gen_node_extra(IrBuilder *irb, AstNode *node, Scope *sc
}
static IrInstruction *ir_gen_node(IrBuilder *irb, AstNode *node, Scope *scope) {
return ir_gen_node_extra(irb, node, scope, LVAL_NONE);
return ir_gen_node_extra(irb, node, scope, LValNone);
}
static void invalidate_exec(IrExecutable *exec) {
@@ -7089,7 +7084,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
irb->exec->coro_final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup");
}
IrInstruction *result = ir_gen_node_extra(irb, node, scope, LVAL_NONE);
IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone);
assert(result);
if (irb->exec->invalid)
return false;
@@ -9242,26 +9237,9 @@ static TypeTableEntry *ir_finish_anal(IrAnalyze *ira, TypeTableEntry *result_typ
}
static IrInstruction *ir_get_const(IrAnalyze *ira, IrInstruction *old_instruction) {
IrInstruction *new_instruction;
if (old_instruction->id == IrInstructionIdVarPtr) {
IrInstructionVarPtr *old_var_ptr_instruction = (IrInstructionVarPtr *)old_instruction;
IrInstructionVarPtr *var_ptr_instruction = ir_create_instruction<IrInstructionVarPtr>(&ira->new_irb,
old_instruction->scope, old_instruction->source_node);
var_ptr_instruction->var = old_var_ptr_instruction->var;
new_instruction = &var_ptr_instruction->base;
} else if (old_instruction->id == IrInstructionIdFieldPtr) {
IrInstructionFieldPtr *field_ptr_instruction = ir_create_instruction<IrInstructionFieldPtr>(&ira->new_irb,
old_instruction->scope, old_instruction->source_node);
new_instruction = &field_ptr_instruction->base;
} else if (old_instruction->id == IrInstructionIdElemPtr) {
IrInstructionElemPtr *elem_ptr_instruction = ir_create_instruction<IrInstructionElemPtr>(&ira->new_irb,
old_instruction->scope, old_instruction->source_node);
new_instruction = &elem_ptr_instruction->base;
} else {
IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb,
old_instruction->scope, old_instruction->source_node);
new_instruction = &const_instruction->base;
}
IrInstructionConst *const_instruction = ir_create_instruction<IrInstructionConst>(&ira->new_irb,
old_instruction->scope, old_instruction->source_node);
IrInstruction *new_instruction = &const_instruction->base;
new_instruction->value.special = ConstValSpecialStatic;
return new_instruction;
}
@@ -9615,23 +9593,6 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi
if (type_is_invalid(value->value.type))
return ira->codegen->invalid_instruction;
if (value->id == IrInstructionIdLoadPtr) {
IrInstructionLoadPtr *load_ptr_inst = (IrInstructionLoadPtr *) value;
if (load_ptr_inst->ptr->value.type->data.pointer.is_const) {
return load_ptr_inst->ptr;
}
type_ensure_zero_bits_known(ira->codegen, value->value.type);
if (type_is_invalid(value->value.type)) {
return ira->codegen->invalid_instruction;
}
if (!type_has_bits(value->value.type)) {
return load_ptr_inst->ptr;
}
}
if (instr_is_comptime(value)) {
ConstExprValue *val = ir_resolve_const(ira, value, UndefOk);
if (!val)
@@ -11150,7 +11111,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
if (type_is_invalid(resolved_type))
return resolved_type;
bool operator_allowed;
switch (resolved_type->id) {
case TypeTableEntryIdInvalid:
zig_unreachable(); // handled above
@@ -11159,6 +11120,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
operator_allowed = true;
break;
case TypeTableEntryIdBool:
@@ -11173,19 +11135,8 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdArgTuple:
case TypeTableEntryIdPromise:
if (!is_equality_cmp) {
ir_add_error_node(ira, source_node,
buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name)));
return ira->codegen->builtin_types.entry_invalid;
}
break;
case TypeTableEntryIdEnum:
if (!is_equality_cmp) {
ir_add_error_node(ira, source_node,
buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name)));
return ira->codegen->builtin_types.entry_invalid;
}
operator_allowed = is_equality_cmp;
break;
case TypeTableEntryIdUnreachable:
@@ -11193,12 +11144,18 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
case TypeTableEntryIdStruct:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdUnion:
ir_add_error_node(ira, source_node,
buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name)));
return ira->codegen->builtin_types.entry_invalid;
operator_allowed = false;
break;
case TypeTableEntryIdOptional:
operator_allowed = is_equality_cmp && get_codegen_ptr_type(resolved_type) != nullptr;
break;
}
if (!operator_allowed) {
ir_add_error_node(ira, source_node,
buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name)));
return ira->codegen->builtin_types.entry_invalid;
}
IrInstruction *casted_op1 = ir_implicit_cast(ira, op1, resolved_type);
@@ -19752,7 +19709,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira,
Tld *tld = instruction->tld;
LVal lval = instruction->lval;
resolve_top_level_decl(ira->codegen, tld, lval.is_ptr, instruction->base.source_node);
resolve_top_level_decl(ira->codegen, tld, lval == LValPtr, instruction->base.source_node);
if (tld->resolution == TldResolutionInvalid)
return ira->codegen->builtin_types.entry_invalid;
@@ -19773,7 +19730,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira,
add_link_lib_symbol(ira, tld_var->extern_lib_name, &var->name, instruction->base.source_node);
}
if (lval.is_ptr) {
if (lval == LValPtr) {
ir_link_new_instruction(var_ptr, &instruction->base);
return var_ptr->value.type;
} else {
@@ -19794,7 +19751,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira,
IrInstruction *ref_instruction = ir_create_const_fn(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, fn_entry);
if (lval.is_ptr) {
if (lval == LValPtr) {
IrInstruction *ptr_instr = ir_get_ref(ira, &instruction->base, ref_instruction, true, false);
ir_link_new_instruction(ptr_instr, &instruction->base);
return ptr_instr->value.type;
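User-visibly, this rewrite centralizes the "operator not allowed" diagnostic behind one operator_allowed flag and, for optionals, permits == and != only when the payload is a pointer-like type. Hypothetical examples of what is now accepted and rejected:

fn cmp(a: ?*i32, b: ?*i32, c: ?i32, d: ?i32) void {
    _ = a == b; // allowed: equality on an optional of a pointer type
    _ = c == d; // error: operator not allowed for type '?i32'
}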


@@ -1005,10 +1005,8 @@ static void ir_print_ptr_type(IrPrint *irp, IrInstructionPtrType *instruction) {
}
static void ir_print_decl_ref(IrPrint *irp, IrInstructionDeclRef *instruction) {
const char *ptr_str = instruction->lval.is_ptr ? "ptr " : "";
const char *const_str = instruction->lval.is_const ? "const " : "";
const char *volatile_str = instruction->lval.is_volatile ? "volatile " : "";
fprintf(irp->f, "declref %s%s%s%s", const_str, volatile_str, ptr_str, buf_ptr(instruction->tld->name));
const char *ptr_str = (instruction->lval == LValPtr) ? "ptr " : "";
fprintf(irp->f, "declref %s%s", ptr_str, buf_ptr(instruction->tld->name));
}
static void ir_print_panic(IrPrint *irp, IrInstructionPanic *instruction) {

View File

@@ -891,15 +891,19 @@ int main(int argc, char **argv) {
add_package(g, cur_pkg, g->root_package);
if (cmd == CmdBuild || cmd == CmdRun) {
codegen_set_emit_file_type(g, emit_file_type);
if (cmd == CmdBuild || cmd == CmdRun || cmd == CmdTest) {
for (size_t i = 0; i < objects.length; i += 1) {
codegen_add_object(g, buf_create_from_str(objects.at(i)));
}
for (size_t i = 0; i < asm_files.length; i += 1) {
codegen_add_assembly(g, buf_create_from_str(asm_files.at(i)));
}
}
if (cmd == CmdBuild || cmd == CmdRun) {
codegen_set_emit_file_type(g, emit_file_type);
codegen_build(g);
codegen_link(g, out_file);
if (timing_info)


@@ -41,8 +41,8 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
return self.items[0..self.len];
}
pub fn at(self: Self, n: usize) T {
return self.toSliceConst()[n];
pub fn at(self: Self, i: usize) T {
return self.toSliceConst()[i];
}
/// Sets the value at index `i`, or returns `error.OutOfBounds` if
@@ -85,7 +85,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
try self.ensureCapacity(self.len + 1);
self.len += 1;
mem.copy(T, self.items[n + 1 .. self.len], self.items[n .. self.len - 1]);
mem.copyBackwards(T, self.items[n + 1 .. self.len], self.items[n .. self.len - 1]);
self.items[n] = item;
}
@@ -93,7 +93,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
try self.ensureCapacity(self.len + items.len);
self.len += items.len;
mem.copy(T, self.items[n + items.len .. self.len], self.items[n .. self.len - items.len]);
mem.copyBackwards(T, self.items[n + items.len .. self.len], self.items[n .. self.len - items.len]);
mem.copy(T, self.items[n .. n + items.len], items);
}
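The switch to mem.copyBackwards matters because insert and insertSlice shift elements right within the same buffer: source and destination overlap with the destination ahead, so a forward copy reads slots it has already overwritten. Copying from the end avoids that. A sketch of the overlap case (assuming std.mem and std.debug.assert are in scope):

test "copyBackwards handles overlapping shift" {
    var buf = []i32{ 1, 2, 3, 0 };
    // Shift the first three elements right by one to open a slot at index 0.
    mem.copyBackwards(i32, buf[1..4], buf[0..3]);
    assert(mem.eql(i32, buf[0..], []i32{ 1, 1, 2, 3 }));
}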
@@ -102,6 +102,26 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
new_item_ptr.* = item;
}
/// Removes the element at the specified index and returns it.
/// The empty slot is filled from the end of the list.
pub fn swapRemove(self: *Self, i: usize) T {
if (self.len - 1 == i) return self.pop();
const slice = self.toSlice();
const old_item = slice[i];
slice[i] = self.pop();
return old_item;
}
pub fn removeOrError(self: *Self, n: usize) !T {
if (n >= self.len) return error.OutOfBounds;
if (self.len - 1 == n) return self.pop();
var old_item = self.at(n);
try self.setOrError(n, self.pop());
return old_item;
}
pub fn appendSlice(self: *Self, items: []align(A) const T) !void {
try self.ensureCapacity(self.len + items.len);
mem.copy(T, self.items[self.len..], items);
@@ -232,6 +252,33 @@ test "basic ArrayList test" {
assert(list.pop() == 33);
}
test "std.ArrayList.swapRemove" {
var list = ArrayList(i32).init(debug.global_allocator);
defer list.deinit();
try list.append(1);
try list.append(2);
try list.append(3);
try list.append(4);
try list.append(5);
try list.append(6);
try list.append(7);
//remove from middle
assert(list.swapRemove(3) == 4);
assert(list.at(3) == 7);
assert(list.len == 6);
//remove from end
assert(list.swapRemove(5) == 6);
assert(list.len == 5);
//remove from front
assert(list.swapRemove(0) == 1);
assert(list.at(0) == 5);
assert(list.len == 4);
}
test "iterator ArrayList test" {
var list = ArrayList(i32).init(debug.global_allocator);
defer list.deinit();
@@ -266,19 +313,36 @@ test "insert ArrayList test" {
defer list.deinit();
try list.append(1);
try list.append(2);
try list.append(3);
try list.insert(0, 5);
assert(list.items[0] == 5);
assert(list.items[1] == 1);
assert(list.items[2] == 2);
assert(list.items[3] == 3);
}
test "insertSlice ArrayList test" {
var list = ArrayList(i32).init(debug.global_allocator);
defer list.deinit();
try list.append(1);
try list.append(2);
try list.append(3);
try list.append(4);
try list.insertSlice(1, []const i32{
9,
8,
});
assert(list.items[0] == 5);
assert(list.items[0] == 1);
assert(list.items[1] == 9);
assert(list.items[2] == 8);
assert(list.items[3] == 2);
assert(list.items[4] == 3);
assert(list.items[5] == 4);
const items = []const i32{1};
try list.insertSlice(0, items[0..0]);
assert(list.items[0] == 5);
assert(list.len == 6);
assert(list.items[0] == 1);
}


@@ -1,9 +1,9 @@
pub const Stack = @import("stack.zig").Stack;
pub const QueueMpsc = @import("queue_mpsc.zig").QueueMpsc;
pub const QueueMpmc = @import("queue_mpmc.zig").QueueMpmc;
pub const Queue = @import("queue.zig").Queue;
pub const Int = @import("int.zig").Int;
test "std.atomic" {
_ = @import("stack.zig");
_ = @import("queue_mpsc.zig");
_ = @import("queue_mpmc.zig");
_ = @import("queue.zig");
_ = @import("int.zig");
}

29
std/atomic/int.zig Normal file

@@ -0,0 +1,29 @@
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
/// Thread-safe, lock-free integer
pub fn Int(comptime T: type) type {
return struct {
unprotected_value: T,
pub const Self = this;
pub fn init(init_val: T) Self {
return Self{ .unprotected_value = init_val };
}
/// Returns previous value
pub fn incr(self: *Self) T {
return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
}
/// Returns previous value
pub fn decr(self: *Self) T {
return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
}
pub fn get(self: *Self) T {
return @atomicLoad(T, &self.unprotected_value, AtomicOrder.SeqCst);
}
};
}
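Usage mirrors how the test harness earlier in this diff numbers temp files: initialize once, then call incr/get from any thread; both incr and decr return the previous value. A sketch (assuming std.debug.assert is in scope):

test "std.atomic.Int basic usage" {
    var counter = Int(usize).init(0);
    assert(counter.incr() == 0); // returns the value before the add
    assert(counter.incr() == 1);
    assert(counter.get() == 2);
    assert(counter.decr() == 2); // counter is back to 1
}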


@@ -2,15 +2,13 @@ const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
/// Many producer, many consumer, non-allocating, thread-safe, lock-free
/// This implementation has a crippling limitation - it hangs onto node
/// memory for 1 extra get() and 1 extra put() operation - when get() returns a node, that
/// node must not be freed until both the next get() and the next put() complete.
pub fn QueueMpmc(comptime T: type) type {
/// Many producer, many consumer, non-allocating, thread-safe.
/// Uses a spinlock to protect get() and put().
pub fn Queue(comptime T: type) type {
return struct {
head: *Node,
tail: *Node,
root: Node,
head: ?*Node,
tail: ?*Node,
lock: u8,
pub const Self = this;
@@ -19,31 +17,48 @@ pub fn QueueMpmc(comptime T: type) type {
data: T,
};
/// TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
pub fn init(self: *Self) void {
self.root.next = null;
self.head = &self.root;
self.tail = &self.root;
pub fn init() Self {
return Self{
.head = null,
.tail = null,
.lock = 0,
};
}
pub fn put(self: *Self, node: *Node) void {
    node.next = null;
    const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
    _ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
}
/// node must not be freed until both the next get() and the next put() complete
pub fn get(self: *Self) ?*Node {
    var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
    while (true) {
        const node = head.next orelse return null;
        head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node;
    }
}
pub fn put(self: *Self, node: *Node) void {
    node.next = null;
    while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
    defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
    const opt_tail = self.tail;
    self.tail = node;
    if (opt_tail) |tail| {
        tail.next = node;
    } else {
        assert(self.head == null);
        self.head = node;
    }
}
pub fn get(self: *Self) ?*Node {
    while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
    defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
    const head = self.head orelse return null;
    self.head = head.next;
    if (head.next == null) self.tail = null;
    return head;
}
/// This is a debug function that is not thread-safe.
pub fn isEmpty(self: *Self) bool {
    return @atomicLoad(?*Node, &self.head, builtin.AtomicOrder.SeqCst) == null;
}
pub fn dump(self: *Self) void {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
std.debug.warn("head: ");
dumpRecursive(self.head, 0);
std.debug.warn("tail: ");
@@ -64,12 +79,12 @@ pub fn QueueMpmc(comptime T: type) type {
};
}
const std = @import("std");
const std = @import("../index.zig");
const assert = std.debug.assert;
const Context = struct {
allocator: *std.mem.Allocator,
queue: *QueueMpmc(i32),
queue: *Queue(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
@@ -84,7 +99,7 @@ const Context = struct {
const puts_per_thread = 500;
const put_thread_count = 3;
test "std.atomic.queue_mpmc" {
test "std.atomic.Queue" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
@@ -94,8 +109,7 @@ test "std.atomic.queue_mpmc" {
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
var a = &fixed_buffer_allocator.allocator;
var queue: QueueMpmc(i32) = undefined;
queue.init();
var queue = Queue(i32).init();
var context = Context{
.allocator = a,
.queue = &queue,
@@ -140,7 +154,7 @@ fn startPuts(ctx: *Context) u8 {
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(QueueMpmc(i32).Node{
const node = ctx.allocator.create(Queue(i32).Node{
.next = undefined,
.data = x,
}) catch unreachable;
@@ -164,17 +178,16 @@ fn startGets(ctx: *Context) u8 {
}
}
test "std.atomic.queue_mpmc single-threaded" {
var queue: QueueMpmc(i32) = undefined;
queue.init();
test "std.atomic.Queue single-threaded" {
var queue = Queue(i32).init();
var node_0 = QueueMpmc(i32).Node{
var node_0 = Queue(i32).Node{
.data = 0,
.next = undefined,
};
queue.put(&node_0);
var node_1 = QueueMpmc(i32).Node{
var node_1 = Queue(i32).Node{
.data = 1,
.next = undefined,
};
@@ -182,13 +195,13 @@ test "std.atomic.queue_mpmc single-threaded" {
assert(queue.get().?.data == 0);
var node_2 = QueueMpmc(i32).Node{
var node_2 = Queue(i32).Node{
.data = 2,
.next = undefined,
};
queue.put(&node_2);
var node_3 = QueueMpmc(i32).Node{
var node_3 = Queue(i32).Node{
.data = 3,
.next = undefined,
};
@@ -198,15 +211,14 @@ test "std.atomic.queue_mpmc single-threaded" {
assert(queue.get().?.data == 2);
var node_4 = QueueMpmc(i32).Node{
var node_4 = Queue(i32).Node{
.data = 4,
.next = undefined,
};
queue.put(&node_4);
assert(queue.get().?.data == 3);
// if we were to set node_3.next to null here, it would cause this test
// to fail. this demonstrates the limitation of hanging on to extra memory.
node_3.next = null;
assert(queue.get().?.data == 4);
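
The locking idiom the new Queue (and Stack below) relies on is worth seeing in isolation; a standalone sketch of the acquire/release pair used by put() and get(), with hypothetical names:

const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = @import("std").debug.assert;

var spin_lock: u8 = 0; // 0 = unlocked, 1 = held

fn protectedSection() void {
    // acquire: keep swapping 1 in until the previous value was 0 (unlocked)
    while (@atomicRmw(u8, &spin_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
    // release on scope exit: swap 0 back in; the previous value must still be 1
    defer assert(@atomicRmw(u8, &spin_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);

    // ... code that touches head/tail goes here ...
}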


@@ -1,185 +0,0 @@
const std = @import("../index.zig");
const assert = std.debug.assert;
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
/// Many producer, single consumer, non-allocating, thread-safe, lock-free
pub fn QueueMpsc(comptime T: type) type {
return struct {
inboxes: [2]std.atomic.Stack(T),
outbox: std.atomic.Stack(T),
inbox_index: usize,
pub const Self = this;
pub const Node = std.atomic.Stack(T).Node;
/// Not thread-safe. The call to init() must complete before any other functions are called.
/// No deinitialization required.
pub fn init() Self {
return Self{
.inboxes = []std.atomic.Stack(T){
std.atomic.Stack(T).init(),
std.atomic.Stack(T).init(),
},
.outbox = std.atomic.Stack(T).init(),
.inbox_index = 0,
};
}
/// Fully thread-safe. put() may be called from any thread at any time.
pub fn put(self: *Self, node: *Node) void {
const inbox_index = @atomicLoad(usize, &self.inbox_index, AtomicOrder.SeqCst);
const inbox = &self.inboxes[inbox_index];
inbox.push(node);
}
/// Must be called by only 1 consumer at a time. Every call to get() and isEmpty() must complete before
/// the next call to get().
pub fn get(self: *Self) ?*Node {
if (self.outbox.pop()) |node| {
return node;
}
const prev_inbox_index = @atomicRmw(usize, &self.inbox_index, AtomicRmwOp.Xor, 0x1, AtomicOrder.SeqCst);
const prev_inbox = &self.inboxes[prev_inbox_index];
while (prev_inbox.pop()) |node| {
self.outbox.push(node);
}
return self.outbox.pop();
}
/// Must be called by only 1 consumer at a time. Every call to get() and isEmpty() must complete before
/// the next call to isEmpty().
pub fn isEmpty(self: *Self) bool {
if (!self.outbox.isEmpty()) return false;
const prev_inbox_index = @atomicRmw(usize, &self.inbox_index, AtomicRmwOp.Xor, 0x1, AtomicOrder.SeqCst);
const prev_inbox = &self.inboxes[prev_inbox_index];
while (prev_inbox.pop()) |node| {
self.outbox.push(node);
}
return self.outbox.isEmpty();
}
/// For debugging only. No API guarantees about what this does.
pub fn dump(self: *Self) void {
{
var it = self.outbox.root;
while (it) |node| {
std.debug.warn("0x{x} -> ", @ptrToInt(node));
it = node.next;
}
}
const inbox_index = self.inbox_index;
const inboxes = []*std.atomic.Stack(T){
&self.inboxes[self.inbox_index],
&self.inboxes[1 - self.inbox_index],
};
for (inboxes) |inbox| {
var it = inbox.root;
while (it) |node| {
std.debug.warn("0x{x} -> ", @ptrToInt(node));
it = node.next;
}
}
std.debug.warn("null\n");
}
};
}
const Context = struct {
allocator: *std.mem.Allocator,
queue: *QueueMpsc(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
puts_done: u8, // TODO make this a bool
};
// TODO add lazy evaluated build options and then put puts_per_thread behind
// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor
// CI we would use a less aggressive setting since at 1 core, while we still
// want this test to pass, we need a smaller value since there is so much thrashing
// we would also use a less aggressive setting when running in valgrind
const puts_per_thread = 500;
const put_thread_count = 3;
test "std.atomic.queue_mpsc" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
defer direct_allocator.allocator.free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
var a = &fixed_buffer_allocator.allocator;
var queue = QueueMpsc(i32).init();
var context = Context{
.allocator = a,
.queue = &queue,
.put_sum = 0,
.get_sum = 0,
.puts_done = 0,
.get_count = 0,
};
var putters: [put_thread_count]*std.os.Thread = undefined;
for (putters) |*t| {
t.* = try std.os.spawnThread(&context, startPuts);
}
var getters: [1]*std.os.Thread = undefined;
for (getters) |*t| {
t.* = try std.os.spawnThread(&context, startGets);
}
for (putters) |t|
t.wait();
_ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
for (getters) |t|
t.wait();
if (context.put_sum != context.get_sum) {
std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
}
if (context.get_count != puts_per_thread * put_thread_count) {
std.debug.panic(
"failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
context.get_count,
u32(puts_per_thread),
u32(put_thread_count),
);
}
}
fn startPuts(ctx: *Context) u8 {
var put_count: usize = puts_per_thread;
var r = std.rand.DefaultPrng.init(0xdeadbeef);
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(QueueMpsc(i32).Node{
.next = undefined,
.data = x,
}) catch unreachable;
ctx.queue.put(node);
_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
}
return 0;
}
fn startGets(ctx: *Context) u8 {
while (true) {
const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
while (ctx.queue.get()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
_ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
_ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
}
if (last) return 0;
}
}


@@ -1,10 +1,13 @@
const assert = std.debug.assert;
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
/// Many reader, many writer, non-allocating, thread-safe, lock-free
/// Many reader, many writer, non-allocating, thread-safe
/// Uses a spinlock to protect push() and pop()
pub fn Stack(comptime T: type) type {
return struct {
root: ?*Node,
lock: u8,
pub const Self = this;
@@ -14,7 +17,10 @@ pub fn Stack(comptime T: type) type {
};
pub fn init() Self {
return Self{ .root = null };
return Self{
.root = null,
.lock = 0,
};
}
/// push operation, but only if you are the first item in the stack. if you did not succeed in
@@ -25,18 +31,20 @@ pub fn Stack(comptime T: type) type {
}
pub fn push(self: *Self, node: *Node) void {
var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
node.next = root;
root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse break;
}
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
node.next = self.root;
self.root = node;
}
pub fn pop(self: *Self) ?*Node {
var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
root = @cmpxchgWeak(?*Node, &self.root, root, (root orelse return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return root;
}
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
const root = self.root orelse return null;
self.root = root.next;
return root;
}
pub fn isEmpty(self: *Self) bool {
@@ -45,7 +53,7 @@ pub fn Stack(comptime T: type) type {
};
}
const std = @import("std");
const std = @import("../index.zig");
const Context = struct {
allocator: *std.mem.Allocator,
stack: *Stack(i32),
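
Single-threaded usage of the stack stays allocation-free because callers own the nodes; a small sketch, assuming the std.atomic.Stack export:

const std = @import("std");
const assert = std.debug.assert;

test "std.atomic.Stack usage sketch" {
    var stack = std.atomic.Stack(i32).init();
    assert(stack.pop() == null);

    var node = std.atomic.Stack(i32).Node{
        .next = undefined, // push() overwrites this
        .data = 42,
    };
    stack.push(&node);
    assert(stack.pop().?.data == 42);
}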


@@ -1596,6 +1596,8 @@ pub const TestStep = struct {
target: Target,
exec_cmd_args: ?[]const ?[]const u8,
include_dirs: ArrayList([]const u8),
lib_paths: ArrayList([]const u8),
object_files: ArrayList([]const u8),
pub fn init(builder: *Builder, root_src: []const u8) TestStep {
const step_name = builder.fmt("test {}", root_src);
@@ -1611,9 +1613,15 @@ pub const TestStep = struct {
.target = Target{ .Native = {} },
.exec_cmd_args = null,
.include_dirs = ArrayList([]const u8).init(builder.allocator),
.lib_paths = ArrayList([]const u8).init(builder.allocator),
.object_files = ArrayList([]const u8).init(builder.allocator),
};
}
pub fn addLibPath(self: *TestStep, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
}
pub fn setVerbose(self: *TestStep, value: bool) void {
self.verbose = value;
}
@@ -1638,6 +1646,10 @@ pub const TestStep = struct {
self.filter = text;
}
pub fn addObjectFile(self: *TestStep, path: []const u8) void {
self.object_files.append(path) catch unreachable;
}
pub fn setTarget(self: *TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
self.target = Target{
.Cross = CrossTarget{
@@ -1699,6 +1711,11 @@ pub const TestStep = struct {
try zig_args.append(self.name_prefix);
}
for (self.object_files.toSliceConst()) |object_file| {
try zig_args.append("--object");
try zig_args.append(builder.pathFromRoot(object_file));
}
{
var it = self.link_libs.iterator();
while (true) {
@@ -1734,6 +1751,11 @@ pub const TestStep = struct {
try zig_args.append(rpath);
}
for (self.lib_paths.toSliceConst()) |lib_path| {
try zig_args.append("--library-path");
try zig_args.append(lib_path);
}
for (builder.lib_paths.toSliceConst()) |lib_path| {
try zig_args.append("--library-path");
try zig_args.append(lib_path);
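
From a user's build script, the new TestStep hooks mirror the ones LibExeObjStep already has; a sketch (paths hypothetical):

const Builder = @import("std").build.Builder;

pub fn build(b: *Builder) void {
    const t = b.addTest("src/main.zig");
    t.addLibPath("/opt/foo/lib"); // forwarded to the compiler as --library-path
    t.addObjectFile("deps/bar.o"); // forwarded as --object
    b.default_step.dependOn(&t.step);
}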


@@ -44,7 +44,7 @@ pub const timezone = extern struct {
tz_dsttime: i32,
};
pub const mach_timebase_info_data = struct {
pub const mach_timebase_info_data = extern struct {
numer: u32,
denom: u32,
};


@@ -3,6 +3,8 @@ pub const Loop = @import("event/loop.zig").Loop;
pub const Lock = @import("event/lock.zig").Lock;
pub const tcp = @import("event/tcp.zig");
pub const Channel = @import("event/channel.zig").Channel;
pub const Group = @import("event/group.zig").Group;
pub const Future = @import("event/future.zig").Future;
test "import event tests" {
_ = @import("event/locked.zig");
@@ -10,4 +12,6 @@ test "import event tests" {
_ = @import("event/lock.zig");
_ = @import("event/tcp.zig");
_ = @import("event/channel.zig");
_ = @import("event/group.zig");
_ = @import("event/future.zig");
}


@@ -12,8 +12,8 @@ pub fn Channel(comptime T: type) type {
return struct {
loop: *Loop,
getters: std.atomic.QueueMpsc(GetNode),
putters: std.atomic.QueueMpsc(PutNode),
getters: std.atomic.Queue(GetNode),
putters: std.atomic.Queue(PutNode),
get_count: usize,
put_count: usize,
dispatch_lock: u8, // TODO make this a bool
@@ -46,8 +46,8 @@ pub fn Channel(comptime T: type) type {
.buffer_index = 0,
.dispatch_lock = 0,
.need_dispatch = 0,
.getters = std.atomic.QueueMpsc(GetNode).init(),
.putters = std.atomic.QueueMpsc(PutNode).init(),
.getters = std.atomic.Queue(GetNode).init(),
.putters = std.atomic.Queue(PutNode).init(),
.get_count = 0,
.put_count = 0,
});
@@ -81,7 +81,7 @@ pub fn Channel(comptime T: type) type {
.next = undefined,
.data = handle,
};
var queue_node = std.atomic.QueueMpsc(PutNode).Node{
var queue_node = std.atomic.Queue(PutNode).Node{
.data = PutNode{
.tick_node = &my_tick_node,
.data = data,
@@ -111,7 +111,7 @@ pub fn Channel(comptime T: type) type {
.next = undefined,
.data = handle,
};
var queue_node = std.atomic.QueueMpsc(GetNode).Node{
var queue_node = std.atomic.Queue(GetNode).Node{
.data = GetNode{
.ptr = &result,
.tick_node = &my_tick_node,

std/event/future.zig Normal file

@@ -0,0 +1,97 @@
const std = @import("../index.zig");
const assert = std.debug.assert;
const builtin = @import("builtin");
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
const Lock = std.event.Lock;
const Loop = std.event.Loop;
/// This is a value that starts out unavailable, until resolve() is called.
/// While it is unavailable, coroutines suspend when they try to get() it,
/// and then are resumed when resolve() is called.
/// At this point the value remains forever available, and another resolve() is not allowed.
pub fn Future(comptime T: type) type {
return struct {
lock: Lock,
data: T,
available: u8, // TODO make this a bool
const Self = this;
const Queue = std.atomic.Queue(promise);
pub fn init(loop: *Loop) Self {
return Self{
.lock = Lock.initLocked(loop),
.available = 0,
.data = undefined,
};
}
/// Obtain the value. If it's not available, wait until it becomes
/// available.
/// Thread-safe.
pub async fn get(self: *Self) *T {
if (@atomicLoad(u8, &self.available, AtomicOrder.SeqCst) == 1) {
return &self.data;
}
const held = await (async self.lock.acquire() catch unreachable);
held.release();
return &self.data;
}
/// Make the data become available. May be called only once.
/// Before calling this, modify the `data` property.
pub fn resolve(self: *Self) void {
const prev = @atomicRmw(u8, &self.available, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
assert(prev == 0); // resolve() called twice
Lock.Held.release(Lock.Held{ .lock = &self.lock });
}
};
}
test "std.event.Future" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
const allocator = &da.allocator;
var loop: Loop = undefined;
try loop.initMultiThreaded(allocator);
defer loop.deinit();
const handle = try async<allocator> testFuture(&loop);
defer cancel handle;
loop.run();
}
async fn testFuture(loop: *Loop) void {
suspend |p| {
resume p;
}
var future = Future(i32).init(loop);
const a = async waitOnFuture(&future) catch @panic("memory");
const b = async waitOnFuture(&future) catch @panic("memory");
const c = async resolveFuture(&future) catch @panic("memory");
const result = (await a) + (await b);
cancel c;
assert(result == 12);
}
async fn waitOnFuture(future: *Future(i32)) i32 {
suspend |p| {
resume p;
}
return (await (async future.get() catch @panic("memory"))).*;
}
async fn resolveFuture(future: *Future(i32)) void {
suspend |p| {
resume p;
}
future.data = 6;
future.resolve();
}
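
The producer side never needs to suspend; a sketch of the intended split, assuming the Future(i32) defined above:

fn publishAnswer(future: *Future(i32)) void {
    future.data = 6; // fill in the payload first...
    future.resolve(); // ...then publish it; blocked get() coroutines resume
}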

std/event/group.zig Normal file

@@ -0,0 +1,158 @@
const std = @import("../index.zig");
const builtin = @import("builtin");
const Lock = std.event.Lock;
const Loop = std.event.Loop;
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
const assert = std.debug.assert;
/// ReturnType should be `void` or `E!void`
pub fn Group(comptime ReturnType: type) type {
return struct {
coro_stack: Stack,
alloc_stack: Stack,
lock: Lock,
const Self = this;
const Error = switch (@typeInfo(ReturnType)) {
builtin.TypeId.ErrorUnion => |payload| payload.error_set,
else => void,
};
const Stack = std.atomic.Stack(promise->ReturnType);
pub fn init(loop: *Loop) Self {
return Self{
.coro_stack = Stack.init(),
.alloc_stack = Stack.init(),
.lock = Lock.init(loop),
};
}
/// Add a promise to the group. Thread-safe.
pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) {
const node = try self.lock.loop.allocator.create(Stack.Node{
.next = undefined,
.data = handle,
});
self.alloc_stack.push(node);
}
/// This is equivalent to an async call, but the async function is added to the group, instead
/// of returning a promise. func must be async and have return type void.
/// Thread-safe.
pub fn call(self: *Self, comptime func: var, args: ...) (error{OutOfMemory}!void) {
const S = struct {
async fn asyncFunc(node: **Stack.Node, args2: ...) ReturnType {
// TODO this is a hack to make the memory following be inside the coro frame
suspend |p| {
var my_node: Stack.Node = undefined;
node.* = &my_node;
resume p;
}
// TODO this allocation elision should be guaranteed because we await it in
// this coro frame
return await (async func(args2) catch unreachable);
}
};
var node: *Stack.Node = undefined;
const handle = try async<self.lock.loop.allocator> S.asyncFunc(&node, args);
node.* = Stack.Node{
.next = undefined,
.data = handle,
};
self.coro_stack.push(node);
}
/// Wait for all the calls and promises of the group to complete.
/// Thread-safe.
pub async fn wait(self: *Self) ReturnType {
// TODO catch unreachable because the allocation can be grouped with
// the coro frame allocation
const held = await (async self.lock.acquire() catch unreachable);
defer held.release();
while (self.coro_stack.pop()) |node| {
if (Error == void) {
await node.data;
} else {
(await node.data) catch |err| {
self.cancelAll();
return err;
};
}
}
while (self.alloc_stack.pop()) |node| {
const handle = node.data;
self.lock.loop.allocator.destroy(node);
if (Error == void) {
await handle;
} else {
(await handle) catch |err| {
self.cancelAll();
return err;
};
}
}
}
/// Cancel all the outstanding promises. May only be called if wait was never called.
pub fn cancelAll(self: *Self) void {
while (self.coro_stack.pop()) |node| {
cancel node.data;
}
while (self.alloc_stack.pop()) |node| {
cancel node.data;
self.lock.loop.allocator.destroy(node);
}
}
};
}
test "std.event.Group" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
const allocator = &da.allocator;
var loop: Loop = undefined;
try loop.initMultiThreaded(allocator);
defer loop.deinit();
const handle = try async<allocator> testGroup(&loop);
defer cancel handle;
loop.run();
}
async fn testGroup(loop: *Loop) void {
var count: usize = 0;
var group = Group(void).init(loop);
group.add(async sleepALittle(&count) catch @panic("memory")) catch @panic("memory");
group.call(increaseByTen, &count) catch @panic("memory");
await (async group.wait() catch @panic("memory"));
assert(count == 11);
var another = Group(error!void).init(loop);
another.add(async somethingElse() catch @panic("memory")) catch @panic("memory");
another.call(doSomethingThatFails) catch @panic("memory");
std.debug.assertError(await (async another.wait() catch @panic("memory")), error.ItBroke);
}
async fn sleepALittle(count: *usize) void {
std.os.time.sleep(0, 1000000);
_ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
}
async fn increaseByTen(count: *usize) void {
var i: usize = 0;
while (i < 10) : (i += 1) {
_ = @atomicRmw(usize, count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
}
}
async fn doSomethingThatFails() error!void {}
async fn somethingElse() error!void {
return error.ItBroke;
}


@@ -15,7 +15,7 @@ pub const Lock = struct {
queue: Queue,
queue_empty_bit: u8, // TODO make this a bool
const Queue = std.atomic.QueueMpsc(promise);
const Queue = std.atomic.Queue(promise);
pub const Held = struct {
lock: *Lock,
@@ -73,6 +73,15 @@ pub const Lock = struct {
};
}
pub fn initLocked(loop: *Loop) Lock {
return Lock{
.loop = loop,
.shared_bit = 1,
.queue = Queue.init(),
.queue_empty_bit = 1,
};
}
/// Must be called when not locked. Not thread safe.
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *Lock) void {
@@ -81,7 +90,7 @@ pub const Lock = struct {
}
pub async fn acquire(self: *Lock) Held {
s: suspend |handle| {
suspend |handle| {
// TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{
.data = handle,


@@ -9,7 +9,7 @@ const AtomicOrder = builtin.AtomicOrder;
pub const Loop = struct {
allocator: *mem.Allocator,
next_tick_queue: std.atomic.QueueMpsc(promise),
next_tick_queue: std.atomic.Queue(promise),
os_data: OsData,
final_resume_node: ResumeNode,
dispatch_lock: u8, // TODO make this a bool
@@ -21,7 +21,7 @@ pub const Loop = struct {
available_eventfd_resume_nodes: std.atomic.Stack(ResumeNode.EventFd),
eventfd_resume_nodes: []std.atomic.Stack(ResumeNode.EventFd).Node,
pub const NextTickNode = std.atomic.QueueMpsc(promise).Node;
pub const NextTickNode = std.atomic.Queue(promise).Node;
pub const ResumeNode = struct {
id: Id,
@@ -77,7 +77,7 @@ pub const Loop = struct {
.pending_event_count = 0,
.allocator = allocator,
.os_data = undefined,
.next_tick_queue = std.atomic.QueueMpsc(promise).init(),
.next_tick_queue = std.atomic.Queue(promise).init(),
.dispatch_lock = 1, // start locked so threads go directly into epoll wait
.extra_threads = undefined,
.available_eventfd_resume_nodes = std.atomic.Stack(ResumeNode.EventFd).init(),
@@ -101,7 +101,6 @@ pub const Loop = struct {
errdefer self.deinitOsData();
}
/// must call stop before deinit
pub fn deinit(self: *Loop) void {
self.deinitOsData();
self.allocator.free(self.extra_threads);
@@ -382,6 +381,21 @@ pub const Loop = struct {
return async<self.allocator> S.asyncFunc(self, &handle, args);
}
/// Awaiting a yield lets the event loop run, starting any unstarted async operations.
/// Note that async operations automatically start when a function yields for any other reason,
/// for example, when async I/O is performed. This function is intended to be used only when
/// CPU bound tasks would be waiting in the event loop but never get started because no async I/O
/// is performed.
pub async fn yield(self: *Loop) void {
suspend |p| {
var my_tick_node = Loop.NextTickNode{
.next = undefined,
.data = p,
};
self.onNextTick(&my_tick_node);
}
}
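
A sketch of the intended call site (hypothetical names): a CPU-bound coroutine that periodically yields so queued async work can start:

async fn crunch(loop: *Loop, items: []const u32) void {
    for (items) |item| {
        // without the yield, nothing else queued on the loop would run
        // until this coroutine finishes
        await (async loop.yield() catch @panic("memory"));
        expensiveStep(item); // hypothetical CPU-bound work
    }
}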
fn workerRun(self: *Loop) void {
start_over: while (true) {
if (@atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) == 0) {


@@ -302,8 +302,17 @@ pub const FixedBufferAllocator = struct {
}
fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
assert(old_mem.len <= self.end_index);
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len) {
const start_index = self.end_index - old_mem.len;
const new_end_index = start_index + new_size;
if (new_end_index > self.buffer.len) return error.OutOfMemory;
const result = self.buffer[start_index..new_end_index];
self.end_index = new_end_index;
return result;
} else {
const result = try alloc(allocator, new_size, alignment);
mem.copy(u8, result, old_mem);
@@ -442,6 +451,7 @@ test "DirectAllocator" {
const allocator = &direct_allocator.allocator;
try testAllocator(allocator);
try testAllocatorAligned(allocator, 16);
try testAllocatorLargeAlignment(allocator);
}
@@ -453,6 +463,7 @@ test "ArenaAllocator" {
defer arena_allocator.deinit();
try testAllocator(&arena_allocator.allocator);
try testAllocatorAligned(&arena_allocator.allocator, 16);
try testAllocatorLargeAlignment(&arena_allocator.allocator);
}
@@ -461,35 +472,98 @@ test "FixedBufferAllocator" {
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
try testAllocator(&fixed_buffer_allocator.allocator);
try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
}
test "FixedBufferAllocator Reuse memory on realloc" {
var small_fixed_buffer: [10]u8 = undefined;
// check if we re-use the memory
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
assert(slice0.len == 5);
var slice1 = try fixed_buffer_allocator.allocator.realloc(u8, slice0, 10);
assert(slice1.ptr == slice0.ptr);
assert(slice1.len == 10);
debug.assertError(fixed_buffer_allocator.allocator.realloc(u8, slice1, 11), error.OutOfMemory);
}
// check that we don't re-use the memory if it's not the most recent block
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
slice0[0] = 1;
slice0[1] = 2;
var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
var slice2 = try fixed_buffer_allocator.allocator.realloc(u8, slice0, 4);
assert(slice0.ptr != slice2.ptr);
assert(slice1.ptr != slice2.ptr);
assert(slice2[0] == 1);
assert(slice2[1] == 2);
}
}
test "ThreadSafeFixedBufferAllocator" {
var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
try testAllocator(&fixed_buffer_allocator.allocator);
try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
}
fn testAllocator(allocator: *mem.Allocator) !void {
var slice = try allocator.alloc(*i32, 100);
assert(slice.len == 100);
for (slice) |*item, i| {
item.* = try allocator.create(@intCast(i32, i));
}
for (slice) |item, i| {
slice = try allocator.realloc(*i32, slice, 20000);
assert(slice.len == 20000);
for (slice[0..100]) |item, i| {
assert(item.* == @intCast(i32, i));
allocator.destroy(item);
}
slice = try allocator.realloc(*i32, slice, 20000);
slice = try allocator.realloc(*i32, slice, 50);
assert(slice.len == 50);
slice = try allocator.realloc(*i32, slice, 25);
assert(slice.len == 25);
slice = try allocator.realloc(*i32, slice, 0);
assert(slice.len == 0);
slice = try allocator.realloc(*i32, slice, 10);
assert(slice.len == 10);
allocator.free(slice);
}
fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void {
// initial
var slice = try allocator.alignedAlloc(u8, alignment, 10);
assert(slice.len == 10);
// grow
slice = try allocator.alignedRealloc(u8, alignment, slice, 100);
assert(slice.len == 100);
// shrink
slice = try allocator.alignedRealloc(u8, alignment, slice, 10);
assert(slice.len == 10);
// go to zero
slice = try allocator.alignedRealloc(u8, alignment, slice, 0);
assert(slice.len == 0);
// realloc from zero
slice = try allocator.alignedRealloc(u8, alignment, slice, 100);
assert(slice.len == 100);
// shrink with shrink
slice = allocator.alignedShrink(u8, alignment, slice, 10);
assert(slice.len == 10);
// shrink to zero
slice = allocator.alignedShrink(u8, alignment, slice, 0);
assert(slice.len == 0);
}
fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
// Maybe a platform's page_size is actually the same as or
// very near usize?


@@ -42,7 +42,7 @@ pub const Symbol = struct {
name: []const u8,
address: u64,
fn addressLessThan(lhs: *const Symbol, rhs: *const Symbol) bool {
fn addressLessThan(lhs: Symbol, rhs: Symbol) bool {
return lhs.address < rhs.address;
}
};


@@ -23,7 +23,10 @@ pub const Allocator = struct {
/// * this function must return successfully.
/// * alignment <= alignment of old_mem.ptr
///
/// The returned newly allocated memory is undefined.
/// When `reallocFn` returns,
/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
/// as `old_mem` was when `reallocFn` is called. The bytes of
/// `return_value[old_mem.len..]` have undefined values.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
reallocFn: fn (self: *Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
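
Put concretely, this is the property a conforming reallocFn must now preserve; a sketch (not from the source) using DirectAllocator:

const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;

test "realloc preserves the surviving prefix" {
    var da = std.heap.DirectAllocator.init();
    defer da.deinit();
    const allocator = &da.allocator;

    var slice = try allocator.alloc(u8, 5);
    mem.copy(u8, slice, "hello");
    slice = try allocator.realloc(u8, slice, 8);
    assert(mem.eql(u8, slice[0..5], "hello")); // bytes 5..8 have undefined values
    allocator.free(slice);
}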
@@ -71,7 +74,7 @@ pub const Allocator = struct {
pub fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
if (old_mem.len == 0) {
return self.alloc(T, n);
return self.alignedAlloc(T, alignment, n);
}
if (n == 0) {
self.free(old_mem);
@@ -125,6 +128,7 @@ pub const Allocator = struct {
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
/// dest.ptr must be <= src.ptr.
pub fn copy(comptime T: type, dest: []T, source: []const T) void {
// TODO instead of manually doing this check for the whole array
// and turning off runtime safety, the compiler should detect loops like
@@ -135,6 +139,23 @@ pub fn copy(comptime T: type, dest: []T, source: []const T) void {
dest[i] = s;
}
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
/// dest.ptr must be >= src.ptr.
pub fn copyBackwards(comptime T: type, dest: []T, source: []const T) void {
// TODO instead of manually doing this check for the whole array
// and turning off runtime safety, the compiler should detect loops like
// this and automatically omit safety checks for loops
@setRuntimeSafety(false);
assert(dest.len >= source.len);
var i = source.len;
while (i > 0) {
i -= 1;
dest[i] = source[i];
}
}
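
copyBackwards covers the overlap direction that copy() forbids; a minimal sketch shifting a slice right within one buffer:

const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;

test "copyBackwards overlapping shift" {
    var buf = []u8{ 1, 2, 3, 4, 5 };
    // dest starts one past source in the same buffer; a forward copy
    // would clobber source bytes before reading them
    mem.copyBackwards(u8, buf[1..5], buf[0..4]);
    assert(mem.eql(u8, buf, []u8{ 1, 1, 2, 3, 4 }));
}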
pub fn set(comptime T: type, dest: []T, value: T) void {
for (dest) |*d|
d.* = value;


@@ -59,7 +59,6 @@ pub extern "kernel32" stdcallcc fn CreateSymbolicLinkA(
dwFlags: DWORD,
) BOOLEAN;
pub extern "kernel32" stdcallcc fn CreateIoCompletionPort(FileHandle: HANDLE, ExistingCompletionPort: ?HANDLE, CompletionKey: ULONG_PTR, NumberOfConcurrentThreads: DWORD) ?HANDLE;
pub extern "kernel32" stdcallcc fn CreateThread(lpThreadAttributes: ?LPSECURITY_ATTRIBUTES, dwStackSize: SIZE_T, lpStartAddress: LPTHREAD_START_ROUTINE, lpParameter: ?LPVOID, dwCreationFlags: DWORD, lpThreadId: ?LPDWORD) ?HANDLE;
@@ -134,7 +133,6 @@ pub extern "kernel32" stdcallcc fn MoveFileExA(
dwFlags: DWORD,
) BOOL;
pub extern "kernel32" stdcallcc fn PostQueuedCompletionStatus(CompletionPort: HANDLE, dwNumberOfBytesTransferred: DWORD, dwCompletionKey: ULONG_PTR, lpOverlapped: ?*OVERLAPPED) BOOL;
pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *LARGE_INTEGER) BOOL;


@@ -215,10 +215,7 @@ pub fn windowsFindNextFile(handle: windows.HANDLE, find_file_data: *windows.WIN3
return true;
}
pub const WindowsCreateIoCompletionPortError = error {
Unexpected,
};
pub const WindowsCreateIoCompletionPortError = error{Unexpected};
pub fn windowsCreateIoCompletionPort(file_handle: windows.HANDLE, existing_completion_port: ?windows.HANDLE, completion_key: usize, concurrent_thread_count: windows.DWORD) !windows.HANDLE {
const handle = windows.CreateIoCompletionPort(file_handle, existing_completion_port, completion_key, concurrent_thread_count) orelse {
@@ -230,9 +227,7 @@ pub fn windowsCreateIoCompletionPort(file_handle: windows.HANDLE, existing_compl
return handle;
}
pub const WindowsPostQueuedCompletionStatusError = error {
Unexpected,
};
pub const WindowsPostQueuedCompletionStatusError = error{Unexpected};
pub fn windowsPostQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_transferred_count: windows.DWORD, completion_key: usize, lpOverlapped: ?*windows.OVERLAPPED) WindowsPostQueuedCompletionStatusError!void {
if (windows.PostQueuedCompletionStatus(completion_port, bytes_transferred_count, completion_key, lpOverlapped) == 0) {
@@ -243,7 +238,7 @@ pub fn windowsPostQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_
}
}
pub const WindowsWaitResult = error {
pub const WindowsWaitResult = error{
Normal,
Aborted,
};


@@ -5,7 +5,7 @@ const math = std.math;
const builtin = @import("builtin");
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required).
pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) void {
pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) void {
{
var i: usize = 1;
while (i < items.len) : (i += 1) {
@@ -30,7 +30,7 @@ const Range = struct {
};
}
fn length(self: *const Range) usize {
fn length(self: Range) usize {
return self.end - self.start;
}
};
@@ -108,7 +108,7 @@ const Pull = struct {
/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required).
/// Currently implemented as block sort.
pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) void {
pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) void {
// Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
var cache: [512]T = undefined;
@@ -131,16 +131,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *con
// http://pages.ripco.net/~jgamble/nw.html
var iterator = Iterator.init(items.len, 4);
while (!iterator.finished()) {
var order = []u8{
0,
1,
2,
3,
4,
5,
6,
7,
};
var order = []u8{ 0, 1, 2, 3, 4, 5, 6, 7 };
const range = iterator.nextRange();
const sliced_items = items[range.start..];
@@ -741,7 +732,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *con
}
// merge operation without a buffer
fn mergeInPlace(comptime T: type, items: []T, A_arg: *const Range, B_arg: *const Range, lessThan: fn (*const T, *const T) bool) void {
fn mergeInPlace(comptime T: type, items: []T, A_arg: Range, B_arg: Range, lessThan: fn (T, T) bool) void {
if (A_arg.length() == 0 or B_arg.length() == 0) return;
// this just repeatedly binary searches into B and rotates A into position.
@@ -762,8 +753,8 @@ fn mergeInPlace(comptime T: type, items: []T, A_arg: *const Range, B_arg: *const
// again, this is NOT a general-purpose solution it only works well in this case!
// kind of like how the O(n^2) insertion sort is used in some places
var A = A_arg.*;
var B = B_arg.*;
var A = A_arg;
var B = B_arg;
while (true) {
// find the first place in B where the first item in A needs to be inserted
@@ -783,7 +774,7 @@ fn mergeInPlace(comptime T: type, items: []T, A_arg: *const Range, B_arg: *const
}
// merge operation using an internal buffer
fn mergeInternal(comptime T: type, items: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, buffer: *const Range) void {
fn mergeInternal(comptime T: type, items: []T, A: Range, B: Range, lessThan: fn (T, T) bool, buffer: Range) void {
// whenever we find a value to add to the final array, swap it with the value that's already in that spot
// when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
var A_count: usize = 0;
@@ -819,7 +810,7 @@ fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_s
// combine a linear search with a binary search to reduce the number of comparisons in situations
// where we have some idea as to how many unique values there are and where the next value might be
fn findFirstForward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
fn findFirstForward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -833,7 +824,7 @@ fn findFirstForward(comptime T: type, items: []T, value: *const T, range: *const
return binaryFirst(T, items, value, Range.init(index - skip, index), lessThan);
}
fn findFirstBackward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
fn findFirstBackward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -847,7 +838,7 @@ fn findFirstBackward(comptime T: type, items: []T, value: *const T, range: *cons
return binaryFirst(T, items, value, Range.init(index, index + skip), lessThan);
}
fn findLastForward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
fn findLastForward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -861,7 +852,7 @@ fn findLastForward(comptime T: type, items: []T, value: *const T, range: *const
return binaryLast(T, items, value, Range.init(index - skip, index), lessThan);
}
fn findLastBackward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
fn findLastBackward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -875,7 +866,7 @@ fn findLastBackward(comptime T: type, items: []T, value: *const T, range: *const
return binaryLast(T, items, value, Range.init(index, index + skip), lessThan);
}
fn binaryFirst(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool) usize {
fn binaryFirst(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
@@ -893,7 +884,7 @@ fn binaryFirst(comptime T: type, items: []T, value: *const T, range: *const Rang
return start;
}
fn binaryLast(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool) usize {
fn binaryLast(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
@@ -911,7 +902,7 @@ fn binaryLast(comptime T: type, items: []T, value: *const T, range: *const Range
return start;
}
fn mergeInto(comptime T: type, from: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, into: []T) void {
fn mergeInto(comptime T: type, from: []T, A: Range, B: Range, lessThan: fn (T, T) bool, into: []T) void {
var A_index: usize = A.start;
var B_index: usize = B.start;
const A_last = A.end;
@@ -941,7 +932,7 @@ fn mergeInto(comptime T: type, from: []T, A: *const Range, B: *const Range, less
}
}
fn mergeExternal(comptime T: type, items: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, cache: []T) void {
fn mergeExternal(comptime T: type, items: []T, A: Range, B: Range, lessThan: fn (T, T) bool, cache: []T) void {
// A fits into the cache, so use that instead of the internal buffer
var A_index: usize = 0;
var B_index: usize = B.start;
@@ -969,27 +960,32 @@ fn mergeExternal(comptime T: type, items: []T, A: *const Range, B: *const Range,
mem.copy(T, items[insert_index..], cache[A_index..A_last]);
}
fn swap(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool, order: *[8]u8, x: usize, y: usize) void {
fn swap(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool, order: *[8]u8, x: usize, y: usize) void {
if (lessThan(items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(items[x], items[y]))) {
mem.swap(T, &items[x], &items[y]);
mem.swap(u8, &(order.*)[x], &(order.*)[y]);
}
}
fn i32asc(lhs: *const i32, rhs: *const i32) bool {
    return lhs.* < rhs.*;
}
fn i32desc(lhs: *const i32, rhs: *const i32) bool {
    return rhs.* < lhs.*;
}
fn u8asc(lhs: *const u8, rhs: *const u8) bool {
    return lhs.* < rhs.*;
}
fn u8desc(lhs: *const u8, rhs: *const u8) bool {
    return rhs.* < lhs.*;
}
// Use these to generate a comparator function for a given type. e.g. `sort(u8, slice, asc(u8))`.
pub fn asc(comptime T: type) fn (T, T) bool {
    const impl = struct {
        fn inner(a: T, b: T) bool {
            return a < b;
        }
    };
    return impl.inner;
}
pub fn desc(comptime T: type) fn (T, T) bool {
    const impl = struct {
        fn inner(a: T, b: T) bool {
            return a > b;
        }
    };
    return impl.inner;
}
test "stable sort" {
@@ -998,119 +994,38 @@ test "stable sort" {
}
fn testStableSort() void {
var expected = []IdAndValue{
IdAndValue{
.id = 0,
.value = 0,
},
IdAndValue{
.id = 1,
.value = 0,
},
IdAndValue{
.id = 2,
.value = 0,
},
IdAndValue{
.id = 0,
.value = 1,
},
IdAndValue{
.id = 1,
.value = 1,
},
IdAndValue{
.id = 2,
.value = 1,
},
IdAndValue{
.id = 0,
.value = 2,
},
IdAndValue{
.id = 1,
.value = 2,
},
IdAndValue{
.id = 2,
.value = 2,
},
IdAndValue{ .id = 0, .value = 0 },
IdAndValue{ .id = 1, .value = 0 },
IdAndValue{ .id = 2, .value = 0 },
IdAndValue{ .id = 0, .value = 1 },
IdAndValue{ .id = 1, .value = 1 },
IdAndValue{ .id = 2, .value = 1 },
IdAndValue{ .id = 0, .value = 2 },
IdAndValue{ .id = 1, .value = 2 },
IdAndValue{ .id = 2, .value = 2 },
};
var cases = [][9]IdAndValue{
[]IdAndValue{
IdAndValue{
.id = 0,
.value = 0,
},
IdAndValue{
.id = 0,
.value = 1,
},
IdAndValue{
.id = 0,
.value = 2,
},
IdAndValue{
.id = 1,
.value = 0,
},
IdAndValue{
.id = 1,
.value = 1,
},
IdAndValue{
.id = 1,
.value = 2,
},
IdAndValue{
.id = 2,
.value = 0,
},
IdAndValue{
.id = 2,
.value = 1,
},
IdAndValue{
.id = 2,
.value = 2,
},
IdAndValue{ .id = 0, .value = 0 },
IdAndValue{ .id = 0, .value = 1 },
IdAndValue{ .id = 0, .value = 2 },
IdAndValue{ .id = 1, .value = 0 },
IdAndValue{ .id = 1, .value = 1 },
IdAndValue{ .id = 1, .value = 2 },
IdAndValue{ .id = 2, .value = 0 },
IdAndValue{ .id = 2, .value = 1 },
IdAndValue{ .id = 2, .value = 2 },
},
[]IdAndValue{
IdAndValue{
.id = 0,
.value = 2,
},
IdAndValue{
.id = 0,
.value = 1,
},
IdAndValue{
.id = 0,
.value = 0,
},
IdAndValue{
.id = 1,
.value = 2,
},
IdAndValue{
.id = 1,
.value = 1,
},
IdAndValue{
.id = 1,
.value = 0,
},
IdAndValue{
.id = 2,
.value = 2,
},
IdAndValue{
.id = 2,
.value = 1,
},
IdAndValue{
.id = 2,
.value = 0,
},
IdAndValue{ .id = 0, .value = 2 },
IdAndValue{ .id = 0, .value = 1 },
IdAndValue{ .id = 0, .value = 0 },
IdAndValue{ .id = 1, .value = 2 },
IdAndValue{ .id = 1, .value = 1 },
IdAndValue{ .id = 1, .value = 0 },
IdAndValue{ .id = 2, .value = 2 },
IdAndValue{ .id = 2, .value = 1 },
IdAndValue{ .id = 2, .value = 0 },
},
};
for (cases) |*case| {
@@ -1125,8 +1040,8 @@ const IdAndValue = struct {
id: usize,
value: i32,
};
fn cmpByValue(a: *const IdAndValue, b: *const IdAndValue) bool {
return i32asc(a.value, b.value);
fn cmpByValue(a: IdAndValue, b: IdAndValue) bool {
return asc(i32)(a.value, b.value);
}
test "std.sort" {
@@ -1161,7 +1076,7 @@ test "std.sort" {
var buf: [8]u8 = undefined;
const slice = buf[0..case[0].len];
mem.copy(u8, slice, case[0]);
sort(u8, slice, u8asc);
sort(u8, slice, asc(u8));
assert(mem.eql(u8, slice, case[1]));
}
@@ -1175,48 +1090,20 @@ test "std.sort" {
[]i32{1},
},
[][]const i32{
[]i32{
0,
1,
},
[]i32{
0,
1,
},
[]i32{ 0, 1 },
[]i32{ 0, 1 },
},
[][]const i32{
[]i32{
1,
0,
},
[]i32{
0,
1,
},
[]i32{ 1, 0 },
[]i32{ 0, 1 },
},
[][]const i32{
[]i32{
1,
-1,
0,
},
[]i32{
-1,
0,
1,
},
[]i32{ 1, -1, 0 },
[]i32{ -1, 0, 1 },
},
[][]const i32{
[]i32{
2,
1,
3,
},
[]i32{
1,
2,
3,
},
[]i32{ 2, 1, 3 },
[]i32{ 1, 2, 3 },
},
};
@@ -1224,7 +1111,7 @@ test "std.sort" {
var buf: [8]i32 = undefined;
const slice = buf[0..case[0].len];
mem.copy(i32, slice, case[0]);
sort(i32, slice, i32asc);
sort(i32, slice, asc(i32));
assert(mem.eql(i32, slice, case[1]));
}
}
@@ -1240,48 +1127,20 @@ test "std.sort descending" {
[]i32{1},
},
[][]const i32{
[]i32{
0,
1,
},
[]i32{
1,
0,
},
[]i32{ 0, 1 },
[]i32{ 1, 0 },
},
[][]const i32{
[]i32{
1,
0,
},
[]i32{
1,
0,
},
[]i32{ 1, 0 },
[]i32{ 1, 0 },
},
[][]const i32{
[]i32{
1,
-1,
0,
},
[]i32{
1,
0,
-1,
},
[]i32{ 1, -1, 0 },
[]i32{ 1, 0, -1 },
},
[][]const i32{
[]i32{
2,
1,
3,
},
[]i32{
3,
2,
1,
},
[]i32{ 2, 1, 3 },
[]i32{ 3, 2, 1 },
},
};
@@ -1289,28 +1148,16 @@ test "std.sort descending" {
var buf: [8]i32 = undefined;
const slice = buf[0..case[0].len];
mem.copy(i32, slice, case[0]);
sort(i32, slice, i32desc);
sort(i32, slice, desc(i32));
assert(mem.eql(i32, slice, case[1]));
}
}
test "another sort case" {
var arr = []i32{
5,
3,
1,
2,
4,
};
sort(i32, arr[0..], i32asc);
var arr = []i32{ 5, 3, 1, 2, 4 };
sort(i32, arr[0..], asc(i32));
assert(mem.eql(i32, arr, []i32{
1,
2,
3,
4,
5,
}));
assert(mem.eql(i32, arr, []i32{ 1, 2, 3, 4, 5 }));
}
test "sort fuzz testing" {
@@ -1345,7 +1192,7 @@ fn fuzzTest(rng: *std.rand.Random) void {
}
}
pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) T {
pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) T {
var i: usize = 0;
var smallest = items[0];
for (items[1..]) |item| {
@@ -1356,7 +1203,7 @@ pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *cons
return smallest;
}
pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) T {
pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) T {
var i: usize = 0;
var biggest = items[0];
for (items[1..]) |item| {


@@ -970,14 +970,8 @@ pub const Node = struct {
pub const Defer = struct {
base: Node,
defer_token: TokenIndex,
kind: Kind,
expr: *Node,
const Kind = enum {
Error,
Unconditional,
};
pub fn iterate(self: *Defer, index: usize) ?*Node {
var i = index;


@@ -1041,11 +1041,6 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const node = try arena.create(ast.Node.Defer{
.base = ast.Node{ .id = ast.Node.Id.Defer },
.defer_token = token_index,
.kind = switch (token_ptr.id) {
Token.Id.Keyword_defer => ast.Node.Defer.Kind.Unconditional,
Token.Id.Keyword_errdefer => ast.Node.Defer.Kind.Error,
else => unreachable,
},
.expr = undefined,
});
const node_ptr = try block.statements.addOne();


@@ -9,6 +9,7 @@ comptime {
_ = @import("cases/bitcast.zig");
_ = @import("cases/bool.zig");
_ = @import("cases/bugs/1111.zig");
_ = @import("cases/bugs/1230.zig");
_ = @import("cases/bugs/394.zig");
_ = @import("cases/bugs/655.zig");
_ = @import("cases/bugs/656.zig");

test/cases/bugs/1230.zig Normal file

@@ -0,0 +1,14 @@
const assert = @import("std").debug.assert;
const S = extern struct {
x: i32,
};
extern fn ret_struct() S {
return S{ .x = 42 };
}
test "extern return small struct (bug 1230)" {
const s = ret_struct();
assert(s.x == 42);
}


@@ -7,3 +7,24 @@ test "optional pointer to size zero struct" {
var o: ?*EmptyStruct = &e;
assert(o != null);
}
test "equality compare nullable pointers" {
testNullPtrsEql();
comptime testNullPtrsEql();
}
fn testNullPtrsEql() void {
var number: i32 = 1234;
var x: ?*i32 = null;
var y: ?*i32 = null;
assert(x == y);
y = &number;
assert(x != y);
assert(x != &number);
assert(&number != x);
x = &number;
assert(x == y);
assert(x == &number);
assert(&number == x);
}


@@ -1,6 +1,33 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"optional pointer to void in extern struct",
\\const Foo = extern struct {
\\ x: ?*const void,
\\};
\\const Bar = extern struct {
\\ foo: Foo,
\\ y: i32,
\\};
\\export fn entry(bar: *Bar) void {}
,
".tmp_source.zig:2:5: error: extern structs cannot contain fields of type '?*const void'",
);
cases.add(
"use of comptime-known undefined function value",
\\const Cmd = struct {
\\ exec: fn () void,
\\};
\\export fn entry() void {
\\ const command = Cmd{ .exec = undefined };
\\ command.exec();
\\}
,
".tmp_source.zig:6:12: error: use of undefined value",
);
cases.add(
"use of comptime-known undefined function value",
\\const Cmd = struct {


@@ -0,0 +1,12 @@
const TestContext = @import("../../src-self-hosted/test.zig").TestContext;
pub fn addCases(ctx: *TestContext) !void {
try ctx.testCompileError(
\\export fn entry() void {}
\\export fn entry() void {}
, "1.zig", 2, 8, "exported symbol collision: 'entry'");
try ctx.testCompileError(
\\fn() void {}
, "1.zig", 1, 1, "missing function name");
}


@@ -47,12 +47,13 @@ const test_targets = []TestTarget{
const max_stdout_size = 1 * 1024 * 1024; // 1 MB
pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-compare-output", "Run the compare output tests"),
.test_index = 0,
.test_filter = test_filter,
.modes = modes,
}) catch unreachable;
compare_output.addCases(cases);
@@ -60,12 +61,13 @@ pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build
return cases.step;
}
pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-runtime-safety", "Run the runtime safety tests"),
.test_index = 0,
.test_filter = test_filter,
.modes = modes,
}) catch unreachable;
runtime_safety.addCases(cases);
@@ -73,12 +75,13 @@ pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build
return cases.step;
}
pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
const cases = b.allocator.create(CompileErrorContext{
.b = b,
.step = b.step("test-compile-errors", "Run the compile error tests"),
.test_index = 0,
.test_filter = test_filter,
.modes = modes,
}) catch unreachable;
compile_errors.addCases(cases);
@@ -99,12 +102,13 @@ pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build.
return cases.step;
}
pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-asm-link", "Run the assemble and link tests"),
.test_index = 0,
.test_filter = test_filter,
.modes = modes,
}) catch unreachable;
assemble_and_link.addCases(cases);
@@ -138,16 +142,11 @@ pub fn addGenHTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
return cases.step;
}
pub fn addPkgTests(b: *build.Builder, test_filter: ?[]const u8, root_src: []const u8, name: []const u8, desc: []const u8, with_lldb: bool) *build.Step {
pub fn addPkgTests(b: *build.Builder, test_filter: ?[]const u8, root_src: []const u8, name: []const u8, desc: []const u8, modes: []const Mode) *build.Step {
const step = b.step(b.fmt("test-{}", name), desc);
for (test_targets) |test_target| {
const is_native = (test_target.os == builtin.os and test_target.arch == builtin.arch);
for ([]Mode{
Mode.Debug,
Mode.ReleaseSafe,
Mode.ReleaseFast,
Mode.ReleaseSmall,
}) |mode| {
for (modes) |mode| {
for ([]bool{
false,
true,
@@ -166,18 +165,6 @@ pub fn addPkgTests(b: *build.Builder, test_filter: ?[]const u8, root_src: []cons
if (link_libc) {
these_tests.linkSystemLibrary("c");
}
if (with_lldb) {
these_tests.setExecCmd([]?[]const u8{
"lldb",
null,
"-o",
"run",
"-o",
"bt",
"-o",
"exit",
});
}
step.dependOn(&these_tests.step);
}
}
@@ -190,6 +177,7 @@ pub const CompareOutputContext = struct {
step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
modes: []const Mode,
const Special = enum {
None,
@@ -440,12 +428,7 @@ pub const CompareOutputContext = struct {
self.step.dependOn(&run_and_cmp_output.step);
},
Special.None => {
for ([]Mode{
Mode.Debug,
Mode.ReleaseSafe,
Mode.ReleaseFast,
Mode.ReleaseSmall,
}) |mode| {
for (self.modes) |mode| {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "{} {} ({})", "compare-output", case.name, @tagName(mode)) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
@@ -500,6 +483,7 @@ pub const CompileErrorContext = struct {
step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
modes: []const Mode,
const TestCase = struct {
name: []const u8,
@@ -690,10 +674,7 @@ pub const CompileErrorContext = struct {
pub fn addCase(self: *CompileErrorContext, case: *const TestCase) void {
const b = self.b;
for ([]Mode{
Mode.Debug,
Mode.ReleaseFast,
}) |mode| {
for (self.modes) |mode| {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "compile-error {} ({})", case.name, @tagName(mode)) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;