Merge tag '0.15.2' into zig0-0.15.2
Release 0.15.2
@@ -101,22 +101,22 @@ const SourceLocationIndex = enum(u32) {
fn sourceLocationLinkHtml(
sli: SourceLocationIndex,
out: *std.ArrayListUnmanaged(u8),
out: *std.ArrayList(u8),
focused: bool,
) Allocator.Error!void {
) error{OutOfMemory}!void {
const sl = sli.ptr();
try out.writer(gpa).print("<code{s}>", .{
try out.print(gpa, "<code{s}>", .{
@as([]const u8, if (focused) " class=\"status-running\"" else ""),
});
try sli.appendPath(out);
try out.writer(gpa).print(":{d}:{d} </code><button class=\"linkish\" onclick=\"wasm_exports.fuzzSelectSli({d});\">View</button>", .{
try out.print(gpa, ":{d}:{d} </code><button class=\"linkish\" onclick=\"wasm_exports.fuzzSelectSli({d});\">View</button>", .{
sl.line,
sl.column,
@intFromEnum(sli),
});
}

fn appendPath(sli: SourceLocationIndex, out: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
fn appendPath(sli: SourceLocationIndex, out: *std.ArrayList(u8)) error{OutOfMemory}!void {
const sl = sli.ptr();
const file = coverage.fileAt(sl.file);
const file_name = coverage.stringAt(file.basename);

@@ -294,7 +294,7 @@ fn updateStats() error{OutOfMemory}!void {
}

fn updateEntryPoints() error{OutOfMemory}!void {
var html: std.ArrayListUnmanaged(u8) = .empty;
var html: std.ArrayList(u8) = .empty;
defer html.deinit(gpa);
for (entry_points.items) |sli| {
try html.appendSlice(gpa, "<li>");
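
The hunks above follow the 0.15 `std.ArrayList` migration: the unmanaged list becomes plain `std.ArrayList`, and formatted printing moves from a derived writer (`list.writer(gpa).print(...)`) to a `print(gpa, ...)` method on the list itself. A minimal sketch of the new pattern, assuming only calls visible in this diff (the function name, arguments, and HTML fragment are illustrative):

    const std = @import("std");

    // Sketch of the list-printing pattern used above; `gpa` is any std.mem.Allocator.
    fn appendLink(gpa: std.mem.Allocator, html: *std.ArrayList(u8), line: u32, column: u32) !void {
        // Previously this was html.writer(gpa).print(...) on a std.ArrayListUnmanaged(u8).
        try html.print(gpa, "<code>{d}:{d}</code>", .{ line, column });
        try html.appendSlice(gpa, "</li>");
    }
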
@@ -44,7 +44,7 @@ pub fn genericResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
js.updateGeneric(msg.step_idx, inner_html.ptr, inner_html.len);
}

pub fn compileResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
pub fn compileResultMessage(msg_bytes: []u8) error{ OutOfMemory, WriteFailed }!void {
const max_table_rows = 500;

if (msg_bytes.len < @sizeOf(abi.CompileResult)) @panic("malformed CompileResult message");

@@ -166,10 +166,11 @@ pub fn compileResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
});
defer gpa.free(inner_html);

var file_table_html: std.ArrayListUnmanaged(u8) = .empty;
defer file_table_html.deinit(gpa);
var file_table_html: std.Io.Writer.Allocating = .init(gpa);
defer file_table_html.deinit();

for (slowest_files[0..@min(max_table_rows, slowest_files.len)]) |file| {
try file_table_html.writer(gpa).print(
try file_table_html.writer.print(
\\<tr>
\\ <th scope="row"><code>{f}</code></th>
\\ <td>{D}</td>

@@ -187,17 +188,17 @@ pub fn compileResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
});
}
if (slowest_files.len > max_table_rows) {
try file_table_html.writer(gpa).print(
try file_table_html.writer.print(
\\<tr><td colspan="4">{d} more rows omitted</td></tr>
\\
, .{slowest_files.len - max_table_rows});
}

var decl_table_html: std.ArrayListUnmanaged(u8) = .empty;
defer decl_table_html.deinit(gpa);
var decl_table_html: std.Io.Writer.Allocating = .init(gpa);
defer decl_table_html.deinit();

for (slowest_decls[0..@min(max_table_rows, slowest_decls.len)]) |decl| {
try decl_table_html.writer(gpa).print(
try decl_table_html.writer.print(
\\<tr>
\\ <th scope="row"><code>{f}</code></th>
\\ <th scope="row"><code>{f}</code></th>

@@ -219,7 +220,7 @@ pub fn compileResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
});
}
if (slowest_decls.len > max_table_rows) {
try decl_table_html.writer(gpa).print(
try decl_table_html.writer.print(
\\<tr><td colspan="6">{d} more rows omitted</td></tr>
\\
, .{slowest_decls.len - max_table_rows});

@@ -229,10 +230,10 @@ pub fn compileResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
hdr.step_idx,
inner_html.ptr,
inner_html.len,
file_table_html.items.ptr,
file_table_html.items.len,
decl_table_html.items.ptr,
decl_table_html.items.len,
file_table_html.written().ptr,
file_table_html.written().len,
decl_table_html.written().ptr,
decl_table_html.written().len,
hdr.flags.use_llvm,
);
}
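
Several hunks above replace `std.ArrayListUnmanaged(u8)` accumulation buffers with `std.Io.Writer.Allocating`. A minimal sketch of that pattern, assuming only what the diff shows (`init(gpa)`, the embedded `.writer` field, `written()`, `deinit()`); the row content is illustrative:

    const std = @import("std");

    fn buildRow(gpa: std.mem.Allocator, ns: u64) !void {
        var table_html: std.Io.Writer.Allocating = .init(gpa);
        defer table_html.deinit();
        // Formatted output goes through the generic writer field...
        try table_html.writer.print("<tr><td>{d}</td></tr>\n", .{ns});
        // ...and the accumulated bytes are read back with written() instead of .items.
        std.debug.print("{s}", .{table_html.written()});
    }
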
@@ -114,10 +114,10 @@ pub fn main() !void {
interestingness_argv.appendAssumeCapacity(checker_path);
interestingness_argv.appendSliceAssumeCapacity(argv);

var rendered = std.array_list.Managed(u8).init(gpa);
var rendered: std.Io.Writer.Allocating = .init(gpa);
defer rendered.deinit();

var astgen_input = std.array_list.Managed(u8).init(gpa);
var astgen_input: std.Io.Writer.Allocating = .init(gpa);
defer astgen_input.deinit();

var tree = try parse(gpa, root_source_file_path);

@@ -138,10 +138,10 @@ pub fn main() !void {
}
}

var fixups: Ast.Fixups = .{};
var fixups: Ast.Render.Fixups = .{};
defer fixups.deinit(gpa);

var more_fixups: Ast.Fixups = .{};
var more_fixups: Ast.Render.Fixups = .{};
defer more_fixups.deinit(gpa);

var rng = std.Random.DefaultPrng.init(seed);

@@ -188,15 +188,14 @@ pub fn main() !void {
try transformationsToFixups(gpa, arena, root_source_file_path, this_set, &fixups);

rendered.clearRetainingCapacity();
try tree.renderToArrayList(&rendered, fixups);
try tree.render(gpa, &rendered.writer, fixups);

// The transformations we applied may have resulted in unused locals,
// in which case we would like to add the respective discards.
{
try astgen_input.resize(rendered.items.len);
@memcpy(astgen_input.items, rendered.items);
try astgen_input.append(0);
const source_with_null = astgen_input.items[0 .. astgen_input.items.len - 1 :0];
try astgen_input.writer.writeAll(rendered.written());
try astgen_input.writer.writeByte(0);
const source_with_null = astgen_input.written()[0..(astgen_input.written().len - 1) :0];
var astgen_tree = try Ast.parse(gpa, source_with_null, .zig);
defer astgen_tree.deinit(gpa);
if (astgen_tree.errors.len != 0) {

@@ -228,12 +227,12 @@ pub fn main() !void {
}
if (more_fixups.count() != 0) {
rendered.clearRetainingCapacity();
try astgen_tree.renderToArrayList(&rendered, more_fixups);
try astgen_tree.render(gpa, &rendered.writer, more_fixups);
}
}
}

try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.items });
try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() });
// std.debug.print("trying this code:\n{s}\n", .{rendered.items});

const interestingness = try runCheck(arena, interestingness_argv.items);

@@ -273,8 +272,8 @@ pub fn main() !void {
// Revert the source back to not be transformed.
fixups.clearRetainingCapacity();
rendered.clearRetainingCapacity();
try tree.renderToArrayList(&rendered, fixups);
try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.items });
try tree.render(gpa, &rendered.writer, fixups);
try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() });

return std.process.cleanExit();
}

@@ -318,7 +317,7 @@ fn transformationsToFixups(
arena: Allocator,
root_source_file_path: []const u8,
transforms: []const Walk.Transformation,
fixups: *Ast.Fixups,
fixups: *Ast.Render.Fixups,
) !void {
fixups.clearRetainingCapacity();

@@ -359,7 +358,7 @@ fn transformationsToFixups(
other_file_ast.deinit(gpa);
}

var inlined_fixups: Ast.Fixups = .{};
var inlined_fixups: Ast.Render.Fixups = .{};
defer inlined_fixups.deinit(gpa);
if (std.fs.path.dirname(inline_imported_file.imported_string)) |dirname| {
inlined_fixups.rebase_imported_paths = dirname;

@@ -382,16 +381,16 @@ fn transformationsToFixups(
}
}

var other_source = std.array_list.Managed(u8).init(gpa);
var other_source: std.io.Writer.Allocating = .init(gpa);
defer other_source.deinit();
try other_source.appendSlice("struct {\n");
try other_file_ast.renderToArrayList(&other_source, inlined_fixups);
try other_source.appendSlice("}");
try other_source.writer.writeAll("struct {\n");
try other_file_ast.render(gpa, &other_source.writer, inlined_fixups);
try other_source.writer.writeAll("}");

try fixups.replace_nodes_with_string.put(
gpa,
inline_imported_file.builtin_call_node,
try arena.dupe(u8, other_source.items),
try arena.dupe(u8, other_source.written()),
);
},
};
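
The rendering hunks above change `Ast.renderToArrayList(&list, fixups)` into `Ast.render(gpa, writer, fixups)` driven by an allocating writer, and `Ast.Fixups` becomes `Ast.Render.Fixups`. A minimal sketch of the new call shape, assuming `Ast` is `std.zig.Ast` as in this tool; everything else mirrors calls visible in the diff:

    const std = @import("std");
    const Ast = std.zig.Ast;

    fn renderTree(gpa: std.mem.Allocator, tree: *Ast) ![]u8 {
        var rendered: std.Io.Writer.Allocating = .init(gpa);
        defer rendered.deinit();
        var fixups: Ast.Render.Fixups = .{};
        defer fixups.deinit(gpa);
        // New signature: render into any std.Io.Writer rather than an array list.
        try tree.render(gpa, &rendered.writer, fixups);
        // Copy the result out before the allocating writer is freed.
        return gpa.dupe(u8, rendered.written());
    }
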
@@ -501,6 +501,10 @@ fn walkExpression(w: *Walk, node: Ast.Node.Index) Error!void {
.@"asm",
=> return walkAsm(w, ast.fullAsm(node).?),

.asm_legacy => {
return walkAsmLegacy(w, ast.legacyAsm(node).?);
},

.enum_literal => {
return walkIdentifier(w, ast.nodeMainToken(node)); // name
},

@@ -665,7 +669,7 @@ fn walkStructInit(

fn walkCall(w: *Walk, call: Ast.full.Call) Error!void {
try walkExpression(w, call.ast.fn_expr);
try walkParamList(w, call.ast.params);
try walkExpressions(w, call.ast.params);
}

fn walkSlice(

@@ -830,7 +834,7 @@ fn walkWhile(w: *Walk, node_index: Ast.Node.Index, while_node: Ast.full.While) E
}

fn walkFor(w: *Walk, for_node: Ast.full.For) Error!void {
try walkParamList(w, for_node.ast.inputs);
try walkExpressions(w, for_node.ast.inputs);
try walkExpression(w, for_node.ast.then_expr);
if (for_node.ast.else_expr.unwrap()) |else_expr| {
try walkExpression(w, else_expr);

@@ -874,15 +878,12 @@ fn walkIf(w: *Walk, node_index: Ast.Node.Index, if_node: Ast.full.If) Error!void

fn walkAsm(w: *Walk, asm_node: Ast.full.Asm) Error!void {
try walkExpression(w, asm_node.ast.template);
for (asm_node.ast.items) |item| {
try walkExpression(w, item);
}
try walkExpressions(w, asm_node.ast.items);
}

fn walkParamList(w: *Walk, params: []const Ast.Node.Index) Error!void {
for (params) |param_node| {
try walkExpression(w, param_node);
}
fn walkAsmLegacy(w: *Walk, asm_node: Ast.full.AsmLegacy) Error!void {
try walkExpression(w, asm_node.ast.template);
try walkExpressions(w, asm_node.ast.items);
}

/// Check if it is already gutted (i.e. its body replaced with `@trap()`).
@@ -674,7 +674,7 @@ pub const Compiler = struct {
}

try file_reader.seekTo(entry.data_offset_from_start_of_file);
var header_bytes = (file_reader.interface.takeArray(16) catch {
var header_bytes: [16]u8 align(@alignOf(ico.BitmapHeader)) = (file_reader.interface.takeArray(16) catch {
return self.iconReadError(
error.UnexpectedEOF,
filename_utf8,

@@ -37,7 +37,7 @@ comptime {
@export(&__aeabi_memclr4, .{ .name = "__aeabi_memclr4", .linkage = common.linkage, .visibility = common.visibility });
@export(&__aeabi_memclr8, .{ .name = "__aeabi_memclr8", .linkage = common.linkage, .visibility = common.visibility });

if (builtin.os.tag == .linux) {
if (builtin.os.tag == .linux or builtin.os.tag == .freebsd) {
@export(&__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = common.linkage, .visibility = common.visibility });
}

@@ -44,7 +44,7 @@ pub fn build(b: *std.Build) void {
// Here we define an executable. An executable needs to have a root module
// which needs to expose a `main` function. While we could add a main function
// to the module defined above, it's sometimes preferable to split business
// business logic and the CLI into two separate modules.
// logic and the CLI into two separate modules.
//
// If your goal is to create a Zig library for others to use, consider if
// it might benefit from also exposing a CLI tool. A parser library for a
lib/libc/include/generic-glibc/arpa/inet.h (vendored)

@@ -101,10 +101,13 @@ extern char *inet_nsap_ntoa (int __len, const unsigned char *__cp,
char *__buf) __THROW;
#endif

// zig patch: inet was fortified in glibc 2.42
#if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 42) || __GLIBC__ > 2
#if __USE_FORTIFY_LEVEL > 0 && defined __fortify_function
/* Include functions with security checks. */
# include <bits/inet-fortified.h>
#endif
#endif

__END_DECLS

lib/libc/musl/src/fenv/loongarch64/fenv-sf.c (vendored, new file)

@@ -0,0 +1,3 @@
#ifdef __loongarch_soft_float
#include "../fenv.c"
#endif
@@ -2524,7 +2524,10 @@ pub const LazyPath = union(enum) {
.up = gen.up,
.sub_path = dupePathInner(allocator, gen.sub_path),
} },
.dependency => |dep| .{ .dependency = dep },
.dependency => |dep| .{ .dependency = .{
.dependency = dep.dependency,
.sub_path = dupePathInner(allocator, dep.sub_path),
} },
};
}
};

@@ -1827,7 +1827,26 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
_ = try std.fmt.bufPrint(&args_hex_hash, "{x}", .{&args_hash});

const args_file = "args" ++ fs.path.sep_str ++ args_hex_hash;
try b.cache_root.handle.writeFile(.{ .sub_path = args_file, .data = args });
if (b.cache_root.handle.access(args_file, .{})) |_| {
// The args file is already present from a previous run.
} else |err| switch (err) {
error.FileNotFound => {
try b.cache_root.handle.makePath("tmp");
const rand_int = std.crypto.random.int(u64);
const tmp_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
try b.cache_root.handle.writeFile(.{ .sub_path = tmp_path, .data = args });
defer b.cache_root.handle.deleteFile(tmp_path) catch {
// It's fine if the temporary file can't be cleaned up.
};
b.cache_root.handle.rename(tmp_path, args_file) catch |rename_err| switch (rename_err) {
error.PathAlreadyExists => {
// The args file was created by another concurrent build process.
},
else => |other_err| return other_err,
};
},
else => |other_err| return other_err,
}

const resolved_args_file = try mem.concat(arena, u8, &.{
"@",
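
The getZigArgs hunk above stops writing the args file directly and instead checks for it, writes a uniquely named temporary file, and renames it into place, so concurrent build processes never observe a half-written file. A standalone sketch of the same pattern with illustrative names (`dir`, `final_path`), not taken from this diff:

    const std = @import("std");

    fn writeFileRaceFree(dir: std.fs.Dir, final_path: []const u8, data: []const u8) !void {
        // Fast path: another run already produced the file.
        if (dir.access(final_path, .{})) |_| {
            return;
        } else |err| switch (err) {
            error.FileNotFound => {},
            else => |other| return other,
        }
        // Write to a uniquely named temporary file, then rename it into place.
        var name_buf: [32]u8 = undefined;
        const tmp_path = try std.fmt.bufPrint(&name_buf, "tmp-{x}", .{std.crypto.random.int(u64)});
        try dir.writeFile(.{ .sub_path = tmp_path, .data = data });
        defer dir.deleteFile(tmp_path) catch {};
        dir.rename(tmp_path, final_path) catch |err| switch (err) {
            error.PathAlreadyExists => {}, // A concurrent process won the race; that is fine.
            else => |other| return other,
        };
    }
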
@@ -163,6 +163,12 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
try argv_list.append("-fno-clang");
}

try argv_list.append("--cache-dir");
try argv_list.append(b.cache_root.path orelse ".");

try argv_list.append("--global-cache-dir");
try argv_list.append(b.graph.global_cache_root.path orelse ".");

try argv_list.append("--listen=-");

if (!translate_c.target.query.isNative()) {

@@ -323,7 +323,7 @@ fn serveWebSocket(ws: *WebServer, sock: *http.Server.WebSocket) !noreturn {
// Temporarily unlock, then re-lock after the message is sent.
ws.time_report_mutex.unlock();
defer ws.time_report_mutex.lock();
try sock.writeMessage(msg, .binary);
try sock.writeMessage(owned_msg, .binary);
}
}
@@ -400,10 +400,11 @@ pub fn defaultReadVec(r: *Reader, data: [][]u8) Error!usize {
.vtable = &.{ .drain = Writer.fixedDrain },
};
const limit: Limit = .limited(writer.buffer.len - writer.end);
r.end += r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
const n = r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
error.WriteFailed => unreachable,
else => |e| return e,
};
r.end += n;
return 0;
}

@@ -448,7 +449,6 @@ pub fn readVecAll(r: *Reader, data: [][]u8) Error!void {
/// is returned instead.
///
/// See also:
/// * `peek`
/// * `toss`
pub fn peek(r: *Reader, n: usize) Error![]u8 {
try r.fill(n);

@@ -699,7 +699,7 @@ pub const DelimiterError = error{
};

/// Returns a slice of the next bytes of buffered data from the stream until
/// `sentinel` is found, advancing the seek position.
/// `sentinel` is found, advancing the seek position past the sentinel.
///
/// Returned slice has a sentinel.
///

@@ -732,7 +732,7 @@ pub fn peekSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
/// `delimiter` is found, advancing the seek position past the delimiter.
///
/// Returned slice includes the delimiter as the last byte.
///

@@ -760,31 +760,42 @@ pub fn takeDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// * `peekDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
const buffer = r.buffer[0..r.end];
const seek = r.seek;
if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
@branchHint(.likely);
return buffer[seek .. end + 1];
}
// TODO take a parameter for max search length rather than relying on buffer capacity
try rebase(r, r.buffer.len);
while (r.buffer.len - r.end != 0) {
const end_cap = r.buffer[r.end..];
var writer: Writer = .fixed(end_cap);
const n = r.vtable.stream(r, &writer, .limited(end_cap.len)) catch |err| switch (err) {
error.WriteFailed => unreachable,
else => |e| return e,
};
r.end += n;
if (std.mem.indexOfScalarPos(u8, end_cap[0..n], 0, delimiter)) |end| {
return r.buffer[0 .. r.end - n + end + 1];
{
const contents = r.buffer[0..r.end];
const seek = r.seek;
if (std.mem.indexOfScalarPos(u8, contents, seek, delimiter)) |end| {
@branchHint(.likely);
return contents[seek .. end + 1];
}
}
return error.StreamTooLong;
while (true) {
const content_len = r.end - r.seek;
if (r.buffer.len - content_len == 0) break;
try fillMore(r);
const seek = r.seek;
const contents = r.buffer[0..r.end];
if (std.mem.indexOfScalarPos(u8, contents, seek + content_len, delimiter)) |end| {
return contents[seek .. end + 1];
}
}
// It might or might not be end of stream. There is no more buffer space
// left to disambiguate. If `StreamTooLong` was added to `RebaseError` then
// this logic could be replaced by removing the exit condition from the
// above while loop. That error code would represent when `buffer` capacity
// is too small for an operation, replacing the current use of asserts.
var failing_writer = Writer.failing;
while (r.vtable.stream(r, &failing_writer, .limited(1))) |n| {
assert(n == 0);
} else |err| switch (err) {
error.WriteFailed => return error.StreamTooLong,
error.ReadFailed => |e| return e,
error.EndOfStream => |e| return e,
}
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
/// `delimiter` is found, advancing the seek position up to (but not past)
/// the delimiter.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which

@@ -798,20 +809,44 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeDelimiter`
/// * `takeDelimiterInclusive`
/// * `peekDelimiterExclusive`
pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
const result = try r.peekDelimiterExclusive(delimiter);
r.toss(result.len);
return result;
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position past the delimiter.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
/// case `null` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
/// such case, the stream state is unmodified as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeDelimiterInclusive`
/// * `takeDelimiterExclusive`
pub fn takeDelimiter(r: *Reader, delimiter: u8) error{ ReadFailed, StreamTooLong }!?[]u8 {
const inclusive = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
error.EndOfStream => {
const remaining = r.buffer[r.seek..r.end];
if (remaining.len == 0) return error.EndOfStream;
if (remaining.len == 0) return null;
r.toss(remaining.len);
return remaining;
},
else => |e| return e,
};
r.toss(result.len);
return result[0 .. result.len - 1];
r.toss(inclusive.len);
return inclusive[0 .. inclusive.len - 1];
}

/// Returns a slice of the next bytes of buffered data from the stream until

@@ -1334,6 +1369,9 @@ test peekSentinel {
var r: Reader = .fixed("ab\nc");
try testing.expectEqualStrings("ab", try r.peekSentinel('\n'));
try testing.expectEqualStrings("ab", try r.peekSentinel('\n'));
r.toss(3);
try testing.expectError(error.EndOfStream, r.peekSentinel('\n'));
try testing.expectEqualStrings("c", try r.peek(1));
}

test takeDelimiterInclusive {

@@ -1348,22 +1386,52 @@ test peekDelimiterInclusive {
try testing.expectEqualStrings("ab\n", try r.peekDelimiterInclusive('\n'));
r.toss(3);
try testing.expectError(error.EndOfStream, r.peekDelimiterInclusive('\n'));
try testing.expectEqualStrings("c", try r.peek(1));
}

test takeDelimiterExclusive {
var r: Reader = .fixed("ab\nc");

try testing.expectEqualStrings("ab", try r.takeDelimiterExclusive('\n'));
try testing.expectEqualStrings("", try r.takeDelimiterExclusive('\n'));
try testing.expectEqualStrings("", try r.takeDelimiterExclusive('\n'));
try testing.expectEqualStrings("\n", try r.take(1));

try testing.expectEqualStrings("c", try r.takeDelimiterExclusive('\n'));
try testing.expectError(error.EndOfStream, r.takeDelimiterExclusive('\n'));
}

test peekDelimiterExclusive {
var r: Reader = .fixed("ab\nc");

try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n'));
try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n'));
r.toss(3);
r.toss(2);
try testing.expectEqualStrings("", try r.peekDelimiterExclusive('\n'));
try testing.expectEqualStrings("\n", try r.take(1));

try testing.expectEqualStrings("c", try r.peekDelimiterExclusive('\n'));
try testing.expectEqualStrings("c", try r.peekDelimiterExclusive('\n'));
r.toss(1);
try testing.expectError(error.EndOfStream, r.peekDelimiterExclusive('\n'));
}

test takeDelimiter {
var r: Reader = .fixed("ab\nc\n\nd");
try testing.expectEqualStrings("ab", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("c", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("d", (try r.takeDelimiter('\n')).?);
try testing.expectEqual(null, try r.takeDelimiter('\n'));
try testing.expectEqual(null, try r.takeDelimiter('\n'));

r = .fixed("ab\nc\n\nd\n"); // one trailing newline does not affect behavior
try testing.expectEqualStrings("ab", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("c", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("", (try r.takeDelimiter('\n')).?);
try testing.expectEqualStrings("d", (try r.takeDelimiter('\n')).?);
try testing.expectEqual(null, try r.takeDelimiter('\n'));
try testing.expectEqual(null, try r.takeDelimiter('\n'));
}
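
The documentation and test updates above reflect that `takeDelimiter` now returns an optional slice: end of stream yields `null` rather than `error.EndOfStream`, and the delimiter itself is consumed but not returned. A minimal sketch of iterating lines with it, using only the Reader API exercised by these tests:

    const std = @import("std");

    test "count lines with takeDelimiter" {
        var r: std.Io.Reader = .fixed("ab\nc\n\nd");
        var count: usize = 0;
        // Each call yields the bytes before the next '\n' (possibly empty),
        // and null once the stream is exhausted.
        while (try r.takeDelimiter('\n')) |line| {
            _ = line;
            count += 1;
        }
        try std.testing.expectEqual(4, count);
    }
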
test streamDelimiter {

@@ -1533,6 +1601,18 @@ test "readSliceShort with smaller buffer than Reader" {
try testing.expectEqualStrings(str, &buf);
}

test "readSliceShort with indirect reader" {
var r: Reader = .fixed("HelloFren");
var ri_buf: [3]u8 = undefined;
var ri: std.testing.ReaderIndirect = .init(&r, &ri_buf);
var buf: [5]u8 = undefined;
try testing.expectEqual(5, try ri.interface.readSliceShort(&buf));
try testing.expectEqualStrings("Hello", buf[0..5]);
try testing.expectEqual(4, try ri.interface.readSliceShort(&buf));
try testing.expectEqualStrings("Fren", buf[0..4]);
try testing.expectEqual(0, try ri.interface.readSliceShort(&buf));
}

test readVec {
var r: Reader = .fixed(std.ascii.letters);
var flat_buffer: [52]u8 = undefined;

@@ -1642,6 +1722,26 @@ test "takeDelimiterInclusive when it rebases" {
}
}

test "takeDelimiterInclusive on an indirect reader when it rebases" {
const written_line = "ABCDEFGHIJKLMNOPQRSTUVWXYZ\n";
var buffer: [128]u8 = undefined;
var tr: std.testing.Reader = .init(&buffer, &.{
.{ .buffer = written_line[0..4] },
.{ .buffer = written_line[4..] },
.{ .buffer = written_line },
.{ .buffer = written_line },
.{ .buffer = written_line },
.{ .buffer = written_line },
.{ .buffer = written_line },
});
var indirect_buffer: [128]u8 = undefined;
var tri: std.testing.ReaderIndirect = .init(&tr.interface, &indirect_buffer);
const r = &tri.interface;
for (0..6) |_| {
try std.testing.expectEqualStrings(written_line, try r.takeDelimiterInclusive('\n'));
}
}

test "takeStruct and peekStruct packed" {
var r: Reader = .fixed(&.{ 0b11110000, 0b00110011 });
const S = packed struct(u16) { a: u2, b: u6, c: u7, d: u1 };
@@ -27,6 +27,7 @@ pub fn init(reader: *Reader, limit: Limit, buffer: []u8) Limited {

fn stream(r: *Reader, w: *Writer, limit: Limit) Reader.StreamError!usize {
const l: *Limited = @fieldParentPtr("interface", r);
if (l.remaining == .nothing) return error.EndOfStream;
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited.stream(w, combined_limit);
l.remaining = l.remaining.subtract(n).?;

@@ -51,8 +52,51 @@ test stream {

fn discard(r: *Reader, limit: Limit) Reader.Error!usize {
const l: *Limited = @fieldParentPtr("interface", r);
if (l.remaining == .nothing) return error.EndOfStream;
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited.discard(combined_limit);
l.remaining = l.remaining.subtract(n).?;
return n;
}

test "end of stream, read, hit limit exactly" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(4), &.{});
const r = &l.interface;

var buf: [2]u8 = undefined;
try r.readSliceAll(&buf);
try r.readSliceAll(&buf);
try std.testing.expectError(error.EndOfStream, l.interface.readSliceAll(&buf));
}

test "end of stream, read, hit limit after partial read" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(5), &.{});
const r = &l.interface;

var buf: [2]u8 = undefined;
try r.readSliceAll(&buf);
try r.readSliceAll(&buf);
try std.testing.expectError(error.EndOfStream, l.interface.readSliceAll(&buf));
}

test "end of stream, discard, hit limit exactly" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(4), &.{});
const r = &l.interface;

try r.discardAll(2);
try r.discardAll(2);
try std.testing.expectError(error.EndOfStream, l.interface.discardAll(2));
}

test "end of stream, discard, hit limit after partial read" {
var f: Reader = .fixed("i'm dying");
var l = f.limited(.limited(5), &.{});
const r = &l.interface;

try r.discardAll(2);
try r.discardAll(2);
try std.testing.expectError(error.EndOfStream, l.interface.discardAll(2));
}
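
The new tests above pin down how a Limited reader behaves once its byte budget is spent: further reads and discards report `error.EndOfStream`. A small usage sketch assembled only from calls appearing in those tests:

    const std = @import("std");

    test "cap a reader at 4 bytes" {
        var f: std.Io.Reader = .fixed("i'm dying");
        // At most 4 bytes can ever be read through the limited wrapper.
        var l = f.limited(.limited(4), &.{});
        const r = &l.interface;
        var buf: [4]u8 = undefined;
        try r.readSliceAll(&buf);
        try std.testing.expectEqualStrings("i'm ", &buf);
        // The budget is exhausted, so the wrapper now reports end of stream.
        try std.testing.expectError(error.EndOfStream, r.readSliceAll(&buf));
    }
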
@@ -917,10 +917,12 @@ pub fn sendFileHeader(
return n;
}

/// Asserts nonzero buffer capacity.
/// Asserts nonzero buffer capacity and nonzero `limit`.
pub fn sendFileReading(w: *Writer, file_reader: *File.Reader, limit: Limit) FileReadingError!usize {
assert(limit != .nothing);
const dest = limit.slice(try w.writableSliceGreedy(1));
const n = try file_reader.read(dest);
const n = try file_reader.interface.readSliceShort(dest);
if (n == 0) return error.EndOfStream;
w.advance(n);
return n;
}

@@ -2655,7 +2657,8 @@ pub const Allocating = struct {
if (additional == 0) return error.EndOfStream;
list.ensureUnusedCapacity(gpa, limit.minInt64(additional)) catch return error.WriteFailed;
const dest = limit.slice(list.unusedCapacitySlice());
const n = try file_reader.read(dest);
const n = try file_reader.interface.readSliceShort(dest);
if (n == 0) return error.EndOfStream;
list.items.len += n;
return n;
}

@@ -2714,18 +2717,40 @@ test "allocating sendFile" {

const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
defer file.close();
var r_buffer: [256]u8 = undefined;
var r_buffer: [2]u8 = undefined;
var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
try file_writer.interface.writeByte('h');
try file_writer.interface.writeAll("abcd");
try file_writer.interface.flush();

var file_reader = file_writer.moveToReader();
try file_reader.seekTo(0);
try file_reader.interface.fill(2);

var allocating: Writer.Allocating = .init(testing.allocator);
defer allocating.deinit();
try allocating.ensureUnusedCapacity(1);
try testing.expectEqual(4, allocating.writer.sendFileAll(&file_reader, .unlimited));
try testing.expectEqualStrings("abcd", allocating.writer.buffered());
}

_ = try file_reader.interface.streamRemaining(&allocating.writer);
test sendFileReading {
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();

const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
defer file.close();
var r_buffer: [2]u8 = undefined;
var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
try file_writer.interface.writeAll("abcd");
try file_writer.interface.flush();

var file_reader = file_writer.moveToReader();
try file_reader.seekTo(0);
try file_reader.interface.fill(2);

var w_buffer: [1]u8 = undefined;
var discarding: Writer.Discarding = .init(&w_buffer);
try testing.expectEqual(4, discarding.writer.sendFileReadingAll(&file_reader, .unlimited));
}

test writeStruct {
@@ -3075,6 +3075,10 @@ pub fn cTypeAlignment(target: *const Target, c_type: CType) u16 {
},
else => {},
},
.m68k => switch (c_type) {
.int, .uint, .long, .ulong => return 2,
else => {},
},
.powerpc, .powerpcle, .powerpc64, .powerpc64le => switch (target.os.tag) {
.aix => switch (c_type) {
.double, .longdouble => return 4,

@@ -3175,6 +3179,10 @@ pub fn cTypePreferredAlignment(target: *const Target, c_type: CType) u16 {
else => {},
},
},
.m68k => switch (c_type) {
.int, .uint, .long, .ulong => return 2,
else => {},
},
.wasm32, .wasm64 => switch (target.os.tag) {
.emscripten => switch (c_type) {
.longdouble => return 8,

@@ -83,10 +83,9 @@ pub fn sleep(nanoseconds: u64) void {
req = rem;
continue;
},
.FAULT,
.INVAL,
.OPNOTSUPP,
=> unreachable,
.FAULT => unreachable,
.INVAL => unreachable,
.OPNOTSUPP => unreachable,
else => return,
}
}
lib/std/c.zig

@@ -2235,40 +2235,70 @@ pub const S = switch (native_os) {
}
},
.dragonfly => struct {
pub const IFMT = 0o170000;

pub const IFIFO = 0o010000;
pub const IFCHR = 0o020000;
pub const IFDIR = 0o040000;
pub const IFBLK = 0o060000;
pub const IFREG = 0o100000;
pub const IFLNK = 0o120000;
pub const IFSOCK = 0o140000;
pub const IFWHT = 0o160000;

pub const ISUID = 0o4000;
pub const ISGID = 0o2000;
pub const ISVTX = 0o1000;
pub const IRWXU = 0o700;
pub const IRUSR = 0o400;
pub const IWUSR = 0o200;
pub const IXUSR = 0o100;
pub const IRWXG = 0o070;
pub const IRGRP = 0o040;
pub const IWGRP = 0o020;
pub const IXGRP = 0o010;
pub const IRWXO = 0o007;
pub const IROTH = 0o004;
pub const IWOTH = 0o002;
pub const IXOTH = 0o001;

pub const IREAD = IRUSR;
pub const IEXEC = IXUSR;
pub const IWRITE = IWUSR;
pub const IXOTH = 1;
pub const IWOTH = 2;
pub const IROTH = 4;
pub const IRWXO = 7;
pub const IXGRP = 8;
pub const IWGRP = 16;
pub const IRGRP = 32;
pub const IRWXG = 56;
pub const IXUSR = 64;
pub const IWUSR = 128;
pub const IRUSR = 256;
pub const IRWXU = 448;
pub const ISTXT = 512;
pub const BLKSIZE = 512;
pub const ISVTX = 512;
pub const ISGID = 1024;
pub const ISUID = 2048;
pub const IFIFO = 4096;
pub const IFCHR = 8192;
pub const IFDIR = 16384;
pub const IFBLK = 24576;
pub const IFREG = 32768;
pub const IFDB = 36864;
pub const IFLNK = 40960;
pub const IFSOCK = 49152;
pub const IFWHT = 57344;
pub const IFMT = 61440;

pub fn ISFIFO(m: u32) bool {
return m & IFMT == IFIFO;
}

pub fn ISCHR(m: u32) bool {
return m & IFMT == IFCHR;
}

pub fn ISDIR(m: u32) bool {
return m & IFMT == IFDIR;
}

pub fn ISBLK(m: u32) bool {
return m & IFMT == IFBLK;
}

pub fn ISREG(m: u32) bool {
return m & IFMT == IFREG;
}

pub fn ISLNK(m: u32) bool {
return m & IFMT == IFLNK;
}

pub fn ISSOCK(m: u32) bool {
return m & IFMT == IFSOCK;
}

pub fn IWHT(m: u32) bool {
return m & IFMT == IFWHT;
}
},
.haiku => struct {
pub const IFMT = 0o170000;

@@ -3091,8 +3121,17 @@ pub const SIG = switch (native_os) {
pub const UNBLOCK = 2;
pub const SETMASK = 3;
},
// https://github.com/SerenityOS/serenity/blob/046c23f567a17758d762a33bdf04bacbfd088f9f/Kernel/API/POSIX/signal.h
// https://github.com/SerenityOS/serenity/blob/046c23f567a17758d762a33bdf04bacbfd088f9f/Kernel/API/POSIX/signal_numbers.h
.serenity => struct {
pub const DFL: ?Sigaction.handler_fn = @ptrFromInt(0);
pub const ERR: ?Sigaction.handler_fn = @ptrFromInt(maxInt(usize));
pub const IGN: ?Sigaction.handler_fn = @ptrFromInt(1);

pub const BLOCK = 1;
pub const UNBLOCK = 2;
pub const SETMASK = 3;

pub const INVAL = 0;
pub const HUP = 1;
pub const INT = 2;

@@ -5685,6 +5724,23 @@ pub const MSG = switch (native_os) {
pub const WAITFORONE = 0x2000;
pub const NOTIFICATION = 0x4000;
},
// https://github.com/openbsd/src/blob/42a7be81bef70c04732f45ec573622effe56b563/sys/sys/socket.h#L506
.openbsd => struct {
pub const OOB = 0x1;
pub const PEEK = 0x2;
pub const DONTROUTE = 0x4;
pub const EOR = 0x8;
pub const TRUNC = 0x10;
pub const CTRUNC = 0x20;
pub const WAITALL = 0x40;
pub const DONTWAIT = 0x80;
pub const BCAST = 0x100;
pub const MCAST = 0x200;
pub const NOSIGNAL = 0x400;
pub const CMSG_CLOEXEC = 0x800;
pub const WAITFORONE = 0x1000;
pub const CMSG_CLOFORK = 0x2000;
},
else => void,
};
pub const SOCK = switch (native_os) {

@@ -6664,7 +6720,12 @@ pub const SOMAXCONN = switch (native_os) {
.windows => ws2_32.SOMAXCONN,
// https://github.com/SerenityOS/serenity/blob/ac44ec5ebc707f9dd0c3d4759a1e17e91db5d74f/Kernel/API/POSIX/sys/socket.h#L128
.solaris, .illumos, .serenity => 128,
.openbsd => 28,
// https://github.com/freebsd/freebsd-src/blob/9ab31f821ad1c6bad474510447387c50bef2c24c/sys/sys/socket.h#L434
// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/fd3d1949d526ffa646e57037770acd6f2f3bb617/sys/sys/socket.h#L393
// https://github.com/NetBSD/src/blob/a673fb3f8487e974c669216064f7588207229fea/sys/sys/socket.h#L472
// https://github.com/openbsd/src/blob/8ba9cd88f10123fef7af805b8e5ccc2463ad8fa4/sys/sys/socket.h#L483
// https://github.com/apple/darwin-xnu/blob/2ff845c2e033bd0ff64b5b6aa6063a1f8f65aa32/bsd/sys/socket.h#L815
.freebsd, .dragonfly, .netbsd, .openbsd, .driverkit, .macos, .ios, .tvos, .watchos, .visionos => 128,
else => void,
};
pub const IFNAMESIZE = switch (native_os) {
@@ -155,12 +155,12 @@ fn AesOcb(comptime Aes: anytype) type {
xorWith(&offset, lx.star);
var pad = offset;
aes_enc_ctx.encrypt(&pad, &pad);
for (m[i * 16 ..], 0..) |x, j| {
c[i * 16 + j] = pad[j] ^ x;
}
var e = [_]u8{0} ** 16;
@memcpy(e[0..leftover], m[i * 16 ..][0..leftover]);
e[leftover] = 0x80;
for (m[i * 16 ..], 0..) |x, j| {
c[i * 16 + j] = pad[j] ^ x;
}
xorWith(&sum, e);
}
var e = xorBlocks(xorBlocks(sum, offset), lx.dol);

@@ -354,3 +354,32 @@ test "AesOcb test vector 4" {
try Aes128Ocb.decrypt(&m2, &c, tag, &ad, nonce, k);
assert(mem.eql(u8, &m, &m2));
}

test "AesOcb in-place encryption-decryption" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;

var k: [Aes128Ocb.key_length]u8 = undefined;
var nonce: [Aes128Ocb.nonce_length]u8 = undefined;
var tag: [Aes128Ocb.tag_length]u8 = undefined;
var m: [40]u8 = undefined;
var original_m: [m.len]u8 = undefined;
_ = try hexToBytes(&k, "000102030405060708090A0B0C0D0E0F");
_ = try hexToBytes(&m, "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627");
_ = try hexToBytes(&nonce, "BBAA9988776655443322110D");
const ad = m;

@memcpy(&original_m, &m);

Aes128Ocb.encrypt(&m, &tag, &m, &ad, nonce, k);

var expected_c: [m.len]u8 = undefined;
var expected_tag: [tag.len]u8 = undefined;
_ = try hexToBytes(&expected_tag, "ED07BA06A4A69483A7035490C5769E60");
_ = try hexToBytes(&expected_c, "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7A");

try testing.expectEqualSlices(u8, &expected_tag, &tag);
try testing.expectEqualSlices(u8, &expected_c, &m);
try Aes128Ocb.decrypt(&m, &m, tag, &ad, nonce, k);

try testing.expectEqualSlices(u8, &original_m, &m);
}
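
The new test above guards in-place operation, where plaintext and ciphertext share one buffer. For comparison, a shorter round-trip sketch with separate buffers, built from the constants and calls used in that test; the key, nonce, and message bytes here are illustrative, and the module path is assumed to be std.crypto.aead.aes_ocb:

    const std = @import("std");
    const Aes128Ocb = std.crypto.aead.aes_ocb.Aes128Ocb;

    test "Aes128Ocb round trip with separate buffers" {
        const k = [_]u8{0x01} ** Aes128Ocb.key_length;
        const nonce = [_]u8{0x02} ** Aes128Ocb.nonce_length;
        const m = "forty bytes of plaintext, more or less..".*;
        const ad = "associated data";
        var c: [m.len]u8 = undefined;
        var tag: [Aes128Ocb.tag_length]u8 = undefined;
        // Encrypt into a distinct ciphertext buffer, then decrypt back.
        Aes128Ocb.encrypt(&c, &tag, &m, ad, nonce, k);
        var m2: [m.len]u8 = undefined;
        try Aes128Ocb.decrypt(&m2, &c, tag, ad, nonce, k);
        try std.testing.expectEqualSlices(u8, &m, &m2);
    }
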
@@ -320,6 +320,7 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
var handshake_state: HandshakeState = .hello;
var handshake_cipher: tls.HandshakeCipher = undefined;
var main_cert_pub_key: CertificatePublicKey = undefined;
var tls12_negotiated_group: ?tls.NamedGroup = null;
const now_sec = std.time.timestamp();

var cleartext_fragment_start: usize = 0;

@@ -679,6 +680,7 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
const curve_type = hsd.decode(u8);
if (curve_type != 0x03) return error.TlsIllegalParameter; // named_curve
const named_group = hsd.decode(tls.NamedGroup);
tls12_negotiated_group = named_group;
const key_size = hsd.decode(u8);
try hsd.ensure(key_size);
const server_pub_key = hsd.slice(key_size);

@@ -691,10 +693,19 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
if (cipher_state != .cleartext) return error.TlsUnexpectedMessage;
if (handshake_state != .server_hello_done) return error.TlsUnexpectedMessage;

const client_key_exchange_msg = .{@intFromEnum(tls.ContentType.handshake)} ++
const public_key_bytes: []const u8 = switch (tls12_negotiated_group orelse .secp256r1) {
.secp256r1 => &key_share.secp256r1_kp.public_key.toUncompressedSec1(),
.secp384r1 => &key_share.secp384r1_kp.public_key.toUncompressedSec1(),
.x25519 => &key_share.x25519_kp.public_key,
else => return error.TlsIllegalParameter,
};

const client_key_exchange_prefix = .{@intFromEnum(tls.ContentType.handshake)} ++
int(u16, @intFromEnum(tls.ProtocolVersion.tls_1_2)) ++
array(u16, u8, .{@intFromEnum(tls.HandshakeType.client_key_exchange)} ++
array(u24, u8, array(u8, u8, key_share.secp256r1_kp.public_key.toUncompressedSec1())));
int(u16, @intCast(public_key_bytes.len + 5)) ++ // record length
.{@intFromEnum(tls.HandshakeType.client_key_exchange)} ++
int(u24, @intCast(public_key_bytes.len + 1)) ++ // handshake message length
.{@as(u8, @intCast(public_key_bytes.len))}; // public key length
const client_change_cipher_spec_msg = .{@intFromEnum(tls.ContentType.change_cipher_spec)} ++
int(u16, @intFromEnum(tls.ProtocolVersion.tls_1_2)) ++
array(u16, tls.ChangeCipherSpecType, .{.change_cipher_spec});

@@ -703,7 +714,8 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
inline else => |*p| {
const P = @TypeOf(p.*).A;
p.transcript_hash.update(wrapped_handshake);
p.transcript_hash.update(client_key_exchange_msg[tls.record_header_len..]);
p.transcript_hash.update(client_key_exchange_prefix[tls.record_header_len..]);
p.transcript_hash.update(public_key_bytes);
const master_secret = hmacExpandLabel(P.Hmac, pre_master_secret, &.{
"master secret",
&client_hello_rand,

@@ -757,8 +769,9 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
nonce,
pv.app_cipher.client_write_key,
);
var all_msgs_vec: [3][]const u8 = .{
&client_key_exchange_msg,
var all_msgs_vec: [4][]const u8 = .{
&client_key_exchange_prefix,
public_key_bytes,
&client_change_cipher_spec_msg,
&client_verify_msg,
};

@@ -929,7 +942,6 @@ fn drain(w: *Writer, data: []const []const u8, splat: usize) Writer.Error!usize
if (prepared.cleartext_len < buf.len) break :done;
}
for (data[0 .. data.len - 1]) |buf| {
if (buf.len < min_buffer_len) break :done;
const prepared = prepareCiphertextRecord(c, ciphertext_buf[ciphertext_end..], buf, .application_data);
total_clear += prepared.cleartext_len;
ciphertext_end += prepared.ciphertext_end;

@@ -937,7 +949,6 @@ fn drain(w: *Writer, data: []const []const u8, splat: usize) Writer.Error!usize
}
const buf = data[data.len - 1];
for (0..splat) |_| {
if (buf.len < min_buffer_len) break :done;
const prepared = prepareCiphertextRecord(c, ciphertext_buf[ciphertext_end..], buf, .application_data);
total_clear += prepared.cleartext_len;
ciphertext_end += prepared.ciphertext_end;
@@ -569,7 +569,7 @@ pub fn assertReadable(slice: []const volatile u8) void {
/// Invokes detectable illegal behavior when the provided array is not aligned
/// to the provided amount.
pub fn assertAligned(ptr: anytype, comptime alignment: std.mem.Alignment) void {
const aligned_ptr: *align(alignment.toByteUnits()) anyopaque = @ptrCast(@alignCast(ptr));
const aligned_ptr: *align(alignment.toByteUnits()) const anyopaque = @ptrCast(@alignCast(ptr));
_ = aligned_ptr;
}

@@ -2583,8 +2583,9 @@ pub fn updateFile(
error.ReadFailed => return src_reader.err.?,
error.WriteFailed => return atomic_file.file_writer.err.?,
};
try atomic_file.flush();
try atomic_file.file_writer.file.updateTimes(src_stat.atime, src_stat.mtime);
try atomic_file.finish();
try atomic_file.renameIntoPlace();
return .stale;
}
@@ -1242,7 +1242,7 @@ pub const Reader = struct {
pub fn seekBy(r: *Reader, offset: i64) Reader.SeekError!void {
switch (r.mode) {
.positional, .positional_reading => {
setPosAdjustingBuffer(r, @intCast(@as(i64, @intCast(r.pos)) + offset));
setLogicalPos(r, @intCast(@as(i64, @intCast(logicalPos(r))) + offset));
},
.streaming, .streaming_reading => {
if (posix.SEEK == void) {

@@ -1251,7 +1251,7 @@ pub const Reader = struct {
}
const seek_err = r.seek_err orelse e: {
if (posix.lseek_CUR(r.file.handle, offset)) |_| {
setPosAdjustingBuffer(r, @intCast(@as(i64, @intCast(r.pos)) + offset));
setLogicalPos(r, @intCast(@as(i64, @intCast(logicalPos(r))) + offset));
return;
} else |err| {
r.seek_err = err;

@@ -1275,16 +1275,17 @@ pub const Reader = struct {
pub fn seekTo(r: *Reader, offset: u64) Reader.SeekError!void {
switch (r.mode) {
.positional, .positional_reading => {
setPosAdjustingBuffer(r, offset);
setLogicalPos(r, offset);
},
.streaming, .streaming_reading => {
if (offset >= r.pos) return Reader.seekBy(r, @intCast(offset - r.pos));
const logical_pos = logicalPos(r);
if (offset >= logical_pos) return Reader.seekBy(r, @intCast(offset - logical_pos));
if (r.seek_err) |err| return err;
posix.lseek_SET(r.file.handle, offset) catch |err| {
r.seek_err = err;
return err;
};
setPosAdjustingBuffer(r, offset);
setLogicalPos(r, offset);
},
.failure => return r.seek_err.?,
}

@@ -1294,7 +1295,7 @@ pub const Reader = struct {
return r.pos - r.interface.bufferedLen();
}

fn setPosAdjustingBuffer(r: *Reader, offset: u64) void {
fn setLogicalPos(r: *Reader, offset: u64) void {
const logical_pos = logicalPos(r);
if (offset < logical_pos or offset >= r.pos) {
r.interface.seek = 0;

@@ -1322,13 +1323,15 @@ pub const Reader = struct {
},
.positional_reading => {
const dest = limit.slice(try w.writableSliceGreedy(1));
const n = try readPositional(r, dest);
var data: [1][]u8 = .{dest};
const n = try readVecPositional(r, &data);
w.advance(n);
return n;
},
.streaming_reading => {
const dest = limit.slice(try w.writableSliceGreedy(1));
const n = try readStreaming(r, dest);
var data: [1][]u8 = .{dest};
const n = try readVecStreaming(r, &data);
w.advance(n);
return n;
},

@@ -1339,94 +1342,100 @@ pub const Reader = struct {
fn readVec(io_reader: *std.Io.Reader, data: [][]u8) std.Io.Reader.Error!usize {
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
switch (r.mode) {
.positional, .positional_reading => {
if (is_windows) {
// Unfortunately, `ReadFileScatter` cannot be used since it
// requires page alignment.
if (io_reader.seek == io_reader.end) {
io_reader.seek = 0;
io_reader.end = 0;
}
const first = data[0];
if (first.len >= io_reader.buffer.len - io_reader.end) {
return readPositional(r, first);
} else {
io_reader.end += try readPositional(r, io_reader.buffer[io_reader.end..]);
return 0;
}
}
var iovecs_buffer: [max_buffers_len]posix.iovec = undefined;
const dest_n, const data_size = try io_reader.writableVectorPosix(&iovecs_buffer, data);
const dest = iovecs_buffer[0..dest_n];
assert(dest[0].len > 0);
const n = posix.preadv(r.file.handle, dest, r.pos) catch |err| switch (err) {
error.Unseekable => {
r.mode = r.mode.toStreaming();
const pos = r.pos;
if (pos != 0) {
r.pos = 0;
r.seekBy(@intCast(pos)) catch {
r.mode = .failure;
return error.ReadFailed;
};
}
return 0;
},
else => |e| {
r.err = e;
return error.ReadFailed;
},
};
if (n == 0) {
r.size = r.pos;
return error.EndOfStream;
}
r.pos += n;
if (n > data_size) {
io_reader.end += n - data_size;
return data_size;
}
return n;
},
.streaming, .streaming_reading => {
if (is_windows) {
// Unfortunately, `ReadFileScatter` cannot be used since it
// requires page alignment.
if (io_reader.seek == io_reader.end) {
io_reader.seek = 0;
io_reader.end = 0;
}
const first = data[0];
if (first.len >= io_reader.buffer.len - io_reader.end) {
return readPositional(r, first);
} else {
io_reader.end += try readPositional(r, io_reader.buffer[io_reader.end..]);
return 0;
}
}
var iovecs_buffer: [max_buffers_len]posix.iovec = undefined;
const dest_n, const data_size = try io_reader.writableVectorPosix(&iovecs_buffer, data);
const dest = iovecs_buffer[0..dest_n];
assert(dest[0].len > 0);
const n = posix.readv(r.file.handle, dest) catch |err| {
r.err = err;
return error.ReadFailed;
};
if (n == 0) {
r.size = r.pos;
return error.EndOfStream;
}
r.pos += n;
if (n > data_size) {
io_reader.end += n - data_size;
return data_size;
}
return n;
},
.positional, .positional_reading => return readVecPositional(r, data),
.streaming, .streaming_reading => return readVecStreaming(r, data),
.failure => return error.ReadFailed,
}
}
fn readVecPositional(r: *Reader, data: [][]u8) std.Io.Reader.Error!usize {
const io_reader = &r.interface;
if (is_windows) {
// Unfortunately, `ReadFileScatter` cannot be used since it
// requires page alignment.
if (io_reader.seek == io_reader.end) {
io_reader.seek = 0;
io_reader.end = 0;
}
const first = data[0];
if (first.len >= io_reader.buffer.len - io_reader.end) {
return readPositional(r, first);
} else {
io_reader.end += try readPositional(r, io_reader.buffer[io_reader.end..]);
return 0;
}
}
var iovecs_buffer: [max_buffers_len]posix.iovec = undefined;
const dest_n, const data_size = try io_reader.writableVectorPosix(&iovecs_buffer, data);
const dest = iovecs_buffer[0..dest_n];
assert(dest[0].len > 0);
const n = posix.preadv(r.file.handle, dest, r.pos) catch |err| switch (err) {
error.Unseekable => {
r.mode = r.mode.toStreaming();
const pos = r.pos;
if (pos != 0) {
r.pos = 0;
r.seekBy(@intCast(pos)) catch {
r.mode = .failure;
return error.ReadFailed;
};
}
return 0;
},
else => |e| {
r.err = e;
return error.ReadFailed;
},
};
if (n == 0) {
r.size = r.pos;
return error.EndOfStream;
}
r.pos += n;
if (n > data_size) {
io_reader.end += n - data_size;
return data_size;
}
return n;
}

fn readVecStreaming(r: *Reader, data: [][]u8) std.Io.Reader.Error!usize {
const io_reader = &r.interface;
if (is_windows) {
// Unfortunately, `ReadFileScatter` cannot be used since it
// requires page alignment.
if (io_reader.seek == io_reader.end) {
io_reader.seek = 0;
io_reader.end = 0;
}
const first = data[0];
if (first.len >= io_reader.buffer.len - io_reader.end) {
return readStreaming(r, first);
} else {
io_reader.end += try readStreaming(r, io_reader.buffer[io_reader.end..]);
return 0;
}
}
var iovecs_buffer: [max_buffers_len]posix.iovec = undefined;
const dest_n, const data_size = try io_reader.writableVectorPosix(&iovecs_buffer, data);
const dest = iovecs_buffer[0..dest_n];
assert(dest[0].len > 0);
const n = posix.readv(r.file.handle, dest) catch |err| {
r.err = err;
return error.ReadFailed;
};
if (n == 0) {
r.size = r.pos;
return error.EndOfStream;
}
r.pos += n;
if (n > data_size) {
io_reader.end += n - data_size;
return data_size;
}
return n;
}

fn discard(io_reader: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
const file = r.file;

@@ -1493,7 +1502,7 @@ pub const Reader = struct {
}
}

pub fn readPositional(r: *Reader, dest: []u8) std.Io.Reader.Error!usize {
fn readPositional(r: *Reader, dest: []u8) std.Io.Reader.Error!usize {
const n = r.file.pread(dest, r.pos) catch |err| switch (err) {
error.Unseekable => {
r.mode = r.mode.toStreaming();

@@ -1520,7 +1529,7 @@ pub const Reader = struct {
return n;
}

pub fn readStreaming(r: *Reader, dest: []u8) std.Io.Reader.Error!usize {
fn readStreaming(r: *Reader, dest: []u8) std.Io.Reader.Error!usize {
const n = r.file.read(dest) catch |err| {
r.err = err;
return error.ReadFailed;

@@ -1533,14 +1542,6 @@ pub const Reader = struct {
return n;
}

pub fn read(r: *Reader, dest: []u8) std.Io.Reader.Error!usize {
switch (r.mode) {
.positional, .positional_reading => return readPositional(r, dest),
.streaming, .streaming_reading => return readStreaming(r, dest),
.failure => return error.ReadFailed,
}
}

pub fn atEnd(r: *Reader) bool {
// Even if stat fails, size is set when end is encountered.
const size = r.size orelse return false;

@@ -1783,7 +1784,7 @@ pub const Writer = struct {
) std.Io.Writer.FileError!usize {
const reader_buffered = file_reader.interface.buffered();
if (reader_buffered.len >= @intFromEnum(limit))
return sendFileBuffered(io_w, file_reader, reader_buffered);
return sendFileBuffered(io_w, file_reader, limit.slice(reader_buffered));
const writer_buffered = io_w.buffered();
const file_limit = @intFromEnum(limit) - reader_buffered.len;
const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));

@@ -1855,7 +1856,7 @@ pub const Writer = struct {
return error.EndOfStream;
}
const consumed = io_w.consume(@intCast(sbytes));
file_reader.seekTo(file_reader.pos + consumed) catch return error.ReadFailed;
file_reader.seekBy(@intCast(consumed)) catch return error.ReadFailed;
return consumed;
}

@@ -1916,7 +1917,7 @@ pub const Writer = struct {
return error.EndOfStream;
}
const consumed = io_w.consume(@bitCast(len));
file_reader.seekTo(file_reader.pos + consumed) catch return error.ReadFailed;
file_reader.seekBy(@intCast(consumed)) catch return error.ReadFailed;
return consumed;
}

@@ -1967,7 +1968,7 @@ pub const Writer = struct {

const copy_file_range = switch (native_os) {
.freebsd => std.os.freebsd.copy_file_range,
.linux => if (std.c.versionCheck(if (builtin.abi.isAndroid()) .{ .major = 34, .minor = 0, .patch = 0 } else .{ .major = 2, .minor = 27, .patch = 0 })) std.os.linux.wrapped.copy_file_range else {},
.linux => std.os.linux.wrapped.copy_file_range,
else => {},
};
if (@TypeOf(copy_file_range) != void) cfr: {

@@ -2049,7 +2050,7 @@ pub const Writer = struct {
reader_buffered: []const u8,
) std.Io.Writer.FileError!usize {
const n = try drain(io_w, &.{reader_buffered}, 1);
file_reader.seekTo(file_reader.pos + n) catch return error.ReadFailed;
file_reader.seekBy(@intCast(n)) catch return error.ReadFailed;
return n;
}
@@ -1515,6 +1515,41 @@ test "sendfile" {
|
||||
try testing.expectEqualStrings("header1\nsecond header\nine1\nsecontrailer1\nsecond trailer\n", written_buf[0..amt]);
|
||||
}
|
||||
|
||||
test "sendfile with buffered data" {
|
||||
var tmp = tmpDir(.{});
|
||||
defer tmp.cleanup();
|
||||
|
||||
try tmp.dir.makePath("os_test_tmp");
|
||||
|
||||
var dir = try tmp.dir.openDir("os_test_tmp", .{});
|
||||
defer dir.close();
|
||||
|
||||
var src_file = try dir.createFile("sendfile1.txt", .{ .read = true });
|
||||
defer src_file.close();
|
||||
|
||||
try src_file.writeAll("AAAABBBB");
|
||||
|
||||
var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true });
|
||||
defer dest_file.close();
|
||||
|
||||
var src_buffer: [32]u8 = undefined;
|
||||
var file_reader = src_file.reader(&src_buffer);
|
||||
|
||||
try file_reader.seekTo(0);
|
||||
try file_reader.interface.fill(8);
|
||||
|
||||
var fallback_buffer: [32]u8 = undefined;
|
||||
var file_writer = dest_file.writer(&fallback_buffer);
|
||||
|
||||
try std.testing.expectEqual(4, try file_writer.interface.sendFileAll(&file_reader, .limited(4)));
|
||||
|
||||
var written_buf: [8]u8 = undefined;
|
||||
const amt = try dest_file.preadAll(&written_buf, 0);
|
||||
|
||||
try std.testing.expectEqual(4, amt);
|
||||
try std.testing.expectEqualSlices(u8, "AAAA", written_buf[0..amt]);
|
||||
}
|
||||
|
||||
test "copyRangeAll" {
|
||||
var tmp = tmpDir(.{});
|
||||
defer tmp.cleanup();
|
||||
@@ -2145,3 +2180,34 @@ test "seekBy" {
|
||||
try testing.expectEqual(15, n);
|
||||
try testing.expectEqualStrings("t's test seekBy", buffer[0..15]);
|
||||
}
|
||||
|
||||
test "File.Writer sendfile with buffered contents" {
|
||||
var tmp_dir = testing.tmpDir(.{});
|
||||
defer tmp_dir.cleanup();
|
||||
|
||||
{
|
||||
try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" });
|
||||
const in = try tmp_dir.dir.openFile("a", .{});
|
||||
defer in.close();
|
||||
const out = try tmp_dir.dir.createFile("b", .{});
|
||||
defer out.close();
|
||||
|
||||
var in_buf: [2]u8 = undefined;
|
||||
var in_r = in.reader(&in_buf);
|
||||
_ = try in_r.getSize(); // Catch seeks past end by populating size
|
||||
try in_r.interface.fill(2);
|
||||
|
||||
var out_buf: [1]u8 = undefined;
|
||||
var out_w = out.writerStreaming(&out_buf);
|
||||
try out_w.interface.writeByte('a');
|
||||
try testing.expectEqual(3, try out_w.interface.sendFileAll(&in_r, .unlimited));
|
||||
try out_w.interface.flush();
|
||||
}
|
||||
|
||||
var check = try tmp_dir.dir.openFile("b", .{});
|
||||
defer check.close();
|
||||
var check_buf: [4]u8 = undefined;
|
||||
var check_r = check.reader(&check_buf);
|
||||
try testing.expectEqualStrings("abcd", try check_r.interface.take(4));
|
||||
try testing.expectError(error.EndOfStream, check_r.interface.takeByte());
|
||||
}
|
||||
|
||||
@@ -1375,7 +1375,7 @@ pub const basic_authorization = struct {
|
||||
var buf: [max_user_len + 1 + max_password_len]u8 = undefined;
|
||||
var w: Writer = .fixed(&buf);
|
||||
const user: Uri.Component = uri.user orelse .empty;
|
||||
const password: Uri.Component = uri.user orelse .empty;
|
||||
const password: Uri.Component = uri.password orelse .empty;
|
||||
user.formatUser(&w) catch unreachable;
|
||||
w.writeByte(':') catch unreachable;
|
||||
password.formatPassword(&w) catch unreachable;
|
||||
@@ -1797,9 +1797,10 @@ pub fn fetch(client: *Client, options: FetchOptions) FetchError!FetchResult {
|
||||
|
||||
if (options.payload) |payload| {
|
||||
req.transfer_encoding = .{ .content_length = payload.len };
|
||||
var body = try req.sendBody(&.{});
|
||||
var body = try req.sendBodyUnflushed(&.{});
|
||||
try body.writer.writeAll(payload);
|
||||
try body.end();
|
||||
try req.connection.?.flush();
|
||||
} else {
|
||||
try req.sendBodiless();
|
||||
}
|
||||
|
||||
@@ -1105,8 +1105,8 @@ fn createTestServer(S: type) !*TestServer {
|
||||
const test_server = try std.testing.allocator.create(TestServer);
|
||||
test_server.* = .{
|
||||
.net_server = try address.listen(.{ .reuse_address = true }),
|
||||
.server_thread = try std.Thread.spawn(.{}, S.run, .{test_server}),
|
||||
.shutting_down = false,
|
||||
.server_thread = try std.Thread.spawn(.{}, S.run, .{test_server}),
|
||||
};
|
||||
return test_server;
|
||||
}
|
||||
|
||||
@@ -567,8 +567,8 @@ pub fn innerParseFromValue(
|
||||
switch (source) {
|
||||
.float => |f| {
|
||||
if (@round(f) != f) return error.InvalidNumber;
|
||||
if (f > std.math.maxInt(T)) return error.Overflow;
|
||||
if (f < std.math.minInt(T)) return error.Overflow;
|
||||
if (f > @as(@TypeOf(f), @floatFromInt(std.math.maxInt(T)))) return error.Overflow;
|
||||
if (f < @as(@TypeOf(f), @floatFromInt(std.math.minInt(T)))) return error.Overflow;
|
||||
return @as(T, @intFromFloat(f));
|
||||
},
|
||||
.integer => |i| {
|
||||
@@ -770,7 +770,7 @@ fn sliceToInt(comptime T: type, slice: []const u8) !T {
|
||||
// Try to coerce a float to an integer.
|
||||
const float = try std.fmt.parseFloat(f128, slice);
|
||||
if (@round(float) != float) return error.InvalidNumber;
|
||||
if (float > std.math.maxInt(T) or float < std.math.minInt(T)) return error.Overflow;
|
||||
if (float > @as(f128, @floatFromInt(std.math.maxInt(T))) or float < @as(f128, @floatFromInt(std.math.minInt(T)))) return error.Overflow;
|
||||
return @as(T, @intCast(@as(i128, @intFromFloat(float))));
|
||||
}
|
||||
|
||||
|
||||
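Note on the two JSON hunks above: comparing a float directly against `std.math.maxInt(T)`/`minInt(T)` behaves poorly once the integer bound is not exactly representable in the float type, so both checks now compare against the `@floatFromInt`-rounded bounds. A minimal standalone sketch of the same range check; the helper name `floatToInt` and the test values are ours, not part of the diff:

const std = @import("std");

/// Hypothetical helper mirroring the checks in the diff: reject non-integral
/// values, then range-check against the float-rounded integer bounds before
/// converting.
fn floatToInt(comptime T: type, f: f64) error{ InvalidNumber, Overflow }!T {
    if (@round(f) != f) return error.InvalidNumber;
    if (f > @as(f64, @floatFromInt(std.math.maxInt(T)))) return error.Overflow;
    if (f < @as(f64, @floatFromInt(std.math.minInt(T)))) return error.Overflow;
    return @intFromFloat(f);
}

test floatToInt {
    try std.testing.expectEqual(@as(i32, 42), try floatToInt(i32, 42.0));
    try std.testing.expectError(error.Overflow, floatToInt(i32, 3e9));
    try std.testing.expectError(error.InvalidNumber, floatToInt(i32, 1.5));
}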
@@ -786,11 +786,10 @@ pub const Mutable = struct {
assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing

if (a.limbs.len == 1 and b.limbs.len == 1) {
const ov = @mulWithOverflow(a.limbs[0], b.limbs[0]);
rma.limbs[0] = ov[0];
if (ov[1] == 0) {
rma.limbs[0], const overflow_bit = @mulWithOverflow(a.limbs[0], b.limbs[0]);
if (overflow_bit == 0) {
rma.len = 1;
rma.positive = (a.positive == b.positive);
rma.positive = (a.positive == b.positive) or rma.limbs[0] == 0;
return;
}
}

@@ -13,6 +13,7 @@ const testing = std.testing;
/// Errors:
/// - Overflow: Integer overflow or Infinity
/// - Underflow: Absolute value of result smaller than 1
///
/// Edge case rules ordered by precedence:
/// - powi(T, x, 0) = 1 unless T is i1, i0, u0
/// - powi(T, 0, x) = 0 when x > 0

@@ -3785,6 +3785,7 @@ test rotate {

/// Replace needle with replacement as many times as possible, writing to an output buffer which is assumed to be of
/// appropriate size. Use replacementSize to calculate an appropriate buffer size.
/// The `input` and `output` slices must not overlap.
/// The needle must not be empty.
/// Returns the number of replacements made.
pub fn replace(comptime T: type, input: []const T, needle: []const T, replacement: []const T, output: []T) usize {
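The documentation hunk above spells out the preconditions of `std.mem.replace`; a small usage sketch follows. The buffer is sized by hand here because needle and replacement have equal length; in general `std.mem.replacementSize` computes the required size:

const std = @import("std");

test "mem.replace usage sketch" {
    const input = "1 2 3";
    // Same-length needle and replacement, so input.len output bytes suffice.
    var out: [input.len]u8 = undefined;
    const n = std.mem.replace(u8, input, " ", "-", &out);
    try std.testing.expectEqual(@as(usize, 2), n);
    try std.testing.expectEqualStrings("1-2-3", out[0..]);
}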
@@ -4484,7 +4485,8 @@ pub fn doNotOptimizeAway(val: anytype) void {
} else doNotOptimizeAway(&val);
},
.float => {
if ((t.float.bits == 32 or t.float.bits == 64) and builtin.zig_backend != .stage2_c) {
// https://github.com/llvm/llvm-project/issues/159200
if ((t.float.bits == 32 or t.float.bits == 64) and builtin.zig_backend != .stage2_c and !builtin.cpu.arch.isLoongArch()) {
asm volatile (""
:
: [_] "rm" (val),

@@ -358,8 +358,10 @@ pub fn remap(self: Allocator, allocation: anytype, new_len: usize) t: {
return mem.bytesAsSlice(T, new_memory);
}

/// This function requests a new byte size for an existing allocation, which
/// This function requests a new size for an existing allocation, which
/// can be larger, smaller, or the same size as the old memory allocation.
/// The result is an array of `new_n` items of the same type as the existing
/// allocation.
///
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
///

@@ -1393,7 +1393,7 @@ fn parseHosts(
br: *Io.Reader,
) error{ OutOfMemory, ReadFailed }!void {
while (true) {
const line = br.takeDelimiterExclusive('\n') catch |err| switch (err) {
const line = br.takeDelimiter('\n') catch |err| switch (err) {
error.StreamTooLong => {
// Skip lines that are too long.
_ = br.discardDelimiterInclusive('\n') catch |e| switch (e) {
@@ -1403,7 +1403,8 @@ fn parseHosts(
continue;
},
error.ReadFailed => return error.ReadFailed,
error.EndOfStream => break,
} orelse {
break; // end of stream
};
var split_it = mem.splitScalar(u8, line, '#');
const no_comment_line = split_it.first();

@@ -1238,11 +1238,14 @@ pub fn access(path: [*:0]const u8, mode: u32) usize {
if (@hasField(SYS, "access")) {
return syscall2(.access, @intFromPtr(path), mode);
} else {
return syscall4(.faccessat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode, 0);
return faccessat(AT.FDCWD, path, mode, 0);
}
}

pub fn faccessat(dirfd: i32, path: [*:0]const u8, mode: u32, flags: u32) usize {
if (flags == 0) {
return syscall3(.faccessat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode);
}
return syscall4(.faccessat2, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode, flags);
}
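For context on the hunk above: `access` now funnels through the new `faccessat` wrapper, which only uses `faccessat2` when flags are non-zero. A hedged usage sketch of the raw wrapper (Linux-only; mode 0 is the plain existence check, and the `E.init` error decoding is the usual pattern rather than something introduced by this diff):

const std = @import("std");
const linux = std.os.linux;

pub fn main() void {
    // Existence check relative to the current working directory.
    const rc = linux.faccessat(linux.AT.FDCWD, "/etc/hosts", 0, 0);
    switch (linux.E.init(rc)) {
        .SUCCESS => std.debug.print("accessible\n", .{}),
        else => |e| std.debug.print("not accessible: {s}\n", .{@tagName(e)}),
    }
}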
@@ -9799,7 +9802,9 @@ pub const wrapped = struct {
};

pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) CopyFileRangeError!usize {
const rc = system.copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
const use_c = std.c.versionCheck(if (builtin.abi.isAndroid()) .{ .major = 34, .minor = 0, .patch = 0 } else .{ .major = 2, .minor = 27, .patch = 0 });
const sys = if (use_c) std.c else std.os.linux;
const rc = sys.copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
switch (errno(rc)) {
.SUCCESS => return @intCast(rc),
.BADF => return error.BadFileFlags,

@@ -642,7 +642,7 @@ pub const Insn = packed struct {
.dst = @intFromEnum(dst),
.src = @intFromEnum(src),
.off = 0,
.imm = @as(i32, @intCast(@as(u32, @truncate(imm)))),
.imm = @as(i32, @bitCast(@as(u32, @truncate(imm)))),
};
}

@@ -652,7 +652,7 @@ pub const Insn = packed struct {
.dst = 0,
.src = 0,
.off = 0,
.imm = @as(i32, @intCast(@as(u32, @truncate(imm >> 32)))),
.imm = @as(i32, @bitCast(@as(u32, @truncate(imm >> 32)))),
};
}

@@ -15,84 +15,125 @@ const sockaddr = linux.sockaddr;
const timespec = linux.timespec;

pub fn syscall0(number: SYS) usize {
// r0 is both an input register and a clobber. musl and glibc achieve this with
// a "+" constraint, which isn't supported in Zig, so instead we separately list
// r0 as both an input and an output. (Listing it as an input and a clobber would
// cause the C backend to emit invalid code; see #25209.)
var r0_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
: [number] "{r0}" (@intFromEnum(number)),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall1(number: SYS, arg1: usize) usize {
// r0 is both an input and a clobber.
var r0_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall2(number: SYS, arg1: usize, arg2: usize) usize {
// These registers are both inputs and clobbers.
var r0_out: usize = undefined;
var r4_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
[r4_out] "={r4}" (r4_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
[arg2] "{r4}" (arg2),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall3(number: SYS, arg1: usize, arg2: usize, arg3: usize) usize {
// These registers are both inputs and clobbers.
var r0_out: usize = undefined;
var r4_out: usize = undefined;
var r5_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
[r4_out] "={r4}" (r4_out),
[r5_out] "={r5}" (r5_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
[arg2] "{r4}" (arg2),
[arg3] "{r5}" (arg3),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall4(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
// These registers are both inputs and clobbers.
var r0_out: usize = undefined;
var r4_out: usize = undefined;
var r5_out: usize = undefined;
var r6_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
[r4_out] "={r4}" (r4_out),
[r5_out] "={r5}" (r5_out),
[r6_out] "={r6}" (r6_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
[arg2] "{r4}" (arg2),
[arg3] "{r5}" (arg3),
[arg4] "{r6}" (arg4),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall5(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
// These registers are both inputs and clobbers.
var r0_out: usize = undefined;
var r4_out: usize = undefined;
var r5_out: usize = undefined;
var r6_out: usize = undefined;
var r7_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
[r4_out] "={r4}" (r4_out),
[r5_out] "={r5}" (r5_out),
[r6_out] "={r6}" (r6_out),
[r7_out] "={r7}" (r7_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
[arg2] "{r4}" (arg2),
[arg3] "{r5}" (arg3),
[arg4] "{r6}" (arg4),
[arg5] "{r7}" (arg5),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall6(
@@ -104,12 +145,25 @@ pub fn syscall6(
arg5: usize,
arg6: usize,
) usize {
// These registers are both inputs and clobbers.
var r0_out: usize = undefined;
var r4_out: usize = undefined;
var r5_out: usize = undefined;
var r6_out: usize = undefined;
var r7_out: usize = undefined;
var r8_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
[r4_out] "={r4}" (r4_out),
[r5_out] "={r5}" (r5_out),
[r6_out] "={r6}" (r6_out),
[r7_out] "={r7}" (r7_out),
[r8_out] "={r8}" (r8_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
[arg2] "{r4}" (arg2),
@@ -117,7 +171,7 @@ pub fn syscall6(
[arg4] "{r6}" (arg4),
[arg5] "{r7}" (arg5),
[arg6] "{r8}" (arg6),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn clone() callconv(.naked) usize {
@@ -193,11 +247,19 @@ pub fn clone() callconv(.naked) usize {
pub const restore = restore_rt;

pub fn restore_rt() callconv(.naked) noreturn {
asm volatile (
\\ sc
:
: [number] "{r0}" (@intFromEnum(SYS.rt_sigreturn)),
: .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
switch (@import("builtin").zig_backend) {
.stage2_c => asm volatile (
\\ li 0, %[number]
\\ sc
:
: [number] "i" (@intFromEnum(SYS.rt_sigreturn)),
: .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true }),
else => _ = asm volatile (
\\ sc
:
: [number] "{r0}" (@intFromEnum(SYS.rt_sigreturn)),
: .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true }),
}
}

pub const F = struct {

@@ -15,84 +15,125 @@ const sockaddr = linux.sockaddr;
const timespec = linux.timespec;

pub fn syscall0(number: SYS) usize {
// r0 is both an input register and a clobber. musl and glibc achieve this with
// a "+" constraint, which isn't supported in Zig, so instead we separately list
// r0 as both an input and an output. (Listing it as an input and a clobber would
// cause the C backend to emit invalid code; see #25209.)
var r0_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
: [number] "{r0}" (@intFromEnum(number)),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall1(number: SYS, arg1: usize) usize {
// r0 is both an input and a clobber.
var r0_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall2(number: SYS, arg1: usize, arg2: usize) usize {
// These registers are both inputs and clobbers.
var r0_out: usize = undefined;
var r4_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
[r4_out] "={r4}" (r4_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
[arg2] "{r4}" (arg2),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall3(number: SYS, arg1: usize, arg2: usize, arg3: usize) usize {
// These registers are both inputs and clobbers.
var r0_out: usize = undefined;
var r4_out: usize = undefined;
var r5_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
[r4_out] "={r4}" (r4_out),
[r5_out] "={r5}" (r5_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
[arg2] "{r4}" (arg2),
[arg3] "{r5}" (arg3),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall4(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
// These registers are both inputs and clobbers.
var r0_out: usize = undefined;
var r4_out: usize = undefined;
var r5_out: usize = undefined;
var r6_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
[r4_out] "={r4}" (r4_out),
[r5_out] "={r5}" (r5_out),
[r6_out] "={r6}" (r6_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
[arg2] "{r4}" (arg2),
[arg3] "{r5}" (arg3),
[arg4] "{r6}" (arg4),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall5(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
// These registers are both inputs and clobbers.
var r0_out: usize = undefined;
var r4_out: usize = undefined;
var r5_out: usize = undefined;
var r6_out: usize = undefined;
var r7_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
[r4_out] "={r4}" (r4_out),
[r5_out] "={r5}" (r5_out),
[r6_out] "={r6}" (r6_out),
[r7_out] "={r7}" (r7_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
[arg2] "{r4}" (arg2),
[arg3] "{r5}" (arg3),
[arg4] "{r6}" (arg4),
[arg5] "{r7}" (arg5),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn syscall6(
@@ -104,12 +145,25 @@ pub fn syscall6(
arg5: usize,
arg6: usize,
) usize {
// These registers are both inputs and clobbers.
var r0_out: usize = undefined;
var r4_out: usize = undefined;
var r5_out: usize = undefined;
var r6_out: usize = undefined;
var r7_out: usize = undefined;
var r8_out: usize = undefined;
return asm volatile (
\\ sc
\\ bns+ 1f
\\ neg 3, 3
\\ 1:
: [ret] "={r3}" (-> usize),
[r0_out] "={r0}" (r0_out),
[r4_out] "={r4}" (r4_out),
[r5_out] "={r5}" (r5_out),
[r6_out] "={r6}" (r6_out),
[r7_out] "={r7}" (r7_out),
[r8_out] "={r8}" (r8_out),
: [number] "{r0}" (@intFromEnum(number)),
[arg1] "{r3}" (arg1),
[arg2] "{r4}" (arg2),
@@ -117,7 +171,7 @@ pub fn syscall6(
[arg4] "{r6}" (arg4),
[arg5] "{r7}" (arg5),
[arg6] "{r8}" (arg6),
: .{ .memory = true, .cr0 = true, .r0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
: .{ .memory = true, .cr0 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true, .ctr = true, .xer = true });
}

pub fn clone() callconv(.naked) usize {
@@ -178,11 +232,19 @@ pub fn clone() callconv(.naked) usize {
pub const restore = restore_rt;

pub fn restore_rt() callconv(.naked) noreturn {
asm volatile (
\\ sc
:
: [number] "{r0}" (@intFromEnum(SYS.rt_sigreturn)),
: .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true });
switch (@import("builtin").zig_backend) {
.stage2_c => asm volatile (
\\ li 0, %[number]
\\ sc
:
: [number] "i" (@intFromEnum(SYS.rt_sigreturn)),
: .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true }),
else => _ = asm volatile (
\\ sc
:
: [number] "{r0}" (@intFromEnum(SYS.rt_sigreturn)),
: .{ .memory = true, .cr0 = true, .r4 = true, .r5 = true, .r6 = true, .r7 = true, .r8 = true, .r9 = true, .r10 = true, .r11 = true, .r12 = true }),
}
}

pub const F = struct {

@@ -1,5 +1,5 @@
const std = @import("std");
const uefi = std.uefi;
const uefi = std.os.uefi;
const Guid = uefi.Guid;
const Handle = uefi.Handle;
const Status = uefi.Status;

@@ -90,7 +90,7 @@ pub const MemoryType = enum(u32) {
return @truncate(as_int - vendor_start);
}

pub fn format(self: MemoryType, w: *std.io.Writer) std.io.WriteError!void {
pub fn format(self: MemoryType, w: *std.io.Writer) std.io.Writer.Error!void {
if (self.toOem()) |oemval|
try w.print("OEM({X})", .{oemval})
else if (self.toVendor()) |vendorval|

@@ -177,7 +177,7 @@ inline fn getDynamicSymbol() [*]const elf.Dyn {
\\ jg 2f
\\ 1: .quad _DYNAMIC - .
\\ 2:
: [ret] "=r" (-> [*]const elf.Dyn),
: [ret] "=a" (-> [*]const elf.Dyn),
),
// The compiler does not necessarily have any obligation to load the `l7` register (pointing
// to the GOT), so do it ourselves just in case.

@@ -6387,14 +6387,16 @@ pub const CopyFileRangeError = error{
///
/// Maximum offsets on Linux and FreeBSD are `maxInt(i64)`.
pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len: usize, flags: u32) CopyFileRangeError!usize {
if (builtin.os.tag == .freebsd or
(comptime builtin.os.tag == .linux and std.c.versionCheck(if (builtin.abi.isAndroid()) .{ .major = 34, .minor = 0, .patch = 0 } else .{ .major = 2, .minor = 27, .patch = 0 })))
{
if (builtin.os.tag == .freebsd or builtin.os.tag == .linux) {
const use_c = native_os != .linux or
std.c.versionCheck(if (builtin.abi.isAndroid()) .{ .major = 34, .minor = 0, .patch = 0 } else .{ .major = 2, .minor = 27, .patch = 0 });
const sys = if (use_c) std.c else linux;

var off_in_copy: i64 = @bitCast(off_in);
var off_out_copy: i64 = @bitCast(off_out);

while (true) {
const rc = system.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags);
const rc = sys.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags);
if (native_os == .freebsd) {
switch (errno(rc)) {
.SUCCESS => return @intCast(rc),

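One note on the hunk above: the libc-versus-syscall choice now happens at the call site, so a glibc or bionic that predates copy_file_range falls back to the raw kernel wrapper instead of the old compile-time gate. A hedged sketch of driving the public `std.posix.copy_file_range` API (the helper name `copyRange` and the retry loop are ours, not part of the diff):

const std = @import("std");

/// Copy `len` bytes between two open files at explicit offsets, retrying on
/// short copies. Offsets are independent of the files' seek positions.
fn copyRange(in: std.posix.fd_t, out: std.posix.fd_t, len: usize) !void {
    var in_off: u64 = 0;
    var out_off: u64 = 0;
    var remaining = len;
    while (remaining > 0) {
        const n = try std.posix.copy_file_range(in, in_off, out, out_off, remaining, 0);
        if (n == 0) return error.EndOfStream;
        in_off += n;
        out_off += n;
        remaining -= n;
    }
}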
@@ -883,7 +883,6 @@ test "sigrtmin/max" {
try std.testing.expect(posix.sigrtmin() >= 32);
try std.testing.expect(posix.sigrtmin() >= posix.system.sigrtmin());
try std.testing.expect(posix.sigrtmin() < posix.system.sigrtmax());
try std.testing.expect(posix.sigrtmax() < posix.NSIG);
}

test "sigset empty/full" {

@@ -1753,7 +1753,8 @@ pub fn totalSystemMemory() TotalSystemMemoryError!u64 {
if (std.os.linux.E.init(result) != .SUCCESS) {
return error.UnknownTotalSystemMemory;
}
return info.totalram * info.mem_unit;
// Promote to u64 to avoid overflow on systems where info.totalram is a 32-bit usize
return @as(u64, info.totalram) * info.mem_unit;
},
.freebsd => {
var physmem: c_ulong = undefined;
@@ -1762,7 +1763,20 @@ pub fn totalSystemMemory() TotalSystemMemoryError!u64 {
error.NameTooLong, error.UnknownName => unreachable,
else => return error.UnknownTotalSystemMemory,
};
return @as(usize, @intCast(physmem));
return @as(u64, @intCast(physmem));
},
// whole Darwin family
.driverkit, .ios, .macos, .tvos, .visionos, .watchos => {
// "hw.memsize" returns uint64_t
var physmem: u64 = undefined;
var len: usize = @sizeOf(u64);
posix.sysctlbynameZ("hw.memsize", &physmem, &len, null, 0) catch |err| switch (err) {
error.PermissionDenied => unreachable, // only when setting values,
error.SystemResources => unreachable, // memory already on the stack
error.UnknownName => unreachable, // constant, known good value
else => return error.UnknownTotalSystemMemory,
};
return physmem;
},
.openbsd => {
const mib: [2]c_int = [_]c_int{

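Regarding the totalSystemMemory hunks above: on a 32-bit Linux target both `info.totalram` and `info.mem_unit` fit in 32 bits individually, but their product does not, which is why the multiplication is now done in `u64`. Illustrative numbers only (the field values are invented for the sketch):

const std = @import("std");

test "widen before multiplying" {
    // Hypothetical sysinfo values for an 8 GiB machine with 4 KiB units.
    const totalram: u32 = 2 * 1024 * 1024; // number of mem_unit blocks
    const mem_unit: u32 = 4096;
    // u32 math would overflow (8 GiB does not fit in 32 bits); widening first does not.
    const total: u64 = @as(u64, totalram) * mem_unit;
    try std.testing.expectEqual(@as(u64, 8) * 1024 * 1024 * 1024, total);
}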
@@ -52,6 +52,8 @@ term: ?(SpawnError!Term),
argv: []const []const u8,

/// Leave as null to use the current env map using the supplied allocator.
/// Required if unable to access the current env map (e.g. building a library on
/// some platforms).
env_map: ?*const EnvMap,

stdin_behavior: StdIo,
@@ -414,6 +416,8 @@ pub fn run(args: struct {
argv: []const []const u8,
cwd: ?[]const u8 = null,
cwd_dir: ?fs.Dir = null,
/// Required if unable to access the current env map (e.g. building a
/// library on some platforms).
env_map: ?*const EnvMap = null,
max_output_bytes: usize = 50 * 1024,
expand_arg0: Arg0Expand = .no_expand,
@@ -614,7 +618,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void {
})).ptr;
} else {
// TODO come up with a solution for this.
@compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process");
@panic("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process");
}
};

@@ -227,7 +227,7 @@ fn partialInsertionSort(a: usize, b: usize, context: anytype) bool {
// shift the smaller element to the left.
if (i - a >= 2) {
var j = i - 1;
while (j >= 1) : (j -= 1) {
while (j > a) : (j -= 1) {
if (!context.lessThan(j, j - 1)) break;
context.swap(j, j - 1);
}
@@ -328,3 +328,50 @@ fn reverseRange(a: usize, b: usize, context: anytype) void {
j -= 1;
}
}

test "pdqContext respects arbitrary range boundaries" {
// Regression test for issue #25250
// pdqsort should never access indices outside the specified [a, b) range
var data: [2000]i32 = @splat(0);

// Fill with data that triggers the partialInsertionSort path
for (0..data.len) |i| {
data[i] = @intCast(@mod(@as(i32, @intCast(i)) * 7, 100));
}

const TestContext = struct {
items: []i32,
range_start: usize,
range_end: usize,

pub fn lessThan(ctx: @This(), a: usize, b: usize) bool {
// Assert indices are within the expected range
testing.expect(a >= ctx.range_start and a < ctx.range_end) catch @panic("index a out of range");
testing.expect(b >= ctx.range_start and b < ctx.range_end) catch @panic("index b out of range");
return ctx.items[a] < ctx.items[b];
}

pub fn swap(ctx: @This(), a: usize, b: usize) void {
// Assert indices are within the expected range
testing.expect(a >= ctx.range_start and a < ctx.range_end) catch @panic("index a out of range");
testing.expect(b >= ctx.range_start and b < ctx.range_end) catch @panic("index b out of range");
mem.swap(i32, &ctx.items[a], &ctx.items[b]);
}
};

// Test sorting a sub-range that doesn't start at 0
const start = 1118;
const end = 1764;
const ctx = TestContext{
.items = &data,
.range_start = start,
.range_end = end,
};

pdqContext(start, end, ctx);

// Verify the range is sorted
for ((start + 1)..end) |i| {
try testing.expect(data[i - 1] <= data[i]);
}
}

@@ -1249,3 +1249,63 @@ pub const Reader = struct {
return n;
}
};

/// A `std.Io.Reader` that gets its data from another `std.Io.Reader`, and always
/// writes to its own buffer (and returns 0) during `stream` and `readVec`.
pub const ReaderIndirect = struct {
in: *std.Io.Reader,
interface: std.Io.Reader,

pub fn init(in: *std.Io.Reader, buffer: []u8) ReaderIndirect {
return .{
.in = in,
.interface = .{
.vtable = &.{
.stream = stream,
.readVec = readVec,
},
.buffer = buffer,
.seek = 0,
.end = 0,
},
};
}

fn readVec(r: *std.Io.Reader, _: [][]u8) std.Io.Reader.Error!usize {
try streamInner(r);
return 0;
}

fn stream(r: *std.Io.Reader, _: *std.Io.Writer, _: std.Io.Limit) std.Io.Reader.StreamError!usize {
try streamInner(r);
return 0;
}

fn streamInner(r: *std.Io.Reader) std.Io.Reader.Error!void {
const r_indirect: *ReaderIndirect = @alignCast(@fieldParentPtr("interface", r));

// If there's no room remaining in the buffer at all, make room.
if (r.buffer.len == r.end) {
try r.rebase(r.buffer.len);
}

var writer: std.Io.Writer = .{
.buffer = r.buffer,
.end = r.end,
.vtable = &.{
.drain = std.Io.Writer.unreachableDrain,
.rebase = std.Io.Writer.unreachableRebase,
},
};
defer r.end = writer.end;

r_indirect.in.streamExact(&writer, r.buffer.len - r.end) catch |err| switch (err) {
// Only forward EndOfStream if no new bytes were written to the buffer
error.EndOfStream => |e| if (r.end == writer.end) {
return e;
},
error.WriteFailed => unreachable,
else => |e| return e,
};
}
};

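The new `ReaderIndirect` above never hands bytes directly to callers from `stream`/`readVec`; it only refills its own buffer from the inner reader. A hedged usage sketch (the enclosing module is not visible in this hunk, so `ReaderIndirect` is referred to unqualified, and the fixed inner reader is just a stand-in):

const std = @import("std");

test "ReaderIndirect usage sketch" {
    // Assumed context: `ReaderIndirect` is the type added in the hunk above.
    var inner: std.Io.Reader = .fixed("hello\nworld\n");
    var buffer: [16]u8 = undefined;
    var indirect: ReaderIndirect = .init(&inner, &buffer);
    // Downstream code consumes through the generic interface; every refill
    // goes through streamInner, which copies into `buffer`.
    try std.testing.expectEqualStrings("hello", try indirect.interface.takeDelimiterExclusive('\n'));
}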
@@ -154,7 +154,11 @@ pub fn next(bc: *BitcodeReader) !?Item {
Abbrev.Builtin.enter_subblock.toRecordId() => {
const block_id: u32 = @intCast(record.operands[0]);
switch (block_id) {
Block.block_info => try bc.parseBlockInfoBlock(),
Block.block_info => {
try bc.startBlock(Block.block_info, @intCast(record.operands[1]));
try bc.parseBlockInfoBlock();
try bc.endBlock();
},
Block.first_reserved...Block.last_standard => return error.UnsupportedBlockId,
else => {
try bc.startBlock(block_id, @intCast(record.operands[1]));

@@ -102,6 +102,7 @@ pub fn getExternalExecutor(
else => "qemu-mips64el",
},
},
.or1k => Executor{ .qemu = "qemu-or1k" },
.powerpc => Executor{ .qemu = "qemu-ppc" },
.powerpc64 => Executor{ .qemu = "qemu-ppc64" },
.powerpc64le => Executor{ .qemu = "qemu-ppc64le" },
@@ -109,7 +110,7 @@ pub fn getExternalExecutor(
.riscv64 => Executor{ .qemu = "qemu-riscv64" },
.s390x => Executor{ .qemu = "qemu-s390x" },
.sparc => Executor{
.qemu = if (candidate.cpu.has(.sparc, .v9))
.qemu = if (candidate.cpu.has(.sparc, .v8plus))
"qemu-sparc32plus"
else
"qemu-sparc",
@@ -121,7 +122,7 @@ pub fn getExternalExecutor(
else => Executor{ .qemu = "qemu-x86_64" },
},
.xtensa => Executor{ .qemu = "qemu-xtensa" },
else => return bad_result,
else => bad_result,
};
}

@@ -358,14 +358,11 @@ fn CpuinfoParser(comptime impl: anytype) type {
return struct {
fn parse(arch: Target.Cpu.Arch, reader: *std.Io.Reader) !?Target.Cpu {
var obj: impl = .{};
while (reader.takeDelimiterExclusive('\n')) |line| {
while (try reader.takeDelimiter('\n')) |line| {
const colon_pos = mem.indexOfScalar(u8, line, ':') orelse continue;
const key = mem.trimEnd(u8, line[0..colon_pos], " \t");
const value = mem.trimStart(u8, line[colon_pos + 1 ..], " \t");
if (!try obj.line_hook(key, value)) break;
} else |err| switch (err) {
error.EndOfStream => {},
else => |e| return e,
}
return obj.finalize(arch);
}

@@ -717,6 +717,7 @@ const Parser = struct {

elem.* = try self.parseExpr(array_info.child, nodes.at(@intCast(i)));
}
if (array_info.sentinel()) |s| result[result.len] = s;
return result;
}

@@ -627,7 +627,7 @@ fn exportHandler(
// Work around x86_64 backend limitation.
const linkage = if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) .internal else .weak;
const N = "__ubsan_handle_" ++ sym_name;
@export(handler, .{ .name = N, .linkage = linkage });
@export(handler, .{ .name = N, .linkage = linkage, .visibility = if (linkage == .internal) .default else .hidden });
}

fn exportHandlerWithAbort(
@@ -639,11 +639,11 @@ fn exportHandlerWithAbort(
const linkage = if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) .internal else .weak;
{
const N = "__ubsan_handle_" ++ sym_name;
@export(handler, .{ .name = N, .linkage = linkage });
@export(handler, .{ .name = N, .linkage = linkage, .visibility = if (linkage == .internal) .default else .hidden });
}
{
const N = "__ubsan_handle_" ++ sym_name ++ "_abort";
@export(abort_handler, .{ .name = N, .linkage = linkage });
@export(abort_handler, .{ .name = N, .linkage = linkage, .visibility = if (linkage == .internal) .default else .hidden });
}
}