Merge pull request #18955 from ziglang/std.http.Server

take std.http in a different direction
This commit is contained in:
Andrew Kelley
2024-02-23 17:41:38 -08:00
committed by GitHub
26 changed files with 3745 additions and 3679 deletions

View File

@@ -4,6 +4,7 @@
const Uri = @This();
const std = @import("std.zig");
const testing = std.testing;
const Allocator = std.mem.Allocator;
scheme: []const u8,
user: ?[]const u8 = null,
@@ -15,15 +16,15 @@ query: ?[]const u8 = null,
fragment: ?[]const u8 = null,
/// Applies URI encoding and replaces all reserved characters with their respective %XX code.
pub fn escapeString(allocator: std.mem.Allocator, input: []const u8) error{OutOfMemory}![]u8 {
pub fn escapeString(allocator: Allocator, input: []const u8) error{OutOfMemory}![]u8 {
return escapeStringWithFn(allocator, input, isUnreserved);
}
pub fn escapePath(allocator: std.mem.Allocator, input: []const u8) error{OutOfMemory}![]u8 {
pub fn escapePath(allocator: Allocator, input: []const u8) error{OutOfMemory}![]u8 {
return escapeStringWithFn(allocator, input, isPathChar);
}
pub fn escapeQuery(allocator: std.mem.Allocator, input: []const u8) error{OutOfMemory}![]u8 {
pub fn escapeQuery(allocator: Allocator, input: []const u8) error{OutOfMemory}![]u8 {
return escapeStringWithFn(allocator, input, isQueryChar);
}
@@ -39,7 +40,7 @@ pub fn writeEscapedQuery(writer: anytype, input: []const u8) !void {
return writeEscapedStringWithFn(writer, input, isQueryChar);
}
pub fn escapeStringWithFn(allocator: std.mem.Allocator, input: []const u8, comptime keepUnescaped: fn (c: u8) bool) std.mem.Allocator.Error![]u8 {
pub fn escapeStringWithFn(allocator: Allocator, input: []const u8, comptime keepUnescaped: fn (c: u8) bool) Allocator.Error![]u8 {
var outsize: usize = 0;
for (input) |c| {
outsize += if (keepUnescaped(c)) @as(usize, 1) else 3;
@@ -76,7 +77,7 @@ pub fn writeEscapedStringWithFn(writer: anytype, input: []const u8, comptime kee
/// Parses a URI string and unescapes all %XX where XX is a valid hex number. Otherwise, verbatim copies
/// them to the output.
pub fn unescapeString(allocator: std.mem.Allocator, input: []const u8) error{OutOfMemory}![]u8 {
pub fn unescapeString(allocator: Allocator, input: []const u8) error{OutOfMemory}![]u8 {
var outsize: usize = 0;
var inptr: usize = 0;
while (inptr < input.len) {
@@ -341,7 +342,7 @@ pub fn format(
/// The return value will contain unescaped strings pointing into the
/// original `text`. Each component that is provided, will be non-`null`.
pub fn parse(text: []const u8) ParseError!Uri {
var reader = SliceReader{ .slice = text };
var reader: SliceReader = .{ .slice = text };
const scheme = reader.readWhile(isSchemeChar);
// after the scheme, a ':' must appear
@@ -358,111 +359,145 @@ pub fn parse(text: []const u8) ParseError!Uri {
return uri;
}
/// Implementation of RFC 3986, Section 5.2.4. Removes dot segments from a URI path.
///
/// `std.fs.path.resolvePosix` is not sufficient here because it may return
/// relative paths and does not preserve trailing slashes.
///
/// Caller owns the returned slice, allocated with `allocator`.
/// Returns an empty slice when `paths` is empty (previously this indexed
/// out of bounds).
fn removeDotSegments(allocator: std.mem.Allocator, paths: []const []const u8) std.mem.Allocator.Error![]const u8 {
    var result = std.ArrayList(u8).init(allocator);
    defer result.deinit();
    for (paths) |p| {
        var it = std.mem.tokenizeScalar(u8, p, '/');
        while (it.next()) |component| {
            if (std.mem.eql(u8, component, ".")) {
                // "." refers to the current segment; drop it.
                continue;
            } else if (std.mem.eql(u8, component, "..")) {
                // ".." pops the previously emitted segment, if any.
                if (result.items.len == 0)
                    continue;
                while (true) {
                    const ends_with_slash = result.items[result.items.len - 1] == '/';
                    result.items.len -= 1;
                    if (ends_with_slash or result.items.len == 0) break;
                }
            } else {
                // Ordinary segment: emit "/" ++ component.
                try result.ensureUnusedCapacity(1 + component.len);
                result.appendAssumeCapacity('/');
                result.appendSliceAssumeCapacity(component);
            }
        }
    }
    // Ensure a trailing slash is kept. Guard against an empty `paths`
    // slice, which would otherwise index out of bounds here.
    if (paths.len > 0) {
        const last_path = paths[paths.len - 1];
        if (last_path.len > 0 and last_path[last_path.len - 1] == '/') {
            try result.append('/');
        }
    }
    return result.toOwnedSlice();
}
pub const ResolveInplaceError = ParseError || error{OutOfMemory};
/// Resolves a URI against a base URI, conforming to RFC 3986, Section 5.
///
/// Assumes `arena` owns all memory in `base` and `ref`. `arena` will own all memory in the returned URI.
pub fn resolve(base: Uri, ref: Uri, strict: bool, arena: std.mem.Allocator) std.mem.Allocator.Error!Uri {
var target: Uri = Uri{
.scheme = "",
.user = null,
.password = null,
.host = null,
.port = null,
.path = "",
.query = null,
.fragment = null,
/// Copies `new` to the beginning of `aux_buf`, allowing the slices to overlap,
/// then parses `new` as a URI, and then resolves the path in place.
/// If a merge needs to take place, the newly constructed path will be stored
/// in `aux_buf` just after the copied `new`.
pub fn resolve_inplace(base: Uri, new: []const u8, aux_buf: []u8) ResolveInplaceError!Uri {
std.mem.copyBackwards(u8, aux_buf, new);
// At this point, new is an invalid pointer.
const new_mut = aux_buf[0..new.len];
const new_parsed, const has_scheme = p: {
break :p .{
parse(new_mut) catch |first_err| {
break :p .{
parseWithoutScheme(new_mut) catch return first_err,
false,
};
},
true,
};
};
if (ref.scheme.len > 0 and (strict or !std.mem.eql(u8, ref.scheme, base.scheme))) {
target.scheme = ref.scheme;
target.user = ref.user;
target.host = ref.host;
target.port = ref.port;
target.path = try removeDotSegments(arena, &.{ref.path});
target.query = ref.query;
} else {
target.scheme = base.scheme;
if (ref.host) |host| {
target.user = ref.user;
target.host = host;
target.port = ref.port;
target.path = ref.path;
target.path = try removeDotSegments(arena, &.{ref.path});
target.query = ref.query;
} else {
if (ref.path.len == 0) {
target.path = base.path;
target.query = ref.query orelse base.query;
} else {
if (ref.path[0] == '/') {
target.path = try removeDotSegments(arena, &.{ref.path});
} else {
target.path = try removeDotSegments(arena, &.{ std.fs.path.dirnamePosix(base.path) orelse "", ref.path });
}
target.query = ref.query;
}
// As you can see above, `new_mut` is not a const pointer.
const new_path: []u8 = @constCast(new_parsed.path);
target.user = base.user;
target.host = base.host;
target.port = base.port;
}
}
if (has_scheme) return .{
.scheme = new_parsed.scheme,
.user = new_parsed.user,
.host = new_parsed.host,
.port = new_parsed.port,
.path = remove_dot_segments(new_path),
.query = new_parsed.query,
.fragment = new_parsed.fragment,
};
target.fragment = ref.fragment;
if (new_parsed.host) |host| return .{
.scheme = base.scheme,
.user = new_parsed.user,
.host = host,
.port = new_parsed.port,
.path = remove_dot_segments(new_path),
.query = new_parsed.query,
.fragment = new_parsed.fragment,
};
return target;
const path, const query = b: {
if (new_path.len == 0)
break :b .{
base.path,
new_parsed.query orelse base.query,
};
if (new_path[0] == '/')
break :b .{
remove_dot_segments(new_path),
new_parsed.query,
};
break :b .{
try merge_paths(base.path, new_path, aux_buf[new_mut.len..]),
new_parsed.query,
};
};
return .{
.scheme = base.scheme,
.user = base.user,
.host = base.host,
.port = base.port,
.path = path,
.query = query,
.fragment = new_parsed.fragment,
};
}
test resolve {
const base = try parse("http://a/b/c/d;p?q");
/// In-place implementation of RFC 3986, Section 5.2.4.
/// Collapses "." and ".." segments within `path`, mutating it, and returns
/// the shortened prefix of `path` that holds the result. No allocation.
fn remove_dot_segments(path: []u8) []u8 {
    var in_i: usize = 0; // read cursor into `path`
    var out_i: usize = 0; // write cursor; never exceeds `in_i`
    while (in_i < path.len) {
        if (std.mem.startsWith(u8, path[in_i..], "./")) {
            // Leading "./": drop it.
            in_i += 2;
        } else if (std.mem.startsWith(u8, path[in_i..], "../")) {
            // Leading "../" with no output segment to pop: drop it.
            in_i += 3;
        } else if (std.mem.startsWith(u8, path[in_i..], "/./")) {
            // "/./" -> "/": skip "/.", leaving the '/' as the next input.
            in_i += 2;
        } else if (std.mem.eql(u8, path[in_i..], "/.")) {
            // Trailing "/.": rewrite the '.' to '/' so the final input is "/".
            in_i += 1;
            path[in_i] = '/';
        } else if (std.mem.startsWith(u8, path[in_i..], "/../")) {
            // "/../": skip "/..", then pop output back to its previous '/'.
            in_i += 3;
            while (out_i > 0) {
                out_i -= 1;
                if (path[out_i] == '/') break;
            }
        } else if (std.mem.eql(u8, path[in_i..], "/..")) {
            // Trailing "/..": rewrite to "/" and pop the last output segment.
            in_i += 2;
            path[in_i] = '/';
            while (out_i > 0) {
                out_i -= 1;
                if (path[out_i] == '/') break;
            }
        } else if (std.mem.eql(u8, path[in_i..], ".")) {
            // Input is exactly ".": nothing to emit.
            in_i += 1;
        } else if (std.mem.eql(u8, path[in_i..], "..")) {
            // Input is exactly "..": nothing to emit.
            in_i += 2;
        } else {
            // Copy one segment verbatim, up to (not including) the next '/'.
            while (true) {
                path[out_i] = path[in_i];
                out_i += 1;
                in_i += 1;
                if (in_i >= path.len or path[in_i] == '/') break;
            }
        }
    }
    return path[0..out_i];
}
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
test remove_dot_segments {
    {
        // Example derived from RFC 3986, Section 5.2: "/a/b/c/./../../g"
        // collapses to "/a/g". Buffer is mutable because the function
        // rewrites the path in place.
        var buffer = "/a/b/c/./../../g".*;
        try std.testing.expectEqualStrings("/a/g", remove_dot_segments(&buffer));
    }
}
try std.testing.expectEqualDeep(try parse("http://a/b/c/blog/"), try base.resolve(try parseWithoutScheme("blog/"), true, arena.allocator()));
try std.testing.expectEqualDeep(try parse("http://a/b/c/blog/?k"), try base.resolve(try parseWithoutScheme("blog/?k"), true, arena.allocator()));
try std.testing.expectEqualDeep(try parse("http://a/b/blog/"), try base.resolve(try parseWithoutScheme("../blog/"), true, arena.allocator()));
try std.testing.expectEqualDeep(try parse("http://a/b/blog"), try base.resolve(try parseWithoutScheme("../blog"), true, arena.allocator()));
try std.testing.expectEqualDeep(try parse("http://e"), try base.resolve(try parseWithoutScheme("//e"), true, arena.allocator()));
try std.testing.expectEqualDeep(try parse("https://a:1/"), try base.resolve(try parse("https://a:1/"), true, arena.allocator()));
/// Implementation of RFC 3986, Section 5.2.3 "Merge Paths", followed by
/// dot-segment removal (Section 5.2.4).
///
/// Writes the merged path into `aux` and returns a slice of it — except when
/// `base` contains no '/', in which case the result is a slice of `new`
/// (which is mutated in place by `remove_dot_segments`).
fn merge_paths(base: []const u8, new: []u8, aux: []u8) error{OutOfMemory}![]u8 {
    // Worst-case output size: all of `base`, one '/', and all of `new`.
    if (aux.len < base.len + 1 + new.len) return error.OutOfMemory;
    if (base.len == 0) {
        // Empty base path: per RFC 5.2.3 the merge result is "/" ++ new.
        aux[0] = '/';
        @memcpy(aux[1..][0..new.len], new);
        return remove_dot_segments(aux[0 .. new.len + 1]);
    }
    // Otherwise: base up to and including its last '/', then `new`.
    // If base has no '/', the merged path is just `new`.
    const pos = std.mem.lastIndexOfScalar(u8, base, '/') orelse return remove_dot_segments(new);
    @memcpy(aux[0 .. pos + 1], base[0 .. pos + 1]);
    @memcpy(aux[pos + 1 ..][0..new.len], new);
    return remove_dot_segments(aux[0 .. pos + 1 + new.len]);
}
const SliceReader = struct {

View File

@@ -937,14 +937,33 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
return .{ .context = .{ .self = self, .allocator = allocator } };
}
/// Same as `append` except it returns the number of bytes written, which is always the same
/// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
/// Same as `append` except it returns the number of bytes written,
/// which is always the same as `m.len`. The purpose of this function
/// existing is to match `std.io.Writer` API.
/// Invalidates element pointers if additional memory is needed.
/// Fails with `error.OutOfMemory` if growing the list fails.
fn appendWrite(context: WriterContext, m: []const u8) Allocator.Error!usize {
    try context.self.appendSlice(context.allocator, m);
    return m.len;
}
/// A `std.io.Writer` over the list that never allocates: writes beyond the
/// current capacity fail with `error.OutOfMemory`.
pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);

/// Initializes a Writer which will append to the list but will return
/// `error.OutOfMemory` rather than increasing capacity.
pub fn fixedWriter(self: *Self) FixedWriter {
    return .{ .context = self };
}
/// Writer sink for `FixedWriter`; exists to match the `std.io.Writer` API.
/// Appends `m` only if it fits within the list's existing capacity;
/// never grows the allocation.
fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
    const remaining = self.capacity - self.items.len;
    if (m.len > remaining) return error.OutOfMemory;
    self.appendSliceAssumeCapacity(m);
    return m.len;
}
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.

View File

@@ -1,5 +1,4 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const RingBuffer = std.RingBuffer;
const types = @import("zstandard/types.zig");
@@ -8,32 +7,41 @@ pub const compressed_block = types.compressed_block;
pub const decompress = @import("zstandard/decompress.zig");
pub const DecompressStreamOptions = struct {
pub const DecompressorOptions = struct {
verify_checksum: bool = true,
window_size_max: usize = 1 << 23, // 8MiB default maximum window size
window_buffer: []u8,
/// Recommended amount by the standard. Lower than this may result
/// in inability to decompress common streams.
pub const default_window_buffer_len = 8 * 1024 * 1024;
};
pub fn DecompressStream(
comptime ReaderType: type,
comptime options: DecompressStreamOptions,
) type {
pub fn Decompressor(comptime ReaderType: type) type {
return struct {
const Self = @This();
allocator: Allocator,
const table_size_max = types.compressed_block.table_size_max;
source: std.io.CountingReader(ReaderType),
state: enum { NewFrame, InFrame, LastBlock },
decode_state: decompress.block.DecodeState,
frame_context: decompress.FrameContext,
buffer: RingBuffer,
literal_fse_buffer: []types.compressed_block.Table.Fse,
match_fse_buffer: []types.compressed_block.Table.Fse,
offset_fse_buffer: []types.compressed_block.Table.Fse,
literals_buffer: []u8,
sequence_buffer: []u8,
checksum: if (options.verify_checksum) ?u32 else void,
buffer: WindowBuffer,
literal_fse_buffer: [table_size_max.literal]types.compressed_block.Table.Fse,
match_fse_buffer: [table_size_max.match]types.compressed_block.Table.Fse,
offset_fse_buffer: [table_size_max.offset]types.compressed_block.Table.Fse,
literals_buffer: [types.block_size_max]u8,
sequence_buffer: [types.block_size_max]u8,
verify_checksum: bool,
checksum: ?u32,
current_frame_decompressed_size: usize,
const WindowBuffer = struct {
data: []u8 = undefined,
read_index: usize = 0,
write_index: usize = 0,
};
pub const Error = ReaderType.Error || error{
ChecksumFailure,
DictionaryIdFlagUnsupported,
@@ -44,19 +52,19 @@ pub fn DecompressStream(
pub const Reader = std.io.Reader(*Self, Error, read);
pub fn init(allocator: Allocator, source: ReaderType) Self {
return Self{
.allocator = allocator,
pub fn init(source: ReaderType, options: DecompressorOptions) Self {
return .{
.source = std.io.countingReader(source),
.state = .NewFrame,
.decode_state = undefined,
.frame_context = undefined,
.buffer = undefined,
.buffer = .{ .data = options.window_buffer },
.literal_fse_buffer = undefined,
.match_fse_buffer = undefined,
.offset_fse_buffer = undefined,
.literals_buffer = undefined,
.sequence_buffer = undefined,
.verify_checksum = options.verify_checksum,
.checksum = undefined,
.current_frame_decompressed_size = undefined,
};
@@ -72,53 +80,20 @@ pub fn DecompressStream(
.zstandard => |header| {
const frame_context = try decompress.FrameContext.init(
header,
options.window_size_max,
options.verify_checksum,
self.buffer.data.len,
self.verify_checksum,
);
const literal_fse_buffer = try self.allocator.alloc(
types.compressed_block.Table.Fse,
types.compressed_block.table_size_max.literal,
);
errdefer self.allocator.free(literal_fse_buffer);
const match_fse_buffer = try self.allocator.alloc(
types.compressed_block.Table.Fse,
types.compressed_block.table_size_max.match,
);
errdefer self.allocator.free(match_fse_buffer);
const offset_fse_buffer = try self.allocator.alloc(
types.compressed_block.Table.Fse,
types.compressed_block.table_size_max.offset,
);
errdefer self.allocator.free(offset_fse_buffer);
const decode_state = decompress.block.DecodeState.init(
literal_fse_buffer,
match_fse_buffer,
offset_fse_buffer,
&self.literal_fse_buffer,
&self.match_fse_buffer,
&self.offset_fse_buffer,
);
const buffer = try RingBuffer.init(self.allocator, frame_context.window_size);
const literals_data = try self.allocator.alloc(u8, options.window_size_max);
errdefer self.allocator.free(literals_data);
const sequence_data = try self.allocator.alloc(u8, options.window_size_max);
errdefer self.allocator.free(sequence_data);
self.literal_fse_buffer = literal_fse_buffer;
self.match_fse_buffer = match_fse_buffer;
self.offset_fse_buffer = offset_fse_buffer;
self.literals_buffer = literals_data;
self.sequence_buffer = sequence_data;
self.buffer = buffer;
self.decode_state = decode_state;
self.frame_context = frame_context;
self.checksum = if (options.verify_checksum) null else {};
self.checksum = null;
self.current_frame_decompressed_size = 0;
self.state = .InFrame;
@@ -126,16 +101,6 @@ pub fn DecompressStream(
}
}
pub fn deinit(self: *Self) void {
if (self.state == .NewFrame) return;
self.allocator.free(self.decode_state.literal_fse_buffer);
self.allocator.free(self.decode_state.match_fse_buffer);
self.allocator.free(self.decode_state.offset_fse_buffer);
self.allocator.free(self.literals_buffer);
self.allocator.free(self.sequence_buffer);
self.buffer.deinit(self.allocator);
}
pub fn reader(self: *Self) Reader {
return .{ .context = self };
}
@@ -153,7 +118,6 @@ pub fn DecompressStream(
0
else
error.MalformedFrame,
error.OutOfMemory => return error.OutOfMemory,
else => return error.MalformedFrame,
};
}
@@ -165,20 +129,30 @@ pub fn DecompressStream(
fn readInner(self: *Self, buffer: []u8) Error!usize {
std.debug.assert(self.state != .NewFrame);
var ring_buffer = RingBuffer{
.data = self.buffer.data,
.read_index = self.buffer.read_index,
.write_index = self.buffer.write_index,
};
defer {
self.buffer.read_index = ring_buffer.read_index;
self.buffer.write_index = ring_buffer.write_index;
}
const source_reader = self.source.reader();
while (self.buffer.isEmpty() and self.state != .LastBlock) {
while (ring_buffer.isEmpty() and self.state != .LastBlock) {
const header_bytes = source_reader.readBytesNoEof(3) catch
return error.MalformedFrame;
const block_header = decompress.block.decodeBlockHeader(&header_bytes);
decompress.block.decodeBlockReader(
&self.buffer,
&ring_buffer,
source_reader,
block_header,
&self.decode_state,
self.frame_context.block_size_max,
self.literals_buffer,
self.sequence_buffer,
&self.literals_buffer,
&self.sequence_buffer,
) catch
return error.MalformedBlock;
@@ -186,12 +160,12 @@ pub fn DecompressStream(
if (self.current_frame_decompressed_size > size) return error.MalformedFrame;
}
const size = self.buffer.len();
const size = ring_buffer.len();
self.current_frame_decompressed_size += size;
if (self.frame_context.hasher_opt) |*hasher| {
if (size > 0) {
const written_slice = self.buffer.sliceLast(size);
const written_slice = ring_buffer.sliceLast(size);
hasher.update(written_slice.first);
hasher.update(written_slice.second);
}
@@ -201,7 +175,7 @@ pub fn DecompressStream(
if (self.frame_context.has_checksum) {
const checksum = source_reader.readInt(u32, .little) catch
return error.MalformedFrame;
if (comptime options.verify_checksum) {
if (self.verify_checksum) {
if (self.frame_context.hasher_opt) |*hasher| {
if (checksum != decompress.computeChecksum(hasher))
return error.ChecksumFailure;
@@ -216,43 +190,28 @@ pub fn DecompressStream(
}
}
const size = @min(self.buffer.len(), buffer.len);
const size = @min(ring_buffer.len(), buffer.len);
if (size > 0) {
self.buffer.readFirstAssumeLength(buffer, size);
ring_buffer.readFirstAssumeLength(buffer, size);
}
if (self.state == .LastBlock and self.buffer.len() == 0) {
if (self.state == .LastBlock and ring_buffer.len() == 0) {
self.state = .NewFrame;
self.allocator.free(self.literal_fse_buffer);
self.allocator.free(self.match_fse_buffer);
self.allocator.free(self.offset_fse_buffer);
self.allocator.free(self.literals_buffer);
self.allocator.free(self.sequence_buffer);
self.buffer.deinit(self.allocator);
}
return size;
}
};
}
pub fn decompressStreamOptions(
allocator: Allocator,
reader: anytype,
comptime options: DecompressStreamOptions,
) DecompressStream(@TypeOf(reader, options)) {
return DecompressStream(@TypeOf(reader), options).init(allocator, reader);
}
pub fn decompressStream(
allocator: Allocator,
reader: anytype,
) DecompressStream(@TypeOf(reader), .{}) {
return DecompressStream(@TypeOf(reader), .{}).init(allocator, reader);
/// Creates a zstandard `Decompressor` that reads compressed data from `reader`.
/// `options.window_buffer` is stored by reference and must remain valid for
/// the lifetime of the returned decompressor.
pub fn decompressor(reader: anytype, options: DecompressorOptions) Decompressor(@TypeOf(reader)) {
    return Decompressor(@TypeOf(reader)).init(reader, options);
}
fn testDecompress(data: []const u8) ![]u8 {
const window_buffer = try std.testing.allocator.alloc(u8, 1 << 23);
defer std.testing.allocator.free(window_buffer);
var in_stream = std.io.fixedBufferStream(data);
var zstd_stream = decompressStream(std.testing.allocator, in_stream.reader());
defer zstd_stream.deinit();
var zstd_stream = decompressor(in_stream.reader(), .{ .window_buffer = window_buffer });
const result = zstd_stream.reader().readAllAlloc(std.testing.allocator, std.math.maxInt(usize));
return result;
}
@@ -278,38 +237,48 @@ test "zstandard decompression" {
const res19 = try decompress.decode(buffer, compressed19, true);
try std.testing.expectEqual(uncompressed.len, res19);
try std.testing.expectEqualSlices(u8, uncompressed, buffer);
}
test "zstandard streaming decompression" {
// default stack size for wasm32 is too low for Decompressor - slightly
// over 1MiB stack space is needed via the --stack CLI flag
if (@import("builtin").target.cpu.arch == .wasm32) return error.SkipZigTest;
const uncompressed = @embedFile("testdata/rfc8478.txt");
const compressed3 = @embedFile("testdata/rfc8478.txt.zst.3");
const compressed19 = @embedFile("testdata/rfc8478.txt.zst.19");
try testReader(compressed3, uncompressed);
try testReader(compressed19, uncompressed);
}
fn expectEqualDecoded(expected: []const u8, input: []const u8) !void {
const allocator = std.testing.allocator;
{
const result = try decompress.decodeAlloc(allocator, input, false, 1 << 23);
defer allocator.free(result);
const result = try decompress.decodeAlloc(std.testing.allocator, input, false, 1 << 23);
defer std.testing.allocator.free(result);
try std.testing.expectEqualStrings(expected, result);
}
{
var buffer = try allocator.alloc(u8, 2 * expected.len);
defer allocator.free(buffer);
var buffer = try std.testing.allocator.alloc(u8, 2 * expected.len);
defer std.testing.allocator.free(buffer);
const size = try decompress.decode(buffer, input, false);
try std.testing.expectEqualStrings(expected, buffer[0..size]);
}
}
{
var in_stream = std.io.fixedBufferStream(input);
var stream = decompressStream(allocator, in_stream.reader());
defer stream.deinit();
fn expectEqualDecodedStreaming(expected: []const u8, input: []const u8) !void {
const window_buffer = try std.testing.allocator.alloc(u8, 1 << 23);
defer std.testing.allocator.free(window_buffer);
const result = try stream.reader().readAllAlloc(allocator, std.math.maxInt(usize));
defer allocator.free(result);
var in_stream = std.io.fixedBufferStream(input);
var stream = decompressor(in_stream.reader(), .{ .window_buffer = window_buffer });
try std.testing.expectEqualStrings(expected, result);
}
const result = try stream.reader().readAllAlloc(std.testing.allocator, std.math.maxInt(usize));
defer std.testing.allocator.free(result);
try std.testing.expectEqualStrings(expected, result);
}
test "zero sized block" {
@@ -327,3 +296,23 @@ test "zero sized block" {
try expectEqualDecoded("", input_raw);
try expectEqualDecoded("", input_rle);
}
test "zero sized block streaming" {
// default stack size for wasm32 is too low for Decompressor - slightly
// over 1MiB stack space is needed via the --stack CLI flag
if (@import("builtin").target.cpu.arch == .wasm32) return error.SkipZigTest;
const input_raw =
"\x28\xb5\x2f\xfd" ++ // zstandard frame magic number
"\x20\x00" ++ // frame header: only single_segment_flag set, frame_content_size zero
"\x01\x00\x00"; // block header with: last_block set, block_type raw, block_size zero
const input_rle =
"\x28\xb5\x2f\xfd" ++ // zstandard frame magic number
"\x20\x00" ++ // frame header: only single_segment_flag set, frame_content_size zero
"\x03\x00\x00" ++ // block header with: last_block set, block_type rle, block_size zero
"\xaa"; // block_content
try expectEqualDecodedStreaming("", input_raw);
try expectEqualDecodedStreaming("", input_rle);
}

View File

@@ -409,7 +409,7 @@ pub const FrameContext = struct {
.hasher_opt = if (should_compute_checksum) std.hash.XxHash64.init(0) else null,
.window_size = window_size,
.has_checksum = frame_header.descriptor.content_checksum_flag,
.block_size_max = @min(1 << 17, window_size),
.block_size_max = @min(types.block_size_max, window_size),
.content_size = content_size,
};
}

View File

@@ -1,3 +1,5 @@
pub const block_size_max = 1 << 17;
pub const frame = struct {
pub const Kind = enum { zstandard, skippable };
@@ -391,7 +393,7 @@ pub const compressed_block = struct {
pub const table_size_max = struct {
pub const literal = 1 << table_accuracy_log_max.literal;
pub const match = 1 << table_accuracy_log_max.match;
pub const offset = 1 << table_accuracy_log_max.match;
pub const offset = 1 << table_accuracy_log_max.offset;
};
};

View File

@@ -1,12 +1,9 @@
const std = @import("std.zig");
pub const Client = @import("http/Client.zig");
pub const Server = @import("http/Server.zig");
pub const protocol = @import("http/protocol.zig");
const headers = @import("http/Headers.zig");
pub const Headers = headers.Headers;
pub const Field = headers.Field;
pub const HeadParser = @import("http/HeadParser.zig");
pub const ChunkParser = @import("http/ChunkParser.zig");
pub const HeaderIterator = @import("http/HeaderIterator.zig");
pub const Version = enum {
@"HTTP/1.0",
@@ -18,7 +15,7 @@ pub const Version = enum {
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4 Initial definition
///
/// https://datatracker.ietf.org/doc/html/rfc5789#section-2 PATCH
pub const Method = enum(u64) { // TODO: should be u192 or u256, but neither is supported by the C backend, and therefore cannot pass CI
pub const Method = enum(u64) {
GET = parse("GET"),
HEAD = parse("HEAD"),
POST = parse("POST"),
@@ -46,10 +43,6 @@ pub const Method = enum(u64) { // TODO: should be u192 or u256, but neither is s
try w.writeAll(str);
}
pub fn format(value: Method, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void {
return try value.write(writer);
}
/// Returns true if a request of this method is allowed to have a body
/// Actual behavior from servers may vary and should still be checked
pub fn requestHasBody(self: Method) bool {
@@ -309,9 +302,22 @@ pub const Connection = enum {
close,
};
pub const Header = struct {
name: []const u8,
value: []const u8,
};
const builtin = @import("builtin");
const std = @import("std.zig");
test {
_ = Client;
_ = Method;
_ = Server;
_ = Status;
_ = HeadParser;
_ = ChunkParser;
if (builtin.os.tag != .wasi) {
_ = @import("http/test.zig");
}
}

View File

@@ -0,0 +1,131 @@
//! Parser for transfer-encoding: chunked.

/// Current position in the chunk-framing state machine.
state: State,
/// Chunk size accumulated so far, in bytes; meaningful once `state` is `.data`.
chunk_len: u64,

/// A parser ready to read the first chunk-size line.
pub const init: ChunkParser = .{
    .state = .head_size,
    .chunk_len = 0,
};

pub const State = enum {
    /// Reading the hexadecimal chunk-size digits.
    head_size,
    /// Reading a chunk extension (bytes after the size, before the line end).
    head_ext,
    /// Saw '\r' in the head; expecting '\n'.
    head_r,
    /// Head complete; the next `chunk_len` bytes are chunk data.
    data,
    /// Reading the line terminator that follows chunk data.
    data_suffix,
    /// Saw '\r' in the data suffix; expecting '\n'.
    data_suffix_r,
    /// Syntax error in the input; parser must be re-initialized before reuse.
    invalid,
};
/// Returns the number of bytes consumed by the chunk size. This is always
/// less than or equal to `bytes.len`.
///
/// After this function returns, `chunk_len` will contain the parsed chunk size
/// in bytes when `state` is `data`. Alternately, `state` may become `invalid`,
/// indicating a syntax error in the input stream.
///
/// If the amount returned is less than `bytes.len`, the parser is in the
/// `data` state and the first byte of the chunk is at `bytes[result]`.
///
/// Asserts `state` is neither `data` nor `invalid`.
pub fn feed(p: *ChunkParser, bytes: []const u8) usize {
    for (bytes, 0..) |c, i| switch (p.state) {
        .data_suffix => switch (c) {
            '\r' => p.state = .data_suffix_r,
            '\n' => p.state = .head_size,
            else => {
                p.state = .invalid;
                return i;
            },
        },
        .data_suffix_r => switch (c) {
            '\n' => p.state = .head_size,
            else => {
                p.state = .invalid;
                return i;
            },
        },
        .head_size => {
            const digit = switch (c) {
                // Only valid hexadecimal digits contribute to the size.
                // Previously 'A'...'Z'/'a'...'z' were accepted, silently
                // treating letters g-z as digit values 16-35; those bytes
                // now fall through to the extension state like any other
                // non-digit, non-terminator byte.
                '0'...'9' => |b| b - '0',
                'A'...'F' => |b| b - 'A' + 10,
                'a'...'f' => |b| b - 'a' + 10,
                '\r' => {
                    p.state = .head_r;
                    continue;
                },
                '\n' => {
                    p.state = .data;
                    return i + 1;
                },
                else => {
                    p.state = .head_ext;
                    continue;
                },
            };
            // Wrapping arithmetic + monotonicity check detects u64 overflow
            // of the chunk size (a nonzero size must strictly grow when a
            // digit is appended).
            const new_len = p.chunk_len *% 16 +% digit;
            if (new_len <= p.chunk_len and p.chunk_len != 0) {
                p.state = .invalid;
                return i;
            }
            p.chunk_len = new_len;
        },
        .head_ext => switch (c) {
            '\r' => p.state = .head_r,
            '\n' => {
                p.state = .data;
                return i + 1;
            },
            else => continue,
        },
        .head_r => switch (c) {
            '\n' => {
                p.state = .data;
                return i + 1;
            },
            else => {
                p.state = .invalid;
                return i;
            },
        },
        .data => unreachable,
        .invalid => unreachable,
    };
    return bytes.len;
}
const ChunkParser = @This();
const std = @import("std");
test feed {
    const testing = std.testing;
    // Four chunk heads back to back: "Ff\r\n", "f0f000 ; ext\n" (with an
    // extension), "0\r\n", and a 40-digit size that must trip the u64
    // overflow check.
    const data = "Ff\r\nf0f000 ; ext\n0\r\nffffffffffffffffffffffffffffffffffffffff\r\n";

    var p = init;
    const first = p.feed(data[0..]);
    try testing.expectEqual(@as(u32, 4), first);
    try testing.expectEqual(@as(u64, 0xff), p.chunk_len);
    try testing.expectEqual(.data, p.state);

    // Extensions after the size are skipped; a bare '\n' ends the head.
    p = init;
    const second = p.feed(data[first..]);
    try testing.expectEqual(@as(u32, 13), second);
    try testing.expectEqual(@as(u64, 0xf0f000), p.chunk_len);
    try testing.expectEqual(.data, p.state);

    p = init;
    const third = p.feed(data[first + second ..]);
    try testing.expectEqual(@as(u32, 3), third);
    try testing.expectEqual(@as(u64, 0), p.chunk_len);
    try testing.expectEqual(.data, p.state);

    // The 17th 'f' would wrap u64; the parser stops there with chunk_len
    // still at the 16-digit value and state set to invalid.
    p = init;
    const fourth = p.feed(data[first + second + third ..]);
    try testing.expectEqual(@as(u32, 16), fourth);
    try testing.expectEqual(@as(u64, 0xffffffffffffffff), p.chunk_len);
    try testing.expectEqual(.invalid, p.state);
}

File diff suppressed because it is too large Load Diff

371
lib/std/http/HeadParser.zig Normal file
View File

@@ -0,0 +1,371 @@
//! Finds the end of an HTTP head in a stream.
state: State = .start,
pub const State = enum {
start,
seen_n,
seen_r,
seen_rn,
seen_rnr,
finished,
};
/// Returns the number of bytes consumed by headers. This is always less
/// than or equal to `bytes.len`.
///
/// If the amount returned is less than `bytes.len`, the parser is in a
/// content state and the first byte of content is located at
/// `bytes[result]`.
pub fn feed(p: *HeadParser, bytes: []const u8) usize {
const vector_len: comptime_int = @max(std.simd.suggestVectorLength(u8) orelse 1, 8);
var index: usize = 0;
while (true) {
switch (p.state) {
.finished => return index,
.start => switch (bytes.len - index) {
0 => return index,
1 => {
switch (bytes[index]) {
'\r' => p.state = .seen_r,
'\n' => p.state = .seen_n,
else => {},
}
return index + 1;
},
2 => {
const b16 = int16(bytes[index..][0..2]);
const b8 = intShift(u8, b16);
switch (b8) {
'\r' => p.state = .seen_r,
'\n' => p.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => p.state = .seen_rn,
int16("\n\n") => p.state = .finished,
else => {},
}
return index + 2;
},
3 => {
const b24 = int24(bytes[index..][0..3]);
const b16 = intShift(u16, b24);
const b8 = intShift(u8, b24);
switch (b8) {
'\r' => p.state = .seen_r,
'\n' => p.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => p.state = .seen_rn,
int16("\n\n") => p.state = .finished,
else => {},
}
switch (b24) {
int24("\r\n\r") => p.state = .seen_rnr,
else => {},
}
return index + 3;
},
4...vector_len - 1 => {
const b32 = int32(bytes[index..][0..4]);
const b24 = intShift(u24, b32);
const b16 = intShift(u16, b32);
const b8 = intShift(u8, b32);
switch (b8) {
'\r' => p.state = .seen_r,
'\n' => p.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => p.state = .seen_rn,
int16("\n\n") => p.state = .finished,
else => {},
}
switch (b24) {
int24("\r\n\r") => p.state = .seen_rnr,
else => {},
}
switch (b32) {
int32("\r\n\r\n") => p.state = .finished,
else => {},
}
index += 4;
continue;
},
else => {
const chunk = bytes[index..][0..vector_len];
const matches = if (use_vectors) matches: {
const Vector = @Vector(vector_len, u8);
// const BoolVector = @Vector(vector_len, bool);
const BitVector = @Vector(vector_len, u1);
const SizeVector = @Vector(vector_len, u8);
const v: Vector = chunk.*;
const matches_r: BitVector = @bitCast(v == @as(Vector, @splat('\r')));
const matches_n: BitVector = @bitCast(v == @as(Vector, @splat('\n')));
const matches_or: SizeVector = matches_r | matches_n;
break :matches @reduce(.Add, matches_or);
} else matches: {
var matches: u8 = 0;
for (chunk) |byte| switch (byte) {
'\r', '\n' => matches += 1,
else => {},
};
break :matches matches;
};
switch (matches) {
0 => {},
1 => switch (chunk[vector_len - 1]) {
'\r' => p.state = .seen_r,
'\n' => p.state = .seen_n,
else => {},
},
2 => {
const b16 = int16(chunk[vector_len - 2 ..][0..2]);
const b8 = intShift(u8, b16);
switch (b8) {
'\r' => p.state = .seen_r,
'\n' => p.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => p.state = .seen_rn,
int16("\n\n") => p.state = .finished,
else => {},
}
},
3 => {
const b24 = int24(chunk[vector_len - 3 ..][0..3]);
const b16 = intShift(u16, b24);
const b8 = intShift(u8, b24);
switch (b8) {
'\r' => p.state = .seen_r,
'\n' => p.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => p.state = .seen_rn,
int16("\n\n") => p.state = .finished,
else => {},
}
switch (b24) {
int24("\r\n\r") => p.state = .seen_rnr,
else => {},
}
},
4...vector_len => {
inline for (0..vector_len - 3) |i_usize| {
const i = @as(u32, @truncate(i_usize));
const b32 = int32(chunk[i..][0..4]);
const b16 = intShift(u16, b32);
if (b32 == int32("\r\n\r\n")) {
p.state = .finished;
return index + i + 4;
} else if (b16 == int16("\n\n")) {
p.state = .finished;
return index + i + 2;
}
}
const b24 = int24(chunk[vector_len - 3 ..][0..3]);
const b16 = intShift(u16, b24);
const b8 = intShift(u8, b24);
switch (b8) {
'\r' => p.state = .seen_r,
'\n' => p.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => p.state = .seen_rn,
int16("\n\n") => p.state = .finished,
else => {},
}
switch (b24) {
int24("\r\n\r") => p.state = .seen_rnr,
else => {},
}
},
else => unreachable,
}
index += vector_len;
continue;
},
},
.seen_n => switch (bytes.len - index) {
0 => return index,
else => {
switch (bytes[index]) {
'\n' => p.state = .finished,
else => p.state = .start,
}
index += 1;
continue;
},
},
.seen_r => switch (bytes.len - index) {
0 => return index,
1 => {
switch (bytes[index]) {
'\n' => p.state = .seen_rn,
'\r' => p.state = .seen_r,
else => p.state = .start,
}
return index + 1;
},
2 => {
const b16 = int16(bytes[index..][0..2]);
const b8 = intShift(u8, b16);
switch (b8) {
'\r' => p.state = .seen_r,
'\n' => p.state = .seen_rn,
else => p.state = .start,
}
switch (b16) {
int16("\r\n") => p.state = .seen_rn,
int16("\n\r") => p.state = .seen_rnr,
int16("\n\n") => p.state = .finished,
else => {},
}
return index + 2;
},
else => {
const b24 = int24(bytes[index..][0..3]);
const b16 = intShift(u16, b24);
const b8 = intShift(u8, b24);
switch (b8) {
'\r' => p.state = .seen_r,
'\n' => p.state = .seen_n,
else => p.state = .start,
}
switch (b16) {
int16("\r\n") => p.state = .seen_rn,
int16("\n\n") => p.state = .finished,
else => {},
}
switch (b24) {
int24("\n\r\n") => p.state = .finished,
else => {},
}
index += 3;
continue;
},
},
.seen_rn => switch (bytes.len - index) {
0 => return index,
1 => {
switch (bytes[index]) {
'\r' => p.state = .seen_rnr,
'\n' => p.state = .seen_n,
else => p.state = .start,
}
return index + 1;
},
else => {
const b16 = int16(bytes[index..][0..2]);
const b8 = intShift(u8, b16);
switch (b8) {
'\r' => p.state = .seen_rnr,
'\n' => p.state = .seen_n,
else => p.state = .start,
}
switch (b16) {
int16("\r\n") => p.state = .finished,
int16("\n\n") => p.state = .finished,
else => {},
}
index += 2;
continue;
},
},
.seen_rnr => switch (bytes.len - index) {
0 => return index,
else => {
switch (bytes[index]) {
'\n' => p.state = .finished,
else => p.state = .start,
}
index += 1;
continue;
},
},
}
return index;
}
}
/// Loads two bytes as a native-endian `u16`.
inline fn int16(array: *const [2]u8) u16 {
    const bytes: [2]u8 = array.*;
    return @bitCast(bytes);
}
/// Loads three bytes as a native-endian `u24`.
inline fn int24(array: *const [3]u8) u24 {
    const bytes: [3]u8 = array.*;
    return @bitCast(bytes);
}
/// Loads four bytes as a native-endian `u32`.
inline fn int32(array: *const [4]u8) u32 {
    const bytes: [4]u8 = array.*;
    return @bitCast(bytes);
}
/// Given an integer loaded via `int16`/`int24`/`int32`, returns the integer
/// that would have been loaded from the *trailing* `@sizeOf(T)` bytes of the
/// original array, independent of host endianness.
inline fn intShift(comptime T: type, x: anytype) T {
    const dropped_bits = @bitSizeOf(@TypeOf(x)) - @bitSizeOf(T);
    return switch (@import("builtin").cpu.arch.endian()) {
        // Little-endian: later bytes live in the high bits, so shift them down.
        .little => @truncate(x >> dropped_bits),
        // Big-endian: later bytes already occupy the low bits.
        .big => @truncate(x),
    };
}
const HeadParser = @This();
const std = @import("std");
const use_vectors = builtin.zig_backend != .stage2_x86_64;
const builtin = @import("builtin");
test feed {
    const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\nHello";
    // Feed the head split at every possible boundary. The head proper is the
    // first 35 bytes ("Hello" is body), so the two feeds together must always
    // consume exactly 35 bytes regardless of where the split falls.
    var split: usize = 0;
    while (split < 36) : (split += 1) {
        var parser: HeadParser = .{};
        try std.testing.expectEqual(split, parser.feed(data[0..split]));
        try std.testing.expectEqual(35 - split, parser.feed(data[split..]));
    }
}

View File

@@ -0,0 +1,62 @@
/// The complete HTTP head to iterate over; returned names/values alias this.
bytes: []const u8,
/// Byte offset into `bytes` of the next line to parse.
index: usize,
/// False while iterating header fields; set to true once iteration crosses
/// the blank line and moves on to trailer fields.
is_trailer: bool,
/// Begins iterating the header fields of `bytes`, skipping the first line
/// (request/status line). Asserts `bytes` contains at least one "\r\n".
pub fn init(bytes: []const u8) HeaderIterator {
    const first_line_end = std.mem.indexOfPosLinear(u8, bytes, 0, "\r\n").?;
    return .{
        .bytes = bytes,
        .index = first_line_end + 2,
        .is_trailer = false,
    };
}
/// Returns the next header field, or null at the end of the current section.
///
/// When the blank line terminating the header section is reached, the
/// iterator switches to trailer mode: if another "\r\n"-terminated line
/// follows, `is_trailer` is set and that line is parsed and returned as the
/// first trailer field; otherwise iteration ends. In trailer mode, the next
/// blank line ends iteration.
///
/// Returned `name`/`value` slices alias `it.bytes`.
/// NOTE(review): a line that splits to an empty value (no ": " payload) is
/// indistinguishable from the blank separator line here — presumably the head
/// was validated upstream so that cannot occur; confirm against callers.
pub fn next(it: *HeaderIterator) ?std.http.Header {
    // The current line spans from `it.index` to the next CRLF.
    const end = std.mem.indexOfPosLinear(u8, it.bytes, it.index, "\r\n").?;
    var kv_it = std.mem.splitSequence(u8, it.bytes[it.index..end], ": ");
    const name = kv_it.next().?;
    const value = kv_it.rest();
    if (value.len == 0) {
        // Blank line: end of headers (or end of trailers, if already there).
        if (it.is_trailer) return null;
        const next_end = std.mem.indexOfPosLinear(u8, it.bytes, end + 2, "\r\n") orelse
            return null;
        it.is_trailer = true;
        it.index = next_end + 2;
        // Parse and return the first trailer line directly.
        kv_it = std.mem.splitSequence(u8, it.bytes[end + 2 .. next_end], ": ");
        return .{
            .name = kv_it.next().?,
            .value = kv_it.rest(),
        };
    }
    it.index = end + 2;
    return .{
        .name = name,
        .value = value,
    };
}
test next {
    var it = HeaderIterator.init("200 OK\r\na: b\r\nc: d\r\n\r\ne: f\r\n\r\n");
    try std.testing.expect(!it.is_trailer);

    // The first two fields are headers; "e: f" follows the blank line and is
    // therefore a trailer.
    const expected = [_]struct {
        name: []const u8,
        value: []const u8,
        trailer: bool,
    }{
        .{ .name = "a", .value = "b", .trailer = false },
        .{ .name = "c", .value = "d", .trailer = false },
        .{ .name = "e", .value = "f", .trailer = true },
    };
    for (expected) |exp| {
        const header = it.next().?;
        try std.testing.expectEqual(exp.trailer, it.is_trailer);
        try std.testing.expectEqualStrings(exp.name, header.name);
        try std.testing.expectEqualStrings(exp.value, header.value);
    }
    try std.testing.expectEqual(null, it.next());
}
const HeaderIterator = @This();
const std = @import("../std.zig");

View File

@@ -1,527 +0,0 @@
const std = @import("../std.zig");
const Allocator = std.mem.Allocator;
const testing = std.testing;
const ascii = std.ascii;
const assert = std.debug.assert;
/// Backing storage of header fields, in insertion order.
pub const HeaderList = std.ArrayListUnmanaged(Field);
/// Indices into a `HeaderList`; tracks every occurrence of one header name.
pub const HeaderIndexList = std.ArrayListUnmanaged(usize);
/// Case-insensitive map from header name to the indices of its occurrences.
pub const HeaderIndex = std.HashMapUnmanaged([]const u8, HeaderIndexList, CaseInsensitiveStringContext, std.hash_map.default_max_load_percentage);
/// Hash-map context that hashes and compares strings case-insensitively,
/// so header names match regardless of capitalization.
pub const CaseInsensitiveStringContext = struct {
    /// Streams the lowercased string through Wyhash in 64-byte chunks, using
    /// a stack buffer so no allocation is needed.
    pub fn hash(self: @This(), s: []const u8) u64 {
        _ = self;
        var buf: [64]u8 = undefined;
        var hasher = std.hash.Wyhash.init(0);
        var remaining = s;
        while (remaining.len > 64) {
            hasher.update(ascii.lowerString(buf[0..], remaining[0..64]));
            remaining = remaining[64..];
        }
        // Final (possibly empty) chunk of at most 64 bytes.
        hasher.update(ascii.lowerString(buf[0..], remaining));
        return hasher.final();
    }

    pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
        _ = self;
        return ascii.eqlIgnoreCase(a, b);
    }
};
/// A single HTTP header field.
pub const Field = struct {
    name: []const u8,
    value: []const u8,

    /// Case-insensitive ordering by name, for use with `std.mem.sort`.
    fn lessThan(ctx: void, lhs: Field, rhs: Field) bool {
        _ = ctx;
        // Fields sharing the same name storage compare as not-less in either
        // direction, i.e. equal.
        if (lhs.name.ptr == rhs.name.ptr) return false;
        return ascii.lessThanIgnoreCase(lhs.name, rhs.name);
    }
};
/// A list of HTTP header fields.
///
/// Fields live in `list` in insertion order; `index` maps each
/// case-insensitively-unique name to the indices of its occurrences.
pub const Headers = struct {
    allocator: Allocator,
    /// All fields, in insertion order.
    list: HeaderList = .{},
    /// Case-insensitive name -> indices into `list`.
    index: HeaderIndex = .{},
    /// When this is false, names and values will not be duplicated.
    /// Use with caution.
    owned: bool = true,

    /// Initialize an empty list of headers.
    pub fn init(allocator: Allocator) Headers {
        return .{ .allocator = allocator };
    }

    /// Initialize a pre-populated list of headers from a list of fields.
    pub fn initList(allocator: Allocator, list: []const Field) !Headers {
        var new = Headers.init(allocator);
        try new.list.ensureTotalCapacity(allocator, list.len);
        try new.index.ensureTotalCapacity(allocator, @intCast(list.len));
        for (list) |field| {
            try new.append(field.name, field.value);
        }
        return new;
    }

    /// Deallocate all memory associated with the headers.
    ///
    /// If the `owned` field is false, this will not free the names and values of the headers.
    pub fn deinit(headers: *Headers) void {
        headers.deallocateIndexListsAndFields();
        headers.index.deinit(headers.allocator);
        headers.list.deinit(headers.allocator);
        headers.* = undefined;
    }

    /// Appends a header to the list.
    ///
    /// If the `owned` field is true, both name and value will be copied.
    pub fn append(headers: *Headers, name: []const u8, value: []const u8) !void {
        try headers.appendOwned(.{ .unowned = name }, .{ .unowned = value });
    }

    pub const OwnedString = union(enum) {
        /// A string allocated by the `allocator` field.
        owned: []u8,
        /// A string to be copied by the `allocator` field.
        unowned: []const u8,
    };

    /// Appends a header to the list.
    ///
    /// If the `owned` field is true, `name` and `value` will be copied if unowned.
    /// When a copy of an unowned name is stored, it is stored lowercased.
    pub fn appendOwned(headers: *Headers, name: OwnedString, value: OwnedString) !void {
        const n = headers.list.items.len;
        // Reserve list space up front so appendAssumeCapacity at the end
        // cannot fail after ownership has been transferred.
        try headers.list.ensureUnusedCapacity(headers.allocator, 1);
        const owned_value = switch (value) {
            .owned => |owned| owned,
            .unowned => |unowned| if (headers.owned)
                try headers.allocator.dupe(u8, unowned)
            else
                unowned,
        };
        // Free the duplicate only if we made it; .owned input stays with the
        // caller's transfer and unowned/non-owning input is not ours to free.
        errdefer if (value == .unowned and headers.owned) headers.allocator.free(owned_value);
        var entry = Field{ .name = undefined, .value = owned_value };
        if (headers.index.getEntry(switch (name) {
            inline else => |string| string,
        })) |kv| {
            // Name already indexed: reuse the stored key so all fields with
            // this name share one allocation; drop a transferred owned name.
            defer switch (name) {
                .owned => |owned| headers.allocator.free(owned),
                .unowned => {},
            };
            entry.name = kv.key_ptr.*;
            try kv.value_ptr.append(headers.allocator, n);
        } else {
            // First occurrence of this name: create the index entry.
            const owned_name = switch (name) {
                .owned => |owned| owned,
                .unowned => |unowned| if (headers.owned)
                    try std.ascii.allocLowerString(headers.allocator, unowned)
                else
                    unowned,
            };
            errdefer if (name == .unowned and headers.owned) headers.allocator.free(owned_name);
            entry.name = owned_name;
            var new_index = try HeaderIndexList.initCapacity(headers.allocator, 1);
            errdefer new_index.deinit(headers.allocator);
            new_index.appendAssumeCapacity(n);
            try headers.index.put(headers.allocator, owned_name, new_index);
        }
        headers.list.appendAssumeCapacity(entry);
    }

    /// Returns true if this list of headers contains the given name
    /// (case-insensitive).
    pub fn contains(headers: Headers, name: []const u8) bool {
        return headers.index.contains(name);
    }

    /// Removes all headers with the given name (case-insensitive).
    /// Returns true if anything was removed.
    pub fn delete(headers: *Headers, name: []const u8) bool {
        if (headers.index.fetchRemove(name)) |kv| {
            var index = kv.value;
            // iterate backwards: orderedRemove shifts later elements left, so
            // removing from the highest index first keeps earlier stored
            // indices valid
            var i = index.items.len;
            while (i > 0) {
                i -= 1;
                const data_index = index.items[i];
                const removed = headers.list.orderedRemove(data_index);
                assert(ascii.eqlIgnoreCase(removed.name, name)); // ensure the index hasn't been corrupted
                if (headers.owned) headers.allocator.free(removed.value);
            }
            // All removed fields shared the map key's single name allocation.
            if (headers.owned) headers.allocator.free(kv.key);
            index.deinit(headers.allocator);
            // Removal shifted every later field; re-sync the name index.
            headers.rebuildIndex();
            return true;
        } else {
            return false;
        }
    }

    /// Returns the index of the first occurrence of a header with the given name.
    pub fn firstIndexOf(headers: Headers, name: []const u8) ?usize {
        const index = headers.index.get(name) orelse return null;
        return index.items[0];
    }

    /// Returns a list of indices containing headers with the given name.
    pub fn getIndices(headers: Headers, name: []const u8) ?[]const usize {
        const index = headers.index.get(name) orelse return null;
        return index.items;
    }

    /// Returns the entry of the first occurrence of a header with the given name.
    pub fn getFirstEntry(headers: Headers, name: []const u8) ?Field {
        const first_index = headers.firstIndexOf(name) orelse return null;
        return headers.list.items[first_index];
    }

    /// Returns a slice containing each header with the given name.
    /// The caller owns the returned slice, but NOT the values in the slice.
    pub fn getEntries(headers: Headers, allocator: Allocator, name: []const u8) !?[]const Field {
        const indices = headers.getIndices(name) orelse return null;
        const buf = try allocator.alloc(Field, indices.len);
        for (indices, 0..) |idx, n| {
            buf[n] = headers.list.items[idx];
        }
        return buf;
    }

    /// Returns the value in the entry of the first occurrence of a header with the given name.
    pub fn getFirstValue(headers: Headers, name: []const u8) ?[]const u8 {
        const first_index = headers.firstIndexOf(name) orelse return null;
        return headers.list.items[first_index].value;
    }

    /// Returns a slice containing the value of each header with the given name.
    /// The caller owns the returned slice, but NOT the values in the slice.
    pub fn getValues(headers: Headers, allocator: Allocator, name: []const u8) !?[]const []const u8 {
        const indices = headers.getIndices(name) orelse return null;
        const buf = try allocator.alloc([]const u8, indices.len);
        for (indices, 0..) |idx, n| {
            buf[n] = headers.list.items[idx].value;
        }
        return buf;
    }

    /// Re-derives `index` from `list` after `list` has been reordered or
    /// compacted. Assumes every name in `list` already has an index entry.
    fn rebuildIndex(headers: *Headers) void {
        // clear out the indexes
        var it = headers.index.iterator();
        while (it.next()) |entry| {
            entry.value_ptr.shrinkRetainingCapacity(0);
        }
        // fill up indexes again; we know capacity is fine from before
        for (headers.list.items, 0..) |entry, i| {
            headers.index.getEntry(entry.name).?.value_ptr.appendAssumeCapacity(i);
        }
    }

    /// Sorts the headers in lexicographical order.
    pub fn sort(headers: *Headers) void {
        std.mem.sort(Field, headers.list.items, {}, Field.lessThan);
        headers.rebuildIndex();
    }

    /// Writes the headers to the given stream as "name: value\r\n" lines.
    /// Fields with an empty value are omitted.
    pub fn format(
        headers: Headers,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        out_stream: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        for (headers.list.items) |entry| {
            if (entry.value.len == 0) continue;
            try out_stream.writeAll(entry.name);
            try out_stream.writeAll(": ");
            try out_stream.writeAll(entry.value);
            try out_stream.writeAll("\r\n");
        }
    }

    /// Writes all of the headers with the given name to the given stream, separated by commas.
    ///
    /// This is useful for headers like `Set-Cookie` which can have multiple values. RFC 9110, Section 5.2
    pub fn formatCommaSeparated(
        headers: Headers,
        name: []const u8,
        out_stream: anytype,
    ) !void {
        // Writes nothing at all when no header with this name exists.
        const indices = headers.getIndices(name) orelse return;
        try out_stream.writeAll(name);
        try out_stream.writeAll(": ");
        for (indices, 0..) |idx, n| {
            if (n != 0) try out_stream.writeAll(", ");
            try out_stream.writeAll(headers.list.items[idx].value);
        }
        try out_stream.writeAll("\r\n");
    }

    /// Frees all `HeaderIndexList`s within `index`.
    /// Frees names and values of all fields if they are owned.
    fn deallocateIndexListsAndFields(headers: *Headers) void {
        var it = headers.index.iterator();
        while (it.next()) |entry| {
            entry.value_ptr.deinit(headers.allocator);
            // Names are freed via the map keys, since fields share them.
            if (headers.owned) headers.allocator.free(entry.key_ptr.*);
        }

        if (headers.owned) {
            for (headers.list.items) |entry| {
                headers.allocator.free(entry.value);
            }
        }
    }

    /// Clears and frees the underlying data structures.
    /// Frees names and values if they are owned.
    pub fn clearAndFree(headers: *Headers) void {
        headers.deallocateIndexListsAndFields();
        headers.index.clearAndFree(headers.allocator);
        headers.list.clearAndFree(headers.allocator);
    }

    /// Clears the underlying data structures while retaining their capacities.
    /// Frees names and values if they are owned.
    pub fn clearRetainingCapacity(headers: *Headers) void {
        headers.deallocateIndexListsAndFields();
        headers.index.clearRetainingCapacity();
        headers.list.clearRetainingCapacity();
    }

    /// Creates a copy of the headers using the provided allocator.
    /// The clone always owns its copies of names and values (`Headers.init`
    /// defaults `owned` to true), regardless of the source's `owned` flag.
    pub fn clone(headers: Headers, allocator: Allocator) !Headers {
        var new = Headers.init(allocator);

        try new.list.ensureTotalCapacity(allocator, headers.list.capacity);
        try new.index.ensureTotalCapacity(allocator, headers.index.capacity());

        for (headers.list.items) |field| {
            try new.append(field.name, field.value);
        }

        return new;
    }
};
test "Headers.append" {
    var headers = Headers{ .allocator = std.testing.allocator };
    defer headers.deinit();

    try headers.append("foo", "bar");
    try headers.append("hello", "world");

    // Lookup is case-insensitive.
    try testing.expect(headers.contains("Foo"));
    try testing.expect(!headers.contains("Bar"));
}
test "Headers.delete" {
    var headers = Headers{ .allocator = std.testing.allocator };
    defer headers.deinit();

    try headers.append("foo", "bar");
    try headers.append("hello", "world");
    try testing.expect(headers.contains("Foo"));

    // Deletion is case-insensitive as well.
    _ = headers.delete("Foo");
    try testing.expect(!headers.contains("foo"));
}
test "Headers consistency" {
    var h = Headers{ .allocator = std.testing.allocator };
    defer h.deinit();

    // Delete then re-add "foo" so the index must be rebuilt before the
    // remaining fields are appended.
    try h.append("foo", "bar");
    try h.append("hello", "world");
    _ = h.delete("Foo");
    try h.append("foo", "bar");
    try h.append("bar", "world");
    try h.append("foo", "baz");
    try h.append("baz", "hello");

    // Positions reflect insertion order after the delete.
    try testing.expectEqual(@as(?usize, 0), h.firstIndexOf("hello"));
    try testing.expectEqual(@as(?usize, 1), h.firstIndexOf("foo"));
    try testing.expectEqual(@as(?usize, 2), h.firstIndexOf("bar"));
    try testing.expectEqual(@as(?usize, 4), h.firstIndexOf("baz"));
    try testing.expectEqual(@as(?usize, null), h.firstIndexOf("pog"));

    try testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("hello").?);
    try testing.expectEqualSlices(usize, &[_]usize{ 1, 3 }, h.getIndices("foo").?);
    try testing.expectEqualSlices(usize, &[_]usize{2}, h.getIndices("bar").?);
    try testing.expectEqualSlices(usize, &[_]usize{4}, h.getIndices("baz").?);
    try testing.expectEqual(@as(?[]const usize, null), h.getIndices("pog"));

    try testing.expectEqualStrings("world", h.getFirstEntry("hello").?.value);
    try testing.expectEqualStrings("bar", h.getFirstEntry("foo").?.value);
    try testing.expectEqualStrings("world", h.getFirstEntry("bar").?.value);
    try testing.expectEqualStrings("hello", h.getFirstEntry("baz").?.value);

    // getEntries returns caller-owned slices of the matching fields.
    const hello_entries = (try h.getEntries(testing.allocator, "hello")).?;
    defer testing.allocator.free(hello_entries);
    try testing.expectEqualDeep(@as([]const Field, &[_]Field{
        .{ .name = "hello", .value = "world" },
    }), hello_entries);

    const foo_entries = (try h.getEntries(testing.allocator, "foo")).?;
    defer testing.allocator.free(foo_entries);
    try testing.expectEqualDeep(@as([]const Field, &[_]Field{
        .{ .name = "foo", .value = "bar" },
        .{ .name = "foo", .value = "baz" },
    }), foo_entries);

    const bar_entries = (try h.getEntries(testing.allocator, "bar")).?;
    defer testing.allocator.free(bar_entries);
    try testing.expectEqualDeep(@as([]const Field, &[_]Field{
        .{ .name = "bar", .value = "world" },
    }), bar_entries);

    const baz_entries = (try h.getEntries(testing.allocator, "baz")).?;
    defer testing.allocator.free(baz_entries);
    try testing.expectEqualDeep(@as([]const Field, &[_]Field{
        .{ .name = "baz", .value = "hello" },
    }), baz_entries);

    const pog_entries = (try h.getEntries(testing.allocator, "pog"));
    try testing.expectEqual(@as(?[]const Field, null), pog_entries);

    try testing.expectEqualStrings("world", h.getFirstValue("hello").?);
    try testing.expectEqualStrings("bar", h.getFirstValue("foo").?);
    try testing.expectEqualStrings("world", h.getFirstValue("bar").?);
    try testing.expectEqualStrings("hello", h.getFirstValue("baz").?);
    try testing.expectEqual(@as(?[]const u8, null), h.getFirstValue("pog"));

    // getValues returns caller-owned slices of just the values.
    const hello_values = (try h.getValues(testing.allocator, "hello")).?;
    defer testing.allocator.free(hello_values);
    try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"world"}), hello_values);

    const foo_values = (try h.getValues(testing.allocator, "foo")).?;
    defer testing.allocator.free(foo_values);
    try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{ "bar", "baz" }), foo_values);

    const bar_values = (try h.getValues(testing.allocator, "bar")).?;
    defer testing.allocator.free(bar_values);
    try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"world"}), bar_values);

    const baz_values = (try h.getValues(testing.allocator, "baz")).?;
    defer testing.allocator.free(baz_values);
    try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"hello"}), baz_values);

    const pog_values = (try h.getValues(testing.allocator, "pog"));
    try testing.expectEqual(@as(?[]const []const u8, null), pog_values);

    // Sorting reorders the fields lexicographically and rebuilds the index.
    h.sort();

    try testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("bar").?);
    try testing.expectEqualSlices(usize, &[_]usize{1}, h.getIndices("baz").?);
    try testing.expectEqualSlices(usize, &[_]usize{ 2, 3 }, h.getIndices("foo").?);
    try testing.expectEqualSlices(usize, &[_]usize{4}, h.getIndices("hello").?);

    const formatted_values = try std.fmt.allocPrint(testing.allocator, "{}", .{h});
    defer testing.allocator.free(formatted_values);

    try testing.expectEqualStrings("bar: world\r\nbaz: hello\r\nfoo: bar\r\nfoo: baz\r\nhello: world\r\n", formatted_values);

    var buf: [128]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    const writer = fbs.writer();

    try h.formatCommaSeparated("foo", writer);
    try testing.expectEqualStrings("foo: bar, baz\r\n", fbs.getWritten());
}
test "Headers.clearRetainingCapacity and clearAndFree" {
    var h = Headers.init(std.testing.allocator);
    defer h.deinit();

    // Clearing an empty Headers must be a no-op.
    h.clearRetainingCapacity();

    try h.append("foo", "bar");
    try h.append("bar", "world");
    try h.append("foo", "baz");
    try h.append("baz", "hello");
    try testing.expectEqual(@as(usize, 4), h.list.items.len);
    // 3 distinct names: "foo" occurs twice.
    try testing.expectEqual(@as(usize, 3), h.index.count());
    const list_capacity = h.list.capacity;
    const index_capacity = h.index.capacity();

    // clearRetainingCapacity empties the containers but keeps their storage.
    h.clearRetainingCapacity();
    try testing.expectEqual(@as(usize, 0), h.list.items.len);
    try testing.expectEqual(@as(usize, 0), h.index.count());
    try testing.expectEqual(list_capacity, h.list.capacity);
    try testing.expectEqual(index_capacity, h.index.capacity());

    try h.append("foo", "bar");
    try h.append("bar", "world");
    try h.append("foo", "baz");
    try h.append("baz", "hello");
    try testing.expectEqual(@as(usize, 4), h.list.items.len);
    try testing.expectEqual(@as(usize, 3), h.index.count());
    // Capacity should still be the same since we shouldn't have needed to grow
    // when adding back the same fields
    try testing.expectEqual(list_capacity, h.list.capacity);
    try testing.expectEqual(index_capacity, h.index.capacity());

    // clearAndFree releases the storage entirely.
    h.clearAndFree();
    try testing.expectEqual(@as(usize, 0), h.list.items.len);
    try testing.expectEqual(@as(usize, 0), h.index.count());
    try testing.expectEqual(@as(usize, 0), h.list.capacity);
    try testing.expectEqual(@as(usize, 0), h.index.capacity());
}
test "Headers.initList" {
    var headers = try Headers.initList(std.testing.allocator, &.{
        .{ .name = "Accept-Encoding", .value = "gzip" },
        .{ .name = "Authorization", .value = "it's over 9000!" },
    });
    defer headers.deinit();

    // Each listed field must be retrievable by its (case-preserved) name.
    const encoding_values = (try headers.getValues(testing.allocator, "Accept-Encoding")).?;
    defer testing.allocator.free(encoding_values);
    try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"gzip"}), encoding_values);

    const authorization_values = (try headers.getValues(testing.allocator, "Authorization")).?;
    defer testing.allocator.free(authorization_values);
    try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"it's over 9000!"}), authorization_values);
}

File diff suppressed because it is too large Load Diff

View File

@@ -7,15 +7,19 @@ const assert = std.debug.assert;
const use_vectors = builtin.zig_backend != .stage2_x86_64;
pub const State = enum {
/// Begin header parsing states.
invalid,
// Begin header and trailer parsing states.
start,
seen_n,
seen_r,
seen_rn,
seen_rnr,
finished,
/// Begin transfer-encoding: chunked parsing states.
// Begin transfer-encoding: chunked parsing states.
chunk_head_size,
chunk_head_ext,
chunk_head_r,
@@ -34,484 +38,114 @@ pub const State = enum {
pub const HeadersParser = struct {
state: State = .start,
/// Whether or not `header_bytes` is allocated or was provided as a fixed buffer.
header_bytes_owned: bool,
/// Either a fixed buffer of len `max_header_bytes` or a dynamic buffer that can grow up to `max_header_bytes`.
/// A fixed buffer of len `max_header_bytes`.
/// Pointers into this buffer are not stable until after a message is complete.
header_bytes: std.ArrayListUnmanaged(u8),
/// The maximum allowed size of `header_bytes`.
max_header_bytes: usize,
next_chunk_length: u64 = 0,
/// Whether this parser is done parsing a complete message.
/// A message is only done when the entire payload has been read.
done: bool = false,
/// Initializes the parser with a dynamically growing header buffer of up to `max` bytes.
pub fn initDynamic(max: usize) HeadersParser {
return .{
.header_bytes = .{},
.max_header_bytes = max,
.header_bytes_owned = true,
};
}
header_bytes_buffer: []u8,
header_bytes_len: u32,
next_chunk_length: u64,
/// `false`: headers. `true`: trailers.
done: bool,
/// Initializes the parser with a provided buffer `buf`.
pub fn initStatic(buf: []u8) HeadersParser {
pub fn init(buf: []u8) HeadersParser {
return .{
.header_bytes = .{ .items = buf[0..0], .capacity = buf.len },
.max_header_bytes = buf.len,
.header_bytes_owned = false,
.header_bytes_buffer = buf,
.header_bytes_len = 0,
.done = false,
.next_chunk_length = 0,
};
}
/// Completely resets the parser to it's initial state.
/// This must be called after a message is complete.
pub fn reset(r: *HeadersParser) void {
assert(r.done); // The message must be completely read before reset, otherwise the parser is in an invalid state.
r.header_bytes.clearRetainingCapacity();
r.* = .{
.header_bytes = r.header_bytes,
.max_header_bytes = r.max_header_bytes,
.header_bytes_owned = r.header_bytes_owned,
/// Reinitialize the parser.
/// Asserts the parser is in the "done" state.
pub fn reset(hp: *HeadersParser) void {
assert(hp.done);
hp.* = .{
.state = .start,
.header_bytes_buffer = hp.header_bytes_buffer,
.header_bytes_len = 0,
.done = false,
.next_chunk_length = 0,
};
}
/// Returns the number of bytes consumed by headers. This is always less than or equal to `bytes.len`.
/// You should check `r.state.isContent()` after this to check if the headers are done.
///
/// If the amount returned is less than `bytes.len`, you may assume that the parser is in a content state and the
/// first byte of content is located at `bytes[result]`.
pub fn get(hp: HeadersParser) []u8 {
return hp.header_bytes_buffer[0..hp.header_bytes_len];
}
pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 {
const vector_len: comptime_int = @max(std.simd.suggestVectorLength(u8) orelse 1, 8);
const len: u32 = @intCast(bytes.len);
var index: u32 = 0;
while (true) {
switch (r.state) {
.invalid => unreachable,
.finished => return index,
.start => switch (len - index) {
0 => return index,
1 => {
switch (bytes[index]) {
'\r' => r.state = .seen_r,
'\n' => r.state = .seen_n,
else => {},
}
return index + 1;
},
2 => {
const b16 = int16(bytes[index..][0..2]);
const b8 = intShift(u8, b16);
switch (b8) {
'\r' => r.state = .seen_r,
'\n' => r.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => r.state = .seen_rn,
int16("\n\n") => r.state = .finished,
else => {},
}
return index + 2;
},
3 => {
const b24 = int24(bytes[index..][0..3]);
const b16 = intShift(u16, b24);
const b8 = intShift(u8, b24);
switch (b8) {
'\r' => r.state = .seen_r,
'\n' => r.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => r.state = .seen_rn,
int16("\n\n") => r.state = .finished,
else => {},
}
switch (b24) {
int24("\r\n\r") => r.state = .seen_rnr,
else => {},
}
return index + 3;
},
4...vector_len - 1 => {
const b32 = int32(bytes[index..][0..4]);
const b24 = intShift(u24, b32);
const b16 = intShift(u16, b32);
const b8 = intShift(u8, b32);
switch (b8) {
'\r' => r.state = .seen_r,
'\n' => r.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => r.state = .seen_rn,
int16("\n\n") => r.state = .finished,
else => {},
}
switch (b24) {
int24("\r\n\r") => r.state = .seen_rnr,
else => {},
}
switch (b32) {
int32("\r\n\r\n") => r.state = .finished,
else => {},
}
index += 4;
continue;
},
else => {
const chunk = bytes[index..][0..vector_len];
const matches = if (use_vectors) matches: {
const Vector = @Vector(vector_len, u8);
// const BoolVector = @Vector(vector_len, bool);
const BitVector = @Vector(vector_len, u1);
const SizeVector = @Vector(vector_len, u8);
const v: Vector = chunk.*;
const matches_r: BitVector = @bitCast(v == @as(Vector, @splat('\r')));
const matches_n: BitVector = @bitCast(v == @as(Vector, @splat('\n')));
const matches_or: SizeVector = matches_r | matches_n;
break :matches @reduce(.Add, matches_or);
} else matches: {
var matches: u8 = 0;
for (chunk) |byte| switch (byte) {
'\r', '\n' => matches += 1,
else => {},
};
break :matches matches;
};
switch (matches) {
0 => {},
1 => switch (chunk[vector_len - 1]) {
'\r' => r.state = .seen_r,
'\n' => r.state = .seen_n,
else => {},
},
2 => {
const b16 = int16(chunk[vector_len - 2 ..][0..2]);
const b8 = intShift(u8, b16);
switch (b8) {
'\r' => r.state = .seen_r,
'\n' => r.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => r.state = .seen_rn,
int16("\n\n") => r.state = .finished,
else => {},
}
},
3 => {
const b24 = int24(chunk[vector_len - 3 ..][0..3]);
const b16 = intShift(u16, b24);
const b8 = intShift(u8, b24);
switch (b8) {
'\r' => r.state = .seen_r,
'\n' => r.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => r.state = .seen_rn,
int16("\n\n") => r.state = .finished,
else => {},
}
switch (b24) {
int24("\r\n\r") => r.state = .seen_rnr,
else => {},
}
},
4...vector_len => {
inline for (0..vector_len - 3) |i_usize| {
const i = @as(u32, @truncate(i_usize));
const b32 = int32(chunk[i..][0..4]);
const b16 = intShift(u16, b32);
if (b32 == int32("\r\n\r\n")) {
r.state = .finished;
return index + i + 4;
} else if (b16 == int16("\n\n")) {
r.state = .finished;
return index + i + 2;
}
}
const b24 = int24(chunk[vector_len - 3 ..][0..3]);
const b16 = intShift(u16, b24);
const b8 = intShift(u8, b24);
switch (b8) {
'\r' => r.state = .seen_r,
'\n' => r.state = .seen_n,
else => {},
}
switch (b16) {
int16("\r\n") => r.state = .seen_rn,
int16("\n\n") => r.state = .finished,
else => {},
}
switch (b24) {
int24("\r\n\r") => r.state = .seen_rnr,
else => {},
}
},
else => unreachable,
}
index += vector_len;
continue;
},
},
.seen_n => switch (len - index) {
0 => return index,
else => {
switch (bytes[index]) {
'\n' => r.state = .finished,
else => r.state = .start,
}
index += 1;
continue;
},
},
.seen_r => switch (len - index) {
0 => return index,
1 => {
switch (bytes[index]) {
'\n' => r.state = .seen_rn,
'\r' => r.state = .seen_r,
else => r.state = .start,
}
return index + 1;
},
2 => {
const b16 = int16(bytes[index..][0..2]);
const b8 = intShift(u8, b16);
switch (b8) {
'\r' => r.state = .seen_r,
'\n' => r.state = .seen_rn,
else => r.state = .start,
}
switch (b16) {
int16("\r\n") => r.state = .seen_rn,
int16("\n\r") => r.state = .seen_rnr,
int16("\n\n") => r.state = .finished,
else => {},
}
return index + 2;
},
else => {
const b24 = int24(bytes[index..][0..3]);
const b16 = intShift(u16, b24);
const b8 = intShift(u8, b24);
switch (b8) {
'\r' => r.state = .seen_r,
'\n' => r.state = .seen_n,
else => r.state = .start,
}
switch (b16) {
int16("\r\n") => r.state = .seen_rn,
int16("\n\n") => r.state = .finished,
else => {},
}
switch (b24) {
int24("\n\r\n") => r.state = .finished,
else => {},
}
index += 3;
continue;
},
},
.seen_rn => switch (len - index) {
0 => return index,
1 => {
switch (bytes[index]) {
'\r' => r.state = .seen_rnr,
'\n' => r.state = .seen_n,
else => r.state = .start,
}
return index + 1;
},
else => {
const b16 = int16(bytes[index..][0..2]);
const b8 = intShift(u8, b16);
switch (b8) {
'\r' => r.state = .seen_rnr,
'\n' => r.state = .seen_n,
else => r.state = .start,
}
switch (b16) {
int16("\r\n") => r.state = .finished,
int16("\n\n") => r.state = .finished,
else => {},
}
index += 2;
continue;
},
},
.seen_rnr => switch (len - index) {
0 => return index,
else => {
switch (bytes[index]) {
'\n' => r.state = .finished,
else => r.state = .start,
}
index += 1;
continue;
},
},
.chunk_head_size => unreachable,
.chunk_head_ext => unreachable,
.chunk_head_r => unreachable,
.chunk_data => unreachable,
.chunk_data_suffix => unreachable,
.chunk_data_suffix_r => unreachable,
}
return index;
}
}
/// Returns the number of bytes consumed by the chunk size. This is always less than or equal to `bytes.len`.
/// You should check `r.state == .chunk_data` after this to check if the chunk size has been fully parsed.
///
/// If the amount returned is less than `bytes.len`, you may assume that the parser is in the `chunk_data` state
/// and that the first byte of the chunk is at `bytes[result]`.
pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 {
const len = @as(u32, @intCast(bytes.len));
for (bytes[0..], 0..) |c, i| {
const index = @as(u32, @intCast(i));
switch (r.state) {
.chunk_data_suffix => switch (c) {
'\r' => r.state = .chunk_data_suffix_r,
'\n' => r.state = .chunk_head_size,
else => {
r.state = .invalid;
return index;
},
},
.chunk_data_suffix_r => switch (c) {
'\n' => r.state = .chunk_head_size,
else => {
r.state = .invalid;
return index;
},
},
.chunk_head_size => {
const digit = switch (c) {
'0'...'9' => |b| b - '0',
'A'...'Z' => |b| b - 'A' + 10,
'a'...'z' => |b| b - 'a' + 10,
'\r' => {
r.state = .chunk_head_r;
continue;
},
'\n' => {
r.state = .chunk_data;
return index + 1;
},
else => {
r.state = .chunk_head_ext;
continue;
},
};
const new_len = r.next_chunk_length *% 16 +% digit;
if (new_len <= r.next_chunk_length and r.next_chunk_length != 0) {
r.state = .invalid;
return index;
}
r.next_chunk_length = new_len;
},
.chunk_head_ext => switch (c) {
'\r' => r.state = .chunk_head_r,
'\n' => {
r.state = .chunk_data;
return index + 1;
},
else => continue,
},
.chunk_head_r => switch (c) {
'\n' => {
r.state = .chunk_data;
return index + 1;
},
else => {
r.state = .invalid;
return index;
},
},
var hp: std.http.HeadParser = .{
.state = switch (r.state) {
.start => .start,
.seen_n => .seen_n,
.seen_r => .seen_r,
.seen_rn => .seen_rn,
.seen_rnr => .seen_rnr,
.finished => .finished,
else => unreachable,
}
}
return len;
},
};
const result = hp.feed(bytes);
r.state = switch (hp.state) {
.start => .start,
.seen_n => .seen_n,
.seen_r => .seen_r,
.seen_rn => .seen_rn,
.seen_rnr => .seen_rnr,
.finished => .finished,
};
return @intCast(result);
}
/// Returns whether or not the parser has finished parsing a complete message. A message is only complete after the
/// entire body has been read and any trailing headers have been parsed.
/// Advances chunked-transfer-coding parsing over `bytes`, delegating to
/// `std.http.ChunkParser`. Returns the number of bytes consumed, as
/// reported by `ChunkParser.feed`.
pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 {
    // Construct a ChunkParser seeded from this parser's current state,
    // translating between the prefixed state names used here
    // (.chunk_head_size, ...) and ChunkParser's unprefixed ones.
    var cp: std.http.ChunkParser = .{
        .state = switch (r.state) {
            .chunk_head_size => .head_size,
            .chunk_head_ext => .head_ext,
            .chunk_head_r => .head_r,
            .chunk_data => .data,
            .chunk_data_suffix => .data_suffix,
            .chunk_data_suffix_r => .data_suffix_r,
            .invalid => .invalid,
            // Callers must only invoke this while parsing chunk framing;
            // any other state is a caller bug.
            else => unreachable,
        },
        .chunk_len = r.next_chunk_length,
    };
    const result = cp.feed(bytes);
    // Map the resulting state back into this parser's state space and
    // carry over the accumulated chunk length.
    r.state = switch (cp.state) {
        .head_size => .chunk_head_size,
        .head_ext => .chunk_head_ext,
        .head_r => .chunk_head_r,
        .data => .chunk_data,
        .data_suffix => .chunk_data_suffix,
        .data_suffix_r => .chunk_data_suffix_r,
        .invalid => .invalid,
    };
    r.next_chunk_length = cp.chunk_len;
    return @intCast(result);
}
/// Returns whether or not the parser has finished parsing a complete
/// message. A message is only complete after the entire body has been read
/// and any trailing headers have been parsed.
pub fn isComplete(r: *HeadersParser) bool {
    // Both conditions are required: `done` marks the body as fully read,
    // while `.finished` marks the (possibly trailing) header section as
    // terminated.
    return r.done and r.state == .finished;
}
pub const CheckCompleteHeadError = mem.Allocator.Error || error{HttpHeadersExceededSizeLimit};
pub const CheckCompleteHeadError = error{HttpHeadersOversize};
/// Pushes `in` into the parser. Returns the number of bytes consumed by the header. Any header bytes are appended
/// to the `header_bytes` buffer.
///
/// This function only uses `allocator` if `r.header_bytes_owned` is true, and may be undefined otherwise.
pub fn checkCompleteHead(r: *HeadersParser, allocator: std.mem.Allocator, in: []const u8) CheckCompleteHeadError!u32 {
if (r.state.isContent()) return 0;
/// Pushes `in` into the parser. Returns the number of bytes consumed by
/// the header. Any header bytes are appended to `header_bytes_buffer`.
pub fn checkCompleteHead(hp: *HeadersParser, in: []const u8) CheckCompleteHeadError!u32 {
if (hp.state.isContent()) return 0;
const i = r.findHeadersEnd(in);
const i = hp.findHeadersEnd(in);
const data = in[0..i];
if (r.header_bytes.items.len + data.len > r.max_header_bytes) {
return error.HttpHeadersExceededSizeLimit;
} else {
if (r.header_bytes_owned) try r.header_bytes.ensureUnusedCapacity(allocator, data.len);
if (hp.header_bytes_len + data.len > hp.header_bytes_buffer.len)
return error.HttpHeadersOversize;
r.header_bytes.appendSliceAssumeCapacity(data);
}
@memcpy(hp.header_bytes_buffer[hp.header_bytes_len..][0..data.len], data);
hp.header_bytes_len += @intCast(data.len);
return i;
}
@@ -520,7 +154,8 @@ pub const HeadersParser = struct {
HttpChunkInvalid,
};
/// Reads the body of the message into `buffer`. Returns the number of bytes placed in the buffer.
/// Reads the body of the message into `buffer`. Returns the number of
/// bytes placed in the buffer.
///
/// If `skip` is true, the buffer will be unused and the body will be skipped.
///
@@ -571,9 +206,10 @@ pub const HeadersParser = struct {
.chunk_data => if (r.next_chunk_length == 0) {
if (std.mem.eql(u8, conn.peek(), "\r\n")) {
r.state = .finished;
r.done = true;
conn.drop(2);
} else {
// The trailer section is formatted identically to the header section.
// The trailer section is formatted identically
// to the header section.
r.state = .seen_rn;
}
r.done = true;
@@ -713,57 +349,11 @@ const MockBufferedConnection = struct {
}
};
test "HeadersParser.findHeadersEnd" {
    var r: HeadersParser = undefined;
    const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\nHello";

    // Split the input at every possible boundary: the two feeds together
    // must consume exactly the 35-byte head (up to and including the blank
    // line) regardless of where the split falls, proving the state machine
    // resumes correctly across buffer boundaries.
    for (0..36) |i| {
        r = HeadersParser.initDynamic(0);

        try std.testing.expectEqual(@as(u32, @intCast(i)), r.findHeadersEnd(data[0..i]));
        try std.testing.expectEqual(@as(u32, @intCast(35 - i)), r.findHeadersEnd(data[i..]));
    }
}
test "HeadersParser.findChunkedLen" {
    var r: HeadersParser = undefined;
    const data = "Ff\r\nf0f000 ; ext\n0\r\nffffffffffffffffffffffffffffffffffffffff\r\n";

    // "Ff\r\n": mixed-case hex size 0xff; 4 bytes consumed, then chunk data.
    r = HeadersParser.initDynamic(0);
    r.state = .chunk_head_size;
    r.next_chunk_length = 0;

    const first = r.findChunkedLen(data[0..]);
    try testing.expectEqual(@as(u32, 4), first);
    try testing.expectEqual(@as(u64, 0xff), r.next_chunk_length);
    try testing.expectEqual(State.chunk_data, r.state);

    // "f0f000 ; ext\n": the chunk extension is skipped and a bare LF
    // (no CR) is accepted as the terminator.
    r.state = .chunk_head_size;
    r.next_chunk_length = 0;

    const second = r.findChunkedLen(data[first..]);
    try testing.expectEqual(@as(u32, 13), second);
    try testing.expectEqual(@as(u64, 0xf0f000), r.next_chunk_length);
    try testing.expectEqual(State.chunk_data, r.state);

    // "0\r\n": zero-length (final) chunk still transitions to chunk_data.
    r.state = .chunk_head_size;
    r.next_chunk_length = 0;

    const third = r.findChunkedLen(data[first + second ..]);
    try testing.expectEqual(@as(u32, 3), third);
    try testing.expectEqual(@as(u64, 0), r.next_chunk_length);
    try testing.expectEqual(State.chunk_data, r.state);

    // 40 'f' digits: after 16 digits the u64 holds 0xffff_ffff_ffff_ffff;
    // the 17th digit would overflow, so the parser stops having consumed
    // 16 bytes and flags the stream as .invalid.
    r.state = .chunk_head_size;
    r.next_chunk_length = 0;

    const fourth = r.findChunkedLen(data[first + second + third ..]);
    try testing.expectEqual(@as(u32, 16), fourth);
    try testing.expectEqual(@as(u64, 0xffffffffffffffff), r.next_chunk_length);
    try testing.expectEqual(State.invalid, r.state);
}
test "HeadersParser.read length" {
// mock BufferedConnection for read
var headers_buf: [256]u8 = undefined;
var r = HeadersParser.initDynamic(256);
defer r.header_bytes.deinit(std.testing.allocator);
var r = HeadersParser.init(&headers_buf);
const data = "GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\nHello";
var conn: MockBufferedConnection = .{
@@ -773,8 +363,8 @@ test "HeadersParser.read length" {
while (true) { // read headers
try conn.fill();
const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
conn.drop(@as(u16, @intCast(nchecked)));
const nchecked = try r.checkCompleteHead(conn.peek());
conn.drop(@intCast(nchecked));
if (r.state.isContent()) break;
}
@@ -786,14 +376,14 @@ test "HeadersParser.read length" {
try std.testing.expectEqual(@as(usize, 5), len);
try std.testing.expectEqualStrings("Hello", buf[0..len]);
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\n", r.header_bytes.items);
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\n", r.get());
}
test "HeadersParser.read chunked" {
// mock BufferedConnection for read
var r = HeadersParser.initDynamic(256);
defer r.header_bytes.deinit(std.testing.allocator);
var headers_buf: [256]u8 = undefined;
var r = HeadersParser.init(&headers_buf);
const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n2\r\nHe\r\n2\r\nll\r\n1\r\no\r\n0\r\n\r\n";
var conn: MockBufferedConnection = .{
@@ -803,8 +393,8 @@ test "HeadersParser.read chunked" {
while (true) { // read headers
try conn.fill();
const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
conn.drop(@as(u16, @intCast(nchecked)));
const nchecked = try r.checkCompleteHead(conn.peek());
conn.drop(@intCast(nchecked));
if (r.state.isContent()) break;
}
@@ -815,14 +405,14 @@ test "HeadersParser.read chunked" {
try std.testing.expectEqual(@as(usize, 5), len);
try std.testing.expectEqualStrings("Hello", buf[0..len]);
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\n", r.header_bytes.items);
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\n", r.get());
}
test "HeadersParser.read chunked trailer" {
// mock BufferedConnection for read
var r = HeadersParser.initDynamic(256);
defer r.header_bytes.deinit(std.testing.allocator);
var headers_buf: [256]u8 = undefined;
var r = HeadersParser.init(&headers_buf);
const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n2\r\nHe\r\n2\r\nll\r\n1\r\no\r\n0\r\nContent-Type: text/plain\r\n\r\n";
var conn: MockBufferedConnection = .{
@@ -832,8 +422,8 @@ test "HeadersParser.read chunked trailer" {
while (true) { // read headers
try conn.fill();
const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
conn.drop(@as(u16, @intCast(nchecked)));
const nchecked = try r.checkCompleteHead(conn.peek());
conn.drop(@intCast(nchecked));
if (r.state.isContent()) break;
}
@@ -847,11 +437,11 @@ test "HeadersParser.read chunked trailer" {
while (true) { // read headers
try conn.fill();
const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
conn.drop(@as(u16, @intCast(nchecked)));
const nchecked = try r.checkCompleteHead(conn.peek());
conn.drop(@intCast(nchecked));
if (r.state.isContent()) break;
}
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\nContent-Type: text/plain\r\n\r\n", r.header_bytes.items);
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\nContent-Type: text/plain\r\n\r\n", r.get());
}

995
lib/std/http/test.zig Normal file
View File

@@ -0,0 +1,995 @@
const builtin = @import("builtin");
const std = @import("std");
const http = std.http;
const mem = std.mem;
const native_endian = builtin.cpu.arch.endian();
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualStrings = std.testing.expectEqualStrings;
const expectError = std.testing.expectError;
// End-to-end test: the server streams a chunked response that ends with a
// trailer section, and the client surfaces headers and trailers in order
// through `iterateHeaders`, distinguishing them via `is_trailer`.
test "trailers" {
    const test_server = try createTestServer(struct {
        fn run(net_server: *std.net.Server) anyerror!void {
            var header_buffer: [1024]u8 = undefined;
            var remaining: usize = 1;
            while (remaining != 0) : (remaining -= 1) {
                const conn = try net_server.accept();
                defer conn.stream.close();

                var server = http.Server.init(conn, &header_buffer);

                try expectEqual(.ready, server.state);
                var request = try server.receiveHead();
                try serve(&request);
                // The connection must be reusable (.ready) once the
                // response, including trailers, has been fully sent.
                try expectEqual(.ready, server.state);
            }
        }

        fn serve(request: *http.Server.Request) !void {
            try expectEqualStrings(request.head.target, "/trailer");

            var send_buffer: [1024]u8 = undefined;
            var response = request.respondStreaming(.{
                .send_buffer = &send_buffer,
            });
            try response.writeAll("Hello, ");
            try response.flush();
            try response.writeAll("World!\n");
            try response.flush();
            // endChunked terminates the chunked body and appends the
            // trailer fields after the final zero-length chunk.
            try response.endChunked(.{
                .trailers = &.{
                    .{ .name = "X-Checksum", .value = "aaaa" },
                },
            });
        }
    });
    defer test_server.destroy();

    const gpa = std.testing.allocator;

    var client: http.Client = .{ .allocator = gpa };
    defer client.deinit();

    const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/trailer", .{
        test_server.port(),
    });
    defer gpa.free(location);
    const uri = try std.Uri.parse(location);

    {
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        try req.wait();

        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);

        // Iteration order: regular response headers first (is_trailer ==
        // false), then the trailer fields, then null.
        var it = req.response.iterateHeaders();
        {
            const header = it.next().?;
            try expect(!it.is_trailer);
            try expectEqualStrings("connection", header.name);
            try expectEqualStrings("keep-alive", header.value);
        }
        {
            const header = it.next().?;
            try expect(!it.is_trailer);
            try expectEqualStrings("transfer-encoding", header.name);
            try expectEqualStrings("chunked", header.value);
        }
        {
            const header = it.next().?;
            try expect(it.is_trailer);
            try expectEqualStrings("X-Checksum", header.name);
            try expectEqualStrings("aaaa", header.value);
        }
        try expectEqual(null, it.next());
    }

    // connection has been kept alive
    try expect(client.connection_pool.free_len == 1);
}
// The server must reassemble a chunked request body ("A" + "B" + "CD",
// sent as three chunks) into "ABCD" before responding. The raw request is
// written over a plain TCP socket so the chunk framing is exactly as
// spelled out below.
test "HTTP server handles a chunked transfer coding request" {
    const test_server = try createTestServer(struct {
        fn run(net_server: *std.net.Server) !void {
            var header_buffer: [8192]u8 = undefined;
            const conn = try net_server.accept();
            defer conn.stream.close();

            var server = http.Server.init(conn, &header_buffer);
            var request = try server.receiveHead();
            try expect(request.head.transfer_encoding == .chunked);

            var buf: [128]u8 = undefined;
            const n = try (try request.reader()).readAll(&buf);
            try expect(mem.eql(u8, buf[0..n], "ABCD"));

            try request.respond("message from server!\n", .{
                .extra_headers = &.{
                    .{ .name = "content-type", .value = "text/plain" },
                },
                .keep_alive = false,
            });
        }
    });
    defer test_server.destroy();

    const request_bytes =
        "POST / HTTP/1.1\r\n" ++
        "Content-Type: text/plain\r\n" ++
        "Transfer-Encoding: chunked\r\n" ++
        "\r\n" ++
        "1\r\n" ++
        "A\r\n" ++
        "1\r\n" ++
        "B\r\n" ++
        "2\r\n" ++
        "CD\r\n" ++
        "0\r\n" ++
        "\r\n";

    const gpa = std.testing.allocator;
    const stream = try std.net.tcpConnectToHost(gpa, "127.0.0.1", test_server.port());
    defer stream.close();
    try stream.writeAll(request_bytes);
}
// Server half of the echo tests: accepts connections in a loop, echoing
// "/echo-content" request bodies back, until a request for "/end" tells it
// to shut down. Also exercises rejection of a bogus Expect header. The
// client half lives in `echoTests`.
test "echo content server" {
    const test_server = try createTestServer(struct {
        fn run(net_server: *std.net.Server) anyerror!void {
            var read_buffer: [1024]u8 = undefined;

            accept: while (true) {
                const conn = try net_server.accept();
                defer conn.stream.close();

                var http_server = http.Server.init(conn, &read_buffer);

                // Serve multiple requests per connection (keep-alive) for
                // as long as the server reports .ready.
                while (http_server.state == .ready) {
                    var request = http_server.receiveHead() catch |err| switch (err) {
                        error.HttpConnectionClosing => continue :accept,
                        else => |e| return e,
                    };
                    if (mem.eql(u8, request.head.target, "/end")) {
                        return request.respond("", .{ .keep_alive = false });
                    }
                    if (request.head.expect) |expect_header_value| {
                        // An unsupported Expect value must surface as
                        // HttpExpectationFailed when the body is read.
                        if (mem.eql(u8, expect_header_value, "garbage")) {
                            try expectError(error.HttpExpectationFailed, request.reader());
                            try request.respond("", .{ .keep_alive = false });
                            continue;
                        }
                    }
                    handleRequest(&request) catch |err| {
                        // This message helps the person troubleshooting determine whether
                        // output comes from the server thread or the client thread.
                        std.debug.print("handleRequest failed with '{s}'\n", .{@errorName(err)});
                        return err;
                    };
                }
            }
        }

        fn handleRequest(request: *http.Server.Request) !void {
            //std.debug.print("server received {s} {s} {s}\n", .{
            //    @tagName(request.head.method),
            //    @tagName(request.head.version),
            //    request.head.target,
            //});

            const body = try (try request.reader()).readAllAlloc(std.testing.allocator, 8192);
            defer std.testing.allocator.free(body);

            try expect(mem.startsWith(u8, request.head.target, "/echo-content"));
            try expectEqualStrings("Hello, World!\n", body);
            try expectEqualStrings("text/plain", request.head.content_type.?);

            var send_buffer: [100]u8 = undefined;
            var response = request.respondStreaming(.{
                .send_buffer = &send_buffer,
                // Mirror the client's framing: chunked requests get a
                // chunked response (null), content-length requests get the
                // known 14-byte length.
                .content_length = switch (request.head.transfer_encoding) {
                    .chunked => null,
                    .none => len: {
                        try expectEqual(14, request.head.content_length.?);
                        break :len 14;
                    },
                },
            });

            try response.flush(); // Test an early flush to send the HTTP headers before the body.
            const w = response.writer();
            try w.writeAll("Hello, ");
            try w.writeAll("World!\n");
            try response.end();
            //std.debug.print("  server finished responding\n", .{});
        }
    });
    defer test_server.destroy();

    {
        var client: http.Client = .{ .allocator = std.testing.allocator };
        defer client.deinit();

        try echoTests(&client, test_server.port());
    }
}
test "Server.Request.respondStreaming non-chunked, unknown content-length" {
    // In this case, the response is expected to stream until the connection is
    // closed, indicating the end of the body.
    const test_server = try createTestServer(struct {
        fn run(net_server: *std.net.Server) anyerror!void {
            var header_buffer: [1000]u8 = undefined;
            var remaining: usize = 1;
            while (remaining != 0) : (remaining -= 1) {
                const conn = try net_server.accept();
                defer conn.stream.close();

                var server = http.Server.init(conn, &header_buffer);

                try expectEqual(.ready, server.state);
                var request = try server.receiveHead();
                try expectEqualStrings(request.head.target, "/foo");
                var send_buffer: [500]u8 = undefined;
                var response = request.respondStreaming(.{
                    .send_buffer = &send_buffer,
                    .respond_options = .{
                        // No Content-Length and no chunked framing.
                        .transfer_encoding = .none,
                    },
                });
                var total: usize = 0;
                for (0..500) |i| {
                    var buf: [30]u8 = undefined;
                    const line = try std.fmt.bufPrint(&buf, "{d}, ah ha ha!\n", .{i});
                    try response.writeAll(line);
                    total += line.len;
                }
                try expectEqual(7390, total);
                try response.end();
                // With no framing, closing the connection is the only way
                // to mark the end of the body, hence .closing.
                try expectEqual(.closing, server.state);
            }
        }
    });
    defer test_server.destroy();

    const request_bytes = "GET /foo HTTP/1.1\r\n\r\n";
    const gpa = std.testing.allocator;
    const stream = try std.net.tcpConnectToHost(gpa, "127.0.0.1", test_server.port());
    defer stream.close();
    try stream.writeAll(request_bytes);

    const response = try stream.reader().readAllAlloc(gpa, 8192);
    defer gpa.free(response);

    // Rebuild the exact expected byte stream: status line with no framing
    // headers, followed by the 500 formatted lines.
    var expected_response = std.ArrayList(u8).init(gpa);
    defer expected_response.deinit();

    try expected_response.appendSlice("HTTP/1.1 200 OK\r\n\r\n");

    {
        var total: usize = 0;
        for (0..500) |i| {
            var buf: [30]u8 = undefined;
            const line = try std.fmt.bufPrint(&buf, "{d}, ah ha ha!\n", .{i});
            try expected_response.appendSlice(line);
            total += line.len;
        }
        try expectEqual(7390, total);
    }

    try expectEqualStrings(expected_response.items, response);
}
// Request headers must be surfaced to the handler verbatim, preserving the
// client's original (mixed) casing and iteration order.
test "receiving arbitrary http headers from the client" {
    const test_server = try createTestServer(struct {
        fn run(net_server: *std.net.Server) anyerror!void {
            var read_buffer: [666]u8 = undefined;
            var remaining: usize = 1;
            while (remaining != 0) : (remaining -= 1) {
                const conn = try net_server.accept();
                defer conn.stream.close();

                var server = http.Server.init(conn, &read_buffer);
                try expectEqual(.ready, server.state);
                var request = try server.receiveHead();
                try expectEqualStrings("/bar", request.head.target);
                var it = request.iterateHeaders();
                {
                    const header = it.next().?;
                    // Casing is preserved exactly as sent on the wire.
                    try expectEqualStrings("CoNneCtIoN", header.name);
                    try expectEqualStrings("close", header.value);
                    try expect(!it.is_trailer);
                }
                {
                    const header = it.next().?;
                    try expectEqualStrings("aoeu", header.name);
                    try expectEqualStrings("asdf", header.value);
                    try expect(!it.is_trailer);
                }
                try request.respond("", .{});
            }
        }
    });
    defer test_server.destroy();

    const request_bytes = "GET /bar HTTP/1.1\r\n" ++
        "CoNneCtIoN: close\r\n" ++
        "aoeu: asdf\r\n" ++
        "\r\n";
    const gpa = std.testing.allocator;
    const stream = try std.net.tcpConnectToHost(gpa, "127.0.0.1", test_server.port());
    defer stream.close();
    try stream.writeAll(request_bytes);

    const response = try stream.reader().readAllAlloc(gpa, 8192);
    defer gpa.free(response);

    var expected_response = std.ArrayList(u8).init(gpa);
    defer expected_response.deinit();
    try expected_response.appendSlice("HTTP/1.1 200 OK\r\n");
    try expected_response.appendSlice("content-length: 0\r\n\r\n");
    try expectEqualStrings(expected_response.items, response);
}
// Broad client/server integration test: content-length and chunked bodies,
// HEAD requests, keep-alive vs connection-close, relative/root/absolute
// redirects, the redirect limit, connection failure after a redirect, and
// a connection-pool exhaustion regression (issue 16282). The server routes
// on the request target inside `handleRequest`.
test "general client/server API coverage" {
    if (builtin.os.tag == .windows) {
        // This test was never passing on Windows.
        return error.SkipZigTest;
    }

    const global = struct {
        var handle_new_requests = true;
    };

    const test_server = try createTestServer(struct {
        fn run(net_server: *std.net.Server) anyerror!void {
            var client_header_buffer: [1024]u8 = undefined;
            outer: while (global.handle_new_requests) {
                var connection = try net_server.accept();
                defer connection.stream.close();

                var http_server = http.Server.init(connection, &client_header_buffer);

                while (http_server.state == .ready) {
                    var request = http_server.receiveHead() catch |err| switch (err) {
                        error.HttpConnectionClosing => continue :outer,
                        else => |e| return e,
                    };

                    try handleRequest(&request, net_server.listen_address.getPort());
                }
            }
        }

        fn handleRequest(request: *http.Server.Request, listen_port: u16) !void {
            const log = std.log.scoped(.server);

            log.info("{} {s} {s}", .{
                request.head.method,
                @tagName(request.head.version),
                request.head.target,
            });

            const gpa = std.testing.allocator;
            const body = try (try request.reader()).readAllAlloc(gpa, 8192);
            defer gpa.free(body);

            var send_buffer: [100]u8 = undefined;

            if (mem.startsWith(u8, request.head.target, "/get")) {
                var response = request.respondStreaming(.{
                    .send_buffer = &send_buffer,
                    // "?chunked" in the target selects chunked framing
                    // (null content length); otherwise the body is exactly
                    // 14 bytes ("Hello, World!\n").
                    .content_length = if (mem.indexOf(u8, request.head.target, "?chunked") == null)
                        14
                    else
                        null,
                    .respond_options = .{
                        .extra_headers = &.{
                            .{ .name = "content-type", .value = "text/plain" },
                        },
                    },
                });
                const w = response.writer();
                try w.writeAll("Hello, ");
                try w.writeAll("World!\n");
                try response.end();
                // Writing again would cause an assertion failure.
            } else if (mem.startsWith(u8, request.head.target, "/large")) {
                var response = request.respondStreaming(.{
                    .send_buffer = &send_buffer,
                    // 1034 lines of "Hello, World!\n" (5 + 1024 + 5).
                    .content_length = 14 * 1024 + 14 * 10,
                });

                try response.flush(); // Test an early flush to send the HTTP headers before the body.

                const w = response.writer();

                var i: u32 = 0;
                while (i < 5) : (i += 1) {
                    try w.writeAll("Hello, World!\n");
                }

                try w.writeAll("Hello, World!\n" ** 1024);

                i = 0;
                while (i < 5) : (i += 1) {
                    try w.writeAll("Hello, World!\n");
                }

                try response.end();
            } else if (mem.eql(u8, request.head.target, "/redirect/1")) {
                var response = request.respondStreaming(.{
                    .send_buffer = &send_buffer,
                    .respond_options = .{
                        .status = .found,
                        .extra_headers = &.{
                            // Relative redirect, resolved against the
                            // request URI by the client.
                            .{ .name = "location", .value = "../../get" },
                        },
                    },
                });

                const w = response.writer();
                try w.writeAll("Hello, ");
                try w.writeAll("Redirected!\n");
                try response.end();
            } else if (mem.eql(u8, request.head.target, "/redirect/2")) {
                try request.respond("Hello, Redirected!\n", .{
                    .status = .found,
                    .extra_headers = &.{
                        .{ .name = "location", .value = "/redirect/1" },
                    },
                });
            } else if (mem.eql(u8, request.head.target, "/redirect/3")) {
                // Absolute redirect back to this server.
                const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/redirect/2", .{
                    listen_port,
                });
                defer gpa.free(location);

                try request.respond("Hello, Redirected!\n", .{
                    .status = .found,
                    .extra_headers = &.{
                        .{ .name = "location", .value = location },
                    },
                });
            } else if (mem.eql(u8, request.head.target, "/redirect/4")) {
                try request.respond("Hello, Redirected!\n", .{
                    .status = .found,
                    .extra_headers = &.{
                        .{ .name = "location", .value = "/redirect/3" },
                    },
                });
            } else if (mem.eql(u8, request.head.target, "/redirect/invalid")) {
                // Redirect to a port nothing is listening on, to exercise
                // the client's connect-failure path after a redirect.
                const invalid_port = try getUnusedTcpPort();
                const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}", .{invalid_port});
                defer gpa.free(location);

                try request.respond("", .{
                    .status = .found,
                    .extra_headers = &.{
                        .{ .name = "location", .value = location },
                    },
                });
            } else {
                try request.respond("", .{ .status = .not_found });
            }
        }

        // Bind to port 0 and immediately release it; the returned port is
        // very likely unused when the client later tries to connect.
        fn getUnusedTcpPort() !u16 {
            const addr = try std.net.Address.parseIp("127.0.0.1", 0);
            var s = try addr.listen(.{});
            defer s.deinit();
            return s.listen_address.in.getPort();
        }
    });
    defer test_server.destroy();

    const log = std.log.scoped(.client);

    const gpa = std.testing.allocator;
    var client: http.Client = .{ .allocator = gpa };
    errdefer client.deinit();
    // defer client.deinit(); handled below

    const port = test_server.port();

    { // read content-length response
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/get", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        try req.wait();

        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
        try expectEqualStrings("text/plain", req.response.content_type.?);
    }

    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);

    { // read large content-length response
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/large", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        try req.wait();

        const body = try req.reader().readAllAlloc(gpa, 8192 * 1024);
        defer gpa.free(body);

        try expectEqual(@as(usize, 14 * 1024 + 14 * 10), body.len);
    }

    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);

    { // send head request and not read chunked
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/get", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.HEAD, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        try req.wait();

        // HEAD: headers are present but the body is empty.
        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);

        try expectEqualStrings("", body);
        try expectEqualStrings("text/plain", req.response.content_type.?);
        try expectEqual(14, req.response.content_length.?);
    }

    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);

    { // read chunked response
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/get?chunked", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        try req.wait();

        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
        try expectEqualStrings("text/plain", req.response.content_type.?);
    }

    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);

    { // send head request and not read chunked
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/get?chunked", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.HEAD, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        try req.wait();

        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);

        try expectEqualStrings("", body);
        try expectEqualStrings("text/plain", req.response.content_type.?);
        try expect(req.response.transfer_encoding == .chunked);
    }

    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);

    { // read content-length response with connection close
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/get", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
            .keep_alive = false,
        });
        defer req.deinit();

        try req.send(.{});
        try req.wait();

        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
        try expectEqualStrings("text/plain", req.response.content_type.?);
    }

    // connection has been closed
    try expect(client.connection_pool.free_len == 0);

    { // relative redirect
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/redirect/1", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        try req.wait();

        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
    }

    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);

    { // redirect from root
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/redirect/2", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        try req.wait();

        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
    }

    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);

    { // absolute redirect
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/redirect/3", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        try req.wait();

        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
    }

    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);

    { // too many redirects
        // /redirect/4 -> 3 -> 2 -> 1 -> /get exceeds the client's redirect
        // limit, so wait() must fail with TooManyHttpRedirects.
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/redirect/4", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        req.wait() catch |err| switch (err) {
            error.TooManyHttpRedirects => {},
            else => return err,
        };
    }

    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);

    { // check client without segfault by connection error after redirection
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/redirect/invalid", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        defer req.deinit();

        try req.send(.{});
        const result = req.wait();

        // a proxy without an upstream is likely to return a 5xx status.
        if (client.http_proxy == null) {
            try expectError(error.ConnectionRefused, result); // expects not segfault but the regular error
        }
    }

    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);

    { // issue 16282 *** This test leaves the client in an invalid state, it must be last ***
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/get", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        // Open more simultaneous requests than the pool can keep, so that
        // releasing them all overflows into the free list.
        const total_connections = client.connection_pool.free_size + 64;
        var requests = try gpa.alloc(http.Client.Request, total_connections);
        defer gpa.free(requests);

        var header_bufs = std.ArrayList([]u8).init(gpa);
        defer header_bufs.deinit();
        defer for (header_bufs.items) |item| gpa.free(item);

        for (0..total_connections) |i| {
            const headers_buf = try gpa.alloc(u8, 1024);
            try header_bufs.append(headers_buf);
            var req = try client.open(.GET, uri, .{
                .server_header_buffer = headers_buf,
            });
            // Mark each request as complete without performing the round
            // trip, so deinit() returns the connection to the pool.
            req.response.parser.done = true;
            req.connection.?.closing = false;
            requests[i] = req;
        }

        for (0..total_connections) |i| {
            requests[i].deinit();
        }

        // free connections should be full now
        try expect(client.connection_pool.free_len == client.connection_pool.free_size);
    }

    client.deinit();

    {
        global.handle_new_requests = false;

        // Poke the server with one more connection so its accept loop
        // observes handle_new_requests == false and exits.
        const conn = try std.net.tcpConnectToAddress(test_server.net_server.listen_address);
        conn.close();
    }
}
/// Exercises the request/response body paths of `client` against the local
/// test server listening on `port`: a fixed content-length upload, a chunked
/// upload, `Client.fetch`, and `expect: 100-continue` handling (both honored
/// and rejected). Ends by requesting `/end`.
/// NOTE(review): the sub-tests are order-dependent — the connection-pool
/// assertions between them assume the previous request kept its connection
/// alive. Do not reorder.
fn echoTests(client: *http.Client, port: u16) !void {
    const gpa = std.testing.allocator;
    var location_buffer: [100]u8 = undefined;
    { // send content-length request
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/echo-content", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.POST, uri, .{
            .server_header_buffer = &server_header_buffer,
            .extra_headers = &.{
                .{ .name = "content-type", .value = "text/plain" },
            },
        });
        defer req.deinit();
        // Declared length must match the 14 bytes written below
        // ("Hello, " + "World!\n").
        req.transfer_encoding = .{ .content_length = 14 };
        try req.send(.{});
        try req.writeAll("Hello, ");
        try req.writeAll("World!\n");
        try req.finish();
        try req.wait();
        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);
        // The /echo-content endpoint echoes the request body back verbatim.
        try expectEqualStrings("Hello, World!\n", body);
    }
    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);
    { // send chunked request
        // Reuses the stack `location_buffer` instead of heap-allocating the URL.
        const uri = try std.Uri.parse(try std.fmt.bufPrint(
            &location_buffer,
            "http://127.0.0.1:{d}/echo-content",
            .{port},
        ));
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.POST, uri, .{
            .server_header_buffer = &server_header_buffer,
            .extra_headers = &.{
                .{ .name = "content-type", .value = "text/plain" },
            },
        });
        defer req.deinit();
        // Same payload as above, but sent with chunked transfer encoding.
        req.transfer_encoding = .chunked;
        try req.send(.{});
        try req.writeAll("Hello, ");
        try req.writeAll("World!\n");
        try req.finish();
        try req.wait();
        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);
        try expectEqualStrings("Hello, World!\n", body);
    }
    // connection has been kept alive
    try expect(client.http_proxy != null or client.connection_pool.free_len == 1);
    { // Client.fetch()
        // The fragment (#fetch) distinguishes this request in server-side logs;
        // fragments are not sent on the wire.
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/echo-content#fetch", .{port});
        defer gpa.free(location);
        var body = std.ArrayList(u8).init(gpa);
        defer body.deinit();
        const res = try client.fetch(.{
            .location = .{ .url = location },
            .method = .POST,
            .payload = "Hello, World!\n",
            .extra_headers = &.{
                .{ .name = "content-type", .value = "text/plain" },
            },
            .response_storage = .{ .dynamic = &body },
        });
        try expectEqual(.ok, res.status);
        try expectEqualStrings("Hello, World!\n", body.items);
    }
    { // expect: 100-continue
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/echo-content#expect-100", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.POST, uri, .{
            .server_header_buffer = &server_header_buffer,
            .extra_headers = &.{
                .{ .name = "expect", .value = "100-continue" },
                .{ .name = "content-type", .value = "text/plain" },
            },
        });
        defer req.deinit();
        req.transfer_encoding = .chunked;
        try req.send(.{});
        try req.writeAll("Hello, ");
        try req.writeAll("World!\n");
        try req.finish();
        try req.wait();
        // Server accepted the expectation and the body round-tripped.
        try expectEqual(.ok, req.response.status);
        const body = try req.reader().readAllAlloc(gpa, 8192);
        defer gpa.free(body);
        try expectEqualStrings("Hello, World!\n", body);
    }
    { // expect: garbage
        const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/echo-content#expect-garbage", .{port});
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.POST, uri, .{
            .server_header_buffer = &server_header_buffer,
            .extra_headers = &.{
                .{ .name = "content-type", .value = "text/plain" },
                .{ .name = "expect", .value = "garbage" },
            },
        });
        defer req.deinit();
        req.transfer_encoding = .chunked;
        try req.send(.{});
        try req.wait();
        // An unrecognized expect value must be rejected with 417.
        try expectEqual(.expectation_failed, req.response.status);
    }
    // Final request to /end — presumably signals the server thread to finish;
    // response is intentionally ignored.
    _ = try client.fetch(.{
        .location = .{
            .url = try std.fmt.bufPrint(&location_buffer, "http://127.0.0.1:{d}/end", .{port}),
        },
    });
}
/// A test HTTP/TCP server: a listening socket plus the thread servicing it.
/// Instances are created by `createTestServer` with `std.testing.allocator`
/// and must be released via `destroy`.
const TestServer = struct {
    server_thread: std.Thread,
    net_server: std.net.Server,

    /// Waits for the server thread to finish, closes the listening socket,
    /// and frees this object (allocated with `std.testing.allocator`).
    fn destroy(ts: *TestServer) void {
        ts.server_thread.join();
        ts.net_server.deinit();
        std.testing.allocator.destroy(ts);
    }

    /// Port the server is bound to, in native endian.
    fn port(ts: TestServer) u16 {
        return ts.net_server.listen_address.in.getPort();
    }
};
/// Allocates a `TestServer`, binds it to an ephemeral localhost port, and
/// spawns a thread running `S.run` with the listening socket. Caller owns the
/// result and must release it with `TestServer.destroy`.
/// Skips the test in single-threaded builds and on the known-broken
/// stage2_llvm big-endian configuration.
fn createTestServer(S: type) !*TestServer {
    if (builtin.single_threaded) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_llvm and native_endian == .big) {
        // https://github.com/ziglang/zig/issues/13782
        return error.SkipZigTest;
    }

    // Port 0 asks the kernel for any free port; tests read it back via port().
    const address = try std.net.Address.parseIp("127.0.0.1", 0);
    const test_server = try std.testing.allocator.create(TestServer);
    // Don't leak the allocation if listen() or spawn() below fails.
    errdefer std.testing.allocator.destroy(test_server);
    test_server.net_server = try address.listen(.{ .reuse_address = true });
    // Close the listening socket if thread creation fails.
    errdefer test_server.net_server.deinit();
    test_server.server_thread = try std.Thread.spawn(.{}, S.run, .{&test_server.net_server});
    return test_server;
}

View File

@@ -360,6 +360,18 @@ pub fn readEnum(self: Self, comptime Enum: type, endian: std.builtin.Endian) any
return E.InvalidValue;
}
/// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded.
pub fn discard(self: Self) anyerror!u64 {
    // Scratch buffer; contents are never inspected, only counted.
    var trash: [4096]u8 = undefined;
    var index: u64 = 0;
    while (true) {
        const n = try self.read(&trash);
        // read() returning 0 signals end of stream.
        if (n == 0) return index;
        index += n;
    }
}
const std = @import("../std.zig");
const Self = @This();
const math = std.math;

View File

@@ -1338,7 +1338,7 @@ pub fn indexOf(comptime T: type, haystack: []const T, needle: []const T) ?usize
pub fn lastIndexOfLinear(comptime T: type, haystack: []const T, needle: []const T) ?usize {
var i: usize = haystack.len - needle.len;
while (true) : (i -= 1) {
if (mem.eql(T, haystack[i .. i + needle.len], needle)) return i;
if (mem.eql(T, haystack[i..][0..needle.len], needle)) return i;
if (i == 0) return null;
}
}
@@ -1349,7 +1349,7 @@ pub fn indexOfPosLinear(comptime T: type, haystack: []const T, start_index: usiz
var i: usize = start_index;
const end = haystack.len - needle.len;
while (i <= end) : (i += 1) {
if (eql(T, haystack[i .. i + needle.len], needle)) return i;
if (eql(T, haystack[i..][0..needle.len], needle)) return i;
}
return null;
}

View File

@@ -4,15 +4,17 @@ const assert = std.debug.assert;
const net = @This();
const mem = std.mem;
const os = std.os;
const posix = std.posix;
const fs = std.fs;
const io = std.io;
const native_endian = builtin.target.cpu.arch.endian();
// Windows 10 added support for unix sockets in build 17063, redstone 4 is the
// first release to support them.
pub const has_unix_sockets = @hasDecl(os.sockaddr, "un") and
(builtin.target.os.tag != .windows or
builtin.os.version_range.windows.isAtLeast(.win10_rs4) orelse false);
pub const has_unix_sockets = switch (builtin.os.tag) {
.windows => builtin.os.version_range.windows.isAtLeast(.win10_rs4) orelse false,
else => true,
};
pub const IPParseError = error{
Overflow,
@@ -122,7 +124,7 @@ pub const Address = extern union {
@memset(&sock_addr.path, 0);
@memcpy(sock_addr.path[0..path.len], path);
return Address{ .un = sock_addr };
return .{ .un = sock_addr };
}
/// Returns the port in native endian.
@@ -206,6 +208,60 @@ pub const Address = extern union {
else => unreachable,
}
}
pub const ListenError = posix.SocketError || posix.BindError || posix.ListenError ||
posix.SetSockOptError || posix.GetSockNameError;
pub const ListenOptions = struct {
/// How many connections the kernel will accept on the application's behalf.
/// If more than this many connections pool in the kernel, clients will start
/// seeing "Connection refused".
kernel_backlog: u31 = 128,
/// Sets SO_REUSEADDR and SO_REUSEPORT on POSIX.
/// Sets SO_REUSEADDR on Windows, which is roughly equivalent.
reuse_address: bool = false,
/// Deprecated. Does the same thing as reuse_address.
reuse_port: bool = false,
force_nonblocking: bool = false,
};
/// Creates a socket bound to `address` and puts it in listening mode.
/// The returned `Server` has an open `stream`; caller must `deinit` it.
pub fn listen(address: Address, options: ListenOptions) ListenError!Server {
    const nonblock: u32 = if (options.force_nonblocking) posix.SOCK.NONBLOCK else 0;
    const sock_flags = posix.SOCK.STREAM | posix.SOCK.CLOEXEC | nonblock;
    // Unix-domain sockets take protocol 0; everything else here is TCP.
    const proto: u32 = if (address.any.family == posix.AF.UNIX) 0 else posix.IPPROTO.TCP;

    const sockfd = try posix.socket(address.any.family, sock_flags, proto);
    var s: Server = .{
        // Filled in by getsockname below (matters when binding to port 0).
        .listen_address = undefined,
        .stream = .{ .handle = sockfd },
    };
    // Close the socket on any failure past this point.
    errdefer s.stream.close();

    if (options.reuse_address or options.reuse_port) {
        try posix.setsockopt(
            sockfd,
            posix.SOL.SOCKET,
            posix.SO.REUSEADDR,
            &mem.toBytes(@as(c_int, 1)),
        );
        // SO_REUSEPORT does not exist on Windows; REUSEADDR above covers it.
        switch (builtin.os.tag) {
            .windows => {},
            else => try posix.setsockopt(
                sockfd,
                posix.SOL.SOCKET,
                posix.SO.REUSEPORT,
                &mem.toBytes(@as(c_int, 1)),
            ),
        }
    }

    var socklen = address.getOsSockLen();
    try posix.bind(sockfd, &address.any, socklen);
    try posix.listen(sockfd, options.kernel_backlog);
    // Read back the actual bound address (kernel-assigned port, etc.).
    try posix.getsockname(sockfd, &s.listen_address.any, &socklen);
    return s;
}
};
pub const Ip4Address = extern struct {
@@ -657,7 +713,7 @@ pub fn connectUnixSocket(path: []const u8) !Stream {
os.SOCK.STREAM | os.SOCK.CLOEXEC | opt_non_block,
0,
);
errdefer os.closeSocket(sockfd);
errdefer Stream.close(.{ .handle = sockfd });
var addr = try std.net.Address.initUnix(path);
try os.connect(sockfd, &addr.any, addr.getOsSockLen());
@@ -669,7 +725,7 @@ fn if_nametoindex(name: []const u8) IPv6InterfaceError!u32 {
if (builtin.target.os.tag == .linux) {
var ifr: os.ifreq = undefined;
const sockfd = try os.socket(os.AF.UNIX, os.SOCK.DGRAM | os.SOCK.CLOEXEC, 0);
defer os.closeSocket(sockfd);
defer Stream.close(.{ .handle = sockfd });
@memcpy(ifr.ifrn.name[0..name.len], name);
ifr.ifrn.name[name.len] = 0;
@@ -738,7 +794,7 @@ pub fn tcpConnectToAddress(address: Address) TcpConnectToAddressError!Stream {
const sock_flags = os.SOCK.STREAM | nonblock |
(if (builtin.target.os.tag == .windows) 0 else os.SOCK.CLOEXEC);
const sockfd = try os.socket(address.any.family, sock_flags, os.IPPROTO.TCP);
errdefer os.closeSocket(sockfd);
errdefer Stream.close(.{ .handle = sockfd });
try os.connect(sockfd, &address.any, address.getOsSockLen());
@@ -1068,7 +1124,7 @@ fn linuxLookupName(
var prefixlen: i32 = 0;
const sock_flags = os.SOCK.DGRAM | os.SOCK.CLOEXEC;
if (os.socket(addr.addr.any.family, sock_flags, os.IPPROTO.UDP)) |fd| syscalls: {
defer os.closeSocket(fd);
defer Stream.close(.{ .handle = fd });
os.connect(fd, da, dalen) catch break :syscalls;
key |= DAS_USABLE;
os.getsockname(fd, sa, &salen) catch break :syscalls;
@@ -1553,7 +1609,7 @@ fn resMSendRc(
},
else => |e| return e,
};
defer os.closeSocket(fd);
defer Stream.close(.{ .handle = fd });
// Past this point, there are no errors. Each individual query will
// yield either no reply (indicated by zero length) or an answer
@@ -1729,13 +1785,15 @@ fn dnsParseCallback(ctx: dpc_ctx, rr: u8, data: []const u8, packet: []const u8)
}
pub const Stream = struct {
// Underlying socket descriptor.
// Note that on some platforms this may not be interchangeable with a
// regular files descriptor.
handle: os.socket_t,
/// Underlying platform-defined type which may or may not be
/// interchangeable with a file system file descriptor.
handle: posix.socket_t,
pub fn close(self: Stream) void {
os.closeSocket(self.handle);
pub fn close(s: Stream) void {
switch (builtin.os.tag) {
.windows => std.os.windows.closesocket(s.handle) catch unreachable,
else => posix.close(s.handle),
}
}
pub const ReadError = os.ReadError;
@@ -1839,156 +1897,38 @@ pub const Stream = struct {
}
};
pub const StreamServer = struct {
/// Copied from `Options` on `init`.
kernel_backlog: u31,
reuse_address: bool,
reuse_port: bool,
force_nonblocking: bool,
/// `undefined` until `listen` returns successfully.
pub const Server = struct {
listen_address: Address,
sockfd: ?os.socket_t,
pub const Options = struct {
/// How many connections the kernel will accept on the application's behalf.
/// If more than this many connections pool in the kernel, clients will start
/// seeing "Connection refused".
kernel_backlog: u31 = 128,
/// Enable SO.REUSEADDR on the socket.
reuse_address: bool = false,
/// Enable SO.REUSEPORT on the socket.
reuse_port: bool = false,
/// Force non-blocking mode.
force_nonblocking: bool = false,
};
/// After this call succeeds, resources have been acquired and must
/// be released with `deinit`.
pub fn init(options: Options) StreamServer {
return StreamServer{
.sockfd = null,
.kernel_backlog = options.kernel_backlog,
.reuse_address = options.reuse_address,
.reuse_port = options.reuse_port,
.force_nonblocking = options.force_nonblocking,
.listen_address = undefined,
};
}
/// Release all resources. The `StreamServer` memory becomes `undefined`.
pub fn deinit(self: *StreamServer) void {
self.close();
self.* = undefined;
}
pub fn listen(self: *StreamServer, address: Address) !void {
const nonblock = 0;
const sock_flags = os.SOCK.STREAM | os.SOCK.CLOEXEC | nonblock;
var use_sock_flags: u32 = sock_flags;
if (self.force_nonblocking) use_sock_flags |= os.SOCK.NONBLOCK;
const proto = if (address.any.family == os.AF.UNIX) @as(u32, 0) else os.IPPROTO.TCP;
const sockfd = try os.socket(address.any.family, use_sock_flags, proto);
self.sockfd = sockfd;
errdefer {
os.closeSocket(sockfd);
self.sockfd = null;
}
if (self.reuse_address) {
try os.setsockopt(
sockfd,
os.SOL.SOCKET,
os.SO.REUSEADDR,
&mem.toBytes(@as(c_int, 1)),
);
}
if (@hasDecl(os.SO, "REUSEPORT") and self.reuse_port) {
try os.setsockopt(
sockfd,
os.SOL.SOCKET,
os.SO.REUSEPORT,
&mem.toBytes(@as(c_int, 1)),
);
}
var socklen = address.getOsSockLen();
try os.bind(sockfd, &address.any, socklen);
try os.listen(sockfd, self.kernel_backlog);
try os.getsockname(sockfd, &self.listen_address.any, &socklen);
}
/// Stop listening. It is still necessary to call `deinit` after stopping listening.
/// Calling `deinit` will automatically call `close`. It is safe to call `close` when
/// not listening.
pub fn close(self: *StreamServer) void {
if (self.sockfd) |fd| {
os.closeSocket(fd);
self.sockfd = null;
self.listen_address = undefined;
}
}
pub const AcceptError = error{
ConnectionAborted,
/// The per-process limit on the number of open file descriptors has been reached.
ProcessFdQuotaExceeded,
/// The system-wide limit on the total number of open files has been reached.
SystemFdQuotaExceeded,
/// Not enough free memory. This often means that the memory allocation
/// is limited by the socket buffer limits, not by the system memory.
SystemResources,
/// Socket is not listening for new connections.
SocketNotListening,
ProtocolFailure,
/// Socket is in non-blocking mode and there is no connection to accept.
WouldBlock,
/// Firewall rules forbid connection.
BlockedByFirewall,
FileDescriptorNotASocket,
ConnectionResetByPeer,
NetworkSubsystemFailed,
OperationNotSupported,
} || os.UnexpectedError;
stream: std.net.Stream,
pub const Connection = struct {
stream: Stream,
stream: std.net.Stream,
address: Address,
};
/// If this function succeeds, the returned `Connection` is a caller-managed resource.
pub fn accept(self: *StreamServer) AcceptError!Connection {
var accepted_addr: Address = undefined;
var adr_len: os.socklen_t = @sizeOf(Address);
const accept_result = os.accept(self.sockfd.?, &accepted_addr.any, &adr_len, os.SOCK.CLOEXEC);
pub fn deinit(s: *Server) void {
s.stream.close();
s.* = undefined;
}
if (accept_result) |fd| {
return Connection{
.stream = Stream{ .handle = fd },
.address = accepted_addr,
};
} else |err| {
return err;
}
pub const AcceptError = posix.AcceptError;
/// Blocks until a client connects to the server. The returned `Connection` has
/// an open stream.
pub fn accept(s: *Server) AcceptError!Connection {
var accepted_addr: Address = undefined;
var addr_len: posix.socklen_t = @sizeOf(Address);
const fd = try posix.accept(s.stream.handle, &accepted_addr.any, &addr_len, posix.SOCK.CLOEXEC);
return .{
.stream = .{ .handle = fd },
.address = accepted_addr,
};
}
};
test {
_ = @import("net/test.zig");
_ = Server;
_ = Stream;
_ = Address;
}

View File

@@ -181,11 +181,9 @@ test "listen on a port, send bytes, receive bytes" {
// configured.
const localhost = try net.Address.parseIp("127.0.0.1", 0);
var server = net.StreamServer.init(.{});
var server = try localhost.listen(.{});
defer server.deinit();
try server.listen(localhost);
const S = struct {
fn clientFn(server_address: net.Address) !void {
const socket = try net.tcpConnectToAddress(server_address);
@@ -215,17 +213,11 @@ test "listen on an in use port" {
const localhost = try net.Address.parseIp("127.0.0.1", 0);
var server1 = net.StreamServer.init(net.StreamServer.Options{
.reuse_port = true,
});
var server1 = try localhost.listen(.{ .reuse_port = true });
defer server1.deinit();
try server1.listen(localhost);
var server2 = net.StreamServer.init(net.StreamServer.Options{
.reuse_port = true,
});
var server2 = try server1.listen_address.listen(.{ .reuse_port = true });
defer server2.deinit();
try server2.listen(server1.listen_address);
}
fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void {
@@ -252,7 +244,7 @@ fn testClient(addr: net.Address) anyerror!void {
try testing.expect(mem.eql(u8, msg, "hello from server\n"));
}
fn testServer(server: *net.StreamServer) anyerror!void {
fn testServer(server: *net.Server) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
var client = try server.accept();
@@ -274,15 +266,14 @@ test "listen on a unix socket, send bytes, receive bytes" {
}
}
var server = net.StreamServer.init(.{});
defer server.deinit();
const socket_path = try generateFileName("socket.unix");
defer testing.allocator.free(socket_path);
const socket_addr = try net.Address.initUnix(socket_path);
defer std.fs.cwd().deleteFile(socket_path) catch {};
try server.listen(socket_addr);
var server = try socket_addr.listen(.{});
defer server.deinit();
const S = struct {
fn clientFn(path: []const u8) !void {
@@ -323,9 +314,8 @@ test "non-blocking tcp server" {
}
const localhost = try net.Address.parseIp("127.0.0.1", 0);
var server = net.StreamServer.init(.{ .force_nonblocking = true });
var server = localhost.listen(.{ .force_nonblocking = true });
defer server.deinit();
try server.listen(localhost);
const accept_err = server.accept();
try testing.expectError(error.WouldBlock, accept_err);

View File

@@ -3598,14 +3598,6 @@ pub fn shutdown(sock: socket_t, how: ShutdownHow) ShutdownError!void {
}
}
pub fn closeSocket(sock: socket_t) void {
if (builtin.os.tag == .windows) {
windows.closesocket(sock) catch unreachable;
} else {
close(sock);
}
}
pub const BindError = error{
/// The address is protected, and the user is not the superuser.
/// For UNIX domain sockets: Search permission is denied on a component

View File

@@ -4,6 +4,7 @@ const assert = std.debug.assert;
const mem = std.mem;
const net = std.net;
const os = std.os;
const posix = std.posix;
const linux = os.linux;
const testing = std.testing;
@@ -3730,8 +3731,8 @@ const SocketTestHarness = struct {
client: os.socket_t,
fn close(self: SocketTestHarness) void {
os.closeSocket(self.client);
os.closeSocket(self.listener);
posix.close(self.client);
posix.close(self.listener);
}
};
@@ -3739,7 +3740,7 @@ fn createSocketTestHarness(ring: *IO_Uring) !SocketTestHarness {
// Create a TCP server socket
var address = try net.Address.parseIp4("127.0.0.1", 0);
const listener_socket = try createListenerSocket(&address);
errdefer os.closeSocket(listener_socket);
errdefer posix.close(listener_socket);
// Submit 1 accept
var accept_addr: os.sockaddr = undefined;
@@ -3748,7 +3749,7 @@ fn createSocketTestHarness(ring: *IO_Uring) !SocketTestHarness {
// Create a TCP client socket
const client = try os.socket(address.any.family, os.SOCK.STREAM | os.SOCK.CLOEXEC, 0);
errdefer os.closeSocket(client);
errdefer posix.close(client);
_ = try ring.connect(0xcccccccc, client, &address.any, address.getOsSockLen());
try testing.expectEqual(@as(u32, 2), try ring.submit());
@@ -3788,7 +3789,7 @@ fn createSocketTestHarness(ring: *IO_Uring) !SocketTestHarness {
fn createListenerSocket(address: *net.Address) !os.socket_t {
const kernel_backlog = 1;
const listener_socket = try os.socket(address.any.family, os.SOCK.STREAM | os.SOCK.CLOEXEC, 0);
errdefer os.closeSocket(listener_socket);
errdefer posix.close(listener_socket);
try os.setsockopt(listener_socket, os.SOL.SOCKET, os.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
try os.bind(listener_socket, &address.any, address.getOsSockLen());
@@ -3813,7 +3814,7 @@ test "accept multishot" {
var address = try net.Address.parseIp4("127.0.0.1", 0);
const listener_socket = try createListenerSocket(&address);
defer os.closeSocket(listener_socket);
defer posix.close(listener_socket);
// submit multishot accept operation
var addr: os.sockaddr = undefined;
@@ -3826,7 +3827,7 @@ test "accept multishot" {
while (nr > 0) : (nr -= 1) {
// connect client
const client = try os.socket(address.any.family, os.SOCK.STREAM | os.SOCK.CLOEXEC, 0);
errdefer os.closeSocket(client);
errdefer posix.close(client);
try os.connect(client, &address.any, address.getOsSockLen());
// test accept completion
@@ -3836,7 +3837,7 @@ test "accept multishot" {
try testing.expect(cqe.user_data == userdata);
try testing.expect(cqe.flags & linux.IORING_CQE_F_MORE > 0); // more flag is set
os.closeSocket(client);
posix.close(client);
}
}
@@ -3909,7 +3910,7 @@ test "accept_direct" {
try ring.register_files(registered_fds[0..]);
const listener_socket = try createListenerSocket(&address);
defer os.closeSocket(listener_socket);
defer posix.close(listener_socket);
const accept_userdata: u64 = 0xaaaaaaaa;
const read_userdata: u64 = 0xbbbbbbbb;
@@ -3927,7 +3928,7 @@ test "accept_direct" {
// connect
const client = try os.socket(address.any.family, os.SOCK.STREAM | os.SOCK.CLOEXEC, 0);
try os.connect(client, &address.any, address.getOsSockLen());
defer os.closeSocket(client);
defer posix.close(client);
// accept completion
const cqe_accept = try ring.copy_cqe();
@@ -3961,7 +3962,7 @@ test "accept_direct" {
// connect
const client = try os.socket(address.any.family, os.SOCK.STREAM | os.SOCK.CLOEXEC, 0);
try os.connect(client, &address.any, address.getOsSockLen());
defer os.closeSocket(client);
defer posix.close(client);
// completion with error
const cqe_accept = try ring.copy_cqe();
try testing.expect(cqe_accept.user_data == accept_userdata);
@@ -3989,7 +3990,7 @@ test "accept_multishot_direct" {
try ring.register_files(registered_fds[0..]);
const listener_socket = try createListenerSocket(&address);
defer os.closeSocket(listener_socket);
defer posix.close(listener_socket);
const accept_userdata: u64 = 0xaaaaaaaa;
@@ -4003,7 +4004,7 @@ test "accept_multishot_direct" {
// connect
const client = try os.socket(address.any.family, os.SOCK.STREAM | os.SOCK.CLOEXEC, 0);
try os.connect(client, &address.any, address.getOsSockLen());
defer os.closeSocket(client);
defer posix.close(client);
// accept completion
const cqe_accept = try ring.copy_cqe();
@@ -4018,7 +4019,7 @@ test "accept_multishot_direct" {
// connect
const client = try os.socket(address.any.family, os.SOCK.STREAM | os.SOCK.CLOEXEC, 0);
try os.connect(client, &address.any, address.getOsSockLen());
defer os.closeSocket(client);
defer posix.close(client);
// completion with error
const cqe_accept = try ring.copy_cqe();
try testing.expect(cqe_accept.user_data == accept_userdata);
@@ -4092,7 +4093,7 @@ test "socket_direct/socket_direct_alloc/close_direct" {
// use sockets from registered_fds in connect operation
var address = try net.Address.parseIp4("127.0.0.1", 0);
const listener_socket = try createListenerSocket(&address);
defer os.closeSocket(listener_socket);
defer posix.close(listener_socket);
const accept_userdata: u64 = 0xaaaaaaaa;
const connect_userdata: u64 = 0xbbbbbbbb;
const close_userdata: u64 = 0xcccccccc;

View File

@@ -817,7 +817,7 @@ test "shutdown socket" {
error.SocketNotConnected => {},
else => |e| return e,
};
os.closeSocket(sock);
std.net.Stream.close(.{ .handle = sock });
}
test "sigaction" {

View File

@@ -354,7 +354,8 @@ pub fn run(f: *Fetch) RunError!void {
.{ path_or_url, @errorName(file_err), @errorName(uri_err) },
));
};
var resource = try f.initResource(uri);
var server_header_buffer: [header_buffer_size]u8 = undefined;
var resource = try f.initResource(uri, &server_header_buffer);
return runResource(f, uri.path, &resource, null);
}
},
@@ -415,7 +416,8 @@ pub fn run(f: *Fetch) RunError!void {
f.location_tok,
try eb.printString("invalid URI: {s}", .{@errorName(err)}),
);
var resource = try f.initResource(uri);
var server_header_buffer: [header_buffer_size]u8 = undefined;
var resource = try f.initResource(uri, &server_header_buffer);
return runResource(f, uri.path, &resource, remote.hash);
}
@@ -876,7 +878,9 @@ const FileType = enum {
}
};
fn initResource(f: *Fetch, uri: std.Uri) RunError!Resource {
const header_buffer_size = 16 * 1024;
fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Resource {
const gpa = f.arena.child_allocator;
const arena = f.arena.allocator();
const eb = &f.error_bundle;
@@ -894,10 +898,9 @@ fn initResource(f: *Fetch, uri: std.Uri) RunError!Resource {
if (ascii.eqlIgnoreCase(uri.scheme, "http") or
ascii.eqlIgnoreCase(uri.scheme, "https"))
{
var h = std.http.Headers{ .allocator = gpa };
defer h.deinit();
var req = http_client.open(.GET, uri, h, .{}) catch |err| {
var req = http_client.open(.GET, uri, .{
.server_header_buffer = server_header_buffer,
}) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to connect to server: {s}",
.{@errorName(err)},
@@ -935,7 +938,7 @@ fn initResource(f: *Fetch, uri: std.Uri) RunError!Resource {
transport_uri.scheme = uri.scheme["git+".len..];
var redirect_uri: []u8 = undefined;
var session: git.Session = .{ .transport = http_client, .uri = transport_uri };
session.discoverCapabilities(gpa, &redirect_uri) catch |err| switch (err) {
session.discoverCapabilities(gpa, &redirect_uri, server_header_buffer) catch |err| switch (err) {
error.Redirected => {
defer gpa.free(redirect_uri);
return f.fail(f.location_tok, try eb.printString(
@@ -961,6 +964,7 @@ fn initResource(f: *Fetch, uri: std.Uri) RunError!Resource {
var ref_iterator = session.listRefs(gpa, .{
.ref_prefixes = &.{ want_ref, want_ref_head, want_ref_tag },
.include_peeled = true,
.server_header_buffer = server_header_buffer,
}) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to list refs: {s}",
@@ -1003,7 +1007,7 @@ fn initResource(f: *Fetch, uri: std.Uri) RunError!Resource {
_ = std.fmt.bufPrint(&want_oid_buf, "{}", .{
std.fmt.fmtSliceHexLower(&want_oid),
}) catch unreachable;
var fetch_stream = session.fetch(gpa, &.{&want_oid_buf}) catch |err| {
var fetch_stream = session.fetch(gpa, &.{&want_oid_buf}, server_header_buffer) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to create fetch stream: {s}",
.{@errorName(err)},
@@ -1036,7 +1040,7 @@ fn unpackResource(
.http_request => |req| ft: {
// Content-Type takes first precedence.
const content_type = req.response.headers.getFirstValue("Content-Type") orelse
const content_type = req.response.content_type orelse
return f.fail(f.location_tok, try eb.addString("missing 'Content-Type' header"));
// Extract the MIME type, ignoring charset and boundary directives
@@ -1069,7 +1073,7 @@ fn unpackResource(
}
// Next, the filename from 'content-disposition: attachment' takes precedence.
if (req.response.headers.getFirstValue("Content-Disposition")) |cd_header| {
if (req.response.content_disposition) |cd_header| {
break :ft FileType.fromContentDisposition(cd_header) orelse {
return f.fail(f.location_tok, try eb.printString(
"unsupported Content-Disposition header value: '{s}' for Content-Type=application/octet-stream",
@@ -1105,8 +1109,29 @@ fn unpackResource(
var dcp = std.compress.gzip.decompressor(br.reader());
try unpackTarball(f, tmp_directory.handle, dcp.reader());
},
.@"tar.xz" => try unpackTarballCompressed(f, tmp_directory.handle, resource, std.compress.xz),
.@"tar.zst" => try unpackTarballCompressed(f, tmp_directory.handle, resource, ZstdWrapper),
.@"tar.xz" => {
const gpa = f.arena.child_allocator;
const reader = resource.reader();
var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, reader);
var dcp = std.compress.xz.decompress(gpa, br.reader()) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to decompress tarball: {s}",
.{@errorName(err)},
));
};
defer dcp.deinit();
try unpackTarball(f, tmp_directory.handle, dcp.reader());
},
.@"tar.zst" => {
const window_size = std.compress.zstd.DecompressorOptions.default_window_buffer_len;
const window_buffer = try f.arena.allocator().create([window_size]u8);
const reader = resource.reader();
var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, reader);
var dcp = std.compress.zstd.decompressor(br.reader(), .{
.window_buffer = window_buffer,
});
return unpackTarball(f, tmp_directory.handle, dcp.reader());
},
.git_pack => unpackGitPack(f, tmp_directory.handle, resource) catch |err| switch (err) {
error.FetchFailed => return error.FetchFailed,
error.OutOfMemory => return error.OutOfMemory,
@@ -1118,40 +1143,6 @@ fn unpackResource(
}
}
// due to slight differences in the API of std.compress.(gzip|xz) and std.compress.zstd, zstd is
// wrapped for generic use in unpackTarballCompressed: see github.com/ziglang/zig/issues/14739
const ZstdWrapper = struct {
fn DecompressType(comptime T: type) type {
return error{}!std.compress.zstd.DecompressStream(T, .{});
}
fn decompress(allocator: Allocator, reader: anytype) DecompressType(@TypeOf(reader)) {
return std.compress.zstd.decompressStream(allocator, reader);
}
};
fn unpackTarballCompressed(
f: *Fetch,
out_dir: fs.Dir,
resource: *Resource,
comptime Compression: type,
) RunError!void {
const gpa = f.arena.child_allocator;
const eb = &f.error_bundle;
const reader = resource.reader();
var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, reader);
var decompress = Compression.decompress(gpa, br.reader()) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to decompress tarball: {s}",
.{@errorName(err)},
));
};
defer decompress.deinit();
return unpackTarball(f, out_dir, decompress.reader());
}
fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!void {
const eb = &f.error_bundle;
const gpa = f.arena.child_allocator;

View File

@@ -494,8 +494,9 @@ pub const Session = struct {
session: *Session,
allocator: Allocator,
redirect_uri: *[]u8,
http_headers_buffer: []u8,
) !void {
var capability_iterator = try session.getCapabilities(allocator, redirect_uri);
var capability_iterator = try session.getCapabilities(allocator, redirect_uri, http_headers_buffer);
defer capability_iterator.deinit();
while (try capability_iterator.next()) |capability| {
if (mem.eql(u8, capability.key, "agent")) {
@@ -521,6 +522,7 @@ pub const Session = struct {
session: Session,
allocator: Allocator,
redirect_uri: *[]u8,
http_headers_buffer: []u8,
) !CapabilityIterator {
var info_refs_uri = session.uri;
info_refs_uri.path = try std.fs.path.resolvePosix(allocator, &.{ "/", session.uri.path, "info/refs" });
@@ -528,12 +530,13 @@ pub const Session = struct {
info_refs_uri.query = "service=git-upload-pack";
info_refs_uri.fragment = null;
var headers = std.http.Headers.init(allocator);
defer headers.deinit();
try headers.append("Git-Protocol", "version=2");
var request = try session.transport.open(.GET, info_refs_uri, headers, .{
.max_redirects = 3,
const max_redirects = 3;
var request = try session.transport.open(.GET, info_refs_uri, .{
.redirect_behavior = @enumFromInt(max_redirects),
.server_header_buffer = http_headers_buffer,
.extra_headers = &.{
.{ .name = "Git-Protocol", .value = "version=2" },
},
});
errdefer request.deinit();
try request.send(.{});
@@ -541,7 +544,8 @@ pub const Session = struct {
try request.wait();
if (request.response.status != .ok) return error.ProtocolError;
if (request.redirects_left < 3) {
const any_redirects_occurred = request.redirect_behavior.remaining() < max_redirects;
if (any_redirects_occurred) {
if (!mem.endsWith(u8, request.uri.path, "/info/refs")) return error.UnparseableRedirect;
var new_uri = request.uri;
new_uri.path = new_uri.path[0 .. new_uri.path.len - "/info/refs".len];
@@ -620,6 +624,7 @@ pub const Session = struct {
include_symrefs: bool = false,
/// Whether to include the peeled object ID for returned tag refs.
include_peeled: bool = false,
server_header_buffer: []u8,
};
/// Returns an iterator over refs known to the server.
@@ -630,11 +635,6 @@ pub const Session = struct {
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;
var headers = std.http.Headers.init(allocator);
defer headers.deinit();
try headers.append("Content-Type", "application/x-git-upload-pack-request");
try headers.append("Git-Protocol", "version=2");
var body = std.ArrayListUnmanaged(u8){};
defer body.deinit(allocator);
const body_writer = body.writer(allocator);
@@ -656,8 +656,13 @@ pub const Session = struct {
}
try Packet.write(.flush, body_writer);
var request = try session.transport.open(.POST, upload_pack_uri, headers, .{
.handle_redirects = false,
var request = try session.transport.open(.POST, upload_pack_uri, .{
.redirect_behavior = .unhandled,
.server_header_buffer = options.server_header_buffer,
.extra_headers = &.{
.{ .name = "Content-Type", .value = "application/x-git-upload-pack-request" },
.{ .name = "Git-Protocol", .value = "version=2" },
},
});
errdefer request.deinit();
request.transfer_encoding = .{ .content_length = body.items.len };
@@ -721,18 +726,18 @@ pub const Session = struct {
/// Fetches the given refs from the server. A shallow fetch (depth 1) is
/// performed if the server supports it.
pub fn fetch(session: Session, allocator: Allocator, wants: []const []const u8) !FetchStream {
pub fn fetch(
session: Session,
allocator: Allocator,
wants: []const []const u8,
http_headers_buffer: []u8,
) !FetchStream {
var upload_pack_uri = session.uri;
upload_pack_uri.path = try std.fs.path.resolvePosix(allocator, &.{ "/", session.uri.path, "git-upload-pack" });
defer allocator.free(upload_pack_uri.path);
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;
var headers = std.http.Headers.init(allocator);
defer headers.deinit();
try headers.append("Content-Type", "application/x-git-upload-pack-request");
try headers.append("Git-Protocol", "version=2");
var body = std.ArrayListUnmanaged(u8){};
defer body.deinit(allocator);
const body_writer = body.writer(allocator);
@@ -756,8 +761,13 @@ pub const Session = struct {
try Packet.write(.{ .data = "done\n" }, body_writer);
try Packet.write(.flush, body_writer);
var request = try session.transport.open(.POST, upload_pack_uri, headers, .{
.handle_redirects = false,
var request = try session.transport.open(.POST, upload_pack_uri, .{
.redirect_behavior = .not_allowed,
.server_header_buffer = http_headers_buffer,
.extra_headers = &.{
.{ .name = "Content-Type", .value = "application/x-git-upload-pack-request" },
.{ .name = "Git-Protocol", .value = "version=2" },
},
});
errdefer request.deinit();
request.transfer_encoding = .{ .content_length = body.items.len };

View File

@@ -3322,13 +3322,13 @@ fn buildOutputType(
.ip4 => |ip4_addr| {
if (build_options.only_core_functionality) unreachable;
var server = std.net.StreamServer.init(.{
const addr: std.net.Address = .{ .in = ip4_addr };
var server = try addr.listen(.{
.reuse_address = true,
});
defer server.deinit();
try server.listen(.{ .in = ip4_addr });
const conn = try server.accept();
defer conn.stream.close();
@@ -5486,7 +5486,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
job_queue.read_only = true;
cleanup_build_dir = job_queue.global_cache.handle;
} else {
try http_client.loadDefaultProxies();
try http_client.initDefaultProxies(arena);
}
try job_queue.all_fetches.ensureUnusedCapacity(gpa, 1);
@@ -7442,7 +7442,7 @@ fn cmdFetch(
var http_client: std.http.Client = .{ .allocator = gpa };
defer http_client.deinit();
try http_client.loadDefaultProxies();
try http_client.initDefaultProxies(arena);
var progress: std.Progress = .{ .dont_print_on_dumb = true };
const root_prog_node = progress.start("Fetch", 0);

View File

@@ -55,10 +55,6 @@ pub const simple_cases = [_]SimpleCase{
.os_filter = .windows,
.link_libc = true,
},
.{
.src_path = "test/standalone/http.zig",
.all_modes = true,
},
// Ensure the development tools are buildable. Alphabetically sorted.
// No need to build `tools/spirv/grammar.zig`.

View File

@@ -1,700 +0,0 @@
const std = @import("std");
const http = std.http;
const Server = http.Server;
const Client = http.Client;
const mem = std.mem;
const testing = std.testing;
pub const std_options = .{
.http_disable_tls = true,
};
const max_header_size = 8192;
var gpa_server = std.heap.GeneralPurposeAllocator(.{ .stack_trace_frames = 12 }){};
var gpa_client = std.heap.GeneralPurposeAllocator(.{ .stack_trace_frames = 12 }){};
const salloc = gpa_server.allocator();
const calloc = gpa_client.allocator();
var server: Server = undefined;
fn handleRequest(res: *Server.Response) !void {
const log = std.log.scoped(.server);
log.info("{} {s} {s}", .{ res.request.method, @tagName(res.request.version), res.request.target });
if (res.request.headers.contains("expect")) {
if (mem.eql(u8, res.request.headers.getFirstValue("expect").?, "100-continue")) {
res.status = .@"continue";
try res.send();
res.status = .ok;
} else {
res.status = .expectation_failed;
try res.send();
return;
}
}
const body = try res.reader().readAllAlloc(salloc, 8192);
defer salloc.free(body);
if (res.request.headers.contains("connection")) {
try res.headers.append("connection", "keep-alive");
}
if (mem.startsWith(u8, res.request.target, "/get")) {
if (std.mem.indexOf(u8, res.request.target, "?chunked") != null) {
res.transfer_encoding = .chunked;
} else {
res.transfer_encoding = .{ .content_length = 14 };
}
try res.headers.append("content-type", "text/plain");
try res.send();
if (res.request.method != .HEAD) {
try res.writeAll("Hello, ");
try res.writeAll("World!\n");
try res.finish();
} else {
try testing.expectEqual(res.writeAll("errors"), error.NotWriteable);
}
} else if (mem.startsWith(u8, res.request.target, "/large")) {
res.transfer_encoding = .{ .content_length = 14 * 1024 + 14 * 10 };
try res.send();
var i: u32 = 0;
while (i < 5) : (i += 1) {
try res.writeAll("Hello, World!\n");
}
try res.writeAll("Hello, World!\n" ** 1024);
i = 0;
while (i < 5) : (i += 1) {
try res.writeAll("Hello, World!\n");
}
try res.finish();
} else if (mem.startsWith(u8, res.request.target, "/echo-content")) {
try testing.expectEqualStrings("Hello, World!\n", body);
try testing.expectEqualStrings("text/plain", res.request.headers.getFirstValue("content-type").?);
if (res.request.headers.contains("transfer-encoding")) {
try testing.expectEqualStrings("chunked", res.request.headers.getFirstValue("transfer-encoding").?);
res.transfer_encoding = .chunked;
} else {
res.transfer_encoding = .{ .content_length = 14 };
try testing.expectEqualStrings("14", res.request.headers.getFirstValue("content-length").?);
}
try res.send();
try res.writeAll("Hello, ");
try res.writeAll("World!\n");
try res.finish();
} else if (mem.eql(u8, res.request.target, "/trailer")) {
res.transfer_encoding = .chunked;
try res.send();
try res.writeAll("Hello, ");
try res.writeAll("World!\n");
// try res.finish();
try res.connection.writeAll("0\r\nX-Checksum: aaaa\r\n\r\n");
} else if (mem.eql(u8, res.request.target, "/redirect/1")) {
res.transfer_encoding = .chunked;
res.status = .found;
try res.headers.append("location", "../../get");
try res.send();
try res.writeAll("Hello, ");
try res.writeAll("Redirected!\n");
try res.finish();
} else if (mem.eql(u8, res.request.target, "/redirect/2")) {
res.transfer_encoding = .chunked;
res.status = .found;
try res.headers.append("location", "/redirect/1");
try res.send();
try res.writeAll("Hello, ");
try res.writeAll("Redirected!\n");
try res.finish();
} else if (mem.eql(u8, res.request.target, "/redirect/3")) {
res.transfer_encoding = .chunked;
const location = try std.fmt.allocPrint(salloc, "http://127.0.0.1:{d}/redirect/2", .{server.socket.listen_address.getPort()});
defer salloc.free(location);
res.status = .found;
try res.headers.append("location", location);
try res.send();
try res.writeAll("Hello, ");
try res.writeAll("Redirected!\n");
try res.finish();
} else if (mem.eql(u8, res.request.target, "/redirect/4")) {
res.transfer_encoding = .chunked;
res.status = .found;
try res.headers.append("location", "/redirect/3");
try res.send();
try res.writeAll("Hello, ");
try res.writeAll("Redirected!\n");
try res.finish();
} else if (mem.eql(u8, res.request.target, "/redirect/invalid")) {
const invalid_port = try getUnusedTcpPort();
const location = try std.fmt.allocPrint(salloc, "http://127.0.0.1:{d}", .{invalid_port});
defer salloc.free(location);
res.status = .found;
try res.headers.append("location", location);
try res.send();
try res.finish();
} else {
res.status = .not_found;
try res.send();
}
}
var handle_new_requests = true;
fn runServer(srv: *Server) !void {
outer: while (handle_new_requests) {
var res = try srv.accept(.{
.allocator = salloc,
.header_strategy = .{ .dynamic = max_header_size },
});
defer res.deinit();
while (res.reset() != .closing) {
res.wait() catch |err| switch (err) {
error.HttpHeadersInvalid => continue :outer,
error.EndOfStream => continue,
else => return err,
};
try handleRequest(&res);
}
}
}
fn serverThread(srv: *Server) void {
defer srv.deinit();
defer _ = gpa_server.deinit();
runServer(srv) catch |err| {
std.debug.print("server error: {}\n", .{err});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
_ = gpa_server.deinit();
std.os.exit(1);
};
}
fn killServer(addr: std.net.Address) void {
handle_new_requests = false;
const conn = std.net.tcpConnectToAddress(addr) catch return;
conn.close();
}
fn getUnusedTcpPort() !u16 {
const addr = try std.net.Address.parseIp("127.0.0.1", 0);
var s = std.net.StreamServer.init(.{});
defer s.deinit();
try s.listen(addr);
return s.listen_address.in.getPort();
}
pub fn main() !void {
const log = std.log.scoped(.client);
defer _ = gpa_client.deinit();
server = Server.init(.{ .reuse_address = true });
const addr = std.net.Address.parseIp("127.0.0.1", 0) catch unreachable;
try server.listen(addr);
const port = server.socket.listen_address.getPort();
const server_thread = try std.Thread.spawn(.{}, serverThread, .{&server});
var client = Client{ .allocator = calloc };
errdefer client.deinit();
// defer client.deinit(); handled below
try client.loadDefaultProxies();
{ // read content-length response
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.GET, uri, h, .{});
defer req.deinit();
try req.send(.{});
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // read large content-length response
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/large", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.GET, uri, h, .{});
defer req.deinit();
try req.send(.{});
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192 * 1024);
defer calloc.free(body);
try testing.expectEqual(@as(usize, 14 * 1024 + 14 * 10), body.len);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // send head request and not read chunked
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.HEAD, uri, h, .{});
defer req.deinit();
try req.send(.{});
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("", body);
try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?);
try testing.expectEqualStrings("14", req.response.headers.getFirstValue("content-length").?);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // read chunked response
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get?chunked", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.GET, uri, h, .{});
defer req.deinit();
try req.send(.{});
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // send head request and not read chunked
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get?chunked", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.HEAD, uri, h, .{});
defer req.deinit();
try req.send(.{});
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("", body);
try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?);
try testing.expectEqualStrings("chunked", req.response.headers.getFirstValue("transfer-encoding").?);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // check trailing headers
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/trailer", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.GET, uri, h, .{});
defer req.deinit();
try req.send(.{});
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
try testing.expectEqualStrings("aaaa", req.response.headers.getFirstValue("x-checksum").?);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // send content-length request
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
try h.append("content-type", "text/plain");
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.POST, uri, h, .{});
defer req.deinit();
req.transfer_encoding = .{ .content_length = 14 };
try req.send(.{});
try req.writeAll("Hello, ");
try req.writeAll("World!\n");
try req.finish();
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // read content-length response with connection close
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
try h.append("connection", "close");
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.GET, uri, h, .{});
defer req.deinit();
try req.send(.{});
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?);
}
// connection has been closed
try testing.expect(client.connection_pool.free_len == 0);
{ // send chunked request
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
try h.append("content-type", "text/plain");
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.POST, uri, h, .{});
defer req.deinit();
req.transfer_encoding = .chunked;
try req.send(.{});
try req.writeAll("Hello, ");
try req.writeAll("World!\n");
try req.finish();
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // relative redirect
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/1", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.GET, uri, h, .{});
defer req.deinit();
try req.send(.{});
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // redirect from root
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/2", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.GET, uri, h, .{});
defer req.deinit();
try req.send(.{});
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // absolute redirect
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/3", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.GET, uri, h, .{});
defer req.deinit();
try req.send(.{});
try req.wait();
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // too many redirects
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/4", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.GET, uri, h, .{});
defer req.deinit();
try req.send(.{});
req.wait() catch |err| switch (err) {
error.TooManyHttpRedirects => {},
else => return err,
};
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // check client without segfault by connection error after redirection
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/invalid", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.GET, uri, h, .{});
defer req.deinit();
try req.send(.{});
const result = req.wait();
// a proxy without an upstream is likely to return a 5xx status.
if (client.http_proxy == null) {
try testing.expectError(error.ConnectionRefused, result); // expects not segfault but the regular error
}
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // Client.fetch()
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
try h.append("content-type", "text/plain");
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content#fetch", .{port});
defer calloc.free(location);
log.info("{s}", .{location});
var res = try client.fetch(calloc, .{
.location = .{ .url = location },
.method = .POST,
.headers = h,
.payload = .{ .string = "Hello, World!\n" },
});
defer res.deinit();
try testing.expectEqualStrings("Hello, World!\n", res.body.?);
}
{ // expect: 100-continue
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
try h.append("expect", "100-continue");
try h.append("content-type", "text/plain");
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content#expect-100", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.POST, uri, h, .{});
defer req.deinit();
req.transfer_encoding = .chunked;
try req.send(.{});
try req.writeAll("Hello, ");
try req.writeAll("World!\n");
try req.finish();
try req.wait();
try testing.expectEqual(http.Status.ok, req.response.status);
const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
}
{ // expect: garbage
var h = http.Headers{ .allocator = calloc };
defer h.deinit();
try h.append("content-type", "text/plain");
try h.append("expect", "garbage");
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content#expect-garbage", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var req = try client.open(.POST, uri, h, .{});
defer req.deinit();
req.transfer_encoding = .chunked;
try req.send(.{});
try req.wait();
try testing.expectEqual(http.Status.expectation_failed, req.response.status);
}
{ // issue 16282 *** This test leaves the client in an invalid state, it must be last ***
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
const total_connections = client.connection_pool.free_size + 64;
var requests = try calloc.alloc(http.Client.Request, total_connections);
defer calloc.free(requests);
for (0..total_connections) |i| {
var req = try client.open(.GET, uri, .{ .allocator = calloc }, .{});
req.response.parser.done = true;
req.connection.?.closing = false;
requests[i] = req;
}
for (0..total_connections) |i| {
requests[i].deinit();
}
// free connections should be full now
try testing.expect(client.connection_pool.free_len == client.connection_pool.free_size);
}
client.deinit();
killServer(server.socket.listen_address);
server_thread.join();
}