http: fix handling of limit in chunkedSendFile

`limit` in chunkedSendFile applies only to the file, not the entire
chunk. `limit` in sendFileHeader does not include the header.

Additionally adds a comment to clarify what `limit` applies to in
sendFileHeader and fixes a small bug in it (`drain` may return
less than `header.len`).
This commit is contained in:
Kendall Condon
2025-08-12 15:48:45 -04:00
committed by Andrew Kelley
parent 5e986fef1f
commit 4f639ff880
2 changed files with 8 additions and 3 deletions

View File

@@ -868,6 +868,8 @@ pub fn sendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!u
}
/// Returns how many bytes from `header` and `file_reader` were consumed.
///
/// `limit` only applies to `file_reader`.
pub fn sendFileHeader(
w: *Writer,
header: []const u8,
@@ -882,7 +884,7 @@ pub fn sendFileHeader(
}
const buffered_contents = limit.slice(file_reader.interface.buffered());
const n = try w.vtable.drain(w, &.{ header, buffered_contents }, 1);
file_reader.interface.toss(n - header.len);
file_reader.interface.toss(n -| header.len);
return n;
}

View File

@@ -1021,8 +1021,11 @@ pub const BodyWriter = struct {
continue :l 1;
},
else => {
const new_limit = limit.min(.limited(chunk_len - 2));
const n = try out.sendFileHeader(w.buffered(), file_reader, new_limit);
const chunk_limit: std.Io.Limit = .limited(chunk_len - 2);
const n = if (chunk_limit.subtract(w.buffered().len)) |sendfile_limit|
try out.sendFileHeader(w.buffered(), file_reader, sendfile_limit.min(limit))
else
try out.write(chunk_limit.slice(w.buffered()));
chunked.chunk_len = chunk_len - n;
return w.consume(n);
},