compiler: update to new flate API

Andrew Kelley
2025-07-28 22:12:46 -07:00
parent 2569f4ff85
commit 05ce1f99a6
7 changed files with 122 additions and 107 deletions

View File

@@ -1709,6 +1709,15 @@ fn failingDiscard(r: *Reader, limit: Limit) Error!usize {
return error.ReadFailed;
}
+pub fn adaptToOldInterface(r: *Reader) std.Io.AnyReader {
+return .{ .context = r, .readFn = derpRead };
+}
+fn derpRead(context: *const anyopaque, buffer: []u8) anyerror!usize {
+const r: *Reader = @constCast(@alignCast(@ptrCast(context)));
+return r.readSliceShort(buffer);
+}
test "readAlloc when the backing reader provides one byte at a time" {
const str = "This is a test";
var tiny_buffer: [1]u8 = undefined;
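
The adaptToOldInterface shim added above bridges the new *std.Io.Reader to code that still consumes the old vtable-based std.Io.AnyReader. A minimal sketch of the bridge in use; legacySum is a hypothetical consumer, and Reader.fixed is assumed from the same std:

const std = @import("std");

// Hypothetical helper still written against the old AnyReader interface.
fn legacySum(any: std.Io.AnyReader) !u64 {
    var total: u64 = 0;
    var byte: [1]u8 = undefined;
    while (try any.read(&byte) != 0) total += byte[0];
    return total;
}

test "bridge a new Reader into old AnyReader code" {
    var fixed: std.Io.Reader = .fixed("abc");
    // adaptToOldInterface packages the new reader behind the old vtable.
    const sum = try legacySum(fixed.adaptToOldInterface());
    try std.testing.expectEqual(@as(u64, 'a' + 'b' + 'c'), sum);
}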

View File

@@ -23,7 +23,7 @@ dst_dec: DistanceDecoder,
final_block: bool,
state: State,
-read_err: ?Error,
+err: ?Error,
const BlockType = enum(u2) {
stored = 0,
@@ -74,7 +74,7 @@ pub fn init(input: *Reader, container: Container, buffer: []u8) Decompress {
.dst_dec = .{},
.final_block = false,
.state = .protocol_header,
-.read_err = null,
+.err = null,
};
}
@@ -153,7 +153,7 @@ pub fn stream(r: *Reader, w: *Writer, limit: std.Io.Limit) Reader.StreamError!us
if (d.state == .end) {
return error.EndOfStream;
} else {
-d.read_err = error.EndOfStream;
+d.err = error.EndOfStream;
return error.ReadFailed;
}
},
@@ -161,7 +161,7 @@ pub fn stream(r: *Reader, w: *Writer, limit: std.Io.Limit) Reader.StreamError!us
else => |e| {
// In the event of an error, state is unmodified so that it can be
// better used to diagnose the failure.
-d.read_err = e;
+d.err = e;
return error.ReadFailed;
},
};
@@ -1179,7 +1179,7 @@ fn testFailure(container: Container, in: []const u8, expected_err: anyerror) !vo
var decompress: Decompress = .init(&reader, container, &.{});
try testing.expectError(error.ReadFailed, decompress.reader.streamRemaining(&aw.writer));
-try testing.expectEqual(expected_err, decompress.read_err orelse return error.TestFailed);
+try testing.expectEqual(expected_err, decompress.err orelse return error.TestFailed);
}
fn testDecompress(container: Container, compressed: []const u8, expected_plain: []const u8) !void {
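
With read_err renamed to err, consumers of the Reader interface still see a generic error.ReadFailed and then consult the field for the underlying cause. A sketch of that pattern against this commit's flate.Decompress; inflateAll is hypothetical:

const std = @import("std");
const flate = std.compress.flate;

fn inflateAll(input: *std.Io.Reader, out: *std.Io.Writer) !void {
    var window: [flate.max_window_len]u8 = undefined;
    var decompress: flate.Decompress = .init(input, .raw, &window);
    _ = decompress.reader.streamRemaining(out) catch |e| switch (e) {
        // The stream interface only reports ReadFailed; the renamed err
        // field carries the precise flate diagnosis (e.g. EndOfStream).
        error.ReadFailed => return decompress.err orelse error.ReadFailed,
        error.WriteFailed => return error.WriteFailed,
    };
}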

View File

@@ -405,8 +405,8 @@ pub const RequestTransfer = union(enum) {
/// The decompressor for response messages.
pub const Compression = union(enum) {
-deflate: std.compress.flate.Decompress,
-gzip: std.compress.flate.Decompress,
+//deflate: std.compress.flate.Decompress,
+//gzip: std.compress.flate.Decompress,
// https://github.com/ziglang/zig/issues/18937
//zstd: ZstdDecompressor,
none: void,
@@ -1074,12 +1074,10 @@ pub const Request = struct {
switch (req.response.transfer_compression) {
.identity => req.response.compression = .none,
.compress, .@"x-compress" => return error.CompressionUnsupported,
-.deflate => req.response.compression = .{
-.deflate = std.compress.zlib.decompressor(req.transferReader()),
-},
-.gzip, .@"x-gzip" => req.response.compression = .{
-.gzip = std.compress.gzip.decompressor(req.transferReader()),
-},
+// I'm about to upstream my http.Client rewrite
+.deflate => return error.CompressionUnsupported,
+// I'm about to upstream my http.Client rewrite
+.gzip, .@"x-gzip" => return error.CompressionUnsupported,
// https://github.com/ziglang/zig/issues/18937
//.zstd => req.response.compression = .{
// .zstd = std.compress.zstd.decompressStream(req.client.allocator, req.transferReader()),
@@ -1105,8 +1103,9 @@ pub const Request = struct {
/// Reads data from the response body. Must be called after `wait`.
pub fn read(req: *Request, buffer: []u8) ReadError!usize {
const out_index = switch (req.response.compression) {
-.deflate => |*deflate| deflate.read(buffer) catch return error.DecompressionFailure,
-.gzip => |*gzip| gzip.read(buffer) catch return error.DecompressionFailure,
+// I'm about to upstream my http client rewrite
+//.deflate => |*deflate| deflate.readSlice(buffer) catch return error.DecompressionFailure,
+//.gzip => |*gzip| gzip.read(buffer) catch return error.DecompressionFailure,
// https://github.com/ziglang/zig/issues/18937
//.zstd => |*zstd| zstd.read(buffer) catch return error.DecompressionFailure,
else => try req.transferRead(buffer),
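
Until the rewritten client lands, wait() now fails with error.CompressionUnsupported whenever the server picks gzip or deflate, so a caller can steer the server toward identity bodies instead. A hedged sketch, assuming the Headers.accept_encoding override knob of this std era; gpa and uri are placeholders:

var client: std.http.Client = .{ .allocator = gpa };
defer client.deinit();

var server_header_buffer: [4096]u8 = undefined;
var req = try client.open(.GET, uri, .{
    .server_header_buffer = &server_header_buffer,
    // Ask for an uncompressed body so the disabled decoders are never hit.
    .headers = .{ .accept_encoding = .{ .override = "identity" } },
});
defer req.deinit();
try req.send();
try req.finish();
try req.wait();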

View File

@@ -7,8 +7,9 @@ const builtin = @import("builtin");
const std = @import("std");
const File = std.fs.File;
const is_le = builtin.target.cpu.arch.endian() == .little;
-const Writer = std.io.Writer;
-const Reader = std.io.Reader;
+const Writer = std.Io.Writer;
+const Reader = std.Io.Reader;
+const flate = std.compress.flate;
pub const CompressionMethod = enum(u16) {
store = 0,
@@ -117,6 +118,7 @@ pub const EndRecord = extern struct {
pub const FindFileError = File.GetEndPosError || File.SeekError || File.ReadError || error{
ZipNoEndRecord,
EndOfStream,
+ReadFailed,
};
pub fn findFile(fr: *File.Reader) FindFileError!EndRecord {
@@ -137,8 +139,7 @@ pub const EndRecord = extern struct {
try fr.seekTo(end_pos - @as(u64, new_loaded_len));
const read_buf: []u8 = buf[buf.len - new_loaded_len ..][0..read_len];
-var br = fr.interface().unbuffered();
-br.readSlice(read_buf) catch |err| switch (err) {
+fr.interface.readSliceAll(read_buf) catch |err| switch (err) {
error.ReadFailed => return fr.err.?,
error.EndOfStream => return error.EndOfStream,
};
@@ -164,7 +165,7 @@ pub const EndRecord = extern struct {
pub const Decompress = struct {
interface: Reader,
state: union {
-inflate: std.compress.flate.Decompress,
+inflate: flate.Decompress,
store: *Reader,
},
@@ -201,7 +202,7 @@ pub const Decompress = struct {
fn streamDeflate(r: *Reader, w: *Writer, limit: std.io.Limit) Reader.StreamError!usize {
const d: *Decompress = @fieldParentPtr("interface", r);
-return std.compress.flate.Decompress.read(&d.inflate, w, limit);
+return flate.Decompress.read(&d.inflate, w, limit);
}
};
@@ -305,7 +306,7 @@ pub const Iterator = struct {
if (locator_end_offset > stream_len)
return error.ZipTruncated;
try input.seekTo(stream_len - locator_end_offset);
-const locator = input.interface.takeStructEndian(EndLocator64, .little) catch |err| switch (err) {
+const locator = input.interface.takeStruct(EndLocator64, .little) catch |err| switch (err) {
error.ReadFailed => return input.err.?,
error.EndOfStream => return error.EndOfStream,
};
@@ -318,7 +319,7 @@ pub const Iterator = struct {
try input.seekTo(locator.record_file_offset);
-const record64 = input.interface.takeStructEndian(EndRecord64, .little) catch |err| switch (err) {
+const record64 = input.interface.takeStruct(EndRecord64, .little) catch |err| switch (err) {
error.ReadFailed => return input.err.?,
error.EndOfStream => return error.EndOfStream,
};
@@ -374,7 +375,7 @@ pub const Iterator = struct {
const header_zip_offset = self.cd_zip_offset + self.cd_record_offset;
const input = self.input;
try input.seekTo(header_zip_offset);
-const header = input.interface.takeStructEndian(CentralDirectoryFileHeader, .little) catch |err| switch (err) {
+const header = input.interface.takeStruct(CentralDirectoryFileHeader, .little) catch |err| switch (err) {
error.ReadFailed => return input.err.?,
error.EndOfStream => return error.EndOfStream,
};
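
The takeStructEndian to takeStruct renames above reflect the new Reader primitive that decodes a fixed-layout struct with an explicit endianness in one call. A sketch against a made-up record type:

const std = @import("std");

const RecordHeader = extern struct {
    signature: [4]u8,
    version: u16,
    flags: u16,
    size: u32,
};

fn readRecordHeader(r: *std.Io.Reader) !RecordHeader {
    // Reads @sizeOf(RecordHeader) bytes, byte-swapping integer fields to
    // native order; fails with EndOfStream if the input runs out first.
    return r.takeStruct(RecordHeader, .little);
}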
@@ -405,7 +406,7 @@ pub const Iterator = struct {
const extra = extra_buf[0..header.extra_len];
try input.seekTo(header_zip_offset + @sizeOf(CentralDirectoryFileHeader) + header.filename_len);
-input.interface.readSlice(extra) catch |err| switch (err) {
+input.interface.readSliceAll(extra) catch |err| switch (err) {
error.ReadFailed => return input.err.?,
error.EndOfStream => return error.EndOfStream,
};
@@ -460,7 +461,7 @@ pub const Iterator = struct {
options: ExtractOptions,
filename_buf: []u8,
dest: std.fs.Dir,
-) !u32 {
+) !void {
if (filename_buf.len < self.filename_len)
return error.ZipInsufficientBuffer;
switch (self.compression_method) {
@@ -470,13 +471,13 @@ pub const Iterator = struct {
const filename = filename_buf[0..self.filename_len];
{
try stream.seekTo(self.header_zip_offset + @sizeOf(CentralDirectoryFileHeader));
-try stream.interface.readSlice(filename);
+try stream.interface.readSliceAll(filename);
}
const local_data_header_offset: u64 = local_data_header_offset: {
const local_header = blk: {
try stream.seekTo(self.file_offset);
-break :blk try stream.interface.takeStructEndian(LocalFileHeader, .little);
+break :blk try stream.interface.takeStruct(LocalFileHeader, .little);
};
if (!std.mem.eql(u8, &local_header.signature, &local_file_header_sig))
return error.ZipBadFileOffset;
@@ -502,7 +503,7 @@ pub const Iterator = struct {
{
try stream.seekTo(self.file_offset + @sizeOf(LocalFileHeader) + local_header.filename_len);
-try stream.interface.readSlice(extra);
+try stream.interface.readSliceAll(extra);
}
var extra_offset: usize = 0;
@@ -550,7 +551,7 @@ pub const Iterator = struct {
if (self.uncompressed_size != 0)
return error.ZipBadDirectorySize;
try dest.makePath(filename[0 .. filename.len - 1]);
-return std.hash.Crc32.hash(&.{});
+return;
}
const out_file = blk: {
@@ -564,31 +565,36 @@ pub const Iterator = struct {
break :blk try dest.createFile(filename, .{ .exclusive = true });
};
defer out_file.close();
-var file_writer = out_file.writer();
-var file_bw = file_writer.writer(&.{});
+var out_file_buffer: [1024]u8 = undefined;
+var file_writer = out_file.writer(&out_file_buffer);
const local_data_file_offset: u64 =
@as(u64, self.file_offset) +
@as(u64, @sizeOf(LocalFileHeader)) +
local_data_header_offset;
try stream.seekTo(local_data_file_offset);
-var limited_file_reader = stream.interface.limited(.limited(self.compressed_size));
-var file_read_buffer: [1000]u8 = undefined;
-var decompress_read_buffer: [1000]u8 = undefined;
-var limited_br = limited_file_reader.reader().buffered(&file_read_buffer);
-var decompress: Decompress = undefined;
-var decompress_br = decompress.readable(&limited_br, self.compression_method, &decompress_read_buffer);
-const start_out = file_bw.count;
-var hash_writer = file_bw.hashed(std.hash.Crc32.init());
-var hash_bw = hash_writer.writer(&.{});
-decompress_br.readAll(&hash_bw, .limited(self.uncompressed_size)) catch |err| switch (err) {
-error.ReadFailed => return stream.err.?,
-error.WriteFailed => return file_writer.err.?,
-error.EndOfStream => return error.ZipDecompressTruncated,
-};
-if (limited_file_reader.remaining.nonzero()) return error.ZipDecompressTruncated;
-const written = file_bw.count - start_out;
-if (written != self.uncompressed_size) return error.ZipUncompressSizeMismatch;
-return hash_writer.hasher.final();
+// TODO limit based on self.compressed_size
+switch (self.compression_method) {
+.store => {
+stream.interface.streamExact(&file_writer.interface, self.uncompressed_size) catch |err| switch (err) {
+error.ReadFailed => return stream.err.?,
+error.WriteFailed => return file_writer.err.?,
+error.EndOfStream => return error.ZipDecompressTruncated,
+};
+},
+.deflate => {
+var flate_buffer: [flate.max_window_len]u8 = undefined;
+var decompress: flate.Decompress = .init(&stream.interface, .raw, &flate_buffer);
+decompress.reader.streamExact(&file_writer.interface, self.uncompressed_size) catch |err| switch (err) {
+error.ReadFailed => return stream.err.?,
+error.WriteFailed => return file_writer.err orelse decompress.err.?,
+error.EndOfStream => return error.ZipDecompressTruncated,
+};
+},
+else => return error.UnsupportedCompressionMethod,
+}
+try file_writer.end();
}
};
};
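
The rewrite above also drops the old hash_writer pipeline, so extraction no longer computes a Crc32 while streaming; verify_checksums below is left as a TODO. One possible re-check is to hash the extracted file afterwards; verifyCrc32 is a hypothetical sketch built on std.hash.Crc32:

fn verifyCrc32(dest: std.fs.Dir, filename: []const u8, expected: u32) !void {
    const f = try dest.openFile(filename, .{});
    defer f.close();
    var hasher = std.hash.Crc32.init();
    var buf: [4096]u8 = undefined;
    while (true) {
        const n = try f.read(&buf);
        if (n == 0) break;
        hasher.update(buf[0..n]);
    }
    if (hasher.final() != expected) return error.ZipCrcMismatch;
}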
@@ -636,19 +642,19 @@ pub const ExtractOptions = struct {
/// Allow filenames within the zip to use backslashes. Back slashes are normalized
/// to forward slashes before forwarding them to platform APIs.
allow_backslashes: bool = false,
diagnostics: ?*Diagnostics = null,
+verify_checksums: bool = false,
};
/// Extract the zipped files to the given `dest` directory.
pub fn extract(dest: std.fs.Dir, fr: *File.Reader, options: ExtractOptions) !void {
+if (options.verify_checksums) @panic("TODO unimplemented");
var iter = try Iterator.init(fr);
var filename_buf: [std.fs.max_path_bytes]u8 = undefined;
while (try iter.next()) |entry| {
-const crc32 = try entry.extract(fr, options, &filename_buf, dest);
-if (crc32 != entry.crc32)
-return error.ZipCrcMismatch;
+try entry.extract(fr, options, &filename_buf, dest);
if (options.diagnostics) |d| {
try d.nextFilename(filename_buf[0..entry.filename_len]);
}
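
Taken together, extraction is now driven from a buffered File.Reader and returns no checksum. A minimal end-to-end sketch, assuming File.reader takes the buffer as in this std revision; the paths and buffer size are placeholders:

const std = @import("std");

pub fn main() !void {
    var dest = try std.fs.cwd().makeOpenPath("unpacked", .{});
    defer dest.close();

    const zip_file = try std.fs.cwd().openFile("archive.zip", .{});
    defer zip_file.close();

    var buffer: [4096]u8 = undefined;
    var file_reader = zip_file.reader(&buffer);
    // verify_checksums currently panics as TODO, so leave it off.
    try std.zip.extract(dest, &file_reader, .{});
}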