Fetch: handle compressed git+http
@@ -292,6 +292,14 @@ pub const ContentEncoding = enum {
 });
 return map.get(s);
 }
+
+pub fn minBufferCapacity(ce: ContentEncoding) usize {
+return switch (ce) {
+.zstd => std.compress.zstd.default_window_len,
+.gzip, .deflate => std.compress.flate.max_window_len,
+.compress, .identity => 0,
+};
+}
 };

 pub const Connection = enum {
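The new minBufferCapacity lets callers size the decompression window from the response's Content-Encoding: zstd needs its default window, gzip/deflate need flate's maximum window, and identity needs no buffer at all. A minimal caller-side sketch in Zig, assuming an allocator gpa and a received response are in scope (illustrative names, not taken from this commit):

    // Size the window buffer from the negotiated encoding; 0 bytes for identity.
    const decompress_buffer = try gpa.alloc(u8, response.head.content_encoding.minBufferCapacity());
    defer gpa.free(decompress_buffer);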
@@ -464,8 +472,8 @@ pub const Reader = struct {
 transfer_encoding: TransferEncoding,
 content_length: ?u64,
 content_encoding: ContentEncoding,
-decompressor: *Decompressor,
-decompression_buffer: []u8,
+decompress: *Decompress,
+decompress_buffer: []u8,
 ) *std.Io.Reader {
 if (transfer_encoding == .none and content_length == null) {
 assert(reader.state == .received_head);
@@ -475,22 +483,22 @@ pub const Reader = struct {
 return reader.in;
 },
 .deflate => {
-decompressor.* = .{ .flate = .init(reader.in, .zlib, decompression_buffer) };
-return &decompressor.flate.reader;
+decompress.* = .{ .flate = .init(reader.in, .zlib, decompress_buffer) };
+return &decompress.flate.reader;
 },
 .gzip => {
-decompressor.* = .{ .flate = .init(reader.in, .gzip, decompression_buffer) };
-return &decompressor.flate.reader;
+decompress.* = .{ .flate = .init(reader.in, .gzip, decompress_buffer) };
+return &decompress.flate.reader;
 },
 .zstd => {
-decompressor.* = .{ .zstd = .init(reader.in, decompression_buffer, .{ .verify_checksum = false }) };
-return &decompressor.zstd.reader;
+decompress.* = .{ .zstd = .init(reader.in, decompress_buffer, .{ .verify_checksum = false }) };
+return &decompress.zstd.reader;
 },
 .compress => unreachable,
 }
 }
 const transfer_reader = bodyReader(reader, transfer_buffer, transfer_encoding, content_length);
-return decompressor.init(transfer_reader, decompression_buffer, content_encoding);
+return decompress.init(transfer_reader, decompress_buffer, content_encoding);
 }

 fn contentLengthStream(
@@ -692,33 +700,33 @@ pub const Reader = struct {
 }
 };

-pub const Decompressor = union(enum) {
+pub const Decompress = union(enum) {
 flate: std.compress.flate.Decompress,
 zstd: std.compress.zstd.Decompress,
 none: *std.Io.Reader,

 pub fn init(
-decompressor: *Decompressor,
+decompress: *Decompress,
 transfer_reader: *std.Io.Reader,
 buffer: []u8,
 content_encoding: ContentEncoding,
 ) *std.Io.Reader {
 switch (content_encoding) {
 .identity => {
-decompressor.* = .{ .none = transfer_reader };
+decompress.* = .{ .none = transfer_reader };
 return transfer_reader;
 },
 .deflate => {
-decompressor.* = .{ .flate = .init(transfer_reader, .zlib, buffer) };
-return &decompressor.flate.reader;
+decompress.* = .{ .flate = .init(transfer_reader, .zlib, buffer) };
+return &decompress.flate.reader;
 },
 .gzip => {
-decompressor.* = .{ .flate = .init(transfer_reader, .gzip, buffer) };
-return &decompressor.flate.reader;
+decompress.* = .{ .flate = .init(transfer_reader, .gzip, buffer) };
+return &decompress.flate.reader;
 },
 .zstd => {
-decompressor.* = .{ .zstd = .init(transfer_reader, buffer, .{ .verify_checksum = false }) };
-return &decompressor.zstd.reader;
+decompress.* = .{ .zstd = .init(transfer_reader, buffer, .{ .verify_checksum = false }) };
+return &decompress.zstd.reader;
 },
 .compress => unreachable,
 }
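Decompress simply tags which std.compress stream backs the body reader and hands back that stream's generic reader. A standalone sketch of the gzip arm, assuming input: *std.Io.Reader carries a gzip-encoded stream (names are illustrative):

    // Window buffer sized for flate, matching minBufferCapacity() for .gzip.
    var window: [std.compress.flate.max_window_len]u8 = undefined;
    var decompress: std.compress.flate.Decompress = .init(input, .gzip, &window);
    // Plain bytes are then read from the decompressor's generic reader.
    const body: *std.Io.Reader = &decompress.reader;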
@@ -724,8 +724,8 @@ pub const Response = struct {
 pub fn readerDecompressing(
 response: *Response,
 transfer_buffer: []u8,
-decompressor: *http.Decompressor,
-decompression_buffer: []u8,
+decompress: *http.Decompress,
+decompress_buffer: []u8,
 ) *Reader {
 response.head.invalidateStrings();
 const head = &response.head;
@@ -734,8 +734,8 @@ pub const Response = struct {
 head.transfer_encoding,
 head.content_length,
 head.content_encoding,
-decompressor,
-decompression_buffer,
+decompress,
+decompress_buffer,
 );
 }

@@ -1797,8 +1797,8 @@ pub fn fetch(client: *Client, options: FetchOptions) FetchError!FetchResult {
 defer if (options.decompress_buffer == null) client.allocator.free(decompress_buffer);

 var transfer_buffer: [64]u8 = undefined;
-var decompressor: http.Decompressor = undefined;
-const reader = response.readerDecompressing(&transfer_buffer, &decompressor, decompress_buffer);
+var decompress: http.Decompress = undefined;
+const reader = response.readerDecompressing(&transfer_buffer, &decompress, decompress_buffer);

 _ = reader.streamRemaining(response_writer) catch |err| switch (err) {
 error.ReadFailed => return response.bodyErr().?,
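The same three pieces recur throughout this commit: a small transfer buffer for the transfer-encoding body reader, an http.Decompress state slot, and a window buffer sized via minBufferCapacity. A condensed caller-side sketch, assuming gpa, response, and response_writer are already in scope:

    var transfer_buffer: [64]u8 = undefined;
    var decompress: http.Decompress = undefined;
    const decompress_buffer = try gpa.alloc(u8, response.head.content_encoding.minBufferCapacity());
    defer gpa.free(decompress_buffer);
    const reader = response.readerDecompressing(&transfer_buffer, &decompress, decompress_buffer);
    _ = try reader.streamRemaining(response_writer);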
@@ -883,7 +883,9 @@ const Resource = union(enum) {
 const HttpRequest = struct {
 request: std.http.Client.Request,
 response: std.http.Client.Response,
-buffer: []u8,
+transfer_buffer: []u8,
+decompress: std.http.Decompress,
+decompress_buffer: []u8,
 };

 fn deinit(resource: *Resource) void {
@@ -892,7 +894,6 @@ const Resource = union(enum) {
 .http_request => |*http_request| http_request.request.deinit(),
 .git => |*git_resource| {
 git_resource.fetch_stream.deinit();
-git_resource.session.deinit();
 },
 .dir => |*dir| dir.close(),
 }
@@ -902,7 +903,11 @@ const Resource = union(enum) {
 fn reader(resource: *Resource) *std.Io.Reader {
 return switch (resource.*) {
 .file => |*file_reader| return &file_reader.interface,
-.http_request => |*http_request| return http_request.response.reader(http_request.buffer),
+.http_request => |*http_request| return http_request.response.readerDecompressing(
+http_request.transfer_buffer,
+&http_request.decompress,
+http_request.decompress_buffer,
+),
 .git => |*g| return &g.fetch_stream.reader,
 .dir => unreachable,
 };
@@ -971,7 +976,6 @@ const FileType = enum {
 const init_resource_buffer_size = git.Packet.max_data_length;

 fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u8) RunError!void {
-const gpa = f.arena.child_allocator;
 const arena = f.arena.allocator();
 const eb = &f.error_bundle;

@@ -995,7 +999,9 @@ fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u
 .request = http_client.request(.GET, uri, .{}) catch |err|
 return f.fail(f.location_tok, try eb.printString("unable to connect to server: {t}", .{err})),
 .response = undefined,
-.buffer = reader_buffer,
+.transfer_buffer = reader_buffer,
+.decompress_buffer = &.{},
+.decompress = undefined,
 } };
 const request = &resource.http_request.request;
 errdefer request.deinit();
@@ -1019,6 +1025,7 @@ fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u
 .{ response.head.status, response.head.status.phrase() orelse "" },
 ));

+resource.http_request.decompress_buffer = try arena.alloc(u8, response.head.content_encoding.minBufferCapacity());
 return;
 }

@@ -1027,13 +1034,12 @@ fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u
 {
 var transport_uri = uri;
 transport_uri.scheme = uri.scheme["git+".len..];
-var session = git.Session.init(gpa, http_client, transport_uri, reader_buffer) catch |err| {
-return f.fail(f.location_tok, try eb.printString(
-"unable to discover remote git server capabilities: {s}",
-.{@errorName(err)},
-));
+var session = git.Session.init(arena, http_client, transport_uri, reader_buffer) catch |err| {
+return f.fail(
+f.location_tok,
+try eb.printString("unable to discover remote git server capabilities: {t}", .{err}),
+);
 };
-errdefer session.deinit();

 const want_oid = want_oid: {
 const want_ref =
@@ -644,7 +644,7 @@ pub const Session = struct {
 supports_agent: bool,
 supports_shallow: bool,
 object_format: Oid.Format,
-allocator: Allocator,
+arena: Allocator,

 const agent = "zig/" ++ @import("builtin").zig_version_string;
 const agent_capability = std.fmt.comptimePrint("agent={s}\n", .{agent});
@@ -652,7 +652,7 @@ pub const Session = struct {
 /// Initializes a client session and discovers the capabilities of the
 /// server for optimal transport.
 pub fn init(
-allocator: Allocator,
+arena: Allocator,
 transport: *std.http.Client,
 uri: std.Uri,
 /// Asserted to be at least `Packet.max_data_length`
@@ -661,13 +661,12 @@ pub const Session = struct {
 assert(response_buffer.len >= Packet.max_data_length);
 var session: Session = .{
 .transport = transport,
-.location = try .init(allocator, uri),
+.location = try .init(arena, uri),
 .supports_agent = false,
 .supports_shallow = false,
 .object_format = .sha1,
-.allocator = allocator,
+.arena = arena,
 };
-errdefer session.deinit();
 var capability_iterator: CapabilityIterator = undefined;
 try session.getCapabilities(&capability_iterator, response_buffer);
 defer capability_iterator.deinit();
@@ -690,34 +689,24 @@ pub const Session = struct {
 return session;
 }

-pub fn deinit(session: *Session) void {
-session.location.deinit(session.allocator);
-session.* = undefined;
-}
-
 /// An owned `std.Uri` representing the location of the server (base URI).
 const Location = struct {
 uri: std.Uri,

-fn init(allocator: Allocator, uri: std.Uri) !Location {
-const scheme = try allocator.dupe(u8, uri.scheme);
-errdefer allocator.free(scheme);
-const user = if (uri.user) |user| try std.fmt.allocPrint(allocator, "{f}", .{
+fn init(arena: Allocator, uri: std.Uri) !Location {
+const scheme = try arena.dupe(u8, uri.scheme);
+const user = if (uri.user) |user| try std.fmt.allocPrint(arena, "{f}", .{
 std.fmt.alt(user, .formatUser),
 }) else null;
-errdefer if (user) |s| allocator.free(s);
-const password = if (uri.password) |password| try std.fmt.allocPrint(allocator, "{f}", .{
+const password = if (uri.password) |password| try std.fmt.allocPrint(arena, "{f}", .{
 std.fmt.alt(password, .formatPassword),
 }) else null;
-errdefer if (password) |s| allocator.free(s);
-const host = if (uri.host) |host| try std.fmt.allocPrint(allocator, "{f}", .{
+const host = if (uri.host) |host| try std.fmt.allocPrint(arena, "{f}", .{
 std.fmt.alt(host, .formatHost),
 }) else null;
-errdefer if (host) |s| allocator.free(s);
-const path = try std.fmt.allocPrint(allocator, "{f}", .{
+const path = try std.fmt.allocPrint(arena, "{f}", .{
 std.fmt.alt(uri.path, .formatPath),
 });
-errdefer allocator.free(path);
 // The query and fragment are not used as part of the base server URI.
 return .{
 .uri = .{
@@ -730,14 +719,6 @@ pub const Session = struct {
 },
 };
 }
-
-fn deinit(loc: *Location, allocator: Allocator) void {
-allocator.free(loc.uri.scheme);
-if (loc.uri.user) |user| allocator.free(user.percent_encoded);
-if (loc.uri.password) |password| allocator.free(password.percent_encoded);
-if (loc.uri.host) |host| allocator.free(host.percent_encoded);
-allocator.free(loc.uri.path.percent_encoded);
-}
 };

 /// Returns an iterator over capabilities supported by the server.
@@ -745,16 +726,17 @@ pub const Session = struct {
 /// The `session.location` is updated if the server returns a redirect, so
 /// that subsequent session functions do not need to handle redirects.
 fn getCapabilities(session: *Session, it: *CapabilityIterator, response_buffer: []u8) !void {
+const arena = session.arena;
 assert(response_buffer.len >= Packet.max_data_length);
 var info_refs_uri = session.location.uri;
 {
-const session_uri_path = try std.fmt.allocPrint(session.allocator, "{f}", .{
+const session_uri_path = try std.fmt.allocPrint(arena, "{f}", .{
 std.fmt.alt(session.location.uri.path, .formatPath),
 });
-defer session.allocator.free(session_uri_path);
-info_refs_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(session.allocator, &.{ "/", session_uri_path, "info/refs" }) };
+info_refs_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(arena, &.{
+"/", session_uri_path, "info/refs",
+}) };
 }
-defer session.allocator.free(info_refs_uri.path.percent_encoded);
 info_refs_uri.query = .{ .percent_encoded = "service=git-upload-pack" };
 info_refs_uri.fragment = null;

@@ -767,6 +749,7 @@ pub const Session = struct {
 },
 }),
 .reader = undefined,
+.decompress = undefined,
 };
 errdefer it.deinit();
 const request = &it.request;
@@ -777,19 +760,17 @@ pub const Session = struct {
 if (response.head.status != .ok) return error.ProtocolError;
 const any_redirects_occurred = request.redirect_behavior.remaining() < max_redirects;
 if (any_redirects_occurred) {
-const request_uri_path = try std.fmt.allocPrint(session.allocator, "{f}", .{
+const request_uri_path = try std.fmt.allocPrint(arena, "{f}", .{
 std.fmt.alt(request.uri.path, .formatPath),
 });
-defer session.allocator.free(request_uri_path);
 if (!mem.endsWith(u8, request_uri_path, "/info/refs")) return error.UnparseableRedirect;
 var new_uri = request.uri;
 new_uri.path = .{ .percent_encoded = request_uri_path[0 .. request_uri_path.len - "/info/refs".len] };
-const new_location: Location = try .init(session.allocator, new_uri);
-session.location.deinit(session.allocator);
-session.location = new_location;
+session.location = try .init(arena, new_uri);
 }

-it.reader = response.reader(response_buffer);
+const decompress_buffer = try arena.alloc(u8, response.head.content_encoding.minBufferCapacity());
+it.reader = response.readerDecompressing(response_buffer, &it.decompress, decompress_buffer);
 var state: enum { response_start, response_content } = .response_start;
 while (true) {
 // Some Git servers (at least GitHub) include an additional
@@ -821,6 +802,7 @@ pub const Session = struct {
 const CapabilityIterator = struct {
 request: std.http.Client.Request,
 reader: *std.Io.Reader,
+decompress: std.http.Decompress,

 const Capability = struct {
 key: []const u8,
@@ -864,16 +846,15 @@ pub const Session = struct {

 /// Returns an iterator over refs known to the server.
 pub fn listRefs(session: Session, it: *RefIterator, options: ListRefsOptions) !void {
+const arena = session.arena;
 assert(options.buffer.len >= Packet.max_data_length);
 var upload_pack_uri = session.location.uri;
 {
-const session_uri_path = try std.fmt.allocPrint(session.allocator, "{f}", .{
+const session_uri_path = try std.fmt.allocPrint(arena, "{f}", .{
 std.fmt.alt(session.location.uri.path, .formatPath),
 });
-defer session.allocator.free(session_uri_path);
-upload_pack_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(session.allocator, &.{ "/", session_uri_path, "git-upload-pack" }) };
+upload_pack_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(arena, &.{ "/", session_uri_path, "git-upload-pack" }) };
 }
-defer session.allocator.free(upload_pack_uri.path.percent_encoded);
 upload_pack_uri.query = null;
 upload_pack_uri.fragment = null;

@@ -883,16 +864,14 @@ pub const Session = struct {
 try Packet.write(.{ .data = agent_capability }, &body);
 }
 {
-const object_format_packet = try std.fmt.allocPrint(session.allocator, "object-format={t}\n", .{
+const object_format_packet = try std.fmt.allocPrint(arena, "object-format={t}\n", .{
 session.object_format,
 });
-defer session.allocator.free(object_format_packet);
 try Packet.write(.{ .data = object_format_packet }, &body);
 }
 try Packet.write(.delimiter, &body);
 for (options.ref_prefixes) |ref_prefix| {
-const ref_prefix_packet = try std.fmt.allocPrint(session.allocator, "ref-prefix {s}\n", .{ref_prefix});
-defer session.allocator.free(ref_prefix_packet);
+const ref_prefix_packet = try std.fmt.allocPrint(arena, "ref-prefix {s}\n", .{ref_prefix});
 try Packet.write(.{ .data = ref_prefix_packet }, &body);
 }
 if (options.include_symrefs) {
@@ -913,6 +892,7 @@ pub const Session = struct {
 }),
 .reader = undefined,
 .format = session.object_format,
+.decompress = undefined,
 };
 const request = &it.request;
 errdefer request.deinit();
@@ -920,13 +900,15 @@ pub const Session = struct {

 var response = try request.receiveHead(options.buffer);
 if (response.head.status != .ok) return error.ProtocolError;
-it.reader = response.reader(options.buffer);
+const decompress_buffer = try arena.alloc(u8, response.head.content_encoding.minBufferCapacity());
+it.reader = response.readerDecompressing(options.buffer, &it.decompress, decompress_buffer);
 }

 pub const RefIterator = struct {
 format: Oid.Format,
 request: std.http.Client.Request,
 reader: *std.Io.Reader,
+decompress: std.http.Decompress,

 pub const Ref = struct {
 oid: Oid,
@@ -981,16 +963,15 @@ pub const Session = struct {
 /// Asserted to be at least `Packet.max_data_length`.
 response_buffer: []u8,
 ) !void {
+const arena = session.arena;
 assert(response_buffer.len >= Packet.max_data_length);
 var upload_pack_uri = session.location.uri;
 {
-const session_uri_path = try std.fmt.allocPrint(session.allocator, "{f}", .{
+const session_uri_path = try std.fmt.allocPrint(arena, "{f}", .{
 std.fmt.alt(session.location.uri.path, .formatPath),
 });
-defer session.allocator.free(session_uri_path);
-upload_pack_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(session.allocator, &.{ "/", session_uri_path, "git-upload-pack" }) };
+upload_pack_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(arena, &.{ "/", session_uri_path, "git-upload-pack" }) };
 }
-defer session.allocator.free(upload_pack_uri.path.percent_encoded);
 upload_pack_uri.query = null;
 upload_pack_uri.fragment = null;

@@ -1000,8 +981,7 @@ pub const Session = struct {
 try Packet.write(.{ .data = agent_capability }, &body);
 }
 {
-const object_format_packet = try std.fmt.allocPrint(session.allocator, "object-format={s}\n", .{@tagName(session.object_format)});
-defer session.allocator.free(object_format_packet);
+const object_format_packet = try std.fmt.allocPrint(arena, "object-format={s}\n", .{@tagName(session.object_format)});
 try Packet.write(.{ .data = object_format_packet }, &body);
 }
 try Packet.write(.delimiter, &body);
@@ -1031,6 +1011,7 @@ pub const Session = struct {
 .input = undefined,
 .reader = undefined,
 .remaining_len = undefined,
+.decompress = undefined,
 };
 const request = &fs.request;
 errdefer request.deinit();
@@ -1040,7 +1021,8 @@ pub const Session = struct {
 var response = try request.receiveHead(&.{});
 if (response.head.status != .ok) return error.ProtocolError;

-const reader = response.reader(response_buffer);
+const decompress_buffer = try arena.alloc(u8, response.head.content_encoding.minBufferCapacity());
+const reader = response.readerDecompressing(response_buffer, &fs.decompress, decompress_buffer);
 // We are not interested in any of the sections of the returned fetch
 // data other than the packfile section, since we aren't doing anything
 // complex like ref negotiation (this is a fresh clone).
@@ -1079,6 +1061,7 @@ pub const Session = struct {
 reader: std.Io.Reader,
 err: ?Error = null,
 remaining_len: usize,
+decompress: std.http.Decompress,

 pub fn deinit(fs: *FetchStream) void {
 fs.request.deinit();