Remove streaming delta support from JGit

Streaming packed deltas is so slow that it never feasibly completes:
even on relatively fast systems with a large amount of storage, it
takes hours to stream just a few hundred megabytes.  Shawn described
this as a "failed experiment" in the following mailing list post:
http://dev.eclipse.org/mhonarc/lists/jgit-dev/msg01674.html

Change-Id: Idc12f59e37b122f13856d7b533a5af9d8867a8a5
Signed-off-by: Doug Kelly <dougk.ff7@gmail.com>
Doug Kelly 2014-04-23 19:33:18 -05:00 committed by Shawn Pearce
parent 62bbde3393
commit 62697c8d33
6 changed files with 74 additions and 999 deletions

View File

@@ -47,20 +47,27 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.security.MessageDigest;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.zip.Deflater;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.pack.DeltaEncoder;
import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.junit.JGitTestUtil;
import org.eclipse.jgit.junit.LocalDiskRepositoryTestCase;
import org.eclipse.jgit.junit.TestRepository;
@@ -75,6 +82,7 @@
import org.eclipse.jgit.revwalk.RevBlob;
import org.eclipse.jgit.storage.file.WindowCacheConfig;
import org.eclipse.jgit.transport.PackParser;
import org.eclipse.jgit.transport.PackedObjectInfo;
import org.eclipse.jgit.util.IO;
import org.eclipse.jgit.util.NB;
import org.eclipse.jgit.util.TemporaryBuffer;
@@ -105,7 +113,7 @@ public void setUp() throws Exception {
WindowCacheConfig cfg = new WindowCacheConfig();
cfg.setStreamFileThreshold(streamThreshold);
WindowCache.reconfigure(cfg);
cfg.install();
repo = createBareRepository();
tr = new TestRepository<Repository>(repo);
@@ -116,7 +124,7 @@ public void setUp() throws Exception {
public void tearDown() throws Exception {
if (wc != null)
wc.release();
WindowCache.reconfigure(new WindowCacheConfig());
new WindowCacheConfig().install();
super.tearDown();
}
@@ -241,68 +249,65 @@ public void testDelta_SmallObjectChain() throws Exception {
}
@Test
public void testDelta_LargeObjectChain() throws Exception {
public void testDelta_FailsOver2GiB() throws Exception {
ObjectInserter.Formatter fmt = new ObjectInserter.Formatter();
byte[] data0 = new byte[streamThreshold + 5];
Arrays.fill(data0, (byte) 0xf3);
ObjectId id0 = fmt.idFor(Constants.OBJ_BLOB, data0);
byte[] base = new byte[] { 'a' };
ObjectId idA = fmt.idFor(Constants.OBJ_BLOB, base);
ObjectId idB = fmt.idFor(Constants.OBJ_BLOB, new byte[] { 'b' });
PackedObjectInfo a = new PackedObjectInfo(idA);
PackedObjectInfo b = new PackedObjectInfo(idB);
TemporaryBuffer.Heap pack = new TemporaryBuffer.Heap(64 * 1024);
packHeader(pack, 4);
objectHeader(pack, Constants.OBJ_BLOB, data0.length);
deflate(pack, data0);
packHeader(pack, 2);
a.setOffset(pack.length());
objectHeader(pack, Constants.OBJ_BLOB, base.length);
deflate(pack, base);
byte[] data1 = clone(0x01, data0);
byte[] delta1 = delta(data0, data1);
ObjectId id1 = fmt.idFor(Constants.OBJ_BLOB, data1);
objectHeader(pack, Constants.OBJ_REF_DELTA, delta1.length);
id0.copyRawTo(pack);
deflate(pack, delta1);
ByteArrayOutputStream tmp = new ByteArrayOutputStream();
DeltaEncoder de = new DeltaEncoder(tmp, base.length, 3L << 30);
de.copy(0, 1);
byte[] delta = tmp.toByteArray();
b.setOffset(pack.length());
objectHeader(pack, Constants.OBJ_REF_DELTA, delta.length);
idA.copyRawTo(pack);
deflate(pack, delta);
byte[] footer = digest(pack);
byte[] data2 = clone(0x02, data1);
byte[] delta2 = delta(data1, data2);
ObjectId id2 = fmt.idFor(Constants.OBJ_BLOB, data2);
objectHeader(pack, Constants.OBJ_REF_DELTA, delta2.length);
id1.copyRawTo(pack);
deflate(pack, delta2);
File packName = new File(new File(
((FileObjectDatabase) repo.getObjectDatabase()).getDirectory(),
"pack"), idA.name() + ".pack");
File idxName = new File(new File(
((FileObjectDatabase) repo.getObjectDatabase()).getDirectory(),
"pack"), idA.name() + ".idx");
byte[] data3 = clone(0x03, data2);
byte[] delta3 = delta(data2, data3);
ObjectId id3 = fmt.idFor(Constants.OBJ_BLOB, data3);
objectHeader(pack, Constants.OBJ_REF_DELTA, delta3.length);
id2.copyRawTo(pack);
deflate(pack, delta3);
digest(pack);
PackParser ip = index(pack.toByteArray());
ip.setAllowThin(true);
ip.parse(NullProgressMonitor.INSTANCE);
assertTrue("has blob", wc.has(id3));
ObjectLoader ol = wc.open(id3);
assertNotNull("created loader", ol);
assertEquals(Constants.OBJ_BLOB, ol.getType());
assertEquals(data3.length, ol.getSize());
assertTrue("is large", ol.isLarge());
FileOutputStream f = new FileOutputStream(packName);
try {
ol.getCachedBytes();
fail("Should have thrown LargeObjectException");
} catch (LargeObjectException tooBig) {
assertEquals(MessageFormat.format(
JGitText.get().largeObjectException, id3.name()), tooBig
.getMessage());
f.write(pack.toByteArray());
} finally {
f.close();
}
ObjectStream in = ol.openStream();
assertNotNull("have stream", in);
assertEquals(Constants.OBJ_BLOB, in.getType());
assertEquals(data3.length, in.getSize());
byte[] act = new byte[data3.length];
IO.readFully(in, act, 0, data3.length);
assertTrue("same content", Arrays.equals(act, data3));
assertEquals("stream at EOF", -1, in.read());
in.close();
f = new FileOutputStream(idxName);
try {
List<PackedObjectInfo> list = new ArrayList<PackedObjectInfo>();
list.add(a);
list.add(b);
Collections.sort(list);
new PackIndexWriterV1(f).write(list, footer);
} finally {
f.close();
}
PackFile packFile = new PackFile(packName, PackExt.INDEX.getBit());
try {
packFile.get(wc, b);
fail("expected LargeObjectException.ExceedsByteArrayLimit");
} catch (LargeObjectException.ExceedsByteArrayLimit bad) {
assertNull(bad.getObjectId());
} finally {
packFile.close();
}
}
private static byte[] clone(int first, byte[] base) {
@@ -358,10 +363,13 @@ private static void deflate(TemporaryBuffer.Heap pack, final byte[] content)
deflater.end();
}
private static void digest(TemporaryBuffer.Heap buf) throws IOException {
private static byte[] digest(TemporaryBuffer.Heap buf)
throws IOException {
MessageDigest md = Constants.newMessageDigest();
md.update(buf.toByteArray());
buf.write(md.digest());
byte[] footer = md.digest();
buf.write(footer);
return footer;
}
private ObjectInserter inserter;
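
The rewritten test above (testDelta_FailsOver2GiB) no longer builds a chain of
large deltified blobs; it writes a pack containing a single OBJ_REF_DELTA whose
header declares a 3 GiB result (3L << 30) and asserts that reading it fails
with LargeObjectException.ExceedsByteArrayLimit. A minimal sketch of why that
header alone trips the new limit, using only the DeltaEncoder and BinaryDelta
APIs already exercised in this file (the class name OversizedDeltaSketch is
illustrative, not part of the commit):

import java.io.ByteArrayOutputStream;

import org.eclipse.jgit.internal.storage.pack.BinaryDelta;
import org.eclipse.jgit.internal.storage.pack.DeltaEncoder;

public class OversizedDeltaSketch {
    public static void main(String[] args) throws Exception {
        byte[] base = { 'a' };
        ByteArrayOutputStream tmp = new ByteArrayOutputStream();

        // The encoder first writes the delta header: base size, then the
        // declared result size of 3 GiB.
        DeltaEncoder de = new DeltaEncoder(tmp, base.length, 3L << 30);
        de.copy(0, 1); // one copy instruction, exactly as in the test
        byte[] delta = tmp.toByteArray();

        // The result size recorded in the header is larger than any Java
        // byte[] can hold, so the pack reading code now throws
        // LargeObjectException.ExceedsByteArrayLimit instead of handing the
        // object to the removed streaming loader.
        long sz = BinaryDelta.getResultSize(delta);
        System.out.println(sz + " > " + Integer.MAX_VALUE + " : "
                + (sz > Integer.MAX_VALUE)); // prints "... : true"
    }
}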

View File

@@ -1,314 +0,0 @@
/*
* Copyright (C) 2010, Google Inc.
* and other copyright owners as documented in the project's IP log.
*
* This program and the accompanying materials are made available
* under the terms of the Eclipse Distribution License v1.0 which
* accompanies this distribution, is reproduced below, and is
* available at http://www.eclipse.org/org/documents/edl-v10.php
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* - Neither the name of the Eclipse Foundation, Inc. nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.eclipse.jgit.internal.storage.pack;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import org.eclipse.jgit.errors.CorruptObjectException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.pack.BinaryDelta;
import org.eclipse.jgit.internal.storage.pack.DeltaEncoder;
import org.eclipse.jgit.internal.storage.pack.DeltaStream;
import org.eclipse.jgit.junit.JGitTestUtil;
import org.eclipse.jgit.junit.TestRng;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.util.IO;
import org.junit.Before;
import org.junit.Test;
public class DeltaStreamTest {
private TestRng rng;
private ByteArrayOutputStream deltaBuf;
private DeltaEncoder deltaEnc;
private byte[] base;
private byte[] data;
private int dataPtr;
private byte[] delta;
private TestRng getRng() {
if (rng == null)
rng = new TestRng(JGitTestUtil.getName());
return rng;
}
@Before
public void setUp() throws Exception {
deltaBuf = new ByteArrayOutputStream();
}
@Test
public void testCopy_SingleOp() throws IOException {
init((1 << 16) + 1, (1 << 8) + 1);
copy(0, data.length);
assertValidState();
}
@Test
public void testCopy_MaxSize() throws IOException {
int max = (0xff << 16) + (0xff << 8) + 0xff;
init(1 + max, max);
copy(1, max);
assertValidState();
}
@Test
public void testCopy_64k() throws IOException {
init(0x10000 + 2, 0x10000 + 1);
copy(1, 0x10000);
copy(0x10001, 1);
assertValidState();
}
@Test
public void testCopy_Gap() throws IOException {
init(256, 8);
copy(4, 4);
copy(128, 4);
assertValidState();
}
@Test
public void testCopy_OutOfOrder() throws IOException {
init((1 << 16) + 1, (1 << 16) + 1);
copy(1 << 8, 1 << 8);
copy(0, data.length - dataPtr);
assertValidState();
}
@Test
public void testInsert_SingleOp() throws IOException {
init((1 << 16) + 1, 2);
insert("hi");
assertValidState();
}
@Test
public void testInsertAndCopy() throws IOException {
init(8, 512);
insert(new byte[127]);
insert(new byte[127]);
insert(new byte[127]);
insert(new byte[125]);
copy(2, 6);
assertValidState();
}
@Test
public void testSkip() throws IOException {
init(32, 15);
copy(2, 2);
insert("ab");
insert("cd");
copy(4, 4);
copy(0, 2);
insert("efg");
assertValidState();
for (int p = 0; p < data.length; p++) {
byte[] act = new byte[data.length];
System.arraycopy(data, 0, act, 0, p);
DeltaStream in = open();
IO.skipFully(in, p);
assertEquals(data.length - p, in.read(act, p, data.length - p));
assertEquals(-1, in.read());
assertTrue("skipping " + p, Arrays.equals(data, act));
}
// Skip all the way to the end should still recognize EOF.
DeltaStream in = open();
IO.skipFully(in, data.length);
assertEquals(-1, in.read());
assertEquals(0, in.skip(1));
// Skip should not open the base as we move past it, but it
// will open when we need to start copying data from it.
final boolean[] opened = new boolean[1];
in = new DeltaStream(new ByteArrayInputStream(delta)) {
@Override
protected long getBaseSize() throws IOException {
return base.length;
}
@Override
protected InputStream openBase() throws IOException {
opened[0] = true;
return new ByteArrayInputStream(base);
}
};
IO.skipFully(in, 7);
assertFalse("not yet open", opened[0]);
assertEquals(data[7], in.read());
assertTrue("now open", opened[0]);
}
@Test
public void testIncorrectBaseSize() throws IOException {
init(4, 4);
copy(0, 4);
assertValidState();
DeltaStream in = new DeltaStream(new ByteArrayInputStream(delta)) {
@Override
protected long getBaseSize() throws IOException {
return 128;
}
@Override
protected InputStream openBase() throws IOException {
return new ByteArrayInputStream(base);
}
};
try {
in.read(new byte[4]);
fail("did not throw an exception");
} catch (CorruptObjectException e) {
assertEquals(JGitText.get().baseLengthIncorrect, e.getMessage());
}
in = new DeltaStream(new ByteArrayInputStream(delta)) {
@Override
protected long getBaseSize() throws IOException {
return 4;
}
@Override
protected InputStream openBase() throws IOException {
return new ByteArrayInputStream(new byte[0]);
}
};
try {
in.read(new byte[4]);
fail("did not throw an exception");
} catch (CorruptObjectException e) {
assertEquals(JGitText.get().baseLengthIncorrect, e.getMessage());
}
}
private void init(int baseSize, int dataSize) throws IOException {
base = getRng().nextBytes(baseSize);
data = new byte[dataSize];
deltaEnc = new DeltaEncoder(deltaBuf, baseSize, dataSize);
}
private void copy(int offset, int len) throws IOException {
System.arraycopy(base, offset, data, dataPtr, len);
deltaEnc.copy(offset, len);
assertEquals(deltaBuf.size(), deltaEnc.getSize());
dataPtr += len;
}
private void insert(String text) throws IOException {
insert(Constants.encode(text));
}
private void insert(byte[] text) throws IOException {
System.arraycopy(text, 0, data, dataPtr, text.length);
deltaEnc.insert(text);
assertEquals(deltaBuf.size(), deltaEnc.getSize());
dataPtr += text.length;
}
private void assertValidState() throws IOException {
assertEquals("test filled example result", data.length, dataPtr);
delta = deltaBuf.toByteArray();
assertEquals(base.length, BinaryDelta.getBaseSize(delta));
assertEquals(data.length, BinaryDelta.getResultSize(delta));
assertArrayEquals(data, BinaryDelta.apply(base, delta));
// Assert that a single bulk read produces the correct result.
//
byte[] act = new byte[data.length];
DeltaStream in = open();
assertEquals(data.length, in.getSize());
assertEquals(data.length, in.read(act));
assertEquals(-1, in.read());
assertTrue("bulk read has same content", Arrays.equals(data, act));
// Assert that smaller tiny reads have the same result too.
//
act = new byte[data.length];
in = open();
int read = 0;
while (read < data.length) {
int n = in.read(act, read, 128);
if (n <= 0)
break;
read += n;
}
assertEquals(data.length, read);
assertEquals(-1, in.read());
assertTrue("small reads have same content", Arrays.equals(data, act));
}
private DeltaStream open() throws IOException {
return new DeltaStream(new ByteArrayInputStream(delta)) {
@Override
protected long getBaseSize() throws IOException {
return base.length;
}
@Override
protected InputStream openBase() throws IOException {
return new ByteArrayInputStream(base);
}
};
}
}

View File

@@ -1,265 +0,0 @@
/*
* Copyright (C) 2010, Google Inc.
* and other copyright owners as documented in the project's IP log.
*
* This program and the accompanying materials are made available
* under the terms of the Eclipse Distribution License v1.0 which
* accompanies this distribution, is reproduced below, and is
* available at http://www.eclipse.org/org/documents/edl-v10.php
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* - Neither the name of the Eclipse Foundation, Inc. nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.eclipse.jgit.internal.storage.file;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.DataFormatException;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;
import org.eclipse.jgit.errors.IncorrectObjectTypeException;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.internal.storage.pack.BinaryDelta;
import org.eclipse.jgit.internal.storage.pack.DeltaStream;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectLoader;
import org.eclipse.jgit.lib.ObjectStream;
import org.eclipse.jgit.util.io.TeeInputStream;
class LargePackedDeltaObject extends ObjectLoader {
private static final long SIZE_UNKNOWN = -1;
private int type;
private long size;
private final long objectOffset;
private final long baseOffset;
private final int headerLength;
private final PackFile pack;
private final FileObjectDatabase db;
LargePackedDeltaObject(long objectOffset,
long baseOffset, int headerLength, PackFile pack,
FileObjectDatabase db) {
this.type = Constants.OBJ_BAD;
this.size = SIZE_UNKNOWN;
this.objectOffset = objectOffset;
this.baseOffset = baseOffset;
this.headerLength = headerLength;
this.pack = pack;
this.db = db;
}
@Override
public int getType() {
if (type == Constants.OBJ_BAD) {
WindowCursor wc = new WindowCursor(db);
try {
type = pack.getObjectType(wc, objectOffset);
} catch (IOException packGone) {
// If the pack file cannot be pinned into the cursor, it
// probably was repacked recently. Go find the object
// again and get the type from that location instead.
//
try {
type = wc.open(getObjectId()).getType();
} catch (IOException packGone2) {
// "He's dead, Jim." We just can't discover the type
// and the interface isn't supposed to be lazy here.
// Report an invalid type code instead, callers will
// wind up bailing out with an error at some point.
}
} finally {
wc.release();
}
}
return type;
}
@Override
public long getSize() {
if (size == SIZE_UNKNOWN) {
WindowCursor wc = new WindowCursor(db);
try {
byte[] b = pack.getDeltaHeader(wc, objectOffset + headerLength);
size = BinaryDelta.getResultSize(b);
} catch (DataFormatException objectCorrupt) {
// The zlib stream for the delta is corrupt. We probably
// cannot access the object. Keep the size negative and
// report that bogus result to the caller.
} catch (IOException packGone) {
// If the pack file cannot be pinned into the cursor, it
// probably was repacked recently. Go find the object
// again and get the size from that location instead.
//
try {
size = wc.open(getObjectId()).getSize();
} catch (IOException packGone2) {
// "He's dead, Jim." We just can't discover the size
// and the interface isn't supposed to be lazy here.
// Report the bogus negative size instead; callers will
// wind up bailing out with an error at some point.
}
} finally {
wc.release();
}
}
return size;
}
@Override
public boolean isLarge() {
return true;
}
@Override
public byte[] getCachedBytes() throws LargeObjectException {
try {
throw new LargeObjectException(getObjectId());
} catch (IOException cannotObtainId) {
LargeObjectException err = new LargeObjectException();
err.initCause(cannotObtainId);
throw err;
}
}
@Override
public ObjectStream openStream() throws MissingObjectException, IOException {
// If the object was recently unpacked, its available loose.
// The loose format is going to be faster to access than a
// delta applied on top of a base. Use that whenever we can.
//
final ObjectId myId = getObjectId();
final WindowCursor wc = new WindowCursor(db);
ObjectLoader ldr = db.openLooseObject(wc, myId);
if (ldr != null)
return ldr.openStream();
InputStream in = open(wc);
in = new BufferedInputStream(in, 8192);
// While we inflate the object, also deflate it back as a loose
// object. This will later be cleaned up by a gc pass, but until
// then we will reuse the loose form by the above code path.
//
int myType = getType();
long mySize = getSize();
final ObjectDirectoryInserter odi = db.newInserter();
final File tmp = odi.newTempFile();
DeflaterOutputStream dOut = odi.compress(new FileOutputStream(tmp));
odi.writeHeader(dOut, myType, mySize);
in = new TeeInputStream(in, dOut);
return new ObjectStream.Filter(myType, mySize, in) {
@Override
public void close() throws IOException {
super.close();
odi.release();
wc.release();
db.insertUnpackedObject(tmp, myId, true /* force creation */);
}
};
}
private InputStream open(final WindowCursor wc)
throws MissingObjectException, IOException,
IncorrectObjectTypeException {
InputStream delta;
try {
delta = new PackInputStream(pack, objectOffset + headerLength, wc);
} catch (IOException packGone) {
// If the pack file cannot be pinned into the cursor, it
// probably was repacked recently. Go find the object
// again and open the stream from that location instead.
//
return wc.open(getObjectId()).openStream();
}
delta = new InflaterInputStream(delta);
final ObjectLoader base = pack.load(wc, baseOffset);
DeltaStream ds = new DeltaStream(delta) {
private long baseSize = SIZE_UNKNOWN;
@Override
protected InputStream openBase() throws IOException {
InputStream in;
if (base instanceof LargePackedDeltaObject)
in = ((LargePackedDeltaObject) base).open(wc);
else
in = base.openStream();
if (baseSize == SIZE_UNKNOWN) {
if (in instanceof DeltaStream)
baseSize = ((DeltaStream) in).getSize();
else if (in instanceof ObjectStream)
baseSize = ((ObjectStream) in).getSize();
}
return in;
}
@Override
protected long getBaseSize() throws IOException {
if (baseSize == SIZE_UNKNOWN) {
// This code path should never be used as DeltaStream
// is supposed to open the stream first, which would
// initialize the size for us directly from the stream.
baseSize = base.getSize();
}
return baseSize;
}
};
if (type == Constants.OBJ_BAD) {
if (!(base instanceof LargePackedDeltaObject))
type = base.getType();
}
if (size == SIZE_UNKNOWN)
size = ds.getSize();
return ds;
}
private ObjectId getObjectId() throws IOException {
return pack.findObjectForOffset(objectOffset);
}
}
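
The openStream() path removed here streamed the delta result once while teeing
the inflated bytes into a loose-object writer, so later reads could use the
loose copy. A minimal sketch of that write-through idea, assuming only
org.eclipse.jgit.util.io.TeeInputStream (the class imported above); the byte
source and class name are illustrative:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;

import org.eclipse.jgit.util.io.TeeInputStream;

public class TeeSketch {
    public static void main(String[] args) throws Exception {
        byte[] inflated = "object body".getBytes("UTF-8");
        ByteArrayOutputStream sideCopy = new ByteArrayOutputStream();

        // Every byte the consumer reads is also written to sideCopy; the
        // removed code pointed the side output at a deflated loose-object
        // temp file instead of a memory buffer.
        InputStream in = new TeeInputStream(
                new ByteArrayInputStream(inflated), sideCopy);
        while (in.read() != -1) {
            // consumer drains the stream as usual
        }
        in.close();

        System.out.println(sideCopy.size() == inflated.length); // true
    }
}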

View File

@@ -65,6 +65,7 @@
import java.util.zip.Inflater;
import org.eclipse.jgit.errors.CorruptObjectException;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.errors.PackInvalidException;
import org.eclipse.jgit.errors.PackMismatchException;
@@ -701,7 +702,7 @@ private void onOpenPack() throws IOException {
}
ObjectLoader load(final WindowCursor curs, long pos)
throws IOException {
throws IOException, LargeObjectException {
try {
final byte[] ib = curs.tempId;
Delta delta = null;
@@ -727,7 +728,7 @@ ObjectLoader load(final WindowCursor curs, long pos)
case Constants.OBJ_TREE:
case Constants.OBJ_BLOB:
case Constants.OBJ_TAG: {
if (sz < curs.getStreamFileThreshold())
if (delta != null || sz < curs.getStreamFileThreshold())
data = decompress(pos + p, (int) sz, curs);
if (delta != null) {
@@ -796,7 +797,7 @@ ObjectLoader load(final WindowCursor curs, long pos)
// (Whole objects with no deltas to apply return early above.)
if (data == null)
return delta.large(this, curs);
throw new IOException(JGitText.get().inMemoryBufferLimitExceeded);
do {
// Cache only the base immediately before desired object.
@@ -811,19 +812,19 @@ else if (delta.next == null)
delta.deltaSize, curs);
if (cmds == null) {
data = null; // Discard base in case of OutOfMemoryError
return delta.large(this, curs);
throw new LargeObjectException.OutOfMemory(new OutOfMemoryError());
}
final long sz = BinaryDelta.getResultSize(cmds);
if (Integer.MAX_VALUE <= sz)
return delta.large(this, curs);
throw new LargeObjectException.ExceedsByteArrayLimit();
final byte[] result;
try {
result = new byte[(int) sz];
} catch (OutOfMemoryError tooBig) {
data = null; // Discard base in case of OutOfMemoryError
return delta.large(this, curs);
throw new LargeObjectException.OutOfMemory(tooBig);
}
BinaryDelta.apply(data, cmds, result);
@@ -875,18 +876,6 @@ private static class Delta {
this.hdrLen = hdrLen;
this.basePos = baseOffset;
}
ObjectLoader large(PackFile pack, WindowCursor wc) {
Delta d = this;
while (d.next != null)
d = d.next;
return d.newLargeLoader(pack, wc);
}
private ObjectLoader newLargeLoader(PackFile pack, WindowCursor wc) {
return new LargePackedDeltaObject(deltaPos, basePos, hdrLen,
pack, wc.db);
}
}
byte[] getDeltaHeader(WindowCursor wc, long pos)
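
With the large-delta fallback gone, a packed delta whose expanded form cannot
fit in a byte array now surfaces to readers as a LargeObjectException (or one
of its subtypes) rather than as a loader that streams the result. A hedged
sketch of the caller-side pattern; the class ReadBlobSketch, the helper name
readOrNull and the repo/id parameters are illustrative, not JGit API:

import java.io.IOException;

import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.Repository;

public class ReadBlobSketch {
    /** Returns the object bytes, or null if the object is too large for a byte[]. */
    static byte[] readOrNull(Repository repo, ObjectId id) throws IOException {
        ObjectReader reader = repo.newObjectReader();
        try {
            // After this commit the exception is raised while the packed
            // delta is being resolved, instead of by a streaming loader later.
            return reader.open(id).getCachedBytes();
        } catch (LargeObjectException tooBig) {
            return null;
        } finally {
            reader.release();
        }
    }
}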

View File

@@ -51,8 +51,8 @@
* <p>
* The index can be passed a result buffer, and output an instruction sequence
* that transforms the source buffer used by the index into the result buffer.
* The instruction sequence can be executed by {@link BinaryDelta} or
* {@link DeltaStream} to recreate the result buffer.
* The instruction sequence can be executed by {@link BinaryDelta} to recreate
* the result buffer.
* <p>
* An index stores the entire contents of the source buffer, but also a table of
* block identities mapped to locations where the block appears in the source
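
With DeltaStream gone, BinaryDelta is the only executor left for the
instruction sequences this index produces. A minimal round-trip sketch,
assuming the class documented here is
org.eclipse.jgit.internal.storage.pack.DeltaIndex and its
encode(OutputStream, byte[]) entry point; the buffers and class name are
illustrative:

import java.io.ByteArrayOutputStream;
import java.util.Arrays;

import org.eclipse.jgit.internal.storage.pack.BinaryDelta;
import org.eclipse.jgit.internal.storage.pack.DeltaIndex;
import org.eclipse.jgit.lib.Constants;

public class DeltaRoundTripSketch {
    public static void main(String[] args) throws Exception {
        byte[] source = Constants.encode("the quick brown fox jumps over the lazy dog\n");
        byte[] result = Constants.encode("the quick brown fox jumps over the lazy cat\n");

        // Index the source blocks, then emit copy/insert instructions that
        // rebuild the result from the source.
        DeltaIndex index = new DeltaIndex(source);
        ByteArrayOutputStream delta = new ByteArrayOutputStream();
        index.encode(delta, result);

        // BinaryDelta materializes the result fully in memory; after this
        // commit that is the only way JGit applies a delta.
        byte[] applied = BinaryDelta.apply(source, delta.toByteArray());
        System.out.println(Arrays.equals(result, applied)); // true
    }
}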

View File

@@ -1,343 +0,0 @@
/*
* Copyright (C) 2007, Robin Rosenberg <robin.rosenberg@dewire.com>
* Copyright (C) 2006-2007, Shawn O. Pearce <spearce@spearce.org>
* Copyright (C) 2010, Google Inc.
* and other copyright owners as documented in the project's IP log.
*
* This program and the accompanying materials are made available
* under the terms of the Eclipse Distribution License v1.0 which
* accompanies this distribution, is reproduced below, and is
* available at http://www.eclipse.org/org/documents/edl-v10.php
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* - Neither the name of the Eclipse Foundation, Inc. nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.eclipse.jgit.internal.storage.pack;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import org.eclipse.jgit.errors.CorruptObjectException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.util.IO;
/**
* Inflates a delta in an incremental way.
* <p>
* Implementations must provide a means to access a stream for the base object.
* This stream may be accessed multiple times, in order to randomly position it
* to match the copy instructions. A {@code DeltaStream} performs an efficient
* skip by only moving through the delta stream, making restarts of stacked
* deltas reasonably efficient.
*/
public abstract class DeltaStream extends InputStream {
private static final int CMD_COPY = 0;
private static final int CMD_INSERT = 1;
private static final int CMD_EOF = 2;
private final InputStream deltaStream;
private long baseSize;
private long resultSize;
private final byte[] cmdbuf = new byte[512];
private int cmdptr;
private int cmdcnt;
/** Stream to read from the base object. */
private InputStream baseStream;
/** Current position within {@link #baseStream}. */
private long baseOffset;
private int curcmd;
/** If {@code curcmd == CMD_COPY}, position the base has to be at. */
private long copyOffset;
/** Total number of bytes in this current command. */
private int copySize;
/**
* Construct a delta application stream, reading instructions.
*
* @param deltaStream
* the stream to read delta instructions from.
* @throws IOException
* the delta instruction stream cannot be read, or is
* inconsistent with the base object information.
*/
public DeltaStream(final InputStream deltaStream) throws IOException {
this.deltaStream = deltaStream;
if (!fill(cmdbuf.length))
throw new EOFException();
// Length of the base object.
//
int c, shift = 0;
do {
c = cmdbuf[cmdptr++] & 0xff;
baseSize |= ((long) (c & 0x7f)) << shift;
shift += 7;
} while ((c & 0x80) != 0);
// Length of the resulting object.
//
shift = 0;
do {
c = cmdbuf[cmdptr++] & 0xff;
resultSize |= ((long) (c & 0x7f)) << shift;
shift += 7;
} while ((c & 0x80) != 0);
curcmd = next();
}
/**
* Open the base stream.
* <p>
* The {@code DeltaStream} may close and reopen the base stream multiple
* times if copy instructions use offsets out of order. This can occur if a
* large block in the file was moved from near the top, to near the bottom.
* In such cases the reopened stream is skipped to the target offset, so
* {@code skip(long)} should be as efficient as possible.
*
* @return stream to read from the base object. This stream should not be
* buffered (or should be only minimally buffered), and does not
* need to support mark/reset.
* @throws IOException
* the base object cannot be opened for reading.
*/
protected abstract InputStream openBase() throws IOException;
/**
* @return length of the base object, in bytes.
* @throws IOException
* the length of the base cannot be determined.
*/
protected abstract long getBaseSize() throws IOException;
/** @return total size of this stream, in bytes. */
public long getSize() {
return resultSize;
}
@Override
public int read() throws IOException {
byte[] buf = new byte[1];
int n = read(buf, 0, 1);
return n == 1 ? buf[0] & 0xff : -1;
}
@Override
public void close() throws IOException {
deltaStream.close();
if (baseStream != null)
baseStream.close();
}
@Override
public long skip(long len) throws IOException {
long act = 0;
while (0 < len) {
long n = Math.min(len, copySize);
switch (curcmd) {
case CMD_COPY:
copyOffset += n;
break;
case CMD_INSERT:
cmdptr += n;
break;
case CMD_EOF:
return act;
default:
throw new CorruptObjectException(
JGitText.get().unsupportedCommand0);
}
act += n;
len -= n;
copySize -= n;
if (copySize == 0)
curcmd = next();
}
return act;
}
@Override
public int read(byte[] buf, int off, int len) throws IOException {
int act = 0;
while (0 < len) {
int n = Math.min(len, copySize);
switch (curcmd) {
case CMD_COPY:
seekBase();
n = baseStream.read(buf, off, n);
if (n < 0)
throw new CorruptObjectException(
JGitText.get().baseLengthIncorrect);
copyOffset += n;
baseOffset = copyOffset;
break;
case CMD_INSERT:
System.arraycopy(cmdbuf, cmdptr, buf, off, n);
cmdptr += n;
break;
case CMD_EOF:
return 0 < act ? act : -1;
default:
throw new CorruptObjectException(
JGitText.get().unsupportedCommand0);
}
act += n;
off += n;
len -= n;
copySize -= n;
if (copySize == 0)
curcmd = next();
}
return act;
}
private boolean fill(final int need) throws IOException {
int n = have();
if (need < n)
return true;
if (n == 0) {
cmdptr = 0;
cmdcnt = 0;
} else if (cmdbuf.length - cmdptr < need) {
// There isn't room for the entire worst-case copy command,
// so shift the array down to make sure we can use the entire
// command without having it span across the end of the array.
//
System.arraycopy(cmdbuf, cmdptr, cmdbuf, 0, n);
cmdptr = 0;
cmdcnt = n;
}
do {
n = deltaStream.read(cmdbuf, cmdcnt, cmdbuf.length - cmdcnt);
if (n < 0)
return 0 < have();
cmdcnt += n;
} while (cmdcnt < cmdbuf.length);
return true;
}
private int next() throws IOException {
if (!fill(8))
return CMD_EOF;
final int cmd = cmdbuf[cmdptr++] & 0xff;
if ((cmd & 0x80) != 0) {
// Determine the segment of the base which should
// be copied into the output. The segment is given
// as an offset and a length.
//
copyOffset = 0;
if ((cmd & 0x01) != 0)
copyOffset = cmdbuf[cmdptr++] & 0xff;
if ((cmd & 0x02) != 0)
copyOffset |= (cmdbuf[cmdptr++] & 0xff) << 8;
if ((cmd & 0x04) != 0)
copyOffset |= (cmdbuf[cmdptr++] & 0xff) << 16;
if ((cmd & 0x08) != 0)
copyOffset |= ((long) (cmdbuf[cmdptr++] & 0xff)) << 24;
copySize = 0;
if ((cmd & 0x10) != 0)
copySize = cmdbuf[cmdptr++] & 0xff;
if ((cmd & 0x20) != 0)
copySize |= (cmdbuf[cmdptr++] & 0xff) << 8;
if ((cmd & 0x40) != 0)
copySize |= (cmdbuf[cmdptr++] & 0xff) << 16;
if (copySize == 0)
copySize = 0x10000;
return CMD_COPY;
} else if (cmd != 0) {
// Anything else the data is literal within the delta
// itself. Page the entire thing into the cmdbuf, if
// its not already there.
//
fill(cmd);
copySize = cmd;
return CMD_INSERT;
} else {
// cmd == 0 has been reserved for future encoding but
// for now its not acceptable.
//
throw new CorruptObjectException(JGitText.get().unsupportedCommand0);
}
}
private int have() {
return cmdcnt - cmdptr;
}
private void seekBase() throws IOException {
if (baseStream == null) {
baseStream = openBase();
if (getBaseSize() != baseSize)
throw new CorruptObjectException(
JGitText.get().baseLengthIncorrect);
IO.skipFully(baseStream, copyOffset);
baseOffset = copyOffset;
} else if (baseOffset < copyOffset) {
IO.skipFully(baseStream, copyOffset - baseOffset);
baseOffset = copyOffset;
} else if (baseOffset > copyOffset) {
baseStream.close();
baseStream = openBase();
IO.skipFully(baseStream, copyOffset);
baseOffset = copyOffset;
}
}
}
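
For reference, the format this class parsed is the standard git pack delta
encoding: two little-endian base-128 varints giving the base and result sizes,
followed by commands whose high bit selects copy (1) or insert (0). A
standalone sketch of that decoding, restating the logic of the removed
constructor and next() outside JGit (the class name and the hand-built delta
bytes are illustrative):

public class DeltaFormatSketch {
    public static void main(String[] args) {
        // Delta turning base "aaaa" into result "aaXY":
        // copy 2 bytes from offset 0, then insert the literals 'X' 'Y'.
        byte[] delta = {
                0x04,              // varint: base size = 4
                0x04,              // varint: result size = 4
                (byte) 0x90, 0x02, // copy: 0x80 | 0x10 -> one size byte (2), offset 0
                0x02, 'X', 'Y'     // insert: 2 literal bytes follow
        };

        int ptr = 0;
        long baseSize = 0, resultSize = 0;
        int c, shift = 0;
        do { // base object length
            c = delta[ptr++] & 0xff;
            baseSize |= ((long) (c & 0x7f)) << shift;
            shift += 7;
        } while ((c & 0x80) != 0);
        shift = 0;
        do { // resulting object length
            c = delta[ptr++] & 0xff;
            resultSize |= ((long) (c & 0x7f)) << shift;
            shift += 7;
        } while ((c & 0x80) != 0);
        System.out.println("base=" + baseSize + " result=" + resultSize);

        while (ptr < delta.length) {
            int cmd = delta[ptr++] & 0xff;
            if ((cmd & 0x80) != 0) {
                // Copy: bits 0x01..0x08 select offset bytes, 0x10..0x40 size bytes.
                long off = 0;
                int size = 0;
                if ((cmd & 0x01) != 0) off = delta[ptr++] & 0xff;
                if ((cmd & 0x02) != 0) off |= (delta[ptr++] & 0xff) << 8;
                if ((cmd & 0x04) != 0) off |= (delta[ptr++] & 0xff) << 16;
                if ((cmd & 0x08) != 0) off |= ((long) (delta[ptr++] & 0xff)) << 24;
                if ((cmd & 0x10) != 0) size = delta[ptr++] & 0xff;
                if ((cmd & 0x20) != 0) size |= (delta[ptr++] & 0xff) << 8;
                if ((cmd & 0x40) != 0) size |= (delta[ptr++] & 0xff) << 16;
                if (size == 0)
                    size = 0x10000; // an encoded size of 0 means 64 KiB
                System.out.println("copy offset=" + off + " len=" + size);
            } else if (cmd != 0) {
                // Insert: cmd itself is the number of literal bytes that follow.
                System.out.println("insert len=" + cmd);
                ptr += cmd;
            } else {
                throw new IllegalStateException("command 0 is reserved");
            }
        }
    }
}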