Merge branch 'master' into stable-6.5

* master:
  Externalize strings introduced in c9552aba
  Silence API error introduced by 596c445a
  PackConfig: add entry for minimum size to index
  Fix getPackedRefs to not throw NoSuchFileException
  PackObjectSizeIndex: interface and impl for the object-size index
  UInt24Array: Array of unsigned ints encoded in 3 bytes.
  PackIndex: expose the position of an object-id in the index
  Add pack options to preserve and prune old pack files
  DfsPackFile/DfsGC: Write commit graphs and expose in pack
  ObjectReader: Allow getCommitGraph to throw IOException
  Allow to perform PackedBatchRefUpdate without locking loose refs
  Document option "core.sha1Implementation" introduced in 59029aec
  UploadPack: consume delimiter in object-info command
  PatchApplier fix - init cache with provided tree
  Avoid error-prone warning
  Fix unused exception error-prone warning
  UploadPack: advertise object-info command if enabled
  Move MemRefDatabase creation in a separate method.
  DfsReaderIoStats: Add Commit Graph fields into DfsReaderIoStats

Change-Id: Ic9f91f2139432999b99c444302457b3c08911009
This commit is contained in:
Matthias Sohn 2023-02-20 22:18:22 +01:00
commit c8683db55d
42 changed files with 1822 additions and 56 deletions

View File

@ -42,6 +42,7 @@ For details on native git options see also the official [git config documentatio
| `core.precomposeUnicode` | `true` on Mac OS | ✅ | MacOS only. When `true`, JGit reverts the unicode decomposition of filenames done by Mac OS. |
| `core.quotePath` | `true` | ✅ | Commands that output paths (e.g. ls-files, diff), will quote "unusual" characters in the pathname by enclosing the pathname in double-quotes and escaping those characters with backslashes in the same way C escapes control characters (e.g. `\t` for TAB, `\n` for LF, `\\` for backslash) or bytes with values larger than `0x80` (e.g. octal `\302\265` for "micro" in UTF-8). |
| `core.repositoryFormatVersion` | `1` | ⃞ | Internal version identifying the repository format and layout version. Don't set manually. |
| `core.sha1Implementation` | `java` | ⃞ | Choose the SHA1 implementation used by JGit. Set it to `java` to use JGit's Java implementation which detects SHA1 collisions if system property `org.eclipse.jgit.util.sha1.detectCollision` is unset or `true`. Set it to `jdkNative` to use the native implementation available in the JDK, can also be set using system property `org.eclipse.jgit.util.sha1.implementation`. If both are set the system property takes precedence. Performance of `jdkNative` is around 10% higher than `java` when `detectCollision=false` and 30% higher when `detectCollision=true`.|
| `core.streamFileThreshold` | `50 MiB` | ⃞ | The size threshold beyond which objects must be streamed. |
| `core.supportsAtomicFileCreation` | `true` | ⃞ | Whether the filesystem supports atomic file creation. |
| `core.symlinks` | Auto detect if filesystem supports symlinks| ✅ | If false, symbolic links are checked out as small plain files that contain the link text. |
@ -99,9 +100,10 @@ Proxy configuration uses the standard Java mechanisms via class `java.net.ProxyS
| `pack.deltaCompression` | `true` | ⃞ | Whether the writer will create new deltas on the fly. `true` if the pack writer will create a new delta when either `pack.reuseDeltas` is false, or no suitable delta is available for reuse. |
| `pack.depth` | `50` | ✅ | Maximum depth of delta chain set up for the pack writer. |
| `pack.indexVersion` | `2` | ✅ | Pack index file format version. |
| `pack.minBytesForObjSizeIndex` | `-1` | ⃞ | Minimum size of an object (inclusive, in bytes) to be included in the size index. -1 to disable the object size index. |
| `pack.minSizePreventRacyPack` | `100 MiB` | ⃞ | Minimum packfile size for which we wait before opening a newly written pack, to prevent a racy lastModified timestamp when `pack.waitPreventRacyPack` is `true`. |
| `pack.preserveOldPacks` | `false` | ⃞ | Whether to preserve old packs in a preserved directory. |
| `prunePreserved`, only via API of PackConfig | `false` | ⃞ | Whether to remove preserved pack files in a preserved directory. |
| `pack.preserveOldPacks` | `false` | ⃞ | Whether to preserve old packs during gc in the `objects/pack/preserved` directory. This can avoid rare races between gc removing pack files and other concurrent operations. If this option is false, data loss can occur in rare cases: an object believed to be unreferenced while repacking is running may be referenced again by a concurrent operation shortly before garbage collection deletes it, leaving the new reference pointing to a now-missing object. |
| `pack.prunePreserved` | `false` | ⃞ | Whether to prune preserved pack files from the previous run of gc from the `objects/pack/preserved` directory. This helps to limit the additional storage space needed to preserve old packs when `pack.preserveOldPacks = true`. |
| `pack.reuseDeltas` | `true` |⃞ | Whether to reuse deltas existing in repository. |
| `pack.reuseObjects` | `true` | ⃞ | Whether to reuse existing objects representation in repository. |
| `pack.searchForReuseTimeout` | | ⃞ | Search for reuse phase timeout. Expressed as a `Duration`, i.e.: `50sec`. |

View File

@ -10,6 +10,7 @@
package org.eclipse.jgit.pgm;
import org.eclipse.jgit.api.GarbageCollectCommand;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.lib.TextProgressMonitor;
@ -21,20 +22,25 @@ class Gc extends TextBuiltin {
private boolean aggressive;
@Option(name = "--preserve-oldpacks", usage = "usage_PreserveOldPacks")
private boolean preserveOldPacks;
private Boolean preserveOldPacks;
@Option(name = "--prune-preserved", usage = "usage_PrunePreserved")
private boolean prunePreserved;
private Boolean prunePreserved;
/** {@inheritDoc} */
@Override
protected void run() {
Git git = Git.wrap(db);
try {
git.gc().setAggressive(aggressive)
.setPreserveOldPacks(preserveOldPacks)
.setPrunePreserved(prunePreserved)
.setProgressMonitor(new TextProgressMonitor(errw)).call();
GarbageCollectCommand command = git.gc().setAggressive(aggressive)
.setProgressMonitor(new TextProgressMonitor(errw));
if (preserveOldPacks != null) {
command.setPreserveOldPacks(preserveOldPacks.booleanValue());
}
if (prunePreserved != null) {
command.setPrunePreserved(prunePreserved.booleanValue());
}
command.call();
} catch (GitAPIException e) {
throw die(e.getMessage(), e);
}

View File

@ -284,12 +284,14 @@ public void noConcurrencySerializedReads_oneRepo() throws Exception {
asyncRun(() -> pack.getBitmapIndex(reader));
asyncRun(() -> pack.getPackIndex(reader));
asyncRun(() -> pack.getBitmapIndex(reader));
asyncRun(() -> pack.getCommitGraph(reader));
}
waitForExecutorPoolTermination();
assertEquals(1, cache.getMissCount()[PackExt.BITMAP_INDEX.ordinal()]);
assertEquals(1, cache.getMissCount()[PackExt.INDEX.ordinal()]);
assertEquals(1, cache.getMissCount()[PackExt.REVERSE_INDEX.ordinal()]);
assertEquals(1, cache.getMissCount()[PackExt.COMMIT_GRAPH.ordinal()]);
}
@SuppressWarnings("resource")
@ -313,12 +315,15 @@ public void noConcurrencySerializedReads_twoRepos() throws Exception {
}
asyncRun(() -> pack1.getBitmapIndex(reader));
asyncRun(() -> pack2.getBitmapIndex(reader));
asyncRun(() -> pack1.getCommitGraph(reader));
asyncRun(() -> pack2.getCommitGraph(reader));
}
waitForExecutorPoolTermination();
assertEquals(2, cache.getMissCount()[PackExt.BITMAP_INDEX.ordinal()]);
assertEquals(2, cache.getMissCount()[PackExt.INDEX.ordinal()]);
assertEquals(2, cache.getMissCount()[PackExt.REVERSE_INDEX.ordinal()]);
assertEquals(2, cache.getMissCount()[PackExt.COMMIT_GRAPH.ordinal()]);
}
@SuppressWarnings("resource")
@ -342,12 +347,15 @@ public void lowConcurrencyParallelReads_twoRepos() throws Exception {
}
asyncRun(() -> pack1.getBitmapIndex(reader));
asyncRun(() -> pack2.getBitmapIndex(reader));
asyncRun(() -> pack1.getCommitGraph(reader));
asyncRun(() -> pack2.getCommitGraph(reader));
}
waitForExecutorPoolTermination();
assertEquals(2, cache.getMissCount()[PackExt.BITMAP_INDEX.ordinal()]);
assertEquals(2, cache.getMissCount()[PackExt.INDEX.ordinal()]);
assertEquals(2, cache.getMissCount()[PackExt.REVERSE_INDEX.ordinal()]);
assertEquals(2, cache.getMissCount()[PackExt.COMMIT_GRAPH.ordinal()]);
}
@SuppressWarnings("resource")
@ -372,7 +380,9 @@ public void lowConcurrencyParallelReads_twoReposAndIndex()
}
asyncRun(() -> pack1.getBitmapIndex(reader));
asyncRun(() -> pack1.getPackIndex(reader));
asyncRun(() -> pack1.getCommitGraph(reader));
asyncRun(() -> pack2.getBitmapIndex(reader));
asyncRun(() -> pack2.getCommitGraph(reader));
}
waitForExecutorPoolTermination();
@ -380,6 +390,7 @@ public void lowConcurrencyParallelReads_twoReposAndIndex()
// Index is loaded once for each repo.
assertEquals(2, cache.getMissCount()[PackExt.INDEX.ordinal()]);
assertEquals(2, cache.getMissCount()[PackExt.REVERSE_INDEX.ordinal()]);
assertEquals(2, cache.getMissCount()[PackExt.COMMIT_GRAPH.ordinal()]);
}
@Test
@ -396,12 +407,14 @@ public void highConcurrencyParallelReads_oneRepo() throws Exception {
asyncRun(() -> pack.getBitmapIndex(reader));
asyncRun(() -> pack.getPackIndex(reader));
asyncRun(() -> pack.getBitmapIndex(reader));
asyncRun(() -> pack.getCommitGraph(reader));
}
waitForExecutorPoolTermination();
assertEquals(1, cache.getMissCount()[PackExt.BITMAP_INDEX.ordinal()]);
assertEquals(1, cache.getMissCount()[PackExt.INDEX.ordinal()]);
assertEquals(1, cache.getMissCount()[PackExt.REVERSE_INDEX.ordinal()]);
assertEquals(1, cache.getMissCount()[PackExt.COMMIT_GRAPH.ordinal()]);
}
@Test
@ -420,12 +433,14 @@ public void highConcurrencyParallelReads_oneRepoParallelReverseIndex()
asyncRun(() -> pack.getBitmapIndex(reader));
asyncRun(() -> pack.getPackIndex(reader));
asyncRun(() -> pack.getBitmapIndex(reader));
asyncRun(() -> pack.getCommitGraph(reader));
}
waitForExecutorPoolTermination();
assertEquals(1, cache.getMissCount()[PackExt.BITMAP_INDEX.ordinal()]);
assertEquals(1, cache.getMissCount()[PackExt.INDEX.ordinal()]);
assertEquals(1, cache.getMissCount()[PackExt.REVERSE_INDEX.ordinal()]);
assertEquals(1, cache.getMissCount()[PackExt.COMMIT_GRAPH.ordinal()]);
}
private void resetCache() {
@ -450,7 +465,7 @@ private InMemoryRepository createRepoWithBitmap(String repoName)
repository.branch("/refs/ref2" + repoName).commit()
.add("blob2", "blob2" + repoName).parent(commit).create();
}
new DfsGarbageCollector(repo).pack(null);
new DfsGarbageCollector(repo).setWriteCommitGraph(true).pack(null);
return repo;
}

View File

@ -18,6 +18,7 @@
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import org.eclipse.jgit.internal.storage.commitgraph.CommitGraph;
import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource;
import org.eclipse.jgit.internal.storage.reftable.RefCursor;
import org.eclipse.jgit.internal.storage.reftable.ReftableConfig;
@ -976,10 +977,139 @@ public void reftableWithTombstoneNotResurrected() throws Exception {
assertNull(refdb.exactRef(NEXT));
}
@Test
public void produceCommitGraphAllRefsIncludedFromDisk() throws Exception {
	// Three commits reachable from a tag, a head and a non-head ref.
	// Per the assertions below, the gc-written commit-graph of the GC
	// pack must cover all of them — both the GC-packed (tag/head) and
	// the GC_REST-packed (non-head) commits.
	String tag = "refs/tags/tag1";
	String head = "refs/heads/head1";
	String nonHead = "refs/something/nonHead";
	RevCommit rootCommitTagged = git.branch(tag).commit().message("0")
			.noParents().create();
	RevCommit headTip = git.branch(head).commit().message("1")
			.parent(rootCommitTagged).create();
	RevCommit nonHeadTip = git.branch(nonHead).commit().message("2")
			.parent(rootCommitTagged).create();
	gcWithCommitGraph();
	// Expect exactly two packs; the GC pack is listed first.
	assertEquals(2, odb.getPacks().length);
	DfsPackFile gcPack = odb.getPacks()[0];
	assertEquals(GC, gcPack.getPackDescription().getPackSource());
	// First read goes to disk (contrast with the ...FromCache test).
	DfsReader reader = odb.newReader();
	CommitGraph cg = gcPack.getCommitGraph(reader);
	assertNotNull(cg);
	assertTrue("all commits in commit graph", cg.getCommitCnt() == 3);
	// GC packed
	assertTrue("tag referenced commit is in graph",
			cg.findGraphPosition(rootCommitTagged) != -1);
	assertTrue("head referenced commit is in graph",
			cg.findGraphPosition(headTip) != -1);
	// GC_REST packed
	assertTrue("nonHead referenced commit is in graph",
			cg.findGraphPosition(nonHeadTip) != -1);
}
@Test
public void produceCommitGraphAllRefsIncludedFromCache() throws Exception {
	// Same setup as produceCommitGraphAllRefsIncludedFromDisk, but the
	// commit-graph is read twice through the same reader: the second
	// read must be served from cache, as evidenced by the reader stats
	// recording exactly one disk read.
	String tag = "refs/tags/tag1";
	String head = "refs/heads/head1";
	String nonHead = "refs/something/nonHead";
	RevCommit rootCommitTagged = git.branch(tag).commit().message("0")
			.noParents().create();
	RevCommit headTip = git.branch(head).commit().message("1")
			.parent(rootCommitTagged).create();
	RevCommit nonHeadTip = git.branch(nonHead).commit().message("2")
			.parent(rootCommitTagged).create();
	gcWithCommitGraph();
	assertEquals(2, odb.getPacks().length);
	DfsPackFile gcPack = odb.getPacks()[0];
	assertEquals(GC, gcPack.getPackDescription().getPackSource());
	DfsReader reader = odb.newReader();
	// First read populates the cache.
	gcPack.getCommitGraph(reader);
	// Invoke cache hit
	CommitGraph cachedCG = gcPack.getCommitGraph(reader);
	assertNotNull(cachedCG);
	// IO stats must reflect a single disk read with real content/time.
	assertTrue("commit graph have been read from disk once",
			reader.stats.readCommitGraph == 1);
	assertTrue("commit graph read contains content",
			reader.stats.readCommitGraphBytes > 0);
	assertTrue("commit graph read time is recorded",
			reader.stats.readCommitGraphMicros > 0);
	assertTrue("all commits in commit graph", cachedCG.getCommitCnt() == 3);
	// GC packed
	assertTrue("tag referenced commit is in graph",
			cachedCG.findGraphPosition(rootCommitTagged) != -1);
	assertTrue("head referenced commit is in graph",
			cachedCG.findGraphPosition(headTip) != -1);
	// GC_REST packed
	assertTrue("nonHead referenced commit is in graph",
			cachedCG.findGraphPosition(nonHeadTip) != -1);
}
@Test
public void noCommitGraphWithoutGcPack() throws Exception {
	// Only a non-head ref plus an unreachable commit, i.e. (per the test
	// name) no GC pack is produced — so no pack may expose a
	// commit-graph even though commit-graph writing is enabled.
	String nonHead = "refs/something/nonHead";
	RevCommit nonHeadCommit = git.branch(nonHead).commit()
			.message("nonhead").noParents().create();
	commit().message("unreachable").parent(nonHeadCommit).create();
	gcWithCommitGraph();
	assertEquals(2, odb.getPacks().length);
	for (DfsPackFile pack : odb.getPacks()) {
		assertNull(pack.getCommitGraph(odb.newReader()));
	}
}
@Test
public void commitGraphWithoutGCrestPack() throws Exception {
	// One head-reachable commit and one unreachable commit: gc must
	// produce a GC pack whose commit-graph covers only the reachable
	// commit, and an UNREACHABLE_GARBAGE pack without a commit-graph.
	String head = "refs/heads/head1";
	RevCommit headCommit = git.branch(head).commit().message("head")
			.noParents().create();
	RevCommit unreachableCommit = commit().message("unreachable")
			.parent(headCommit).create();
	gcWithCommitGraph();
	assertEquals(2, odb.getPacks().length);
	for (DfsPackFile pack : odb.getPacks()) {
		DfsPackDescription d = pack.getPackDescription();
		if (d.getPackSource() == GC) {
			CommitGraph cg = pack.getCommitGraph(odb.newReader());
			assertNotNull(cg);
			assertTrue("commit graph only contains 1 commit",
					cg.getCommitCnt() == 1);
			assertTrue("head exists in commit graph",
					cg.findGraphPosition(headCommit) != -1);
			assertTrue("unreachable commit does not exist in commit graph",
					cg.findGraphPosition(unreachableCommit) == -1);
		} else if (d.getPackSource() == UNREACHABLE_GARBAGE) {
			CommitGraph cg = pack.getCommitGraph(odb.newReader());
			assertNull(cg);
		} else {
			// fail() always throws AssertionError, so no explicit loop
			// exit is needed here (the previous "break" was dead code).
			fail("unexpected " + d.getPackSource());
		}
	}
}
/** Shorthand for starting a commit builder on the test repository. */
private TestRepository<InMemoryRepository>.CommitBuilder commit() {
	return git.commit();
}
/**
 * Runs a garbage-collection pass with commit-graph writing enabled.
 *
 * @throws IOException
 *             if the collection fails
 */
private void gcWithCommitGraph() throws IOException {
	// setWriteCommitGraph returns the collector, so the whole setup can
	// be expressed as a single fluent chain.
	run(new DfsGarbageCollector(repo).setWriteCommitGraph(true));
}
private void gcNoTtl() throws IOException {
DfsGarbageCollector gc = new DfsGarbageCollector(repo);
gc.setGarbageTtl(0, TimeUnit.MILLISECONDS); // disable TTL

View File

@ -90,7 +90,7 @@ public void testWriteWhenGc() throws Exception {
bb.update(tip);
assertTrue(gc.shouldWriteCommitGraphWhenGc());
gc.gc();
gc.gc().get();
File graphFile = new File(repo.getObjectsDirectory(),
Constants.INFO_COMMIT_GRAPH);
assertGraphFile(graphFile);
@ -103,7 +103,7 @@ public void testDefaultWriteWhenGc() throws Exception {
bb.update(tip);
assertFalse(gc.shouldWriteCommitGraphWhenGc());
gc.gc();
gc.gc().get();
File graphFile = new File(repo.getObjectsDirectory(),
Constants.INFO_COMMIT_GRAPH);
assertFalse(graphFile.exists());
@ -123,21 +123,21 @@ public void testDisableWriteWhenGc() throws Exception {
config.setBoolean(ConfigConstants.CONFIG_GC_SECTION, null,
ConfigConstants.CONFIG_KEY_WRITE_COMMIT_GRAPH, true);
gc.gc();
gc.gc().get();
assertFalse(graphFile.exists());
config.setBoolean(ConfigConstants.CONFIG_CORE_SECTION, null,
ConfigConstants.CONFIG_COMMIT_GRAPH, true);
config.setBoolean(ConfigConstants.CONFIG_GC_SECTION, null,
ConfigConstants.CONFIG_KEY_WRITE_COMMIT_GRAPH, false);
gc.gc();
gc.gc().get();
assertFalse(graphFile.exists());
config.setBoolean(ConfigConstants.CONFIG_CORE_SECTION, null,
ConfigConstants.CONFIG_COMMIT_GRAPH, false);
config.setBoolean(ConfigConstants.CONFIG_GC_SECTION, null,
ConfigConstants.CONFIG_KEY_WRITE_COMMIT_GRAPH, false);
gc.gc();
gc.gc().get();
assertFalse(graphFile.exists());
}

View File

@ -247,7 +247,7 @@ public void testWindowCursorGetCommitGraph() throws Exception {
assertTrue(curs.getCommitGraph().isEmpty());
commitFile("file.txt", "content", "master");
GC gc = new GC(db);
gc.gc();
gc.gc().get();
assertTrue(curs.getCommitGraph().isPresent());
db.getConfig().setBoolean(ConfigConstants.CONFIG_CORE_SECTION, null,
@ -286,7 +286,7 @@ public void testGetCommitGraph() throws Exception {
// add commit-graph
commitFile("file.txt", "content", "master");
GC gc = new GC(db);
gc.gc();
gc.gc().get();
File file = new File(db.getObjectsDirectory(),
Constants.INFO_COMMIT_GRAPH);
assertTrue(file.exists());
@ -296,7 +296,7 @@ public void testGetCommitGraph() throws Exception {
// update commit-graph
commitFile("file2.txt", "content", "master");
gc.gc();
gc.gc().get();
assertEquals(2, dir.getCommitGraph().get().getCommitCnt());
// delete commit-graph
@ -311,7 +311,7 @@ public void testGetCommitGraph() throws Exception {
assertTrue(dir.getCommitGraph().isEmpty());
// add commit-graph again
gc.gc();
gc.gc().get();
assertTrue(dir.getCommitGraph().isPresent());
assertEquals(2, dir.getCommitGraph().get().getCommitCnt());
}

View File

@ -25,6 +25,7 @@
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.file.PackIndex.MutableEntry;
import org.eclipse.jgit.junit.RepositoryTestCase;
import org.eclipse.jgit.lib.ObjectId;
import org.junit.Test;
public abstract class PackIndexTestCase extends RepositoryTestCase {
@ -122,6 +123,37 @@ public void testIteratorReturnedValues1() {
assertFalse(iter.hasNext());
}
@Test
public void testEntriesPositionsRamdomAccess() {
	// NOTE(review): "Ramdom" is a typo for "Random" in the method name;
	// kept as-is to avoid renaming a test method.
	// Look up positions of ids known to be in the small index, in
	// non-sequential order; expected values are the positions of these
	// ids within the index.
	assertEquals(4, smallIdx.findPosition(ObjectId
			.fromString("82c6b885ff600be425b4ea96dee75dca255b69e7")));
	assertEquals(7, smallIdx.findPosition(ObjectId
			.fromString("c59759f143fb1fe21c197981df75a7ee00290799")));
	assertEquals(0, smallIdx.findPosition(ObjectId
			.fromString("4b825dc642cb6eb9a060e54bf8d69288fbee4904")));
}
/**
 * The position returned by {@code findPosition()} for the i-th entry seen
 * by the iterator must be i, for both the small and the dense index.
 */
@Test
public void testEntriesPositionsWithIteratorOrder() {
	int i = 0;
	for (MutableEntry me : smallIdx) {
		// JUnit convention: expected value first, actual value second
		// (the original call had them swapped, which produced misleading
		// "expected X but was Y" messages on failure).
		assertEquals(i, smallIdx.findPosition(me.toObjectId()));
		i++;
	}
	i = 0;
	for (MutableEntry me : denseIdx) {
		assertEquals(i, denseIdx.findPosition(me.toObjectId()));
		i++;
	}
}
@Test
public void testEntriesPositionsObjectNotInPack() {
	// findPosition() reports -1 for ids absent from the index, including
	// the all-zeros id.
	assertEquals(-1, smallIdx.findPosition(ObjectId.zeroId()));
	assertEquals(-1, smallIdx.findPosition(ObjectId
			.fromString("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")));
}
/**
* Compare offset from iterator entries with output of findOffset() method.
*/
@ -135,6 +167,13 @@ public void testCompareEntriesOffsetsWithFindOffsets() {
}
}
@Test
public void testEntriesOffsetsObjectNotInPack() {
	// findOffset() mirrors findPosition(): -1 for ids absent from the
	// index, including the all-zeros id.
	assertEquals(-1, smallIdx.findOffset(ObjectId.zeroId()));
	assertEquals(-1, smallIdx.findOffset(ObjectId
			.fromString("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")));
}
/**
* Compare offset from iterator entries with output of getOffset() method.
*/

View File

@ -0,0 +1,392 @@
/*
* Copyright (C) 2022, Google LLC and others
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Distribution License v. 1.0 which is available at
* https://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
package org.eclipse.jgit.internal.storage.file;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.transport.PackedObjectInfo;
import org.eclipse.jgit.util.BlockList;
import org.junit.Test;
public class PackObjectSizeIndexV1Test {
private static final ObjectId OBJ_ID = ObjectId
.fromString("b8b1d53172fb3fb19647adce4b38fab4371c2454");
private static final long GB = 1 << 30;
private static final int MAX_24BITS_UINT = 0xffffff;
@Test
public void write_24bPositions_32bSizes() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 0);
List<PackedObjectInfo> objs = new ArrayList<>();
objs.add(blobWithSize(100));
objs.add(blobWithSize(400));
objs.add(blobWithSize(200));
writer.write(objs);
byte[] expected = new byte[] { -1, 's', 'i', 'z', // header
0x01, // version
0x00, 0x00, 0x00, 0x00, // minimum object size
0x00, 0x00, 0x00, 0x03, // obj count
0x18, // Unsigned 3 bytes
0x00, 0x00, 0x00, 0x03, // 3 positions
0x00, 0x00, 0x00, // positions
0x00, 0x00, 0x01, //
0x00, 0x00, 0x02, //
0x00, // No more positions
0x00, 0x00, 0x00, 0x64, // size 100
0x00, 0x00, 0x01, (byte) 0x90, // size 400
0x00, 0x00, 0x00, (byte) 0xc8, // size 200
0x00, 0x00, 0x00, 0x00 // 64bit sizes counter
};
byte[] output = out.toByteArray();
assertArrayEquals(expected, output);
}
@Test
public void write_32bPositions_32bSizes() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 0);
List<PackedObjectInfo> objs = new BlockList<>(9_000_000);
// The 24 bit range is full of commits and trees
PackedObjectInfo commit = objInfo(Constants.OBJ_COMMIT, 100);
for (int i = 0; i <= MAX_24BITS_UINT; i++) {
objs.add(commit);
}
objs.add(blobWithSize(100));
objs.add(blobWithSize(400));
objs.add(blobWithSize(200));
writer.write(objs);
byte[] expected = new byte[] { -1, 's', 'i', 'z', // header
0x01, // version
0x00, 0x00, 0x00, 0x00, // minimum object size
0x00, 0x00, 0x00, 0x03, // obj count
(byte) 0x20, // Signed 4 bytes
0x00, 0x00, 0x00, 0x03, // 3 positions
0x01, 0x00, 0x00, 0x00, // positions
0x01, 0x00, 0x00, 0x01, //
0x01, 0x00, 0x00, 0x02, //
0x00, // No more positions
0x00, 0x00, 0x00, 0x64, // size 100
0x00, 0x00, 0x01, (byte) 0x90, // size 400
0x00, 0x00, 0x00, (byte) 0xc8, // size 200
0x00, 0x00, 0x00, 0x00 // 64bit sizes counter
};
byte[] output = out.toByteArray();
assertArrayEquals(expected, output);
}
@Test
public void write_24b32bPositions_32bSizes() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 0);
List<PackedObjectInfo> objs = new BlockList<>(9_000_000);
// The 24 bit range is full of commits and trees
PackedObjectInfo commit = objInfo(Constants.OBJ_COMMIT, 100);
for (int i = 0; i < MAX_24BITS_UINT; i++) {
objs.add(commit);
}
objs.add(blobWithSize(100));
objs.add(blobWithSize(400));
objs.add(blobWithSize(200));
writer.write(objs);
byte[] expected = new byte[] { -1, 's', 'i', 'z', // header
0x01, // version
0x00, 0x00, 0x00, 0x00, // minimum object size
0x00, 0x00, 0x00, 0x03, // obj count
0x18, // 3 bytes
0x00, 0x00, 0x00, 0x01, // 1 position
(byte) 0xff, (byte) 0xff, (byte) 0xff,
(byte) 0x20, // 4 bytes (32 bits)
0x00, 0x00, 0x00, 0x02, // 2 positions
0x01, 0x00, 0x00, 0x00, // positions
0x01, 0x00, 0x00, 0x01, //
0x00, // No more positions
0x00, 0x00, 0x00, 0x64, // size 100
0x00, 0x00, 0x01, (byte) 0x90, // size 400
0x00, 0x00, 0x00, (byte) 0xc8, // size 200
0x00, 0x00, 0x00, 0x00 // 64bit sizes counter
};
byte[] output = out.toByteArray();
assertArrayEquals(expected, output);
}
@Test
public void write_64bitsSize() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 0);
List<PackedObjectInfo> objs = new ArrayList<>();
objs.add(blobWithSize(100));
objs.add(blobWithSize(3 * GB));
objs.add(blobWithSize(4 * GB));
objs.add(blobWithSize(400));
writer.write(objs);
byte[] expected = new byte[] { -1, 's', 'i', 'z', // header
0x01, // version
0x00, 0x00, 0x00, 0x00, // minimum object size
0x00, 0x00, 0x00, 0x04, // Object count
0x18, // Unsigned 3 byte positions
0x00, 0x00, 0x00, 0x04, // 4 positions
0x00, 0x00, 0x00, // positions
0x00, 0x00, 0x01, //
0x00, 0x00, 0x02, //
0x00, 0x00, 0x03, //
0x00, // No more positions
0x00, 0x00, 0x00, 0x64, // size 100
(byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, // -1 (3GB)
(byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xfe, // -2 (4GB)
0x00, 0x00, 0x01, (byte) 0x90, // size 400
0x00, 0x00, 0x00, (byte) 0x02, // 64bit sizes counter (2)
0x00, 0x00, 0x00, 0x00, // size 3Gb
(byte) 0xc0, 0x00, 0x00, 0x00, //
0x00, 0x00, 0x00, 0x01, // size 4GB
(byte) 0x00, 0x00, 0x00, 0x00, //
0x00, 0x00, 0x00, 0x00 // 128bit sizes counter
};
byte[] output = out.toByteArray();
assertArrayEquals(expected, output);
}
@Test
public void write_allObjectsTooSmall() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 1 << 10);
List<PackedObjectInfo> objs = new ArrayList<>();
// too small blobs
objs.add(blobWithSize(100));
objs.add(blobWithSize(200));
objs.add(blobWithSize(400));
writer.write(objs);
byte[] expected = new byte[] { -1, 's', 'i', 'z', // header
0x01, // version
0x00, 0x00, 0x04, 0x00, // minimum object size
0x00, 0x00, 0x00, 0x00, // Object count
};
byte[] output = out.toByteArray();
assertArrayEquals(expected, output);
}
@Test
public void write_onlyBlobsIndexed() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 0);
List<PackedObjectInfo> objs = new ArrayList<>();
objs.add(objInfo(Constants.OBJ_COMMIT, 1000));
objs.add(blobWithSize(100));
objs.add(objInfo(Constants.OBJ_TAG, 1000));
objs.add(blobWithSize(400));
objs.add(blobWithSize(200));
writer.write(objs);
byte[] expected = new byte[] { -1, 's', 'i', 'z', // header
0x01, // version
0x00, 0x00, 0x00, 0x00, // minimum object size
0x00, 0x00, 0x00, 0x03, // Object count
0x18, // Positions in 3 bytes
0x00, 0x00, 0x00, 0x03, // 3 entries
0x00, 0x00, 0x01, // positions
0x00, 0x00, 0x03, //
0x00, 0x00, 0x04, //
0x00, // No more positions
0x00, 0x00, 0x00, 0x64, // size 100
0x00, 0x00, 0x01, (byte) 0x90, // size 400
0x00, 0x00, 0x00, (byte) 0xc8, // size 200
0x00, 0x00, 0x00, 0x00 // 64bit sizes counter
};
byte[] output = out.toByteArray();
assertArrayEquals(expected, output);
}
@Test
public void write_noObjects() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 0);
List<PackedObjectInfo> objs = new ArrayList<>();
writer.write(objs);
byte[] expected = new byte[] { -1, 's', 'i', 'z', // header
0x01, // version
0x00, 0x00, 0x00, 0x00, // minimum object size
0x00, 0x00, 0x00, 0x00, // Object count
};
byte[] output = out.toByteArray();
assertArrayEquals(expected, output);
}
@Test
public void read_empty() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 0);
List<PackedObjectInfo> objs = new ArrayList<>();
writer.write(objs);
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
PackObjectSizeIndex index = PackObjectSizeIndexLoader.load(in);
assertEquals(-1, index.getSize(0));
assertEquals(-1, index.getSize(1));
assertEquals(-1, index.getSize(1 << 30));
assertEquals(0, index.getThreshold());
}
@Test
public void read_only24bitsPositions() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 0);
List<PackedObjectInfo> objs = new ArrayList<>();
objs.add(blobWithSize(100));
objs.add(blobWithSize(200));
objs.add(blobWithSize(400));
objs.add(blobWithSize(1500));
writer.write(objs);
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
PackObjectSizeIndex index = PackObjectSizeIndexLoader.load(in);
assertEquals(100, index.getSize(0));
assertEquals(200, index.getSize(1));
assertEquals(400, index.getSize(2));
assertEquals(1500, index.getSize(3));
assertEquals(0, index.getThreshold());
}
@Test
public void read_only32bitsPositions() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 2000);
List<PackedObjectInfo> objs = new ArrayList<>();
PackedObjectInfo smallObj = blobWithSize(100);
for (int i = 0; i <= MAX_24BITS_UINT; i++) {
objs.add(smallObj);
}
objs.add(blobWithSize(1000));
objs.add(blobWithSize(3000));
objs.add(blobWithSize(2500));
objs.add(blobWithSize(1000));
writer.write(objs);
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
PackObjectSizeIndex index = PackObjectSizeIndexLoader.load(in);
assertEquals(-1, index.getSize(5));
assertEquals(-1, index.getSize(MAX_24BITS_UINT+1));
assertEquals(3000, index.getSize(MAX_24BITS_UINT+2));
assertEquals(2500, index.getSize(MAX_24BITS_UINT+3));
assertEquals(-1, index.getSize(MAX_24BITS_UINT+4)); // Not indexed
assertEquals(2000, index.getThreshold());
}
@Test
public void read_24and32BitsPositions() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 2000);
List<PackedObjectInfo> objs = new ArrayList<>();
PackedObjectInfo smallObj = blobWithSize(100);
for (int i = 0; i <= MAX_24BITS_UINT; i++) {
if (i == 500 || i == 1000 || i == 1500) {
objs.add(blobWithSize(2500));
continue;
}
objs.add(smallObj);
}
objs.add(blobWithSize(3000));
objs.add(blobWithSize(1000));
objs.add(blobWithSize(2500));
objs.add(blobWithSize(1000));
writer.write(objs);
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
PackObjectSizeIndex index = PackObjectSizeIndexLoader.load(in);
// 24 bit positions
assertEquals(-1, index.getSize(5));
assertEquals(2500, index.getSize(500));
// 32 bit positions
assertEquals(3000, index.getSize(MAX_24BITS_UINT+1));
assertEquals(-1, index.getSize(MAX_24BITS_UINT+2));
assertEquals(2500, index.getSize(MAX_24BITS_UINT+3));
assertEquals(-1, index.getSize(MAX_24BITS_UINT+4)); // Not indexed
assertEquals(2000, index.getThreshold());
}
@Test
public void read_only64bits() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, 0);
List<PackedObjectInfo> objs = new ArrayList<>();
objs.add(blobWithSize(3 * GB));
objs.add(blobWithSize(8 * GB));
writer.write(objs);
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
PackObjectSizeIndex index = PackObjectSizeIndexLoader.load(in);
assertEquals(3 * GB, index.getSize(0));
assertEquals(8 * GB, index.getSize(1));
assertEquals(0, index.getThreshold());
}
@Test
public void read_withMinSize() throws IOException {
int minSize = 1000;
ByteArrayOutputStream out = new ByteArrayOutputStream();
PackObjectSizeIndexWriter writer = PackObjectSizeIndexWriter
.createWriter(out, minSize);
List<PackedObjectInfo> objs = new ArrayList<>();
objs.add(blobWithSize(3 * GB));
objs.add(blobWithSize(1500));
objs.add(blobWithSize(500));
objs.add(blobWithSize(1000));
objs.add(blobWithSize(2000));
writer.write(objs);
ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
PackObjectSizeIndex index = PackObjectSizeIndexLoader.load(in);
assertEquals(3 * GB, index.getSize(0));
assertEquals(1500, index.getSize(1));
assertEquals(-1, index.getSize(2));
assertEquals(1000, index.getSize(3));
assertEquals(2000, index.getSize(4));
assertEquals(minSize, index.getThreshold());
}
// Convenience factory: a blob-typed PackedObjectInfo of the given
// inflated size.
private static PackedObjectInfo blobWithSize(long size) {
	return objInfo(Constants.OBJ_BLOB, size);
}
// Builds a PackedObjectInfo with the given type and full (inflated) size.
// All test objects share the fixed OBJ_ID; only type and size matter to
// the size index under test.
private static PackedObjectInfo objInfo(int type, long size) {
	PackedObjectInfo objectInfo = new PackedObjectInfo(OBJ_ID);
	objectInfo.setType(type);
	objectInfo.setFullSize(size);
	return objectInfo;
}
}

View File

@ -0,0 +1,79 @@
/*
* Copyright (C) 2023, Google LLC
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Distribution License v. 1.0 which is available at
* https://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
package org.eclipse.jgit.internal.storage.file;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;
import org.junit.Assert;
import org.junit.Test;
/**
 * Tests for {@link UInt24Array}: reading unsigned 24-bit values, boundary
 * behavior, binary search and the shared EMPTY instance.
 */
public class UInt24ArrayTest {

	// Eight 3-byte big-endian unsigned values. The last two exercise
	// values whose most-significant byte has the high bit set, ensuring
	// no accidental sign extension when decoding.
	private static final byte[] DATA = { 0x00, 0x00, 0x00, // 0
			0x00, 0x00, 0x05, // 5
			0x00, 0x00, 0x0a, // 10
			0x00, 0x00, 0x0f, // 15
			0x00, 0x00, 0x14, // 20
			0x00, 0x00, 0x19, // 25
			(byte) 0xff, 0x00, 0x00, // Uint with MSB=1
			(byte) 0xff, (byte) 0xff, (byte) 0xff, // MAX
	};

	private static final UInt24Array asArray = new UInt24Array(DATA);

	@Test
	public void uInt24Array_size() {
		assertEquals(8, asArray.size());
	}

	@Test
	public void uInt24Array_get() {
		assertEquals(0, asArray.get(0));
		assertEquals(5, asArray.get(1));
		assertEquals(10, asArray.get(2));
		assertEquals(15, asArray.get(3));
		assertEquals(20, asArray.get(4));
		assertEquals(25, asArray.get(5));
		assertEquals(0xff0000, asArray.get(6));
		assertEquals(0xffffff, asArray.get(7));
		// Assert the exact boundary: size() == 8 is the first invalid
		// index (the original test used 9, leaving an off-by-one in the
		// bounds check untested).
		assertThrows(IndexOutOfBoundsException.class, () -> asArray.get(8));
	}

	@Test
	public void uInt24Array_getLastValue() {
		assertEquals(0xffffff, asArray.getLastValue());
	}

	@Test
	public void uInt24Array_find() {
		assertEquals(0, asArray.binarySearch(0));
		assertEquals(1, asArray.binarySearch(5));
		assertEquals(2, asArray.binarySearch(10));
		assertEquals(3, asArray.binarySearch(15));
		assertEquals(4, asArray.binarySearch(20));
		assertEquals(5, asArray.binarySearch(25));
		assertEquals(6, asArray.binarySearch(0xff0000));
		assertEquals(7, asArray.binarySearch(0xffffff));
		// Values that cannot fit in 24 bits are rejected outright.
		assertThrows(IllegalArgumentException.class,
				() -> asArray.binarySearch(Integer.MAX_VALUE));
	}

	@Test
	public void uInt24Array_empty() {
		Assert.assertTrue(UInt24Array.EMPTY.isEmpty());
		assertEquals(0, UInt24Array.EMPTY.size());
		assertEquals(-1, UInt24Array.EMPTY.binarySearch(1));
		assertThrows(IndexOutOfBoundsException.class,
				() -> UInt24Array.EMPTY.getLastValue());
		assertThrows(IndexOutOfBoundsException.class,
				() -> UInt24Array.EMPTY.get(0));
	}
}

View File

@ -24,6 +24,7 @@
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import org.eclipse.jgit.annotations.Nullable;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.errors.PatchApplyException;
import org.eclipse.jgit.api.errors.PatchFormatException;
@ -71,27 +72,21 @@ public abstract static class Base extends RepositoryTestCase {
this.inCore = inCore;
}
void init(final String aName) throws Exception {
init(aName, true, true);
}
protected void init(String aName, boolean preExists, boolean postExists)
throws Exception {
// Patch and pre/postimage are read from data
// org.eclipse.jgit.test/tst-rsrc/org/eclipse/jgit/diff/
this.name = aName;
if (postExists) {
postImage = IO
.readWholeStream(getTestResource(name + "_PostImage"), 0)
.array();
expectedText = new String(postImage, StandardCharsets.UTF_8);
expectedText = initPostImage(aName);
}
File f = new File(db.getWorkTree(), name);
if (preExists) {
preImage = IO
.readWholeStream(getTestResource(name + "_PreImage"), 0)
.array();
try (Git git = new Git(db)) {
Files.write(f.toPath(), preImage);
git.add().addFilepattern(name).call();
}
initPreImage(aName);
}
try (Git git = new Git(db)) {
RevCommit base = git.commit().setMessage("PreImage").call();
@ -99,8 +94,22 @@ protected void init(String aName, boolean preExists, boolean postExists)
}
}
void init(final String aName) throws Exception {
init(aName, true, true);
protected void initPreImage(String aName) throws Exception {
File f = new File(db.getWorkTree(), aName);
preImage = IO
.readWholeStream(getTestResource(aName + "_PreImage"), 0)
.array();
try (Git git = new Git(db)) {
Files.write(f.toPath(), preImage);
git.add().addFilepattern(aName).call();
}
}
protected String initPostImage(String aName) throws Exception {
postImage = IO
.readWholeStream(getTestResource(aName + "_PostImage"), 0)
.array();
return new String(postImage, StandardCharsets.UTF_8);
}
protected Result applyPatch()
@ -118,27 +127,33 @@ protected static InputStream getTestResource(String patchFile) {
return PatchApplierTest.class.getClassLoader()
.getResourceAsStream("org/eclipse/jgit/diff/" + patchFile);
}
void verifyChange(Result result, String aName) throws Exception {
verifyChange(result, aName, true);
}
protected void verifyContent(Result result, String path, boolean exists)
throws Exception {
verifyContent(result, path, exists ? expectedText : null);
}
protected void verifyContent(Result result, String path,
@Nullable String expectedContent) throws Exception {
if (inCore) {
byte[] output = readBlob(result.getTreeId(), path);
if (!exists)
if (expectedContent == null)
assertNull(output);
else {
assertNotNull(output);
assertEquals(expectedText,
assertEquals(expectedContent,
new String(output, StandardCharsets.UTF_8));
}
} else {
File f = new File(db.getWorkTree(), path);
if (!exists)
if (expectedContent == null)
assertFalse(f.exists());
else
checkFile(f, expectedText);
checkFile(f, expectedContent);
}
}
@ -154,7 +169,7 @@ protected byte[] readBlob(ObjectId treeish, String path)
RevWalk rw = tr.getRevWalk()) {
db.incrementOpen();
RevTree tree = rw.parseTree(treeish);
try (TreeWalk tw = TreeWalk.forPath(db,path,tree)){
try (TreeWalk tw = TreeWalk.forPath(db, path, tree)) {
if (tw == null) {
return null;
}
@ -300,7 +315,7 @@ public void testRenameNoHunks() throws Exception {
assertTrue(result.getPaths().contains("RenameNoHunks"));
assertTrue(result.getPaths().contains("nested/subdir/Renamed"));
verifyContent(result,"nested/subdir/Renamed", true);
verifyContent(result, "nested/subdir/Renamed", true);
}
@Test
@ -312,7 +327,7 @@ public void testRenameWithHunks() throws Exception {
assertTrue(result.getPaths().contains("RenameWithHunks"));
assertTrue(result.getPaths().contains("nested/subdir/Renamed"));
verifyContent(result,"nested/subdir/Renamed", true);
verifyContent(result, "nested/subdir/Renamed", true);
}
@Test
@ -355,6 +370,16 @@ public void testShiftDown2() throws Exception {
verifyChange(result, "ShiftDown2");
}
@Test
public void testDoesNotAffectUnrelatedFiles() throws Exception {
initPreImage("Unaffected");
String expectedUnaffectedText = initPostImage("Unaffected");
init("X");
Result result = applyPatch();
verifyChange(result, "X");
verifyContent(result, "Unaffected", expectedUnaffectedText);
}
}
public static class InCore extends Base {

View File

@ -309,7 +309,7 @@ void enableAndWriteCommitGraph() throws Exception {
db.getConfig().setBoolean(ConfigConstants.CONFIG_GC_SECTION, null,
ConfigConstants.CONFIG_KEY_WRITE_COMMIT_GRAPH, true);
GC gc = new GC(db);
gc.gc();
gc.gc().get();
}
private void reinitializeRevWalk() {

View File

@ -2766,7 +2766,9 @@ public void testObjectInfo() throws Exception {
TestV2Hook hook = new TestV2Hook();
ByteArrayInputStream recvStream = uploadPackV2((UploadPack up) -> {
up.setProtocolV2Hook(hook);
}, "command=object-info\n", "size",
}, "command=object-info\n",
PacketLineIn.delimiter(),
"size",
"oid " + ObjectId.toString(blob1.getId()),
"oid " + ObjectId.toString(blob2.getId()), PacketLineIn.end());
PacketLineIn pckIn = new PacketLineIn(recvStream);

View File

@ -7,6 +7,18 @@
<message_argument value="CONFIG_KEY_BITMAP_EXCLUDED_REFS_PREFIXES"/>
</message_arguments>
</filter>
<filter id="1142947843">
<message_arguments>
<message_argument value="5.13.2"/>
<message_argument value="CONFIG_KEY_PRESERVE_OLD_PACKS"/>
</message_arguments>
</filter>
<filter id="1142947843">
<message_arguments>
<message_argument value="5.13.2"/>
<message_argument value="CONFIG_KEY_PRUNE_PRESERVED"/>
</message_arguments>
</filter>
<filter id="1142947843">
<message_arguments>
<message_argument value="6.1.1"/>
@ -53,6 +65,12 @@
<message_argument value="DEFAULT_BITMAP_EXCLUDED_REFS_PREFIXES"/>
</message_arguments>
</filter>
<filter id="336658481">
<message_arguments>
<message_argument value="org.eclipse.jgit.storage.pack.PackConfig"/>
<message_argument value="DEFAULT_MIN_BYTES_FOR_OBJ_SIZE_INDEX"/>
</message_arguments>
</filter>
<filter id="1142947843">
<message_arguments>
<message_argument value="5.13.2"/>

View File

@ -111,6 +111,7 @@ cannotPullOnARepoWithState=Cannot pull into a repository with state: {0}
cannotRead=Cannot read {0}
cannotReadBackDelta=Cannot read delta type {0}
cannotReadBlob=Cannot read blob {0}
cannotReadByte=Cannot read byte from stream
cannotReadCommit=Cannot read commit {0}
cannotReadFile=Cannot read file {0}
cannotReadHEAD=cannot read HEAD: {0} {1}
@ -538,6 +539,7 @@ nothingToPush=Nothing to push.
notMergedExceptionMessage=Branch was not deleted as it has not been merged yet; use the force option to delete it anyway
notShallowedUnshallow=The server sent a unshallow for a commit that wasn''t marked as shallow: {0}
noXMLParserAvailable=No XML parser available.
numberDoesntFit=Number doesn't fit in a single byte
objectAtHasBadZlibStream=Object at {0} in {1} has bad zlib stream
objectIsCorrupt=Object {0} is corrupt: {1}
objectIsCorrupt3={0}: object {1}: {2}
@ -773,6 +775,7 @@ truncatedHunkOldLinesMissing=Truncated hunk, at least {0} old lines is missing
tSizeMustBeGreaterOrEqual1=tSize must be >= 1
unableToCheckConnectivity=Unable to check connectivity.
unableToCreateNewObject=Unable to create new object: {0}
unableToReadFullInt=Unable to read a full int from the stream
unableToReadPackfile=Unable to read packfile {0}
unableToRemovePath=Unable to remove path ''{0}''
unableToWrite=Unable to write {0}
@ -798,6 +801,7 @@ unknownObject=unknown object
unknownObjectInIndex=unknown object {0} found in index but not in pack file
unknownObjectType=Unknown object type {0}.
unknownObjectType2=unknown
unknownPositionEncoding=Unknown position encoding %s
unknownRefStorageFormat=Unknown ref storage format "{0}"
unknownRepositoryFormat=Unknown repository format
unknownRepositoryFormat2=Unknown repository format "{0}"; expected "0".
@ -825,6 +829,7 @@ unsupportedPackIndexVersion=Unsupported pack index version {0}
unsupportedPackVersion=Unsupported pack version {0}.
unsupportedReftableVersion=Unsupported reftable version {0}.
unsupportedRepositoryDescription=Repository description not supported
unsupportedSizesObjSizeIndex=Unsupported sizes in object-size-index
updateRequiresOldIdAndNewId=Update requires both old ID and new ID to be nonzero
updatingHeadFailed=Updating HEAD failed
updatingReferences=Updating references

View File

@ -1,4 +1,5 @@
cannotReadIndex=Cannot read index {0}
cannotReadCommitGraph=Cannot read commit graph {0}
shortReadOfBlock=Short read of block at {0} in pack {1}; expected {2} bytes, received only {3}
shortReadOfIndex=Short read of index {0}
willNotStoreEmptyPack=Cannot store empty pack

View File

@ -139,6 +139,7 @@ public static JGitText get() {
/***/ public String cannotRead;
/***/ public String cannotReadBackDelta;
/***/ public String cannotReadBlob;
/***/ public String cannotReadByte;
/***/ public String cannotReadCommit;
/***/ public String cannotReadFile;
/***/ public String cannotReadHEAD;
@ -566,6 +567,7 @@ public static JGitText get() {
/***/ public String notMergedExceptionMessage;
/***/ public String notShallowedUnshallow;
/***/ public String noXMLParserAvailable;
/***/ public String numberDoesntFit;
/***/ public String objectAtHasBadZlibStream;
/***/ public String objectIsCorrupt;
/***/ public String objectIsCorrupt3;
@ -801,6 +803,7 @@ public static JGitText get() {
/***/ public String tSizeMustBeGreaterOrEqual1;
/***/ public String unableToCheckConnectivity;
/***/ public String unableToCreateNewObject;
/***/ public String unableToReadFullInt;
/***/ public String unableToReadPackfile;
/***/ public String unableToRemovePath;
/***/ public String unableToWrite;
@ -826,6 +829,7 @@ public static JGitText get() {
/***/ public String unknownObjectInIndex;
/***/ public String unknownObjectType;
/***/ public String unknownObjectType2;
/***/ public String unknownPositionEncoding;
/***/ public String unknownRefStorageFormat;
/***/ public String unknownRepositoryFormat;
/***/ public String unknownRepositoryFormat2;
@ -853,6 +857,7 @@ public static JGitText get() {
/***/ public String unsupportedPackVersion;
/***/ public String unsupportedReftableVersion;
/***/ public String unsupportedRepositoryDescription;
/***/ public String unsupportedSizesObjSizeIndex;
/***/ public String updateRequiresOldIdAndNewId;
/***/ public String updatingHeadFailed;
/***/ public String updatingReferences;

View File

@ -18,6 +18,7 @@
import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.UNREACHABLE_GARBAGE;
import static org.eclipse.jgit.internal.storage.dfs.DfsPackCompactor.configureReftable;
import static org.eclipse.jgit.internal.storage.pack.PackExt.BITMAP_INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.COMMIT_GRAPH;
import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;
import static org.eclipse.jgit.internal.storage.pack.PackExt.REFTABLE;
@ -34,8 +35,11 @@
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.commitgraph.CommitGraphWriter;
import org.eclipse.jgit.internal.storage.commitgraph.GraphCommits;
import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource;
import org.eclipse.jgit.internal.storage.file.PackIndex;
import org.eclipse.jgit.internal.storage.file.PackReverseIndex;
@ -75,6 +79,7 @@ public class DfsGarbageCollector {
private PackConfig packConfig;
private ReftableConfig reftableConfig;
private boolean convertToReftable = true;
private boolean writeCommitGraph;
private boolean includeDeletes;
private long reftableInitialMinUpdateIndex = 1;
private long reftableInitialMaxUpdateIndex = 1;
@ -278,6 +283,20 @@ public DfsGarbageCollector setGarbageTtl(long ttl, TimeUnit unit) {
return this;
}
/**
* Toggle commit graph generation.
* <p>
* False by default.
*
* @param enable
* Allow/Disallow commit graph generation.
* @return {@code this}
*/
public DfsGarbageCollector setWriteCommitGraph(boolean enable) {
writeCommitGraph = enable;
return this;
}
/**
* Create a single new pack file containing all of the live objects.
* <p>
@ -642,6 +661,10 @@ private DfsPackDescription writePack(PackSource source, PackWriter pw,
writeReftable(pack);
}
if (source == GC) {
writeCommitGraph(pack, pm);
}
try (DfsOutputStream out = objdb.writeFile(pack, PACK)) {
pw.writePack(pm, pm, out);
pack.addFileExt(PACK);
@ -724,4 +747,25 @@ private void writeReftable(DfsPackDescription pack, Collection<Ref> refs)
pack.setReftableStats(writer.getStats());
}
}
private void writeCommitGraph(DfsPackDescription pack, ProgressMonitor pm)
throws IOException {
if (!writeCommitGraph || !objdb.getShallowCommits().isEmpty()) {
return;
}
Set<ObjectId> allTips = refsBefore.stream().map(Ref::getObjectId)
.collect(Collectors.toUnmodifiableSet());
try (DfsOutputStream out = objdb.writeFile(pack, COMMIT_GRAPH);
RevWalk pool = new RevWalk(ctx)) {
GraphCommits gcs = GraphCommits.fromWalk(pm, allTips, pool);
CountingOutputStream cnt = new CountingOutputStream(out);
CommitGraphWriter writer = new CommitGraphWriter(gcs);
writer.write(pm, cnt);
pack.addFileExt(COMMIT_GRAPH);
pack.setFileSize(COMMIT_GRAPH, cnt.getCount());
pack.setBlockSize(COMMIT_GRAPH, out.blockSize());
}
}
}

View File

@ -14,6 +14,7 @@
import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.UNREACHABLE_GARBAGE;
import static org.eclipse.jgit.internal.storage.pack.PackExt.BITMAP_INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.COMMIT_GRAPH;
import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;
import static org.eclipse.jgit.internal.storage.pack.PackExt.REVERSE_INDEX;
@ -37,6 +38,8 @@
import org.eclipse.jgit.errors.PackInvalidException;
import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.commitgraph.CommitGraph;
import org.eclipse.jgit.internal.storage.commitgraph.CommitGraphLoader;
import org.eclipse.jgit.internal.storage.file.PackBitmapIndex;
import org.eclipse.jgit.internal.storage.file.PackIndex;
import org.eclipse.jgit.internal.storage.file.PackReverseIndex;
@ -69,6 +72,9 @@ public final class DfsPackFile extends BlockBasedFile {
/** Index of compressed bitmap mapping entire object graph. */
private volatile PackBitmapIndex bitmapIndex;
/** Index of compressed commit graph mapping entire object graph. */
private volatile CommitGraph commitGraph;
/**
* Objects we have tried to read, and discovered to be corrupt.
* <p>
@ -215,6 +221,43 @@ public PackBitmapIndex getBitmapIndex(DfsReader ctx) throws IOException {
return bitmapIndex;
}
/**
* Get the Commit Graph for this PackFile.
*
* @param ctx
* reader context to support reading from the backing store if
* the index is not already loaded in memory.
* @return {@link org.eclipse.jgit.internal.storage.commitgraph.CommitGraph},
* null if pack doesn't have it.
* @throws java.io.IOException
* the Commit Graph is not available, or is corrupt.
*/
public CommitGraph getCommitGraph(DfsReader ctx) throws IOException {
if (invalid || isGarbage() || !desc.hasFileExt(COMMIT_GRAPH)) {
return null;
}
if (commitGraph != null) {
return commitGraph;
}
DfsStreamKey commitGraphKey = desc.getStreamKey(COMMIT_GRAPH);
AtomicBoolean cacheHit = new AtomicBoolean(true);
DfsBlockCache.Ref<CommitGraph> cgref = cache
.getOrLoadRef(commitGraphKey, REF_POSITION, () -> {
cacheHit.set(false);
return loadCommitGraph(ctx, commitGraphKey);
});
if (cacheHit.get()) {
ctx.stats.commitGraphCacheHit++;
}
CommitGraph cg = cgref.get();
if (commitGraph == null && cg != null) {
commitGraph = cg;
}
return commitGraph;
}
PackReverseIndex getReverseIdx(DfsReader ctx) throws IOException {
if (reverseIndex != null) {
return reverseIndex;
@ -1081,4 +1124,37 @@ private DfsBlockCache.Ref<PackBitmapIndex> loadBitmapIndex(DfsReader ctx,
desc.getFileName(BITMAP_INDEX)), e);
}
}
private DfsBlockCache.Ref<CommitGraph> loadCommitGraph(DfsReader ctx,
DfsStreamKey cgkey) throws IOException {
ctx.stats.readCommitGraph++;
long start = System.nanoTime();
try (ReadableChannel rc = ctx.db.openFile(desc, COMMIT_GRAPH)) {
long size;
CommitGraph cg;
try {
InputStream in = Channels.newInputStream(rc);
int wantSize = 8192;
int bs = rc.blockSize();
if (0 < bs && bs < wantSize) {
bs = (wantSize / bs) * bs;
} else if (bs <= 0) {
bs = wantSize;
}
in = new BufferedInputStream(in, bs);
cg = CommitGraphLoader.read(in);
} finally {
size = rc.position();
ctx.stats.readCommitGraphBytes += size;
ctx.stats.readCommitGraphMicros += elapsedMicros(start);
}
commitGraph = cg;
return new DfsBlockCache.Ref<>(cgkey, REF_POSITION, size, cg);
} catch (IOException e) {
throw new IOException(
MessageFormat.format(DfsText.get().cannotReadCommitGraph,
desc.getFileName(COMMIT_GRAPH)),
e);
}
}
}

View File

@ -23,6 +23,7 @@
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
@ -31,6 +32,7 @@
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.commitgraph.CommitGraph;
import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackList;
import org.eclipse.jgit.internal.storage.file.BitmapIndexImpl;
import org.eclipse.jgit.internal.storage.file.PackBitmapIndex;
@ -121,6 +123,18 @@ public BitmapIndex getBitmapIndex() throws IOException {
return null;
}
/** {@inheritDoc} */
@Override
public Optional<CommitGraph> getCommitGraph() throws IOException {
for (DfsPackFile pack : db.getPacks()) {
CommitGraph cg = pack.getCommitGraph(this);
if (cg != null) {
return Optional.of(cg);
}
}
return Optional.empty();
}
/** {@inheritDoc} */
@Override
public Collection<CachedPack> getCachedPacksAndUpdate(

View File

@ -28,6 +28,9 @@ public static class Accumulator {
/** Total number of cache hits for bitmap indexes. */
long bitmapCacheHit;
/** Total number of cache hits for commit graphs. */
long commitGraphCacheHit;
/** Total number of complete pack indexes read into memory. */
long readIdx;
@ -37,15 +40,24 @@ public static class Accumulator {
/** Total number of reverse indexes added into memory. */
long readReverseIdx;
/** Total number of complete commit graphs read into memory. */
long readCommitGraph;
/** Total number of bytes read from pack indexes. */
long readIdxBytes;
/** Total number of bytes read from commit graphs. */
long readCommitGraphBytes;
/** Total microseconds spent reading pack indexes. */
long readIdxMicros;
/** Total microseconds spent creating reverse indexes. */
long readReverseIdxMicros;
/** Total microseconds spent creating commit graphs. */
long readCommitGraphMicros;
/** Total number of bytes read from bitmap indexes. */
long readBitmapIdxBytes;
@ -122,6 +134,15 @@ public long getBitmapIndexCacheHits() {
return stats.bitmapCacheHit;
}
/**
* Get total number of commit graph cache hits.
*
* @return total number of commit graph cache hits.
*/
public long getCommitGraphCacheHits() {
return stats.commitGraphCacheHit;
}
/**
* Get total number of complete pack indexes read into memory.
*
@ -140,6 +161,15 @@ public long getReadReverseIndexCount() {
return stats.readReverseIdx;
}
/**
* Get total number of times the commit graph read into memory.
*
* @return total number of commit graph read into memory.
*/
public long getReadCommitGraphCount() {
return stats.readCommitGraph;
}
/**
* Get total number of complete bitmap indexes read into memory.
*
@ -158,6 +188,15 @@ public long getReadIndexBytes() {
return stats.readIdxBytes;
}
/**
* Get total number of bytes read from commit graphs.
*
* @return total number of bytes read from commit graphs.
*/
public long getCommitGraphBytes() {
return stats.readCommitGraphBytes;
}
/**
* Get total microseconds spent reading pack indexes.
*
@ -176,6 +215,15 @@ public long getReadReverseIndexMicros() {
return stats.readReverseIdxMicros;
}
/**
* Get total microseconds spent reading commit graphs.
*
* @return total microseconds spent reading commit graphs.
*/
public long getReadCommitGraphMicros() {
return stats.readCommitGraphMicros;
}
/**
* Get total number of bytes read from bitmap indexes.
*

View File

@ -28,6 +28,7 @@ public static DfsText get() {
// @formatter:off
/***/ public String cannotReadIndex;
/***/ public String cannotReadCommitGraph;
/***/ public String shortReadOfBlock;
/***/ public String shortReadOfIndex;
/***/ public String willNotStoreEmptyPack;

View File

@ -66,7 +66,16 @@ public InMemoryRepository(DfsRepositoryDescription repoDesc) {
InMemoryRepository(Builder builder) {
super(builder);
objdb = new MemObjDatabase(this);
refdb = new MemRefDatabase();
refdb = createRefDatabase();
}
/**
* Creates a new in-memory ref database.
*
* @return a new in-memory reference database.
*/
protected MemRefDatabase createRefDatabase() {
return new MemRefDatabase();
}
/** {@inheritDoc} */

View File

@ -254,7 +254,7 @@ long getSize(WindowCursor curs, AnyObjectId id) throws IOException {
// refresh directory to work around NFS caching issue
}
return getSizeWithoutRefresh(curs, id);
} catch (FileNotFoundException e) {
} catch (FileNotFoundException unused) {
if (fileFor(id).exists()) {
throw noFile;
}

View File

@ -240,6 +240,17 @@ public final ObjectId getObjectId(int nthPosition) {
*/
public abstract long findOffset(AnyObjectId objId);
/**
* Locate the position of this id in the list of object-ids in the index
*
* @param objId
* name of the object to locate within the index
* @return position of the object-id in the lexicographically ordered list
* of ids stored in this index; -1 if the object does not exist in
* this index and is thus not stored in the associated pack.
*/
public abstract int findPosition(AnyObjectId objId);
/**
* Retrieve stored CRC32 checksum of the requested object raw-data
* (including header).

View File

@ -32,6 +32,8 @@
class PackIndexV1 extends PackIndex {
private static final int IDX_HDR_LEN = 256 * 4;
private static final int RECORD_SIZE = 4 + Constants.OBJECT_ID_LENGTH;
private final long[] idxHeader;
byte[][] idxdata;
@ -131,8 +133,50 @@ long getOffset(long nthPosition) {
public long findOffset(AnyObjectId objId) {
final int levelOne = objId.getFirstByte();
byte[] data = idxdata[levelOne];
if (data == null)
int pos = levelTwoPosition(objId, data);
if (pos < 0) {
return -1;
}
// The records are (offset, objectid), pos points to objectId
int b0 = data[pos - 4] & 0xff;
int b1 = data[pos - 3] & 0xff;
int b2 = data[pos - 2] & 0xff;
int b3 = data[pos - 1] & 0xff;
return (((long) b0) << 24) | (b1 << 16) | (b2 << 8) | (b3);
}
/** {@inheritDoc} */
@Override
public int findPosition(AnyObjectId objId) {
int levelOne = objId.getFirstByte();
int levelTwo = levelTwoPosition(objId, idxdata[levelOne]);
if (levelTwo < 0) {
return -1;
}
long objsBefore = levelOne == 0 ? 0 : idxHeader[levelOne - 1];
return (int) objsBefore + ((levelTwo - 4) / RECORD_SIZE);
}
/**
* Find position in level two data of this objectId
*
* Records are (offset, objectId), so to read the corresponding offset,
* caller must subtract from this position.
*
* @param objId
* ObjectId we are looking for
* @param data
* Blob of second level data with a series of (offset, objectid)
* pairs where we should find objId
*
* @return position in the byte[] where the objectId starts. -1 if not
* found.
*/
private int levelTwoPosition(AnyObjectId objId, byte[] data) {
if (data == null || data.length == 0) {
return -1;
}
int high = data.length / (4 + Constants.OBJECT_ID_LENGTH);
int low = 0;
do {
@ -142,11 +186,7 @@ public long findOffset(AnyObjectId objId) {
if (cmp < 0)
high = mid;
else if (cmp == 0) {
int b0 = data[pos - 4] & 0xff;
int b1 = data[pos - 3] & 0xff;
int b2 = data[pos - 2] & 0xff;
int b3 = data[pos - 1] & 0xff;
return (((long) b0) << 24) | (b1 << 16) | (b2 << 8) | (b3);
return pos;
} else
low = mid + 1;
} while (low < high);
@ -204,7 +244,7 @@ else if (cmp == 0) {
}
private static int idOffset(int mid) {
return ((4 + Constants.OBJECT_ID_LENGTH) * mid) + 4;
return (RECORD_SIZE * mid) + 4;
}
private class IndexV1Iterator extends EntriesIterator {

View File

@ -192,6 +192,18 @@ public long findOffset(AnyObjectId objId) {
return getOffset(levelOne, levelTwo);
}
/** {@inheritDoc} */
@Override
public int findPosition(AnyObjectId objId) {
int levelOne = objId.getFirstByte();
int levelTwo = binarySearchLevelTwo(objId, levelOne);
if (levelTwo < 0) {
return -1;
}
long objsBefore = levelOne == 0 ? 0 : fanoutTable[levelOne - 1];
return (int) objsBefore + levelTwo;
}
private long getOffset(int levelOne, int levelTwo) {
final long p = NB.decodeUInt32(offset32[levelOne], levelTwo << 2);
if ((p & IS_O64) != 0)

View File

@ -0,0 +1,45 @@
/*
* Copyright (C) 2022, Google LLC and others
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Distribution License v. 1.0 which is available at
* https://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
package org.eclipse.jgit.internal.storage.file;
/**
 * Index of object sizes in a pack
 *
 * It is not guaranteed that the implementation contains the sizes of all
 * objects (e.g. it could store only objects over certain threshold).
 */
public interface PackObjectSizeIndex {

	/**
	 * Returns the inflated size of the object.
	 *
	 * @param idxOffset
	 *            position of the object-id in the lexicographically ordered
	 *            list of ids in the pack index (as returned by
	 *            PackIndex's findPosition) — not a byte offset into the
	 *            pack file
	 * @return size of the object, -1 if not found in the index.
	 */
	long getSize(int idxOffset);

	/**
	 * Number of objects in the index
	 *
	 * @return number of objects in the index
	 */
	long getObjectCount();

	/**
	 * Minimal size of an object to be included in this index
	 *
	 * Cut-off value used at generation time to decide what objects to index.
	 *
	 * @return size in bytes
	 */
	int getThreshold();
}

View File

@ -0,0 +1,43 @@
/*
* Copyright (C) 2022, Google LLC and others
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Distribution License v. 1.0 which is available at
* https://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
package org.eclipse.jgit.internal.storage.file;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
/**
 * Chooses the specific implementation of the object-size index based on the
 * file version.
 */
public class PackObjectSizeIndexLoader {

	/**
	 * Read an object size index from the stream
	 *
	 * @param in
	 *            input stream at the beginning of the object size data
	 * @return an implementation of the object size index
	 * @throws IOException
	 *             error reading the stream, the data is not an object-size
	 *             index, or the version is unknown
	 */
	public static PackObjectSizeIndex load(InputStream in) throws IOException {
		// readNBytes(4) may return fewer bytes at EOF; Arrays.equals then
		// fails the header check, so truncation is reported as a bad header.
		byte[] header = in.readNBytes(4);
		if (!Arrays.equals(header, PackObjectSizeIndexWriter.HEADER)) {
			throw new IOException("Stream is not an object index"); //$NON-NLS-1$
		}
		// readNBytes(1) returns an EMPTY array at EOF: guard it so a
		// truncated stream surfaces as IOException instead of an
		// ArrayIndexOutOfBoundsException from [0].
		byte[] versionByte = in.readNBytes(1);
		if (versionByte.length == 0) {
			throw new IOException("Truncated object index: missing version"); //$NON-NLS-1$
		}
		int version = versionByte[0];
		if (version != 1) {
			throw new IOException("Unknown object size version: " + version); //$NON-NLS-1$
		}
		return PackObjectSizeIndexV1.parse(in);
	}
}

View File

@ -0,0 +1,223 @@
/*
* Copyright (C) 2022, Google LLC and others
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Distribution License v. 1.0 which is available at
* https://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
package org.eclipse.jgit.internal.storage.file;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.util.Arrays;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.util.NB;
/**
* Memory representation of the object-size index
*
* The object size index is a map from position in the primary idx (i.e.
* position of the object-id in lexicographical order) to size.
*
* Most of the positions fit in unsigned 3 bytes (up to 16 million)
*/
class PackObjectSizeIndexV1 implements PackObjectSizeIndex {
private static final byte BITS_24 = 0x18;
private static final byte BITS_32 = 0x20;
private final int threshold;
private final UInt24Array positions24;
private final int[] positions32;
/**
* Parallel array to concat(positions24, positions32) with the size of the
* objects.
*
* A value >= 0 is the size of the object. A negative value means the size
* doesn't fit in an int and |value|-1 is the position for the size in the
* size64 array e.g. a value of -1 is sizes64[0], -2 = sizes64[1], ...
*/
private final int[] sizes32;
private final long[] sizes64;
// Parses the version-1 body. The 4-byte header and 1-byte version have
// already been consumed by PackObjectSizeIndexLoader before this point.
static PackObjectSizeIndex parse(InputStream in) throws IOException {
	IndexInputStreamReader stream = new IndexInputStreamReader(in);
	int threshold = stream.readInt(); // minSize
	int objCount = stream.readInt();
	if (objCount == 0) {
		// Nothing indexed; an empty index still reports its threshold.
		return new EmptyPackObjectSizeIndex(threshold);
	}
	return new PackObjectSizeIndexV1(stream, threshold, objCount);
}
// Reads the on-disk layout: a sequence of (encoding-byte, count, payload)
// position chunks terminated by a 0 byte, then the parallel size arrays.
private PackObjectSizeIndexV1(IndexInputStreamReader stream, int threshold,
		int objCount) throws IOException {
	this.threshold = threshold;
	UInt24Array pos24 = null;
	int[] pos32 = null;
	byte positionEncoding;
	while ((positionEncoding = stream.readByte()) != 0) {
		if (Byte.compareUnsigned(positionEncoding, BITS_24) == 0) {
			int sz = stream.readInt();
			// 3 bytes per unsigned 24-bit position
			pos24 = new UInt24Array(stream.readNBytes(sz * 3));
		} else if (Byte.compareUnsigned(positionEncoding, BITS_32) == 0) {
			int sz = stream.readInt();
			pos32 = stream.readIntArray(sz);
		} else {
			// Unknown chunk encoding: cannot skip it because its payload
			// length is encoding-specific, so fail the whole parse.
			throw new UnsupportedEncodingException(
					String.format(JGitText.get().unknownPositionEncoding,
							Integer.toHexString(positionEncoding)));
		}
	}
	positions24 = pos24 != null ? pos24 : UInt24Array.EMPTY;
	positions32 = pos32 != null ? pos32 : new int[0];
	// One 32-bit slot per indexed object, parallel to
	// concat(positions24, positions32); negative slots index into sizes64.
	sizes32 = stream.readIntArray(objCount);
	int c64sizes = stream.readInt();
	if (c64sizes == 0) {
		sizes64 = new long[0];
		return;
	}
	sizes64 = stream.readLongArray(c64sizes);
	int c128sizes = stream.readInt();
	if (c128sizes != 0) {
		// this MUST be 0 (we don't support 128 bits sizes yet)
		throw new IOException(JGitText.get().unsupportedSizesObjSizeIndex);
	}
}
@Override
public long getSize(int idxOffset) {
int pos = -1;
if (!positions24.isEmpty() && idxOffset <= positions24.getLastValue()) {
pos = positions24.binarySearch(idxOffset);
} else if (positions32.length > 0 && idxOffset >= positions32[0]) {
int pos32 = Arrays.binarySearch(positions32, idxOffset);
if (pos32 >= 0) {
pos = pos32 + positions24.size();
}
}
if (pos < 0) {
return -1;
}
int objSize = sizes32[pos];
if (objSize < 0) {
int secondPos = Math.abs(objSize) - 1;
return sizes64[secondPos];
}
return objSize;
}
@Override
public long getObjectCount() {
return positions24.size() + positions32.length;
}
@Override
public int getThreshold() {
return threshold;
}
/**
* Wrapper to read parsed content from the byte stream
*/
private static class IndexInputStreamReader {
private final byte[] buffer = new byte[8];
private final InputStream in;
IndexInputStreamReader(InputStream in) {
this.in = in;
}
int readInt() throws IOException {
int n = in.readNBytes(buffer, 0, 4);
if (n < 4) {
throw new IOException(JGitText.get().unableToReadFullInt);
}
return NB.decodeInt32(buffer, 0);
}
int[] readIntArray(int intsCount) throws IOException {
if (intsCount == 0) {
return new int[0];
}
int[] dest = new int[intsCount];
for (int i = 0; i < intsCount; i++) {
dest[i] = readInt();
}
return dest;
}
long readLong() throws IOException {
int n = in.readNBytes(buffer, 0, 8);
if (n < 8) {
throw new IOException(JGitText.get().unableToReadFullInt);
}
return NB.decodeInt64(buffer, 0);
}
long[] readLongArray(int longsCount) throws IOException {
if (longsCount == 0) {
return new long[0];
}
long[] dest = new long[longsCount];
for (int i = 0; i < longsCount; i++) {
dest[i] = readLong();
}
return dest;
}
byte readByte() throws IOException {
int n = in.readNBytes(buffer, 0, 1);
if (n != 1) {
throw new IOException(JGitText.get().cannotReadByte);
}
return buffer[0];
}
byte[] readNBytes(int sz) throws IOException {
return in.readNBytes(sz);
}
}
private static class EmptyPackObjectSizeIndex
implements PackObjectSizeIndex {
private final int threshold;
EmptyPackObjectSizeIndex(int threshold) {
this.threshold = threshold;
}
@Override
public long getSize(int idxOffset) {
return -1;
}
@Override
public long getObjectCount() {
return 0;
}
@Override
public int getThreshold() {
return threshold;
}
}
}

View File

@ -0,0 +1,286 @@
/*
* Copyright (C) 2022, Google LLC and others
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Distribution License v. 1.0 which is available at
* https://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
package org.eclipse.jgit.internal.storage.file;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.transport.PackedObjectInfo;
import org.eclipse.jgit.util.NB;
/**
* Write an object index in the output stream
*/
public abstract class PackObjectSizeIndexWriter {

	// Largest position encodable as an unsigned 24-bit int (16M - 1)
	private static final int MAX_24BITS_UINT = 0xffffff;

	// Returned when the index is disabled (minSize < 0): writes nothing
	private static final PackObjectSizeIndexWriter NULL_WRITER = new PackObjectSizeIndexWriter() {
		@Override
		public void write(List<? extends PackedObjectInfo> objs) {
			// Do nothing
		}
	};

	/** Magic constant for the object size index file */
	protected static final byte[] HEADER = { -1, 's', 'i', 'z' };

	/**
	 * Returns a writer for the latest index version
	 *
	 * @param os
	 *            Output stream where to write the index
	 * @param minSize
	 *            objects strictly smaller than this size won't be added to the
	 *            index. Negative size won't write AT ALL. Other sizes could write
	 *            an empty index.
	 * @return the index writer
	 */
	public static PackObjectSizeIndexWriter createWriter(OutputStream os,
			int minSize) {
		if (minSize < 0) {
			return NULL_WRITER;
		}
		return new PackObjectSizeWriterV1(os, minSize);
	}

	/**
	 * Add the objects to the index
	 *
	 * @param objs
	 *            objects in the pack, in sha1 order. Their position in the list
	 *            matches their position in the primary index.
	 * @throws IOException
	 *             problem writing to the stream
	 */
	public abstract void write(List<? extends PackedObjectInfo> objs)
			throws IOException;

	/**
	 * Object size index v1.
	 *
	 * Store position (in the main index) to size as parallel arrays.
	 *
	 * <p>Positions in the main index fit well in unsigned 24 bits (16M) for most
	 * repositories, but some outliers have even more objects, so we need to
	 * store also 32 bits positions.
	 *
	 * <p>Sizes are stored as a first array parallel to positions. If a size
	 * doesn't fit in an element of that array, then we encode there a position
	 * on the next-size array. This "overflow" array doesn't have entries for
	 * all positions.
	 *
	 * <pre>
	 *
	 * positions	[10, 500, 1000, 1001]
	 * sizes (32bits)	[15MB, -1, 6MB, -2]
	 *                        ___/  ______/
	 *                       /     /
	 * sizes (64 bits)	[3GB, 6GB]
	 * </pre>
	 *
	 * <p>For sizes we use 32 bits as the first level and 64 for the rare objects
	 * over 2GB.
	 *
	 * <p>A 24/32/64 bits hierarchy of arrays saves space if we have a lot of small
	 * objects, but wastes space if we have only big ones. The min size to index is
	 * controlled by conf and in principle we want to index only rather
	 * big objects (e.g. > 10MB). We could support more dynamics read/write of sizes
	 * (e.g. 24 only if the threshold will include many of those objects) but it
	 * complicates a lot code and spec. If needed it could go for a v2 of the protocol.
	 *
	 * <p>Format:
	 *
	 * <li>A header with the magic number (4 bytes)
	 * <li>The index version (1 byte)
	 * <li>The minimum object size (4 bytes)
	 * <li>Total count of objects indexed (C, 4 bytes)
	 * (if count == 0, stop here)
	 *
	 * Blocks of
	 * <li>Size per entry in bits (1 byte, either 24 (0x18) or 32 (0x20))
	 * <li>Count of entries (4 bytes) (c, as a signed int)
	 * <li>positions encoded in s bytes each (i.e s*c bytes)
	 *
	 * <li>0 (as a "size-per-entry = 0", marking end of the section)
	 *
	 * <li>32 bit sizes (C * 4 bytes). Negative size means
	 * nextLevel[abs(size)-1]
	 * <li>Count of 64 bit sizes (s64) (or 0 if no more indirections)
	 * <li>64 bit sizes (s64 * 8 bytes)
	 * <li>0 (end)
	 */
	static class PackObjectSizeWriterV1 extends PackObjectSizeIndexWriter {

		private final OutputStream os;

		// Objects strictly smaller than this (in bytes) are not indexed
		private final int minObjSize;

		// Scratch buffer reused by the write* helpers (max 4 bytes needed)
		private final byte[] intBuffer = new byte[4];

		PackObjectSizeWriterV1(OutputStream os, int minSize) {
			this.os = new BufferedOutputStream(os);
			this.minObjSize = minSize;
		}

		/**
		 * Serialize the v1 object-size index for the given pack contents.
		 *
		 * @param allObjects
		 *            all objects of the pack in sha1 order; their list
		 *            position is their position in the primary index
		 * @throws IOException
		 *             problem writing to the stream
		 */
		@Override
		public void write(List<? extends PackedObjectInfo> allObjects)
				throws IOException {
			os.write(HEADER);
			writeUInt8(1); // Version
			writeInt32(minObjSize);
			PackedObjectStats stats = countIndexableObjects(allObjects);
			int[] indexablePositions = findIndexablePositions(allObjects,
					stats.indexableObjs);
			writeInt32(indexablePositions.length); // Total # of objects
			if (indexablePositions.length == 0) {
				os.flush();
				return;
			}
			// Positions that fit in 3 bytes
			if (stats.pos24Bits > 0) {
				writeUInt8(24);
				writeInt32(stats.pos24Bits);
				applyToRange(indexablePositions, 0, stats.pos24Bits,
						this::writeInt24);
			}
			// Positions that fit in 4 bytes
			// We only use 31 bits due to sign,
			// but that covers 2 billion objs
			if (stats.pos31Bits > 0) {
				writeUInt8(32);
				writeInt32(stats.pos31Bits);
				applyToRange(indexablePositions, stats.pos24Bits,
						stats.pos24Bits + stats.pos31Bits, this::writeInt32);
			}
			writeUInt8(0);
			writeSizes(allObjects, indexablePositions, stats.sizeOver2GB);
			os.flush();
		}

		// Write the low byte of i; i must fit in an unsigned byte
		private void writeUInt8(int i) throws IOException {
			if (i > 255) {
				throw new IllegalStateException(
						JGitText.get().numberDoesntFit);
			}
			NB.encodeInt32(intBuffer, 0, i);
			os.write(intBuffer, 3, 1);
		}

		// Write the low 3 bytes of i, big-endian
		private void writeInt24(int i) throws IOException {
			NB.encodeInt24(intBuffer, 1, i);
			os.write(intBuffer, 1, 3);
		}

		// Write i as 4 bytes, big-endian
		private void writeInt32(int i) throws IOException {
			NB.encodeInt32(intBuffer, 0, i);
			os.write(intBuffer);
		}

		/**
		 * Write the size section: one 32-bit entry per indexed object,
		 * followed (when needed) by the 64-bit overflow table.
		 *
		 * @param allObjects
		 *            all objects of the pack, in primary-index order
		 * @param indexablePositions
		 *            positions (into allObjects) of the objects to index
		 * @param objsBiggerThan2Gb
		 *            how many of those objects need a 64-bit size
		 * @throws IOException
		 *             problem writing to the stream
		 */
		private void writeSizes(List<? extends PackedObjectInfo> allObjects,
				int[] indexablePositions, int objsBiggerThan2Gb)
				throws IOException {
			if (indexablePositions.length == 0) {
				writeInt32(0);
				return;
			}
			// Overflow sizes are buffered here and emitted after the 32-bit
			// section, because entries reference them by position
			byte[] sizes64bits = new byte[8 * objsBiggerThan2Gb];
			int s64 = 0;
			for (int i = 0; i < indexablePositions.length; i++) {
				PackedObjectInfo info = allObjects.get(indexablePositions[i]);
				if (info.getFullSize() < Integer.MAX_VALUE) {
					writeInt32((int) info.getFullSize());
				} else {
					// Size needs more than 32 bits. Store -1 * offset in the
					// next table as size.
					writeInt32(-1 * (s64 + 1));
					NB.encodeInt64(sizes64bits, s64 * 8, info.getFullSize());
					s64++;
				}
			}
			if (objsBiggerThan2Gb > 0) {
				writeInt32(objsBiggerThan2Gb);
				os.write(sizes64bits);
			}
			// Marks the end: no further size-indirection levels
			writeInt32(0);
		}

		// Collect (in ascending order) the positions of objects that pass
		// the shouldIndex filter
		private int[] findIndexablePositions(
				List<? extends PackedObjectInfo> allObjects,
				int indexableObjs) {
			int[] positions = new int[indexableObjs];
			int positionIdx = 0;
			for (int i = 0; i < allObjects.size(); i++) {
				PackedObjectInfo o = allObjects.get(i);
				if (!shouldIndex(o)) {
					continue;
				}
				positions[positionIdx++] = i;
			}
			return positions;
		}

		// One pass over the objects to size the sections before writing
		private PackedObjectStats countIndexableObjects(
				List<? extends PackedObjectInfo> objs) {
			PackedObjectStats stats = new PackedObjectStats();
			for (int i = 0; i < objs.size(); i++) {
				PackedObjectInfo o = objs.get(i);
				if (!shouldIndex(o)) {
					continue;
				}
				stats.indexableObjs++;
				if (o.getFullSize() > Integer.MAX_VALUE) {
					stats.sizeOver2GB++;
				}
				if (i <= MAX_24BITS_UINT) {
					stats.pos24Bits++;
				} else {
					stats.pos31Bits++;
					// i is a positive int, cannot be bigger than this
				}
			}
			return stats;
		}

		// Only blobs at least minObjSize bytes long are indexed
		private boolean shouldIndex(PackedObjectInfo o) {
			return (o.getType() == Constants.OBJ_BLOB)
					&& (o.getFullSize() >= minObjSize);
		}

		// Counters gathered by countIndexableObjects
		private static class PackedObjectStats {
			int indexableObjs;

			int pos24Bits;

			int pos31Bits;

			int sizeOver2GB;
		}

		// Like IntConsumer, but allowed to throw IOException from the
		// underlying stream
		@FunctionalInterface
		interface IntEncoder {
			void encode(int i) throws IOException;
		}

		// Encode allPositions[start, end) with the given encoder
		private static void applyToRange(int[] allPositions, int start, int end,
				IntEncoder encoder) throws IOException {
			for (int i = start; i < end; i++) {
				encoder.encode(allPositions[i]);
			}
		}
	}
}

View File

@ -35,6 +35,7 @@
import java.io.InterruptedIOException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.security.DigestInputStream;
import java.security.MessageDigest;
@ -906,7 +907,7 @@ PackedRefList getPackedRefs() throws IOException {
try (InputStream stream = Files
.newInputStream(packedRefsFile.toPath())) {
// open the file to refresh attributes (on some NFS clients)
} catch (FileNotFoundException e) {
} catch (FileNotFoundException | NoSuchFileException e) {
// Ignore as packed-refs may not exist
}
//$FALL-THROUGH$

View File

@ -0,0 +1,93 @@
/*
* Copyright (C) 2023, Google LLC
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Distribution License v. 1.0 which is available at
* https://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
package org.eclipse.jgit.internal.storage.file;
/**
* A view of a byte[] as a list of integers stored in 3 bytes.
*
* The ints are stored in big-endian ("network order"), so
* byte[]{aa,bb,cc} becomes the int 0x00aabbcc
*/
final class UInt24Array {
public static final UInt24Array EMPTY = new UInt24Array(
new byte[0]);
private static final int ENTRY_SZ = 3;
private final byte[] data;
private final int size;
UInt24Array(byte[] data) {
this.data = data;
this.size = data.length / ENTRY_SZ;
}
boolean isEmpty() {
return size == 0;
}
int size() {
return size;
}
int get(int index) {
if (index < 0 || index >= size) {
throw new IndexOutOfBoundsException(index);
}
int offset = index * ENTRY_SZ;
int e = data[offset] & 0xff;
e <<= 8;
e |= data[offset + 1] & 0xff;
e <<= 8;
e |= data[offset + 2] & 0xff;
return e;
}
/**
* Search needle in the array.
*
* This assumes a sorted array.
*
* @param needle
* It cannot be bigger than 0xffffff (max unsigned three bytes).
* @return position of the needle in the array, -1 if not found. Runtime
* exception if the value is too big for 3 bytes.
*/
int binarySearch(int needle) {
if ((needle & 0xff000000) != 0) {
throw new IllegalArgumentException("Too big value for 3 bytes"); //$NON-NLS-1$
}
if (size == 0) {
return -1;
}
int high = size;
if (high == 0)
return -1;
int low = 0;
do {
int mid = (low + high) >>> 1;
int cmp;
cmp = Integer.compare(needle, get(mid));
if (cmp < 0)
high = mid;
else if (cmp == 0) {
return mid;
} else
low = mid + 1;
} while (low < high);
return -1;
}
int getLastValue() {
return get(size - 1);
}
}

View File

@ -822,6 +822,13 @@ public final class ConfigConstants {
*/
public static final String CONFIG_KEY_WINDOW_MEMORY = "windowmemory";
/**
* the "pack.minBytesForObjSizeIndex" key
*
* @since 6.5
*/
public static final String CONFIG_KEY_MIN_BYTES_OBJ_SIZE_INDEX = "minBytesForObjSizeIndex";
/**
* The "feature" section
*
@ -912,4 +919,18 @@ public final class ConfigConstants {
* @since 6.1.1
*/
public static final String CONFIG_KEY_TRUST_PACKED_REFS_STAT = "trustPackedRefsStat";
/**
* The "pack.preserveOldPacks" key
*
* @since 5.13.2
*/
public static final String CONFIG_KEY_PRESERVE_OLD_PACKS = "preserveoldpacks";
/**
* The "pack.prunePreserved" key
*
* @since 5.13.2
*/
public static final String CONFIG_KEY_PRUNE_PRESERVED = "prunepreserved";
}

View File

@ -512,9 +512,12 @@ public ObjectReachabilityChecker createObjectReachabilityChecker(
* (default is
* {@value org.eclipse.jgit.lib.CoreConfig#DEFAULT_COMMIT_GRAPH_ENABLE}).
*
* @throws IOException
* if it cannot open any of the underlying commit graph.
*
* @since 6.5
*/
public Optional<CommitGraph> getCommitGraph() {
public Optional<CommitGraph> getCommitGraph() throws IOException {
return Optional.empty();
}
@ -661,7 +664,7 @@ public BitmapIndex getBitmapIndex() throws IOException {
}
@Override
public Optional<CommitGraph> getCommitGraph() {
public Optional<CommitGraph> getCommitGraph() throws IOException{
return delegate().getCommitGraph();
}

View File

@ -194,7 +194,7 @@ public Result applyPatch(InputStream patchInput)
throw new PatchFormatException(p.getErrors());
}
DirCache dirCache = (inCore()) ? DirCache.newInCore()
DirCache dirCache = inCore() ? DirCache.read(reader, beforeTree)
: repo.lockDirCache();
DirCacheBuilder dirCacheBuilder = dirCache.builder();

View File

@ -1173,8 +1173,13 @@ byte[] getCachedBytes(RevObject obj, ObjectLoader ldr)
@NonNull
CommitGraph commitGraph() {
if (commitGraph == null) {
commitGraph = reader != null ? reader.getCommitGraph().orElse(EMPTY)
: EMPTY;
try {
commitGraph = reader != null
? reader.getCommitGraph().orElse(EMPTY)
: EMPTY;
} catch (IOException e) {
commitGraph = EMPTY;
}
}
return commitGraph;
}

View File

@ -36,7 +36,10 @@
import static org.eclipse.jgit.lib.ConfigConstants.CONFIG_KEY_WAIT_PREVENT_RACYPACK;
import static org.eclipse.jgit.lib.ConfigConstants.CONFIG_KEY_WINDOW;
import static org.eclipse.jgit.lib.ConfigConstants.CONFIG_KEY_WINDOW_MEMORY;
import static org.eclipse.jgit.lib.ConfigConstants.CONFIG_KEY_MIN_BYTES_OBJ_SIZE_INDEX;
import static org.eclipse.jgit.lib.ConfigConstants.CONFIG_PACK_SECTION;
import static org.eclipse.jgit.lib.ConfigConstants.CONFIG_KEY_PRESERVE_OLD_PACKS;
import static org.eclipse.jgit.lib.ConfigConstants.CONFIG_KEY_PRUNE_PRESERVED;
import java.time.Duration;
import java.util.concurrent.Executor;
@ -234,6 +237,15 @@ public class PackConfig {
*/
public static final String[] DEFAULT_BITMAP_EXCLUDED_REFS_PREFIXES = new String[0];
/**
* Default minimum size for an object to be included in the size index:
* {@value}
*
* @see #setMinBytesForObjSizeIndex(int)
* @since 6.5
*/
public static final int DEFAULT_MIN_BYTES_FOR_OBJ_SIZE_INDEX = -1;
/**
* Default max time to spend during the search for reuse phase. This
* optimization is disabled by default: {@value}
@ -302,6 +314,8 @@ public class PackConfig {
private boolean singlePack;
private int minBytesForObjSizeIndex = DEFAULT_MIN_BYTES_FOR_OBJ_SIZE_INDEX;
/**
* Create a default configuration.
*/
@ -369,6 +383,7 @@ public PackConfig(PackConfig cfg) {
this.cutDeltaChains = cfg.cutDeltaChains;
this.singlePack = cfg.singlePack;
this.searchForReuseTimeout = cfg.searchForReuseTimeout;
this.minBytesForObjSizeIndex = cfg.minBytesForObjSizeIndex;
}
/**
@ -1189,6 +1204,45 @@ public void setSearchForReuseTimeout(Duration timeout) {
searchForReuseTimeout = timeout;
}
/**
* Minimum size of an object (inclusive) to be added in the object size
* index.
*
* A negative value disables the writing of the object size index.
*
* @return minimum size an object must have to be included in the object
* index.
* @since 6.5
*/
public int getMinBytesForObjSizeIndex() {
return minBytesForObjSizeIndex;
}
/**
* Set minimum size an object must have to be included in the object size
* index.
*
* A negative value disables the object index.
*
* @param minBytesForObjSizeIndex
* minimum size (inclusive) of an object to be included in the
* object size index. -1 disables the index.
* @since 6.5
*/
public void setMinBytesForObjSizeIndex(int minBytesForObjSizeIndex) {
this.minBytesForObjSizeIndex = minBytesForObjSizeIndex;
}
/**
* Should writers add an object size index when writing a pack.
*
* @return true to write an object-size index with the pack
* @since 6.5
*/
public boolean isWriteObjSizeIndex() {
return this.minBytesForObjSizeIndex >= 0;
}
/**
* Update properties by setting fields from the configuration.
*
@ -1267,6 +1321,13 @@ public void fromConfig(Config rc) {
setMinSizePreventRacyPack(rc.getLong(CONFIG_PACK_SECTION,
CONFIG_KEY_MIN_SIZE_PREVENT_RACYPACK,
getMinSizePreventRacyPack()));
setMinBytesForObjSizeIndex(rc.getInt(CONFIG_PACK_SECTION,
CONFIG_KEY_MIN_BYTES_OBJ_SIZE_INDEX,
DEFAULT_MIN_BYTES_FOR_OBJ_SIZE_INDEX));
setPreserveOldPacks(rc.getBoolean(CONFIG_PACK_SECTION,
CONFIG_KEY_PRESERVE_OLD_PACKS, DEFAULT_PRESERVE_OLD_PACKS));
setPrunePreserved(rc.getBoolean(CONFIG_PACK_SECTION,
CONFIG_KEY_PRUNE_PRESERVED, DEFAULT_PRUNE_PRESERVED));
}
/** {@inheritDoc} */
@ -1302,6 +1363,8 @@ public String toString() {
b.append(", searchForReuseTimeout") //$NON-NLS-1$
.append(getSearchForReuseTimeout());
b.append(", singlePack=").append(getSinglePack()); //$NON-NLS-1$
b.append(", minBytesForObjSizeIndex=") //$NON-NLS-1$
.append(getMinBytesForObjSizeIndex());
return b.toString();
}
}

View File

@ -281,6 +281,12 @@ ObjectInfoRequest parseObjectInfoRequest(PacketLineIn pckIn)
return builder.build();
}
if (!PacketLineIn.isDelimiter(line)) {
throw new PackProtocolException(MessageFormat
.format(JGitText.get().unexpectedPacketLine, line));
}
line = pckIn.readString();
if (!line.equals("size")) { //$NON-NLS-1$
throw new PackProtocolException(MessageFormat
.format(JGitText.get().unexpectedPacketLine, line));

View File

@ -1386,6 +1386,9 @@ private List<String> getV2CapabilityAdvertisement() {
if (transferConfig.isAllowReceiveClientSID()) {
caps.add(OPTION_SESSION_ID);
}
if (transferConfig.isAdvertiseObjectInfo()) {
caps.add(COMMAND_OBJECT_INFO);
}
return caps;
}