RefDirectory: Retry acquiring ref locks with backoff

If a repo frequently uses PackedBatchRefUpdates, there is likely to be
contention on the packed-refs file, so it's not appropriate to fail
immediately the first time we fail to acquire a lock. Add some logic to
RefDirectory to support general retrying of lock acquisition.

Currently, there is a hard-coded wait starting at 100ms and backing off
exponentially to 1600ms, for about 3s of total wait. This is no worse
than the hard-coded backoff that JGit does elsewhere, e.g. in
FileUtils#delete. One can imagine a scheme that uses per-repository
configuration of backoff, and the current interface would support this
without changing any callers.

Change-Id: I4764e11270d9336882483eb698f67a78a401c251
This commit is contained in:
Dave Borowitz 2017-07-26 16:53:21 -04:00
parent 5188c23104
commit 6f23210781
4 changed files with 267 additions and 29 deletions

View File

@ -54,6 +54,7 @@
import static org.eclipse.jgit.transport.ReceiveCommand.Type.UPDATE;
import static org.eclipse.jgit.transport.ReceiveCommand.Type.UPDATE_NONFASTFORWARD;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
@ -96,6 +97,7 @@
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;
@SuppressWarnings("boxing")
@RunWith(Parameterized.class)
public class BatchRefUpdateTest extends LocalDiskRepositoryTestCase {
@Parameter
@ -125,6 +127,7 @@ public void setUp() throws Exception {
cfg.save();
refdir = (RefDirectory) diskRepo.getRefDatabase();
refdir.setRetrySleepMs(Arrays.asList(0, 0));
repo = new TestRepository<>(diskRepo);
A = repo.commit().create();
@ -584,6 +587,85 @@ public void overrideDisableRefLog() throws Exception {
getLastReflog("refs/heads/branch"));
}
@Test
public void packedRefsLockFailure() throws Exception {
    writeLooseRef("refs/heads/master", A);

    ReceiveCommand updateMaster =
            new ReceiveCommand(A, B, "refs/heads/master", UPDATE);
    ReceiveCommand createBranch =
            new ReceiveCommand(zeroId(), B, "refs/heads/branch", CREATE);
    List<ReceiveCommand> cmds = Arrays.asList(updateMaster, createBranch);

    // Hold the packed-refs lock ourselves so the batch cannot acquire it.
    LockFile packedRefsLock = refdir.lockPackedRefs();
    try {
        execute(newBatchUpdate(cmds).setAllowNonFastForwards(true));

        // Whatever the outcome, no stale loose-ref lock files may remain.
        assertFalse(getLockFile("refs/heads/master").exists());
        assertFalse(getLockFile("refs/heads/branch").exists());
        if (atomic) {
            assertResults(cmds, LOCK_FAILURE, TRANSACTION_ABORTED);
            assertRefs("refs/heads/master", A);
        } else {
            // Only operates on loose refs, doesn't care that packed-refs is locked.
            assertResults(cmds, OK, OK);
            assertRefs(
                    "refs/heads/master", B,
                    "refs/heads/branch", B);
        }
    } finally {
        packedRefsLock.unlock();
    }
}
@Test
public void oneRefLockFailure() throws Exception {
    writeLooseRef("refs/heads/master", A);

    List<ReceiveCommand> cmds = Arrays.asList(
            new ReceiveCommand(zeroId(), B, "refs/heads/branch", CREATE),
            new ReceiveCommand(A, B, "refs/heads/master", UPDATE));

    // Pre-lock one of the loose refs the batch needs to touch.
    LockFile masterLock = new LockFile(refdir.fileFor("refs/heads/master"));
    assertTrue(masterLock.lock());
    try {
        execute(newBatchUpdate(cmds).setAllowNonFastForwards(true));

        // Neither the packed-refs lock nor the other loose-ref lock may
        // be left behind.
        assertFalse(LockFile.getLockFile(refdir.packedRefsFile).exists());
        assertFalse(getLockFile("refs/heads/branch").exists());
        if (atomic) {
            assertResults(cmds, TRANSACTION_ABORTED, LOCK_FAILURE);
            assertRefs("refs/heads/master", A);
        } else {
            assertResults(cmds, OK, LOCK_FAILURE);
            assertRefs(
                    "refs/heads/branch", B,
                    "refs/heads/master", A);
        }
    } finally {
        masterLock.unlock();
    }
}
@Test
public void singleRefUpdateDoesNotRequirePackedRefsLock() throws Exception {
    writeLooseRef("refs/heads/master", A);
    List<ReceiveCommand> cmds = Arrays.asList(
            new ReceiveCommand(A, B, "refs/heads/master", UPDATE));

    // Even while packed-refs is held by someone else, a single loose-ref
    // update should succeed.
    LockFile packedRefsLock = refdir.lockPackedRefs();
    try {
        execute(newBatchUpdate(cmds).setAllowNonFastForwards(true));
        assertFalse(getLockFile("refs/heads/master").exists());
        assertResults(cmds, OK);
        assertRefs("refs/heads/master", B);
    } finally {
        packedRefsLock.unlock();
    }
}
/** Write a loose ref file under the repository's git directory. */
private void writeLooseRef(String name, AnyObjectId id) throws IOException {
    // A loose ref file is the object id in hex followed by a newline.
    String content = id.name() + "\n";
    write(new File(diskRepo.getDirectory(), name), content);
}
@ -712,6 +794,10 @@ private ReflogEntry getLastReflog(String name) throws IOException {
return r.getLastEntry();
}
/** Get the lock file that guards the loose ref with the given name. */
private File getLockFile(String refName) {
    File refFile = refdir.fileFor(refName);
    return LockFile.getLockFile(refFile);
}
private void assertReflogUnchanged(
Map<String, ReflogEntry> old, String name) throws IOException {
assertReflogEquals(old.get(name), getLastReflog(name), true);

View File

@ -60,10 +60,12 @@
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.eclipse.jgit.errors.LockFailedException;
import org.eclipse.jgit.events.ListenerHandle;
import org.eclipse.jgit.events.RefsChangedEvent;
import org.eclipse.jgit.events.RefsChangedListener;
@ -79,6 +81,7 @@
import org.junit.Before;
import org.junit.Test;
@SuppressWarnings("boxing")
public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
private Repository diskRepo;
@ -1284,6 +1287,23 @@ public void onRefsChanged(RefsChangedEvent event) {
assertEquals(1, changeCount.get());
}
@Test
public void testPackedRefsLockFailure() throws Exception {
    writeLooseRef("refs/heads/master", A);
    // Zero-length sleeps keep the retry loop from slowing the test down.
    refdir.setRetrySleepMs(Arrays.asList(0, 0));

    LockFile heldLock = refdir.lockPackedRefs();
    try {
        refdir.pack(Arrays.asList("refs/heads/master"));
        fail("expected LockFailedException");
    } catch (LockFailedException e) {
        assertEquals(refdir.packedRefsFile.getPath(), e.getFile().getPath());
    } finally {
        heldLock.unlock();
    }

    // Packing failed, so the ref must still be stored loose.
    Ref ref = refdir.getRef("refs/heads/master");
    assertEquals(Storage.LOOSE, ref.getStorage());
}
/** Write a loose ref file whose content is the given object id. */
private void writeLooseRef(String name, AnyObjectId id) throws IOException {
    // Delegate to the String overload with the standard loose-ref format.
    String content = id.name() + "\n";
    writeLooseRef(name, content);
}

View File

@ -49,6 +49,7 @@
import static org.eclipse.jgit.transport.ReceiveCommand.Result.REJECTED_NONFASTFORWARD;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
@ -57,7 +58,10 @@
import java.util.Map;
import java.util.Set;
import org.eclipse.jgit.annotations.Nullable;
import org.eclipse.jgit.errors.LockFailedException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.file.RefDirectory.PackedRefList;
import org.eclipse.jgit.lib.BatchRefUpdate;
import org.eclipse.jgit.lib.ObjectId;
@ -164,12 +168,18 @@ public void execute(RevWalk walk, ProgressMonitor monitor,
// Pack refs normally, so we can create lock files even in the case where
// refs/x is deleted and refs/x/y is created in this batch.
refdb.pack(
pending.stream().map(ReceiveCommand::getRefName).collect(toList()));
Map<String, LockFile> locks = new HashMap<>();
try {
if (!lockLooseRefs(pending, locks)) {
refdb.pack(
pending.stream().map(ReceiveCommand::getRefName).collect(toList()));
} catch (LockFailedException e) {
lockFailure(pending.get(0), pending);
return;
}
Map<String, LockFile> locks = null;
try {
locks = lockLooseRefs(pending);
if (locks == null) {
return;
}
PackedRefList oldPackedList = refdb.pack(locks);
@ -177,15 +187,15 @@ public void execute(RevWalk walk, ProgressMonitor monitor,
if (newRefs == null) {
return;
}
LockFile packedRefsLock = new LockFile(refdb.packedRefsFile);
try {
packedRefsLock.lock();
refdb.commitPackedRefs(packedRefsLock, newRefs, oldPackedList);
} finally {
packedRefsLock.unlock();
LockFile packedRefsLock = refdb.lockPackedRefs();
if (packedRefsLock == null) {
lockFailure(pending.get(0), pending);
return;
}
// commitPackedRefs removes lock file (by renaming over real file).
refdb.commitPackedRefs(packedRefsLock, newRefs, oldPackedList);
} finally {
locks.values().forEach(LockFile::unlock);
unlockAll(locks);
}
refdb.fireRefsChanged();
@ -271,17 +281,54 @@ private boolean checkNonFastForwards(RevWalk walk,
return true;
}
private boolean lockLooseRefs(List<ReceiveCommand> commands,
Map<String, LockFile> locks) throws IOException {
for (ReceiveCommand c : commands) {
LockFile lock = new LockFile(refdb.fileFor(c.getRefName()));
if (!lock.lock()) {
lockFailure(c, commands);
return false;
/**
* Lock loose refs corresponding to a list of commands.
*
* @param commands
* commands that we intend to execute.
* @return map of ref name in the input commands to lock file. Always contains
* one entry for each ref in the input list. All locks are acquired
* before returning. If any lock was not able to be acquired: the
* return value is null; no locks are held; and all commands that were
* pending are set to fail with {@code LOCK_FAILURE}.
* @throws IOException
* an error occurred other than a failure to acquire; no locks are
* held if this exception is thrown.
*/
@Nullable
private Map<String, LockFile> lockLooseRefs(List<ReceiveCommand> commands)
throws IOException {
ReceiveCommand failed = null;
Map<String, LockFile> locks = new HashMap<>();
try {
RETRY: for (int ms : refdb.getRetrySleepMs()) {
failed = null;
// Release all locks before trying again, to prevent deadlock.
unlockAll(locks);
locks.clear();
RefDirectory.sleep(ms);
for (ReceiveCommand c : commands) {
String name = c.getRefName();
LockFile lock = new LockFile(refdb.fileFor(name));
if (locks.put(name, lock) != null) {
throw new IOException(
MessageFormat.format(JGitText.get().duplicateRef, name));
}
if (!lock.lock()) {
failed = c;
continue RETRY;
}
}
Map<String, LockFile> result = locks;
locks = null;
return result;
}
locks.put(c.getRefName(), lock);
} finally {
unlockAll(locks);
}
return true;
lockFailure(failed != null ? failed : commands.get(0), commands);
return null;
}
private static RefList<Ref> applyUpdates(RevWalk walk, RefList<Ref> refs,
@ -444,6 +491,12 @@ private static Ref peeledRef(RevWalk walk, ReceiveCommand cmd)
Ref.Storage.PACKED, cmd.getRefName(), newId);
}
/** Release every lock in the map; a null map is treated as already released. */
private static void unlockAll(@Nullable Map<?, LockFile> locks) {
    // A null map means ownership was transferred elsewhere; nothing to do.
    if (locks == null) {
        return;
    }
    for (LockFile lock : locks.values()) {
        lock.unlock();
    }
}
private static void lockFailure(ReceiveCommand cmd,
List<ReceiveCommand> commands) {
reject(cmd, LOCK_FAILURE, commands);

View File

@ -63,6 +63,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.InterruptedIOException;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.text.MessageFormat;
@ -76,6 +77,7 @@
import java.util.concurrent.atomic.AtomicReference;
import org.eclipse.jgit.annotations.NonNull;
import org.eclipse.jgit.annotations.Nullable;
import org.eclipse.jgit.errors.InvalidObjectIdException;
import org.eclipse.jgit.errors.LockFailedException;
import org.eclipse.jgit.errors.MissingObjectException;
@ -137,6 +139,10 @@ public class RefDirectory extends RefDatabase {
Constants.MERGE_HEAD, Constants.FETCH_HEAD, Constants.ORIG_HEAD,
Constants.CHERRY_PICK_HEAD };
@SuppressWarnings("boxing")
private static final List<Integer> RETRY_SLEEP_MS =
Collections.unmodifiableList(Arrays.asList(0, 100, 200, 400, 800, 1600));
private final FileRepository parent;
private final File gitDir;
@ -176,6 +182,8 @@ public class RefDirectory extends RefDatabase {
*/
private final AtomicInteger lastNotifiedModCnt = new AtomicInteger();
private List<Integer> retrySleepMs = RETRY_SLEEP_MS;
RefDirectory(final FileRepository db) {
final FS fs = db.getFS();
parent = db;
@ -602,9 +610,7 @@ void delete(RefDirectoryUpdate update) throws IOException {
// we don't miss an edit made externally.
final PackedRefList packed = getPackedRefs();
if (packed.contains(name)) {
LockFile lck = new LockFile(packedRefsFile);
if (!lck.lock())
throw new LockFailedException(packedRefsFile);
LockFile lck = lockPackedRefsOrThrow();
try {
PackedRefList cur = readPackedRefs();
int idx = cur.find(name);
@ -665,11 +671,7 @@ private PackedRefList pack(Collection<String> refs,
FS fs = parent.getFS();
// Lock the packed refs file and read the content
LockFile lck = new LockFile(packedRefsFile);
if (!lck.lock()) {
throw new IOException(MessageFormat.format(
JGitText.get().cannotLock, packedRefsFile));
}
LockFile lck = lockPackedRefsOrThrow();
try {
final PackedRefList packed = getPackedRefs();
@ -765,6 +767,26 @@ private PackedRefList pack(Collection<String> refs,
}
}
/**
 * Try to acquire the lock on the {@code packed-refs} file, retrying with the
 * backoff schedule from {@link #getRetrySleepMs()}.
 * <p>
 * The first sleep value in the schedule is required to be 0 (see
 * {@code setRetrySleepMs}), so the first attempt is made immediately.
 *
 * @return the held lock on success, or {@code null} if the lock could not be
 *         acquired within the configured retries.
 * @throws IOException
 *             if sleeping between attempts was interrupted (thrown by
 *             {@code sleep} as an {@code InterruptedIOException}).
 */
@Nullable
LockFile lockPackedRefs() throws IOException {
LockFile lck = new LockFile(packedRefsFile);
for (int ms : getRetrySleepMs()) {
sleep(ms);
if (lck.lock()) {
return lck;
}
}
// Retry schedule exhausted without acquiring the lock.
return null;
}
/**
 * Acquire the {@code packed-refs} lock, failing loudly when unavailable.
 *
 * @return the held lock; never null.
 * @throws LockFailedException
 *             if the lock could not be acquired within the retry schedule.
 * @throws IOException
 *             if sleeping between attempts was interrupted.
 */
private LockFile lockPackedRefsOrThrow() throws IOException {
    LockFile held = lockPackedRefs();
    if (held != null) {
        return held;
    }
    throw new LockFailedException(packedRefsFile);
}
/**
* Make sure a ref is peeled and has the Storage PACKED. If the given ref
* has this attributes simply return it. Otherwise create a new peeled
@ -1175,6 +1197,63 @@ private static void delete(final File file, final int depth, LockFile rLck)
}
}
/**
 * Get times to sleep while retrying a possibly contentious operation.
 * <p>
 * Callers performing an operation that may contend with other processes,
 * such as locking the {@code packed-refs} file, can structure their retry
 * loop around the returned values:
 *
 * <pre>
 * for (int toSleepMs : getRetrySleepMs()) {
 *   sleep(toSleepMs);
 *   if (isSuccessful(doSomething())) {
 *     return success;
 *   }
 * }
 * return failure;
 * </pre>
 *
 * The first entry is always 0, so the initial attempt is made without
 * delay; running off the end of the iterator means the operation has
 * timed out.
 *
 * @return iterable of times, in milliseconds, that the caller should sleep
 *         before attempting an operation.
 */
Iterable<Integer> getRetrySleepMs() {
    return retrySleepMs;
}
/**
 * Set the sleep schedule used when retrying contended lock acquisition.
 *
 * @param retrySleepMs
 *            times, in milliseconds, to sleep before each attempt; must be
 *            non-empty and must start with 0 so the first attempt happens
 *            immediately.
 * @throws IllegalArgumentException
 *             if the list is null, empty, or does not start with 0.
 */
void setRetrySleepMs(List<Integer> retrySleepMs) {
    if (retrySleepMs == null || retrySleepMs.isEmpty()
            || retrySleepMs.get(0).intValue() != 0) {
        // Include the offending value in the message; previously this threw
        // with no detail, which made misconfiguration hard to diagnose.
        throw new IllegalArgumentException(
                "retrySleepMs must be non-empty and start with 0: "
                        + retrySleepMs);
    }
    this.retrySleepMs = retrySleepMs;
}
/**
 * Sleep with {@link Thread#sleep(long)}, converting {@link
 * InterruptedException} to {@link InterruptedIOException}.
 *
 * @param ms
 *            time to sleep, in milliseconds; zero or negative is a no-op.
 * @throws InterruptedIOException
 *             if sleeping was interrupted.
 */
static void sleep(long ms) throws InterruptedIOException {
    if (ms > 0) {
        try {
            Thread.sleep(ms);
        } catch (InterruptedException e) {
            // Callers deal in IOExceptions; wrap and preserve the cause.
            InterruptedIOException wrapped = new InterruptedIOException();
            wrapped.initCause(e);
            throw wrapped;
        }
    }
}
static class PackedRefList extends RefList<Ref> {
private final FileSnapshot snapshot;