Revert "Create util class for work tree updating in both filesystem and index."

This reverts commit 5151b324f4. It is
producing NullPointerExceptions during merges, causing Gerrit's
acceptance tests to fail:

com.google.gerrit.extensions.restapi.RestApiException: Cannot rebase ps
[...]
	at com.google.gerrit.server.api.changes.RevisionApiImpl.rebase(RevisionApiImpl.java:280)
	at com.google.gerrit.acceptance.api.change.ChangeIT.rebaseChangeBase(ChangeIT.java:1584)
Caused by: com.google.gerrit.server.update.UpdateException: java.lang.NullPointerException: repository is required
	at com.google.gerrit.server.update.BatchUpdate.executeUpdateRepo(BatchUpdate.java:588)
[...]
Caused by: java.lang.NullPointerException: repository is required
	at org.eclipse.jgit.merge.Merger.nonNullRepo(Merger.java:128)
	at org.eclipse.jgit.merge.ResolveMerger.addDeletion(ResolveMerger.java:380)
	at org.eclipse.jgit.merge.ResolveMerger.processEntry(ResolveMerger.java:553)
	at org.eclipse.jgit.merge.ResolveMerger.mergeTreeWalk(ResolveMerger.java:1224)
	at org.eclipse.jgit.merge.ResolveMerger.mergeTrees(ResolveMerger.java:1174)
	at org.eclipse.jgit.merge.ResolveMerger.mergeImpl(ResolveMerger.java:299)
	at org.eclipse.jgit.merge.Merger.merge(Merger.java:233)
	at org.eclipse.jgit.merge.Merger.merge(Merger.java:186)
	at org.eclipse.jgit.merge.ThreeWayMerger.merge(ThreeWayMerger.java:96)
	at com.google.gerrit.server.change.RebaseChangeOp.rebaseCommit(RebaseChangeOp.java:360)
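
Root-cause note (inferred from the trace above; not stated in the reverted
change): RebaseChangeOp builds an in-core merger from an ObjectInserter and
a Config, so the merger carries no Repository, and the reverted
ResolveMerger.addDeletion() unconditionally called nonNullRepo(). A minimal
sketch of that construction path, with "ins", "cfg", "base", "ours" and
"theirs" as hypothetical placeholders:

	// In-core merger without a Repository, as RebaseChangeOp creates it.
	ThreeWayMerger m = MergeStrategy.RECURSIVE.newMerger(ins, cfg);
	m.setBase(base);
	// With 5151b324f4 applied, any side deleting a file reaches
	// ResolveMerger.addDeletion() -> Merger.nonNullRepo(), which throws
	// NullPointerException("repository is required") since db is null.
	m.merge(ours, theirs);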

Change-Id: Idf63de81666d0df118d2d93c4f6e014e00dc05b8
Jonathan Nieder 2022-08-05 16:11:41 -04:00
parent 05a2485075
commit 5709317f71
4 changed files with 428 additions and 857 deletions


@ -9,6 +9,7 @@
*/
package org.eclipse.jgit.api;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
@ -24,6 +25,7 @@
import java.util.Iterator;
import java.util.List;
import java.util.zip.InflaterInputStream;
import org.eclipse.jgit.api.errors.FilterFailedException;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.api.errors.PatchApplyException;
@ -36,11 +38,15 @@
import org.eclipse.jgit.dircache.DirCacheCheckout;
import org.eclipse.jgit.dircache.DirCacheCheckout.CheckoutMetadata;
import org.eclipse.jgit.dircache.DirCacheIterator;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.CoreConfig.EolStreamType;
import org.eclipse.jgit.lib.FileMode;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectLoader;
import org.eclipse.jgit.lib.ObjectStream;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.patch.BinaryHunk;
import org.eclipse.jgit.patch.FileHeader;
@ -58,7 +64,6 @@
import org.eclipse.jgit.util.FileUtils;
import org.eclipse.jgit.util.IO;
import org.eclipse.jgit.util.RawParseUtils;
import org.eclipse.jgit.util.WorkTreeUpdater;
import org.eclipse.jgit.util.StringUtils;
import org.eclipse.jgit.util.TemporaryBuffer;
import org.eclipse.jgit.util.TemporaryBuffer.LocalFile;
@ -350,6 +355,60 @@ private InputStream filterClean(Repository repository, String path,
return result.getStdout().openInputStreamWithAutoDestroy();
}
/**
* Something that can supply an {@link InputStream}.
*/
private interface StreamSupplier {
InputStream load() throws IOException;
}
/**
* We write the patch result to a {@link TemporaryBuffer} and then use
* {@link DirCacheCheckout}.getContent() to run the result through the CR-LF
* and smudge filters. DirCacheCheckout needs an ObjectLoader, not a
* TemporaryBuffer, so this class bridges between the two, making any Stream
* provided by a {@link StreamSupplier} look like an ordinary git blob to
* DirCacheCheckout.
*/
private static class StreamLoader extends ObjectLoader {
private StreamSupplier data;
private long size;
StreamLoader(StreamSupplier data, long length) {
this.data = data;
this.size = length;
}
@Override
public int getType() {
return Constants.OBJ_BLOB;
}
@Override
public long getSize() {
return size;
}
@Override
public boolean isLarge() {
return true;
}
@Override
public byte[] getCachedBytes() throws LargeObjectException {
throw new LargeObjectException();
}
@Override
public ObjectStream openStream()
throws MissingObjectException, IOException {
return new ObjectStream.Filter(getType(), getSize(),
new BufferedInputStream(data.load()));
}
}
private void initHash(SHA1 hash, long size) {
hash.update(Constants.encodedTypeString(Constants.OBJ_BLOB));
hash.update((byte) ' ');
@ -397,7 +456,7 @@ private void checkOid(ObjectId baseId, ObjectId id, ChangeType type, File f,
}
private void applyBinary(Repository repository, String path, File f,
FileHeader fh, WorkTreeUpdater.StreamSupplier loader, ObjectId id,
FileHeader fh, StreamSupplier loader, ObjectId id,
CheckoutMetadata checkOut)
throws PatchApplyException, IOException {
if (!fh.getOldId().isComplete() || !fh.getNewId().isComplete()) {
@ -429,8 +488,7 @@ private void applyBinary(Repository repository, String path, File f,
hunk.getBuffer(), start,
length))))) {
DirCacheCheckout.getContent(repository, path, checkOut,
WorkTreeUpdater.createStreamLoader(() -> inflated,
hunk.getSize()),
new StreamLoader(() -> inflated, hunk.getSize()),
null, out);
if (!fh.getNewId().toObjectId().equals(hash.toObjectId())) {
throw new PatchApplyException(MessageFormat.format(
@ -462,8 +520,8 @@ private void applyBinary(Repository repository, String path, File f,
SHA1InputStream hashed = new SHA1InputStream(hash,
input)) {
DirCacheCheckout.getContent(repository, path, checkOut,
WorkTreeUpdater.createStreamLoader(() -> hashed, finalSize),
null, out);
new StreamLoader(() -> hashed, finalSize), null,
out);
if (!fh.getNewId().toObjectId()
.equals(hash.toObjectId())) {
throw new PatchApplyException(MessageFormat.format(
@ -631,7 +689,7 @@ && canApplyAt(hunkLines, newLines, 0)) {
}
try (OutputStream output = new FileOutputStream(f)) {
DirCacheCheckout.getContent(repository, path, checkOut,
WorkTreeUpdater.createStreamLoader(buffer::openInputStream,
new StreamLoader(buffer::openInputStream,
buffer.length()),
null, output);
}


@ -195,6 +195,9 @@ protected RevCommit getBaseCommit(RevCommit a, RevCommit b, int callDepth)
inCore = oldIncore;
dircache = oldDircache;
workingTreeIterator = oldWTreeIt;
toBeCheckedOut.clear();
toBeDeleted.clear();
modifiedFiles.clear();
unmergedPaths.clear();
mergeResults.clear();
failingPaths.clear();


@ -20,15 +20,23 @@
import static org.eclipse.jgit.lib.ConfigConstants.CONFIG_KEY_ALGORITHM;
import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.eclipse.jgit.annotations.NonNull;
import org.eclipse.jgit.attributes.Attributes;
import org.eclipse.jgit.diff.DiffAlgorithm;
@ -38,10 +46,18 @@
import org.eclipse.jgit.diff.Sequence;
import org.eclipse.jgit.dircache.DirCache;
import org.eclipse.jgit.dircache.DirCacheBuildIterator;
import org.eclipse.jgit.dircache.DirCacheBuilder;
import org.eclipse.jgit.dircache.DirCacheCheckout;
import org.eclipse.jgit.dircache.DirCacheCheckout.CheckoutMetadata;
import org.eclipse.jgit.dircache.DirCacheEntry;
import org.eclipse.jgit.errors.BinaryBlobException;
import org.eclipse.jgit.errors.CorruptObjectException;
import org.eclipse.jgit.errors.IncorrectObjectTypeException;
import org.eclipse.jgit.errors.IndexWriteException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.errors.NoWorkTreeException;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.lib.ConfigConstants;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.CoreConfig.EolStreamType;
import org.eclipse.jgit.lib.FileMode;
@ -56,19 +72,20 @@
import org.eclipse.jgit.treewalk.CanonicalTreeParser;
import org.eclipse.jgit.treewalk.NameConflictTreeWalk;
import org.eclipse.jgit.treewalk.TreeWalk;
import org.eclipse.jgit.treewalk.TreeWalk.OperationType;
import org.eclipse.jgit.treewalk.WorkingTreeIterator;
import org.eclipse.jgit.treewalk.WorkingTreeOptions;
import org.eclipse.jgit.treewalk.filter.TreeFilter;
import org.eclipse.jgit.util.FS;
import org.eclipse.jgit.util.LfsFactory;
import org.eclipse.jgit.util.WorkTreeUpdater;
import org.eclipse.jgit.util.WorkTreeUpdater.StreamLoader;
import org.eclipse.jgit.util.LfsFactory.LfsInputStream;
import org.eclipse.jgit.util.TemporaryBuffer;
import org.eclipse.jgit.util.io.EolStreamTypeUtil;
/**
* A three-way merger performing a content-merge if necessary
*/
public class ResolveMerger extends ThreeWayMerger {
/**
* If the merge fails (means: not stopped because of unresolved conflicts)
* this enum is used to explain why it failed
@ -132,9 +149,11 @@ public enum MergeFailureReason {
protected static final int T_FILE = 4;
/**
* Handler for repository I/O actions.
* Builder to update the cache during this merge.
*
* @since 3.4
*/
protected WorkTreeUpdater workTreeUpdater;
protected DirCacheBuilder builder;
/**
* merge result as tree
@ -143,11 +162,6 @@ public enum MergeFailureReason {
*/
protected ObjectId resultTree;
/**
* Files modified during this operation. Note this list is only updated after a successful write.
*/
protected List<String> modifiedFiles = new ArrayList<>();
/**
* Paths that could not be merged by this merger because of an unsolvable
* conflict.
@ -156,6 +170,29 @@ public enum MergeFailureReason {
*/
protected List<String> unmergedPaths = new ArrayList<>();
/**
* Files modified during this merge operation.
*
* @since 3.4
*/
protected List<String> modifiedFiles = new LinkedList<>();
/**
* If the merger has nothing to do for a file but check it out at the end of
* the operation, it can be added here.
*
* @since 3.4
*/
protected Map<String, DirCacheEntry> toBeCheckedOut = new HashMap<>();
/**
* Paths in this list will be deleted from the local copy at the end of the
* operation.
*
* @since 3.4
*/
protected List<String> toBeDeleted = new ArrayList<>();
/**
* Low-level textual merge results. Will be passed on to the callers in case
* of conflicts.
@ -189,6 +226,15 @@ public enum MergeFailureReason {
*/
protected boolean inCore;
/**
* Set to true if this merger should use the default dircache of the
* repository and should handle locking and unlocking of the dircache. If
* this merger should work in-core or if an explicit dircache was specified
* during construction then this field is set to false.
* @since 3.0
*/
protected boolean implicitDirCache;
/**
* Directory cache
* @since 3.0
@ -208,6 +254,20 @@ public enum MergeFailureReason {
*/
protected MergeAlgorithm mergeAlgorithm;
/**
* The {@link WorkingTreeOptions} are needed to determine line endings for
* merged files.
*
* @since 4.11
*/
protected WorkingTreeOptions workingTreeOptions;
/**
* The size limit (bytes) which controls whether a file is stored in
* {@code Heap} or {@code LocalFile} during the merge.
*/
private int inCoreLimit;
/**
* The {@link ContentMergeStrategy} to use for "resolve" and "recursive"
* merges.
@ -215,6 +275,16 @@ public enum MergeFailureReason {
@NonNull
private ContentMergeStrategy contentStrategy = ContentMergeStrategy.CONFLICT;
/**
* Keeps {@link CheckoutMetadata} for {@link #checkout()}.
*/
private Map<String, CheckoutMetadata> checkoutMetadata;
/**
* Keeps {@link CheckoutMetadata} for {@link #cleanUp()}.
*/
private Map<String, CheckoutMetadata> cleanupMetadata;
private static MergeAlgorithm getMergeAlgorithm(Config config) {
SupportedAlgorithm diffAlg = config.getEnum(
CONFIG_DIFF_SECTION, null, CONFIG_KEY_ALGORITHM,
@ -222,8 +292,13 @@ private static MergeAlgorithm getMergeAlgorithm(Config config) {
return new MergeAlgorithm(DiffAlgorithm.getAlgorithm(diffAlg));
}
private static int getInCoreLimit(Config config) {
return config.getInt(
ConfigConstants.CONFIG_MERGE_SECTION, ConfigConstants.CONFIG_KEY_IN_CORE_LIMIT, 10 << 20);
}
private static String[] defaultCommitNames() {
return new String[]{"BASE", "OURS", "THEIRS"}; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
return new String[] { "BASE", "OURS", "THEIRS" }; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
}
private static final Attributes NO_ATTRIBUTES = new Attributes();
@ -240,8 +315,17 @@ protected ResolveMerger(Repository local, boolean inCore) {
super(local);
Config config = local.getConfig();
mergeAlgorithm = getMergeAlgorithm(config);
inCoreLimit = getInCoreLimit(config);
commitNames = defaultCommitNames();
this.inCore = inCore;
if (inCore) {
implicitDirCache = false;
dircache = DirCache.newInCore();
} else {
implicitDirCache = true;
workingTreeOptions = local.getConfig().get(WorkingTreeOptions.KEY);
}
}
/**
@ -268,6 +352,8 @@ protected ResolveMerger(ObjectInserter inserter, Config config) {
mergeAlgorithm = getMergeAlgorithm(config);
commitNames = defaultCommitNames();
inCore = true;
implicitDirCache = false;
dircache = DirCache.newInCore();
}
/**
@ -296,10 +382,82 @@ public void setContentMergeStrategy(ContentMergeStrategy strategy) {
/** {@inheritDoc} */
@Override
protected boolean mergeImpl() throws IOException {
return mergeTrees(mergeBase(), sourceTrees[0], sourceTrees[1],
false);
if (implicitDirCache) {
dircache = nonNullRepo().lockDirCache();
}
if (!inCore) {
checkoutMetadata = new HashMap<>();
cleanupMetadata = new HashMap<>();
}
try {
return mergeTrees(mergeBase(), sourceTrees[0], sourceTrees[1],
false);
} finally {
checkoutMetadata = null;
cleanupMetadata = null;
if (implicitDirCache) {
dircache.unlock();
}
}
}
private void checkout() throws NoWorkTreeException, IOException {
// Iterate in reverse so that "folder/file" is deleted before
// "folder". Otherwise this could result in a failing path because
// of a non-empty directory, for which delete() would fail.
for (int i = toBeDeleted.size() - 1; i >= 0; i--) {
String fileName = toBeDeleted.get(i);
File f = new File(nonNullRepo().getWorkTree(), fileName);
if (!f.delete())
if (!f.isDirectory())
failingPaths.put(fileName,
MergeFailureReason.COULD_NOT_DELETE);
modifiedFiles.add(fileName);
}
for (Map.Entry<String, DirCacheEntry> entry : toBeCheckedOut
.entrySet()) {
DirCacheEntry cacheEntry = entry.getValue();
if (cacheEntry.getFileMode() == FileMode.GITLINK) {
new File(nonNullRepo().getWorkTree(), entry.getKey()).mkdirs();
} else {
DirCacheCheckout.checkoutEntry(db, cacheEntry, reader, false,
checkoutMetadata.get(entry.getKey()));
modifiedFiles.add(entry.getKey());
}
}
}
/**
* Reverts the worktree after an unsuccessful merge. We know that for all
* modified files the old content was in the old index and the index
* contained only stage 0. In case of an inCore operation, just clear the
* list of modified files.
*
* @throws java.io.IOException
* @throws org.eclipse.jgit.errors.CorruptObjectException
* @throws org.eclipse.jgit.errors.NoWorkTreeException
* @since 3.4
*/
protected void cleanUp() throws NoWorkTreeException,
CorruptObjectException,
IOException {
if (inCore) {
modifiedFiles.clear();
return;
}
DirCache dc = nonNullRepo().readDirCache();
Iterator<String> mpathsIt = modifiedFiles.iterator();
while (mpathsIt.hasNext()) {
String mpath = mpathsIt.next();
DirCacheEntry entry = dc.getEntry(mpath);
if (entry != null) {
DirCacheCheckout.checkoutEntry(db, entry, reader, false,
cleanupMetadata.get(mpath));
}
mpathsIt.remove();
}
}
/**
* adds a new path with the specified stage to the index builder
@ -314,9 +472,13 @@ protected boolean mergeImpl() throws IOException {
private DirCacheEntry add(byte[] path, CanonicalTreeParser p, int stage,
Instant lastMod, long len) {
if (p != null && !p.getEntryFileMode().equals(FileMode.TREE)) {
return workTreeUpdater.addExistingToIndex(p.getEntryObjectId(), path,
p.getEntryFileMode(), stage,
lastMod, (int) len);
DirCacheEntry e = new DirCacheEntry(path, stage);
e.setFileMode(p.getEntryFileMode());
e.setObjectId(p.getEntryObjectId());
e.setLastModified(lastMod);
e.setLength(len);
builder.add(e);
return e;
}
return null;
}
@ -331,8 +493,41 @@ private DirCacheEntry add(byte[] path, CanonicalTreeParser p, int stage,
* @return the entry which was added to the index
*/
private DirCacheEntry keep(DirCacheEntry e) {
return workTreeUpdater.addExistingToIndex(e.getObjectId(), e.getRawPath(), e.getFileMode(),
e.getStage(), e.getLastModifiedInstant(), e.getLength());
DirCacheEntry newEntry = new DirCacheEntry(e.getRawPath(),
e.getStage());
newEntry.setFileMode(e.getFileMode());
newEntry.setObjectId(e.getObjectId());
newEntry.setLastModified(e.getLastModifiedInstant());
newEntry.setLength(e.getLength());
builder.add(newEntry);
return newEntry;
}
/**
* Remembers the {@link CheckoutMetadata} for the given path; it may be
* needed in {@link #checkout()} or in {@link #cleanUp()}.
*
* @param map
* to add the metadata to
* @param path
* of the current node
* @param attributes
* to use for determining the metadata
* @throws IOException
* if the smudge filter cannot be determined
* @since 6.1
*/
protected void addCheckoutMetadata(Map<String, CheckoutMetadata> map,
String path, Attributes attributes)
throws IOException {
if (map != null) {
EolStreamType eol = EolStreamTypeUtil.detectStreamType(
OperationType.CHECKOUT_OP, workingTreeOptions,
attributes);
CheckoutMetadata data = new CheckoutMetadata(eol,
tw.getSmudgeCommand(attributes));
map.put(path, data);
}
}
/**
@ -352,17 +547,14 @@ private DirCacheEntry keep(DirCacheEntry e) {
protected void addToCheckout(String path, DirCacheEntry entry,
Attributes[] attributes)
throws IOException {
EolStreamType cleanupStreamType = workTreeUpdater.detectCheckoutStreamType(attributes[T_OURS]);
String cleanupSmudgeCommand = tw.getSmudgeCommand(attributes[T_OURS]);
EolStreamType checkoutStreamType = workTreeUpdater.detectCheckoutStreamType(attributes[T_THEIRS]);
String checkoutSmudgeCommand = tw.getSmudgeCommand(attributes[T_THEIRS]);
workTreeUpdater.addToCheckout(path, entry, cleanupStreamType, cleanupSmudgeCommand,
checkoutStreamType, checkoutSmudgeCommand);
toBeCheckedOut.put(path, entry);
addCheckoutMetadata(cleanupMetadata, path, attributes[T_OURS]);
addCheckoutMetadata(checkoutMetadata, path, attributes[T_THEIRS]);
}
/**
* Remember a path for deletion, and remember its {@link CheckoutMetadata}
* in case it has to be restored in the cleanUp.
* in case it has to be restored in {@link #cleanUp()}.
*
* @param path
* of the entry
@ -376,11 +568,10 @@ protected void addToCheckout(String path, DirCacheEntry entry,
*/
protected void addDeletion(String path, boolean isFile,
Attributes attributes) throws IOException {
File file =
isFile && !nonNullRepo().isBare() ? new File(nonNullRepo().getWorkTree(), path) : null;
EolStreamType streamType = workTreeUpdater.detectCheckoutStreamType(attributes);
String smudgeCommand = tw.getSmudgeCommand(attributes);
workTreeUpdater.deleteFile(path, file, streamType, smudgeCommand);
toBeDeleted.add(path);
if (isFile) {
addCheckoutMetadata(cleanupMetadata, path, attributes);
}
}
/**
@ -424,6 +615,9 @@ protected void addDeletion(String path, boolean isFile,
* @return <code>false</code> if the merge will fail because the index entry
* didn't match ours or the working-dir file was dirty and a
* conflict occurred
* @throws org.eclipse.jgit.errors.MissingObjectException
* @throws org.eclipse.jgit.errors.IncorrectObjectTypeException
* @throws org.eclipse.jgit.errors.CorruptObjectException
* @throws java.io.IOException
* @since 6.1
*/
@ -431,21 +625,20 @@ protected boolean processEntry(CanonicalTreeParser base,
CanonicalTreeParser ours, CanonicalTreeParser theirs,
DirCacheBuildIterator index, WorkingTreeIterator work,
boolean ignoreConflicts, Attributes[] attributes)
throws IOException {
throws MissingObjectException, IncorrectObjectTypeException,
CorruptObjectException, IOException {
enterSubtree = true;
final int modeO = tw.getRawMode(T_OURS);
final int modeT = tw.getRawMode(T_THEIRS);
final int modeB = tw.getRawMode(T_BASE);
boolean gitLinkMerging = isGitLink(modeO) || isGitLink(modeT)
|| isGitLink(modeB);
if (modeO == 0 && modeT == 0 && modeB == 0) {
if (modeO == 0 && modeT == 0 && modeB == 0)
// File is either untracked or new, staged but uncommitted
return true;
}
if (isIndexDirty()) {
if (isIndexDirty())
return false;
}
DirCacheEntry ourDce = null;
@ -513,9 +706,8 @@ protected boolean processEntry(CanonicalTreeParser base,
if (modeB == modeT && tw.idEqual(T_BASE, T_THEIRS)) {
// THEIRS was not changed compared to BASE. All changes must be in
// OURS. OURS is chosen. We can keep the existing entry.
if (ourDce != null) {
if (ourDce != null)
keep(ourDce);
}
// no checkout needed!
return true;
}
@ -525,9 +717,8 @@ protected boolean processEntry(CanonicalTreeParser base,
// THEIRS. THEIRS is chosen.
// Check worktree before checking out THEIRS
if (isWorktreeDirty(work, ourDce)) {
if (isWorktreeDirty(work, ourDce))
return false;
}
if (nonTree(modeT)) {
// we know about length and lastMod only after we have written
// the new content.
@ -568,15 +759,12 @@ protected boolean processEntry(CanonicalTreeParser base,
enterSubtree = false;
return true;
}
if (nonTree(modeB)) {
if (nonTree(modeB))
add(tw.getRawPath(), base, DirCacheEntry.STAGE_1, EPOCH, 0);
}
if (nonTree(modeO)) {
if (nonTree(modeO))
add(tw.getRawPath(), ours, DirCacheEntry.STAGE_2, EPOCH, 0);
}
if (nonTree(modeT)) {
if (nonTree(modeT))
add(tw.getRawPath(), theirs, DirCacheEntry.STAGE_3, EPOCH, 0);
}
unmergedPaths.add(tw.getPathString());
enterSubtree = false;
return true;
@ -586,9 +774,8 @@ protected boolean processEntry(CanonicalTreeParser base,
// tells us we are in a subtree because of index or working-dir).
// If they are both folders no content-merge is required - we can
// return here.
if (!nonTree(modeO)) {
if (!nonTree(modeO))
return true;
}
// ours and theirs are both files, just fall out of the if block
// and do the content merge
@ -619,16 +806,16 @@ protected boolean processEntry(CanonicalTreeParser base,
} else if (!attributes[T_OURS].canBeContentMerged()) {
// File marked as binary
switch (getContentMergeStrategy()) {
case OURS:
keep(ourDce);
return true;
case THEIRS:
DirCacheEntry theirEntry = add(tw.getRawPath(), theirs,
DirCacheEntry.STAGE_0, EPOCH, 0);
addToCheckout(tw.getPathString(), theirEntry, attributes);
return true;
default:
break;
case OURS:
keep(ourDce);
return true;
case THEIRS:
DirCacheEntry theirEntry = add(tw.getRawPath(), theirs,
DirCacheEntry.STAGE_0, EPOCH, 0);
addToCheckout(tw.getPathString(), theirEntry, attributes);
return true;
default:
break;
}
add(tw.getRawPath(), base, DirCacheEntry.STAGE_1, EPOCH, 0);
add(tw.getRawPath(), ours, DirCacheEntry.STAGE_2, EPOCH, 0);
@ -650,18 +837,18 @@ protected boolean processEntry(CanonicalTreeParser base,
getContentMergeStrategy());
} catch (BinaryBlobException e) {
switch (getContentMergeStrategy()) {
case OURS:
keep(ourDce);
return true;
case THEIRS:
DirCacheEntry theirEntry = add(tw.getRawPath(), theirs,
DirCacheEntry.STAGE_0, EPOCH, 0);
addToCheckout(tw.getPathString(), theirEntry, attributes);
return true;
default:
result = new MergeResult<>(Collections.emptyList());
result.setContainsConflicts(true);
break;
case OURS:
keep(ourDce);
return true;
case THEIRS:
DirCacheEntry theirEntry = add(tw.getRawPath(), theirs,
DirCacheEntry.STAGE_0, EPOCH, 0);
addToCheckout(tw.getPathString(), theirEntry, attributes);
return true;
default:
result = new MergeResult<>(Collections.emptyList());
result.setContainsConflicts(true);
break;
}
}
if (ignoreConflicts) {
@ -672,9 +859,11 @@ protected boolean processEntry(CanonicalTreeParser base,
if (result.containsConflicts() && !ignoreConflicts) {
unmergedPaths.add(currentPath);
}
workTreeUpdater.markAsModified(currentPath);
// Entry is null - only adds the metadata.
addToCheckout(currentPath, null, attributes);
modifiedFiles.add(currentPath);
addCheckoutMetadata(cleanupMetadata, currentPath,
attributes[T_OURS]);
addCheckoutMetadata(checkoutMetadata, currentPath,
attributes[T_THEIRS]);
} else if (modeO != modeT) {
// OURS or THEIRS has been deleted
if (((modeO != 0 && !tw.idEqual(T_BASE, T_OURS)) || (modeT != 0 && !tw
@ -786,9 +975,8 @@ private MergeResult<RawText> contentMerge(CanonicalTreeParser base,
}
private boolean isIndexDirty() {
if (inCore) {
if (inCore)
return false;
}
final int modeI = tw.getRawMode(T_INDEX);
final int modeO = tw.getRawMode(T_OURS);
@ -796,42 +984,37 @@ private boolean isIndexDirty() {
// Index entry has to match ours to be considered clean
final boolean isDirty = nonTree(modeI)
&& !(modeO == modeI && tw.idEqual(T_INDEX, T_OURS));
if (isDirty) {
if (isDirty)
failingPaths
.put(tw.getPathString(), MergeFailureReason.DIRTY_INDEX);
}
return isDirty;
}
private boolean isWorktreeDirty(WorkingTreeIterator work,
DirCacheEntry ourDce) throws IOException {
if (work == null) {
if (work == null)
return false;
}
final int modeF = tw.getRawMode(T_FILE);
final int modeO = tw.getRawMode(T_OURS);
// Worktree entry has to match ours to be considered clean
boolean isDirty;
if (ourDce != null) {
if (ourDce != null)
isDirty = work.isModified(ourDce, true, reader);
} else {
else {
isDirty = work.isModeDifferent(modeO);
if (!isDirty && nonTree(modeF)) {
if (!isDirty && nonTree(modeF))
isDirty = !tw.idEqual(T_FILE, T_OURS);
}
}
// Ignore existing empty directories
if (isDirty && modeF == FileMode.TYPE_TREE
&& modeO == FileMode.TYPE_MISSING) {
&& modeO == FileMode.TYPE_MISSING)
isDirty = false;
}
if (isDirty) {
if (isDirty)
failingPaths.put(tw.getPathString(),
MergeFailureReason.DIRTY_WORKTREE);
}
return isDirty;
}
@ -846,12 +1029,14 @@ private boolean isWorktreeDirty(WorkingTreeIterator work,
* @param theirs
* @param result
* @param attributes
* @throws FileNotFoundException
* @throws IOException
*/
private void updateIndex(CanonicalTreeParser base,
CanonicalTreeParser ours, CanonicalTreeParser theirs,
MergeResult<RawText> result, Attributes attributes)
throws IOException {
throws FileNotFoundException,
IOException {
TemporaryBuffer rawMerged = null;
try {
rawMerged = doMerge(result);
@ -870,17 +1055,21 @@ private void updateIndex(CanonicalTreeParser base,
// No conflict occurred, the file will contain fully merged content.
// The index will be populated with the new merged version.
Instant lastModified =
mergedFile == null ? null : nonNullRepo().getFS().lastModifiedInstant(mergedFile);
DirCacheEntry dce = new DirCacheEntry(tw.getPathString());
// Set the mode for the new content. Fall back to REGULAR_FILE if
// we can't merge modes of OURS and THEIRS.
int newMode = mergeFileModes(tw.getRawMode(0), tw.getRawMode(1),
tw.getRawMode(2));
FileMode mode = newMode == FileMode.MISSING.getBits()
? FileMode.REGULAR_FILE : FileMode.fromBits(newMode);
workTreeUpdater.insertToIndex(rawMerged.openInputStream(), tw.getPathString().getBytes(UTF_8), mode,
DirCacheEntry.STAGE_0, lastModified, (int) rawMerged.length(),
attributes.get(Constants.ATTR_MERGE));
dce.setFileMode(newMode == FileMode.MISSING.getBits()
? FileMode.REGULAR_FILE : FileMode.fromBits(newMode));
if (mergedFile != null) {
dce.setLastModified(
nonNullRepo().getFS().lastModifiedInstant(mergedFile));
dce.setLength((int) mergedFile.length());
}
dce.setObjectId(insertMergeResult(rawMerged, attributes));
builder.add(dce);
} finally {
if (rawMerged != null) {
rawMerged.destroy();
@ -896,30 +1085,34 @@ private void updateIndex(CanonicalTreeParser base,
* @param attributes
* the files .gitattributes entries
* @return the working tree file to which the merged content was written.
* @throws FileNotFoundException
* @throws IOException
*/
private File writeMergedFile(TemporaryBuffer rawMerged,
Attributes attributes)
throws IOException {
throws FileNotFoundException, IOException {
File workTree = nonNullRepo().getWorkTree();
FS fs = nonNullRepo().getFS();
File of = new File(workTree, tw.getPathString());
File parentFolder = of.getParentFile();
EolStreamType eol = workTreeUpdater.detectCheckoutStreamType(attributes);
if (!fs.exists(parentFolder)) {
parentFolder.mkdirs();
}
StreamLoader contentLoader = WorkTreeUpdater.createStreamLoader(rawMerged::openInputStream,
rawMerged.length());
workTreeUpdater.updateFileWithContent(contentLoader,
eol, tw.getSmudgeCommand(attributes), of.getPath(), of, false);
EolStreamType streamType = EolStreamTypeUtil.detectStreamType(
OperationType.CHECKOUT_OP, workingTreeOptions,
attributes);
try (OutputStream os = EolStreamTypeUtil.wrapOutputStream(
new BufferedOutputStream(new FileOutputStream(of)),
streamType)) {
rawMerged.writeTo(os, null);
}
return of;
}
private TemporaryBuffer doMerge(MergeResult<RawText> result)
throws IOException {
TemporaryBuffer.LocalFile buf = new TemporaryBuffer.LocalFile(
db != null ? nonNullRepo().getDirectory() : null, workTreeUpdater.getInCoreFileSizeLimit());
db != null ? nonNullRepo().getDirectory() : null, inCoreLimit);
boolean success = false;
try {
new MergeFormatter().formatMerge(buf, result,
@ -934,6 +1127,16 @@ private TemporaryBuffer doMerge(MergeResult<RawText> result)
return buf;
}
private ObjectId insertMergeResult(TemporaryBuffer buf,
Attributes attributes) throws IOException {
InputStream in = buf.openInputStream();
try (LfsInputStream is = LfsFactory.getInstance().applyCleanFilter(
getRepository(), in,
buf.length(), attributes.get(Constants.ATTR_MERGE))) {
return getObjectInserter().insert(OBJ_BLOB, is.getLength(), is);
}
}
/**
* Try to merge filemodes. If only ours or theirs have changed the mode
* (compared to base) we choose that one. If ours and theirs have equal
@ -951,26 +1154,22 @@ private TemporaryBuffer doMerge(MergeResult<RawText> result)
* conflict
*/
private int mergeFileModes(int modeB, int modeO, int modeT) {
if (modeO == modeT) {
if (modeO == modeT)
return modeO;
}
if (modeB == modeO) {
if (modeB == modeO)
// Base equal to Ours -> chooses Theirs if that is not missing
return (modeT == FileMode.MISSING.getBits()) ? modeO : modeT;
}
if (modeB == modeT) {
if (modeB == modeT)
// Base equal to Theirs -> chooses Ours if that is not missing
return (modeO == FileMode.MISSING.getBits()) ? modeT : modeO;
}
return FileMode.MISSING.getBits();
}
private RawText getRawText(ObjectId id,
Attributes attributes)
throws IOException, BinaryBlobException {
if (id.equals(ObjectId.zeroId())) {
return new RawText(new byte[]{});
}
if (id.equals(ObjectId.zeroId()))
return new RawText(new byte[] {});
ObjectLoader loader = LfsFactory.getInstance().applySmudgeFilter(
getRepository(), reader.open(id, OBJ_BLOB),
@ -1034,7 +1233,7 @@ public List<String> getUnmergedPaths() {
* superset of the files listed by {@link #getUnmergedPaths()}.
*/
public List<String> getModifiedFiles() {
return workTreeUpdater != null ? workTreeUpdater.getModifiedFiles() : modifiedFiles;
return modifiedFiles;
}
/**
@ -1048,7 +1247,7 @@ public List<String> getModifiedFiles() {
* for this path.
*/
public Map<String, DirCacheEntry> getToBeCheckedOut() {
return workTreeUpdater.getToBeCheckedOut();
return toBeCheckedOut;
}
/**
@ -1098,6 +1297,7 @@ public boolean failed() {
*/
public void setDirCache(DirCache dc) {
this.dircache = dc;
implicitDirCache = false;
}
/**
@ -1152,48 +1352,53 @@ public void setWorkingTreeIterator(WorkingTreeIterator workingTreeIterator) {
protected boolean mergeTrees(AbstractTreeIterator baseTree,
RevTree headTree, RevTree mergeTree, boolean ignoreConflicts)
throws IOException {
try {
workTreeUpdater = inCore ?
WorkTreeUpdater.createInCoreWorkTreeUpdater(db, dircache, getObjectInserter()) :
WorkTreeUpdater.createWorkTreeUpdater(db, dircache);
dircache = workTreeUpdater.getLockedDirCache();
tw = new NameConflictTreeWalk(db, reader);
tw.addTree(baseTree);
tw.setHead(tw.addTree(headTree));
tw.addTree(mergeTree);
DirCacheBuildIterator buildIt = workTreeUpdater.createDirCacheBuildIterator();
int dciPos = tw.addTree(buildIt);
if (workingTreeIterator != null) {
tw.addTree(workingTreeIterator);
workingTreeIterator.setDirCacheIterator(tw, dciPos);
} else {
tw.setFilter(TreeFilter.ANY_DIFF);
}
builder = dircache.builder();
DirCacheBuildIterator buildIt = new DirCacheBuildIterator(builder);
if (!mergeTreeWalk(tw, ignoreConflicts)) {
return false;
}
workTreeUpdater.writeWorkTreeChanges(true);
if (getUnmergedPaths().isEmpty() && !failed()) {
WorkTreeUpdater.Result result = workTreeUpdater.writeIndexChanges();
resultTree = result.treeId;
modifiedFiles = result.modifiedFiles;
for (String f : result.failedToDelete) {
failingPaths.put(f, MergeFailureReason.COULD_NOT_DELETE);
}
return result.failedToDelete.isEmpty();
}
resultTree = null;
return false;
} finally {
if (modifiedFiles.isEmpty()) {
modifiedFiles = workTreeUpdater.getModifiedFiles();
}
workTreeUpdater.close();
workTreeUpdater = null;
tw = new NameConflictTreeWalk(db, reader);
tw.addTree(baseTree);
tw.setHead(tw.addTree(headTree));
tw.addTree(mergeTree);
int dciPos = tw.addTree(buildIt);
if (workingTreeIterator != null) {
tw.addTree(workingTreeIterator);
workingTreeIterator.setDirCacheIterator(tw, dciPos);
} else {
tw.setFilter(TreeFilter.ANY_DIFF);
}
if (!mergeTreeWalk(tw, ignoreConflicts)) {
return false;
}
if (!inCore) {
// No problem found. The only thing left to be done is to
// checkout all files from "theirs" which have been selected to
// go into the new index.
checkout();
// All content-merges are successfully done. If we can now write the
// new index we are on quite safe ground. Even if the checkout of
// files coming from "theirs" fails the user can work around such
// failures by checking out the index again.
if (!builder.commit()) {
cleanUp();
throw new IndexWriteException();
}
builder = null;
} else {
builder.finish();
builder = null;
}
if (getUnmergedPaths().isEmpty() && !failed()) {
resultTree = dircache.writeTree(getObjectInserter());
return true;
}
resultTree = null;
return false;
}
/**
@ -1214,8 +1419,8 @@ protected boolean mergeTreeWalk(TreeWalk treeWalk, boolean ignoreConflicts)
boolean hasAttributeNodeProvider = treeWalk
.getAttributesNodeProvider() != null;
while (treeWalk.next()) {
Attributes[] attributes = {NO_ATTRIBUTES, NO_ATTRIBUTES,
NO_ATTRIBUTES};
Attributes[] attributes = { NO_ATTRIBUTES, NO_ATTRIBUTES,
NO_ATTRIBUTES };
if (hasAttributeNodeProvider) {
attributes[T_BASE] = treeWalk.getAttributes(T_BASE);
attributes[T_OURS] = treeWalk.getAttributes(T_OURS);
@ -1229,12 +1434,11 @@ protected boolean mergeTreeWalk(TreeWalk treeWalk, boolean ignoreConflicts)
hasWorkingTreeIterator ? treeWalk.getTree(T_FILE,
WorkingTreeIterator.class) : null,
ignoreConflicts, attributes)) {
workTreeUpdater.revertModifiedFiles();
cleanUp();
return false;
}
if (treeWalk.isSubtree() && enterSubtree) {
if (treeWalk.isSubtree() && enterSubtree)
treeWalk.enterSubtree();
}
}
return true;
}


@ -1,694 +0,0 @@
/*
* Copyright (C) 2022, Google Inc. and others
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Distribution License v. 1.0 which is available at
* https://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
package org.eclipse.jgit.util;
import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
import java.io.BufferedInputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;
import java.time.Instant;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.eclipse.jgit.annotations.Nullable;
import org.eclipse.jgit.attributes.Attribute;
import org.eclipse.jgit.attributes.Attributes;
import org.eclipse.jgit.dircache.DirCache;
import org.eclipse.jgit.dircache.DirCacheBuildIterator;
import org.eclipse.jgit.dircache.DirCacheBuilder;
import org.eclipse.jgit.dircache.DirCacheCheckout;
import org.eclipse.jgit.dircache.DirCacheCheckout.CheckoutMetadata;
import org.eclipse.jgit.dircache.DirCacheEntry;
import org.eclipse.jgit.errors.IndexWriteException;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.NoWorkTreeException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.lib.ConfigConstants;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.CoreConfig.EolStreamType;
import org.eclipse.jgit.lib.FileMode;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectInserter;
import org.eclipse.jgit.lib.ObjectLoader;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.ObjectStream;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.treewalk.TreeWalk.OperationType;
import org.eclipse.jgit.treewalk.WorkingTreeOptions;
import org.eclipse.jgit.util.LfsFactory.LfsInputStream;
import org.eclipse.jgit.util.io.EolStreamTypeUtil;
/**
* Handles work tree updates on both the checkout and the index.
* <p>
* You should use a single instance for all of your file changes. In case of an error, make sure
* your instance is released, and initiate a new one if necessary.
*/
public class WorkTreeUpdater implements Closeable {
/**
* The result of writing the index changes.
*/
public static class Result {
/**
* Files modified during this operation.
*/
public List<String> modifiedFiles = new LinkedList<>();
/**
* Files in this list were failed to be deleted.
*/
public List<String> failedToDelete = new LinkedList<>();
/**
* Modified tree ID if any, or null otherwise.
*/
public ObjectId treeId = null;
}
Result result = new Result();
/**
* The repository this handler operates on.
*/
@Nullable
private final Repository repo;
/**
* Set to true if this operation should work in-memory. The repo's dircache and
* workingtree are not touched by this method. Eventually needed files are
* created as temporary files and a new empty, in-memory dircache will be
used instead of the repo's one. Often used for bare repos where the repo
* doesn't even have a workingtree and dircache.
*/
private final boolean inCore;
private final ObjectInserter inserter;
private final ObjectReader reader;
private DirCache dirCache;
private boolean implicitDirCache = false;
/**
* Builder to update the dir cache during this operation.
*/
private DirCacheBuilder builder = null;
/**
* The {@link WorkingTreeOptions} are needed to determine line endings for affected files.
*/
private WorkingTreeOptions workingTreeOptions;
/**
* The size limit (bytes) which controls whether a file is stored in {@code Heap} or {@code LocalFile}
* during the operation.
*/
private int inCoreFileSizeLimit;
/**
* If the operation has nothing to do for a file but check it out at the end of the operation, it
* can be added here.
*/
private final Map<String, DirCacheEntry> toBeCheckedOut = new HashMap<>();
/**
* Files in this list will be deleted from the local copy at the end of the operation.
*/
private final TreeMap<String, File> toBeDeleted = new TreeMap<>();
/**
* Keeps {@link CheckoutMetadata} for {@link #checkout()}.
*/
private Map<String, CheckoutMetadata> checkoutMetadata;
/**
* Keeps {@link CheckoutMetadata} for {@link #revertModifiedFiles()}.
*/
private Map<String, CheckoutMetadata> cleanupMetadata;
/**
* Whether the changes were successfully written
*/
private boolean indexChangesWritten = false;
/**
* @param repo the {@link org.eclipse.jgit.lib.Repository}.
* @param dirCache if set, use the provided dir cache. Otherwise, use the default repository one
*/
private WorkTreeUpdater(
Repository repo,
DirCache dirCache) {
this.repo = repo;
this.dirCache = dirCache;
this.inCore = false;
this.inserter = repo.newObjectInserter();
this.reader = inserter.newReader();
this.workingTreeOptions = repo.getConfig().get(WorkingTreeOptions.KEY);
this.checkoutMetadata = new HashMap<>();
this.cleanupMetadata = new HashMap<>();
this.inCoreFileSizeLimit = setInCoreFileSizeLimit(repo.getConfig());
}
/**
* @param repo the {@link org.eclipse.jgit.lib.Repository}.
* @param dirCache if set, use the provided dir cache. Otherwise, use the default repository one
* @return an IO handler.
*/
public static WorkTreeUpdater createWorkTreeUpdater(Repository repo, DirCache dirCache) {
return new WorkTreeUpdater(repo, dirCache);
}
/**
* @param repo the {@link org.eclipse.jgit.lib.Repository}.
* @param dirCache if set, use the provided dir cache. Otherwise, creates a new one
* @param oi to use for writing the modified objects with.
*/
private WorkTreeUpdater(
Repository repo,
DirCache dirCache,
ObjectInserter oi) {
this.repo = repo;
this.dirCache = dirCache;
this.inserter = oi;
this.inCore = true;
this.reader = oi.newReader();
if (repo != null) {
this.inCoreFileSizeLimit = setInCoreFileSizeLimit(repo.getConfig());
}
}
/**
* @param repo the {@link org.eclipse.jgit.lib.Repository}.
* @param dirCache if set, use the provided dir cache. Otherwise, creates a new one
* @param oi to use for writing the modified objects with.
* @return an IO handler.
*/
public static WorkTreeUpdater createInCoreWorkTreeUpdater(Repository repo, DirCache dirCache,
ObjectInserter oi) {
return new WorkTreeUpdater(repo, dirCache, oi);
}
/**
* Something that can supply an {@link InputStream}.
*/
public interface StreamSupplier {
/**
* Loads the input stream.
*
* @return the loaded stream
* @throws IOException if any reading error occurs
*/
InputStream load() throws IOException;
}
/**
* We write the patch result to a {@link org.eclipse.jgit.util.TemporaryBuffer} and then use
* {@link DirCacheCheckout}.getContent() to run the result through the CR-LF and smudge filters.
* DirCacheCheckout needs an ObjectLoader, not a TemporaryBuffer, so this class bridges between
* the two, making any Stream provided by a {@link StreamSupplier} look like an ordinary git blob
* to DirCacheCheckout.
*/
public static class StreamLoader extends ObjectLoader {
private final StreamSupplier data;
private final long size;
private StreamLoader(StreamSupplier data, long length) {
this.data = data;
this.size = length;
}
@Override
public int getType() {
return Constants.OBJ_BLOB;
}
@Override
public long getSize() {
return size;
}
@Override
public boolean isLarge() {
return true;
}
@Override
public byte[] getCachedBytes() throws LargeObjectException {
throw new LargeObjectException();
}
@Override
public ObjectStream openStream() throws IOException {
return new ObjectStream.Filter(getType(), getSize(), new BufferedInputStream(data.load()));
}
}
/**
* Creates stream loader for the given supplier.
*
* @param supplier to wrap
* @param length of the supplied content
* @return the result stream loader
*/
public static StreamLoader createStreamLoader(StreamSupplier supplier, long length) {
return new StreamLoader(supplier, length);
}
private static int setInCoreFileSizeLimit(Config config) {
return config.getInt(
ConfigConstants.CONFIG_MERGE_SECTION, ConfigConstants.CONFIG_KEY_IN_CORE_LIMIT, 10 << 20);
}
/**
* Gets the size limit for in-core files in this config.
*
* @return the size
*/
public int getInCoreFileSizeLimit() {
return inCoreFileSizeLimit;
}
/**
* Gets dir cache for the repo. Locked if not inCore.
*
* @return the result dir cache
* @throws IOException in case the dir cache cannot be read
*/
public DirCache getLockedDirCache() throws IOException {
if (dirCache == null) {
implicitDirCache = true;
if (inCore) {
dirCache = DirCache.newInCore();
} else {
dirCache = nonNullNonBareRepo().lockDirCache();
}
}
if (builder == null) {
builder = dirCache.builder();
}
return dirCache;
}
/**
* Creates build iterator for the handler's builder.
*
* @return the iterator
*/
public DirCacheBuildIterator createDirCacheBuildIterator() {
return new DirCacheBuildIterator(builder);
}
/**
* Writes the changes to the WorkTree (but not the index).
*
* @param shouldCheckoutTheirs whether to check out files from "theirs" before committing the changes
* @throws IOException if any of the writes fail
*/
public void writeWorkTreeChanges(boolean shouldCheckoutTheirs) throws IOException {
handleDeletedFiles();
if (inCore) {
builder.finish();
return;
}
if (shouldCheckoutTheirs) {
// No problem found. The only thing left to be done is to
// check out all files from "theirs" which have been selected to
// go into the new index.
checkout();
}
// All content operations are successfully done. If we can now write the
// new index we are on quite safe ground. Even if the checkout of
// files coming from "theirs" fails the user can work around such
// failures by checking out the index again.
if (!builder.commit()) {
revertModifiedFiles();
throw new IndexWriteException();
}
}
/**
* Writes the changes to the index.
*
* @return the Result of the operation.
* @throws IOException if any of the writes fail
*/
public Result writeIndexChanges() throws IOException {
result.treeId = getLockedDirCache().writeTree(inserter);
indexChangesWritten = true;
return result;
}
/**
* Adds a {@link DirCacheEntry} for direct checkout and remembers its {@link CheckoutMetadata}.
*
* @param path of the entry
* @param entry to add
* @param cleanupStreamType to use for the cleanup metadata
* @param cleanupSmudgeCommand to use for the cleanup metadata
* @param checkoutStreamType to use for the checkout metadata
* @param checkoutSmudgeCommand to use for the checkout metadata
* @since 6.1
*/
public void addToCheckout(
String path, DirCacheEntry entry, EolStreamType cleanupStreamType,
String cleanupSmudgeCommand, EolStreamType checkoutStreamType, String checkoutSmudgeCommand) {
if (entry != null) {
// In some cases, we just want to add the metadata.
toBeCheckedOut.put(path, entry);
}
addCheckoutMetadata(cleanupMetadata, path, cleanupStreamType, cleanupSmudgeCommand);
addCheckoutMetadata(checkoutMetadata, path, checkoutStreamType, checkoutSmudgeCommand);
}
/**
* Get a map which maps the paths of files which have to be checked out because the operation
* created new fully-merged content for this file into the index.
*
* <p>This means: the operation wrote a new stage 0 entry for this path.</p>
*
* @return the map
*/
public Map<String, DirCacheEntry> getToBeCheckedOut() {
return toBeCheckedOut;
}
/**
* Deletes the given file
* <p>
* Note the actual deletion is only done in {@link #writeWorkTreeChanges}
*
* @param path of the file to be deleted
* @param file to be deleted
* @param streamType to use for cleanup metadata
* @param smudgeCommand to use for cleanup metadata
* @throws IOException if the file cannot be deleted
*/
public void deleteFile(String path, File file, EolStreamType streamType, String smudgeCommand)
throws IOException {
toBeDeleted.put(path, file);
if (file != null && file.isFile()) {
addCheckoutMetadata(cleanupMetadata, path, streamType, smudgeCommand);
}
}
/**
* Remembers the {@link CheckoutMetadata} for the given path; it may be needed in {@link
* #checkout()} or in {@link #revertModifiedFiles()}.
*
* @param map to add the metadata to
* @param path of the current node
* @param streamType to use for the metadata
* @param smudgeCommand to use for the metadata
* @since 6.1
*/
private void addCheckoutMetadata(
Map<String, CheckoutMetadata> map, String path, EolStreamType streamType,
String smudgeCommand) {
if (inCore || map == null) {
return;
}
map.put(path, new CheckoutMetadata(streamType, smudgeCommand));
}
/**
* Detects if CRLF conversion has been configured.
* <p></p>
* See {@link EolStreamTypeUtil#detectStreamType} for more info.
*
* @param attributes of the file for which the type is to be detected
* @return the detected type
*/
public EolStreamType detectCheckoutStreamType(Attributes attributes) {
if (inCore) {
return null;
}
return EolStreamTypeUtil.detectStreamType(
OperationType.CHECKOUT_OP, workingTreeOptions, attributes);
}
private void handleDeletedFiles() {
// Iterate in reverse so that "folder/file" is deleted before
// "folder". Otherwise, this could result in a failing path because
// of a non-empty directory, for which delete() would fail.
for (String path : toBeDeleted.descendingKeySet()) {
File file = inCore ? null : toBeDeleted.get(path);
if (file != null && !file.delete()) {
if (!file.isDirectory()) {
result.failedToDelete.add(path);
}
}
result.modifiedFiles.add(path);
}
}
/**
* Marks the given path as modified in the operation.
*
* @param path to mark as modified
*/
public void markAsModified(String path) {
result.modifiedFiles.add(path);
}
/**
* Gets the list of files which were modified in this operation.
*
* @return the list
*/
public List<String> getModifiedFiles() {
return result.modifiedFiles;
}
private void checkout() throws NoWorkTreeException, IOException {
// Check out everything remembered in toBeCheckedOut; gitlink entries
// only get their directory created.
for (Map.Entry<String, DirCacheEntry> entry : toBeCheckedOut.entrySet()) {
DirCacheEntry dirCacheEntry = entry.getValue();
if (dirCacheEntry.getFileMode() == FileMode.GITLINK) {
new File(nonNullNonBareRepo().getWorkTree(), entry.getKey()).mkdirs();
} else {
DirCacheCheckout.checkoutEntry(
repo, dirCacheEntry, reader, false, checkoutMetadata.get(entry.getKey()));
result.modifiedFiles.add(entry.getKey());
}
}
}
/**
* Reverts any uncommitted changes in the worktree. We know that for all modified files the
* old content was in the old index and the index contained only stage 0. In case if inCore
* operation just clear the history of modified files.
*
* @throws java.io.IOException in case the cleaning up failed
*/
public void revertModifiedFiles() throws IOException {
if (inCore) {
result.modifiedFiles.clear();
return;
}
if (indexChangesWritten) {
return;
}
for (String path : result.modifiedFiles) {
DirCacheEntry entry = dirCache.getEntry(path);
if (entry != null) {
DirCacheCheckout.checkoutEntry(
repo, entry, reader, false, cleanupMetadata.get(path));
}
}
}
@Override
public void close() throws IOException {
if (implicitDirCache) {
dirCache.unlock();
}
}
/**
* Updates the file in the checkout with the given content.
*
* @param resultStreamLoader with the content to be updated
* @param streamType for parsing the content
* @param smudgeCommand for formatting the content
* @param path of the file to be updated
* @param file to be updated
* @param safeWrite whether the content should be written to a buffer first
* @throws IOException if the {@link CheckoutMetadata} cannot be determined
*/
public void updateFileWithContent(
StreamLoader resultStreamLoader,
EolStreamType streamType,
String smudgeCommand,
String path,
File file,
boolean safeWrite)
throws IOException {
if (inCore) {
return;
}
CheckoutMetadata checkoutMetadata = new CheckoutMetadata(streamType, smudgeCommand);
if (safeWrite) {
try (org.eclipse.jgit.util.TemporaryBuffer buffer =
new org.eclipse.jgit.util.TemporaryBuffer.LocalFile(null)) {
// Write to a buffer and copy to the file only if everything was fine.
DirCacheCheckout.getContent(
repo, path, checkoutMetadata, resultStreamLoader, null, buffer);
InputStream bufIn = buffer.openInputStream();
Files.copy(bufIn, file.toPath(), StandardCopyOption.REPLACE_EXISTING);
}
return;
}
try (OutputStream outputStream = new FileOutputStream(file)) {
DirCacheCheckout.getContent(
repo, path, checkoutMetadata, resultStreamLoader, null, outputStream);
}
}
/**
* Creates a path with the given content, and adds it at the specified stage to the index builder.
*
* @param inputStream with the content to be updated
* @param path of the file to be updated
* @param fileMode of the modified file
* @param entryStage of the new entry
* @param lastModified instant of the modified file
* @param len of the content
* @param lfsAttribute for checking for LFS enablement
* @return the entry which was added to the index
* @throws IOException if inserting the content fails
*/
public DirCacheEntry insertToIndex(
InputStream inputStream,
byte[] path,
FileMode fileMode,
int entryStage,
Instant lastModified,
int len,
Attribute lfsAttribute) throws IOException {
StreamLoader contentLoader = createStreamLoader(() -> inputStream, len);
return insertToIndex(contentLoader, path, fileMode, entryStage, lastModified, len,
lfsAttribute);
}
/**
* Creates a path with the given content, and adds it at the specified stage to the index builder.
*
* @param resultStreamLoader with the content to be updated
* @param path of the file to be updated
* @param fileMode of the modified file
* @param entryStage of the new entry
* @param lastModified instant of the modified file
* @param len of the content
* @param lfsAttribute for checking for LFS enablement
* @return the entry which was added to the index
* @throws IOException if inserting the content fails
*/
public DirCacheEntry insertToIndex(
StreamLoader resultStreamLoader,
byte[] path,
FileMode fileMode,
int entryStage,
Instant lastModified,
int len,
Attribute lfsAttribute) throws IOException {
return addExistingToIndex(insertResult(resultStreamLoader, lfsAttribute),
path, fileMode, entryStage, lastModified, len);
}
/**
* Adds a path with the specified stage to the index builder
*
* @param objectId of the existing object to add
* @param path of the modified file
* @param fileMode of the modified file
* @param entryStage of the new entry
* @param lastModified instant of the modified file
* @param len of the modified file content
* @return the entry which was added to the index
*/
public DirCacheEntry addExistingToIndex(
ObjectId objectId,
byte[] path,
FileMode fileMode,
int entryStage,
Instant lastModified,
int len) {
DirCacheEntry dce = new DirCacheEntry(path, entryStage);
dce.setFileMode(fileMode);
if (lastModified != null) {
dce.setLastModified(lastModified);
}
dce.setLength(inCore ? 0 : len);
dce.setObjectId(objectId);
builder.add(dce);
return dce;
}
private ObjectId insertResult(StreamLoader resultStreamLoader, Attribute lfsAttribute)
throws IOException {
try (LfsInputStream is =
org.eclipse.jgit.util.LfsFactory.getInstance()
.applyCleanFilter(
repo,
resultStreamLoader.data.load(),
resultStreamLoader.size,
lfsAttribute)) {
return inserter.insert(OBJ_BLOB, is.getLength(), is);
}
}
/**
* Gets non-null repository instance
*
* @return non-null repository instance
* @throws java.lang.NullPointerException if the handler was constructed without a repository.
*/
private Repository nonNullRepo() throws NullPointerException {
if (repo == null) {
throw new NullPointerException(JGitText.get().repositoryIsRequired);
}
return repo;
}
/**
* Gets non-null and non-bare repository instance
*
* @return non-null and non-bare repository instance
* @throws java.lang.NullPointerException if the handler was constructed without a repository.
* @throws NoWorkTreeException if the handler was constructed with a bare repository
*/
private Repository nonNullNonBareRepo() throws NullPointerException, NoWorkTreeException {
if (nonNullRepo().isBare()) {
throw new NoWorkTreeException();
}
return repo;
}
}
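
For reference, the surface this revert restores on ResolveMerger can be
driven directly again; a minimal in-core usage sketch, assuming "repo",
"ours" and "theirs" already exist:

	// Post-revert usage sketch; not part of the commit itself.
	ResolveMerger merger = (ResolveMerger) MergeStrategy.RECURSIVE
			.newMerger(repo, true /* inCore */);
	if (merger.merge(ours, theirs)) {
		ObjectId tree = merger.getResultTreeId();
	} else {
		// Conflict data lives in plain fields populated by mergeTrees().
		List<String> conflicts = merger.getUnmergedPaths();
		List<String> touched = merger.getModifiedFiles();
	}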