Rename DfsPackKey to DfsStreamKey

This renaming supports reusing DfsStreamKey in a future commit
to index other PackExt type streams inside of the DfsBlockCache.

Change-Id: Ib52d374e47724ccb837f4fbab1fc85c486c5b408
This commit is contained in:
Shawn Pearce 2017-07-03 17:22:52 -07:00
parent dfb9884dbc
commit e924de5295
8 changed files with 36 additions and 39 deletions

View File

@@ -57,13 +57,13 @@
public class DeltaBaseCacheTest {
private static final int SZ = 512;
private DfsPackKey key;
private DfsStreamKey key;
private DeltaBaseCache cache;
private TestRng rng;
@Before
public void setUp() {
key = new DfsPackKey();
key = new DfsStreamKey();
cache = new DeltaBaseCache(SZ);
rng = new TestRng(getClass().getSimpleName());
}

View File

@@ -75,7 +75,7 @@ private static int hash(long position) {
table = new Entry[1 << TABLE_BITS];
}
Entry get(DfsPackKey key, long position) {
Entry get(DfsStreamKey key, long position) {
Entry e = table[hash(position)];
for (; e != null; e = e.tableNext) {
if (e.offset == position && key.equals(e.pack)) {
@@ -86,7 +86,7 @@ Entry get(DfsPackKey key, long position) {
return null;
}
void put(DfsPackKey key, long offset, int objectType, byte[] data) {
void put(DfsStreamKey key, long offset, int objectType, byte[] data) {
if (data.length > maxByteCount)
return; // Too large to cache.
@@ -189,7 +189,7 @@ int getMemoryUsedByTableForTest() {
}
static class Entry {
final DfsPackKey pack;
final DfsStreamKey pack;
final long offset;
final int type;
final byte[] data;
@@ -198,7 +198,7 @@ static class Entry {
Entry lruPrev;
Entry lruNext;
Entry(DfsPackKey key, long offset, int type, byte[] data) {
Entry(DfsStreamKey key, long offset, int type, byte[] data) {
this.pack = key;
this.offset = offset;
this.type = type;

View File

@@ -54,7 +54,7 @@
/** A cached slice of a {@link DfsPackFile}. */
final class DfsBlock {
final DfsPackKey pack;
final DfsStreamKey stream;
final long start;
@@ -62,8 +62,8 @@ final class DfsBlock {
private final byte[] block;
DfsBlock(DfsPackKey p, long pos, byte[] buf) {
pack = p;
DfsBlock(DfsStreamKey p, long pos, byte[] buf) {
stream = p;
start = pos;
end = pos + buf.length;
block = buf;
@@ -73,8 +73,8 @@ int size() {
return block.length;
}
boolean contains(DfsPackKey want, long pos) {
return pack == want && start <= pos && pos < end;
boolean contains(DfsStreamKey want, long pos) {
return stream == want && start <= pos && pos < end;
}
int copy(long pos, byte[] dstbuf, int dstoff, int cnt) {

View File

@@ -195,7 +195,7 @@ private DfsBlockCache(final DfsBlockCacheConfig cfg) {
blockSizeShift = Integer.numberOfTrailingZeros(blockSize);
clockLock = new ReentrantLock(true /* fair */);
clockHand = new Ref<>(new DfsPackKey(), -1, 0, null);
clockHand = new Ref<>(new DfsStreamKey(), -1, 0, null);
clockHand.next = clockHand;
packCache = new ConcurrentHashMap<>(
@@ -260,7 +260,7 @@ public Collection<DfsPackFile> getPackFiles() {
return packFiles;
}
DfsPackFile getOrCreate(DfsPackDescription dsc, DfsPackKey key) {
DfsPackFile getOrCreate(DfsPackDescription dsc, DfsStreamKey key) {
// TODO This table grows without bound. It needs to clean up
// entries that aren't in cache anymore, and aren't being used
// by a live DfsObjDatabase reference.
@@ -277,7 +277,7 @@ DfsPackFile getOrCreate(DfsPackDescription dsc, DfsPackKey key) {
return v; // another thread
} else {
return new DfsPackFile(
this, dsc, key != null ? key : new DfsPackKey());
this, dsc, key != null ? key : new DfsStreamKey());
}
});
}
@@ -320,7 +320,7 @@ DfsBlock getOrLoad(DfsPackFile pack, long position, DfsReader ctx,
final long requestedPosition = position;
position = pack.alignToBlock(position);
DfsPackKey key = pack.key;
DfsStreamKey key = pack.key;
int slot = slot(key, position);
HashEntry e1 = table.get(slot);
DfsBlock v = scan(e1, key, position);
@@ -442,10 +442,10 @@ private void addToClock(Ref ref, int credit) {
}
void put(DfsBlock v) {
put(v.pack, v.start, v.size(), v);
put(v.stream, v.start, v.size(), v);
}
<T> Ref<T> put(DfsPackKey key, long pos, int size, T v) {
<T> Ref<T> put(DfsStreamKey key, long pos, int size, T v) {
int slot = slot(key, pos);
HashEntry e1 = table.get(slot);
Ref<T> ref = scanRef(e1, key, pos);
@@ -481,12 +481,12 @@ <T> Ref<T> put(DfsPackKey key, long pos, int size, T v) {
return ref;
}
boolean contains(DfsPackKey key, long position) {
boolean contains(DfsStreamKey key, long position) {
return scan(table.get(slot(key, position)), key, position) != null;
}
@SuppressWarnings("unchecked")
<T> T get(DfsPackKey key, long position) {
<T> T get(DfsStreamKey key, long position) {
T val = (T) scan(table.get(slot(key, position)), key, position);
if (val == null)
statMiss.incrementAndGet();
@@ -495,13 +495,13 @@ <T> T get(DfsPackKey key, long position) {
return val;
}
private <T> T scan(HashEntry n, DfsPackKey pack, long position) {
private <T> T scan(HashEntry n, DfsStreamKey pack, long position) {
Ref<T> r = scanRef(n, pack, position);
return r != null ? r.get() : null;
}
@SuppressWarnings("unchecked")
private <T> Ref<T> scanRef(HashEntry n, DfsPackKey pack, long position) {
private <T> Ref<T> scanRef(HashEntry n, DfsStreamKey pack, long position) {
for (; n != null; n = n.next) {
Ref<T> r = n.ref;
if (r.pack == pack && r.position == position)
@@ -514,11 +514,11 @@ void remove(DfsPackFile pack) {
packCache.remove(pack.getPackDescription());
}
private int slot(DfsPackKey pack, long position) {
private int slot(DfsStreamKey pack, long position) {
return (hash(pack.hash, position) >>> 1) % tableSize;
}
private ReentrantLock lockFor(DfsPackKey pack, long position) {
private ReentrantLock lockFor(DfsStreamKey pack, long position) {
return loadLocks[(hash(pack.hash, position) >>> 1) % loadLocks.length];
}
@@ -545,14 +545,14 @@ private static final class HashEntry {
}
static final class Ref<T> {
final DfsPackKey pack;
final DfsStreamKey pack;
final long position;
final int size;
volatile T value;
Ref next;
volatile boolean hot;
Ref(DfsPackKey pack, long position, int size, T v) {
Ref(DfsStreamKey pack, long position, int size, T v) {
this.pack = pack;
this.position = position;
this.size = size;

View File

@@ -104,7 +104,7 @@ public class DfsInserter extends ObjectInserter {
ObjectIdOwnerMap<PackedObjectInfo> objectMap;
DfsBlockCache cache;
DfsPackKey packKey;
DfsStreamKey packKey;
DfsPackDescription packDsc;
PackStream packOut;
private boolean rollback;
@@ -282,7 +282,7 @@ private void beginPack() throws IOException {
rollback = true;
packDsc = db.newPack(DfsObjDatabase.PackSource.INSERT);
packOut = new PackStream(db.writeFile(packDsc, PACK));
packKey = new DfsPackKey();
packKey = new DfsStreamKey();
// Write the header as though it were a single object pack.
byte[] buf = packOut.hdrBuf;
@@ -633,11 +633,11 @@ private class StreamLoader extends ObjectLoader {
private final int type;
private final long size;
private final DfsPackKey srcPack;
private final DfsStreamKey srcPack;
private final long pos;
StreamLoader(ObjectId id, int type, long sz,
DfsPackKey key, long pos) {
DfsStreamKey key, long pos) {
this.id = id;
this.type = type;
this.size = sz;

View File

@@ -114,7 +114,7 @@ public final class DfsPackFile {
private final DfsPackDescription packDesc;
/** Unique identity of this pack while in-memory. */
final DfsPackKey key;
final DfsStreamKey key;
/**
* Total number of bytes in this pack file.
@@ -171,7 +171,7 @@ public final class DfsPackFile {
* @param key
* interned key used to identify blocks in the block cache.
*/
DfsPackFile(DfsBlockCache cache, DfsPackDescription desc, DfsPackKey key) {
DfsPackFile(DfsBlockCache cache, DfsPackDescription desc, DfsStreamKey key) {
this.cache = cache;
this.packDesc = desc;
this.key = key;

View File

@@ -94,7 +94,7 @@ public class DfsPackParser extends PackParser {
private DfsPackDescription packDsc;
/** Key used during delta resolution reading delta chains. */
private DfsPackKey packKey;
private DfsStreamKey packKey;
/** If the index was small enough, the entire index after writing. */
private PackIndex packIndex;
@@ -206,7 +206,7 @@ protected void onPackHeader(long objectCount) throws IOException {
}
packDsc = objdb.newPack(DfsObjDatabase.PackSource.RECEIVE);
packKey = new DfsPackKey();
packKey = new DfsStreamKey();
out = objdb.writeFile(packDsc, PACK);
int size = out.blockSize();

View File

@@ -45,16 +45,13 @@
import java.util.concurrent.atomic.AtomicLong;
final class DfsPackKey {
final class DfsStreamKey {
final int hash;
final AtomicLong cachedSize = new AtomicLong();
final AtomicLong cachedSize;
DfsPackKey() {
DfsStreamKey() {
// Multiply by 31 here so we can more directly combine with another
// value without doing the multiply there.
//
hash = System.identityHashCode(this) * 31;
cachedSize = new AtomicLong();
}
}