HBASE-1218 Implement in-memory column
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@791021 13f79535-47bb-0310-9956-ffa450edef68
parent faacd72a21
commit 0afd4cffa8
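Editor's note: this commit moves the in-memory setting from the table level down to the column family. HTableDescriptor loses its isInMemory/setInMemory accessors, Store reads the flag from the family via family.isInMemory(), and the flag is threaded through StoreFile and HFile.Reader so LruBlockCache can give those blocks in-memory priority. A hedged usage sketch (0.20-era client API; the table and family names are hypothetical, and this snippet is not part of the diff):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;

// Sketch: mark one column family in-memory. Store picks this up through
// family.isInMemory() (see the Store.java hunk below).
public class InMemoryFamilyExample {
  public static HTableDescriptor makeSchema() {
    HTableDescriptor htd = new HTableDescriptor("mytable");     // hypothetical table name
    HColumnDescriptor hcd = new HColumnDescriptor("myfamily");  // hypothetical family name
    hcd.setInMemory(true);  // blocks from this family get in-memory cache priority
    htd.addFamily(hcd);
    return htd;
  }
}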
@@ -437,6 +437,7 @@ Release 0.20.0 - Unreleased
              (Jon Gray via Stack)
   HBASE-1607 Redo MemStore heap sizing to be accurate, testable, and more
              like new LruBlockCache (Jon Gray via Stack)
+  HBASE-1218 Implement in-memory column (Jon Gray via Stack)
 
  OPTIMIZATIONS
   HBASE-1412 Change values for delete column and column family in KeyValue
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.Ta
 @XmlType(propOrder = {"name","columns"})
 public class TableSchemaModel implements Serializable, IProtobufWrapper {
   private static final long serialVersionUID = 1L;
-  private static final QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
   private static final QName IS_META = new QName(HTableDescriptor.IS_META);
   private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT);
   private static final QName READONLY = new QName(HTableDescriptor.READONLY);
@@ -176,15 +175,6 @@ public class TableSchemaModel implements Serializable, IProtobufWrapper {
   // cannot be standard bean type getters and setters, otherwise this would
   // confuse JAXB
-
-  /**
-   * @return true if IN_MEMORY attribute exists and is true
-   */
-  public boolean __getInMemory() {
-    Object o = attrs.get(IN_MEMORY);
-    return o != null ?
-      Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_IN_MEMORY;
-  }
 
   /**
    * @return true if IS_META attribute exists and is truel
    */
@@ -210,13 +200,6 @@ public class TableSchemaModel implements Serializable, IProtobufWrapper {
       Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_READONLY;
   }
-
-  /**
-   * @param value desired value of IN_MEMORY attribute
-   */
-  public void __setInMemory(boolean value) {
-    attrs.put(IN_MEMORY, Boolean.toString(value));
-  }
 
   /**
    * @param value desired value of IS_META attribute
    */
@@ -273,10 +256,6 @@ public class TableSchemaModel implements Serializable, IProtobufWrapper {
       }
       builder.addColumns(familyBuilder);
     }
-    if (attrs.containsKey(IN_MEMORY)) {
-      builder.setInMemory(
-        Boolean.valueOf(attrs.get(IN_MEMORY).toString()));
-    }
     if (attrs.containsKey(READONLY)) {
       builder.setReadOnly(
         Boolean.valueOf(attrs.get(READONLY).toString()));
@@ -293,9 +272,6 @@ public class TableSchemaModel implements Serializable, IProtobufWrapper {
     for (TableSchema.Attribute attr: builder.getAttrsList()) {
       this.addAttribute(attr.getName(), attr.getValue());
     }
-    if (builder.hasInMemory()) {
-      this.addAttribute(HConstants.IN_MEMORY, builder.getInMemory());
-    }
     if (builder.hasReadOnly()) {
       this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly());
     }
@@ -90,8 +90,6 @@ ISerializable {
   private static final ImmutableBytesWritable TRUE =
     new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
 
-  public static final boolean DEFAULT_IN_MEMORY = false;
-
   public static final boolean DEFAULT_READONLY = false;
 
   public static final int DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*64;
@@ -352,25 +350,6 @@ ISerializable {
     setValue(Bytes.toBytes(key), Bytes.toBytes(value));
   }
-
-  /**
-   * @return true if all columns in the table should be kept in the
-   * HRegionServer cache only
-   */
-  public boolean isInMemory() {
-    String value = getValue(HConstants.IN_MEMORY);
-    if (value != null)
-      return Boolean.valueOf(value).booleanValue();
-    return DEFAULT_IN_MEMORY;
-  }
-
-  /**
-   * @param inMemory True if all of the columns in the table should be kept in
-   * the HRegionServer cache only.
-   */
-  public void setInMemory(boolean inMemory) {
-    setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
-  }
 
   /**
    * @return true if all columns in the table should be read only
    */
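Editor's note: the two HTableDescriptor hunks above delete the table-wide DEFAULT_IN_MEMORY constant and the isInMemory/setInMemory accessors, leaving READONLY as the only table-level boolean here. A commented before/after sketch; the HColumnDescriptor accessor is assumed from Store's use of family.isInMemory(), not shown in this diff:

// Before this commit (table-wide, removed above):
//   htd.setInMemory(true);   // HTableDescriptor accessor, deleted here
// After this commit (per column family; accessor assumed):
//   hcd.setInMemory(true);   // HColumnDescriptor, consulted by Store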
@@ -75,14 +75,6 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor {
     throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
-
-  /**
-   * @see org.apache.hadoop.hbase.HTableDescriptor#setInMemory(boolean)
-   */
-  @Override
-  public void setInMemory(boolean inMemory) {
-    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-  }
 
   /**
    * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean)
    */
@@ -63,7 +63,7 @@ public class HalfHFileReader extends HFile.Reader {
   public HalfHFileReader(final FileSystem fs, final Path p, final BlockCache c,
     final Reference r)
   throws IOException {
-    super(fs, p, c);
+    super(fs, p, c, false);
     // This is not actual midkey for this half-file; its just border
     // around which we split top and bottom. Have to look in files to find
     // actual last and first keys for bottom and top halves. Half-files don't
@@ -658,6 +658,9 @@ public class HFile {
     public int cacheHits = 0;
     public int blockLoads = 0;
 
+    // Whether file is from in-memory store
+    private boolean inMemory = false;
+
     // Name for this object used when logging or in toString. Is either
     // the result of a toString on the stream or else is toString of passed
     // file Path plus metadata key/value pairs.
@@ -668,7 +671,7 @@
      */
     @SuppressWarnings("unused")
     private Reader() throws IOException {
-      this(null, null, null);
+      this(null, null, null, false);
     }
 
     /**
@@ -680,9 +683,9 @@
      * @param cache block cache. Pass null if none.
      * @throws IOException
      */
-    public Reader(FileSystem fs, Path path, BlockCache cache)
+    public Reader(FileSystem fs, Path path, BlockCache cache, boolean inMemory)
     throws IOException {
-      this(fs.open(path), fs.getFileStatus(path).getLen(), cache);
+      this(fs.open(path), fs.getFileStatus(path).getLen(), cache, inMemory);
       this.closeIStream = true;
       this.name = path.toString();
     }
@@ -698,12 +701,13 @@
      * @throws IOException
      */
     public Reader(final FSDataInputStream fsdis, final long size,
-        final BlockCache cache) {
+        final BlockCache cache, final boolean inMemory) {
       this.cache = cache;
       this.fileSize = size;
       this.istream = fsdis;
       this.closeIStream = false;
       this.name = this.istream.toString();
+      this.inMemory = inMemory;
     }
 
     @Override
@@ -711,6 +715,7 @@
       return "reader=" + this.name +
           (!isFileInfoLoaded()? "":
             ", compression=" + this.compressAlgo.getName() +
+            ", inMemory=" + this.inMemory +
             ", firstKey=" + toStringFirstKey() +
             ", lastKey=" + toStringLastKey()) +
           ", avgKeyLen=" + this.avgKeyLen +
@@ -731,6 +736,10 @@
       return this.fileSize;
     }
 
+    public boolean inMemory() {
+      return this.inMemory;
+    }
+
     /**
      * Read in the index and file info.
      * @return A map of fileinfo data.
@@ -933,7 +942,7 @@
      */
     void cacheBlock(String blockName, ByteBuffer buf) {
       if (cache != null) {
-        cache.cacheBlock(blockName, buf.duplicate());
+        cache.cacheBlock(blockName, buf.duplicate(), inMemory);
       }
     }
 
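Editor's note: the HFile hunks above widen both Reader constructors with an inMemory flag, record it, expose it via inMemory(), and forward it to the block cache on every cacheBlock call (which implies a three-argument BlockCache.cacheBlock overload introduced by this commit). A hedged sketch of opening a reader with the new signature; the helper class and method are illustrative, not part of this diff:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.HFile;

// Sketch: open a reader for a file belonging to an in-memory family.
// Every block it caches is then marked for in-memory priority.
public class InMemoryReaderExample {
  static HFile.Reader open(FileSystem fs, Path path, BlockCache cache)
  throws IOException {
    HFile.Reader reader = new HFile.Reader(fs, path, cache, true);
    reader.loadFileInfo();  // read trailer, file info and block index
    return reader;
  }
}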
@@ -1259,7 +1268,7 @@
      */
     public static class CompactionReader extends Reader {
       public CompactionReader(Reader reader) {
-        super(reader.istream, reader.fileSize, reader.cache);
+        super(reader.istream, reader.fileSize, reader.cache, reader.inMemory);
         super.blockIndex = reader.blockIndex;
         super.trailer = reader.trailer;
         super.lastkey = reader.lastkey;
@@ -1625,7 +1634,7 @@
       return;
     }
 
-    HFile.Reader reader = new HFile.Reader(fs, path, null);
+    HFile.Reader reader = new HFile.Reader(fs, path, null, false);
     Map<byte[],byte[]> fileInfo = reader.loadFileInfo();
 
     // scan thru and count the # of unique rows.
@@ -372,8 +372,16 @@ public class LruBlockCache implements BlockCache, HeapSize {
         remainingBuckets--;
       }
 
+      float singleMB = ((float)bucketSingle.totalSize())/((float)(1024*1024));
+      float multiMB = ((float)bucketMulti.totalSize())/((float)(1024*1024));
+      float memoryMB = ((float)bucketMemory.totalSize())/((float)(1024*1024));
+
       LOG.debug("Block cache LRU eviction completed. " +
-        "Freed " + bytesFreed + " bytes");
+        "Freed " + bytesFreed + " bytes. " +
+        "Priority Sizes: " +
+        "Single=" + singleMB + "MB (" + bucketSingle.totalSize() + "), " +
+        "Multi=" + multiMB + "MB (" + bucketMulti.totalSize() + ")," +
+        "Memory=" + memoryMB + "MB (" + bucketMemory.totalSize() + ")");
 
     } finally {
       stats.evict();
@@ -424,6 +432,10 @@ public class LruBlockCache implements BlockCache, HeapSize {
       return totalSize - bucketSize;
     }
 
+    public long totalSize() {
+      return totalSize;
+    }
+
     public int compareTo(BlockBucket that) {
       if(this.overflow() == that.overflow()) return 0;
       return this.overflow() > that.overflow() ? 1 : -1;
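Editor's note: the two LruBlockCache hunks above surface per-priority sizes — eviction now logs single/multi/memory bucket totals, and BlockBucket gains a totalSize() accessor alongside its overflow-based compareTo. A self-contained sketch (illustrative names and numbers, not the actual LruBlockCache internals) of how overflow-ordered buckets decide where eviction starts:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Sketch: eviction drains the bucket with the largest overflow first,
// mirroring the compareTo() shown in the hunk above.
public class BucketSketch {
  static class BlockBucket implements Comparable<BlockBucket> {
    final String name;
    final long totalSize;   // bytes currently held
    final long bucketSize;  // bytes this bucket is allowed to hold

    BlockBucket(String name, long totalSize, long bucketSize) {
      this.name = name;
      this.totalSize = totalSize;
      this.bucketSize = bucketSize;
    }

    long overflow() { return totalSize - bucketSize; }

    public int compareTo(BlockBucket that) {
      if (this.overflow() == that.overflow()) return 0;
      return this.overflow() > that.overflow() ? 1 : -1;
    }
  }

  public static void main(String[] args) {
    List<BlockBucket> buckets = new ArrayList<BlockBucket>();
    buckets.add(new BlockBucket("single", 6L << 20, 4L << 20));  // 2MB over
    buckets.add(new BlockBucket("multi",  5L << 20, 4L << 20));  // 1MB over
    buckets.add(new BlockBucket("memory", 1L << 20, 4L << 20));  // under quota
    Collections.sort(buckets);                    // largest overflow sorts last
    BlockBucket worst = buckets.get(buckets.size() - 1);
    System.out.println("Evict first from: " + worst.name);       // "single"
  }
}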
@@ -110,6 +110,7 @@ public class Store implements HConstants, HeapSize {
   final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   final byte [] storeName;
   private final String storeNameStr;
+  private final boolean inMemory;
 
   /*
    * Sorted Map of readers keyed by maximum edit sequence id (Most recent should
@@ -191,6 +192,9 @@ public class Store implements HConstants, HeapSize {
     this.compactionThreshold =
       conf.getInt("hbase.hstore.compactionThreshold", 3);
 
+    // Check if this is in-memory store
+    this.inMemory = family.isInMemory();
+
     // By default we split region if a file > DEFAULT_MAX_FILE_SIZE.
     long maxFileSize = info.getTableDesc().getMaxFileSize();
     if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) {
@@ -366,7 +370,7 @@ public class Store implements HConstants, HeapSize {
     }
     StoreFile curfile = null;
     try {
-      curfile = new StoreFile(fs, p, blockcache, this.conf);
+      curfile = new StoreFile(fs, p, blockcache, this.conf, this.inMemory);
     } catch (IOException ioe) {
       LOG.warn("Failed open of " + p + "; presumption is that file was " +
         "corrupted at flush and lost edits picked up by commit log replay. " +
@@ -523,7 +527,7 @@ public class Store implements HConstants, HeapSize {
       }
     }
     StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache,
-      this.conf);
+      this.conf, this.inMemory);
     Reader r = sf.getReader();
     this.storeSize += r.length();
     if(LOG.isDebugEnabled()) {
@@ -922,7 +926,7 @@ public class Store implements HConstants, HeapSize {
       return;
     }
     StoreFile finalCompactedFile = new StoreFile(this.fs, p, blockcache,
-      this.conf);
+      this.conf, this.inMemory);
     this.lock.writeLock().lock();
     try {
       try {
@@ -76,6 +76,8 @@ public class StoreFile implements HConstants {
   private Path referencePath;
   // Should the block cache be used or not.
   private boolean blockcache;
+  // Is this from an in-memory store
+  private boolean inMemory;
 
   // Keys for metadata stored in backing HFile.
   private static final byte [] MAX_SEQ_ID_KEY = Bytes.toBytes("MAX_SEQ_ID_KEY");
@@ -113,12 +115,13 @@ public class StoreFile implements HConstants {
   * @throws IOException When opening the reader fails.
   */
  StoreFile(final FileSystem fs, final Path p, final boolean blockcache,
-      final HBaseConfiguration conf)
+      final HBaseConfiguration conf, final boolean inMemory)
  throws IOException {
    this.conf = conf;
    this.fs = fs;
    this.path = p;
    this.blockcache = blockcache;
+    this.inMemory = inMemory;
    if (isReference(p)) {
      this.reference = Reference.read(fs, p);
      this.referencePath = getReferredToFile(this.path);
@@ -263,7 +266,8 @@ public class StoreFile implements HConstants {
       this.reader = new HalfHFileReader(this.fs, this.referencePath,
         getBlockCache(), this.reference);
     } else {
-      this.reader = new Reader(this.fs, this.path, getBlockCache());
+      this.reader = new Reader(this.fs, this.path, getBlockCache(),
+        this.inMemory);
     }
     // Load up indices and fileinfo.
     Map<byte [], byte []> map = this.reader.loadFileInfo();
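Editor's note: with the StoreFile hunks above the plumbing is complete. A comment-only sketch of the assumed end-to-end flow of the flag, each step taken from a hunk in this commit:

// HColumnDescriptor.isInMemory()                      // per-family schema flag
//   -> Store:      this.inMemory = family.isInMemory();
//   -> StoreFile:  new StoreFile(fs, p, blockcache, conf, inMemory)
//   -> HFile:      new Reader(fs, path, getBlockCache(), inMemory)
//   -> BlockCache: cache.cacheBlock(blockName, buf.duplicate(), inMemory)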
@@ -226,7 +226,7 @@ public class HFilePerformanceEvaluation {
 
     @Override
     void setUp() throws Exception {
-      reader = new HFile.Reader(this.fs, this.mf, null);
+      reader = new HFile.Reader(this.fs, this.mf, null, false);
       this.reader.loadFileInfo();
     }
 
@@ -66,7 +66,7 @@ public class RandomSeek {
     long start = System.currentTimeMillis();
     SimpleBlockCache cache = new SimpleBlockCache();
     //LruBlockCache cache = new LruBlockCache();
-    Reader reader = new HFile.Reader(lfs, path, cache);
+    Reader reader = new HFile.Reader(lfs, path, cache, false);
     reader.loadFileInfo();
     System.out.println(reader.trailer);
     long end = System.currentTimeMillis();
@@ -122,10 +122,9 @@ public class TestHFile extends HBaseTestCase {
     fout.close();
     FSDataInputStream fin = fs.open(ncTFile);
     Reader reader = new Reader(fs.open(ncTFile),
-      fs.getFileStatus(ncTFile).getLen(), null);
+      fs.getFileStatus(ncTFile).getLen(), null, false);
     // Load up the index.
     reader.loadFileInfo();
     LOG.info(reader);
     HFileScanner scanner = reader.getScanner();
     // Align scanner at start of the file.
     scanner.seekTo();
@@ -186,7 +185,7 @@ public class TestHFile extends HBaseTestCase {
     fout.close();
     FSDataInputStream fin = fs.open(mFile);
     Reader reader = new Reader(fs.open(mFile), this.fs.getFileStatus(mFile)
-      .getLen(), null);
+      .getLen(), null, false);
     reader.loadFileInfo();
     // No data -- this should return false.
     assertFalse(reader.getScanner().seekTo());
@@ -210,7 +209,7 @@ public class TestHFile extends HBaseTestCase {
     writer.append("foo".getBytes(), "value".getBytes());
     writer.close();
     fout.close();
-    Reader reader = new Reader(fs, mFile, null);
+    Reader reader = new Reader(fs, mFile, null, false);
     reader.loadFileInfo();
     assertNull(reader.getMetaBlock("non-existant"));
   }
@@ -236,9 +236,8 @@ public class TestHFilePerformance extends TestCase {
 
         if ("HFile".equals(fileType)){
           HFile.Reader reader = new HFile.Reader(fs.open(path),
-            fs.getFileStatus(path).getLen(), null);
+            fs.getFileStatus(path).getLen(), null, false);
           reader.loadFileInfo();
           System.out.println(reader);
           switch (method) {
 
           case 0:
@@ -155,9 +155,8 @@ public class TestHFileSeek extends TestCase {
     long totalBytes = 0;
     FSDataInputStream fsdis = fs.open(path);
     Reader reader =
-      new Reader(fsdis, fs.getFileStatus(path).getLen(), null);
+      new Reader(fsdis, fs.getFileStatus(path).getLen(), null, false);
     reader.loadFileInfo();
     System.out.println(reader);
     KeySampler kSampler =
       new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
         keyLenGen);
@@ -49,7 +49,7 @@ public class TestSeekTo extends HBaseTestCase {
   }
   public void testSeekBefore() throws Exception {
     Path p = makeNewFile();
-    HFile.Reader reader = new HFile.Reader(fs, p, null);
+    HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
     HFileScanner scanner = reader.getScanner();
     assertEquals(false, scanner.seekBefore(Bytes.toBytes("a")));
@@ -82,7 +82,7 @@ public class TestSeekTo extends HBaseTestCase {
 
   public void testSeekTo() throws Exception {
     Path p = makeNewFile();
-    HFile.Reader reader = new HFile.Reader(fs, p, null);
+    HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
     assertEquals(2, reader.blockIndex.count);
     HFileScanner scanner = reader.getScanner();
@@ -102,7 +102,7 @@ public class TestSeekTo extends HBaseTestCase {
 
   public void testBlockContainingKey() throws Exception {
     Path p = makeNewFile();
-    HFile.Reader reader = new HFile.Reader(fs, p, null);
+    HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
     System.out.println(reader.blockIndex.toString());
     // falls before the start of the file.
@@ -74,7 +74,7 @@ public class TestStoreFile extends HBaseTestCase {
       new Path(new Path(this.testDir, "regionname"), "familyname"),
       2 * 1024, null, null);
     writeStoreFile(writer);
-    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf));
+    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf, false));
   }
 
   /*
@@ -113,7 +113,7 @@ public class TestStoreFile extends HBaseTestCase {
     HFile.Writer writer = StoreFile.getWriter(this.fs, dir, 8 * 1024, null,
       null);
     writeStoreFile(writer);
-    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf);
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf, false);
     HFile.Reader reader = hsf.getReader();
     // Split on a row, not in middle of row. Midkey returned by reader
     // may be in middle of row. Create new one with empty column and
@@ -124,7 +124,7 @@ public class TestStoreFile extends HBaseTestCase {
     byte [] finalKey = hsk.getRow();
     // Make a reference
     Path refPath = StoreFile.split(fs, dir, hsf, reader.midkey(), Range.top);
-    StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf);
+    StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf, false);
     // Now confirm that I can read from the reference and that it only gets
     // keys from top half of the file.
     HFileScanner s = refHsf.getReader().getScanner();
@@ -158,8 +158,8 @@ public class TestStoreFile extends HBaseTestCase {
     Path bottomPath = StoreFile.split(this.fs, bottomDir,
       f, midkey, Range.bottom);
     // Make readers on top and bottom.
-    HFile.Reader top = new StoreFile(this.fs, topPath, true, conf).getReader();
-    HFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
+    HFile.Reader top = new StoreFile(this.fs, topPath, true, conf, false).getReader();
+    HFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf, false).getReader();
     ByteBuffer previous = null;
     LOG.info("Midkey: " + Bytes.toString(midkey));
     byte [] midkeyBytes = new HStoreKey(midkey).getBytes();
@@ -212,8 +212,8 @@ public class TestStoreFile extends HBaseTestCase {
     topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
     bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
       Range.bottom);
-    top = new StoreFile(this.fs, topPath, true, conf).getReader();
-    bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
+    top = new StoreFile(this.fs, topPath, true, conf, false).getReader();
+    bottom = new StoreFile(this.fs, bottomPath, true, conf, false).getReader();
     bottomScanner = bottom.getScanner();
     int count = 0;
     while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
@@ -256,8 +256,8 @@ public class TestStoreFile extends HBaseTestCase {
     topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
     bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
       Range.bottom);
-    top = new StoreFile(this.fs, topPath, true, conf).getReader();
-    bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
+    top = new StoreFile(this.fs, topPath, true, conf, false).getReader();
+    bottom = new StoreFile(this.fs, bottomPath, true, conf, false).getReader();
     first = true;
     bottomScanner = bottom.getScanner();
     while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||