From 0afd4cffa86461566604ed49077552350d27793c Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Fri, 3 Jul 2009 21:29:21 +0000 Subject: [PATCH] HBASE-1218 Implement in-memory column git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@791021 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES.txt | 1 + .../stargate/model/TableSchemaModel.java | 24 ------------------ .../apache/hadoop/hbase/HTableDescriptor.java | 21 ---------------- .../client/UnmodifyableHTableDescriptor.java | 10 +------- .../hadoop/hbase/io/HalfHFileReader.java | 2 +- .../apache/hadoop/hbase/io/hfile/HFile.java | 25 +++++++++++++------ .../hadoop/hbase/io/hfile/LruBlockCache.java | 14 ++++++++++- .../hadoop/hbase/regionserver/Store.java | 12 ++++++--- .../hadoop/hbase/regionserver/StoreFile.java | 8 ++++-- .../hbase/HFilePerformanceEvaluation.java | 2 +- .../hadoop/hbase/io/hfile/RandomSeek.java | 2 +- .../hadoop/hbase/io/hfile/TestHFile.java | 9 +++---- .../hbase/io/hfile/TestHFilePerformance.java | 5 ++-- .../hadoop/hbase/io/hfile/TestHFileSeek.java | 3 +-- .../hadoop/hbase/io/hfile/TestSeekTo.java | 6 ++--- .../hbase/regionserver/TestStoreFile.java | 18 ++++++------- 16 files changed, 68 insertions(+), 94 deletions(-) diff --git a/CHANGES.txt b/CHANGES.txt index 1365e08d9dc..3fb7a36e15d 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -437,6 +437,7 @@ Release 0.20.0 - Unreleased (Jon Gray via Stack) HBASE-1607 Redo MemStore heap sizing to be accurate, testable, and more like new LruBlockCache (Jon Gray via Stack) + HBASE-1218 Implement in-memory column (Jon Gray via Stack) OPTIMIZATIONS HBASE-1412 Change values for delete column and column family in KeyValue diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java index 495fa9b9fa9..8f0dccb39d3 100644 --- a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java +++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.Ta @XmlType(propOrder = {"name","columns"}) public class TableSchemaModel implements Serializable, IProtobufWrapper { private static final long serialVersionUID = 1L; - private static final QName IN_MEMORY = new QName(HConstants.IN_MEMORY); private static final QName IS_META = new QName(HTableDescriptor.IS_META); private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT); private static final QName READONLY = new QName(HTableDescriptor.READONLY); @@ -176,15 +175,6 @@ public class TableSchemaModel implements Serializable, IProtobufWrapper { // cannot be standard bean type getters and setters, otherwise this would // confuse JAXB - /** - * @return true if IN_MEMORY attribute exists and is true - */ - public boolean __getInMemory() { - Object o = attrs.get(IN_MEMORY); - return o != null ? 
- Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_IN_MEMORY; - } - /** * @return true if IS_META attribute exists and is truel */ @@ -210,13 +200,6 @@ public class TableSchemaModel implements Serializable, IProtobufWrapper { Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_READONLY; } - /** - * @param value desired value of IN_MEMORY attribute - */ - public void __setInMemory(boolean value) { - attrs.put(IN_MEMORY, Boolean.toString(value)); - } - /** * @param value desired value of IS_META attribute */ @@ -273,10 +256,6 @@ public class TableSchemaModel implements Serializable, IProtobufWrapper { } builder.addColumns(familyBuilder); } - if (attrs.containsKey(IN_MEMORY)) { - builder.setInMemory( - Boolean.valueOf(attrs.get(IN_MEMORY).toString())); - } if (attrs.containsKey(READONLY)) { builder.setReadOnly( Boolean.valueOf(attrs.get(READONLY).toString())); @@ -293,9 +272,6 @@ public class TableSchemaModel implements Serializable, IProtobufWrapper { for (TableSchema.Attribute attr: builder.getAttrsList()) { this.addAttribute(attr.getName(), attr.getValue()); } - if (builder.hasInMemory()) { - this.addAttribute(HConstants.IN_MEMORY, builder.getInMemory()); - } if (builder.hasReadOnly()) { this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly()); } diff --git a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java index 5b3753965f2..f580563b261 100644 --- a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -90,8 +90,6 @@ ISerializable { private static final ImmutableBytesWritable TRUE = new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString())); - public static final boolean DEFAULT_IN_MEMORY = false; - public static final boolean DEFAULT_READONLY = false; public static final int DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*64; @@ -352,25 +350,6 @@ ISerializable { setValue(Bytes.toBytes(key), Bytes.toBytes(value)); } - /** - * @return true if all columns in the table should be kept in the - * HRegionServer cache only - */ - public boolean isInMemory() { - String value = getValue(HConstants.IN_MEMORY); - if (value != null) - return Boolean.valueOf(value).booleanValue(); - return DEFAULT_IN_MEMORY; - } - - /** - * @param inMemory True if all of the columns in the table should be kept in - * the HRegionServer cache only. 
- */ - public void setInMemory(boolean inMemory) { - setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory)); - } - /** * @return true if all columns in the table should be read only */ diff --git a/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java b/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java index 34628cbffaa..ecd872bf78f 100644 --- a/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java @@ -74,15 +74,7 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor { public HColumnDescriptor removeFamily(final byte [] column) { throw new UnsupportedOperationException("HTableDescriptor is read-only"); } - - /** - * @see org.apache.hadoop.hbase.HTableDescriptor#setInMemory(boolean) - */ - @Override - public void setInMemory(boolean inMemory) { - throw new UnsupportedOperationException("HTableDescriptor is read-only"); - } - + /** * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean) */ diff --git a/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java b/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java index e06b169a829..4f9e029bbb0 100644 --- a/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java +++ b/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java @@ -63,7 +63,7 @@ public class HalfHFileReader extends HFile.Reader { public HalfHFileReader(final FileSystem fs, final Path p, final BlockCache c, final Reference r) throws IOException { - super(fs, p, c); + super(fs, p, c, false); // This is not actual midkey for this half-file; its just border // around which we split top and bottom. Have to look in files to find // actual last and first keys for bottom and top halves. Half-files don't diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java index d23db148e05..406f9989e19 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -657,6 +657,9 @@ public class HFile { private final BlockCache cache; public int cacheHits = 0; public int blockLoads = 0; + + // Whether file is from in-memory store + private boolean inMemory = false; // Name for this object used when logging or in toString. Is either // the result of a toString on the stream or else is toString of passed @@ -668,7 +671,7 @@ public class HFile { */ @SuppressWarnings("unused") private Reader() throws IOException { - this(null, null, null); + this(null, null, null, false); } /** @@ -680,9 +683,9 @@ public class HFile { * @param cache block cache. Pass null if none. * @throws IOException */ - public Reader(FileSystem fs, Path path, BlockCache cache) + public Reader(FileSystem fs, Path path, BlockCache cache, boolean inMemory) throws IOException { - this(fs.open(path), fs.getFileStatus(path).getLen(), cache); + this(fs.open(path), fs.getFileStatus(path).getLen(), cache, inMemory); this.closeIStream = true; this.name = path.toString(); } @@ -698,12 +701,13 @@ public class HFile { * @throws IOException */ public Reader(final FSDataInputStream fsdis, final long size, - final BlockCache cache) { + final BlockCache cache, final boolean inMemory) { this.cache = cache; this.fileSize = size; this.istream = fsdis; this.closeIStream = false; this.name = this.istream.toString(); + this.inMemory = inMemory; } @Override @@ -711,6 +715,7 @@ public class HFile { return "reader=" + this.name + (!isFileInfoLoaded()? 
"": ", compression=" + this.compressAlgo.getName() + + ", inMemory=" + this.inMemory + ", firstKey=" + toStringFirstKey() + ", lastKey=" + toStringLastKey()) + ", avgKeyLen=" + this.avgKeyLen + @@ -730,7 +735,11 @@ public class HFile { public long length() { return this.fileSize; } - + + public boolean inMemory() { + return this.inMemory; + } + /** * Read in the index and file info. * @return A map of fileinfo data. @@ -933,7 +942,7 @@ public class HFile { */ void cacheBlock(String blockName, ByteBuffer buf) { if (cache != null) { - cache.cacheBlock(blockName, buf.duplicate()); + cache.cacheBlock(blockName, buf.duplicate(), inMemory); } } @@ -1259,7 +1268,7 @@ public class HFile { */ public static class CompactionReader extends Reader { public CompactionReader(Reader reader) { - super(reader.istream, reader.fileSize, reader.cache); + super(reader.istream, reader.fileSize, reader.cache, reader.inMemory); super.blockIndex = reader.blockIndex; super.trailer = reader.trailer; super.lastkey = reader.lastkey; @@ -1625,7 +1634,7 @@ public class HFile { return; } - HFile.Reader reader = new HFile.Reader(fs, path, null); + HFile.Reader reader = new HFile.Reader(fs, path, null, false); Map fileInfo = reader.loadFileInfo(); // scan thru and count the # of unique rows. diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index ba4db600a9f..41167865365 100644 --- a/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -372,8 +372,16 @@ public class LruBlockCache implements BlockCache, HeapSize { remainingBuckets--; } + float singleMB = ((float)bucketSingle.totalSize())/((float)(1024*1024)); + float multiMB = ((float)bucketMulti.totalSize())/((float)(1024*1024)); + float memoryMB = ((float)bucketMemory.totalSize())/((float)(1024*1024)); + LOG.debug("Block cache LRU eviction completed. " + - "Freed " + bytesFreed + " bytes"); + "Freed " + bytesFreed + " bytes. " + + "Priority Sizes: " + + "Single=" + singleMB + "MB (" + bucketSingle.totalSize() + "), " + + "Multi=" + multiMB + "MB (" + bucketMulti.totalSize() + ")," + + "Memory=" + memoryMB + "MB (" + bucketMemory.totalSize() + ")"); } finally { stats.evict(); @@ -424,6 +432,10 @@ public class LruBlockCache implements BlockCache, HeapSize { return totalSize - bucketSize; } + public long totalSize() { + return totalSize; + } + public int compareTo(BlockBucket that) { if(this.overflow() == that.overflow()) return 0; return this.overflow() > that.overflow() ? 1 : -1; diff --git a/src/java/org/apache/hadoop/hbase/regionserver/Store.java b/src/java/org/apache/hadoop/hbase/regionserver/Store.java index c050e76e64e..8eb4ae21846 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -110,6 +110,7 @@ public class Store implements HConstants, HeapSize { final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); final byte [] storeName; private final String storeNameStr; + private final boolean inMemory; /* * Sorted Map of readers keyed by maximum edit sequence id (Most recent should @@ -190,7 +191,10 @@ public class Store implements HConstants, HeapSize { // MIN_COMMITS_FOR_COMPACTION map files this.compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3); - + + // Check if this is in-memory store + this.inMemory = family.isInMemory(); + // By default we split region if a file > DEFAULT_MAX_FILE_SIZE. 
long maxFileSize = info.getTableDesc().getMaxFileSize(); if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) { @@ -366,7 +370,7 @@ public class Store implements HConstants, HeapSize { } StoreFile curfile = null; try { - curfile = new StoreFile(fs, p, blockcache, this.conf); + curfile = new StoreFile(fs, p, blockcache, this.conf, this.inMemory); } catch (IOException ioe) { LOG.warn("Failed open of " + p + "; presumption is that file was " + "corrupted at flush and lost edits picked up by commit log replay. " + @@ -523,7 +527,7 @@ public class Store implements HConstants, HeapSize { } } StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache, - this.conf); + this.conf, this.inMemory); Reader r = sf.getReader(); this.storeSize += r.length(); if(LOG.isDebugEnabled()) { @@ -922,7 +926,7 @@ public class Store implements HConstants, HeapSize { return; } StoreFile finalCompactedFile = new StoreFile(this.fs, p, blockcache, - this.conf); + this.conf, this.inMemory); this.lock.writeLock().lock(); try { try { diff --git a/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 24dbc73966f..d8f95e26bda 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -76,6 +76,8 @@ public class StoreFile implements HConstants { private Path referencePath; // Should the block cache be used or not. private boolean blockcache; + // Is this from an in-memory store + private boolean inMemory; // Keys for metadata stored in backing HFile. private static final byte [] MAX_SEQ_ID_KEY = Bytes.toBytes("MAX_SEQ_ID_KEY"); @@ -113,12 +115,13 @@ public class StoreFile implements HConstants { * @throws IOException When opening the reader fails. */ StoreFile(final FileSystem fs, final Path p, final boolean blockcache, - final HBaseConfiguration conf) + final HBaseConfiguration conf, final boolean inMemory) throws IOException { this.conf = conf; this.fs = fs; this.path = p; this.blockcache = blockcache; + this.inMemory = inMemory; if (isReference(p)) { this.reference = Reference.read(fs, p); this.referencePath = getReferredToFile(this.path); @@ -263,7 +266,8 @@ public class StoreFile implements HConstants { this.reader = new HalfHFileReader(this.fs, this.referencePath, getBlockCache(), this.reference); } else { - this.reader = new Reader(this.fs, this.path, getBlockCache()); + this.reader = new Reader(this.fs, this.path, getBlockCache(), + this.inMemory); } // Load up indices and fileinfo. 
Map map = this.reader.loadFileInfo(); diff --git a/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index ef929f2cbc7..072d40b0a4e 100644 --- a/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -226,7 +226,7 @@ public class HFilePerformanceEvaluation { @Override void setUp() throws Exception { - reader = new HFile.Reader(this.fs, this.mf, null); + reader = new HFile.Reader(this.fs, this.mf, null, false); this.reader.loadFileInfo(); } diff --git a/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java b/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java index 2845c073937..3afff98e972 100644 --- a/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java +++ b/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java @@ -66,7 +66,7 @@ public class RandomSeek { long start = System.currentTimeMillis(); SimpleBlockCache cache = new SimpleBlockCache(); //LruBlockCache cache = new LruBlockCache(); - Reader reader = new HFile.Reader(lfs, path, cache); + Reader reader = new HFile.Reader(lfs, path, cache, false); reader.loadFileInfo(); System.out.println(reader.trailer); long end = System.currentTimeMillis(); diff --git a/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java index bdac1e096ae..a1f95a935ef 100644 --- a/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -122,10 +122,9 @@ public class TestHFile extends HBaseTestCase { fout.close(); FSDataInputStream fin = fs.open(ncTFile); Reader reader = new Reader(fs.open(ncTFile), - fs.getFileStatus(ncTFile).getLen(), null); + fs.getFileStatus(ncTFile).getLen(), null, false); // Load up the index. reader.loadFileInfo(); - LOG.info(reader); HFileScanner scanner = reader.getScanner(); // Align scanner at start of the file. scanner.seekTo(); @@ -186,7 +185,7 @@ public class TestHFile extends HBaseTestCase { fout.close(); FSDataInputStream fin = fs.open(mFile); Reader reader = new Reader(fs.open(mFile), this.fs.getFileStatus(mFile) - .getLen(), null); + .getLen(), null, false); reader.loadFileInfo(); // No data -- this should return false. 
assertFalse(reader.getScanner().seekTo()); @@ -210,7 +209,7 @@ public class TestHFile extends HBaseTestCase { writer.append("foo".getBytes(), "value".getBytes()); writer.close(); fout.close(); - Reader reader = new Reader(fs, mFile, null); + Reader reader = new Reader(fs, mFile, null, false); reader.loadFileInfo(); assertNull(reader.getMetaBlock("non-existant")); } @@ -270,4 +269,4 @@ public class TestHFile extends HBaseTestCase { } } -} \ No newline at end of file +} diff --git a/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java b/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java index 2ae8824ff24..efc16cbb9f1 100644 --- a/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java +++ b/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java @@ -236,9 +236,8 @@ public class TestHFilePerformance extends TestCase { if ("HFile".equals(fileType)){ HFile.Reader reader = new HFile.Reader(fs.open(path), - fs.getFileStatus(path).getLen(), null); + fs.getFileStatus(path).getLen(), null, false); reader.loadFileInfo(); - System.out.println(reader); switch (method) { case 0: @@ -381,4 +380,4 @@ public class TestHFilePerformance extends TestCase { " the same method several times and flood cache every time and average it to get a" + " better number."); } -} \ No newline at end of file +} diff --git a/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java index db542c4c2a6..ee4c9835d8b 100644 --- a/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java +++ b/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java @@ -155,9 +155,8 @@ public class TestHFileSeek extends TestCase { long totalBytes = 0; FSDataInputStream fsdis = fs.open(path); Reader reader = - new Reader(fsdis, fs.getFileStatus(path).getLen(), null); + new Reader(fsdis, fs.getFileStatus(path).getLen(), null, false); reader.loadFileInfo(); - System.out.println(reader); KeySampler kSampler = new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(), keyLenGen); diff --git a/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index 5fa80a1b639..9e9ecc2f074 100644 --- a/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -49,7 +49,7 @@ public class TestSeekTo extends HBaseTestCase { } public void testSeekBefore() throws Exception { Path p = makeNewFile(); - HFile.Reader reader = new HFile.Reader(fs, p, null); + HFile.Reader reader = new HFile.Reader(fs, p, null, false); reader.loadFileInfo(); HFileScanner scanner = reader.getScanner(); assertEquals(false, scanner.seekBefore(Bytes.toBytes("a"))); @@ -82,7 +82,7 @@ public class TestSeekTo extends HBaseTestCase { public void testSeekTo() throws Exception { Path p = makeNewFile(); - HFile.Reader reader = new HFile.Reader(fs, p, null); + HFile.Reader reader = new HFile.Reader(fs, p, null, false); reader.loadFileInfo(); assertEquals(2, reader.blockIndex.count); HFileScanner scanner = reader.getScanner(); @@ -102,7 +102,7 @@ public class TestSeekTo extends HBaseTestCase { public void testBlockContainingKey() throws Exception { Path p = makeNewFile(); - HFile.Reader reader = new HFile.Reader(fs, p, null); + HFile.Reader reader = new HFile.Reader(fs, p, null, false); reader.loadFileInfo(); System.out.println(reader.blockIndex.toString()); // falls before the start of the file. 
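All of the test updates above and below thread the new boolean through the HFile.Reader constructor; ordinary readers outside an in-memory column family pass false, exactly as these tests do. For orientation, a minimal sketch of the open-and-scan sequence after this change, assuming the 0.20-era API as it appears in this diff (the store file path below is hypothetical):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public class HFileReadSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new HBaseConfiguration());
    // Hypothetical HFile path; substitute a real store file.
    Path p = new Path("/hbase/mytable/regionname/familyname/1234567890");
    // The fourth argument is the inMemory flag this patch adds; readers
    // for families that are not marked in-memory pass false.
    HFile.Reader reader = new HFile.Reader(fs, p, null /* no block cache */, false);
    reader.loadFileInfo(); // load block index and file info before scanning
    HFileScanner scanner = reader.getScanner();
    if (scanner.seekTo()) { // position at first key; returns false on an empty file
      do {
        // consume scanner.getKey() / scanner.getValue() here
      } while (scanner.next());
    }
    reader.close();
  }
}
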
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index ea58b69712c..3aed0b0367d 100644 --- a/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -74,7 +74,7 @@ public class TestStoreFile extends HBaseTestCase { new Path(new Path(this.testDir, "regionname"), "familyname"), 2 * 1024, null, null); writeStoreFile(writer); - checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf)); + checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf, false)); } /* @@ -113,7 +113,7 @@ public class TestStoreFile extends HBaseTestCase { HFile.Writer writer = StoreFile.getWriter(this.fs, dir, 8 * 1024, null, null); writeStoreFile(writer); - StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf); + StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf, false); HFile.Reader reader = hsf.getReader(); // Split on a row, not in middle of row. Midkey returned by reader // may be in middle of row. Create new one with empty column and @@ -124,7 +124,7 @@ public class TestStoreFile extends HBaseTestCase { byte [] finalKey = hsk.getRow(); // Make a reference Path refPath = StoreFile.split(fs, dir, hsf, reader.midkey(), Range.top); - StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf); + StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf, false); // Now confirm that I can read from the reference and that it only gets // keys from top half of the file. HFileScanner s = refHsf.getReader().getScanner(); @@ -158,8 +158,8 @@ public class TestStoreFile extends HBaseTestCase { Path bottomPath = StoreFile.split(this.fs, bottomDir, f, midkey, Range.bottom); // Make readers on top and bottom. - HFile.Reader top = new StoreFile(this.fs, topPath, true, conf).getReader(); - HFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader(); + HFile.Reader top = new StoreFile(this.fs, topPath, true, conf, false).getReader(); + HFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf, false).getReader(); ByteBuffer previous = null; LOG.info("Midkey: " + Bytes.toString(midkey)); byte [] midkeyBytes = new HStoreKey(midkey).getBytes(); @@ -212,8 +212,8 @@ public class TestStoreFile extends HBaseTestCase { topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top); bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, Range.bottom); - top = new StoreFile(this.fs, topPath, true, conf).getReader(); - bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader(); + top = new StoreFile(this.fs, topPath, true, conf, false).getReader(); + bottom = new StoreFile(this.fs, bottomPath, true, conf, false).getReader(); bottomScanner = bottom.getScanner(); int count = 0; while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || @@ -256,8 +256,8 @@ public class TestStoreFile extends HBaseTestCase { topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top); bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, Range.bottom); - top = new StoreFile(this.fs, topPath, true, conf).getReader(); - bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader(); + top = new StoreFile(this.fs, topPath, true, conf, false).getReader(); + bottom = new StoreFile(this.fs, bottomPath, true, conf, false).getReader(); first = true; bottomScanner = bottom.getScanner(); while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
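
Taken together, the patch moves the in-memory attribute from the table level (the HTableDescriptor and TableSchemaModel code removed above) down to the column family: Store reads it via family.isInMemory() and threads it through StoreFile and HFile.Reader, so blocks cached for that family land in the LruBlockCache in-memory priority bucket, the last to be evicted. A minimal client-side sketch of turning the flag on, assuming the HColumnDescriptor.setInMemory setter from the HBase API of this era (the diff itself shows only the isInMemory() read side):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class InMemoryFamilyExample {
  public static void main(String[] args) throws Exception {
    // Table and family names are made up for illustration.
    HTableDescriptor table = new HTableDescriptor("mytable");
    HColumnDescriptor family = new HColumnDescriptor("myfamily");
    // Store picks this up via family.isInMemory() and passes it down to
    // HFile.Reader, so the family's blocks are cached with in-memory
    // priority rather than the single/multi-access priorities.
    family.setInMemory(true);
    table.addFamily(family);
    new HBaseAdmin(new HBaseConfiguration()).createTable(table);
  }
}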