HBASE-1597 Prevent unnecessary caching of blocks during compactions

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@790989 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2009-07-03 19:11:12 +00:00
parent 80b5452a0d
commit 1f85226103
7 changed files with 74 additions and 88 deletions

CHANGES.txt

@@ -433,6 +433,8 @@ Release 0.20.0 - Unreleased
    (Lars George via Stack)
    HBASE-1596 Remove WatcherWrapper and have all users of Zookeeper provide a
               Watcher
+   HBASE-1597 Prevent unnecessary caching of blocks during compactions
+              (Jon Gray via Stack)
    OPTIMIZATIONS
    HBASE-1412 Change values for delete column and column family in KeyValue

KeyValue.java

@@ -639,9 +639,9 @@ public class KeyValue implements Writable, HeapSize {
     int familylength = b[columnoffset - 1];
     int columnlength = l - ((columnoffset - o) + TIMESTAMP_TYPE_SIZE);
     String family = familylength == 0? "":
-      Bytes.toString(b, columnoffset, familylength);
+      Bytes.toStringBinary(b, columnoffset, familylength);
     String qualifier = columnlength == 0? "":
-      Bytes.toString(b, columnoffset + familylength,
+      Bytes.toStringBinary(b, columnoffset + familylength,
         columnlength - familylength);
     long timestamp = Bytes.toLong(b, o + (l - TIMESTAMP_TYPE_SIZE));
     byte type = b[o + l - 1];
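The move from Bytes.toString to Bytes.toStringBinary matters because HBase families and qualifiers are arbitrary byte arrays: toStringBinary escapes non-printable bytes (e.g. as \x01) instead of decoding them raw into unreadable log output. A minimal sketch of the difference, assuming only HBase's Bytes utility on the classpath (the demo class itself is hypothetical):

import org.apache.hadoop.hbase.util.Bytes;

public class ToStringBinaryDemo {
  public static void main(String[] args) {
    // A qualifier containing a control byte and a high byte.
    byte[] qualifier = new byte[] { 'q', 0x01, (byte) 0xFF };
    // Raw decode: the non-printable bytes garble the output.
    System.out.println(Bytes.toString(qualifier));
    // Escaped form, e.g. "q\x01\xFF": safe to log and to compare by eye.
    System.out.println(Bytes.toStringBinary(qualifier));
  }
}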

HFile.java

@@ -513,8 +513,7 @@ public class HFile {
       }
     }
-    private void checkValue(final byte [] value,
-        @SuppressWarnings("unused") final int offset,
+    private void checkValue(final byte [] value, final int offset,
         final int length) throws IOException {
       if (value == null) {
         throw new IOException("Value cannot be null");
@@ -699,8 +698,7 @@ public class HFile {
      * @throws IOException
      */
     public Reader(final FSDataInputStream fsdis, final long size,
-        final BlockCache cache)
-    throws IOException {
+        final BlockCache cache) {
       this.cache = cache;
       this.fileSize = size;
       this.istream = fsdis;
@@ -722,11 +720,11 @@ public class HFile {
     }
     protected String toStringFirstKey() {
-      return Bytes.toStringBinary(getFirstKey());
+      return KeyValue.keyToString(getFirstKey());
     }
     protected String toStringLastKey() {
-      return Bytes.toStringBinary(getFirstKey());
+      return KeyValue.keyToString(getFirstKey());
     }
     public long length() {
@@ -918,15 +916,26 @@ public class HFile {
       buf.limit(buf.limit() - DATABLOCKMAGIC.length);
       buf.rewind();
-      // Cache a copy, not the one we are sending back, so the position doesnt
-      // get messed.
-      if (cache != null) {
-        cache.cacheBlock(name + block, buf.duplicate());
-      }
+      // Cache the block
+      cacheBlock(name + block, buf.duplicate());
       return buf;
     }
   }
+    /**
+     * Cache this block if there is a block cache available.<p>
+     *
+     * Makes a copy of the ByteBuffer, not the one we are sending back, so the
+     * position does not get messed up.
+     * @param blockName
+     * @param buf
+     */
+    void cacheBlock(String blockName, ByteBuffer buf) {
+      if (cache != null) {
+        cache.cacheBlock(blockName, buf.duplicate());
+      }
+    }
     /*
      * Decompress <code>compressedSize</code> bytes off the backing
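The refactor above routes every post-read caching decision through the new cacheBlock hook. The javadoc's point about duplicating the buffer is easy to miss: ByteBuffer.duplicate() shares the underlying bytes but carries its own position, limit, and mark, so the copy handed to the cache cannot disturb a reader still consuming the returned buffer. A standalone sketch of that property:

import java.nio.ByteBuffer;

public class DuplicateDemo {
  public static void main(String[] args) {
    ByteBuffer block = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
    // Same backing bytes, independent cursor state.
    ByteBuffer cached = block.duplicate();
    cached.position(4);                   // the cache's view moves to the end...
    System.out.println(block.position()); // ...but this still prints 0
  }
}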
@@ -1241,6 +1250,36 @@ public class HFile {
       return trailer.toString();
     }
   }
+  /**
+   * HFile Reader that does not cache blocks that were not already cached.<p>
+   *
+   * Used for compactions.
+   */
+  public static class CompactionReader extends Reader {
+    public CompactionReader(Reader reader) {
+      super(reader.istream, reader.fileSize, reader.cache);
+      super.blockIndex = reader.blockIndex;
+      super.trailer = reader.trailer;
+      super.lastkey = reader.lastkey;
+      super.avgKeyLen = reader.avgKeyLen;
+      super.avgValueLen = reader.avgValueLen;
+      super.comparator = reader.comparator;
+      super.metaIndex = reader.metaIndex;
+      super.fileInfoLoaded = reader.fileInfoLoaded;
+      super.compressAlgo = reader.compressAlgo;
+    }
+    /**
+     * Do not cache this block when doing a compaction.
+     */
+    @Override
+    void cacheBlock(String blockName, ByteBuffer buf) {
+      return;
+    }
+  }
   /*
    * The RFile has a fixed trailer which contains offsets to other variable
    * parts of the file. Also includes basic metadata on this file.
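Two details of CompactionReader are worth noting. First, the constructor copies the wrapped Reader's already-loaded state (block index, trailer, file info), so building one per compaction adds no I/O. Second, the no-op override works because the read path consults the cache before touching disk and only calls cacheBlock after a miss, so a compaction still benefits from blocks that are already cached while never inserting new ones. A simplified sketch of that read path (not the literal HFile code; readFromDisk stands in for the real decompress-and-read logic):

// Schematic of Reader's block read, assumed shape only.
ByteBuffer readBlock(int block) throws IOException {
  if (cache != null) {
    ByteBuffer cached = cache.getBlock(name + block);
    if (cached != null) {
      return cached;                         // cache hits are still served
    }
  }
  ByteBuffer buf = readFromDisk(block);      // hypothetical helper
  cacheBlock(name + block, buf.duplicate()); // no-op in CompactionReader
  return buf;
}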

LruBlockCache.java

@@ -539,19 +539,15 @@ public class LruBlockCache implements BlockCache, HeapSize {
       LruBlockCache.LOG.debug("Cache Stats: Sizes: " +
         "Total=" + sizeMB + "MB (" + totalSize + "), " +
         "Free=" + freeMB + "MB (" + freeSize + "), " +
-        "Max=" + maxMB + "MB (" + maxSize +")");
-      // Log hit/miss and eviction counts
-      LruBlockCache.LOG.debug("Cache Stats: Counts: " +
+        "Max=" + maxMB + "MB (" + maxSize +")" +
+        ", Counts: " +
         "Blocks=" + size() +", " +
         "Access=" + stats.getRequestCount() + ", " +
         "Hit=" + stats.getHitCount() + ", " +
         "Miss=" + stats.getMissCount() + ", " +
         "Evictions=" + stats.getEvictionCount() + ", " +
-        "Evicted=" + stats.getEvictedCount());
-      // Log hit/miss and eviction ratios
-      LruBlockCache.LOG.debug("Cache Stats: Ratios: " +
+        "Evicted=" + stats.getEvictedCount() +
+        ", Ratios: " +
         "Hit Ratio=" + stats.getHitRatio()*100 + "%, " +
         "Miss Ratio=" + stats.getMissRatio()*100 + "%, " +
         "Evicted/Run=" + stats.evictedPerEviction());

Store.java

@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.io.SequenceFile;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.io.hfile.HFile.CompactionReader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -674,12 +675,12 @@ public class Store implements HConstants {
         LOG.warn("Path is null for " + file);
         return null;
       }
-      Reader r = file.getReader();
+      CompactionReader r = file.getCompactionReader();
       if (r == null) {
         LOG.warn("StoreFile " + file + " has a null Reader");
         continue;
       }
-      long len = file.getReader().length();
+      long len = file.getCompactionReader().length();
       fileSizes[i] = len;
       totalSize += len;
     }
@@ -838,7 +839,7 @@ public class Store implements HConstants {
     // init:
     for (int i = 0; i < filesToCompact.size(); ++i) {
       // TODO open a new HFile.Reader w/o block cache.
-      Reader r = filesToCompact.get(i).getReader();
+      CompactionReader r = filesToCompact.get(i).getCompactionReader();
       if (r == null) {
         LOG.warn("StoreFile " + filesToCompact.get(i) + " has a null Reader");
         continue;
@@ -953,7 +954,7 @@ public class Store implements HConstants {
     // 4. Compute new store size
     this.storeSize = 0L;
     for (StoreFile hsf : this.storefiles.values()) {
-      Reader r = hsf.getReader();
+      Reader r = hsf.getCompactionReader();
       if (r == null) {
         LOG.warn("StoreFile " + hsf + " has a null Reader");
         continue;

StoreFile.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
+import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
@@ -262,7 +263,7 @@ public class StoreFile implements HConstants {
       this.reader = new HalfHFileReader(this.fs, this.referencePath,
         getBlockCache(), this.reference);
     } else {
-      this.reader = new StoreFileReader(this.fs, this.path, getBlockCache());
+      this.reader = new Reader(this.fs, this.path, getBlockCache());
     }
     // Load up indices and fileinfo.
     Map<byte [], byte []> map = this.reader.loadFileInfo();
@@ -297,67 +298,6 @@ public class StoreFile implements HConstants {
     return this.reader;
   }
-  /**
-   * Override to add some customization on HFile.Reader
-   */
-  static class StoreFileReader extends HFile.Reader {
-    /**
-     *
-     * @param fs
-     * @param path
-     * @param cache
-     * @throws IOException
-     */
-    public StoreFileReader(FileSystem fs, Path path, BlockCache cache)
-        throws IOException {
-      super(fs, path, cache);
-    }
-    @Override
-    protected String toStringFirstKey() {
-      return KeyValue.keyToString(getFirstKey());
-    }
-    @Override
-    protected String toStringLastKey() {
-      return KeyValue.keyToString(getLastKey());
-    }
-  }
-  /**
-   * Override to add some customization on HalfHFileReader.
-   */
-  static class HalfStoreFileReader extends HalfHFileReader {
-    /**
-     *
-     * @param fs
-     * @param p
-     * @param c
-     * @param r
-     * @throws IOException
-     */
-    public HalfStoreFileReader(FileSystem fs, Path p, BlockCache c, Reference r)
-        throws IOException {
-      super(fs, p, c, r);
-    }
-    @Override
-    public String toString() {
-      return super.toString() + (isTop()? ", half=top": ", half=bottom") +
-        " splitKey: " + KeyValue.keyToString(splitkey);
-    }
-    @Override
-    protected String toStringFirstKey() {
-      return KeyValue.keyToString(getFirstKey());
-    }
-    @Override
-    protected String toStringLastKey() {
-      return KeyValue.keyToString(getLastKey());
-    }
-  }
   /**
    * @return Current reader. Must call open first else returns null.
    */
@@ -365,6 +305,14 @@ public class StoreFile implements HConstants {
     return this.reader;
   }
+  /**
+   * Gets a special Reader for use during compactions. Will not cache blocks.
+   * @return Current reader. Must call open first else returns null.
+   */
+  public HFile.CompactionReader getCompactionReader() {
+    return new HFile.CompactionReader(this.reader);
+  }
   /**
    * @throws IOException
    */
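Because CompactionReader inherits everything except cacheBlock, callers scan it exactly like a normal Reader. A hedged usage sketch (storeFile and the output step are assumed, not part of this commit):

HFile.CompactionReader cr = storeFile.getCompactionReader();
HFileScanner scanner = cr.getScanner();
if (scanner.seekTo()) {
  do {
    KeyValue kv = scanner.getKeyValue();
    // ... write kv to the compaction output ...
  } while (scanner.next());
}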

TestCompaction.java

@@ -169,7 +169,7 @@ public class TestCompaction extends HBaseTestCase {
     boolean containsStartRow = false;
     for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles().
         values()) {
-      HFileScanner scanner = f.getReader().getScanner();
+      HFileScanner scanner = f.getCompactionReader().getScanner();
       scanner.seekTo();
       do {
         byte [] row = scanner.getKeyValue().getRow();