HBASE-1192 LRU-style map for the block cache
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@780527 13f79535-47bb-0310-9956-ffa450edef68
parent dca4256907
commit 2b47c15286
CHANGES.txt
@@ -293,6 +293,8 @@ Release 0.20.0 - Unreleased
               (Lars George and Alex Newman via Stack)
    HBASE-1455 Update DemoClient.py for thrift 1.0 (Tim Sell via Stack)
    HBASE-1464 Add hbase.regionserver.logroll.period to hbase-default
+   HBASE-1192 LRU-style map for the block cache (Jon Gray and Ryan Rawson
+              via Stack)
 
   OPTIMIZATIONS
    HBASE-1412 Change values for delete column and column family in KeyValue
hbase-default.xml
@@ -394,4 +394,11 @@
     mode flag is stored at /hbase/safe-mode.
     </description>
   </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>50000000</value>
+    <description>
+        The size of the block cache used by HFile/StoreFile. Set to 0 to disable.
+    </description>
+  </property>
 </configuration>
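For orientation (not part of the commit): the value is a raw byte count, so 50000000 is roughly 48 MB. Server code reads the property with getLong, as StoreFile.getBlockCache does later in this diff. A minimal hedged sketch, assuming hbase-default.xml is on the classpath; the CacheSizeCheck class is illustrative only:

  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CacheSizeCheck {
    public static void main(String[] args) {
      HBaseConfiguration conf = new HBaseConfiguration(); // loads hbase-default.xml
      // 0 is the documented "disabled" default, matching getBlockCache below.
      long cacheSize = conf.getLong("hfile.block.cache.size", 0L);
      System.out.println("block cache capacity: " + (cacheSize / (1024 * 1024)) + " MB");
    }
  }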
HeapSize.java
@@ -55,7 +55,7 @@ public interface HeapSize {
 
   static final int BLOCK_SIZE_TAX = 8;
 
-
+  static final int BYTE_BUFFER = 56;
 
   /**
    * @return Approximate 'exclusive deep size' of implementing object. Includes
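These constants are rough per-object JVM overheads used when sizing cache entries; BYTE_BUFFER estimates the footprint of an empty java.nio.ByteBuffer. A hedged sketch of an implementer — the CachedBlock class and its accounting are illustrative, not part of this commit:

  import java.nio.ByteBuffer;

  // Hypothetical cache entry that reports its own footprint per the
  // 'exclusive deep size' contract documented above.
  public class CachedBlock implements HeapSize {
    private final ByteBuffer buf;

    public CachedBlock(ByteBuffer buf) {
      this.buf = buf;
    }

    public long heapSize() {
      // ByteBuffer object overhead plus the payload it carries.
      return BYTE_BUFFER + buf.capacity();
    }
  }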
HFile.java
@@ -526,6 +526,10 @@ public class HFile {
       }
     }
 
+    public long getTotalBytes() {
+      return this.totalBytes;
+    }
+
     public void close() throws IOException {
       if (this.outputStream == null) {
         return;
HRegion.java
@@ -76,7 +76,7 @@ import org.apache.hadoop.util.StringUtils;
  *
  * <p>We maintain multiple HStores for a single HRegion.
  *
- * <p>An HStore is a set of rows with some column data; together,
+ * <p>An Store is a set of rows with some column data; together,
  * they make up all the data for the rows.
  *
  * <p>Each HRegion has a 'startKey' and 'endKey'.
@@ -96,9 +96,9 @@ import org.apache.hadoop.util.StringUtils;
  *
  * <p>An HRegion is defined by its table and its key extent.
  *
- * <p>It consists of at least one HStore. The number of HStores should be
+ * <p>It consists of at least one Store. The number of Stores should be
  * configurable, so that data which is accessed together is stored in the same
- * HStore. Right now, we approximate that by building a single HStore for
+ * Store. Right now, we approximate that by building a single Store for
  * each column family. (This config info will be communicated via the
  * tabledesc.)
  *
HRegionServer.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
 import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
 import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion;
@@ -1093,6 +1094,16 @@ public class HRegionServer implements HConstants, HRegionInterface,
     this.metrics.storefiles.set(storefiles);
     this.metrics.memcacheSizeMB.set((int)(memcacheSize/(1024*1024)));
     this.metrics.storefileIndexSizeMB.set((int)(storefileIndexSize/(1024*1024)));
+
+    LruBlockCache lruBlockCache = (LruBlockCache)StoreFile.getBlockCache(conf);
+    if (lruBlockCache != null) {
+      this.metrics.blockCacheCount.set(lruBlockCache.size());
+      this.metrics.blockCacheFree.set(lruBlockCache.getMemFree());
+      this.metrics.blockCacheSize.set(lruBlockCache.getMemUsed());
+      double ratio = lruBlockCache.getHitRatio();
+      int percent = (int) (ratio * 100);
+      this.metrics.blockCacheHitRatio.set(percent);
+    }
   }
 
   /**
Store.java
@@ -358,7 +358,7 @@ public class Store implements HConstants {
       }
       StoreFile curfile = null;
       try {
-        curfile = new StoreFile(fs, p);
+        curfile = new StoreFile(fs, p, this.conf);
       } catch (IOException ioe) {
         LOG.warn("Failed open of " + p + "; presumption is that file was " +
           "corrupted at flush and lost edits picked up by commit log replay. " +
@@ -499,7 +499,7 @@ public class Store implements HConstants {
         writer.close();
       }
     }
-    StoreFile sf = new StoreFile(this.fs, writer.getPath());
+    StoreFile sf = new StoreFile(this.fs, writer.getPath(), this.conf);
     this.storeSize += sf.getReader().length();
     if(LOG.isDebugEnabled()) {
       LOG.debug("Added " + sf + ", entries=" + sf.getReader().getEntries() +
@@ -962,7 +962,7 @@ public class Store implements HConstants {
       LOG.error("Failed move of compacted file " + compactedFile.getPath(), e);
       return;
     }
-    StoreFile finalCompactedFile = new StoreFile(this.fs, p);
+    StoreFile finalCompactedFile = new StoreFile(this.fs, p, this.conf);
     this.lock.writeLock().lock();
     try {
       try {
StoreFile.java
@@ -33,12 +33,17 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.io.HalfHFileReader;
 import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Hash;
 import org.apache.hadoop.io.RawComparator;
 
 /**
  * A Store data file. Stores usually have one or more of these files. They
@@ -52,6 +57,10 @@ import org.apache.hadoop.hbase.util.Bytes;
  */
 public class StoreFile implements HConstants {
   static final Log LOG = LogFactory.getLog(StoreFile.class.getName());
 
+  public static final String HFILE_CACHE_SIZE_KEY = "hfile.block.cache.size";
+
+  private static BlockCache hfileBlockCache = null;
 
   // Make default block size for StoreFiles 8k while testing. TODO: FIX!
   // Need to make it 8k for testing.
@@ -88,16 +97,18 @@ public class StoreFile implements HConstants {
 
   // Used making file ids.
   private final static Random rand = new Random();
+  private final HBaseConfiguration conf;
 
   /**
-   * Constructor.
-   * Loads up a Reader (and its indices, etc.).
-   * @param fs Filesystem.
-   * @param p qualified path
+   * Constructor, loads a reader and it's indices, etc. May allocate a substantial
+   * amount of ram depending on the underlying files (10-20MB?).
+   * @param fs
+   * @param p
+   * @param conf
    * @throws IOException
    */
-  StoreFile(final FileSystem fs, final Path p)
-  throws IOException {
+  StoreFile(final FileSystem fs, final Path p, final HBaseConfiguration conf) throws IOException {
+    this.conf = conf;
     this.fs = fs;
     this.path = p;
     if (isReference(p)) {
@@ -105,6 +116,7 @@ public class StoreFile implements HConstants {
       this.referencePath = getReferredToFile(this.path);
     }
     this.reader = open();
+
   }
 
   /**
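Every construction site now passes the configuration through so the reader opened above can locate the shared block cache. As the Store and TestStoreFile hunks in this commit show, usage is simply the following — a sketch assuming fs, path, and conf are an open FileSystem, a qualified Path, and the server's HBaseConfiguration:

  StoreFile sf = new StoreFile(fs, path, conf);
  HFile.Reader reader = sf.getReader();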
@@ -196,6 +208,23 @@ public class StoreFile implements HConstants {
     return this.sequenceid;
   }
 
+  public static synchronized BlockCache getBlockCache(HBaseConfiguration conf) {
+    if (hfileBlockCache != null)
+      return hfileBlockCache;
+
+    long cacheSize = conf.getLong(HFILE_CACHE_SIZE_KEY, 0L);
+    // There should be a better way to optimize this. But oh well.
+    if (cacheSize == 0L)
+      return null;
+
+    hfileBlockCache = new LruBlockCache(cacheSize);
+    return hfileBlockCache;
+  }
+
+  public BlockCache getBlockCache() {
+    return getBlockCache(conf);
+  }
 
   /**
    * Opens reader on this store file. Called by Constructor.
    * @return Reader for the store file.
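getBlockCache is a lazily created, process-wide singleton: synchronized guarantees at most one LruBlockCache is constructed even when many StoreFiles open concurrently, and the configured size is captured on first use — later calls return the existing cache whatever conf they pass. A hedged usage sketch, mirroring the HRegionServer metrics hunk earlier in this commit:

  // Null means hfile.block.cache.size was 0 or unset, i.e. caching disabled.
  BlockCache cache = StoreFile.getBlockCache(conf);
  if (cache != null) {
    LruBlockCache lru = (LruBlockCache) cache;
    long free = lru.getMemFree();      // accessors used by the metrics code
    double ratio = lru.getHitRatio();
  }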
@@ -208,10 +237,10 @@ public class StoreFile implements HConstants {
       throw new IllegalAccessError("Already open");
     }
     if (isReference()) {
-      this.reader = new HalfHFileReader(this.fs, this.referencePath, null,
+      this.reader = new HalfHFileReader(this.fs, this.referencePath, getBlockCache(),
         this.reference);
     } else {
-      this.reader = new StoreFileReader(this.fs, this.path, null);
+      this.reader = new StoreFileReader(this.fs, this.path, getBlockCache());
     }
     // Load up indices and fileinfo.
     Map<byte [], byte []> map = this.reader.loadFileInfo();
@@ -368,13 +397,13 @@ public class StoreFile implements HConstants {
    * @param blocksize
    * @param algorithm Pass null to get default.
    * @param c Pass null to get default.
-   * @param bloomfilter
+   * @param filter BloomFilter
    * @return HFile.Writer
    * @throws IOException
    */
   public static HFile.Writer getWriter(final FileSystem fs, final Path dir,
     final int blocksize, final Compression.Algorithm algorithm,
-    final KeyValue.KeyComparator c, final boolean bloomfilter)
+    final KeyValue.KeyComparator c, final boolean filter)
   throws IOException {
     if (!fs.exists(dir)) {
       fs.mkdirs(dir);
@@ -382,7 +411,7 @@ public class StoreFile implements HConstants {
     Path path = getUniqueFile(fs, dir);
     return new HFile.Writer(fs, path, blocksize,
       algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm,
-      c == null? KeyValue.KEY_COMPARATOR: c, bloomfilter);
+      c == null? KeyValue.KEY_COMPARATOR: c, filter);
   }
 
   /**
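A representative call, taken from the updated TestStoreFile later in this commit: null compression and null comparator select the defaults, and the final boolean (renamed from bloomfilter to filter in this hunk) toggles the bloom filter.

  // 8k blocks, default compression and comparator, bloom filter off.
  HFile.Writer writer = StoreFile.getWriter(fs, dir, 8 * 1024, null, null, false);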
@@ -399,10 +428,9 @@ public class StoreFile implements HConstants {
   }
 
   /**
    *
    * @param fs
    * @param dir
    * @param encodedRegionName
    * @param family
    * @return Path to a file that doesn't exist at time of this invocation.
    * @throws IOException
    */
@@ -412,12 +440,12 @@ public class StoreFile implements HConstants {
   }
 
   /**
    *
    * @param fs
    * @param dir
    * @param encodedRegionName
    * @param family
+   * @param suffix
    * @return Path to a file that doesn't exist at time of this invocation.
-   * @return
    * @throws IOException
    */
   static Path getRandomFilename(final FileSystem fs, final Path dir,
@@ -437,8 +465,8 @@ public class StoreFile implements HConstants {
    * Write file metadata.
    * Call before you call close on the passed <code>w</code> since its written
    * as metadata to that file.
    *
-   * @param filesystem file system
+   * @param w
    * @param maxSequenceId Maximum sequence id.
    * @throws IOException
    */
@@ -488,4 +516,4 @@ public class StoreFile implements HConstants {
     Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
     return r.write(fs, p);
   }
 }
RegionServerMetrics.java
@@ -49,7 +49,7 @@ public class RegionServerMetrics implements Updater {
   private MetricsRegistry registry = new MetricsRegistry();
 
   public final MetricsTimeVaryingRate atomicIncrementTime =
-      new MetricsTimeVaryingRate("atomicIncrementTime", registry);
+    new MetricsTimeVaryingRate("atomicIncrementTime", registry);
 
   /**
    * Count of regions carried by this regionserver
@@ -57,10 +57,30 @@ public class RegionServerMetrics implements Updater {
   public final MetricsIntValue regions =
     new MetricsIntValue("regions", registry);
 
+  /**
+   * Block cache size.
+   */
+  public final MetricsLongValue blockCacheSize = new MetricsLongValue("blockCacheSize", registry);
+
+  /**
+   * Block cache free size.
+   */
+  public final MetricsLongValue blockCacheFree = new MetricsLongValue("blockCacheFree", registry);
+
+  /**
+   * Block cache item count.
+   */
+  public final MetricsLongValue blockCacheCount = new MetricsLongValue("blockCacheCount", registry);
+
+  /**
+   * Block hit ratio.
+   */
+  public final MetricsIntValue blockCacheHitRatio = new MetricsIntValue("blockCacheHitRatio", registry);
+
   /*
    * Count of requests to the regionservers since last call to metrics update
    */
   private final MetricsRate requests = new MetricsRate("requests");
 
   /**
    * Count of stores open on the regionserver.
@@ -112,6 +132,11 @@ public class RegionServerMetrics implements Updater {
       this.memcacheSizeMB.pushMetric(this.metricsRecord);
       this.regions.pushMetric(this.metricsRecord);
       this.requests.pushMetric(this.metricsRecord);
+
+      this.blockCacheSize.pushMetric(this.metricsRecord);
+      this.blockCacheFree.pushMetric(this.metricsRecord);
+      this.blockCacheCount.pushMetric(this.metricsRecord);
+      this.blockCacheHitRatio.pushMetric(this.metricsRecord);
     }
     this.metricsRecord.update();
     this.lastUpdate = System.currentTimeMillis();
@@ -162,6 +187,14 @@ public class RegionServerMetrics implements Updater {
       Long.valueOf(memory.getUsed()/MB));
     sb = Strings.appendKeyValue(sb, "maxHeap",
       Long.valueOf(memory.getMax()/MB));
+    sb = Strings.appendKeyValue(sb, this.blockCacheSize.getName(),
+      Long.valueOf(this.blockCacheSize.get()));
+    sb = Strings.appendKeyValue(sb, this.blockCacheFree.getName(),
+      Long.valueOf(this.blockCacheFree.get()));
+    sb = Strings.appendKeyValue(sb, this.blockCacheCount.getName(),
+      Long.valueOf(this.blockCacheCount.get()));
+    sb = Strings.appendKeyValue(sb, this.blockCacheHitRatio.getName(),
+      Long.valueOf(this.blockCacheHitRatio.get()));
     return sb.toString();
   }
 }
TestStoreFile.java
@@ -73,7 +73,7 @@ public class TestStoreFile extends HBaseTestCase {
       new Path(new Path(this.testDir, "regionname"), "familyname"),
       2 * 1024, null, null, false);
     writeStoreFile(writer);
-    checkHalfHFile(new StoreFile(this.fs, writer.getPath()));
+    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf));
   }
 
   /*
@@ -112,7 +112,7 @@ public class TestStoreFile extends HBaseTestCase {
     HFile.Writer writer = StoreFile.getWriter(this.fs, dir, 8 * 1024, null,
       null, false);
     writeStoreFile(writer);
-    StoreFile hsf = new StoreFile(this.fs, writer.getPath());
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf);
     HFile.Reader reader = hsf.getReader();
     // Split on a row, not in middle of row. Midkey returned by reader
     // may be in middle of row. Create new one with empty column and
@@ -123,7 +123,7 @@ public class TestStoreFile extends HBaseTestCase {
     byte [] finalKey = hsk.getRow();
     // Make a reference
     Path refPath = StoreFile.split(fs, dir, hsf, reader.midkey(), Range.top);
-    StoreFile refHsf = new StoreFile(this.fs, refPath);
+    StoreFile refHsf = new StoreFile(this.fs, refPath, conf);
     // Now confirm that I can read from the reference and that it only gets
     // keys from top half of the file.
     HFileScanner s = refHsf.getReader().getScanner();
@@ -157,8 +157,8 @@ public class TestStoreFile extends HBaseTestCase {
     Path bottomPath = StoreFile.split(this.fs, bottomDir,
       f, midkey, Range.bottom);
     // Make readers on top and bottom.
-    HFile.Reader top = new StoreFile(this.fs, topPath).getReader();
-    HFile.Reader bottom = new StoreFile(this.fs, bottomPath).getReader();
+    HFile.Reader top = new StoreFile(this.fs, topPath, conf).getReader();
+    HFile.Reader bottom = new StoreFile(this.fs, bottomPath, conf).getReader();
     ByteBuffer previous = null;
     LOG.info("Midkey: " + Bytes.toString(midkey));
     byte [] midkeyBytes = new HStoreKey(midkey).getBytes();
@@ -211,8 +211,8 @@ public class TestStoreFile extends HBaseTestCase {
     topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
     bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
       Range.bottom);
-    top = new StoreFile(this.fs, topPath).getReader();
-    bottom = new StoreFile(this.fs, bottomPath).getReader();
+    top = new StoreFile(this.fs, topPath, conf).getReader();
+    bottom = new StoreFile(this.fs, bottomPath, conf).getReader();
     bottomScanner = bottom.getScanner();
     int count = 0;
     while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
@@ -255,8 +255,8 @@ public class TestStoreFile extends HBaseTestCase {
     topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
     bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
       Range.bottom);
-    top = new StoreFile(this.fs, topPath).getReader();
-    bottom = new StoreFile(this.fs, bottomPath).getReader();
+    top = new StoreFile(this.fs, topPath, conf).getReader();
+    bottom = new StoreFile(this.fs, bottomPath, conf).getReader();
     first = true;
     bottomScanner = bottom.getScanner();
     while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||