HBASE-4017 BlockCache interface should be truly modular
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1144588 13f79535-47bb-0310-9956-ffa450edef68
commit 08a2eb0964
parent a618fea162
@@ -324,6 +324,7 @@ Release 0.91.0 - Unreleased
    HBASE-4013  Make ZooKeeperListener Abstract (Akash Ashok via Ted Yu)
    HBASE-4025  Server startup fails during startup due to failure in loading
                all table descriptors. (Subbu Iyer via Ted Yu)
+   HBASE-4017  BlockCache interface should be truly modular (Li Pi)
 
 NEW FEATURES
    HBASE-2001  Coprocessors: Colocate user code with regions (Mingjie Lai via
@@ -68,4 +68,12 @@ public interface BlockCache {
    * Shutdown the cache.
    */
   public void shutdown();
+
+  public long size();
+
+  public long getFreeSize();
+
+  public long getCurrentSize();
+
+  public long getEvictedCount();
 }
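The hunk above promotes the sizing and eviction accessors onto the BlockCache interface itself, so callers can inspect any cache implementation without casting to LruBlockCache. A minimal sketch of calling the widened interface, assuming an HBase 0.90/0.91-era classpath; SimpleBlockCache is chosen only because it has a public no-argument constructor, and the class name BlockCacheAccessorDemo is illustrative:

    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.SimpleBlockCache;

    public class BlockCacheAccessorDemo {
      public static void main(String[] args) {
        // Any implementation can sit behind the interface; SimpleBlockCache's new
        // accessors are stubs that report 0, as the hunks further down show.
        BlockCache cache = new SimpleBlockCache();
        System.out.println("blocks cached:  " + cache.size());
        System.out.println("bytes used:     " + cache.getCurrentSize());
        System.out.println("bytes free:     " + cache.getFreeSize());
        System.out.println("blocks evicted: " + cache.getEvictedCount());
      }
    }
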
@@ -63,7 +63,7 @@ public class SimpleBlockCache implements BlockCache {
   /**
    * @return the size
    */
-  public synchronized int size() {
+  public synchronized long size() {
     processQueue();
     return cache.size();
   }
@@ -99,4 +99,22 @@ public class SimpleBlockCache implements BlockCache {
     // TODO: implement this if we ever actually use this block cache
     return null;
   }
+
+  @Override
+  public long getFreeSize() {
+    // TODO: implement this if we ever actually use this block cache
+    return 0;
+  }
+
+  @Override
+  public long getCurrentSize() {
+    // TODO: implement this if we ever actually use this block cache
+    return 0;
+  }
+
+  @Override
+  public long getEvictedCount() {
+    // TODO: implement this if we ever actually use this block cache
+    return 0;
+  }
 }
@@ -96,6 +96,7 @@ import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;
 import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
@@ -670,7 +671,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
       }
     }
     // Send cache a shutdown.
-    LruBlockCache c = (LruBlockCache) StoreFile.getBlockCache(this.conf);
+    BlockCache c = StoreFile.getBlockCache(this.conf);
     if (c != null) {
       c.shutdown();
     }
@@ -1216,19 +1217,19 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
     this.metrics.flushQueueSize.set(cacheFlusher
       .getFlushQueueSize());
 
-    LruBlockCache lruBlockCache = (LruBlockCache) StoreFile.getBlockCache(conf);
-    if (lruBlockCache != null) {
-      this.metrics.blockCacheCount.set(lruBlockCache.size());
-      this.metrics.blockCacheFree.set(lruBlockCache.getFreeSize());
-      this.metrics.blockCacheSize.set(lruBlockCache.getCurrentSize());
-      CacheStats cacheStats = lruBlockCache.getStats();
+    BlockCache blockCache = StoreFile.getBlockCache(conf);
+    if (blockCache != null) {
+      this.metrics.blockCacheCount.set(blockCache.size());
+      this.metrics.blockCacheFree.set(blockCache.getFreeSize());
+      this.metrics.blockCacheSize.set(blockCache.getCurrentSize());
+      CacheStats cacheStats = blockCache.getStats();
       this.metrics.blockCacheHitCount.set(cacheStats.getHitCount());
       this.metrics.blockCacheMissCount.set(cacheStats.getMissCount());
-      this.metrics.blockCacheEvictedCount.set(lruBlockCache.getEvictedCount());
-      double ratio = lruBlockCache.getStats().getHitRatio();
+      this.metrics.blockCacheEvictedCount.set(blockCache.getEvictedCount());
+      double ratio = blockCache.getStats().getHitRatio();
       int percent = (int) (ratio * 100);
       this.metrics.blockCacheHitRatio.set(percent);
-      ratio = lruBlockCache.getStats().getHitCachingRatio();
+      ratio = blockCache.getStats().getHitCachingRatio();
       percent = (int) (ratio * 100);
       this.metrics.blockCacheHitCachingRatio.set(percent);
     }
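This hunk is the payoff of the interface change: the regionserver metrics loop now reads every figure through BlockCache instead of through an LruBlockCache cast. A hedged sketch of the same read-only pattern packaged as a standalone helper; the class and method names are illustrative, and only calls that appear in the hunk above (getStats, getHitRatio, getHitCachingRatio, and the new accessors) are used:

    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;

    public final class BlockCacheStatsSnapshot {
      private BlockCacheStatsSnapshot() {}

      /** Formats the same figures the metrics loop above publishes. */
      public static String describe(final BlockCache blockCache) {
        if (blockCache == null) {
          return "block cache disabled";
        }
        CacheStats stats = blockCache.getStats();
        int hitPercent = (int) (stats.getHitRatio() * 100);
        int hitCachingPercent = (int) (stats.getHitCachingRatio() * 100);
        return "blocks=" + blockCache.size()
            + " currentSize=" + blockCache.getCurrentSize()
            + " freeSize=" + blockCache.getFreeSize()
            + " evicted=" + blockCache.getEvictedCount()
            + " hitRatio=" + hitPercent + "%"
            + " hitCachingRatio=" + hitCachingPercent + "%";
      }
    }
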
@@ -21,6 +21,7 @@
 package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 
 import javax.ws.rs.Consumes;
@@ -158,8 +159,7 @@ public class RowResource extends ResourceBase {
     HTableInterface table = null;
     try {
       List<RowModel> rows = model.getRows();
-      table = pool.getTable(tableResource.getName());
-      ((HTable)table).setAutoFlush(false);
+      List<Put> puts = new ArrayList<Put>();
       for (RowModel row: rows) {
         byte[] key = row.getKey();
         if (key == null) {
@@ -191,12 +191,13 @@ public class RowResource extends ResourceBase {
               Transform.Direction.IN));
           }
         }
-        table.put(put);
+        puts.add(put);
         if (LOG.isDebugEnabled()) {
           LOG.debug("PUT " + put.toString());
         }
       }
-      ((HTable)table).setAutoFlush(true);
+      table = pool.getTable(tableResource.getName());
+      table.put(puts);
       table.flushCommits();
       ResponseBuilder response = Response.ok();
       return response.build();
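The hunk above replaces per-row table.put calls under autoflush with a single batched table.put(List<Put>) followed by flushCommits. A hedged client-side sketch of the same batching idea against the 0.90-era HTable API; the table name, column family, qualifier, and row values are made up:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "demo_table");   // hypothetical table name
        List<Put> puts = new ArrayList<Put>();
        for (int i = 0; i < 100; i++) {
          Put put = new Put(Bytes.toBytes("row-" + i));
          // "f" / "q" are a made-up column family and qualifier.
          put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
          puts.add(put);                                 // buffer client-side, as RowResource now does
        }
        table.put(puts);                                 // one batched round of puts
        table.flushCommits();                            // flush anything left in the write buffer
        table.close();
      }
    }
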
@@ -67,7 +67,6 @@ public class RandomSeek {
     Path path = new Path("/Users/ryan/rfile.big.txt");
     long start = System.currentTimeMillis();
     SimpleBlockCache cache = new SimpleBlockCache();
-    //LruBlockCache cache = new LruBlockCache();
     Reader reader = new HFile.Reader(lfs, path, cache, false, false);
     reader.loadFileInfo();
     System.out.println(reader.trailer);
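RandomSeek keeps only the SimpleBlockCache line; the commented-out LruBlockCache alternative is dropped because either class now satisfies the same BlockCache parameter of HFile.Reader. A hedged sketch of that wiring, reusing the five-argument constructor call seen above; the file path is hypothetical and the two boolean flags are passed through unchanged without interpreting them:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.SimpleBlockCache;

    public class CachedHFileRead {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        Path path = new Path("/tmp/example.hfile");   // hypothetical local HFile
        // Any BlockCache implementation can be plugged in here.
        BlockCache cache = new SimpleBlockCache();
        HFile.Reader reader = new HFile.Reader(fs, path, cache, false, false);
        try {
          // Blocks read through this reader go through the supplied cache.
          reader.loadFileInfo();
        } finally {
          reader.close();
        }
      }
    }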