HBASE-1956 Export HDFS read and write latency as a metric

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@894779 13f79535-47bb-0310-9956-ffa450edef68
Andrew Kyle Purtell 2009-12-31 05:18:46 +00:00
parent 1241459503
commit e99d776a91
4 changed files with 114 additions and 2 deletions

CHANGES.txt

@@ -268,6 +268,7 @@ Release 0.21.0 - Unreleased
    HBASE-2025  0.20.2 accessed from older client throws
                UndeclaredThrowableException; frustrates rolling upgrade
    HBASE-2081  Set the retries higher in shell since client pause is lower
+   HBASE-1956  Export HDFS read and write latency as a metric

 NEW FEATURES
    HBASE-1901  "General" partitioner for "hbase-48" bulk (behind the api, write

src/java/org/apache/hadoop/hbase/io/hfile/HFile.java

@@ -154,6 +154,36 @@ public class HFile {
   public final static String DEFAULT_COMPRESSION =
     DEFAULT_COMPRESSION_ALGORITHM.getName();

+  // For measuring latency of "typical" reads and writes
+  private static volatile long readOps;
+  private static volatile long readTime;
+  private static volatile long writeOps;
+  private static volatile long writeTime;
+
+  public static final long getReadOps() {
+    long ret = readOps;
+    readOps = 0;
+    return ret;
+  }
+
+  public static final long getReadTime() {
+    long ret = readTime;
+    readTime = 0;
+    return ret;
+  }
+
+  public static final long getWriteOps() {
+    long ret = writeOps;
+    writeOps = 0;
+    return ret;
+  }
+
+  public static final long getWriteTime() {
+    long ret = writeTime;
+    writeTime = 0;
+    return ret;
+  }
+
   /**
    * HFile Writer.
    */
@@ -320,12 +350,17 @@ public class HFile {
      */
     private void finishBlock() throws IOException {
       if (this.out == null) return;
+
+      long now = System.currentTimeMillis();
+
       int size = releaseCompressingStream(this.out);
       this.out = null;
       blockKeys.add(firstKey);
       blockOffsets.add(Long.valueOf(blockBegin));
       blockDataSizes.add(Integer.valueOf(size));
       this.totalBytes += size;
+      writeTime += System.currentTimeMillis() - now;
+      writeOps++;
     }

    /*
@@ -896,6 +931,7 @@ public class HFile {
       buf.rewind();
       return buf;
     }
+
     /**
      * Read in a file block.
      * @param block Index of block to read.
@@ -910,7 +946,6 @@ public class HFile {
         throw new IOException("Requested block is out of range: " + block +
           ", max: " + blockIndex.count);
       }
-
       // For any given block from any given file, synchronize reads for said
       // block.
       // Without a cache, this synchronizing is needless overhead, but really
@@ -930,6 +965,7 @@ public class HFile {
       }

       // Load block from filesystem.
+      long now = System.currentTimeMillis();
       long onDiskBlockSize;
       if (block == blockIndex.count - 1) {
         // last block! The end of data block is first meta block if there is
@@ -954,6 +990,9 @@ public class HFile {
       buf.limit(buf.limit() - DATABLOCKMAGIC.length);
       buf.rewind();

+      readTime += System.currentTimeMillis() - now;
+      readOps++;
+
       // Cache the block
       if(cacheBlock && cache != null) {
         cache.cacheBlock(name + block, buf.duplicate(), inMemory);
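
A note on the counters used above: readOps, readTime, writeOps and writeTime are plain volatile longs, and "readTime += ..." on a volatile is a read-modify-write rather than an atomic update, so racing threads can occasionally lose increments; the patch accepts that slight imprecision to keep the hot path cheap. Below is a minimal standalone sketch (class and method names are hypothetical, not part of this patch) of the same pull-and-reset idea built on AtomicLong, which avoids lost updates at the cost of a CAS per increment:

import java.util.concurrent.atomic.AtomicLong;

public class LatencyAccumulator {
  private final AtomicLong ops = new AtomicLong();
  private final AtomicLong timeMs = new AtomicLong();

  // Record one operation that took the given number of milliseconds.
  public void record(long millis) {
    ops.incrementAndGet();
    timeMs.addAndGet(millis);
  }

  // Return the operation count since the last call, resetting it to zero.
  public long getAndResetOps() {
    return ops.getAndSet(0);
  }

  // Return the accumulated latency since the last call, resetting it to zero.
  public long getAndResetTimeMs() {
    return timeMs.getAndSet(0);
  }

  public static void main(String[] args) {
    LatencyAccumulator readLatency = new LatencyAccumulator();
    readLatency.record(12);
    readLatency.record(8);
    System.out.println(readLatency.getAndResetOps());    // prints 2
    System.out.println(readLatency.getAndResetTimeMs()); // prints 20
  }
}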

src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java

@@ -22,7 +22,9 @@ import java.lang.management.MemoryUsage;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.metrics.MetricsRate;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Strings;
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
@@ -105,6 +107,24 @@ public class RegionServerMetrics implements Updater {
   public final MetricsIntValue memstoreSizeMB =
     new MetricsIntValue("memstoreSizeMB", registry);

+  /**
+   * filesystem read latency
+   */
+  public final MetricsTimeVaryingRate fsReadLatency =
+    new MetricsTimeVaryingRate("fsReadLatency", registry);
+
+  /**
+   * filesystem write latency
+   */
+  public final MetricsTimeVaryingRate fsWriteLatency =
+    new MetricsTimeVaryingRate("fsWriteLatency", registry);
+
+  /**
+   * filesystem sync latency
+   */
+  public final MetricsTimeVaryingRate fsSyncLatency =
+    new MetricsTimeVaryingRate("fsSyncLatency", registry);
+
   public RegionServerMetrics() {
     MetricsContext context = MetricsUtil.getContext("hbase");
     metricsRecord = MetricsUtil.createRecord(context, "regionserver");
@@ -143,13 +163,26 @@ public class RegionServerMetrics implements Updater {
       this.blockCacheFree.pushMetric(this.metricsRecord);
       this.blockCacheCount.pushMetric(this.metricsRecord);
       this.blockCacheHitRatio.pushMetric(this.metricsRecord);
+
+      // mix in HFile metrics
+      this.fsReadLatency.inc((int)HFile.getReadOps(), HFile.getReadTime());
+      this.fsWriteLatency.inc((int)HFile.getWriteOps(), HFile.getWriteTime());
+      // mix in HLog metrics
+      this.fsWriteLatency.inc((int)HLog.getWriteOps(), HLog.getWriteTime());
+      this.fsSyncLatency.inc((int)HLog.getSyncOps(), HLog.getSyncTime());
+      // push the result
+      this.fsReadLatency.pushMetric(this.metricsRecord);
+      this.fsWriteLatency.pushMetric(this.metricsRecord);
+      this.fsSyncLatency.pushMetric(this.metricsRecord);
     }
     this.metricsRecord.update();
     this.lastUpdate = System.currentTimeMillis();
   }

   public void resetAllMinMax() {
-    // Nothing to do
+    this.atomicIncrementTime.resetMinMax();
+    this.fsReadLatency.resetMinMax();
+    this.fsWriteLatency.resetMinMax();
   }

   /**
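
The metrics above follow a pull model: each metrics period, doUpdates() drains the static HFile and HLog counters exactly once through their get-and-reset accessors and feeds the deltas to MetricsTimeVaryingRate.inc(numOps, time), so every push reflects only the operations completed since the previous push. The following rough stand-in (a simplified sketch, not the real org.apache.hadoop.metrics.util.MetricsTimeVaryingRate) illustrates those interval semantics:

class TimeVaryingRateSketch {
  private int numOps;      // operations accumulated since the last push
  private long totalTime;  // latency accumulated since the last push, in ms

  void inc(int ops, long time) {
    numOps += ops;
    totalTime += time;
  }

  // Emit the interval's average and reset, as pushMetric() does each period.
  void push() {
    long avgMs = (numOps == 0) ? 0 : totalTime / numOps;
    System.out.println("ops=" + numOps + ", avgLatencyMs=" + avgMs);
    numOps = 0;
    totalTime = 0;
  }

  public static void main(String[] args) {
    TimeVaryingRateSketch fsWriteLatency = new TimeVaryingRateSketch();
    fsWriteLatency.inc(40, 1200); // e.g. drained from the HFile counters
    fsWriteLatency.inc(10, 300);  // e.g. drained from the HLog counters
    fsWriteLatency.push();        // prints ops=50, avgLatencyMs=30
  }
}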

src/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java

@@ -204,6 +204,37 @@ public class HLog implements HConstants, Syncable {
     }
   }

+  // For measuring latency of writes
+  private static volatile long writeOps;
+  private static volatile long writeTime;
+  // For measuring latency of syncs
+  private static volatile long syncOps;
+  private static volatile long syncTime;
+
+  public static long getWriteOps() {
+    long ret = writeOps;
+    writeOps = 0;
+    return ret;
+  }
+
+  public static long getWriteTime() {
+    long ret = writeTime;
+    writeTime = 0;
+    return ret;
+  }
+
+  public static long getSyncOps() {
+    long ret = syncOps;
+    syncOps = 0;
+    return ret;
+  }
+
+  public static long getSyncTime() {
+    long ret = syncTime;
+    syncTime = 0;
+    return ret;
+  }
+
   /**
    * Create an edit log at the given <code>dir</code> location.
    *
@@ -757,7 +788,10 @@ public class HLog implements HConstants, Syncable {
     if (this.forceSync ||
         this.unflushedEntries.get() >= this.flushlogentries) {
       try {
+        long now = System.currentTimeMillis();
         this.writer.sync();
+        syncTime += System.currentTimeMillis() - now;
+        syncOps++;
         this.forceSync = false;
         this.unflushedEntries.set(0);
       } catch (IOException e) {
@@ -789,6 +823,8 @@ public class HLog implements HConstants, Syncable {
     this.editsSize.addAndGet(logKey.heapSize() + logEdit.heapSize());
     this.writer.append(new HLog.Entry(logKey, logEdit));
     long took = System.currentTimeMillis() - now;
+    writeTime += took;
+    writeOps++;
     if (took > 1000) {
       LOG.warn(Thread.currentThread().getName() + " took " + took +
         "ms appending an edit to hlog; editcount=" + this.numEntries.get());
@@ -866,9 +902,12 @@ public class HLog implements HConstants, Syncable {
       return;
     }
     synchronized (updateLock) {
+      long now = System.currentTimeMillis();
       this.writer.append(new HLog.Entry(
         makeKey(regionName, tableName, logSeqId, System.currentTimeMillis()),
         completeCacheFlushLogEdit()));
+      writeTime += System.currentTimeMillis() - now;
+      writeOps++;
       this.numEntries.incrementAndGet();
       Long seq = this.lastSeqWritten.get(regionName);
       if (seq != null && logSeqId >= seq.longValue()) {
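
One caveat that applies to all of the timing in this patch: System.currentTimeMillis() is neither monotonic nor fine-grained (its resolution is 10-15ms on some platforms), so an individual sub-millisecond append or sync often records as zero and the latency only becomes visible in aggregate. Below is a sketch of the same instrumentation pattern using the monotonic, nanosecond-resolution System.nanoTime(); the helper names are hypothetical, not part of this patch:

import java.io.IOException;

public final class TimedOp {
  interface IoOp {
    void run() throws IOException;
  }

  // Run op and return its elapsed wall time in milliseconds,
  // measured with the monotonic nanosecond clock.
  static long timeMillis(IoOp op) throws IOException {
    long start = System.nanoTime();
    op.run();
    return (System.nanoTime() - start) / 1000000L;
  }

  public static void main(String[] args) throws IOException {
    long tookMs = timeMillis(() -> { /* e.g. this.writer.sync() */ });
    System.out.println("sync took " + tookMs + "ms");
  }
}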