HBASE-10048 Add hlog number metric in regionserver

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1548768 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-12-07 00:32:59 +00:00
parent d5cca6aebc
commit 3f58873f08
14 changed files with 151 additions and 39 deletions

View File

@@ -117,6 +117,10 @@ public interface MetricsRegionServerSource extends BaseSource {
String REGION_COUNT_DESC = "Number of regions";
String STORE_COUNT = "storeCount";
String STORE_COUNT_DESC = "Number of Stores";
String HLOGFILE_COUNT = "hlogFileCount";
String HLOGFILE_COUNT_DESC = "Number of HLog Files";
String HLOGFILE_SIZE = "hlogFileSize";
String HLOGFILE_SIZE_DESC = "Size of all HLog Files";
String STOREFILE_COUNT = "storeFileCount";
String STOREFILE_COUNT_DESC = "Number of Store Files";
String MEMSTORE_SIZE = "memStoreSize";
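
The two new gauges are published under the names defined above, so on a live regionserver they can be read like any other server-level metric. Below is a minimal sketch that polls them over JMX; the bean name Hadoop:service=HBase,name=RegionServer,sub=Server and the JMX port 10102 are assumptions about a typical hadoop2-metrics deployment, not something this patch defines.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class HLogMetricsProbe {
  public static void main(String[] args) throws Exception {
    // Assumed: a regionserver started with JMX enabled on port 10102 (hypothetical port).
    JMXServiceURL url =
        new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:10102/jmxrmi");
    JMXConnector connector = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      // Assumed bean name for the Hadoop2 metrics system; confirm with jconsole on your cluster.
      ObjectName server = new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=Server");
      Number hlogFileCount = (Number) mbs.getAttribute(server, "hlogFileCount");
      Number hlogFileSize = (Number) mbs.getAttribute(server, "hlogFileSize");
      System.out.println("hlogFileCount=" + hlogFileCount
          + ", hlogFileSize=" + hlogFileSize + " bytes");
    } finally {
      connector.close();
    }
  }
}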

View File

@@ -67,6 +67,16 @@ public interface MetricsRegionServerWrapper {
*/
long getNumStores();
/**
* Get the number of HLog files of this region server.
*/
public long getNumHLogFiles();
/**
* Get the size of HLog files of this region server.
*/
public long getHLogFileSize();
/**
* Get the number of store files hosted on this region server.
*/

View File

@@ -148,6 +148,8 @@ public class MetricsRegionServerSourceImpl
if (rsWrap != null) {
mrb.addGauge(REGION_COUNT, REGION_COUNT_DESC, rsWrap.getNumOnlineRegions())
.addGauge(STORE_COUNT, STORE_COUNT_DESC, rsWrap.getNumStores())
.addGauge(HLOGFILE_COUNT, HLOGFILE_COUNT_DESC, rsWrap.getNumHLogFiles())
.addGauge(HLOGFILE_SIZE, HLOGFILE_SIZE_DESC, rsWrap.getHLogFileSize())
.addGauge(STOREFILE_COUNT, STOREFILE_COUNT_DESC, rsWrap.getNumStoreFiles())
.addGauge(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC, rsWrap.getMemstoreSize())
.addGauge(STOREFILE_SIZE, STOREFILE_SIZE_DESC, rsWrap.getStoreFileSize())

View File

@@ -155,6 +155,8 @@ public class MetricsRegionServerSourceImpl
if (rsWrap != null) {
mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions())
.addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores())
.addGauge(Interns.info(HLOGFILE_COUNT, HLOGFILE_COUNT_DESC), rsWrap.getNumHLogFiles())
.addGauge(Interns.info(HLOGFILE_SIZE, HLOGFILE_SIZE_DESC), rsWrap.getHLogFileSize())
.addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles())
.addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemstoreSize())
.addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize())

View File

@@ -40,6 +40,7 @@ java.lang.management.ManagementFactory;
<li class="active"><a href="#tab_baseStats" data-toggle="tab">Base Stats</a></li>
<li class=""><a href="#tab_memoryStats" data-toggle="tab">Memory</a></li>
<li class=""><a href="#tab_requestStats" data-toggle="tab">Requests</a></li>
<li class=""><a href="#tab_hlogStats" data-toggle="tab">hlogs</a></li>
<li class=""><a href="#tab_storeStats" data-toggle="tab">Storefiles</a></li>
<li class=""><a href="#tab_queueStats" data-toggle="tab">Queues</a></li>
<li class=""><a href="#tab_blockCacheStats" data-toggle="tab">Block Cache</a></li>
@@ -54,6 +55,9 @@ java.lang.management.ManagementFactory;
<div class="tab-pane" id="tab_requestStats">
<& requestStats; mWrap = mWrap &>
</div>
<div class="tab-pane" id="tab_hlogStats">
<& hlogStats; mWrap = mWrap &>
</div>
<div class="tab-pane" id="tab_storeStats">
<& storeStats; mWrap = mWrap &>
</div>
@@ -111,6 +115,24 @@ MetricsRegionServerWrapper mWrap;
</table>
</%def>
<%def hlogStats>
<%args>
MetricsRegionServerWrapper mWrap;
</%args>
<table class="table table-striped">
<tr>
<th>Num. HLog Files</th>
<th>Size of HLog Files (bytes)</th>
</tr>
<tr>
<td><% mWrap.getNumHLogFiles() %></td>
<td><% mWrap.getHLogFileSize() %></td>
</tr>
</table>
</%def>
<%def storeStats>
<%args>
MetricsRegionServerWrapper mWrap;

View File

@@ -50,6 +50,8 @@ class MetricsRegionServerWrapperImpl
private BlockCache blockCache;
private volatile long numStores = 0;
private volatile long numHLogFiles = 0;
private volatile long hlogFileSize = 0;
private volatile long numStoreFiles = 0;
private volatile long memstoreSize = 0;
private volatile long storeFileSize = 0;
@@ -264,6 +266,16 @@ class MetricsRegionServerWrapperImpl
return numStores;
}
@Override
public long getNumHLogFiles() {
return numHLogFiles;
}
@Override
public long getHLogFileSize() {
return hlogFileSize;
}
@Override
public long getNumStoreFiles() {
return numStoreFiles;
@@ -421,6 +433,19 @@ class MetricsRegionServerWrapperImpl
//Copy over computed values so that no thread sees half computed values.
numStores = tempNumStores;
long tempNumHLogFiles = regionServer.hlog.getNumLogFiles();
// meta logs
if (regionServer.hlogForMeta != null) {
tempNumHLogFiles += regionServer.hlogForMeta.getNumLogFiles();
}
numHLogFiles = tempNumHLogFiles;
long tempHlogFileSize = regionServer.hlog.getLogFileSize();
if (regionServer.hlogForMeta != null) {
tempHlogFileSize += regionServer.hlogForMeta.getLogFileSize();
}
hlogFileSize = tempHlogFileSize;
numStoreFiles = tempNumStoreFiles;
memstoreSize = tempMemstoreSize;
storeFileSize = tempStoreFileSize;
@@ -436,5 +461,4 @@ class MetricsRegionServerWrapperImpl
percentFileLocal = tempPercentFileLocal;
}
}
}
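
For readability, here is the snapshot-then-publish pattern the wrapper hunks above follow, reduced to just the new HLog fields: the totals are accumulated in locals (including the optional meta WAL's contribution) and only then assigned to the volatile fields, so a concurrent metrics read never observes a value that is still being summed. The class and parameter names below are illustrative, not the real wrapper.

/**
 * Reduced sketch of the snapshot-then-publish pattern used above
 * (illustrative names; not the actual MetricsRegionServerWrapperImpl).
 */
public class HLogMetricsSnapshot {
  private volatile long numHLogFiles = 0;
  private volatile long hlogFileSize = 0;

  /** Called periodically, mirroring the wrapper's metrics runnable. Meta WAL values may be absent. */
  void refresh(long userWalFiles, long userWalBytes, Long metaWalFiles, Long metaWalBytes) {
    long tempNumHLogFiles = userWalFiles;        // accumulate into locals first
    long tempHlogFileSize = userWalBytes;
    if (metaWalFiles != null && metaWalBytes != null) {
      tempNumHLogFiles += metaWalFiles;          // the meta WAL is optional
      tempHlogFileSize += metaWalBytes;
    }
    numHLogFiles = tempNumHLogFiles;             // volatile writes publish completed values
    hlogFileSize = tempHlogFileSize;
  }

  long getNumHLogFiles() { return numHLogFiles; }
  long getHLogFileSize() { return hlogFileSize; }
}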

View File

@@ -196,6 +196,14 @@ class FSHLog implements HLog, Syncable {
// of the default Hdfs block size.
private final long logrollsize;
/** size of current log */
private long curLogSize = 0;
/**
 * Total size of rolled hlog files not yet archived (the live log is tracked in curLogSize)
 */
private AtomicLong totalLogSize = new AtomicLong(0);
// We synchronize on updateLock to prevent updates and to prevent a log roll
// during an update
// locked during appends
@@ -542,9 +550,12 @@ class FSHLog implements HLog, Syncable {
}
if (oldFile == null) LOG.info("New WAL " + FSUtils.getPath(newPath));
else {
LOG.info("Rolled WAL " + FSUtils.getPath(oldFile) + " with entries=" + oldNumEntries +
", filesize=" + StringUtils.humanReadableInt(this.fs.getFileStatus(oldFile).getLen()) +
"; new WAL " + FSUtils.getPath(newPath));
long oldFileLen = this.fs.getFileStatus(oldFile).getLen();
this.totalLogSize.addAndGet(oldFileLen);
LOG.info("Rolled WAL " + FSUtils.getPath(oldFile) + " with entries="
+ oldNumEntries + ", filesize="
+ StringUtils.humanReadableInt(oldFileLen) + "; new WAL "
+ FSUtils.getPath(newPath));
}
// Tell our listeners that a new log was created
@@ -555,7 +566,7 @@ class FSHLog implements HLog, Syncable {
}
// Can we delete any of the old log files?
if (getNumLogFiles() > 0) {
if (getNumRolledLogFiles() > 0) {
cleanOldLogs();
regionsToFlush = findRegionsToForceFlush();
}
@@ -617,6 +628,7 @@ class FSHLog implements HLog, Syncable {
}
}
for (Path p : logsToArchive) {
this.totalLogSize.addAndGet(-this.fs.getFileStatus(p).getLen());
archiveLogFile(p);
this.hlogSequenceNums.remove(p);
}
@@ -684,7 +696,7 @@ class FSHLog implements HLog, Syncable {
*/
byte[][] findRegionsToForceFlush() throws IOException {
byte [][] regions = null;
int logCount = getNumLogFiles();
int logCount = getNumRolledLogFiles();
if (logCount > this.maxLogs && logCount > 0) {
Map.Entry<Path, Map<byte[], Long>> firstWALEntry =
this.hlogSequenceNums.firstEntry();
@@ -1171,7 +1183,8 @@ class FSHLog implements HLog, Syncable {
if (!this.logRollRunning) {
checkLowReplication();
try {
if (tempWriter.getLength() > this.logrollsize) {
curLogSize = tempWriter.getLength();
if (curLogSize > this.logrollsize) {
requestLogRoll();
}
} catch (IOException x) {
@@ -1335,11 +1348,24 @@ class FSHLog implements HLog, Syncable {
return numEntries.get();
}
/** @return the number of log files in use */
int getNumLogFiles() {
/** @return the number of rolled log files */
public int getNumRolledLogFiles() {
return hlogSequenceNums.size();
}
/** @return the number of log files in use */
@Override
public int getNumLogFiles() {
// +1 for the log file currently in use
return getNumRolledLogFiles() + 1;
}
/** @return the size of log files in use */
@Override
public long getLogFileSize() {
return totalLogSize.get() + curLogSize;
}
@Override
public boolean startCacheFlush(final byte[] encodedRegionName) {
Long oldRegionSeqNum = null;
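
Taken together, the FSHLog hunks above implement a small accounting scheme: totalLogSize gains the final length of each file when it is rolled and gives those bytes back when the file is archived, while curLogSize samples the live writer's length on each sync check; getLogFileSize() is the sum of the two, and getNumLogFiles() is the rolled-file count plus one for the file in use. A reduced sketch of that bookkeeping (illustrative class and method names, not FSHLog itself):

import java.util.concurrent.atomic.AtomicLong;

/** Reduced sketch of the WAL size/count bookkeeping added above (not FSHLog itself). */
public class WalSizeAccounting {
  private final AtomicLong totalLogSize = new AtomicLong(0); // rolled, not yet archived bytes
  private volatile long curLogSize = 0;                      // last sampled length of the live writer
  private volatile int rolledFiles = 0;                      // stands in for hlogSequenceNums.size()

  /** On roll: the finished file's length joins the total. */
  void onRoll(long oldFileLen) {
    totalLogSize.addAndGet(oldFileLen);
    rolledFiles++;
    // As in the patch, curLogSize is simply overwritten by the next onWrite() sample.
  }

  /** On archive: the archived file's bytes leave the total. */
  void onArchive(long archivedFileLen) {
    totalLogSize.addAndGet(-archivedFileLen);
    rolledFiles--;
  }

  /** On each sync check, mirroring curLogSize = tempWriter.getLength(). */
  void onWrite(long liveWriterLen) {
    curLogSize = liveWriterLen;
  }

  long getLogFileSize() {   // rolled bytes still on disk + bytes in the file being written
    return totalLogSize.get() + curLogSize;
  }

  int getNumLogFiles() {    // rolled files + 1 for the file currently in use
    return rolledFiles + 1;
  }
}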

View File

@@ -208,6 +208,16 @@ public interface HLog {
// TODO: Remove. Implementation detail.
long getFilenum();
/**
* @return the number of HLog files
*/
int getNumLogFiles();
/**
* @return the size of HLog files
*/
long getLogFileSize();
// TODO: Log rolling should not be in this interface.
/**
* Roll the log writer. That is, start writing log messages to a new file.

View File

@@ -1508,7 +1508,7 @@ public class TestAdmin {
byte[] value = Bytes.toBytes(v.toString());
HRegionServer regionServer = startAndWriteData("TestLogRolling", value);
LOG.info("after writing there are "
+ HLogUtilsForTests.getNumLogFiles(regionServer.getWAL()) + " log files");
+ HLogUtilsForTests.getNumRolledLogFiles(regionServer.getWAL()) + " log files");
// flush all regions
@@ -1518,7 +1518,7 @@ public class TestAdmin {
r.flushcache();
}
admin.rollHLogWriter(regionServer.getServerName().getServerName());
int count = HLogUtilsForTests.getNumLogFiles(regionServer.getWAL());
int count = HLogUtilsForTests.getNumRolledLogFiles(regionServer.getWAL());
LOG.info("after flushing all regions and rolling logs there are " +
count + " log files");
assertTrue(("actual count: " + count), count <= 2);

View File

@@ -201,4 +201,14 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrapper {
//IGNORED.
}
@Override
public long getNumHLogFiles() {
return 10;
}
@Override
public long getHLogFileSize() {
return 1024000;
}
}

View File

@@ -59,6 +59,8 @@ public class TestMetricsRegionServer {
HELPER.assertGauge("regionServerStartTime", 100, serverSource);
HELPER.assertGauge("regionCount", 101, serverSource);
HELPER.assertGauge("storeCount", 2, serverSource);
HELPER.assertGauge("hlogFileCount", 10, serverSource);
HELPER.assertGauge("hlogFileSize", 1024000, serverSource);
HELPER.assertGauge("storeFileCount", 300, serverSource);
HELPER.assertGauge("memstoreSize", 1025, serverSource);
HELPER.assertGauge("storeFileSize", 1900, serverSource);

View File

@@ -33,8 +33,8 @@ public class HLogUtilsForTests {
* @param log
* @return
*/
public static int getNumLogFiles(HLog log) {
return ((FSHLog) log).getNumLogFiles();
public static int getNumRolledLogFiles(HLog log) {
return ((FSHLog) log).getNumRolledLogFiles();
}
public static int getNumEntries(HLog log) {

View File

@@ -653,12 +653,12 @@ public class TestHLog {
// Before HBASE-3198 it used to delete it
addEdits(log, hri, tableName, 1, sequenceId);
log.rollWriter();
assertEquals(1, ((FSHLog) log).getNumLogFiles());
assertEquals(1, ((FSHLog) log).getNumRolledLogFiles());
// See if there's anything wrong with more than 1 edit
addEdits(log, hri, tableName, 2, sequenceId);
log.rollWriter();
assertEquals(2, ((FSHLog) log).getNumLogFiles());
assertEquals(2, ((FSHLog) log).getNumRolledLogFiles());
// Now mix edits from 2 regions, still no flushing
addEdits(log, hri, tableName, 1, sequenceId);
@@ -666,7 +666,7 @@ public class TestHLog {
addEdits(log, hri, tableName, 1, sequenceId);
addEdits(log, hri2, tableName2, 1, sequenceId);
log.rollWriter();
assertEquals(3, ((FSHLog) log).getNumLogFiles());
assertEquals(3, ((FSHLog) log).getNumRolledLogFiles());
// Flush the first region, we expect to see the first two files getting
// archived. We need to append something or writer won't be rolled.
@@ -674,7 +674,7 @@ public class TestHLog {
log.startCacheFlush(hri.getEncodedNameAsBytes());
log.completeCacheFlush(hri.getEncodedNameAsBytes());
log.rollWriter();
assertEquals(2, ((FSHLog) log).getNumLogFiles());
assertEquals(2, ((FSHLog) log).getNumRolledLogFiles());
// Flush the second region, which removes all the remaining output files
// since the oldest was completely flushed and the two others only contain
@@ -683,7 +683,7 @@ public class TestHLog {
log.startCacheFlush(hri2.getEncodedNameAsBytes());
log.completeCacheFlush(hri2.getEncodedNameAsBytes());
log.rollWriter();
assertEquals(0, ((FSHLog) log).getNumLogFiles());
assertEquals(0, ((FSHLog) log).getNumRolledLogFiles());
} finally {
if (log != null) log.closeAndDelete();
}
@@ -994,7 +994,7 @@ public class TestHLog {
TableName table2 = TableName.valueOf("t2");
HLog hlog = HLogFactory.createHLog(fs, FSUtils.getRootDir(conf), dir.toString(), conf);
try {
assertEquals(0, ((FSHLog) hlog).getNumLogFiles());
assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
HRegionInfo hri1 = new HRegionInfo(table1, HConstants.EMPTY_START_ROW,
HConstants.EMPTY_END_ROW);
HRegionInfo hri2 = new HRegionInfo(table2, HConstants.EMPTY_START_ROW,
@@ -1009,26 +1009,26 @@ public class TestHLog {
addEdits(hlog, hri1, table1, 1, sequenceId1);
hlog.rollWriter();
// assert that the wal is rolled
assertEquals(1, ((FSHLog) hlog).getNumLogFiles());
assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
// add edits in the second wal file, and roll writer.
addEdits(hlog, hri1, table1, 1, sequenceId1);
hlog.rollWriter();
// assert that the wal is rolled
assertEquals(2, ((FSHLog) hlog).getNumLogFiles());
assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
// add a waledit to table1, and flush the region.
addEdits(hlog, hri1, table1, 3, sequenceId1);
flushRegion(hlog, hri1.getEncodedNameAsBytes());
// roll log; all old logs should be archived.
hlog.rollWriter();
assertEquals(0, ((FSHLog) hlog).getNumLogFiles());
assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
// add an edit to table2, and roll writer
addEdits(hlog, hri2, table2, 1, sequenceId2);
hlog.rollWriter();
assertEquals(1, ((FSHLog) hlog).getNumLogFiles());
assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
// add edits for table1, and roll writer
addEdits(hlog, hri1, table1, 2, sequenceId1);
hlog.rollWriter();
assertEquals(2, ((FSHLog) hlog).getNumLogFiles());
assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
// add edits for table2, and flush hri1.
addEdits(hlog, hri2, table2, 2, sequenceId2);
flushRegion(hlog, hri1.getEncodedNameAsBytes());
@@ -1038,12 +1038,12 @@ public class TestHLog {
// log3: region2 (unflushed)
// roll the writer; log2 should be archived.
hlog.rollWriter();
assertEquals(2, ((FSHLog) hlog).getNumLogFiles());
assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
// flush region2, and all logs should be archived.
addEdits(hlog, hri2, table2, 2, sequenceId2);
flushRegion(hlog, hri2.getEncodedNameAsBytes());
hlog.rollWriter();
assertEquals(0, ((FSHLog) hlog).getNumLogFiles());
assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
} finally {
if (hlog != null) hlog.close();
}
@@ -1079,7 +1079,7 @@ public class TestHLog {
addEdits(hlog, hri1, t1, 2, sequenceId1);
hlog.rollWriter();
// with above rollWriter call, the max logs limit is reached.
assertTrue(((FSHLog) hlog).getNumLogFiles() == 2);
assertTrue(((FSHLog) hlog).getNumRolledLogFiles() == 2);
// get the regions to flush; since there is only one region in the oldest wal, it should
// return only one region.
@@ -1097,18 +1097,18 @@ public class TestHLog {
flushRegion(hlog, hri1.getEncodedNameAsBytes());
hlog.rollWriter();
// only one wal should remain now (that is for the second region).
assertEquals(1, ((FSHLog) hlog).getNumLogFiles());
assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
// flush the second region
flushRegion(hlog, hri2.getEncodedNameAsBytes());
hlog.rollWriter(true);
// no wal should remain now.
assertEquals(0, ((FSHLog) hlog).getNumLogFiles());
assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
// add edits both to region 1 and region 2, and roll.
addEdits(hlog, hri1, t1, 2, sequenceId1);
addEdits(hlog, hri2, t2, 2, sequenceId2);
hlog.rollWriter();
// add edits and roll the writer, to reach the max logs limit.
assertEquals(1, ((FSHLog) hlog).getNumLogFiles());
assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
addEdits(hlog, hri1, t1, 2, sequenceId1);
hlog.rollWriter();
// it should return two regions to flush, as the oldest wal file has entries
@@ -1119,14 +1119,14 @@ public class TestHLog {
flushRegion(hlog, hri1.getEncodedNameAsBytes());
flushRegion(hlog, hri2.getEncodedNameAsBytes());
hlog.rollWriter(true);
assertEquals(0, ((FSHLog) hlog).getNumLogFiles());
assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
// Add an edit to region1, and roll the wal.
addEdits(hlog, hri1, t1, 2, sequenceId1);
// tests partial flush: roll on a partial flush, and ensure that wal is not archived.
hlog.startCacheFlush(hri1.getEncodedNameAsBytes());
hlog.rollWriter();
hlog.completeCacheFlush(hri1.getEncodedNameAsBytes());
assertEquals(1, ((FSHLog) hlog).getNumLogFiles());
assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
} finally {
if (hlog != null) hlog.close();
}

View File

@@ -221,7 +221,7 @@ public class TestLogRolling {
public void testLogRolling() throws Exception {
this.tableName = getName();
startAndWriteData();
LOG.info("after writing there are " + ((FSHLog) log).getNumLogFiles() + " log files");
LOG.info("after writing there are " + ((FSHLog) log).getNumRolledLogFiles() + " log files");
// flush all regions
@@ -234,9 +234,9 @@ public class TestLogRolling {
// Now roll the log
log.rollWriter();
int count = ((FSHLog) log).getNumLogFiles();
int count = ((FSHLog) log).getNumRolledLogFiles();
LOG.info("after flushing all regions and rolling logs there are " +
((FSHLog) log).getNumLogFiles() + " log files");
((FSHLog) log).getNumRolledLogFiles() + " log files");
assertTrue(("actual count: " + count), count <= 2);
}
@@ -606,12 +606,12 @@ public class TestLogRolling {
admin.flush(table2.getTableName());
}
doPut(table2, 3); // don't flush yet, or compaction might trigger before we roll WAL
assertEquals("Should have no WAL after initial writes", 0, fshLog.getNumLogFiles());
assertEquals("Should have no WAL after initial writes", 0, fshLog.getNumRolledLogFiles());
assertEquals(2, s.getStorefilesCount());
// Roll the log and compact table2, to have compaction record in the 2nd WAL.
fshLog.rollWriter();
assertEquals("Should have WAL; one table is not flushed", 1, fshLog.getNumLogFiles());
assertEquals("Should have WAL; one table is not flushed", 1, fshLog.getNumRolledLogFiles());
admin.flush(table2.getTableName());
region.compactStores();
// Wait for compaction in case if flush triggered it before us.
@@ -624,13 +624,13 @@ public class TestLogRolling {
// Write some value to the table so the WAL cannot be deleted until table is flushed.
doPut(table, 0); // Now 2nd WAL will have compaction record for table2 and put for table.
fshLog.rollWriter(); // 1st WAL deleted, 2nd not deleted yet.
assertEquals("Should have WAL; one table is not flushed", 1, fshLog.getNumLogFiles());
assertEquals("Should have WAL; one table is not flushed", 1, fshLog.getNumRolledLogFiles());
// Flush table to make latest WAL obsolete; write another record, and roll again.
admin.flush(table.getTableName());
doPut(table, 1);
fshLog.rollWriter(); // Now 2nd WAL is deleted and 3rd is added.
assertEquals("Should have 1 WALs at the end", 1, fshLog.getNumLogFiles());
assertEquals("Should have 1 WALs at the end", 1, fshLog.getNumRolledLogFiles());
table.close();
table2.close();