HBASE-13857 Slow WAL Append count in ServerMetricsTmpl.jamon is hardcoded to zero (Vrishal Kulkarni)

This commit is contained in:
stack 2015-12-03 17:00:29 -08:00
parent 67ba6598b1
commit 51503efcf0
7 changed files with 42 additions and 5 deletions

View File

@@ -70,16 +70,21 @@ public interface MetricsRegionServerWrapper {
/**
* Get the number of WAL files of this region server.
*/
public long getNumWALFiles();
long getNumWALFiles();
/**
* Get the size of WAL files of this region server.
*/
public long getWALFileSize();
long getWALFileSize();
/**
* Get the number of store files hosted on this region server.
* Get the number of WAL files with slow appends for this region server.
*/
long getNumWALSlowAppend();
/**
* Get the number of store files hosted on this region server.
*/
long getNumStoreFiles();
/**

View File

@@ -92,4 +92,5 @@ public interface MetricsWALSource extends BaseSource {
/**
 * Increment the counter of log rolls requested because of low replication.
 */
void incrementLowReplicationLogRoll();
/**
 * Get the number of WAL appends that were observed to be slow so far.
 */
long getSlowAppendCount();
}

View File

@@ -98,4 +98,9 @@ public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSo
/**
 * Increments the low-replication log-roll counter metric.
 */
public void incrementLowReplicationLogRoll() {
lowReplicationLogRollRequested.incr(); // bump the underlying metrics counter
}
/**
 * @return the current value of the slow-append counter metric
 */
@Override
public long getSlowAppendCount() {
return slowAppendCount.value();
}
}

View File

@@ -84,7 +84,7 @@ java.lang.management.ManagementFactory;
<td><% mWrap.getNumOnlineRegions() %></td>
<td><% mWrap.getPercentFileLocal() %></td>
<td><% mWrap.getPercentFileLocalSecondaryRegions() %></td>
<td><% 0 %></td>
<td><% mWrap.getNumWALSlowAppend() %></td>
</tr>
</table>
</%def>

View File

@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.mob.MobCacheConfig;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.WALProvider;
@@ -54,6 +55,7 @@ class MetricsRegionServerWrapperImpl
private static final Log LOG = LogFactory.getLog(MetricsRegionServerWrapperImpl.class);
private final HRegionServer regionServer;
private final MetricsWALSource metricsWALSource;
private BlockCache blockCache;
private MobFileCache mobFileCache;
@@ -121,6 +123,7 @@ class MetricsRegionServerWrapperImpl
this.runnable = new RegionServerMetricsWrapperRunnable();
this.executor.scheduleWithFixedDelay(this.runnable, this.period, this.period,
TimeUnit.MILLISECONDS);
this.metricsWALSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
try {
this.dfsHedgedReadMetrics = FSUtils.getDFSHedgedReadMetrics(regionServer.getConfiguration());
@@ -374,6 +377,11 @@ class MetricsRegionServerWrapperImpl
/**
 * @return the cached total size of WAL files on this region server.
 *         NOTE(review): walFileSize is a field updated elsewhere — presumably by the
 *         periodic metrics runnable scheduled in the constructor; confirm in full source.
 */
public long getWALFileSize() {
return walFileSize;
}
/**
 * @return the number of slow WAL appends, read live from the shared WAL metrics source
 *         (no caching — delegates straight to {@code metricsWALSource}).
 */
@Override
public long getNumWALSlowAppend() {
return metricsWALSource.getSlowAppendCount();
}
@Override
public long getNumStoreFiles() {

View File

@@ -235,6 +235,11 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
return 1024000;
}
// Test stub: always reports zero slow WAL appends.
@Override
public long getNumWALSlowAppend() {
return 0;
}
@Override
public long getFlushedCellsCount() {
return 100000000;

View File

@@ -26,6 +26,7 @@ import org.junit.experimental.categories.Category;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
@@ -53,4 +54,16 @@ public class TestMetricsWAL {
metricsWAL.postSync(nanos, 1);
verify(source, times(1)).incrementSyncTime(145);
}
@Test
public void testSlowAppend() throws Exception {
  MetricsWALSource walSource = new MetricsWALSourceImpl();
  MetricsWAL wal = new MetricsWAL(walSource);
  // One append below the slow threshold (900), then two above it (1010, 2000).
  for (long took : new long[] {900, 1010, 2000}) {
    wal.postAppend(1, took);
  }
  // Only the two appends over the threshold should register as slow.
  assertEquals(2, walSource.getSlowAppendCount());
}
}