HBASE-8711 Requests count is completely off (James Kinley via JD)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1526754 13f79535-47bb-0310-9956-ffa450edef68
Jean-Daniel Cryans 2013-09-27 00:04:03 +00:00
parent 2782846450
commit 7cb75d865e
2 changed files with 23 additions and 10 deletions

hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java

@@ -848,7 +848,11 @@ public final class HConstants {
   /** Temporary directory used for table creation and deletion */
   public static final String HBASE_TEMP_DIRECTORY = ".tmp";
+  /**
+   * The period (in milliseconds) between computing region server point in time metrics
+   */
+  public static final String REGIONSERVER_METRICS_PERIOD = "hbase.regionserver.metrics.period";
+  public static final long DEFAULT_REGIONSERVER_METRICS_PERIOD = 5000;
   /** Directories that are not HBase table directories */
   public static final List<String> HBASE_NON_TABLE_DIRS =
     Collections.unmodifiableList(Arrays.asList(new String[] { HREGION_LOGDIR_NAME,
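
The two new HConstants entries make the metrics refresh period configurable. A minimal sketch (not part of the patch) of how a caller could read the setting through a standard HBase Configuration; the class name MetricsPeriodExample is hypothetical and only for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class MetricsPeriodExample {
  public static void main(String[] args) {
    // Reads hbase.regionserver.metrics.period from the site configuration,
    // falling back to the 5000 ms default introduced by this patch.
    Configuration conf = HBaseConfiguration.create();
    long periodMs = conf.getLong(HConstants.REGIONSERVER_METRICS_PERIOD,
        HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD);
    System.out.println("Region server metrics period: " + periodMs + " ms");
  }
}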

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java

@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hbase.regionserver;
+import java.util.Collection;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -31,10 +36,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.metrics2.MetricsExecutor;
-import java.util.Collection;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
 /**
  * Impl for exposing HRegionServer Information through Hadoop's metrics 2 system.
  */
@@ -44,8 +45,6 @@ class MetricsRegionServerWrapperImpl
   public static final Log LOG = LogFactory.getLog(MetricsRegionServerWrapperImpl.class);
-  public static final int PERIOD = 15;
   private final HRegionServer regionServer;
   private BlockCache blockCache;
@@ -69,14 +68,24 @@ class MetricsRegionServerWrapperImpl
   private CacheStats cacheStats;
   private ScheduledExecutorService executor;
   private Runnable runnable;
+  private long period;
   public MetricsRegionServerWrapperImpl(final HRegionServer regionServer) {
     this.regionServer = regionServer;
     initBlockCache();
+    this.period =
+        regionServer.conf.getLong(HConstants.REGIONSERVER_METRICS_PERIOD,
+          HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD);
     this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
     this.runnable = new RegionServerMetricsWrapperRunnable();
-    this.executor.scheduleWithFixedDelay(this.runnable, PERIOD, PERIOD, TimeUnit.SECONDS);
+    this.executor.scheduleWithFixedDelay(this.runnable, this.period, this.period,
+      TimeUnit.MILLISECONDS);
+    if (LOG.isInfoEnabled()) {
+      LOG.info("Computing regionserver metrics every " + this.period + " milliseconds");
+    }
   }
   /**
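
The scheduling change in the hunk above is where the unit switch matters: the removed PERIOD constant was a count of seconds, while the new period field is in milliseconds, so the TimeUnit handed to the executor has to change with it (a 5000 ms value passed with TimeUnit.SECONDS would stretch a 5 second period to 5000 seconds, roughly 83 minutes). A standalone sketch of the same pattern with a plain ScheduledExecutorService; the class name and the 5000 ms literal only mirror the defaults above and nothing here is HBase-specific:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicRefreshSketch {
  public static void main(String[] args) {
    long periodMs = 5000L; // mirrors DEFAULT_REGIONSERVER_METRICS_PERIOD
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    Runnable refresh = new Runnable() {
      @Override
      public void run() {
        System.out.println("refresh point-in-time metrics");
      }
    };
    // The delay values are interpreted in the TimeUnit given as the last argument,
    // so a millisecond period must be scheduled with TimeUnit.MILLISECONDS.
    executor.scheduleWithFixedDelay(refresh, periodMs, periodMs, TimeUnit.MILLISECONDS);
  }
}
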
@@ -398,12 +407,12 @@ class MetricsRegionServerWrapperImpl
       // assume that it took PERIOD seconds to start the executor.
       // this is a guess but it's a pretty good one.
       if (lastRan == 0) {
-        lastRan = currentTime - (PERIOD*1000);
+        lastRan = currentTime - (period * 1000);
       }
       //If we've time traveled keep the last requests per second.
-      if ((currentTime - lastRan) > 10) {
+      if ((currentTime - lastRan) > 0) {
         long currentRequestCount = getTotalRequestCount();
         requestsPerSecond = (currentRequestCount - lastRequestCount) / ((currentTime - lastRan) / 1000.0);
         lastRequestCount = currentRequestCount;
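
In the hunk above the rate formula itself is unchanged; the patch only loosens the guard so any positive elapsed time counts, and derives the bootstrap value of lastRan from the configured period instead of the removed constant. A small self-contained sketch of the computation with made-up counter values (not taken from the patch), showing that the result is requests per second regardless of the refresh period:

public class RequestsPerSecondSketch {
  public static void main(String[] args) {
    long lastRequestCount = 10000L;     // counter value at the previous run
    long currentRequestCount = 10750L;  // counter value now
    long lastRan = 100000L;             // ms timestamp of the previous run
    long currentTime = 105000L;         // ms, i.e. 5 seconds later

    // Same shape as the patched code: delta requests over delta seconds.
    if ((currentTime - lastRan) > 0) {
      double requestsPerSecond =
          (currentRequestCount - lastRequestCount) / ((currentTime - lastRan) / 1000.0);
      System.out.println(requestsPerSecond); // (10750 - 10000) / 5.0 = 150.0
    }
  }
}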