HBASE-5862 After Region Close remove the Operation Metrics

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1330997 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2012-04-26 17:59:58 +00:00
parent 6d848a1943
commit f854df4223
6 changed files with 117 additions and 4 deletions


@@ -966,6 +966,7 @@ public class HRegion implements HeapSize { // , Writable{
status.setStatus("Running coprocessor post-close hooks");
this.coprocessorHost.postClose(abort);
}
this.opMetrics.closeMetrics();
status.markComplete("Closed");
LOG.info("Closed " + this);
return result;


@@ -2680,6 +2680,14 @@ public class HRegionServer extends RegionServer
public boolean removeFromOnlineRegions(final String encodedName) {
HRegion toReturn = null;
toReturn = this.onlineRegions.remove(encodedName);
//Clear all of the dynamic metrics as they are now probably useless.
//This is a clear because dynamic metrics could include metrics per cf and
//per hfile. Figuring out which cfs, hfiles, and regions are still relevant to
//this region server would be an onerous task. Instead just clear everything
//and on the next tick of the metrics everything that is still relevant will be
//re-added.
this.dynamicMetrics.clear();
return toReturn != null;
}
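A minimal sketch of the trade-off the comment above describes, using an invented registry class and metric keys rather than the real RegionServerDynamicMetrics: clearing the whole registry on region close is cheap, and any metric that is still relevant simply reappears the next time it is incremented.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical stand-in for the region server's dynamic metrics registry.
// Keys encode table/cf/region names, so most keys become stale once a region closes.
public class DynamicMetricsSketch {
  private final ConcurrentMap<String, AtomicLong> metrics =
      new ConcurrentHashMap<String, AtomicLong>();

  // Hot path (put/get/delete): the key is created on first use.
  public void incr(String key, long delta) {
    AtomicLong counter = metrics.get(key);
    if (counter == null) {
      AtomicLong existing = metrics.putIfAbsent(key, counter = new AtomicLong());
      if (existing != null) {
        counter = existing;
      }
    }
    counter.addAndGet(delta);
  }

  // Region removed from the online set: wipe everything instead of working out
  // which table/cf/hfile keys are still relevant on this server.
  public void clear() {
    metrics.clear();
  }

  public static void main(String[] args) {
    DynamicMetricsSketch dynamicMetrics = new DynamicMetricsSketch();
    dynamicMetrics.incr("tbl.t1.cf.f1.region.abc.get_num_ops", 1);
    dynamicMetrics.clear();
    // Still-relevant metrics simply reappear on the next update ("tick").
    dynamicMetrics.incr("tbl.t1.cf.f1.region.def.get_num_ops", 1);
    System.out.println(dynamicMetrics.metrics.keySet());
  }
}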


@@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
/**
@@ -173,6 +172,13 @@ public class OperationMetrics {
doUpdateTimeVarying(columnFamilies, DELETE_KEY, value);
}
/**
* This deletes all old metrics this instance has ever created or updated.
*/
public void closeMetrics() {
RegionMetricsStorage.clear();
}
/**
* Method to send updates for cf and region metrics. This is the normal method
* used if the naming of stats and CF's are in line with put/delete/multiput.
@@ -199,7 +205,8 @@
private void doSafeIncTimeVarying(String prefix, String key, long value) {
if (conf.getBoolean(CONF_KEY, true)) {
if (prefix != null && !prefix.isEmpty() && key != null && !key.isEmpty()) {
String m = prefix + key;
RegionMetricsStorage.incrTimeVaryingMetric(m, value);
}
}
}
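For context, a compact sketch (invented class name and boolean flag, standing in for OperationMetrics and conf.getBoolean(CONF_KEY, true)) of the naming convention doSafeIncTimeVarying relies on: the caller supplies a region/column-family prefix plus an operation key such as "get_", the two are concatenated into the metric name, and the increment is skipped when metrics are disabled or either part is empty.

// Sketch only; names here are illustrative, not the real OperationMetrics signatures.
public class OperationMetricsNamingSketch {
  private final boolean metricsEnabled;

  public OperationMetricsNamingSketch(boolean metricsEnabled) {
    this.metricsEnabled = metricsEnabled;
  }

  public void safeIncTimeVarying(String prefix, String key, long value) {
    if (!metricsEnabled) {
      return;
    }
    if (prefix != null && !prefix.isEmpty() && key != null && !key.isEmpty()) {
      // e.g. "tbl.t1.cf.f1." + "get_" -> "tbl.t1.cf.f1.get_"
      String metricName = prefix + key;
      incrTimeVaryingMetric(metricName, value);
    }
  }

  // Stand-in for RegionMetricsStorage.incrTimeVaryingMetric(String, long).
  private void incrTimeVaryingMetric(String name, long amt) {
    System.out.println(name + " += " + amt);
  }
}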


@@ -127,4 +127,12 @@ public class RegionMetricsStorage {
return m.get();
}
/**
* Clear all copies of the metrics this stores.
*/
public static void clear() {
timeVaryingMetrics.clear();
numericMetrics.clear();
numericPersistentMetrics.clear();
}
}
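Taken together, the additions above mean a region close empties statically held, shared metric maps. A rough sketch of that shape, where the map value types and helper method are assumptions for illustration and only the field names (timeVaryingMetrics, numericMetrics, numericPersistentMetrics) come from the patch:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative shape of RegionMetricsStorage as implied by clear(); not the real signatures.
public final class RegionMetricsStorageSketch {
  private static final ConcurrentMap<String, AtomicLong> timeVaryingMetrics =
      new ConcurrentHashMap<String, AtomicLong>();
  private static final ConcurrentMap<String, AtomicLong> numericMetrics =
      new ConcurrentHashMap<String, AtomicLong>();
  private static final ConcurrentMap<String, AtomicLong> numericPersistentMetrics =
      new ConcurrentHashMap<String, AtomicLong>();

  private RegionMetricsStorageSketch() {
  }

  public static void incrTimeVaryingMetric(String key, long amt) {
    AtomicLong m = timeVaryingMetrics.get(key);
    if (m == null) {
      AtomicLong existing = timeVaryingMetrics.putIfAbsent(key, m = new AtomicLong());
      if (existing != null) {
        m = existing;
      }
    }
    m.addAndGet(amt);
  }

  // Because the maps are static and shared, clearing them affects every region on the
  // server; regions that are still open repopulate their entries on their next metric
  // update, the same trade-off HRegionServer makes for the dynamic metrics above.
  public static void clear() {
    timeVaryingMetrics.clear();
    numericMetrics.clear();
    numericPersistentMetrics.clear();
  }
}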


@@ -20,7 +20,9 @@
package org.apache.hadoop.hbase.regionserver.metrics;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicInteger;
@@ -52,6 +54,8 @@ import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
*/
@InterfaceAudience.Private
public class RegionServerDynamicMetrics implements Updater {
private static final String UNABLE_TO_CLEAR = "Unable to clear RegionServerDynamicMetrics";
private MetricsRecord metricsRecord;
private MetricsContext context;
private final RegionServerDynamicStatistics rsDynamicStatistics;
@@ -59,6 +63,10 @@ public class RegionServerDynamicMetrics implements Updater {
private static final Log LOG =
LogFactory.getLog(RegionServerDynamicStatistics.class);
private boolean reflectionInitialized = false;
private Field recordMetricMapField;
private Field registryMetricMapField;
/**
* The metrics variables are public:
* - they can be set directly by calling their set/inc methods
@@ -127,6 +135,60 @@ public class RegionServerDynamicMetrics implements Updater {
}
}
/**
* Clear all metrics this exposes.
* Uses reflection to clear them from hadoop metrics side as well.
*/
@SuppressWarnings("rawtypes")
public void clear() {
// If this is the first clear, use reflection to get the two maps that hold copies of our
// metrics on the hadoop metrics side. We have to use reflection because there is no way
// to remove metrics on the hadoop side. If we can't get them, then clearing old metrics
// is not possible and bailing out early is our best option.
if (!this.reflectionInitialized) {
this.reflectionInitialized = true;
try {
this.recordMetricMapField = this.metricsRecord.getClass().getDeclaredField("metricTable");
this.recordMetricMapField.setAccessible(true);
} catch (SecurityException e) {
LOG.debug(UNABLE_TO_CLEAR);
return;
} catch (NoSuchFieldException e) {
LOG.debug(UNABLE_TO_CLEAR);
return;
}
try {
this.registryMetricMapField = this.registry.getClass().getDeclaredField("metricsList");
this.registryMetricMapField.setAccessible(true);
} catch (SecurityException e) {
LOG.debug(UNABLE_TO_CLEAR);
return;
} catch (NoSuchFieldException e) {
LOG.debug(UNABLE_TO_CLEAR);
return;
}
}
//If we found both fields, then try to clear the maps.
if (this.recordMetricMapField != null && this.registryMetricMapField != null) {
try {
Map recordMap = (Map) this.recordMetricMapField.get(this.metricsRecord);
recordMap.clear();
Map registryMap = (Map) this.registryMetricMapField.get(this.registry);
registryMap.clear();
} catch (IllegalArgumentException e) {
LOG.debug(UNABLE_TO_CLEAR);
} catch (IllegalAccessException e) {
LOG.debug(UNABLE_TO_CLEAR);
}
} else {
LOG.debug(UNABLE_TO_CLEAR);
}
}
/**
* Push the metrics to the monitoring subsystem on doUpdate() call.
* @param context ctx
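The reflection-based clear() above reaches into two private hadoop-metrics fields ("metricTable" on the metrics record and "metricsList" on the registry) because those objects expose no public remove API. A self-contained illustration of the same pattern, against an invented holder class rather than the real hadoop classes: look the Field up once, cache it, and treat any reflection failure as "clearing unavailable" instead of an error.

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;

// Invented stand-in for a hadoop-metrics object whose internal map has no
// public remove/clear method.
class MetricsHolderExample {
  private final Map<String, Long> metricTable = new HashMap<String, Long>();
  void put(String name, long value) { metricTable.put(name, value); }
  int size() { return metricTable.size(); }
}

public class ReflectionClearSketch {
  private boolean lookupAttempted = false;
  private Field metricTableField;          // cached after the first successful lookup

  @SuppressWarnings("rawtypes")
  public void clear(MetricsHolderExample holder) {
    if (!lookupAttempted) {
      lookupAttempted = true;
      try {
        metricTableField = MetricsHolderExample.class.getDeclaredField("metricTable");
        metricTableField.setAccessible(true);
      } catch (NoSuchFieldException e) {
        return;                            // field renamed or missing: clearing unavailable
      } catch (SecurityException e) {
        return;                            // access denied: clearing unavailable
      }
    }
    if (metricTableField == null) {
      return;                              // earlier lookup failed; nothing to do
    }
    try {
      ((Map) metricTableField.get(holder)).clear();
    } catch (IllegalAccessException e) {
      // Leave the metrics in place rather than let a close path fail.
    }
  }

  public static void main(String[] args) {
    MetricsHolderExample holder = new MetricsHolderExample();
    holder.put("get_num_ops", 42L);
    new ReflectionClearSketch().clear(holder);
    System.out.println("entries after clear: " + holder.size());   // prints 0
  }
}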


@@ -49,6 +49,7 @@ import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test metrics incremented on region server operations.
*/
@@ -196,6 +197,32 @@ public class TestRegionServerMetrics {
}
@Test
public void testRemoveRegionMetrics() throws IOException, InterruptedException {
String cf = "REMOVECF";
HTable hTable = TEST_UTIL.createTable(TABLE_NAME.getBytes(), cf.getBytes());
HRegionInfo[] regionInfos =
hTable.getRegionLocations().keySet()
.toArray(new HRegionInfo[hTable.getRegionLocations().keySet().size()]);
String regionName = regionInfos[0].getEncodedName();
// Do some operations so there are metrics.
Put pOne = new Put("TEST".getBytes());
pOne.add(cf.getBytes(), "test".getBytes(), "test".getBytes());
hTable.put(pOne);
Get g = new Get("TEST".getBytes());
g.addFamily(cf.getBytes());
hTable.get(g);
assertTimeVaryingMetricCount(1, TABLE_NAME, cf, regionName, "get_");
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
admin.disableTable(TABLE_NAME.getBytes());
admin.deleteTable(TABLE_NAME.getBytes());
assertTimeVaryingMetricCount(0, TABLE_NAME, cf, regionName, "get_");
}
@Test
public void testMultipleRegions() throws IOException, InterruptedException {