HBASE-7220 Creating a table with 3000 regions on 2 nodes fails after 1 hour
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1415016 13f79535-47bb-0310-9956-ffa450edef68
parent eded2c4881
commit 7dc5908e6b
JmxCacheBuster.java (org.apache.hadoop.metrics2.impl), first of two copies changed by this commit

@@ -20,7 +20,13 @@ package org.apache.hadoop.metrics2.impl;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsExecutor;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsExecutorImpl;


import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

/**
 * JMX caches the beans that have been exported; even after the values are removed from hadoop's
@@ -32,11 +38,27 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 */
public class JmxCacheBuster {
  private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
  private static Object lock = new Object();
  private static ScheduledFuture fut = null;
  private static MetricsExecutor executor = new MetricsExecutorImpl();

  /**
   * For JMX to forget about all previously exported metrics.
   */
  public static void clearJmxCache() {

    //If there are more then 100 ms before the executor will run then everything should be merged.
    if (fut == null || (!fut.isDone() && fut.getDelay(TimeUnit.MILLISECONDS) > 100)) return;

    synchronized (lock) {
      fut = executor.getExecutor().schedule(new JmxCacheBusterRunnable(), 5, TimeUnit.SECONDS);
    }
  }

  static class JmxCacheBusterRunnable implements Runnable {

    @Override
    public void run() {
      LOG.trace("Clearing JMX mbean cache.");

      // This is pretty extreme but it's the best way that
@@ -50,3 +72,4 @@ public class JmxCacheBuster {
      }
    }
  }
}
JmxCacheBuster.java (second copy of the same class)

@@ -20,7 +20,12 @@ package org.apache.hadoop.metrics2.impl;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsExecutor;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsExecutorImpl;

import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

/**
 * JMX caches the beans that have been exported; even after the values are removed from hadoop's
@@ -32,11 +37,27 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 */
public class JmxCacheBuster {
  private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
  private static Object lock = new Object();
  private static ScheduledFuture fut = null;
  private static MetricsExecutor executor = new MetricsExecutorImpl();

  /**
   * For JMX to forget about all previously exported metrics.
   */
  public static void clearJmxCache() {

    //If there are more then 100 ms before the executor will run then everything should be merged.
    if (fut == null || (!fut.isDone() && fut.getDelay(TimeUnit.MILLISECONDS) > 100)) return;

    synchronized (lock) {
      fut = executor.getExecutor().schedule(new JmxCacheBusterRunnable(), 5, TimeUnit.SECONDS);
    }
  }

  static class JmxCacheBusterRunnable implements Runnable {

    @Override
    public void run() {
      LOG.trace("Clearing JMX mbean cache.");

      // This is pretty extreme but it's the best way that
@@ -52,3 +73,4 @@ public class JmxCacheBuster {
      }
    }
  }
}
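Both copies of JmxCacheBuster make the same change: instead of busting the JMX bean cache on every metrics update, clearJmxCache() now schedules the actual work a few seconds out on a shared executor, and a request that arrives while a run is still pending more than 100 ms in the future is merged into it. Below is a minimal, self-contained sketch of that debounce idea; the names (DebouncedRunner, request) are illustrative and not part of the commit.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

// Illustrative sketch of the debounce pattern; not HBase code.
public class DebouncedRunner {
  private static final Object LOCK = new Object();
  private static final ScheduledExecutorService EXECUTOR =
      Executors.newSingleThreadScheduledExecutor();
  private static ScheduledFuture<?> pending;

  /** Ask for work to run soon; bursts of requests collapse into a single scheduled run. */
  public static void request(Runnable work) {
    synchronized (LOCK) {
      // A run is already scheduled more than 100 ms out: let this request merge into it.
      if (pending != null && !pending.isDone()
          && pending.getDelay(TimeUnit.MILLISECONDS) > 100) {
        return;
      }
      pending = EXECUTOR.schedule(work, 5, TimeUnit.SECONDS);
    }
  }
}

In this sketch the pending-future check sits inside the synchronized block, so two racing callers cannot both schedule a run; the committed code takes the lock only around the schedule() call itself.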
HRegion.java

@@ -451,8 +451,6 @@ public class HRegion implements HeapSize { // , Writable{
    this.regiondir = getRegionDir(this.tableDir, encodedNameStr);
    this.scannerReadPoints = new ConcurrentHashMap<RegionScanner, Long>();

    this.metricsRegion = new MetricsRegion(new MetricsRegionWrapperImpl(this));

    /*
     * timestamp.slop provides a server-side constraint on the timestamp. This
     * assumes that you base your TS around currentTimeMillis(). In this case,
@@ -475,6 +473,9 @@ public class HRegion implements HeapSize { // , Writable{
      // don't initialize coprocessors if not running within a regionserver
      // TODO: revisit if coprocessors should load in other cases
      this.coprocessorHost = new RegionCoprocessorHost(this, rsServices, conf);
      this.metricsRegion = new MetricsRegion(new MetricsRegionWrapperImpl(this));
    } else {
      this.metricsRegion = null;
    }
    if (LOG.isDebugEnabled()) {
      // Write out region name as string and its encoded name.
@@ -1024,7 +1025,9 @@ public class HRegion implements HeapSize { // , Writable{
        status.setStatus("Running coprocessor post-close hooks");
        this.coprocessorHost.postClose(abort);
      }
      if ( this.metricsRegion != null) {
        this.metricsRegion.close();
      }
      status.markComplete("Closed");
      LOG.info("Closed " + this);
      return result;
@@ -2331,12 +2334,16 @@ public class HRegion implements HeapSize { // , Writable{
      if (noOfPuts > 0) {
        // There were some Puts in the batch.
        double noOfMutations = noOfPuts + noOfDeletes;
        if (this.metricsRegion != null) {
          this.metricsRegion.updatePut();
        }
      }
      if (noOfDeletes > 0) {
        // There were some Deletes in the batch.
        if (this.metricsRegion != null) {
          this.metricsRegion.updateDelete();
        }
      }
      if (!success) {
        for (int i = firstIndex; i < lastIndexExclusive; i++) {
          if (batchOp.retCodeDetails[i].getOperationStatusCode() == OperationStatusCode.NOT_RUN) {
@@ -4269,8 +4276,9 @@ public class HRegion implements HeapSize { // , Writable{
    }

    // do after lock

    if (this.metricsRegion != null) {
      this.metricsRegion.updateGet();
    }

    return results;
  }
@@ -4657,8 +4665,9 @@ public class HRegion implements HeapSize { // , Writable{
      closeRegionOperation();
    }

    if (this.metricsRegion != null) {
      this.metricsRegion.updateAppend();

    }

    if (flush) {
      // Request a cache flush. Do it outside update lock.
@@ -4795,8 +4804,10 @@ public class HRegion implements HeapSize { // , Writable{
        mvcc.completeMemstoreInsert(w);
      }
      closeRegionOperation();
      if (this.metricsRegion != null) {
        this.metricsRegion.updateIncrement();
      }
    }

    if (flush) {
      // Request a cache flush. Do it outside update lock.
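The HRegion hunks all apply one pattern: the per-region metrics object is created only in the branch that also builds the coprocessor host (that is, when the region is hosted by a regionserver), it is set to null otherwise, and every update or close of it is wrapped in a null check. A small self-contained sketch of that optional-collaborator pattern follows; the names (RegionLike, RegionMetricsSink) are illustrative, not the HRegion API.

// Illustrative sketch of the null-guarded metrics pattern; not HBase code.
class RegionLike {
  /** Per-region metrics sink; null when the region is not hosted by a regionserver. */
  private final RegionMetricsSink metrics;

  RegionLike(boolean hostedByRegionServer) {
    this.metrics = hostedByRegionServer ? new RegionMetricsSink() : null;
  }

  void put() {
    // ... apply the mutation, then record it only if metrics are wired up ...
    if (this.metrics != null) {
      this.metrics.updatePut();
    }
  }

  void close() {
    if (this.metrics != null) {
      this.metrics.close();
    }
  }

  static class RegionMetricsSink {
    void updatePut() { /* record one put against the region's metrics source */ }
    void close() { /* unregister the region's metrics source */ }
  }
}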