HBASE-11610 Enhance remote meta updates (Virag Kothari)

stack 2014-08-25 18:16:55 -07:00
parent 6ac5718094
commit 59230cb184
3 changed files with 87 additions and 24 deletions


@@ -47,9 +47,12 @@ public class RpcRetryingCallerFactory {
   }
 
   public static RpcRetryingCallerFactory instantiate(Configuration configuration) {
+    String clazzName = RpcRetryingCallerFactory.class.getName();
     String rpcCallerFactoryClazz =
-        configuration.get(RpcRetryingCallerFactory.CUSTOM_CALLER_CONF_KEY,
-          RpcRetryingCallerFactory.class.getName());
+        configuration.get(RpcRetryingCallerFactory.CUSTOM_CALLER_CONF_KEY, clazzName);
+    if (rpcCallerFactoryClazz.equals(clazzName)) {
+      return new RpcRetryingCallerFactory(configuration);
+    }
     return ReflectionUtils.instantiateWithCustomCtor(rpcCallerFactoryClazz,
         new Class[] { Configuration.class }, new Object[] { configuration });
   }
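For context, the change above lets instantiate() return the stock factory directly when no override is configured, instead of always going through reflection. Below is a minimal sketch of calling it; the custom class name in the comment is hypothetical, and a real override would have to be on the classpath and expose a Configuration-only constructor, since instantiateWithCustomCtor passes only the Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;

public class RpcCallerFactoryExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Default path: no override is configured, so instantiate() now returns the
    // stock factory directly instead of constructing it reflectively.
    RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(conf);
    System.out.println(factory.getClass().getName());

    // Override path (class name is hypothetical): the named class must be on the
    // classpath and provide a (Configuration) constructor.
    // conf.set(RpcRetryingCallerFactory.CUSTOM_CALLER_CONF_KEY, "com.example.MyRetryingCallerFactory");
    // RpcRetryingCallerFactory custom = RpcRetryingCallerFactory.instantiate(conf);
  }
}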


@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -32,8 +33,6 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.master.RegionState.State;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ConfigUtil;
+import org.apache.hadoop.hbase.util.MultiHConnection;
 
 import com.google.common.base.Preconditions;
@@ -56,7 +56,7 @@ public class RegionStateStore {
   protected static final char META_REPLICA_ID_DELIMITER = '_';
 
   private volatile HRegion metaRegion;
-  private volatile HTableInterface metaTable;
+  private MultiHConnection multiHConnection;
   private volatile boolean initialized;
 
   private final boolean noPersistence;
@@ -139,7 +139,6 @@
     initialized = false;
   }
 
-  @SuppressWarnings("deprecation")
   void start() throws IOException {
     if (!noPersistence) {
       if (server instanceof RegionServerServices) {
@@ -147,8 +146,13 @@
           HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
       }
       if (metaRegion == null) {
-        metaTable = new HTable(TableName.META_TABLE_NAME,
-          server.getShortCircuitConnection());
+        Configuration conf = server.getConfiguration();
+        // Config to determine the no of HConnections to META.
+        // A single HConnection should be sufficient in most cases. Only if
+        // you are doing lot of writes (>1M) to META,
+        // increasing this value might improve the write throughput.
+        multiHConnection =
+            new MultiHConnection(conf, conf.getInt("hbase.regionstatestore.meta.connection", 1));
       }
     }
     initialized = true;
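The connection pool size above is read from the new hbase.regionstatestore.meta.connection property, defaulting to 1. A minimal sketch of raising it programmatically follows; the value 3 is arbitrary, and in a real deployment the property would normally be set in the master's hbase-site.xml instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MetaConnectionConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Default is a single connection to hbase:meta; per the comment in the diff,
    // raising it only helps very write-heavy meta workloads.
    conf.setInt("hbase.regionstatestore.meta.connection", 3);
    System.out.println(conf.getInt("hbase.regionstatestore.meta.connection", 1));
  }
}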
@@ -156,18 +160,11 @@
 
   void stop() {
     initialized = false;
-    if (metaTable != null) {
-      try {
-        metaTable.close();
-      } catch (IOException e) {
-        LOG.info("Got exception in closing meta table", e);
-      } finally {
-        metaTable = null;
-      }
+    if (multiHConnection != null) {
+      multiHConnection.close();
     }
   }
 
-  @SuppressWarnings("deprecation")
   void updateRegionState(long openSeqNum,
       RegionState newState, RegionState oldState) {
     if (noPersistence || !initialized) {
@@ -219,16 +216,17 @@
           synchronized (this) {
             if (metaRegion != null) {
               LOG.info("Meta region shortcut failed", t);
-              metaTable = new HTable(TableName.META_TABLE_NAME,
-                server.getShortCircuitConnection());
+              if (multiHConnection == null) {
+                multiHConnection = new MultiHConnection(server.getConfiguration(), 1);
+              }
               metaRegion = null;
             }
           }
         }
       }
-      synchronized(metaTable) {
-        metaTable.put(put);
-      }
+      // Called when meta is not on master
+      multiHConnection.processBatchCallback(Arrays.asList(put), TableName.META_TABLE_NAME, null, null);
     } catch (IOException ioe) {
       LOG.error("Failed to persist region state " + newState, ioe);
       server.abort("Failed to update region location", ioe);
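MultiHConnection itself is not part of this three-file diff, so the following is only a rough, hypothetical sketch of the idea behind it: hold a small pool of connections to hbase:meta and spread batched writes across them. It deliberately uses the plain HConnection/HTableInterface batch API of that era rather than processBatchCallback, and none of the names below come from the real class; something like pool.batchToMeta(Arrays.asList(put)) would then play the role of the call above.

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Row;

/** Hypothetical illustration only: round-robin batched mutations over a pool of connections. */
public class SimpleMetaConnectionPool {
  private final HConnection[] connections;
  private final AtomicInteger next = new AtomicInteger(0);

  public SimpleMetaConnectionPool(Configuration conf, int poolSize) throws IOException {
    connections = new HConnection[poolSize];
    for (int i = 0; i < poolSize; i++) {
      // Each HConnection keeps its own RPC resources to the cluster.
      connections[i] = HConnectionManager.createConnection(conf);
    }
  }

  /** Send one batch of mutations to hbase:meta on the next connection in the pool. */
  public void batchToMeta(List<? extends Row> actions) throws IOException, InterruptedException {
    int idx = (next.getAndIncrement() & Integer.MAX_VALUE) % connections.length;
    HTableInterface meta = connections[idx].getTable(TableName.META_TABLE_NAME);
    try {
      meta.batch(actions, new Object[actions.size()]);
    } finally {
      meta.close();
    }
  }

  public void close() throws IOException {
    for (HConnection connection : connections) {
      connection.close();
    }
  }
}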


@@ -51,6 +51,8 @@ import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -1080,6 +1082,66 @@ public class TestAssignmentManagerOnCluster {
     }
   }
 
+  /**
+   * Test concurrent updates to meta when meta is not on master
+   * @throws Exception
+   */
+  @Test(timeout = 30000)
+  public void testUpdatesRemoteMeta() throws Exception {
+    // Not for zk less assignment
+    if (conf.getBoolean("hbase.assignment.usezk", true)) {
+      return;
+    }
+    conf.setInt("hbase.regionstatestore.meta.connection", 3);
+    final RegionStateStore rss =
+        new RegionStateStore(new MyRegionServer(conf, new ZkCoordinatedStateManager()));
+    rss.start();
+    // Create 10 threads and make each do 10 puts related to region state update
+    Thread[] th = new Thread[10];
+    List<String> nameList = new ArrayList<String>();
+    List<TableName> tableNameList = new ArrayList<TableName>();
+    for (int i = 0; i < th.length; i++) {
+      th[i] = new Thread() {
+        @Override
+        public void run() {
+          HRegionInfo[] hri = new HRegionInfo[10];
+          ServerName serverName = ServerName.valueOf("dummyhost", 1000, 1234);
+          for (int i = 0; i < 10; i++) {
+            hri[i] = new HRegionInfo(TableName.valueOf(Thread.currentThread().getName() + "_" + i));
+            RegionState newState = new RegionState(hri[i], RegionState.State.OPEN, serverName);
+            RegionState oldState =
+                new RegionState(hri[i], RegionState.State.PENDING_OPEN, serverName);
+            rss.updateRegionState(1, newState, oldState);
+          }
+        }
+      };
+      th[i].start();
+      nameList.add(th[i].getName());
+    }
+    for (int i = 0; i < th.length; i++) {
+      th[i].join();
+    }
+    // Add all the expected table names in meta to tableNameList
+    for (String name : nameList) {
+      for (int i = 0; i < 10; i++) {
+        tableNameList.add(TableName.valueOf(name + "_" + i));
+      }
+    }
+    List<Result> metaRows = MetaTableAccessor.fullScanOfMeta(admin.getConnection());
+    int count = 0;
+    // Check all 100 rows are in meta
+    for (Result result : metaRows) {
+      if (tableNameList.contains(HRegionInfo.getTable(result.getRow()))) {
+        count++;
+        if (count == 100) {
+          break;
+        }
+      }
+    }
+    assertTrue(count == 100);
+    rss.stop();
+  }
+
   static class MyLoadBalancer extends StochasticLoadBalancer {
     // For this region, if specified, always assign to nowhere
     static volatile String controledRegion = null;