HBASE-4669 Add an option of using round-robin assignment for enabling table

(Jieshan Bean)


git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1195670 13f79535-47bb-0310-9956-ffa450edef68
Zhihong Yu 2011-10-31 21:00:48 +00:00
parent 468f6dd371
commit e29a6c1b3b
6 changed files with 119 additions and 33 deletions
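The patch adds a master-side switch, hbase.master.enabletable.roundrobin (default false). When set, re-enabling a table spreads its regions evenly across all online region servers instead of assigning each region independently. A minimal usage sketch, assuming the 0.92-era client API; the table name "mytable" is hypothetical, and note the flag must be visible to the master (for example via hbase-site.xml), since EnableTableHandler reads it from the master's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class RoundRobinEnableSketch {
  public static void main(String[] args) throws Exception {
    // Must be set in the master's configuration to take effect;
    // setting it client-side only matters for embedded/mini clusters.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.master.enabletable.roundrobin", true);

    HBaseAdmin admin = new HBaseAdmin(conf);
    byte[] tableName = Bytes.toBytes("mytable"); // hypothetical table
    admin.disableTable(tableName);
    // With the flag on, the master round-robins the table's regions
    // across all online region servers during enable.
    admin.enableTable(tableName);
  }
}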

CHANGES.txt

@@ -27,6 +27,8 @@ Release 0.93.0 - Unreleased
    HBASE-4489  Better key splitting in RegionSplitter
    HBASE-4626  Filters unnecessarily copy byte arrays (Lars H)
    HBASE-4691  Remove more unnecessary byte[] copies from KeyValues (Lars H)
+   HBASE-4669  Add an option of using round-robin assignment for enabling table
+               (Jieshan Bean)
 
   BUG FIXES
    HBASE-4488  Store could miss rows during flush (Lars H via jgray)

AssignmentManager.java

@@ -24,8 +24,8 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.lang.Thread.UncaughtExceptionHandler;
 import java.util.ArrayList;
-import java.util.Date;
 import java.util.Collections;
+import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -57,19 +57,19 @@ import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.catalog.RootLocationEditor;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.executor.EventHandler.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.RegionTransitionData;
-import org.apache.hadoop.hbase.executor.EventHandler.EventType;
-import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
-import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
 import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
 import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
 import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
 import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
 import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
 import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
+import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
+import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -706,7 +706,7 @@ public class AssignmentManager extends ZooKeeperListener {
         // what follows will fail because not in expected state.
         regionState.update(RegionState.State.CLOSED,
           data.getStamp(), data.getOrigin());
-          removeClosedRegion(regionState.getRegion());
+        removeClosedRegion(regionState.getRegion());
         this.executorService.submit(new ClosedRegionHandler(master,
           this, regionState.getRegion()));
         break;
@@ -1834,6 +1834,20 @@ public class AssignmentManager extends ZooKeeperListener {
     assign(HRegionInfo.FIRST_META_REGIONINFO, true);
   }
 
+  /**
+   * Assigns all user regions to online servers. Use round-robin assignment.
+   *
+   * @param regions
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public void assignUserRegionsToOnlineServers(List<HRegionInfo> regions)
+      throws IOException,
+      InterruptedException {
+    List<ServerName> servers = this.serverManager.getOnlineServersList();
+    assignUserRegions(regions, servers);
+  }
+
   /**
    * Assigns all user regions, if any. Used during cluster startup.
    * <p>
@@ -1919,7 +1933,8 @@
     }
 
     @Override
-    public boolean bulkAssign(boolean sync) throws InterruptedException {
+    public boolean bulkAssign(boolean sync) throws InterruptedException,
+        IOException {
       // Disable timing out regions in transition up in zk while bulk assigning.
       this.assignmentManager.timeoutMonitor.bulkAssign(true);
       try {
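The new assignUserRegionsToOnlineServers() above delegates to the existing assignUserRegions() bulk path, which spreads the region list evenly over the supplied servers. Below is a minimal sketch of that round-robin split, with illustrative class and method names; this is not the actual balancer code:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;

public class RoundRobinSketch {
  /**
   * Distribute regions across servers in round-robin order, so that
   * per-server region counts differ by at most one.
   */
  static Map<ServerName, List<HRegionInfo>> roundRobin(
      List<HRegionInfo> regions, List<ServerName> servers) {
    Map<ServerName, List<HRegionInfo>> plan =
        new HashMap<ServerName, List<HRegionInfo>>();
    for (int i = 0; i < regions.size(); i++) {
      // Walk the server list cyclically.
      ServerName server = servers.get(i % servers.size());
      List<HRegionInfo> assigned = plan.get(server);
      if (assigned == null) {
        assigned = new ArrayList<HRegionInfo>();
        plan.put(server, assigned);
      }
      assigned.add(regions.get(i));
    }
    return plan;
  }
}

Under such a distribution, per-server region counts differ by at most one, which is what the new TestAdmin test below asserts (max minus min is less than 2).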

BulkAssigner.java

@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import java.io.IOException;
 import java.lang.Thread.UncaughtExceptionHandler;
 import java.util.concurrent.Executors;
@@ -35,7 +36,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * Server.
  */
 public abstract class BulkAssigner {
-  final Server server;
+  protected final Server server;
 
   /**
    * @param server An instance of Server
@ -71,19 +72,24 @@ public abstract class BulkAssigner {
getLong("hbase.bulk.assignment.waiton.empty.rit", 5 * 60 * 1000); getLong("hbase.bulk.assignment.waiton.empty.rit", 5 * 60 * 1000);
} }
protected abstract void populatePool(final java.util.concurrent.ExecutorService pool); protected abstract void populatePool(
final java.util.concurrent.ExecutorService pool) throws IOException;
public boolean bulkAssign() throws InterruptedException { public boolean bulkAssign() throws InterruptedException, IOException {
return bulkAssign(true); return bulkAssign(true);
} }
/** /**
* Run the bulk assign. * Run the bulk assign.
* @param sync Whether to assign synchronously. *
* @param sync
* Whether to assign synchronously.
* @throws InterruptedException * @throws InterruptedException
* @return True if done. * @return True if done.
* @throws IOException
*/ */
public boolean bulkAssign(boolean sync) throws InterruptedException { public boolean bulkAssign(boolean sync) throws InterruptedException,
IOException {
boolean result = false; boolean result = false;
ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
builder.setDaemon(true); builder.setDaemon(true);
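These signature changes thread IOException from populatePool() up through both bulkAssign() overloads, since subclasses may now perform assignment work that can fail with an IOException while populating the pool, as EnableTableHandler does below.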

BulkReOpen.java

@@ -19,18 +19,16 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
 
+import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.BulkAssigner;
-import org.apache.hadoop.hbase.master.RegionPlan;
-import org.apache.commons.logging.Log;
 
 /**
  * Performs bulk reopen of the list of regions provided to it.
@@ -93,7 +91,7 @@ public class BulkReOpen extends BulkAssigner {
         "hbase.bulk.reopen.threadpool.size", defaultThreadCount);
   }
 
-  public boolean bulkReOpen() throws InterruptedException {
+  public boolean bulkReOpen() throws InterruptedException, IOException {
     return bulkAssign();
   }
 }

EnableTableHandler.java

@@ -169,15 +169,29 @@ public class EnableTableHandler extends EventHandler {
   }
 
   @Override
-  protected void populatePool(ExecutorService pool) {
-    for (HRegionInfo region: regions) {
-      if (assignmentManager.isRegionInTransition(region) != null) continue;
-      final HRegionInfo hri = region;
-      pool.execute(new Runnable() {
-        public void run() {
-          assignmentManager.assign(hri, true);
-        }
-      });
+  protected void populatePool(ExecutorService pool) throws IOException {
+    boolean roundRobinAssignment = this.server.getConfiguration().getBoolean(
+        "hbase.master.enabletable.roundrobin", false);
+
+    if (!roundRobinAssignment) {
+      for (HRegionInfo region : regions) {
+        if (assignmentManager.isRegionInTransition(region) != null) {
+          continue;
+        }
+        final HRegionInfo hri = region;
+        pool.execute(new Runnable() {
+          public void run() {
+            assignmentManager.assign(hri, true);
+          }
+        });
+      }
+    } else {
+      try {
+        assignmentManager.assignUserRegionsToOnlineServers(regions);
+      } catch (InterruptedException e) {
+        LOG.warn("Assignment was interrupted");
+        Thread.currentThread().interrupt();
+      }
     }
   }
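The design trade-off here: the default branch submits one independent assign() task per region to the pool, skipping regions already in transition, while the round-robin branch hands the whole region list to the AssignmentManager in a single call so placement can be computed globally across the online servers. Because populatePool() only declares IOException, the InterruptedException from the bulk call is caught, logged, and the thread's interrupt status restored rather than propagated.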

TestAdmin.java

@@ -27,6 +27,8 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -42,10 +44,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotEnabledException;
@@ -53,16 +53,11 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventHandler.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.TestHLogUtils;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -84,6 +79,8 @@ public class TestAdmin {
     TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
     TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
     TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
+    TEST_UTIL.getConfiguration().setBoolean(
+        "hbase.master.enabletable.roundrobin", true);
     TEST_UTIL.startMiniCluster(3);
   }
@@ -578,6 +575,60 @@
     splitTest(splitKey, familyNames, rowCounts, numVersions, blockSize);
   }
 
+  /**
+   * Test round-robin assignment on enableTable.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testEnableTableRoundRobinAssignment() throws IOException {
+    byte[] tableName = Bytes.toBytes("testEnableTableAssignment");
+    byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 },
+        new byte[] { 3, 3, 3 }, new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 },
+        new byte[] { 6, 6, 6 }, new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 },
+        new byte[] { 9, 9, 9 } };
+    int expectedRegions = splitKeys.length + 1;
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
+    admin.createTable(desc, splitKeys);
+
+    HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    Map<HRegionInfo, HServerAddress> regions = ht.getRegionsInfo();
+    assertEquals("Tried to create " + expectedRegions + " regions "
+        + "but only found " + regions.size(), expectedRegions, regions.size());
+    // Disable table.
+    admin.disableTable(tableName);
+    // Enable table, use round-robin assignment to assign regions.
+    admin.enableTable(tableName);
+
+    // Check the assignment.
+    HTable metaTable = new HTable(HConstants.META_TABLE_NAME);
+    List<HRegionInfo> regionInfos = admin.getTableRegions(tableName);
+    Map<String, Integer> serverMap = new HashMap<String, Integer>();
+    for (int i = 0, j = regionInfos.size(); i < j; i++) {
+      HRegionInfo hri = regionInfos.get(i);
+      Get get = new Get(hri.getRegionName());
+      Result result = metaTable.get(get);
+      String server = Bytes.toString(result.getValue(HConstants.CATALOG_FAMILY,
+          HConstants.SERVER_QUALIFIER));
+      Integer regioncount = serverMap.get(server);
+      if (regioncount == null) {
+        regioncount = 0;
+      }
+      regioncount++;
+      serverMap.put(server, regioncount);
+    }
+    List<Map.Entry<String, Integer>> entryList =
+        new ArrayList<Map.Entry<String, Integer>>(serverMap.entrySet());
+    Collections.sort(entryList, new Comparator<Map.Entry<String, Integer>>() {
+      public int compare(Map.Entry<String, Integer> oa,
+          Map.Entry<String, Integer> ob) {
+        return (oa.getValue() - ob.getValue());
+      }
+    });
+    assertTrue(entryList.size() == 3);
+    assertTrue((entryList.get(2).getValue() - entryList.get(0).getValue()) < 2);
+  }
+
   /**
    * Multi-family scenario. Tests forcing split from client and
    * having scanners successfully ride over split.