HBASE-18352 Enable TestMasterOperationsForRegionReplicas#testCreateTableWithMultipleReplicas disabled by Proc-V2 AM in HBASE-14614

Re-enables the test. Adds a facility to HBaseTestingUtility so
you can pass in the ports a restarted cluster should use. This
is needed so that retention of region placement, on which this
test depends, can kick in (this is why the test was broken by
the AMv2 commit... region placement retention is done
differently in AMv2).
Michael Stack 2017-12-13 20:57:02 -08:00
parent bff4226105
commit 85969cfcda
3 changed files with 53 additions and 23 deletions
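For orientation, a minimal usage sketch of the new facility, adapted from the test change below; it assumes a running HBaseTestingUtility instance named TEST_UTIL and a single-master cluster, as in the test:

  // Record the ports the live regionservers are currently bound to.
  List<Integer> rsPorts = new ArrayList<>();
  for (JVMClusterUtil.RegionServerThread rst :
      TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads()) {
    rsPorts.add(rst.getRegionServer().getRpcServer().getListenerAddress().getPort());
  }
  // Restart the cluster on the same ports so the regionservers come back at the same
  // host:port (just a new startcode) and region placement retention can be exercised.
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.startMiniHBaseCluster(1, rsPorts.size(), rsPorts);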

HBaseTestingUtility.java

@@ -967,21 +967,28 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     }
     // Start the MiniHBaseCluster
-    return startMiniHBaseCluster(numMasters, numSlaves, masterClass,
+    return startMiniHBaseCluster(numMasters, numSlaves, null, masterClass,
         regionserverClass, create, withWALDir);
   }
   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
       throws IOException, InterruptedException {
-    return startMiniHBaseCluster(numMasters, numSlaves, null, null, false, false);
+    return startMiniHBaseCluster(numMasters, numSlaves, null);
+  }
+  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves,
+      List<Integer> rsPorts) throws IOException, InterruptedException {
+    return startMiniHBaseCluster(numMasters, numSlaves, rsPorts, null, null, false, false);
   }
   /**
    * Starts up mini hbase cluster. Usually used after call to
    * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
    * Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
-   * @param numMasters
-   * @param numSlaves
+   * @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster
+   *          restart where for sure the regionservers come up on same address+port (but
+   *          just with different startcode); by default mini hbase clusters choose new
+   *          arbitrary ports on each cluster start.
    * @param create Whether to create a
    * root or data directory path or not; will overwrite if exists already.
    * @return Reference to the hbase mini hbase cluster.
@@ -990,7 +997,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * @see {@link #startMiniCluster()}
    */
   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
-      final int numSlaves, Class<? extends HMaster> masterClass,
+      final int numSlaves, List<Integer> rsPorts, Class<? extends HMaster> masterClass,
       Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
       boolean create, boolean withWALDir)
       throws IOException, InterruptedException {
@@ -1015,7 +1022,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     Configuration c = new Configuration(this.conf);
     TraceUtil.initTracer(c);
     this.hbaseCluster =
-        new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
+        new MiniHBaseCluster(c, numMasters, numSlaves, rsPorts, masterClass, regionserverClass);
     // Don't leave here till we've done a successful scan of the hbase:meta
     Table t = getConnection().getTable(TableName.META_TABLE_NAME);
     ResultScanner s = t.getScanner(new Scan());

MiniHBaseCluster.java

@@ -77,10 +77,19 @@ public class MiniHBaseCluster extends HBaseCluster {
    */
   public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers)
       throws IOException, InterruptedException {
-    this(conf, numMasters, numRegionServers, null, null);
+    this(conf, numMasters, numRegionServers, null, null, null);
   }
+  /**
+   * @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster
+   *          restart where for sure the regionservers come up on same address+port (but
+   *          just with different startcode); by default mini hbase clusters choose new
+   *          arbitrary ports on each cluster start.
+   * @throws IOException
+   * @throws InterruptedException
+   */
   public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers,
+         List<Integer> rsPorts,
          Class<? extends HMaster> masterClass,
          Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
       throws IOException, InterruptedException {
@@ -93,7 +102,7 @@ public class MiniHBaseCluster extends HBaseCluster {
     // Hadoop 2
     CompatibilityFactory.getInstance(MetricsAssertHelper.class).init();
-    init(numMasters, numRegionServers, masterClass, regionserverClass);
+    init(numMasters, numRegionServers, rsPorts, masterClass, regionserverClass);
     this.initialClusterStatus = getClusterStatus();
   }
@@ -207,7 +216,7 @@ public class MiniHBaseCluster extends HBaseCluster {
       }
     }
-  private void init(final int nMasterNodes, final int nRegionNodes,
+  private void init(final int nMasterNodes, final int nRegionNodes, List<Integer> rsPorts,
       Class<? extends HMaster> masterClass,
       Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
       throws IOException, InterruptedException {
@@ -226,6 +235,9 @@ public class MiniHBaseCluster extends HBaseCluster {
       // manually add the regionservers as other users
       for (int i = 0; i < nRegionNodes; i++) {
         Configuration rsConf = HBaseConfiguration.create(conf);
+        if (rsPorts != null) {
+          rsConf.setInt(HConstants.REGIONSERVER_PORT, rsPorts.get(i));
+        }
         User user = HBaseTestingUtility.getDifferentUser(rsConf,
             ".hfs."+index++);
         hbaseCluster.addRegionServer(rsConf, i, user);
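For completeness, a hedged sketch of constructing MiniHBaseCluster directly with pinned regionserver ports via the new constructor; the port values and server counts are illustrative only, and passing null for the master and regionserver classes keeps the defaults, as the existing delegating constructor does:

  Configuration conf = HBaseConfiguration.create();
  List<Integer> rsPorts = Arrays.asList(16020, 16021); // illustrative ports, not from the patch
  // One master, two regionservers; regionserver i binds to rsPorts.get(i).
  MiniHBaseCluster cluster =
      new MiniHBaseCluster(conf, 1, 2, rsPorts, null, null);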

TestMasterOperationsForRegionReplicas.java

@@ -19,9 +19,13 @@
 package org.apache.hadoop.hbase.master;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -50,9 +54,11 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
@@ -174,12 +180,18 @@ public class TestMasterOperationsForRegionReplicas {
       }
       validateFromSnapshotFromMeta(TEST_UTIL, tableName, numRegions, numReplica,
         ADMIN.getConnection());
-      /* DISABLED!!!!! FOR NOW!!!!
       // Now shut the whole cluster down, and verify the assignments are kept so that the
-      // availability constraints are met.
-      TEST_UTIL.getConfiguration().setBoolean("hbase.master.startup.retainassign", true);
+      // availability constraints are met. MiniHBaseCluster chooses arbitrary ports on each
+      // restart. This messes with our being able to test that we retain locality. Therefore,
+      // figure current cluster ports and pass them in on next cluster start so new cluster comes
+      // up at same coordinates -- and the assignment retention logic has a chance to cut in.
+      List<Integer> rsports = new ArrayList<>();
+      for (JVMClusterUtil.RegionServerThread rst:
+          TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads()) {
+        rsports.add(rst.getRegionServer().getRpcServer().getListenerAddress().getPort());
+      }
       TEST_UTIL.shutdownMiniHBaseCluster();
-      TEST_UTIL.startMiniHBaseCluster(1, numSlaves);
+      TEST_UTIL.startMiniHBaseCluster(1, numSlaves, rsports);
       TEST_UTIL.waitTableEnabled(tableName);
       validateFromSnapshotFromMeta(TEST_UTIL, tableName, numRegions, numReplica,
         ADMIN.getConnection());
@@ -203,10 +215,10 @@ public class TestMasterOperationsForRegionReplicas {
       ADMIN.enableTable(tableName);
       LOG.info(ADMIN.getTableDescriptor(tableName).toString());
       assert(ADMIN.isTableEnabled(tableName));
-      List<RegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
-        .getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
-      assertTrue("regions.size=" + regions.size() + ", numRegions=" + numRegions + ", numReplica=" + numReplica,
-        regions.size() == numRegions * (numReplica + 1));
+      List<RegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster().
+        getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
+      assertTrue("regions.size=" + regions.size() + ", numRegions=" + numRegions +
+        ", numReplica=" + numReplica, regions.size() == numRegions * (numReplica + 1));
       //decrease the replica(earlier, table was modified to have a replica count of numReplica + 1)
       ADMIN.disableTable(tableName);
@@ -233,7 +245,6 @@ public class TestMasterOperationsForRegionReplicas {
       assert(defaultReplicas.size() == numRegions);
       Collection<Integer> counts = new HashSet<>(defaultReplicas.values());
       assert(counts.size() == 1 && counts.contains(new Integer(numReplica)));
-      */
     } finally {
       ADMIN.disableTable(tableName);
       ADMIN.deleteTable(tableName);
@@ -342,14 +353,14 @@ public class TestMasterOperationsForRegionReplicas {
         connection);
     snapshot.initialize();
     Map<RegionInfo, ServerName> regionToServerMap = snapshot.getRegionToRegionServerMap();
-    assertEquals(regionToServerMap.size(), numRegions * numReplica + 1); //'1' for the namespace
+    assertEquals(regionToServerMap.size(), numRegions * numReplica + 1);
     Map<ServerName, List<RegionInfo>> serverToRegionMap = snapshot.getRegionServerToRegionMap();
-    assertEquals(serverToRegionMap.keySet().size(), 2); // 1 rs + 1 master
+    assertEquals("One Region Only", 1, serverToRegionMap.keySet().size());
     for (Map.Entry<ServerName, List<RegionInfo>> entry : serverToRegionMap.entrySet()) {
       if (entry.getKey().equals(TEST_UTIL.getHBaseCluster().getMaster().getServerName())) {
         continue;
       }
-      assertEquals(entry.getValue().size(), numRegions * numReplica);
+      assertEquals(entry.getValue().size(), numRegions * numReplica + 1);
     }
   }
 }