Revert "HBASE-18352 Enable TestMasterOperationsForRegionReplicas#testCreateTableWithMultipleReplicas"

Pushed prematurely

This reverts commit 1a173f820b.
Michael Stack 2017-12-14 14:17:20 -08:00
parent 70f02dbc7c
commit 6ab8ce9829
3 changed files with 23 additions and 53 deletions

HBaseTestingUtility.java

@@ -967,28 +967,21 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     }

     // Start the MiniHBaseCluster
-    return startMiniHBaseCluster(numMasters, numSlaves, null, masterClass,
+    return startMiniHBaseCluster(numMasters, numSlaves, masterClass,
         regionserverClass, create, withWALDir);
   }

   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
   throws IOException, InterruptedException{
-    return startMiniHBaseCluster(numMasters, numSlaves, null);
-  }
-
-  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves,
-      List<Integer> rsPorts) throws IOException, InterruptedException {
-    return startMiniHBaseCluster(numMasters, numSlaves, rsPorts, null, null, false, false);
+    return startMiniHBaseCluster(numMasters, numSlaves, null, null, false, false);
   }

   /**
    * Starts up mini hbase cluster. Usually used after call to
    * {@link #startMiniCluster(int, int)} when doing stepped startup of clusters.
    * Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
-   * @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster
-   *   restart where for sure the regionservers come up on same address+port (but
-   *   just with different startcode); by default mini hbase clusters choose new
-   *   arbitrary ports on each cluster start.
+   * @param numMasters
+   * @param numSlaves
    * @param create Whether to create a
    *   root or data directory path or not; will overwrite if exists already.
    * @return Reference to the hbase mini hbase cluster.
@@ -997,7 +990,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * @see {@link #startMiniCluster()}
    */
   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
-      final int numSlaves, List<Integer> rsPorts, Class<? extends HMaster> masterClass,
+      final int numSlaves, Class<? extends HMaster> masterClass,
       Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
       boolean create, boolean withWALDir)
   throws IOException, InterruptedException {
@@ -1022,7 +1015,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     Configuration c = new Configuration(this.conf);
     TraceUtil.initTracer(c);
     this.hbaseCluster =
-        new MiniHBaseCluster(c, numMasters, numSlaves, rsPorts, masterClass, regionserverClass);
+        new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
     // Don't leave here till we've done a successful scan of the hbase:meta
     Table t = getConnection().getTable(TableName.META_TABLE_NAME);
     ResultScanner s = t.getScanner(new Scan());
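
With the rsPorts overload removed, test code is back to the two entry points shown in the hunks above: the plain two-argument startMiniHBaseCluster and the full masterClass/regionserverClass form. A minimal usage sketch, not part of this patch (the class name and the restart counts are illustrative), of a stop/restart cycle against the post-revert API; on restart the region servers again bind to arbitrary ports:

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterRestartSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster();            // DFS + ZK + HBase with default counts
        util.shutdownMiniHBaseCluster();    // stop only the HBase part; DFS/ZK keep running
        util.startMiniHBaseCluster(1, 3);   // stepped restart: 1 master, 3 region servers,
                                            // each on a freshly chosen port
        util.shutdownMiniCluster();         // full teardown
      }
    }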

MiniHBaseCluster.java

@@ -77,19 +77,10 @@ public class MiniHBaseCluster extends HBaseCluster {
    */
   public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers)
       throws IOException, InterruptedException {
-    this(conf, numMasters, numRegionServers, null, null, null);
+    this(conf, numMasters, numRegionServers, null, null);
   }

-  /**
-   * @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster
-   *   restart where for sure the regionservers come up on same address+port (but
-   *   just with different startcode); by default mini hbase clusters choose new
-   *   arbitrary ports on each cluster start.
-   * @throws IOException
-   * @throws InterruptedException
-   */
   public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers,
-         List<Integer> rsPorts,
          Class<? extends HMaster> masterClass,
          Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
       throws IOException, InterruptedException {
@@ -102,7 +93,7 @@ public class MiniHBaseCluster extends HBaseCluster {
     // Hadoop 2
     CompatibilityFactory.getInstance(MetricsAssertHelper.class).init();

-    init(numMasters, numRegionServers, rsPorts, masterClass, regionserverClass);
+    init(numMasters, numRegionServers, masterClass, regionserverClass);
     this.initialClusterStatus = getClusterStatus();
   }
@@ -216,7 +207,7 @@ public class MiniHBaseCluster extends HBaseCluster {
     }
   }

-  private void init(final int nMasterNodes, final int nRegionNodes, List<Integer> rsPorts,
+  private void init(final int nMasterNodes, final int nRegionNodes,
       Class<? extends HMaster> masterClass,
       Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
       throws IOException, InterruptedException {
@@ -235,9 +226,6 @@ public class MiniHBaseCluster extends HBaseCluster {
       // manually add the regionservers as other users
       for (int i=0; i<nRegionNodes; i++) {
         Configuration rsConf = HBaseConfiguration.create(conf);
-        if (rsPorts != null) {
-          rsConf.setInt(HConstants.REGIONSERVER_PORT, rsPorts.get(i));
-        }
         User user = HBaseTestingUtility.getDifferentUser(rsConf,
             ".hfs."+index++);
         hbaseCluster.addRegionServer(rsConf, i, user);
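
The removed branch above was the only consumer of rsPorts: it simply overrode the region server RPC port in each server's private Configuration. A hedged sketch of that mechanism in isolation (the helper class and method name are invented for illustration; the constant and calls are the ones visible in the hunk):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    final class FixedPortRsConf {
      // Copy the base configuration and pin the region server RPC port
      // (HConstants.REGIONSERVER_PORT, i.e. "hbase.regionserver.port"),
      // which is what the removed rsPorts branch did once per server.
      static Configuration pinnedTo(Configuration base, int port) {
        Configuration rsConf = HBaseConfiguration.create(base);
        rsConf.setInt(HConstants.REGIONSERVER_PORT, port);
        return rsConf;
      }
    }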

TestMasterOperationsForRegionReplicas.java

@@ -19,13 +19,9 @@
 package org.apache.hadoop.hbase.master;

 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;

 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -54,11 +50,9 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
@@ -180,18 +174,12 @@ public class TestMasterOperationsForRegionReplicas {
       }
       validateFromSnapshotFromMeta(TEST_UTIL, tableName, numRegions, numReplica,
         ADMIN.getConnection());
+      /* DISABLED!!!!! FOR NOW!!!!
       // Now shut the whole cluster down, and verify the assignments are kept so that the
-      // availability constraints are met. MiniHBaseCluster chooses arbitrary ports on each
-      // restart. This messes with our being able to test that we retain locality. Therefore,
-      // figure current cluster ports and pass them in on next cluster start so new cluster comes
-      // up at same coordinates -- and the assignment retention logic has a chance to cut in.
-      List<Integer> rsports = new ArrayList<>();
-      for (JVMClusterUtil.RegionServerThread rst:
-          TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads()) {
-        rsports.add(rst.getRegionServer().getRpcServer().getListenerAddress().getPort());
-      }
+      // availability constraints are met.
+      TEST_UTIL.getConfiguration().setBoolean("hbase.master.startup.retainassign", true);
       TEST_UTIL.shutdownMiniHBaseCluster();
-      TEST_UTIL.startMiniHBaseCluster(1, numSlaves, rsports);
+      TEST_UTIL.startMiniHBaseCluster(1, numSlaves);
       TEST_UTIL.waitTableEnabled(tableName);
       validateFromSnapshotFromMeta(TEST_UTIL, tableName, numRegions, numReplica,
         ADMIN.getConnection());
@@ -215,10 +203,10 @@ public class TestMasterOperationsForRegionReplicas {
       ADMIN.enableTable(tableName);
       LOG.info(ADMIN.getTableDescriptor(tableName).toString());
       assert(ADMIN.isTableEnabled(tableName));
-      List<RegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster().
-        getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
-      assertTrue("regions.size=" + regions.size() + ", numRegions=" + numRegions +
-        ", numReplica=" + numReplica, regions.size() == numRegions * (numReplica + 1));
+      List<RegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
+          .getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
+      assertTrue("regions.size=" + regions.size() + ", numRegions=" + numRegions + ", numReplica=" + numReplica,
+        regions.size() == numRegions * (numReplica + 1));

       //decrease the replica(earlier, table was modified to have a replica count of numReplica + 1)
       ADMIN.disableTable(tableName);
@@ -245,6 +233,7 @@ public class TestMasterOperationsForRegionReplicas {
       assert(defaultReplicas.size() == numRegions);
       Collection<Integer> counts = new HashSet<>(defaultReplicas.values());
       assert(counts.size() == 1 && counts.contains(new Integer(numReplica)));
+      */
     } finally {
       ADMIN.disableTable(tableName);
       ADMIN.deleteTable(tableName);
@@ -353,14 +342,14 @@ public class TestMasterOperationsForRegionReplicas {
         connection);
     snapshot.initialize();
     Map<RegionInfo, ServerName> regionToServerMap = snapshot.getRegionToRegionServerMap();
-    assertEquals(regionToServerMap.size(), numRegions * numReplica + 1);
+    assertEquals(regionToServerMap.size(), numRegions * numReplica + 1); //'1' for the namespace
     Map<ServerName, List<RegionInfo>> serverToRegionMap = snapshot.getRegionServerToRegionMap();
-    assertEquals("One Region Only", 1, serverToRegionMap.keySet().size());
+    assertEquals(serverToRegionMap.keySet().size(), 2); // 1 rs + 1 master
     for (Map.Entry<ServerName, List<RegionInfo>> entry : serverToRegionMap.entrySet()) {
       if (entry.getKey().equals(TEST_UTIL.getHBaseCluster().getMaster().getServerName())) {
         continue;
       }
-      assertEquals(entry.getValue().size(), numRegions * numReplica + 1);
+      assertEquals(entry.getValue().size(), numRegions * numReplica);
     }
   }
 }
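
The re-disabled block is where the reverted change mattered: to verify that assignments are retained across a restart, the pre-revert code first recorded the ports the live region servers were bound to and then restarted the cluster on the same coordinates. A sketch of that capture idiom, lifted from the removed lines and wrapped in an illustrative helper (after this revert there is no startMiniHBaseCluster overload that accepts the ports, so the captured list is only useful for inspection):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.util.JVMClusterUtil;

    final class RsPortCapture {
      // Collect the RPC ports the live region servers are currently listening on,
      // using the same chain of calls the removed (pre-revert) test code used.
      static List<Integer> livePorts(HBaseTestingUtility util) {
        List<Integer> rsports = new ArrayList<>();
        for (JVMClusterUtil.RegionServerThread rst :
            util.getHBaseCluster().getLiveRegionServerThreads()) {
          rsports.add(rst.getRegionServer().getRpcServer().getListenerAddress().getPort());
        }
        return rsports;
      }
    }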