HBASE-20775 TestMultiParallel is flakey
parent dde042cc93
commit 2f4b0a4a42
@@ -799,6 +799,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       s.setConsistency(Consistency.TIMELINE);
     }
     int maxAttempts = (retry ? numTries : 1);
+    boolean relocateMeta = false;
     for (int tries = 0; ; tries++) {
       if (tries >= maxAttempts) {
         throw new NoServerForRegionException("Unable to find region for "
@@ -825,6 +826,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
             return locations;
           }
         }
+        if (relocateMeta) {
+          relocateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
+            RegionInfo.DEFAULT_REPLICA_ID);
+        }
         s.resetMvccReadPoint();
         try (ReversedClientScanner rcs =
             new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory,
@@ -904,10 +909,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
             throw e;
           }
           // Only relocate the parent region if necessary
-          if(!(e instanceof RegionOfflineException ||
-              e instanceof NoServerForRegionException)) {
-            relocateRegion(TableName.META_TABLE_NAME, metaStartKey, replicaId);
-          }
+          relocateMeta =
+            !(e instanceof RegionOfflineException || e instanceof NoServerForRegionException);
         } finally {
           userRegionLock.unlock();
         }
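
Taken together, the three hunks above stop locateRegionInMeta from relocating hbase:meta inside the catch block of a failed lookup; the intent is recorded in the new relocateMeta flag and the relocation runs at the top of the next retry, just before the meta scan is re-issued. Below is a minimal, self-contained sketch of that deferred-relocation retry pattern; RegionLookup, lookupOnce and refreshMetaCache are placeholder names, not HBase API, and unlike the real code the flag is set for every failure rather than only for exceptions other than RegionOfflineException and NoServerForRegionException.

// Sketch only: deferred-relocation retry loop in the style of the hunks above.
public class RelocateMetaRetrySketch {

  interface RegionLookup {
    String lookupOnce() throws Exception; // one scan of the meta table (placeholder)
  }

  static String locateWithRetry(RegionLookup lookup, Runnable refreshMetaCache, int maxAttempts)
      throws Exception {
    boolean relocateMeta = false;
    for (int tries = 0; ; tries++) {
      if (tries >= maxAttempts) {
        throw new Exception("Unable to find region after " + maxAttempts + " attempts");
      }
      // Relocation happens at the top of the next attempt,
      // not inside the catch block of the attempt that failed.
      if (relocateMeta) {
        refreshMetaCache.run();
      }
      try {
        return lookup.lookupOnce();
      } catch (Exception e) {
        relocateMeta = true;
      }
    }
  }

  public static void main(String[] args) throws Exception {
    final int[] failures = {2};
    String location = locateWithRetry(
        () -> {
          if (failures[0]-- > 0) {
            throw new Exception("stale meta location");
          }
          return "regionserver-3";
        },
        () -> System.out.println("refreshing cached hbase:meta location"),
        5);
    System.out.println("located at " + location);
  }
}
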
@@ -19,6 +19,7 @@
    * limitations under the License.
    */
   -->
+  <!-- for testing -->
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <artifactId>hbase-build-configuration</artifactId>
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -85,7 +84,8 @@ public class TestMultiParallel {
   private static final int slaves = 5; // also used for testing HTable pool size
   private static Connection CONNECTION;

-  @BeforeClass public static void beforeClass() throws Exception {
+  @BeforeClass
+  public static void beforeClass() throws Exception {
     // Uncomment the following lines if more verbosity is needed for
     // debugging (see HBASE-12285 for details).
     //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
@@ -93,7 +93,8 @@ public class TestMultiParallel {
     //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
     UTIL.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
         KeyValueCodec.class.getCanonicalName());
-    UTIL.getConfiguration().setBoolean(LoadBalancer.TABLES_ON_MASTER, true);
+    // Disable table on master for now as the feature is broken
+    //UTIL.getConfiguration().setBoolean(LoadBalancer.TABLES_ON_MASTER, true);
     // We used to ask for system tables on Master exclusively but not needed by test and doesn't
     // work anyways -- so commented out.
     // UTIL.getConfiguration().setBoolean(LoadBalancer.SYSTEM_TABLES_ON_MASTER, true);
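
The hunk above leaves the TABLES_ON_MASTER flag unset, so the mini cluster starts with whatever default it ships with instead of forcing user regions onto the master; the later hunk that expects liveRScount - 1 live servers relies on this. As context only, a tiny sketch of the configuration toggle involved, using a hypothetical key name rather than the real LoadBalancer.TABLES_ON_MASTER constant:

import org.apache.hadoop.conf.Configuration;

// Sketch only: "example.tables.on.master" is a placeholder key, and the default
// value passed to getBoolean is arbitrary for the sketch.
public class TablesOnMasterToggleSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Before the change the test forced the flag on:
    // conf.setBoolean("example.tables.on.master", true);
    // After the change nothing is set, so the cluster's own default applies.
    boolean tablesOnMaster = conf.getBoolean("example.tables.on.master", false);
    System.out.println("tables on master: " + tablesOnMaster);
  }
}
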
@@ -107,12 +108,14 @@ public class TestMultiParallel {
     assertTrue(MyMasterObserver.start.get());
   }

-  @AfterClass public static void afterClass() throws Exception {
+  @AfterClass
+  public static void afterClass() throws Exception {
     CONNECTION.close();
     UTIL.shutdownMiniCluster();
   }

-  @Before public void before() throws Exception {
+  @Before
+  public void before() throws Exception {
     final int balanceCount = MyMasterObserver.postBalanceCount.get();
     LOG.info("before");
     if (UTIL.ensureSomeRegionServersAvailable(slaves)) {
@@ -172,11 +175,8 @@ public class TestMultiParallel {
    * This is for testing the active number of threads that were used while
    * doing a batch operation. It inserts one row per region via the batch
    * operation, and then checks the number of active threads.
+   * <p/>
    * For HBASE-3553
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws NoSuchFieldException
-   * @throws SecurityException
    */
   @Test
   public void testActiveThreadsCount() throws Exception {
@@ -280,8 +280,6 @@ public class TestMultiParallel {
   /**
    * Only run one Multi test with a forced RegionServer abort. Otherwise, the
    * unit tests will take an unnecessarily long time to run.
-   *
-   * @throws Exception
    */
   @Test
   public void testFlushCommitsWithAbort() throws Exception {
@@ -292,7 +290,6 @@ public class TestMultiParallel {
   /**
    * Set table auto flush to false and test flushing commits
    * @param doAbort true if abort one regionserver in the testing
-   * @throws Exception
    */
   private void doTestFlushCommits(boolean doAbort) throws Exception {
     // Load the data
@@ -342,9 +339,9 @@ public class TestMultiParallel {
     UTIL.waitFor(15 * 1000, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        // Master is also a regionserver, so the count is liveRScount
+        // We disable regions on master so the count should be liveRScount - 1
         return UTIL.getMiniHBaseCluster().getMaster()
-            .getClusterMetrics().getLiveServerMetrics().size() == liveRScount;
+            .getClusterMetrics().getLiveServerMetrics().size() == liveRScount - 1;
       }
     });
     UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition());
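
The changed assertion above runs inside UTIL.waitFor, which keeps evaluating the supplied Waiter.Predicate until it returns true or the 15 second timeout expires. A self-contained sketch of that poll-until-true pattern follows, with the HBaseTestingUtility and Waiter types replaced by a plain Callable<Boolean>:

import java.util.concurrent.Callable;

// Sketch only: polls the condition every 100 ms until it holds or the timeout elapses.
public class WaitForSketch {

  static boolean waitFor(long timeoutMs, Callable<Boolean> condition) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (condition.call()) {
        return true;
      }
      Thread.sleep(100);
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    final int liveRScount = 5;
    // Pretend the live-server count drops once the aborted regionserver is noticed.
    final int[] liveServers = {liveRScount};
    new Thread(() -> {
      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {
      }
      liveServers[0] = liveRScount - 1;
    }).start();
    boolean met = waitFor(15 * 1000, () -> liveServers[0] == liveRScount - 1);
    System.out.println("condition met: " + met);
  }
}
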
@@ -678,12 +675,12 @@ public class TestMultiParallel {
     RowMutations rm = new RowMutations(KEYS[50]);
     put = new Put(KEYS[50]);
     put.addColumn(BYTES_FAMILY, qual2, val2);
-    rm.add(put);
+    rm.add((Mutation) put);
     byte[] qual3 = Bytes.toBytes("qual3");
     byte[] val3 = Bytes.toBytes("putvalue3");
     put = new Put(KEYS[50]);
     put.addColumn(BYTES_FAMILY, qual3, val3);
-    rm.add(put);
+    rm.add((Mutation) put);
     actions.add(rm);

     // 7 Add another Get to the mixed sequence after RowMutations
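
The final hunk swaps rm.add(put) for rm.add((Mutation) put). Because Put is a Mutation, the upcast only changes which overload the compiler picks, presumably steering the call away from a more specific, deprecated add(Put) overload. A generic sketch of that overload-selection mechanism, using placeholder classes rather than the HBase client API:

// Sketch only: Mutation, Put and RowMutations here are stand-ins, not the HBase types.
public class OverloadSketch {
  static class Mutation {}
  static class Put extends Mutation {}

  static class RowMutations {
    @Deprecated
    void add(Put p) {
      System.out.println("deprecated Put overload");
    }

    void add(Mutation m) {
      System.out.println("Mutation overload");
    }
  }

  public static void main(String[] args) {
    RowMutations rm = new RowMutations();
    Put put = new Put();
    rm.add(put);             // resolves to the more specific, deprecated add(Put)
    rm.add((Mutation) put);  // the cast selects add(Mutation) instead
  }
}
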