HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug

hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
 Edit the log about archiving that shows up in the middle of a table create;
 try to make it less disorienting.

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
 Loosen assert. Compaction may have produced a single file only. Allow
 for this.

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
 Make this test less furious given it runs inline with a bunch of other unit
 tests.

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
 Add debug

hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java
 Add a wait for the quota table to show up before moving forward; otherwise,
 the attempt at setting the quota fails.

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
 Debug

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
 Remove asserts that expected regions to still be present in the fs after a
 merge; the CatalogJanitor may have already cleaned up the parent dirs.

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
 Catch the exception on the way out and log it rather than let it fail the test.

hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java
 Wait on acl table before proceeding.
stack 2020-03-24 14:36:09 -07:00
parent 9a212ee487
commit 2ca0a105bc
9 changed files with 34 additions and 11 deletions

hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -23,7 +23,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -319,7 +318,9 @@ public class DeleteTableProcedure
           .collect(Collectors.toList());
       HFileArchiver.archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tempTableDir,
         regionDirList);
-      LOG.debug("Table '{}' archived!", tableName);
+      if (!regionDirList.isEmpty()) {
+        LOG.debug("Archived {} regions", tableName);
+      }
     }
     // Archive mob data

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
@@ -429,8 +429,11 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     int countAfterSingleFamily = countStoreFilesInFamily(regions, family);
     assertTrue(countAfter < countBefore);
     if (!singleFamily) {
-      if (expectedState == CompactionState.MAJOR) assertTrue(families.length == countAfter);
-      else assertTrue(families.length < countAfter);
+      if (expectedState == CompactionState.MAJOR) {
+        assertEquals(families.length, countAfter);
+      } else {
+        assertTrue(families.length <= countAfter);
+      }
     } else {
       int singleFamDiff = countBeforeSingleFamily - countAfterSingleFamily;
       // assert only change was to single column family

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -134,7 +134,7 @@ public class TestAsyncTableGetMultiThreaded {
   @Test
   public void test() throws Exception {
     LOG.info("====== Test started ======");
-    int numThreads = 20;
+    int numThreads = 10;
     AtomicBoolean stop = new AtomicBoolean(false);
     ExecutorService executor =
       Executors.newFixedThreadPool(numThreads, Threads.newDaemonThreadFactory("TestAsyncGet-"));

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -236,7 +236,7 @@ public class TestFromClientSide3 {
     table.put(put);
     Result r = table.get(new Get(row));
-    assertEquals(3, r.size());
+    assertEquals(r.toString(), 3, r.size());
     assertEquals("testValue", Bytes.toString(CellUtil.cloneValue(r.rawCells()[0])));
     assertEquals("qual0", Bytes.toString(CellUtil.cloneValue(r.rawCells()[1])));
     assertEquals("qual1", Bytes.toString(CellUtil.cloneValue(r.rawCells()[2])));

hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -19,16 +19,17 @@ package org.apache.hadoop.hbase.quotas;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
@@ -144,6 +146,13 @@ public class TestQuotaObserverChoreRegionReports {
     // Expire the reports after 5 seconds
     conf.setInt(QuotaObserverChore.REGION_REPORT_RETENTION_DURATION_KEY, 5000);
     TEST_UTIL.startMiniCluster(1);
+    // Wait till quota table onlined.
+    TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
+      @Override public boolean evaluate() throws Exception {
+        return MetaTableAccessor.tableExists(TEST_UTIL.getConnection(),
+          QuotaTableUtil.QUOTA_TABLE_NAME);
+      }
+    });
     final String FAM1 = "f1";
@@ -158,6 +167,7 @@ public class TestQuotaObserverChoreRegionReports {
     final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.NO_INSERTS;
     QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, violationPolicy);
     final Admin admin = TEST_UTIL.getAdmin();
+    LOG.info("SET QUOTA");
     admin.setQuota(settings);
     final Connection conn = TEST_UTIL.getConnection();

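An aside on the wait idiom added above: Waiter.Predicate has a single evaluate() method, so HBaseTestingUtility.waitFor also accepts it as a lambda. A minimal sketch of the same check in that form, assuming the TEST_UTIL, MetaTableAccessor, and QuotaTableUtil already used in the diff (the wrapper class and method name here are hypothetical, not part of the commit):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.quotas.QuotaTableUtil;

public final class QuotaTableWait {
  private QuotaTableWait() {
  }

  // Hypothetical helper: block up to 10 seconds until hbase:quota shows up in
  // meta, expressing the Waiter.Predicate as a lambda instead of an anonymous class.
  public static void waitForQuotaTable(HBaseTestingUtility util) throws Exception {
    util.waitFor(10000, () ->
      MetaTableAccessor.tableExists(util.getConnection(), QuotaTableUtil.QUOTA_TABLE_NAME));
  }
}
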
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -2107,6 +2107,8 @@ public class TestHRegion {
     put.addColumn(fam1, qf3, val1);
     region.put(put);
+    LOG.info("get={}", region.get(new Get(row1).addColumn(fam1, qf1)).toString());
     // Multi-column delete
     Delete delete = new Delete(row1);
     delete.addColumn(fam1, qf1);

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -281,8 +281,8 @@ public class TestRegionMergeTransactionOnCluster {
       ProcedureTestingUtility.waitNoProcedureRunning(
         TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor());
     }
-    assertFalse(regionAdir.toString(), fs.exists(regionAdir));
-    assertFalse(regionBdir.toString(), fs.exists(regionBdir));
+    // We used to check for existence of region in fs but sometimes the region dir was
+    // cleaned up by the time we got here making the test sometimes flakey.
     assertTrue(cleaned > 0);
     mergedRegionResult = MetaTableAccessor.getRegionResult(

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TestMetaTableAccessor;
 import org.apache.hadoop.hbase.client.Consistency;
@@ -428,7 +429,11 @@ public class TestRegionReplicas {
       }
     } finally {
       HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, startKey, endKey);
-      closeRegion(HTU, getRS(), hriSecondary);
+      try {
+        closeRegion(HTU, getRS(), hriSecondary);
+      } catch (NotServingRegionException e) {
+        LOG.info("Closing wrong region {}", hriSecondary, e);
+      }
     }
   }

hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java
@@ -624,6 +624,7 @@ public class TestSnapshotScannerHDFSAclController {
     String snapshot = namespace + "s1";
     String snapshot2 = namespace + "s2";
     String snapshot3 = namespace + "s3";
+    TEST_UTIL.waitTableAvailable(PermissionStorage.ACL_TABLE_NAME);
     try (Table t = TestHDFSAclHelper.createTable(TEST_UTIL, table)) {
       TestHDFSAclHelper.put(t);
@@ -633,6 +634,7 @@ public class TestSnapshotScannerHDFSAclController {
     // delete
     admin.disableTable(table);
     admin.deleteTable(table);
+    LOG.info("Before scan of shapshot!");
     TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, -1);
     // restore snapshot and restore acl