HBASE-23814 Add null checks and logging to misc set of tests

Signed-off-by: Nick Dimiduk <ndimiduk@apache.org>
Signed-off-by: Zach York <zyork@apache.org>

* TestFullLogReconstruction: log the server we've chosen to expire and then note when we start counting rows
* TestAsyncTableScanException: use a constant for the row count instead of repeating the literal
* TestRawAsyncTableLimitedScanWithFilter: check the connection was actually made before closing it in tearDown (see the tearDown sketch below, ahead of the per-file diffs)
* TestLogsCleaner: use a single mod time and make sure it is less than 'now', in case the whole test runs within the same millisecond (which would make the test fail)
* TestReplicationBase: check the test tables are non-null before closing them in tearDown
stack 2020-02-07 15:11:11 -08:00
parent 35989c1968
commit a4aa183f45
8 changed files with 30 additions and 13 deletions
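The null-guarded tearDown bullets above come down to one defensive pattern, sketched minimally below. This is not code from the patch: the class name ExampleNullSafeTeardownTest, the CONN field, and the single-node mini cluster are illustrative assumptions. The point is that JUnit 4 still runs @AfterClass when @BeforeClass fails, so closing a still-unassigned static resource would throw an NPE that masks the original setup failure.

    import static org.junit.Assert.assertNotNull;

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class ExampleNullSafeTeardownTest {
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
      private static Connection CONN; // stays null if setUp fails before the assignment

      @BeforeClass
      public static void setUp() throws Exception {
        UTIL.startMiniCluster(1);
        CONN = ConnectionFactory.createConnection(UTIL.getConfiguration());
      }

      @AfterClass
      public static void tearDown() throws Exception {
        // Guard the close: if setUp threw before CONN was assigned, an unguarded
        // CONN.close() would raise an NPE here and hide the original failure.
        if (CONN != null) {
          CONN.close();
        }
        UTIL.shutdownMiniCluster();
      }

      @Test
      public void testConnectionIsUp() {
        assertNotNull(CONN);
      }
    }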

ReplicationLogCleaner.java

@@ -85,8 +85,8 @@ public class ReplicationLogCleaner extends BaseLogCleanerDelegate {
       }
       String wal = file.getPath().getName();
       boolean logInReplicationQueue = wals.contains(wal);
-      if (logInReplicationQueue) {
-        LOG.debug("Found up in ZooKeeper, NOT deleting={}", wal);
+      if (logInReplicationQueue) {
+        LOG.debug("Found up in ZooKeeper, NOT deleting={}", wal);
       }
       return !logInReplicationQueue && (file.getModificationTime() < readZKTimestamp);
     }

TestFullLogReconstruction.java

@@ -31,9 +31,12 @@ import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Category({ MiscTests.class, MediumTests.class })
 public class TestFullLogReconstruction {
+  private static final Logger LOG = LoggerFactory.getLogger(TestFullLogReconstruction.class);
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
@@ -82,7 +85,9 @@ public class TestFullLogReconstruction {
       TEST_UTIL.loadTable(table, FAMILY);
     }
     RegionServerThread rsThread = TEST_UTIL.getHBaseCluster().getRegionServerThreads().get(0);
-    TEST_UTIL.expireRegionServerSession(0);
+    int index = 0;
+    LOG.info("Expiring {}", TEST_UTIL.getMiniHBaseCluster().getRegionServer(index));
+    TEST_UTIL.expireRegionServerSession(index);
     // make sure that the RS is fully down before reading, so that we will read the data from other
     // RSes.
     TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
@@ -97,6 +102,7 @@ public class TestFullLogReconstruction {
         return rsThread.getRegionServer() + " is still alive";
       }
     });
+    LOG.info("Starting count");
     int newCount = TEST_UTIL.countRows(table);
     assertEquals(count, newCount);

TestAsyncTableRegionReplicasScan.java

@@ -66,7 +66,7 @@ public class TestAsyncTableRegionReplicasScan extends AbstractTestAsyncTableRegi
       scan.setReplicaId(replicaId);
     }
     try (ResultScanner scanner = table.getScanner(scan)) {
-      for (int i = 0; i < 1000; i++) {
+      for (int i = 0; i < ROW_COUNT; i++) {
         Result result = scanner.next();
         assertNotNull(result);
         assertArrayEquals(getValue(i), result.getValue(FAMILY, QUALIFIER));

TestAsyncTableScanException.java

@@ -74,6 +74,8 @@ public class TestAsyncTableScanException {
   private static volatile boolean DO_NOT_RETRY;
 
+  private static final int ROW_COUNT = 100;
+
   public static final class ErrorCP implements RegionObserver, RegionCoprocessor {
 
     @Override
@@ -99,13 +101,13 @@ public class TestAsyncTableScanException {
   @BeforeClass
   public static void setUp() throws Exception {
-    UTIL.startMiniCluster(3);
+    UTIL.startMiniCluster(1);
     UTIL.getAdmin()
       .createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME)
         .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
         .setCoprocessor(ErrorCP.class.getName()).build());
     try (Table table = UTIL.getConnection().getTable(TABLE_NAME)) {
-      for (int i = 0; i < 100; i++) {
+      for (int i = 0; i < ROW_COUNT; i++) {
         table.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(i)));
       }
     }
@@ -151,7 +153,7 @@ public class TestAsyncTableScanException {
   private void count() throws IOException {
     try (ResultScanner scanner = CONN.getTable(TABLE_NAME).getScanner(new Scan().setCaching(1))) {
-      for (int i = 0; i < 100; i++) {
+      for (int i = 0; i < ROW_COUNT; i++) {
         Result result = scanner.next();
         assertArrayEquals(Bytes.toBytes(i), result.getRow());
         assertArrayEquals(Bytes.toBytes(i), result.getValue(FAMILY, QUAL));

TestRawAsyncTableLimitedScanWithFilter.java

@@ -81,7 +81,9 @@ public class TestRawAsyncTableLimitedScanWithFilter {
   @AfterClass
   public static void tearDown() throws Exception {
-    CONN.close();
+    if (CONN != null) {
+      CONN.close();
+    }
     UTIL.shutdownMiniCluster();
   }

TestLogsCleaner.java

@@ -276,9 +276,12 @@ public class TestLogsCleaner {
   public void testZooKeeperNormal() throws Exception {
     ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
+    // Subtract 1000 from current time so modtime is for sure older
+    // than 'now'.
+    long modTime = System.currentTimeMillis() - 1000;
     List<FileStatus> dummyFiles = Arrays.asList(
-      new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log1")),
-      new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log2"))
+      new FileStatus(100, false, 3, 100, modTime, new Path("log1")),
+      new FileStatus(100, false, 3, 100, modTime, new Path("log2"))
     );
     ZKWatcher zkw = new ZKWatcher(conf, "testZooKeeperAbort-normal", null);
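For context on why the fixed modTime matters: the ReplicationLogCleaner hunk above only treats a WAL as deletable when its modification time is strictly less than the timestamp captured when ZooKeeper was read. A rough standalone sketch of the race follows; the class name ModTimeSketch is made up, and readZKTimestamp simply mirrors the field seen in that hunk.

    public final class ModTimeSketch {
      public static void main(String[] args) {
        long fileModTimeNow = System.currentTimeMillis();      // old test code: mod time is "now"
        long safeModTime = System.currentTimeMillis() - 1000;  // new test code: for sure older
        long readZKTimestamp = System.currentTimeMillis();     // captured an instant later
        // If all three calls land in the same millisecond, the first comparison can be
        // false, the file looks non-deletable, and the test assertion fails.
        System.out.println("flaky check:  " + (fileModTimeNow < readZKTimestamp));
        System.out.println("stable check: " + (safeModTime < readZKTimestamp));
      }
    }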

TestReplicationBase.java

@@ -342,8 +342,12 @@ public class TestReplicationBase {
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-    htable2.close();
-    htable1.close();
+    if (htable2 != null) {
+      htable2.close();
+    }
+    if (htable1 != null) {
+      htable1.close();
+    }
     UTIL2.shutdownMiniCluster();
     UTIL1.shutdownMiniCluster();
   }

TestCanaryTool.java

@@ -218,7 +218,7 @@ public class TestCanaryTool {
     verify(mockAppender, times(1)).doAppend(argThat(new ArgumentMatcher<LoggingEvent>() {
       @Override
       public boolean matches(LoggingEvent argument) {
-        return ((LoggingEvent) argument).getRenderedMessage().contains("exceeded the configured read timeout.");
+        return argument.getRenderedMessage().contains("exceeded the configured read timeout.");
       }
     }));
     verify(mockAppender, times(2)).doAppend(argThat(new ArgumentMatcher<LoggingEvent>() {