HBASE-17995 improve log messages during snapshot tests.

Signed-off-by: Michael Stack <stack@apache.org>

 Conflicts:
	hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
This commit is contained in:
Sean Busbey 2017-05-04 11:17:51 -05:00
parent 67b61e6d5a
commit 9264e8fb17
3 changed files with 25 additions and 9 deletions

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.hbase.mapreduce;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@ -49,6 +51,7 @@ import java.io.IOException;
import java.util.Arrays;

public abstract class TableSnapshotInputFormatTestBase {
  private static final Log LOG = LogFactory.getLog(TableSnapshotInputFormatTestBase.class);
  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
      withTimeout(this.getClass()).withLookingForStuckThread(true).build();
  protected final HBaseTestingUtility UTIL = new HBaseTestingUtility();
@ -192,11 +195,13 @@ public abstract class TableSnapshotInputFormatTestBase {
      String snapshotName, byte[] startRow, byte[] endRow, int numRegions)
      throws Exception {
    try {
      LOG.debug("Ensuring table doesn't exist.");
      util.deleteTable(tableName);
    } catch(Exception ex) {
      // ignore
    }

    LOG.info("creating table '" + tableName + "'");
    if (numRegions > 1) {
      util.createTable(tableName, FAMILIES, 1, startRow, endRow, numRegions);
    } else {
@ -204,21 +209,22 @@ public abstract class TableSnapshotInputFormatTestBase {
    }
    Admin admin = util.getHBaseAdmin();

    LOG.info("put some stuff in the table");
    HTable table = new HTable(util.getConfiguration(), tableName);
    util.loadTable(table, FAMILIES);

    Path rootDir = FSUtils.getRootDir(util.getConfiguration());
    FileSystem fs = rootDir.getFileSystem(util.getConfiguration());

    LOG.info("snapshot");
    SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
        Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);

    LOG.info("load different values");
    byte[] value = Bytes.toBytes("after_snapshot_value");
    util.loadTable(table, FAMILIES, value);

    LOG.info("cause flush to create new files in the region");
    admin.flush(tableName);
    table.close();
  }

View File

@ -25,6 +25,8 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -62,6 +64,7 @@ import com.google.common.collect.Lists;
@Category(LargeTests.class)
public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase {
  private static final Log LOG = LogFactory.getLog(TestTableSnapshotInputFormat.class);
  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
      withTimeout(this.getClass()).withLookingForStuckThread(true).build();
@ -332,10 +335,13 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
      String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
      int expectedNumSplits, boolean shutdownCluster) throws Exception {

    LOG.info("testing with MapReduce");
    LOG.info("create the table and snapshot");
    createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

    if (shutdownCluster) {
      LOG.info("shutting down hbase cluster.");
      util.shutdownMiniHBaseCluster();
    }

View File

@ -207,7 +207,7 @@ public final class SnapshotTestingUtils {
    // check snapshot dir
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(
        snapshotDescriptor, rootDir);
    assertTrue("target snapshot directory, '"+ snapshotDir +"', doesn't exist.",
        fs.exists(snapshotDir));

    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
@ -230,14 +230,14 @@ public final class SnapshotTestingUtils {
    // Verify that there are store files in the specified families
    if (nonEmptyTestFamilies != null) {
      for (final byte[] familyName: nonEmptyTestFamilies) {
        assertTrue("Expected snapshot to contain family '" + Bytes.toString(familyName)
            + "', but it does not.", snapshotFamilies.contains(familyName));
      }
    }

    // Verify that there are no store files in the specified families
    if (emptyTestFamilies != null) {
      for (final byte[] familyName: emptyTestFamilies) {
        assertFalse("Expected snapshot to skip empty family '" + Bytes.toString(familyName)
            + "', but it is present.", snapshotFamilies.contains(familyName));
      }
    }
@ -256,12 +256,12 @@ public final class SnapshotTestingUtils {
      }
      regionCountExclusiveSplitParent++;
    }
    assertEquals("Wrong number of regions.", regions.size(), regionCountExclusiveSplitParent);

    // Verify Regions (redundant check, see MasterSnapshotVerifier)
    for (HRegionInfo info : regions) {
      String regionName = info.getEncodedName();
      assertTrue("Missing region name: '" + regionName + "'",
          regionManifests.containsKey(regionName));
    }
} }
@ -395,20 +395,24 @@ public final class SnapshotTestingUtils {
      throws Exception {
    if (!onlineSnapshot) {
      try {
        LOG.info("prepping for offline snapshot.");
        admin.disableTable(tableName);
      } catch (TableNotEnabledException tne) {
        LOG.info("In attempting to disable " + tableName + " it turns out that the this table is " +
            "already disabled.");
      }
    }
    LOG.info("taking snapshot.");
    admin.snapshot(snapshotNameString, tableName);

    LOG.info("Confirming snapshot exists.");
    List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertExistsMatchingSnapshot(admin,
        snapshotNameString, tableName);
    if (snapshots == null || snapshots.size() != 1) {
      Assert.fail("Incorrect number of snapshots for table " + tableName);
    }

    LOG.info("validating snapshot.");
    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), tableName, nonEmptyFamilyNames,
        emptyFamilyNames, rootDir, admin, fs);
  }