HBASE-23838 Adding debug logging to a few ExportSnapshot tests
* hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
  Add logging of the temp output dir and of whether the tmp dir is being used at all.
* hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
  Don't create new Configurations; use the current one, and set it into the launched
  ExportSnapshot too. Log as we make progress through the stages so it is easier to
  find the source of an exception.
* hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java
  Add makeQualified, though it looks to be redundant.
* hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java
  Might fix a local failure where hbase doesn't seem to be up yet when we query jmx:
  wait on meta being online.
* hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
  Run all tests on the one cluster instance; see if that helps with an odd failure
  seen when running locally.
parent 488c2b2483
commit b7ef225609
hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java

@@ -967,20 +967,19 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
     Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
     srcConf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true);
     FileSystem inputFs = FileSystem.get(inputRoot.toUri(), srcConf);
-    LOG.debug("inputFs=" + inputFs.getUri().toString() + " inputRoot=" + inputRoot);
     Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);
     destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true);
     FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf);
-    LOG.debug("outputFs=" + outputFs.getUri().toString() + " outputRoot=" + outputRoot.toString());
-
     boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) ||
       conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null;
-
     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot);
     Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot,
       destConf);
     Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot);
     Path initialOutputSnapshotDir = skipTmp ? outputSnapshotDir : snapshotTmpDir;
+    LOG.debug("inputFs={}, inputRoot={}", inputFs.getUri().toString(), inputRoot);
+    LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}",
+      outputFs.getUri().toString(), outputRoot.toString(), skipTmp, initialOutputSnapshotDir);

     // Find the necessary directory which need to change owner and group
     Path needSetOwnerDir = SnapshotDescriptionUtils.getSnapshotRootDir(outputRoot);
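Note that the surviving debug lines also switch from string concatenation to SLF4J parameterized logging, so the message is only assembled when DEBUG is actually enabled. A minimal sketch of the difference (class and variables are illustrative, not from the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

      void example(Object inputRoot, Object outputRoot) {
        // Concatenation builds the full String even when DEBUG is disabled:
        LOG.debug("inputRoot=" + inputRoot + " outputRoot=" + outputRoot);
        // The parameterized form defers formatting until the level check passes:
        LOG.debug("inputRoot={}, outputRoot={}", inputRoot, outputRoot);
      }
    }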
hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java

@@ -212,8 +212,8 @@ public class TestExportSnapshot {
       final Path sourceDir, Path copyDir, final boolean overwrite,
       final RegionPredicate bypassregionPredicate, boolean success) throws Exception {
     URI hdfsUri = FileSystem.get(conf).getUri();
-    FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration());
-    copyDir = copyDir.makeQualified(fs);
+    FileSystem fs = FileSystem.get(copyDir.toUri(), conf);
+    copyDir = copyDir.makeQualified(hdfsUri, copyDir);

     List<String> opts = new ArrayList<>();
     opts.add("--snapshot");
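Path.makeQualified(URI, Path) fills in a missing scheme and authority from the given URI and resolves a relative path against the given working directory, which is how the test ends up comparing fully qualified paths later on. A small sketch (the namenode URI and paths are made up for illustration):

    import java.net.URI;
    import org.apache.hadoop.fs.Path;

    public class QualifySketch {
      public static void main(String[] args) {
        URI hdfsUri = URI.create("hdfs://namenode:8020");
        Path relative = new Path("export-test/export-1");
        // Scheme and authority come from hdfsUri; the relative part is
        // resolved against the supplied working directory.
        Path qualified = relative.makeQualified(hdfsUri, new Path("/user/test"));
        System.out.println(qualified);
        // prints: hdfs://namenode:8020/user/test/export-test/export-1
      }
    }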
@@ -227,30 +227,34 @@ public class TestExportSnapshot {
     if (overwrite) opts.add("--overwrite");

     // Export Snapshot
-    int res = run(conf, new ExportSnapshot(), opts.toArray(new String[opts.size()]));
+    ExportSnapshot es = new ExportSnapshot();
+    es.setConf(conf);
+    int res = run(conf, es, opts.toArray(new String[opts.size()]));
     assertEquals(success ? 0 : 1, res);
     if (!success) {
       final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, targetName);
-      assertFalse(fs.exists(new Path(copyDir, targetDir)));
+      assertFalse(copyDir.toString() + " " + targetDir.toString(),
+        fs.exists(new Path(copyDir, targetDir)));
       return;
     }
+    LOG.info("Exported snapshot");

     // Verify File-System state
     FileStatus[] rootFiles = fs.listStatus(copyDir);
     assertEquals(filesExpected > 0 ? 2 : 1, rootFiles.length);
     for (FileStatus fileStatus: rootFiles) {
       String name = fileStatus.getPath().getName();
-      assertTrue(fileStatus.isDirectory());
-      assertTrue(name.equals(HConstants.SNAPSHOT_DIR_NAME) ||
-        name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY));
+      assertTrue(fileStatus.toString(), fileStatus.isDirectory());
+      assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME) ||
+        name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY));
     }
+    LOG.info("Verified filesystem state");

-    // compare the snapshot metadata and verify the hfiles
+    // Compare the snapshot metadata and verify the hfiles
     final FileSystem hdfs = FileSystem.get(hdfsUri, conf);
     final Path snapshotDir = new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName);
     final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, targetName);
-    verifySnapshotDir(hdfs, new Path(sourceDir, snapshotDir),
-      fs, new Path(copyDir, targetDir));
+    verifySnapshotDir(hdfs, new Path(sourceDir, snapshotDir), fs, new Path(copyDir, targetDir));
     Set<String> snapshotFiles = verifySnapshot(conf, fs, copyDir, tableName,
       targetName, bypassregionPredicate);
     assertEquals(filesExpected, snapshotFiles.size());
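The es.setConf(conf) line is the standard Hadoop Configurable contract: a Tool gets its Configuration injected rather than constructing a fresh one, so the test's settings (mini cluster addresses and the like) reach the launched job. A generic sketch of that pattern, not the HBase class itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class ToolSketch extends Configured implements Tool {
      @Override
      public int run(String[] args) throws Exception {
        // getConf() returns whatever was injected via setConf(...).
        return getConf() == null ? 1 : 0;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // ToolRunner.run(conf, tool, args) also calls setConf(conf) for you.
        System.exit(ToolRunner.run(conf, new ToolSketch(), args));
      }
    }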
@@ -347,7 +351,8 @@ public class TestExportSnapshot {
   private static Set<String> listFiles(final FileSystem fs, final Path root, final Path dir)
       throws IOException {
     Set<String> files = new HashSet<>();
-    int rootPrefix = root.makeQualified(fs).toString().length();
+    LOG.debug("List files in {} in root {} at {}", fs, root, dir);
+    int rootPrefix = root.makeQualified(fs.getUri(), root).toString().length();
     FileStatus[] list = FSUtils.listStatus(fs, dir);
     if (list != null) {
       for (FileStatus fstat: list) {
hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.snapshot;

 import static org.junit.Assert.assertTrue;

 import java.io.File;
+import java.io.IOException;
 import java.net.URI;
 import org.apache.hadoop.fs.FileSystem;

@@ -101,16 +102,17 @@ public class TestExportSnapshotV1NoCluster {
     Path[] r2Files = builder.addRegion();
     builder.commit();
     int snapshotFilesCount = r1Files.length + r2Files.length;

     String snapshotName = builder.getSnapshotDescription().getName();
     TableName tableName = builder.getTableDescriptor().getTableName();
     TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(),
       tableName, snapshotName, snapshotName, snapshotFilesCount,
-      testDir, getDestinationDir(testDir), false, null, true);
+      testDir, getDestinationDir(testUtil, testDir), false, null, true);
   }

-  static Path getDestinationDir(Path testDir) {
-    Path path = new Path(new Path(testDir, "export-test"), "export-" + System.currentTimeMillis());
+  static Path getDestinationDir(HBaseCommonTestingUtility hctu, Path testDir) throws IOException {
+    FileSystem fs = FileSystem.get(hctu.getConfiguration());
+    Path path = new Path(new Path(testDir, "export-test"),
+      "export-" + System.currentTimeMillis()).makeQualified(fs.getUri(), testDir);
     LOG.info("HDFS export destination path: " + path);
     return path;
   }
hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java

@@ -138,6 +138,7 @@ public class TestInfoServersACL {

     CLUSTER = new LocalHBaseCluster(conf, 1);
     CLUSTER.startup();
+    CLUSTER.getActiveMaster().waitForMetaOnline();
   }

   /**
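waitForMetaOnline() blocks until hbase:meta is assigned, closing the window where a fast test thread queries JMX against a cluster that is still starting. For contrast, a generic poll-with-deadline version of the same idea (a sketch only; the patch itself relies on the blocking HMaster call):

    import org.apache.hadoop.hbase.LocalHBaseCluster;

    public final class ReadinessSketch {
      static void waitForMaster(LocalHBaseCluster cluster) throws InterruptedException {
        long deadline = System.currentTimeMillis() + 60_000;
        // Poll the master's initialized flag instead of asserting immediately.
        while (!cluster.getActiveMaster().isInitialized()) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("Master did not come up in time");
          }
          Thread.sleep(100);
        }
      }
    }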
hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information

@@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;

 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.hbase.HBaseClassTestRule;

@@ -39,8 +38,8 @@ import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

@@ -57,26 +56,25 @@ public class TestRegionStateStore {

   private static final Logger LOG = LoggerFactory.getLogger(TestRegionStateStore.class);

-  protected HBaseTestingUtility util;
+  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();

-  @Before
-  public void setup() throws Exception {
-    util = new HBaseTestingUtility();
-    util.startMiniCluster();
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    UTIL.startMiniCluster();
   }

-  @After
-  public void tearDown() throws Exception {
-    util.shutdownMiniCluster();
+  @AfterClass
+  public static void tearDown() throws Exception {
+    UTIL.shutdownMiniCluster();
   }

   @Test
   public void testVisitMetaForRegionExistingRegion() throws Exception {
     final TableName tableName = TableName.valueOf("testVisitMetaForRegion");
-    util.createTable(tableName, "cf");
-    final List<HRegion> regions = util.getHBaseCluster().getRegions(tableName);
+    UTIL.createTable(tableName, "cf");
+    final List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
     final String encodedName = regions.get(0).getRegionInfo().getEncodedName();
-    final RegionStateStore regionStateStore = util.getHBaseCluster().getMaster().
+    final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster().
       getAssignmentManager().getRegionStateStore();
     final AtomicBoolean visitorCalled = new AtomicBoolean(false);
     regionStateStore.visitMetaForRegion(encodedName, new RegionStateStore.RegionStateVisitor() {

@@ -93,10 +91,10 @@ public class TestRegionStateStore {
   @Test
   public void testVisitMetaForBadRegionState() throws Exception {
     final TableName tableName = TableName.valueOf("testVisitMetaForBadRegionState");
-    util.createTable(tableName, "cf");
-    final List<HRegion> regions = util.getHBaseCluster().getRegions(tableName);
+    UTIL.createTable(tableName, "cf");
+    final List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
     final String encodedName = regions.get(0).getRegionInfo().getEncodedName();
-    final RegionStateStore regionStateStore = util.getHBaseCluster().getMaster().
+    final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster().
       getAssignmentManager().getRegionStateStore();

     // add the BAD_STATE which does not exist in enum RegionState.State

@@ -105,7 +103,7 @@ public class TestRegionStateStore {
     put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
       Bytes.toBytes("BAD_STATE"));

-    try (Table table = util.getConnection().getTable(TableName.META_TABLE_NAME)) {
+    try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
       table.put(put);
     }

@@ -126,7 +124,7 @@ public class TestRegionStateStore {
   @Test
   public void testVisitMetaForRegionNonExistingRegion() throws Exception {
     final String encodedName = "fakeencodedregionname";
-    final RegionStateStore regionStateStore = util.getHBaseCluster().getMaster().
+    final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster().
       getAssignmentManager().getRegionStateStore();
     final AtomicBoolean visitorCalled = new AtomicBoolean(false);
     regionStateStore.visitMetaForRegion(encodedName, new RegionStateStore.RegionStateVisitor() {

@@ -138,5 +136,4 @@ public class TestRegionStateStore {
     });
     assertFalse("Visitor has been called, but it shouldn't.", visitorCalled.get());
   }
-
 }
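The TestRegionStateStore change above is the stock JUnit 4 move from per-method to per-class fixtures: @Before/@After wrap every test method, while @BeforeClass/@AfterClass run once, so all tests in the class share one mini cluster instead of paying a cluster start and stop per test. The bare shape of the pattern, with a hypothetical resource standing in for the mini cluster:

    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class SharedFixtureSketch {
      // Hypothetical stand-in for HBaseTestingUtility and its mini cluster.
      private static AutoCloseable RESOURCE;

      @BeforeClass
      public static void setUpOnce() {
        RESOURCE = () -> { }; // started once, before the first test
      }

      @AfterClass
      public static void tearDownOnce() throws Exception {
        RESOURCE.close(); // torn down once, after the last test
      }

      @Test
      public void testSomething() {
        // Every @Test method reuses RESOURCE.
      }
    }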
(one further test file, under package org.apache.hadoop.hbase.master.procedure; its name was not captured in this view)

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.master.procedure;

 import static org.junit.Assert.assertTrue;

 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;