diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index 4993feea223..36b75c43dfd 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -967,20 +967,19 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); srcConf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true); FileSystem inputFs = FileSystem.get(inputRoot.toUri(), srcConf); - LOG.debug("inputFs=" + inputFs.getUri().toString() + " inputRoot=" + inputRoot); Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf); - LOG.debug("outputFs=" + outputFs.getUri().toString() + " outputRoot=" + outputRoot.toString()); - boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) || conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null; - Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot); Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, destConf); Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot); Path initialOutputSnapshotDir = skipTmp ? 
outputSnapshotDir : snapshotTmpDir; + LOG.debug("inputFs={}, inputRoot={}", inputFs.getUri().toString(), inputRoot); + LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, " + + "initialOutputSnapshotDir={}", outputFs.getUri().toString(), outputRoot.toString(), skipTmp, initialOutputSnapshotDir); // Find the necessary directory which need to change owner and group Path needSetOwnerDir = SnapshotDescriptionUtils.getSnapshotRootDir(outputRoot); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index c988854b62c..4293d461d99 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -212,8 +212,8 @@ public class TestExportSnapshot { final Path sourceDir, Path copyDir, final boolean overwrite, final RegionPredicate bypassregionPredicate, boolean success) throws Exception { URI hdfsUri = FileSystem.get(conf).getUri(); - FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration()); - copyDir = copyDir.makeQualified(fs); + FileSystem fs = FileSystem.get(copyDir.toUri(), conf); + copyDir = copyDir.makeQualified(hdfsUri, copyDir); List opts = new ArrayList<>(); opts.add("--snapshot"); @@ -227,30 +227,34 @@ public class TestExportSnapshot { if (overwrite) opts.add("--overwrite"); // Export Snapshot - int res = run(conf, new ExportSnapshot(), opts.toArray(new String[opts.size()])); + ExportSnapshot es = new ExportSnapshot(); + es.setConf(conf); + int res = run(conf, es, opts.toArray(new String[opts.size()])); assertEquals(success ?
0 : 1, res); if (!success) { final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, Bytes.toString(targetName)); - assertFalse(fs.exists(new Path(copyDir, targetDir))); + assertFalse(copyDir.toString() + " " + targetDir.toString(), + fs.exists(new Path(copyDir, targetDir))); return; } + LOG.info("Exported snapshot"); // Verify File-System state FileStatus[] rootFiles = fs.listStatus(copyDir); assertEquals(filesExpected > 0 ? 2 : 1, rootFiles.length); for (FileStatus fileStatus: rootFiles) { String name = fileStatus.getPath().getName(); - assertTrue(fileStatus.isDirectory()); - assertTrue(name.equals(HConstants.SNAPSHOT_DIR_NAME) || - name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY)); + assertTrue(fileStatus.toString(), fileStatus.isDirectory()); + assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME) || + name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY)); } + LOG.info("Verified filesystem state"); - // compare the snapshot metadata and verify the hfiles + // Compare the snapshot metadata and verify the hfiles final FileSystem hdfs = FileSystem.get(hdfsUri, conf); final Path snapshotDir = new Path(HConstants.SNAPSHOT_DIR_NAME, Bytes.toString(snapshotName)); final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, Bytes.toString(targetName)); - verifySnapshotDir(hdfs, new Path(sourceDir, snapshotDir), - fs, new Path(copyDir, targetDir)); + verifySnapshotDir(hdfs, new Path(sourceDir, snapshotDir), fs, new Path(copyDir, targetDir)); Set snapshotFiles = verifySnapshot(conf, fs, copyDir, tableName, Bytes.toString(targetName), bypassregionPredicate); assertEquals(filesExpected, snapshotFiles.size()); @@ -347,7 +351,8 @@ public class TestExportSnapshot { private static Set listFiles(final FileSystem fs, final Path root, final Path dir) throws IOException { Set files = new HashSet<>(); - int rootPrefix = root.makeQualified(fs).toString().length(); + LOG.debug("List files in {} in root {} at {}", fs, root, dir); + int rootPrefix = 
root.makeQualified(fs.getUri(), root).toString().length(); FileStatus[] list = FSUtils.listStatus(fs, dir); if (list != null) { for (FileStatus fstat: list) { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java index 3e9e0ef2460..9b57cb7495a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.snapshot; import static org.junit.Assert.assertTrue; +import java.io.File; import java.io.IOException; import java.net.URI; import org.apache.hadoop.fs.FileSystem; @@ -102,16 +103,17 @@ public class TestExportSnapshotV1NoCluster { Path[] r2Files = builder.addRegion(); builder.commit(); int snapshotFilesCount = r1Files.length + r2Files.length; - byte[] snapshotName = Bytes.toBytes(builder.getSnapshotDescription().getName()); TableName tableName = builder.getTableDescriptor().getTableName(); TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), tableName, snapshotName, snapshotName, snapshotFilesCount, - testDir, getDestinationDir(testDir), false, null, true); + testDir, getDestinationDir(testUtil, testDir), false, null, true); } - static Path getDestinationDir(Path testDir) { - Path path = new Path(new Path(testDir, "export-test"), "export-" + System.currentTimeMillis()); + static Path getDestinationDir(HBaseCommonTestingUtility hctu, Path testDir) throws IOException { + FileSystem fs = FileSystem.get(hctu.getConfiguration()); + Path path = new Path(new Path(testDir, "export-test"), + "export-" + System.currentTimeMillis()).makeQualified(fs.getUri(), testDir); LOG.info("HDFS export destination path: " + path); return path; } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java index 5e7e902c375..f605792990a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java @@ -138,6 +138,7 @@ public class TestInfoServersACL { CLUSTER = new LocalHBaseCluster(conf, 1); CLUSTER.startup(); + CLUSTER.getActiveMaster().waitForMetaOnline(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java index c817b895600..fea362f13f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; - import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -39,8 +38,8 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -57,26 +56,25 @@ public class TestRegionStateStore { private static final Logger LOG = LoggerFactory.getLogger(TestRegionStateStore.class); - protected HBaseTestingUtility util; + private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); - @Before - public void setup() throws Exception { - util = new HBaseTestingUtility(); - util.startMiniCluster(); + @BeforeClass + public static void beforeClass() throws Exception { + UTIL.startMiniCluster(); } - @After - public void tearDown() throws Exception { - util.shutdownMiniCluster(); + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); } @Test public void testVisitMetaForRegionExistingRegion() throws Exception { final TableName tableName = TableName.valueOf("testVisitMetaForRegion"); - util.createTable(tableName, "cf"); - final List regions = util.getHBaseCluster().getRegions(tableName); + UTIL.createTable(tableName, "cf"); + final List regions = UTIL.getHBaseCluster().getRegions(tableName); final String encodedName = regions.get(0).getRegionInfo().getEncodedName(); - final RegionStateStore regionStateStore = 
util.getHBaseCluster().getMaster(). + final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster(). getAssignmentManager().getRegionStateStore(); final AtomicBoolean visitorCalled = new AtomicBoolean(false); regionStateStore.visitMetaForRegion(encodedName, new RegionStateStore.RegionStateVisitor() { @@ -93,10 +91,10 @@ public class TestRegionStateStore { @Test public void testVisitMetaForBadRegionState() throws Exception { final TableName tableName = TableName.valueOf("testVisitMetaForBadRegionState"); - util.createTable(tableName, "cf"); - final List regions = util.getHBaseCluster().getRegions(tableName); + UTIL.createTable(tableName, "cf"); + final List regions = UTIL.getHBaseCluster().getRegions(tableName); final String encodedName = regions.get(0).getRegionInfo().getEncodedName(); - final RegionStateStore regionStateStore = util.getHBaseCluster().getMaster(). + final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster(). getAssignmentManager().getRegionStateStore(); // add the BAD_STATE which does not exist in enum RegionState.State @@ -105,7 +103,7 @@ public class TestRegionStateStore { put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER, Bytes.toBytes("BAD_STATE")); - try (Table table = util.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { table.put(put); } @@ -126,7 +124,7 @@ public class TestRegionStateStore { @Test public void testVisitMetaForRegionNonExistingRegion() throws Exception { final String encodedName = "fakeencodedregionname"; - final RegionStateStore regionStateStore = util.getHBaseCluster().getMaster(). + final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster(). 
getAssignmentManager().getRegionStateStore(); final AtomicBoolean visitorCalled = new AtomicBoolean(false); regionStateStore.visitMetaForRegion(encodedName, new RegionStateStore.RegionStateVisitor() { @@ -138,5 +136,4 @@ public class TestRegionStateStore { }); assertFalse("Visitor has been called, but it shouldn't.", visitorCalled.get()); } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java index a91ebc48f32..3a14bae112c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.assertTrue; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException;