diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
index 1d3b9f1a4e8..2dacd445036 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
@@ -90,6 +90,7 @@ public class TestSnapshotFromMaster {
       TableName.valueOf("test");
   // refresh the cache every 1/2 second
   private static final long cacheRefreshPeriod = 500;
+  private static final int blockingStoreFiles = 12;
 
   /**
    * Setup the config for the cluster
@@ -114,7 +115,7 @@ public class TestSnapshotFromMaster {
     conf.setInt("hbase.hstore.compaction.min", 2);
     conf.setInt("hbase.hstore.compactionThreshold", 5);
     // block writes if we get to 12 store files
-    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+    conf.setInt("hbase.hstore.blockingStoreFiles", blockingStoreFiles);
     // Ensure no extra cleaners on by default (e.g. TimeToLiveHFileCleaner)
     conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
     conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, "");
@@ -279,7 +280,6 @@ public class TestSnapshotFromMaster {
    */
   @Test(timeout = 300000)
   public void testSnapshotHFileArchiving() throws Exception {
-    int hfileCount = 20;
     Admin admin = UTIL.getHBaseAdmin();
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
@@ -293,13 +293,9 @@
     UTIL.createTable(htd, new byte[][] { TEST_FAM }, null);
 
     // load the table
-    while(true) {
+    for (int i = 0; i < blockingStoreFiles / 2; i++) {
       UTIL.loadTable(UTIL.getConnection().getTable(TABLE_NAME), TEST_FAM);
       UTIL.flush(TABLE_NAME);
-      Collection hfiles = getHFiles(rootDir, fs, TABLE_NAME);
-      if (hfiles.size() >= hfileCount) {
-        break;
-      }
     }
 
     // disable the table so we can take a snapshot