diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 9b076f31552..8c0f05084e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -245,6 +245,7 @@ class CatalogJanitor extends Chore {
this.services.getAssignmentManager().regionOffline(parent);
}
FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
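+ // Archive the parent region's store files rather than deleting them outright,
+ // so they remain recoverable until the archive cleaner removes them.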
+ LOG.debug("Archiving parent region:" + parent);
HFileArchiver.archiveRegion(fs, parent);
MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
result = true;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 39ba8c75318..377ef736343 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -32,6 +32,8 @@ import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -76,6 +78,8 @@ import com.google.protobuf.ServiceException;
@Category(SmallTests.class)
public class TestCatalogJanitor {
+ private static final Log LOG = LogFactory.getLog(TestCatalogJanitor.class);
+
/**
* Pseudo server for the tests below.
* Be sure to call stop on the way out, else it could leave a mess behind.
@@ -529,6 +533,10 @@ public class TestCatalogJanitor {
janitor.join();
}
+ /**
+ * Test that we correctly archive all the store files when a region is deleted.
+ * @throws Exception
+ */
@Test
public void testArchiveOldRegion() throws Exception {
String table = "table";
@@ -546,10 +554,10 @@ public class TestCatalogJanitor {
HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
+
// Test that when both daughter regions are in place, we do not
// remove the parent.
- Result r = createResult(parent, splita, splitb);
-
+ Result parentMetaRow = createResult(parent, splita, splitb);
FileSystem fs = FileSystem.get(htu.getConfiguration());
Path rootdir = services.getMasterFileSystem().getRootDir();
// have to set the root directory since we use it in HFileDisposer to figure out how to get to the
@@ -559,32 +567,53 @@ public class TestCatalogJanitor {
Path tabledir = HTableDescriptor.getTableDir(rootdir, htd.getName());
Path storedir = HStore.getStoreHomedir(tabledir, parent.getEncodedName(),
htd.getColumnFamilies()[0].getName());
-
- // delete the file and ensure that the files have been archived
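+ // the store archive directory mirrors the table/region/family layout of the store directory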
Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
tabledir, htd.getColumnFamilies()[0].getName());
+ LOG.debug("Table dir:" + tabledir);
+ LOG.debug("Store dir:" + storedir);
+ LOG.debug("Store archive dir:" + storeArchive);
- // enable archiving, make sure that files get archived
- addMockStoreFiles(2, services, storedir);
+ // add a couple of store files that we can check for
+ FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
// get the current store files for comparison
FileStatus[] storeFiles = fs.listStatus(storedir);
+ int index = 0;
for (FileStatus file : storeFiles) {
- System.out.println("Have store file:" + file.getPath());
+ LOG.debug("Have store file:" + file.getPath());
+ assertEquals("Got unexpected store file", mockFiles[index].getPath(),
+ storeFiles[index].getPath());
+ index++;
}
// do the cleaning of the parent
- assertTrue(janitor.cleanParent(parent, r));
+ assertTrue(janitor.cleanParent(parent, parentMetaRow));
+ LOG.debug("Finished cleanup of parent region");
// and now check to make sure that the files have actually been archived
FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
+ logFiles("archived files", storeFiles);
+ logFiles("archived files", archivedStoreFiles);
+
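+ // the archive should now contain exactly the files that were in the original store directory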
assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
// cleanup
+ FSUtils.delete(fs, rootdir, true);
services.stop("Test finished");
- server.stop("shutdown");
+ server.stop("Test finished");
janitor.join();
}
+ /**
+ * @param description description of the files for logging
+ * @param storeFiles the status of the files to log
+ */
+ private void logFiles(String description, FileStatus[] storeFiles) {
+ LOG.debug("Current " + description + ": ");
+ for (FileStatus file : storeFiles) {
+ LOG.debug(file.getPath());
+ }
+ }
+
/**
* Test that archiving a store file with the same name as one already archived causes the
* previously archived files to be moved to a timestamped backup
@@ -657,7 +686,7 @@ public class TestCatalogJanitor {
janitor.join();
}
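+ /**
+ * Create the given number of small mock store files in the passed store directory.
+ * @return the status of the created files, as listed from the filesystem
+ */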
- private void addMockStoreFiles(int count, MasterServices services, Path storedir)
+ private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
throws IOException {
// get the existing store files
FileSystem fs = services.getMasterFileSystem().getFileSystem();
@@ -669,9 +698,11 @@ public class TestCatalogJanitor {
dos.writeBytes("Some data: " + i);
dos.close();
}
+ LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
// make sure the mock store files are there
FileStatus[] storeFiles = fs.listStatus(storedir);
- assertEquals(count, storeFiles.length);
+ assertEquals("Didn't have expected store files", count, storeFiles.length);
+ return storeFiles;
}
private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
index 1b48cb7db92..fa6c44b84f2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
@@ -85,29 +85,29 @@ public class HFileArchiveTestingUtil {
/**
* Compare the archived files to the files in the original directory
- * @param previous original files that should have been archived
- * @param archived files that were archived
+ * @param expected original files that should have been archived
+ * @param actual files that were archived
* @param fs filesystem on which the archiving took place
* @throws IOException
*/
- public static void assertArchiveEqualToOriginal(FileStatus[] previous, FileStatus[] archived,
+ public static void assertArchiveEqualToOriginal(FileStatus[] expected, FileStatus[] actual,
FileSystem fs) throws IOException {
- assertArchiveEqualToOriginal(previous, archived, fs, false);
+ assertArchiveEqualToOriginal(expected, actual, fs, false);
}
/**
* Compare the archived files to the files in the original directory
- * @param previous original files that should have been archived
- * @param archived files that were archived
+ * @param expected original files that should have been archived
+ * @param actual files that were archived
* @param fs {@link FileSystem} on which the archiving took place
* @param hasTimedBackup true if we expect to find an archive backup directory with a
* copy of the files in the archive directory (and the original files).
* @throws IOException
*/
- public static void assertArchiveEqualToOriginal(FileStatus[] previous, FileStatus[] archived,
+ public static void assertArchiveEqualToOriginal(FileStatus[] expected, FileStatus[] actual,
FileSystem fs, boolean hasTimedBackup) throws IOException {
- List<List<String>> lists = getFileLists(previous, archived);
+ List<List<String>> lists = getFileLists(expected, actual);
List<String> original = lists.get(0);
Collections.sort(original);