diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index e751f65a8b4..1492548c22c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.master;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Comparator;
+import java.util.HashSet;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicInteger;
@@ -99,11 +100,10 @@ class CatalogJanitor extends Chore {
}
/**
-   * Run janitorial scan of catalog <code>.META.</code> table looking for
-   * garbage to collect.
- * @throws IOException
+ * Scans META and returns a number of scanned rows, and
+ * an ordered map of split parents.
*/
- void scan() throws IOException {
+  Pair<Integer, Map<HRegionInfo, Result>> getSplitParents() throws IOException {
// TODO: Only works with single .META. region currently. Fix.
final AtomicInteger count = new AtomicInteger(0);
// Keep Map of found split parents. There are candidates for cleanup.
@@ -125,18 +125,40 @@ class CatalogJanitor extends Chore {
};
// Run full scan of .META. catalog table passing in our custom visitor
MetaReader.fullScan(this.server.getCatalogTracker(), visitor);
+
+    return new Pair<Integer, Map<HRegionInfo, Result>>(count.get(), splitParents);
+ }
+
+ /**
+   * Run janitorial scan of catalog <code>.META.</code> table looking for
+ * garbage to collect.
+ * @throws IOException
+ */
+ int scan() throws IOException {
+    Pair<Integer, Map<HRegionInfo, Result>> pair = getSplitParents();
+ int count = pair.getFirst();
+    Map<HRegionInfo, Result> splitParents = pair.getSecond();
+
// Now work on our list of found parents. See if any we can clean up.
int cleaned = 0;
+    HashSet<HRegionInfo> parentNotCleaned = new HashSet<HRegionInfo>(); //regions whose parents are still around
     for (Map.Entry<HRegionInfo, Result> e : splitParents.entrySet()) {
- if (cleanParent(e.getKey(), e.getValue())) cleaned++;
+ if (!parentNotCleaned.contains(e.getKey()) && cleanParent(e.getKey(), e.getValue())) {
+ cleaned++;
+ } else {
+        // We could not clean the parent, so its daughters should not be cleaned either (HBASE-6160)
+ parentNotCleaned.add(getDaughterRegionInfo(e.getValue(), HConstants.SPLITA_QUALIFIER));
+ parentNotCleaned.add(getDaughterRegionInfo(e.getValue(), HConstants.SPLITB_QUALIFIER));
+ }
}
if (cleaned != 0) {
- LOG.info("Scanned " + count.get() + " catalog row(s) and gc'd " + cleaned +
+ LOG.info("Scanned " + count + " catalog row(s) and gc'd " + cleaned +
" unreferenced parent region(s)");
} else if (LOG.isDebugEnabled()) {
- LOG.debug("Scanned " + count.get() + " catalog row(s) and gc'd " + cleaned +
+ LOG.debug("Scanned " + count + " catalog row(s) and gc'd " + cleaned +
" unreferenced parent region(s)");
}
+ return cleaned;
}
/**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 5c16e343515..2495987dd0e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -23,6 +23,9 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.spy;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -55,11 +58,13 @@ import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.Test;
@@ -490,6 +495,96 @@ public class TestCatalogJanitor {
janitor.join();
}
+ /**
+ * CatalogJanitor.scan() should not clean parent regions if their own
+ * parents are still referencing them. This ensures that grandfather regions
+ * do not point to deleted parent regions.
+ */
+ @Test
+ public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception {
+ HBaseTestingUtility htu = new HBaseTestingUtility();
+ setRootDirAndCleanIt(htu, "testScanDoesNotCleanRegionsWithExistingParents");
+ Server server = new MockServer(htu);
+ MasterServices services = new MockMasterServices(server);
+
+ final HTableDescriptor htd = createHTableDescriptor();
+
+ // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
+
+ // Parent
+ HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
+ new byte[0], true);
+ // Sleep a second else the encoded name on these regions comes out
+ // same for all with same start key and made in same second.
+ Thread.sleep(1001);
+
+ // Daughter a
+ HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
+ Bytes.toBytes("ccc"), true);
+ Thread.sleep(1001);
+ // Make daughters of daughter a; splitaa and splitab.
+ HRegionInfo splitaa = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
+ Bytes.toBytes("bbb"), false);
+ HRegionInfo splitab = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"),
+ Bytes.toBytes("ccc"), false);
+
+ // Daughter b
+ HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
+ new byte[0]);
+ Thread.sleep(1001);
+
+    final Map<HRegionInfo, Result> splitParents =
+        new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
+ splitParents.put(parent, makeResultFromHRegionInfo(parent, splita, splitb));
+ splitParents.put(splita, makeResultFromHRegionInfo(splita, splitaa, splitab));
+
+ CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
+    doReturn(new Pair<Integer, Map<HRegionInfo, Result>>(
+        10, splitParents)).when(janitor).getSplitParents();
+
+ //create ref from splita to parent
+ Path splitaRef =
+ createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
+
+ //parent and A should not be removed
+ assertEquals(0, janitor.scan());
+
+ //now delete the ref
+ FileSystem fs = FileSystem.get(htu.getConfiguration());
+ assertTrue(fs.delete(splitaRef, true));
+
+ //now, both parent, and splita can be deleted
+ assertEquals(2, janitor.scan());
+
+ services.stop("test finished");
+ janitor.join();
+ }
+
+ private Result makeResultFromHRegionInfo(HRegionInfo region, HRegionInfo splita,
+ HRegionInfo splitb) throws IOException {
+    List<KeyValue> kvs = new ArrayList<KeyValue>();
+ kvs.add(new KeyValue(
+ region.getRegionName(),
+ HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+ Writables.getBytes(region)));
+
+ if (splita != null) {
+ kvs.add(new KeyValue(
+ region.getRegionName(),
+ HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
+ Writables.getBytes(splita)));
+ }
+
+ if (splitb != null) {
+ kvs.add(new KeyValue(
+ region.getRegionName(),
+ HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
+ Writables.getBytes(splitb)));
+ }
+
+ return new Result(kvs);
+ }
+
private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
final String subdir)
throws IOException {