HBASE-6160 META entries from daughters can be deleted before parent entries

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1346496 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2012-06-05 18:02:34 +00:00
parent fc4d7d7a59
commit 29a433911b
2 changed files with 124 additions and 7 deletions

View File

@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.master;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicInteger;
@ -99,11 +100,10 @@ class CatalogJanitor extends Chore {
}
/**
* Run janitorial scan of catalog <code>.META.</code> table looking for
* garbage to collect.
* @throws IOException
* Scans META and returns the number of rows scanned together with
* an ordered map of split parents.
*/
void scan() throws IOException {
Pair<Integer, Map<HRegionInfo, Result>> getSplitParents() throws IOException {
// TODO: Only works with single .META. region currently. Fix.
final AtomicInteger count = new AtomicInteger(0);
// Keep Map of found split parents. There are candidates for cleanup.
@ -125,18 +125,40 @@ class CatalogJanitor extends Chore {
};
// Run full scan of .META. catalog table passing in our custom visitor
MetaReader.fullScan(this.server.getCatalogTracker(), visitor);
return new Pair<Integer, Map<HRegionInfo, Result>>(count.get(), splitParents);
}
/**
* Run janitorial scan of catalog <code>.META.</code> table looking for
* garbage to collect.
* @throws IOException
*/
int scan() throws IOException {
Pair<Integer, Map<HRegionInfo, Result>> pair = getSplitParents();
int count = pair.getFirst();
Map<HRegionInfo, Result> splitParents = pair.getSecond();
// Now work on our list of found parents. See if any we can clean up.
int cleaned = 0;
HashSet<HRegionInfo> parentNotCleaned = new HashSet<HRegionInfo>(); //regions whose parents are still around
for (Map.Entry<HRegionInfo, Result> e : splitParents.entrySet()) {
if (cleanParent(e.getKey(), e.getValue())) cleaned++;
if (!parentNotCleaned.contains(e.getKey()) && cleanParent(e.getKey(), e.getValue())) {
cleaned++;
} else {
// We could not clean the parent, so its daughters should not be cleaned either (HBASE-6160)
parentNotCleaned.add(getDaughterRegionInfo(e.getValue(), HConstants.SPLITA_QUALIFIER));
parentNotCleaned.add(getDaughterRegionInfo(e.getValue(), HConstants.SPLITB_QUALIFIER));
}
}
if (cleaned != 0) {
LOG.info("Scanned " + count.get() + " catalog row(s) and gc'd " + cleaned +
LOG.info("Scanned " + count + " catalog row(s) and gc'd " + cleaned +
" unreferenced parent region(s)");
} else if (LOG.isDebugEnabled()) {
LOG.debug("Scanned " + count.get() + " catalog row(s) and gc'd " + cleaned +
LOG.debug("Scanned " + count + " catalog row(s) and gc'd " + cleaned +
" unreferenced parent region(s)");
}
return cleaned;
}
/**

View File

@ -23,6 +23,9 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;
import java.io.FileNotFoundException;
import java.io.IOException;
@ -55,11 +58,13 @@ import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.Test;
@ -490,6 +495,96 @@ public class TestCatalogJanitor {
janitor.join();
}
/**
 * Regression test for HBASE-6160: CatalogJanitor.scan() must not clean up
 * a split parent whose daughter still references it, and must also leave
 * that daughter's own split-parent row alone, so a grandparent row is never
 * left pointing at a deleted parent. Here "parent" split into splita/splitb,
 * and splita in turn split into splitaa/splitab; while splita holds a
 * reference file back to "parent", neither row may be garbage-collected.
 * Once that reference is deleted, both become collectable in one scan.
 */
@Test
public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception {
HBaseTestingUtility htu = new HBaseTestingUtility();
setRootDirAndCleanIt(htu, "testScanDoesNotCleanRegionsWithExistingParents");
// Mock master plumbing; the janitor only needs these for configuration
// and filesystem access in this test.
Server server = new MockServer(htu);
MasterServices services = new MockMasterServices(server);
final HTableDescriptor htd = createHTableDescriptor();
// Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
// Parent: spans the keyspace from "aaa" to the table end key.
HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
new byte[0], true);
// Sleep a second else the encoded name on these regions comes out
// same for all with same start key and made in same second.
Thread.sleep(1001);
// Daughter a: first half of parent's range; itself already split (true).
HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
Bytes.toBytes("ccc"), true);
Thread.sleep(1001);
// Make daughters of daughter a; splitaa and splitab.
HRegionInfo splitaa = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
Bytes.toBytes("bbb"), false);
HRegionInfo splitab = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"),
Bytes.toBytes("ccc"), false);
// Daughter b: second half of parent's range.
HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
new byte[0]);
Thread.sleep(1001);
// Hand-built META content: both "parent" and "splita" are split parents,
// sorted parent-before-daughter by SplitParentFirstComparator.
final Map<HRegionInfo, Result> splitParents =
new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
splitParents.put(parent, makeResultFromHRegionInfo(parent, splita, splitb));
splitParents.put(splita, makeResultFromHRegionInfo(splita, splitaa, splitab));
// Spy the janitor so scan() consumes the fabricated rows above instead of
// performing a real META scan.
CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
doReturn(new Pair<Integer, Map<HRegionInfo, Result>>(
10, splitParents)).when(janitor).getSplitParents();
// Create a reference file from splita back to parent, keeping parent "in use".
Path splitaRef =
createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
// Parent (still referenced) and splita (its parent survives) must both be kept.
assertEquals(0, janitor.scan());
// Now delete the reference file...
FileSystem fs = FileSystem.get(htu.getConfiguration());
assertTrue(fs.delete(splitaRef, true));
// ...after which both parent and splita are collectable in a single scan.
assertEquals(2, janitor.scan());
services.stop("test finished");
janitor.join();
}
/**
 * Fabricates a META {@code Result} row for the given region, optionally
 * carrying its two split daughters.
 *
 * @param region the region whose serialized info anchors the row
 * @param splita daughter A, or {@code null} to omit the SPLITA cell
 * @param splitb daughter B, or {@code null} to omit the SPLITB cell
 * @return a Result holding the region info cell plus any daughter cells
 * @throws IOException if serializing an HRegionInfo fails
 */
private Result makeResultFromHRegionInfo(HRegionInfo region, HRegionInfo splita,
    HRegionInfo splitb) throws IOException {
  final byte[] row = region.getRegionName();
  final List<KeyValue> cells = new ArrayList<KeyValue>();
  // The region's own serialized HRegionInfo always comes first.
  cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
      HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(region)));
  // Daughter cells are optional: a null daughter simply leaves its
  // qualifier out of the fabricated row.
  if (splita != null) {
    cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
        HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
  }
  if (splitb != null) {
    cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
        HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
  }
  return new Result(cells);
}
private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
final String subdir)
throws IOException {