From 2797adf45f1e4de20fb017834f583ae390fdb5a2 Mon Sep 17 00:00:00 2001
From: tedyu
Date: Mon, 4 Jan 2016 07:19:13 -0800
Subject: [PATCH] HBASE-14987 Compaction marker whose region name doesn't
 match current region's needs to be handled

---
 .../hadoop/hbase/protobuf/ProtobufUtil.java   |  9 +++-
 .../hadoop/hbase/regionserver/HRegion.java    | 44 +++++++++++++++----
 .../hbase/regionserver/TestHRegion.java       | 26 +++++++++--
 3 files changed, 65 insertions(+), 14 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 47305ffb9f5..5f5c3a32942 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -2609,12 +2609,19 @@ public final class ProtobufUtil {

   public static CompactionDescriptor toCompactionDescriptor(HRegionInfo info, byte[] family,
       List<Path> inputPaths, List<Path> outputPaths, Path storeDir) {
+    return toCompactionDescriptor(info, null, family, inputPaths, outputPaths, storeDir);
+  }
+
+  @SuppressWarnings("deprecation")
+  public static CompactionDescriptor toCompactionDescriptor(HRegionInfo info, byte[] regionName,
+      byte[] family, List<Path> inputPaths, List<Path> outputPaths, Path storeDir) {
     // compaction descriptor contains relative paths.
     // input / output paths are relative to the store dir
     // store dir is relative to region dir
     CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder()
         .setTableName(ByteStringer.wrap(info.getTableName()))
-        .setEncodedRegionName(ByteStringer.wrap(info.getEncodedNameAsBytes()))
+        .setEncodedRegionName(ByteStringer.wrap(
+            regionName == null ? info.getEncodedNameAsBytes() : regionName))
         .setFamilyName(ByteStringer.wrap(family))
         .setStoreHomeDir(storeDir.getName()); //make relative
     for (Path inputPath : inputPaths) {
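Note: the ProtobufUtil change above only affects which encoded region name is written into the
CompactionDescriptor; the existing overload now delegates to the new one, passing null for the
region name override. A minimal standalone sketch of that selection logic (plain Java, the class
and method names here are illustrative only, not HBase code):

// Sketch only: mirrors the null check the new overload performs when building the descriptor.
public final class EncodedNameChoice {

  // Use the override when the caller supplies one, otherwise fall back to the region's own
  // encoded name -- the same decision as
  // "regionName == null ? info.getEncodedNameAsBytes() : regionName".
  static byte[] chooseEncodedName(byte[] regionsOwnEncodedName, byte[] override) {
    return override == null ? regionsOwnEncodedName : override;
  }

  public static void main(String[] args) {
    byte[] own = "1588230740".getBytes();   // stand-in for info.getEncodedNameAsBytes()
    byte[] fake = "deadbeef00".getBytes();  // stand-in for a deliberately mismatched name

    // Existing callers pass no override and keep the old behavior.
    System.out.println(new String(chooseEncodedName(own, null)));  // 1588230740
    // A test can now force a mismatched name into the compaction marker.
    System.out.println(new String(chooseEncodedName(own, fake)));  // deadbeef00
  }
}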
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7d511018946..e4149309fde 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -4132,11 +4132,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
             continue;
           }
         }
+        boolean checkRowWithinBoundary = false;
         // Check this edit is for this region.
         if (!Bytes.equals(key.getEncodedRegionName(),
             this.getRegionInfo().getEncodedNameAsBytes())) {
-          skippedEdits++;
-          continue;
+          checkRowWithinBoundary = true;
         }

         boolean flush = false;
@@ -4144,11 +4144,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           // Check this edit is for me. Also, guard against writing the special
          // METACOLUMN info such as HBASE::CACHEFLUSH entries
          if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-            //this is a special edit, we should handle it
-            CompactionDescriptor compaction = WALEdit.getCompaction(cell);
-            if (compaction != null) {
-              //replay the compaction
-              replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE);
+            // if region names don't match, skip replaying compaction marker
+            if (!checkRowWithinBoundary) {
+              //this is a special edit, we should handle it
+              CompactionDescriptor compaction = WALEdit.getCompaction(cell);
+              if (compaction != null) {
+                //replay the compaction
+                replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE);
+              }
            }
            skippedEdits++;
            continue;
@@ -4164,6 +4167,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
            skippedEdits++;
            continue;
          }
+          if (checkRowWithinBoundary && !rowIsInRange(this.getRegionInfo(),
+              cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) {
+            LOG.warn("Row of " + cell + " is not within region boundary");
+            skippedEdits++;
+            continue;
+          }
          // Now, figure if we should skip this edit.
          if (key.getLogSeqNum() <= maxSeqIdInStores.get(store.getFamily()
              .getName())) {
@@ -4234,8 +4243,16 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   void replayWALCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles,
       boolean removeFiles, long replaySeqId)
       throws IOException {
-    checkTargetRegion(compaction.getEncodedRegionName().toByteArray(),
-      "Compaction marker from WAL ", compaction);
+    try {
+      checkTargetRegion(compaction.getEncodedRegionName().toByteArray(),
+        "Compaction marker from WAL ", compaction);
+    } catch (WrongRegionException wre) {
+      if (RegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) {
+        // skip the compaction marker since it is not for this region
+        return;
+      }
+      throw wre;
+    }

     synchronized (writestate) {
       if (replaySeqId < lastReplayedOpenRegionSeqId) {
@@ -6663,6 +6680,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         (Bytes.compareTo(info.getEndKey(), row) > 0));
   }

+  public static boolean rowIsInRange(HRegionInfo info, final byte [] row, final int offset,
+      final short length) {
+    return ((info.getStartKey().length == 0) ||
+        (Bytes.compareTo(info.getStartKey(), 0, info.getStartKey().length,
+          row, offset, length) <= 0)) &&
+        ((info.getEndKey().length == 0) ||
+        (Bytes.compareTo(info.getEndKey(), 0, info.getEndKey().length, row, offset, length) > 0));
+  }
+
   /**
    * Merge two HRegions. The regions must be adjacent and must not overlap.
    *
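Note: the HRegion changes above do two things: replayRecoveredEdits() no longer drops every edit
whose WAL key carries a foreign encoded region name (it falls back to a row-boundary check and
skips only the compaction markers), and replayWALCompactionMarker() tolerates a
WrongRegionException on the default replica instead of failing the region open. The new
rowIsInRange(info, row, offset, length) overload applies the usual boundary rule to a row that
lives inside a larger cell buffer: an empty start key means no lower bound, an empty end key means
no upper bound, start inclusive, end exclusive. A standalone sketch of that comparison, assuming
plain byte[] keys and Java 9+ Arrays.compareUnsigned in place of Bytes.compareTo:

import java.util.Arrays;

// Sketch only (not HBase code): the boundary rule behind the new
// rowIsInRange(info, row, offset, length) overload.
public final class RowRangeSketch {

  // Unsigned lexicographic comparison of a whole key against a sub-range of a buffer,
  // standing in for Bytes.compareTo(key, 0, key.length, row, offset, length).
  static int compare(byte[] key, byte[] buf, int offset, int length) {
    return Arrays.compareUnsigned(key, 0, key.length, buf, offset, offset + length);
  }

  static boolean rowIsInRange(byte[] startKey, byte[] endKey,
      byte[] buf, int offset, int length) {
    boolean aboveStart = startKey.length == 0 || compare(startKey, buf, offset, length) <= 0;
    boolean belowEnd = endKey.length == 0 || compare(endKey, buf, offset, length) > 0;
    return aboveStart && belowEnd;
  }

  public static void main(String[] args) {
    byte[] start = "bbb".getBytes();
    byte[] end = "ddd".getBytes();
    byte[] cellBuffer = "xxcccxx".getBytes();  // row "ccc" embedded at offset 2, length 3

    System.out.println(rowIsInRange(start, end, cellBuffer, 2, 3));       // true: bbb <= ccc < ddd
    System.out.println(rowIsInRange(start, end, "zzz".getBytes(), 0, 3)); // false: past the end key
  }
}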
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 52447e72ea9..1edc2518eab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -828,6 +828,10 @@ public class TestHRegion {

   @Test
   public void testRecoveredEditsReplayCompaction() throws Exception {
+    testRecoveredEditsReplayCompaction(false);
+    testRecoveredEditsReplayCompaction(true);
+  }
+  public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) throws Exception {
     String method = name.getMethodName();
     TableName tableName = TableName.valueOf(method);
     byte[] family = Bytes.toBytes("family");
@@ -873,9 +877,17 @@ public class TestHRegion {
       Path newFile = region.getRegionFileSystem().commitStoreFile(Bytes.toString(family),
           files[0].getPath());

+      byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes();
+      byte[] fakeEncodedNameAsBytes = new byte [encodedNameAsBytes.length];
+      for (int i=0; i < encodedNameAsBytes.length; i++) {
+        // Mix the byte array to have a new encodedName
+        fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1);
+      }
+
       CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(this.region
-          .getRegionInfo(), family, storeFiles, Lists.newArrayList(newFile), region
-          .getRegionFileSystem().getStoreDir(Bytes.toString(family)));
+          .getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family,
+          storeFiles, Lists.newArrayList(newFile),
+          region.getRegionFileSystem().getStoreDir(Bytes.toString(family)));

       WALUtil.writeCompactionMarker(region.getWAL(), this.region.getTableDesc(),
           this.region.getRegionInfo(), compactionDescriptor, region.getMVCC());
@@ -897,14 +909,20 @@ public class TestHRegion {
       region.getTableDesc();
       region.getRegionInfo();
       region.close();
-      region = HRegion.openHRegion(region, null);
+      try {
+        region = HRegion.openHRegion(region, null);
+      } catch (WrongRegionException wre) {
+        fail("Matching encoded region name should not have produced WrongRegionException");
+      }

       // now check whether we have only one store file, the compacted one
       Collection<StoreFile> sfs = region.getStore(family).getStorefiles();
       for (StoreFile sf : sfs) {
         LOG.info(sf.getPath());
       }
-      assertEquals(1, region.getStore(family).getStorefilesCount());
+      if (!mismatchedRegionName) {
+        assertEquals(1, region.getStore(family).getStorefilesCount());
+      }
       files = FSUtils.listStatus(fs, tmpDir);
       assertTrue("Expected to find 0 files inside " + tmpDir,
         files == null || files.length == 0);
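Note: the test now runs twice, once with a matching encoded region name and once with a
deliberately mismatched one. The mismatched name is derived by adding 1 to every byte of the real
encoded name, so the fake array differs from the original at every index and the Bytes.equals()
check in the replay path is guaranteed to fail. A small standalone sketch of that trick
(illustrative names only, not the test itself):

import java.util.Arrays;

// Sketch only: how a mismatched encoded region name can be produced from the real one before
// handing it to ProtobufUtil.toCompactionDescriptor(...).
public final class FakeEncodedName {

  // Add 1 to each byte; arithmetic wraps at 0xFF, so every position changes and the result
  // can never equal the input.
  static byte[] mix(byte[] encodedNameAsBytes) {
    byte[] fake = new byte[encodedNameAsBytes.length];
    for (int i = 0; i < encodedNameAsBytes.length; i++) {
      fake[i] = (byte) (encodedNameAsBytes[i] + 1);
    }
    return fake;
  }

  public static void main(String[] args) {
    byte[] real = "abcdef0123456789".getBytes();  // stand-in for getEncodedNameAsBytes()
    byte[] fake = mix(real);
    System.out.println(Arrays.equals(real, fake)); // false: the marker now targets a "wrong" region
  }
}

With the fix in place the reopen succeeds in both runs; only the matching-name run asserts that
exactly one (compacted) store file remains, since a marker carrying a mismatched region name is
skipped rather than replayed.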