diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
index 16dc2e38c2f..56339da36bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
@@ -684,8 +684,9 @@ public class HFileReaderV1 extends AbstractHFileReader {
 
   @Override
   public DataInput getGeneralBloomFilterMetadata() throws IOException {
-    // Always cache Bloom filter blocks.
-    ByteBuffer buf = getMetaBlock(HFileWriterV1.BLOOM_FILTER_META_KEY, true);
+    // Shouldn't cache Bloom filter blocks, otherwise server would abort when
+    // splitting, see HBASE-6479
+    ByteBuffer buf = getMetaBlock(HFileWriterV1.BLOOM_FILTER_META_KEY, false);
     if (buf == null)
       return null;
     ByteArrayInputStream bais = new ByteArrayInputStream(buf.array(),
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 8c406664598..4c813bbfce7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1093,7 +1093,6 @@ public class HBaseTestingUtility {
     return rowCount;
   }
 
-
   /**
    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
    * @param t Table
@@ -1124,15 +1123,19 @@ public class HBaseTestingUtility {
     return rowCount;
   }
 
+  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
+    return loadRegion(r, f, false);
+  }
 
   /**
    * Load region with rows from 'aaa' to 'zzz'.
    * @param r Region
    * @param f Family
+   * @param flush flush the cache if true
    * @return Count of rows loaded.
    * @throws IOException
    */
-  public int loadRegion(final HRegion r, final byte[] f)
+  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
   throws IOException {
     byte[] k = new byte[3];
     int rowCount = 0;
@@ -1149,6 +1152,9 @@ public class HBaseTestingUtility {
           rowCount++;
         }
       }
+      if (flush) {
+        r.flushcache();
+      }
     }
     return rowCount;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
index 390b422b30f..b9a9e4fd0a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
@@ -18,8 +18,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import com.google.common.collect.ImmutableList;
-
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -32,8 +30,18 @@ import java.util.List;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -49,6 +57,8 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
 
+import com.google.common.collect.ImmutableList;
+
 /**
  * Test the {@link SplitTransaction} class against an HRegion (as opposed to
  * running cluster).
@@ -192,12 +202,32 @@ public class TestSplitTransaction {
     assertFalse(st.prepare());
   }
 
+  @Test public void testWholesomeSplitWithHFileV1() throws IOException {
+    int defaultVersion = TEST_UTIL.getConfiguration().getInt(
+        HFile.FORMAT_VERSION_KEY, 2);
+    TEST_UTIL.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 1);
+    try {
+      for (Store store : this.parent.stores.values()) {
+        store.getFamily().setBloomFilterType(StoreFile.BloomType.ROW);
+      }
+      testWholesomeSplit();
+    } finally {
+      TEST_UTIL.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY,
+          defaultVersion);
+    }
+  }
+
   @Test public void testWholesomeSplit() throws IOException {
-    final int rowcount = TEST_UTIL.loadRegion(this.parent, CF);
+    final int rowcount = TEST_UTIL.loadRegion(this.parent, CF, true);
     assertTrue(rowcount > 0);
     int parentRowCount = countRows(this.parent);
     assertEquals(rowcount, parentRowCount);
 
+    // Pretend region's blocks are not in the cache, used for
+    // testWholesomeSplitWithHFileV1
+    CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
+    ((LruBlockCache) cacheConf.getBlockCache()).clearCache();
+
     // Start transaction.
     SplitTransaction st = prepareGOOD_SPLIT_ROW();