HBASE-6479 HFileReaderV1 caching the same parent META block could cause server abort when splitting

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1393194 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2012-10-02 21:29:21 +00:00
parent 6b82ddc911
commit 63743c2804
3 changed files with 45 additions and 8 deletions
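
Why caching the parent's META block bites during a split, in a hedged toy sketch: once a region splits, both daughter regions open readers over the same parent store file, so the Bloom-filter META block can be offered to the block cache twice under one key, and a cache that treats a duplicate insert as a programming error takes the server down with it. Everything below (ToyBlockCache, the key scheme, the class name) is illustrative, not the HBase LruBlockCache API; only the duplicate-key failure is meant to mirror the report.

import java.util.HashMap;
import java.util.Map;

public class MetaBlockCacheCollision {

  // Illustrative stand-in for a strict block cache: caching the same key
  // twice is treated as a bug rather than silently overwritten.
  static class ToyBlockCache {
    private final Map<String, byte[]> blocks = new HashMap<String, byte[]>();

    void cacheBlock(String key, byte[] block) {
      if (blocks.containsKey(key)) {
        throw new IllegalStateException("already cached: " + key);
      }
      blocks.put(key, block);
    }
  }

  public static void main(String[] args) {
    ToyBlockCache cache = new ToyBlockCache();
    // Both daughters reference the same parent file, so a META block cached
    // under a key derived from that file name collides.
    String key = "parentStoreFile.meta.BLOOM_FILTER_META";
    cache.cacheBlock(key, new byte[0]);  // reader opened for the first daughter
    cache.cacheBlock(key, new byte[0]);  // second daughter: IllegalStateException
  }
}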

View File

@@ -684,8 +684,9 @@ public class HFileReaderV1 extends AbstractHFileReader {
   @Override
   public DataInput getGeneralBloomFilterMetadata() throws IOException {
-    // Always cache Bloom filter blocks.
-    ByteBuffer buf = getMetaBlock(HFileWriterV1.BLOOM_FILTER_META_KEY, true);
+    // Shouldn't cache Bloom filter blocks, otherwise server would abort when
+    // splitting, see HBASE-6479
+    ByteBuffer buf = getMetaBlock(HFileWriterV1.BLOOM_FILTER_META_KEY, false);
     if (buf == null)
       return null;
     ByteArrayInputStream bais = new ByteArrayInputStream(buf.array(),
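
The hunk above is cut off mid-statement; as a readability aid, here is a hedged sketch of the whole method as it would read after the change, with the truncated ByteArrayInputStream call written out. The last two statements are an assumption about how the method finishes, not a verbatim quote of HFileReaderV1.

// Sketch of the patched method (context: HFileReaderV1; java.io imports assumed).
@Override
public DataInput getGeneralBloomFilterMetadata() throws IOException {
  // Shouldn't cache Bloom filter blocks, otherwise server would abort when
  // splitting, see HBASE-6479
  ByteBuffer buf = getMetaBlock(HFileWriterV1.BLOOM_FILTER_META_KEY, false);
  if (buf == null)
    return null;
  ByteArrayInputStream bais = new ByteArrayInputStream(buf.array(),
      buf.arrayOffset(), buf.limit());   // assumed continuation of the cut-off line
  return new DataInputStream(bais);      // assumed final statement
}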

View File

@@ -1093,7 +1093,6 @@ public class HBaseTestingUtility {
     return rowCount;
   }
   /**
    * Load table of multiple column families with rows from 'aaa' to 'zzz'.
    * @param t Table
@@ -1124,15 +1123,19 @@ public class HBaseTestingUtility {
     return rowCount;
   }
+  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
+    return loadRegion(r, f, false);
+  }
   /**
    * Load region with rows from 'aaa' to 'zzz'.
    * @param r Region
    * @param f Family
+   * @param flush flush the cache if true
    * @return Count of rows loaded.
    * @throws IOException
    */
-  public int loadRegion(final HRegion r, final byte[] f)
+  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
   throws IOException {
     byte[] k = new byte[3];
     int rowCount = 0;
@@ -1149,6 +1152,9 @@ public class HBaseTestingUtility {
           rowCount++;
         }
       }
+      if (flush) {
+        r.flushcache();
+      }
     }
     return rowCount;
   }
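
A hedged usage sketch of the new overload, assuming a JUnit test with the usual static HBaseTestingUtility fixture named TEST_UTIL and an already-opened HRegion named region (both names, like the column family, are illustrative):

// Load rows 'aaa'..'zzz' into one family and flush the memstore, so later
// reads hit store files (and can exercise the block cache) rather than only
// in-memory data; passing false keeps the old no-flush behaviour.
// Bytes is org.apache.hadoop.hbase.util.Bytes; assertTrue is from JUnit.
@Test
public void loadsAndFlushesRegion() throws IOException {
  byte[] family = Bytes.toBytes("cf");
  int rows = TEST_UTIL.loadRegion(region, family, true);
  assertTrue(rows > 0);
}

The commit's own TestSplitTransaction uses exactly this shape: loadRegion(this.parent, CF, true) followed by clearing the LruBlockCache before starting the split.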

View File

@@ -18,8 +18,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
-import com.google.common.collect.ImmutableList;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -32,8 +30,18 @@ import java.util.List;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -49,6 +57,8 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
+import com.google.common.collect.ImmutableList;
 /**
  * Test the {@link SplitTransaction} class against an HRegion (as opposed to
  * running cluster).
@@ -192,12 +202,32 @@ public class TestSplitTransaction {
     assertFalse(st.prepare());
   }
+  @Test public void testWholesomeSplitWithHFileV1() throws IOException {
+    int defaultVersion = TEST_UTIL.getConfiguration().getInt(
+        HFile.FORMAT_VERSION_KEY, 2);
+    TEST_UTIL.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 1);
+    try {
+      for (Store store : this.parent.stores.values()) {
+        store.getFamily().setBloomFilterType(StoreFile.BloomType.ROW);
+      }
+      testWholesomeSplit();
+    } finally {
+      TEST_UTIL.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY,
+          defaultVersion);
+    }
+  }
   @Test public void testWholesomeSplit() throws IOException {
-    final int rowcount = TEST_UTIL.loadRegion(this.parent, CF);
+    final int rowcount = TEST_UTIL.loadRegion(this.parent, CF, true);
     assertTrue(rowcount > 0);
     int parentRowCount = countRows(this.parent);
     assertEquals(rowcount, parentRowCount);
+    // Pretend region's blocks are not in the cache, used for
+    // testWholesomeSplitWithHFileV1
+    CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
+    ((LruBlockCache) cacheConf.getBlockCache()).clearCache();
     // Start transaction.
     SplitTransaction st = prepareGOOD_SPLIT_ROW();