From 07b623670647686084f8f5fd2038e2bafcfdac54 Mon Sep 17 00:00:00 2001 From: ramkrishna Date: Wed, 30 Dec 2015 14:39:03 +0530 Subject: [PATCH] HBASE-15050 Block Ref counting does not work in Region Split cases (Ram) --- .../hadoop/hbase/io/HalfStoreFileReader.java | 8 +++ .../hbase/io/hfile/HFileReaderImpl.java | 8 ++- .../client/TestBlockEvictionFromClient.java | 53 +++++++++++++++++++ 3 files changed, 68 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index 0dd77427c7e..067d24c3a16 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -314,6 +314,10 @@ public class HalfStoreFileReader extends StoreFile.Reader { } } catch (IOException e) { LOG.warn("Failed seekBefore " + Bytes.toStringBinary(this.splitkey), e); + } finally { + if (scanner != null) { + scanner.close(); + } } return null; } @@ -335,6 +339,10 @@ public class HalfStoreFileReader extends StoreFile.Reader { firstKeySeeked = true; } catch (IOException e) { LOG.warn("Failed seekTo first KV in the file", e); + } finally { + if(scanner != null) { + scanner.close(); + } } } return this.firstKey; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 4e2ca7d5b5e..fcf7b5bbdfb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -986,7 +986,13 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { return new KeyValue.KeyOnlyKeyValue(keyBuf.array(), keyBuf.arrayOffset() + keyPair.getSecond(), currKeyLen); } else { - return new ByteBufferedKeyOnlyKeyValue(keyBuf, 
keyPair.getSecond(), currKeyLen); + // Better to do a copy here instead of holding on to this BB so that + // we could release the blocks referring to this key. This key is specifically used + // in HalfStoreFileReader to get the firstkey and lastkey by creating a new scanner + // every time. So holding onto the BB (in case of DBB) is not advised here. + byte[] key = new byte[currKeyLen]; + ByteBufferUtils.copyFromBufferToArray(key, keyBuf, keyPair.getSecond(), 0, currKeyLen); + return new KeyValue.KeyOnlyKeyValue(key, 0, currKeyLen); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java index a812623f8eb..f4d668cb6c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java @@ -74,6 +74,8 @@ public class TestBlockEvictionFromClient { private static int NO_OF_THREADS = 3; private static byte[] ROW = Bytes.toBytes("testRow"); private static byte[] ROW1 = Bytes.toBytes("testRow1"); + private static byte[] ROW2 = Bytes.toBytes("testRow2"); + private static byte[] ROW3 = Bytes.toBytes("testRow3"); private static byte[] FAMILY = Bytes.toBytes("testFamily"); private static byte[][] FAMILIES_1 = new byte[1][0]; private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); @@ -553,6 +555,57 @@ public class TestBlockEvictionFromClient { } } + @Test + public void testBlockRefCountAfterSplits() throws IOException, InterruptedException { + HTable table = null; + try { + TableName tableName = TableName.valueOf("testBlockRefCountAfterSplits"); + table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024); + // get the block cache and region + RegionLocator locator = table.getRegionLocator(); + String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); + 
Region region = + TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); + Store store = region.getStores().iterator().next(); + CacheConfig cacheConf = store.getCacheConfig(); + cacheConf.setEvictOnClose(true); + BlockCache cache = cacheConf.getBlockCache(); + + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, data); + table.put(put); + region.flush(true); + put = new Put(ROW1); + put.addColumn(FAMILY, QUALIFIER, data); + table.put(put); + region.flush(true); + byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER); + put = new Put(ROW2); + put.addColumn(FAMILY, QUALIFIER2, data2); + table.put(put); + put = new Put(ROW3); + put.addColumn(FAMILY, QUALIFIER2, data2); + table.put(put); + region.flush(true); + TEST_UTIL.getAdmin().split(tableName, ROW1); + List tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName); + // Wait for splits + while (tableRegions.size() != 2) { + tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName); + Thread.sleep(100); + } + region.compact(true); + Iterator iterator = cache.iterator(); + // Though the split had created the HalfStoreFileReader - the firstkey and lastkey scanners + // should be closed in order to return those blocks + iterateBlockCache(cache, iterator); + } finally { + if (table != null) { + table.close(); + } + } + } + @Test + public void testMultiGets() throws IOException, InterruptedException { + HTable table = null;