HBASE-8814 Possible NPE in split if a region has empty store files
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1499213 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
9ccf16db51
commit
e13718b872
|
@ -527,6 +527,8 @@ public class HRegionFileSystem {
|
|||
//check if larger than last key.
|
||||
KeyValue splitKey = KeyValue.createFirstOnRow(splitRow);
|
||||
byte[] lastKey = f.createReader().getLastKey();
|
||||
// If lastKey is null means storefile is empty.
|
||||
if (lastKey == null) return null;
|
||||
if (f.getReader().getComparator().compare(splitKey.getBuffer(),
|
||||
splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
|
||||
return null;
|
||||
|
@ -535,6 +537,8 @@ public class HRegionFileSystem {
|
|||
//check if smaller than first key
|
||||
KeyValue splitKey = KeyValue.createLastOnRow(splitRow);
|
||||
byte[] firstKey = f.createReader().getFirstKey();
|
||||
// If firstKey is null means storefile is empty.
|
||||
if (firstKey == null) return null;
|
||||
if (f.getReader().getComparator().compare(splitKey.getBuffer(),
|
||||
splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
|
||||
return null;
|
||||
|
|
|
@ -40,8 +40,10 @@ import org.apache.hadoop.fs.Path;
|
|||
import org.apache.hadoop.hbase.Abortable;
|
||||
import org.apache.hadoop.hbase.HBaseIOException;
|
||||
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.LargeTests;
|
||||
import org.apache.hadoop.hbase.MiniHBaseCluster;
|
||||
import org.apache.hadoop.hbase.RegionTransition;
|
||||
|
@ -52,6 +54,9 @@ import org.apache.hadoop.hbase.client.Delete;
|
|||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.Put;
|
||||
import org.apache.hadoop.hbase.client.Result;
|
||||
import org.apache.hadoop.hbase.client.ResultScanner;
|
||||
import org.apache.hadoop.hbase.client.Scan;
|
||||
import org.apache.hadoop.hbase.exceptions.DeserializationException;
|
||||
import org.apache.hadoop.hbase.exceptions.MasterNotRunningException;
|
||||
import org.apache.hadoop.hbase.exceptions.UnknownRegionException;
|
||||
|
@ -75,6 +80,7 @@ import org.apache.zookeeper.KeeperException.NodeExistsException;
|
|||
import org.apache.zookeeper.data.Stat;
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
@ -427,6 +433,69 @@ public class TestSplitTransactionOnCluster {
|
|||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 180000)
public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exception {
  // Regression test for HBASE-8814: splitting a region that contains store
  // files with no live cells (everything deleted, then flushed) used to NPE,
  // because an empty store file has a null first/last key. The split must
  // instead complete normally and lose no data.
  Configuration conf = TESTING_UTIL.getConfiguration();
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TESTING_UTIL);
  String userTableName = "testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles";
  HTableDescriptor htd = new HTableDescriptor(userTableName);
  HColumnDescriptor hcd = new HColumnDescriptor("col");
  htd.addFamily(hcd);
  admin.createTable(htd);
  // Wait until no regions are in transition before operating on the table.
  ZKAssign.blockUntilNoRIT(zkw);
  HTable table = new HTable(conf, userTableName);
  try {
    // Manufacture store files whose cells are all deleted: put + flush, then
    // delete + flush for each row, so major compaction can empty them out.
    for (int i = 0; i <= 5; i++) {
      String row = "row" + i;
      Put p = new Put(row.getBytes());
      String val = "Val" + i;
      p.add("col".getBytes(), "ql".getBytes(), val.getBytes());
      table.put(p);
      admin.flush(userTableName);
      Delete d = new Delete(row.getBytes());
      // Do a normal delete
      table.delete(d);
      admin.flush(userTableName);
    }
    // Major compaction drops the deleted cells, which can leave an empty
    // store file — the condition that triggered the NPE during split.
    admin.majorCompact(userTableName);
    List<HRegionInfo> regionsOfTable = TESTING_UTIL.getMiniHBaseCluster()
        .getMaster().getAssignmentManager().getRegionStates()
        .getRegionsOfTable(userTableName.getBytes());
    // Table was created with no split keys, so it has a single region.
    HRegionInfo hRegionInfo = regionsOfTable.get(0);
    // Add three live rows and flush, so the chosen split point ("row7")
    // falls inside real data alongside the empty store file(s).
    Put p = new Put("row6".getBytes());
    p.add("col".getBytes(), "ql".getBytes(), "val".getBytes());
    table.put(p);
    p = new Put("row7".getBytes());
    p.add("col".getBytes(), "ql".getBytes(), "val".getBytes());
    table.put(p);
    p = new Put("row8".getBytes());
    p.add("col".getBytes(), "ql".getBytes(), "val".getBytes());
    table.put(p);
    admin.flush(userTableName);
    // The operation under test: must not throw NPE even though some store
    // files in the region are empty.
    admin.split(hRegionInfo.getRegionName(), "row7".getBytes());
    regionsOfTable = TESTING_UTIL.getMiniHBaseCluster().getMaster()
        .getAssignmentManager().getRegionStates()
        .getRegionsOfTable(userTableName.getBytes());

    // Split is asynchronous: poll until the table reports two regions.
    // The @Test(timeout) above bounds this loop if the split never finishes.
    while (regionsOfTable.size() != 2) {
      Thread.sleep(2000);
      regionsOfTable = TESTING_UTIL.getMiniHBaseCluster().getMaster()
          .getAssignmentManager().getRegionStates()
          .getRegionsOfTable(userTableName.getBytes());
    }
    Assert.assertEquals(2, regionsOfTable.size());
    // Verify no data was lost across the split: exactly rows 6-8 survive
    // (rows 0-5 were deleted before the split).
    Scan s = new Scan();
    ResultScanner scanner = table.getScanner(s);
    int mainTableCount = 0;
    for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
      mainTableCount++;
    }
    Assert.assertEquals(3, mainTableCount);
  } finally {
    table.close();
  }
}
|
||||
|
||||
/**
|
||||
* Noop Abortable implementation used below in tests.
|
||||
*/
|
||||
|
|
Loading…
Reference in New Issue