HBASE-9044 Merging regions throws NPE

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1507500 13f79535-47bb-0310-9956-ffa450edef68
jxiang 2013-07-26 22:50:43 +00:00
parent ff5fdb671f
commit 3812294456
5 changed files with 143 additions and 15 deletions

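Context (not part of the commit): the NPE in the title was raised on the master's merge dispatch path and is fixed in DispatchMergingRegionHandler below; alongside that, invalid merge requests are now rejected with a new MergeRegionException, which HBaseAdmin rethrows to the caller instead of only logging it. The following is a minimal, hypothetical client sketch of the resulting behavior, assuming a running cluster and using the mergeRegions call exercised by the test at the end of this diff:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.exceptions.UnknownRegionException;
import org.apache.hadoop.hbase.util.Bytes;

public class MergeRegionsSketch {
  public static void main(String[] args) throws IOException {
    // args[0] and args[1] are encoded region names, as returned by
    // HRegionInfo#getEncodedName().
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      admin.mergeRegions(Bytes.toBytes(args[0]), Bytes.toBytes(args[1]), false);
    } catch (MergeRegionException e) {
      // New with this patch: the request was rejected up front, for example
      // because a region is not online or both names refer to the same region.
      System.err.println("Merge rejected: " + e.getMessage());
    } catch (UnknownRegionException e) {
      // One of the encoded names does not match an existing region.
      System.err.println("Unknown region: " + e.getMessage());
    } finally {
      admin.close();
    }
  }
}
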
HBaseAdmin.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.FailedLogCloseException;
import org.apache.hadoop.hbase.exceptions.HBaseSnapshotException;
import org.apache.hadoop.hbase.exceptions.MasterNotRunningException;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
import org.apache.hadoop.hbase.exceptions.RegionException;
import org.apache.hadoop.hbase.exceptions.RestoreSnapshotException;
@@ -1710,6 +1711,9 @@ public class HBaseAdmin implements Abortable, Closeable {
if (ioe instanceof UnknownRegionException) {
throw (UnknownRegionException) ioe;
}
if (ioe instanceof MergeRegionException) {
throw (MergeRegionException) ioe;
}
LOG.error("Unexpected exception: " + se
+ " from calling HMaster.dispatchMergingRegions");
} catch (DeserializationException de) {
@@ -2740,4 +2744,4 @@ public class HBaseAdmin implements Abortable, Closeable {
public CoprocessorRpcChannel coprocessorService() {
return new MasterCoprocessorRpcChannel(connection);
}
}
}

MergeRegionException.java (new file)

@@ -0,0 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown when something is wrong in trying to merge two regions.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MergeRegionException extends RegionException {
private static final long serialVersionUID = 4970899110066124122L;
/** default constructor */
public MergeRegionException() {
super();
}
/**
* Constructor
* @param s message
*/
public MergeRegionException(String s) {
super(s);
}
}
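
How this type fits in (illustrative note, not part of the patch): MergeRegionException extends RegionException and is therefore an IOException, so existing callers that only catch IOException keep working; the test below relies on exactly that by catching IOException and checking instanceof. On the master side the patch wraps it in the RPC-layer ServiceException so the typed failure survives the protobuf call, and the HBaseAdmin change above unwraps and rethrows it. A minimal sketch of that throwing convention, assuming the same com.google.protobuf.ServiceException used elsewhere in the master RPC code:

import com.google.protobuf.ServiceException;

import org.apache.hadoop.hbase.exceptions.MergeRegionException;

public class MergeRejectionSketch {
  // Mirrors the pattern in the HMaster hunk below: wrap the typed exception so
  // the client can recover and rethrow it on its side of the RPC boundary.
  static void rejectMerge(String reason) throws ServiceException {
    throw new ServiceException(new MergeRegionException(reason));
  }
}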

HMaster.java

@@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.MasterNotRunningException;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.exceptions.PleaseHoldException;
import org.apache.hadoop.hbase.exceptions.TableNotDisabledException;
import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
@@ -76,9 +77,9 @@ import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.exceptions.UnknownRegionException;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler;
import org.apache.hadoop.hbase.master.balancer.BalancerChore;
@@ -1525,16 +1526,28 @@ MasterServices, Server {
: encodedNameOfRegionB)));
}
if (!forcible && !HRegionInfo.areAdjacent(regionStateA.getRegion(),
regionStateB.getRegion())) {
throw new ServiceException("Unable to merge not adjacent regions "
+ regionStateA.getRegion().getRegionNameAsString() + ", "
+ regionStateB.getRegion().getRegionNameAsString()
+ " where forcible = " + forcible);
if (!regionStateA.isOpened() || !regionStateB.isOpened()) {
throw new ServiceException(new MergeRegionException(
"Unable to merge regions not online " + regionStateA + ", " + regionStateB));
}
HRegionInfo regionInfoA = regionStateA.getRegion();
HRegionInfo regionInfoB = regionStateB.getRegion();
if (regionInfoA.compareTo(regionInfoB) == 0) {
throw new ServiceException(new MergeRegionException(
"Unable to merge a region to itself " + regionInfoA + ", " + regionInfoB));
}
if (!forcible && !HRegionInfo.areAdjacent(regionInfoA, regionInfoB)) {
throw new ServiceException(new MergeRegionException(
"Unable to merge not adjacent regions "
+ regionInfoA.getRegionNameAsString() + ", "
+ regionInfoB.getRegionNameAsString()
+ " where forcible = " + forcible));
}
try {
dispatchMergingRegions(regionStateA.getRegion(), regionStateB.getRegion(), forcible);
dispatchMergingRegions(regionInfoA, regionInfoB, forcible);
} catch (IOException ioe) {
throw new ServiceException(ioe);
}

DispatchMergingRegionHandler.java

@@ -20,12 +20,14 @@ package org.apache.hadoop.hbase.master.handler;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
import org.apache.hadoop.hbase.executor.EventHandler;
@@ -34,6 +36,7 @@ import org.apache.hadoop.hbase.master.CatalogJanitor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
@@ -98,12 +101,8 @@ public class DispatchMergingRegionHandler extends EventHandler {
// Move region_b to region a's location, switch region_a and region_b if
// region_a's load lower than region_b's, so we will always move lower
// load region
RegionLoad loadOfRegionA = masterServices.getServerManager()
.getLoad(region_a_location).getRegionsLoad()
.get(region_a.getRegionName());
RegionLoad loadOfRegionB = masterServices.getServerManager()
.getLoad(region_b_location).getRegionsLoad()
.get(region_b.getRegionName());
RegionLoad loadOfRegionA = getRegionLoad(region_a_location, region_a);
RegionLoad loadOfRegionB = getRegionLoad(region_b_location, region_b);
if (loadOfRegionA != null && loadOfRegionB != null
&& loadOfRegionA.getRequestsCount() < loadOfRegionB
.getRequestsCount()) {
@@ -174,4 +173,16 @@
+ (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms");
}
}
private RegionLoad getRegionLoad(ServerName sn, HRegionInfo hri) {
ServerManager serverManager = masterServices.getServerManager();
ServerLoad load = serverManager.getLoad(sn);
if (load != null) {
Map<byte[], RegionLoad> regionsLoad = load.getRegionsLoad();
if (regionsLoad != null) {
return regionsLoad.get(hri.getRegionName());
}
}
return null;
}
}
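
The NullPointerException named in the commit title came from the chained lookup removed above: ServerManager.getLoad(serverName) can return null (presumably when no load report is available for that server), and the old code dereferenced the result unconditionally. An annotated reconstruction of the removed call, for illustration only:

// Pre-patch code (removed in the hunk above), with the failure point marked:
RegionLoad loadOfRegionA = masterServices.getServerManager()
    .getLoad(region_a_location)      // may return null
    .getRegionsLoad()                // NullPointerException when it does
    .get(region_a.getRegionName());

The new getRegionLoad helper checks each step and returns null instead, which the existing null check at the call site (loadOfRegionA != null && loadOfRegionB != null) already tolerates.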

TestRegionMergeTransactionOnCluster.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.List;
@@ -43,7 +44,10 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.exceptions.UnknownRegionException;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
@@ -190,6 +194,58 @@ public class TestRegionMergeTransactionOnCluster {
}
}
/**
* This test tests 1, merging region not online;
* 2, merging same two regions; 3, merging unknown regions.
* They are in one test case so that we don't have to create
* many tables, and these tests are simple.
*/
@Test
public void testMerge() throws Exception {
LOG.info("Starting testMerge");
final byte[] tableName = Bytes.toBytes("testMerge");
try {
// Create table and load data.
HTable table = createTableAndLoadData(master, tableName);
RegionStates regionStates = master.getAssignmentManager().getRegionStates();
List<HRegionInfo> regions = regionStates.getRegionsOfTable(tableName);
// Fake offline one region
HRegionInfo a = regions.get(0);
HRegionInfo b = regions.get(1);
regionStates.regionOffline(a);
try {
// Merge offline region. Region a is offline here
admin.mergeRegions(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false);
fail("Offline regions should not be able to merge");
} catch (IOException ie) {
assertTrue("Exception should mention regions not online",
ie.getMessage().contains("regions not online")
&& ie instanceof MergeRegionException);
}
try {
// Merge the same region: b and b.
admin.mergeRegions(b.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), true);
fail("A region should not be able to merge with itself, even forcifully");
} catch (IOException ie) {
assertTrue("Exception should mention regions not online",
ie.getMessage().contains("region to itself")
&& ie instanceof MergeRegionException);
}
try {
// Merge unknown regions
admin.mergeRegions(Bytes.toBytes("-f1"), Bytes.toBytes("-f2"), true);
fail("Unknown region could not be merged");
} catch (IOException ie) {
assertTrue("UnknownRegionException should be thrown",
ie instanceof UnknownRegionException);
}
table.close();
} finally {
TEST_UTIL.deleteTable(tableName);
}
}
private void mergeRegionsAndVerifyRegionNum(HMaster master, byte[] tablename,
int regionAnum, int regionBnum, int expectedRegionNum) throws Exception {
requestMergeRegion(master, tablename, regionAnum, regionBnum);