HBASE-11974 When a disabled table is scanned, NotServingRegionException is thrown instead of TableNotEnabledException (Ted Yu)

This commit is contained in:
Enis Soztutar 2014-09-21 15:23:31 -07:00
parent 676a0126bc
commit d568aa22b8
7 changed files with 70 additions and 13 deletions

View File

@@ -113,11 +113,11 @@ public interface ClusterConnection extends HConnection {
* @param tableName name of the table <i>row</i> is in
* @param row row key you're trying to find the region of
* @param replicaId the replicaId of the region
* @return RegionLocations that describe where to find the region in
* question
* @throws IOException if a remote or network exception occurs
*/
HRegionLocation relocateRegion(final TableName tableName,
RegionLocations relocateRegion(final TableName tableName,
final byte [] row, int replicaId) throws IOException;
/**

View File

@@ -305,7 +305,7 @@ class ConnectionAdapter implements ClusterConnection {
}
@Override
public HRegionLocation relocateRegion(TableName tableName, byte[] row, int replicaId)
public RegionLocations relocateRegion(TableName tableName, byte[] row, int replicaId)
throws IOException {
return wrappedConnection.relocateRegion(tableName, row, replicaId);
}

View File

@@ -115,6 +115,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescripto
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
@@ -178,8 +180,6 @@ import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*;
/**
* An internal, A non-instantiable class that manages creation of {@link HConnection}s.
*/
@@ -1065,21 +1065,23 @@ class ConnectionManager {
/**
 * Re-lookup of the default replica's location for {@code row}, bypassing the cache.
 * Unwraps the single HRegionLocation from the RegionLocations bundle returned by
 * the replica-aware overload.
 */
@Override
public HRegionLocation relocateRegion(final TableName tableName,
    final byte [] row) throws IOException {
  RegionLocations locations = relocateRegion(tableName, row,
    RegionReplicaUtil.DEFAULT_REPLICA_ID);
  return locations == null ? null :
    locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
}
@Override
public HRegionLocation relocateRegion(final TableName tableName,
public RegionLocations relocateRegion(final TableName tableName,
final byte [] row, int replicaId) throws IOException{
// Since this is an explicit request not to use any caching, finding
// disabled tables should not be desirable. This will ensure that an exception is thrown when
// the first time a disabled table is interacted with.
if (isTableDisabled(tableName)) {
if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) {
throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
}
RegionLocations locations = locateRegion(tableName, row, false, true, replicaId);
return locations == null ? null : locations.getRegionLocation(replicaId);
return locateRegion(tableName, row, false, true, replicaId);
}
@Override

View File

@@ -19,14 +19,19 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.security.User;
import com.google.common.annotations.VisibleForTesting;
/**
* Utility used by client connections.
@@ -67,7 +72,7 @@ public class ConnectionUtils {
}
return newPause;
}
/**
* @param conn The connection for which to replace the generator.
* @param cnm Replaces the nonce generator used, for testing.
@@ -123,4 +128,31 @@ public class ConnectionUtils {
}
};
}
/**
 * Point {@code conf} at a connection implementation that does not require a live
 * master. Intended for tests that shut the master down.
 * @param conf the configuration to modify in place
 */
@VisibleForTesting
public static void setupMasterlessConnection(Configuration conf) {
  String connectionImpl = MasterlessConnection.class.getName();
  conf.set(HConnection.HBASE_CLIENT_CONNECTION_IMPL, connectionImpl);
}
/**
 * Some tests shut down the master. But table availability is a master RPC which is performed on
 * region re-lookups. This subclass short-circuits the disabled-table check so region
 * re-lookups keep working with no master present.
 */
static class MasterlessConnection extends ConnectionManager.HConnectionImplementation {
  // Same construction contract as the parent implementation; adds no state.
  MasterlessConnection(Configuration conf, boolean managed,
      ExecutorService pool, User user) throws IOException {
    super(conf, managed, pool, user);
  }

  @Override
  public boolean isTableDisabled(TableName tableName) throws IOException {
    // treat all tables as enabled; avoids the master RPC the default
    // implementation would make
    return false;
  }
}
}

View File

@@ -283,7 +283,11 @@ public class RpcRetryingCallerWithReadReplicas {
RegionLocations rl;
try {
rl = cConnection.locateRegion(tableName, row, useCache, true, replicaId);
if (!useCache) {
rl = cConnection.relocateRegion(tableName, row, replicaId);
} else {
rl = cConnection.locateRegion(tableName, row, useCache, true, replicaId);
}
} catch (DoNotRetryIOException e) {
throw e;
} catch (RetriesExhaustedException e) {

View File

@@ -264,7 +264,19 @@ public class TestAdmin {
boolean ok = false;
try {
ht.get(get);
} catch (org.apache.hadoop.hbase.DoNotRetryIOException e) {
} catch (TableNotEnabledException e) {
ok = true;
}
ok = false;
// verify that scan encounters correct exception
Scan scan = new Scan();
try {
ResultScanner scanner = ht.getScanner(scan);
Result res = null;
do {
res = scanner.next();
} while (res != null);
} catch (TableNotEnabledException e) {
ok = true;
}
assertTrue(ok);

View File

@@ -33,6 +33,7 @@ import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -56,6 +57,7 @@ import org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.log4j.Level;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.AfterClass;
@@ -74,6 +76,10 @@ import org.junit.experimental.categories.Category;
public class TestReplicasClient {
private static final Log LOG = LogFactory.getLog(TestReplicasClient.class);
static {
((Log4JLogger)RpcRetryingCaller.LOG).getLogger().setLevel(Level.ALL);
}
private static final int NB_SERVERS = 1;
private static HTable table = null;
private static final byte[] row = TestReplicasClient.class.getName().getBytes();
@@ -161,6 +167,7 @@ public class TestReplicasClient {
HTU.getConfiguration().setInt(
StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, REFRESH_PERIOD);
HTU.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true);
ConnectionUtils.setupMasterlessConnection(HTU.getConfiguration());
HTU.startMiniCluster(NB_SERVERS);
// Create table then get the single region for our new table.