HBASE-8410 Basic quota support for namespaces (Vandana)

tedyu 2015-01-24 10:36:49 -08:00
parent 588b43b06b
commit 8261d84290
11 changed files with 1077 additions and 8 deletions
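For orientation (not part of the diff itself): the quota added by this change is carried as two configuration keys on a NamespaceDescriptor and enforced by the master at table-create and region-split time. A minimal, hedged usage sketch, mirroring what TestNamespaceAuditor below exercises (the namespace name is illustrative, and admin is assumed to be an org.apache.hadoop.hbase.client.HBaseAdmin):

  // Illustrative only: cap the namespace at 2 tables and 5 regions.
  NamespaceDescriptor desc = NamespaceDescriptor.create("ns_with_quota")
      .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2")
      .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
      .build();
  admin.createNamespace(desc);
  // A third table in this namespace, or a create/split that pushes its region count
  // past 5, is now rejected with a quota-related exception.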


@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.quotas.RegionStateListener;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;
@ -85,6 +86,7 @@ import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
import com.google.common.annotations.VisibleForTesting;
@ -193,6 +195,8 @@ public class AssignmentManager {
/** Listeners that are called on assignment events. */
private List<AssignmentListener> listeners = new CopyOnWriteArrayList<AssignmentListener>();
private RegionStateListener regionStateListener;
/**
* Constructs a new assignment manager.
@ -2758,7 +2762,12 @@ public class AssignmentManager {
errorMsg = onRegionClosed(current, hri, serverName);
break;
case READY_TO_SPLIT:
try {
regionStateListener.onRegionSplit(hri);
errorMsg = onRegionReadyToSplit(current, hri, serverName, transition);
} catch (IOException exp) {
errorMsg = StringUtils.stringifyException(exp);
}
break;
case SPLIT_PONR:
errorMsg = onRegionSplitPONR(current, hri, serverName, transition);
@ -2768,6 +2777,13 @@ public class AssignmentManager {
break;
case SPLIT_REVERTED:
errorMsg = onRegionSplitReverted(current, hri, serverName, transition);
if (org.apache.commons.lang.StringUtils.isEmpty(errorMsg)) {
try {
regionStateListener.onRegionSplitReverted(hri);
} catch (IOException exp) {
LOG.warn(StringUtils.stringifyException(exp));
}
}
break;
case READY_TO_MERGE:
errorMsg = onRegionReadyToMerge(current, hri, serverName, transition);
@ -2806,4 +2822,8 @@ public class AssignmentManager {
getSnapShotOfAssignment(Collection<HRegionInfo> infos) {
return getRegionStates().getRegionAssignments(infos);
}
void setRegionStateListener(RegionStateListener listener) {
this.regionStateListener = listener;
}
}


@ -109,6 +109,7 @@ import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionStateListener;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
@ -724,9 +725,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
status.setStatus("Starting namespace manager");
initNamespace();
status.setStatus("Starting quota manager");
initQuotaManager();
if (this.cpHost != null) {
try {
this.cpHost.preMasterInitialization();
@ -739,6 +737,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
LOG.info("Master has completed initialization"); LOG.info("Master has completed initialization");
configurationManager.registerObserver(this.balancer); configurationManager.registerObserver(this.balancer);
initialized = true; initialized = true;
status.setStatus("Starting quota manager");
initQuotaManager();
// clear the dead servers with same host name and port of online server because we are not
// removing dead server with same hostname and port of rs which is trying to check in before
// master initialization. See HBASE-5916.
@ -840,6 +841,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
void initQuotaManager() throws IOException {
quotaManager = new MasterQuotaManager(this);
this.assignmentManager.setRegionStateListener((RegionStateListener)quotaManager);
quotaManager.start();
}
@ -1242,6 +1244,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
checkInitialized();
sanityCheckTableDescriptor(hTableDescriptor);
this.quotaManager.checkNamespaceTableAndRegionQuota(hTableDescriptor.getTableName(),
newRegions.length);
if (cpHost != null) {
cpHost.preCreateTable(hTableDescriptor, newRegions);
}


@ -23,6 +23,7 @@ import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.NavigableSet;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@ -30,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
@ -72,6 +74,8 @@ public class TableNamespaceManager {
private ZKNamespaceManager zkNamespaceManager;
private boolean initialized;
public static final String KEY_MAX_REGIONS = "hbase.namespace.quota.maxregions";
public static final String KEY_MAX_TABLES = "hbase.namespace.quota.maxtables";
static final String NS_INIT_TIMEOUT = "hbase.master.namespace.init.timeout";
static final int DEFAULT_NS_INIT_TIMEOUT = 300000;
@ -150,13 +154,18 @@ public class TableNamespaceManager {
if (get(table, ns.getName()) != null) {
throw new NamespaceExistException(ns.getName());
}
validateTableAndRegionCount(ns);
FileSystem fs = masterServices.getMasterFileSystem().getFileSystem();
fs.mkdirs(FSUtils.getNamespaceDir(
masterServices.getMasterFileSystem().getRootDir(), ns.getName()));
upsert(table, ns);
if (this.masterServices.isInitialized()) {
this.masterServices.getMasterQuotaManager().setNamespaceQuota(ns);
}
}
private void upsert(Table table, NamespaceDescriptor ns) throws IOException {
validateTableAndRegionCount(ns);
Put p = new Put(Bytes.toBytes(ns.getName()));
p.addImmutable(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
HTableDescriptor.NAMESPACE_COL_DESC_BYTES,
@ -205,6 +214,7 @@ public class TableNamespaceManager {
masterServices.getMasterFileSystem().getRootDir(), name), true)) {
throw new IOException("Failed to remove namespace: "+name);
}
this.masterServices.getMasterQuotaManager().removeNamespaceQuota(name);
}
public synchronized NavigableSet<NamespaceDescriptor> list() throws IOException {
@ -309,4 +319,47 @@ public class TableNamespaceManager {
return !masterServices.getAssignmentManager()
.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME).isEmpty();
}
void validateTableAndRegionCount(NamespaceDescriptor desc) throws IOException {
if (getMaxRegions(desc) <= 0) {
throw new ConstraintException("The max region quota for " + desc.getName()
+ " is less than or equal to zero.");
}
if (getMaxTables(desc) <= 0) {
throw new ConstraintException("The max tables quota for " + desc.getName()
+ " is less than or equal to zero.");
}
}
public static long getMaxTables(NamespaceDescriptor ns) throws IOException {
String value = ns.getConfigurationValue(KEY_MAX_TABLES);
long maxTables = 0;
if (StringUtils.isNotEmpty(value)) {
try {
maxTables = Long.parseLong(value);
} catch (NumberFormatException exp) {
throw new DoNotRetryIOException("NumberFormatException while getting max tables.", exp);
}
} else {
// The property is not set, so assume it's the max long value.
maxTables = Long.MAX_VALUE;
}
return maxTables;
}
public static long getMaxRegions(NamespaceDescriptor ns) throws IOException {
String value = ns.getConfigurationValue(KEY_MAX_REGIONS);
long maxRegions = 0;
if (StringUtils.isNotEmpty(value)) {
try {
maxRegions = Long.parseLong(value);
} catch (NumberFormatException exp) {
throw new DoNotRetryIOException("NumberFormatException while getting max regions.", exp);
}
} else {
// The property is not set, so assume it's the max long value.
maxRegions = Long.MAX_VALUE;
}
return maxRegions;
}
}
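A brief, hypothetical usage sketch of the two accessors above (not part of the patch); it assumes only the keys and methods defined in this file, and shows that an unset key falls back to Long.MAX_VALUE:

  NamespaceDescriptor ns = NamespaceDescriptor.create("ns_example")
      .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "25")
      .build();
  long maxRegions = TableNamespaceManager.getMaxRegions(ns); // parses "25"
  long maxTables = TableNamespaceManager.getMaxTables(ns);   // key unset, so Long.MAX_VALUE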


@ -145,9 +145,9 @@ public class CreateTableHandler extends EventHandler {
public void process() {
TableName tableName = this.hTableDescriptor.getTableName();
LOG.info("Create table " + tableName);
HMaster master = ((HMaster) this.server);
try {
final MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.preCreateTableHandler(this.hTableDescriptor, this.newRegions);
}
@ -164,7 +164,16 @@ public class CreateTableHandler extends EventHandler {
}
} catch (Throwable e) {
LOG.error("Error trying to create the table " + tableName, e);
if (master.isInitialized()) {
try {
((HMaster) this.server).getMasterQuotaManager().removeTableFromNamespaceQuota(
hTableDescriptor.getTableName());
} catch (IOException e1) {
LOG.error("Error trying to update namespace quota " + e1);
}
}
completed(e);
}
}


@ -108,6 +108,7 @@ public class DeleteTableHandler extends TableEventHandler {
if (cpHost != null) {
cpHost.postDeleteTableHandler(this.tableName);
}
((HMaster) this.server).getMasterQuotaManager().removeTableFromNamespaceQuota(tableName);
}
private void cleanupTableState() throws IOException {


@ -0,0 +1,155 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.namespace;
import java.io.IOException;
import java.io.InterruptedIOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.quotas.QuotaExceededException;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.annotations.VisibleForTesting;
/**
* The Class NamespaceAuditor performs checks to ensure operations like table creation
* and region splitting preserve the namespace quota. The namespace quota can be specified
* during namespace creation.
*/
@InterfaceAudience.Public
public class NamespaceAuditor {
private static Log LOG = LogFactory.getLog(NamespaceAuditor.class);
static final String NS_AUDITOR_INIT_TIMEOUT = "hbase.namespace.auditor.init.timeout";
static final int DEFAULT_NS_AUDITOR_INIT_TIMEOUT = 120000;
private NamespaceStateManager stateManager;
private MasterServices masterServices;
public NamespaceAuditor(MasterServices masterServices) {
this.masterServices = masterServices;
stateManager = new NamespaceStateManager(masterServices);
}
public void start() throws IOException {
stateManager.start();
long startTime = EnvironmentEdgeManager.currentTime();
int timeout = masterServices.getConfiguration().getInt(NS_AUDITOR_INIT_TIMEOUT,
DEFAULT_NS_AUDITOR_INIT_TIMEOUT);
try {
while (!stateManager.isInitialized()) {
if (EnvironmentEdgeManager.currentTime() - startTime + 1000 > timeout) {
throw new HBaseIOException("Timed out waiting for namespace auditor to be initialized.");
}
Thread.sleep(1000);
}
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
}
LOG.info("NamespaceAuditor started.");
}
/**
* Check quota to create table.
* We add the table information to the namespace state cache, assuming the operation will
* pass. If the operation fails, the cache will be corrected the next time the namespace
* state chore runs.
*
* @param tName - The table name to check quota.
* @param regions - Number of regions that will be added.
* @throws IOException Signals that an I/O exception has occurred.
*/
public void checkQuotaToCreateTable(TableName tName, int regions) throws IOException {
if (stateManager.isInitialized()) {
// We do this check to fail fast.
if (MetaTableAccessor.tableExists(this.masterServices.getConnection(), tName)) {
throw new TableExistsException(tName);
}
stateManager.checkAndUpdateNamespaceTableCount(tName, regions);
} else {
checkTableTypeAndThrowException(tName);
}
}
private void checkTableTypeAndThrowException(TableName name) throws IOException {
if (name.isSystemTable()) {
LOG.debug("Namespace auditor checks not performed for table " + name.getNameAsString());
} else {
throw new HBaseIOException(
name + " is being created even before namespace auditor has been initialized.");
}
}
public void checkQuotaToSplitRegion(HRegionInfo hri) throws IOException {
if (!stateManager.isInitialized()) {
throw new IOException(
"Split operation is being performed even before namespace auditor is initialized.");
} else if (!stateManager
.checkAndUpdateNamespaceRegionCount(hri.getTable(), hri.getRegionName())) {
throw new QuotaExceededException("Region split not possible for :" + hri.getEncodedName()
+ " as quota limits are exceeded ");
}
}
public void addNamespace(NamespaceDescriptor ns) throws IOException {
stateManager.addNamespace(ns.getName());
}
public void deleteNamespace(String namespace) throws IOException {
stateManager.deleteNamespace(namespace);
}
public void removeFromNamespaceUsage(TableName tableName)
throws IOException {
stateManager.removeTable(tableName);
}
public void removeRegionFromNamespaceUsage(HRegionInfo hri) throws IOException {
stateManager.removeRegionFromTable(hri);
}
/**
* Used only for unit tests.
* @param namespace The name of the namespace
* @return An instance of NamespaceTableAndRegionInfo
*/
@VisibleForTesting
NamespaceTableAndRegionInfo getState(String namespace) {
if (stateManager.isInitialized()) {
return stateManager.getState(namespace);
}
return null;
}
/**
* Checks if namespace auditor is initialized. Used only for testing.
*
* @return true, if is initialized
*/
public boolean isInitialized() {
return stateManager.isInitialized();
}
}


@ -0,0 +1,225 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.namespace;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.TableNamespaceManager;
import org.apache.hadoop.hbase.util.Bytes;
/**
* NamespaceStateManager manages state (in terms of quota) of all the namespaces. It contains
* a cache which is updated based on the hooks in the NamespaceAuditor class.
*/
@InterfaceAudience.Private
class NamespaceStateManager {
private static Log LOG = LogFactory.getLog(NamespaceStateManager.class);
private ConcurrentMap<String, NamespaceTableAndRegionInfo> nsStateCache;
private MasterServices master;
private volatile boolean initialized = false;
public NamespaceStateManager(MasterServices masterServices) {
nsStateCache = new ConcurrentHashMap<String, NamespaceTableAndRegionInfo>();
master = masterServices;
}
/**
* Starts the NamespaceStateManager. The bootstrap of the cache
* is done in the post-master-start hook of the NamespaceAuditor class.
*
* @throws IOException Signals that an I/O exception has occurred.
*/
public void start() throws IOException {
LOG.info("Namespace State Manager started.");
initialize();
}
/**
* Gets an instance of NamespaceTableAndRegionInfo associated with namespace.
* @param name The name of the namespace.
* @return An instance of NamespaceTableAndRegionInfo.
*/
public NamespaceTableAndRegionInfo getState(String name) {
return nsStateCache.get(name);
}
/**
* Check if adding a region violates the namespace quota; if not, update the namespace cache.
*
* @param name The table name.
* @param regionName The region name.
* @return true, if region can be added to table.
* @throws IOException Signals that an I/O exception has occurred.
*/
synchronized boolean checkAndUpdateNamespaceRegionCount(TableName name,
byte[] regionName) throws IOException {
String namespace = name.getNamespaceAsString();
NamespaceDescriptor nspdesc = getNamespaceDescriptor(namespace);
if (nspdesc != null) {
NamespaceTableAndRegionInfo currentStatus;
currentStatus = getState(namespace);
if (currentStatus.getRegionCount() >= TableNamespaceManager.getMaxRegions(nspdesc)) {
LOG.warn("The region " + Bytes.toStringBinary(regionName)
+ " cannot be created. The region count will exceed quota on the namespace. "
+ "This may be transient, please retry later if there are any ongoing split"
+ " operations in the namespace.");
return false;
}
NamespaceTableAndRegionInfo nsInfo = nsStateCache.get(namespace);
if (nsInfo != null) {
nsInfo.incRegionCountForTable(name, 1);
} else {
LOG.warn("Namespace state found null for namespace : " + namespace);
}
}
return true;
}
private NamespaceDescriptor getNamespaceDescriptor(String namespaceAsString) {
try {
return this.master.getNamespaceDescriptor(namespaceAsString);
} catch (IOException e) {
LOG.error("Error while fetching namespace descriptor for namespace : " + namespaceAsString);
return null;
}
}
synchronized void checkAndUpdateNamespaceTableCount(TableName table, int numRegions)
throws IOException {
String namespace = table.getNamespaceAsString();
NamespaceDescriptor nspdesc = getNamespaceDescriptor(namespace);
if (nspdesc != null) {
NamespaceTableAndRegionInfo currentStatus;
currentStatus = getState(nspdesc.getName());
if ((currentStatus.getTables().size()) >= TableNamespaceManager.getMaxTables(nspdesc)) {
throw new DoNotRetryIOException("The table " + table.getNameAsString()
+ "cannot be created as it would exceed maximum number of tables allowed "
+ " in the namespace.");
}
if ((currentStatus.getRegionCount() + numRegions) > TableNamespaceManager
.getMaxRegions(nspdesc)) {
throw new DoNotRetryIOException("The table " + table.getNameAsString()
+ " is not allowed to have " + numRegions
+ " regions. The total number of regions permitted is only "
+ TableNamespaceManager.getMaxRegions(nspdesc)
+ ", while current region count is " + currentStatus.getRegionCount()
+ ". This may be transient, please retry later if there are any"
+ " ongoing split operations in the namespace.");
}
} else {
throw new IOException("Namespace Descriptor found null for " + namespace
+ " This is unexpected.");
}
addTable(table, numRegions);
}
NamespaceTableAndRegionInfo addNamespace(String namespace) {
if (!nsStateCache.containsKey(namespace)) {
NamespaceTableAndRegionInfo a1 = new NamespaceTableAndRegionInfo(namespace);
nsStateCache.put(namespace, a1);
}
return nsStateCache.get(namespace);
}
/**
* Delete the namespace state.
*
* @param namespace The name of the namespace whose state is deleted.
*/
void deleteNamespace(String namespace) {
this.nsStateCache.remove(namespace);
}
private void addTable(TableName tableName, int regionCount) throws IOException {
NamespaceTableAndRegionInfo info =
nsStateCache.get(tableName.getNamespaceAsString());
if (info != null) {
info.addTable(tableName, regionCount);
} else {
throw new IOException("Bad state : Namespace quota information not found for namespace : "
+ tableName.getNamespaceAsString());
}
}
synchronized void removeTable(TableName tableName) {
NamespaceTableAndRegionInfo info =
nsStateCache.get(tableName.getNamespaceAsString());
if (info != null) {
info.removeTable(tableName);
}
}
/**
* Initialize namespace state cache by scanning meta table.
*/
void initialize() {
try {
List<NamespaceDescriptor> namespaces = this.master.listNamespaceDescriptors();
for (NamespaceDescriptor namespace : namespaces) {
addNamespace(namespace.getName());
List<TableName> tables = this.master.listTableNamesByNamespace(namespace.getName());
for (TableName table : tables) {
int regionCount = 0;
Map<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(
this.master.getConnection(), table);
for (HRegionInfo info : regions.keySet()) {
if (!info.isSplit()) {
regionCount++;
}
}
addTable(table, regionCount);
}
}
LOG.info("Finished updating state of " + nsStateCache.size() + " namespaces. ");
initialized = true;
} catch (IOException e) {
LOG.error("Error while update namespace state.", e);
initialized = false;
}
}
boolean isInitialized() {
return initialized;
}
public synchronized void removeRegionFromTable(HRegionInfo hri) throws IOException {
String namespace = hri.getTable().getNamespaceAsString();
NamespaceTableAndRegionInfo nsInfo = nsStateCache.get(namespace);
if (nsInfo != null) {
nsInfo.decrementRegionCountForTable(hri.getTable(), 1);
} else {
throw new IOException("Namespace state found null for namespace : " + namespace);
}
}
}


@ -0,0 +1,119 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.namespace;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import com.google.common.base.Joiner;
/**
* NamespaceTableAndRegionInfo is a helper class that contains information
* about current state of tables and regions in a namespace.
*/
@InterfaceAudience.Private
class NamespaceTableAndRegionInfo {
private String name;
private Map<TableName, AtomicInteger> tableAndRegionInfo;
public NamespaceTableAndRegionInfo(String namespace) {
this.name = namespace;
this.tableAndRegionInfo = new HashMap<TableName, AtomicInteger>();
}
/**
* Gets the name of the namespace.
*
* @return name of the namespace.
*/
String getName() {
return name;
}
/**
* Gets the set of table names belonging to namespace.
*
* @return A set of table names.
*/
synchronized Set<TableName> getTables() {
return this.tableAndRegionInfo.keySet();
}
/**
* Gets the total number of regions in namespace.
*
* @return the region count
*/
synchronized int getRegionCount() {
int regionCount = 0;
for (Entry<TableName, AtomicInteger> entry : this.tableAndRegionInfo.entrySet()) {
regionCount = regionCount + entry.getValue().get();
}
return regionCount;
}
synchronized int getRegionCountOfTable(TableName tableName) {
if (tableAndRegionInfo.containsKey(tableName)) {
return this.tableAndRegionInfo.get(tableName).get();
} else {
return -1;
}
}
synchronized boolean containsTable(TableName tableName) {
return this.tableAndRegionInfo.containsKey(tableName);
}
synchronized void addTable(TableName tableName, int regionCount) {
if (!name.equalsIgnoreCase(tableName.getNamespaceAsString())) {
throw new IllegalStateException("Table : " + tableName + " does not belong to namespace "
+ name);
}
if (!tableAndRegionInfo.containsKey(tableName)) {
tableAndRegionInfo.put(tableName, new AtomicInteger(regionCount));
} else {
throw new IllegalStateException("Table already in the cache " + tableName);
}
}
synchronized void removeTable(TableName tableName) {
tableAndRegionInfo.remove(tableName);
}
synchronized int incRegionCountForTable(TableName tableName, int count) {
return tableAndRegionInfo.get(tableName).addAndGet(count);
}
synchronized int decrementRegionCountForTable(TableName tableName, int count) {
return tableAndRegionInfo.get(tableName).addAndGet(-count);
}
@Override
public String toString() {
Joiner.MapJoiner mapJoiner = Joiner.on(',').withKeyValueSeparator("=");
return "NamespaceTableAndRegionInfo [name=" + name + ", tableAndRegionInfo="
+ mapJoiner.join(tableAndRegionInfo) + "]";
}
}


@ -26,11 +26,13 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
import org.apache.hadoop.hbase.namespace.NamespaceAuditor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse;
@ -49,7 +51,7 @@ import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class MasterQuotaManager implements RegionStateListener {
private static final Log LOG = LogFactory.getLog(MasterQuotaManager.class);
private final MasterServices masterServices;
@ -57,6 +59,7 @@ public class MasterQuotaManager {
private NamedLock<TableName> tableLocks;
private NamedLock<String> userLocks;
private boolean enabled = false;
private NamespaceAuditor namespaceQuotaManager;
public MasterQuotaManager(final MasterServices masterServices) {
this.masterServices = masterServices;
@ -81,6 +84,8 @@ public class MasterQuotaManager {
tableLocks = new NamedLock<TableName>();
userLocks = new NamedLock<String>();
namespaceQuotaManager = new NamespaceAuditor(masterServices);
namespaceQuotaManager.start();
enabled = true;
}
@ -88,7 +93,7 @@ public class MasterQuotaManager {
}
public boolean isQuotaEnabled() {
return enabled && namespaceQuotaManager.isInitialized();
}
/* ==========================================================================
@ -263,6 +268,18 @@ public class MasterQuotaManager {
});
}
public void setNamespaceQuota(NamespaceDescriptor desc) throws IOException {
if (enabled) {
this.namespaceQuotaManager.addNamespace(desc);
}
}
public void removeNamespaceQuota(String namespace) throws IOException {
if (enabled) {
this.namespaceQuotaManager.deleteNamespace(namespace);
}
}
private void setQuota(final SetQuotaRequest req, final SetQuotaOperations quotaOps)
throws IOException, InterruptedException {
if (req.hasRemoveAll() && req.getRemoveAll() == true) {
@ -290,6 +307,34 @@ public class MasterQuotaManager {
quotaOps.postApply(quotas);
}
public void checkNamespaceTableAndRegionQuota(TableName tName, int regions) throws IOException {
if (enabled) {
namespaceQuotaManager.checkQuotaToCreateTable(tName, regions);
}
}
public void onRegionSplit(HRegionInfo hri) throws IOException {
if (enabled) {
namespaceQuotaManager.checkQuotaToSplitRegion(hri);
}
}
/**
* Remove table from namespace quota.
*
* @param tName - The table name to update quota usage.
* @throws IOException Signals that an I/O exception has occurred.
*/
public void removeTableFromNamespaceQuota(TableName tName) throws IOException {
if (enabled) {
namespaceQuotaManager.removeFromNamespaceUsage(tName);
}
}
public NamespaceAuditor getNamespaceQuotaManager() {
return this.namespaceQuotaManager;
}
private static interface SetQuotaOperations {
Quotas fetch() throws IOException;
void delete() throws IOException;
@ -422,5 +467,12 @@ public class MasterQuotaManager {
}
}
}
@Override
public void onRegionSplitReverted(HRegionInfo hri) throws IOException {
if (enabled) {
this.namespaceQuotaManager.removeRegionFromNamespaceUsage(hri);
}
}
}


@ -0,0 +1,47 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.quotas;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
/**
* The listener interface for receiving region state events.
*/
@InterfaceAudience.Private
public interface RegionStateListener {
/**
* Process region split event.
*
* @param hri An instance of HRegionInfo
* @throws IOException
*/
void onRegionSplit(HRegionInfo hri) throws IOException;
/**
* Process region split reverted event.
*
* @param hri An instance of HRegionInfo
* @throws IOException Signals that an I/O exception has occurred.
*/
void onRegionSplitReverted(HRegionInfo hri) throws IOException;
}
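A rough, hypothetical sketch of the contract (not part of this commit): an implementation only needs the two callbacks, and MasterQuotaManager below is the actual implementation that AssignmentManager is wired to. The class name here is illustrative.

  import java.io.IOException;
  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.quotas.RegionStateListener;

  // Hypothetical listener that only logs the events, to illustrate the interface.
  public class LoggingRegionStateListener implements RegionStateListener {
    private static final Log LOG = LogFactory.getLog(LoggingRegionStateListener.class);
    @Override
    public void onRegionSplit(HRegionInfo hri) throws IOException {
      LOG.info("Split requested for region " + hri.getEncodedName());
    }
    @Override
    public void onRegionSplitReverted(HRegionInfo hri) throws IOException {
      LOG.info("Split reverted for region " + hri.getEncodedName());
    }
  }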


@ -0,0 +1,384 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.namespace;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.master.TableNamespaceManager;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category(MediumTests.class)
public class TestNamespaceAuditor {
private static final Log LOG = LogFactory.getLog(TestNamespaceAuditor.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static HBaseAdmin admin;
private String prefix = "TestNamespaceAuditor";
@BeforeClass
public static void before() throws Exception {
UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
CustomObserver.class.getName());
UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
UTIL.startMiniCluster(1, 3);
UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return UTIL.getHBaseCluster().getMaster().getMasterQuotaManager().isQuotaEnabled();
}
});
admin = UTIL.getHBaseAdmin();
}
@AfterClass
public static void tearDown() throws Exception {
UTIL.shutdownMiniCluster();
}
@After
public void cleanup() throws IOException, KeeperException {
for (HTableDescriptor table : admin.listTables()) {
admin.disableTable(table.getTableName());
admin.deleteTable(table.getTableName());
}
for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
if (ns.getName().startsWith(prefix)) {
admin.deleteNamespace(ns.getName());
}
}
assertTrue("Quota manager not enabled", UTIL.getHBaseCluster().getMaster()
.getMasterQuotaManager().isQuotaEnabled());
}
@Test
public void testTableOperations() throws Exception {
String nsp = prefix + "_np2";
NamespaceDescriptor nspDesc =
NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
admin.createNamespace(nspDesc);
assertNotNull("Namespace descriptor found null.", admin.getNamespaceDescriptor(nsp));
assertEquals(admin.listNamespaceDescriptors().length, 3);
HTableDescriptor tableDescOne =
new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"));
HTableDescriptor tableDescTwo =
new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"));
HTableDescriptor tableDescThree =
new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table3"));
admin.createTable(tableDescOne);
boolean constraintViolated = false;
try {
admin.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5);
} catch (Exception exp) {
assertTrue(exp instanceof IOException);
constraintViolated = true;
} finally {
assertTrue("Constraint not violated for table " + tableDescTwo.getTableName(),
constraintViolated);
}
admin.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
NamespaceTableAndRegionInfo nspState = getQuotaManager().getState(nsp);
assertNotNull(nspState);
assertTrue(nspState.getTables().size() == 2);
assertTrue(nspState.getRegionCount() == 5);
constraintViolated = false;
try {
admin.createTable(tableDescThree);
} catch (Exception exp) {
assertTrue(exp instanceof IOException);
constraintViolated = true;
} finally {
assertTrue("Constraint not violated for table " + tableDescThree.getTableName(),
constraintViolated);
}
}
@Test
public void testValidQuotas() throws Exception {
boolean exceptionCaught = false;
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
NamespaceDescriptor nspDesc =
NamespaceDescriptor.create(prefix + "vq1")
.addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "hihdufh")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
try {
admin.createNamespace(nspDesc);
} catch (Exception exp) {
LOG.warn(exp);
exceptionCaught = true;
} finally {
assertTrue(exceptionCaught);
assertFalse(fs.exists(FSUtils.getNamespaceDir(rootDir, nspDesc.getName())));
}
nspDesc =
NamespaceDescriptor.create(prefix + "vq2")
.addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "-456")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
try {
admin.createNamespace(nspDesc);
} catch (Exception exp) {
LOG.warn(exp);
exceptionCaught = true;
} finally {
assertTrue(exceptionCaught);
assertFalse(fs.exists(FSUtils.getNamespaceDir(rootDir, nspDesc.getName())));
}
nspDesc =
NamespaceDescriptor.create(prefix + "vq3")
.addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "sciigd").build();
try {
admin.createNamespace(nspDesc);
} catch (Exception exp) {
LOG.warn(exp);
exceptionCaught = true;
} finally {
assertTrue(exceptionCaught);
assertFalse(fs.exists(FSUtils.getNamespaceDir(rootDir, nspDesc.getName())));
}
nspDesc =
NamespaceDescriptor.create(prefix + "vq4")
.addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "-1500").build();
try {
admin.createNamespace(nspDesc);
} catch (Exception exp) {
LOG.warn(exp);
exceptionCaught = true;
} finally {
assertTrue(exceptionCaught);
assertFalse(fs.exists(FSUtils.getNamespaceDir(rootDir, nspDesc.getName())));
}
}
@Test
public void testDeleteTable() throws Exception {
String namespace = prefix + "_dummy";
NamespaceDescriptor nspDesc =
NamespaceDescriptor.create(namespace)
.addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "100")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "3").build();
admin.createNamespace(nspDesc);
assertNotNull("Namespace descriptor found null.", admin.getNamespaceDescriptor(namespace));
NamespaceTableAndRegionInfo stateInfo = getNamespaceState(nspDesc.getName());
assertNotNull("Namespace state found null for " + namespace, stateInfo);
HTableDescriptor tableDescOne =
new HTableDescriptor(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table1"));
HTableDescriptor tableDescTwo =
new HTableDescriptor(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table2"));
admin.createTable(tableDescOne);
admin.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5);
stateInfo = getNamespaceState(nspDesc.getName());
assertNotNull("Namespace state found to be null.", stateInfo);
assertEquals(2, stateInfo.getTables().size());
assertEquals(5, stateInfo.getRegionCountOfTable(tableDescTwo.getTableName()));
assertEquals(6, stateInfo.getRegionCount());
admin.disableTable(tableDescOne.getTableName());
admin.deleteTable(tableDescOne.getTableName());
stateInfo = getNamespaceState(nspDesc.getName());
assertNotNull("Namespace state found to be null.", stateInfo);
assertEquals(5, stateInfo.getRegionCount());
assertEquals(1, stateInfo.getTables().size());
admin.disableTable(tableDescTwo.getTableName());
admin.deleteTable(tableDescTwo.getTableName());
admin.deleteNamespace(namespace);
stateInfo = getNamespaceState(namespace);
assertNull("Namespace state not found to be null.", stateInfo);
}
@Test
public void testRegionOperations() throws Exception {
String nsp1 = prefix + "_regiontest";
NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1)
.addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "2")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
admin.createNamespace(nspDesc);
boolean constraintViolated = false;
final TableName tableOne = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1");
byte[] columnFamily = Bytes.toBytes("info");
HTableDescriptor tableDescOne = new HTableDescriptor(tableOne);
tableDescOne.addFamily(new HColumnDescriptor(columnFamily));
NamespaceTableAndRegionInfo stateInfo;
try {
admin.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("1000"), 7);
} catch (Exception exp) {
assertTrue(exp instanceof DoNotRetryIOException);
LOG.info(exp);
constraintViolated = true;
} finally {
assertTrue(constraintViolated);
}
assertFalse(admin.tableExists(tableOne));
// This call will pass.
admin.createTable(tableDescOne);
Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
HTable htable = (HTable)connection.getTable(tableOne);
UTIL.loadNumericRows(htable, Bytes.toBytes("info"), 1, 1000);
admin.flush(tableOne);
stateInfo = getNamespaceState(nsp1);
assertEquals(1, stateInfo.getTables().size());
assertEquals(1, stateInfo.getRegionCount());
restartMaster();
admin.split(tableOne, Bytes.toBytes("500"));
HRegion actualRegion = UTIL.getHBaseCluster().getRegions(tableOne).get(0);
CustomObserver observer = (CustomObserver) actualRegion.getCoprocessorHost().findCoprocessor(
CustomObserver.class.getName());
assertNotNull(observer);
observer.postSplit.await();
assertEquals(2, admin.getTableRegions(tableOne).size());
actualRegion = UTIL.getHBaseCluster().getRegions(tableOne).get(0);
observer = (CustomObserver) actualRegion.getCoprocessorHost().findCoprocessor(
CustomObserver.class.getName());
assertNotNull(observer);
admin.split(tableOne, getSplitKey(actualRegion.getStartKey(), actualRegion.getEndKey()));
observer.postSplit.await();
// Make sure no regions have been added.
assertEquals(2, admin.getTableRegions(tableOne).size());
assertTrue("split completed", observer.preSplitBeforePONR.getCount() == 1);
htable.close();
}
private NamespaceTableAndRegionInfo getNamespaceState(String namespace) throws KeeperException,
IOException {
return getQuotaManager().getState(namespace);
}
byte[] getSplitKey(byte[] startKey, byte[] endKey) {
String skey = Bytes.toString(startKey);
int key;
if (StringUtils.isBlank(skey)) {
key = Integer.parseInt(Bytes.toString(endKey)) / 2;
} else {
key = (int) (Integer.parseInt(skey) * 1.5);
}
return Bytes.toBytes("" + key);
}
public static class CustomObserver extends BaseRegionObserver {
volatile CountDownLatch postSplit;
volatile CountDownLatch preSplitBeforePONR;
@Override
public void postCompleteSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
postSplit.countDown();
}
@Override
public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
byte[] splitKey, List<Mutation> metaEntries) throws IOException {
preSplitBeforePONR.countDown();
}
@Override
public void start(CoprocessorEnvironment e) throws IOException {
postSplit = new CountDownLatch(1);
preSplitBeforePONR = new CountDownLatch(1);
}
}
@Test
public void testStatePreserve() throws Exception {
final String nsp1 = prefix + "_testStatePreserve";
NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1)
.addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "20")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "10").build();
admin.createNamespace(nspDesc);
TableName tableOne = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1");
TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
TableName tableThree = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table3");
HTableDescriptor tableDescOne = new HTableDescriptor(tableOne);
HTableDescriptor tableDescTwo = new HTableDescriptor(tableTwo);
HTableDescriptor tableDescThree = new HTableDescriptor(tableThree);
admin.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("1000"), 3);
admin.createTable(tableDescTwo, Bytes.toBytes("1"), Bytes.toBytes("1000"), 3);
admin.createTable(tableDescThree, Bytes.toBytes("1"), Bytes.toBytes("1000"), 4);
admin.disableTable(tableThree);
admin.deleteTable(tableThree);
// wait for chore to complete
UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return (getNamespaceState(nsp1).getTables().size() == 2);
}
});
NamespaceTableAndRegionInfo before = getNamespaceState(nsp1);
restartMaster();
NamespaceTableAndRegionInfo after = getNamespaceState(nsp1);
assertEquals("Expected: " + before.getTables() + " Found: " + after.getTables(), before
.getTables().size(), after.getTables().size());
}
private void restartMaster() throws Exception {
UTIL.getHBaseCluster().getMaster().stop("Stopping to start again");
UTIL.getHBaseCluster().startMaster();
Thread.sleep(60000);
UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return UTIL.getHBaseCluster().getMaster().getMasterQuotaManager().isQuotaEnabled();
}
});
}
private NamespaceAuditor getQuotaManager() {
return UTIL.getHBaseCluster().getMaster()
.getMasterQuotaManager().getNamespaceQuotaManager();
}
}