HBASE-9273 Consolidate isSystemTable checking
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1516039 13f79535-47bb-0310-9956-ffa450edef68
parent 9ed698b131
commit f034a249f2
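The commit replaces the static helper HTableDescriptor.isSystemTable(TableName) with an instance method on TableName itself, so callers ask the name object directly instead of routing through an unrelated descriptor class. Below is a minimal, self-contained sketch of the consolidated check; TableNameSketch and SYSTEM_NAMESPACE are simplified stand-ins for the real TableName class and NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, not the actual HBase types.

// Sketch only: mirrors the removed static helper as an instance method.
final class TableNameSketch {
  // Stand-in for NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR.
  static final String SYSTEM_NAMESPACE = "hbase";

  private final String namespace;
  private final String qualifier;

  TableNameSketch(String namespace, String qualifier) {
    this.namespace = namespace;
    this.qualifier = qualifier;
  }

  // The same test the removed HTableDescriptor.isSystemTable(TableName)
  // performed: a table is a system table iff it lives in the "hbase" namespace.
  boolean isSystemTable() {
    return SYSTEM_NAMESPACE.equals(namespace);
  }

  public static void main(String[] args) {
    System.out.println(new TableNameSketch("hbase", "meta").isSystemTable());      // true
    System.out.println(new TableNameSketch("default", "mytable").isSystemTable()); // false
  }
}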
@@ -429,18 +429,6 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
     return isMetaRegion() && !isRootRegion();
   }
 
-  /**
-   * Checks of the tableName being passed is a system table
-   *
-   *
-   * @return true if a tableName is a member of the system
-   * namesapce (aka hbase)
-   */
-  public static boolean isSystemTable(final TableName tableName) {
-    return tableName.getNamespaceAsString()
-        .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
-  }
-
   /**
    * Getter for accessing the metadata associated with the key
    *
@@ -304,7 +304,7 @@ public class HTable implements HTableInterface {
    */
   private void finishSetup() throws IOException {
     this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW);
-    this.operationTimeout = HTableDescriptor.isSystemTable(tableName) ?
+    this.operationTimeout = tableName.isSystemTable() ?
       this.configuration.getInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT,
         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT):
       this.configuration.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
@@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.snapshot;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -46,7 +44,7 @@ public class ClientSnapshotDescriptionUtils {
       // make sure the table name is valid, this will implicitly check validity
       TableName tableName = TableName.valueOf(snapshot.getTable());
 
-      if (HTableDescriptor.isSystemTable(tableName)) {
+      if (tableName.isSystemTable()) {
         throw new IllegalArgumentException("System table snapshots are not allowed");
       }
     }
@@ -47,7 +47,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -453,7 +452,7 @@ public class AssignmentManager extends ZooKeeperListener {
     // its a clean cluster startup, else its a failover.
     Map<HRegionInfo, ServerName> regions = regionStates.getRegionAssignments();
     for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
-      if (!HTableDescriptor.isSystemTable(e.getKey().getTableName())
+      if (!e.getKey().getTableName().isSystemTable()
           && e.getValue() != null) {
         LOG.debug("Found " + e + " out on cluster");
         failover = true;
@@ -2575,7 +2574,7 @@ public class AssignmentManager extends ZooKeeperListener {
       //remove system tables because they would have been assigned earlier
       for(Iterator<HRegionInfo> iter = allRegions.keySet().iterator();
           iter.hasNext();) {
-        if (HTableDescriptor.isSystemTable(iter.next().getTableName())) {
+        if (iter.next().getTableName().isSystemTable()) {
          iter.remove();
        }
      }
@@ -1045,7 +1045,7 @@ MasterServices, Server {
     MetaReader.fullScan(catalogTracker, disabledOrDisablingOrEnabling, true);
     for(Iterator<HRegionInfo> iter = allRegions.keySet().iterator();
         iter.hasNext();) {
-      if (!HTableDescriptor.isSystemTable(iter.next().getTableName())) {
+      if (!iter.next().getTableName().isSystemTable()) {
        iter.remove();
      }
    }
@@ -2601,7 +2601,7 @@ MasterServices, Server {
     }
     if (descriptorMap != null) {
       for(HTableDescriptor desc: descriptorMap.values()) {
-        if(!HTableDescriptor.isSystemTable(desc.getTableName())) {
+        if(!desc.getTableName().isSystemTable()) {
          descriptors.add(desc);
        }
      }
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@@ -147,7 +146,7 @@ public class RegionCoprocessorHost
     loadSystemCoprocessors(conf, REGION_COPROCESSOR_CONF_KEY);
 
     // load system default cp's for user tables from configuration.
-    if (!HTableDescriptor.isSystemTable(region.getRegionInfo().getTableName())) {
+    if (!region.getRegionInfo().getTableName().isSystemTable()) {
       loadSystemCoprocessors(conf, USER_REGION_COPROCESSOR_CONF_KEY);
     }
 
@@ -27,7 +27,6 @@ import javax.xml.bind.annotation.XmlRootElement;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -96,10 +95,9 @@ public class TableRegionModel implements Serializable {
   @XmlAttribute
   public String getName() {
     byte [] tableNameAsBytes = Bytes.toBytes(this.table);
+    TableName tableName = TableName.valueOf(tableNameAsBytes);
     byte [] nameAsBytes = HRegionInfo.createRegionName(
-      TableName.valueOf(tableNameAsBytes),
-      this.startKey, this.id,
-      !HTableDescriptor.isSystemTable(TableName.valueOf(tableNameAsBytes)));
+      tableName, this.startKey, this.id, !tableName.isSystemTable());
     return Bytes.toString(nameAsBytes);
   }
 
@@ -1325,7 +1325,7 @@ public class TestAdmin {
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME);
     List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs);
     for (HRegionInfo regionInfo : onlineRegions) {
-      if (!HTableDescriptor.isSystemTable(regionInfo.getTableName())) {
+      if (!regionInfo.getTableName().isSystemTable()) {
        info = regionInfo;
        admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), rs
          .getServerName().getServerName());
@@ -1121,7 +1121,7 @@ public class TestDistributedLogSplitting {
       HRegionServer hrs = rst.getRegionServer();
       List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs);
       for (HRegionInfo hri : hris) {
-        if (HTableDescriptor.isSystemTable(hri.getTableName())) {
+        if (hri.getTableName().isSystemTable()) {
          continue;
        }
        LOG.debug("adding data to rs = " + rst.getName() +
@@ -1146,7 +1146,7 @@ public class TestDistributedLogSplitting {
 
     for(Iterator<HRegionInfo> iter = regions.iterator(); iter.hasNext(); ) {
       HRegionInfo regionInfo = iter.next();
-      if(HTableDescriptor.isSystemTable(regionInfo.getTableName())) {
+      if(regionInfo.getTableName().isSystemTable()) {
        iter.remove();
      }
    }
@@ -1868,8 +1868,7 @@ public class TestHBaseFsck {
 
       @Override
       public boolean processRow(Result rowResult) throws IOException {
-        if(!HTableDescriptor.isSystemTable(MetaScanner.getHRegionInfo(rowResult)
-            .getTableName())) {
+        if(!MetaScanner.getHRegionInfo(rowResult).getTableName().isSystemTable()) {
          Delete delete = new Delete(rowResult.getRow());
          delete.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
          deletes.add(delete);