Revert "HBASE-23055 Alter hbase:meta"

This reverts commit d64b0e3612db4ba7a2c5c308291770db27b2e345.
Author: stack
Date: 2020-01-11 09:21:15 -08:00
parent 1ad28a6f40
commit da782e4233
37 changed files with 449 additions and 613 deletions

MetaTableAccessor.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase;
 
 import edu.umd.cs.findbugs.annotations.NonNull;
 import edu.umd.cs.findbugs.annotations.Nullable;
 import java.io.ByteArrayOutputStream;
@@ -81,7 +80,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 
 /**
@@ -306,18 +304,11 @@ public class MetaTableAccessor {
    */
  public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo)
      throws IOException {
-    return getRegionLocation(getCatalogFamilyRow(connection, regionInfo),
-      regionInfo, regionInfo.getReplicaId());
-  }
-
-  /**
-   * @return Return the {@link HConstants#CATALOG_FAMILY} row from hbase:meta.
-   */
-  public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri)
-      throws IOException {
-    Get get = new Get(getMetaKeyForRegion(ri));
+    byte[] row = getMetaKeyForRegion(regionInfo);
+    Get get = new Get(row);
     get.addFamily(HConstants.CATALOG_FAMILY);
-    return get(getMetaHTable(connection), get);
+    Result r = get(getMetaHTable(connection), get);
+    return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
   }
 
   /** Returns the row key to use for this regionInfo */
@@ -981,8 +972,7 @@ public class MetaTableAccessor {
    * @return A ServerName instance or null if necessary fields not found or empty.
    */
   @Nullable
-  // for use by HMaster#getTableRegionRow which is used for testing only
-  @InterfaceAudience.Private
+  @InterfaceAudience.Private // for use by HMaster#getTableRegionRow which is used for testing only
   public static ServerName getServerName(final Result r, final int replicaId) {
     byte[] serverColumn = getServerColumn(replicaId);
     Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn);
@@ -1121,8 +1111,9 @@ public class MetaTableAccessor {
   @Nullable
   public static TableState getTableState(Connection conn, TableName tableName)
      throws IOException {
-    Preconditions.checkArgument(!tableName.equals(TableName.META_TABLE_NAME),
-      "Not for hbase:meta state");
+    if (tableName.equals(TableName.META_TABLE_NAME)) {
+      return new TableState(tableName, TableState.State.ENABLED);
+    }
     Table metaHTable = getMetaHTable(conn);
     Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
     Result result = metaHTable.get(get);
@@ -1149,8 +1140,7 @@ public class MetaTableAccessor {
   }
 
   /**
-   * Updates state in META.
-   * Do not use. For internal use only.
+   * Updates state in META
    * @param conn connection to use
    * @param tableName table to look for
    */
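For orientation: the lookup restored above reduces to a single Get against hbase:meta restricted to the catalog family. A minimal sketch of that access pattern using the public client API (connection setup and the meta row-key computation are elided; MetaLookup and catalogRow are illustrative names, not HBase's):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;

    class MetaLookup {
      // Fetch the catalog-family row for a region's meta key, as the restored
      // getRegionLocation does internally.
      static Result catalogRow(Connection connection, byte[] metaRowKey) throws IOException {
        Get get = new Get(metaRowKey);
        get.addFamily(HConstants.CATALOG_FAMILY); // only the info:* columns
        try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
          return meta.get(get);
        }
      }
    }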

AsyncRegistry.java

@@ -21,7 +21,6 @@ import java.io.Closeable;
 import java.util.concurrent.CompletableFuture;
 
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -30,26 +29,12 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Private
 interface AsyncRegistry extends Closeable {
-  /**
-   * A completed CompletableFuture to host default hbase:meta table state (ENABLED).
-   */
-  TableState ENABLED_META_TABLE_STATE =
-    new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED);
-
-  CompletableFuture<TableState> COMPLETED_GET_META_TABLE_STATE =
-    CompletableFuture.completedFuture(ENABLED_META_TABLE_STATE);
-
   /**
    * Get the location of meta region.
    */
   CompletableFuture<RegionLocations> getMetaRegionLocation();
 
-  /**
-   * The hbase:meta table state.
-   */
-  default CompletableFuture<TableState> getMetaTableState() {
-    return COMPLETED_GET_META_TABLE_STATE;
-  }
-
   /**
    * Should only be called once.
    * <p>

ConnectionImplementation.java

@@ -1,4 +1,5 @@
-/*
+/**
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -85,9 +86,6 @@ import org.slf4j.LoggerFactory;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
-import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
-import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
 import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
@@ -156,21 +154,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server";
 
   private static final Logger LOG = LoggerFactory.getLogger(ConnectionImplementation.class);
 
-  /**
-   * TableState cache.
-   * Table States change super rarely. In synchronous client, state can be queried a lot
-   * particularly when Regions are moving. It is ok if we are not super responsive noticing
-   * Table State change. So, cache the last look up for a period. Use
-   * {@link #TABLESTATE_CACHE_DURATION_MS} to change default of one second.
-   * NOT-private to allow external readers of generated cache stats.
-   */
-  final LoadingCache<TableName, TableState> tableStateCache;
-
-  /**
-   * Duration in milliseconds a tablestate endures in the cache of tablestates.
-   */
-  public static final String TABLESTATE_CACHE_DURATION_MS = "hbase.client.tablestate.cache.ttl.ms";
-
   private static final String RESOLVE_HOSTNAME_ON_FAIL_KEY = "hbase.resolve.hostnames.on.failure";
 
   private final boolean hostnamesCanChange;
@@ -347,26 +330,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       close();
       throw e;
     }
-    // Create tablestate cache. Add a loader that know how to find table state.
-    int duration = this.conf.getInt(TABLESTATE_CACHE_DURATION_MS, 1000);
-    this.tableStateCache = CacheBuilder.newBuilder().
-      expireAfterWrite(duration, TimeUnit.MILLISECONDS).
-      recordStats().
-      build(new CacheLoader<TableName, TableState>() {
-        @Override
-        public TableState load(TableName tableName) throws Exception {
-          if (tableName.equals(TableName.META_TABLE_NAME)) {
-            // We cannot get hbase:meta state by reading hbase:meta table. Read registry.
-            return registry.getMetaTableState().get();
-          }
-          TableState ts =
-            MetaTableAccessor.getTableState(ConnectionImplementation.this, tableName);
-          if (ts == null) {
-            throw new TableNotFoundException(tableName);
-          }
-          return ts;
-        }
-      });
   }
 
   private void spawnRenewalChore(final UserGroupInformation user) {
@@ -466,19 +429,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   @Override
   public Admin getAdmin() throws IOException {
-    return new HBaseAdmin(this) {
-      @Override
-      public void enableTable(TableName tableName) throws IOException {
-        super.enableTable(tableName);
-        ConnectionImplementation.this.tableStateCache.invalidate(tableName);
-      }
-
-      @Override
-      public void disableTable(TableName tableName) throws IOException {
-        super.disableTable(tableName);
-        ConnectionImplementation.this.tableStateCache.invalidate(tableName);
-      }
-    };
+    return new HBaseAdmin(this);
   }
 
   @Override
@@ -803,9 +754,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   @Override
   public RegionLocations relocateRegion(final TableName tableName,
       final byte [] row, int replicaId) throws IOException{
-    if (isTableDisabled(tableName)) {
+    // Since this is an explicit request not to use any caching, finding
+    // disabled tables should not be desirable. This will ensure that an exception is thrown when
+    // the first time a disabled table is interacted with.
+    if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) {
       throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
     }
 
     return locateRegion(tableName, row, false, true, replicaId);
   }
@@ -2102,15 +2057,11 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   @Override
   public TableState getTableState(TableName tableName) throws IOException {
     checkClosed();
-    try {
-      return this.tableStateCache.get(tableName);
-    } catch (ExecutionException e) {
-      // Throws ExecutionException for any exceptions fetching table state. Probably an IOE.
-      if (e.getCause() instanceof IOException) {
-        throw (IOException)e.getCause();
-      }
-      throw new IOException(e);
+    TableState tableState = MetaTableAccessor.getTableState(this, tableName);
+    if (tableState == null) {
+      throw new TableNotFoundException(tableName);
     }
+    return tableState;
   }
 
   @Override
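For context, the cache this file drops is the stock Guava LoadingCache recipe: build once with a TTL and a loader, then read through get(), which throws ExecutionException when the loader fails. A self-contained sketch of the pattern (the String state and fetchStateFromMeta are illustrative stand-ins for the HBase types):

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    class ExpiringStateCache {
      private final LoadingCache<String, String> cache = CacheBuilder.newBuilder()
          .expireAfterWrite(1000, TimeUnit.MILLISECONDS) // entries go stale after ~1s
          .recordStats()                                 // expose hit/miss counters
          .build(new CacheLoader<String, String>() {
            @Override
            public String load(String tableName) throws Exception {
              return fetchStateFromMeta(tableName);      // hypothetical slow lookup
            }
          });

      String getState(String tableName) throws ExecutionException {
        return cache.get(tableName); // loads on miss, serves the cached value until expiry
      }

      private String fetchStateFromMeta(String tableName) {
        return "ENABLED"; // stand-in for a real hbase:meta read
      }
    }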

HBaseAdmin.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -543,9 +543,7 @@ public class HBaseAdmin implements Admin {
   static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection,
       RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
       int operationTimeout, int rpcTimeout) throws IOException {
-    if (tableName == null) {
-      return null;
-    }
+    if (tableName == null) return null;
     TableDescriptor td =
       executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
         @Override
@@ -950,13 +948,22 @@ public class HBaseAdmin implements Admin {
   @Override
   public boolean isTableEnabled(final TableName tableName) throws IOException {
     checkTableExists(tableName);
-    return this.connection.getTableState(tableName).isEnabled();
+    return executeCallable(new RpcRetryingCallable<Boolean>() {
+      @Override
+      protected Boolean rpcCall(int callTimeout) throws Exception {
+        TableState tableState = MetaTableAccessor.getTableState(getConnection(), tableName);
+        if (tableState == null) {
+          throw new TableNotFoundException(tableName);
+        }
+        return tableState.inStates(TableState.State.ENABLED);
+      }
+    });
   }
 
   @Override
   public boolean isTableDisabled(TableName tableName) throws IOException {
     checkTableExists(tableName);
-    return this.connection.getTableState(tableName).isDisabled();
+    return connection.isTableDisabled(tableName);
   }
 
   @Override
@@ -4350,4 +4357,5 @@ public class HBaseAdmin implements Admin {
     });
   }
 }
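The isTableEnabled path restored above hands the meta read to executeCallable, which retries it. A generic sketch of that retry shape, with java.util.concurrent.Callable standing in for HBase's RpcRetryingCallable machinery (attempt counting and backoff simplified):

    import java.util.concurrent.Callable;

    final class Retrying {
      static <T> T execute(Callable<T> call, int maxAttempts) throws Exception {
        Exception last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            return call.call(); // e.g. the table-state lookup against hbase:meta
          } catch (Exception e) {
            last = e;           // remember the failure and try again
          }
        }
        throw last;             // retries exhausted
      }
    }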

MasterKeepAliveConnection.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Copyright The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -20,12 +20,9 @@
 
 package org.apache.hadoop.hbase.client;
 
-import java.io.Closeable;
-
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A KeepAlive connection is not physically closed immediately after the close,
  * but rather kept alive for a few minutes. It makes sense only if it is shared.
@@ -38,7 +35,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * final user code. Hence it's package protected.
  */
 @InterfaceAudience.Private
-interface MasterKeepAliveConnection extends
-    MasterProtos.MasterService.BlockingInterface, Closeable {
+interface MasterKeepAliveConnection extends MasterProtos.MasterService.BlockingInterface {
+  // Do this instead of implement Closeable because closeable returning IOE is PITA.
   void close();
 }

RawAsyncHBaseAdmin.java

@@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
-import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -662,38 +661,22 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       new DisableTableProcedureBiConsumer(tableName));
   }
 
-  /**
-   * Utility for completing passed TableState {@link CompletableFuture} <code>future</code>
-   * using passed parameters.
-   */
-  private static CompletableFuture<Boolean> completeCheckTableState(
-      CompletableFuture<Boolean> future, TableState tableState, Throwable error,
-      TableState.State targetState, TableName tableName) {
-    if (error != null) {
-      future.completeExceptionally(error);
-    } else {
-      if (tableState != null) {
-        future.complete(tableState.inStates(targetState));
-      } else {
-        future.completeExceptionally(new TableNotFoundException(tableName));
-      }
-    }
-    return future;
-  }
-
   @Override
   public CompletableFuture<Boolean> isTableEnabled(TableName tableName) {
     if (TableName.isMetaTableName(tableName)) {
-      CompletableFuture<Boolean> future = new CompletableFuture<>();
-      addListener(this.connection.registry.getMetaTableState(), (tableState, error) -> {
-        completeCheckTableState(future, tableState, error, TableState.State.ENABLED, tableName);
-      });
-      return future;
+      return CompletableFuture.completedFuture(true);
     }
     CompletableFuture<Boolean> future = new CompletableFuture<>();
-    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> {
-      completeCheckTableState(future, tableState.isPresent()? tableState.get(): null, error,
-        TableState.State.ENABLED, tableName);
+    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      if (state.isPresent()) {
+        future.complete(state.get().inStates(TableState.State.ENABLED));
+      } else {
+        future.completeExceptionally(new TableNotFoundException(tableName));
+      }
     });
     return future;
   }
@@ -701,16 +684,19 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
   @Override
   public CompletableFuture<Boolean> isTableDisabled(TableName tableName) {
     if (TableName.isMetaTableName(tableName)) {
-      CompletableFuture<Boolean> future = new CompletableFuture<>();
-      addListener(this.connection.registry.getMetaTableState(), (tableState, error) -> {
-        completeCheckTableState(future, tableState, error, TableState.State.DISABLED, tableName);
-      });
-      return future;
+      return CompletableFuture.completedFuture(false);
     }
     CompletableFuture<Boolean> future = new CompletableFuture<>();
-    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> {
-      completeCheckTableState(future, tableState.isPresent()? tableState.get(): null, error,
-        TableState.State.DISABLED, tableName);
+    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      if (state.isPresent()) {
+        future.complete(state.get().inStates(TableState.State.DISABLED));
+      } else {
+        future.completeExceptionally(new TableNotFoundException(tableName));
+      }
    });
    return future;
  }
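Both methods rely on addListener(...), a small helper (FutureUtils in HBase) that registers a (result, error) callback on a CompletableFuture. A stripped-down sketch of the idiom:

    import java.util.concurrent.CompletableFuture;
    import java.util.function.BiConsumer;

    final class Futures {
      static <T> void addListener(CompletableFuture<T> future,
          BiConsumer<? super T, ? super Throwable> action) {
        // whenComplete delivers exactly one of (result, null) or (null, error).
        future.whenComplete(action);
      }
    }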

RegionServerCallable.java

@@ -215,7 +215,8 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
   @Override
   public void prepare(final boolean reload) throws IOException {
     // check table state if this is a retry
-    if (reload && tableName != null && getConnection().isTableDisabled(tableName)) {
+    if (reload && tableName != null && !tableName.equals(TableName.META_TABLE_NAME)
+        && getConnection().isTableDisabled(tableName)) {
       throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
     }
     try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {

ZKAsyncRegistry.java

@@ -27,7 +27,6 @@ import static org.apache.hadoop.hbase.zookeeper.ZKMetadata.removeMetaData;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.mutable.MutableInt;
 import org.apache.hadoop.conf.Configuration;
@@ -35,18 +34,15 @@ import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
@@ -62,19 +58,8 @@ class ZKAsyncRegistry implements AsyncRegistry {
 
   private final ZNodePaths znodePaths;
 
-  /**
-   * A znode maintained by MirroringTableStateManager.
-   * MirroringTableStateManager is deprecated to be removed in hbase3. It can also be disabled.
-   * Make sure it is enabled if you want to alter hbase:meta table in hbase2. In hbase3,
-   * TBD how metatable state will be hosted; likely on active hbase master.
-   */
-  private final String znodeMirroredMetaTableState;
-
   ZKAsyncRegistry(Configuration conf) {
     this.znodePaths = new ZNodePaths(conf);
-    this.znodeMirroredMetaTableState =
-      ZNodePaths.joinZNode(this.znodePaths.tableZNode, TableName.META_TABLE_NAME.getNameAsString());
     this.zk = new ReadOnlyZKClient(conf);
   }
@@ -170,8 +155,7 @@ class ZKAsyncRegistry implements AsyncRegistry {
         }
         Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
         if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
-          LOG.warn("hbase:meta region (replicaId={}) is in state {}", replicaId,
-            stateAndServerName.getFirst());
+          LOG.warn("Meta region is in state " + stateAndServerName.getFirst());
         }
         locs[DEFAULT_REPLICA_ID] = new HRegionLocation(
           getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond());
@@ -186,7 +170,7 @@ class ZKAsyncRegistry implements AsyncRegistry {
           LOG.warn("Failed to fetch " + path, error);
           locs[replicaId] = null;
         } else if (proto == null) {
-          LOG.warn("hbase:meta znode for replica " + replicaId + " is null");
+          LOG.warn("Meta znode for replica " + replicaId + " is null");
           locs[replicaId] = null;
         } else {
           Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
@@ -210,8 +194,9 @@ class ZKAsyncRegistry implements AsyncRegistry {
   public CompletableFuture<RegionLocations> getMetaRegionLocation() {
     CompletableFuture<RegionLocations> future = new CompletableFuture<>();
     addListener(
-      zk.list(znodePaths.baseZNode).thenApply(children -> children.stream().
-        filter(c -> znodePaths.isMetaZNodePrefix(c)).collect(Collectors.toList())),
+      zk.list(znodePaths.baseZNode)
+        .thenApply(children -> children.stream()
+          .filter(c -> c.startsWith(znodePaths.metaZNodePrefix)).collect(Collectors.toList())),
       (metaReplicaZNodes, error) -> {
         if (error != null) {
           future.completeExceptionally(error);
@@ -244,43 +229,6 @@ class ZKAsyncRegistry implements AsyncRegistry {
     });
   }
 
-  @Override
-  public CompletableFuture<TableState> getMetaTableState() {
-    return getAndConvert(this.znodeMirroredMetaTableState, ZKAsyncRegistry::getTableState).
-      thenApply(state -> {
-        return state == null || state.equals(ENABLED_META_TABLE_STATE.getState())?
-          ENABLED_META_TABLE_STATE: new TableState(TableName.META_TABLE_NAME, state);
-      }).exceptionally(e -> {
-        // Handle this case where no znode... Return default ENABLED in this case:
-        // Caused by: java.io.IOException: java.util.concurrent.ExecutionException:
-        // java.util.concurrent.ExecutionException:
-        // org.apache.zookeeper.KeeperException$NoNodeException: KeeperErrorCode = NoNode for
-        // /hbase/table/hbase:meta
-        // If not case of above, then rethrow but may need to wrap. See
-        // https://stackoverflow.com/questions/55453961/
-        // completablefutureexceptionally-rethrow-checked-exception
-        if (e.getCause() instanceof KeeperException.NoNodeException) {
-          return ENABLED_META_TABLE_STATE;
-        }
-        throw e instanceof CompletionException? (CompletionException)e:
-          new CompletionException(e);
-      });
-  }
-
-  /**
-   * Get tablestate from data byte array found in the mirroring znode of table state.
-   */
-  private static TableState.State getTableState(byte[] data) throws DeserializationException {
-    if (data == null || data.length == 0) {
-      return null;
-    }
-    try {
-      return ProtobufUtil.toTableState(ProtobufUtil.toTableState(removeMetaData(data)));
-    } catch (IOException ioe) {
-      throw new DeserializationException(ioe);
-    }
-  }
-
   @Override
   public void close() {
     zk.close();
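The removed getMetaTableState illustrates a reusable CompletableFuture pattern: map one specific failure (a missing znode) to a default value inside exceptionally(), and rethrow everything else wrapped in CompletionException. A standalone sketch, with NoSuchElementException standing in for ZooKeeper's KeeperException.NoNodeException:

    import java.util.NoSuchElementException;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CompletionException;

    final class DefaultOnMissing {
      static CompletableFuture<String> stateWithDefault(CompletableFuture<String> fetch) {
        return fetch.exceptionally(e -> {
          // The interesting failure usually arrives wrapped; inspect the cause.
          if (e.getCause() instanceof NoSuchElementException) {
            return "ENABLED"; // no node at all: fall back to the default state
          }
          // exceptionally() can only throw unchecked exceptions; wrap if needed.
          throw e instanceof CompletionException ? (CompletionException) e
              : new CompletionException(e);
        });
      }
    }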

ProtobufUtil.java

@@ -87,7 +87,6 @@ import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.client.SnapshotType;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -3373,46 +3372,4 @@ public final class ProtobufUtil {
       .build();
   }
 
-  /**
-   * Parses pb TableState from <code>data</code>
-   */
-  public static ZooKeeperProtos.DeprecatedTableState.State toTableState(byte [] data)
-      throws DeserializationException, IOException {
-    if (data == null || data.length <= 0) {
-      return null;
-    }
-    ProtobufUtil.expectPBMagicPrefix(data);
-    ZooKeeperProtos.DeprecatedTableState.Builder builder =
-      ZooKeeperProtos.DeprecatedTableState.newBuilder();
-    int magicLen = ProtobufUtil.lengthOfPBMagic();
-    ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
-    return builder.getState();
-  }
-
-  /**
-   * @return Convert from pb TableState to pojo TableState.
-   */
-  public static TableState.State toTableState(ZooKeeperProtos.DeprecatedTableState.State state) {
-    TableState.State newState = TableState.State.ENABLED;
-    if (state != null) {
-      switch (state) {
-        case ENABLED:
-          newState = TableState.State.ENABLED;
-          break;
-        case DISABLED:
-          newState = TableState.State.DISABLED;
-          break;
-        case DISABLING:
-          newState = TableState.State.DISABLING;
-          break;
-        case ENABLING:
-          newState = TableState.State.ENABLING;
-          break;
-        default:
-      }
-    }
-    return newState;
-  }
 }

ZNodePaths.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -24,7 +24,6 @@ import static org.apache.hadoop.hbase.HConstants.SPLIT_LOGDIR_NAME;
 import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
 import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
 
-import java.util.Collection;
 import java.util.Optional;
 import java.util.stream.IntStream;
 import org.apache.hadoop.conf.Configuration;
@@ -41,24 +40,15 @@ public class ZNodePaths {
   // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved.
   public static final char ZNODE_PATH_SEPARATOR = '/';
 
-  private static final String META_ZNODE_PREFIX = "meta-region-server";
+  public final static String META_ZNODE_PREFIX = "meta-region-server";
   private static final String DEFAULT_SNAPSHOT_CLEANUP_ZNODE = "snapshot-cleanup";
 
   // base znode for this cluster
   public final String baseZNode;
-
-  /**
-   * The prefix of meta znode. Does not include baseZNode.
-   * Its a 'prefix' because meta replica id integer can be tagged on the end (if
-   * no number present, it is 'default' replica).
-   */
-  private final String metaZNodePrefix;
-
-  /**
-   * znodes containing the locations of the servers hosting the meta replicas
-   */
-  private final ImmutableMap<Integer, String> metaReplicaZNodes;
+  // the prefix of meta znode, does not include baseZNode.
+  public final String metaZNodePrefix;
+  // znodes containing the locations of the servers hosting the meta replicas
+  public final ImmutableMap<Integer, String> metaReplicaZNodes;
   // znode containing ephemeral nodes of the regionservers
   public final String rsZNode;
   // znode containing ephemeral nodes of the draining regionservers
@@ -164,21 +154,21 @@ public class ZNodePaths {
   }
 
   /**
-   * @return true if the znode is a meta region replica
+   * Is the znode of any meta replica
+   * @param node
+   * @return true or false
    */
   public boolean isAnyMetaReplicaZNode(String node) {
-    return this.metaReplicaZNodes.containsValue(node);
+    if (metaReplicaZNodes.containsValue(node)) {
+      return true;
+    }
+    return false;
   }
 
   /**
-   * @return Meta Replica ZNodes
-   */
-  public Collection<String> getMetaReplicaZNodes() {
-    return this.metaReplicaZNodes.values();
-  }
-
-  /**
-   * @return the znode string corresponding to a replicaId
+   * Get the znode string corresponding to a replicaId
+   * @param replicaId
+   * @return znode
    */
   public String getZNodeForReplica(int replicaId) {
     // return a newly created path but don't update the cache of paths
@@ -189,21 +179,24 @@ public class ZNodePaths {
   }
 
   /**
-   * Parse the meta replicaId from the passed znode name.
+   * Parse the meta replicaId from the passed znode
    * @param znode the name of the znode, does not include baseZNode
   * @return replicaId
    */
   public int getMetaReplicaIdFromZnode(String znode) {
-    return znode.equals(metaZNodePrefix)?
-      RegionInfo.DEFAULT_REPLICA_ID:
-      Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
+    if (znode.equals(metaZNodePrefix)) {
+      return RegionInfo.DEFAULT_REPLICA_ID;
+    }
+    return Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
   }
 
   /**
-   * @return True if meta znode.
+   * Is it the default meta replica's znode
+   * @param znode the name of the znode, does not include baseZNode
+   * @return true or false
    */
-  public boolean isMetaZNodePrefix(String znode) {
-    return znode != null && znode.startsWith(this.metaZNodePrefix);
+  public boolean isDefaultMetaReplicaZnode(String znode) {
+    return metaReplicaZNodes.get(DEFAULT_REPLICA_ID).equals(znode);
   }
 
   /**
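The znode naming handled above is simple: the default meta replica lives at "meta-region-server" and replica N at "meta-region-server-N". A tiny standalone version of the parse done by getMetaReplicaIdFromZnode:

    final class MetaZNodeNames {
      static final String PREFIX = "meta-region-server";
      static final int DEFAULT_REPLICA_ID = 0; // matches RegionInfo.DEFAULT_REPLICA_ID

      static int replicaId(String znode) {
        if (znode.equals(PREFIX)) {
          return DEFAULT_REPLICA_ID;           // bare prefix is the default replica
        }
        // skip the prefix and the '-' separator, parse the trailing replica number
        return Integer.parseInt(znode.substring(PREFIX.length() + 1));
      }
    }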

HConstants.java

@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.UUID;
 import java.util.regex.Pattern;
 
+import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -1224,6 +1225,12 @@ public final class HConstants {
     HBCK_SIDELINEDIR_NAME, HBASE_TEMP_DIRECTORY, MIGRATION_NAME
   }));
 
+  /** Directories that are not HBase user table directories */
+  public static final List<String> HBASE_NON_USER_TABLE_DIRS =
+    Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
+      new String[] { TableName.META_TABLE_NAME.getNameAsString() },
+      HBASE_NON_TABLE_DIRS.toArray())));
+
   /** Health script related settings. */
   public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location";
   public static final String HEALTH_SCRIPT_TIMEOUT = "hbase.node.health.script.timeout";

CommonFSUtils.java

@@ -703,8 +703,6 @@ public abstract class CommonFSUtils {
       if (LOG.isTraceEnabled()) {
         LOG.trace("{} doesn't exist", dir);
       }
-    } catch (IllegalArgumentException iae) {
-      int x = 0;
     }
     if (status == null || status.length < 1) {
       return null;

TableDescriptors.java

@@ -25,19 +25,25 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 /**
  * Get, remove and modify table descriptors.
- * Used by servers to host descriptors.
  */
 @InterfaceAudience.Private
 public interface TableDescriptors {
   /**
+   * @param tableName
    * @return TableDescriptor for tablename
+   * @throws IOException
    */
-  TableDescriptor get(final TableName tableName) throws IOException;
+  TableDescriptor get(final TableName tableName)
+      throws IOException;
 
   /**
    * Get Map of all NamespaceDescriptors for a given namespace.
    * @return Map of all descriptors.
+   * @throws IOException
   */
-  Map<String, TableDescriptor> getByNamespace(String name) throws IOException;
+  Map<String, TableDescriptor> getByNamespace(String name)
+      throws IOException;
 
   /**
    * Get Map of all TableDescriptors. Populates the descriptor cache as a
@@ -45,19 +51,25 @@ public interface TableDescriptors {
    * Notice: the key of map is the table name which contains namespace. It was generated by
    * {@link TableName#getNameWithNamespaceInclAsString()}.
    * @return Map of all descriptors.
+   * @throws IOException
   */
   Map<String, TableDescriptor> getAll() throws IOException;
 
   /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
+   * @throws IOException
    */
-  void add(final TableDescriptor htd) throws IOException;
+  void add(final TableDescriptor htd)
+      throws IOException;
 
   /**
+   * @param tablename
    * @return Instance of table descriptor or null if none found.
+   * @throws IOException
   */
-  TableDescriptor remove(final TableName tablename) throws IOException;
+  TableDescriptor remove(final TableName tablename)
+      throws IOException;
 
   /**
    * Enables the tabledescriptor cache

HMaster.java

@@ -1025,7 +1025,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     RegionState rs = this.assignmentManager.getRegionStates().
       getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO);
     LOG.info("hbase:meta {}", rs);
-    if (rs != null && rs.isOffline()) {
+    if (rs.isOffline()) {
       Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
         .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
       initMetaProc = optProc.orElseGet(() -> {

MirroringTableStateManager.java

@@ -38,8 +38,7 @@ import org.slf4j.LoggerFactory;
 * mirroring. See in HMaster where we make the choice. The below does zk updates on a best-effort
 * basis only. If we fail updating zk we keep going because only hbase1 clients suffer; we'll just
 * log at WARN level.
- * @deprecated Since 2.0.0. To be removed in 3.0.0. ZKRegistry#getMetaTableState reads
- * mirrored state so add alternative mechanism before purge else cannot disable hbase:meta table
+ * @deprecated Since 2.0.0. To be removed in 3.0.0.
 */
 @Deprecated
 @InterfaceAudience.Private
@@ -48,7 +47,7 @@ public class MirroringTableStateManager extends TableStateManager {
   /**
    * Set this key to true in Configuration to enable mirroring of table state out to zookeeper so
-   * hbase-1.x clients can pick-up table state. Default value is 'true'.
+   * hbase-1.x clients can pick-up table state.
    */
   static final String MIRROR_TABLE_STATE_TO_ZK_KEY = "hbase.mirror.table.state.to.zookeeper";

TableStateManager.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
 import org.apache.hadoop.hbase.util.IdReadWriteLock;
 import org.apache.hadoop.hbase.util.ZKDataMigrator;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -52,20 +53,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 // TODO: Make this a guava Service
 @InterfaceAudience.Private
 public class TableStateManager {
   private static final Logger LOG = LoggerFactory.getLogger(TableStateManager.class);
 
-  /**
-   * All table state is kept in hbase:meta except that of hbase:meta itself.
-   * hbase:meta state is kept here locally in this in-memory variable. State
-   * for hbase:meta is not persistent. If this process dies, the hbase:meta
-   * state reverts to enabled. State is used so we can edit hbase:meta as we
-   * would any other table by disabling, altering, and then re-enabling. If this
-   * process dies in the midst of an edit, the table reverts to enabled. Schema
-   * is read from the filesystem. It is changed atomically so if we die midway
-   * through an edit we should be good.
-   */
-  private TableState.State metaTableState = TableState.State.ENABLED;
-
   /**
    * Set this key to false in Configuration to disable migrating table state from zookeeper so
    * hbase:meta table.
@@ -79,7 +68,7 @@ public class TableStateManager {
   private final ConcurrentMap<TableName, TableState.State> tableName2State =
     new ConcurrentHashMap<>();
 
-  TableStateManager(MasterServices master) {
+  public TableStateManager(MasterServices master) {
     this.master = master;
   }
 
@@ -98,6 +87,61 @@ public class TableStateManager {
     }
   }
 
+  /**
+   * Set table state to provided but only if table in specified states Caller should lock table on
+   * write.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   * @return null if succeed or table state if failed
+   */
+  public TableState setTableStateIfInStates(TableName tableName, TableState.State newState,
+      TableState.State... states) throws IOException {
+    ReadWriteLock lock = tnLock.getLock(tableName);
+    lock.writeLock().lock();
+    try {
+      TableState currentState = readMetaState(tableName);
+      if (currentState == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (currentState.inStates(states)) {
+        updateMetaState(tableName, newState);
+        return null;
+      } else {
+        return currentState;
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Set table state to provided but only if table not in specified states Caller should lock table
+   * on write.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   */
+  public boolean setTableStateIfNotInStates(TableName tableName, TableState.State newState,
+      TableState.State... states) throws IOException {
+    ReadWriteLock lock = tnLock.getLock(tableName);
+    lock.writeLock().lock();
+    try {
+      TableState currentState = readMetaState(tableName);
+      if (currentState == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (!currentState.inStates(states)) {
+        updateMetaState(tableName, newState);
+        return true;
+      } else {
+        return false;
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
   public boolean isTableState(TableName tableName, TableState.State... states) {
     try {
       TableState tableState = getTableState(tableName);
@@ -111,7 +155,6 @@ public class TableStateManager {
   public void setDeletedTable(TableName tableName) throws IOException {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
-      // Can't delete the hbase:meta table.
       return;
     }
     ReadWriteLock lock = tnLock.getLock(tableName);
@@ -140,7 +183,7 @@ public class TableStateManager {
   * @param states filter by states
   * @return tables in given states
   */
-  Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
+  public Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
     // Only be called in region normalizer, will not use cache.
     final Set<TableName> rv = Sets.newHashSet();
     MetaTableAccessor.fullScanTables(master.getConnection(), new MetaTableAccessor.Visitor() {
@@ -156,6 +199,12 @@ public class TableStateManager {
     return rv;
   }
 
+  public static class TableStateNotFoundException extends TableNotFoundException {
+    TableStateNotFoundException(TableName tableName) {
+      super(tableName.getNameAsString());
+    }
+  }
+
   @NonNull
   public TableState getTableState(TableName tableName) throws IOException {
     ReadWriteLock lock = tnLock.getLock(tableName);
@@ -163,7 +212,7 @@ public class TableStateManager {
     try {
       TableState currentState = readMetaState(tableName);
       if (currentState == null) {
-        throw new TableNotFoundException("No state found for " + tableName);
+        throw new TableStateNotFoundException(tableName);
       }
       return currentState;
     } finally {
@@ -172,18 +221,22 @@ public class TableStateManager {
   }
 
   private void updateMetaState(TableName tableName, TableState.State newState) throws IOException {
+    if (tableName.equals(TableName.META_TABLE_NAME)) {
+      if (TableState.State.DISABLING.equals(newState) ||
+          TableState.State.DISABLED.equals(newState)) {
+        throw new IllegalArgumentIOException("Cannot disable the meta table; " + newState);
+      }
+      // Otherwise, just return; no need to set ENABLED on meta -- it is always ENABLED.
+      return;
+    }
     boolean succ = false;
     try {
-      if (tableName.equals(TableName.META_TABLE_NAME)) {
-        this.metaTableState = newState;
-      } else {
-        MetaTableAccessor.updateTableState(master.getConnection(), tableName, newState);
-      }
-      this.tableName2State.put(tableName, newState);
+      MetaTableAccessor.updateTableState(master.getConnection(), tableName, newState);
+      tableName2State.put(tableName, newState);
       succ = true;
     } finally {
       if (!succ) {
-        this.tableName2State.remove(tableName);
+        tableName2State.remove(tableName);
      }
    }
    metaStateUpdated(tableName, newState);
@@ -202,9 +255,7 @@ public class TableStateManager {
     if (state != null) {
       return new TableState(tableName, state);
     }
-    TableState tableState = tableName.equals(TableName.META_TABLE_NAME)?
-      new TableState(TableName.META_TABLE_NAME, this.metaTableState):
-      MetaTableAccessor.getTableState(master.getConnection(), tableName);
+    TableState tableState = MetaTableAccessor.getTableState(master.getConnection(), tableName);
     if (tableState != null) {
       tableName2State.putIfAbsent(tableName, tableState.getState());
     }
@@ -212,8 +263,10 @@ public class TableStateManager {
   }
 
   public void start() throws IOException {
+    TableDescriptors tableDescriptors = master.getTableDescriptors();
     migrateZooKeeper();
-    fixTableStates(master.getTableDescriptors(), master.getConnection());
+    Connection connection = master.getConnection();
+    fixTableStates(tableDescriptors, connection);
   }
 
   private void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
@@ -282,7 +335,7 @@ public class TableStateManager {
     TableState ts = null;
     try {
       ts = getTableState(entry.getKey());
-    } catch (TableNotFoundException e) {
+    } catch (TableStateNotFoundException e) {
       // This can happen; table exists but no TableState.
     }
     if (ts == null) {
@@ -324,4 +377,4 @@ public class TableStateManager {
       LOG.warn("Failed deleting table state from zookeeper", e);
     }
   }
 }
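The restored setTableStateIfInStates/setTableStateIfNotInStates pair is a per-table compare-and-set guarded by that table's write lock. A simplified sketch of the shape with generic key and state types (HBase takes its per-key locks from IdReadWriteLock; the ConcurrentHashMap of locks here is an illustrative substitute):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class KeyedStateStore<K, S> {
      private final Map<K, ReadWriteLock> locks = new ConcurrentHashMap<>();
      private final Map<K, S> states = new ConcurrentHashMap<>();

      /** Set newState only if the current state equals expected; null on success. */
      S setIfInState(K key, S newState, S expected) {
        ReadWriteLock lock = locks.computeIfAbsent(key, k -> new ReentrantReadWriteLock());
        lock.writeLock().lock();
        try {
          S current = states.get(key);
          if (expected.equals(current)) {
            states.put(key, newState);
            return null;      // success, mirroring setTableStateIfInStates
          }
          return current;     // precondition failed; report the blocking state
        } finally {
          lock.writeLock().unlock();
        }
      }
    }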

RegionStateStore.java

@@ -146,7 +146,8 @@ public class RegionStateStore {
     }
   }
 
-  void updateRegionLocation(RegionStateNode regionStateNode) throws IOException {
+  public void updateRegionLocation(RegionStateNode regionStateNode)
+      throws IOException {
     if (regionStateNode.getRegionInfo().isMetaRegion()) {
       updateMetaLocation(regionStateNode.getRegionInfo(), regionStateNode.getRegionLocation(),
         regionStateNode.getState());

CreateTableProcedure.java

@@ -78,7 +78,9 @@ public class CreateTableProcedure
   @Override
   protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableState state)
       throws InterruptedException {
-    LOG.info("{} execute state={}", this, state);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(this + " execute state=" + state);
+    }
     try {
       switch (state) {
         case CREATE_TABLE_PRE_OPERATION:
@@ -318,7 +320,8 @@ public class CreateTableProcedure
     // using a copy of descriptor, table will be created enabling first
     final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
     ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
-      .createTableDescriptorForTableDirectory(tempTableDir, tableDescriptor, false);
+      .createTableDescriptorForTableDirectory(
+        tempTableDir, tableDescriptor, false);
 
     // 2. Create Regions
     newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,

DisableTableProcedure.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.TableStateManager;
@@ -108,8 +109,8 @@ public class DisableTableProcedure
           setNextState(DisableTableState.DISABLE_TABLE_ADD_REPLICATION_BARRIER);
           break;
         case DISABLE_TABLE_ADD_REPLICATION_BARRIER:
-          if (env.getMasterServices().getTableDescriptors().get(tableName).
-              hasGlobalReplicationScope()) {
+          if (env.getMasterServices().getTableDescriptors().get(tableName)
+              .hasGlobalReplicationScope()) {
             MasterFileSystem fs = env.getMasterFileSystem();
             try (BufferedMutator mutator = env.getMasterServices().getConnection()
               .getBufferedMutator(TableName.META_TABLE_NAME)) {
@@ -241,7 +242,10 @@ public class DisableTableProcedure
   */
  private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
    boolean canTableBeDisabled = true;
-    if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+    if (tableName.equals(TableName.META_TABLE_NAME)) {
+      setFailure("master-disable-table", new ConstraintException("Cannot disable catalog table"));
+      canTableBeDisabled = false;
+    } else if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
      setFailure("master-disable-table", new TableNotFoundException(tableName));
      canTableBeDisabled = false;
    } else if (!skipTableStateCheck) {

View File

@ -1,4 +1,4 @@
/* /**
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -27,9 +27,11 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@ -97,55 +99,66 @@ public class EnableTableProcedure
setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE); setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE);
break; break;
case ENABLE_TABLE_MARK_REGIONS_ONLINE: case ENABLE_TABLE_MARK_REGIONS_ONLINE:
// Get the region replica count. If changed since disable, need to do
// more work assigning.
Connection connection = env.getMasterServices().getConnection(); Connection connection = env.getMasterServices().getConnection();
TableDescriptor tableDescriptor = // we will need to get the tableDescriptor here to see if there is a change in the replica
// count
TableDescriptor hTableDescriptor =
env.getMasterServices().getTableDescriptors().get(tableName); env.getMasterServices().getTableDescriptors().get(tableName);
int configuredReplicaCount = tableDescriptor.getRegionReplication();
// Get regions for the table from memory; get both online and offline regions ('true'). // Get the replica count
int regionReplicaCount = hTableDescriptor.getRegionReplication();
// Get the regions for the table from memory; get both online and offline regions
// ('true').
List<RegionInfo> regionsOfTable = List<RegionInfo> regionsOfTable =
env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true); env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true);
// How many replicas do we currently have? Check regions returned from int currentMaxReplica = 0;
// in-memory state. // Check if the regions in memory have replica regions as marked in META table
int currentMaxReplica = getMaxReplicaId(regionsOfTable); for (RegionInfo regionInfo : regionsOfTable) {
if (regionInfo.getReplicaId() > currentMaxReplica) {
// Read the META table to know the number of replicas the table currently has. // Iterating through all the list to identify the highest replicaID region.
// If there was a table modification on region replica count then need to // We can stop after checking with the first set of regions??
// adjust replica counts here. currentMaxReplica = regionInfo.getReplicaId();
int replicasFound = TableName.isMetaTableName(this.tableName)?
0: // TODO: Figure out better handling here for hbase:meta replicas.
getReplicaCountInMeta(connection, configuredReplicaCount, regionsOfTable);
LOG.info("replicasFound={} (configuredReplicaCount={} for {}", replicasFound,
configuredReplicaCount, tableName.getNameAsString());
if (currentMaxReplica == (configuredReplicaCount - 1)) {
if (LOG.isDebugEnabled()) {
LOG.debug("No change in number of region replicas (configuredReplicaCount={});"
+ " assigning.", configuredReplicaCount);
} }
} else if (currentMaxReplica > (configuredReplicaCount - 1)) { }
// We have additional regions as the replica count has been decreased. Delete
// read the META table to know the actual number of replicas for the table - if there
// was a table modification on region replica then this will reflect the new entries also
int replicasFound =
getNumberOfReplicasFromMeta(connection, regionReplicaCount, regionsOfTable);
assert regionReplicaCount - 1 == replicasFound;
LOG.info(replicasFound + " META entries added for the given regionReplicaCount "
+ regionReplicaCount + " for the table " + tableName.getNameAsString());
if (currentMaxReplica == (regionReplicaCount - 1)) {
if (LOG.isDebugEnabled()) {
LOG.debug("There is no change to the number of region replicas."
+ " Assigning the available regions." + " Current and previous"
+ "replica count is " + regionReplicaCount);
}
} else if (currentMaxReplica > (regionReplicaCount - 1)) {
// we have additional regions as the replica count has been decreased. Delete
// those regions because already the table is in the unassigned state // those regions because already the table is in the unassigned state
LOG.info("The number of replicas " + (currentMaxReplica + 1) LOG.info("The number of replicas " + (currentMaxReplica + 1)
+ " is more than the region replica count " + configuredReplicaCount); + " is more than the region replica count " + regionReplicaCount);
List<RegionInfo> copyOfRegions = new ArrayList<RegionInfo>(regionsOfTable); List<RegionInfo> copyOfRegions = new ArrayList<RegionInfo>(regionsOfTable);
for (RegionInfo regionInfo : copyOfRegions) { for (RegionInfo regionInfo : copyOfRegions) {
if (regionInfo.getReplicaId() > (configuredReplicaCount - 1)) { if (regionInfo.getReplicaId() > (regionReplicaCount - 1)) {
// delete the region from the regionStates // delete the region from the regionStates
env.getAssignmentManager().getRegionStates().deleteRegion(regionInfo); env.getAssignmentManager().getRegionStates().deleteRegion(regionInfo);
// remove it from the list of regions of the table // remove it from the list of regions of the table
LOG.info("Removed replica={} of {}", regionInfo.getRegionId(), regionInfo); LOG.info("The regioninfo being removed is " + regionInfo + " "
+ regionInfo.getReplicaId());
regionsOfTable.remove(regionInfo); regionsOfTable.remove(regionInfo);
} }
} }
} else { } else {
// the replicasFound is less than the regionReplication // the replicasFound is less than the regionReplication
LOG.info("Number of replicas has increased. Assigning new region replicas." + LOG.info("The number of replicas has been changed(increased)."
"The previous replica count was {}. The current replica count is {}.", + " Lets assign the new region replicas. The previous replica count was "
(currentMaxReplica + 1), configuredReplicaCount); + (currentMaxReplica + 1) + ". The current replica count is " + regionReplicaCount);
regionsOfTable = RegionReplicaUtil.addReplicas(tableDescriptor, regionsOfTable, regionsOfTable = RegionReplicaUtil.addReplicas(hTableDescriptor, regionsOfTable,
currentMaxReplica + 1, configuredReplicaCount); currentMaxReplica + 1, regionReplicaCount);
} }
// Assign all the table regions. (including region replicas if added). // Assign all the table regions. (including region replicas if added).
// createAssignProcedure will try to retain old assignments if possible. // createAssignProcedure will try to retain old assignments if possible.
@ -173,13 +186,9 @@ public class EnableTableProcedure
return Flow.HAS_MORE_STATE; return Flow.HAS_MORE_STATE;
} }
/** private int getNumberOfReplicasFromMeta(Connection connection, int regionReplicaCount,
* @return Count of replicas found reading hbase:meta Region row or zk if
* asking about the hbase:meta table itself.
*/
private int getReplicaCountInMeta(Connection connection, int regionReplicaCount,
List<RegionInfo> regionsOfTable) throws IOException { List<RegionInfo> regionsOfTable) throws IOException {
Result r = MetaTableAccessor.getCatalogFamilyRow(connection, regionsOfTable.get(0)); Result r = getRegionFromMeta(connection, regionsOfTable);
int replicasFound = 0; int replicasFound = 0;
for (int i = 1; i < regionReplicaCount; i++) { for (int i = 1; i < regionReplicaCount; i++) {
// Since we have already added the entries to the META we will be getting only that here // Since we have already added the entries to the META we will be getting only that here
@ -192,6 +201,16 @@ public class EnableTableProcedure
return replicasFound; return replicasFound;
} }
private Result getRegionFromMeta(Connection connection, List<RegionInfo> regionsOfTable)
throws IOException {
byte[] metaKeyForRegion = MetaTableAccessor.getMetaKeyForRegion(regionsOfTable.get(0));
Get get = new Get(metaKeyForRegion);
get.addFamily(HConstants.CATALOG_FAMILY);
Table metaTable = MetaTableAccessor.getMetaHTable(connection);
Result r = metaTable.get(get);
return r;
}
@Override @Override
protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state) protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state)
throws IOException { throws IOException {
@ -389,20 +408,4 @@ public class EnableTableProcedure
} }
} }
} }
/**
* @return Maximum region replica id found in passed list of regions.
*/
private static int getMaxReplicaId(List<RegionInfo> regions) {
int max = 0;
for (RegionInfo regionInfo: regions) {
if (regionInfo.getReplicaId() > max) {
// Iterating through all the list to identify the highest replicaID region.
// We can stop after checking with the first set of regions??
max = regionInfo.getReplicaId();
}
}
return max;
}
} }
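Both sides of the ENABLE_TABLE_MARK_REGIONS_ONLINE hunk above implement the same reconciliation: compare the highest replica id found in memory against the configured region replication, then drop surplus replica regions or add missing ones via RegionReplicaUtil.addReplicas. A condensed sketch of the surplus-dropping branch (hypothetical helper, not the procedure code itself):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.RegionInfo;

final class ReplicaReconciliationSketch {
  // Keep replicaIds 0..configured-1; anything above is surplus left over
  // from a replication-count decrease made while the table was disabled.
  static List<RegionInfo> dropSurplusReplicas(List<RegionInfo> regions,
      int configuredReplicaCount) {
    List<RegionInfo> kept = new ArrayList<>(regions.size());
    for (RegionInfo ri : regions) {
      if (ri.getReplicaId() <= configuredReplicaCount - 1) {
        kept.add(ri);
      }
    }
    return kept;
  }
}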

View File

@ -25,11 +25,11 @@ import java.util.function.LongConsumer;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.master.TableStateManager;
import org.apache.hadoop.hbase.master.TableStateManager.TableStateNotFoundException;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure; import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
@ -148,7 +148,7 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
return false; return false;
} }
Thread.sleep(SLEEP_INTERVAL_MS); Thread.sleep(SLEEP_INTERVAL_MS);
} catch (TableNotFoundException e) { } catch (TableStateNotFoundException e) {
return false; return false;
} catch (InterruptedException e) { } catch (InterruptedException e) {
throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e); throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
@ -227,7 +227,7 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
return true; return true;
} }
Thread.sleep(SLEEP_INTERVAL_MS); Thread.sleep(SLEEP_INTERVAL_MS);
} catch (TableNotFoundException e) { } catch (TableStateNotFoundException e) {
return false; return false;
} catch (InterruptedException e) { } catch (InterruptedException e) {
throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e); throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
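Aside from the TableNotFoundException/TableStateNotFoundException swap, both versions of these wait loops share the idiom of rethrowing an InterruptedException as a checked IOException. Isolated for clarity (a sketch, not code from the file):

import java.io.IOException;
import java.io.InterruptedIOException;

final class InterruptedIoSketch {
  // InterruptedIOException is an IOException, so the cast below is safe.
  static IOException toInterruptedIOException(InterruptedException e) {
    return (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
  }
}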

View File

@ -1,4 +1,4 @@
/* /**
* *
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
@ -41,6 +41,6 @@ public class MetaLocationSyncer extends ClientZKSyncer {
@Override @Override
Collection<String> getNodesToWatch() { Collection<String> getNodesToWatch() {
return watcher.getZNodePaths().getMetaReplicaZNodes(); return watcher.getZNodePaths().metaReplicaZNodes.values();
} }
} }

View File

@ -472,10 +472,11 @@ public final class SnapshotManifest {
public void consolidate() throws IOException { public void consolidate() throws IOException {
if (getSnapshotFormat(desc) == SnapshotManifestV1.DESCRIPTOR_VERSION) { if (getSnapshotFormat(desc) == SnapshotManifestV1.DESCRIPTOR_VERSION) {
Path rootDir = FSUtils.getRootDir(conf);
LOG.info("Using old Snapshot Format"); LOG.info("Using old Snapshot Format");
// write a copy of descriptor to the snapshot directory // write a copy of descriptor to the snapshot directory
FSTableDescriptors.createTableDescriptorForTableDirectory(workingDirFs, workingDir, htd, new FSTableDescriptors(conf, workingDirFs, rootDir)
false); .createTableDescriptorForTableDirectory(workingDir, htd, false);
} else { } else {
LOG.debug("Convert to Single Snapshot Manifest"); LOG.debug("Convert to Single Snapshot Manifest");
convertToV2SingleManifest(); convertToV2SingleManifest();
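The two sides of this hunk write the same v1 descriptor copy but reach createTableDescriptorForTableDirectory differently. Side by side, as a sketch (workingDirFs, workingDir, htd, and conf assumed in scope):

// Reverted code: static helper bound to an explicit FileSystem.
FSTableDescriptors.createTableDescriptorForTableDirectory(
    workingDirFs, workingDir, htd, false);

// Restored code: instance method on an FSTableDescriptors rooted at the
// cluster root directory.
new FSTableDescriptors(conf, workingDirFs, FSUtils.getRootDir(conf))
    .createTableDescriptorForTableDirectory(workingDir, htd, false);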

View File

@ -122,9 +122,8 @@ public class FSTableDescriptors implements TableDescriptors {
* @param fsreadonly True if we are read-only when it comes to filesystem * @param fsreadonly True if we are read-only when it comes to filesystem
* operations; i.e. on remove, we do not do delete in fs. * operations; i.e. on remove, we do not do delete in fs.
*/ */
@VisibleForTesting
public FSTableDescriptors(final Configuration conf, final FileSystem fs, public FSTableDescriptors(final Configuration conf, final FileSystem fs,
final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException { final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
this(conf, fs, rootdir, fsreadonly, usecache, null); this(conf, fs, rootdir, fsreadonly, usecache, null);
} }
@ -136,32 +135,16 @@ public class FSTableDescriptors implements TableDescriptors {
* TODO: This is a workaround. Should remove this ugly code... * TODO: This is a workaround. Should remove this ugly code...
*/ */
public FSTableDescriptors(final Configuration conf, final FileSystem fs, public FSTableDescriptors(final Configuration conf, final FileSystem fs,
final Path rootdir, final boolean fsreadonly, final boolean usecache, final Path rootdir, final boolean fsreadonly, final boolean usecache,
Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException { Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
this.fs = fs; this.fs = fs;
this.rootdir = rootdir; this.rootdir = rootdir;
this.fsreadonly = fsreadonly; this.fsreadonly = fsreadonly;
this.usecache = usecache; this.usecache = usecache;
TableDescriptor td = null; this.metaTableDescriptor = metaObserver == null ? createMetaTableDescriptor(conf)
try { : metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
td = getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
} catch (TableInfoMissingException e) {
td = metaObserver == null? createMetaTableDescriptor(conf):
metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
if (!fsreadonly) {
LOG.info("Creating new hbase:meta table default descriptor/schema {}", td);
updateTableDescriptor(td);
}
}
this.metaTableDescriptor = td;
} }
/**
*
* Should be private
* @deprecated Since 2.3.0. Should be for internal use only. Used by testing.
*/
@Deprecated
@VisibleForTesting @VisibleForTesting
public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) throws IOException { public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) throws IOException {
// TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
@ -235,6 +218,16 @@ public class FSTableDescriptors implements TableDescriptors {
public TableDescriptor get(final TableName tablename) public TableDescriptor get(final TableName tablename)
throws IOException { throws IOException {
invocations++; invocations++;
if (TableName.META_TABLE_NAME.equals(tablename)) {
cachehits++;
return metaTableDescriptor;
}
// hbase:meta is already handled. If someone tries to get the descriptor for
// .logs, .oldlogs or .corrupt, throw an exception.
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
throw new IOException("No descriptor found for non table = " + tablename);
}
if (usecache) { if (usecache) {
// Look in cache of descriptors. // Look in cache of descriptors.
TableDescriptor cachedtdm = this.cache.get(tablename); TableDescriptor cachedtdm = this.cache.get(tablename);
@ -270,6 +263,7 @@ public class FSTableDescriptors implements TableDescriptors {
public Map<String, TableDescriptor> getAll() public Map<String, TableDescriptor> getAll()
throws IOException { throws IOException {
Map<String, TableDescriptor> tds = new TreeMap<>(); Map<String, TableDescriptor> tds = new TreeMap<>();
if (fsvisited && usecache) { if (fsvisited && usecache) {
for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) { for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue()); tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue());
@ -332,6 +326,15 @@ public class FSTableDescriptors implements TableDescriptors {
if (fsreadonly) { if (fsreadonly) {
throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
} }
TableName tableName = htd.getTableName();
if (TableName.META_TABLE_NAME.equals(tableName)) {
throw new NotImplementedException(HConstants.NOT_IMPLEMENTED);
}
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
throw new NotImplementedException(
"Cannot add a table descriptor for a reserved subdirectory name: "
+ htd.getTableName().getNameAsString());
}
updateTableDescriptor(htd); updateTableDescriptor(htd);
} }
@ -356,6 +359,26 @@ public class FSTableDescriptors implements TableDescriptors {
return descriptor; return descriptor;
} }
/**
* Checks if a current table info file exists for the given table
*
* @param tableName name of table
* @return true if exists
* @throws IOException
*/
public boolean isTableInfoExists(TableName tableName) throws IOException {
return getTableInfoPath(tableName) != null;
}
/**
* Find the most current table info file for the given table in the hbase root directory.
* @return The file status of the current table info file or null if it does not exist
*/
private FileStatus getTableInfoPath(final TableName tableName) throws IOException {
Path tableDir = getTableDir(tableName);
return getTableInfoPath(tableDir);
}
private FileStatus getTableInfoPath(Path tableDir) private FileStatus getTableInfoPath(Path tableDir)
throws IOException { throws IOException {
return getTableInfoPath(fs, tableDir, !fsreadonly); return getTableInfoPath(fs, tableDir, !fsreadonly);
@ -370,6 +393,7 @@ public class FSTableDescriptors implements TableDescriptors {
* were sequence numbers). * were sequence numbers).
* *
* @return The file status of the current table info file or null if it does not exist * @return The file status of the current table info file or null if it does not exist
* @throws IOException
*/ */
public static FileStatus getTableInfoPath(FileSystem fs, Path tableDir) public static FileStatus getTableInfoPath(FileSystem fs, Path tableDir)
throws IOException { throws IOException {
@ -387,6 +411,7 @@ public class FSTableDescriptors implements TableDescriptors {
* older files. * older files.
* *
* @return The file status of the current table info file or null if none exist * @return The file status of the current table info file or null if none exist
* @throws IOException
*/ */
private static FileStatus getTableInfoPath(FileSystem fs, Path tableDir, boolean removeOldFiles) private static FileStatus getTableInfoPath(FileSystem fs, Path tableDir, boolean removeOldFiles)
throws IOException { throws IOException {
@ -574,6 +599,21 @@ public class FSTableDescriptors implements TableDescriptors {
return p; return p;
} }
/**
* Deletes all the table descriptor files from the file system.
* Used in unit tests only.
* @throws NotImplementedException if in read only mode
*/
public void deleteTableDescriptorIfExists(TableName tableName) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
}
Path tableDir = getTableDir(tableName);
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
}
/** /**
* Deletes files matching the table info file pattern within the given directory * Deletes files matching the table info file pattern within the given directory
* whose sequenceId is at most the given max sequenceId. * whose sequenceId is at most the given max sequenceId.
@ -696,8 +736,7 @@ public class FSTableDescriptors implements TableDescriptors {
/** /**
* Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
* a new table during cluster start or in Clone and Create Table Procedures. Checks readOnly flag * a new table or snapshot a table.
* passed on construction.
* @param tableDir table directory under which we should write the file * @param tableDir table directory under which we should write the file
* @param htd description of the table to write * @param htd description of the table to write
* @param forceCreation if <tt>true</tt>, then even if previous table descriptor is present it will * @param forceCreation if <tt>true</tt>, then even if previous table descriptor is present it will
@ -706,28 +745,11 @@ public class FSTableDescriptors implements TableDescriptors {
* already exists and we weren't forcing the descriptor creation. * already exists and we weren't forcing the descriptor creation.
* @throws IOException if a filesystem error occurs * @throws IOException if a filesystem error occurs
*/ */
public boolean createTableDescriptorForTableDirectory(Path tableDir, TableDescriptor htd, public boolean createTableDescriptorForTableDirectory(Path tableDir,
boolean forceCreation) throws IOException { TableDescriptor htd, boolean forceCreation) throws IOException {
if (this.fsreadonly) { if (fsreadonly) {
throw new NotImplementedException("Cannot create a table descriptor - in read only mode"); throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
} }
return createTableDescriptorForTableDirectory(this.fs, tableDir, htd, forceCreation);
}
/**
* Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
* a new table or snapshot a table. Does not enforce read-only. That is for the caller to determine.
* @param fs Filesystem to use.
* @param tableDir table directory under which we should write the file
* @param htd description of the table to write
* @param forceCreation if <tt>true</tt>, then even if previous table descriptor is present it will
* be overwritten
* @return <tt>true</tt> if we successfully created the file, <tt>false</tt> if the file
* already exists and we weren't forcing the descriptor creation.
* @throws IOException if a filesystem error occurs
*/
public static boolean createTableDescriptorForTableDirectory(FileSystem fs,
Path tableDir, TableDescriptor htd, boolean forceCreation) throws IOException {
FileStatus status = getTableInfoPath(fs, tableDir); FileStatus status = getTableInfoPath(fs, tableDir);
if (status != null) { if (status != null) {
LOG.debug("Current path=" + status.getPath()); LOG.debug("Current path=" + status.getPath());
@ -740,7 +762,9 @@ public class FSTableDescriptors implements TableDescriptors {
} }
} }
} }
return writeTableDescriptor(fs, htd, tableDir, status) != null; Path p = writeTableDescriptor(fs, htd, tableDir, status);
return p != null;
} }
} }
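With the revert applied, FSTableDescriptors#get again short-circuits hbase:meta to the built-in descriptor and rejects reserved directory names before consulting the cache or the filesystem. A minimal usage sketch (conf, fs, and rootdir assumed to describe a live deployment):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

FSTableDescriptors tds =
    new FSTableDescriptors(conf, fs, rootdir, /*fsreadonly*/ true, /*usecache*/ false);
// Served from the built-in descriptor, never read from the filesystem:
TableDescriptor meta = tds.get(TableName.META_TABLE_NAME);
// User tables are resolved from .tableinfo files under their table dir:
TableDescriptor user = tds.get(TableName.valueOf("default:foo"));
// Reserved names such as ".oldlogs" now throw IOException instead.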

View File

@ -1,4 +1,4 @@
/* /**
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -37,7 +37,6 @@ import org.slf4j.LoggerFactory;
/** /**
* Utility method to migrate zookeeper data across HBase versions. * Utility method to migrate zookeeper data across HBase versions.
* Used by Master mirroring table state to zk for hbase-1 clients.
* @deprecated Since 2.0.0. To be removed in hbase-3.0.0. * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
*/ */
@Deprecated @Deprecated
@ -66,7 +65,25 @@ public class ZKDataMigrator {
return rv; return rv;
for (String child: children) { for (String child: children) {
TableName tableName = TableName.valueOf(child); TableName tableName = TableName.valueOf(child);
TableState.State newState = ProtobufUtil.toTableState(getTableState(zkw, tableName)); ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName);
TableState.State newState = TableState.State.ENABLED;
if (state != null) {
switch (state) {
case ENABLED:
newState = TableState.State.ENABLED;
break;
case DISABLED:
newState = TableState.State.DISABLED;
break;
case DISABLING:
newState = TableState.State.DISABLING;
break;
case ENABLING:
newState = TableState.State.ENABLING;
break;
default:
}
}
rv.put(tableName, newState); rv.put(tableName, newState);
} }
return rv; return rv;
@ -83,13 +100,20 @@ public class ZKDataMigrator {
* @deprecated Since 2.0.0. To be removed in hbase-3.0.0. * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
*/ */
@Deprecated @Deprecated
private static ZooKeeperProtos.DeprecatedTableState.State getTableState( private static ZooKeeperProtos.DeprecatedTableState.State getTableState(
final ZKWatcher zkw, final TableName tableName) final ZKWatcher zkw, final TableName tableName)
throws KeeperException, InterruptedException { throws KeeperException, InterruptedException {
String znode = ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, String znode = ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode,
tableName.getNameAsString()); tableName.getNameAsString());
byte [] data = ZKUtil.getData(zkw, znode);
if (data == null || data.length <= 0) return null;
try { try {
return ProtobufUtil.toTableState(ZKUtil.getData(zkw, znode)); ProtobufUtil.expectPBMagicPrefix(data);
ZooKeeperProtos.DeprecatedTableState.Builder builder =
ZooKeeperProtos.DeprecatedTableState.newBuilder();
int magicLen = ProtobufUtil.lengthOfPBMagic();
ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
return builder.getState();
} catch (IOException e) { } catch (IOException e) {
KeeperException ke = new KeeperException.DataInconsistencyException(); KeeperException ke = new KeeperException.DataInconsistencyException();
ke.initCause(e); ke.initCause(e);
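The restored getTableState parses the znode payload by hand: verify the short ASCII protobuf-magic prefix HBase prepends to ZK data, then merge the remainder into a DeprecatedTableState builder. The parse idiom, isolated (data is the raw znode payload; classes as imported by the file above):

ProtobufUtil.expectPBMagicPrefix(data);               // throws if prefix absent
ZooKeeperProtos.DeprecatedTableState.Builder builder =
    ZooKeeperProtos.DeprecatedTableState.newBuilder();
int magicLen = ProtobufUtil.lengthOfPBMagic();        // bytes to skip
ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
ZooKeeperProtos.DeprecatedTableState.State state = builder.getState();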

View File

@ -152,7 +152,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.slf4j.impl.Log4jLoggerAdapter; import org.slf4j.impl.Log4jLoggerAdapter;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
/** /**
@ -497,7 +496,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/** /**
* @return META table descriptor * @return META table descriptor
* @deprecated since 2.0 version and will be removed in 3.0 version. Currently for test only. * @deprecated since 2.0 version and will be removed in 3.0 version.
* use {@link #getMetaTableDescriptorBuilder()} * use {@link #getMetaTableDescriptorBuilder()}
*/ */
@Deprecated @Deprecated
@ -507,10 +506,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/** /**
* @return META table descriptor * @return META table descriptor
* @deprecated Since 2.3.0. No one should be using this internal method. Used in testing only.
*/ */
@Deprecated
@VisibleForTesting
public TableDescriptorBuilder getMetaTableDescriptorBuilder() { public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
try { try {
return FSTableDescriptors.createMetaTableDescriptorBuilder(conf); return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);

View File

@ -1,110 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
/**
* Test being able to edit hbase:meta.
*/
@Category({MiscTests.class, LargeTests.class})
public class TestHBaseMetaEdit {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestHBaseMetaEdit.class);
@Rule
public TestName name = new TestName();
private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
@Before
public void before() throws Exception {
UTIL.startMiniCluster();
}
@After
public void after() throws Exception {
UTIL.shutdownMiniCluster();
}
/**
* Set versions, set HBASE-16213 indexed block encoding, and add a column family.
* Verify they are all in place by looking at TableDescriptor AND by checking
* what the RegionServer sees after opening Region.
*/
@Test
public void testEditMeta() throws IOException {
Admin admin = UTIL.getAdmin();
admin.tableExists(TableName.META_TABLE_NAME);
admin.disableTable(TableName.META_TABLE_NAME);
assertTrue(admin.isTableDisabled(TableName.META_TABLE_NAME));
TableDescriptor descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
ColumnFamilyDescriptor cfd = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY);
byte [] extraColumnFamilyName = Bytes.toBytes("xtra");
ColumnFamilyDescriptor newCfd =
ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build();
int oldVersions = cfd.getMaxVersions();
// Add '1' to current versions count.
cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(oldVersions + 1).
setConfiguration(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING,
DataBlockEncoding.ROW_INDEX_V1.toString()).build();
admin.modifyColumnFamily(TableName.META_TABLE_NAME, cfd);
admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd);
descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
// Assert new max versions is == old versions plus 1.
assertEquals(oldVersions + 1,
descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
admin.enableTable(TableName.META_TABLE_NAME);
descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
// Assert new max versions is == old versions plus 1.
assertEquals(oldVersions + 1,
descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
assertTrue(descriptor.getColumnFamily(newCfd.getName()) != null);
String encoding = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getConfiguration().
get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
Region r = UTIL.getHBaseCluster().getRegionServer(0).
getRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
assertEquals(oldVersions + 1,
r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions());
encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().
getConfigurationValue(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
assertTrue(r.getStore(extraColumnFamilyName) != null);
}
}

View File

@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@ -537,6 +538,22 @@ public class TestAdmin2 extends TestAdminBase {
" HBase was not available"); " HBase was not available");
} }
@Test
public void testDisableCatalogTable() throws Exception {
try {
ADMIN.disableTable(TableName.META_TABLE_NAME);
fail("Expected to throw ConstraintException");
} catch (ConstraintException e) {
}
// Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
// was actually getting disabled by the disableTable() call.
HTableDescriptor htd =
new HTableDescriptor(TableName.valueOf(Bytes.toBytes(name.getMethodName())));
HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf1"));
htd.addFamily(hcd);
TEST_UTIL.getHBaseAdmin().createTable(htd);
}
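The restored test uses the long-deprecated HTableDescriptor/HColumnDescriptor API. For comparison, the same table creation with the builder API would look roughly like this (a sketch; not part of the change):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

TableDescriptor td = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(name.getMethodName()))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
    .build();
TEST_UTIL.getAdmin().createTable(td);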
@Test @Test
public void testIsEnabledOrDisabledOnUnknownTable() throws Exception { public void testIsEnabledOrDisabledOnUnknownTable() throws Exception {
try { try {

View File

@ -1,4 +1,4 @@
/* /**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding * agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the

View File

@ -39,6 +39,7 @@ import java.util.Set;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/** /**
* Class to test asynchronous table admin operations * Class to test asynchronous table admin operations
@ -53,6 +54,18 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase {
public static final HBaseClassTestRule CLASS_RULE = public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestAsyncTableAdminApi2.class); HBaseClassTestRule.forClass(TestAsyncTableAdminApi2.class);
@Test
public void testDisableCatalogTable() throws Exception {
try {
this.admin.disableTable(TableName.META_TABLE_NAME).join();
fail("Expected to throw ConstraintException");
} catch (Exception e) {
}
// Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
// was actually getting disabled by the disableTable() call.
createTableWithDefaultConf(tableName);
}
@Test @Test
public void testAddColumnFamily() throws Exception { public void testAddColumnFamily() throws Exception {
// Create a table with two families // Create a table with two families

View File

@ -18,9 +18,12 @@
package org.apache.hadoop.hbase.client; package org.apache.hadoop.hbase.client;
import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
@ -28,6 +31,7 @@ import java.util.List;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.hadoop.hbase.AsyncMetaTableAccessor; import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
@ -196,6 +200,14 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
ok = false; ok = false;
} }
assertTrue(ok); assertTrue(ok);
// the meta table cannot be disabled.
try {
admin.disableTable(TableName.META_TABLE_NAME).get();
fail("meta table can not be disabled");
} catch (ExecutionException e) {
Throwable cause = e.getCause();
assertThat(cause, instanceOf(DoNotRetryIOException.class));
}
} }
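Note the difference from TestAsyncTableAdminApi2 above: get() surfaces the failure as a checked ExecutionException (unwrapped here via getCause()), while join() would surface it as an unchecked CompletionException. A minimal, self-contained illustration:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;

CompletableFuture<Void> f = new CompletableFuture<>();
f.completeExceptionally(new RuntimeException("boom"));
try {
  f.get();                       // checked wrapper around the cause
} catch (ExecutionException e) {
  assert e.getCause() instanceof RuntimeException;
} catch (InterruptedException ignored) {
}
try {
  f.join();                      // unchecked wrapper around the cause
} catch (CompletionException e) {
  assert e.getCause() instanceof RuntimeException;
}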
@Test @Test

View File

@ -192,16 +192,6 @@ public class TestConnectionImplementation {
table.close(); table.close();
} }
// See if stats change.
LOG.info(((ConnectionImplementation)con1).tableStateCache.stats().toString());
assertEquals(0, ((ConnectionImplementation)con1).tableStateCache.stats().missCount());
try (Admin a = con1.getAdmin()) {
a.isTableDisabled(TableName.META_TABLE_NAME);
}
LOG.info(((ConnectionImplementation)con1).tableStateCache.stats().toString());
assertEquals(1, ((ConnectionImplementation)con1).tableStateCache.stats().missCount());
assertEquals(1,
((ConnectionImplementation)con1).tableStateCache.stats().loadSuccessCount());
con1.close(); con1.close();
// if the pool was created on demand it should be closed upon connection close // if the pool was created on demand it should be closed upon connection close

View File

@ -17,7 +17,6 @@
*/ */
package org.apache.hadoop.hbase.client; package org.apache.hadoop.hbase.client;
import static junit.framework.TestCase.assertTrue;
import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM; import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM;
import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.instanceOf;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
@ -129,31 +128,4 @@ public class TestZKAsyncRegistry {
} }
} }
} }
/**
* Test meta tablestate implementation.
* Test is a bit involved because meta has replicas... Replica assignment lags, so check
* between steps that all replicas are assigned.
*/
@Test
public void testMetaTableState() throws IOException, ExecutionException, InterruptedException {
assertTrue(TEST_UTIL.getMiniHBaseCluster().getMaster().isActiveMaster());
int ritTimeout = 10000;
TEST_UTIL.waitUntilNoRegionsInTransition(ritTimeout);
LOG.info("MASTER INITIALIZED");
try (ZKAsyncRegistry registry = new ZKAsyncRegistry(TEST_UTIL.getConfiguration())) {
assertEquals(TableState.State.ENABLED, registry.getMetaTableState().get().getState());
LOG.info("META ENABLED");
try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
admin.disableTable(TableName.META_TABLE_NAME);
assertEquals(TableState.State.DISABLED, registry.getMetaTableState().get().getState());
TEST_UTIL.waitUntilNoRegionsInTransition(ritTimeout);
LOG.info("META DISABLED");
admin.enableTable(TableName.META_TABLE_NAME);
assertEquals(TableState.State.ENABLED, registry.getMetaTableState().get().getState());
TEST_UTIL.waitUntilNoRegionsInTransition(ritTimeout);
LOG.info("META ENABLED");
}
}
}
} }

View File

@ -322,13 +322,8 @@ public class TestFSTableDescriptors {
} }
Map<String, TableDescriptor> tables = tds.getAll(); Map<String, TableDescriptor> tables = tds.getAll();
// Remove hbase:meta from the list. It shows up now since we made it dynamic. The schema
// is now written into the fs by the FSTableDescriptors constructor, where before it
// wasn't.
tables.remove(TableName.META_TABLE_NAME.getNameAsString());
assertEquals(4, tables.size()); assertEquals(4, tables.size());
String[] tableNamesOrdered = String[] tableNamesOrdered =
new String[] { "bar:foo", "default:bar", "default:foo", "foo:bar" }; new String[] { "bar:foo", "default:bar", "default:foo", "foo:bar" };
int i = 0; int i = 0;
@ -364,13 +359,12 @@ public class TestFSTableDescriptors {
assertTrue(nonchtds.getAll().size() == chtds.getAll().size()); assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
// add a new entry for random table name. // add a new entry for hbase:meta
TableName random = TableName.valueOf("random"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build();
TableDescriptor htd = TableDescriptorBuilder.newBuilder(random).build();
nonchtds.createTableDescriptor(htd); nonchtds.createTableDescriptor(htd);
// random will only increase the cachehit by 1 // hbase:meta will only increase the cachehit by 1
assertEquals(nonchtds.getAll().size(), chtds.getAll().size() + 1); assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) { for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) {
String t = (String) entry.getKey(); String t = (String) entry.getKey();

View File

@ -44,15 +44,12 @@ EOF
puts puts
end end
formatter.footer formatter.footer
if table.to_s != 'hbase:meta' puts
# No QUOTAS if hbase:meta table formatter.header(%w[QUOTAS])
puts count = quotas_admin.list_quotas(TABLE => table.to_s) do |_, quota|
formatter.header(%w[QUOTAS]) formatter.row([quota])
count = quotas_admin.list_quotas(TABLE => table.to_s) do |_, quota|
formatter.row([quota])
end
formatter.footer(count)
end end
formatter.footer(count)
end end
# rubocop:enable Metrics/AbcSize, Metrics/MethodLength # rubocop:enable Metrics/AbcSize, Metrics/MethodLength
end end

View File

@ -1,4 +1,4 @@
/* /**
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -61,6 +61,14 @@ public final class MetaTableLocator {
private MetaTableLocator() { private MetaTableLocator() {
} }
/**
* Checks if the meta region location is available.
* @return true if meta region location is available, false if not
*/
public static boolean isLocationAvailable(ZKWatcher zkw) {
return getMetaRegionLocation(zkw) != null;
}
/** /**
* @param zkw ZooKeeper watcher to be used * @param zkw ZooKeeper watcher to be used
* @return meta table regions and their locations. * @return meta table regions and their locations.
@ -258,7 +266,7 @@ public final class MetaTableLocator {
} }
/** /**
* Load the meta region state from the meta region server ZNode. * Load the meta region state from the meta server ZNode.
* *
* @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
* @param replicaId the ID of the replica * @param replicaId the ID of the replica
@ -298,8 +306,10 @@ public final class MetaTableLocator {
if (serverName == null) { if (serverName == null) {
state = RegionState.State.OFFLINE; state = RegionState.State.OFFLINE;
} }
return new RegionState(RegionReplicaUtil.getRegionInfoForReplica( return new RegionState(
RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName); RegionReplicaUtil.getRegionInfoForReplica(
RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId),
state, serverName);
} }
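Usage shape for the restored isLocationAvailable helper -- a sketch assuming an open ZKWatcher named zkw:

import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

// True only when the meta-location znode currently holds a ServerName.
if (MetaTableLocator.isLocationAvailable(zkw)) {
  // hbase:meta has a published location; meta reads can be routed.
}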
/** /**

View File

@ -2056,7 +2056,7 @@ public final class ZKUtil {
" byte(s) of data from znode " + znode + " byte(s) of data from znode " + znode +
(watcherSet? " and set watcher; ": "; data=") + (watcherSet? " and set watcher; ": "; data=") +
(data == null? "null": data.length == 0? "empty": ( (data == null? "null": data.length == 0? "empty": (
zkw.getZNodePaths().isMetaZNodePrefix(znode)? znode.startsWith(zkw.getZNodePaths().metaZNodePrefix)?
getServerNameOrEmptyString(data): getServerNameOrEmptyString(data):
znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode)? znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode)?
getServerNameOrEmptyString(data): getServerNameOrEmptyString(data):