HBASE-23055 Alter hbase:meta
Make it so hbase:meta can be altered. TableState for hbase:meta was hardcoded ENABLED. Make it dynamic. State is now kept in the current active Master. It is transient, so it falls back to the default (ENABLED) if the Master crashes. Add to the registry a getMetaTableState which reads the mirrored state from zookeeper (NOT from the Master, and defaults to ENABLED if there is no implementation or an error fetching state). The hbase:meta schema will be bootstrapped from the filesystem. Changes to filesystem schema are atomic, so we should be ok if the Master fails mid-edit (TBD). Undoes a bunch of guards that prevented our being able to edit hbase:meta.

TODO: Tests, more clarity around hbase:meta table state, and undoing references to the hard-coded hbase:meta regioninfo.

M hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
  Throw an illegal access exception if you try to use MetaTableAccessor to get
  the state of the hbase:meta table.

M hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
  Add fetching of hbase:meta table state from the registry. Adds a cache of
  table states with a TTL of one second (adjustable).

M hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
M hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
  Add querying of the registry for hbase:meta table state.

M hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java
  Add querying of the mirrored table state for the hbase:meta table.

M hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
  Shut down access.

M hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
  Just cleanup.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
  Add a state holder for hbase:meta. Removed unused methods.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
  Shut down access.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
  Allow hbase:meta to be disabled.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
  Allow hbase:meta to be enabled.

Signed-off-by: Bharath Vissapragada <bharathv@apache.org>
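Illustrative client-side sketch (not part of the commit; assumes a reachable cluster, and the 5000ms TTL is an arbitrary example value). It shows the two client-visible effects of this patch: table state lookups go through a short-TTL cache keyed by the new configuration property, and hbase:meta's state is answered from the registry instead of being hardcoded ENABLED.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MetaStateCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // New in this patch: how long a client caches a table state lookup (default 1000ms).
    conf.setInt("hbase.client.tablestate.cache.ttl.ms", 5000);
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Previously hardcoded to return true for hbase:meta; now answered from the
      // registry (mirrored state in zookeeper), defaulting to ENABLED.
      boolean enabled = admin.isTableEnabled(TableName.META_TABLE_NAME);
      System.out.println("hbase:meta enabled=" + enabled);
    }
  }
}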
This commit is contained in:
parent d7e7d028bd
commit d64b0e3612
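For reference, a reduced sketch of the lookup the new ZKAsyncRegistry#getMetaTableState performs, written against the plain ZooKeeper client for illustration. The znode path (/hbase/table/hbase:meta under the default base znode) and the NoNode-defaults-to-ENABLED behavior come from the diff below; the protobuf parse is elided and the class and method names here are hypothetical.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class MirroredMetaStateProbe {
  // Returns the mirrored hbase:meta state, defaulting to ENABLED when the
  // znode is absent, mirroring ZKAsyncRegistry#getMetaTableState in this patch.
  public static String metaTableState(ZooKeeper zk) throws Exception {
    String znode = "/hbase/table/hbase:meta"; // written by MirroringTableStateManager
    try {
      byte[] data = zk.getData(znode, false, null);
      // The real code strips HBase znode metadata and parses a
      // ZooKeeperProtos.DeprecatedTableState protobuf; elided here.
      return (data == null || data.length == 0) ? "ENABLED" : "<parse protobuf>";
    } catch (KeeperException.NoNodeException e) {
      return "ENABLED"; // no znode: default, as in the patch
    }
  }
}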
hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import edu.umd.cs.findbugs.annotations.NonNull;
 import edu.umd.cs.findbugs.annotations.Nullable;
 import java.io.ByteArrayOutputStream;
@@ -80,6 +81,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 
 /**
@@ -304,11 +306,18 @@ public class MetaTableAccessor {
    */
   public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo)
       throws IOException {
-    byte[] row = getMetaKeyForRegion(regionInfo);
-    Get get = new Get(row);
+    return getRegionLocation(getCatalogFamilyRow(connection, regionInfo),
+      regionInfo, regionInfo.getReplicaId());
+  }
+
+  /**
+   * @return Return the {@link HConstants#CATALOG_FAMILY} row from hbase:meta.
+   */
+  public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri)
+      throws IOException {
+    Get get = new Get(getMetaKeyForRegion(ri));
     get.addFamily(HConstants.CATALOG_FAMILY);
-    Result r = get(getMetaHTable(connection), get);
-    return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
+    return get(getMetaHTable(connection), get);
   }
 
   /** Returns the row key to use for this regionInfo */
@@ -972,7 +981,8 @@ public class MetaTableAccessor {
    * @return A ServerName instance or null if necessary fields not found or empty.
    */
   @Nullable
-  @InterfaceAudience.Private // for use by HMaster#getTableRegionRow which is used for testing only
+  // for use by HMaster#getTableRegionRow which is used for testing only
+  @InterfaceAudience.Private
   public static ServerName getServerName(final Result r, final int replicaId) {
     byte[] serverColumn = getServerColumn(replicaId);
     Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn);
@@ -1111,9 +1121,8 @@ public class MetaTableAccessor {
   @Nullable
   public static TableState getTableState(Connection conn, TableName tableName)
       throws IOException {
-    if (tableName.equals(TableName.META_TABLE_NAME)) {
-      return new TableState(tableName, TableState.State.ENABLED);
-    }
+    Preconditions.checkArgument(!tableName.equals(TableName.META_TABLE_NAME),
+      "Not for hbase:meta state");
     Table metaHTable = getMetaHTable(conn);
     Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
     Result result = metaHTable.get(get);
@@ -1140,7 +1149,8 @@ public class MetaTableAccessor {
   }
 
   /**
-   * Updates state in META
+   * Updates state in META.
+   * Do not use. For internal use only.
    * @param conn connection to use
    * @param tableName table to look for
    */
AsyncRegistry.java

@@ -21,6 +21,7 @@ import java.io.Closeable;
 import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -29,12 +30,26 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Private
 interface AsyncRegistry extends Closeable {
+  /**
+   * A completed CompletableFuture to host default hbase:meta table state (ENABLED).
+   */
+  TableState ENABLED_META_TABLE_STATE =
+    new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED);
+  CompletableFuture<TableState> COMPLETED_GET_META_TABLE_STATE =
+    CompletableFuture.completedFuture(ENABLED_META_TABLE_STATE);
+
   /**
    * Get the location of meta region.
    */
   CompletableFuture<RegionLocations> getMetaRegionLocation();
 
+  /**
+   * The hbase:meta table state.
+   */
+  default CompletableFuture<TableState> getMetaTableState() {
+    return COMPLETED_GET_META_TABLE_STATE;
+  }
+
   /**
    * Should only be called once.
    * <p>
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

@@ -1,5 +1,4 @@
-/**
- *
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -86,6 +85,9 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
+import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
+import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
 import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
@@ -154,6 +156,21 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server";
   private static final Logger LOG = LoggerFactory.getLogger(ConnectionImplementation.class);
 
+  /**
+   * TableState cache.
+   * Table States change super rarely. In the synchronous client, state can be queried a lot,
+   * particularly when Regions are moving. It is ok if we are not super responsive noticing
+   * a Table State change. So, cache the last look up for a period. Use
+   * {@link #TABLESTATE_CACHE_DURATION_MS} to change the default of one second.
+   * NOT-private to allow external readers of generated cache stats.
+   */
+  final LoadingCache<TableName, TableState> tableStateCache;
+
+  /**
+   * Duration in milliseconds a tablestate endures in the cache of tablestates.
+   */
+  public static final String TABLESTATE_CACHE_DURATION_MS = "hbase.client.tablestate.cache.ttl.ms";
+
   private static final String RESOLVE_HOSTNAME_ON_FAIL_KEY = "hbase.resolve.hostnames.on.failure";
 
   private final boolean hostnamesCanChange;
@@ -330,6 +347,26 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       close();
       throw e;
     }
+    // Create tablestate cache. Add a loader that knows how to find table state.
+    int duration = this.conf.getInt(TABLESTATE_CACHE_DURATION_MS, 1000);
+    this.tableStateCache = CacheBuilder.newBuilder().
+      expireAfterWrite(duration, TimeUnit.MILLISECONDS).
+      recordStats().
+      build(new CacheLoader<TableName, TableState>() {
+        @Override
+        public TableState load(TableName tableName) throws Exception {
+          if (tableName.equals(TableName.META_TABLE_NAME)) {
+            // We cannot get hbase:meta state by reading the hbase:meta table. Read the registry.
+            return registry.getMetaTableState().get();
+          }
+          TableState ts =
+            MetaTableAccessor.getTableState(ConnectionImplementation.this, tableName);
+          if (ts == null) {
+            throw new TableNotFoundException(tableName);
+          }
+          return ts;
+        }
+      });
   }
 
   private void spawnRenewalChore(final UserGroupInformation user) {
@@ -429,7 +466,19 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
 
   @Override
   public Admin getAdmin() throws IOException {
-    return new HBaseAdmin(this);
+    return new HBaseAdmin(this) {
+      @Override
+      public void enableTable(TableName tableName) throws IOException {
+        super.enableTable(tableName);
+        ConnectionImplementation.this.tableStateCache.invalidate(tableName);
+      }
+
+      @Override
+      public void disableTable(TableName tableName) throws IOException {
+        super.disableTable(tableName);
+        ConnectionImplementation.this.tableStateCache.invalidate(tableName);
+      }
+    };
   }
 
   @Override
@@ -754,13 +803,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   @Override
   public RegionLocations relocateRegion(final TableName tableName,
       final byte [] row, int replicaId) throws IOException{
-    // Since this is an explicit request not to use any caching, finding
-    // disabled tables should not be desirable. This will ensure that an exception is thrown when
-    // the first time a disabled table is interacted with.
-    if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) {
+    if (isTableDisabled(tableName)) {
       throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
     }
-
     return locateRegion(tableName, row, false, true, replicaId);
   }
 
@@ -2057,11 +2102,15 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   @Override
   public TableState getTableState(TableName tableName) throws IOException {
     checkClosed();
-    TableState tableState = MetaTableAccessor.getTableState(this, tableName);
-    if (tableState == null) {
-      throw new TableNotFoundException(tableName);
+    try {
+      return this.tableStateCache.get(tableName);
+    } catch (ExecutionException e) {
+      // Throws ExecutionException for any exceptions fetching table state. Probably an IOE.
+      if (e.getCause() instanceof IOException) {
+        throw (IOException)e.getCause();
+      }
+      throw new IOException(e);
     }
-    return tableState;
   }
 
   @Override
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -543,7 +543,9 @@ public class HBaseAdmin implements Admin {
   static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection,
       RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
       int operationTimeout, int rpcTimeout) throws IOException {
-    if (tableName == null) return null;
+    if (tableName == null) {
+      return null;
+    }
     TableDescriptor td =
       executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
         @Override
@@ -948,22 +950,13 @@ public class HBaseAdmin implements Admin {
   @Override
   public boolean isTableEnabled(final TableName tableName) throws IOException {
     checkTableExists(tableName);
-    return executeCallable(new RpcRetryingCallable<Boolean>() {
-      @Override
-      protected Boolean rpcCall(int callTimeout) throws Exception {
-        TableState tableState = MetaTableAccessor.getTableState(getConnection(), tableName);
-        if (tableState == null) {
-          throw new TableNotFoundException(tableName);
-        }
-        return tableState.inStates(TableState.State.ENABLED);
-      }
-    });
+    return this.connection.getTableState(tableName).isEnabled();
   }
 
   @Override
   public boolean isTableDisabled(TableName tableName) throws IOException {
     checkTableExists(tableName);
-    return connection.isTableDisabled(tableName);
+    return this.connection.getTableState(tableName).isDisabled();
   }
 
   @Override
@@ -4357,5 +4350,4 @@ public class HBaseAdmin implements Admin {
     });
-
   }
 
 }
MasterKeepAliveConnection.java

@@ -1,4 +1,4 @@
-/**
+/*
 * Copyright The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
@@ -20,9 +20,12 @@
 package org.apache.hadoop.hbase.client;
 
+import java.io.Closeable;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.yetus.audience.InterfaceAudience;
 
+
 /**
 * A KeepAlive connection is not physically closed immediately after the close,
 * but rather kept alive for a few minutes. It makes sense only if it is shared.
@@ -35,7 +38,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 * final user code. Hence it's package protected.
 */
 @InterfaceAudience.Private
-interface MasterKeepAliveConnection extends MasterProtos.MasterService.BlockingInterface {
-  // Do this instead of implement Closeable because closeable returning IOE is PITA.
+interface MasterKeepAliveConnection extends
+    MasterProtos.MasterService.BlockingInterface, Closeable {
   void close();
 }
hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java

@@ -89,6 +89,7 @@ import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
 import org.apache.hadoop.hbase.security.access.UserPermission;
+
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -661,22 +662,38 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       new DisableTableProcedureBiConsumer(tableName));
   }
 
-  @Override
-  public CompletableFuture<Boolean> isTableEnabled(TableName tableName) {
-    if (TableName.isMetaTableName(tableName)) {
-      return CompletableFuture.completedFuture(true);
-    }
-    CompletableFuture<Boolean> future = new CompletableFuture<>();
-    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
-      if (error != null) {
-        future.completeExceptionally(error);
-        return;
-      }
-      if (state.isPresent()) {
-        future.complete(state.get().inStates(TableState.State.ENABLED));
-      } else {
-        future.completeExceptionally(new TableNotFoundException(tableName));
-      }
-    });
-    return future;
-  }
+  /**
+   * Utility for completing passed TableState {@link CompletableFuture} <code>future</code>
+   * using passed parameters.
+   */
+  private static CompletableFuture<Boolean> completeCheckTableState(
+      CompletableFuture<Boolean> future, TableState tableState, Throwable error,
+      TableState.State targetState, TableName tableName) {
+    if (error != null) {
+      future.completeExceptionally(error);
+    } else if (tableState != null) {
+      future.complete(tableState.inStates(targetState));
+    } else {
+      future.completeExceptionally(new TableNotFoundException(tableName));
+    }
+    return future;
+  }
+
+  @Override
+  public CompletableFuture<Boolean> isTableEnabled(TableName tableName) {
+    if (TableName.isMetaTableName(tableName)) {
+      CompletableFuture<Boolean> future = new CompletableFuture<>();
+      addListener(this.connection.registry.getMetaTableState(), (tableState, error) -> {
+        completeCheckTableState(future, tableState, error, TableState.State.ENABLED, tableName);
+      });
+      return future;
+    }
+    CompletableFuture<Boolean> future = new CompletableFuture<>();
+    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> {
+      completeCheckTableState(future, tableState.isPresent()? tableState.get(): null, error,
+        TableState.State.ENABLED, tableName);
+    });
+    return future;
+  }
@@ -684,19 +701,16 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
   @Override
   public CompletableFuture<Boolean> isTableDisabled(TableName tableName) {
     if (TableName.isMetaTableName(tableName)) {
-      return CompletableFuture.completedFuture(false);
+      CompletableFuture<Boolean> future = new CompletableFuture<>();
+      addListener(this.connection.registry.getMetaTableState(), (tableState, error) -> {
+        completeCheckTableState(future, tableState, error, TableState.State.DISABLED, tableName);
+      });
+      return future;
     }
     CompletableFuture<Boolean> future = new CompletableFuture<>();
-    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
-      if (error != null) {
-        future.completeExceptionally(error);
-        return;
-      }
-      if (state.isPresent()) {
-        future.complete(state.get().inStates(TableState.State.DISABLED));
-      } else {
-        future.completeExceptionally(new TableNotFoundException(tableName));
-      }
+    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> {
+      completeCheckTableState(future, tableState.isPresent()? tableState.get(): null, error,
+        TableState.State.DISABLED, tableName);
     });
     return future;
   }
RegionServerCallable.java

@@ -215,8 +215,7 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
   @Override
   public void prepare(final boolean reload) throws IOException {
     // check table state if this is a retry
-    if (reload && tableName != null && !tableName.equals(TableName.META_TABLE_NAME)
-        && getConnection().isTableDisabled(tableName)) {
+    if (reload && tableName != null && getConnection().isTableDisabled(tableName)) {
       throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
     }
     try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java

@@ -27,6 +27,7 @@ import static org.apache.hadoop.hbase.zookeeper.ZKMetadata.removeMetaData;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.mutable.MutableInt;
 import org.apache.hadoop.conf.Configuration;
@@ -34,15 +35,18 @@ import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
 
@@ -58,8 +62,19 @@ class ZKAsyncRegistry implements AsyncRegistry {
 
   private final ZNodePaths znodePaths;
 
+  /**
+   * A znode maintained by MirroringTableStateManager.
+   * MirroringTableStateManager is deprecated, to be removed in hbase3. It can also be disabled.
+   * Make sure it is enabled if you want to alter the hbase:meta table in hbase2. In hbase3,
+   * it is TBD how metatable state will be hosted; likely on the active hbase master.
+   */
+  private final String znodeMirroredMetaTableState;
+
   ZKAsyncRegistry(Configuration conf) {
     this.znodePaths = new ZNodePaths(conf);
+    this.znodeMirroredMetaTableState =
+      ZNodePaths.joinZNode(this.znodePaths.tableZNode, TableName.META_TABLE_NAME.getNameAsString());
     this.zk = new ReadOnlyZKClient(conf);
   }
 
@@ -155,7 +170,8 @@ class ZKAsyncRegistry implements AsyncRegistry {
     }
     Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
     if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
-      LOG.warn("Meta region is in state " + stateAndServerName.getFirst());
+      LOG.warn("hbase:meta region (replicaId={}) is in state {}", replicaId,
+        stateAndServerName.getFirst());
     }
     locs[DEFAULT_REPLICA_ID] = new HRegionLocation(
       getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond());
@@ -170,7 +186,7 @@ class ZKAsyncRegistry implements AsyncRegistry {
           LOG.warn("Failed to fetch " + path, error);
           locs[replicaId] = null;
         } else if (proto == null) {
-          LOG.warn("Meta znode for replica " + replicaId + " is null");
+          LOG.warn("hbase:meta znode for replica " + replicaId + " is null");
           locs[replicaId] = null;
         } else {
           Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
@@ -194,9 +210,8 @@ class ZKAsyncRegistry implements AsyncRegistry {
   public CompletableFuture<RegionLocations> getMetaRegionLocation() {
     CompletableFuture<RegionLocations> future = new CompletableFuture<>();
     addListener(
-      zk.list(znodePaths.baseZNode)
-        .thenApply(children -> children.stream()
-          .filter(c -> c.startsWith(znodePaths.metaZNodePrefix)).collect(Collectors.toList())),
+      zk.list(znodePaths.baseZNode).thenApply(children -> children.stream().
+        filter(c -> znodePaths.isMetaZNodePrefix(c)).collect(Collectors.toList())),
       (metaReplicaZNodes, error) -> {
         if (error != null) {
           future.completeExceptionally(error);
@@ -229,6 +244,43 @@ class ZKAsyncRegistry implements AsyncRegistry {
       });
   }
 
+  @Override
+  public CompletableFuture<TableState> getMetaTableState() {
+    return getAndConvert(this.znodeMirroredMetaTableState, ZKAsyncRegistry::getTableState).
+      thenApply(state -> {
+        return state == null || state.equals(ENABLED_META_TABLE_STATE.getState())?
+          ENABLED_META_TABLE_STATE: new TableState(TableName.META_TABLE_NAME, state);
+      }).exceptionally(e -> {
+        // Handle the case where there is no znode. Return the default ENABLED in this case:
+        // Caused by: java.io.IOException: java.util.concurrent.ExecutionException:
+        //   java.util.concurrent.ExecutionException:
+        //   org.apache.zookeeper.KeeperException$NoNodeException: KeeperErrorCode = NoNode for
+        //   /hbase/table/hbase:meta
+        // If not the case above, then rethrow, wrapping if necessary. See
+        // https://stackoverflow.com/questions/55453961/
+        //   completablefutureexceptionally-rethrow-checked-exception
+        if (e.getCause() instanceof KeeperException.NoNodeException) {
+          return ENABLED_META_TABLE_STATE;
+        }
+        throw e instanceof CompletionException? (CompletionException)e:
+          new CompletionException(e);
+      });
+  }
+
+  /**
+   * Get tablestate from the data byte array found in the mirroring znode of table state.
+   */
+  private static TableState.State getTableState(byte[] data) throws DeserializationException {
+    if (data == null || data.length == 0) {
+      return null;
+    }
+    try {
+      return ProtobufUtil.toTableState(ProtobufUtil.toTableState(removeMetaData(data)));
+    } catch (IOException ioe) {
+      throw new DeserializationException(ioe);
+    }
+  }
+
   @Override
   public void close() {
     zk.close();
ProtobufUtil.java

@@ -87,6 +87,7 @@ import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.client.SnapshotType;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -3372,4 +3373,46 @@ public final class ProtobufUtil {
       .build();
   }
 
+  /**
+   * Parses pb TableState from <code>data</code>.
+   */
+  public static ZooKeeperProtos.DeprecatedTableState.State toTableState(byte [] data)
+      throws DeserializationException, IOException {
+    if (data == null || data.length <= 0) {
+      return null;
+    }
+    ProtobufUtil.expectPBMagicPrefix(data);
+    ZooKeeperProtos.DeprecatedTableState.Builder builder =
+      ZooKeeperProtos.DeprecatedTableState.newBuilder();
+    int magicLen = ProtobufUtil.lengthOfPBMagic();
+    ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
+    return builder.getState();
+  }
+
+  /**
+   * @return Convert from pb TableState to pojo TableState.
+   */
+  public static TableState.State toTableState(ZooKeeperProtos.DeprecatedTableState.State state) {
+    TableState.State newState = TableState.State.ENABLED;
+    if (state != null) {
+      switch (state) {
+        case ENABLED:
+          newState = TableState.State.ENABLED;
+          break;
+        case DISABLED:
+          newState = TableState.State.DISABLED;
+          break;
+        case DISABLING:
+          newState = TableState.State.DISABLING;
+          break;
+        case ENABLING:
+          newState = TableState.State.ENABLING;
+          break;
+        default:
+      }
+    }
+    return newState;
+  }
 }
hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -24,6 +24,7 @@ import static org.apache.hadoop.hbase.HConstants.SPLIT_LOGDIR_NAME;
 import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
 import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
 
+import java.util.Collection;
 import java.util.Optional;
 import java.util.stream.IntStream;
 import org.apache.hadoop.conf.Configuration;
@@ -40,15 +41,24 @@ public class ZNodePaths {
   // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved.
   public static final char ZNODE_PATH_SEPARATOR = '/';
 
-  public final static String META_ZNODE_PREFIX = "meta-region-server";
+  private static final String META_ZNODE_PREFIX = "meta-region-server";
   private static final String DEFAULT_SNAPSHOT_CLEANUP_ZNODE = "snapshot-cleanup";
 
   // base znode for this cluster
   public final String baseZNode;
-  // the prefix of meta znode, does not include baseZNode.
-  public final String metaZNodePrefix;
-  // znodes containing the locations of the servers hosting the meta replicas
-  public final ImmutableMap<Integer, String> metaReplicaZNodes;
+
+  /**
+   * The prefix of meta znode. Does not include baseZNode.
+   * It's a 'prefix' because the meta replica id integer can be tagged on the end (if
+   * no number is present, it is the 'default' replica).
+   */
+  private final String metaZNodePrefix;
+
+  /**
+   * znodes containing the locations of the servers hosting the meta replicas
+   */
+  private final ImmutableMap<Integer, String> metaReplicaZNodes;
 
   // znode containing ephemeral nodes of the regionservers
   public final String rsZNode;
   // znode containing ephemeral nodes of the draining regionservers
@@ -154,21 +164,21 @@ public class ZNodePaths {
   }
 
   /**
    * Is the znode of any meta replica
-   * @param node
-   * @return true or false
+   * @return true if the znode is a meta region replica
    */
   public boolean isAnyMetaReplicaZNode(String node) {
-    if (metaReplicaZNodes.containsValue(node)) {
-      return true;
-    }
-    return false;
+    return this.metaReplicaZNodes.containsValue(node);
   }
 
   /**
-   * Get the znode string corresponding to a replicaId
-   * @param replicaId
-   * @return znode
+   * @return Meta Replica ZNodes
+   */
+  public Collection<String> getMetaReplicaZNodes() {
+    return this.metaReplicaZNodes.values();
+  }
+
+  /**
+   * @return the znode string corresponding to a replicaId
    */
   public String getZNodeForReplica(int replicaId) {
     // return a newly created path but don't update the cache of paths
@@ -179,24 +189,21 @@ public class ZNodePaths {
   }
 
   /**
-   * Parse the meta replicaId from the passed znode
+   * Parse the meta replicaId from the passed znode name.
    * @param znode the name of the znode, does not include baseZNode
    * @return replicaId
    */
   public int getMetaReplicaIdFromZnode(String znode) {
-    if (znode.equals(metaZNodePrefix)) {
-      return RegionInfo.DEFAULT_REPLICA_ID;
-    }
-    return Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
+    return znode.equals(metaZNodePrefix)?
+      RegionInfo.DEFAULT_REPLICA_ID:
+      Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
   }
 
   /**
-   * Is it the default meta replica's znode
-   * @param znode the name of the znode, does not include baseZNode
-   * @return true or false
+   * @return True if meta znode.
   */
-  public boolean isDefaultMetaReplicaZnode(String znode) {
-    return metaReplicaZNodes.get(DEFAULT_REPLICA_ID).equals(znode);
+  public boolean isMetaZNodePrefix(String znode) {
+    return znode != null && znode.startsWith(this.metaZNodePrefix);
   }
 
  /**
HConstants.java

@@ -27,7 +27,6 @@ import java.util.List;
 import java.util.UUID;
 import java.util.regex.Pattern;
 
-import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -1225,12 +1224,6 @@ public final class HConstants {
     HBCK_SIDELINEDIR_NAME, HBASE_TEMP_DIRECTORY, MIGRATION_NAME
   }));
 
-  /** Directories that are not HBase user table directories */
-  public static final List<String> HBASE_NON_USER_TABLE_DIRS =
-    Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
-      new String[] { TableName.META_TABLE_NAME.getNameAsString() },
-      HBASE_NON_TABLE_DIRS.toArray())));
-
   /** Health script related settings. */
   public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location";
   public static final String HEALTH_SCRIPT_TIMEOUT = "hbase.node.health.script.timeout";
CommonFSUtils.java

@@ -703,6 +703,8 @@ public abstract class CommonFSUtils {
       if (LOG.isTraceEnabled()) {
         LOG.trace("{} doesn't exist", dir);
       }
+    } catch (IllegalArgumentException iae) {
+      int x = 0; // Dead debug statement, present in the commit as published.
     }
     if (status == null || status.length < 1) {
       return null;
hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java

@@ -25,25 +25,19 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 
 /**
 * Get, remove and modify table descriptors.
 * Used by servers to host descriptors.
 */
 @InterfaceAudience.Private
 public interface TableDescriptors {
   /**
-   * @param tableName
    * @return TableDescriptor for tablename
-   * @throws IOException
    */
-  TableDescriptor get(final TableName tableName)
-      throws IOException;
+  TableDescriptor get(final TableName tableName) throws IOException;
 
   /**
    * Get Map of all NamespaceDescriptors for a given namespace.
    * @return Map of all descriptors.
-   * @throws IOException
    */
-  Map<String, TableDescriptor> getByNamespace(String name)
-      throws IOException;
+  Map<String, TableDescriptor> getByNamespace(String name) throws IOException;
 
   /**
    * Get Map of all TableDescriptors. Populates the descriptor cache as a
@@ -51,25 +45,19 @@ public interface TableDescriptors {
    * Notice: the key of map is the table name which contains namespace. It was generated by
    * {@link TableName#getNameWithNamespaceInclAsString()}.
    * @return Map of all descriptors.
-   * @throws IOException
    */
   Map<String, TableDescriptor> getAll() throws IOException;
 
   /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
-   * @throws IOException
    */
-  void add(final TableDescriptor htd)
-      throws IOException;
+  void add(final TableDescriptor htd) throws IOException;
 
   /**
-   * @param tablename
    * @return Instance of table descriptor or null if none found.
-   * @throws IOException
    */
-  TableDescriptor remove(final TableName tablename)
-      throws IOException;
+  TableDescriptor remove(final TableName tablename) throws IOException;
 
   /**
    * Enables the tabledescriptor cache
HMaster.java

@@ -1025,7 +1025,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     RegionState rs = this.assignmentManager.getRegionStates().
       getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO);
     LOG.info("hbase:meta {}", rs);
-    if (rs.isOffline()) {
+    if (rs != null && rs.isOffline()) {
       Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
         .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
       initMetaProc = optProc.orElseGet(() -> {
MirroringTableStateManager.java

@@ -38,7 +38,8 @@ import org.slf4j.LoggerFactory;
 * mirroring. See in HMaster where we make the choice. The below does zk updates on a best-effort
 * basis only. If we fail updating zk we keep going because only hbase1 clients suffer; we'll just
 * log at WARN level.
- * @deprecated Since 2.0.0. To be removed in 3.0.0.
+ * @deprecated Since 2.0.0. To be removed in 3.0.0. ZKRegistry#getMetaTableState reads the
+ * mirrored state, so add an alternative mechanism before purging, else hbase:meta cannot be disabled.
 */
 @Deprecated
 @InterfaceAudience.Private
@@ -47,7 +48,7 @@ public class MirroringTableStateManager extends TableStateManager {
 
   /**
    * Set this key to true in Configuration to enable mirroring of table state out to zookeeper so
-   * hbase-1.x clients can pick-up table state.
+   * hbase-1.x clients can pick-up table state. Default value is 'true'.
    */
   static final String MIRROR_TABLE_STATE_TO_ZK_KEY = "hbase.mirror.table.state.to.zookeeper";
hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java

@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
 import org.apache.hadoop.hbase.util.IdReadWriteLock;
 import org.apache.hadoop.hbase.util.ZKDataMigrator;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -53,8 +52,20 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 // TODO: Make this a guava Service
 @InterfaceAudience.Private
 public class TableStateManager {
-
   private static final Logger LOG = LoggerFactory.getLogger(TableStateManager.class);
+
+  /**
+   * All table state is kept in hbase:meta except that of hbase:meta itself.
+   * hbase:meta state is kept here locally in this in-memory variable. State
+   * for hbase:meta is not persistent. If this process dies, the hbase:meta
+   * state reverts to enabled. State is used so we can edit hbase:meta as we
+   * would any other table by disabling, altering, and then re-enabling. If this
+   * process dies in the midst of an edit, the table reverts to enabled. Schema
+   * is read from the filesystem. It is changed atomically so if we die midway
+   * through an edit we should be good.
+   */
+  private TableState.State metaTableState = TableState.State.ENABLED;
+
   /**
    * Set this key to false in Configuration to disable migrating table state from zookeeper
    * to the hbase:meta table.
@@ -68,7 +79,7 @@ public class TableStateManager {
   private final ConcurrentMap<TableName, TableState.State> tableName2State =
     new ConcurrentHashMap<>();
 
-  public TableStateManager(MasterServices master) {
+  TableStateManager(MasterServices master) {
     this.master = master;
   }
 
@@ -87,61 +98,6 @@ public class TableStateManager {
     }
   }
 
-  /**
-   * Set table state to provided but only if table in specified states Caller should lock table on
-   * write.
-   * @param tableName table to change state for
-   * @param newState new state
-   * @param states states to check against
-   * @return null if succeed or table state if failed
-   */
-  public TableState setTableStateIfInStates(TableName tableName, TableState.State newState,
-      TableState.State... states) throws IOException {
-    ReadWriteLock lock = tnLock.getLock(tableName);
-    lock.writeLock().lock();
-    try {
-      TableState currentState = readMetaState(tableName);
-      if (currentState == null) {
-        throw new TableNotFoundException(tableName);
-      }
-      if (currentState.inStates(states)) {
-        updateMetaState(tableName, newState);
-        return null;
-      } else {
-        return currentState;
-      }
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Set table state to provided but only if table not in specified states Caller should lock table
-   * on write.
-   * @param tableName table to change state for
-   * @param newState new state
-   * @param states states to check against
-   */
-  public boolean setTableStateIfNotInStates(TableName tableName, TableState.State newState,
-      TableState.State... states) throws IOException {
-    ReadWriteLock lock = tnLock.getLock(tableName);
-    lock.writeLock().lock();
-    try {
-      TableState currentState = readMetaState(tableName);
-      if (currentState == null) {
-        throw new TableNotFoundException(tableName);
-      }
-      if (!currentState.inStates(states)) {
-        updateMetaState(tableName, newState);
-        return true;
-      } else {
-        return false;
-      }
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
   public boolean isTableState(TableName tableName, TableState.State... states) {
     try {
       TableState tableState = getTableState(tableName);
@@ -155,6 +111,7 @@ public class TableStateManager {
 
   public void setDeletedTable(TableName tableName) throws IOException {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
+      // Can't delete the hbase:meta table.
       return;
     }
     ReadWriteLock lock = tnLock.getLock(tableName);
@@ -183,7 +140,7 @@ public class TableStateManager {
    * @param states filter by states
    * @return tables in given states
   */
-  public Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
+  Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
     // Only be called in region normalizer, will not use cache.
     final Set<TableName> rv = Sets.newHashSet();
     MetaTableAccessor.fullScanTables(master.getConnection(), new MetaTableAccessor.Visitor() {
@@ -199,12 +156,6 @@ public class TableStateManager {
     return rv;
   }
 
-  public static class TableStateNotFoundException extends TableNotFoundException {
-    TableStateNotFoundException(TableName tableName) {
-      super(tableName.getNameAsString());
-    }
-  }
-
   @NonNull
   public TableState getTableState(TableName tableName) throws IOException {
     ReadWriteLock lock = tnLock.getLock(tableName);
@@ -212,7 +163,7 @@ public class TableStateManager {
     try {
       TableState currentState = readMetaState(tableName);
       if (currentState == null) {
-        throw new TableStateNotFoundException(tableName);
+        throw new TableNotFoundException("No state found for " + tableName);
       }
       return currentState;
     } finally {
@@ -221,22 +172,18 @@ public class TableStateManager {
   }
 
   private void updateMetaState(TableName tableName, TableState.State newState) throws IOException {
-    if (tableName.equals(TableName.META_TABLE_NAME)) {
-      if (TableState.State.DISABLING.equals(newState) ||
-          TableState.State.DISABLED.equals(newState)) {
-        throw new IllegalArgumentIOException("Cannot disable the meta table; " + newState);
-      }
-      // Otherwise, just return; no need to set ENABLED on meta -- it is always ENABLED.
-      return;
-    }
     boolean succ = false;
     try {
-      MetaTableAccessor.updateTableState(master.getConnection(), tableName, newState);
-      tableName2State.put(tableName, newState);
+      if (tableName.equals(TableName.META_TABLE_NAME)) {
+        this.metaTableState = newState;
+      } else {
+        MetaTableAccessor.updateTableState(master.getConnection(), tableName, newState);
+      }
+      this.tableName2State.put(tableName, newState);
       succ = true;
     } finally {
       if (!succ) {
-        tableName2State.remove(tableName);
+        this.tableName2State.remove(tableName);
      }
    }
    metaStateUpdated(tableName, newState);
@@ -255,7 +202,9 @@ public class TableStateManager {
     if (state != null) {
       return new TableState(tableName, state);
     }
-    TableState tableState = MetaTableAccessor.getTableState(master.getConnection(), tableName);
+    TableState tableState = tableName.equals(TableName.META_TABLE_NAME)?
+      new TableState(TableName.META_TABLE_NAME, this.metaTableState):
+      MetaTableAccessor.getTableState(master.getConnection(), tableName);
     if (tableState != null) {
       tableName2State.putIfAbsent(tableName, tableState.getState());
     }
@@ -263,10 +212,8 @@ public class TableStateManager {
   }
 
   public void start() throws IOException {
-    TableDescriptors tableDescriptors = master.getTableDescriptors();
     migrateZooKeeper();
-    Connection connection = master.getConnection();
-    fixTableStates(tableDescriptors, connection);
+    fixTableStates(master.getTableDescriptors(), master.getConnection());
   }
 
   private void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
@@ -335,7 +282,7 @@ public class TableStateManager {
     TableState ts = null;
     try {
       ts = getTableState(entry.getKey());
-    } catch (TableStateNotFoundException e) {
+    } catch (TableNotFoundException e) {
       // This can happen; table exists but no TableState.
     }
     if (ts == null) {
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java

@@ -146,8 +146,7 @@ public class RegionStateStore {
     }
   }
 
-  public void updateRegionLocation(RegionStateNode regionStateNode)
-      throws IOException {
+  void updateRegionLocation(RegionStateNode regionStateNode) throws IOException {
     if (regionStateNode.getRegionInfo().isMetaRegion()) {
       updateMetaLocation(regionStateNode.getRegionInfo(), regionStateNode.getRegionLocation(),
         regionStateNode.getState());
CreateTableProcedure.java

@@ -78,9 +78,7 @@ public class CreateTableProcedure
   @Override
   protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableState state)
       throws InterruptedException {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace(this + " execute state=" + state);
-    }
+    LOG.info("{} execute state={}", this, state);
     try {
       switch (state) {
         case CREATE_TABLE_PRE_OPERATION:
@@ -320,8 +318,7 @@ public class CreateTableProcedure
     // using a copy of descriptor, table will be created enabling first
     final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
     ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
-      .createTableDescriptorForTableDirectory(
-        tempTableDir, tableDescriptor, false);
+      .createTableDescriptorForTableDirectory(tempTableDir, tableDescriptor, false);
 
     // 2. Create Regions
     newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java

@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.TableStateManager;
@@ -109,8 +108,8 @@ public class DisableTableProcedure
           setNextState(DisableTableState.DISABLE_TABLE_ADD_REPLICATION_BARRIER);
           break;
         case DISABLE_TABLE_ADD_REPLICATION_BARRIER:
-          if (env.getMasterServices().getTableDescriptors().get(tableName)
-              .hasGlobalReplicationScope()) {
+          if (env.getMasterServices().getTableDescriptors().get(tableName).
+              hasGlobalReplicationScope()) {
             MasterFileSystem fs = env.getMasterFileSystem();
             try (BufferedMutator mutator = env.getMasterServices().getConnection()
                 .getBufferedMutator(TableName.META_TABLE_NAME)) {
@@ -242,10 +241,7 @@ public class DisableTableProcedure
   */
   private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
     boolean canTableBeDisabled = true;
-    if (tableName.equals(TableName.META_TABLE_NAME)) {
-      setFailure("master-disable-table", new ConstraintException("Cannot disable catalog table"));
-      canTableBeDisabled = false;
-    } else if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+    if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
       setFailure("master-disable-table", new TableNotFoundException(tableName));
       canTableBeDisabled = false;
     } else if (!skipTableStateCheck) {
@ -1,4 +1,4 @@
|
|||
/**
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
|
@ -27,11 +27,9 @@ import org.apache.hadoop.hbase.TableName;
|
|||
import org.apache.hadoop.hbase.TableNotDisabledException;
|
||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
||||
import org.apache.hadoop.hbase.client.Connection;
|
||||
import org.apache.hadoop.hbase.client.Get;
|
||||
import org.apache.hadoop.hbase.client.RegionInfo;
|
||||
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
|
||||
import org.apache.hadoop.hbase.client.Result;
|
||||
import org.apache.hadoop.hbase.client.Table;
|
||||
import org.apache.hadoop.hbase.client.TableDescriptor;
|
||||
import org.apache.hadoop.hbase.client.TableState;
|
||||
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
|
||||
|
@ -99,66 +97,55 @@ public class EnableTableProcedure
|
|||
setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE);
|
||||
break;
|
||||
case ENABLE_TABLE_MARK_REGIONS_ONLINE:
|
||||
// Get the region replica count. If changed since disable, need to do
|
||||
// more work assigning.
|
||||
Connection connection = env.getMasterServices().getConnection();
|
||||
// we will need to get the tableDescriptor here to see if there is a change in the replica
|
||||
// count
|
||||
TableDescriptor hTableDescriptor =
|
||||
TableDescriptor tableDescriptor =
|
||||
env.getMasterServices().getTableDescriptors().get(tableName);
|
||||
|
||||
// Get the replica count
|
||||
int regionReplicaCount = hTableDescriptor.getRegionReplication();
|
||||
|
||||
// Get the regions for the table from memory; get both online and offline regions
|
||||
// ('true').
|
||||
int configuredReplicaCount = tableDescriptor.getRegionReplication();
|
||||
// Get regions for the table from memory; get both online and offline regions ('true').
|
||||
List<RegionInfo> regionsOfTable =
|
||||
env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true);
|
||||
|
||||
int currentMaxReplica = 0;
|
||||
// Check if the regions in memory have replica regions as marked in META table
|
||||
for (RegionInfo regionInfo : regionsOfTable) {
|
||||
if (regionInfo.getReplicaId() > currentMaxReplica) {
|
||||
// Iterating through all the list to identify the highest replicaID region.
|
||||
// We can stop after checking with the first set of regions??
|
||||
currentMaxReplica = regionInfo.getReplicaId();
|
||||
}
|
||||
}
|
||||
// How many replicas do we currently have? Check regions returned from
|
||||
// in-memory state.
|
||||
int currentMaxReplica = getMaxReplicaId(regionsOfTable);
|
||||
|
||||
// read the META table to know the actual number of replicas for the table - if there
|
||||
// was a table modification on region replica then this will reflect the new entries also
|
||||
int replicasFound =
|
||||
getNumberOfReplicasFromMeta(connection, regionReplicaCount, regionsOfTable);
|
||||
assert regionReplicaCount - 1 == replicasFound;
|
||||
LOG.info(replicasFound + " META entries added for the given regionReplicaCount "
|
||||
+ regionReplicaCount + " for the table " + tableName.getNameAsString());
|
||||
if (currentMaxReplica == (regionReplicaCount - 1)) {
|
||||
// Read the META table to know the number of replicas the table currently has.
|
||||
// If there was a table modification on region replica count then need to
|
||||
// adjust replica counts here.
|
||||
int replicasFound = TableName.isMetaTableName(this.tableName)?
|
||||
0: // TODO: Figure better what to do here for hbase:meta replica.
|
||||
getReplicaCountInMeta(connection, configuredReplicaCount, regionsOfTable);
|
||||
LOG.info("replicasFound={} (configuredReplicaCount={} for {}", replicasFound,
|
||||
configuredReplicaCount, tableName.getNameAsString());
|
||||
if (currentMaxReplica == (configuredReplicaCount - 1)) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("There is no change to the number of region replicas."
|
||||
+ " Assigning the available regions." + " Current and previous"
|
||||
+ "replica count is " + regionReplicaCount);
|
||||
LOG.debug("No change in number of region replicas (configuredReplicaCount={});"
|
||||
+ " assigning.", configuredReplicaCount);
|
||||
}
|
||||
} else if (currentMaxReplica > (regionReplicaCount - 1)) {
|
||||
// we have additional regions as the replica count has been decreased. Delete
|
||||
} else if (currentMaxReplica > (configuredReplicaCount - 1)) {
|
||||
// We have additional regions as the replica count has been decreased. Delete
|
||||
// those regions because already the table is in the unassigned state
|
||||
LOG.info("The number of replicas " + (currentMaxReplica + 1)
|
||||
+ " is more than the region replica count " + regionReplicaCount);
|
||||
+ " is more than the region replica count " + configuredReplicaCount);
|
||||
List<RegionInfo> copyOfRegions = new ArrayList<RegionInfo>(regionsOfTable);
|
||||
for (RegionInfo regionInfo : copyOfRegions) {
|
||||
if (regionInfo.getReplicaId() > (regionReplicaCount - 1)) {
|
||||
if (regionInfo.getReplicaId() > (configuredReplicaCount - 1)) {
|
||||
// delete the region from the regionStates
|
||||
env.getAssignmentManager().getRegionStates().deleteRegion(regionInfo);
|
||||
// remove it from the list of regions of the table
|
||||
LOG.info("The regioninfo being removed is " + regionInfo + " "
|
||||
+ regionInfo.getReplicaId());
|
||||
LOG.info("Removed replica={} of {}", regionInfo.getRegionId(), regionInfo);
|
||||
regionsOfTable.remove(regionInfo);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// the replicasFound is less than the regionReplication
|
||||
LOG.info("The number of replicas has been changed(increased)."
|
||||
+ " Lets assign the new region replicas. The previous replica count was "
|
||||
+ (currentMaxReplica + 1) + ". The current replica count is " + regionReplicaCount);
|
||||
regionsOfTable = RegionReplicaUtil.addReplicas(hTableDescriptor, regionsOfTable,
|
||||
currentMaxReplica + 1, regionReplicaCount);
|
||||
LOG.info("Number of replicas has increased. Assigning new region replicas." +
|
||||
"The previous replica count was {}. The current replica count is {}.",
|
||||
(currentMaxReplica + 1), configuredReplicaCount);
|
||||
regionsOfTable = RegionReplicaUtil.addReplicas(tableDescriptor, regionsOfTable,
|
||||
currentMaxReplica + 1, configuredReplicaCount);
|
||||
}
|
||||
// Assign all the table regions. (including region replicas if added).
|
||||
// createAssignProcedure will try to retain old assignments if possible.
|
||||
|
@@ -186,9 +173,13 @@ public class EnableTableProcedure
        return Flow.HAS_MORE_STATE;
      }

-  private int getNumberOfReplicasFromMeta(Connection connection, int regionReplicaCount,
+  /**
+   * @return Count of replicas found reading the hbase:meta Region row, or zk if
+   *   asking about the hbase:meta table itself.
+   */
+  private int getReplicaCountInMeta(Connection connection, int regionReplicaCount,
      List<RegionInfo> regionsOfTable) throws IOException {
-    Result r = getRegionFromMeta(connection, regionsOfTable);
+    Result r = MetaTableAccessor.getCatalogFamilyRow(connection, regionsOfTable.get(0));
    int replicasFound = 0;
    for (int i = 1; i < regionReplicaCount; i++) {
      // Since we have already added the entries to the META we will be getting only that here
@@ -201,16 +192,6 @@ public class EnableTableProcedure
    return replicasFound;
  }

-  private Result getRegionFromMeta(Connection connection, List<RegionInfo> regionsOfTable)
-      throws IOException {
-    byte[] metaKeyForRegion = MetaTableAccessor.getMetaKeyForRegion(regionsOfTable.get(0));
-    Get get = new Get(metaKeyForRegion);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    Table metaTable = MetaTableAccessor.getMetaHTable(connection);
-    Result r = metaTable.get(get);
-    return r;
-  }
-
  @Override
  protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state)
      throws IOException {
@@ -408,4 +389,20 @@ public class EnableTableProcedure
      }
    }
  }
+
+  /**
+   * @return Maximum region replica id found in passed list of regions.
+   */
+  private static int getMaxReplicaId(List<RegionInfo> regions) {
+    int max = 0;
+    for (RegionInfo regionInfo: regions) {
+      if (regionInfo.getReplicaId() > max) {
+        // Iterate through the whole list to find the region with the highest replicaId.
+        // We could stop after checking the first set of regions?
+        max = regionInfo.getReplicaId();
+      }
+    }
+    return max;
+  }
 }
@@ -25,11 +25,11 @@ import java.util.function.LongConsumer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.TableStateManager;
-import org.apache.hadoop.hbase.master.TableStateManager.TableStateNotFoundException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
@@ -148,7 +148,7 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
          return false;
        }
        Thread.sleep(SLEEP_INTERVAL_MS);
-      } catch (TableStateNotFoundException e) {
+      } catch (TableNotFoundException e) {
        return false;
      } catch (InterruptedException e) {
        throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
@@ -227,7 +227,7 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
          return true;
        }
        Thread.sleep(SLEEP_INTERVAL_MS);
-      } catch (TableStateNotFoundException e) {
+      } catch (TableNotFoundException e) {
        return false;
      } catch (InterruptedException e) {
        throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
@@ -1,4 +1,4 @@
-/**
+/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@@ -41,6 +41,6 @@ public class MetaLocationSyncer extends ClientZKSyncer {

  @Override
  Collection<String> getNodesToWatch() {
-    return watcher.getZNodePaths().metaReplicaZNodes.values();
+    return watcher.getZNodePaths().getMetaReplicaZNodes();
  }
 }
@@ -472,11 +472,10 @@ public final class SnapshotManifest {

  public void consolidate() throws IOException {
    if (getSnapshotFormat(desc) == SnapshotManifestV1.DESCRIPTOR_VERSION) {
-      Path rootDir = FSUtils.getRootDir(conf);
      LOG.info("Using old Snapshot Format");
      // write a copy of descriptor to the snapshot directory
-      new FSTableDescriptors(conf, workingDirFs, rootDir)
-        .createTableDescriptorForTableDirectory(workingDir, htd, false);
+      FSTableDescriptors.createTableDescriptorForTableDirectory(workingDirFs, workingDir, htd,
+          false);
    } else {
      LOG.debug("Convert to Single Snapshot Manifest");
      convertToV2SingleManifest();
@@ -122,6 +122,7 @@ public class FSTableDescriptors implements TableDescriptors {
   * @param fsreadonly True if we are read-only when it comes to filesystem
   *                   operations; i.e. on remove, we do not do delete in fs.
   */
+  @VisibleForTesting
  public FSTableDescriptors(final Configuration conf, final FileSystem fs,
      final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
    this(conf, fs, rootdir, fsreadonly, usecache, null);
@@ -141,10 +142,26 @@ public class FSTableDescriptors implements TableDescriptors {
    this.rootdir = rootdir;
    this.fsreadonly = fsreadonly;
    this.usecache = usecache;
-    this.metaTableDescriptor = metaObserver == null ? createMetaTableDescriptor(conf)
-        : metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
+    TableDescriptor td = null;
+    try {
+      td = getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
+    } catch (TableInfoMissingException e) {
+      td = metaObserver == null? createMetaTableDescriptor(conf):
+          metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
+      if (!fsreadonly) {
+        LOG.info("Creating new hbase:meta table default descriptor/schema {}", td);
+        updateTableDescriptor(td);
+      }
+    }
+    this.metaTableDescriptor = td;
  }

  /**
-   *
-   * Should be private
+   * @deprecated Since 2.3.0. Should be for internal use only. Used by testing.
   */
+  @Deprecated
  @VisibleForTesting
  public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) throws IOException {
    // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
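The constructor change above is the schema bootstrap described in the commit message: hbase:meta's descriptor now round-trips through the filesystem instead of being rebuilt from hard-coded defaults on each start. A rough sketch of the resulting behavior (conf, fs and rootDir are assumed to be in scope; the constructor is the five-argument overload from this diff):

    // First start: no tableinfo file exists for hbase:meta, so the default
    // descriptor is created and, when not read-only, persisted via
    // updateTableDescriptor(). Subsequent starts read the (possibly edited)
    // descriptor back from the fs.
    FSTableDescriptors tds = new FSTableDescriptors(conf, fs, rootDir, false, true);
    TableDescriptor meta = tds.get(TableName.META_TABLE_NAME);
    // 'meta' now reflects any persisted schema edits, not a compiled-in default.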
@@ -218,16 +235,6 @@ public class FSTableDescriptors implements TableDescriptors {
  public TableDescriptor get(final TableName tablename)
      throws IOException {
    invocations++;
-    if (TableName.META_TABLE_NAME.equals(tablename)) {
-      cachehits++;
-      return metaTableDescriptor;
-    }
-    // hbase:meta is already handled. If some one tries to get the descriptor for
-    // .logs, .oldlogs or .corrupt throw an exception.
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
-      throw new IOException("No descriptor found for non table = " + tablename);
-    }
-
    if (usecache) {
      // Look in cache of descriptors.
      TableDescriptor cachedtdm = this.cache.get(tablename);
@@ -263,7 +270,6 @@ public class FSTableDescriptors implements TableDescriptors {
  public Map<String, TableDescriptor> getAll()
      throws IOException {
    Map<String, TableDescriptor> tds = new TreeMap<>();
-
    if (fsvisited && usecache) {
      for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
        tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue());
@@ -326,15 +332,6 @@ public class FSTableDescriptors implements TableDescriptors {
    if (fsreadonly) {
      throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
    }
-    TableName tableName = htd.getTableName();
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      throw new NotImplementedException(HConstants.NOT_IMPLEMENTED);
-    }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
-      throw new NotImplementedException(
-        "Cannot add a table descriptor for a reserved subdirectory name: "
-          + htd.getTableName().getNameAsString());
-    }
    updateTableDescriptor(htd);
  }

@@ -359,26 +356,6 @@ public class FSTableDescriptors implements TableDescriptors {
    return descriptor;
  }

-  /**
-   * Checks if a current table info file exists for the given table
-   *
-   * @param tableName name of table
-   * @return true if exists
-   * @throws IOException
-   */
-  public boolean isTableInfoExists(TableName tableName) throws IOException {
-    return getTableInfoPath(tableName) != null;
-  }
-
-  /**
-   * Find the most current table info file for the given table in the hbase root directory.
-   * @return The file status of the current table info file or null if it does not exist
-   */
-  private FileStatus getTableInfoPath(final TableName tableName) throws IOException {
-    Path tableDir = getTableDir(tableName);
-    return getTableInfoPath(tableDir);
-  }
-
  private FileStatus getTableInfoPath(Path tableDir)
      throws IOException {
    return getTableInfoPath(fs, tableDir, !fsreadonly);
@@ -393,7 +370,6 @@ public class FSTableDescriptors implements TableDescriptors {
   * were sequence numbers).
   *
   * @return The file status of the current table info file or null if it does not exist
-   * @throws IOException
   */
  public static FileStatus getTableInfoPath(FileSystem fs, Path tableDir)
      throws IOException {
@@ -411,7 +387,6 @@ public class FSTableDescriptors implements TableDescriptors {
   * older files.
   *
   * @return The file status of the current table info file or null if none exist
-   * @throws IOException
   */
  private static FileStatus getTableInfoPath(FileSystem fs, Path tableDir, boolean removeOldFiles)
      throws IOException {
@@ -599,21 +574,6 @@ public class FSTableDescriptors implements TableDescriptors {
    return p;
  }

-  /**
-   * Deletes all the table descriptor files from the file system.
-   * Used in unit tests only.
-   * @throws NotImplementedException if in read only mode
-   */
-  public void deleteTableDescriptorIfExists(TableName tableName) throws IOException {
-    if (fsreadonly) {
-      throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
-    }
-
-    Path tableDir = getTableDir(tableName);
-    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
-    deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
-  }
-
  /**
   * Deletes files matching the table info file pattern within the given directory
   * whose sequenceId is at most the given max sequenceId.
@@ -736,7 +696,8 @@ public class FSTableDescriptors implements TableDescriptors {

  /**
   * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
-   * a new table or snapshot a table.
+   * a new table during cluster start or in the Clone and Create Table Procedures. Checks the
+   * readOnly flag passed on construction.
   * @param tableDir table directory under which we should write the file
   * @param htd description of the table to write
   * @param forceCreation if <tt>true</tt>, then even if a previous table descriptor is present it will
@@ -745,11 +706,28 @@ public class FSTableDescriptors implements TableDescriptors {
   *          already exists and we weren't forcing the descriptor creation.
   * @throws IOException if a filesystem error occurs
   */
-  public boolean createTableDescriptorForTableDirectory(Path tableDir,
-      TableDescriptor htd, boolean forceCreation) throws IOException {
-    if (fsreadonly) {
+  public boolean createTableDescriptorForTableDirectory(Path tableDir, TableDescriptor htd,
+      boolean forceCreation) throws IOException {
+    if (this.fsreadonly) {
      throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
    }
+    return createTableDescriptorForTableDirectory(this.fs, tableDir, htd, forceCreation);
+  }
+
+  /**
+   * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
+   * a new table while snapshotting. Does not enforce read-only; that is for the caller to determine.
+   * @param fs Filesystem to use.
+   * @param tableDir table directory under which we should write the file
+   * @param htd description of the table to write
+   * @param forceCreation if <tt>true</tt>, then even if a previous table descriptor is present it
+   *          will be overwritten
+   * @return <tt>true</tt> if we successfully created the file, <tt>false</tt> if the file
+   *          already exists and we weren't forcing the descriptor creation.
+   * @throws IOException if a filesystem error occurs
+   */
+  public static boolean createTableDescriptorForTableDirectory(FileSystem fs,
+      Path tableDir, TableDescriptor htd, boolean forceCreation) throws IOException {
    FileStatus status = getTableInfoPath(fs, tableDir);
    if (status != null) {
      LOG.debug("Current path=" + status.getPath());
@@ -762,9 +740,7 @@ public class FSTableDescriptors implements TableDescriptors {
        }
      }
    }
-    Path p = writeTableDescriptor(fs, htd, tableDir, status);
-    return p != null;
+    return writeTableDescriptor(fs, htd, tableDir, status) != null;
  }
 }
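A short usage sketch of the new static variant, mirroring the SnapshotManifest.consolidate() call earlier in this diff (workingDirFs, workingDir and htd assumed in scope):

    // The FileSystem is passed explicitly and the read-only check is left to
    // the caller, as the javadoc above notes. Returns false when a descriptor
    // already exists and forceCreation is false.
    boolean created = FSTableDescriptors.createTableDescriptorForTableDirectory(
        workingDirFs, workingDir, htd, false);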
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -37,6 +37,7 @@ import org.slf4j.LoggerFactory;

 /**
 * Utility method to migrate zookeeper data across HBase versions.
+ * Used by the Master when mirroring table state to zk for hbase-1 clients.
 * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
 */
 @Deprecated
@@ -65,25 +66,7 @@ public class ZKDataMigrator {
      return rv;
    for (String child: children) {
      TableName tableName = TableName.valueOf(child);
-      ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName);
-      TableState.State newState = TableState.State.ENABLED;
-      if (state != null) {
-        switch (state) {
-        case ENABLED:
-          newState = TableState.State.ENABLED;
-          break;
-        case DISABLED:
-          newState = TableState.State.DISABLED;
-          break;
-        case DISABLING:
-          newState = TableState.State.DISABLING;
-          break;
-        case ENABLING:
-          newState = TableState.State.ENABLING;
-          break;
-        default:
-        }
-      }
+      TableState.State newState = ProtobufUtil.toTableState(getTableState(zkw, tableName));
      rv.put(tableName, newState);
    }
    return rv;
@@ -105,15 +88,8 @@ public class ZKDataMigrator {
      throws KeeperException, InterruptedException {
    String znode = ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode,
      tableName.getNameAsString());
-    byte [] data = ZKUtil.getData(zkw, znode);
-    if (data == null || data.length <= 0) return null;
    try {
-      ProtobufUtil.expectPBMagicPrefix(data);
-      ZooKeeperProtos.DeprecatedTableState.Builder builder =
-        ZooKeeperProtos.DeprecatedTableState.newBuilder();
-      int magicLen = ProtobufUtil.lengthOfPBMagic();
-      ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
-      return builder.getState();
+      return ProtobufUtil.toTableState(ZKUtil.getData(zkw, znode));
    } catch (IOException e) {
      KeeperException ke = new KeeperException.DataInconsistencyException();
      ke.initCause(e);
@@ -152,6 +152,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.impl.Log4jLoggerAdapter;

 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

 /**
@@ -496,7 +497,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {

  /**
   * @return META table descriptor
-   * @deprecated since 2.0 version and will be removed in 3.0 version.
+   * @deprecated since 2.0 version and will be removed in 3.0 version. Currently for test only.
   *             use {@link #getMetaTableDescriptorBuilder()}
   */
  @Deprecated
@@ -506,7 +507,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {

  /**
   * @return META table descriptor
+   * @deprecated Since 2.3.0. No one should be using this internally. Used in testing only.
   */
+  @Deprecated
+  @VisibleForTesting
  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
    try {
      return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Test being able to edit hbase:meta.
+ */
+@Category({MiscTests.class, LargeTests.class})
+public class TestHBaseMetaEdit {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestHBaseMetaEdit.class);
+  @Rule
+  public TestName name = new TestName();
+  private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  @Before
+  public void before() throws Exception {
+    UTIL.startMiniCluster();
+  }
+
+  @After
+  public void after() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Set versions, set HBASE-16213 indexed block encoding, and add a column family.
+   * Verify they are all in place by looking at the TableDescriptor AND by checking
+   * what the RegionServer sees after opening the Region.
+   */
+  @Test
+  public void testEditMeta() throws IOException {
+    Admin admin = UTIL.getAdmin();
+    admin.tableExists(TableName.META_TABLE_NAME);
+    admin.disableTable(TableName.META_TABLE_NAME);
+    assertTrue(admin.isTableDisabled(TableName.META_TABLE_NAME));
+    TableDescriptor descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
+    ColumnFamilyDescriptor cfd = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY);
+    byte [] extraColumnFamilyName = Bytes.toBytes("xtra");
+    ColumnFamilyDescriptor newCfd =
+        ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build();
+    int oldVersions = cfd.getMaxVersions();
+    // Add '1' to current versions count.
+    cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(oldVersions + 1).
+        setConfiguration(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING,
+            DataBlockEncoding.ROW_INDEX_V1.toString()).build();
+    admin.modifyColumnFamily(TableName.META_TABLE_NAME, cfd);
+    admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd);
+    descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
+    // Assert new max versions is == old versions plus 1.
+    assertEquals(oldVersions + 1,
+        descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
+    admin.enableTable(TableName.META_TABLE_NAME);
+    descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
+    // Assert new max versions is == old versions plus 1.
+    assertEquals(oldVersions + 1,
+        descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
+    assertTrue(descriptor.getColumnFamily(newCfd.getName()) != null);
+    String encoding = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getConfiguration().
+        get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
+    assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
+    Region r = UTIL.getHBaseCluster().getRegionServer(0).
+        getRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
+    assertEquals(oldVersions + 1,
+        r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions());
+    encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().
+        getConfigurationValue(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
+    assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
+    assertTrue(r.getStore(extraColumnFamilyName) != null);
+  }
+}
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -538,22 +537,6 @@ public class TestAdmin2 extends TestAdminBase {
        " HBase was not available");
  }

-  @Test
-  public void testDisableCatalogTable() throws Exception {
-    try {
-      ADMIN.disableTable(TableName.META_TABLE_NAME);
-      fail("Expected to throw ConstraintException");
-    } catch (ConstraintException e) {
-    }
-    // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
-    // actually getting disabled by the disableTable() call.
-    HTableDescriptor htd =
-        new HTableDescriptor(TableName.valueOf(Bytes.toBytes(name.getMethodName())));
-    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf1"));
-    htd.addFamily(hcd);
-    TEST_UTIL.getHBaseAdmin().createTable(htd);
-  }
-
  @Test
  public void testIsEnabledOrDisabledOnUnknownTable() throws Exception {
    try {
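testDisableCatalogTable goes away because disabling hbase:meta is no longer a constraint violation; TestHBaseMetaEdit above pins down the new expectation. A condensed sketch of that expectation (ADMIN as declared in this test class):

    // Formerly threw ConstraintException; now a legal disable/enable cycle.
    ADMIN.disableTable(TableName.META_TABLE_NAME);
    assertTrue(ADMIN.isTableDisabled(TableName.META_TABLE_NAME));
    ADMIN.enableTable(TableName.META_TABLE_NAME);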
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
@@ -39,7 +39,6 @@ import java.util.Set;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;

 /**
 * Class to test asynchronous table admin operations
@@ -54,18 +53,6 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase {
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestAsyncTableAdminApi2.class);

-  @Test
-  public void testDisableCatalogTable() throws Exception {
-    try {
-      this.admin.disableTable(TableName.META_TABLE_NAME).join();
-      fail("Expected to throw ConstraintException");
-    } catch (Exception e) {
-    }
-    // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
-    // actually getting disabled by the disableTable() call.
-    createTableWithDefaultConf(tableName);
-  }
-
  @Test
  public void testAddColumnFamily() throws Exception {
    // Create a table with two families
@@ -18,12 +18,9 @@
 package org.apache.hadoop.hbase.client;

 import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;

 import java.util.ArrayList;
 import java.util.Collections;
@@ -31,7 +28,6 @@ import java.util.List;
 import java.util.concurrent.ExecutionException;
 import java.util.regex.Pattern;
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
@@ -200,14 +196,6 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
      ok = false;
    }
    assertTrue(ok);
-    // meta table can not be disabled.
-    try {
-      admin.disableTable(TableName.META_TABLE_NAME).get();
-      fail("meta table can not be disabled");
-    } catch (ExecutionException e) {
-      Throwable cause = e.getCause();
-      assertThat(cause, instanceOf(DoNotRetryIOException.class));
-    }
  }

  @Test
@@ -192,6 +192,16 @@ public class TestConnectionImplementation {
      table.close();
    }

+    // See if stats change.
+    LOG.info(((ConnectionImplementation)con1).tableStateCache.stats().toString());
+    assertEquals(0, ((ConnectionImplementation)con1).tableStateCache.stats().missCount());
+    try (Admin a = con1.getAdmin()) {
+      a.isTableDisabled(TableName.META_TABLE_NAME);
+    }
+    LOG.info(((ConnectionImplementation)con1).tableStateCache.stats().toString());
+    assertEquals(1, ((ConnectionImplementation)con1).tableStateCache.stats().missCount());
+    assertEquals(1,
+        ((ConnectionImplementation)con1).tableStateCache.stats().loadSuccessCount());
    con1.close();

    // if the pool was created on demand it should be closed upon connection close
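The stats assertions above inspect the client-side table-state cache this change adds to ConnectionImplementation. A hedged sketch of such a cache (field shape is illustrative only; the 1-second default TTL comes from the commit description, and HBase would use its shaded Guava):

    import java.util.concurrent.TimeUnit;
    import org.apache.hbase.thirdparty.com.google.common.cache.Cache;
    import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;

    // recordStats() is what makes stats().missCount() and loadSuccessCount()
    // observable above; expireAfterWrite() bounds staleness of cached states.
    Cache<TableName, TableState> tableStateCache = CacheBuilder.newBuilder()
        .expireAfterWrite(1, TimeUnit.SECONDS)
        .recordStats()
        .build();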
@@ -17,6 +17,7 @@
 */
 package org.apache.hadoop.hbase.client;

+import static junit.framework.TestCase.assertTrue;
 import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM;
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.assertEquals;
@@ -128,4 +129,31 @@ public class TestZKAsyncRegistry {
      }
    }
  }
+
+  /**
+   * Test the hbase:meta table state implementation.
+   * The test is a bit involved because meta has replicas; replica assignment lags,
+   * so check between steps that all are assigned.
+   */
+  @Test
+  public void testMetaTableState() throws IOException, ExecutionException, InterruptedException {
+    assertTrue(TEST_UTIL.getMiniHBaseCluster().getMaster().isActiveMaster());
+    int ritTimeout = 10000;
+    TEST_UTIL.waitUntilNoRegionsInTransition(ritTimeout);
+    LOG.info("MASTER INITIALIZED");
+    try (ZKAsyncRegistry registry = new ZKAsyncRegistry(TEST_UTIL.getConfiguration())) {
+      assertEquals(TableState.State.ENABLED, registry.getMetaTableState().get().getState());
+      LOG.info("META ENABLED");
+      try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
+        admin.disableTable(TableName.META_TABLE_NAME);
+        assertEquals(TableState.State.DISABLED, registry.getMetaTableState().get().getState());
+        TEST_UTIL.waitUntilNoRegionsInTransition(ritTimeout);
+        LOG.info("META DISABLED");
+        admin.enableTable(TableName.META_TABLE_NAME);
+        assertEquals(TableState.State.ENABLED, registry.getMetaTableState().get().getState());
+        TEST_UTIL.waitUntilNoRegionsInTransition(ritTimeout);
+        LOG.info("META ENABLED");
+      }
+    }
+  }
 }
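Outside tests, the same registry read works for any client that wants the mirrored hbase:meta state without asking the Master; a compact sketch (conf assumed in scope; getMetaTableState() returns a future, as the get() calls above show):

    try (ZKAsyncRegistry registry = new ZKAsyncRegistry(conf)) {
      // Blocks on the CompletableFuture; the registry contract favors ENABLED
      // when no mirrored state has been written yet.
      TableState ts = registry.getMetaTableState().get();
      boolean metaDisabled = ts.getState() == TableState.State.DISABLED;
    }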
@@ -322,8 +322,13 @@ public class TestFSTableDescriptors {
    }

    Map<String, TableDescriptor> tables = tds.getAll();
+    // Remove hbase:meta from the list. It shows up now since we made it dynamic. The schema
+    // is written into the fs by the FSTableDescriptors constructor now, where before it
+    // wasn't.
+    tables.remove(TableName.META_TABLE_NAME.getNameAsString());
    assertEquals(4, tables.size());

    String[] tableNamesOrdered =
        new String[] { "bar:foo", "default:bar", "default:foo", "foo:bar" };
    int i = 0;
@@ -359,12 +364,13 @@ public class TestFSTableDescriptors {

    assertTrue(nonchtds.getAll().size() == chtds.getAll().size());

-    // add a new entry for hbase:meta
-    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+    // add a new entry for a random table name.
+    TableName random = TableName.valueOf("random");
+    TableDescriptor htd = TableDescriptorBuilder.newBuilder(random).build();
    nonchtds.createTableDescriptor(htd);

-    // hbase:meta will only increase the cachehit by 1
-    assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
+    // random will only increase the cachehit by 1
+    assertEquals(nonchtds.getAll().size(), chtds.getAll().size() + 1);

    for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) {
      String t = (String) entry.getKey();
@@ -44,6 +44,8 @@ EOF
          puts
        end
        formatter.footer
+        if table.to_s != 'hbase:meta'
+          # No QUOTAS if hbase:meta table
          puts
          formatter.header(%w[QUOTAS])
          count = quotas_admin.list_quotas(TABLE => table.to_s) do |_, quota|
@@ -51,6 +53,7 @@ EOF
          end
          formatter.footer(count)
+        end
      end
      # rubocop:enable Metrics/AbcSize, Metrics/MethodLength
    end
 end
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -61,14 +61,6 @@ public final class MetaTableLocator {
  private MetaTableLocator() {
  }

-  /**
-   * Checks if the meta region location is available.
-   * @return true if meta region location is available, false if not
-   */
-  public static boolean isLocationAvailable(ZKWatcher zkw) {
-    return getMetaRegionLocation(zkw) != null;
-  }
-
  /**
   * @param zkw ZooKeeper watcher to be used
   * @return meta table regions and their locations.
@@ -266,7 +258,7 @@ public final class MetaTableLocator {
  }

  /**
-   * Load the meta region state from the meta server ZNode.
+   * Load the meta region state from the meta region server ZNode.
   *
   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
   * @param replicaId the ID of the replica
@@ -306,10 +298,8 @@ public final class MetaTableLocator {
    if (serverName == null) {
      state = RegionState.State.OFFLINE;
    }
-    return new RegionState(
-        RegionReplicaUtil.getRegionInfoForReplica(
-            RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId),
-        state, serverName);
+    return new RegionState(RegionReplicaUtil.getRegionInfoForReplica(
+        RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName);
  }

  /**
@@ -2056,7 +2056,7 @@ public final class ZKUtil {
          " byte(s) of data from znode " + znode +
          (watcherSet? " and set watcher; ": "; data=") +
          (data == null? "null": data.length == 0? "empty": (
-            znode.startsWith(zkw.getZNodePaths().metaZNodePrefix)?
+            zkw.getZNodePaths().isMetaZNodePrefix(znode)?
              getServerNameOrEmptyString(data):
              znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode)?
                getServerNameOrEmptyString(data):