Revert " HBASE-23055 Alter hbase:meta (#655)"
"Too radical for branch-2"
This reverts commit ff217d4269
.
This commit is contained in:
parent
ff217d4269
commit
61bc57f525
|
MetaTableAccessor.java

@@ -304,18 +304,11 @@ public class MetaTableAccessor {
    */
   public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo)
     throws IOException {
-    return getRegionLocation(getCatalogFamilyRow(connection, regionInfo),
-      regionInfo, regionInfo.getReplicaId());
-  }
-
-  /**
-   * @return Return the {@link HConstants#CATALOG_FAMILY} row from hbase:meta.
-   */
-  public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri)
-    throws IOException {
-    Get get = new Get(getMetaKeyForRegion(ri));
+    byte[] row = getMetaKeyForRegion(regionInfo);
+    Get get = new Get(row);
     get.addFamily(HConstants.CATALOG_FAMILY);
-    return get(getMetaHTable(connection), get);
+    Result r = get(getMetaHTable(connection), get);
+    return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
   }

   /** Returns the row key to use for this regionInfo */

@@ -1117,7 +1110,7 @@ public class MetaTableAccessor {
   public static TableState getTableState(Connection conn, TableName tableName)
     throws IOException {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
-      throw new IllegalAccessError("Go to the Master to find hbase:meta table state, not here");
+      return new TableState(tableName, TableState.State.ENABLED);
     }
     Table metaHTable = getMetaHTable(conn);
     Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());

@@ -1145,8 +1138,7 @@ public class MetaTableAccessor {
   }

   /**
-   * Updates state in META.
-   * Do not use. For internal use only.
+   * Updates state in META
    * @param conn connection to use
    * @param tableName table to look for
    */
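Note: the restored getRegionLocation inlines what HBASE-23055 had factored out into getCatalogFamilyRow: build the hbase:meta row key for the region, Get its catalog family, and parse the wanted replica's location out of the Result. A minimal client-side sketch of the same read pattern using only public MetaTableAccessor helpers (the class and method names of the sketch itself are illustrative, not part of this patch):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.RegionLocations;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;

    final class MetaLocationSketch {
      /** Read a region's location from its hbase:meta row, as the restored code does. */
      static HRegionLocation locate(Connection conn, RegionInfo ri) throws IOException {
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Get get = new Get(MetaTableAccessor.getMetaKeyForRegion(ri));
          get.addFamily(HConstants.CATALOG_FAMILY);
          Result r = meta.get(get);
          RegionLocations locs = MetaTableAccessor.getRegionLocations(r);
          return locs == null ? null : locs.getRegionLocation(ri.getReplicaId());
        }
      }
    }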
ConnectionImplementation.java

@@ -1,4 +1,4 @@
-/*
+/**
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

@@ -45,7 +45,6 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.ReentrantLock;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.CallQueueTooBigException;

@@ -164,7 +163,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   private final int metaReplicaCallTimeoutScanInMicroSecond;
   private final int numTries;
   final int rpcTimeout;
-  private final int operationTimeout;

   /**
    * Global nonceGenerator shared per client.Currently there's no reason to limit its scope.

@@ -332,8 +330,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       close();
       throw e;
     }
-    this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
   }

   private void spawnRenewalChore(final UserGroupInformation user) {

@@ -2061,30 +2057,12 @@ class ConnectionImplementation implements ClusterConnection, Closeable {

   @Override
   public TableState getTableState(TableName tableName) throws IOException {
-    // Go to the Master for Table State. It is the authority. It knows State for user-space
-    // and for system-space tables. Previous we went direct to the hbase:meta table to find
-    // table-state. hbase:meta does not have system-table states. This puts new load on Master.
-    // Now it proxies reads to the hbase:meta. Benefit is that we hide table state implementation.
-    // Downside is more load on Master. Master is host for hbase:meta table-state (and for that of
-    // other tables). Going to Master means one-stop-shop for all table states.
-    RpcControllerFactory factory = getRpcControllerFactory();
-    try (MasterCallable<TableState.State> c = new MasterCallable<TableState.State>(this, factory) {
-      @Override
-      protected TableState.State rpcCall() throws Exception {
-        setPriority(tableName);
-        MasterProtos.GetTableStateRequest req =
-          RequestConverter.buildGetTableStateRequest(tableName);
-        MasterProtos.GetTableStateResponse ret = master.getTableState(getRpcController(), req);
-        if (!ret.hasTableState() || ret.getTableState() == null) {
+    checkClosed();
+    TableState tableState = MetaTableAccessor.getTableState(this, tableName);
+    if (tableState == null) {
       throw new TableNotFoundException(tableName);
     }
-    return TableState.State.valueOf(ret.getTableState().getState().toString());
-      }
-    }) {
-      RpcRetryingCaller<TableState.State> caller = getRpcRetryingCallerFactory().
-        newCaller(this.rpcTimeout);
-      return new TableState(tableName, caller.callWithRetries(c, this.operationTimeout));
-    }
+    return tableState;
   }

   @Override
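Note: the reverted branch-2 client resolves table state with a direct read of the table:state column in hbase:meta (MetaTableAccessor.getTableState) rather than a GetTableState RPC to the Master; a missing row maps to TableNotFoundException. The restored behavior, condensed (the sketch's class name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.TableNotFoundException;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableState;

    final class TableStateLookupSketch {
      /** hbase:meta holds state for all tables except hbase:meta itself (always ENABLED). */
      static TableState lookup(Connection conn, TableName tableName) throws IOException {
        TableState state = MetaTableAccessor.getTableState(conn, tableName);
        if (state == null) {
          // No table:state cell in hbase:meta means the table does not exist.
          throw new TableNotFoundException(tableName);
        }
        return state;
      }
    }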
HBaseAdmin.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

@@ -543,9 +543,7 @@ public class HBaseAdmin implements Admin {
   static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection,
     RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
     int operationTimeout, int rpcTimeout) throws IOException {
-    if (tableName == null) {
-      return null;
-    }
+    if (tableName == null) return null;
     TableDescriptor td =
       executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
         @Override

@@ -950,13 +948,22 @@ public class HBaseAdmin implements Admin {
   @Override
   public boolean isTableEnabled(final TableName tableName) throws IOException {
     checkTableExists(tableName);
-    return this.connection.getTableState(tableName).isEnabled();
+    return executeCallable(new RpcRetryingCallable<Boolean>() {
+      @Override
+      protected Boolean rpcCall(int callTimeout) throws Exception {
+        TableState tableState = MetaTableAccessor.getTableState(getConnection(), tableName);
+        if (tableState == null) {
+          throw new TableNotFoundException(tableName);
+        }
+        return tableState.inStates(TableState.State.ENABLED);
+      }
+    });
   }

   @Override
   public boolean isTableDisabled(TableName tableName) throws IOException {
     checkTableExists(tableName);
-    return this.connection.getTableState(tableName).isDisabled();
+    return connection.isTableDisabled(tableName);
   }

   @Override

@@ -4350,4 +4357,5 @@ public class HBaseAdmin implements Admin {
     });
+
   }

 }
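Note: after the revert, HBaseAdmin#isTableEnabled wraps the hbase:meta read in an RpcRetryingCallable so a transient meta outage is retried rather than surfaced, and isTableDisabled delegates to the connection. From application code the behavior is the same either way; a usage sketch ("t1" is a hypothetical table name):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class IsTableEnabledSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
            Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("t1");
          // Reads the table:state column from hbase:meta (with retries), not the Master.
          System.out.println(tn + " enabled: " + admin.isTableEnabled(tn));
        }
      }
    }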
MasterKeepAliveConnection.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Copyright The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one

@@ -20,12 +20,9 @@

 package org.apache.hadoop.hbase.client;

-import java.io.Closeable;
-
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.yetus.audience.InterfaceAudience;
-

 /**
  * A KeepAlive connection is not physically closed immediately after the close,
  * but rather kept alive for a few minutes. It makes sense only if it is shared.

@@ -38,8 +35,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * final user code. Hence it's package protected.
  */
 @InterfaceAudience.Private
-interface MasterKeepAliveConnection extends
-  MasterProtos.MasterService.BlockingInterface, Closeable {
+interface MasterKeepAliveConnection extends MasterProtos.MasterService.BlockingInterface {
   // Do this instead of implement Closeable because closeable returning IOE is PITA.
   void close();
 }
RawAsyncHBaseAdmin.java

@@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
 import org.apache.hadoop.hbase.security.access.UserPermission;
-
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;

@@ -192,8 +191,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;

@@ -666,25 +663,42 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {

   @Override
   public CompletableFuture<Boolean> isTableEnabled(TableName tableName) {
-    return isTableState(tableName, TableState.State.ENABLED);
+    if (TableName.isMetaTableName(tableName)) {
+      return CompletableFuture.completedFuture(true);
+    }
+    CompletableFuture<Boolean> future = new CompletableFuture<>();
+    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      if (state.isPresent()) {
+        future.complete(state.get().inStates(TableState.State.ENABLED));
+      } else {
+        future.completeExceptionally(new TableNotFoundException(tableName));
+      }
+    });
+    return future;
   }

   @Override
   public CompletableFuture<Boolean> isTableDisabled(TableName tableName) {
-    return isTableState(tableName, TableState.State.DISABLED);
+    if (TableName.isMetaTableName(tableName)) {
+      return CompletableFuture.completedFuture(false);
     }
-
-  /**
-   * @return Future that calls Master getTableState and compares to <code>state</code>
-   */
-  private CompletableFuture<Boolean> isTableState(TableName tableName, TableState.State state) {
-    return this.<Boolean> newMasterCaller().
-      action((controller, stub) ->
-        this.<GetTableStateRequest, GetTableStateResponse, Boolean> call(controller, stub,
-          GetTableStateRequest.newBuilder().
-            setTableName(ProtobufUtil.toProtoTableName(tableName)).build(),
-          (s, c, req, done) -> s.getTableState(c, req, done),
-          resp -> resp.getTableState().getState().toString().equals(state.toString()))).call();
+    CompletableFuture<Boolean> future = new CompletableFuture<>();
+    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      if (state.isPresent()) {
+        future.complete(state.get().inStates(TableState.State.DISABLED));
+      } else {
+        future.completeExceptionally(new TableNotFoundException(tableName));
+      }
+    });
+    return future;
   }

   @Override
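Note: the restored async path never issues a Master RPC: hbase:meta itself short-circuits (always enabled, never disabled), and otherwise AsyncMetaTableAccessor.getTableState yields an Optional that is bridged into the result future. The bridging pattern in isolation, generic over the looked-up type (names are illustrative):

    import java.util.Optional;
    import java.util.concurrent.CompletableFuture;
    import java.util.function.Predicate;

    final class OptionalFutureBridge {
      /** Complete a boolean future from an Optional-producing lookup;
       *  an empty Optional becomes the supplied exception, as in isTableEnabled. */
      static <T> CompletableFuture<Boolean> test(CompletableFuture<Optional<T>> lookup,
          Predicate<T> predicate, Exception whenAbsent) {
        CompletableFuture<Boolean> future = new CompletableFuture<>();
        lookup.whenComplete((opt, error) -> {
          if (error != null) {
            future.completeExceptionally(error);
          } else if (opt.isPresent()) {
            future.complete(predicate.test(opt.get()));
          } else {
            future.completeExceptionally(whenAbsent);
          }
        });
        return future;
      }
    }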
ZKAsyncRegistry.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

@@ -158,8 +158,7 @@ class ZKAsyncRegistry implements AsyncRegistry {
     }
     Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
     if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
-      LOG.warn("hbase:meta region (replicaId={}) is in state {}", replicaId,
-        stateAndServerName.getFirst());
+      LOG.warn("Meta region is in state " + stateAndServerName.getFirst());
     }
     locs[DEFAULT_REPLICA_ID] = new HRegionLocation(
       getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond());

@@ -174,7 +173,7 @@ class ZKAsyncRegistry implements AsyncRegistry {
         LOG.warn("Failed to fetch " + path, error);
         locs[replicaId] = null;
       } else if (proto == null) {
-        LOG.warn("hbase:meta znode for replica " + replicaId + " is null");
+        LOG.warn("Meta znode for replica " + replicaId + " is null");
         locs[replicaId] = null;
       } else {
         Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);

@@ -198,8 +197,9 @@ class ZKAsyncRegistry implements AsyncRegistry {
   public CompletableFuture<RegionLocations> getMetaRegionLocation() {
     CompletableFuture<RegionLocations> future = new CompletableFuture<>();
     addListener(
-      zk.list(znodePaths.baseZNode).thenApply(children -> children.stream().
-        filter(c -> znodePaths.isMetaZNodePrefix(c)).collect(Collectors.toList())),
+      zk.list(znodePaths.baseZNode)
+        .thenApply(children -> children.stream()
+          .filter(c -> c.startsWith(znodePaths.metaZNodePrefix)).collect(Collectors.toList())),
       (metaReplicaZNodes, error) -> {
         if (error != null) {
           future.completeExceptionally(error);
ZNodePaths.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

@@ -24,7 +24,6 @@ import static org.apache.hadoop.hbase.HConstants.SPLIT_LOGDIR_NAME;
 import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
 import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;

-import java.util.Collection;
 import java.util.Optional;
 import java.util.stream.IntStream;
 import org.apache.hadoop.conf.Configuration;

@@ -41,24 +40,15 @@ public class ZNodePaths {
   // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved.
   public static final char ZNODE_PATH_SEPARATOR = '/';

-  private static final String META_ZNODE_PREFIX = "meta-region-server";
+  public final static String META_ZNODE_PREFIX = "meta-region-server";
   private static final String DEFAULT_SNAPSHOT_CLEANUP_ZNODE = "snapshot-cleanup";

   // base znode for this cluster
   public final String baseZNode;
-
-  /**
-   * The prefix of meta znode. Does not include baseZNode.
-   * Its a 'prefix' because meta replica id integer can be tagged on the end (if
-   * no number present, it is 'default' replica).
-   */
-  private final String metaZNodePrefix;
-
-  /**
-   * znodes containing the locations of the servers hosting the meta replicas
-   */
-  private final ImmutableMap<Integer, String> metaReplicaZNodes;
+  // the prefix of meta znode, does not include baseZNode.
+  public final String metaZNodePrefix;
+  // znodes containing the locations of the servers hosting the meta replicas
+  public final ImmutableMap<Integer, String> metaReplicaZNodes;

   // znode containing ephemeral nodes of the regionservers
   public final String rsZNode;
   // znode containing ephemeral nodes of the draining regionservers

@@ -168,21 +158,21 @@ public class ZNodePaths {
   }

   /**
-   * @return true if the znode is a meta region replica
+   * Is the znode of any meta replica
+   * @param node
+   * @return true or false
    */
   public boolean isAnyMetaReplicaZNode(String node) {
-    return this.metaReplicaZNodes.containsValue(node);
+    if (metaReplicaZNodes.containsValue(node)) {
+      return true;
+    }
+    return false;
   }

   /**
-   * @return Meta Replica ZNodes
-   */
-  public Collection<String> getMetaReplicaZNodes() {
-    return this.metaReplicaZNodes.values();
-  }
-
-  /**
-   * @return the znode string corresponding to a replicaId
+   * Get the znode string corresponding to a replicaId
+   * @param replicaId
+   * @return znode
    */
   public String getZNodeForReplica(int replicaId) {
     // return a newly created path but don't update the cache of paths

@@ -193,21 +183,24 @@ public class ZNodePaths {
   }

   /**
-   * Parse the meta replicaId from the passed znode name.
+   * Parse the meta replicaId from the passed znode
    * @param znode the name of the znode, does not include baseZNode
    * @return replicaId
    */
   public int getMetaReplicaIdFromZnode(String znode) {
-    return znode.equals(metaZNodePrefix)?
-      RegionInfo.DEFAULT_REPLICA_ID:
-      Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
+    if (znode.equals(metaZNodePrefix)) {
+      return RegionInfo.DEFAULT_REPLICA_ID;
+    }
+    return Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
   }

   /**
-   * @return True if meta znode.
+   * Is it the default meta replica's znode
+   * @param znode the name of the znode, does not include baseZNode
+   * @return true or false
    */
-  public boolean isMetaZNodePrefix(String znode) {
-    return znode != null && znode.startsWith(this.metaZNodePrefix);
+  public boolean isDefaultMetaReplicaZnode(String znode) {
+    return metaReplicaZNodes.get(DEFAULT_REPLICA_ID).equals(znode);
   }

   /**
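Note: with metaZNodePrefix public again, the znode naming contract is visible to callers: the default meta replica lives at "meta-region-server" and replica N at "meta-region-server-N", so parsing strips the prefix plus one separator character. A self-contained sketch of that scheme (constants copied from the diff, class name illustrative):

    final class MetaZNodeNaming {
      static final String META_ZNODE_PREFIX = "meta-region-server";
      static final int DEFAULT_REPLICA_ID = 0;

      /** "meta-region-server" -> 0, "meta-region-server-2" -> 2. */
      static int replicaIdOf(String znode) {
        if (znode.equals(META_ZNODE_PREFIX)) {
          return DEFAULT_REPLICA_ID;
        }
        return Integer.parseInt(znode.substring(META_ZNODE_PREFIX.length() + 1)); // skip '-'
      }

      static String znodeOf(int replicaId) {
        return replicaId == DEFAULT_REPLICA_ID
            ? META_ZNODE_PREFIX : META_ZNODE_PREFIX + "-" + replicaId;
      }
    }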
HConstants.java

@@ -1211,6 +1211,7 @@ public final class HConstants {
   /** Directories that are not HBase user table directories */
   public static final List<String> HBASE_NON_USER_TABLE_DIRS =
     Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
+      new String[] { TableName.META_TABLE_NAME.getNameAsString() },
       HBASE_NON_TABLE_DIRS.toArray())));

   /** Health script related settings. */
TableDescriptors.java

@@ -25,19 +25,25 @@ import org.apache.hadoop.hbase.client.TableDescriptor;

 /**
  * Get, remove and modify table descriptors.
+ * Used by servers to host descriptors.
  */
 @InterfaceAudience.Private
 public interface TableDescriptors {
   /**
+   * @param tableName
    * @return TableDescriptor for tablename
+   * @throws IOException
    */
-  TableDescriptor get(final TableName tableName) throws IOException;
+  TableDescriptor get(final TableName tableName)
+  throws IOException;

   /**
    * Get Map of all NamespaceDescriptors for a given namespace.
    * @return Map of all descriptors.
+   * @throws IOException
    */
-  Map<String, TableDescriptor> getByNamespace(String name) throws IOException;
+  Map<String, TableDescriptor> getByNamespace(String name)
+  throws IOException;

   /**
    * Get Map of all TableDescriptors. Populates the descriptor cache as a

@@ -45,19 +51,25 @@ public interface TableDescriptors {
    * Notice: the key of map is the table name which contains namespace. It was generated by
    * {@link TableName#getNameWithNamespaceInclAsString()}.
    * @return Map of all descriptors.
+   * @throws IOException
    */
   Map<String, TableDescriptor> getAll() throws IOException;

   /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
+   * @throws IOException
    */
-  void add(final TableDescriptor htd) throws IOException;
+  void add(final TableDescriptor htd)
+  throws IOException;

   /**
+   * @param tablename
    * @return Instance of table descriptor or null if none found.
+   * @throws IOException
    */
-  TableDescriptor remove(final TableName tablename) throws IOException;
+  TableDescriptor remove(final TableName tablename)
+  throws IOException;

   /**
    * Enables the tabledescriptor cache
HMaster.java

@@ -49,6 +49,7 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import javax.servlet.ServletException;

@@ -661,6 +662,10 @@ public class HMaster extends HRegionServer implements MasterServices {
     return connector.getLocalPort();
   }

+  @Override
+  protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
+    return builder -> builder.setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
+  }
   /**
    * For compatibility, if failed with regionserver credentials, try the master one
    */

@@ -1012,7 +1017,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     RegionState rs = this.assignmentManager.getRegionStates().
       getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO);
     LOG.info("hbase:meta {}", rs);
-    if (rs != null && rs.isOffline()) {
+    if (rs.isOffline()) {
       Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
         .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
       initMetaProc = optProc.orElseGet(() -> {
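Note: the restored getMetaTableObserver hook is how the Master applies hbase.meta.replica.count to the bootstrap hbase:meta descriptor. The same builder transformation stated on its own (a sketch, not the Master's full bootstrap path):

    import java.util.function.Function;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    final class MetaReplicationObserverSketch {
      /** Rewrites a meta descriptor builder with the configured replica count (default 1). */
      static Function<TableDescriptorBuilder, TableDescriptorBuilder> observer(Configuration conf) {
        return builder -> builder.setRegionReplication(
            conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
      }
    }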
TableStateManager.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
 import org.apache.hadoop.hbase.util.IdReadWriteLock;
 import org.apache.hadoop.hbase.util.ZKDataMigrator;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;

@@ -52,20 +53,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 // TODO: Make this a guava Service
 @InterfaceAudience.Private
 public class TableStateManager {

   private static final Logger LOG = LoggerFactory.getLogger(TableStateManager.class);

-  /**
-   * All table state is kept in hbase:meta except that of hbase:meta itself.
-   * hbase:meta state is kept here locally in this in-memory variable. State
-   * for hbase:meta is not persistent. If this process dies, the hbase:meta
-   * state reverts to enabled. State is used so we can edit hbase:meta as we
-   * would any other table by disabling, altering, and then re-enabling. If this
-   * process dies in the midst of an edit, the table reverts to enabled. Schema
-   * is read from the filesystem. It is changed atomically so if we die midway
-   * through an edit we should be good.
-   */
-  private TableState.State metaTableState = TableState.State.ENABLED;
-
   /**
    * Set this key to false in Configuration to disable migrating table state from zookeeper so
    * hbase:meta table.

@@ -79,7 +68,7 @@ public class TableStateManager {
   private final ConcurrentMap<TableName, TableState.State> tableName2State =
     new ConcurrentHashMap<>();

-  TableStateManager(MasterServices master) {
+  public TableStateManager(MasterServices master) {
     this.master = master;
   }

@@ -98,6 +87,61 @@ public class TableStateManager {
     }
   }

+  /**
+   * Set table state to provided but only if table in specified states Caller should lock table on
+   * write.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   * @return null if succeed or table state if failed
+   */
+  public TableState setTableStateIfInStates(TableName tableName, TableState.State newState,
+      TableState.State... states) throws IOException {
+    ReadWriteLock lock = tnLock.getLock(tableName);
+    lock.writeLock().lock();
+    try {
+      TableState currentState = readMetaState(tableName);
+      if (currentState == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (currentState.inStates(states)) {
+        updateMetaState(tableName, newState);
+        return null;
+      } else {
+        return currentState;
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Set table state to provided but only if table not in specified states Caller should lock table
+   * on write.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   */
+  public boolean setTableStateIfNotInStates(TableName tableName, TableState.State newState,
+      TableState.State... states) throws IOException {
+    ReadWriteLock lock = tnLock.getLock(tableName);
+    lock.writeLock().lock();
+    try {
+      TableState currentState = readMetaState(tableName);
+      if (currentState == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (!currentState.inStates(states)) {
+        updateMetaState(tableName, newState);
+        return true;
+      } else {
+        return false;
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
   public boolean isTableState(TableName tableName, TableState.State... states) {
     try {
       TableState tableState = getTableState(tableName);

@@ -111,7 +155,6 @@ public class TableStateManager {

   public void setDeletedTable(TableName tableName) throws IOException {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
-      // Can't delete the hbase:meta table.
       return;
     }
     ReadWriteLock lock = tnLock.getLock(tableName);

@@ -140,7 +183,7 @@ public class TableStateManager {
    * @param states filter by states
    * @return tables in given states
    */
-  Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
+  public Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
     // Only be called in region normalizer, will not use cache.
     final Set<TableName> rv = Sets.newHashSet();
     MetaTableAccessor.fullScanTables(master.getConnection(), new MetaTableAccessor.Visitor() {

@@ -156,6 +199,12 @@ public class TableStateManager {
     return rv;
   }

+  public static class TableStateNotFoundException extends TableNotFoundException {
+    TableStateNotFoundException(TableName tableName) {
+      super(tableName.getNameAsString());
+    }
+  }
+
   @NonNull
   public TableState getTableState(TableName tableName) throws IOException {
     ReadWriteLock lock = tnLock.getLock(tableName);

@@ -163,7 +212,7 @@ public class TableStateManager {
     try {
       TableState currentState = readMetaState(tableName);
       if (currentState == null) {
-        throw new TableNotFoundException("No state found for " + tableName);
+        throw new TableStateNotFoundException(tableName);
       }
       return currentState;
     } finally {

@@ -172,18 +221,22 @@ public class TableStateManager {
   }

   private void updateMetaState(TableName tableName, TableState.State newState) throws IOException {
+    if (tableName.equals(TableName.META_TABLE_NAME)) {
+      if (TableState.State.DISABLING.equals(newState) ||
+          TableState.State.DISABLED.equals(newState)) {
+        throw new IllegalArgumentIOException("Cannot disable the meta table; " + newState);
+      }
+      // Otherwise, just return; no need to set ENABLED on meta -- it is always ENABLED.
+      return;
+    }
     boolean succ = false;
     try {
-      if (tableName.equals(TableName.META_TABLE_NAME)) {
-        this.metaTableState = newState;
-      } else {
       MetaTableAccessor.updateTableState(master.getConnection(), tableName, newState);
-      }
-      this.tableName2State.put(tableName, newState);
+      tableName2State.put(tableName, newState);
       succ = true;
     } finally {
       if (!succ) {
-        this.tableName2State.remove(tableName);
+        tableName2State.remove(tableName);
       }
     }
     metaStateUpdated(tableName, newState);

@@ -202,9 +255,7 @@ public class TableStateManager {
     if (state != null) {
       return new TableState(tableName, state);
     }
-    TableState tableState = tableName.equals(TableName.META_TABLE_NAME)?
-      new TableState(TableName.META_TABLE_NAME, this.metaTableState):
-      MetaTableAccessor.getTableState(master.getConnection(), tableName);
+    TableState tableState = MetaTableAccessor.getTableState(master.getConnection(), tableName);
     if (tableState != null) {
       tableName2State.putIfAbsent(tableName, tableState.getState());
     }

@@ -212,8 +263,10 @@ public class TableStateManager {
   }

   public void start() throws IOException {
+    TableDescriptors tableDescriptors = master.getTableDescriptors();
     migrateZooKeeper();
-    fixTableStates(master.getTableDescriptors(), master.getConnection());
+    Connection connection = master.getConnection();
+    fixTableStates(tableDescriptors, connection);
   }

   private void fixTableStates(TableDescriptors tableDescriptors, Connection connection)

@@ -282,7 +335,7 @@ public class TableStateManager {
       TableState ts = null;
       try {
         ts = getTableState(entry.getKey());
-      } catch (TableNotFoundException e) {
+      } catch (TableStateNotFoundException e) {
         // This can happen; table exists but no TableState.
       }
       if (ts == null) {
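Note: the restored setTableStateIfInStates/setTableStateIfNotInStates are compare-and-set operations over the persisted table state, serialized by a per-table write lock; the first returns null on success and the conflicting TableState on failure. A caller-side sketch of the intended use (the sketch class is illustrative; tsm is a TableStateManager obtained inside the master):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.TableStateManager;

    final class StateCasSketch {
      /** Try ENABLED -> DISABLING; true if applied, false if another state was found. */
      static boolean tryStartDisable(TableStateManager tsm, TableName tn) throws IOException {
        TableState conflicting = tsm.setTableStateIfInStates(
            tn, TableState.State.DISABLING, TableState.State.ENABLED);
        return conflicting == null;
      }
    }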
RegionStateStore.java

@@ -147,7 +147,8 @@ public class RegionStateStore {
     }
   }

-  void updateRegionLocation(RegionStateNode regionStateNode) throws IOException {
+  public void updateRegionLocation(RegionStateNode regionStateNode)
+      throws IOException {
     if (regionStateNode.getRegionInfo().isMetaRegion()) {
       updateMetaLocation(regionStateNode.getRegionInfo(), regionStateNode.getRegionLocation(),
         regionStateNode.getState());
CreateTableProcedure.java

@@ -78,7 +78,9 @@ public class CreateTableProcedure
   @Override
   protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableState state)
       throws InterruptedException {
-    LOG.info("{} execute state={}", this, state);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(this + " execute state=" + state);
+    }
     try {
       switch (state) {
         case CREATE_TABLE_PRE_OPERATION:
DisableTableProcedure.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.TableStateManager;

@@ -108,8 +109,8 @@ public class DisableTableProcedure
         setNextState(DisableTableState.DISABLE_TABLE_ADD_REPLICATION_BARRIER);
         break;
       case DISABLE_TABLE_ADD_REPLICATION_BARRIER:
-        if (env.getMasterServices().getTableDescriptors().get(tableName).
-            hasGlobalReplicationScope()) {
+        if (env.getMasterServices().getTableDescriptors().get(tableName)
+            .hasGlobalReplicationScope()) {
           MasterFileSystem fs = env.getMasterFileSystem();
           try (BufferedMutator mutator = env.getMasterServices().getConnection()
             .getBufferedMutator(TableName.META_TABLE_NAME)) {

@@ -241,7 +242,10 @@ public class DisableTableProcedure
    */
   private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
     boolean canTableBeDisabled = true;
-    if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+    if (tableName.equals(TableName.META_TABLE_NAME)) {
+      setFailure("master-disable-table", new ConstraintException("Cannot disable catalog table"));
+      canTableBeDisabled = false;
+    } else if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
       setFailure("master-disable-table", new TableNotFoundException(tableName));
       canTableBeDisabled = false;
     } else if (!skipTableStateCheck) {
|
@ -1,4 +1,4 @@
|
||||||
/*
|
/**
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
* or more contributor license agreements. See the NOTICE file
|
* or more contributor license agreements. See the NOTICE file
|
||||||
* distributed with this work for additional information
|
* distributed with this work for additional information
|
||||||
|
@ -22,17 +22,16 @@ import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import org.apache.hadoop.hbase.Cell;
|
import org.apache.hadoop.hbase.Cell;
|
||||||
import org.apache.hadoop.hbase.HConstants;
|
import org.apache.hadoop.hbase.HConstants;
|
||||||
import org.apache.hadoop.hbase.HRegionLocation;
|
|
||||||
import org.apache.hadoop.hbase.MetaTableAccessor;
|
import org.apache.hadoop.hbase.MetaTableAccessor;
|
||||||
import org.apache.hadoop.hbase.TableName;
|
import org.apache.hadoop.hbase.TableName;
|
||||||
import org.apache.hadoop.hbase.TableNotDisabledException;
|
import org.apache.hadoop.hbase.TableNotDisabledException;
|
||||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
import org.apache.hadoop.hbase.TableNotFoundException;
|
||||||
import org.apache.hadoop.hbase.client.ClusterConnection;
|
|
||||||
import org.apache.hadoop.hbase.client.Connection;
|
import org.apache.hadoop.hbase.client.Connection;
|
||||||
|
import org.apache.hadoop.hbase.client.Get;
|
||||||
import org.apache.hadoop.hbase.client.RegionInfo;
|
import org.apache.hadoop.hbase.client.RegionInfo;
|
||||||
import org.apache.hadoop.hbase.client.RegionLocator;
|
|
||||||
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
|
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
|
||||||
import org.apache.hadoop.hbase.client.Result;
|
import org.apache.hadoop.hbase.client.Result;
|
||||||
|
import org.apache.hadoop.hbase.client.Table;
|
||||||
import org.apache.hadoop.hbase.client.TableDescriptor;
|
import org.apache.hadoop.hbase.client.TableDescriptor;
|
||||||
import org.apache.hadoop.hbase.client.TableState;
|
import org.apache.hadoop.hbase.client.TableState;
|
||||||
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
|
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
|
||||||
|
@ -58,7 +57,6 @@ public class EnableTableProcedure
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructor
|
* Constructor
|
||||||
*
|
|
||||||
* @param env MasterProcedureEnv
|
* @param env MasterProcedureEnv
|
||||||
* @param tableName the table to operate on
|
* @param tableName the table to operate on
|
||||||
*/
|
*/
|
||||||
|
@ -68,7 +66,6 @@ public class EnableTableProcedure
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructor
|
* Constructor
|
||||||
*
|
|
||||||
* @param env MasterProcedureEnv
|
* @param env MasterProcedureEnv
|
||||||
* @param tableName the table to operate on
|
* @param tableName the table to operate on
|
||||||
*/
|
*/
|
||||||
|
@ -102,55 +99,66 @@ public class EnableTableProcedure
|
||||||
setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE);
|
setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE);
|
||||||
break;
|
break;
|
||||||
case ENABLE_TABLE_MARK_REGIONS_ONLINE:
|
case ENABLE_TABLE_MARK_REGIONS_ONLINE:
|
||||||
// Get the region replica count. If changed since disable, need to do
|
Connection connection = env.getMasterServices().getConnection();
|
||||||
// more work assigning.
|
// we will need to get the tableDescriptor here to see if there is a change in the replica
|
||||||
ClusterConnection connection = env.getMasterServices().getClusterConnection();
|
// count
|
||||||
TableDescriptor tableDescriptor =
|
TableDescriptor hTableDescriptor =
|
||||||
env.getMasterServices().getTableDescriptors().get(tableName);
|
env.getMasterServices().getTableDescriptors().get(tableName);
|
||||||
int configuredReplicaCount = tableDescriptor.getRegionReplication();
|
|
||||||
// Get regions for the table from memory; get both online and offline regions ('true').
|
// Get the replica count
|
||||||
|
int regionReplicaCount = hTableDescriptor.getRegionReplication();
|
||||||
|
|
||||||
|
// Get the regions for the table from memory; get both online and offline regions
|
||||||
|
// ('true').
|
||||||
List<RegionInfo> regionsOfTable =
|
List<RegionInfo> regionsOfTable =
|
||||||
env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true);
|
env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true);
|
||||||
|
|
||||||
// How many replicas do we currently have? Check regions returned from
|
int currentMaxReplica = 0;
|
||||||
// in-memory state.
|
// Check if the regions in memory have replica regions as marked in META table
|
||||||
int currentMaxReplica = getMaxReplicaId(regionsOfTable);
|
for (RegionInfo regionInfo : regionsOfTable) {
|
||||||
|
if (regionInfo.getReplicaId() > currentMaxReplica) {
|
||||||
// Read the META table to know the number of replicas the table currently has.
|
// Iterating through all the list to identify the highest replicaID region.
|
||||||
// If there was a table modification on region replica count then need to
|
// We can stop after checking with the first set of regions??
|
||||||
// adjust replica counts here.
|
currentMaxReplica = regionInfo.getReplicaId();
|
||||||
int replicasFound = TableName.isMetaTableName(this.tableName)?
|
|
||||||
getReplicaCountForMetaTable(connection):
|
|
||||||
getReplicaCountInMetaTable(connection, configuredReplicaCount, regionsOfTable);
|
|
||||||
LOG.info("replicasFound={} (configuredReplicaCount={} for {}", replicasFound,
|
|
||||||
configuredReplicaCount, tableName.getNameAsString());
|
|
||||||
if (currentMaxReplica == (configuredReplicaCount - 1)) {
|
|
||||||
if (LOG.isDebugEnabled()) {
|
|
||||||
LOG.debug("No change in number of region replicas (configuredReplicaCount={});"
|
|
||||||
+ " assigning.", configuredReplicaCount);
|
|
||||||
}
|
}
|
||||||
} else if (currentMaxReplica > (configuredReplicaCount - 1)) {
|
}
|
||||||
// We have additional regions as the replica count has been decreased. Delete
|
|
||||||
|
// read the META table to know the actual number of replicas for the table - if there
|
||||||
|
// was a table modification on region replica then this will reflect the new entries also
|
||||||
|
int replicasFound =
|
||||||
|
getNumberOfReplicasFromMeta(connection, regionReplicaCount, regionsOfTable);
|
||||||
|
assert regionReplicaCount - 1 == replicasFound;
|
||||||
|
LOG.info(replicasFound + " META entries added for the given regionReplicaCount "
|
||||||
|
+ regionReplicaCount + " for the table " + tableName.getNameAsString());
|
||||||
|
if (currentMaxReplica == (regionReplicaCount - 1)) {
|
||||||
|
if (LOG.isDebugEnabled()) {
|
||||||
|
LOG.debug("There is no change to the number of region replicas."
|
||||||
|
+ " Assigning the available regions." + " Current and previous"
|
||||||
|
+ "replica count is " + regionReplicaCount);
|
||||||
|
}
|
||||||
|
} else if (currentMaxReplica > (regionReplicaCount - 1)) {
|
||||||
|
// we have additional regions as the replica count has been decreased. Delete
|
||||||
// those regions because already the table is in the unassigned state
|
// those regions because already the table is in the unassigned state
|
||||||
LOG.info("The number of replicas " + (currentMaxReplica + 1)
|
LOG.info("The number of replicas " + (currentMaxReplica + 1)
|
||||||
+ " is more than the region replica count " + configuredReplicaCount);
|
+ " is more than the region replica count " + regionReplicaCount);
|
||||||
List<RegionInfo> copyOfRegions = new ArrayList<>(regionsOfTable);
|
List<RegionInfo> copyOfRegions = new ArrayList<RegionInfo>(regionsOfTable);
|
||||||
for (RegionInfo regionInfo : copyOfRegions) {
|
for (RegionInfo regionInfo : copyOfRegions) {
|
||||||
if (regionInfo.getReplicaId() > (configuredReplicaCount - 1)) {
|
if (regionInfo.getReplicaId() > (regionReplicaCount - 1)) {
|
||||||
// delete the region from the regionStates
|
// delete the region from the regionStates
|
||||||
env.getAssignmentManager().getRegionStates().deleteRegion(regionInfo);
|
env.getAssignmentManager().getRegionStates().deleteRegion(regionInfo);
|
||||||
// remove it from the list of regions of the table
|
// remove it from the list of regions of the table
|
||||||
LOG.info("Removed replica={} of {}", regionInfo.getRegionId(), regionInfo);
|
LOG.info("The regioninfo being removed is " + regionInfo + " "
|
||||||
|
+ regionInfo.getReplicaId());
|
||||||
regionsOfTable.remove(regionInfo);
|
regionsOfTable.remove(regionInfo);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// the replicasFound is less than the regionReplication
|
// the replicasFound is less than the regionReplication
|
||||||
LOG.info("Number of replicas has increased. Assigning new region replicas." +
|
LOG.info("The number of replicas has been changed(increased)."
|
||||||
"The previous replica count was {}. The current replica count is {}.",
|
+ " Lets assign the new region replicas. The previous replica count was "
|
||||||
(currentMaxReplica + 1), configuredReplicaCount);
|
+ (currentMaxReplica + 1) + ". The current replica count is " + regionReplicaCount);
|
||||||
regionsOfTable = RegionReplicaUtil.addReplicas(tableDescriptor, regionsOfTable,
|
regionsOfTable = RegionReplicaUtil.addReplicas(hTableDescriptor, regionsOfTable,
|
||||||
currentMaxReplica + 1, configuredReplicaCount);
|
currentMaxReplica + 1, regionReplicaCount);
|
||||||
}
|
}
|
||||||
// Assign all the table regions. (including region replicas if added).
|
// Assign all the table regions. (including region replicas if added).
|
||||||
// createAssignProcedure will try to retain old assignments if possible.
|
// createAssignProcedure will try to retain old assignments if possible.
|
||||||
@@ -178,31 +186,9 @@ public class EnableTableProcedure
     return Flow.HAS_MORE_STATE;
   }

-  /**
-   * @return Count of hbase:meta replicas.
-   */
-  private static int getReplicaCountForMetaTable(ClusterConnection connection) throws IOException {
-    // It is hard to get at the meta replicas. Do this ugly stuff for now.
-    // In getRegionLocator, when meta, it uses the Registry. Default implementation
-    // is zk-based registry. It reads znodes that host hbase:meta region replicas.
-    RegionLocator rl = connection.getRegionLocator(TableName.META_TABLE_NAME);
-    if (rl != null) {
-      // Look at first region only.
-      List<HRegionLocation> rls = rl.getRegionLocations(HConstants.EMPTY_START_ROW);
-      if (rls != null) {
-        return rls.size();
-      }
-    }
-    // If can't find count, return 0.
-    return 0;
-  }
-
-  /**
-   * @return Count of replicas found reading hbase:meta table row.
-   */
-  private int getReplicaCountInMetaTable(Connection connection, int regionReplicaCount,
+  private int getNumberOfReplicasFromMeta(Connection connection, int regionReplicaCount,
       List<RegionInfo> regionsOfTable) throws IOException {
-    Result r = MetaTableAccessor.getCatalogFamilyRow(connection, regionsOfTable.get(0));
+    Result r = getRegionFromMeta(connection, regionsOfTable);
     int replicasFound = 0;
     for (int i = 1; i < regionReplicaCount; i++) {
       // Since we have already added the entries to the META we will be getting only that here
@@ -215,6 +201,16 @@ public class EnableTableProcedure
     return replicasFound;
   }

+  private Result getRegionFromMeta(Connection connection, List<RegionInfo> regionsOfTable)
+      throws IOException {
+    byte[] metaKeyForRegion = MetaTableAccessor.getMetaKeyForRegion(regionsOfTable.get(0));
+    Get get = new Get(metaKeyForRegion);
+    get.addFamily(HConstants.CATALOG_FAMILY);
+    Table metaTable = MetaTableAccessor.getMetaHTable(connection);
+    Result r = metaTable.get(get);
+    return r;
+  }
+
   @Override
   protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state)
       throws IOException {
@@ -412,19 +408,4 @@ public class EnableTableProcedure
       }
     }
   }
-
-  /**
-   * @return Maximum region replica id found in passed list of regions.
-   */
-  private static int getMaxReplicaId(List<RegionInfo> regions) {
-    int max = 0;
-    for (RegionInfo regionInfo: regions) {
-      if (regionInfo.getReplicaId() > max) {
-        // Iterating through all the list to identify the highest replicaID region.
-        // We can stop after checking with the first set of regions??
-        max = regionInfo.getReplicaId();
-      }
-    }
-    return max;
-  }
 }
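For readers reconstructing the replica bookkeeping these hunks touch: the procedure finds the highest replica id currently known for the table, then either deletes the surplus replica regions or asks RegionReplicaUtil.addReplicas for the missing ones. Below is a minimal, self-contained sketch of that reconcile step. It is not part of this commit: Replica, reconcile, and the class name are illustrative stand-ins for RegionInfo and the procedure code, and it assumes a JDK with records (16+).

import java.util.ArrayList;
import java.util.List;

/** Simplified stand-in for RegionInfo; only the replica id matters here. */
record Replica(String region, int replicaId) {}

public class ReplicaReconcileSketch {
  /** Trim or extend the replica list so the max replica id becomes replicaCount - 1. */
  static List<Replica> reconcile(List<Replica> regions, int replicaCount) {
    int currentMax = 0;
    for (Replica r : regions) { // same scan as the removed getMaxReplicaId()
      currentMax = Math.max(currentMax, r.replicaId());
    }
    List<Replica> out = new ArrayList<>(regions);
    if (currentMax > replicaCount - 1) {
      // replica count was decreased: drop the extra replica regions
      out.removeIf(r -> r.replicaId() > replicaCount - 1);
    } else if (currentMax < replicaCount - 1) {
      // replica count was increased: add the missing replicas per base region
      for (Replica r : regions) {
        if (r.replicaId() == 0) {
          for (int id = currentMax + 1; id <= replicaCount - 1; id++) {
            out.add(new Replica(r.region(), id));
          }
        }
      }
    }
    return out;
  }

  public static void main(String[] args) {
    List<Replica> regions = List.of(new Replica("r1", 0), new Replica("r1", 1));
    System.out.println(reconcile(regions, 3)); // adds replica id 2 for r1
  }
}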
@@ -25,11 +25,11 @@ import java.util.function.LongConsumer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.TableStateManager;
+import org.apache.hadoop.hbase.master.TableStateManager.TableStateNotFoundException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
@@ -149,7 +149,7 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
         return false;
       }
       Thread.sleep(SLEEP_INTERVAL_MS);
-    } catch (TableNotFoundException e) {
+    } catch (TableStateNotFoundException e) {
       return false;
     } catch (InterruptedException e) {
       throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
@@ -228,7 +228,7 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
         return true;
       }
       Thread.sleep(SLEEP_INTERVAL_MS);
-    } catch (TableNotFoundException e) {
+    } catch (TableStateNotFoundException e) {
       return false;
     } catch (InterruptedException e) {
       throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
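The two hunks above restore catch clauses keyed to TableStateManager.TableStateNotFoundException inside a sleep-and-retry wait loop. For context, a stand-alone sketch of that loop shape, not part of this commit; the exception type, StateCheck, and waitFor are illustrative stand-ins for the HBase internals.

import java.io.IOException;
import java.io.InterruptedIOException;

public class PollUntilStateSketch {
  private static final long SLEEP_INTERVAL_MS = 100;

  /** Illustrative stand-in for TableStateManager.TableStateNotFoundException. */
  static class TableStateNotFoundException extends Exception {}

  interface StateCheck {
    /** Returns true once the awaited table state holds. */
    boolean check() throws TableStateNotFoundException;
  }

  /** Poll until check() succeeds; a missing state ends the wait, as in the hunks above. */
  static boolean waitFor(StateCheck check) throws IOException {
    for (;;) {
      try {
        if (check.check()) {
          return true;
        }
        Thread.sleep(SLEEP_INTERVAL_MS);
      } catch (TableStateNotFoundException e) {
        return false; // table state gone: nothing left to wait for
      } catch (InterruptedException e) {
        throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
      }
    }
  }
}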
@@ -1,4 +1,4 @@
-/*
+/**
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -41,6 +41,6 @@ public class MetaLocationSyncer extends ClientZKSyncer {

   @Override
   Collection<String> getNodesToWatch() {
-    return watcher.getZNodePaths().getMetaReplicaZNodes();
+    return watcher.getZNodePaths().metaReplicaZNodes.values();
   }
 }
@@ -588,9 +588,7 @@ public class CompactingMemStore extends AbstractMemStore {
    * It takes the updatesLock exclusively, pushes active into the pipeline, releases updatesLock
    * and compacts the pipeline.
    */
-  // When this class private, saw ClassNotFoundException? Being loaded from another classloader?
-  // Trying with class as public to see if makes a difference.
-  public class InMemoryCompactionRunnable implements Runnable {
+  private class InMemoryCompactionRunnable implements Runnable {
     @Override
     public void run() {
       inMemoryCompaction();
@@ -747,14 +747,9 @@ public class HRegionServer extends HasThread implements

   protected TableDescriptors getFsTableDescriptors() throws IOException {
     return new FSTableDescriptors(this.conf,
-      this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
+      this.fs, this.rootDir, !canUpdateTableDescriptor(), false, getMetaTableObserver());
   }

-  /**
-   * @deprecated Since 2.3.0. Not needed anymore. Was used by Master to pass in replication
-   *   setting on hbase:meta construction. To be removed in hbase4.
-   */
-  @Deprecated
   protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
     return null;
   }
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

@@ -98,7 +99,10 @@ public class FSTableDescriptors implements TableDescriptors {
   // TODO.
   private final Map<TableName, TableDescriptor> cache = new ConcurrentHashMap<>();

-  private final Configuration configuration;
+  /**
+   * Table descriptor for <code>hbase:meta</code> catalog table
+   */
+  private final TableDescriptor metaTableDescriptor;

   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
@@ -120,21 +124,29 @@ public class FSTableDescriptors implements TableDescriptors {
    */
   public FSTableDescriptors(final Configuration conf, final FileSystem fs,
     final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
+    this(conf, fs, rootdir, fsreadonly, usecache, null);
+  }
+
+  /**
+   * @param fsreadonly True if we are read-only when it comes to filesystem
+   *                   operations; i.e. on remove, we do not do delete in fs.
+   * @param metaObserver Used by HMaster. It need to modify the META_REPLICAS_NUM for meta table descriptor.
+   *                     see HMaster#finishActiveMasterInitialization
+   *                     TODO: This is a workaround. Should remove this ugly code...
+   */
+  public FSTableDescriptors(final Configuration conf, final FileSystem fs,
+    final Path rootdir, final boolean fsreadonly, final boolean usecache,
+    Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
     this.fs = fs;
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-    this.configuration = conf;
+    this.metaTableDescriptor = metaObserver == null ? createMetaTableDescriptor(conf)
+      : metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
   }
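The restored six-argument constructor lets a caller post-process the hbase:meta descriptor before it is cached. A sketch of a plausible call, following the javadoc above about HMaster adjusting META_REPLICAS_NUM; the helper class and method are hypothetical, while the constructor, constants, and setRegionReplication are the ones appearing in this diff.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class MetaObserverSketch {
  // Builds an FSTableDescriptors whose meta descriptor picks up the configured
  // replica count, mirroring what the javadoc above describes HMaster doing.
  static FSTableDescriptors build(Configuration conf, FileSystem fs, Path rootdir)
      throws IOException {
    return new FSTableDescriptors(conf, fs, rootdir,
      false /* fsreadonly */, true /* usecache */,
      builder -> builder.setRegionReplication(
        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM)));
  }
}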
-  /**
-   * Should be private
-   * @deprecated Since 2.3.0. Should be for internal use only. Used by testing.
-   */
-  @Deprecated
   @VisibleForTesting
-  public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf)
-      throws IOException {
+  public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) throws IOException {
     // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
     // the META table data goes to File mode BC only. Test how that affect the system. If too much,
     // we have to rethink about adding back the setCacheDataInL1 for META table CFs.
@@ -169,9 +181,7 @@ public class FSTableDescriptors implements TableDescriptors {
         .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(
           MultiRowMutationEndpoint.class.getName())
           .setPriority(Coprocessor.PRIORITY_SYSTEM)
-          .build())
-      .setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM,
-        HConstants.DEFAULT_META_REPLICA_NUM));
+          .build());
   }

   @VisibleForTesting
@@ -208,11 +218,16 @@ public class FSTableDescriptors implements TableDescriptors {
   public TableDescriptor get(final TableName tablename)
       throws IOException {
     invocations++;
-    // If some one tries to get the descriptor for
+    if (TableName.META_TABLE_NAME.equals(tablename)) {
+      cachehits++;
+      return metaTableDescriptor;
+    }
+    // hbase:meta is already handled. If some one tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
       throw new IOException("No descriptor found for non table = " + tablename);
     }

     if (usecache) {
       // Look in cache of descriptors.
       TableDescriptor cachedtdm = this.cache.get(tablename);
@@ -221,27 +236,24 @@ public class FSTableDescriptors implements TableDescriptors {
         return cachedtdm;
       }
     }
-    TableDescriptor td = null;
+    TableDescriptor tdmt = null;
     try {
-      td = getTableDescriptorFromFs(fs, rootdir, tablename);
+      tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
     } catch (NullPointerException e) {
-      LOG.debug("Exception during readTableDecriptor; tableName={}", tablename, e);
+      LOG.debug("Exception during readTableDecriptor. Current table name = "
+        + tablename, e);
     } catch (TableInfoMissingException e) {
-      if (TableName.isMetaTableName(tablename)) {
-        // If we tried to access hbase:meta and it not there, create it.
-        td = createMetaTableDescriptor(this.configuration);
-        LOG.info("Creating new hbase:meta table default descriptor/schema {}", td);
-      }
+      // ignore. This is regular operation
     } catch (IOException ioe) {
       LOG.debug("Exception during readTableDecriptor. Current table name = "
         + tablename, ioe);
     }
     // last HTD written wins
-    if (usecache && td != null) {
-      this.cache.put(tablename, td);
+    if (usecache && tdmt != null) {
+      this.cache.put(tablename, tdmt);
     }

-    return td;
+    return tdmt;
   }

   /**
@@ -251,21 +263,16 @@ public class FSTableDescriptors implements TableDescriptors {
   public Map<String, TableDescriptor> getAll()
       throws IOException {
     Map<String, TableDescriptor> tds = new TreeMap<>();

     if (fsvisited && usecache) {
-      if (this.cache.get(TableName.META_TABLE_NAME) == null) {
-        // This get will create hbase:meta if it does not exist. Will also populate cache.
-        get(TableName.META_TABLE_NAME);
-      }
       for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
         tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue());
       }
+      // add hbase:meta to the response
+      tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor);
     } else {
       LOG.trace("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
-      // Add hbase:meta descriptor. The get will create hbase:meta in fs if doesn't
-      // exist. FSUtils listing table names in fs skip meta dirs. TODO: Fill out
-      // FSUtils with methods to get userspace tables and system tables.
-      tds.put(TableName.META_TABLE_NAME.toString(), get(TableName.META_TABLE_NAME));
       for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
         TableDescriptor htd = null;
         try {
@@ -319,9 +326,14 @@ public class FSTableDescriptors implements TableDescriptors {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) {
-      throw new NotImplementedException("Cannot add Descriptor for reserved subdirectory name: " +
-        htd.getTableName().getNameAsString());
+    TableName tableName = htd.getTableName();
+    if (TableName.META_TABLE_NAME.equals(tableName)) {
+      throw new NotImplementedException(HConstants.NOT_IMPLEMENTED);
+    }
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
+      throw new NotImplementedException(
+        "Cannot add a table descriptor for a reserved subdirectory name: "
+          + htd.getTableName().getNameAsString());
     }
     updateTableDescriptor(htd);
   }
@@ -347,6 +359,26 @@ public class FSTableDescriptors implements TableDescriptors {
     return descriptor;
   }
+  /**
+   * Checks if a current table info file exists for the given table
+   *
+   * @param tableName name of table
+   * @return true if exists
+   * @throws IOException
+   */
+  public boolean isTableInfoExists(TableName tableName) throws IOException {
+    return getTableInfoPath(tableName) != null;
+  }
+
+  /**
+   * Find the most current table info file for the given table in the hbase root directory.
+   * @return The file status of the current table info file or null if it does not exist
+   */
+  private FileStatus getTableInfoPath(final TableName tableName) throws IOException {
+    Path tableDir = getTableDir(tableName);
+    return getTableInfoPath(tableDir);
+  }
+
   private FileStatus getTableInfoPath(Path tableDir)
       throws IOException {
     return getTableInfoPath(fs, tableDir, !fsreadonly);
@@ -361,6 +393,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * were sequence numbers).
    *
    * @return The file status of the current table info file or null if it does not exist
+   * @throws IOException
    */
   public static FileStatus getTableInfoPath(FileSystem fs, Path tableDir)
       throws IOException {
@@ -378,6 +411,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * older files.
    *
    * @return The file status of the current table info file or null if none exist
+   * @throws IOException
    */
   private static FileStatus getTableInfoPath(FileSystem fs, Path tableDir, boolean removeOldFiles)
       throws IOException {
@@ -565,6 +599,21 @@ public class FSTableDescriptors implements TableDescriptors {
     return p;
   }

+  /**
+   * Deletes all the table descriptor files from the file system.
+   * Used in unit tests only.
+   * @throws NotImplementedException if in read only mode
+   */
+  public void deleteTableDescriptorIfExists(TableName tableName) throws IOException {
+    if (fsreadonly) {
+      throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
+    }
+
+    Path tableDir = getTableDir(tableName);
+    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
+    deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
+  }
+
   /**
    * Deletes files matching the table info file pattern within the given directory
    * whose sequenceId is at most the given max sequenceId.
@@ -701,27 +750,6 @@ public class FSTableDescriptors implements TableDescriptors {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
     }
-    return createTableDescriptorForTableDirectory(this.fs, tableDir, htd, forceCreation);
-  }
-
-  /**
-   * Create a new TableDescriptor in the specified table directory and filesystem. Happens when we
-   * create a new table or snapshot a table. This method doesn't require creationg of an
-   * {@link FSTableDescriptors} instance so it takes a bunch of arguments. Users of the method
-   * above used to create an FSTableDescriptors instance just to run the method. That was fine
-   * until construction started expecting to be able to read the hbase:meta schema. Snapshotting
-   * to some random dir would fail construction if no hbase:meta schema available.
-   * @param fs Filesystem to write to. Snapshot can set it to other than that of running system.
-   * @param tableDir table directory under which we should write the file
-   * @param htd description of the table to write
-   * @param forceCreation if <tt>true</tt>,then even if previous table descriptor is present it will
-   *   be overwritten
-   * @return <tt>true</tt> if the we successfully created the file, <tt>false</tt> if the file
-   *   already exists and we weren't forcing the descriptor creation.
-   * @throws IOException if a filesystem error occurs
-   */
-  public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path tableDir,
-    TableDescriptor htd, boolean forceCreation) throws IOException {
     FileStatus status = getTableInfoPath(fs, tableDir);
     if (status != null) {
       LOG.debug("Current path=" + status.getPath());
@@ -737,5 +765,6 @@ public class FSTableDescriptors implements TableDescriptors {
     Path p = writeTableDescriptor(fs, htd, tableDir, status);
     return p != null;
   }

 }
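isTableInfoExists and deleteTableDescriptorIfExists, restored above, pair naturally in test cleanup. A hypothetical helper sketch, not from this commit; fstd is assumed to be a non-read-only FSTableDescriptors instance.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class DescriptorCleanupSketch {
  static void resetDescriptor(FSTableDescriptors fstd, TableName table) throws Exception {
    if (fstd.isTableInfoExists(table)) {
      // Removes every .tableinfo file for the table; per the javadoc above,
      // intended for unit tests only.
      fstd.deleteTableDescriptorIfExists(table);
    }
  }
}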
@@ -154,11 +154,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.impl.Log4jLoggerAdapter;

-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;


 /**
  * Facility for testing HBase. Replacement for
  * old HBaseTestCase and HBaseClusterTestCase functionality.
@@ -501,20 +498,17 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {

   /**
    * @return META table descriptor
-   * @deprecated since 2.0 version and will be removed in 3.0 version. Currently for test only.
-   *   use {@link #getMetaTableDescriptorBuilder()}. Alter the hbase:meta table instead.
+   * @deprecated since 2.0 version and will be removed in 3.0 version.
+   *             use {@link #getMetaTableDescriptorBuilder()}
    */
   @Deprecated
   public HTableDescriptor getMetaTableDescriptor() {
-    return new ImmutableHTableDescriptor(getMetaTableDescriptor());
+    return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
   }

   /**
    * @return META table descriptor
-   * @deprecated Since 2.3.0. No one should be using this internal. Used in testing only.
    */
-  @Deprecated
-  @VisibleForTesting
   public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
     try {
       return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-
-
-/**
- * Test being able to edit hbase:meta.
- */
-@Category({MiscTests.class, LargeTests.class})
-public class TestHBaseMetaEdit {
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestHBaseMetaEdit.class);
-  @Rule
-  public TestName name = new TestName();
-  private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  @Before
-  public void before() throws Exception {
-    UTIL.startMiniCluster();
-  }
-
-  @After
-  public void after() throws Exception {
-    UTIL.shutdownMiniCluster();
-  }
-
-  /**
-   * Set versions, set HBASE-16213 indexed block encoding, and add a column family.
-   * Verify they are all in place by looking at TableDescriptor AND by checking
-   * what the RegionServer sees after opening Region.
-   */
-  @Test
-  public void testEditMeta() throws IOException {
-    Admin admin = UTIL.getAdmin();
-    admin.disableTable(TableName.META_TABLE_NAME);
-    TableDescriptor descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
-    ColumnFamilyDescriptor cfd = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY);
-    byte [] extraColumnFamilyName = Bytes.toBytes("xtra");
-    ColumnFamilyDescriptor newCfd =
-      ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build();
-    int oldVersions = cfd.getMaxVersions();
-    // Add '1' to current versions count.
-    cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(oldVersions + 1).
-      setConfiguration(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING,
-        DataBlockEncoding.ROW_INDEX_V1.toString()).build();
-    admin.modifyColumnFamily(TableName.META_TABLE_NAME, cfd);
-    admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd);
-    descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
-    // Assert new max versions is == old versions plus 1.
-    assertEquals(oldVersions + 1,
-      descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
-    admin.enableTable(TableName.META_TABLE_NAME);
-    descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
-    // Assert new max versions is == old versions plus 1.
-    assertEquals(oldVersions + 1,
-      descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
-    assertTrue(descriptor.getColumnFamily(newCfd.getName()) != null);
-    String encoding = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getConfiguration().
-      get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
-    assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
-    Region r = UTIL.getHBaseCluster().getRegionServer(0).
-      getRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
-    assertEquals(oldVersions + 1,
-      r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions());
-    encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().
-      getConfigurationValue(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
-    assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString());
-    assertTrue(r.getStore(extraColumnFamilyName) != null);
-  }
-}
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -537,6 +538,22 @@ public class TestAdmin2 extends TestAdminBase {
       " HBase was not available");
   }

+  @Test
+  public void testDisableCatalogTable() throws Exception {
+    try {
+      ADMIN.disableTable(TableName.META_TABLE_NAME);
+      fail("Expected to throw ConstraintException");
+    } catch (ConstraintException e) {
+    }
+    // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
+    // actually getting disabled by the disableTable() call.
+    HTableDescriptor htd =
+      new HTableDescriptor(TableName.valueOf(Bytes.toBytes(name.getMethodName())));
+    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf1"));
+    htd.addFamily(hcd);
+    TEST_UTIL.getHBaseAdmin().createTable(htd);
+  }
+
   @Test
   public void testIsEnabledOrDisabledOnUnknownTable() throws Exception {
     try {
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
  * agreements. See the NOTICE file distributed with this work for additional information regarding
  * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
@@ -39,6 +39,7 @@ import java.util.Set;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;

 /**
  * Class to test asynchronous table admin operations
@@ -53,6 +54,18 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase {
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestAsyncTableAdminApi2.class);

+  @Test
+  public void testDisableCatalogTable() throws Exception {
+    try {
+      this.admin.disableTable(TableName.META_TABLE_NAME).join();
+      fail("Expected to throw ConstraintException");
+    } catch (Exception e) {
+    }
+    // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
+    // actually getting disabled by the disableTable() call.
+    createTableWithDefaultConf(tableName);
+  }
+
   @Test
   public void testAddColumnFamily() throws Exception {
     // Create a table with two families
@@ -18,9 +18,12 @@
 package org.apache.hadoop.hbase.client;

 import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
+import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;

 import java.util.ArrayList;
 import java.util.Collections;
@@ -29,6 +32,7 @@ import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.regex.Pattern;
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
@@ -197,6 +201,14 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
       ok = false;
     }
     assertTrue(ok);
+    // meta table can not be disabled.
+    try {
+      admin.disableTable(TableName.META_TABLE_NAME).get();
+      fail("meta table can not be disabled");
+    } catch (ExecutionException e) {
+      Throwable cause = e.getCause();
+      assertThat(cause, instanceOf(DoNotRetryIOException.class));
+    }
   }

   @Test
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -161,7 +161,7 @@ public class TestLogRollingNoCluster {
         byte[] bytes = Bytes.toBytes(i);
         edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
         RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
-        TableDescriptor htd = TEST_UTIL.getMetaTableDescriptor();
+        TableDescriptor htd = TEST_UTIL.getMetaTableDescriptorBuilder().build();
         NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
         for(byte[] fam : htd.getColumnFamilyNames()) {
           scopes.put(fam, 0);
@@ -322,9 +322,7 @@ public class TestFSTableDescriptors {
     }

     Map<String, TableDescriptor> tables = tds.getAll();
-    assertEquals(5, tables.size());
-    // Remove because it messes up below order test.
-    tables.remove(TableName.META_TABLE_NAME.toString());
+    assertEquals(4, tables.size());

     String[] tableNamesOrdered =
       new String[] { "bar:foo", "default:bar", "default:foo", "foo:bar" };
@@ -370,9 +368,6 @@ public class TestFSTableDescriptors {

     for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) {
       String t = (String) entry.getKey();
-      if (t.equals(TableName.META_TABLE_NAME.toString())) {
-        continue;
-      }
       TableDescriptor nchtd = entry.getValue();
       assertTrue("expected " + htd.toString() +
         " got: " + chtds.get(TableName.valueOf(t)).toString(),
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -61,6 +61,14 @@ public final class MetaTableLocator {
   private MetaTableLocator() {
   }

+  /**
+   * Checks if the meta region location is available.
+   * @return true if meta region location is available, false if not
+   */
+  public static boolean isLocationAvailable(ZKWatcher zkw) {
+    return getMetaRegionLocation(zkw) != null;
+  }
+
   /**
    * @param zkw ZooKeeper watcher to be used
    * @return meta table regions and their locations.
@@ -258,7 +266,7 @@ public final class MetaTableLocator {
   }

   /**
-   * Load the meta region state from the meta region server ZNode.
+   * Load the meta region state from the meta server ZNode.
    *
    * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
    * @param replicaId the ID of the replica
@@ -298,8 +306,10 @@ public final class MetaTableLocator {
     if (serverName == null) {
       state = RegionState.State.OFFLINE;
     }
-    return new RegionState(RegionReplicaUtil.getRegionInfoForReplica(
-      RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName);
+    return new RegionState(
+      RegionReplicaUtil.getRegionInfoForReplica(
+        RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId),
+      state, serverName);
   }

   /**
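The restored MetaTableLocator.isLocationAvailable gives callers a cheap, ZooKeeper-only probe for the meta location. A hedged usage sketch, not from this commit; the helper class and the 50 ms poll interval are illustrative, while isLocationAvailable is the method restored above.

import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class WaitForMetaSketch {
  // zkw is assumed to be an already-open ZKWatcher.
  static void waitForMetaLocation(ZKWatcher zkw) throws InterruptedException {
    while (!MetaTableLocator.isLocationAvailable(zkw)) {
      Thread.sleep(50); // poll until the meta znode carries a location
    }
  }
}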
@@ -2057,7 +2057,7 @@ public final class ZKUtil {
       " byte(s) of data from znode " + znode +
       (watcherSet? " and set watcher; ": "; data=") +
       (data == null? "null": data.length == 0? "empty": (
-        zkw.getZNodePaths().isMetaZNodePrefix(znode)?
+        znode.startsWith(zkw.getZNodePaths().metaZNodePrefix)?
         getServerNameOrEmptyString(data):
         znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode)?
         getServerNameOrEmptyString(data):