HBASE-25121 Refactor MetaTableAccessor.addRegionsToMeta and its usage places (#2476)

Signed-off-by: stack <stack@apache.org>
Duo Zhang 2020-10-05 21:29:55 +08:00
parent 283bb52f7e
commit 4df536e31d
12 changed files with 250 additions and 208 deletions


@@ -31,12 +31,12 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
-import java.util.Set;
-import java.util.TreeMap;
 import java.util.SortedMap;
+import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell.Type;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Consistency;
@@ -358,7 +358,7 @@ public class MetaTableAccessor {
       String regionEncodedName) throws IOException {
     RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL,
       new SubstringComparator(regionEncodedName));
-    Scan scan = getMetaScan(connection, 1);
+    Scan scan = getMetaScan(connection.getConfiguration(), 1);
     scan.setFilter(rowFilter);
     try (Table table = getMetaHTable(connection);
       ResultScanner resultScanner = table.getScanner(scan)) {
@@ -563,25 +563,23 @@ public class MetaTableAccessor {
    * @param tableName bytes of table's name
    * @return configured Scan object
    */
-  public static Scan getScanForTableName(Connection connection, TableName tableName) {
+  public static Scan getScanForTableName(Configuration conf, TableName tableName) {
     // Start key is just the table name with delimiters
     byte[] startKey = getTableStartRowForMeta(tableName, QueryType.REGION);
     // Stop key appends the smallest possible char to the table name
     byte[] stopKey = getTableStopRowForMeta(tableName, QueryType.REGION);
-    Scan scan = getMetaScan(connection, -1);
+    Scan scan = getMetaScan(conf, -1);
     scan.setStartRow(startKey);
     scan.setStopRow(stopKey);
     return scan;
   }

-  private static Scan getMetaScan(Connection connection, int rowUpperLimit) {
+  private static Scan getMetaScan(Configuration conf, int rowUpperLimit) {
     Scan scan = new Scan();
-    int scannerCaching = connection.getConfiguration()
-        .getInt(HConstants.HBASE_META_SCANNER_CACHING,
-          HConstants.DEFAULT_HBASE_META_SCANNER_CACHING);
-    if (connection.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS,
-      HConstants.DEFAULT_USE_META_REPLICAS)) {
+    int scannerCaching = conf.getInt(HConstants.HBASE_META_SCANNER_CACHING,
+      HConstants.DEFAULT_HBASE_META_SCANNER_CACHING);
+    if (conf.getBoolean(HConstants.USE_META_REPLICAS, HConstants.DEFAULT_USE_META_REPLICAS)) {
       scan.setConsistency(Consistency.TIMELINE);
     }
     if (rowUpperLimit > 0) {
@@ -591,6 +589,7 @@ public class MetaTableAccessor {
     scan.setCaching(scannerCaching);
     return scan;
   }
+
   /**
    * Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
    * @param connection connection we're using
@@ -774,7 +773,7 @@ public class MetaTableAccessor {
       @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows,
       final Visitor visitor) throws IOException {
     int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE;
-    Scan scan = getMetaScan(connection, rowUpperLimit);
+    Scan scan = getMetaScan(connection.getConfiguration(), rowUpperLimit);

     for (byte[] family : type.getFamilies()) {
       scan.addFamily(family);
@@ -824,7 +823,7 @@ public class MetaTableAccessor {
   private static RegionInfo getClosestRegionInfo(Connection connection,
       @NonNull final TableName tableName, @NonNull final byte[] row) throws IOException {
     byte[] searchRow = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
-    Scan scan = getMetaScan(connection, 1);
+    Scan scan = getMetaScan(connection.getConfiguration(), 1);
     scan.setReversed(true);
     scan.withStartRow(searchRow);
     try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) {
@@ -887,8 +886,7 @@ public class MetaTableAccessor {
    * @param replicaId the replicaId of the region
    * @return a byte[] for state qualifier
    */
-  @VisibleForTesting
-  static byte[] getRegionStateColumn(int replicaId) {
+  public static byte[] getRegionStateColumn(int replicaId) {
     return replicaId == 0 ? HConstants.STATE_QUALIFIER
       : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
         + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
@@ -1419,35 +1417,6 @@ public class MetaTableAccessor {
     }
   }

-  /**
-   * Deletes some replica columns corresponding to replicas for the passed rows
-   * @param metaRows rows in hbase:meta
-   * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
-   * @param numReplicasToRemove how many replicas to remove
-   * @param connection connection we're using to access meta table
-   */
-  public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
-    int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
-    throws IOException {
-    int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
-    for (byte[] row : metaRows) {
-      long now = EnvironmentEdgeManager.currentTime();
-      Delete deleteReplicaLocations = new Delete(row);
-      for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
-        deleteReplicaLocations.addColumns(getCatalogFamily(),
-          getServerColumn(i), now);
-        deleteReplicaLocations.addColumns(getCatalogFamily(),
-          getSeqNumColumn(i), now);
-        deleteReplicaLocations.addColumns(getCatalogFamily(),
-          getStartCodeColumn(i), now);
-        deleteReplicaLocations.addColumns(getCatalogFamily(), getServerNameColumn(i), now);
-        deleteReplicaLocations.addColumns(getCatalogFamily(), getRegionStateColumn(i), now);
-      }
-      deleteFromMetaTable(connection, deleteReplicaLocations);
-    }
-  }
-
   private static Put addRegionStateToPut(Put put, RegionState.State state) throws IOException {
     put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
       .setRow(put.getRow())
@@ -2030,7 +1999,7 @@ public class MetaTableAccessor {
       .build());
   }

-  private static Put addEmptyLocation(Put p, int replicaId) throws IOException {
+  public static Put addEmptyLocation(Put p, int replicaId) throws IOException {
     CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
     return p.add(builder.clear()
       .setRow(p.getRow())
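
The hunks above change getScanForTableName and getMetaScan to take a Configuration rather than a Connection, so a meta scan can now be assembled without holding a live cluster connection; remaining Connection-based call sites simply pass connection.getConfiguration(). A minimal usage sketch under that assumption (the table name here is illustrative, not part of this patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.MetaTableAccessor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Scan;

  // Scanner caching and the meta-replicas flag are read straight from conf;
  // no Connection is needed until the scan is actually executed.
  Configuration conf = HBaseConfiguration.create();
  Scan metaScan = MetaTableAccessor.getScanForTableName(conf, TableName.valueOf("t1"));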


@@ -18,11 +18,11 @@
 package org.apache.hadoop.hbase.master.assignment;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.SortedMap;
 import java.util.TreeMap;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
@@ -32,9 +32,13 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -59,6 +63,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 @InterfaceAudience.Private
 public class RegionStateStore {
   private static final Logger LOG = LoggerFactory.getLogger(RegionStateStore.class);
+  private static final Logger METALOG = LoggerFactory.getLogger("org.apache.hadoop.hbase.META");

   /** The delimiter for meta columns for replicaIds &gt; 0 */
   protected static final char META_REPLICA_ID_DELIMITER = '_';
@@ -220,7 +225,8 @@ public class RegionStateStore {
   private void updateRegionLocation(RegionInfo regionInfo, State state, Put put)
       throws IOException {
-    try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
+    try (Table table = getMetaTable()) {
+      debugLogMutation(put);
       table.put(put);
     } catch (IOException e) {
       // TODO: Revist!!!! Means that if a server is loaded, then we will abort our host!
@@ -240,6 +246,10 @@ public class RegionStateStore {
     return maxSeqId > 0 ? maxSeqId + 1 : HConstants.NO_SEQNUM;
   }

+  private Table getMetaTable() throws IOException {
+    return master.getConnection().getTable(TableName.META_TABLE_NAME);
+  }
+
   // ============================================================================================
   //  Update Region Splitting State helpers
   // ============================================================================================
@@ -280,6 +290,83 @@ public class RegionStateStore {
     MetaTableAccessor.deleteRegionInfos(master.getConnection(), regions);
   }

+  /**
+   * Update region replicas if necessary by adding new replica locations or removing unused region
+   * replicas
+   */
+  public void updateRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount)
+      throws IOException {
+    if (newReplicaCount < oldReplicaCount) {
+      removeRegionReplicas(tableName, oldReplicaCount, newReplicaCount);
+    } else if (newReplicaCount > oldReplicaCount) {
+      addRegionReplicas(tableName, oldReplicaCount, newReplicaCount);
+    }
+  }
+
+  private Scan getScanForUpdateRegionReplicas(TableName tableName) {
+    return MetaTableAccessor.getScanForTableName(master.getConfiguration(), tableName)
+      .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+  }
+
+  private void removeRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount)
+      throws IOException {
+    Scan scan = getScanForUpdateRegionReplicas(tableName);
+    List<Delete> deletes = new ArrayList<>();
+    long now = EnvironmentEdgeManager.currentTime();
+    try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) {
+      for (;;) {
+        Result result = scanner.next();
+        if (result == null) {
+          break;
+        }
+        RegionInfo primaryRegionInfo = MetaTableAccessor.getRegionInfo(result);
+        if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) {
+          continue;
+        }
+        Delete delete = new Delete(result.getRow());
+        for (int i = newReplicaCount; i < oldReplicaCount; i++) {
+          delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(i), now);
+          delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(i), now);
+          delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(i),
+            now);
+          delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerNameColumn(i),
+            now);
+          delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getRegionStateColumn(i),
+            now);
+        }
+        deletes.add(delete);
+      }
+      debugLogMutations(deletes);
+      metaTable.delete(deletes);
+    }
+  }
+
+  private void addRegionReplicas(TableName tableName, int oldReplicaCount, int newReplicaCount)
+      throws IOException {
+    Scan scan = getScanForUpdateRegionReplicas(tableName);
+    List<Put> puts = new ArrayList<>();
+    long now = EnvironmentEdgeManager.currentTime();
+    try (Table metaTable = getMetaTable(); ResultScanner scanner = metaTable.getScanner(scan)) {
+      for (;;) {
+        Result result = scanner.next();
+        if (result == null) {
+          break;
+        }
+        RegionInfo primaryRegionInfo = MetaTableAccessor.getRegionInfo(result);
+        if (primaryRegionInfo == null || primaryRegionInfo.isSplitParent()) {
+          continue;
+        }
+        Put put = new Put(result.getRow(), now);
+        for (int i = oldReplicaCount; i < newReplicaCount; i++) {
+          MetaTableAccessor.addEmptyLocation(put, i);
+        }
+        puts.add(put);
+      }
+      debugLogMutations(puts);
+      metaTable.put(puts);
+    }
+  }
+
   // ==========================================================================
   //  Table Descriptors helpers
   // ==========================================================================
@@ -332,4 +419,19 @@ public class RegionStateStore {
       : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
         + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
   }
+
+  private static void debugLogMutations(List<? extends Mutation> mutations) throws IOException {
+    if (!METALOG.isDebugEnabled()) {
+      return;
+    }
+    // Logging each mutation in separate line makes it easier to see diff between them visually
+    // because of common starting indentation.
+    for (Mutation mutation : mutations) {
+      debugLogMutation(mutation);
+    }
+  }
+
+  private static void debugLogMutation(Mutation p) throws IOException {
+    METALOG.debug("{} {}", p.getClass().getSimpleName(), p.toJSON());
+  }
 }
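
The new updateRegionReplicas entry point is what ModifyTableProcedure (below) and the relocated test call into: it scans hbase:meta once per table and issues batched Deletes or Puts for the affected replica columns, mirroring each mutation to the org.apache.hadoop.hbase.META logger when debug logging is enabled for that logger name. A hypothetical caller, assuming a MasterServices handle named master and an illustrative table name:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.master.assignment.RegionStateStore;

  // Shrink a table from three replicas to one: replica columns 1 and 2 are
  // deleted from every non-split-parent row of the table in hbase:meta.
  RegionStateStore store = master.getAssignmentManager().getRegionStateStore();
  store.updateRegionReplicas(TableName.valueOf("t1"), 3, 1);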


@@ -29,6 +29,7 @@ import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.stream.Collectors;
+import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
+import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
@@ -190,8 +192,8 @@ public class MetaFixer {
         // Add replicas if needed
         // we need to create regions with replicaIds starting from 1
-        List<RegionInfo> newRegions = RegionReplicaUtil.addReplicas(
-          Collections.singletonList(regionInfo), 1, td.getRegionReplication());
+        List<RegionInfo> newRegions = RegionReplicaUtil
+          .addReplicas(Collections.singletonList(regionInfo), 1, td.getRegionReplication());

         // Add regions to META
         MetaTableAccessor.addRegionsToMeta(masterServices.getConnection(), newRegions,
@@ -199,12 +201,13 @@ public class MetaFixer {
         // Setup replication for region replicas if needed
         if (td.getRegionReplication() > 1) {
-          ServerRegionReplicaUtil.setupRegionReplicaReplication(
-            masterServices.getConfiguration());
+          ServerRegionReplicaUtil.setupRegionReplicaReplication(masterServices);
         }
-        return Either.<List<RegionInfo>, IOException>ofLeft(newRegions);
+        return Either.<List<RegionInfo>, IOException> ofLeft(newRegions);
       } catch (IOException e) {
-        return Either.<List<RegionInfo>, IOException>ofRight(e);
+        return Either.<List<RegionInfo>, IOException> ofRight(e);
+      } catch (ReplicationException e) {
+        return Either.<List<RegionInfo>, IOException> ofRight(new HBaseIOException(e));
       }
     })
     .collect(Collectors.toList());


@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -343,23 +345,26 @@ public class CreateTableProcedure
   }

   protected static List<RegionInfo> addTableToMeta(final MasterProcedureEnv env,
-      final TableDescriptor tableDescriptor,
-      final List<RegionInfo> regions) throws IOException {
+      final TableDescriptor tableDescriptor, final List<RegionInfo> regions) throws IOException {
     assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions;

     ProcedureSyncWait.waitMetaRegions(env);

     // Add replicas if needed
     // we need to create regions with replicaIds starting from 1
-    List<RegionInfo> newRegions = RegionReplicaUtil.addReplicas(regions, 1,
-      tableDescriptor.getRegionReplication());
+    List<RegionInfo> newRegions =
+      RegionReplicaUtil.addReplicas(regions, 1, tableDescriptor.getRegionReplication());

     // Add regions to META
     addRegionsToMeta(env, tableDescriptor, newRegions);

     // Setup replication for region replicas if needed
     if (tableDescriptor.getRegionReplication() > 1) {
-      ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
+      try {
+        ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterServices());
+      } catch (ReplicationException e) {
+        throw new HBaseIOException(e);
+      }
     }
     return newRegions;
   }
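
As the in-code comment above notes, addReplicas expands each primary RegionInfo into replica RegionInfos starting from replicaId 1 (replicaId 0 being the primary itself). A condensed sketch of that call shape, assuming regions holds the primaries and the table descriptor requests replication 3:

  // With region replication 3, each primary in regions yields two additional
  // RegionInfo instances carrying replicaIds 1 and 2; all of them are then
  // written to hbase:meta together.
  List<RegionInfo> withReplicas =
    RegionReplicaUtil.addReplicas(regions, 1, tableDescriptor.getRegionReplication());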


@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -40,12 +39,14 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -357,22 +358,29 @@ public class DeleteTableProcedure
   /**
    * There may be items for this table still up in hbase:meta in the case where the info:regioninfo
    * column was empty because of some write error. Remove ALL rows from hbase:meta that have to do
-   * with this table. See HBASE-12980.
+   * with this table.
+   * <p/>
+   * See HBASE-12980.
    */
   private static void cleanRegionsInMeta(final MasterProcedureEnv env, final TableName tableName)
       throws IOException {
-    Connection connection = env.getMasterServices().getConnection();
-    Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName);
-    try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
-      List<Delete> deletes = new ArrayList<>();
-      try (ResultScanner resScanner = metaTable.getScanner(tableScan)) {
-        for (Result result : resScanner) {
-          deletes.add(new Delete(result.getRow()));
-        }
-      }
+    Scan tableScan = MetaTableAccessor.getScanForTableName(env.getMasterConfiguration(), tableName)
+      .setFilter(new KeyOnlyFilter());
+    long now = EnvironmentEdgeManager.currentTime();
+    List<Delete> deletes = new ArrayList<>();
+    try (
+      Table metaTable = env.getMasterServices().getConnection().getTable(TableName.META_TABLE_NAME);
+      ResultScanner scanner = metaTable.getScanner(tableScan)) {
+      for (;;) {
+        Result result = scanner.next();
+        if (result == null) {
+          break;
+        }
+        deletes.add(new Delete(result.getRow(), now));
+      }
       if (!deletes.isEmpty()) {
-        LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from "
-          + TableName.META_TABLE_NAME);
+        LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from " +
+          TableName.META_TABLE_NAME);
         metaTable.delete(deletes);
       }
     }
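
Two details of the rewritten cleanRegionsInMeta are worth noting: the KeyOnlyFilter strips cell values server-side, since only row keys are needed to build the Deletes, and every Delete is stamped with a single timestamp taken once before the scan, so all vestigial rows are removed as of the same point in time. The same keys-only pattern, sketched for a generic caller (conn and tableName are assumed to be in scope):

  Scan scan = MetaTableAccessor.getScanForTableName(conn.getConfiguration(), tableName)
    .setFilter(new KeyOnlyFilter()); // values dropped server-side, keys still returned
  long now = EnvironmentEdgeManager.currentTime();
  List<Delete> deletes = new ArrayList<>();
  try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
      ResultScanner scanner = meta.getScanner(scan)) {
    for (Result r; (r = scanner.next()) != null;) {
      deletes.add(new Delete(r.getRow(), now)); // covers all cells with ts <= now
    }
    meta.delete(deletes);
  }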


@@ -21,28 +21,21 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import org.apache.hadoop.hbase.ConcurrentTableModificationException;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -339,8 +332,6 @@ public class ModifyTableProcedure
    * Action before modifying table.
    * @param env MasterProcedureEnv
    * @param state the procedure state
-   * @throws IOException
-   * @throws InterruptedException
    */
   private void preModify(final MasterProcedureEnv env, final ModifyTableState state)
       throws IOException, InterruptedException {
@@ -350,7 +341,6 @@ public class ModifyTableProcedure
   /**
    * Update descriptor
    * @param env MasterProcedureEnv
-   * @throws IOException
    **/
   private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
     env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor);
@@ -359,7 +349,6 @@ public class ModifyTableProcedure
   /**
    * Removes from hdfs the families that are not longer present in the new table descriptor.
    * @param env MasterProcedureEnv
-   * @throws IOException
    */
   private void deleteFromFs(final MasterProcedureEnv env,
       final TableDescriptor oldTableDescriptor, final TableDescriptor newTableDescriptor)
@@ -379,61 +368,28 @@ public class ModifyTableProcedure
   /**
    * update replica column families if necessary.
-   * @param env MasterProcedureEnv
-   * @throws IOException
    */
-  private void updateReplicaColumnsIfNeeded(
-      final MasterProcedureEnv env,
-      final TableDescriptor oldTableDescriptor,
-      final TableDescriptor newTableDescriptor) throws IOException {
+  private void updateReplicaColumnsIfNeeded(MasterProcedureEnv env,
+      TableDescriptor oldTableDescriptor, TableDescriptor newTableDescriptor) throws IOException {
     final int oldReplicaCount = oldTableDescriptor.getRegionReplication();
     final int newReplicaCount = newTableDescriptor.getRegionReplication();
-    if (newReplicaCount < oldReplicaCount) {
-      Set<byte[]> tableRows = new HashSet<>();
-      Connection connection = env.getMasterServices().getConnection();
-      Scan scan = MetaTableAccessor.getScanForTableName(connection, getTableName());
-      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-      try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
-        ResultScanner resScanner = metaTable.getScanner(scan);
-        for (Result result : resScanner) {
-          tableRows.add(result.getRow());
-        }
-        MetaTableAccessor.removeRegionReplicasFromMeta(
-          tableRows,
-          newReplicaCount,
-          oldReplicaCount - newReplicaCount,
-          connection);
-      }
-    }
-    if (newReplicaCount > oldReplicaCount) {
-      Connection connection = env.getMasterServices().getConnection();
-      // Get the existing table regions
-      List<RegionInfo> existingTableRegions =
-        MetaTableAccessor.getTableRegions(connection, getTableName());
-      // add all the new entries to the meta table
-      addRegionsToMeta(env, newTableDescriptor, existingTableRegions);
-      if (oldReplicaCount <= 1) {
-        // The table has been newly enabled for replica. So check if we need to setup
-        // region replication
-        ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
-      }
-    }
+    env.getAssignmentManager().getRegionStateStore().updateRegionReplicas(getTableName(),
+      oldReplicaCount, newReplicaCount);
+    if (newReplicaCount > oldReplicaCount && oldReplicaCount <= 1) {
+      // The table has been newly enabled for replica. So check if we need to setup
+      // region replication
+      try {
+        ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterServices());
+      } catch (ReplicationException e) {
+        throw new HBaseIOException(e);
+      }
+    }
   }

-  private static void addRegionsToMeta(final MasterProcedureEnv env,
-      final TableDescriptor tableDescriptor, final List<RegionInfo> regionInfos)
-      throws IOException {
-    MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(), regionInfos,
-      tableDescriptor.getRegionReplication());
-  }
-
   /**
    * Action after modifying table.
    * @param env MasterProcedureEnv
    * @param state the procedure state
-   * @throws IOException
-   * @throws InterruptedException
    */
   private void postModify(final MasterProcedureEnv env, final ModifyTableState state)
       throws IOException, InterruptedException {
@@ -444,8 +400,6 @@ public class ModifyTableProcedure
    * Coprocessor Action.
    * @param env MasterProcedureEnv
    * @param state the procedure state
-   * @throws IOException
-   * @throws InterruptedException
    */
   private void runCoprocessorAction(final MasterProcedureEnv env, final ModifyTableState state)
       throws IOException, InterruptedException {


@@ -22,16 +22,15 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
@@ -155,34 +154,24 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil {
   /**
    * Create replication peer for replicating to region replicas if needed.
-   * @param conf configuration to use
-   * @throws IOException
+   * <p/>
+   * This methods should only be called at master side.
    */
-  public static void setupRegionReplicaReplication(Configuration conf) throws IOException {
-    if (!isRegionReplicaReplicationEnabled(conf)) {
+  public static void setupRegionReplicaReplication(MasterServices services)
+    throws IOException, ReplicationException {
+    if (!isRegionReplicaReplicationEnabled(services.getConfiguration())) {
       return;
     }
-
-    try (Connection connection = ConnectionFactory.createConnection(conf);
-      Admin admin = connection.getAdmin()) {
-      ReplicationPeerConfig peerConfig = null;
-      try {
-        peerConfig = admin.getReplicationPeerConfig(REGION_REPLICA_REPLICATION_PEER);
-      } catch (ReplicationPeerNotFoundException e) {
-        LOG.warn(
-          "Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER + " not exist",
-          e);
-      }
-      if (peerConfig == null) {
-        LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER
-          + " not exist. Creating...");
-        peerConfig = new ReplicationPeerConfig();
-        peerConfig.setClusterKey(ZKConfig.getZooKeeperClusterKey(conf));
-        peerConfig.setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName());
-        admin.addReplicationPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig);
-      }
-    }
+    if (services.getReplicationPeerManager().getPeerConfig(REGION_REPLICA_REPLICATION_PEER)
+      .isPresent()) {
+      return;
+    }
+    LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER +
+      " not exist. Creating...");
+    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
+      .setClusterKey(ZKConfig.getZooKeeperClusterKey(services.getConfiguration()))
+      .setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName()).build();
+    services.addReplicationPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig, true);
   }

 public static boolean isRegionReplicaReplicationEnabled(Configuration conf) {
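
setupRegionReplicaReplication is now strictly a master-side operation: it consults the master's own ReplicationPeerManager instead of opening a short-lived Connection/Admin pair, and it propagates ReplicationException to the caller rather than logging around it. As the procedure hunks above show, callers translate that checked exception into an IOException; a condensed sketch of the calling convention (services is the MasterServices instance available in master-side code):

  try {
    ServerRegionReplicaUtil.setupRegionReplicaReplication(services);
  } catch (ReplicationException e) {
    // Procedures run on IOException semantics, so wrap the checked cause.
    throw new HBaseIOException(e);
  }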


@@ -2651,7 +2651,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         .setStartKey(startKeys[i])
         .setEndKey(startKeys[j])
         .build();
-      MetaTableAccessor.addRegionToMeta(getConnection(), hri);
+      MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1);
       newRegions.add(hri);
     }


@@ -32,12 +32,12 @@ import static org.mockito.Mockito.verify;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -76,7 +76,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

 /**
  * Test {@link org.apache.hadoop.hbase.MetaTableAccessor}.
@@ -218,9 +217,11 @@ public class TestMetaTableAccessor {
       }
     };
     MetaTask writer = new MetaTask(connection, "writer") {
+
       @Override
-      void metaTask() throws Throwable {
-        MetaTableAccessor.addRegionToMeta(connection, regions.get(0));
+      void metaTask() throws IOException {
+        MetaTableAccessor.addRegionsToMeta(connection, Collections.singletonList(regions.get(0)),
+          1);
         LOG.info("Wrote " + regions.get(0).getEncodedName());
       }
     };
@@ -538,44 +539,6 @@ public class TestMetaTableAccessor {
     assertEquals(0, startCodeCell.getValueLength());
   }

-  @Test
-  public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws IOException {
-    long regionId = System.currentTimeMillis();
-    RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
-      .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
-      .setRegionId(regionId).setReplicaId(0).build();
-
-    Table meta = MetaTableAccessor.getMetaHTable(connection);
-    try {
-      List<RegionInfo> regionInfos = Lists.newArrayList(primary);
-      MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 3);
-
-      MetaTableAccessor.removeRegionReplicasFromMeta(Sets.newHashSet(primary.getRegionName()), 1, 2,
-        connection);
-
-      Get get = new Get(primary.getRegionName());
-      Result result = meta.get(get);
-      for (int replicaId = 0; replicaId < 3; replicaId++) {
-        Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
-          MetaTableAccessor.getServerColumn(replicaId));
-        Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
-          MetaTableAccessor.getStartCodeColumn(replicaId));
-        Cell stateCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
-          MetaTableAccessor.getRegionStateColumn(replicaId));
-        Cell snCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
-          MetaTableAccessor.getServerNameColumn(replicaId));
-        if (replicaId == 0) {
-          assertNotNull(stateCell);
-        } else {
-          assertNull(serverCell);
-          assertNull(startCodeCell);
-          assertNull(stateCell);
-          assertNull(snCell);
-        }
-      }
-    } finally {
-      meta.close();
-    }
-  }
-
   @Test
   public void testMetaLocationForRegionReplicasIsAddedAtTableCreation() throws IOException {
     long regionId = System.currentTimeMillis();


@@ -98,7 +98,7 @@ public class TestEnableTable {
     // content from a few of the rows.
     try (Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
       try (ResultScanner scanner = metaTable.getScanner(
-        MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(), tableName))) {
+        MetaTableAccessor.getScanForTableName(TEST_UTIL.getConfiguration(), tableName))) {
         for (Result result : scanner) {
           // Just delete one row.
           Delete d = new Delete(result.getRow());
@@ -118,8 +118,8 @@ public class TestEnableTable {
         fail("Got an exception while deleting " + tableName);
       }
       int rowCount = 0;
-      try (ResultScanner scanner = metaTable
-        .getScanner(MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(), tableName))) {
+      try (ResultScanner scanner = metaTable.getScanner(
+        MetaTableAccessor.getScanForTableName(TEST_UTIL.getConfiguration(), tableName))) {
         for (Result result : scanner) {
           LOG.info("Found when none expected: " + result);
           rowCount++;


@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;

+import java.util.Collections;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -299,7 +300,8 @@ public class TestAssignmentManager extends TestAssignmentManagerBase {
     RegionInfo hri = createRegionInfo(tableName, 1);
     assertNull("RegionInfo was just instantiated by the test, but "
       + "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri));
-    MetaTableAccessor.addRegionToMeta(this.util.getConnection(), hri);
+    MetaTableAccessor.addRegionsToMeta(this.util.getConnection(), Collections.singletonList(hri),
+      1);
     assertNull("RegionInfo was manually added in META, but "
       + "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri));
     hri = am.loadRegionFromMeta(hri.getEncodedName());


@@ -19,17 +19,25 @@ package org.apache.hadoop.hbase.master.assignment;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;

+import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNameTestRule;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.RegionState;
@@ -41,10 +49,11 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

 @Category({ MasterTests.class, MediumTests.class })
@@ -54,10 +63,11 @@ public class TestRegionStateStore {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestRegionStateStore.class);

-  private static final Logger LOG = LoggerFactory.getLogger(TestRegionStateStore.class);
-
   private static HBaseTestingUtility UTIL = new HBaseTestingUtility();

+  @Rule
+  public final TableNameTestRule name = new TableNameTestRule();
+
   @BeforeClass
   public static void beforeClass() throws Exception {
     UTIL.startMiniCluster();
@@ -70,7 +80,7 @@ public class TestRegionStateStore {
   @Test
   public void testVisitMetaForRegionExistingRegion() throws Exception {
-    final TableName tableName = TableName.valueOf("testVisitMetaForRegion");
+    final TableName tableName = name.getTableName();
     UTIL.createTable(tableName, "cf");
     final List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
     final String encodedName = regions.get(0).getRegionInfo().getEncodedName();
@@ -90,7 +100,7 @@ public class TestRegionStateStore {
   @Test
   public void testVisitMetaForBadRegionState() throws Exception {
-    final TableName tableName = TableName.valueOf("testVisitMetaForBadRegionState");
+    final TableName tableName = name.getTableName();
     UTIL.createTable(tableName, "cf");
     final List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
     final String encodedName = regions.get(0).getRegionInfo().getEncodedName();
@@ -136,4 +146,41 @@ public class TestRegionStateStore {
     });
     assertFalse("Visitor has been called, but it shouldn't.", visitorCalled.get());
   }
+
+  @Test
+  public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws IOException {
+    long regionId = System.currentTimeMillis();
+    TableName tableName = name.getTableName();
+    RegionInfo primary = RegionInfoBuilder.newBuilder(tableName)
+      .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
+      .setRegionId(regionId).setReplicaId(0).build();
+    try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) {
+      List<RegionInfo> regionInfos = Lists.newArrayList(primary);
+      MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3);
+      final RegionStateStore regionStateStore =
+        UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
+      regionStateStore.updateRegionReplicas(tableName, 3, 1);
+      Get get = new Get(primary.getRegionName());
+      Result result = meta.get(get);
+      for (int replicaId = 0; replicaId < 3; replicaId++) {
+        Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
+          MetaTableAccessor.getServerColumn(replicaId));
+        Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
+          MetaTableAccessor.getStartCodeColumn(replicaId));
+        Cell stateCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
+          MetaTableAccessor.getRegionStateColumn(replicaId));
+        Cell snCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
+          MetaTableAccessor.getServerNameColumn(replicaId));
+        if (replicaId == 0) {
+          assertNotNull(stateCell);
+        } else {
+          assertNull(serverCell);
+          assertNull(startCodeCell);
+          assertNull(stateCell);
+          assertNull(snCell);
+        }
+      }
+    }
+  }
 }